Diffstat (limited to 'arch/powerpc')
638 files changed, 23565 insertions, 11467 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 631e5a0fb6ab..2729c6663d8a 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -1,15 +1,13 @@ | |||
1 | # For a description of the syntax of this configuration file, | ||
2 | # see Documentation/kbuild/kconfig-language.txt. | ||
3 | # | ||
4 | |||
5 | mainmenu "Linux/PowerPC Kernel Configuration" | ||
6 | |||
7 | source "arch/powerpc/platforms/Kconfig.cputype" | 1 | source "arch/powerpc/platforms/Kconfig.cputype" |
8 | 2 | ||
9 | config PPC32 | 3 | config PPC32 |
10 | bool | 4 | bool |
11 | default y if !PPC64 | 5 | default y if !PPC64 |
12 | 6 | ||
7 | config 32BIT | ||
8 | bool | ||
9 | default y if PPC32 | ||
10 | |||
13 | config 64BIT | 11 | config 64BIT |
14 | bool | 12 | bool |
15 | default y if PPC64 | 13 | default y if PPC64 |
@@ -22,6 +20,9 @@ config WORD_SIZE | |||
22 | config ARCH_PHYS_ADDR_T_64BIT | 20 | config ARCH_PHYS_ADDR_T_64BIT |
23 | def_bool PPC64 || PHYS_64BIT | 21 | def_bool PPC64 || PHYS_64BIT |
24 | 22 | ||
23 | config ARCH_DMA_ADDR_T_64BIT | ||
24 | def_bool ARCH_PHYS_ADDR_T_64BIT | ||
25 | |||
25 | config MMU | 26 | config MMU |
26 | bool | 27 | bool |
27 | default y | 28 | default y |
@@ -35,24 +36,12 @@ config GENERIC_TIME_VSYSCALL | |||
35 | config GENERIC_CLOCKEVENTS | 36 | config GENERIC_CLOCKEVENTS |
36 | def_bool y | 37 | def_bool y |
37 | 38 | ||
38 | config GENERIC_HARDIRQS | ||
39 | bool | ||
40 | default y | ||
41 | |||
42 | config GENERIC_HARDIRQS_NO__DO_IRQ | ||
43 | bool | ||
44 | default y | ||
45 | |||
46 | config HAVE_SETUP_PER_CPU_AREA | 39 | config HAVE_SETUP_PER_CPU_AREA |
47 | def_bool PPC64 | 40 | def_bool PPC64 |
48 | 41 | ||
49 | config NEED_PER_CPU_EMBED_FIRST_CHUNK | 42 | config NEED_PER_CPU_EMBED_FIRST_CHUNK |
50 | def_bool PPC64 | 43 | def_bool PPC64 |
51 | 44 | ||
52 | config IRQ_PER_CPU | ||
53 | bool | ||
54 | default y | ||
55 | |||
56 | config NR_IRQS | 45 | config NR_IRQS |
57 | int "Number of virtual interrupt numbers" | 46 | int "Number of virtual interrupt numbers" |
58 | range 32 32768 | 47 | range 32 32768 |
@@ -102,10 +91,6 @@ config GENERIC_HWEIGHT | |||
102 | bool | 91 | bool |
103 | default y | 92 | default y |
104 | 93 | ||
105 | config GENERIC_FIND_NEXT_BIT | ||
106 | bool | ||
107 | default y | ||
108 | |||
109 | config GENERIC_GPIO | 94 | config GENERIC_GPIO |
110 | bool | 95 | bool |
111 | help | 96 | help |
@@ -118,7 +103,7 @@ config PPC | |||
118 | bool | 103 | bool |
119 | default y | 104 | default y |
120 | select OF | 105 | select OF |
121 | select OF_FLATTREE | 106 | select OF_EARLY_FLATTREE |
122 | select HAVE_FTRACE_MCOUNT_RECORD | 107 | select HAVE_FTRACE_MCOUNT_RECORD |
123 | select HAVE_DYNAMIC_FTRACE | 108 | select HAVE_DYNAMIC_FTRACE |
124 | select HAVE_FUNCTION_TRACER | 109 | select HAVE_FUNCTION_TRACER |
@@ -138,9 +123,17 @@ config PPC | |||
138 | select HAVE_OPROFILE | 123 | select HAVE_OPROFILE |
139 | select HAVE_SYSCALL_WRAPPERS if PPC64 | 124 | select HAVE_SYSCALL_WRAPPERS if PPC64 |
140 | select GENERIC_ATOMIC64 if PPC32 | 125 | select GENERIC_ATOMIC64 if PPC32 |
126 | select HAVE_IRQ_WORK | ||
141 | select HAVE_PERF_EVENTS | 127 | select HAVE_PERF_EVENTS |
142 | select HAVE_REGS_AND_STACK_ACCESS_API | 128 | select HAVE_REGS_AND_STACK_ACCESS_API |
143 | select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 | 129 | select HAVE_HW_BREAKPOINT if PERF_EVENTS && PPC_BOOK3S_64 |
130 | select HAVE_GENERIC_HARDIRQS | ||
131 | select HAVE_SPARSE_IRQ | ||
132 | select IRQ_PER_CPU | ||
133 | select GENERIC_IRQ_SHOW | ||
134 | select GENERIC_IRQ_SHOW_LEVEL | ||
135 | select HAVE_RCU_TABLE_FREE if SMP | ||
136 | select HAVE_SYSCALL_TRACEPOINTS | ||
144 | 137 | ||
145 | config EARLY_PRINTK | 138 | config EARLY_PRINTK |
146 | bool | 139 | bool |
@@ -194,6 +187,12 @@ config SYS_SUPPORTS_APM_EMULATION | |||
194 | default y if PMAC_APM_EMU | 187 | default y if PMAC_APM_EMU |
195 | bool | 188 | bool |
196 | 189 | ||
190 | config EPAPR_BOOT | ||
191 | bool | ||
192 | help | ||
193 | Used to allow a board to specify it wants an ePAPR compliant wrapper. | ||
194 | default n | ||
195 | |||
197 | config DEFAULT_UIMAGE | 196 | config DEFAULT_UIMAGE |
198 | bool | 197 | bool |
199 | help | 198 | help |
@@ -210,7 +209,7 @@ config ARCH_HIBERNATION_POSSIBLE | |||
210 | config ARCH_SUSPEND_POSSIBLE | 209 | config ARCH_SUSPEND_POSSIBLE |
211 | def_bool y | 210 | def_bool y |
212 | depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \ | 211 | depends on ADB_PMU || PPC_EFIKA || PPC_LITE5200 || PPC_83xx || \ |
213 | PPC_85xx || PPC_86xx || PPC_PSERIES | 212 | (PPC_85xx && !SMP) || PPC_86xx || PPC_PSERIES || 44x || 40x |
214 | 213 | ||
215 | config PPC_DCR_NATIVE | 214 | config PPC_DCR_NATIVE |
216 | bool | 215 | bool |
@@ -390,19 +389,6 @@ config IRQ_ALL_CPUS | |||
390 | CPU. Generally saying Y is safe, although some problems have been | 389 | CPU. Generally saying Y is safe, although some problems have been |
391 | reported with SMP Power Macintoshes with this option enabled. | 390 | reported with SMP Power Macintoshes with this option enabled. |
392 | 391 | ||
393 | config SPARSE_IRQ | ||
394 | bool "Support sparse irq numbering" | ||
395 | default n | ||
396 | help | ||
397 | This enables support for sparse irqs. This is useful for distro | ||
398 | kernels that want to define a high CONFIG_NR_CPUS value but still | ||
399 | want to have low kernel memory footprint on smaller machines. | ||
400 | |||
401 | ( Sparse IRQs can also be beneficial on NUMA boxes, as they spread | ||
402 | out the irq_desc[] array in a more NUMA-friendly way. ) | ||
403 | |||
404 | If you don't know what to do here, say N. | ||
405 | |||
406 | config NUMA | 392 | config NUMA |
407 | bool "NUMA support" | 393 | bool "NUMA support" |
408 | depends on PPC64 | 394 | depends on PPC64 |
@@ -596,13 +582,11 @@ config EXTRA_TARGETS | |||
596 | 582 | ||
597 | If unsure, leave blank | 583 | If unsure, leave blank |
598 | 584 | ||
599 | if !44x || BROKEN | ||
600 | config ARCH_WANTS_FREEZER_CONTROL | 585 | config ARCH_WANTS_FREEZER_CONTROL |
601 | def_bool y | 586 | def_bool y |
602 | depends on ADB_PMU | 587 | depends on ADB_PMU |
603 | 588 | ||
604 | source kernel/power/Kconfig | 589 | source kernel/power/Kconfig |
605 | endif | ||
606 | 590 | ||
607 | config SECCOMP | 591 | config SECCOMP |
608 | bool "Enable seccomp to safely compute untrusted bytecode" | 592 | bool "Enable seccomp to safely compute untrusted bytecode" |
@@ -683,13 +667,25 @@ config FSL_PMC | |||
683 | Freescale MPC85xx/MPC86xx power management controller support | 667 | Freescale MPC85xx/MPC86xx power management controller support |
684 | (suspend/resume). For MPC83xx see platforms/83xx/suspend.c | 668 | (suspend/resume). For MPC83xx see platforms/83xx/suspend.c |
685 | 669 | ||
670 | config PPC4xx_CPM | ||
671 | bool | ||
672 | default y | ||
673 | depends on SUSPEND && (44x || 40x) | ||
674 | help | ||
675 | PPC4xx Clock Power Management (CPM) support (suspend/resume). | ||
676 | It also enables support for two different idle states (idle-wait | ||
677 | and idle-doze). | ||
678 | |||
686 | config 4xx_SOC | 679 | config 4xx_SOC |
687 | bool | 680 | bool |
688 | 681 | ||
689 | config FSL_LBC | 682 | config FSL_LBC |
690 | bool | 683 | bool "Freescale Local Bus support" |
684 | depends on FSL_SOC | ||
691 | help | 685 | help |
692 | Freescale Localbus support | 686 | Enables reporting of errors from the Freescale local bus |
687 | controller. Also contains some common code used by | ||
688 | drivers for specific local bus peripherals. | ||
693 | 689 | ||
694 | config FSL_GTM | 690 | config FSL_GTM |
695 | bool | 691 | bool |
@@ -777,11 +773,19 @@ config HAS_RAPIDIO | |||
777 | 773 | ||
778 | config RAPIDIO | 774 | config RAPIDIO |
779 | bool "RapidIO support" | 775 | bool "RapidIO support" |
780 | depends on HAS_RAPIDIO | 776 | depends on HAS_RAPIDIO || PCI |
781 | help | 777 | help |
782 | If you say Y here, the kernel will include drivers and | 778 | If you say Y here, the kernel will include drivers and |
783 | infrastructure code to support RapidIO interconnect devices. | 779 | infrastructure code to support RapidIO interconnect devices. |
784 | 780 | ||
781 | config FSL_RIO | ||
782 | bool "Freescale Embedded SRIO Controller support" | ||
783 | depends on RAPIDIO && HAS_RAPIDIO | ||
784 | default "n" | ||
785 | ---help--- | ||
786 | Include support for RapidIO controller on Freescale embedded | ||
787 | processors (MPC8548, MPC8641, etc). | ||
788 | |||
785 | source "drivers/rapidio/Kconfig" | 789 | source "drivers/rapidio/Kconfig" |
786 | 790 | ||
787 | endmenu | 791 | endmenu |
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 2d38a50e66ba..e72dcf6a421d 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -35,27 +35,6 @@ config DEBUG_STACKOVERFLOW | |||
35 | This option will cause messages to be printed if free stack space | 35 | This option will cause messages to be printed if free stack space |
36 | drops below a certain limit. | 36 | drops below a certain limit. |
37 | 37 | ||
38 | config DEBUG_STACK_USAGE | ||
39 | bool "Stack utilization instrumentation" | ||
40 | depends on DEBUG_KERNEL | ||
41 | help | ||
42 | Enables the display of the minimum amount of free stack which each | ||
43 | task has ever had available in the sysrq-T and sysrq-P debug output. | ||
44 | |||
45 | This option will slow down process creation somewhat. | ||
46 | |||
47 | config DEBUG_PER_CPU_MAPS | ||
48 | bool "Debug access to per_cpu maps" | ||
49 | depends on DEBUG_KERNEL | ||
50 | depends on SMP | ||
51 | default n | ||
52 | ---help--- | ||
53 | Say Y to verify that the per_cpu map being accessed has | ||
54 | been setup. Adds a fair amount of code to kernel memory | ||
55 | and decreases performance. | ||
56 | |||
57 | Say N if unsure. | ||
58 | |||
59 | config HCALL_STATS | 38 | config HCALL_STATS |
60 | bool "Hypervisor call instrumentation" | 39 | bool "Hypervisor call instrumentation" |
61 | depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS | 40 | depends on PPC_PSERIES && DEBUG_FS && TRACEPOINTS |
@@ -267,6 +246,11 @@ config PPC_EARLY_DEBUG_USBGECKO | |||
267 | Select this to enable early debugging for Nintendo GameCube/Wii | 246 | Select this to enable early debugging for Nintendo GameCube/Wii |
268 | consoles via an external USB Gecko adapter. | 247 | consoles via an external USB Gecko adapter. |
269 | 248 | ||
249 | config PPC_EARLY_DEBUG_WSP | ||
250 | bool "Early debugging via WSP's internal UART" | ||
251 | depends on PPC_WSP | ||
252 | select PPC_UDBG_16550 | ||
253 | |||
270 | endchoice | 254 | endchoice |
271 | 255 | ||
272 | config PPC_EARLY_DEBUG_44x_PHYSLOW | 256 | config PPC_EARLY_DEBUG_44x_PHYSLOW |
diff --git a/arch/powerpc/boot/.gitignore b/arch/powerpc/boot/.gitignore
index 3d80c3e9cf60..12da77ec0228 100644
--- a/arch/powerpc/boot/.gitignore
+++ b/arch/powerpc/boot/.gitignore
@@ -1,5 +1,4 @@ | |||
1 | addnote | 1 | addnote |
2 | dtc | ||
3 | empty.c | 2 | empty.c |
4 | hack-coff | 3 | hack-coff |
5 | infblock.c | 4 | infblock.c |
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index fae8192c8fcc..c26200b40a47 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -35,7 +35,7 @@ endif | |||
35 | 35 | ||
36 | BOOTCFLAGS += -I$(obj) -I$(srctree)/$(obj) | 36 | BOOTCFLAGS += -I$(obj) -I$(srctree)/$(obj) |
37 | 37 | ||
38 | DTS_FLAGS ?= -p 1024 | 38 | DTC_FLAGS ?= -p 1024 |
39 | 39 | ||
40 | $(obj)/4xx.o: BOOTCFLAGS += -mcpu=405 | 40 | $(obj)/4xx.o: BOOTCFLAGS += -mcpu=405 |
41 | $(obj)/ebony.o: BOOTCFLAGS += -mcpu=405 | 41 | $(obj)/ebony.o: BOOTCFLAGS += -mcpu=405 |
@@ -69,7 +69,8 @@ src-wlib := string.S crt0.S crtsavres.S stdio.c main.c \ | |||
69 | cpm-serial.c stdlib.c mpc52xx-psc.c planetcore.c uartlite.c \ | 69 | cpm-serial.c stdlib.c mpc52xx-psc.c planetcore.c uartlite.c \ |
70 | fsl-soc.c mpc8xx.c pq2.c ugecon.c | 70 | fsl-soc.c mpc8xx.c pq2.c ugecon.c |
71 | src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c \ | 71 | src-plat := of.c cuboot-52xx.c cuboot-824x.c cuboot-83xx.c cuboot-85xx.c holly.c \ |
72 | cuboot-ebony.c cuboot-hotfoot.c treeboot-ebony.c prpmc2800.c \ | 72 | cuboot-ebony.c cuboot-hotfoot.c epapr.c treeboot-ebony.c \ |
73 | prpmc2800.c \ | ||
73 | ps3-head.S ps3-hvcall.S ps3.c treeboot-bamboo.c cuboot-8xx.c \ | 74 | ps3-head.S ps3-hvcall.S ps3.c treeboot-bamboo.c cuboot-8xx.c \ |
74 | cuboot-pq2.c cuboot-sequoia.c treeboot-walnut.c \ | 75 | cuboot-pq2.c cuboot-sequoia.c treeboot-walnut.c \ |
75 | cuboot-bamboo.c cuboot-mpc7448hpc2.c cuboot-taishan.c \ | 76 | cuboot-bamboo.c cuboot-mpc7448hpc2.c cuboot-taishan.c \ |
@@ -127,7 +128,7 @@ quiet_cmd_bootas = BOOTAS $@ | |||
127 | cmd_bootas = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $< | 128 | cmd_bootas = $(CROSS32CC) -Wp,-MD,$(depfile) $(BOOTAFLAGS) -c -o $@ $< |
128 | 129 | ||
129 | quiet_cmd_bootar = BOOTAR $@ | 130 | quiet_cmd_bootar = BOOTAR $@ |
130 | cmd_bootar = $(CROSS32AR) -cr $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@ | 131 | cmd_bootar = $(CROSS32AR) -cr$(KBUILD_ARFLAGS) $@.$$$$ $(filter-out FORCE,$^); mv $@.$$$$ $@ |
131 | 132 | ||
132 | $(obj-libfdt): $(obj)/%.o: $(srctree)/scripts/dtc/libfdt/%.c FORCE | 133 | $(obj-libfdt): $(obj)/%.o: $(srctree)/scripts/dtc/libfdt/%.c FORCE |
133 | $(call if_changed_dep,bootcc) | 134 | $(call if_changed_dep,bootcc) |
@@ -182,6 +183,7 @@ image-$(CONFIG_PPC_HOLLY) += dtbImage.holly | |||
182 | image-$(CONFIG_PPC_PRPMC2800) += dtbImage.prpmc2800 | 183 | image-$(CONFIG_PPC_PRPMC2800) += dtbImage.prpmc2800 |
183 | image-$(CONFIG_PPC_ISERIES) += zImage.iseries | 184 | image-$(CONFIG_PPC_ISERIES) += zImage.iseries |
184 | image-$(CONFIG_DEFAULT_UIMAGE) += uImage | 185 | image-$(CONFIG_DEFAULT_UIMAGE) += uImage |
186 | image-$(CONFIG_EPAPR_BOOT) += zImage.epapr | ||
185 | 187 | ||
186 | # | 188 | # |
187 | # Targets which embed a device tree blob | 189 | # Targets which embed a device tree blob |
@@ -332,10 +334,8 @@ $(obj)/treeImage.%: vmlinux $(obj)/%.dtb $(wrapperbits) | |||
332 | $(call if_changed,wrap,treeboot-$*,,$(obj)/$*.dtb) | 334 | $(call if_changed,wrap,treeboot-$*,,$(obj)/$*.dtb) |
333 | 335 | ||
334 | # Rule to build device tree blobs | 336 | # Rule to build device tree blobs |
335 | DTC = $(objtree)/scripts/dtc/dtc | 337 | $(obj)/%.dtb: $(src)/dts/%.dts |
336 | 338 | $(call cmd,dtc) | |
337 | $(obj)/%.dtb: $(dtstree)/%.dts | ||
338 | $(DTC) -O dtb -o $(obj)/$*.dtb -b 0 $(DTS_FLAGS) $(dtstree)/$*.dts | ||
339 | 339 | ||
340 | # If there isn't a platform selected then just strip the vmlinux. | 340 | # If there isn't a platform selected then just strip the vmlinux. |
341 | ifeq (,$(image-y)) | 341 | ifeq (,$(image-y)) |
@@ -370,7 +370,7 @@ INSTALL := install | |||
370 | extra-installed := $(patsubst $(obj)/%, $(DESTDIR)$(WRAPPER_OBJDIR)/%, $(extra-y)) | 370 | extra-installed := $(patsubst $(obj)/%, $(DESTDIR)$(WRAPPER_OBJDIR)/%, $(extra-y)) |
371 | hostprogs-installed := $(patsubst %, $(DESTDIR)$(WRAPPER_BINDIR)/%, $(hostprogs-y)) | 371 | hostprogs-installed := $(patsubst %, $(DESTDIR)$(WRAPPER_BINDIR)/%, $(hostprogs-y)) |
372 | wrapper-installed := $(DESTDIR)$(WRAPPER_BINDIR)/wrapper | 372 | wrapper-installed := $(DESTDIR)$(WRAPPER_BINDIR)/wrapper |
373 | dts-installed := $(patsubst $(obj)/dts/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(obj)/dts/*.dts)) | 373 | dts-installed := $(patsubst $(dtstree)/%, $(DESTDIR)$(WRAPPER_DTSDIR)/%, $(wildcard $(dtstree)/*.dts)) |
374 | 374 | ||
375 | all-installed := $(extra-installed) $(hostprogs-installed) $(wrapper-installed) $(dts-installed) | 375 | all-installed := $(extra-installed) $(hostprogs-installed) $(wrapper-installed) $(dts-installed) |
376 | 376 | ||
diff --git a/arch/powerpc/boot/addnote.c b/arch/powerpc/boot/addnote.c
index b1e5611b2ab1..349b5530d2c4 100644
--- a/arch/powerpc/boot/addnote.c
+++ b/arch/powerpc/boot/addnote.c
@@ -20,7 +20,7 @@ | |||
20 | #include <string.h> | 20 | #include <string.h> |
21 | 21 | ||
22 | /* CHRP note section */ | 22 | /* CHRP note section */ |
23 | char arch[] = "PowerPC"; | 23 | static const char arch[] = "PowerPC"; |
24 | 24 | ||
25 | #define N_DESCR 6 | 25 | #define N_DESCR 6 |
26 | unsigned int descr[N_DESCR] = { | 26 | unsigned int descr[N_DESCR] = { |
@@ -33,7 +33,7 @@ unsigned int descr[N_DESCR] = { | |||
33 | }; | 33 | }; |
34 | 34 | ||
35 | /* RPA note section */ | 35 | /* RPA note section */ |
36 | char rpaname[] = "IBM,RPA-Client-Config"; | 36 | static const char rpaname[] = "IBM,RPA-Client-Config"; |
37 | 37 | ||
38 | /* | 38 | /* |
39 | * Note: setting ignore_my_client_config *should* mean that OF ignores | 39 | * Note: setting ignore_my_client_config *should* mean that OF ignores |
diff --git a/arch/powerpc/boot/crt0.S b/arch/powerpc/boot/crt0.S
index f1c4dfc635be..0f7428a37efb 100644
--- a/arch/powerpc/boot/crt0.S
+++ b/arch/powerpc/boot/crt0.S
@@ -6,16 +6,28 @@ | |||
6 | * as published by the Free Software Foundation; either version | 6 | * as published by the Free Software Foundation; either version |
7 | * 2 of the License, or (at your option) any later version. | 7 | * 2 of the License, or (at your option) any later version. |
8 | * | 8 | * |
9 | * NOTE: this code runs in 32 bit mode and is packaged as ELF32. | 9 | * NOTE: this code runs in 32 bit mode, is position-independent, |
10 | * and is packaged as ELF32. | ||
10 | */ | 11 | */ |
11 | 12 | ||
12 | #include "ppc_asm.h" | 13 | #include "ppc_asm.h" |
13 | 14 | ||
14 | .text | 15 | .text |
15 | /* a procedure descriptor used when booting this as a COFF file */ | 16 | /* A procedure descriptor used when booting this as a COFF file. |
17 | * When making COFF, this comes first in the link and we're | ||
18 | * linked at 0x500000. | ||
19 | */ | ||
16 | .globl _zimage_start_opd | 20 | .globl _zimage_start_opd |
17 | _zimage_start_opd: | 21 | _zimage_start_opd: |
18 | .long _zimage_start, 0, 0, 0 | 22 | .long 0x500000, 0, 0, 0 |
23 | |||
24 | p_start: .long _start | ||
25 | p_etext: .long _etext | ||
26 | p_bss_start: .long __bss_start | ||
27 | p_end: .long _end | ||
28 | |||
29 | .weak _platform_stack_top | ||
30 | p_pstack: .long _platform_stack_top | ||
19 | 31 | ||
20 | .weak _zimage_start | 32 | .weak _zimage_start |
21 | .globl _zimage_start | 33 | .globl _zimage_start |
@@ -24,37 +36,65 @@ _zimage_start: | |||
24 | _zimage_start_lib: | 36 | _zimage_start_lib: |
25 | /* Work out the offset between the address we were linked at | 37 | /* Work out the offset between the address we were linked at |
26 | and the address where we're running. */ | 38 | and the address where we're running. */ |
27 | bl 1f | 39 | bl .+4 |
28 | 1: mflr r0 | 40 | p_base: mflr r10 /* r10 now points to runtime addr of p_base */ |
29 | lis r9,1b@ha | 41 | /* grab the link address of the dynamic section in r11 */ |
30 | addi r9,r9,1b@l | 42 | addis r11,r10,(_GLOBAL_OFFSET_TABLE_-p_base)@ha |
31 | subf. r0,r9,r0 | 43 | lwz r11,(_GLOBAL_OFFSET_TABLE_-p_base)@l(r11) |
32 | beq 3f /* if running at same address as linked */ | 44 | cmpwi r11,0 |
45 | beq 3f /* if not linked -pie */ | ||
46 | /* get the runtime address of the dynamic section in r12 */ | ||
47 | .weak __dynamic_start | ||
48 | addis r12,r10,(__dynamic_start-p_base)@ha | ||
49 | addi r12,r12,(__dynamic_start-p_base)@l | ||
50 | subf r11,r11,r12 /* runtime - linktime offset */ | ||
51 | |||
52 | /* The dynamic section contains a series of tagged entries. | ||
53 | * We need the RELA and RELACOUNT entries. */ | ||
54 | RELA = 7 | ||
55 | RELACOUNT = 0x6ffffff9 | ||
56 | li r9,0 | ||
57 | li r0,0 | ||
58 | 9: lwz r8,0(r12) /* get tag */ | ||
59 | cmpwi r8,0 | ||
60 | beq 10f /* end of list */ | ||
61 | cmpwi r8,RELA | ||
62 | bne 11f | ||
63 | lwz r9,4(r12) /* get RELA pointer in r9 */ | ||
64 | b 12f | ||
65 | 11: addis r8,r8,(-RELACOUNT)@ha | ||
66 | cmpwi r8,RELACOUNT@l | ||
67 | bne 12f | ||
68 | lwz r0,4(r12) /* get RELACOUNT value in r0 */ | ||
69 | 12: addi r12,r12,8 | ||
70 | b 9b | ||
33 | 71 | ||
34 | /* The .got2 section contains a list of addresses, so add | 72 | /* The relocation section contains a list of relocations. |
35 | the address offset onto each entry. */ | 73 | * We now do the R_PPC_RELATIVE ones, which point to words |
36 | lis r9,__got2_start@ha | 74 | * which need to be initialized with addend + offset. |
37 | addi r9,r9,__got2_start@l | 75 | * The R_PPC_RELATIVE ones come first and there are RELACOUNT |
38 | lis r8,__got2_end@ha | 76 | * of them. */ |
39 | addi r8,r8,__got2_end@l | 77 | 10: /* skip relocation if we don't have both */ |
40 | subf. r8,r9,r8 | 78 | cmpwi r0,0 |
41 | beq 3f | 79 | beq 3f |
42 | srwi. r8,r8,2 | 80 | cmpwi r9,0 |
43 | mtctr r8 | 81 | beq 3f |
44 | add r9,r0,r9 | 82 | |
45 | 2: lwz r8,0(r9) | 83 | add r9,r9,r11 /* Relocate RELA pointer */ |
46 | add r8,r8,r0 | 84 | mtctr r0 |
47 | stw r8,0(r9) | 85 | 2: lbz r0,4+3(r9) /* ELF32_R_INFO(reloc->r_info) */ |
48 | addi r9,r9,4 | 86 | cmpwi r0,22 /* R_PPC_RELATIVE */ |
87 | bne 3f | ||
88 | lwz r12,0(r9) /* reloc->r_offset */ | ||
89 | lwz r0,8(r9) /* reloc->r_addend */ | ||
90 | add r0,r0,r11 | ||
91 | stwx r0,r11,r12 | ||
92 | addi r9,r9,12 | ||
49 | bdnz 2b | 93 | bdnz 2b |
50 | 94 | ||
51 | /* Do a cache flush for our text, in case the loader didn't */ | 95 | /* Do a cache flush for our text, in case the loader didn't */ |
52 | 3: lis r9,_start@ha | 96 | 3: lwz r9,p_start-p_base(r10) /* note: these are relocated now */ |
53 | addi r9,r9,_start@l | 97 | lwz r8,p_etext-p_base(r10) |
54 | add r9,r0,r9 | ||
55 | lis r8,_etext@ha | ||
56 | addi r8,r8,_etext@l | ||
57 | add r8,r0,r8 | ||
58 | 4: dcbf r0,r9 | 98 | 4: dcbf r0,r9 |
59 | icbi r0,r9 | 99 | icbi r0,r9 |
60 | addi r9,r9,0x20 | 100 | addi r9,r9,0x20 |
@@ -64,27 +104,19 @@ _zimage_start_lib: | |||
64 | isync | 104 | isync |
65 | 105 | ||
66 | /* Clear the BSS */ | 106 | /* Clear the BSS */ |
67 | lis r9,__bss_start@ha | 107 | lwz r9,p_bss_start-p_base(r10) |
68 | addi r9,r9,__bss_start@l | 108 | lwz r8,p_end-p_base(r10) |
69 | add r9,r0,r9 | 109 | li r0,0 |
70 | lis r8,_end@ha | 110 | 5: stw r0,0(r9) |
71 | addi r8,r8,_end@l | ||
72 | add r8,r0,r8 | ||
73 | li r10,0 | ||
74 | 5: stw r10,0(r9) | ||
75 | addi r9,r9,4 | 111 | addi r9,r9,4 |
76 | cmplw cr0,r9,r8 | 112 | cmplw cr0,r9,r8 |
77 | blt 5b | 113 | blt 5b |
78 | 114 | ||
79 | /* Possibly set up a custom stack */ | 115 | /* Possibly set up a custom stack */ |
80 | .weak _platform_stack_top | 116 | lwz r8,p_pstack-p_base(r10) |
81 | lis r8,_platform_stack_top@ha | ||
82 | addi r8,r8,_platform_stack_top@l | ||
83 | cmpwi r8,0 | 117 | cmpwi r8,0 |
84 | beq 6f | 118 | beq 6f |
85 | add r8,r0,r8 | ||
86 | lwz r1,0(r8) | 119 | lwz r1,0(r8) |
87 | add r1,r0,r1 | ||
88 | li r0,0 | 120 | li r0,0 |
89 | stwu r0,-16(r1) /* establish a stack frame */ | 121 | stwu r0,-16(r1) /* establish a stack frame */ |
90 | 6: | 122 | 6: |
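The rewritten crt0.S above makes the zImage wrapper position-independent: it walks the ELF dynamic section for the RELA (tag 7) and RELACOUNT (tag 0x6ffffff9) entries, then patches every R_PPC_RELATIVE relocation with addend plus load offset. A rough C sketch of the same walk, for readers who prefer not to trace the assembly; the names (relocate, dyn, offset) are illustrative rather than taken from the patch, and a 32-bit boot environment is assumed:

#include <stdint.h>

#define DT_NULL        0
#define DT_RELA        7           /* link-time address of the .rela section */
#define DT_RELACOUNT   0x6ffffff9  /* number of leading R_PPC_RELATIVE entries */
#define R_PPC_RELATIVE 22

struct elf32_dyn  { uint32_t d_tag; uint32_t d_val; };
struct elf32_rela { uint32_t r_offset; uint32_t r_info; uint32_t r_addend; };

/* offset = runtime address - link-time address of the image */
static void relocate(struct elf32_dyn *dyn, uint32_t offset)
{
	struct elf32_rela *rela = 0;
	uint32_t i, count = 0;

	for (; dyn->d_tag != DT_NULL; dyn++) {
		if (dyn->d_tag == DT_RELA)
			rela = (struct elf32_rela *)(uintptr_t)(dyn->d_val + offset);
		else if (dyn->d_tag == DT_RELACOUNT)
			count = dyn->d_val;
	}

	if (!rela || !count)
		return;                 /* not linked -pie: nothing to relocate */

	for (i = 0; i < count; i++, rela++) {
		if ((rela->r_info & 0xff) != R_PPC_RELATIVE)
			break;          /* the RELATIVE relocs come first */
		/* the word at r_offset is initialized with addend + load offset */
		*(uint32_t *)(uintptr_t)(rela->r_offset + offset) =
			rela->r_addend + offset;
	}
}

In the assembly, the lbz r0,4+3(r9) load is the low byte of r_info (the relocation type check) and stwx r0,r11,r12 is the final store of addend + offset.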
diff --git a/arch/powerpc/boot/div64.S b/arch/powerpc/boot/div64.S
index 722f360a32a9..d271ab542673 100644
--- a/arch/powerpc/boot/div64.S
+++ b/arch/powerpc/boot/div64.S
@@ -33,9 +33,10 @@ __div64_32: | |||
33 | cntlzw r0,r5 # we are shifting the dividend right | 33 | cntlzw r0,r5 # we are shifting the dividend right |
34 | li r10,-1 # to make it < 2^32, and shifting | 34 | li r10,-1 # to make it < 2^32, and shifting |
35 | srw r10,r10,r0 # the divisor right the same amount, | 35 | srw r10,r10,r0 # the divisor right the same amount, |
36 | add r9,r4,r10 # rounding up (so the estimate cannot | 36 | addc r9,r4,r10 # rounding up (so the estimate cannot |
37 | andc r11,r6,r10 # ever be too large, only too small) | 37 | andc r11,r6,r10 # ever be too large, only too small) |
38 | andc r9,r9,r10 | 38 | andc r9,r9,r10 |
39 | addze r9,r9 | ||
39 | or r11,r5,r11 | 40 | or r11,r5,r11 |
40 | rotlw r9,r9,r0 | 41 | rotlw r9,r9,r0 |
41 | rotlw r11,r11,r0 | 42 | rotlw r11,r11,r0 |
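The div64.S hunk above is a carry fix in the divisor round-up of __div64_32: with a plain add, a divisor close to 2^32 could carry out of the 32-bit addition and that carry was silently dropped, so the rounded estimate wrapped; addc plus addze re-inserts the carry after the masking. A minimal, self-contained C illustration of just the lost carry (the values are made up and the full division algorithm is not reproduced):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t divisor = 0xfffff000u;  /* a divisor near 2^32 (illustrative) */
	uint32_t mask    = 0x0000ffffu;  /* low-bit mask derived from the shift count */

	/* old sequence: 32-bit add, the carry out of bit 31 is lost */
	uint32_t rounded32 = (divisor + mask) & ~mask;

	/* new sequence: addc/addze keep the carry, effectively a wider round-up */
	uint64_t rounded64 = ((uint64_t)divisor + mask) & ~(uint64_t)mask;

	printf("without carry: %#010x, with carry: %#llx\n",
	       rounded32, (unsigned long long)rounded64);
	return 0;
}

Here the old computation wraps to 0 while the carry-preserving one yields 0x100000000, which is the case the added addze now accounts for.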
diff --git a/arch/powerpc/boot/dtc-src/.gitignore b/arch/powerpc/boot/dtc-src/.gitignore
deleted file mode 100644
index a7c3f94e5e75..000000000000
--- a/arch/powerpc/boot/dtc-src/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@ | |||
1 | dtc-lexer.lex.c | ||
2 | dtc-parser.tab.c | ||
3 | dtc-parser.tab.h | ||
diff --git a/arch/powerpc/boot/dts/bluestone.dts b/arch/powerpc/boot/dts/bluestone.dts
new file mode 100644
index 000000000000..2a56a0dbd1f7
--- /dev/null
+++ b/arch/powerpc/boot/dts/bluestone.dts
@@ -0,0 +1,254 @@ | |||
1 | /* | ||
2 | * Device Tree for Bluestone (APM821xx) board. | ||
3 | * | ||
4 | * Copyright (c) 2010, Applied Micro Circuits Corporation | ||
5 | * Author: Tirumala R Marri <tmarri@apm.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License as | ||
9 | * published by the Free Software Foundation; either version 2 of | ||
10 | * the License, or (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, | ||
20 | * MA 02111-1307 USA | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | /dts-v1/; | ||
25 | |||
26 | / { | ||
27 | #address-cells = <2>; | ||
28 | #size-cells = <1>; | ||
29 | model = "apm,bluestone"; | ||
30 | compatible = "apm,bluestone"; | ||
31 | dcr-parent = <&{/cpus/cpu@0}>; | ||
32 | |||
33 | aliases { | ||
34 | ethernet0 = &EMAC0; | ||
35 | serial0 = &UART0; | ||
36 | //serial1 = &UART1; --gcl missing UART1 label | ||
37 | }; | ||
38 | |||
39 | cpus { | ||
40 | #address-cells = <1>; | ||
41 | #size-cells = <0>; | ||
42 | |||
43 | cpu@0 { | ||
44 | device_type = "cpu"; | ||
45 | model = "PowerPC,apm821xx"; | ||
46 | reg = <0x00000000>; | ||
47 | clock-frequency = <0>; /* Filled in by U-Boot */ | ||
48 | timebase-frequency = <0>; /* Filled in by U-Boot */ | ||
49 | i-cache-line-size = <32>; | ||
50 | d-cache-line-size = <32>; | ||
51 | i-cache-size = <32768>; | ||
52 | d-cache-size = <32768>; | ||
53 | dcr-controller; | ||
54 | dcr-access-method = "native"; | ||
55 | //next-level-cache = <&L2C0>; --gcl missing L2C0 label | ||
56 | }; | ||
57 | }; | ||
58 | |||
59 | memory { | ||
60 | device_type = "memory"; | ||
61 | reg = <0x00000000 0x00000000 0x00000000>; /* Filled in by U-Boot */ | ||
62 | }; | ||
63 | |||
64 | UIC0: interrupt-controller0 { | ||
65 | compatible = "ibm,uic"; | ||
66 | interrupt-controller; | ||
67 | cell-index = <0>; | ||
68 | dcr-reg = <0x0c0 0x009>; | ||
69 | #address-cells = <0>; | ||
70 | #size-cells = <0>; | ||
71 | #interrupt-cells = <2>; | ||
72 | }; | ||
73 | |||
74 | UIC1: interrupt-controller1 { | ||
75 | compatible = "ibm,uic"; | ||
76 | interrupt-controller; | ||
77 | cell-index = <1>; | ||
78 | dcr-reg = <0x0d0 0x009>; | ||
79 | #address-cells = <0>; | ||
80 | #size-cells = <0>; | ||
81 | #interrupt-cells = <2>; | ||
82 | interrupts = <0x1e 0x4 0x1f 0x4>; /* cascade */ | ||
83 | interrupt-parent = <&UIC0>; | ||
84 | }; | ||
85 | |||
86 | UIC2: interrupt-controller2 { | ||
87 | compatible = "ibm,uic"; | ||
88 | interrupt-controller; | ||
89 | cell-index = <2>; | ||
90 | dcr-reg = <0x0e0 0x009>; | ||
91 | #address-cells = <0>; | ||
92 | #size-cells = <0>; | ||
93 | #interrupt-cells = <2>; | ||
94 | interrupts = <0xa 0x4 0xb 0x4>; /* cascade */ | ||
95 | interrupt-parent = <&UIC0>; | ||
96 | }; | ||
97 | |||
98 | UIC3: interrupt-controller3 { | ||
99 | compatible = "ibm,uic"; | ||
100 | interrupt-controller; | ||
101 | cell-index = <3>; | ||
102 | dcr-reg = <0x0f0 0x009>; | ||
103 | #address-cells = <0>; | ||
104 | #size-cells = <0>; | ||
105 | #interrupt-cells = <2>; | ||
106 | interrupts = <0x10 0x4 0x11 0x4>; /* cascade */ | ||
107 | interrupt-parent = <&UIC0>; | ||
108 | }; | ||
109 | |||
110 | SDR0: sdr { | ||
111 | compatible = "ibm,sdr-apm821xx"; | ||
112 | dcr-reg = <0x00e 0x002>; | ||
113 | }; | ||
114 | |||
115 | CPR0: cpr { | ||
116 | compatible = "ibm,cpr-apm821xx"; | ||
117 | dcr-reg = <0x00c 0x002>; | ||
118 | }; | ||
119 | |||
120 | plb { | ||
121 | compatible = "ibm,plb4"; | ||
122 | #address-cells = <2>; | ||
123 | #size-cells = <1>; | ||
124 | ranges; | ||
125 | clock-frequency = <0>; /* Filled in by U-Boot */ | ||
126 | |||
127 | SDRAM0: sdram { | ||
128 | compatible = "ibm,sdram-apm821xx"; | ||
129 | dcr-reg = <0x010 0x002>; | ||
130 | }; | ||
131 | |||
132 | MAL0: mcmal { | ||
133 | compatible = "ibm,mcmal2"; | ||
134 | descriptor-memory = "ocm"; | ||
135 | dcr-reg = <0x180 0x062>; | ||
136 | num-tx-chans = <1>; | ||
137 | num-rx-chans = <1>; | ||
138 | #address-cells = <0>; | ||
139 | #size-cells = <0>; | ||
140 | interrupt-parent = <&UIC2>; | ||
141 | interrupts = < /*TXEOB*/ 0x6 0x4 | ||
142 | /*RXEOB*/ 0x7 0x4 | ||
143 | /*SERR*/ 0x3 0x4 | ||
144 | /*TXDE*/ 0x4 0x4 | ||
145 | /*RXDE*/ 0x5 0x4>; | ||
146 | }; | ||
147 | |||
148 | POB0: opb { | ||
149 | compatible = "ibm,opb"; | ||
150 | #address-cells = <1>; | ||
151 | #size-cells = <1>; | ||
152 | ranges = <0xb0000000 0x00000004 0xb0000000 0x50000000>; | ||
153 | clock-frequency = <0>; /* Filled in by U-Boot */ | ||
154 | |||
155 | EBC0: ebc { | ||
156 | compatible = "ibm,ebc"; | ||
157 | dcr-reg = <0x012 0x002>; | ||
158 | #address-cells = <2>; | ||
159 | #size-cells = <1>; | ||
160 | clock-frequency = <0>; /* Filled in by U-Boot */ | ||
161 | /* ranges property is supplied by U-Boot */ | ||
162 | ranges = < 0x00000003 0x00000000 0xe0000000 0x8000000>; | ||
163 | interrupts = <0x6 0x4>; | ||
164 | interrupt-parent = <&UIC1>; | ||
165 | |||
166 | nor_flash@0,0 { | ||
167 | compatible = "amd,s29gl512n", "cfi-flash"; | ||
168 | bank-width = <2>; | ||
169 | reg = <0x00000000 0x00000000 0x00400000>; | ||
170 | #address-cells = <1>; | ||
171 | #size-cells = <1>; | ||
172 | partition@0 { | ||
173 | label = "kernel"; | ||
174 | reg = <0x00000000 0x00180000>; | ||
175 | }; | ||
176 | partition@180000 { | ||
177 | label = "env"; | ||
178 | reg = <0x00180000 0x00020000>; | ||
179 | }; | ||
180 | partition@1a0000 { | ||
181 | label = "u-boot"; | ||
182 | reg = <0x001a0000 0x00060000>; | ||
183 | }; | ||
184 | }; | ||
185 | }; | ||
186 | |||
187 | UART0: serial@ef600300 { | ||
188 | device_type = "serial"; | ||
189 | compatible = "ns16550"; | ||
190 | reg = <0xef600300 0x00000008>; | ||
191 | virtual-reg = <0xef600300>; | ||
192 | clock-frequency = <0>; /* Filled in by U-Boot */ | ||
193 | current-speed = <0>; /* Filled in by U-Boot */ | ||
194 | interrupt-parent = <&UIC1>; | ||
195 | interrupts = <0x1 0x4>; | ||
196 | }; | ||
197 | |||
198 | IIC0: i2c@ef600700 { | ||
199 | compatible = "ibm,iic"; | ||
200 | reg = <0xef600700 0x00000014>; | ||
201 | interrupt-parent = <&UIC0>; | ||
202 | interrupts = <0x2 0x4>; | ||
203 | }; | ||
204 | |||
205 | IIC1: i2c@ef600800 { | ||
206 | compatible = "ibm,iic"; | ||
207 | reg = <0xef600800 0x00000014>; | ||
208 | interrupt-parent = <&UIC0>; | ||
209 | interrupts = <0x3 0x4>; | ||
210 | }; | ||
211 | |||
212 | RGMII0: emac-rgmii@ef601500 { | ||
213 | compatible = "ibm,rgmii"; | ||
214 | reg = <0xef601500 0x00000008>; | ||
215 | has-mdio; | ||
216 | }; | ||
217 | |||
218 | TAH0: emac-tah@ef601350 { | ||
219 | compatible = "ibm,tah"; | ||
220 | reg = <0xef601350 0x00000030>; | ||
221 | }; | ||
222 | |||
223 | EMAC0: ethernet@ef600c00 { | ||
224 | device_type = "network"; | ||
225 | compatible = "ibm,emac4sync"; | ||
226 | interrupt-parent = <&EMAC0>; | ||
227 | interrupts = <0x0 0x1>; | ||
228 | #interrupt-cells = <1>; | ||
229 | #address-cells = <0>; | ||
230 | #size-cells = <0>; | ||
231 | interrupt-map = </*Status*/ 0x0 &UIC2 0x10 0x4 | ||
232 | /*Wake*/ 0x1 &UIC2 0x14 0x4>; | ||
233 | reg = <0xef600c00 0x000000c4>; | ||
234 | local-mac-address = [000000000000]; /* Filled in by U-Boot */ | ||
235 | mal-device = <&MAL0>; | ||
236 | mal-tx-channel = <0>; | ||
237 | mal-rx-channel = <0>; | ||
238 | cell-index = <0>; | ||
239 | max-frame-size = <9000>; | ||
240 | rx-fifo-size = <16384>; | ||
241 | tx-fifo-size = <2048>; | ||
242 | phy-mode = "rgmii"; | ||
243 | phy-map = <0x00000000>; | ||
244 | rgmii-device = <&RGMII0>; | ||
245 | rgmii-channel = <0>; | ||
246 | tah-device = <&TAH0>; | ||
247 | tah-channel = <0>; | ||
248 | has-inverted-stacr-oc; | ||
249 | has-new-stacr-staopc; | ||
250 | }; | ||
251 | }; | ||
252 | |||
253 | }; | ||
254 | }; | ||
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts
index a30370396250..22dd6ae84da0 100644
--- a/arch/powerpc/boot/dts/canyonlands.dts
+++ b/arch/powerpc/boot/dts/canyonlands.dts
@@ -105,6 +105,15 @@ | |||
105 | dcr-reg = <0x00c 0x002>; | 105 | dcr-reg = <0x00c 0x002>; |
106 | }; | 106 | }; |
107 | 107 | ||
108 | CPM0: cpm { | ||
109 | compatible = "ibm,cpm"; | ||
110 | dcr-access-method = "native"; | ||
111 | dcr-reg = <0x160 0x003>; | ||
112 | unused-units = <0x00000100>; | ||
113 | idle-doze = <0x02000000>; | ||
114 | standby = <0xfeff791d>; | ||
115 | }; | ||
116 | |||
108 | L2C0: l2c { | 117 | L2C0: l2c { |
109 | compatible = "ibm,l2-cache-460ex", "ibm,l2-cache"; | 118 | compatible = "ibm,l2-cache-460ex", "ibm,l2-cache"; |
110 | dcr-reg = <0x020 0x008 /* Internal SRAM DCR's */ | 119 | dcr-reg = <0x020 0x008 /* Internal SRAM DCR's */ |
@@ -163,6 +172,19 @@ | |||
163 | interrupts = <0x1e 4>; | 172 | interrupts = <0x1e 4>; |
164 | }; | 173 | }; |
165 | 174 | ||
175 | USBOTG0: usbotg@bff80000 { | ||
176 | compatible = "amcc,dwc-otg"; | ||
177 | reg = <0x4 0xbff80000 0x10000>; | ||
178 | interrupt-parent = <&USBOTG0>; | ||
179 | #interrupt-cells = <1>; | ||
180 | #address-cells = <0>; | ||
181 | #size-cells = <0>; | ||
182 | interrupts = <0x0 0x1 0x2>; | ||
183 | interrupt-map = </* USB-OTG */ 0x0 &UIC2 0x1c 0x4 | ||
184 | /* HIGH-POWER */ 0x1 &UIC1 0x1a 0x8 | ||
185 | /* DMA */ 0x2 &UIC0 0xc 0x4>; | ||
186 | }; | ||
187 | |||
166 | SATA0: sata@bffd1000 { | 188 | SATA0: sata@bffd1000 { |
167 | compatible = "amcc,sata-460ex"; | 189 | compatible = "amcc,sata-460ex"; |
168 | reg = <4 0xbffd1000 0x800 4 0xbffd0800 0x400>; | 190 | reg = <4 0xbffd1000 0x800 4 0xbffd0800 0x400>; |
@@ -224,6 +246,11 @@ | |||
224 | }; | 246 | }; |
225 | }; | 247 | }; |
226 | 248 | ||
249 | cpld@2,0 { | ||
250 | compatible = "amcc,ppc460ex-bcsr"; | ||
251 | reg = <2 0x0 0x9>; | ||
252 | }; | ||
253 | |||
227 | ndfc@3,0 { | 254 | ndfc@3,0 { |
228 | compatible = "ibm,ndfc"; | 255 | compatible = "ibm,ndfc"; |
229 | reg = <0x00000003 0x00000000 0x00002000>; | 256 | reg = <0x00000003 0x00000000 0x00002000>; |
@@ -270,28 +297,6 @@ | |||
270 | interrupts = <0x1 0x4>; | 297 | interrupts = <0x1 0x4>; |
271 | }; | 298 | }; |
272 | 299 | ||
273 | UART2: serial@ef600500 { | ||
274 | device_type = "serial"; | ||
275 | compatible = "ns16550"; | ||
276 | reg = <0xef600500 0x00000008>; | ||
277 | virtual-reg = <0xef600500>; | ||
278 | clock-frequency = <0>; /* Filled in by U-Boot */ | ||
279 | current-speed = <0>; /* Filled in by U-Boot */ | ||
280 | interrupt-parent = <&UIC1>; | ||
281 | interrupts = <28 0x4>; | ||
282 | }; | ||
283 | |||
284 | UART3: serial@ef600600 { | ||
285 | device_type = "serial"; | ||
286 | compatible = "ns16550"; | ||
287 | reg = <0xef600600 0x00000008>; | ||
288 | virtual-reg = <0xef600600>; | ||
289 | clock-frequency = <0>; /* Filled in by U-Boot */ | ||
290 | current-speed = <0>; /* Filled in by U-Boot */ | ||
291 | interrupt-parent = <&UIC1>; | ||
292 | interrupts = <29 0x4>; | ||
293 | }; | ||
294 | |||
295 | IIC0: i2c@ef600700 { | 300 | IIC0: i2c@ef600700 { |
296 | compatible = "ibm,iic-460ex", "ibm,iic"; | 301 | compatible = "ibm,iic-460ex", "ibm,iic"; |
297 | reg = <0xef600700 0x00000014>; | 302 | reg = <0xef600700 0x00000014>; |
@@ -320,6 +325,12 @@ | |||
320 | interrupts = <0x3 0x4>; | 325 | interrupts = <0x3 0x4>; |
321 | }; | 326 | }; |
322 | 327 | ||
328 | GPIO0: gpio@ef600b00 { | ||
329 | compatible = "ibm,ppc4xx-gpio"; | ||
330 | reg = <0xef600b00 0x00000048>; | ||
331 | gpio-controller; | ||
332 | }; | ||
333 | |||
323 | ZMII0: emac-zmii@ef600d00 { | 334 | ZMII0: emac-zmii@ef600d00 { |
324 | compatible = "ibm,zmii-460ex", "ibm,zmii"; | 335 | compatible = "ibm,zmii-460ex", "ibm,zmii"; |
325 | reg = <0xef600d00 0x0000000c>; | 336 | reg = <0xef600d00 0x0000000c>; |
@@ -519,5 +530,23 @@ | |||
519 | 0x0 0x0 0x0 0x3 &UIC3 0x12 0x4 /* swizzled int C */ | 530 | 0x0 0x0 0x0 0x3 &UIC3 0x12 0x4 /* swizzled int C */ |
520 | 0x0 0x0 0x0 0x4 &UIC3 0x13 0x4 /* swizzled int D */>; | 531 | 0x0 0x0 0x0 0x4 &UIC3 0x13 0x4 /* swizzled int D */>; |
521 | }; | 532 | }; |
533 | |||
534 | MSI: ppc4xx-msi@C10000000 { | ||
535 | compatible = "amcc,ppc4xx-msi", "ppc4xx-msi"; | ||
536 | reg = < 0xC 0x10000000 0x100>; | ||
537 | sdr-base = <0x36C>; | ||
538 | msi-data = <0x00000000>; | ||
539 | msi-mask = <0x44440000>; | ||
540 | interrupt-count = <3>; | ||
541 | interrupts = <0 1 2 3>; | ||
542 | interrupt-parent = <&UIC3>; | ||
543 | #interrupt-cells = <1>; | ||
544 | #address-cells = <0>; | ||
545 | #size-cells = <0>; | ||
546 | interrupt-map = <0 &UIC3 0x18 1 | ||
547 | 1 &UIC3 0x19 1 | ||
548 | 2 &UIC3 0x1A 1 | ||
549 | 3 &UIC3 0x1B 1>; | ||
550 | }; | ||
522 | }; | 551 | }; |
523 | }; | 552 | }; |
diff --git a/arch/powerpc/boot/dts/cm5200.dts b/arch/powerpc/boot/dts/cm5200.dts
index dd3860846f15..ad3a4f4a2b04 100644
--- a/arch/powerpc/boot/dts/cm5200.dts
+++ b/arch/powerpc/boot/dts/cm5200.dts
@@ -10,220 +10,74 @@ | |||
10 | * option) any later version. | 10 | * option) any later version. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | /dts-v1/; | 13 | /include/ "mpc5200b.dtsi" |
14 | 14 | ||
15 | / { | 15 | / { |
16 | model = "schindler,cm5200"; | 16 | model = "schindler,cm5200"; |
17 | compatible = "schindler,cm5200"; | 17 | compatible = "schindler,cm5200"; |
18 | #address-cells = <1>; | ||
19 | #size-cells = <1>; | ||
20 | interrupt-parent = <&mpc5200_pic>; | ||
21 | |||
22 | cpus { | ||
23 | #address-cells = <1>; | ||
24 | #size-cells = <0>; | ||
25 | |||
26 | PowerPC,5200@0 { | ||
27 | device_type = "cpu"; | ||
28 | reg = <0>; | ||
29 | d-cache-line-size = <32>; | ||
30 | i-cache-line-size = <32>; | ||
31 | d-cache-size = <0x4000>; // L1, 16K | ||
32 | i-cache-size = <0x4000>; // L1, 16K | ||
33 | timebase-frequency = <0>; // from bootloader | ||
34 | bus-frequency = <0>; // from bootloader | ||
35 | clock-frequency = <0>; // from bootloader | ||
36 | }; | ||
37 | }; | ||
38 | |||
39 | memory { | ||
40 | device_type = "memory"; | ||
41 | reg = <0x00000000 0x04000000>; // 64MB | ||
42 | }; | ||
43 | 18 | ||
44 | soc5200@f0000000 { | 19 | soc5200@f0000000 { |
45 | #address-cells = <1>; | ||
46 | #size-cells = <1>; | ||
47 | compatible = "fsl,mpc5200b-immr"; | ||
48 | ranges = <0 0xf0000000 0x0000c000>; | ||
49 | reg = <0xf0000000 0x00000100>; | ||
50 | bus-frequency = <0>; // from bootloader | ||
51 | system-frequency = <0>; // from bootloader | ||
52 | |||
53 | cdm@200 { | ||
54 | compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm"; | ||
55 | reg = <0x200 0x38>; | ||
56 | }; | ||
57 | |||
58 | mpc5200_pic: interrupt-controller@500 { | ||
59 | // 5200 interrupts are encoded into two levels; | ||
60 | interrupt-controller; | ||
61 | #interrupt-cells = <3>; | ||
62 | compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic"; | ||
63 | reg = <0x500 0x80>; | ||
64 | }; | ||
65 | |||
66 | timer@600 { // General Purpose Timer | 20 | timer@600 { // General Purpose Timer |
67 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
68 | reg = <0x600 0x10>; | ||
69 | interrupts = <1 9 0>; | ||
70 | fsl,has-wdt; | 21 | fsl,has-wdt; |
71 | }; | 22 | }; |
72 | 23 | ||
73 | timer@610 { // General Purpose Timer | 24 | can@900 { |
74 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | 25 | status = "disabled"; |
75 | reg = <0x610 0x10>; | ||
76 | interrupts = <1 10 0>; | ||
77 | }; | ||
78 | |||
79 | timer@620 { // General Purpose Timer | ||
80 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
81 | reg = <0x620 0x10>; | ||
82 | interrupts = <1 11 0>; | ||
83 | }; | ||
84 | |||
85 | timer@630 { // General Purpose Timer | ||
86 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
87 | reg = <0x630 0x10>; | ||
88 | interrupts = <1 12 0>; | ||
89 | }; | ||
90 | |||
91 | timer@640 { // General Purpose Timer | ||
92 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
93 | reg = <0x640 0x10>; | ||
94 | interrupts = <1 13 0>; | ||
95 | }; | ||
96 | |||
97 | timer@650 { // General Purpose Timer | ||
98 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
99 | reg = <0x650 0x10>; | ||
100 | interrupts = <1 14 0>; | ||
101 | }; | ||
102 | |||
103 | timer@660 { // General Purpose Timer | ||
104 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
105 | reg = <0x660 0x10>; | ||
106 | interrupts = <1 15 0>; | ||
107 | }; | ||
108 | |||
109 | timer@670 { // General Purpose Timer | ||
110 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
111 | reg = <0x670 0x10>; | ||
112 | interrupts = <1 16 0>; | ||
113 | }; | 26 | }; |
114 | 27 | ||
115 | rtc@800 { // Real time clock | 28 | can@980 { |
116 | compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc"; | 29 | status = "disabled"; |
117 | reg = <0x800 0x100>; | ||
118 | interrupts = <1 5 0 1 6 0>; | ||
119 | }; | 30 | }; |
120 | 31 | ||
121 | gpio_simple: gpio@b00 { | 32 | psc@2000 { // PSC1 |
122 | compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio"; | 33 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
123 | reg = <0xb00 0x40>; | ||
124 | interrupts = <1 7 0>; | ||
125 | gpio-controller; | ||
126 | #gpio-cells = <2>; | ||
127 | }; | ||
128 | |||
129 | gpio_wkup: gpio@c00 { | ||
130 | compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup"; | ||
131 | reg = <0xc00 0x40>; | ||
132 | interrupts = <1 8 0 0 3 0>; | ||
133 | gpio-controller; | ||
134 | #gpio-cells = <2>; | ||
135 | }; | ||
136 | |||
137 | spi@f00 { | ||
138 | compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi"; | ||
139 | reg = <0xf00 0x20>; | ||
140 | interrupts = <2 13 0 2 14 0>; | ||
141 | }; | ||
142 | |||
143 | usb@1000 { | ||
144 | compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be"; | ||
145 | reg = <0x1000 0xff>; | ||
146 | interrupts = <2 6 0>; | ||
147 | }; | ||
148 | |||
149 | dma-controller@1200 { | ||
150 | compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm"; | ||
151 | reg = <0x1200 0x80>; | ||
152 | interrupts = <3 0 0 3 1 0 3 2 0 3 3 0 | ||
153 | 3 4 0 3 5 0 3 6 0 3 7 0 | ||
154 | 3 8 0 3 9 0 3 10 0 3 11 0 | ||
155 | 3 12 0 3 13 0 3 14 0 3 15 0>; | ||
156 | }; | 34 | }; |
157 | 35 | ||
158 | xlb@1f00 { | 36 | psc@2200 { // PSC2 |
159 | compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb"; | 37 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
160 | reg = <0x1f00 0x100>; | ||
161 | }; | 38 | }; |
162 | 39 | ||
163 | serial@2000 { // PSC1 | 40 | psc@2400 { // PSC3 |
164 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 41 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
165 | reg = <0x2000 0x100>; | ||
166 | interrupts = <2 1 0>; | ||
167 | }; | 42 | }; |
168 | 43 | ||
169 | serial@2200 { // PSC2 | 44 | psc@2600 { // PSC4 |
170 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 45 | status = "disabled"; |
171 | reg = <0x2200 0x100>; | ||
172 | interrupts = <2 2 0>; | ||
173 | }; | 46 | }; |
174 | 47 | ||
175 | serial@2400 { // PSC3 | 48 | psc@2800 { // PSC5 |
176 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 49 | status = "disabled"; |
177 | reg = <0x2400 0x100>; | ||
178 | interrupts = <2 3 0>; | ||
179 | }; | 50 | }; |
180 | 51 | ||
181 | serial@2c00 { // PSC6 | 52 | psc@2c00 { // PSC6 |
182 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 53 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
183 | reg = <0x2c00 0x100>; | ||
184 | interrupts = <2 4 0>; | ||
185 | }; | 54 | }; |
186 | 55 | ||
187 | ethernet@3000 { | 56 | ethernet@3000 { |
188 | compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec"; | ||
189 | reg = <0x3000 0x400>; | ||
190 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
191 | interrupts = <2 5 0>; | ||
192 | phy-handle = <&phy0>; | 57 | phy-handle = <&phy0>; |
193 | }; | 58 | }; |
194 | 59 | ||
195 | mdio@3000 { | 60 | mdio@3000 { |
196 | #address-cells = <1>; | ||
197 | #size-cells = <0>; | ||
198 | compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio"; | ||
199 | reg = <0x3000 0x400>; // fec range, since we need to setup fec interrupts | ||
200 | interrupts = <2 5 0>; // these are for "mii command finished", not link changes & co. | ||
201 | |||
202 | phy0: ethernet-phy@0 { | 61 | phy0: ethernet-phy@0 { |
203 | reg = <0>; | 62 | reg = <0>; |
204 | }; | 63 | }; |
205 | }; | 64 | }; |
206 | 65 | ||
207 | i2c@3d40 { | 66 | ata@3a00 { |
208 | #address-cells = <1>; | 67 | status = "disabled"; |
209 | #size-cells = <0>; | ||
210 | compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c"; | ||
211 | reg = <0x3d40 0x40>; | ||
212 | interrupts = <2 16 0>; | ||
213 | }; | 68 | }; |
214 | 69 | ||
215 | sram@8000 { | 70 | i2c@3d00 { |
216 | compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram"; | 71 | status = "disabled"; |
217 | reg = <0x8000 0x4000>; | ||
218 | }; | 72 | }; |
73 | |||
219 | }; | 74 | }; |
220 | 75 | ||
221 | localbus { | 76 | pci@f0000d00 { |
222 | compatible = "fsl,mpc5200b-lpb","simple-bus"; | 77 | status = "disabled"; |
223 | #address-cells = <2>; | 78 | }; |
224 | #size-cells = <1>; | ||
225 | ranges = <0 0 0xfc000000 0x2000000>; | ||
226 | 79 | ||
80 | localbus { | ||
227 | // 16-bit flash device at LocalPlus Bus CS0 | 81 | // 16-bit flash device at LocalPlus Bus CS0 |
228 | flash@0,0 { | 82 | flash@0,0 { |
229 | compatible = "cfi-flash"; | 83 | compatible = "cfi-flash"; |
diff --git a/arch/powerpc/boot/dts/digsy_mtc.dts b/arch/powerpc/boot/dts/digsy_mtc.dts
index 8e9be6bfe23e..27bd267d631c 100644
--- a/arch/powerpc/boot/dts/digsy_mtc.dts
+++ b/arch/powerpc/boot/dts/digsy_mtc.dts
@@ -11,195 +11,68 @@ | |||
11 | * option) any later version. | 11 | * option) any later version. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | /dts-v1/; | 14 | /include/ "mpc5200b.dtsi" |
15 | 15 | ||
16 | / { | 16 | / { |
17 | model = "intercontrol,digsy-mtc"; | 17 | model = "intercontrol,digsy-mtc"; |
18 | compatible = "intercontrol,digsy-mtc"; | 18 | compatible = "intercontrol,digsy-mtc"; |
19 | #address-cells = <1>; | ||
20 | #size-cells = <1>; | ||
21 | interrupt-parent = <&mpc5200_pic>; | ||
22 | |||
23 | cpus { | ||
24 | #address-cells = <1>; | ||
25 | #size-cells = <0>; | ||
26 | |||
27 | PowerPC,5200@0 { | ||
28 | device_type = "cpu"; | ||
29 | reg = <0>; | ||
30 | d-cache-line-size = <32>; | ||
31 | i-cache-line-size = <32>; | ||
32 | d-cache-size = <0x4000>; // L1, 16K | ||
33 | i-cache-size = <0x4000>; // L1, 16K | ||
34 | timebase-frequency = <0>; // from bootloader | ||
35 | bus-frequency = <0>; // from bootloader | ||
36 | clock-frequency = <0>; // from bootloader | ||
37 | }; | ||
38 | }; | ||
39 | 19 | ||
40 | memory { | 20 | memory { |
41 | device_type = "memory"; | ||
42 | reg = <0x00000000 0x02000000>; // 32MB | 21 | reg = <0x00000000 0x02000000>; // 32MB |
43 | }; | 22 | }; |
44 | 23 | ||
45 | soc5200@f0000000 { | 24 | soc5200@f0000000 { |
46 | #address-cells = <1>; | ||
47 | #size-cells = <1>; | ||
48 | compatible = "fsl,mpc5200b-immr"; | ||
49 | ranges = <0 0xf0000000 0x0000c000>; | ||
50 | reg = <0xf0000000 0x00000100>; | ||
51 | bus-frequency = <0>; // from bootloader | ||
52 | system-frequency = <0>; // from bootloader | ||
53 | |||
54 | cdm@200 { | ||
55 | compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm"; | ||
56 | reg = <0x200 0x38>; | ||
57 | }; | ||
58 | |||
59 | mpc5200_pic: interrupt-controller@500 { | ||
60 | // 5200 interrupts are encoded into two levels; | ||
61 | interrupt-controller; | ||
62 | #interrupt-cells = <3>; | ||
63 | compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic"; | ||
64 | reg = <0x500 0x80>; | ||
65 | }; | ||
66 | |||
67 | timer@600 { // General Purpose Timer | 25 | timer@600 { // General Purpose Timer |
68 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
69 | reg = <0x600 0x10>; | ||
70 | interrupts = <1 9 0>; | ||
71 | fsl,has-wdt; | 26 | fsl,has-wdt; |
72 | }; | 27 | }; |
73 | 28 | ||
74 | timer@610 { // General Purpose Timer | 29 | rtc@800 { |
75 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | 30 | status = "disabled"; |
76 | reg = <0x610 0x10>; | ||
77 | interrupts = <1 10 0>; | ||
78 | }; | ||
79 | |||
80 | timer@620 { // General Purpose Timer | ||
81 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
82 | reg = <0x620 0x10>; | ||
83 | interrupts = <1 11 0>; | ||
84 | }; | ||
85 | |||
86 | timer@630 { // General Purpose Timer | ||
87 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
88 | reg = <0x630 0x10>; | ||
89 | interrupts = <1 12 0>; | ||
90 | }; | ||
91 | |||
92 | timer@640 { // General Purpose Timer | ||
93 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
94 | reg = <0x640 0x10>; | ||
95 | interrupts = <1 13 0>; | ||
96 | }; | ||
97 | |||
98 | timer@650 { // General Purpose Timer | ||
99 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
100 | reg = <0x650 0x10>; | ||
101 | interrupts = <1 14 0>; | ||
102 | }; | 31 | }; |
103 | 32 | ||
104 | timer@660 { // General Purpose Timer | 33 | can@900 { |
105 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | 34 | status = "disabled"; |
106 | reg = <0x660 0x10>; | ||
107 | interrupts = <1 15 0>; | ||
108 | }; | 35 | }; |
109 | 36 | ||
110 | timer@670 { // General Purpose Timer | 37 | can@980 { |
111 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | 38 | status = "disabled"; |
112 | reg = <0x670 0x10>; | ||
113 | interrupts = <1 16 0>; | ||
114 | }; | 39 | }; |
115 | 40 | ||
116 | gpio_simple: gpio@b00 { | 41 | psc@2000 { // PSC1 |
117 | compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio"; | 42 | status = "disabled"; |
118 | reg = <0xb00 0x40>; | ||
119 | interrupts = <1 7 0>; | ||
120 | gpio-controller; | ||
121 | #gpio-cells = <2>; | ||
122 | }; | 43 | }; |
123 | 44 | ||
124 | gpio_wkup: gpio@c00 { | 45 | psc@2200 { // PSC2 |
125 | compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup"; | 46 | status = "disabled"; |
126 | reg = <0xc00 0x40>; | ||
127 | interrupts = <1 8 0 0 3 0>; | ||
128 | gpio-controller; | ||
129 | #gpio-cells = <2>; | ||
130 | }; | 47 | }; |
131 | 48 | ||
132 | spi@f00 { | 49 | psc@2400 { // PSC3 |
133 | compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi"; | 50 | status = "disabled"; |
134 | reg = <0xf00 0x20>; | ||
135 | interrupts = <2 13 0 2 14 0>; | ||
136 | }; | 51 | }; |
137 | 52 | ||
138 | usb@1000 { | 53 | psc@2600 { // PSC4 |
139 | compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be"; | 54 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
140 | reg = <0x1000 0xff>; | ||
141 | interrupts = <2 6 0>; | ||
142 | }; | ||
143 | |||
144 | dma-controller@1200 { | ||
145 | compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm"; | ||
146 | reg = <0x1200 0x80>; | ||
147 | interrupts = <3 0 0 3 1 0 3 2 0 3 3 0 | ||
148 | 3 4 0 3 5 0 3 6 0 3 7 0 | ||
149 | 3 8 0 3 9 0 3 10 0 3 11 0 | ||
150 | 3 12 0 3 13 0 3 14 0 3 15 0>; | ||
151 | }; | ||
152 | |||
153 | xlb@1f00 { | ||
154 | compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb"; | ||
155 | reg = <0x1f00 0x100>; | ||
156 | }; | 55 | }; |
157 | 56 | ||
158 | serial@2600 { // PSC4 | 57 | psc@2800 { // PSC5 |
159 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 58 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
160 | reg = <0x2600 0x100>; | ||
161 | interrupts = <2 11 0>; | ||
162 | }; | 59 | }; |
163 | 60 | ||
164 | serial@2800 { // PSC5 | 61 | psc@2c00 { // PSC6 |
165 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 62 | status = "disabled"; |
166 | reg = <0x2800 0x100>; | ||
167 | interrupts = <2 12 0>; | ||
168 | }; | 63 | }; |
169 | 64 | ||
170 | ethernet@3000 { | 65 | ethernet@3000 { |
171 | compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec"; | ||
172 | reg = <0x3000 0x400>; | ||
173 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
174 | interrupts = <2 5 0>; | ||
175 | phy-handle = <&phy0>; | 66 | phy-handle = <&phy0>; |
176 | }; | 67 | }; |
177 | 68 | ||
178 | mdio@3000 { | 69 | mdio@3000 { |
179 | #address-cells = <1>; | ||
180 | #size-cells = <0>; | ||
181 | compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio"; | ||
182 | reg = <0x3000 0x400>; // fec range, since we need to setup fec interrupts | ||
183 | interrupts = <2 5 0>; // these are for "mii command finished", not link changes & co. | ||
184 | |||
185 | phy0: ethernet-phy@0 { | 70 | phy0: ethernet-phy@0 { |
186 | reg = <0>; | 71 | reg = <0>; |
187 | }; | 72 | }; |
188 | }; | 73 | }; |
189 | 74 | ||
190 | ata@3a00 { | ||
191 | compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata"; | ||
192 | reg = <0x3a00 0x100>; | ||
193 | interrupts = <2 7 0>; | ||
194 | }; | ||
195 | |||
196 | i2c@3d00 { | 75 | i2c@3d00 { |
197 | #address-cells = <1>; | ||
198 | #size-cells = <0>; | ||
199 | compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c"; | ||
200 | reg = <0x3d00 0x40>; | ||
201 | interrupts = <2 15 0>; | ||
202 | |||
203 | rtc@50 { | 76 | rtc@50 { |
204 | compatible = "at,24c08"; | 77 | compatible = "at,24c08"; |
205 | reg = <0x50>; | 78 | reg = <0x50>; |
@@ -211,16 +84,16 @@ | |||
211 | }; | 84 | }; |
212 | }; | 85 | }; |
213 | 86 | ||
214 | sram@8000 { | 87 | i2c@3d40 { |
215 | compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram"; | 88 | status = "disabled"; |
216 | reg = <0x8000 0x4000>; | ||
217 | }; | 89 | }; |
218 | }; | 90 | }; |
219 | 91 | ||
220 | lpb { | 92 | pci@f0000d00 { |
221 | compatible = "fsl,mpc5200b-lpb","simple-bus"; | 93 | status = "disabled"; |
222 | #address-cells = <2>; | 94 | }; |
223 | #size-cells = <1>; | 95 | |
96 | localbus { | ||
224 | ranges = <0 0 0xff000000 0x1000000>; | 97 | ranges = <0 0 0xff000000 0x1000000>; |
225 | 98 | ||
226 | // 16-bit flash device at LocalPlus Bus CS0 | 99 | // 16-bit flash device at LocalPlus Bus CS0 |
diff --git a/arch/powerpc/boot/dts/hotfoot.dts b/arch/powerpc/boot/dts/hotfoot.dts
index cad9c3840afc..71d3bb4931dc 100644
--- a/arch/powerpc/boot/dts/hotfoot.dts
+++ b/arch/powerpc/boot/dts/hotfoot.dts
@@ -117,6 +117,8 @@ | |||
117 | }; | 117 | }; |
118 | 118 | ||
119 | IIC: i2c@ef600500 { | 119 | IIC: i2c@ef600500 { |
120 | #address-cells = <1>; | ||
121 | #size-cells = <0>; | ||
120 | compatible = "ibm,iic-405ep", "ibm,iic"; | 122 | compatible = "ibm,iic-405ep", "ibm,iic"; |
121 | reg = <0xef600500 0x00000011>; | 123 | reg = <0xef600500 0x00000011>; |
122 | interrupt-parent = <&UIC0>; | 124 | interrupt-parent = <&UIC0>; |
diff --git a/arch/powerpc/boot/dts/katmai.dts b/arch/powerpc/boot/dts/katmai.dts
index 7c3be5e45748..f913dbe25d35 100644
--- a/arch/powerpc/boot/dts/katmai.dts
+++ b/arch/powerpc/boot/dts/katmai.dts
@@ -442,6 +442,24 @@ | |||
442 | 0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>; | 442 | 0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>; |
443 | }; | 443 | }; |
444 | 444 | ||
445 | MSI: ppc4xx-msi@400300000 { | ||
446 | compatible = "amcc,ppc4xx-msi", "ppc4xx-msi"; | ||
447 | reg = < 0x4 0x00300000 0x100>; | ||
448 | sdr-base = <0x3B0>; | ||
449 | msi-data = <0x00000000>; | ||
450 | msi-mask = <0x44440000>; | ||
451 | interrupt-count = <3>; | ||
452 | interrupts =<0 1 2 3>; | ||
453 | interrupt-parent = <&UIC0>; | ||
454 | #interrupt-cells = <1>; | ||
455 | #address-cells = <0>; | ||
456 | #size-cells = <0>; | ||
457 | interrupt-map = <0 &UIC0 0xC 1 | ||
458 | 1 &UIC0 0x0D 1 | ||
459 | 2 &UIC0 0x0E 1 | ||
460 | 3 &UIC0 0x0F 1>; | ||
461 | }; | ||
462 | |||
445 | I2O: i2o@400100000 { | 463 | I2O: i2o@400100000 { |
446 | compatible = "ibm,i2o-440spe"; | 464 | compatible = "ibm,i2o-440spe"; |
447 | reg = <0x00000004 0x00100000 0x100>; | 465 | reg = <0x00000004 0x00100000 0x100>; |
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts
index 083e68eeaca4..1613d6e4049e 100644
--- a/arch/powerpc/boot/dts/kilauea.dts
+++ b/arch/powerpc/boot/dts/kilauea.dts
@@ -82,6 +82,15 @@ | |||
82 | interrupt-parent = <&UIC0>; | 82 | interrupt-parent = <&UIC0>; |
83 | }; | 83 | }; |
84 | 84 | ||
85 | CPM0: cpm { | ||
86 | compatible = "ibm,cpm"; | ||
87 | dcr-access-method = "native"; | ||
88 | dcr-reg = <0x0b0 0x003>; | ||
89 | unused-units = <0x00000000>; | ||
90 | idle-doze = <0x02000000>; | ||
91 | standby = <0xe3e74800>; | ||
92 | }; | ||
93 | |||
85 | plb { | 94 | plb { |
86 | compatible = "ibm,plb-405ex", "ibm,plb4"; | 95 | compatible = "ibm,plb-405ex", "ibm,plb4"; |
87 | #address-cells = <1>; | 96 | #address-cells = <1>; |
@@ -394,5 +403,33 @@ | |||
394 | 0x0 0x0 0x0 0x3 &UIC2 0xd 0x4 /* swizzled int C */ | 403 | 0x0 0x0 0x0 0x3 &UIC2 0xd 0x4 /* swizzled int C */ |
395 | 0x0 0x0 0x0 0x4 &UIC2 0xe 0x4 /* swizzled int D */>; | 404 | 0x0 0x0 0x0 0x4 &UIC2 0xe 0x4 /* swizzled int D */>; |
396 | }; | 405 | }; |
406 | |||
407 | MSI: ppc4xx-msi@C10000000 { | ||
408 | compatible = "amcc,ppc4xx-msi", "ppc4xx-msi"; | ||
409 | reg = < 0x0 0xEF620000 0x100>; | ||
410 | sdr-base = <0x4B0>; | ||
411 | msi-data = <0x00000000>; | ||
412 | msi-mask = <0x44440000>; | ||
413 | interrupt-count = <12>; | ||
414 | interrupts = <0 1 2 3 4 5 6 7 8 9 0xA 0xB 0xC 0xD>; | ||
415 | interrupt-parent = <&UIC2>; | ||
416 | #interrupt-cells = <1>; | ||
417 | #address-cells = <0>; | ||
418 | #size-cells = <0>; | ||
419 | interrupt-map = <0 &UIC2 0x10 1 | ||
420 | 1 &UIC2 0x11 1 | ||
421 | 2 &UIC2 0x12 1 | ||
422 | 2 &UIC2 0x13 1 | ||
423 | 2 &UIC2 0x14 1 | ||
424 | 2 &UIC2 0x15 1 | ||
425 | 2 &UIC2 0x16 1 | ||
426 | 2 &UIC2 0x17 1 | ||
427 | 2 &UIC2 0x18 1 | ||
428 | 2 &UIC2 0x19 1 | ||
429 | 2 &UIC2 0x1A 1 | ||
430 | 2 &UIC2 0x1B 1 | ||
431 | 2 &UIC2 0x1C 1 | ||
432 | 3 &UIC2 0x1D 1>; | ||
433 | }; | ||
397 | }; | 434 | }; |
398 | }; | 435 | }; |
diff --git a/arch/powerpc/boot/dts/kmeter1.dts b/arch/powerpc/boot/dts/kmeter1.dts index d8b5d12fb663..d16bae1230f7 100644 --- a/arch/powerpc/boot/dts/kmeter1.dts +++ b/arch/powerpc/boot/dts/kmeter1.dts | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Keymile KMETER1 Device Tree Source | 2 | * Keymile KMETER1 Device Tree Source |
3 | * | 3 | * |
4 | * 2008 DENX Software Engineering GmbH | 4 | * 2008-2011 DENX Software Engineering GmbH |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the | 7 | * under the terms of the GNU General Public License as published by the |
@@ -70,11 +70,11 @@ | |||
70 | #address-cells = <1>; | 70 | #address-cells = <1>; |
71 | #size-cells = <0>; | 71 | #size-cells = <0>; |
72 | cell-index = <0>; | 72 | cell-index = <0>; |
73 | compatible = "fsl-i2c"; | 73 | compatible = "fsl,mpc8313-i2c","fsl-i2c"; |
74 | reg = <0x3000 0x100>; | 74 | reg = <0x3000 0x100>; |
75 | interrupts = <14 0x8>; | 75 | interrupts = <14 0x8>; |
76 | interrupt-parent = <&ipic>; | 76 | interrupt-parent = <&ipic>; |
77 | dfsrr; | 77 | clock-frequency = <400000>; |
78 | }; | 78 | }; |
79 | 79 | ||
80 | serial0: serial@4500 { | 80 | serial0: serial@4500 { |
@@ -137,6 +137,13 @@ | |||
137 | compatible = "fsl,mpc8360-par_io"; | 137 | compatible = "fsl,mpc8360-par_io"; |
138 | num-ports = <7>; | 138 | num-ports = <7>; |
139 | 139 | ||
140 | qe_pio_c: gpio-controller@30 { | ||
141 | #gpio-cells = <2>; | ||
142 | compatible = "fsl,mpc8360-qe-pario-bank", | ||
143 | "fsl,mpc8323-qe-pario-bank"; | ||
144 | reg = <0x1430 0x18>; | ||
145 | gpio-controller; | ||
146 | }; | ||
140 | pio_ucc1: ucc_pin@0 { | 147 | pio_ucc1: ucc_pin@0 { |
141 | reg = <0>; | 148 | reg = <0>; |
142 | 149 | ||
@@ -472,7 +479,17 @@ | |||
472 | #address-cells = <0>; | 479 | #address-cells = <0>; |
473 | #interrupt-cells = <1>; | 480 | #interrupt-cells = <1>; |
474 | reg = <0x80 0x80>; | 481 | reg = <0x80 0x80>; |
475 | interrupts = <32 8 33 8>; | 482 | big-endian; |
483 | interrupts = < | ||
484 | 32 0x8 | ||
485 | 33 0x8 | ||
486 | 34 0x8 | ||
487 | 35 0x8 | ||
488 | 40 0x8 | ||
489 | 41 0x8 | ||
490 | 42 0x8 | ||
491 | 43 0x8 | ||
492 | >; | ||
476 | interrupt-parent = <&ipic>; | 493 | interrupt-parent = <&ipic>; |
477 | }; | 494 | }; |
478 | }; | 495 | }; |
@@ -484,43 +501,31 @@ | |||
484 | compatible = "fsl,mpc8360-localbus", "fsl,pq2pro-localbus", | 501 | compatible = "fsl,mpc8360-localbus", "fsl,pq2pro-localbus", |
485 | "simple-bus"; | 502 | "simple-bus"; |
486 | reg = <0xe0005000 0xd8>; | 503 | reg = <0xe0005000 0xd8>; |
487 | ranges = <0 0 0xf0000000 0x04000000>; /* Filled in by U-Boot */ | 504 | ranges = <0 0 0xf0000000 0x04000000 /* LB 0 */ |
505 | 1 0 0xe8000000 0x01000000 /* LB 1 */ | ||
506 | 3 0 0xa0000000 0x10000000>; /* LB 3 */ | ||
488 | 507 | ||
489 | flash@f0000000,0 { | 508 | flash@0,0 { |
490 | compatible = "cfi-flash"; | 509 | compatible = "cfi-flash"; |
491 | /* | 510 | reg = <0 0 0x04000000>; |
492 | * The Intel P30 chip has 2 non-identical chips on | ||
493 | * one die, so we need to define 2 separate regions | ||
494 | * that are scanned by physmap_of independently. | ||
495 | */ | ||
496 | reg = <0 0x00000000 0x02000000 | ||
497 | 0 0x02000000 0x02000000>; /* Filled in by U-Boot */ | ||
498 | bank-width = <2>; | ||
499 | #address-cells = <1>; | 511 | #address-cells = <1>; |
500 | #size-cells = <1>; | 512 | #size-cells = <1>; |
501 | partition@0 { | 513 | bank-width = <2>; |
514 | partition@0 { /* 768KB */ | ||
502 | label = "u-boot"; | 515 | label = "u-boot"; |
503 | reg = <0 0x40000>; | 516 | reg = <0 0xC0000>; |
504 | }; | 517 | }; |
505 | partition@40000 { | 518 | partition@c0000 { /* 128KB */ |
506 | label = "env"; | 519 | label = "env"; |
507 | reg = <0x40000 0x40000>; | 520 | reg = <0xC0000 0x20000>; |
508 | }; | ||
509 | partition@80000 { | ||
510 | label = "dtb"; | ||
511 | reg = <0x80000 0x20000>; | ||
512 | }; | ||
513 | partition@a0000 { | ||
514 | label = "kernel"; | ||
515 | reg = <0xa0000 0x300000>; | ||
516 | }; | 521 | }; |
517 | partition@3a0000 { | 522 | partition@e0000 { /* 128KB */ |
518 | label = "ramdisk"; | 523 | label = "envred"; |
519 | reg = <0x3a0000 0x800000>; | 524 | reg = <0xE0000 0x20000>; |
520 | }; | 525 | }; |
521 | partition@ba0000 { | 526 | partition@100000 { /* 64512KB */ |
522 | label = "user"; | 527 | label = "ubi0"; |
523 | reg = <0xba0000 0x3460000>; | 528 | reg = <0x100000 0x3F00000>; |
524 | }; | 529 | }; |
525 | }; | 530 | }; |
526 | }; | 531 | }; |
diff --git a/arch/powerpc/boot/dts/lite5200b.dts b/arch/powerpc/boot/dts/lite5200b.dts index 59702ace900f..fb288bb882b6 100644 --- a/arch/powerpc/boot/dts/lite5200b.dts +++ b/arch/powerpc/boot/dts/lite5200b.dts | |||
@@ -10,256 +10,75 @@ | |||
10 | * option) any later version. | 10 | * option) any later version. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | /dts-v1/; | 13 | /include/ "mpc5200b.dtsi" |
14 | 14 | ||
15 | / { | 15 | / { |
16 | model = "fsl,lite5200b"; | 16 | model = "fsl,lite5200b"; |
17 | compatible = "fsl,lite5200b"; | 17 | compatible = "fsl,lite5200b"; |
18 | #address-cells = <1>; | ||
19 | #size-cells = <1>; | ||
20 | interrupt-parent = <&mpc5200_pic>; | ||
21 | |||
22 | cpus { | ||
23 | #address-cells = <1>; | ||
24 | #size-cells = <0>; | ||
25 | |||
26 | PowerPC,5200@0 { | ||
27 | device_type = "cpu"; | ||
28 | reg = <0>; | ||
29 | d-cache-line-size = <32>; | ||
30 | i-cache-line-size = <32>; | ||
31 | d-cache-size = <0x4000>; // L1, 16K | ||
32 | i-cache-size = <0x4000>; // L1, 16K | ||
33 | timebase-frequency = <0>; // from bootloader | ||
34 | bus-frequency = <0>; // from bootloader | ||
35 | clock-frequency = <0>; // from bootloader | ||
36 | }; | ||
37 | }; | ||
38 | 18 | ||
39 | memory { | 19 | memory { |
40 | device_type = "memory"; | ||
41 | reg = <0x00000000 0x10000000>; // 256MB | 20 | reg = <0x00000000 0x10000000>; // 256MB |
42 | }; | 21 | }; |
43 | 22 | ||
44 | soc5200@f0000000 { | 23 | soc5200@f0000000 { |
45 | #address-cells = <1>; | ||
46 | #size-cells = <1>; | ||
47 | compatible = "fsl,mpc5200b-immr"; | ||
48 | ranges = <0 0xf0000000 0x0000c000>; | ||
49 | reg = <0xf0000000 0x00000100>; | ||
50 | bus-frequency = <0>; // from bootloader | ||
51 | system-frequency = <0>; // from bootloader | ||
52 | |||
53 | cdm@200 { | ||
54 | compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm"; | ||
55 | reg = <0x200 0x38>; | ||
56 | }; | ||
57 | |||
58 | mpc5200_pic: interrupt-controller@500 { | ||
59 | // 5200 interrupts are encoded into two levels; | ||
60 | interrupt-controller; | ||
61 | #interrupt-cells = <3>; | ||
62 | compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic"; | ||
63 | reg = <0x500 0x80>; | ||
64 | }; | ||
65 | |||
66 | timer@600 { // General Purpose Timer | 24 | timer@600 { // General Purpose Timer |
67 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
68 | reg = <0x600 0x10>; | ||
69 | interrupts = <1 9 0>; | ||
70 | fsl,has-wdt; | 25 | fsl,has-wdt; |
71 | }; | 26 | }; |
72 | 27 | ||
73 | timer@610 { // General Purpose Timer | 28 | psc@2000 { // PSC1 |
74 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | 29 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
75 | reg = <0x610 0x10>; | 30 | cell-index = <0>; |
76 | interrupts = <1 10 0>; | ||
77 | }; | ||
78 | |||
79 | timer@620 { // General Purpose Timer | ||
80 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
81 | reg = <0x620 0x10>; | ||
82 | interrupts = <1 11 0>; | ||
83 | }; | ||
84 | |||
85 | timer@630 { // General Purpose Timer | ||
86 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
87 | reg = <0x630 0x10>; | ||
88 | interrupts = <1 12 0>; | ||
89 | }; | ||
90 | |||
91 | timer@640 { // General Purpose Timer | ||
92 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
93 | reg = <0x640 0x10>; | ||
94 | interrupts = <1 13 0>; | ||
95 | }; | ||
96 | |||
97 | timer@650 { // General Purpose Timer | ||
98 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
99 | reg = <0x650 0x10>; | ||
100 | interrupts = <1 14 0>; | ||
101 | }; | ||
102 | |||
103 | timer@660 { // General Purpose Timer | ||
104 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
105 | reg = <0x660 0x10>; | ||
106 | interrupts = <1 15 0>; | ||
107 | }; | ||
108 | |||
109 | timer@670 { // General Purpose Timer | ||
110 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
111 | reg = <0x670 0x10>; | ||
112 | interrupts = <1 16 0>; | ||
113 | }; | ||
114 | |||
115 | rtc@800 { // Real time clock | ||
116 | compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc"; | ||
117 | reg = <0x800 0x100>; | ||
118 | interrupts = <1 5 0 1 6 0>; | ||
119 | }; | ||
120 | |||
121 | can@900 { | ||
122 | compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan"; | ||
123 | interrupts = <2 17 0>; | ||
124 | reg = <0x900 0x80>; | ||
125 | }; | ||
126 | |||
127 | can@980 { | ||
128 | compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan"; | ||
129 | interrupts = <2 18 0>; | ||
130 | reg = <0x980 0x80>; | ||
131 | }; | ||
132 | |||
133 | gpio_simple: gpio@b00 { | ||
134 | compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio"; | ||
135 | reg = <0xb00 0x40>; | ||
136 | interrupts = <1 7 0>; | ||
137 | gpio-controller; | ||
138 | #gpio-cells = <2>; | ||
139 | }; | ||
140 | |||
141 | gpio_wkup: gpio@c00 { | ||
142 | compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup"; | ||
143 | reg = <0xc00 0x40>; | ||
144 | interrupts = <1 8 0 0 3 0>; | ||
145 | gpio-controller; | ||
146 | #gpio-cells = <2>; | ||
147 | }; | 31 | }; |
148 | 32 | ||
149 | spi@f00 { | 33 | psc@2200 { // PSC2 |
150 | compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi"; | 34 | status = "disabled"; |
151 | reg = <0xf00 0x20>; | ||
152 | interrupts = <2 13 0 2 14 0>; | ||
153 | }; | 35 | }; |
154 | 36 | ||
155 | usb@1000 { | 37 | psc@2400 { // PSC3 |
156 | compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be"; | 38 | status = "disabled"; |
157 | reg = <0x1000 0xff>; | ||
158 | interrupts = <2 6 0>; | ||
159 | }; | 39 | }; |
160 | 40 | ||
161 | dma-controller@1200 { | 41 | psc@2600 { // PSC4 |
162 | compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm"; | 42 | status = "disabled"; |
163 | reg = <0x1200 0x80>; | ||
164 | interrupts = <3 0 0 3 1 0 3 2 0 3 3 0 | ||
165 | 3 4 0 3 5 0 3 6 0 3 7 0 | ||
166 | 3 8 0 3 9 0 3 10 0 3 11 0 | ||
167 | 3 12 0 3 13 0 3 14 0 3 15 0>; | ||
168 | }; | 43 | }; |
169 | 44 | ||
170 | xlb@1f00 { | 45 | psc@2800 { // PSC5 |
171 | compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb"; | 46 | status = "disabled"; |
172 | reg = <0x1f00 0x100>; | ||
173 | }; | 47 | }; |
174 | 48 | ||
175 | serial@2000 { // PSC1 | 49 | psc@2c00 { // PSC6 |
176 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 50 | status = "disabled"; |
177 | cell-index = <0>; | ||
178 | reg = <0x2000 0x100>; | ||
179 | interrupts = <2 1 0>; | ||
180 | }; | 51 | }; |
181 | 52 | ||
182 | // PSC2 in ac97 mode example | 53 | // PSC2 in ac97 mode example |
183 | //ac97@2200 { // PSC2 | 54 | //ac97@2200 { // PSC2 |
184 | // compatible = "fsl,mpc5200b-psc-ac97","fsl,mpc5200-psc-ac97"; | 55 | // compatible = "fsl,mpc5200b-psc-ac97","fsl,mpc5200-psc-ac97"; |
185 | // cell-index = <1>; | 56 | // cell-index = <1>; |
186 | // reg = <0x2200 0x100>; | ||
187 | // interrupts = <2 2 0>; | ||
188 | //}; | 57 | //}; |
189 | 58 | ||
190 | // PSC3 in CODEC mode example | 59 | // PSC3 in CODEC mode example |
191 | //i2s@2400 { // PSC3 | 60 | //i2s@2400 { // PSC3 |
192 | // compatible = "fsl,mpc5200b-psc-i2s"; //not 5200 compatible | 61 | // compatible = "fsl,mpc5200b-psc-i2s"; //not 5200 compatible |
193 | // cell-index = <2>; | 62 | // cell-index = <2>; |
194 | // reg = <0x2400 0x100>; | ||
195 | // interrupts = <2 3 0>; | ||
196 | //}; | ||
197 | |||
198 | // PSC4 in uart mode example | ||
199 | //serial@2600 { // PSC4 | ||
200 | // compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | ||
201 | // cell-index = <3>; | ||
202 | // reg = <0x2600 0x100>; | ||
203 | // interrupts = <2 11 0>; | ||
204 | //}; | ||
205 | |||
206 | // PSC5 in uart mode example | ||
207 | //serial@2800 { // PSC5 | ||
208 | // compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | ||
209 | // cell-index = <4>; | ||
210 | // reg = <0x2800 0x100>; | ||
211 | // interrupts = <2 12 0>; | ||
212 | //}; | 63 | //}; |
213 | 64 | ||
214 | // PSC6 in spi mode example | 65 | // PSC6 in spi mode example |
215 | //spi@2c00 { // PSC6 | 66 | //spi@2c00 { // PSC6 |
216 | // compatible = "fsl,mpc5200b-psc-spi","fsl,mpc5200-psc-spi"; | 67 | // compatible = "fsl,mpc5200b-psc-spi","fsl,mpc5200-psc-spi"; |
217 | // cell-index = <5>; | 68 | // cell-index = <5>; |
218 | // reg = <0x2c00 0x100>; | ||
219 | // interrupts = <2 4 0>; | ||
220 | //}; | 69 | //}; |
221 | 70 | ||
222 | ethernet@3000 { | 71 | ethernet@3000 { |
223 | compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec"; | ||
224 | reg = <0x3000 0x400>; | ||
225 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
226 | interrupts = <2 5 0>; | ||
227 | phy-handle = <&phy0>; | 72 | phy-handle = <&phy0>; |
228 | }; | 73 | }; |
229 | 74 | ||
230 | mdio@3000 { | 75 | mdio@3000 { |
231 | #address-cells = <1>; | ||
232 | #size-cells = <0>; | ||
233 | compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio"; | ||
234 | reg = <0x3000 0x400>; // fec range, since we need to setup fec interrupts | ||
235 | interrupts = <2 5 0>; // these are for "mii command finished", not link changes & co. | ||
236 | |||
237 | phy0: ethernet-phy@0 { | 76 | phy0: ethernet-phy@0 { |
238 | reg = <0>; | 77 | reg = <0>; |
239 | }; | 78 | }; |
240 | }; | 79 | }; |
241 | 80 | ||
242 | ata@3a00 { | ||
243 | compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata"; | ||
244 | reg = <0x3a00 0x100>; | ||
245 | interrupts = <2 7 0>; | ||
246 | }; | ||
247 | |||
248 | i2c@3d00 { | ||
249 | #address-cells = <1>; | ||
250 | #size-cells = <0>; | ||
251 | compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c"; | ||
252 | reg = <0x3d00 0x40>; | ||
253 | interrupts = <2 15 0>; | ||
254 | }; | ||
255 | |||
256 | i2c@3d40 { | 81 | i2c@3d40 { |
257 | #address-cells = <1>; | ||
258 | #size-cells = <0>; | ||
259 | compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c"; | ||
260 | reg = <0x3d40 0x40>; | ||
261 | interrupts = <2 16 0>; | ||
262 | |||
263 | eeprom@50 { | 82 | eeprom@50 { |
264 | compatible = "atmel,24c02"; | 83 | compatible = "atmel,24c02"; |
265 | reg = <0x50>; | 84 | reg = <0x50>; |
@@ -273,12 +92,6 @@ | |||
273 | }; | 92 | }; |
274 | 93 | ||
275 | pci@f0000d00 { | 94 | pci@f0000d00 { |
276 | #interrupt-cells = <1>; | ||
277 | #size-cells = <2>; | ||
278 | #address-cells = <3>; | ||
279 | device_type = "pci"; | ||
280 | compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci"; | ||
281 | reg = <0xf0000d00 0x100>; | ||
282 | interrupt-map-mask = <0xf800 0 0 7>; | 95 | interrupt-map-mask = <0xf800 0 0 7>; |
283 | interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3 // 1st slot | 96 | interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3 // 1st slot |
284 | 0xc000 0 0 2 &mpc5200_pic 1 1 3 | 97 | 0xc000 0 0 2 &mpc5200_pic 1 1 3 |
@@ -298,11 +111,6 @@ | |||
298 | }; | 111 | }; |
299 | 112 | ||
300 | localbus { | 113 | localbus { |
301 | compatible = "fsl,mpc5200b-lpb","fsl,mpc5200-lpb","simple-bus"; | ||
302 | |||
303 | #address-cells = <2>; | ||
304 | #size-cells = <1>; | ||
305 | |||
306 | ranges = <0 0 0xfe000000 0x02000000>; | 114 | ranges = <0 0 0xfe000000 0x02000000>; |
307 | 115 | ||
308 | flash@0,0 { | 116 | flash@0,0 { |
diff --git a/arch/powerpc/boot/dts/media5200.dts b/arch/powerpc/boot/dts/media5200.dts index 0c3902bc5b6a..48d72f38e5ed 100644 --- a/arch/powerpc/boot/dts/media5200.dts +++ b/arch/powerpc/boot/dts/media5200.dts | |||
@@ -11,14 +11,11 @@ | |||
11 | * option) any later version. | 11 | * option) any later version. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | /dts-v1/; | 14 | /include/ "mpc5200b.dtsi" |
15 | 15 | ||
16 | / { | 16 | / { |
17 | model = "fsl,media5200"; | 17 | model = "fsl,media5200"; |
18 | compatible = "fsl,media5200"; | 18 | compatible = "fsl,media5200"; |
19 | #address-cells = <1>; | ||
20 | #size-cells = <1>; | ||
21 | interrupt-parent = <&mpc5200_pic>; | ||
22 | 19 | ||
23 | aliases { | 20 | aliases { |
24 | console = &console; | 21 | console = &console; |
@@ -30,16 +27,7 @@ | |||
30 | }; | 27 | }; |
31 | 28 | ||
32 | cpus { | 29 | cpus { |
33 | #address-cells = <1>; | ||
34 | #size-cells = <0>; | ||
35 | |||
36 | PowerPC,5200@0 { | 30 | PowerPC,5200@0 { |
37 | device_type = "cpu"; | ||
38 | reg = <0>; | ||
39 | d-cache-line-size = <32>; | ||
40 | i-cache-line-size = <32>; | ||
41 | d-cache-size = <0x4000>; // L1, 16K | ||
42 | i-cache-size = <0x4000>; // L1, 16K | ||
43 | timebase-frequency = <33000000>; // 33 MHz, these were configured by U-Boot | 31 | timebase-frequency = <33000000>; // 33 MHz, these were configured by U-Boot |
44 | bus-frequency = <132000000>; // 132 MHz | 32 | bus-frequency = <132000000>; // 132 MHz |
45 | clock-frequency = <396000000>; // 396 MHz | 33 | clock-frequency = <396000000>; // 396 MHz |
@@ -47,205 +35,57 @@ | |||
47 | }; | 35 | }; |
48 | 36 | ||
49 | memory { | 37 | memory { |
50 | device_type = "memory"; | ||
51 | reg = <0x00000000 0x08000000>; // 128MB RAM | 38 | reg = <0x00000000 0x08000000>; // 128MB RAM |
52 | }; | 39 | }; |
53 | 40 | ||
54 | soc@f0000000 { | 41 | soc5200@f0000000 { |
55 | #address-cells = <1>; | ||
56 | #size-cells = <1>; | ||
57 | compatible = "fsl,mpc5200b-immr"; | ||
58 | ranges = <0 0xf0000000 0x0000c000>; | ||
59 | reg = <0xf0000000 0x00000100>; | ||
60 | bus-frequency = <132000000>;// 132 MHz | 42 | bus-frequency = <132000000>;// 132 MHz |
61 | system-frequency = <0>; // from bootloader | ||
62 | |||
63 | cdm@200 { | ||
64 | compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm"; | ||
65 | reg = <0x200 0x38>; | ||
66 | }; | ||
67 | |||
68 | mpc5200_pic: interrupt-controller@500 { | ||
69 | // 5200 interrupts are encoded into two levels; | ||
70 | interrupt-controller; | ||
71 | #interrupt-cells = <3>; | ||
72 | compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic"; | ||
73 | reg = <0x500 0x80>; | ||
74 | }; | ||
75 | 43 | ||
76 | timer@600 { // General Purpose Timer | 44 | timer@600 { // General Purpose Timer |
77 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
78 | reg = <0x600 0x10>; | ||
79 | interrupts = <1 9 0>; | ||
80 | fsl,has-wdt; | 45 | fsl,has-wdt; |
81 | }; | 46 | }; |
82 | 47 | ||
83 | timer@610 { // General Purpose Timer | 48 | psc@2000 { // PSC1 |
84 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | 49 | status = "disabled"; |
85 | reg = <0x610 0x10>; | ||
86 | interrupts = <1 10 0>; | ||
87 | }; | ||
88 | |||
89 | timer@620 { // General Purpose Timer | ||
90 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
91 | reg = <0x620 0x10>; | ||
92 | interrupts = <1 11 0>; | ||
93 | }; | ||
94 | |||
95 | timer@630 { // General Purpose Timer | ||
96 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
97 | reg = <0x630 0x10>; | ||
98 | interrupts = <1 12 0>; | ||
99 | }; | ||
100 | |||
101 | timer@640 { // General Purpose Timer | ||
102 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
103 | reg = <0x640 0x10>; | ||
104 | interrupts = <1 13 0>; | ||
105 | }; | ||
106 | |||
107 | timer@650 { // General Purpose Timer | ||
108 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
109 | reg = <0x650 0x10>; | ||
110 | interrupts = <1 14 0>; | ||
111 | }; | ||
112 | |||
113 | timer@660 { // General Purpose Timer | ||
114 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
115 | reg = <0x660 0x10>; | ||
116 | interrupts = <1 15 0>; | ||
117 | }; | ||
118 | |||
119 | timer@670 { // General Purpose Timer | ||
120 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
121 | reg = <0x670 0x10>; | ||
122 | interrupts = <1 16 0>; | ||
123 | }; | ||
124 | |||
125 | rtc@800 { // Real time clock | ||
126 | compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc"; | ||
127 | reg = <0x800 0x100>; | ||
128 | interrupts = <1 5 0 1 6 0>; | ||
129 | }; | ||
130 | |||
131 | can@900 { | ||
132 | compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan"; | ||
133 | interrupts = <2 17 0>; | ||
134 | reg = <0x900 0x80>; | ||
135 | }; | 50 | }; |
136 | 51 | ||
137 | can@980 { | 52 | psc@2200 { // PSC2 |
138 | compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan"; | 53 | status = "disabled"; |
139 | interrupts = <2 18 0>; | ||
140 | reg = <0x980 0x80>; | ||
141 | }; | 54 | }; |
142 | 55 | ||
143 | gpio_simple: gpio@b00 { | 56 | psc@2400 { // PSC3 |
144 | compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio"; | 57 | status = "disabled"; |
145 | reg = <0xb00 0x40>; | ||
146 | interrupts = <1 7 0>; | ||
147 | gpio-controller; | ||
148 | #gpio-cells = <2>; | ||
149 | }; | 58 | }; |
150 | 59 | ||
151 | gpio_wkup: gpio@c00 { | 60 | psc@2600 { // PSC4 |
152 | compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup"; | 61 | status = "disabled"; |
153 | reg = <0xc00 0x40>; | ||
154 | interrupts = <1 8 0 0 3 0>; | ||
155 | gpio-controller; | ||
156 | #gpio-cells = <2>; | ||
157 | }; | 62 | }; |
158 | 63 | ||
159 | spi@f00 { | 64 | psc@2800 { // PSC5 |
160 | compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi"; | 65 | status = "disabled"; |
161 | reg = <0xf00 0x20>; | ||
162 | interrupts = <2 13 0 2 14 0>; | ||
163 | }; | ||
164 | |||
165 | usb@1000 { | ||
166 | compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be"; | ||
167 | reg = <0x1000 0x100>; | ||
168 | interrupts = <2 6 0>; | ||
169 | }; | ||
170 | |||
171 | dma-controller@1200 { | ||
172 | compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm"; | ||
173 | reg = <0x1200 0x80>; | ||
174 | interrupts = <3 0 0 3 1 0 3 2 0 3 3 0 | ||
175 | 3 4 0 3 5 0 3 6 0 3 7 0 | ||
176 | 3 8 0 3 9 0 3 10 0 3 11 0 | ||
177 | 3 12 0 3 13 0 3 14 0 3 15 0>; | ||
178 | }; | ||
179 | |||
180 | xlb@1f00 { | ||
181 | compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb"; | ||
182 | reg = <0x1f00 0x100>; | ||
183 | }; | 66 | }; |
184 | 67 | ||
185 | // PSC6 in uart mode | 68 | // PSC6 in uart mode |
186 | console: serial@2c00 { // PSC6 | 69 | console: psc@2c00 { // PSC6 |
187 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 70 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
188 | cell-index = <5>; | ||
189 | port-number = <0>; // Logical port assignment | ||
190 | reg = <0x2c00 0x100>; | ||
191 | interrupts = <2 4 0>; | ||
192 | }; | 71 | }; |
193 | 72 | ||
194 | eth0: ethernet@3000 { | 73 | ethernet@3000 { |
195 | compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec"; | ||
196 | reg = <0x3000 0x400>; | ||
197 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
198 | interrupts = <2 5 0>; | ||
199 | phy-handle = <&phy0>; | 74 | phy-handle = <&phy0>; |
200 | }; | 75 | }; |
201 | 76 | ||
202 | mdio@3000 { | 77 | mdio@3000 { |
203 | #address-cells = <1>; | ||
204 | #size-cells = <0>; | ||
205 | compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio"; | ||
206 | reg = <0x3000 0x400>; // fec range, since we need to setup fec interrupts | ||
207 | interrupts = <2 5 0>; // these are for "mii command finished", not link changes & co. | ||
208 | |||
209 | phy0: ethernet-phy@0 { | 78 | phy0: ethernet-phy@0 { |
210 | reg = <0>; | 79 | reg = <0>; |
211 | }; | 80 | }; |
212 | }; | 81 | }; |
213 | 82 | ||
214 | ata@3a00 { | 83 | usb@1000 { |
215 | compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata"; | 84 | reg = <0x1000 0x100>; |
216 | reg = <0x3a00 0x100>; | ||
217 | interrupts = <2 7 0>; | ||
218 | }; | ||
219 | |||
220 | i2c@3d00 { | ||
221 | #address-cells = <1>; | ||
222 | #size-cells = <0>; | ||
223 | compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c"; | ||
224 | reg = <0x3d00 0x40>; | ||
225 | interrupts = <2 15 0>; | ||
226 | }; | ||
227 | |||
228 | i2c@3d40 { | ||
229 | #address-cells = <1>; | ||
230 | #size-cells = <0>; | ||
231 | compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c"; | ||
232 | reg = <0x3d40 0x40>; | ||
233 | interrupts = <2 16 0>; | ||
234 | }; | ||
235 | |||
236 | sram@8000 { | ||
237 | compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram"; | ||
238 | reg = <0x8000 0x4000>; | ||
239 | }; | 85 | }; |
240 | }; | 86 | }; |
241 | 87 | ||
242 | pci@f0000d00 { | 88 | pci@f0000d00 { |
243 | #interrupt-cells = <1>; | ||
244 | #size-cells = <2>; | ||
245 | #address-cells = <3>; | ||
246 | device_type = "pci"; | ||
247 | compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci"; | ||
248 | reg = <0xf0000d00 0x100>; | ||
249 | interrupt-map-mask = <0xf800 0 0 7>; | 89 | interrupt-map-mask = <0xf800 0 0 7>; |
250 | interrupt-map = <0xc000 0 0 1 &media5200_fpga 0 2 // 1st slot | 90 | interrupt-map = <0xc000 0 0 1 &media5200_fpga 0 2 // 1st slot |
251 | 0xc000 0 0 2 &media5200_fpga 0 3 | 91 | 0xc000 0 0 2 &media5200_fpga 0 3 |
@@ -262,37 +102,29 @@ | |||
262 | 102 | ||
263 | 0xe000 0 0 1 &media5200_fpga 0 5 // CoralIP | 103 | 0xe000 0 0 1 &media5200_fpga 0 5 // CoralIP |
264 | >; | 104 | >; |
265 | clock-frequency = <0>; // From boot loader | ||
266 | interrupts = <2 8 0 2 9 0 2 10 0>; | ||
267 | interrupt-parent = <&mpc5200_pic>; | ||
268 | bus-range = <0 0>; | ||
269 | ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000 | 105 | ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000 |
270 | 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000 | 106 | 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000 |
271 | 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>; | 107 | 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>; |
108 | interrupt-parent = <&mpc5200_pic>; | ||
272 | }; | 109 | }; |
273 | 110 | ||
274 | localbus { | 111 | localbus { |
275 | compatible = "fsl,mpc5200b-lpb","simple-bus"; | ||
276 | #address-cells = <2>; | ||
277 | #size-cells = <1>; | ||
278 | |||
279 | ranges = < 0 0 0xfc000000 0x02000000 | 112 | ranges = < 0 0 0xfc000000 0x02000000 |
280 | 1 0 0xfe000000 0x02000000 | 113 | 1 0 0xfe000000 0x02000000 |
281 | 2 0 0xf0010000 0x00010000 | 114 | 2 0 0xf0010000 0x00010000 |
282 | 3 0 0xf0020000 0x00010000 >; | 115 | 3 0 0xf0020000 0x00010000 >; |
283 | |||
284 | flash@0,0 { | 116 | flash@0,0 { |
285 | compatible = "amd,am29lv28ml", "cfi-flash"; | 117 | compatible = "amd,am29lv28ml", "cfi-flash"; |
286 | reg = <0 0x0 0x2000000>; // 32 MB | 118 | reg = <0 0x0 0x2000000>; // 32 MB |
287 | bank-width = <4>; // Width in bytes of the flash bank | 119 | bank-width = <4>; // Width in bytes of the flash bank |
288 | device-width = <2>; // Two devices on each bank | 120 | device-width = <2>; // Two devices on each bank |
289 | }; | 121 | }; |
290 | 122 | ||
291 | flash@1,0 { | 123 | flash@1,0 { |
292 | compatible = "amd,am29lv28ml", "cfi-flash"; | 124 | compatible = "amd,am29lv28ml", "cfi-flash"; |
293 | reg = <1 0 0x2000000>; // 32 MB | 125 | reg = <1 0 0x2000000>; // 32 MB |
294 | bank-width = <4>; // Width in bytes of the flash bank | 126 | bank-width = <4>; // Width in bytes of the flash bank |
295 | device-width = <2>; // Two devices on each bank | 127 | device-width = <2>; // Two devices on each bank |
296 | }; | 128 | }; |
297 | 129 | ||
298 | media5200_fpga: fpga@2,0 { | 130 | media5200_fpga: fpga@2,0 { |
diff --git a/arch/powerpc/boot/dts/mgcoge.dts b/arch/powerpc/boot/dts/mgcoge.dts index 0ce96644176d..1360d2f69024 100644 --- a/arch/powerpc/boot/dts/mgcoge.dts +++ b/arch/powerpc/boot/dts/mgcoge.dts | |||
@@ -13,7 +13,7 @@ | |||
13 | /dts-v1/; | 13 | /dts-v1/; |
14 | / { | 14 | / { |
15 | model = "MGCOGE"; | 15 | model = "MGCOGE"; |
16 | compatible = "keymile,mgcoge"; | 16 | compatible = "keymile,km82xx"; |
17 | #address-cells = <1>; | 17 | #address-cells = <1>; |
18 | #size-cells = <1>; | 18 | #size-cells = <1>; |
19 | 19 | ||
@@ -48,8 +48,10 @@ | |||
48 | reg = <0xf0010100 0x40>; | 48 | reg = <0xf0010100 0x40>; |
49 | 49 | ||
50 | ranges = <0 0 0xfe000000 0x00400000 | 50 | ranges = <0 0 0xfe000000 0x00400000 |
51 | 5 0 0x50000000 0x20000000 | 51 | 1 0 0x30000000 0x00010000 |
52 | >; /* Filled in by U-Boot */ | 52 | 2 0 0x40000000 0x00010000 |
53 | 5 0 0x50000000 0x04000000 | ||
54 | >; | ||
53 | 55 | ||
54 | flash@0,0 { | 56 | flash@0,0 { |
55 | compatible = "cfi-flash"; | 57 | compatible = "cfi-flash"; |
@@ -60,36 +62,32 @@ | |||
60 | device-width = <1>; | 62 | device-width = <1>; |
61 | partition@0 { | 63 | partition@0 { |
62 | label = "u-boot"; | 64 | label = "u-boot"; |
63 | reg = <0 0x40000>; | 65 | reg = <0x00000 0xC0000>; |
64 | }; | 66 | }; |
65 | partition@40000 { | 67 | partition@1 { |
66 | label = "env"; | 68 | label = "env"; |
67 | reg = <0x40000 0x20000>; | 69 | reg = <0xC0000 0x20000>; |
68 | }; | 70 | }; |
69 | partition@60000 { | 71 | partition@2 { |
70 | label = "kernel"; | 72 | label = "envred"; |
71 | reg = <0x60000 0x220000>; | 73 | reg = <0xE0000 0x20000>; |
72 | }; | 74 | }; |
73 | partition@280000 { | 75 | partition@3 { |
74 | label = "dtb"; | 76 | label = "free"; |
75 | reg = <0x280000 0x20000>; | 77 | reg = <0x100000 0x300000>; |
76 | }; | 78 | }; |
77 | }; | 79 | }; |
78 | 80 | ||
79 | flash@5,0 { | 81 | flash@5,0 { |
80 | compatible = "cfi-flash"; | 82 | compatible = "cfi-flash"; |
81 | reg = <5 0x0 0x2000000>; | 83 | reg = <5 0x00000000 0x02000000 |
84 | 5 0x02000000 0x02000000>; | ||
82 | #address-cells = <1>; | 85 | #address-cells = <1>; |
83 | #size-cells = <1>; | 86 | #size-cells = <1>; |
84 | bank-width = <2>; | 87 | bank-width = <2>; |
85 | device-width = <2>; | 88 | partition@app { /* 64 MBytes */ |
86 | partition@0 { | 89 | label = "ubi0"; |
87 | label = "ramdisk"; | 90 | reg = <0x00000000 0x04000000>; |
88 | reg = <0 0x7a0000>; | ||
89 | }; | ||
90 | partition@7a0000 { | ||
91 | label = "user"; | ||
92 | reg = <0x7a0000 0x1860000>; | ||
93 | }; | 91 | }; |
94 | }; | 92 | }; |
95 | }; | 93 | }; |
@@ -217,6 +215,13 @@ | |||
217 | }; | 215 | }; |
218 | }; | 216 | }; |
219 | 217 | ||
218 | cpm2_pio_c: gpio-controller@10d40 { | ||
219 | #gpio-cells = <2>; | ||
220 | compatible = "fsl,cpm2-pario-bank"; | ||
221 | reg = <0x10d40 0x14>; | ||
222 | gpio-controller; | ||
223 | }; | ||
224 | |||
220 | PIC: interrupt-controller@10c00 { | 225 | PIC: interrupt-controller@10c00 { |
221 | #interrupt-cells = <2>; | 226 | #interrupt-cells = <2>; |
222 | interrupt-controller; | 227 | interrupt-controller; |
diff --git a/arch/powerpc/boot/dts/mgsuvd.dts b/arch/powerpc/boot/dts/mgsuvd.dts deleted file mode 100644 index e4fc53ab42bd..000000000000 --- a/arch/powerpc/boot/dts/mgsuvd.dts +++ /dev/null | |||
@@ -1,163 +0,0 @@ | |||
1 | /* | ||
2 | * MGSUVD Device Tree Source | ||
3 | * | ||
4 | * Copyright 2008 DENX Software Engineering GmbH | ||
5 | * Heiko Schocher <hs@denx.de> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify it | ||
8 | * under the terms of the GNU General Public License as published by the | ||
9 | * Free Software Foundation; either version 2 of the License, or (at your | ||
10 | * option) any later version. | ||
11 | */ | ||
12 | |||
13 | /dts-v1/; | ||
14 | / { | ||
15 | model = "MGSUVD"; | ||
16 | compatible = "keymile,mgsuvd"; | ||
17 | #address-cells = <1>; | ||
18 | #size-cells = <1>; | ||
19 | |||
20 | cpus { | ||
21 | #address-cells = <1>; | ||
22 | #size-cells = <0>; | ||
23 | |||
24 | PowerPC,852@0 { | ||
25 | device_type = "cpu"; | ||
26 | reg = <0>; | ||
27 | d-cache-line-size = <16>; | ||
28 | i-cache-line-size = <16>; | ||
29 | d-cache-size = <8192>; | ||
30 | i-cache-size = <8192>; | ||
31 | timebase-frequency = <0>; /* Filled in by u-boot */ | ||
32 | bus-frequency = <0>; /* Filled in by u-boot */ | ||
33 | clock-frequency = <0>; /* Filled in by u-boot */ | ||
34 | interrupts = <15 2>; /* decrementer interrupt */ | ||
35 | interrupt-parent = <&PIC>; | ||
36 | }; | ||
37 | }; | ||
38 | |||
39 | memory { | ||
40 | device_type = "memory"; | ||
41 | reg = <00000000 0x4000000>; /* Filled in by u-boot */ | ||
42 | }; | ||
43 | |||
44 | localbus@fff00100 { | ||
45 | compatible = "fsl,mpc852-localbus", "fsl,pq1-localbus", "simple-bus"; | ||
46 | #address-cells = <2>; | ||
47 | #size-cells = <1>; | ||
48 | reg = <0xfff00100 0x40>; | ||
49 | |||
50 | ranges = <0 0 0xf0000000 0x01000000>; /* Filled in by u-boot */ | ||
51 | |||
52 | flash@0,0 { | ||
53 | compatible = "cfi-flash"; | ||
54 | reg = <0 0 0x1000000>; | ||
55 | #address-cells = <1>; | ||
56 | #size-cells = <1>; | ||
57 | bank-width = <1>; | ||
58 | device-width = <1>; | ||
59 | partition@0 { | ||
60 | label = "u-boot"; | ||
61 | reg = <0 0x80000>; | ||
62 | }; | ||
63 | partition@80000 { | ||
64 | label = "env"; | ||
65 | reg = <0x80000 0x20000>; | ||
66 | }; | ||
67 | partition@a0000 { | ||
68 | label = "kernel"; | ||
69 | reg = <0xa0000 0x1e0000>; | ||
70 | }; | ||
71 | partition@280000 { | ||
72 | label = "dtb"; | ||
73 | reg = <0x280000 0x20000>; | ||
74 | }; | ||
75 | partition@2a0000 { | ||
76 | label = "root"; | ||
77 | reg = <0x2a0000 0x500000>; | ||
78 | }; | ||
79 | partition@7a0000 { | ||
80 | label = "user"; | ||
81 | reg = <0x7a0000 0x860000>; | ||
82 | }; | ||
83 | }; | ||
84 | }; | ||
85 | |||
86 | soc@fff00000 { | ||
87 | compatible = "fsl,mpc852", "fsl,pq1-soc", "simple-bus"; | ||
88 | #address-cells = <1>; | ||
89 | #size-cells = <1>; | ||
90 | device_type = "soc"; | ||
91 | ranges = <0 0xfff00000 0x00004000>; | ||
92 | |||
93 | PIC: interrupt-controller@0 { | ||
94 | interrupt-controller; | ||
95 | #interrupt-cells = <2>; | ||
96 | reg = <0 24>; | ||
97 | compatible = "fsl,mpc852-pic", "fsl,pq1-pic"; | ||
98 | }; | ||
99 | |||
100 | cpm@9c0 { | ||
101 | #address-cells = <1>; | ||
102 | #size-cells = <1>; | ||
103 | compatible = "fsl,mpc852-cpm", "fsl,cpm1", "simple-bus"; | ||
104 | interrupts = <0>; /* cpm error interrupt */ | ||
105 | interrupt-parent = <&CPM_PIC>; | ||
106 | reg = <0x9c0 10>; | ||
107 | ranges; | ||
108 | |||
109 | muram@2000 { | ||
110 | compatible = "fsl,cpm-muram"; | ||
111 | #address-cells = <1>; | ||
112 | #size-cells = <1>; | ||
113 | ranges = <0 0x2000 0x2000>; | ||
114 | |||
115 | data@0 { | ||
116 | compatible = "fsl,cpm-muram-data"; | ||
117 | reg = <0x800 0x1800>; | ||
118 | }; | ||
119 | }; | ||
120 | |||
121 | brg@9f0 { | ||
122 | compatible = "fsl,mpc852-brg", | ||
123 | "fsl,cpm1-brg", | ||
124 | "fsl,cpm-brg"; | ||
125 | reg = <0x9f0 0x10>; | ||
126 | clock-frequency = <0>; /* Filled in by u-boot */ | ||
127 | }; | ||
128 | |||
129 | CPM_PIC: interrupt-controller@930 { | ||
130 | interrupt-controller; | ||
131 | #interrupt-cells = <1>; | ||
132 | interrupts = <5 2 0 2>; | ||
133 | interrupt-parent = <&PIC>; | ||
134 | reg = <0x930 0x20>; | ||
135 | compatible = "fsl,cpm1-pic"; | ||
136 | }; | ||
137 | |||
138 | /* MON-1 */ | ||
139 | serial@a80 { | ||
140 | device_type = "serial"; | ||
141 | compatible = "fsl,cpm1-smc-uart"; | ||
142 | reg = <0xa80 0x10 0x3fc0 0x40>; | ||
143 | interrupts = <4>; | ||
144 | interrupt-parent = <&CPM_PIC>; | ||
145 | fsl,cpm-brg = <1>; | ||
146 | fsl,cpm-command = <0x0090>; | ||
147 | current-speed = <0>; /* Filled in by u-boot */ | ||
148 | }; | ||
149 | |||
150 | ethernet@a40 { | ||
151 | device_type = "network"; | ||
152 | compatible = "fsl,mpc866-scc-enet", | ||
153 | "fsl,cpm1-scc-enet"; | ||
154 | reg = <0xa40 0x18 0x3e00 0x100>; | ||
155 | local-mac-address = [ 00 00 00 00 00 00 ]; /* Filled in by u-boot */ | ||
156 | interrupts = <28>; | ||
157 | interrupt-parent = <&CPM_PIC>; | ||
158 | fsl,cpm-command = <0x80>; | ||
159 | fixed-link = <0 0 10 0 0>; | ||
160 | }; | ||
161 | }; | ||
162 | }; | ||
163 | }; | ||
diff --git a/arch/powerpc/boot/dts/motionpro.dts b/arch/powerpc/boot/dts/motionpro.dts index 6ca4fc144a33..0b78e89ac69b 100644 --- a/arch/powerpc/boot/dts/motionpro.dts +++ b/arch/powerpc/boot/dts/motionpro.dts | |||
@@ -10,219 +10,73 @@ | |||
10 | * option) any later version. | 10 | * option) any later version. |
11 | */ | 11 | */ |
12 | 12 | ||
13 | /dts-v1/; | 13 | /include/ "mpc5200b.dtsi" |
14 | 14 | ||
15 | / { | 15 | / { |
16 | model = "promess,motionpro"; | 16 | model = "promess,motionpro"; |
17 | compatible = "promess,motionpro"; | 17 | compatible = "promess,motionpro"; |
18 | #address-cells = <1>; | ||
19 | #size-cells = <1>; | ||
20 | interrupt-parent = <&mpc5200_pic>; | ||
21 | |||
22 | cpus { | ||
23 | #address-cells = <1>; | ||
24 | #size-cells = <0>; | ||
25 | |||
26 | PowerPC,5200@0 { | ||
27 | device_type = "cpu"; | ||
28 | reg = <0>; | ||
29 | d-cache-line-size = <32>; | ||
30 | i-cache-line-size = <32>; | ||
31 | d-cache-size = <0x4000>; // L1, 16K | ||
32 | i-cache-size = <0x4000>; // L1, 16K | ||
33 | timebase-frequency = <0>; // from bootloader | ||
34 | bus-frequency = <0>; // from bootloader | ||
35 | clock-frequency = <0>; // from bootloader | ||
36 | }; | ||
37 | }; | ||
38 | |||
39 | memory { | ||
40 | device_type = "memory"; | ||
41 | reg = <0x00000000 0x04000000>; // 64MB | ||
42 | }; | ||
43 | 18 | ||
44 | soc5200@f0000000 { | 19 | soc5200@f0000000 { |
45 | #address-cells = <1>; | ||
46 | #size-cells = <1>; | ||
47 | compatible = "fsl,mpc5200b-immr"; | ||
48 | ranges = <0 0xf0000000 0x0000c000>; | ||
49 | reg = <0xf0000000 0x00000100>; | ||
50 | bus-frequency = <0>; // from bootloader | ||
51 | system-frequency = <0>; // from bootloader | ||
52 | |||
53 | cdm@200 { | ||
54 | compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm"; | ||
55 | reg = <0x200 0x38>; | ||
56 | }; | ||
57 | |||
58 | mpc5200_pic: interrupt-controller@500 { | ||
59 | // 5200 interrupts are encoded into two levels; | ||
60 | interrupt-controller; | ||
61 | #interrupt-cells = <3>; | ||
62 | compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic"; | ||
63 | reg = <0x500 0x80>; | ||
64 | }; | ||
65 | |||
66 | timer@600 { // General Purpose Timer | 20 | timer@600 { // General Purpose Timer |
67 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
68 | reg = <0x600 0x10>; | ||
69 | interrupts = <1 9 0>; | ||
70 | fsl,has-wdt; | 21 | fsl,has-wdt; |
71 | }; | 22 | }; |
72 | 23 | ||
73 | timer@610 { // General Purpose Timer | 24 | timer@660 { // Motion-PRO status LED |
74 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
75 | reg = <0x610 0x10>; | ||
76 | interrupts = <1 10 0>; | ||
77 | }; | ||
78 | |||
79 | timer@620 { // General Purpose Timer | ||
80 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
81 | reg = <0x620 0x10>; | ||
82 | interrupts = <1 11 0>; | ||
83 | }; | ||
84 | |||
85 | timer@630 { // General Purpose Timer | ||
86 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
87 | reg = <0x630 0x10>; | ||
88 | interrupts = <1 12 0>; | ||
89 | }; | ||
90 | |||
91 | timer@640 { // General Purpose Timer | ||
92 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
93 | reg = <0x640 0x10>; | ||
94 | interrupts = <1 13 0>; | ||
95 | }; | ||
96 | |||
97 | timer@650 { // General Purpose Timer | ||
98 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
99 | reg = <0x650 0x10>; | ||
100 | interrupts = <1 14 0>; | ||
101 | }; | ||
102 | |||
103 | motionpro-led@660 { // Motion-PRO status LED | ||
104 | compatible = "promess,motionpro-led"; | 25 | compatible = "promess,motionpro-led"; |
105 | label = "motionpro-statusled"; | 26 | label = "motionpro-statusled"; |
106 | reg = <0x660 0x10>; | ||
107 | interrupts = <1 15 0>; | ||
108 | blink-delay = <100>; // 100 msec | 27 | blink-delay = <100>; // 100 msec |
109 | }; | 28 | }; |
110 | 29 | ||
111 | motionpro-led@670 { // Motion-PRO ready LED | 30 | timer@670 { // Motion-PRO ready LED |
112 | compatible = "promess,motionpro-led"; | 31 | compatible = "promess,motionpro-led"; |
113 | label = "motionpro-readyled"; | 32 | label = "motionpro-readyled"; |
114 | reg = <0x670 0x10>; | ||
115 | interrupts = <1 16 0>; | ||
116 | }; | ||
117 | |||
118 | rtc@800 { // Real time clock | ||
119 | compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc"; | ||
120 | reg = <0x800 0x100>; | ||
121 | interrupts = <1 5 0 1 6 0>; | ||
122 | }; | ||
123 | |||
124 | can@980 { | ||
125 | compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan"; | ||
126 | interrupts = <2 18 0>; | ||
127 | reg = <0x980 0x80>; | ||
128 | }; | 33 | }; |
129 | 34 | ||
130 | gpio_simple: gpio@b00 { | 35 | can@900 { |
131 | compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio"; | 36 | status = "disabled"; |
132 | reg = <0xb00 0x40>; | ||
133 | interrupts = <1 7 0>; | ||
134 | gpio-controller; | ||
135 | #gpio-cells = <2>; | ||
136 | }; | 37 | }; |
137 | 38 | ||
138 | gpio_wkup: gpio@c00 { | 39 | psc@2000 { // PSC1 |
139 | compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup"; | 40 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
140 | reg = <0xc00 0x40>; | ||
141 | interrupts = <1 8 0 0 3 0>; | ||
142 | gpio-controller; | ||
143 | #gpio-cells = <2>; | ||
144 | }; | ||
145 | |||
146 | spi@f00 { | ||
147 | compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi"; | ||
148 | reg = <0xf00 0x20>; | ||
149 | interrupts = <2 13 0 2 14 0>; | ||
150 | }; | 41 | }; |
151 | 42 | ||
152 | usb@1000 { | 43 | // PSC2 in spi master mode |
153 | compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be"; | 44 | psc@2200 { // PSC2 |
154 | reg = <0x1000 0xff>; | 45 | compatible = "fsl,mpc5200b-psc-spi","fsl,mpc5200-psc-spi"; |
155 | interrupts = <2 6 0>; | 46 | cell-index = <1>; |
156 | }; | 47 | }; |
157 | 48 | ||
158 | dma-controller@1200 { | 49 | psc@2400 { // PSC3 |
159 | compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm"; | 50 | status = "disabled"; |
160 | reg = <0x1200 0x80>; | ||
161 | interrupts = <3 0 0 3 1 0 3 2 0 3 3 0 | ||
162 | 3 4 0 3 5 0 3 6 0 3 7 0 | ||
163 | 3 8 0 3 9 0 3 10 0 3 11 0 | ||
164 | 3 12 0 3 13 0 3 14 0 3 15 0>; | ||
165 | }; | 51 | }; |
166 | 52 | ||
167 | xlb@1f00 { | 53 | psc@2600 { // PSC4 |
168 | compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb"; | 54 | status = "disabled"; |
169 | reg = <0x1f00 0x100>; | ||
170 | }; | 55 | }; |
171 | 56 | ||
172 | serial@2000 { // PSC1 | 57 | psc@2800 { // PSC5 |
173 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 58 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
174 | reg = <0x2000 0x100>; | ||
175 | interrupts = <2 1 0>; | ||
176 | }; | ||
177 | |||
178 | // PSC2 in spi master mode | ||
179 | spi@2200 { // PSC2 | ||
180 | compatible = "fsl,mpc5200b-psc-spi","fsl,mpc5200-psc-spi"; | ||
181 | cell-index = <1>; | ||
182 | reg = <0x2200 0x100>; | ||
183 | interrupts = <2 2 0>; | ||
184 | }; | 59 | }; |
185 | 60 | ||
186 | // PSC5 in uart mode | 61 | psc@2c00 { // PSC6 |
187 | serial@2800 { // PSC5 | 62 | status = "disabled"; |
188 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | ||
189 | reg = <0x2800 0x100>; | ||
190 | interrupts = <2 12 0>; | ||
191 | }; | 63 | }; |
192 | 64 | ||
193 | ethernet@3000 { | 65 | ethernet@3000 { |
194 | compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec"; | ||
195 | reg = <0x3000 0x400>; | ||
196 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
197 | interrupts = <2 5 0>; | ||
198 | phy-handle = <&phy0>; | 66 | phy-handle = <&phy0>; |
199 | }; | 67 | }; |
200 | 68 | ||
201 | mdio@3000 { | 69 | mdio@3000 { |
202 | #address-cells = <1>; | ||
203 | #size-cells = <0>; | ||
204 | compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio"; | ||
205 | reg = <0x3000 0x400>; // fec range, since we need to setup fec interrupts | ||
206 | interrupts = <2 5 0>; // these are for "mii command finished", not link changes & co. | ||
207 | |||
208 | phy0: ethernet-phy@2 { | 70 | phy0: ethernet-phy@2 { |
209 | reg = <2>; | 71 | reg = <2>; |
210 | }; | 72 | }; |
211 | }; | 73 | }; |
212 | 74 | ||
213 | ata@3a00 { | 75 | i2c@3d00 { |
214 | compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata"; | 76 | status = "disabled"; |
215 | reg = <0x3a00 0x100>; | ||
216 | interrupts = <2 7 0>; | ||
217 | }; | 77 | }; |
218 | 78 | ||
219 | i2c@3d40 { | 79 | i2c@3d40 { |
220 | #address-cells = <1>; | ||
221 | #size-cells = <0>; | ||
222 | compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c"; | ||
223 | reg = <0x3d40 0x40>; | ||
224 | interrupts = <2 16 0>; | ||
225 | |||
226 | rtc@68 { | 80 | rtc@68 { |
227 | compatible = "dallas,ds1339"; | 81 | compatible = "dallas,ds1339"; |
228 | reg = <0x68>; | 82 | reg = <0x68>; |
@@ -235,10 +89,11 @@ | |||
235 | }; | 89 | }; |
236 | }; | 90 | }; |
237 | 91 | ||
92 | pci@f0000d00 { | ||
93 | status = "disabled"; | ||
94 | }; | ||
95 | |||
238 | localbus { | 96 | localbus { |
239 | compatible = "fsl,mpc5200b-lpb","simple-bus"; | ||
240 | #address-cells = <2>; | ||
241 | #size-cells = <1>; | ||
242 | ranges = <0 0 0xff000000 0x01000000 | 97 | ranges = <0 0 0xff000000 0x01000000 |
243 | 1 0 0x50000000 0x00010000 | 98 | 1 0 0x50000000 0x00010000 |
244 | 2 0 0x50010000 0x00010000 | 99 | 2 0 0x50010000 0x00010000 |
@@ -280,5 +135,6 @@ | |||
280 | #size-cells = <1>; | 135 | #size-cells = <1>; |
281 | #address-cells = <1>; | 136 | #address-cells = <1>; |
282 | }; | 137 | }; |
138 | |||
283 | }; | 139 | }; |
284 | }; | 140 | }; |
diff --git a/arch/powerpc/boot/dts/mpc5200b.dtsi b/arch/powerpc/boot/dts/mpc5200b.dtsi new file mode 100644 index 000000000000..bc27548e895d --- /dev/null +++ b/arch/powerpc/boot/dts/mpc5200b.dtsi | |||
@@ -0,0 +1,275 @@ | |||
1 | /* | ||
2 | * base MPC5200b Device Tree Source | ||
3 | * | ||
4 | * Copyright (C) 2010 SecretLab | ||
5 | * Grant Likely <grant@secretlab.ca> | ||
6 | * John Bonesio <bones@secretlab.ca> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | /dts-v1/; | ||
15 | |||
16 | / { | ||
17 | model = "fsl,mpc5200b"; | ||
18 | compatible = "fsl,mpc5200b"; | ||
19 | #address-cells = <1>; | ||
20 | #size-cells = <1>; | ||
21 | interrupt-parent = <&mpc5200_pic>; | ||
22 | |||
23 | cpus { | ||
24 | #address-cells = <1>; | ||
25 | #size-cells = <0>; | ||
26 | |||
27 | powerpc: PowerPC,5200@0 { | ||
28 | device_type = "cpu"; | ||
29 | reg = <0>; | ||
30 | d-cache-line-size = <32>; | ||
31 | i-cache-line-size = <32>; | ||
32 | d-cache-size = <0x4000>; // L1, 16K | ||
33 | i-cache-size = <0x4000>; // L1, 16K | ||
34 | timebase-frequency = <0>; // from bootloader | ||
35 | bus-frequency = <0>; // from bootloader | ||
36 | clock-frequency = <0>; // from bootloader | ||
37 | }; | ||
38 | }; | ||
39 | |||
40 | memory: memory { | ||
41 | device_type = "memory"; | ||
42 | reg = <0x00000000 0x04000000>; // 64MB | ||
43 | }; | ||
44 | |||
45 | soc: soc5200@f0000000 { | ||
46 | #address-cells = <1>; | ||
47 | #size-cells = <1>; | ||
48 | compatible = "fsl,mpc5200b-immr"; | ||
49 | ranges = <0 0xf0000000 0x0000c000>; | ||
50 | reg = <0xf0000000 0x00000100>; | ||
51 | bus-frequency = <0>; // from bootloader | ||
52 | system-frequency = <0>; // from bootloader | ||
53 | |||
54 | cdm@200 { | ||
55 | compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm"; | ||
56 | reg = <0x200 0x38>; | ||
57 | }; | ||
58 | |||
59 | mpc5200_pic: interrupt-controller@500 { | ||
60 | // 5200 interrupts are encoded into two levels; | ||
61 | interrupt-controller; | ||
62 | #interrupt-cells = <3>; | ||
63 | compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic"; | ||
64 | reg = <0x500 0x80>; | ||
65 | }; | ||
66 | |||
67 | timer@600 { // General Purpose Timer | ||
68 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
69 | reg = <0x600 0x10>; | ||
70 | interrupts = <1 9 0>; | ||
71 | }; | ||
72 | |||
73 | timer@610 { // General Purpose Timer | ||
74 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
75 | reg = <0x610 0x10>; | ||
76 | interrupts = <1 10 0>; | ||
77 | }; | ||
78 | |||
79 | timer@620 { // General Purpose Timer | ||
80 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
81 | reg = <0x620 0x10>; | ||
82 | interrupts = <1 11 0>; | ||
83 | }; | ||
84 | |||
85 | timer@630 { // General Purpose Timer | ||
86 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
87 | reg = <0x630 0x10>; | ||
88 | interrupts = <1 12 0>; | ||
89 | }; | ||
90 | |||
91 | timer@640 { // General Purpose Timer | ||
92 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
93 | reg = <0x640 0x10>; | ||
94 | interrupts = <1 13 0>; | ||
95 | }; | ||
96 | |||
97 | timer@650 { // General Purpose Timer | ||
98 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
99 | reg = <0x650 0x10>; | ||
100 | interrupts = <1 14 0>; | ||
101 | }; | ||
102 | |||
103 | timer@660 { // General Purpose Timer | ||
104 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
105 | reg = <0x660 0x10>; | ||
106 | interrupts = <1 15 0>; | ||
107 | }; | ||
108 | |||
109 | timer@670 { // General Purpose Timer | ||
110 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
111 | reg = <0x670 0x10>; | ||
112 | interrupts = <1 16 0>; | ||
113 | }; | ||
114 | |||
115 | rtc@800 { // Real time clock | ||
116 | compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc"; | ||
117 | reg = <0x800 0x100>; | ||
118 | interrupts = <1 5 0 1 6 0>; | ||
119 | }; | ||
120 | |||
121 | can@900 { | ||
122 | compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan"; | ||
123 | interrupts = <2 17 0>; | ||
124 | reg = <0x900 0x80>; | ||
125 | }; | ||
126 | |||
127 | can@980 { | ||
128 | compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan"; | ||
129 | interrupts = <2 18 0>; | ||
130 | reg = <0x980 0x80>; | ||
131 | }; | ||
132 | |||
133 | gpio_simple: gpio@b00 { | ||
134 | compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio"; | ||
135 | reg = <0xb00 0x40>; | ||
136 | interrupts = <1 7 0>; | ||
137 | gpio-controller; | ||
138 | #gpio-cells = <2>; | ||
139 | }; | ||
140 | |||
141 | gpio_wkup: gpio@c00 { | ||
142 | compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup"; | ||
143 | reg = <0xc00 0x40>; | ||
144 | interrupts = <1 8 0 0 3 0>; | ||
145 | gpio-controller; | ||
146 | #gpio-cells = <2>; | ||
147 | }; | ||
148 | |||
149 | spi@f00 { | ||
150 | compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi"; | ||
151 | reg = <0xf00 0x20>; | ||
152 | interrupts = <2 13 0 2 14 0>; | ||
153 | }; | ||
154 | |||
155 | usb: usb@1000 { | ||
156 | compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be"; | ||
157 | reg = <0x1000 0xff>; | ||
158 | interrupts = <2 6 0>; | ||
159 | }; | ||
160 | |||
161 | dma-controller@1200 { | ||
162 | compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm"; | ||
163 | reg = <0x1200 0x80>; | ||
164 | interrupts = <3 0 0 3 1 0 3 2 0 3 3 0 | ||
165 | 3 4 0 3 5 0 3 6 0 3 7 0 | ||
166 | 3 8 0 3 9 0 3 10 0 3 11 0 | ||
167 | 3 12 0 3 13 0 3 14 0 3 15 0>; | ||
168 | }; | ||
169 | |||
170 | xlb@1f00 { | ||
171 | compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb"; | ||
172 | reg = <0x1f00 0x100>; | ||
173 | }; | ||
174 | |||
175 | psc1: psc@2000 { // PSC1 | ||
176 | compatible = "fsl,mpc5200b-psc","fsl,mpc5200-psc"; | ||
177 | reg = <0x2000 0x100>; | ||
178 | interrupts = <2 1 0>; | ||
179 | }; | ||
180 | |||
181 | psc2: psc@2200 { // PSC2 | ||
182 | compatible = "fsl,mpc5200b-psc","fsl,mpc5200-psc"; | ||
183 | reg = <0x2200 0x100>; | ||
184 | interrupts = <2 2 0>; | ||
185 | }; | ||
186 | |||
187 | psc3: psc@2400 { // PSC3 | ||
188 | compatible = "fsl,mpc5200b-psc","fsl,mpc5200-psc"; | ||
189 | reg = <0x2400 0x100>; | ||
190 | interrupts = <2 3 0>; | ||
191 | }; | ||
192 | |||
193 | psc4: psc@2600 { // PSC4 | ||
194 | compatible = "fsl,mpc5200b-psc","fsl,mpc5200-psc"; | ||
195 | reg = <0x2600 0x100>; | ||
196 | interrupts = <2 11 0>; | ||
197 | }; | ||
198 | |||
199 | psc5: psc@2800 { // PSC5 | ||
200 | compatible = "fsl,mpc5200b-psc","fsl,mpc5200-psc"; | ||
201 | reg = <0x2800 0x100>; | ||
202 | interrupts = <2 12 0>; | ||
203 | }; | ||
204 | |||
205 | psc6: psc@2c00 { // PSC6 | ||
206 | compatible = "fsl,mpc5200b-psc","fsl,mpc5200-psc"; | ||
207 | reg = <0x2c00 0x100>; | ||
208 | interrupts = <2 4 0>; | ||
209 | }; | ||
210 | |||
211 | eth0: ethernet@3000 { | ||
212 | compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec"; | ||
213 | reg = <0x3000 0x400>; | ||
214 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
215 | interrupts = <2 5 0>; | ||
216 | }; | ||
217 | |||
218 | mdio@3000 { | ||
219 | #address-cells = <1>; | ||
220 | #size-cells = <0>; | ||
221 | compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio"; | ||
222 | reg = <0x3000 0x400>; // fec range, since we need to setup fec interrupts | ||
223 | interrupts = <2 5 0>; // these are for "mii command finished", not link changes & co. | ||
224 | }; | ||
225 | |||
226 | ata@3a00 { | ||
227 | compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata"; | ||
228 | reg = <0x3a00 0x100>; | ||
229 | interrupts = <2 7 0>; | ||
230 | }; | ||
231 | |||
232 | i2c@3d00 { | ||
233 | #address-cells = <1>; | ||
234 | #size-cells = <0>; | ||
235 | compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c"; | ||
236 | reg = <0x3d00 0x40>; | ||
237 | interrupts = <2 15 0>; | ||
238 | }; | ||
239 | |||
240 | i2c@3d40 { | ||
241 | #address-cells = <1>; | ||
242 | #size-cells = <0>; | ||
243 | compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c"; | ||
244 | reg = <0x3d40 0x40>; | ||
245 | interrupts = <2 16 0>; | ||
246 | }; | ||
247 | |||
248 | sram@8000 { | ||
249 | compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram"; | ||
250 | reg = <0x8000 0x4000>; | ||
251 | }; | ||
252 | }; | ||
253 | |||
254 | pci: pci@f0000d00 { | ||
255 | #interrupt-cells = <1>; | ||
256 | #size-cells = <2>; | ||
257 | #address-cells = <3>; | ||
258 | device_type = "pci"; | ||
259 | compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci"; | ||
260 | reg = <0xf0000d00 0x100>; | ||
261 | // interrupt-map-mask = need to add | ||
262 | // interrupt-map = need to add | ||
263 | clock-frequency = <0>; // From boot loader | ||
264 | interrupts = <2 8 0 2 9 0 2 10 0>; | ||
265 | bus-range = <0 0>; | ||
266 | // ranges = need to add | ||
267 | }; | ||
268 | |||
269 | localbus: localbus { | ||
270 | compatible = "fsl,mpc5200b-lpb","fsl,mpc5200-lpb","simple-bus"; | ||
271 | #address-cells = <2>; | ||
272 | #size-cells = <1>; | ||
273 | ranges = <0 0 0xfc000000 0x2000000>; | ||
274 | }; | ||
275 | }; | ||
diff --git a/arch/powerpc/boot/dts/mpc8308_p1m.dts b/arch/powerpc/boot/dts/mpc8308_p1m.dts new file mode 100644 index 000000000000..697b3f6b78bf --- /dev/null +++ b/arch/powerpc/boot/dts/mpc8308_p1m.dts | |||
@@ -0,0 +1,340 @@ | |||
1 | /* | ||
2 | * mpc8308_p1m Device Tree Source | ||
3 | * | ||
4 | * Copyright 2010 Ilya Yanok, Emcraft Systems, yanok@emcraft.com | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | */ | ||
11 | |||
12 | /dts-v1/; | ||
13 | |||
14 | / { | ||
15 | compatible = "denx,mpc8308_p1m"; | ||
16 | #address-cells = <1>; | ||
17 | #size-cells = <1>; | ||
18 | |||
19 | aliases { | ||
20 | ethernet0 = &enet0; | ||
21 | ethernet1 = &enet1; | ||
22 | serial0 = &serial0; | ||
23 | serial1 = &serial1; | ||
24 | pci0 = &pci0; | ||
25 | }; | ||
26 | |||
27 | cpus { | ||
28 | #address-cells = <1>; | ||
29 | #size-cells = <0>; | ||
30 | |||
31 | PowerPC,8308@0 { | ||
32 | device_type = "cpu"; | ||
33 | reg = <0x0>; | ||
34 | d-cache-line-size = <32>; | ||
35 | i-cache-line-size = <32>; | ||
36 | d-cache-size = <16384>; | ||
37 | i-cache-size = <16384>; | ||
38 | timebase-frequency = <0>; // from bootloader | ||
39 | bus-frequency = <0>; // from bootloader | ||
40 | clock-frequency = <0>; // from bootloader | ||
41 | }; | ||
42 | }; | ||
43 | |||
44 | memory { | ||
45 | device_type = "memory"; | ||
46 | reg = <0x00000000 0x08000000>; // 128MB at 0 | ||
47 | }; | ||
48 | |||
49 | localbus@e0005000 { | ||
50 | #address-cells = <2>; | ||
51 | #size-cells = <1>; | ||
52 | compatible = "fsl,mpc8315-elbc", "fsl,elbc", "simple-bus"; | ||
53 | reg = <0xe0005000 0x1000>; | ||
54 | interrupts = <77 0x8>; | ||
55 | interrupt-parent = <&ipic>; | ||
56 | |||
57 | ranges = <0x0 0x0 0xfc000000 0x04000000 | ||
58 | 0x1 0x0 0xfbff0000 0x00008000 | ||
59 | 0x2 0x0 0xfbff8000 0x00008000>; | ||
60 | |||
61 | flash@0,0 { | ||
62 | #address-cells = <1>; | ||
63 | #size-cells = <1>; | ||
64 | compatible = "cfi-flash"; | ||
65 | reg = <0x0 0x0 0x4000000>; | ||
66 | bank-width = <2>; | ||
67 | device-width = <1>; | ||
68 | |||
69 | u-boot@0 { | ||
70 | reg = <0x0 0x60000>; | ||
71 | read-only; | ||
72 | }; | ||
73 | env@60000 { | ||
74 | reg = <0x60000 0x20000>; | ||
75 | }; | ||
76 | env1@80000 { | ||
77 | reg = <0x80000 0x20000>; | ||
78 | }; | ||
79 | kernel@a0000 { | ||
80 | reg = <0xa0000 0x200000>; | ||
81 | }; | ||
82 | dtb@2a0000 { | ||
83 | reg = <0x2a0000 0x20000>; | ||
84 | }; | ||
85 | ramdisk@2c0000 { | ||
86 | reg = <0x2c0000 0x640000>; | ||
87 | }; | ||
88 | user@700000 { | ||
89 | reg = <0x700000 0x3900000>; | ||
90 | }; | ||
91 | }; | ||
92 | |||
93 | can@1,0 { | ||
94 | compatible = "nxp,sja1000"; | ||
95 | reg = <0x1 0x0 0x80>; | ||
96 | interrupts = <18 0x8>; | ||
97 | interrupt-parent = <&ipic>; | ||
98 | }; | ||
99 | |||
100 | cpld@2,0 { | ||
101 | compatible = "denx,mpc8308_p1m-cpld"; | ||
102 | reg = <0x2 0x0 0x8>; | ||
103 | interrupts = <48 0x8>; | ||
104 | interrupt-parent = <&ipic>; | ||
105 | }; | ||
106 | }; | ||
107 | |||
108 | immr@e0000000 { | ||
109 | #address-cells = <1>; | ||
110 | #size-cells = <1>; | ||
111 | device_type = "soc"; | ||
112 | compatible = "fsl,mpc8308-immr", "simple-bus"; | ||
113 | ranges = <0 0xe0000000 0x00100000>; | ||
114 | reg = <0xe0000000 0x00000200>; | ||
115 | bus-frequency = <0>; | ||
116 | |||
117 | i2c@3000 { | ||
118 | #address-cells = <1>; | ||
119 | #size-cells = <0>; | ||
120 | compatible = "fsl-i2c"; | ||
121 | reg = <0x3000 0x100>; | ||
122 | interrupts = <14 0x8>; | ||
123 | interrupt-parent = <&ipic>; | ||
124 | dfsrr; | ||
125 | fram@50 { | ||
126 | compatible = "ramtron,24c64"; | ||
127 | reg = <0x50>; | ||
128 | }; | ||
129 | }; | ||
130 | |||
131 | i2c@3100 { | ||
132 | #address-cells = <1>; | ||
133 | #size-cells = <0>; | ||
134 | compatible = "fsl-i2c"; | ||
135 | reg = <0x3100 0x100>; | ||
136 | interrupts = <15 0x8>; | ||
137 | interrupt-parent = <&ipic>; | ||
138 | dfsrr; | ||
139 | pwm@28 { | ||
140 | compatible = "maxim,ds1050"; | ||
141 | reg = <0x28>; | ||
142 | }; | ||
143 | sensor@48 { | ||
144 | compatible = "maxim,max6625"; | ||
145 | reg = <0x48>; | ||
146 | }; | ||
147 | sensor@49 { | ||
148 | compatible = "maxim,max6625"; | ||
149 | reg = <0x49>; | ||
150 | }; | ||
151 | sensor@4b { | ||
152 | compatible = "maxim,max6625"; | ||
153 | reg = <0x4b>; | ||
154 | }; | ||
155 | }; | ||
156 | |||
157 | usb@23000 { | ||
158 | compatible = "fsl-usb2-dr"; | ||
159 | reg = <0x23000 0x1000>; | ||
160 | #address-cells = <1>; | ||
161 | #size-cells = <0>; | ||
162 | interrupt-parent = <&ipic>; | ||
163 | interrupts = <38 0x8>; | ||
164 | dr_mode = "peripheral"; | ||
165 | phy_type = "ulpi"; | ||
166 | }; | ||
167 | |||
168 | enet0: ethernet@24000 { | ||
169 | #address-cells = <1>; | ||
170 | #size-cells = <1>; | ||
171 | ranges = <0x0 0x24000 0x1000>; | ||
172 | |||
173 | cell-index = <0>; | ||
174 | device_type = "network"; | ||
175 | model = "eTSEC"; | ||
176 | compatible = "gianfar"; | ||
177 | reg = <0x24000 0x1000>; | ||
178 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
179 | interrupts = <32 0x8 33 0x8 34 0x8>; | ||
180 | interrupt-parent = <&ipic>; | ||
181 | phy-handle = < &phy1 >; | ||
182 | |||
183 | mdio@520 { | ||
184 | #address-cells = <1>; | ||
185 | #size-cells = <0>; | ||
186 | compatible = "fsl,gianfar-mdio"; | ||
187 | reg = <0x520 0x20>; | ||
188 | phy1: ethernet-phy@1 { | ||
189 | interrupt-parent = <&ipic>; | ||
190 | interrupts = <17 0x8>; | ||
191 | reg = <0x1>; | ||
192 | device_type = "ethernet-phy"; | ||
193 | }; | ||
194 | phy2: ethernet-phy@2 { | ||
195 | interrupt-parent = <&ipic>; | ||
196 | interrupts = <19 0x8>; | ||
197 | reg = <0x2>; | ||
198 | device_type = "ethernet-phy"; | ||
199 | }; | ||
200 | tbi0: tbi-phy@11 { | ||
201 | reg = <0x11>; | ||
202 | device_type = "tbi-phy"; | ||
203 | }; | ||
204 | }; | ||
205 | }; | ||
206 | |||
207 | enet1: ethernet@25000 { | ||
208 | #address-cells = <1>; | ||
209 | #size-cells = <1>; | ||
210 | cell-index = <1>; | ||
211 | device_type = "network"; | ||
212 | model = "eTSEC"; | ||
213 | compatible = "gianfar"; | ||
214 | reg = <0x25000 0x1000>; | ||
215 | ranges = <0x0 0x25000 0x1000>; | ||
216 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
217 | interrupts = <35 0x8 36 0x8 37 0x8>; | ||
218 | interrupt-parent = <&ipic>; | ||
219 | phy-handle = < &phy2 >; | ||
220 | |||
221 | mdio@520 { | ||
222 | #address-cells = <1>; | ||
223 | #size-cells = <0>; | ||
224 | compatible = "fsl,gianfar-tbi"; | ||
225 | reg = <0x520 0x20>; | ||
226 | tbi1: tbi-phy@11 { | ||
227 | reg = <0x11>; | ||
228 | device_type = "tbi-phy"; | ||
229 | }; | ||
230 | }; | ||
231 | }; | ||
232 | |||
233 | serial0: serial@4500 { | ||
234 | cell-index = <0>; | ||
235 | device_type = "serial"; | ||
236 | compatible = "ns16550"; | ||
237 | reg = <0x4500 0x100>; | ||
238 | clock-frequency = <133333333>; | ||
239 | interrupts = <9 0x8>; | ||
240 | interrupt-parent = <&ipic>; | ||
241 | }; | ||
242 | |||
243 | serial1: serial@4600 { | ||
244 | cell-index = <1>; | ||
245 | device_type = "serial"; | ||
246 | compatible = "ns16550"; | ||
247 | reg = <0x4600 0x100>; | ||
248 | clock-frequency = <133333333>; | ||
249 | interrupts = <10 0x8>; | ||
250 | interrupt-parent = <&ipic>; | ||
251 | }; | ||
252 | |||
253 | gpio@c00 { | ||
254 | #gpio-cells = <2>; | ||
255 | compatible = "fsl,mpc8308-gpio", "fsl,mpc8349-gpio"; | ||
256 | reg = <0xc00 0x18>; | ||
257 | interrupts = <74 0x8>; | ||
258 | interrupt-parent = <&ipic>; | ||
259 | gpio-controller; | ||
260 | }; | ||
261 | |||
262 | timer@500 { | ||
263 | compatible = "fsl,mpc8308-gtm", "fsl,gtm"; | ||
264 | reg = <0x500 0x100>; | ||
265 | interrupts = <90 8 78 8 84 8 72 8>; | ||
266 | interrupt-parent = <&ipic>; | ||
267 | clock-frequency = <133333333>; | ||
268 | }; | ||
269 | |||
270 | /* IPIC | ||
271 | * interrupts cell = <intr #, sense> | ||
272 | * sense values match linux IORESOURCE_IRQ_* defines: | ||
273 | * sense == 8: Level, low assertion | ||
274 | * sense == 2: Edge, high-to-low change | ||
275 | */ | ||
276 | ipic: interrupt-controller@700 { | ||
277 | compatible = "fsl,ipic"; | ||
278 | interrupt-controller; | ||
279 | #address-cells = <0>; | ||
280 | #interrupt-cells = <2>; | ||
281 | reg = <0x700 0x100>; | ||
282 | device_type = "ipic"; | ||
283 | }; | ||
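The comment above the ipic node documents the two-cell interrupt specifier; every consumer in this file uses the level/low sense (8). A small hedged illustration with made-up device nodes, showing both senses:

	dummy-level@0 {
		interrupt-parent = <&ipic>;
		interrupts = <74 0x8>;	/* IRQ 74, level sensitive, active low */
	};

	dummy-edge@0 {
		interrupt-parent = <&ipic>;
		interrupts = <48 0x2>;	/* IRQ 48, high-to-low edge */
	};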
284 | |||
285 | ipic-msi@7c0 { | ||
286 | compatible = "fsl,ipic-msi"; | ||
287 | reg = <0x7c0 0x40>; | ||
288 | msi-available-ranges = <0x0 0x100>; | ||
289 | interrupts = < 0x43 0x8 | ||
290 | 0x4 0x8 | ||
291 | 0x51 0x8 | ||
292 | 0x52 0x8 | ||
293 | 0x56 0x8 | ||
294 | 0x57 0x8 | ||
295 | 0x58 0x8 | ||
296 | 0x59 0x8 >; | ||
297 | interrupt-parent = < &ipic >; | ||
298 | }; | ||
299 | |||
300 | dma@2c000 { | ||
301 | compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma"; | ||
302 | reg = <0x2c000 0x1800>; | ||
303 | interrupts = <3 0x8 | ||
304 | 94 0x8>; | ||
305 | interrupt-parent = < &ipic >; | ||
306 | }; | ||
307 | |||
308 | }; | ||
309 | |||
310 | pci0: pcie@e0009000 { | ||
311 | #address-cells = <3>; | ||
312 | #size-cells = <2>; | ||
313 | #interrupt-cells = <1>; | ||
314 | device_type = "pci"; | ||
315 | compatible = "fsl,mpc8308-pcie", "fsl,mpc8314-pcie"; | ||
316 | reg = <0xe0009000 0x00001000 | ||
317 | 0xb0000000 0x01000000>; | ||
318 | ranges = <0x02000000 0 0xa0000000 0xa0000000 0 0x10000000 | ||
319 | 0x01000000 0 0x00000000 0xb1000000 0 0x00800000>; | ||
320 | bus-range = <0 0>; | ||
321 | interrupt-map-mask = <0 0 0 0>; | ||
322 | interrupt-map = <0 0 0 0 &ipic 1 8>; | ||
323 | interrupts = <0x1 0x8>; | ||
324 | interrupt-parent = <&ipic>; | ||
325 | clock-frequency = <0>; | ||
326 | |||
327 | pcie@0 { | ||
328 | #address-cells = <3>; | ||
329 | #size-cells = <2>; | ||
330 | device_type = "pci"; | ||
331 | reg = <0 0 0 0 0>; | ||
332 | ranges = <0x02000000 0 0xa0000000 | ||
333 | 0x02000000 0 0xa0000000 | ||
334 | 0 0x10000000 | ||
335 | 0x01000000 0 0x00000000 | ||
336 | 0x01000000 0 0x00000000 | ||
337 | 0 0x00800000>; | ||
338 | }; | ||
339 | }; | ||
340 | }; | ||
diff --git a/arch/powerpc/boot/dts/mpc8308rdb.dts b/arch/powerpc/boot/dts/mpc8308rdb.dts index a97eb2db5a18..a0bd1881081e 100644 --- a/arch/powerpc/boot/dts/mpc8308rdb.dts +++ b/arch/powerpc/boot/dts/mpc8308rdb.dts | |||
@@ -109,7 +109,7 @@ | |||
109 | #address-cells = <1>; | 109 | #address-cells = <1>; |
110 | #size-cells = <1>; | 110 | #size-cells = <1>; |
111 | device_type = "soc"; | 111 | device_type = "soc"; |
112 | compatible = "fsl,mpc8315-immr", "simple-bus"; | 112 | compatible = "fsl,mpc8308-immr", "simple-bus"; |
113 | ranges = <0 0xe0000000 0x00100000>; | 113 | ranges = <0 0xe0000000 0x00100000>; |
114 | reg = <0xe0000000 0x00000200>; | 114 | reg = <0xe0000000 0x00000200>; |
115 | bus-frequency = <0>; | 115 | bus-frequency = <0>; |
@@ -265,6 +265,14 @@ | |||
265 | interrupt-parent = < &ipic >; | 265 | interrupt-parent = < &ipic >; |
266 | }; | 266 | }; |
267 | 267 | ||
268 | dma@2c000 { | ||
269 | compatible = "fsl,mpc8308-dma", "fsl,mpc5121-dma"; | ||
270 | reg = <0x2c000 0x1800>; | ||
271 | interrupts = <3 0x8 | ||
272 | 94 0x8>; | ||
273 | interrupt-parent = < &ipic >; | ||
274 | }; | ||
275 | |||
268 | }; | 276 | }; |
269 | 277 | ||
270 | pci0: pcie@e0009000 { | 278 | pci0: pcie@e0009000 { |
diff --git a/arch/powerpc/boot/dts/mpc8313erdb.dts b/arch/powerpc/boot/dts/mpc8313erdb.dts index 761faa7b6964..ac1eb320c7b4 100644 --- a/arch/powerpc/boot/dts/mpc8313erdb.dts +++ b/arch/powerpc/boot/dts/mpc8313erdb.dts | |||
@@ -176,6 +176,19 @@ | |||
176 | sleep = <&pmc 0x00300000>; | 176 | sleep = <&pmc 0x00300000>; |
177 | }; | 177 | }; |
178 | 178 | ||
179 | ptp_clock@24E00 { | ||
180 | compatible = "fsl,etsec-ptp"; | ||
181 | reg = <0x24E00 0xB0>; | ||
182 | interrupts = <12 0x8 13 0x8>; | ||
183 | interrupt-parent = < &ipic >; | ||
184 | fsl,tclk-period = <10>; | ||
185 | fsl,tmr-prsc = <100>; | ||
186 | fsl,tmr-add = <0x999999A4>; | ||
187 | fsl,tmr-fiper1 = <0x3B9AC9F6>; | ||
188 | fsl,tmr-fiper2 = <0x00018696>; | ||
189 | fsl,max-adj = <659999998>; | ||
190 | }; | ||
191 | |||
179 | enet0: ethernet@24000 { | 192 | enet0: ethernet@24000 { |
180 | #address-cells = <1>; | 193 | #address-cells = <1>; |
181 | #size-cells = <1>; | 194 | #size-cells = <1>; |
diff --git a/arch/powerpc/boot/dts/mpc8536ds.dts b/arch/powerpc/boot/dts/mpc8536ds.dts index 815cebb2e3e5..a75c10eed269 100644 --- a/arch/powerpc/boot/dts/mpc8536ds.dts +++ b/arch/powerpc/boot/dts/mpc8536ds.dts | |||
@@ -108,6 +108,58 @@ | |||
108 | }; | 108 | }; |
109 | }; | 109 | }; |
110 | 110 | ||
111 | spi@7000 { | ||
112 | #address-cells = <1>; | ||
113 | #size-cells = <0>; | ||
114 | compatible = "fsl,mpc8536-espi"; | ||
115 | reg = <0x7000 0x1000>; | ||
116 | interrupts = <59 0x2>; | ||
117 | interrupt-parent = <&mpic>; | ||
118 | fsl,espi-num-chipselects = <4>; | ||
119 | |||
120 | flash@0 { | ||
121 | #address-cells = <1>; | ||
122 | #size-cells = <1>; | ||
123 | compatible = "spansion,s25sl12801"; | ||
124 | reg = <0>; | ||
125 | spi-max-frequency = <40000000>; | ||
126 | partition@u-boot { | ||
127 | label = "u-boot"; | ||
128 | reg = <0x00000000 0x00100000>; | ||
129 | read-only; | ||
130 | }; | ||
131 | partition@kernel { | ||
132 | label = "kernel"; | ||
133 | reg = <0x00100000 0x00500000>; | ||
134 | read-only; | ||
135 | }; | ||
136 | partition@dtb { | ||
137 | label = "dtb"; | ||
138 | reg = <0x00600000 0x00100000>; | ||
139 | read-only; | ||
140 | }; | ||
141 | partition@fs { | ||
142 | label = "file system"; | ||
143 | reg = <0x00700000 0x00900000>; | ||
144 | }; | ||
145 | }; | ||
146 | flash@1 { | ||
147 | compatible = "spansion,s25sl12801"; | ||
148 | reg = <1>; | ||
149 | spi-max-frequency = <40000000>; | ||
150 | }; | ||
151 | flash@2 { | ||
152 | compatible = "spansion,s25sl12801"; | ||
153 | reg = <2>; | ||
154 | spi-max-frequency = <40000000>; | ||
155 | }; | ||
156 | flash@3 { | ||
157 | compatible = "spansion,s25sl12801"; | ||
158 | reg = <3>; | ||
159 | spi-max-frequency = <40000000>; | ||
160 | }; | ||
161 | }; | ||
162 | |||
111 | dma@21300 { | 163 | dma@21300 { |
112 | #address-cells = <1>; | 164 | #address-cells = <1>; |
113 | #size-cells = <1>; | 165 | #size-cells = <1>; |
diff --git a/arch/powerpc/boot/dts/mpc8572ds.dts b/arch/powerpc/boot/dts/mpc8572ds.dts index cafc1285c140..f6c04d25e916 100644 --- a/arch/powerpc/boot/dts/mpc8572ds.dts +++ b/arch/powerpc/boot/dts/mpc8572ds.dts | |||
@@ -324,6 +324,19 @@ | |||
324 | }; | 324 | }; |
325 | }; | 325 | }; |
326 | 326 | ||
327 | ptp_clock@24E00 { | ||
328 | compatible = "fsl,etsec-ptp"; | ||
329 | reg = <0x24E00 0xB0>; | ||
330 | interrupts = <68 2 69 2 70 2 71 2>; | ||
331 | interrupt-parent = < &mpic >; | ||
332 | fsl,tclk-period = <5>; | ||
333 | fsl,tmr-prsc = <200>; | ||
334 | fsl,tmr-add = <0xAAAAAAAB>; | ||
335 | fsl,tmr-fiper1 = <0x3B9AC9FB>; | ||
336 | fsl,tmr-fiper2 = <0x3B9AC9FB>; | ||
337 | fsl,max-adj = <499999999>; | ||
338 | }; | ||
339 | |||
327 | enet0: ethernet@24000 { | 340 | enet0: ethernet@24000 { |
328 | #address-cells = <1>; | 341 | #address-cells = <1>; |
329 | #size-cells = <1>; | 342 | #size-cells = <1>; |
diff --git a/arch/powerpc/boot/dts/mpc8610_hpcd.dts b/arch/powerpc/boot/dts/mpc8610_hpcd.dts index 9535ce68caae..83c3218cb4da 100644 --- a/arch/powerpc/boot/dts/mpc8610_hpcd.dts +++ b/arch/powerpc/boot/dts/mpc8610_hpcd.dts | |||
@@ -286,6 +286,7 @@ | |||
286 | 286 | ||
287 | ssi@16100 { | 287 | ssi@16100 { |
288 | compatible = "fsl,mpc8610-ssi"; | 288 | compatible = "fsl,mpc8610-ssi"; |
289 | status = "disabled"; | ||
289 | cell-index = <1>; | 290 | cell-index = <1>; |
290 | reg = <0x16100 0x100>; | 291 | reg = <0x16100 0x100>; |
291 | interrupt-parent = <&mpic>; | 292 | interrupt-parent = <&mpic>; |
diff --git a/arch/powerpc/boot/dts/mucmc52.dts b/arch/powerpc/boot/dts/mucmc52.dts index b72a7581d798..21d34720fcc9 100644 --- a/arch/powerpc/boot/dts/mucmc52.dts +++ b/arch/powerpc/boot/dts/mucmc52.dts | |||
@@ -11,172 +11,109 @@ | |||
11 | * option) any later version. | 11 | * option) any later version. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | /dts-v1/; | 14 | /include/ "mpc5200b.dtsi" |
15 | 15 | ||
16 | / { | 16 | / { |
17 | model = "manroland,mucmc52"; | 17 | model = "manroland,mucmc52"; |
18 | compatible = "manroland,mucmc52"; | 18 | compatible = "manroland,mucmc52"; |
19 | #address-cells = <1>; | ||
20 | #size-cells = <1>; | ||
21 | interrupt-parent = <&mpc5200_pic>; | ||
22 | |||
23 | cpus { | ||
24 | #address-cells = <1>; | ||
25 | #size-cells = <0>; | ||
26 | |||
27 | PowerPC,5200@0 { | ||
28 | device_type = "cpu"; | ||
29 | reg = <0>; | ||
30 | d-cache-line-size = <32>; | ||
31 | i-cache-line-size = <32>; | ||
32 | d-cache-size = <0x4000>; // L1, 16K | ||
33 | i-cache-size = <0x4000>; // L1, 16K | ||
34 | timebase-frequency = <0>; // from bootloader | ||
35 | bus-frequency = <0>; // from bootloader | ||
36 | clock-frequency = <0>; // from bootloader | ||
37 | }; | ||
38 | }; | ||
39 | |||
40 | memory { | ||
41 | device_type = "memory"; | ||
42 | reg = <0x00000000 0x04000000>; // 64MB | ||
43 | }; | ||
44 | 19 | ||
45 | soc5200@f0000000 { | 20 | soc5200@f0000000 { |
46 | #address-cells = <1>; | ||
47 | #size-cells = <1>; | ||
48 | compatible = "fsl,mpc5200b-immr"; | ||
49 | ranges = <0 0xf0000000 0x0000c000>; | ||
50 | reg = <0xf0000000 0x00000100>; | ||
51 | bus-frequency = <0>; // from bootloader | ||
52 | system-frequency = <0>; // from bootloader | ||
53 | |||
54 | cdm@200 { | ||
55 | compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm"; | ||
56 | reg = <0x200 0x38>; | ||
57 | }; | ||
58 | |||
59 | mpc5200_pic: interrupt-controller@500 { | ||
60 | // 5200 interrupts are encoded into two levels; | ||
61 | interrupt-controller; | ||
62 | #interrupt-cells = <3>; | ||
63 | compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic"; | ||
64 | reg = <0x500 0x80>; | ||
65 | }; | ||
66 | |||
67 | gpt0: timer@600 { // GPT 0 in GPIO mode | 21 | gpt0: timer@600 { // GPT 0 in GPIO mode |
68 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
69 | reg = <0x600 0x10>; | ||
70 | interrupts = <1 9 0>; | ||
71 | gpio-controller; | 22 | gpio-controller; |
72 | #gpio-cells = <2>; | 23 | #gpio-cells = <2>; |
73 | }; | 24 | }; |
74 | 25 | ||
75 | gpt1: timer@610 { // General Purpose Timer in GPIO mode | 26 | gpt1: timer@610 { // General Purpose Timer in GPIO mode |
76 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
77 | reg = <0x610 0x10>; | ||
78 | interrupts = <1 10 0>; | ||
79 | gpio-controller; | 27 | gpio-controller; |
80 | #gpio-cells = <2>; | 28 | #gpio-cells = <2>; |
81 | }; | 29 | }; |
82 | 30 | ||
83 | gpt2: timer@620 { // General Purpose Timer in GPIO mode | 31 | gpt2: timer@620 { // General Purpose Timer in GPIO mode |
84 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
85 | reg = <0x620 0x10>; | ||
86 | interrupts = <1 11 0>; | ||
87 | gpio-controller; | 32 | gpio-controller; |
88 | #gpio-cells = <2>; | 33 | #gpio-cells = <2>; |
89 | }; | 34 | }; |
90 | 35 | ||
91 | gpt3: timer@630 { // General Purpose Timer in GPIO mode | 36 | gpt3: timer@630 { // General Purpose Timer in GPIO mode |
92 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
93 | reg = <0x630 0x10>; | ||
94 | interrupts = <1 12 0>; | ||
95 | gpio-controller; | 37 | gpio-controller; |
96 | #gpio-cells = <2>; | 38 | #gpio-cells = <2>; |
97 | }; | 39 | }; |
98 | 40 | ||
99 | gpio_simple: gpio@b00 { | 41 | timer@640 { |
100 | compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio"; | 42 | status = "disabled"; |
101 | reg = <0xb00 0x40>; | ||
102 | interrupts = <1 7 0>; | ||
103 | gpio-controller; | ||
104 | #gpio-cells = <2>; | ||
105 | }; | 43 | }; |
106 | 44 | ||
107 | gpio_wkup: gpio@c00 { | 45 | timer@650 { |
108 | compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup"; | 46 | status = "disabled"; |
109 | reg = <0xc00 0x40>; | 47 | }; |
110 | interrupts = <1 8 0 0 3 0>; | 48 | |
111 | gpio-controller; | 49 | timer@660 { |
112 | #gpio-cells = <2>; | 50 | status = "disabled"; |
51 | }; | ||
52 | |||
53 | timer@670 { | ||
54 | status = "disabled"; | ||
113 | }; | 55 | }; |
114 | 56 | ||
115 | dma-controller@1200 { | 57 | rtc@800 { |
116 | compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm"; | 58 | status = "disabled"; |
117 | reg = <0x1200 0x80>; | ||
118 | interrupts = <3 0 0 3 1 0 3 2 0 3 3 0 | ||
119 | 3 4 0 3 5 0 3 6 0 3 7 0 | ||
120 | 3 8 0 3 9 0 3 10 0 3 11 0 | ||
121 | 3 12 0 3 13 0 3 14 0 3 15 0>; | ||
122 | }; | 59 | }; |
123 | 60 | ||
124 | xlb@1f00 { | 61 | can@900 { |
125 | compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb"; | 62 | status = "disabled"; |
126 | reg = <0x1f00 0x100>; | ||
127 | }; | 63 | }; |
128 | 64 | ||
129 | serial@2000 { /* PSC1 in UART mode */ | 65 | can@980 { |
66 | status = "disabled"; | ||
67 | }; | ||
68 | |||
69 | spi@f00 { | ||
70 | status = "disabled"; | ||
71 | }; | ||
72 | |||
73 | usb@1000 { | ||
74 | status = "disabled"; | ||
75 | }; | ||
76 | |||
77 | psc@2000 { // PSC1 | ||
130 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 78 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
131 | reg = <0x2000 0x100>; | ||
132 | interrupts = <2 1 0>; | ||
133 | }; | 79 | }; |
134 | 80 | ||
135 | serial@2200 { /* PSC2 in UART mode */ | 81 | psc@2200 { // PSC2 |
136 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 82 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
137 | reg = <0x2200 0x100>; | ||
138 | interrupts = <2 2 0>; | ||
139 | }; | 83 | }; |
140 | 84 | ||
141 | serial@2c00 { /* PSC6 in UART mode */ | 85 | psc@2400 { // PSC3 |
86 | status = "disabled"; | ||
87 | }; | ||
88 | |||
89 | psc@2600 { // PSC4 | ||
90 | status = "disabled"; | ||
91 | }; | ||
92 | |||
93 | psc@2800 { // PSC5 | ||
94 | status = "disabled"; | ||
95 | }; | ||
96 | |||
97 | psc@2c00 { // PSC6 | ||
142 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 98 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
143 | reg = <0x2c00 0x100>; | ||
144 | interrupts = <2 4 0>; | ||
145 | }; | 99 | }; |
146 | 100 | ||
147 | ethernet@3000 { | 101 | ethernet@3000 { |
148 | compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec"; | ||
149 | reg = <0x3000 0x400>; | ||
150 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
151 | interrupts = <2 5 0>; | ||
152 | phy-handle = <&phy0>; | 102 | phy-handle = <&phy0>; |
153 | }; | 103 | }; |
154 | 104 | ||
155 | mdio@3000 { | 105 | mdio@3000 { |
156 | #address-cells = <1>; | ||
157 | #size-cells = <0>; | ||
158 | compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio"; | ||
159 | reg = <0x3000 0x400>; // fec range, since we need to setup fec interrupts | ||
160 | interrupts = <2 5 0>; // these are for "mii command finished", not link changes & co. | ||
161 | |||
162 | phy0: ethernet-phy@0 { | 106 | phy0: ethernet-phy@0 { |
163 | compatible = "intel,lxt971"; | 107 | compatible = "intel,lxt971"; |
164 | reg = <0>; | 108 | reg = <0>; |
165 | }; | 109 | }; |
166 | }; | 110 | }; |
167 | 111 | ||
168 | ata@3a00 { | 112 | i2c@3d00 { |
169 | compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata"; | 113 | status = "disabled"; |
170 | reg = <0x3a00 0x100>; | ||
171 | interrupts = <2 7 0>; | ||
172 | }; | 114 | }; |
173 | 115 | ||
174 | i2c@3d40 { | 116 | i2c@3d40 { |
175 | #address-cells = <1>; | ||
176 | #size-cells = <0>; | ||
177 | compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c"; | ||
178 | reg = <0x3d40 0x40>; | ||
179 | interrupts = <2 16 0>; | ||
180 | hwmon@2c { | 117 | hwmon@2c { |
181 | compatible = "ad,adm9240"; | 118 | compatible = "ad,adm9240"; |
182 | reg = <0x2c>; | 119 | reg = <0x2c>; |
@@ -186,20 +123,9 @@ | |||
186 | reg = <0x51>; | 123 | reg = <0x51>; |
187 | }; | 124 | }; |
188 | }; | 125 | }; |
189 | |||
190 | sram@8000 { | ||
191 | compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram"; | ||
192 | reg = <0x8000 0x4000>; | ||
193 | }; | ||
194 | }; | 126 | }; |
195 | 127 | ||
196 | pci@f0000d00 { | 128 | pci@f0000d00 { |
197 | #interrupt-cells = <1>; | ||
198 | #size-cells = <2>; | ||
199 | #address-cells = <3>; | ||
200 | device_type = "pci"; | ||
201 | compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci"; | ||
202 | reg = <0xf0000d00 0x100>; | ||
203 | interrupt-map-mask = <0xf800 0 0 7>; | 129 | interrupt-map-mask = <0xf800 0 0 7>; |
204 | interrupt-map = < | 130 | interrupt-map = < |
205 | /* IDSEL 0x10 */ | 131 | /* IDSEL 0x10 */ |
@@ -208,20 +134,12 @@ | |||
208 | 0x8000 0 0 3 &mpc5200_pic 0 2 3 | 134 | 0x8000 0 0 3 &mpc5200_pic 0 2 3 |
209 | 0x8000 0 0 4 &mpc5200_pic 0 1 3 | 135 | 0x8000 0 0 4 &mpc5200_pic 0 1 3 |
210 | >; | 136 | >; |
211 | clock-frequency = <0>; // From boot loader | ||
212 | interrupts = <2 8 0 2 9 0 2 10 0>; | ||
213 | bus-range = <0 0>; | ||
214 | ranges = <0x42000000 0 0x60000000 0x60000000 0 0x10000000 | 137 | ranges = <0x42000000 0 0x60000000 0x60000000 0 0x10000000 |
215 | 0x02000000 0 0x90000000 0x90000000 0 0x10000000 | 138 | 0x02000000 0 0x90000000 0x90000000 0 0x10000000 |
216 | 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>; | 139 | 0x01000000 0 0x00000000 0xa0000000 0 0x01000000>; |
217 | }; | 140 | }; |
218 | 141 | ||
219 | localbus { | 142 | localbus { |
220 | compatible = "fsl,mpc5200b-lpb","fsl,mpc5200-lpb","simple-bus"; | ||
221 | |||
222 | #address-cells = <2>; | ||
223 | #size-cells = <1>; | ||
224 | |||
225 | ranges = <0 0 0xff800000 0x00800000 | 143 | ranges = <0 0 0xff800000 0x00800000 |
226 | 1 0 0x80000000 0x00800000 | 144 | 1 0 0x80000000 0x00800000 |
227 | 3 0 0x80000000 0x00800000>; | 145 | 3 0 0x80000000 0x00800000>; |
diff --git a/arch/powerpc/boot/dts/p1020rdb.dts b/arch/powerpc/boot/dts/p1020rdb.dts index 22f64b62d7f6..d6a8ae458137 100644 --- a/arch/powerpc/boot/dts/p1020rdb.dts +++ b/arch/powerpc/boot/dts/p1020rdb.dts | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * P1020 RDB Device Tree Source | 2 | * P1020 RDB Device Tree Source |
3 | * | 3 | * |
4 | * Copyright 2009 Freescale Semiconductor Inc. | 4 | * Copyright 2009-2011 Freescale Semiconductor Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the | 7 | * under the terms of the GNU General Public License as published by the |
@@ -9,12 +9,11 @@ | |||
9 | * option) any later version. | 9 | * option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | /dts-v1/; | 12 | /include/ "p1020si.dtsi" |
13 | |||
13 | / { | 14 | / { |
14 | model = "fsl,P1020"; | 15 | model = "fsl,P1020RDB"; |
15 | compatible = "fsl,P1020RDB"; | 16 | compatible = "fsl,P1020RDB"; |
16 | #address-cells = <2>; | ||
17 | #size-cells = <2>; | ||
18 | 17 | ||
19 | aliases { | 18 | aliases { |
20 | serial0 = &serial0; | 19 | serial0 = &serial0; |
@@ -26,34 +25,11 @@ | |||
26 | pci1 = &pci1; | 25 | pci1 = &pci1; |
27 | }; | 26 | }; |
28 | 27 | ||
29 | cpus { | ||
30 | #address-cells = <1>; | ||
31 | #size-cells = <0>; | ||
32 | |||
33 | PowerPC,P1020@0 { | ||
34 | device_type = "cpu"; | ||
35 | reg = <0x0>; | ||
36 | next-level-cache = <&L2>; | ||
37 | }; | ||
38 | |||
39 | PowerPC,P1020@1 { | ||
40 | device_type = "cpu"; | ||
41 | reg = <0x1>; | ||
42 | next-level-cache = <&L2>; | ||
43 | }; | ||
44 | }; | ||
45 | |||
46 | memory { | 28 | memory { |
47 | device_type = "memory"; | 29 | device_type = "memory"; |
48 | }; | 30 | }; |
49 | 31 | ||
50 | localbus@ffe05000 { | 32 | localbus@ffe05000 { |
51 | #address-cells = <2>; | ||
52 | #size-cells = <1>; | ||
53 | compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus"; | ||
54 | reg = <0 0xffe05000 0 0x1000>; | ||
55 | interrupts = <19 2>; | ||
56 | interrupt-parent = <&mpic>; | ||
57 | 33 | ||
58 | /* NOR, NAND Flashes and Vitesse 5 port L2 switch */ | 34 | /* NOR, NAND Flashes and Vitesse 5 port L2 switch */ |
59 | ranges = <0x0 0x0 0x0 0xef000000 0x01000000 | 35 | ranges = <0x0 0x0 0x0 0xef000000 0x01000000 |
@@ -165,88 +141,14 @@ | |||
165 | }; | 141 | }; |
166 | 142 | ||
167 | soc@ffe00000 { | 143 | soc@ffe00000 { |
168 | #address-cells = <1>; | ||
169 | #size-cells = <1>; | ||
170 | device_type = "soc"; | ||
171 | compatible = "fsl,p1020-immr", "simple-bus"; | ||
172 | ranges = <0x0 0x0 0xffe00000 0x100000>; | ||
173 | bus-frequency = <0>; // Filled out by uboot. | ||
174 | |||
175 | ecm-law@0 { | ||
176 | compatible = "fsl,ecm-law"; | ||
177 | reg = <0x0 0x1000>; | ||
178 | fsl,num-laws = <12>; | ||
179 | }; | ||
180 | |||
181 | ecm@1000 { | ||
182 | compatible = "fsl,p1020-ecm", "fsl,ecm"; | ||
183 | reg = <0x1000 0x1000>; | ||
184 | interrupts = <16 2>; | ||
185 | interrupt-parent = <&mpic>; | ||
186 | }; | ||
187 | |||
188 | memory-controller@2000 { | ||
189 | compatible = "fsl,p1020-memory-controller"; | ||
190 | reg = <0x2000 0x1000>; | ||
191 | interrupt-parent = <&mpic>; | ||
192 | interrupts = <16 2>; | ||
193 | }; | ||
194 | |||
195 | i2c@3000 { | 144 | i2c@3000 { |
196 | #address-cells = <1>; | ||
197 | #size-cells = <0>; | ||
198 | cell-index = <0>; | ||
199 | compatible = "fsl-i2c"; | ||
200 | reg = <0x3000 0x100>; | ||
201 | interrupts = <43 2>; | ||
202 | interrupt-parent = <&mpic>; | ||
203 | dfsrr; | ||
204 | rtc@68 { | 145 | rtc@68 { |
205 | compatible = "dallas,ds1339"; | 146 | compatible = "dallas,ds1339"; |
206 | reg = <0x68>; | 147 | reg = <0x68>; |
207 | }; | 148 | }; |
208 | }; | 149 | }; |
209 | 150 | ||
210 | i2c@3100 { | ||
211 | #address-cells = <1>; | ||
212 | #size-cells = <0>; | ||
213 | cell-index = <1>; | ||
214 | compatible = "fsl-i2c"; | ||
215 | reg = <0x3100 0x100>; | ||
216 | interrupts = <43 2>; | ||
217 | interrupt-parent = <&mpic>; | ||
218 | dfsrr; | ||
219 | }; | ||
220 | |||
221 | serial0: serial@4500 { | ||
222 | cell-index = <0>; | ||
223 | device_type = "serial"; | ||
224 | compatible = "ns16550"; | ||
225 | reg = <0x4500 0x100>; | ||
226 | clock-frequency = <0>; | ||
227 | interrupts = <42 2>; | ||
228 | interrupt-parent = <&mpic>; | ||
229 | }; | ||
230 | |||
231 | serial1: serial@4600 { | ||
232 | cell-index = <1>; | ||
233 | device_type = "serial"; | ||
234 | compatible = "ns16550"; | ||
235 | reg = <0x4600 0x100>; | ||
236 | clock-frequency = <0>; | ||
237 | interrupts = <42 2>; | ||
238 | interrupt-parent = <&mpic>; | ||
239 | }; | ||
240 | |||
241 | spi@7000 { | 151 | spi@7000 { |
242 | cell-index = <0>; | ||
243 | #address-cells = <1>; | ||
244 | #size-cells = <0>; | ||
245 | compatible = "fsl,espi"; | ||
246 | reg = <0x7000 0x1000>; | ||
247 | interrupts = <59 0x2>; | ||
248 | interrupt-parent = <&mpic>; | ||
249 | mode = "cpu"; | ||
250 | 152 | ||
251 | fsl_m25p80@0 { | 153 | fsl_m25p80@0 { |
252 | #address-cells = <1>; | 154 | #address-cells = <1>; |
@@ -294,66 +196,7 @@ | |||
294 | }; | 196 | }; |
295 | }; | 197 | }; |
296 | 198 | ||
297 | gpio: gpio-controller@f000 { | ||
298 | #gpio-cells = <2>; | ||
299 | compatible = "fsl,mpc8572-gpio"; | ||
300 | reg = <0xf000 0x100>; | ||
301 | interrupts = <47 0x2>; | ||
302 | interrupt-parent = <&mpic>; | ||
303 | gpio-controller; | ||
304 | }; | ||
305 | |||
306 | L2: l2-cache-controller@20000 { | ||
307 | compatible = "fsl,p1020-l2-cache-controller"; | ||
308 | reg = <0x20000 0x1000>; | ||
309 | cache-line-size = <32>; // 32 bytes | ||
310 | cache-size = <0x40000>; // L2,256K | ||
311 | interrupt-parent = <&mpic>; | ||
312 | interrupts = <16 2>; | ||
313 | }; | ||
314 | |||
315 | dma@21300 { | ||
316 | #address-cells = <1>; | ||
317 | #size-cells = <1>; | ||
318 | compatible = "fsl,eloplus-dma"; | ||
319 | reg = <0x21300 0x4>; | ||
320 | ranges = <0x0 0x21100 0x200>; | ||
321 | cell-index = <0>; | ||
322 | dma-channel@0 { | ||
323 | compatible = "fsl,eloplus-dma-channel"; | ||
324 | reg = <0x0 0x80>; | ||
325 | cell-index = <0>; | ||
326 | interrupt-parent = <&mpic>; | ||
327 | interrupts = <20 2>; | ||
328 | }; | ||
329 | dma-channel@80 { | ||
330 | compatible = "fsl,eloplus-dma-channel"; | ||
331 | reg = <0x80 0x80>; | ||
332 | cell-index = <1>; | ||
333 | interrupt-parent = <&mpic>; | ||
334 | interrupts = <21 2>; | ||
335 | }; | ||
336 | dma-channel@100 { | ||
337 | compatible = "fsl,eloplus-dma-channel"; | ||
338 | reg = <0x100 0x80>; | ||
339 | cell-index = <2>; | ||
340 | interrupt-parent = <&mpic>; | ||
341 | interrupts = <22 2>; | ||
342 | }; | ||
343 | dma-channel@180 { | ||
344 | compatible = "fsl,eloplus-dma-channel"; | ||
345 | reg = <0x180 0x80>; | ||
346 | cell-index = <3>; | ||
347 | interrupt-parent = <&mpic>; | ||
348 | interrupts = <23 2>; | ||
349 | }; | ||
350 | }; | ||
351 | |||
352 | mdio@24000 { | 199 | mdio@24000 { |
353 | #address-cells = <1>; | ||
354 | #size-cells = <0>; | ||
355 | compatible = "fsl,etsec2-mdio"; | ||
356 | reg = <0x24000 0x1000 0xb0030 0x4>; | ||
357 | 200 | ||
358 | phy0: ethernet-phy@0 { | 201 | phy0: ethernet-phy@0 { |
359 | interrupt-parent = <&mpic>; | 202 | interrupt-parent = <&mpic>; |
@@ -369,10 +212,6 @@ | |||
369 | }; | 212 | }; |
370 | 213 | ||
371 | mdio@25000 { | 214 | mdio@25000 { |
372 | #address-cells = <1>; | ||
373 | #size-cells = <0>; | ||
374 | compatible = "fsl,etsec2-tbi"; | ||
375 | reg = <0x25000 0x1000 0xb1030 0x4>; | ||
376 | 215 | ||
377 | tbi0: tbi-phy@11 { | 216 | tbi0: tbi-phy@11 { |
378 | reg = <0x11>; | 217 | reg = <0x11>; |
@@ -381,97 +220,25 @@ | |||
381 | }; | 220 | }; |
382 | 221 | ||
383 | enet0: ethernet@b0000 { | 222 | enet0: ethernet@b0000 { |
384 | #address-cells = <1>; | ||
385 | #size-cells = <1>; | ||
386 | device_type = "network"; | ||
387 | model = "eTSEC"; | ||
388 | compatible = "fsl,etsec2"; | ||
389 | fsl,num_rx_queues = <0x8>; | ||
390 | fsl,num_tx_queues = <0x8>; | ||
391 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
392 | interrupt-parent = <&mpic>; | ||
393 | fixed-link = <1 1 1000 0 0>; | 223 | fixed-link = <1 1 1000 0 0>; |
394 | phy-connection-type = "rgmii-id"; | 224 | phy-connection-type = "rgmii-id"; |
395 | 225 | ||
396 | queue-group@0 { | ||
397 | #address-cells = <1>; | ||
398 | #size-cells = <1>; | ||
399 | reg = <0xb0000 0x1000>; | ||
400 | interrupts = <29 2 30 2 34 2>; | ||
401 | }; | ||
402 | |||
403 | queue-group@1 { | ||
404 | #address-cells = <1>; | ||
405 | #size-cells = <1>; | ||
406 | reg = <0xb4000 0x1000>; | ||
407 | interrupts = <17 2 18 2 24 2>; | ||
408 | }; | ||
409 | }; | 226 | }; |
410 | 227 | ||
411 | enet1: ethernet@b1000 { | 228 | enet1: ethernet@b1000 { |
412 | #address-cells = <1>; | ||
413 | #size-cells = <1>; | ||
414 | device_type = "network"; | ||
415 | model = "eTSEC"; | ||
416 | compatible = "fsl,etsec2"; | ||
417 | fsl,num_rx_queues = <0x8>; | ||
418 | fsl,num_tx_queues = <0x8>; | ||
419 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
420 | interrupt-parent = <&mpic>; | ||
421 | phy-handle = <&phy0>; | 229 | phy-handle = <&phy0>; |
422 | tbi-handle = <&tbi0>; | 230 | tbi-handle = <&tbi0>; |
423 | phy-connection-type = "sgmii"; | 231 | phy-connection-type = "sgmii"; |
424 | 232 | ||
425 | queue-group@0 { | ||
426 | #address-cells = <1>; | ||
427 | #size-cells = <1>; | ||
428 | reg = <0xb1000 0x1000>; | ||
429 | interrupts = <35 2 36 2 40 2>; | ||
430 | }; | ||
431 | |||
432 | queue-group@1 { | ||
433 | #address-cells = <1>; | ||
434 | #size-cells = <1>; | ||
435 | reg = <0xb5000 0x1000>; | ||
436 | interrupts = <51 2 52 2 67 2>; | ||
437 | }; | ||
438 | }; | 233 | }; |
439 | 234 | ||
440 | enet2: ethernet@b2000 { | 235 | enet2: ethernet@b2000 { |
441 | #address-cells = <1>; | ||
442 | #size-cells = <1>; | ||
443 | device_type = "network"; | ||
444 | model = "eTSEC"; | ||
445 | compatible = "fsl,etsec2"; | ||
446 | fsl,num_rx_queues = <0x8>; | ||
447 | fsl,num_tx_queues = <0x8>; | ||
448 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
449 | interrupt-parent = <&mpic>; | ||
450 | phy-handle = <&phy1>; | 236 | phy-handle = <&phy1>; |
451 | phy-connection-type = "rgmii-id"; | 237 | phy-connection-type = "rgmii-id"; |
452 | 238 | ||
453 | queue-group@0 { | ||
454 | #address-cells = <1>; | ||
455 | #size-cells = <1>; | ||
456 | reg = <0xb2000 0x1000>; | ||
457 | interrupts = <31 2 32 2 33 2>; | ||
458 | }; | ||
459 | |||
460 | queue-group@1 { | ||
461 | #address-cells = <1>; | ||
462 | #size-cells = <1>; | ||
463 | reg = <0xb6000 0x1000>; | ||
464 | interrupts = <25 2 26 2 27 2>; | ||
465 | }; | ||
466 | }; | 239 | }; |
467 | 240 | ||
468 | usb@22000 { | 241 | usb@22000 { |
469 | #address-cells = <1>; | ||
470 | #size-cells = <0>; | ||
471 | compatible = "fsl-usb2-dr"; | ||
472 | reg = <0x22000 0x1000>; | ||
473 | interrupt-parent = <&mpic>; | ||
474 | interrupts = <28 0x2>; | ||
475 | phy_type = "ulpi"; | 242 | phy_type = "ulpi"; |
476 | }; | 243 | }; |
477 | 244 | ||
@@ -481,82 +248,23 @@ | |||
481 | it enables USB2. OTOH, U-Boot does create a new node | 248 | it enables USB2. OTOH, U-Boot does create a new node |
482 | when there isn't any. So, just comment it out. | 249 | when there isn't any. So, just comment it out. |
483 | usb@23000 { | 250 | usb@23000 { |
484 | #address-cells = <1>; | ||
485 | #size-cells = <0>; | ||
486 | compatible = "fsl-usb2-dr"; | ||
487 | reg = <0x23000 0x1000>; | ||
488 | interrupt-parent = <&mpic>; | ||
489 | interrupts = <46 0x2>; | ||
490 | phy_type = "ulpi"; | 251 | phy_type = "ulpi"; |
491 | }; | 252 | }; |
492 | */ | 253 | */ |
493 | 254 | ||
494 | sdhci@2e000 { | ||
495 | compatible = "fsl,p1020-esdhc", "fsl,esdhc"; | ||
496 | reg = <0x2e000 0x1000>; | ||
497 | interrupts = <72 0x2>; | ||
498 | interrupt-parent = <&mpic>; | ||
499 | /* Filled in by U-Boot */ | ||
500 | clock-frequency = <0>; | ||
501 | }; | ||
502 | |||
503 | crypto@30000 { | ||
504 | compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4", | ||
505 | "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; | ||
506 | reg = <0x30000 0x10000>; | ||
507 | interrupts = <45 2 58 2>; | ||
508 | interrupt-parent = <&mpic>; | ||
509 | fsl,num-channels = <4>; | ||
510 | fsl,channel-fifo-len = <24>; | ||
511 | fsl,exec-units-mask = <0xbfe>; | ||
512 | fsl,descriptor-types-mask = <0x3ab0ebf>; | ||
513 | }; | ||
514 | |||
515 | mpic: pic@40000 { | ||
516 | interrupt-controller; | ||
517 | #address-cells = <0>; | ||
518 | #interrupt-cells = <2>; | ||
519 | reg = <0x40000 0x40000>; | ||
520 | compatible = "chrp,open-pic"; | ||
521 | device_type = "open-pic"; | ||
522 | }; | ||
523 | |||
524 | msi@41600 { | ||
525 | compatible = "fsl,p1020-msi", "fsl,mpic-msi"; | ||
526 | reg = <0x41600 0x80>; | ||
527 | msi-available-ranges = <0 0x100>; | ||
528 | interrupts = < | ||
529 | 0xe0 0 | ||
530 | 0xe1 0 | ||
531 | 0xe2 0 | ||
532 | 0xe3 0 | ||
533 | 0xe4 0 | ||
534 | 0xe5 0 | ||
535 | 0xe6 0 | ||
536 | 0xe7 0>; | ||
537 | interrupt-parent = <&mpic>; | ||
538 | }; | ||
539 | |||
540 | global-utilities@e0000 { //global utilities block | ||
541 | compatible = "fsl,p1020-guts"; | ||
542 | reg = <0xe0000 0x1000>; | ||
543 | fsl,has-rstcr; | ||
544 | }; | ||
545 | }; | 255 | }; |
546 | 256 | ||
547 | pci0: pcie@ffe09000 { | 257 | pci0: pcie@ffe09000 { |
548 | compatible = "fsl,mpc8548-pcie"; | ||
549 | device_type = "pci"; | ||
550 | #interrupt-cells = <1>; | ||
551 | #size-cells = <2>; | ||
552 | #address-cells = <3>; | ||
553 | reg = <0 0xffe09000 0 0x1000>; | ||
554 | bus-range = <0 255>; | ||
555 | ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 | 258 | ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 |
556 | 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>; | 259 | 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; |
557 | clock-frequency = <33333333>; | 260 | interrupt-map-mask = <0xf800 0x0 0x0 0x7>; |
558 | interrupt-parent = <&mpic>; | 261 | interrupt-map = < |
559 | interrupts = <16 2>; | 262 | /* IDSEL 0x0 */ |
263 | 0000 0x0 0x0 0x1 &mpic 0x4 0x1 | ||
264 | 0000 0x0 0x0 0x2 &mpic 0x5 0x1 | ||
265 | 0000 0x0 0x0 0x3 &mpic 0x6 0x1 | ||
266 | 0000 0x0 0x0 0x4 &mpic 0x7 0x1 | ||
267 | >; | ||
560 | pcie@0 { | 268 | pcie@0 { |
561 | reg = <0x0 0x0 0x0 0x0 0x0>; | 269 | reg = <0x0 0x0 0x0 0x0 0x0>; |
562 | #size-cells = <2>; | 270 | #size-cells = <2>; |
@@ -573,25 +281,23 @@ | |||
573 | }; | 281 | }; |
574 | 282 | ||
575 | pci1: pcie@ffe0a000 { | 283 | pci1: pcie@ffe0a000 { |
576 | compatible = "fsl,mpc8548-pcie"; | 284 | ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 |
577 | device_type = "pci"; | 285 | 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; |
578 | #interrupt-cells = <1>; | 286 | interrupt-map-mask = <0xf800 0x0 0x0 0x7>; |
579 | #size-cells = <2>; | 287 | interrupt-map = < |
580 | #address-cells = <3>; | 288 | /* IDSEL 0x0 */ |
581 | reg = <0 0xffe0a000 0 0x1000>; | 289 | 0000 0x0 0x0 0x1 &mpic 0x0 0x1 |
582 | bus-range = <0 255>; | 290 | 0000 0x0 0x0 0x2 &mpic 0x1 0x1 |
583 | ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 | 291 | 0000 0x0 0x0 0x3 &mpic 0x2 0x1 |
584 | 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>; | 292 | 0000 0x0 0x0 0x4 &mpic 0x3 0x1 |
585 | clock-frequency = <33333333>; | 293 | >; |
586 | interrupt-parent = <&mpic>; | ||
587 | interrupts = <16 2>; | ||
588 | pcie@0 { | 294 | pcie@0 { |
589 | reg = <0x0 0x0 0x0 0x0 0x0>; | 295 | reg = <0x0 0x0 0x0 0x0 0x0>; |
590 | #size-cells = <2>; | 296 | #size-cells = <2>; |
591 | #address-cells = <3>; | 297 | #address-cells = <3>; |
592 | device_type = "pci"; | 298 | device_type = "pci"; |
593 | ranges = <0x2000000 0x0 0xc0000000 | 299 | ranges = <0x2000000 0x0 0x80000000 |
594 | 0x2000000 0x0 0xc0000000 | 300 | 0x2000000 0x0 0x80000000 |
595 | 0x0 0x20000000 | 301 | 0x0 0x20000000 |
596 | 302 | ||
597 | 0x1000000 0x0 0x0 | 303 | 0x1000000 0x0 0x0 |
diff --git a/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts new file mode 100644 index 000000000000..f0bf7f42f097 --- /dev/null +++ b/arch/powerpc/boot/dts/p1020rdb_camp_core0.dts | |||
@@ -0,0 +1,213 @@ | |||
1 | /* | ||
2 | * P1020 RDB Core0 Device Tree Source in CAMP mode. | ||
3 | * | ||
4 | * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache | ||
5 | * can be shared; all the other devices must be assigned to one core only. | ||
6 | * This dts file allows core0 to have memory, l2, i2c, spi, gpio, tdm, dma, usb, | ||
7 | * eth1, eth2, sdhc, crypto, global-util, message, pci0, pci1, msi. | ||
8 | * | ||
9 | * Note that core0's dts must be compiled with "-b 0". | ||
10 | * | ||
11 | * Copyright 2011 Freescale Semiconductor Inc. | ||
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify it | ||
14 | * under the terms of the GNU General Public License as published by the | ||
15 | * Free Software Foundation; either version 2 of the License, or (at your | ||
16 | * option) any later version. | ||
17 | */ | ||
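The "-b 0" note above most likely refers to dtc's boot-CPU option. As a hedged illustration (these exact commands are not part of this patch), the two CAMP blobs would be produced roughly like this:

	dtc -I dts -O dtb -b 0 -o p1020rdb_camp_core0.dtb p1020rdb_camp_core0.dts
	dtc -I dts -O dtb -b 1 -o p1020rdb_camp_core1.dtb p1020rdb_camp_core1.dts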
18 | |||
19 | /include/ "p1020si.dtsi" | ||
20 | |||
21 | / { | ||
22 | model = "fsl,P1020RDB"; | ||
23 | compatible = "fsl,P1020RDB", "fsl,MPC85XXRDB-CAMP"; | ||
24 | |||
25 | aliases { | ||
26 | ethernet1 = &enet1; | ||
27 | ethernet2 = &enet2; | ||
28 | serial0 = &serial0; | ||
29 | pci0 = &pci0; | ||
30 | pci1 = &pci1; | ||
31 | }; | ||
32 | |||
33 | cpus { | ||
34 | PowerPC,P1020@1 { | ||
35 | status = "disabled"; | ||
36 | }; | ||
37 | }; | ||
38 | |||
39 | memory { | ||
40 | device_type = "memory"; | ||
41 | }; | ||
42 | |||
43 | localbus@ffe05000 { | ||
44 | status = "disabled"; | ||
45 | }; | ||
46 | |||
47 | soc@ffe00000 { | ||
48 | i2c@3000 { | ||
49 | rtc@68 { | ||
50 | compatible = "dallas,ds1339"; | ||
51 | reg = <0x68>; | ||
52 | }; | ||
53 | }; | ||
54 | |||
55 | serial1: serial@4600 { | ||
56 | status = "disabled"; | ||
57 | }; | ||
58 | |||
59 | spi@7000 { | ||
60 | fsl_m25p80@0 { | ||
61 | #address-cells = <1>; | ||
62 | #size-cells = <1>; | ||
63 | compatible = "fsl,espi-flash"; | ||
64 | reg = <0>; | ||
65 | linux,modalias = "fsl_m25p80"; | ||
66 | spi-max-frequency = <40000000>; | ||
67 | |||
68 | partition@0 { | ||
69 | /* 512KB for u-boot Bootloader Image */ | ||
70 | reg = <0x0 0x00080000>; | ||
71 | label = "SPI (RO) U-Boot Image"; | ||
72 | read-only; | ||
73 | }; | ||
74 | |||
75 | partition@80000 { | ||
76 | /* 512KB for DTB Image */ | ||
77 | reg = <0x00080000 0x00080000>; | ||
78 | label = "SPI (RO) DTB Image"; | ||
79 | read-only; | ||
80 | }; | ||
81 | |||
82 | partition@100000 { | ||
83 | /* 4MB for Linux Kernel Image */ | ||
84 | reg = <0x00100000 0x00400000>; | ||
85 | label = "SPI (RO) Linux Kernel Image"; | ||
86 | read-only; | ||
87 | }; | ||
88 | |||
89 | partition@500000 { | ||
90 | /* 4MB for Compressed RFS Image */ | ||
91 | reg = <0x00500000 0x00400000>; | ||
92 | label = "SPI (RO) Compressed RFS Image"; | ||
93 | read-only; | ||
94 | }; | ||
95 | |||
96 | partition@900000 { | ||
97 | /* 7MB for JFFS2 based RFS */ | ||
98 | reg = <0x00900000 0x00700000>; | ||
99 | label = "SPI (RW) JFFS2 RFS"; | ||
100 | }; | ||
101 | }; | ||
102 | }; | ||
103 | |||
104 | mdio@24000 { | ||
105 | phy0: ethernet-phy@0 { | ||
106 | interrupt-parent = <&mpic>; | ||
107 | interrupts = <3 1>; | ||
108 | reg = <0x0>; | ||
109 | }; | ||
110 | phy1: ethernet-phy@1 { | ||
111 | interrupt-parent = <&mpic>; | ||
112 | interrupts = <2 1>; | ||
113 | reg = <0x1>; | ||
114 | }; | ||
115 | }; | ||
116 | |||
117 | mdio@25000 { | ||
118 | tbi0: tbi-phy@11 { | ||
119 | reg = <0x11>; | ||
120 | device_type = "tbi-phy"; | ||
121 | }; | ||
122 | }; | ||
123 | |||
124 | enet0: ethernet@b0000 { | ||
125 | status = "disabled"; | ||
126 | }; | ||
127 | |||
128 | enet1: ethernet@b1000 { | ||
129 | phy-handle = <&phy0>; | ||
130 | tbi-handle = <&tbi0>; | ||
131 | phy-connection-type = "sgmii"; | ||
132 | }; | ||
133 | |||
134 | enet2: ethernet@b2000 { | ||
135 | phy-handle = <&phy1>; | ||
136 | phy-connection-type = "rgmii-id"; | ||
137 | }; | ||
138 | |||
139 | usb@22000 { | ||
140 | phy_type = "ulpi"; | ||
141 | }; | ||
142 | |||
143 | /* USB2 is shared with localbus, so it must be disabled | ||
144 | by default. We can't put 'status = "disabled";' here | ||
145 | since U-Boot doesn't clear the status property when | ||
146 | it enables USB2. OTOH, U-Boot does create a new node | ||
147 | when there isn't any. So, just comment it out. | ||
148 | usb@23000 { | ||
149 | phy_type = "ulpi"; | ||
150 | }; | ||
151 | */ | ||
152 | |||
153 | mpic: pic@40000 { | ||
154 | protected-sources = < | ||
155 | 42 29 30 34 /* serial1, enet0-queue-group0 */ | ||
156 | 17 18 24 45 /* enet0-queue-group1, crypto */ | ||
157 | >; | ||
158 | }; | ||
159 | |||
160 | }; | ||
161 | |||
162 | pci0: pcie@ffe09000 { | ||
163 | ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 | ||
164 | 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; | ||
165 | interrupt-map-mask = <0xf800 0x0 0x0 0x7>; | ||
166 | interrupt-map = < | ||
167 | /* IDSEL 0x0 */ | ||
168 | 0000 0x0 0x0 0x1 &mpic 0x4 0x1 | ||
169 | 0000 0x0 0x0 0x2 &mpic 0x5 0x1 | ||
170 | 0000 0x0 0x0 0x3 &mpic 0x6 0x1 | ||
171 | 0000 0x0 0x0 0x4 &mpic 0x7 0x1 | ||
172 | >; | ||
173 | pcie@0 { | ||
174 | reg = <0x0 0x0 0x0 0x0 0x0>; | ||
175 | #size-cells = <2>; | ||
176 | #address-cells = <3>; | ||
177 | device_type = "pci"; | ||
178 | ranges = <0x2000000 0x0 0xa0000000 | ||
179 | 0x2000000 0x0 0xa0000000 | ||
180 | 0x0 0x20000000 | ||
181 | |||
182 | 0x1000000 0x0 0x0 | ||
183 | 0x1000000 0x0 0x0 | ||
184 | 0x0 0x100000>; | ||
185 | }; | ||
186 | }; | ||
187 | |||
188 | pci1: pcie@ffe0a000 { | ||
189 | ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 | ||
190 | 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; | ||
191 | interrupt-map-mask = <0xf800 0x0 0x0 0x7>; | ||
192 | interrupt-map = < | ||
193 | /* IDSEL 0x0 */ | ||
194 | 0000 0x0 0x0 0x1 &mpic 0x0 0x1 | ||
195 | 0000 0x0 0x0 0x2 &mpic 0x1 0x1 | ||
196 | 0000 0x0 0x0 0x3 &mpic 0x2 0x1 | ||
197 | 0000 0x0 0x0 0x4 &mpic 0x3 0x1 | ||
198 | >; | ||
199 | pcie@0 { | ||
200 | reg = <0x0 0x0 0x0 0x0 0x0>; | ||
201 | #size-cells = <2>; | ||
202 | #address-cells = <3>; | ||
203 | device_type = "pci"; | ||
204 | ranges = <0x2000000 0x0 0x80000000 | ||
205 | 0x2000000 0x0 0x80000000 | ||
206 | 0x0 0x20000000 | ||
207 | |||
208 | 0x1000000 0x0 0x0 | ||
209 | 0x1000000 0x0 0x0 | ||
210 | 0x0 0x100000>; | ||
211 | }; | ||
212 | }; | ||
213 | }; | ||
diff --git a/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts new file mode 100644 index 000000000000..6ec02204a44e --- /dev/null +++ b/arch/powerpc/boot/dts/p1020rdb_camp_core1.dts | |||
@@ -0,0 +1,148 @@ | |||
1 | /* | ||
2 | * P1020 RDB Core1 Device Tree Source in CAMP mode. | ||
3 | * | ||
4 | * In CAMP mode, each core needs to have its own dts. Only mpic and L2 cache | ||
5 | * can be shared; all the other devices must be assigned to one core only. | ||
6 | * This dts allows core1 to have l2, eth0, crypto. | ||
7 | * | ||
8 | * Note that core1's dts must be compiled with "-b 1". | ||
9 | * | ||
10 | * Copyright 2011 Freescale Semiconductor Inc. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify it | ||
13 | * under the terms of the GNU General Public License as published by the | ||
14 | * Free Software Foundation; either version 2 of the License, or (at your | ||
15 | * option) any later version. | ||
16 | */ | ||
17 | |||
18 | /include/ "p1020si.dtsi" | ||
19 | |||
20 | / { | ||
21 | model = "fsl,P1020RDB"; | ||
22 | compatible = "fsl,P1020RDB", "fsl,MPC85XXRDB-CAMP"; | ||
23 | |||
24 | aliases { | ||
25 | ethernet0 = &enet0; | ||
26 | serial0 = &serial1; | ||
27 | }; | ||
28 | |||
29 | cpus { | ||
30 | PowerPC,P1020@0 { | ||
31 | status = "disabled"; | ||
32 | }; | ||
33 | }; | ||
34 | |||
35 | memory { | ||
36 | device_type = "memory"; | ||
37 | }; | ||
38 | |||
39 | localbus@ffe05000 { | ||
40 | status = "disabled"; | ||
41 | }; | ||
42 | |||
43 | soc@ffe00000 { | ||
44 | ecm-law@0 { | ||
45 | status = "disabled"; | ||
46 | }; | ||
47 | |||
48 | ecm@1000 { | ||
49 | status = "disabled"; | ||
50 | }; | ||
51 | |||
52 | memory-controller@2000 { | ||
53 | status = "disabled"; | ||
54 | }; | ||
55 | |||
56 | i2c@3000 { | ||
57 | status = "disabled"; | ||
58 | }; | ||
59 | |||
60 | i2c@3100 { | ||
61 | status = "disabled"; | ||
62 | }; | ||
63 | |||
64 | serial0: serial@4500 { | ||
65 | status = "disabled"; | ||
66 | }; | ||
67 | |||
68 | spi@7000 { | ||
69 | status = "disabled"; | ||
70 | }; | ||
71 | |||
72 | gpio: gpio-controller@f000 { | ||
73 | status = "disabled"; | ||
74 | }; | ||
75 | |||
76 | dma@21300 { | ||
77 | status = "disabled"; | ||
78 | }; | ||
79 | |||
80 | mdio@24000 { | ||
81 | status = "disabled"; | ||
82 | }; | ||
83 | |||
84 | mdio@25000 { | ||
85 | status = "disabled"; | ||
86 | }; | ||
87 | |||
88 | enet0: ethernet@b0000 { | ||
89 | fixed-link = <1 1 1000 0 0>; | ||
90 | phy-connection-type = "rgmii-id"; | ||
91 | |||
92 | }; | ||
93 | |||
94 | enet1: ethernet@b1000 { | ||
95 | status = "disabled"; | ||
96 | }; | ||
97 | |||
98 | enet2: ethernet@b2000 { | ||
99 | status = "disabled"; | ||
100 | }; | ||
101 | |||
102 | usb@22000 { | ||
103 | status = "disabled"; | ||
104 | }; | ||
105 | |||
106 | sdhci@2e000 { | ||
107 | status = "disabled"; | ||
108 | }; | ||
109 | |||
110 | mpic: pic@40000 { | ||
111 | protected-sources = < | ||
112 | 16 /* ecm, mem, L2, pci0, pci1 */ | ||
113 | 43 42 59 /* i2c, serial0, spi */ | ||
114 | 47 63 62 /* gpio, tdm */ | ||
115 | 20 21 22 23 /* dma */ | ||
116 | 03 02 /* mdio */ | ||
117 | 35 36 40 /* enet1-queue-group0 */ | ||
118 | 51 52 67 /* enet1-queue-group1 */ | ||
119 | 31 32 33 /* enet2-queue-group0 */ | ||
120 | 25 26 27 /* enet2-queue-group1 */ | ||
121 | 28 72 58 /* usb, sdhci, crypto */ | ||
122 | 0xb0 0xb1 0xb2 /* message */ | ||
123 | 0xb3 0xb4 0xb5 | ||
124 | 0xb6 0xb7 | ||
125 | 0xe0 0xe1 0xe2 /* msi */ | ||
126 | 0xe3 0xe4 0xe5 | ||
127 | 0xe6 0xe7 /* sdhci, crypto , pci */ | ||
128 | >; | ||
129 | }; | ||
130 | |||
131 | msi@41600 { | ||
132 | status = "disabled"; | ||
133 | }; | ||
134 | |||
135 | global-utilities@e0000 { //global utilities block | ||
136 | status = "disabled"; | ||
137 | }; | ||
138 | |||
139 | }; | ||
140 | |||
141 | pci0: pcie@ffe09000 { | ||
142 | status = "disabled"; | ||
143 | }; | ||
144 | |||
145 | pci1: pcie@ffe0a000 { | ||
146 | status = "disabled"; | ||
147 | }; | ||
148 | }; | ||
diff --git a/arch/powerpc/boot/dts/p1020si.dtsi b/arch/powerpc/boot/dts/p1020si.dtsi new file mode 100644 index 000000000000..5c5acb66c3fc --- /dev/null +++ b/arch/powerpc/boot/dts/p1020si.dtsi | |||
@@ -0,0 +1,377 @@ | |||
1 | /* | ||
2 | * P1020si Device Tree Source | ||
3 | * | ||
4 | * Copyright 2011 Freescale Semiconductor Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | */ | ||
11 | |||
12 | /dts-v1/; | ||
13 | / { | ||
14 | compatible = "fsl,P1020"; | ||
15 | #address-cells = <2>; | ||
16 | #size-cells = <2>; | ||
17 | |||
18 | cpus { | ||
19 | #address-cells = <1>; | ||
20 | #size-cells = <0>; | ||
21 | |||
22 | PowerPC,P1020@0 { | ||
23 | device_type = "cpu"; | ||
24 | reg = <0x0>; | ||
25 | next-level-cache = <&L2>; | ||
26 | }; | ||
27 | |||
28 | PowerPC,P1020@1 { | ||
29 | device_type = "cpu"; | ||
30 | reg = <0x1>; | ||
31 | next-level-cache = <&L2>; | ||
32 | }; | ||
33 | }; | ||
34 | |||
35 | localbus@ffe05000 { | ||
36 | #address-cells = <2>; | ||
37 | #size-cells = <1>; | ||
38 | compatible = "fsl,p1020-elbc", "fsl,elbc", "simple-bus"; | ||
39 | reg = <0 0xffe05000 0 0x1000>; | ||
40 | interrupts = <19 2>; | ||
41 | interrupt-parent = <&mpic>; | ||
42 | }; | ||
43 | |||
44 | soc@ffe00000 { | ||
45 | #address-cells = <1>; | ||
46 | #size-cells = <1>; | ||
47 | device_type = "soc"; | ||
48 | compatible = "fsl,p1020-immr", "simple-bus"; | ||
49 | ranges = <0x0 0x0 0xffe00000 0x100000>; | ||
50 | bus-frequency = <0>; // Filled out by uboot. | ||
51 | |||
52 | ecm-law@0 { | ||
53 | compatible = "fsl,ecm-law"; | ||
54 | reg = <0x0 0x1000>; | ||
55 | fsl,num-laws = <12>; | ||
56 | }; | ||
57 | |||
58 | ecm@1000 { | ||
59 | compatible = "fsl,p1020-ecm", "fsl,ecm"; | ||
60 | reg = <0x1000 0x1000>; | ||
61 | interrupts = <16 2>; | ||
62 | interrupt-parent = <&mpic>; | ||
63 | }; | ||
64 | |||
65 | memory-controller@2000 { | ||
66 | compatible = "fsl,p1020-memory-controller"; | ||
67 | reg = <0x2000 0x1000>; | ||
68 | interrupt-parent = <&mpic>; | ||
69 | interrupts = <16 2>; | ||
70 | }; | ||
71 | |||
72 | i2c@3000 { | ||
73 | #address-cells = <1>; | ||
74 | #size-cells = <0>; | ||
75 | cell-index = <0>; | ||
76 | compatible = "fsl-i2c"; | ||
77 | reg = <0x3000 0x100>; | ||
78 | interrupts = <43 2>; | ||
79 | interrupt-parent = <&mpic>; | ||
80 | dfsrr; | ||
81 | }; | ||
82 | |||
83 | i2c@3100 { | ||
84 | #address-cells = <1>; | ||
85 | #size-cells = <0>; | ||
86 | cell-index = <1>; | ||
87 | compatible = "fsl-i2c"; | ||
88 | reg = <0x3100 0x100>; | ||
89 | interrupts = <43 2>; | ||
90 | interrupt-parent = <&mpic>; | ||
91 | dfsrr; | ||
92 | }; | ||
93 | |||
94 | serial0: serial@4500 { | ||
95 | cell-index = <0>; | ||
96 | device_type = "serial"; | ||
97 | compatible = "ns16550"; | ||
98 | reg = <0x4500 0x100>; | ||
99 | clock-frequency = <0>; | ||
100 | interrupts = <42 2>; | ||
101 | interrupt-parent = <&mpic>; | ||
102 | }; | ||
103 | |||
104 | serial1: serial@4600 { | ||
105 | cell-index = <1>; | ||
106 | device_type = "serial"; | ||
107 | compatible = "ns16550"; | ||
108 | reg = <0x4600 0x100>; | ||
109 | clock-frequency = <0>; | ||
110 | interrupts = <42 2>; | ||
111 | interrupt-parent = <&mpic>; | ||
112 | }; | ||
113 | |||
114 | spi@7000 { | ||
115 | cell-index = <0>; | ||
116 | #address-cells = <1>; | ||
117 | #size-cells = <0>; | ||
118 | compatible = "fsl,espi"; | ||
119 | reg = <0x7000 0x1000>; | ||
120 | interrupts = <59 0x2>; | ||
121 | interrupt-parent = <&mpic>; | ||
122 | mode = "cpu"; | ||
123 | }; | ||
124 | |||
125 | gpio: gpio-controller@f000 { | ||
126 | #gpio-cells = <2>; | ||
127 | compatible = "fsl,mpc8572-gpio"; | ||
128 | reg = <0xf000 0x100>; | ||
129 | interrupts = <47 0x2>; | ||
130 | interrupt-parent = <&mpic>; | ||
131 | gpio-controller; | ||
132 | }; | ||
133 | |||
134 | L2: l2-cache-controller@20000 { | ||
135 | compatible = "fsl,p1020-l2-cache-controller"; | ||
136 | reg = <0x20000 0x1000>; | ||
137 | cache-line-size = <32>; // 32 bytes | ||
138 | cache-size = <0x40000>; // L2,256K | ||
139 | interrupt-parent = <&mpic>; | ||
140 | interrupts = <16 2>; | ||
141 | }; | ||
142 | |||
143 | dma@21300 { | ||
144 | #address-cells = <1>; | ||
145 | #size-cells = <1>; | ||
146 | compatible = "fsl,eloplus-dma"; | ||
147 | reg = <0x21300 0x4>; | ||
148 | ranges = <0x0 0x21100 0x200>; | ||
149 | cell-index = <0>; | ||
150 | dma-channel@0 { | ||
151 | compatible = "fsl,eloplus-dma-channel"; | ||
152 | reg = <0x0 0x80>; | ||
153 | cell-index = <0>; | ||
154 | interrupt-parent = <&mpic>; | ||
155 | interrupts = <20 2>; | ||
156 | }; | ||
157 | dma-channel@80 { | ||
158 | compatible = "fsl,eloplus-dma-channel"; | ||
159 | reg = <0x80 0x80>; | ||
160 | cell-index = <1>; | ||
161 | interrupt-parent = <&mpic>; | ||
162 | interrupts = <21 2>; | ||
163 | }; | ||
164 | dma-channel@100 { | ||
165 | compatible = "fsl,eloplus-dma-channel"; | ||
166 | reg = <0x100 0x80>; | ||
167 | cell-index = <2>; | ||
168 | interrupt-parent = <&mpic>; | ||
169 | interrupts = <22 2>; | ||
170 | }; | ||
171 | dma-channel@180 { | ||
172 | compatible = "fsl,eloplus-dma-channel"; | ||
173 | reg = <0x180 0x80>; | ||
174 | cell-index = <3>; | ||
175 | interrupt-parent = <&mpic>; | ||
176 | interrupts = <23 2>; | ||
177 | }; | ||
178 | }; | ||
179 | |||
180 | mdio@24000 { | ||
181 | #address-cells = <1>; | ||
182 | #size-cells = <0>; | ||
183 | compatible = "fsl,etsec2-mdio"; | ||
184 | reg = <0x24000 0x1000 0xb0030 0x4>; | ||
185 | |||
186 | }; | ||
187 | |||
188 | mdio@25000 { | ||
189 | #address-cells = <1>; | ||
190 | #size-cells = <0>; | ||
191 | compatible = "fsl,etsec2-tbi"; | ||
192 | reg = <0x25000 0x1000 0xb1030 0x4>; | ||
193 | |||
194 | }; | ||
195 | |||
196 | enet0: ethernet@b0000 { | ||
197 | #address-cells = <1>; | ||
198 | #size-cells = <1>; | ||
199 | device_type = "network"; | ||
200 | model = "eTSEC"; | ||
201 | compatible = "fsl,etsec2"; | ||
202 | fsl,num_rx_queues = <0x8>; | ||
203 | fsl,num_tx_queues = <0x8>; | ||
204 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
205 | interrupt-parent = <&mpic>; | ||
206 | |||
207 | queue-group@0 { | ||
208 | #address-cells = <1>; | ||
209 | #size-cells = <1>; | ||
210 | reg = <0xb0000 0x1000>; | ||
211 | interrupts = <29 2 30 2 34 2>; | ||
212 | }; | ||
213 | |||
214 | queue-group@1 { | ||
215 | #address-cells = <1>; | ||
216 | #size-cells = <1>; | ||
217 | reg = <0xb4000 0x1000>; | ||
218 | interrupts = <17 2 18 2 24 2>; | ||
219 | }; | ||
220 | }; | ||
221 | |||
222 | enet1: ethernet@b1000 { | ||
223 | #address-cells = <1>; | ||
224 | #size-cells = <1>; | ||
225 | device_type = "network"; | ||
226 | model = "eTSEC"; | ||
227 | compatible = "fsl,etsec2"; | ||
228 | fsl,num_rx_queues = <0x8>; | ||
229 | fsl,num_tx_queues = <0x8>; | ||
230 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
231 | interrupt-parent = <&mpic>; | ||
232 | |||
233 | queue-group@0 { | ||
234 | #address-cells = <1>; | ||
235 | #size-cells = <1>; | ||
236 | reg = <0xb1000 0x1000>; | ||
237 | interrupts = <35 2 36 2 40 2>; | ||
238 | }; | ||
239 | |||
240 | queue-group@1 { | ||
241 | #address-cells = <1>; | ||
242 | #size-cells = <1>; | ||
243 | reg = <0xb5000 0x1000>; | ||
244 | interrupts = <51 2 52 2 67 2>; | ||
245 | }; | ||
246 | }; | ||
247 | |||
248 | enet2: ethernet@b2000 { | ||
249 | #address-cells = <1>; | ||
250 | #size-cells = <1>; | ||
251 | device_type = "network"; | ||
252 | model = "eTSEC"; | ||
253 | compatible = "fsl,etsec2"; | ||
254 | fsl,num_rx_queues = <0x8>; | ||
255 | fsl,num_tx_queues = <0x8>; | ||
256 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
257 | interrupt-parent = <&mpic>; | ||
258 | |||
259 | queue-group@0 { | ||
260 | #address-cells = <1>; | ||
261 | #size-cells = <1>; | ||
262 | reg = <0xb2000 0x1000>; | ||
263 | interrupts = <31 2 32 2 33 2>; | ||
264 | }; | ||
265 | |||
266 | queue-group@1 { | ||
267 | #address-cells = <1>; | ||
268 | #size-cells = <1>; | ||
269 | reg = <0xb6000 0x1000>; | ||
270 | interrupts = <25 2 26 2 27 2>; | ||
271 | }; | ||
272 | }; | ||
273 | |||
274 | usb@22000 { | ||
275 | #address-cells = <1>; | ||
276 | #size-cells = <0>; | ||
277 | compatible = "fsl-usb2-dr"; | ||
278 | reg = <0x22000 0x1000>; | ||
279 | interrupt-parent = <&mpic>; | ||
280 | interrupts = <28 0x2>; | ||
281 | }; | ||
282 | |||
283 | /* USB2 is shared with localbus, so it must be disabled | ||
284 | by default. We can't put 'status = "disabled";' here | ||
285 | since U-Boot doesn't clear the status property when | ||
286 | it enables USB2. OTOH, U-Boot does create a new node | ||
287 | when there isn't any. So, just comment it out. | ||
288 | usb@23000 { | ||
289 | #address-cells = <1>; | ||
290 | #size-cells = <0>; | ||
291 | compatible = "fsl-usb2-dr"; | ||
292 | reg = <0x23000 0x1000>; | ||
293 | interrupt-parent = <&mpic>; | ||
294 | interrupts = <46 0x2>; | ||
295 | phy_type = "ulpi"; | ||
296 | }; | ||
297 | */ | ||
298 | |||
299 | sdhci@2e000 { | ||
300 | compatible = "fsl,p1020-esdhc", "fsl,esdhc"; | ||
301 | reg = <0x2e000 0x1000>; | ||
302 | interrupts = <72 0x2>; | ||
303 | interrupt-parent = <&mpic>; | ||
304 | /* Filled in by U-Boot */ | ||
305 | clock-frequency = <0>; | ||
306 | }; | ||
307 | |||
308 | crypto@30000 { | ||
309 | compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4", | ||
310 | "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; | ||
311 | reg = <0x30000 0x10000>; | ||
312 | interrupts = <45 2 58 2>; | ||
313 | interrupt-parent = <&mpic>; | ||
314 | fsl,num-channels = <4>; | ||
315 | fsl,channel-fifo-len = <24>; | ||
316 | fsl,exec-units-mask = <0xbfe>; | ||
317 | fsl,descriptor-types-mask = <0x3ab0ebf>; | ||
318 | }; | ||
319 | |||
320 | mpic: pic@40000 { | ||
321 | interrupt-controller; | ||
322 | #address-cells = <0>; | ||
323 | #interrupt-cells = <2>; | ||
324 | reg = <0x40000 0x40000>; | ||
325 | compatible = "chrp,open-pic"; | ||
326 | device_type = "open-pic"; | ||
327 | }; | ||
328 | |||
329 | msi@41600 { | ||
330 | compatible = "fsl,p1020-msi", "fsl,mpic-msi"; | ||
331 | reg = <0x41600 0x80>; | ||
332 | msi-available-ranges = <0 0x100>; | ||
333 | interrupts = < | ||
334 | 0xe0 0 | ||
335 | 0xe1 0 | ||
336 | 0xe2 0 | ||
337 | 0xe3 0 | ||
338 | 0xe4 0 | ||
339 | 0xe5 0 | ||
340 | 0xe6 0 | ||
341 | 0xe7 0>; | ||
342 | interrupt-parent = <&mpic>; | ||
343 | }; | ||
344 | |||
345 | global-utilities@e0000 { //global utilities block | ||
346 | compatible = "fsl,p1020-guts","fsl,p2020-guts"; | ||
347 | reg = <0xe0000 0x1000>; | ||
348 | fsl,has-rstcr; | ||
349 | }; | ||
350 | }; | ||
351 | |||
352 | pci0: pcie@ffe09000 { | ||
353 | compatible = "fsl,mpc8548-pcie"; | ||
354 | device_type = "pci"; | ||
355 | #interrupt-cells = <1>; | ||
356 | #size-cells = <2>; | ||
357 | #address-cells = <3>; | ||
358 | reg = <0 0xffe09000 0 0x1000>; | ||
359 | bus-range = <0 255>; | ||
360 | clock-frequency = <33333333>; | ||
361 | interrupt-parent = <&mpic>; | ||
362 | interrupts = <16 2>; | ||
363 | }; | ||
364 | |||
365 | pci1: pcie@ffe0a000 { | ||
366 | compatible = "fsl,mpc8548-pcie"; | ||
367 | device_type = "pci"; | ||
368 | #interrupt-cells = <1>; | ||
369 | #size-cells = <2>; | ||
370 | #address-cells = <3>; | ||
371 | reg = <0 0xffe0a000 0 0x1000>; | ||
372 | bus-range = <0 255>; | ||
373 | clock-frequency = <33333333>; | ||
374 | interrupt-parent = <&mpic>; | ||
375 | interrupts = <16 2>; | ||
376 | }; | ||
377 | }; | ||
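Throughout the P1020 tree above the MPIC declares #interrupt-cells = <2>, so every interrupts property is a flat list of two-cell specifiers. A minimal annotated sketch of how those cells read, assuming the usual chrp,open-pic sense encoding (0/3 edge, 1 active-low level, 2 active-high level) and the customary eTSEC ordering of transmit, receive, error; the fragments below are illustrative only:

	serial@4500 {
		compatible = "ns16550";
		reg = <0x4500 0x100>;
		interrupts = <42 2>;		/* one specifier: vector 42, sense 2 = active-high level */
		interrupt-parent = <&mpic>;
	};

	queue-group@0 {
		reg = <0xb0000 0x1000>;
		/* three specifiers back to back, each <vector sense>: tx, rx, error */
		interrupts = <29 2 30 2 34 2>;
	};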
diff --git a/arch/powerpc/boot/dts/p1022ds.dts b/arch/powerpc/boot/dts/p1022ds.dts index 8bcb10b92677..98d9426d4b85 100644 --- a/arch/powerpc/boot/dts/p1022ds.dts +++ b/arch/powerpc/boot/dts/p1022ds.dts | |||
@@ -52,7 +52,7 @@ | |||
52 | #size-cells = <1>; | 52 | #size-cells = <1>; |
53 | compatible = "fsl,p1022-elbc", "fsl,elbc", "simple-bus"; | 53 | compatible = "fsl,p1022-elbc", "fsl,elbc", "simple-bus"; |
54 | reg = <0 0xffe05000 0 0x1000>; | 54 | reg = <0 0xffe05000 0 0x1000>; |
55 | interrupts = <19 2>; | 55 | interrupts = <19 2 0 0>; |
56 | 56 | ||
57 | ranges = <0x0 0x0 0xf 0xe8000000 0x08000000 | 57 | ranges = <0x0 0x0 0xf 0xe8000000 0x08000000 |
58 | 0x1 0x0 0xf 0xe0000000 0x08000000 | 58 | 0x1 0x0 0xf 0xe0000000 0x08000000 |
@@ -148,6 +148,17 @@ | |||
148 | label = "reserved-nand"; | 148 | label = "reserved-nand"; |
149 | }; | 149 | }; |
150 | }; | 150 | }; |
151 | |||
152 | board-control@3,0 { | ||
153 | compatible = "fsl,p1022ds-pixis"; | ||
154 | reg = <3 0 0x30>; | ||
155 | interrupt-parent = <&mpic>; | ||
156 | /* | ||
157 | * IRQ8 is generated if the "EVENT" switch is pressed | ||
158 | * and PX_CTL[EVESEL] is set to 00. | ||
159 | */ | ||
160 | interrupts = <8 8 0 0>; | ||
161 | }; | ||
151 | }; | 162 | }; |
152 | 163 | ||
153 | soc@fffe00000 { | 164 | soc@fffe00000 { |
@@ -167,13 +178,13 @@ | |||
167 | ecm@1000 { | 178 | ecm@1000 { |
168 | compatible = "fsl,p1022-ecm", "fsl,ecm"; | 179 | compatible = "fsl,p1022-ecm", "fsl,ecm"; |
169 | reg = <0x1000 0x1000>; | 180 | reg = <0x1000 0x1000>; |
170 | interrupts = <16 2>; | 181 | interrupts = <16 2 0 0>; |
171 | }; | 182 | }; |
172 | 183 | ||
173 | memory-controller@2000 { | 184 | memory-controller@2000 { |
174 | compatible = "fsl,p1022-memory-controller"; | 185 | compatible = "fsl,p1022-memory-controller"; |
175 | reg = <0x2000 0x1000>; | 186 | reg = <0x2000 0x1000>; |
176 | interrupts = <16 2>; | 187 | interrupts = <16 2 0 0>; |
177 | }; | 188 | }; |
178 | 189 | ||
179 | i2c@3000 { | 190 | i2c@3000 { |
@@ -182,7 +193,7 @@ | |||
182 | cell-index = <0>; | 193 | cell-index = <0>; |
183 | compatible = "fsl-i2c"; | 194 | compatible = "fsl-i2c"; |
184 | reg = <0x3000 0x100>; | 195 | reg = <0x3000 0x100>; |
185 | interrupts = <43 2>; | 196 | interrupts = <43 2 0 0>; |
186 | dfsrr; | 197 | dfsrr; |
187 | }; | 198 | }; |
188 | 199 | ||
@@ -192,14 +203,16 @@ | |||
192 | cell-index = <1>; | 203 | cell-index = <1>; |
193 | compatible = "fsl-i2c"; | 204 | compatible = "fsl-i2c"; |
194 | reg = <0x3100 0x100>; | 205 | reg = <0x3100 0x100>; |
195 | interrupts = <43 2>; | 206 | interrupts = <43 2 0 0>; |
196 | dfsrr; | 207 | dfsrr; |
197 | 208 | ||
198 | wm8776:codec@1a { | 209 | wm8776:codec@1a { |
199 | compatible = "wlf,wm8776"; | 210 | compatible = "wlf,wm8776"; |
200 | reg = <0x1a>; | 211 | reg = <0x1a>; |
201 | /* MCLK source is a stand-alone oscillator */ | 212 | /* |
202 | clock-frequency = <12288000>; | 213 | * clock-frequency will be set by U-Boot if |
214 | * the clock is enabled. | ||
215 | */ | ||
203 | }; | 216 | }; |
204 | }; | 217 | }; |
205 | 218 | ||
@@ -209,7 +222,7 @@ | |||
209 | compatible = "ns16550"; | 222 | compatible = "ns16550"; |
210 | reg = <0x4500 0x100>; | 223 | reg = <0x4500 0x100>; |
211 | clock-frequency = <0>; | 224 | clock-frequency = <0>; |
212 | interrupts = <42 2>; | 225 | interrupts = <42 2 0 0>; |
213 | }; | 226 | }; |
214 | 227 | ||
215 | serial1: serial@4600 { | 228 | serial1: serial@4600 { |
@@ -218,7 +231,7 @@ | |||
218 | compatible = "ns16550"; | 231 | compatible = "ns16550"; |
219 | reg = <0x4600 0x100>; | 232 | reg = <0x4600 0x100>; |
220 | clock-frequency = <0>; | 233 | clock-frequency = <0>; |
221 | interrupts = <42 2>; | 234 | interrupts = <42 2 0 0>; |
222 | }; | 235 | }; |
223 | 236 | ||
224 | spi@7000 { | 237 | spi@7000 { |
@@ -227,7 +240,7 @@ | |||
227 | #size-cells = <0>; | 240 | #size-cells = <0>; |
228 | compatible = "fsl,espi"; | 241 | compatible = "fsl,espi"; |
229 | reg = <0x7000 0x1000>; | 242 | reg = <0x7000 0x1000>; |
230 | interrupts = <59 0x2>; | 243 | interrupts = <59 0x2 0 0>; |
231 | espi,num-ss-bits = <4>; | 244 | espi,num-ss-bits = <4>; |
232 | mode = "cpu"; | 245 | mode = "cpu"; |
233 | 246 | ||
@@ -264,12 +277,13 @@ | |||
264 | compatible = "fsl,mpc8610-ssi"; | 277 | compatible = "fsl,mpc8610-ssi"; |
265 | cell-index = <0>; | 278 | cell-index = <0>; |
266 | reg = <0x15000 0x100>; | 279 | reg = <0x15000 0x100>; |
267 | interrupts = <75 2>; | 280 | interrupts = <75 2 0 0>; |
268 | fsl,mode = "i2s-slave"; | 281 | fsl,mode = "i2s-slave"; |
269 | codec-handle = <&wm8776>; | 282 | codec-handle = <&wm8776>; |
270 | fsl,playback-dma = <&dma00>; | 283 | fsl,playback-dma = <&dma00>; |
271 | fsl,capture-dma = <&dma01>; | 284 | fsl,capture-dma = <&dma01>; |
272 | fsl,fifo-depth = <16>; | 285 | fsl,fifo-depth = <15>; |
286 | fsl,ssi-asynchronous; | ||
273 | }; | 287 | }; |
274 | 288 | ||
275 | dma@c300 { | 289 | dma@c300 { |
@@ -280,28 +294,28 @@ | |||
280 | ranges = <0x0 0xc100 0x200>; | 294 | ranges = <0x0 0xc100 0x200>; |
281 | cell-index = <1>; | 295 | cell-index = <1>; |
282 | dma00: dma-channel@0 { | 296 | dma00: dma-channel@0 { |
283 | compatible = "fsl,eloplus-dma-channel"; | 297 | compatible = "fsl,ssi-dma-channel"; |
284 | reg = <0x0 0x80>; | 298 | reg = <0x0 0x80>; |
285 | cell-index = <0>; | 299 | cell-index = <0>; |
286 | interrupts = <76 2>; | 300 | interrupts = <76 2 0 0>; |
287 | }; | 301 | }; |
288 | dma01: dma-channel@80 { | 302 | dma01: dma-channel@80 { |
289 | compatible = "fsl,eloplus-dma-channel"; | 303 | compatible = "fsl,ssi-dma-channel"; |
290 | reg = <0x80 0x80>; | 304 | reg = <0x80 0x80>; |
291 | cell-index = <1>; | 305 | cell-index = <1>; |
292 | interrupts = <77 2>; | 306 | interrupts = <77 2 0 0>; |
293 | }; | 307 | }; |
294 | dma-channel@100 { | 308 | dma-channel@100 { |
295 | compatible = "fsl,eloplus-dma-channel"; | 309 | compatible = "fsl,eloplus-dma-channel"; |
296 | reg = <0x100 0x80>; | 310 | reg = <0x100 0x80>; |
297 | cell-index = <2>; | 311 | cell-index = <2>; |
298 | interrupts = <78 2>; | 312 | interrupts = <78 2 0 0>; |
299 | }; | 313 | }; |
300 | dma-channel@180 { | 314 | dma-channel@180 { |
301 | compatible = "fsl,eloplus-dma-channel"; | 315 | compatible = "fsl,eloplus-dma-channel"; |
302 | reg = <0x180 0x80>; | 316 | reg = <0x180 0x80>; |
303 | cell-index = <3>; | 317 | cell-index = <3>; |
304 | interrupts = <79 2>; | 318 | interrupts = <79 2 0 0>; |
305 | }; | 319 | }; |
306 | }; | 320 | }; |
307 | 321 | ||
@@ -309,7 +323,7 @@ | |||
309 | #gpio-cells = <2>; | 323 | #gpio-cells = <2>; |
310 | compatible = "fsl,mpc8572-gpio"; | 324 | compatible = "fsl,mpc8572-gpio"; |
311 | reg = <0xf000 0x100>; | 325 | reg = <0xf000 0x100>; |
312 | interrupts = <47 0x2>; | 326 | interrupts = <47 0x2 0 0>; |
313 | gpio-controller; | 327 | gpio-controller; |
314 | }; | 328 | }; |
315 | 329 | ||
@@ -318,7 +332,7 @@ | |||
318 | reg = <0x20000 0x1000>; | 332 | reg = <0x20000 0x1000>; |
319 | cache-line-size = <32>; // 32 bytes | 333 | cache-line-size = <32>; // 32 bytes |
320 | cache-size = <0x40000>; // L2, 256K | 334 | cache-size = <0x40000>; // L2, 256K |
321 | interrupts = <16 2>; | 335 | interrupts = <16 2 0 0>; |
322 | }; | 336 | }; |
323 | 337 | ||
324 | dma@21300 { | 338 | dma@21300 { |
@@ -332,25 +346,25 @@ | |||
332 | compatible = "fsl,eloplus-dma-channel"; | 346 | compatible = "fsl,eloplus-dma-channel"; |
333 | reg = <0x0 0x80>; | 347 | reg = <0x0 0x80>; |
334 | cell-index = <0>; | 348 | cell-index = <0>; |
335 | interrupts = <20 2>; | 349 | interrupts = <20 2 0 0>; |
336 | }; | 350 | }; |
337 | dma-channel@80 { | 351 | dma-channel@80 { |
338 | compatible = "fsl,eloplus-dma-channel"; | 352 | compatible = "fsl,eloplus-dma-channel"; |
339 | reg = <0x80 0x80>; | 353 | reg = <0x80 0x80>; |
340 | cell-index = <1>; | 354 | cell-index = <1>; |
341 | interrupts = <21 2>; | 355 | interrupts = <21 2 0 0>; |
342 | }; | 356 | }; |
343 | dma-channel@100 { | 357 | dma-channel@100 { |
344 | compatible = "fsl,eloplus-dma-channel"; | 358 | compatible = "fsl,eloplus-dma-channel"; |
345 | reg = <0x100 0x80>; | 359 | reg = <0x100 0x80>; |
346 | cell-index = <2>; | 360 | cell-index = <2>; |
347 | interrupts = <22 2>; | 361 | interrupts = <22 2 0 0>; |
348 | }; | 362 | }; |
349 | dma-channel@180 { | 363 | dma-channel@180 { |
350 | compatible = "fsl,eloplus-dma-channel"; | 364 | compatible = "fsl,eloplus-dma-channel"; |
351 | reg = <0x180 0x80>; | 365 | reg = <0x180 0x80>; |
352 | cell-index = <3>; | 366 | cell-index = <3>; |
353 | interrupts = <23 2>; | 367 | interrupts = <23 2 0 0>; |
354 | }; | 368 | }; |
355 | }; | 369 | }; |
356 | 370 | ||
@@ -359,7 +373,7 @@ | |||
359 | #size-cells = <0>; | 373 | #size-cells = <0>; |
360 | compatible = "fsl-usb2-dr"; | 374 | compatible = "fsl-usb2-dr"; |
361 | reg = <0x22000 0x1000>; | 375 | reg = <0x22000 0x1000>; |
362 | interrupts = <28 0x2>; | 376 | interrupts = <28 0x2 0 0>; |
363 | phy_type = "ulpi"; | 377 | phy_type = "ulpi"; |
364 | }; | 378 | }; |
365 | 379 | ||
@@ -370,11 +384,11 @@ | |||
370 | reg = <0x24000 0x1000 0xb0030 0x4>; | 384 | reg = <0x24000 0x1000 0xb0030 0x4>; |
371 | 385 | ||
372 | phy0: ethernet-phy@0 { | 386 | phy0: ethernet-phy@0 { |
373 | interrupts = <3 1>; | 387 | interrupts = <3 1 0 0>; |
374 | reg = <0x1>; | 388 | reg = <0x1>; |
375 | }; | 389 | }; |
376 | phy1: ethernet-phy@1 { | 390 | phy1: ethernet-phy@1 { |
377 | interrupts = <9 1>; | 391 | interrupts = <9 1 0 0>; |
378 | reg = <0x2>; | 392 | reg = <0x2>; |
379 | }; | 393 | }; |
380 | }; | 394 | }; |
@@ -405,13 +419,13 @@ | |||
405 | #address-cells = <1>; | 419 | #address-cells = <1>; |
406 | #size-cells = <1>; | 420 | #size-cells = <1>; |
407 | reg = <0xB0000 0x1000>; | 421 | reg = <0xB0000 0x1000>; |
408 | interrupts = <29 2 30 2 34 2>; | 422 | interrupts = <29 2 0 0 30 2 0 0 34 2 0 0>; |
409 | }; | 423 | }; |
410 | queue-group@1{ | 424 | queue-group@1{ |
411 | #address-cells = <1>; | 425 | #address-cells = <1>; |
412 | #size-cells = <1>; | 426 | #size-cells = <1>; |
413 | reg = <0xB4000 0x1000>; | 427 | reg = <0xB4000 0x1000>; |
414 | interrupts = <17 2 18 2 24 2>; | 428 | interrupts = <17 2 0 0 18 2 0 0 24 2 0 0>; |
415 | }; | 429 | }; |
416 | }; | 430 | }; |
417 | 431 | ||
@@ -432,20 +446,20 @@ | |||
432 | #address-cells = <1>; | 446 | #address-cells = <1>; |
433 | #size-cells = <1>; | 447 | #size-cells = <1>; |
434 | reg = <0xB1000 0x1000>; | 448 | reg = <0xB1000 0x1000>; |
435 | interrupts = <35 2 36 2 40 2>; | 449 | interrupts = <35 2 0 0 36 2 0 0 40 2 0 0>; |
436 | }; | 450 | }; |
437 | queue-group@1{ | 451 | queue-group@1{ |
438 | #address-cells = <1>; | 452 | #address-cells = <1>; |
439 | #size-cells = <1>; | 453 | #size-cells = <1>; |
440 | reg = <0xB5000 0x1000>; | 454 | reg = <0xB5000 0x1000>; |
441 | interrupts = <51 2 52 2 67 2>; | 455 | interrupts = <51 2 0 0 52 2 0 0 67 2 0 0>; |
442 | }; | 456 | }; |
443 | }; | 457 | }; |
444 | 458 | ||
445 | sdhci@2e000 { | 459 | sdhci@2e000 { |
446 | compatible = "fsl,p1022-esdhc", "fsl,esdhc"; | 460 | compatible = "fsl,p1022-esdhc", "fsl,esdhc"; |
447 | reg = <0x2e000 0x1000>; | 461 | reg = <0x2e000 0x1000>; |
448 | interrupts = <72 0x2>; | 462 | interrupts = <72 0x2 0 0>; |
449 | fsl,sdhci-auto-cmd12; | 463 | fsl,sdhci-auto-cmd12; |
450 | /* Filled in by U-Boot */ | 464 | /* Filled in by U-Boot */ |
451 | clock-frequency = <0>; | 465 | clock-frequency = <0>; |
@@ -456,7 +470,7 @@ | |||
456 | "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1", | 470 | "fsl,sec2.4", "fsl,sec2.2", "fsl,sec2.1", |
457 | "fsl,sec2.0"; | 471 | "fsl,sec2.0"; |
458 | reg = <0x30000 0x10000>; | 472 | reg = <0x30000 0x10000>; |
459 | interrupts = <45 2 58 2>; | 473 | interrupts = <45 2 0 0 58 2 0 0>; |
460 | fsl,num-channels = <4>; | 474 | fsl,num-channels = <4>; |
461 | fsl,channel-fifo-len = <24>; | 475 | fsl,channel-fifo-len = <24>; |
462 | fsl,exec-units-mask = <0x97c>; | 476 | fsl,exec-units-mask = <0x97c>; |
@@ -464,17 +478,17 @@ | |||
464 | }; | 478 | }; |
465 | 479 | ||
466 | sata@18000 { | 480 | sata@18000 { |
467 | compatible = "fsl,mpc8536-sata", "fsl,pq-sata"; | 481 | compatible = "fsl,p1022-sata", "fsl,pq-sata-v2"; |
468 | reg = <0x18000 0x1000>; | 482 | reg = <0x18000 0x1000>; |
469 | cell-index = <1>; | 483 | cell-index = <1>; |
470 | interrupts = <74 0x2>; | 484 | interrupts = <74 0x2 0 0>; |
471 | }; | 485 | }; |
472 | 486 | ||
473 | sata@19000 { | 487 | sata@19000 { |
474 | compatible = "fsl,mpc8536-sata", "fsl,pq-sata"; | 488 | compatible = "fsl,p1022-sata", "fsl,pq-sata-v2"; |
475 | reg = <0x19000 0x1000>; | 489 | reg = <0x19000 0x1000>; |
476 | cell-index = <2>; | 490 | cell-index = <2>; |
477 | interrupts = <41 0x2>; | 491 | interrupts = <41 0x2 0 0>; |
478 | }; | 492 | }; |
479 | 493 | ||
480 | power@e0070{ | 494 | power@e0070{ |
@@ -485,21 +499,33 @@ | |||
485 | display@10000 { | 499 | display@10000 { |
486 | compatible = "fsl,diu", "fsl,p1022-diu"; | 500 | compatible = "fsl,diu", "fsl,p1022-diu"; |
487 | reg = <0x10000 1000>; | 501 | reg = <0x10000 1000>; |
488 | interrupts = <64 2>; | 502 | interrupts = <64 2 0 0>; |
489 | }; | 503 | }; |
490 | 504 | ||
491 | timer@41100 { | 505 | timer@41100 { |
492 | compatible = "fsl,mpic-global-timer"; | 506 | compatible = "fsl,mpic-global-timer"; |
493 | reg = <0x41100 0x204>; | 507 | reg = <0x41100 0x100 0x41300 4>; |
494 | interrupts = <0xf7 0x2>; | 508 | interrupts = <0 0 3 0 |
509 | 1 0 3 0 | ||
510 | 2 0 3 0 | ||
511 | 3 0 3 0>; | ||
512 | }; | ||
513 | |||
514 | timer@42100 { | ||
515 | compatible = "fsl,mpic-global-timer"; | ||
516 | reg = <0x42100 0x100 0x42300 4>; | ||
517 | interrupts = <4 0 3 0 | ||
518 | 5 0 3 0 | ||
519 | 6 0 3 0 | ||
520 | 7 0 3 0>; | ||
495 | }; | 521 | }; |
496 | 522 | ||
497 | mpic: pic@40000 { | 523 | mpic: pic@40000 { |
498 | interrupt-controller; | 524 | interrupt-controller; |
499 | #address-cells = <0>; | 525 | #address-cells = <0>; |
500 | #interrupt-cells = <2>; | 526 | #interrupt-cells = <4>; |
501 | reg = <0x40000 0x40000>; | 527 | reg = <0x40000 0x40000>; |
502 | compatible = "chrp,open-pic"; | 528 | compatible = "fsl,mpic"; |
503 | device_type = "open-pic"; | 529 | device_type = "open-pic"; |
504 | }; | 530 | }; |
505 | 531 | ||
@@ -508,14 +534,14 @@ | |||
508 | reg = <0x41600 0x80>; | 534 | reg = <0x41600 0x80>; |
509 | msi-available-ranges = <0 0x100>; | 535 | msi-available-ranges = <0 0x100>; |
510 | interrupts = < | 536 | interrupts = < |
511 | 0xe0 0 | 537 | 0xe0 0 0 0 |
512 | 0xe1 0 | 538 | 0xe1 0 0 0 |
513 | 0xe2 0 | 539 | 0xe2 0 0 0 |
514 | 0xe3 0 | 540 | 0xe3 0 0 0 |
515 | 0xe4 0 | 541 | 0xe4 0 0 0 |
516 | 0xe5 0 | 542 | 0xe5 0 0 0 |
517 | 0xe6 0 | 543 | 0xe6 0 0 0 |
518 | 0xe7 0>; | 544 | 0xe7 0 0 0>; |
519 | }; | 545 | }; |
520 | 546 | ||
521 | global-utilities@e0000 { //global utilities block | 547 | global-utilities@e0000 { //global utilities block |
@@ -536,7 +562,7 @@ | |||
536 | ranges = <0x2000000 0x0 0xa0000000 0xc 0x20000000 0x0 0x20000000 | 562 | ranges = <0x2000000 0x0 0xa0000000 0xc 0x20000000 0x0 0x20000000 |
537 | 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>; | 563 | 0x1000000 0x0 0x00000000 0xf 0xffc10000 0x0 0x10000>; |
538 | clock-frequency = <33333333>; | 564 | clock-frequency = <33333333>; |
539 | interrupts = <16 2>; | 565 | interrupts = <16 2 0 0>; |
540 | interrupt-map-mask = <0xf800 0 0 7>; | 566 | interrupt-map-mask = <0xf800 0 0 7>; |
541 | interrupt-map = < | 567 | interrupt-map = < |
542 | /* IDSEL 0x0 */ | 568 | /* IDSEL 0x0 */ |
@@ -571,7 +597,7 @@ | |||
571 | ranges = <0x2000000 0x0 0xc0000000 0xc 0x40000000 0x0 0x20000000 | 597 | ranges = <0x2000000 0x0 0xc0000000 0xc 0x40000000 0x0 0x20000000 |
572 | 0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x10000>; | 598 | 0x1000000 0x0 0x00000000 0xf 0xffc20000 0x0 0x10000>; |
573 | clock-frequency = <33333333>; | 599 | clock-frequency = <33333333>; |
574 | interrupts = <16 2>; | 600 | interrupts = <16 2 0 0>; |
575 | interrupt-map-mask = <0xf800 0 0 7>; | 601 | interrupt-map-mask = <0xf800 0 0 7>; |
576 | interrupt-map = < | 602 | interrupt-map = < |
577 | /* IDSEL 0x0 */ | 603 | /* IDSEL 0x0 */ |
@@ -607,7 +633,7 @@ | |||
607 | ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000 | 633 | ranges = <0x2000000 0x0 0x80000000 0xc 0x00000000 0x0 0x20000000 |
608 | 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>; | 634 | 0x1000000 0x0 0x00000000 0xf 0xffc00000 0x0 0x10000>; |
609 | clock-frequency = <33333333>; | 635 | clock-frequency = <33333333>; |
610 | interrupts = <16 2>; | 636 | interrupts = <16 2 0 0>; |
611 | interrupt-map-mask = <0xf800 0 0 7>; | 637 | interrupt-map-mask = <0xf800 0 0 7>; |
612 | interrupt-map = < | 638 | interrupt-map = < |
613 | /* IDSEL 0x0 */ | 639 | /* IDSEL 0x0 */ |
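The p1022ds changes above convert the PIC from chrp,open-pic to fsl,mpic and raise #interrupt-cells from 2 to 4, which is why every interrupts value in this file grows two trailing cells. A sketch of the same sources in both encodings, assuming the fsl,mpic binding in which the four cells are <vector sense type type-info>, type 0 is an ordinary interrupt source and type 3 selects an MPIC global timer (whose first cell names a timer rather than a vector):

	/* old form, #interrupt-cells = <2>: <vector sense> */
	interrupts = <42 2>;

	/* new form, #interrupt-cells = <4>: vector 42, sense 2, type 0 (normal source), no extra info */
	interrupts = <42 2 0 0>;

	/* global timers use type 3; these four specifiers name timers 0-3 of the first timer group */
	interrupts = <0 0 3 0
		      1 0 3 0
		      2 0 3 0
		      3 0 3 0>;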
diff --git a/arch/powerpc/boot/dts/p2020ds.dts b/arch/powerpc/boot/dts/p2020ds.dts index 11019142813c..dae403100f2f 100644 --- a/arch/powerpc/boot/dts/p2020ds.dts +++ b/arch/powerpc/boot/dts/p2020ds.dts | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * P2020 DS Device Tree Source | 2 | * P2020 DS Device Tree Source |
3 | * | 3 | * |
4 | * Copyright 2009 Freescale Semiconductor Inc. | 4 | * Copyright 2009-2011 Freescale Semiconductor Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the | 7 | * under the terms of the GNU General Public License as published by the |
@@ -9,12 +9,11 @@ | |||
9 | * option) any later version. | 9 | * option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | /dts-v1/; | 12 | /include/ "p2020si.dtsi" |
13 | |||
13 | / { | 14 | / { |
14 | model = "fsl,P2020"; | 15 | model = "fsl,P2020DS"; |
15 | compatible = "fsl,P2020DS"; | 16 | compatible = "fsl,P2020DS"; |
16 | #address-cells = <2>; | ||
17 | #size-cells = <2>; | ||
18 | 17 | ||
19 | aliases { | 18 | aliases { |
20 | ethernet0 = &enet0; | 19 | ethernet0 = &enet0; |
@@ -27,35 +26,13 @@ | |||
27 | pci2 = &pci2; | 26 | pci2 = &pci2; |
28 | }; | 27 | }; |
29 | 28 | ||
30 | cpus { | ||
31 | #address-cells = <1>; | ||
32 | #size-cells = <0>; | ||
33 | |||
34 | PowerPC,P2020@0 { | ||
35 | device_type = "cpu"; | ||
36 | reg = <0x0>; | ||
37 | next-level-cache = <&L2>; | ||
38 | }; | ||
39 | |||
40 | PowerPC,P2020@1 { | ||
41 | device_type = "cpu"; | ||
42 | reg = <0x1>; | ||
43 | next-level-cache = <&L2>; | ||
44 | }; | ||
45 | }; | ||
46 | 29 | ||
47 | memory { | 30 | memory { |
48 | device_type = "memory"; | 31 | device_type = "memory"; |
49 | }; | 32 | }; |
50 | 33 | ||
51 | localbus@ffe05000 { | 34 | localbus@ffe05000 { |
52 | #address-cells = <2>; | ||
53 | #size-cells = <1>; | ||
54 | compatible = "fsl,elbc", "simple-bus"; | 35 | compatible = "fsl,elbc", "simple-bus"; |
55 | reg = <0 0xffe05000 0 0x1000>; | ||
56 | interrupts = <19 2>; | ||
57 | interrupt-parent = <&mpic>; | ||
58 | |||
59 | ranges = <0x0 0x0 0x0 0xe8000000 0x08000000 | 36 | ranges = <0x0 0x0 0x0 0xe8000000 0x08000000 |
60 | 0x1 0x0 0x0 0xe0000000 0x08000000 | 37 | 0x1 0x0 0x0 0xe0000000 0x08000000 |
61 | 0x2 0x0 0x0 0xffa00000 0x00040000 | 38 | 0x2 0x0 0x0 0xffa00000 0x00040000 |
@@ -158,352 +135,90 @@ | |||
158 | }; | 135 | }; |
159 | 136 | ||
160 | soc@ffe00000 { | 137 | soc@ffe00000 { |
161 | #address-cells = <1>; | ||
162 | #size-cells = <1>; | ||
163 | device_type = "soc"; | ||
164 | compatible = "fsl,p2020-immr", "simple-bus"; | ||
165 | ranges = <0x0 0 0xffe00000 0x100000>; | ||
166 | bus-frequency = <0>; // Filled out by uboot. | ||
167 | |||
168 | ecm-law@0 { | ||
169 | compatible = "fsl,ecm-law"; | ||
170 | reg = <0x0 0x1000>; | ||
171 | fsl,num-laws = <12>; | ||
172 | }; | ||
173 | |||
174 | ecm@1000 { | ||
175 | compatible = "fsl,p2020-ecm", "fsl,ecm"; | ||
176 | reg = <0x1000 0x1000>; | ||
177 | interrupts = <17 2>; | ||
178 | interrupt-parent = <&mpic>; | ||
179 | }; | ||
180 | |||
181 | memory-controller@2000 { | ||
182 | compatible = "fsl,p2020-memory-controller"; | ||
183 | reg = <0x2000 0x1000>; | ||
184 | interrupt-parent = <&mpic>; | ||
185 | interrupts = <18 2>; | ||
186 | }; | ||
187 | |||
188 | i2c@3000 { | ||
189 | #address-cells = <1>; | ||
190 | #size-cells = <0>; | ||
191 | cell-index = <0>; | ||
192 | compatible = "fsl-i2c"; | ||
193 | reg = <0x3000 0x100>; | ||
194 | interrupts = <43 2>; | ||
195 | interrupt-parent = <&mpic>; | ||
196 | dfsrr; | ||
197 | }; | ||
198 | |||
199 | i2c@3100 { | ||
200 | #address-cells = <1>; | ||
201 | #size-cells = <0>; | ||
202 | cell-index = <1>; | ||
203 | compatible = "fsl-i2c"; | ||
204 | reg = <0x3100 0x100>; | ||
205 | interrupts = <43 2>; | ||
206 | interrupt-parent = <&mpic>; | ||
207 | dfsrr; | ||
208 | }; | ||
209 | 138 | ||
210 | serial0: serial@4500 { | 139 | usb@22000 { |
211 | cell-index = <0>; | 140 | phy_type = "ulpi"; |
212 | device_type = "serial"; | ||
213 | compatible = "ns16550"; | ||
214 | reg = <0x4500 0x100>; | ||
215 | clock-frequency = <0>; | ||
216 | interrupts = <42 2>; | ||
217 | interrupt-parent = <&mpic>; | ||
218 | }; | ||
219 | |||
220 | serial1: serial@4600 { | ||
221 | cell-index = <1>; | ||
222 | device_type = "serial"; | ||
223 | compatible = "ns16550"; | ||
224 | reg = <0x4600 0x100>; | ||
225 | clock-frequency = <0>; | ||
226 | interrupts = <42 2>; | ||
227 | interrupt-parent = <&mpic>; | ||
228 | }; | ||
229 | |||
230 | spi@7000 { | ||
231 | compatible = "fsl,espi"; | ||
232 | reg = <0x7000 0x1000>; | ||
233 | interrupts = <59 0x2>; | ||
234 | interrupt-parent = <&mpic>; | ||
235 | }; | 141 | }; |
236 | 142 | ||
237 | dma@c300 { | 143 | mdio@24520 { |
238 | #address-cells = <1>; | 144 | phy0: ethernet-phy@0 { |
239 | #size-cells = <1>; | ||
240 | compatible = "fsl,eloplus-dma"; | ||
241 | reg = <0xc300 0x4>; | ||
242 | ranges = <0x0 0xc100 0x200>; | ||
243 | cell-index = <1>; | ||
244 | dma-channel@0 { | ||
245 | compatible = "fsl,eloplus-dma-channel"; | ||
246 | reg = <0x0 0x80>; | ||
247 | cell-index = <0>; | ||
248 | interrupt-parent = <&mpic>; | 145 | interrupt-parent = <&mpic>; |
249 | interrupts = <76 2>; | 146 | interrupts = <3 1>; |
147 | reg = <0x0>; | ||
250 | }; | 148 | }; |
251 | dma-channel@80 { | 149 | phy1: ethernet-phy@1 { |
252 | compatible = "fsl,eloplus-dma-channel"; | ||
253 | reg = <0x80 0x80>; | ||
254 | cell-index = <1>; | ||
255 | interrupt-parent = <&mpic>; | 150 | interrupt-parent = <&mpic>; |
256 | interrupts = <77 2>; | 151 | interrupts = <3 1>; |
152 | reg = <0x1>; | ||
257 | }; | 153 | }; |
258 | dma-channel@100 { | 154 | phy2: ethernet-phy@2 { |
259 | compatible = "fsl,eloplus-dma-channel"; | ||
260 | reg = <0x100 0x80>; | ||
261 | cell-index = <2>; | ||
262 | interrupt-parent = <&mpic>; | 155 | interrupt-parent = <&mpic>; |
263 | interrupts = <78 2>; | 156 | interrupts = <3 1>; |
157 | reg = <0x2>; | ||
264 | }; | 158 | }; |
265 | dma-channel@180 { | 159 | tbi0: tbi-phy@11 { |
266 | compatible = "fsl,eloplus-dma-channel"; | 160 | reg = <0x11>; |
267 | reg = <0x180 0x80>; | 161 | device_type = "tbi-phy"; |
268 | cell-index = <3>; | ||
269 | interrupt-parent = <&mpic>; | ||
270 | interrupts = <79 2>; | ||
271 | }; | 162 | }; |
272 | }; | ||
273 | 163 | ||
274 | gpio: gpio-controller@f000 { | ||
275 | #gpio-cells = <2>; | ||
276 | compatible = "fsl,mpc8572-gpio"; | ||
277 | reg = <0xf000 0x100>; | ||
278 | interrupts = <47 0x2>; | ||
279 | interrupt-parent = <&mpic>; | ||
280 | gpio-controller; | ||
281 | }; | 164 | }; |
282 | 165 | ||
283 | L2: l2-cache-controller@20000 { | 166 | mdio@25520 { |
284 | compatible = "fsl,p2020-l2-cache-controller"; | 167 | tbi1: tbi-phy@11 { |
285 | reg = <0x20000 0x1000>; | 168 | reg = <0x11>; |
286 | cache-line-size = <32>; // 32 bytes | 169 | device_type = "tbi-phy"; |
287 | cache-size = <0x80000>; // L2, 512k | 170 | }; |
288 | interrupt-parent = <&mpic>; | ||
289 | interrupts = <16 2>; | ||
290 | }; | 171 | }; |
291 | 172 | ||
292 | dma@21300 { | 173 | mdio@26520 { |
293 | #address-cells = <1>; | 174 | tbi2: tbi-phy@11 { |
294 | #size-cells = <1>; | 175 | reg = <0x11>; |
295 | compatible = "fsl,eloplus-dma"; | 176 | device_type = "tbi-phy"; |
296 | reg = <0x21300 0x4>; | ||
297 | ranges = <0x0 0x21100 0x200>; | ||
298 | cell-index = <0>; | ||
299 | dma-channel@0 { | ||
300 | compatible = "fsl,eloplus-dma-channel"; | ||
301 | reg = <0x0 0x80>; | ||
302 | cell-index = <0>; | ||
303 | interrupt-parent = <&mpic>; | ||
304 | interrupts = <20 2>; | ||
305 | }; | ||
306 | dma-channel@80 { | ||
307 | compatible = "fsl,eloplus-dma-channel"; | ||
308 | reg = <0x80 0x80>; | ||
309 | cell-index = <1>; | ||
310 | interrupt-parent = <&mpic>; | ||
311 | interrupts = <21 2>; | ||
312 | }; | ||
313 | dma-channel@100 { | ||
314 | compatible = "fsl,eloplus-dma-channel"; | ||
315 | reg = <0x100 0x80>; | ||
316 | cell-index = <2>; | ||
317 | interrupt-parent = <&mpic>; | ||
318 | interrupts = <22 2>; | ||
319 | }; | ||
320 | dma-channel@180 { | ||
321 | compatible = "fsl,eloplus-dma-channel"; | ||
322 | reg = <0x180 0x80>; | ||
323 | cell-index = <3>; | ||
324 | interrupt-parent = <&mpic>; | ||
325 | interrupts = <23 2>; | ||
326 | }; | 177 | }; |
178 | |||
327 | }; | 179 | }; |
328 | 180 | ||
329 | usb@22000 { | 181 | ptp_clock@24E00 { |
330 | #address-cells = <1>; | 182 | compatible = "fsl,etsec-ptp"; |
331 | #size-cells = <0>; | 183 | reg = <0x24E00 0xB0>; |
332 | compatible = "fsl-usb2-dr"; | 184 | interrupts = <68 2 69 2 70 2>; |
333 | reg = <0x22000 0x1000>; | 185 | interrupt-parent = < &mpic >; |
334 | interrupt-parent = <&mpic>; | 186 | fsl,tclk-period = <5>; |
335 | interrupts = <28 0x2>; | 187 | fsl,tmr-prsc = <200>; |
336 | phy_type = "ulpi"; | 188 | fsl,tmr-add = <0xCCCCCCCD>; |
189 | fsl,tmr-fiper1 = <0x3B9AC9FB>; | ||
190 | fsl,tmr-fiper2 = <0x0001869B>; | ||
191 | fsl,max-adj = <249999999>; | ||
337 | }; | 192 | }; |
338 | 193 | ||
339 | enet0: ethernet@24000 { | 194 | enet0: ethernet@24000 { |
340 | #address-cells = <1>; | ||
341 | #size-cells = <1>; | ||
342 | cell-index = <0>; | ||
343 | device_type = "network"; | ||
344 | model = "eTSEC"; | ||
345 | compatible = "gianfar"; | ||
346 | reg = <0x24000 0x1000>; | ||
347 | ranges = <0x0 0x24000 0x1000>; | ||
348 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
349 | interrupts = <29 2 30 2 34 2>; | ||
350 | interrupt-parent = <&mpic>; | ||
351 | tbi-handle = <&tbi0>; | 195 | tbi-handle = <&tbi0>; |
352 | phy-handle = <&phy0>; | 196 | phy-handle = <&phy0>; |
353 | phy-connection-type = "rgmii-id"; | 197 | phy-connection-type = "rgmii-id"; |
354 | |||
355 | mdio@520 { | ||
356 | #address-cells = <1>; | ||
357 | #size-cells = <0>; | ||
358 | compatible = "fsl,gianfar-mdio"; | ||
359 | reg = <0x520 0x20>; | ||
360 | |||
361 | phy0: ethernet-phy@0 { | ||
362 | interrupt-parent = <&mpic>; | ||
363 | interrupts = <3 1>; | ||
364 | reg = <0x0>; | ||
365 | }; | ||
366 | phy1: ethernet-phy@1 { | ||
367 | interrupt-parent = <&mpic>; | ||
368 | interrupts = <3 1>; | ||
369 | reg = <0x1>; | ||
370 | }; | ||
371 | phy2: ethernet-phy@2 { | ||
372 | interrupt-parent = <&mpic>; | ||
373 | interrupts = <3 1>; | ||
374 | reg = <0x2>; | ||
375 | }; | ||
376 | tbi0: tbi-phy@11 { | ||
377 | reg = <0x11>; | ||
378 | device_type = "tbi-phy"; | ||
379 | }; | ||
380 | }; | ||
381 | }; | 198 | }; |
382 | 199 | ||
383 | enet1: ethernet@25000 { | 200 | enet1: ethernet@25000 { |
384 | #address-cells = <1>; | ||
385 | #size-cells = <1>; | ||
386 | cell-index = <1>; | ||
387 | device_type = "network"; | ||
388 | model = "eTSEC"; | ||
389 | compatible = "gianfar"; | ||
390 | reg = <0x25000 0x1000>; | ||
391 | ranges = <0x0 0x25000 0x1000>; | ||
392 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
393 | interrupts = <35 2 36 2 40 2>; | ||
394 | interrupt-parent = <&mpic>; | ||
395 | tbi-handle = <&tbi1>; | 201 | tbi-handle = <&tbi1>; |
396 | phy-handle = <&phy1>; | 202 | phy-handle = <&phy1>; |
397 | phy-connection-type = "rgmii-id"; | 203 | phy-connection-type = "rgmii-id"; |
398 | 204 | ||
399 | mdio@520 { | ||
400 | #address-cells = <1>; | ||
401 | #size-cells = <0>; | ||
402 | compatible = "fsl,gianfar-tbi"; | ||
403 | reg = <0x520 0x20>; | ||
404 | |||
405 | tbi1: tbi-phy@11 { | ||
406 | reg = <0x11>; | ||
407 | device_type = "tbi-phy"; | ||
408 | }; | ||
409 | }; | ||
410 | }; | 205 | }; |
411 | 206 | ||
412 | enet2: ethernet@26000 { | 207 | enet2: ethernet@26000 { |
413 | #address-cells = <1>; | ||
414 | #size-cells = <1>; | ||
415 | cell-index = <2>; | ||
416 | device_type = "network"; | ||
417 | model = "eTSEC"; | ||
418 | compatible = "gianfar"; | ||
419 | reg = <0x26000 0x1000>; | ||
420 | ranges = <0x0 0x26000 0x1000>; | ||
421 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
422 | interrupts = <31 2 32 2 33 2>; | ||
423 | interrupt-parent = <&mpic>; | ||
424 | tbi-handle = <&tbi2>; | 208 | tbi-handle = <&tbi2>; |
425 | phy-handle = <&phy2>; | 209 | phy-handle = <&phy2>; |
426 | phy-connection-type = "rgmii-id"; | 210 | phy-connection-type = "rgmii-id"; |
427 | |||
428 | mdio@520 { | ||
429 | #address-cells = <1>; | ||
430 | #size-cells = <0>; | ||
431 | compatible = "fsl,gianfar-tbi"; | ||
432 | reg = <0x520 0x20>; | ||
433 | |||
434 | tbi2: tbi-phy@11 { | ||
435 | reg = <0x11>; | ||
436 | device_type = "tbi-phy"; | ||
437 | }; | ||
438 | }; | ||
439 | }; | ||
440 | |||
441 | sdhci@2e000 { | ||
442 | compatible = "fsl,p2020-esdhc", "fsl,esdhc"; | ||
443 | reg = <0x2e000 0x1000>; | ||
444 | interrupts = <72 0x2>; | ||
445 | interrupt-parent = <&mpic>; | ||
446 | /* Filled in by U-Boot */ | ||
447 | clock-frequency = <0>; | ||
448 | }; | ||
449 | |||
450 | crypto@30000 { | ||
451 | compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4", | ||
452 | "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; | ||
453 | reg = <0x30000 0x10000>; | ||
454 | interrupts = <45 2 58 2>; | ||
455 | interrupt-parent = <&mpic>; | ||
456 | fsl,num-channels = <4>; | ||
457 | fsl,channel-fifo-len = <24>; | ||
458 | fsl,exec-units-mask = <0xbfe>; | ||
459 | fsl,descriptor-types-mask = <0x3ab0ebf>; | ||
460 | }; | 211 | }; |
461 | 212 | ||
462 | mpic: pic@40000 { | ||
463 | interrupt-controller; | ||
464 | #address-cells = <0>; | ||
465 | #interrupt-cells = <2>; | ||
466 | reg = <0x40000 0x40000>; | ||
467 | compatible = "chrp,open-pic"; | ||
468 | device_type = "open-pic"; | ||
469 | }; | ||
470 | 213 | ||
471 | msi@41600 { | 214 | msi@41600 { |
472 | compatible = "fsl,mpic-msi"; | 215 | compatible = "fsl,mpic-msi"; |
473 | reg = <0x41600 0x80>; | ||
474 | msi-available-ranges = <0 0x100>; | ||
475 | interrupts = < | ||
476 | 0xe0 0 | ||
477 | 0xe1 0 | ||
478 | 0xe2 0 | ||
479 | 0xe3 0 | ||
480 | 0xe4 0 | ||
481 | 0xe5 0 | ||
482 | 0xe6 0 | ||
483 | 0xe7 0>; | ||
484 | interrupt-parent = <&mpic>; | ||
485 | }; | ||
486 | |||
487 | global-utilities@e0000 { //global utilities block | ||
488 | compatible = "fsl,p2020-guts"; | ||
489 | reg = <0xe0000 0x1000>; | ||
490 | fsl,has-rstcr; | ||
491 | }; | 216 | }; |
492 | }; | 217 | }; |
493 | 218 | ||
494 | pci0: pcie@ffe08000 { | 219 | pci0: pcie@ffe08000 { |
495 | compatible = "fsl,mpc8548-pcie"; | ||
496 | device_type = "pci"; | ||
497 | #interrupt-cells = <1>; | ||
498 | #size-cells = <2>; | ||
499 | #address-cells = <3>; | ||
500 | reg = <0 0xffe08000 0 0x1000>; | ||
501 | bus-range = <0 255>; | ||
502 | ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 | 220 | ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 |
503 | 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; | 221 | 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; |
504 | clock-frequency = <33333333>; | ||
505 | interrupt-parent = <&mpic>; | ||
506 | interrupts = <24 2>; | ||
507 | interrupt-map-mask = <0xf800 0x0 0x0 0x7>; | 222 | interrupt-map-mask = <0xf800 0x0 0x0 0x7>; |
508 | interrupt-map = < | 223 | interrupt-map = < |
509 | /* IDSEL 0x0 */ | 224 | /* IDSEL 0x0 */ |
@@ -528,18 +243,8 @@ | |||
528 | }; | 243 | }; |
529 | 244 | ||
530 | pci1: pcie@ffe09000 { | 245 | pci1: pcie@ffe09000 { |
531 | compatible = "fsl,mpc8548-pcie"; | ||
532 | device_type = "pci"; | ||
533 | #interrupt-cells = <1>; | ||
534 | #size-cells = <2>; | ||
535 | #address-cells = <3>; | ||
536 | reg = <0 0xffe09000 0 0x1000>; | ||
537 | bus-range = <0 255>; | ||
538 | ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 | 246 | ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 |
539 | 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; | 247 | 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; |
540 | clock-frequency = <33333333>; | ||
541 | interrupt-parent = <&mpic>; | ||
542 | interrupts = <25 2>; | ||
543 | interrupt-map-mask = <0xff00 0x0 0x0 0x7>; | 248 | interrupt-map-mask = <0xff00 0x0 0x0 0x7>; |
544 | interrupt-map = < | 249 | interrupt-map = < |
545 | 250 | ||
@@ -667,18 +372,8 @@ | |||
667 | }; | 372 | }; |
668 | 373 | ||
669 | pci2: pcie@ffe0a000 { | 374 | pci2: pcie@ffe0a000 { |
670 | compatible = "fsl,mpc8548-pcie"; | ||
671 | device_type = "pci"; | ||
672 | #interrupt-cells = <1>; | ||
673 | #size-cells = <2>; | ||
674 | #address-cells = <3>; | ||
675 | reg = <0 0xffe0a000 0 0x1000>; | ||
676 | bus-range = <0 255>; | ||
677 | ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 | 375 | ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 |
678 | 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>; | 376 | 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>; |
679 | clock-frequency = <33333333>; | ||
680 | interrupt-parent = <&mpic>; | ||
681 | interrupts = <26 2>; | ||
682 | interrupt-map-mask = <0xf800 0x0 0x0 0x7>; | 377 | interrupt-map-mask = <0xf800 0x0 0x0 0x7>; |
683 | interrupt-map = < | 378 | interrupt-map = < |
684 | /* IDSEL 0x0 */ | 379 | /* IDSEL 0x0 */ |
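The P2020 board files above are reworked around a shared silicon description: the common SoC nodes move into p2020si.dtsi, each board .dts pulls them in with /include/, and the board file then only adds board-specific properties or switches unused blocks off with status = "disabled". Properties restated in the board file replace the .dtsi values; nodes the board file does not mention are inherited unchanged. A minimal sketch of the pattern, in which the board name and the particular overrides are invented for illustration:

	/include/ "p2020si.dtsi"

	/ {
		model = "fsl,P2020EXAMPLE";
		compatible = "fsl,P2020EXAMPLE";

		soc@ffe00000 {
			usb@22000 {
				phy_type = "ulpi";	/* board-level detail added to the .dtsi node */
			};

			serial1: serial@4600 {
				status = "disabled";	/* block not wired up on this board */
			};
		};
	};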
diff --git a/arch/powerpc/boot/dts/p2020rdb.dts b/arch/powerpc/boot/dts/p2020rdb.dts index da4cb0d8d215..1d7a05f3021e 100644 --- a/arch/powerpc/boot/dts/p2020rdb.dts +++ b/arch/powerpc/boot/dts/p2020rdb.dts | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * P2020 RDB Device Tree Source | 2 | * P2020 RDB Device Tree Source |
3 | * | 3 | * |
4 | * Copyright 2009 Freescale Semiconductor Inc. | 4 | * Copyright 2009-2011 Freescale Semiconductor Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the | 7 | * under the terms of the GNU General Public License as published by the |
@@ -9,12 +9,11 @@ | |||
9 | * option) any later version. | 9 | * option) any later version. |
10 | */ | 10 | */ |
11 | 11 | ||
12 | /dts-v1/; | 12 | /include/ "p2020si.dtsi" |
13 | |||
13 | / { | 14 | / { |
14 | model = "fsl,P2020"; | 15 | model = "fsl,P2020RDB"; |
15 | compatible = "fsl,P2020RDB"; | 16 | compatible = "fsl,P2020RDB"; |
16 | #address-cells = <2>; | ||
17 | #size-cells = <2>; | ||
18 | 17 | ||
19 | aliases { | 18 | aliases { |
20 | ethernet0 = &enet0; | 19 | ethernet0 = &enet0; |
@@ -26,34 +25,11 @@ | |||
26 | pci1 = &pci1; | 25 | pci1 = &pci1; |
27 | }; | 26 | }; |
28 | 27 | ||
29 | cpus { | ||
30 | #address-cells = <1>; | ||
31 | #size-cells = <0>; | ||
32 | |||
33 | PowerPC,P2020@0 { | ||
34 | device_type = "cpu"; | ||
35 | reg = <0x0>; | ||
36 | next-level-cache = <&L2>; | ||
37 | }; | ||
38 | |||
39 | PowerPC,P2020@1 { | ||
40 | device_type = "cpu"; | ||
41 | reg = <0x1>; | ||
42 | next-level-cache = <&L2>; | ||
43 | }; | ||
44 | }; | ||
45 | |||
46 | memory { | 28 | memory { |
47 | device_type = "memory"; | 29 | device_type = "memory"; |
48 | }; | 30 | }; |
49 | 31 | ||
50 | localbus@ffe05000 { | 32 | localbus@ffe05000 { |
51 | #address-cells = <2>; | ||
52 | #size-cells = <1>; | ||
53 | compatible = "fsl,p2020-elbc", "fsl,elbc", "simple-bus"; | ||
54 | reg = <0 0xffe05000 0 0x1000>; | ||
55 | interrupts = <19 2>; | ||
56 | interrupt-parent = <&mpic>; | ||
57 | 33 | ||
58 | /* NOR and NAND Flashes */ | 34 | /* NOR and NAND Flashes */ |
59 | ranges = <0x0 0x0 0x0 0xef000000 0x01000000 | 35 | ranges = <0x0 0x0 0x0 0xef000000 0x01000000 |
@@ -165,90 +141,16 @@ | |||
165 | }; | 141 | }; |
166 | 142 | ||
167 | soc@ffe00000 { | 143 | soc@ffe00000 { |
168 | #address-cells = <1>; | ||
169 | #size-cells = <1>; | ||
170 | device_type = "soc"; | ||
171 | compatible = "fsl,p2020-immr", "simple-bus"; | ||
172 | ranges = <0x0 0x0 0xffe00000 0x100000>; | ||
173 | bus-frequency = <0>; // Filled out by uboot. | ||
174 | |||
175 | ecm-law@0 { | ||
176 | compatible = "fsl,ecm-law"; | ||
177 | reg = <0x0 0x1000>; | ||
178 | fsl,num-laws = <12>; | ||
179 | }; | ||
180 | |||
181 | ecm@1000 { | ||
182 | compatible = "fsl,p2020-ecm", "fsl,ecm"; | ||
183 | reg = <0x1000 0x1000>; | ||
184 | interrupts = <17 2>; | ||
185 | interrupt-parent = <&mpic>; | ||
186 | }; | ||
187 | |||
188 | memory-controller@2000 { | ||
189 | compatible = "fsl,p2020-memory-controller"; | ||
190 | reg = <0x2000 0x1000>; | ||
191 | interrupt-parent = <&mpic>; | ||
192 | interrupts = <18 2>; | ||
193 | }; | ||
194 | |||
195 | i2c@3000 { | 144 | i2c@3000 { |
196 | #address-cells = <1>; | ||
197 | #size-cells = <0>; | ||
198 | cell-index = <0>; | ||
199 | compatible = "fsl-i2c"; | ||
200 | reg = <0x3000 0x100>; | ||
201 | interrupts = <43 2>; | ||
202 | interrupt-parent = <&mpic>; | ||
203 | dfsrr; | ||
204 | rtc@68 { | 145 | rtc@68 { |
205 | compatible = "dallas,ds1339"; | 146 | compatible = "dallas,ds1339"; |
206 | reg = <0x68>; | 147 | reg = <0x68>; |
207 | }; | 148 | }; |
208 | }; | 149 | }; |
209 | 150 | ||
210 | i2c@3100 { | 151 | spi@7000 { |
211 | #address-cells = <1>; | ||
212 | #size-cells = <0>; | ||
213 | cell-index = <1>; | ||
214 | compatible = "fsl-i2c"; | ||
215 | reg = <0x3100 0x100>; | ||
216 | interrupts = <43 2>; | ||
217 | interrupt-parent = <&mpic>; | ||
218 | dfsrr; | ||
219 | }; | ||
220 | |||
221 | serial0: serial@4500 { | ||
222 | cell-index = <0>; | ||
223 | device_type = "serial"; | ||
224 | compatible = "ns16550"; | ||
225 | reg = <0x4500 0x100>; | ||
226 | clock-frequency = <0>; | ||
227 | interrupts = <42 2>; | ||
228 | interrupt-parent = <&mpic>; | ||
229 | }; | ||
230 | |||
231 | serial1: serial@4600 { | ||
232 | cell-index = <1>; | ||
233 | device_type = "serial"; | ||
234 | compatible = "ns16550"; | ||
235 | reg = <0x4600 0x100>; | ||
236 | clock-frequency = <0>; | ||
237 | interrupts = <42 2>; | ||
238 | interrupt-parent = <&mpic>; | ||
239 | }; | ||
240 | 152 | ||
241 | spi@7000 { | 153 | fsl_m25p80@0 { |
242 | cell-index = <0>; | ||
243 | #address-cells = <1>; | ||
244 | #size-cells = <0>; | ||
245 | compatible = "fsl,espi"; | ||
246 | reg = <0x7000 0x1000>; | ||
247 | interrupts = <59 0x2>; | ||
248 | interrupt-parent = <&mpic>; | ||
249 | mode = "cpu"; | ||
250 | |||
251 | fsl_m25p80@0 { | ||
252 | #address-cells = <1>; | 154 | #address-cells = <1>; |
253 | #size-cells = <1>; | 155 | #size-cells = <1>; |
254 | compatible = "fsl,espi-flash"; | 156 | compatible = "fsl,espi-flash"; |
@@ -294,254 +196,81 @@ | |||
294 | }; | 196 | }; |
295 | }; | 197 | }; |
296 | 198 | ||
297 | dma@c300 { | 199 | usb@22000 { |
298 | #address-cells = <1>; | 200 | phy_type = "ulpi"; |
299 | #size-cells = <1>; | 201 | }; |
300 | compatible = "fsl,eloplus-dma"; | 202 | |
301 | reg = <0xc300 0x4>; | 203 | mdio@24520 { |
302 | ranges = <0x0 0xc100 0x200>; | 204 | phy0: ethernet-phy@0 { |
303 | cell-index = <1>; | ||
304 | dma-channel@0 { | ||
305 | compatible = "fsl,eloplus-dma-channel"; | ||
306 | reg = <0x0 0x80>; | ||
307 | cell-index = <0>; | ||
308 | interrupt-parent = <&mpic>; | ||
309 | interrupts = <76 2>; | ||
310 | }; | ||
311 | dma-channel@80 { | ||
312 | compatible = "fsl,eloplus-dma-channel"; | ||
313 | reg = <0x80 0x80>; | ||
314 | cell-index = <1>; | ||
315 | interrupt-parent = <&mpic>; | ||
316 | interrupts = <77 2>; | ||
317 | }; | ||
318 | dma-channel@100 { | ||
319 | compatible = "fsl,eloplus-dma-channel"; | ||
320 | reg = <0x100 0x80>; | ||
321 | cell-index = <2>; | ||
322 | interrupt-parent = <&mpic>; | 205 | interrupt-parent = <&mpic>; |
323 | interrupts = <78 2>; | 206 | interrupts = <3 1>; |
324 | }; | 207 | reg = <0x0>; |
325 | dma-channel@180 { | 208 | }; |
326 | compatible = "fsl,eloplus-dma-channel"; | 209 | phy1: ethernet-phy@1 { |
327 | reg = <0x180 0x80>; | ||
328 | cell-index = <3>; | ||
329 | interrupt-parent = <&mpic>; | 210 | interrupt-parent = <&mpic>; |
330 | interrupts = <79 2>; | 211 | interrupts = <3 1>; |
331 | }; | 212 | reg = <0x1>; |
213 | }; | ||
332 | }; | 214 | }; |
333 | 215 | ||
334 | gpio: gpio-controller@f000 { | 216 | mdio@25520 { |
335 | #gpio-cells = <2>; | 217 | tbi0: tbi-phy@11 { |
336 | compatible = "fsl,mpc8572-gpio"; | 218 | reg = <0x11>; |
337 | reg = <0xf000 0x100>; | 219 | device_type = "tbi-phy"; |
338 | interrupts = <47 0x2>; | 220 | }; |
339 | interrupt-parent = <&mpic>; | ||
340 | gpio-controller; | ||
341 | }; | 221 | }; |
342 | 222 | ||
343 | L2: l2-cache-controller@20000 { | 223 | mdio@26520 { |
344 | compatible = "fsl,p2020-l2-cache-controller"; | 224 | status = "disabled"; |
345 | reg = <0x20000 0x1000>; | ||
346 | cache-line-size = <32>; // 32 bytes | ||
347 | cache-size = <0x80000>; // L2,512K | ||
348 | interrupt-parent = <&mpic>; | ||
349 | interrupts = <16 2>; | ||
350 | }; | 225 | }; |
351 | 226 | ||
352 | dma@21300 { | 227 | ptp_clock@24E00 { |
353 | #address-cells = <1>; | 228 | compatible = "fsl,etsec-ptp"; |
354 | #size-cells = <1>; | 229 | reg = <0x24E00 0xB0>; |
355 | compatible = "fsl,eloplus-dma"; | 230 | interrupts = <68 2 69 2 70 2>; |
356 | reg = <0x21300 0x4>; | 231 | interrupt-parent = < &mpic >; |
357 | ranges = <0x0 0x21100 0x200>; | 232 | fsl,tclk-period = <5>; |
358 | cell-index = <0>; | 233 | fsl,tmr-prsc = <200>; |
359 | dma-channel@0 { | 234 | fsl,tmr-add = <0xCCCCCCCD>; |
360 | compatible = "fsl,eloplus-dma-channel"; | 235 | fsl,tmr-fiper1 = <0x3B9AC9FB>; |
361 | reg = <0x0 0x80>; | 236 | fsl,tmr-fiper2 = <0x0001869B>; |
362 | cell-index = <0>; | 237 | fsl,max-adj = <249999999>; |
363 | interrupt-parent = <&mpic>; | ||
364 | interrupts = <20 2>; | ||
365 | }; | ||
366 | dma-channel@80 { | ||
367 | compatible = "fsl,eloplus-dma-channel"; | ||
368 | reg = <0x80 0x80>; | ||
369 | cell-index = <1>; | ||
370 | interrupt-parent = <&mpic>; | ||
371 | interrupts = <21 2>; | ||
372 | }; | ||
373 | dma-channel@100 { | ||
374 | compatible = "fsl,eloplus-dma-channel"; | ||
375 | reg = <0x100 0x80>; | ||
376 | cell-index = <2>; | ||
377 | interrupt-parent = <&mpic>; | ||
378 | interrupts = <22 2>; | ||
379 | }; | ||
380 | dma-channel@180 { | ||
381 | compatible = "fsl,eloplus-dma-channel"; | ||
382 | reg = <0x180 0x80>; | ||
383 | cell-index = <3>; | ||
384 | interrupt-parent = <&mpic>; | ||
385 | interrupts = <23 2>; | ||
386 | }; | ||
387 | }; | ||
388 | |||
389 | usb@22000 { | ||
390 | #address-cells = <1>; | ||
391 | #size-cells = <0>; | ||
392 | compatible = "fsl-usb2-dr"; | ||
393 | reg = <0x22000 0x1000>; | ||
394 | interrupt-parent = <&mpic>; | ||
395 | interrupts = <28 0x2>; | ||
396 | phy_type = "ulpi"; | ||
397 | }; | 238 | }; |
398 | 239 | ||
399 | enet0: ethernet@24000 { | 240 | enet0: ethernet@24000 { |
400 | #address-cells = <1>; | ||
401 | #size-cells = <1>; | ||
402 | cell-index = <0>; | ||
403 | device_type = "network"; | ||
404 | model = "eTSEC"; | ||
405 | compatible = "gianfar"; | ||
406 | reg = <0x24000 0x1000>; | ||
407 | ranges = <0x0 0x24000 0x1000>; | ||
408 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
409 | interrupts = <29 2 30 2 34 2>; | ||
410 | interrupt-parent = <&mpic>; | ||
411 | fixed-link = <1 1 1000 0 0>; | 241 | fixed-link = <1 1 1000 0 0>; |
412 | phy-connection-type = "rgmii-id"; | 242 | phy-connection-type = "rgmii-id"; |
413 | |||
414 | mdio@520 { | ||
415 | #address-cells = <1>; | ||
416 | #size-cells = <0>; | ||
417 | compatible = "fsl,gianfar-mdio"; | ||
418 | reg = <0x520 0x20>; | ||
419 | |||
420 | phy0: ethernet-phy@0 { | ||
421 | interrupt-parent = <&mpic>; | ||
422 | interrupts = <3 1>; | ||
423 | reg = <0x0>; | ||
424 | }; | ||
425 | phy1: ethernet-phy@1 { | ||
426 | interrupt-parent = <&mpic>; | ||
427 | interrupts = <3 1>; | ||
428 | reg = <0x1>; | ||
429 | }; | ||
430 | }; | ||
431 | }; | 243 | }; |
432 | 244 | ||
433 | enet1: ethernet@25000 { | 245 | enet1: ethernet@25000 { |
434 | #address-cells = <1>; | ||
435 | #size-cells = <1>; | ||
436 | cell-index = <1>; | ||
437 | device_type = "network"; | ||
438 | model = "eTSEC"; | ||
439 | compatible = "gianfar"; | ||
440 | reg = <0x25000 0x1000>; | ||
441 | ranges = <0x0 0x25000 0x1000>; | ||
442 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
443 | interrupts = <35 2 36 2 40 2>; | ||
444 | interrupt-parent = <&mpic>; | ||
445 | tbi-handle = <&tbi0>; | 246 | tbi-handle = <&tbi0>; |
446 | phy-handle = <&phy0>; | 247 | phy-handle = <&phy0>; |
447 | phy-connection-type = "sgmii"; | 248 | phy-connection-type = "sgmii"; |
448 | |||
449 | mdio@520 { | ||
450 | #address-cells = <1>; | ||
451 | #size-cells = <0>; | ||
452 | compatible = "fsl,gianfar-tbi"; | ||
453 | reg = <0x520 0x20>; | ||
454 | |||
455 | tbi0: tbi-phy@11 { | ||
456 | reg = <0x11>; | ||
457 | device_type = "tbi-phy"; | ||
458 | }; | ||
459 | }; | ||
460 | }; | 249 | }; |
461 | 250 | ||
462 | enet2: ethernet@26000 { | 251 | enet2: ethernet@26000 { |
463 | #address-cells = <1>; | ||
464 | #size-cells = <1>; | ||
465 | cell-index = <2>; | ||
466 | device_type = "network"; | ||
467 | model = "eTSEC"; | ||
468 | compatible = "gianfar"; | ||
469 | reg = <0x26000 0x1000>; | ||
470 | ranges = <0x0 0x26000 0x1000>; | ||
471 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
472 | interrupts = <31 2 32 2 33 2>; | ||
473 | interrupt-parent = <&mpic>; | ||
474 | phy-handle = <&phy1>; | 252 | phy-handle = <&phy1>; |
475 | phy-connection-type = "rgmii-id"; | 253 | phy-connection-type = "rgmii-id"; |
476 | }; | 254 | }; |
477 | 255 | ||
478 | sdhci@2e000 { | 256 | }; |
479 | compatible = "fsl,p2020-esdhc", "fsl,esdhc"; | ||
480 | reg = <0x2e000 0x1000>; | ||
481 | interrupts = <72 0x2>; | ||
482 | interrupt-parent = <&mpic>; | ||
483 | /* Filled in by U-Boot */ | ||
484 | clock-frequency = <0>; | ||
485 | }; | ||
486 | |||
487 | crypto@30000 { | ||
488 | compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4", | ||
489 | "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; | ||
490 | reg = <0x30000 0x10000>; | ||
491 | interrupts = <45 2 58 2>; | ||
492 | interrupt-parent = <&mpic>; | ||
493 | fsl,num-channels = <4>; | ||
494 | fsl,channel-fifo-len = <24>; | ||
495 | fsl,exec-units-mask = <0xbfe>; | ||
496 | fsl,descriptor-types-mask = <0x3ab0ebf>; | ||
497 | }; | ||
498 | |||
499 | mpic: pic@40000 { | ||
500 | interrupt-controller; | ||
501 | #address-cells = <0>; | ||
502 | #interrupt-cells = <2>; | ||
503 | reg = <0x40000 0x40000>; | ||
504 | compatible = "chrp,open-pic"; | ||
505 | device_type = "open-pic"; | ||
506 | }; | ||
507 | |||
508 | msi@41600 { | ||
509 | compatible = "fsl,p2020-msi", "fsl,mpic-msi"; | ||
510 | reg = <0x41600 0x80>; | ||
511 | msi-available-ranges = <0 0x100>; | ||
512 | interrupts = < | ||
513 | 0xe0 0 | ||
514 | 0xe1 0 | ||
515 | 0xe2 0 | ||
516 | 0xe3 0 | ||
517 | 0xe4 0 | ||
518 | 0xe5 0 | ||
519 | 0xe6 0 | ||
520 | 0xe7 0>; | ||
521 | interrupt-parent = <&mpic>; | ||
522 | }; | ||
523 | 257 | ||
524 | global-utilities@e0000 { //global utilities block | 258 | pci0: pcie@ffe08000 { |
525 | compatible = "fsl,p2020-guts"; | 259 | status = "disabled"; |
526 | reg = <0xe0000 0x1000>; | ||
527 | fsl,has-rstcr; | ||
528 | }; | ||
529 | }; | 260 | }; |
530 | 261 | ||
531 | pci0: pcie@ffe09000 { | 262 | pci1: pcie@ffe09000 { |
532 | compatible = "fsl,mpc8548-pcie"; | ||
533 | device_type = "pci"; | ||
534 | #interrupt-cells = <1>; | ||
535 | #size-cells = <2>; | ||
536 | #address-cells = <3>; | ||
537 | reg = <0 0xffe09000 0 0x1000>; | ||
538 | bus-range = <0 255>; | ||
539 | ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 | 263 | ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 |
540 | 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>; | 264 | 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; |
541 | clock-frequency = <33333333>; | 265 | interrupt-map-mask = <0xf800 0x0 0x0 0x7>; |
542 | interrupt-parent = <&mpic>; | 266 | interrupt-map = < |
543 | interrupts = <25 2>; | 267 | /* IDSEL 0x0 */ |
544 | pcie@0 { | 268 | 0000 0x0 0x0 0x1 &mpic 0x4 0x1 |
269 | 0000 0x0 0x0 0x2 &mpic 0x5 0x1 | ||
270 | 0000 0x0 0x0 0x3 &mpic 0x6 0x1 | ||
271 | 0000 0x0 0x0 0x4 &mpic 0x7 0x1 | ||
272 | >; | ||
273 | pcie@0 { | ||
545 | reg = <0x0 0x0 0x0 0x0 0x0>; | 274 | reg = <0x0 0x0 0x0 0x0 0x0>; |
546 | #size-cells = <2>; | 275 | #size-cells = <2>; |
547 | #address-cells = <3>; | 276 | #address-cells = <3>; |
@@ -556,26 +285,24 @@ | |||
556 | }; | 285 | }; |
557 | }; | 286 | }; |
558 | 287 | ||
559 | pci1: pcie@ffe0a000 { | 288 | pci2: pcie@ffe0a000 { |
560 | compatible = "fsl,mpc8548-pcie"; | 289 | ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 |
561 | device_type = "pci"; | 290 | 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; |
562 | #interrupt-cells = <1>; | 291 | interrupt-map-mask = <0xf800 0x0 0x0 0x7>; |
563 | #size-cells = <2>; | 292 | interrupt-map = < |
564 | #address-cells = <3>; | 293 | /* IDSEL 0x0 */ |
565 | reg = <0 0xffe0a000 0 0x1000>; | 294 | 0000 0x0 0x0 0x1 &mpic 0x0 0x1 |
566 | bus-range = <0 255>; | 295 | 0000 0x0 0x0 0x2 &mpic 0x1 0x1 |
567 | ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 | 296 | 0000 0x0 0x0 0x3 &mpic 0x2 0x1 |
568 | 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>; | 297 | 0000 0x0 0x0 0x4 &mpic 0x3 0x1 |
569 | clock-frequency = <33333333>; | 298 | >; |
570 | interrupt-parent = <&mpic>; | ||
571 | interrupts = <26 2>; | ||
572 | pcie@0 { | 299 | pcie@0 { |
573 | reg = <0x0 0x0 0x0 0x0 0x0>; | 300 | reg = <0x0 0x0 0x0 0x0 0x0>; |
574 | #size-cells = <2>; | 301 | #size-cells = <2>; |
575 | #address-cells = <3>; | 302 | #address-cells = <3>; |
576 | device_type = "pci"; | 303 | device_type = "pci"; |
577 | ranges = <0x2000000 0x0 0xc0000000 | 304 | ranges = <0x2000000 0x0 0x80000000 |
578 | 0x2000000 0x0 0xc0000000 | 305 | 0x2000000 0x0 0x80000000 |
579 | 0x0 0x20000000 | 306 | 0x0 0x20000000 |
580 | 307 | ||
581 | 0x1000000 0x0 0x0 | 308 | 0x1000000 0x0 0x0 |
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts index 0fe93d0c8b2e..fc8ddddfccb6 100644 --- a/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts +++ b/arch/powerpc/boot/dts/p2020rdb_camp_core0.dts | |||
@@ -6,7 +6,7 @@ | |||
6 | * This dts file allows core0 to have memory, l2, i2c, spi, gpio, dma1, usb, | 6 | * This dts file allows core0 to have memory, l2, i2c, spi, gpio, dma1, usb, |
7 | * eth1, eth2, sdhc, crypto, global-util, pci0. | 7 | * eth1, eth2, sdhc, crypto, global-util, pci0. |
8 | * | 8 | * |
9 | * Copyright 2009 Freescale Semiconductor Inc. | 9 | * Copyright 2009-2011 Freescale Semiconductor Inc. |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or modify it | 11 | * This program is free software; you can redistribute it and/or modify it |
12 | * under the terms of the GNU General Public License as published by the | 12 | * under the terms of the GNU General Public License as published by the |
@@ -14,12 +14,11 @@ | |||
14 | * option) any later version. | 14 | * option) any later version. |
15 | */ | 15 | */ |
16 | 16 | ||
17 | /dts-v1/; | 17 | /include/ "p2020si.dtsi" |
18 | |||
18 | / { | 19 | / { |
19 | model = "fsl,P2020"; | 20 | model = "fsl,P2020RDB"; |
20 | compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP"; | 21 | compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP"; |
21 | #address-cells = <2>; | ||
22 | #size-cells = <2>; | ||
23 | 22 | ||
24 | aliases { | 23 | aliases { |
25 | ethernet1 = &enet1; | 24 | ethernet1 = &enet1; |
@@ -29,91 +28,33 @@ | |||
29 | }; | 28 | }; |
30 | 29 | ||
31 | cpus { | 30 | cpus { |
32 | #address-cells = <1>; | 31 | PowerPC,P2020@1 { |
33 | #size-cells = <0>; | 32 | status = "disabled"; |
34 | |||
35 | PowerPC,P2020@0 { | ||
36 | device_type = "cpu"; | ||
37 | reg = <0x0>; | ||
38 | next-level-cache = <&L2>; | ||
39 | }; | 33 | }; |
34 | |||
40 | }; | 35 | }; |
41 | 36 | ||
42 | memory { | 37 | memory { |
43 | device_type = "memory"; | 38 | device_type = "memory"; |
44 | }; | 39 | }; |
45 | 40 | ||
46 | soc@ffe00000 { | 41 | localbus@ffe05000 { |
47 | #address-cells = <1>; | 42 | status = "disabled"; |
48 | #size-cells = <1>; | 43 | }; |
49 | device_type = "soc"; | ||
50 | compatible = "fsl,p2020-immr", "simple-bus"; | ||
51 | ranges = <0x0 0x0 0xffe00000 0x100000>; | ||
52 | bus-frequency = <0>; // Filled out by uboot. | ||
53 | |||
54 | ecm-law@0 { | ||
55 | compatible = "fsl,ecm-law"; | ||
56 | reg = <0x0 0x1000>; | ||
57 | fsl,num-laws = <12>; | ||
58 | }; | ||
59 | |||
60 | ecm@1000 { | ||
61 | compatible = "fsl,p2020-ecm", "fsl,ecm"; | ||
62 | reg = <0x1000 0x1000>; | ||
63 | interrupts = <17 2>; | ||
64 | interrupt-parent = <&mpic>; | ||
65 | }; | ||
66 | |||
67 | memory-controller@2000 { | ||
68 | compatible = "fsl,p2020-memory-controller"; | ||
69 | reg = <0x2000 0x1000>; | ||
70 | interrupt-parent = <&mpic>; | ||
71 | interrupts = <18 2>; | ||
72 | }; | ||
73 | 44 | ||
45 | soc@ffe00000 { | ||
74 | i2c@3000 { | 46 | i2c@3000 { |
75 | #address-cells = <1>; | ||
76 | #size-cells = <0>; | ||
77 | cell-index = <0>; | ||
78 | compatible = "fsl-i2c"; | ||
79 | reg = <0x3000 0x100>; | ||
80 | interrupts = <43 2>; | ||
81 | interrupt-parent = <&mpic>; | ||
82 | dfsrr; | ||
83 | rtc@68 { | 47 | rtc@68 { |
84 | compatible = "dallas,ds1339"; | 48 | compatible = "dallas,ds1339"; |
85 | reg = <0x68>; | 49 | reg = <0x68>; |
86 | }; | 50 | }; |
87 | }; | 51 | }; |
88 | 52 | ||
89 | i2c@3100 { | 53 | serial1: serial@4600 { |
90 | #address-cells = <1>; | 54 | status = "disabled"; |
91 | #size-cells = <0>; | ||
92 | cell-index = <1>; | ||
93 | compatible = "fsl-i2c"; | ||
94 | reg = <0x3100 0x100>; | ||
95 | interrupts = <43 2>; | ||
96 | interrupt-parent = <&mpic>; | ||
97 | dfsrr; | ||
98 | }; | ||
99 | |||
100 | serial0: serial@4500 { | ||
101 | cell-index = <0>; | ||
102 | device_type = "serial"; | ||
103 | compatible = "ns16550"; | ||
104 | reg = <0x4500 0x100>; | ||
105 | clock-frequency = <0>; | ||
106 | }; | 55 | }; |
107 | 56 | ||
108 | spi@7000 { | 57 | spi@7000 { |
109 | cell-index = <0>; | ||
110 | #address-cells = <1>; | ||
111 | #size-cells = <0>; | ||
112 | compatible = "fsl,espi"; | ||
113 | reg = <0x7000 0x1000>; | ||
114 | interrupts = <59 0x2>; | ||
115 | interrupt-parent = <&mpic>; | ||
116 | mode = "cpu"; | ||
117 | 58 | ||
118 | fsl_m25p80@0 { | 59 | fsl_m25p80@0 { |
119 | #address-cells = <1>; | 60 | #address-cells = <1>; |
@@ -161,76 +102,15 @@ | |||
161 | }; | 102 | }; |
162 | }; | 103 | }; |
163 | 104 | ||
164 | gpio: gpio-controller@f000 { | 105 | dma@c300 { |
165 | #gpio-cells = <2>; | 106 | status = "disabled"; |
166 | compatible = "fsl,mpc8572-gpio"; | ||
167 | reg = <0xf000 0x100>; | ||
168 | interrupts = <47 0x2>; | ||
169 | interrupt-parent = <&mpic>; | ||
170 | gpio-controller; | ||
171 | }; | ||
172 | |||
173 | L2: l2-cache-controller@20000 { | ||
174 | compatible = "fsl,p2020-l2-cache-controller"; | ||
175 | reg = <0x20000 0x1000>; | ||
176 | cache-line-size = <32>; // 32 bytes | ||
177 | cache-size = <0x80000>; // L2,512K | ||
178 | interrupt-parent = <&mpic>; | ||
179 | interrupts = <16 2>; | ||
180 | }; | ||
181 | |||
182 | dma@21300 { | ||
183 | #address-cells = <1>; | ||
184 | #size-cells = <1>; | ||
185 | compatible = "fsl,eloplus-dma"; | ||
186 | reg = <0x21300 0x4>; | ||
187 | ranges = <0x0 0x21100 0x200>; | ||
188 | cell-index = <0>; | ||
189 | dma-channel@0 { | ||
190 | compatible = "fsl,eloplus-dma-channel"; | ||
191 | reg = <0x0 0x80>; | ||
192 | cell-index = <0>; | ||
193 | interrupt-parent = <&mpic>; | ||
194 | interrupts = <20 2>; | ||
195 | }; | ||
196 | dma-channel@80 { | ||
197 | compatible = "fsl,eloplus-dma-channel"; | ||
198 | reg = <0x80 0x80>; | ||
199 | cell-index = <1>; | ||
200 | interrupt-parent = <&mpic>; | ||
201 | interrupts = <21 2>; | ||
202 | }; | ||
203 | dma-channel@100 { | ||
204 | compatible = "fsl,eloplus-dma-channel"; | ||
205 | reg = <0x100 0x80>; | ||
206 | cell-index = <2>; | ||
207 | interrupt-parent = <&mpic>; | ||
208 | interrupts = <22 2>; | ||
209 | }; | ||
210 | dma-channel@180 { | ||
211 | compatible = "fsl,eloplus-dma-channel"; | ||
212 | reg = <0x180 0x80>; | ||
213 | cell-index = <3>; | ||
214 | interrupt-parent = <&mpic>; | ||
215 | interrupts = <23 2>; | ||
216 | }; | ||
217 | }; | 107 | }; |
218 | 108 | ||
219 | usb@22000 { | 109 | usb@22000 { |
220 | #address-cells = <1>; | ||
221 | #size-cells = <0>; | ||
222 | compatible = "fsl-usb2-dr"; | ||
223 | reg = <0x22000 0x1000>; | ||
224 | interrupt-parent = <&mpic>; | ||
225 | interrupts = <28 0x2>; | ||
226 | phy_type = "ulpi"; | 110 | phy_type = "ulpi"; |
227 | }; | 111 | }; |
228 | 112 | ||
229 | mdio@24520 { | 113 | mdio@24520 { |
230 | #address-cells = <1>; | ||
231 | #size-cells = <0>; | ||
232 | compatible = "fsl,gianfar-mdio"; | ||
233 | reg = <0x24520 0x20>; | ||
234 | 114 | ||
235 | phy0: ethernet-phy@0 { | 115 | phy0: ethernet-phy@0 { |
236 | interrupt-parent = <&mpic>; | 116 | interrupt-parent = <&mpic>; |
@@ -245,29 +125,21 @@ | |||
245 | }; | 125 | }; |
246 | 126 | ||
247 | mdio@25520 { | 127 | mdio@25520 { |
248 | #address-cells = <1>; | ||
249 | #size-cells = <0>; | ||
250 | compatible = "fsl,gianfar-tbi"; | ||
251 | reg = <0x26520 0x20>; | ||
252 | |||
253 | tbi0: tbi-phy@11 { | 128 | tbi0: tbi-phy@11 { |
254 | reg = <0x11>; | 129 | reg = <0x11>; |
255 | device_type = "tbi-phy"; | 130 | device_type = "tbi-phy"; |
256 | }; | 131 | }; |
257 | }; | 132 | }; |
258 | 133 | ||
134 | mdio@26520 { | ||
135 | status = "disabled"; | ||
136 | }; | ||
137 | |||
138 | enet0: ethernet@24000 { | ||
139 | status = "disabled"; | ||
140 | }; | ||
141 | |||
259 | enet1: ethernet@25000 { | 142 | enet1: ethernet@25000 { |
260 | #address-cells = <1>; | ||
261 | #size-cells = <1>; | ||
262 | cell-index = <1>; | ||
263 | device_type = "network"; | ||
264 | model = "eTSEC"; | ||
265 | compatible = "gianfar"; | ||
266 | reg = <0x25000 0x1000>; | ||
267 | ranges = <0x0 0x25000 0x1000>; | ||
268 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
269 | interrupts = <35 2 36 2 40 2>; | ||
270 | interrupt-parent = <&mpic>; | ||
271 | tbi-handle = <&tbi0>; | 143 | tbi-handle = <&tbi0>; |
272 | phy-handle = <&phy0>; | 144 | phy-handle = <&phy0>; |
273 | phy-connection-type = "sgmii"; | 145 | phy-connection-type = "sgmii"; |
@@ -275,49 +147,12 @@ | |||
275 | }; | 147 | }; |
276 | 148 | ||
277 | enet2: ethernet@26000 { | 149 | enet2: ethernet@26000 { |
278 | #address-cells = <1>; | ||
279 | #size-cells = <1>; | ||
280 | cell-index = <2>; | ||
281 | device_type = "network"; | ||
282 | model = "eTSEC"; | ||
283 | compatible = "gianfar"; | ||
284 | reg = <0x26000 0x1000>; | ||
285 | ranges = <0x0 0x26000 0x1000>; | ||
286 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
287 | interrupts = <31 2 32 2 33 2>; | ||
288 | interrupt-parent = <&mpic>; | ||
289 | phy-handle = <&phy1>; | 150 | phy-handle = <&phy1>; |
290 | phy-connection-type = "rgmii-id"; | 151 | phy-connection-type = "rgmii-id"; |
291 | }; | 152 | }; |
292 | 153 | ||
293 | sdhci@2e000 { | ||
294 | compatible = "fsl,p2020-esdhc", "fsl,esdhc"; | ||
295 | reg = <0x2e000 0x1000>; | ||
296 | interrupts = <72 0x2>; | ||
297 | interrupt-parent = <&mpic>; | ||
298 | /* Filled in by U-Boot */ | ||
299 | clock-frequency = <0>; | ||
300 | }; | ||
301 | |||
302 | crypto@30000 { | ||
303 | compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4", | ||
304 | "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; | ||
305 | reg = <0x30000 0x10000>; | ||
306 | interrupts = <45 2 58 2>; | ||
307 | interrupt-parent = <&mpic>; | ||
308 | fsl,num-channels = <4>; | ||
309 | fsl,channel-fifo-len = <24>; | ||
310 | fsl,exec-units-mask = <0xbfe>; | ||
311 | fsl,descriptor-types-mask = <0x3ab0ebf>; | ||
312 | }; | ||
313 | 154 | ||
314 | mpic: pic@40000 { | 155 | mpic: pic@40000 { |
315 | interrupt-controller; | ||
316 | #address-cells = <0>; | ||
317 | #interrupt-cells = <2>; | ||
318 | reg = <0x40000 0x40000>; | ||
319 | compatible = "chrp,open-pic"; | ||
320 | device_type = "open-pic"; | ||
321 | protected-sources = < | 156 | protected-sources = < |
322 | 42 76 77 78 79 /* serial1 , dma2 */ | 157 | 42 76 77 78 79 /* serial1 , dma2 */ |
323 | 29 30 34 26 /* enet0, pci1 */ | 158 | 29 30 34 26 /* enet0, pci1 */ |
@@ -326,26 +161,28 @@ | |||
326 | >; | 161 | >; |
327 | }; | 162 | }; |
328 | 163 | ||
329 | global-utilities@e0000 { | 164 | msi@41600 { |
330 | compatible = "fsl,p2020-guts"; | 165 | status = "disabled"; |
331 | reg = <0xe0000 0x1000>; | ||
332 | fsl,has-rstcr; | ||
333 | }; | 166 | }; |
167 | |||
168 | |||
334 | }; | 169 | }; |
335 | 170 | ||
336 | pci0: pcie@ffe09000 { | 171 | pci0: pcie@ffe08000 { |
337 | compatible = "fsl,mpc8548-pcie"; | 172 | status = "disabled"; |
338 | device_type = "pci"; | 173 | }; |
339 | #interrupt-cells = <1>; | 174 | |
340 | #size-cells = <2>; | 175 | pci1: pcie@ffe09000 { |
341 | #address-cells = <3>; | ||
342 | reg = <0 0xffe09000 0 0x1000>; | ||
343 | bus-range = <0 255>; | ||
344 | ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 | 176 | ranges = <0x2000000 0x0 0xa0000000 0 0xa0000000 0x0 0x20000000 |
345 | 0x1000000 0x0 0x00000000 0 0xffc30000 0x0 0x10000>; | 177 | 0x1000000 0x0 0x00000000 0 0xffc10000 0x0 0x10000>; |
346 | clock-frequency = <33333333>; | 178 | interrupt-map-mask = <0xf800 0x0 0x0 0x7>; |
347 | interrupt-parent = <&mpic>; | 179 | interrupt-map = < |
348 | interrupts = <25 2>; | 180 | /* IDSEL 0x0 */ |
181 | 0000 0x0 0x0 0x1 &mpic 0x4 0x1 | ||
182 | 0000 0x0 0x0 0x2 &mpic 0x5 0x1 | ||
183 | 0000 0x0 0x0 0x3 &mpic 0x6 0x1 | ||
184 | 0000 0x0 0x0 0x4 &mpic 0x7 0x1 | ||
185 | >; | ||
349 | pcie@0 { | 186 | pcie@0 { |
350 | reg = <0x0 0x0 0x0 0x0 0x0>; | 187 | reg = <0x0 0x0 0x0 0x0 0x0>; |
351 | #size-cells = <2>; | 188 | #size-cells = <2>; |
@@ -360,4 +197,8 @@ | |||
360 | 0x0 0x100000>; | 197 | 0x0 0x100000>; |
361 | }; | 198 | }; |
362 | }; | 199 | }; |
200 | |||
201 | pci2: pcie@ffe0a000 { | ||
202 | status = "disabled"; | ||
203 | }; | ||
363 | }; | 204 | }; |
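[Editor's note] The p2020rdb_camp_core0.dts rework above is the pattern the rest of this series follows: the board file pulls in the shared SoC description with /include/ and then lists only its deltas, marking nodes owned by the other CAMP image with status = "disabled". Because dtc merges nodes that share the same path, an override needs nothing beyond the path and the properties being changed. A cut-down illustration of that idiom (node paths as in the P2020 trees above, everything else trimmed; not text from the patch):

	/* board.dts - illustrative sketch only */
	/include/ "p2020si.dtsi"	/* /dts-v1/; and the full SoC skeleton live here */

	/ {
		model = "fsl,P2020RDB";

		soc@ffe00000 {
			serial@4600 {
				status = "disabled";	/* owned by the other core's image */
			};
		};
	};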
diff --git a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts index e95a51285328..261c34ba45ec 100644 --- a/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts +++ b/arch/powerpc/boot/dts/p2020rdb_camp_core1.dts | |||
@@ -7,7 +7,7 @@ | |||
7 | * | 7 | * |
8 | * Please note to add "-b 1" for core1's dts compiling. | 8 | * Please note to add "-b 1" for core1's dts compiling. |
9 | * | 9 | * |
10 | * Copyright 2009 Freescale Semiconductor Inc. | 10 | * Copyright 2009-2011 Freescale Semiconductor Inc. |
11 | * | 11 | * |
12 | * This program is free software; you can redistribute it and/or modify it | 12 | * This program is free software; you can redistribute it and/or modify it |
13 | * under the terms of the GNU General Public License as published by the | 13 | * under the terms of the GNU General Public License as published by the |
@@ -15,27 +15,21 @@ | |||
15 | * option) any later version. | 15 | * option) any later version. |
16 | */ | 16 | */ |
17 | 17 | ||
18 | /dts-v1/; | 18 | /include/ "p2020si.dtsi" |
19 | |||
19 | / { | 20 | / { |
20 | model = "fsl,P2020"; | 21 | model = "fsl,P2020RDB"; |
21 | compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP"; | 22 | compatible = "fsl,P2020RDB", "fsl,MPC85XXRDB-CAMP"; |
22 | #address-cells = <2>; | ||
23 | #size-cells = <2>; | ||
24 | 23 | ||
25 | aliases { | 24 | aliases { |
26 | ethernet0 = &enet0; | 25 | ethernet0 = &enet0; |
27 | serial0 = &serial0; | 26 | serial0 = &serial1; |
28 | pci1 = &pci1; | 27 | pci1 = &pci1; |
29 | }; | 28 | }; |
30 | 29 | ||
31 | cpus { | 30 | cpus { |
32 | #address-cells = <1>; | 31 | PowerPC,P2020@0 { |
33 | #size-cells = <0>; | 32 | status = "disabled"; |
34 | |||
35 | PowerPC,P2020@1 { | ||
36 | device_type = "cpu"; | ||
37 | reg = <0x1>; | ||
38 | next-level-cache = <&L2>; | ||
39 | }; | 33 | }; |
40 | }; | 34 | }; |
41 | 35 | ||
@@ -43,20 +37,37 @@ | |||
43 | device_type = "memory"; | 37 | device_type = "memory"; |
44 | }; | 38 | }; |
45 | 39 | ||
40 | localbus@ffe05000 { | ||
41 | status = "disabled"; | ||
42 | }; | ||
43 | |||
46 | soc@ffe00000 { | 44 | soc@ffe00000 { |
47 | #address-cells = <1>; | 45 | ecm-law@0 { |
48 | #size-cells = <1>; | 46 | status = "disabled"; |
49 | device_type = "soc"; | 47 | }; |
50 | compatible = "fsl,p2020-immr", "simple-bus"; | 48 | |
51 | ranges = <0x0 0x0 0xffe00000 0x100000>; | 49 | ecm@1000 { |
52 | bus-frequency = <0>; // Filled out by uboot. | 50 | status = "disabled"; |
53 | 51 | }; | |
54 | serial0: serial@4600 { | 52 | |
55 | cell-index = <1>; | 53 | memory-controller@2000 { |
56 | device_type = "serial"; | 54 | status = "disabled"; |
57 | compatible = "ns16550"; | 55 | }; |
58 | reg = <0x4600 0x100>; | 56 | |
59 | clock-frequency = <0>; | 57 | i2c@3000 { |
58 | status = "disabled"; | ||
59 | }; | ||
60 | |||
61 | i2c@3100 { | ||
62 | status = "disabled"; | ||
63 | }; | ||
64 | |||
65 | serial0: serial@4500 { | ||
66 | status = "disabled"; | ||
67 | }; | ||
68 | |||
69 | spi@7000 { | ||
70 | status = "disabled"; | ||
60 | }; | 71 | }; |
61 | 72 | ||
62 | dma@c300 { | 73 | dma@c300 { |
@@ -96,6 +107,10 @@ | |||
96 | }; | 107 | }; |
97 | }; | 108 | }; |
98 | 109 | ||
110 | gpio: gpio-controller@f000 { | ||
111 | status = "disabled"; | ||
112 | }; | ||
113 | |||
99 | L2: l2-cache-controller@20000 { | 114 | L2: l2-cache-controller@20000 { |
100 | compatible = "fsl,p2020-l2-cache-controller"; | 115 | compatible = "fsl,p2020-l2-cache-controller"; |
101 | reg = <0x20000 0x1000>; | 116 | reg = <0x20000 0x1000>; |
@@ -104,31 +119,49 @@ | |||
104 | interrupt-parent = <&mpic>; | 119 | interrupt-parent = <&mpic>; |
105 | }; | 120 | }; |
106 | 121 | ||
122 | dma@21300 { | ||
123 | status = "disabled"; | ||
124 | }; | ||
125 | |||
126 | usb@22000 { | ||
127 | status = "disabled"; | ||
128 | }; | ||
129 | |||
130 | mdio@24520 { | ||
131 | status = "disabled"; | ||
132 | }; | ||
133 | |||
134 | mdio@25520 { | ||
135 | status = "disabled"; | ||
136 | }; | ||
137 | |||
138 | mdio@26520 { | ||
139 | status = "disabled"; | ||
140 | }; | ||
107 | 141 | ||
108 | enet0: ethernet@24000 { | 142 | enet0: ethernet@24000 { |
109 | #address-cells = <1>; | ||
110 | #size-cells = <1>; | ||
111 | cell-index = <0>; | ||
112 | device_type = "network"; | ||
113 | model = "eTSEC"; | ||
114 | compatible = "gianfar"; | ||
115 | reg = <0x24000 0x1000>; | ||
116 | ranges = <0x0 0x24000 0x1000>; | ||
117 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
118 | interrupts = <29 2 30 2 34 2>; | ||
119 | interrupt-parent = <&mpic>; | ||
120 | fixed-link = <1 1 1000 0 0>; | 143 | fixed-link = <1 1 1000 0 0>; |
121 | phy-connection-type = "rgmii-id"; | 144 | phy-connection-type = "rgmii-id"; |
122 | 145 | ||
123 | }; | 146 | }; |
124 | 147 | ||
148 | enet1: ethernet@25000 { | ||
149 | status = "disabled"; | ||
150 | }; | ||
151 | |||
152 | enet2: ethernet@26000 { | ||
153 | status = "disabled"; | ||
154 | }; | ||
155 | |||
156 | sdhci@2e000 { | ||
157 | status = "disabled"; | ||
158 | }; | ||
159 | |||
160 | crypto@30000 { | ||
161 | status = "disabled"; | ||
162 | }; | ||
163 | |||
125 | mpic: pic@40000 { | 164 | mpic: pic@40000 { |
126 | interrupt-controller; | ||
127 | #address-cells = <0>; | ||
128 | #interrupt-cells = <2>; | ||
129 | reg = <0x40000 0x40000>; | ||
130 | compatible = "chrp,open-pic"; | ||
131 | device_type = "open-pic"; | ||
132 | protected-sources = < | 165 | protected-sources = < |
133 | 17 18 43 42 59 47 /*ecm, mem, i2c, serial0, spi,gpio */ | 166 | 17 18 43 42 59 47 /*ecm, mem, i2c, serial0, spi,gpio */ |
134 | 16 20 21 22 23 28 /* L2, dma1, USB */ | 167 | 16 20 21 22 23 28 /* L2, dma1, USB */ |
@@ -152,28 +185,39 @@ | |||
152 | 0xe7 0>; | 185 | 0xe7 0>; |
153 | interrupt-parent = <&mpic>; | 186 | interrupt-parent = <&mpic>; |
154 | }; | 187 | }; |
188 | |||
189 | global-utilities@e0000 { //global utilities block | ||
190 | status = "disabled"; | ||
191 | }; | ||
192 | |||
155 | }; | 193 | }; |
156 | 194 | ||
157 | pci1: pcie@ffe0a000 { | 195 | pci0: pcie@ffe08000 { |
158 | compatible = "fsl,mpc8548-pcie"; | 196 | status = "disabled"; |
159 | device_type = "pci"; | 197 | }; |
160 | #interrupt-cells = <1>; | 198 | |
161 | #size-cells = <2>; | 199 | pci1: pcie@ffe09000 { |
162 | #address-cells = <3>; | 200 | status = "disabled"; |
163 | reg = <0 0xffe0a000 0 0x1000>; | 201 | }; |
164 | bus-range = <0 255>; | 202 | |
165 | ranges = <0x2000000 0x0 0xc0000000 0 0xc0000000 0x0 0x20000000 | 203 | pci2: pcie@ffe0a000 { |
166 | 0x1000000 0x0 0x00000000 0 0xffc20000 0x0 0x10000>; | 204 | ranges = <0x2000000 0x0 0x80000000 0 0x80000000 0x0 0x20000000 |
167 | clock-frequency = <33333333>; | 205 | 0x1000000 0x0 0x00000000 0 0xffc00000 0x0 0x10000>; |
168 | interrupt-parent = <&mpic>; | 206 | interrupt-map-mask = <0xf800 0x0 0x0 0x7>; |
169 | interrupts = <26 2>; | 207 | interrupt-map = < |
208 | /* IDSEL 0x0 */ | ||
209 | 0000 0x0 0x0 0x1 &mpic 0x0 0x1 | ||
210 | 0000 0x0 0x0 0x2 &mpic 0x1 0x1 | ||
211 | 0000 0x0 0x0 0x3 &mpic 0x2 0x1 | ||
212 | 0000 0x0 0x0 0x4 &mpic 0x3 0x1 | ||
213 | >; | ||
170 | pcie@0 { | 214 | pcie@0 { |
171 | reg = <0x0 0x0 0x0 0x0 0x0>; | 215 | reg = <0x0 0x0 0x0 0x0 0x0>; |
172 | #size-cells = <2>; | 216 | #size-cells = <2>; |
173 | #address-cells = <3>; | 217 | #address-cells = <3>; |
174 | device_type = "pci"; | 218 | device_type = "pci"; |
175 | ranges = <0x2000000 0x0 0xc0000000 | 219 | ranges = <0x2000000 0x0 0x80000000 |
176 | 0x2000000 0x0 0xc0000000 | 220 | 0x2000000 0x0 0x80000000 |
177 | 0x0 0x20000000 | 221 | 0x0 0x20000000 |
178 | 222 | ||
179 | 0x1000000 0x0 0x0 | 223 | 0x1000000 0x0 0x0 |
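[Editor's note] Both CAMP trees also switch their PCIe bridges from a bare interrupts property to an explicit interrupt-map, so legacy INTA..INTD routing is spelled out per slot. Each entry is three cells of PCI child address (only the device-number bits survive the 0xf800 mask), one cell for the interrupt pin (masked with 0x7), the parent phandle, and the two-cell MPIC specifier (source number, sense/level flag). An annotated reading of the first entry used above; the comments are editorial, not part of the patch:

	interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
	interrupt-map = <
		/* child address (IDSEL 0x0), pin 1 = INTA -> MPIC source 0x4, sense 0x1 */
		0000 0x0 0x0   0x1   &mpic 0x4 0x1
	>;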
diff --git a/arch/powerpc/boot/dts/p2020si.dtsi b/arch/powerpc/boot/dts/p2020si.dtsi new file mode 100644 index 000000000000..6def17f265d3 --- /dev/null +++ b/arch/powerpc/boot/dts/p2020si.dtsi | |||
@@ -0,0 +1,382 @@ | |||
1 | /* | ||
2 | * P2020 Device Tree Source | ||
3 | * | ||
4 | * Copyright 2011 Freescale Semiconductor Inc. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | */ | ||
11 | |||
12 | /dts-v1/; | ||
13 | / { | ||
14 | compatible = "fsl,P2020"; | ||
15 | #address-cells = <2>; | ||
16 | #size-cells = <2>; | ||
17 | |||
18 | cpus { | ||
19 | #address-cells = <1>; | ||
20 | #size-cells = <0>; | ||
21 | |||
22 | PowerPC,P2020@0 { | ||
23 | device_type = "cpu"; | ||
24 | reg = <0x0>; | ||
25 | next-level-cache = <&L2>; | ||
26 | }; | ||
27 | |||
28 | PowerPC,P2020@1 { | ||
29 | device_type = "cpu"; | ||
30 | reg = <0x1>; | ||
31 | next-level-cache = <&L2>; | ||
32 | }; | ||
33 | }; | ||
34 | |||
35 | localbus@ffe05000 { | ||
36 | #address-cells = <2>; | ||
37 | #size-cells = <1>; | ||
38 | compatible = "fsl,p2020-elbc", "fsl,elbc", "simple-bus"; | ||
39 | reg = <0 0xffe05000 0 0x1000>; | ||
40 | interrupts = <19 2>; | ||
41 | interrupt-parent = <&mpic>; | ||
42 | }; | ||
43 | |||
44 | soc@ffe00000 { | ||
45 | #address-cells = <1>; | ||
46 | #size-cells = <1>; | ||
47 | device_type = "soc"; | ||
48 | compatible = "fsl,p2020-immr", "simple-bus"; | ||
49 | ranges = <0x0 0x0 0xffe00000 0x100000>; | ||
50 | bus-frequency = <0>; // Filled out by uboot. | ||
51 | |||
52 | ecm-law@0 { | ||
53 | compatible = "fsl,ecm-law"; | ||
54 | reg = <0x0 0x1000>; | ||
55 | fsl,num-laws = <12>; | ||
56 | }; | ||
57 | |||
58 | ecm@1000 { | ||
59 | compatible = "fsl,p2020-ecm", "fsl,ecm"; | ||
60 | reg = <0x1000 0x1000>; | ||
61 | interrupts = <17 2>; | ||
62 | interrupt-parent = <&mpic>; | ||
63 | }; | ||
64 | |||
65 | memory-controller@2000 { | ||
66 | compatible = "fsl,p2020-memory-controller"; | ||
67 | reg = <0x2000 0x1000>; | ||
68 | interrupt-parent = <&mpic>; | ||
69 | interrupts = <18 2>; | ||
70 | }; | ||
71 | |||
72 | i2c@3000 { | ||
73 | #address-cells = <1>; | ||
74 | #size-cells = <0>; | ||
75 | cell-index = <0>; | ||
76 | compatible = "fsl-i2c"; | ||
77 | reg = <0x3000 0x100>; | ||
78 | interrupts = <43 2>; | ||
79 | interrupt-parent = <&mpic>; | ||
80 | dfsrr; | ||
81 | }; | ||
82 | |||
83 | i2c@3100 { | ||
84 | #address-cells = <1>; | ||
85 | #size-cells = <0>; | ||
86 | cell-index = <1>; | ||
87 | compatible = "fsl-i2c"; | ||
88 | reg = <0x3100 0x100>; | ||
89 | interrupts = <43 2>; | ||
90 | interrupt-parent = <&mpic>; | ||
91 | dfsrr; | ||
92 | }; | ||
93 | |||
94 | serial0: serial@4500 { | ||
95 | cell-index = <0>; | ||
96 | device_type = "serial"; | ||
97 | compatible = "ns16550"; | ||
98 | reg = <0x4500 0x100>; | ||
99 | clock-frequency = <0>; | ||
100 | interrupts = <42 2>; | ||
101 | interrupt-parent = <&mpic>; | ||
102 | }; | ||
103 | |||
104 | serial1: serial@4600 { | ||
105 | cell-index = <1>; | ||
106 | device_type = "serial"; | ||
107 | compatible = "ns16550"; | ||
108 | reg = <0x4600 0x100>; | ||
109 | clock-frequency = <0>; | ||
110 | interrupts = <42 2>; | ||
111 | interrupt-parent = <&mpic>; | ||
112 | }; | ||
113 | |||
114 | spi@7000 { | ||
115 | cell-index = <0>; | ||
116 | #address-cells = <1>; | ||
117 | #size-cells = <0>; | ||
118 | compatible = "fsl,espi"; | ||
119 | reg = <0x7000 0x1000>; | ||
120 | interrupts = <59 0x2>; | ||
121 | interrupt-parent = <&mpic>; | ||
122 | mode = "cpu"; | ||
123 | }; | ||
124 | |||
125 | dma@c300 { | ||
126 | #address-cells = <1>; | ||
127 | #size-cells = <1>; | ||
128 | compatible = "fsl,eloplus-dma"; | ||
129 | reg = <0xc300 0x4>; | ||
130 | ranges = <0x0 0xc100 0x200>; | ||
131 | cell-index = <1>; | ||
132 | dma-channel@0 { | ||
133 | compatible = "fsl,eloplus-dma-channel"; | ||
134 | reg = <0x0 0x80>; | ||
135 | cell-index = <0>; | ||
136 | interrupt-parent = <&mpic>; | ||
137 | interrupts = <76 2>; | ||
138 | }; | ||
139 | dma-channel@80 { | ||
140 | compatible = "fsl,eloplus-dma-channel"; | ||
141 | reg = <0x80 0x80>; | ||
142 | cell-index = <1>; | ||
143 | interrupt-parent = <&mpic>; | ||
144 | interrupts = <77 2>; | ||
145 | }; | ||
146 | dma-channel@100 { | ||
147 | compatible = "fsl,eloplus-dma-channel"; | ||
148 | reg = <0x100 0x80>; | ||
149 | cell-index = <2>; | ||
150 | interrupt-parent = <&mpic>; | ||
151 | interrupts = <78 2>; | ||
152 | }; | ||
153 | dma-channel@180 { | ||
154 | compatible = "fsl,eloplus-dma-channel"; | ||
155 | reg = <0x180 0x80>; | ||
156 | cell-index = <3>; | ||
157 | interrupt-parent = <&mpic>; | ||
158 | interrupts = <79 2>; | ||
159 | }; | ||
160 | }; | ||
161 | |||
162 | gpio: gpio-controller@f000 { | ||
163 | #gpio-cells = <2>; | ||
164 | compatible = "fsl,mpc8572-gpio"; | ||
165 | reg = <0xf000 0x100>; | ||
166 | interrupts = <47 0x2>; | ||
167 | interrupt-parent = <&mpic>; | ||
168 | gpio-controller; | ||
169 | }; | ||
170 | |||
171 | L2: l2-cache-controller@20000 { | ||
172 | compatible = "fsl,p2020-l2-cache-controller"; | ||
173 | reg = <0x20000 0x1000>; | ||
174 | cache-line-size = <32>; // 32 bytes | ||
175 | cache-size = <0x80000>; // L2,512K | ||
176 | interrupt-parent = <&mpic>; | ||
177 | interrupts = <16 2>; | ||
178 | }; | ||
179 | |||
180 | dma@21300 { | ||
181 | #address-cells = <1>; | ||
182 | #size-cells = <1>; | ||
183 | compatible = "fsl,eloplus-dma"; | ||
184 | reg = <0x21300 0x4>; | ||
185 | ranges = <0x0 0x21100 0x200>; | ||
186 | cell-index = <0>; | ||
187 | dma-channel@0 { | ||
188 | compatible = "fsl,eloplus-dma-channel"; | ||
189 | reg = <0x0 0x80>; | ||
190 | cell-index = <0>; | ||
191 | interrupt-parent = <&mpic>; | ||
192 | interrupts = <20 2>; | ||
193 | }; | ||
194 | dma-channel@80 { | ||
195 | compatible = "fsl,eloplus-dma-channel"; | ||
196 | reg = <0x80 0x80>; | ||
197 | cell-index = <1>; | ||
198 | interrupt-parent = <&mpic>; | ||
199 | interrupts = <21 2>; | ||
200 | }; | ||
201 | dma-channel@100 { | ||
202 | compatible = "fsl,eloplus-dma-channel"; | ||
203 | reg = <0x100 0x80>; | ||
204 | cell-index = <2>; | ||
205 | interrupt-parent = <&mpic>; | ||
206 | interrupts = <22 2>; | ||
207 | }; | ||
208 | dma-channel@180 { | ||
209 | compatible = "fsl,eloplus-dma-channel"; | ||
210 | reg = <0x180 0x80>; | ||
211 | cell-index = <3>; | ||
212 | interrupt-parent = <&mpic>; | ||
213 | interrupts = <23 2>; | ||
214 | }; | ||
215 | }; | ||
216 | |||
217 | usb@22000 { | ||
218 | #address-cells = <1>; | ||
219 | #size-cells = <0>; | ||
220 | compatible = "fsl-usb2-dr"; | ||
221 | reg = <0x22000 0x1000>; | ||
222 | interrupt-parent = <&mpic>; | ||
223 | interrupts = <28 0x2>; | ||
224 | }; | ||
225 | |||
226 | mdio@24520 { | ||
227 | #address-cells = <1>; | ||
228 | #size-cells = <0>; | ||
229 | compatible = "fsl,gianfar-mdio"; | ||
230 | reg = <0x24520 0x20>; | ||
231 | }; | ||
232 | |||
233 | mdio@25520 { | ||
234 | #address-cells = <1>; | ||
235 | #size-cells = <0>; | ||
236 | compatible = "fsl,gianfar-tbi"; | ||
237 | reg = <0x26520 0x20>; | ||
238 | }; | ||
239 | |||
240 | mdio@26520 { | ||
241 | #address-cells = <1>; | ||
242 | #size-cells = <0>; | ||
243 | compatible = "fsl,gianfar-tbi"; | ||
244 | reg = <0x520 0x20>; | ||
245 | }; | ||
246 | |||
247 | enet0: ethernet@24000 { | ||
248 | #address-cells = <1>; | ||
249 | #size-cells = <1>; | ||
250 | cell-index = <0>; | ||
251 | device_type = "network"; | ||
252 | model = "eTSEC"; | ||
253 | compatible = "gianfar"; | ||
254 | reg = <0x24000 0x1000>; | ||
255 | ranges = <0x0 0x24000 0x1000>; | ||
256 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
257 | interrupts = <29 2 30 2 34 2>; | ||
258 | interrupt-parent = <&mpic>; | ||
259 | }; | ||
260 | |||
261 | enet1: ethernet@25000 { | ||
262 | #address-cells = <1>; | ||
263 | #size-cells = <1>; | ||
264 | cell-index = <1>; | ||
265 | device_type = "network"; | ||
266 | model = "eTSEC"; | ||
267 | compatible = "gianfar"; | ||
268 | reg = <0x25000 0x1000>; | ||
269 | ranges = <0x0 0x25000 0x1000>; | ||
270 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
271 | interrupts = <35 2 36 2 40 2>; | ||
272 | interrupt-parent = <&mpic>; | ||
273 | |||
274 | }; | ||
275 | |||
276 | enet2: ethernet@26000 { | ||
277 | #address-cells = <1>; | ||
278 | #size-cells = <1>; | ||
279 | cell-index = <2>; | ||
280 | device_type = "network"; | ||
281 | model = "eTSEC"; | ||
282 | compatible = "gianfar"; | ||
283 | reg = <0x26000 0x1000>; | ||
284 | ranges = <0x0 0x26000 0x1000>; | ||
285 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
286 | interrupts = <31 2 32 2 33 2>; | ||
287 | interrupt-parent = <&mpic>; | ||
288 | |||
289 | }; | ||
290 | |||
291 | sdhci@2e000 { | ||
292 | compatible = "fsl,p2020-esdhc", "fsl,esdhc"; | ||
293 | reg = <0x2e000 0x1000>; | ||
294 | interrupts = <72 0x2>; | ||
295 | interrupt-parent = <&mpic>; | ||
296 | /* Filled in by U-Boot */ | ||
297 | clock-frequency = <0>; | ||
298 | }; | ||
299 | |||
300 | crypto@30000 { | ||
301 | compatible = "fsl,sec3.1", "fsl,sec3.0", "fsl,sec2.4", | ||
302 | "fsl,sec2.2", "fsl,sec2.1", "fsl,sec2.0"; | ||
303 | reg = <0x30000 0x10000>; | ||
304 | interrupts = <45 2 58 2>; | ||
305 | interrupt-parent = <&mpic>; | ||
306 | fsl,num-channels = <4>; | ||
307 | fsl,channel-fifo-len = <24>; | ||
308 | fsl,exec-units-mask = <0xbfe>; | ||
309 | fsl,descriptor-types-mask = <0x3ab0ebf>; | ||
310 | }; | ||
311 | |||
312 | mpic: pic@40000 { | ||
313 | interrupt-controller; | ||
314 | #address-cells = <0>; | ||
315 | #interrupt-cells = <2>; | ||
316 | reg = <0x40000 0x40000>; | ||
317 | compatible = "chrp,open-pic"; | ||
318 | device_type = "open-pic"; | ||
319 | }; | ||
320 | |||
321 | msi@41600 { | ||
322 | compatible = "fsl,p2020-msi", "fsl,mpic-msi"; | ||
323 | reg = <0x41600 0x80>; | ||
324 | msi-available-ranges = <0 0x100>; | ||
325 | interrupts = < | ||
326 | 0xe0 0 | ||
327 | 0xe1 0 | ||
328 | 0xe2 0 | ||
329 | 0xe3 0 | ||
330 | 0xe4 0 | ||
331 | 0xe5 0 | ||
332 | 0xe6 0 | ||
333 | 0xe7 0>; | ||
334 | interrupt-parent = <&mpic>; | ||
335 | }; | ||
336 | |||
337 | global-utilities@e0000 { //global utilities block | ||
338 | compatible = "fsl,p2020-guts"; | ||
339 | reg = <0xe0000 0x1000>; | ||
340 | fsl,has-rstcr; | ||
341 | }; | ||
342 | }; | ||
343 | |||
344 | pci0: pcie@ffe08000 { | ||
345 | compatible = "fsl,mpc8548-pcie"; | ||
346 | device_type = "pci"; | ||
347 | #interrupt-cells = <1>; | ||
348 | #size-cells = <2>; | ||
349 | #address-cells = <3>; | ||
350 | reg = <0 0xffe08000 0 0x1000>; | ||
351 | bus-range = <0 255>; | ||
352 | clock-frequency = <33333333>; | ||
353 | interrupt-parent = <&mpic>; | ||
354 | interrupts = <24 2>; | ||
355 | }; | ||
356 | |||
357 | pci1: pcie@ffe09000 { | ||
358 | compatible = "fsl,mpc8548-pcie"; | ||
359 | device_type = "pci"; | ||
360 | #interrupt-cells = <1>; | ||
361 | #size-cells = <2>; | ||
362 | #address-cells = <3>; | ||
363 | reg = <0 0xffe09000 0 0x1000>; | ||
364 | bus-range = <0 255>; | ||
365 | clock-frequency = <33333333>; | ||
366 | interrupt-parent = <&mpic>; | ||
367 | interrupts = <25 2>; | ||
368 | }; | ||
369 | |||
370 | pci2: pcie@ffe0a000 { | ||
371 | compatible = "fsl,mpc8548-pcie"; | ||
372 | device_type = "pci"; | ||
373 | #interrupt-cells = <1>; | ||
374 | #size-cells = <2>; | ||
375 | #address-cells = <3>; | ||
376 | reg = <0 0xffe0a000 0 0x1000>; | ||
377 | bus-range = <0 255>; | ||
378 | clock-frequency = <33333333>; | ||
379 | interrupt-parent = <&mpic>; | ||
380 | interrupts = <26 2>; | ||
381 | }; | ||
382 | }; | ||
diff --git a/arch/powerpc/boot/dts/p4080ds.dts b/arch/powerpc/boot/dts/p4080ds.dts index 2f0de24e3822..927f94d16e9b 100644 --- a/arch/powerpc/boot/dts/p4080ds.dts +++ b/arch/powerpc/boot/dts/p4080ds.dts | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * P4080DS Device Tree Source | 2 | * P4080DS Device Tree Source |
3 | * | 3 | * |
4 | * Copyright 2009 Freescale Semiconductor Inc. | 4 | * Copyright 2009-2011 Freescale Semiconductor Inc. |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the | 7 | * under the terms of the GNU General Public License as published by the |
@@ -33,6 +33,17 @@ | |||
33 | dma1 = &dma1; | 33 | dma1 = &dma1; |
34 | sdhc = &sdhc; | 34 | sdhc = &sdhc; |
35 | 35 | ||
36 | crypto = &crypto; | ||
37 | sec_jr0 = &sec_jr0; | ||
38 | sec_jr1 = &sec_jr1; | ||
39 | sec_jr2 = &sec_jr2; | ||
40 | sec_jr3 = &sec_jr3; | ||
41 | rtic_a = &rtic_a; | ||
42 | rtic_b = &rtic_b; | ||
43 | rtic_c = &rtic_c; | ||
44 | rtic_d = &rtic_d; | ||
45 | sec_mon = &sec_mon; | ||
46 | |||
36 | rio0 = &rapidio0; | 47 | rio0 = &rapidio0; |
37 | }; | 48 | }; |
38 | 49 | ||
@@ -236,22 +247,19 @@ | |||
236 | }; | 247 | }; |
237 | 248 | ||
238 | spi@110000 { | 249 | spi@110000 { |
239 | cell-index = <0>; | ||
240 | #address-cells = <1>; | 250 | #address-cells = <1>; |
241 | #size-cells = <0>; | 251 | #size-cells = <0>; |
242 | compatible = "fsl,espi"; | 252 | compatible = "fsl,p4080-espi", "fsl,mpc8536-espi"; |
243 | reg = <0x110000 0x1000>; | 253 | reg = <0x110000 0x1000>; |
244 | interrupts = <53 0x2>; | 254 | interrupts = <53 0x2>; |
245 | interrupt-parent = <&mpic>; | 255 | interrupt-parent = <&mpic>; |
246 | espi,num-ss-bits = <4>; | 256 | fsl,espi-num-chipselects = <4>; |
247 | mode = "cpu"; | ||
248 | 257 | ||
249 | fsl_m25p80@0 { | 258 | flash@0 { |
250 | #address-cells = <1>; | 259 | #address-cells = <1>; |
251 | #size-cells = <1>; | 260 | #size-cells = <1>; |
252 | compatible = "fsl,espi-flash"; | 261 | compatible = "spansion,s25sl12801"; |
253 | reg = <0>; | 262 | reg = <0>; |
254 | linux,modalias = "fsl_m25p80"; | ||
255 | spi-max-frequency = <40000000>; /* input clock */ | 263 | spi-max-frequency = <40000000>; /* input clock */ |
256 | partition@u-boot { | 264 | partition@u-boot { |
257 | label = "u-boot"; | 265 | label = "u-boot"; |
@@ -413,6 +421,79 @@ | |||
413 | dr_mode = "host"; | 421 | dr_mode = "host"; |
414 | phy_type = "ulpi"; | 422 | phy_type = "ulpi"; |
415 | }; | 423 | }; |
424 | |||
425 | crypto: crypto@300000 { | ||
426 | compatible = "fsl,sec-v4.0"; | ||
427 | #address-cells = <1>; | ||
428 | #size-cells = <1>; | ||
429 | reg = <0x300000 0x10000>; | ||
430 | ranges = <0 0x300000 0x10000>; | ||
431 | interrupt-parent = <&mpic>; | ||
432 | interrupts = <92 2>; | ||
433 | |||
434 | sec_jr0: jr@1000 { | ||
435 | compatible = "fsl,sec-v4.0-job-ring"; | ||
436 | reg = <0x1000 0x1000>; | ||
437 | interrupt-parent = <&mpic>; | ||
438 | interrupts = <88 2>; | ||
439 | }; | ||
440 | |||
441 | sec_jr1: jr@2000 { | ||
442 | compatible = "fsl,sec-v4.0-job-ring"; | ||
443 | reg = <0x2000 0x1000>; | ||
444 | interrupt-parent = <&mpic>; | ||
445 | interrupts = <89 2>; | ||
446 | }; | ||
447 | |||
448 | sec_jr2: jr@3000 { | ||
449 | compatible = "fsl,sec-v4.0-job-ring"; | ||
450 | reg = <0x3000 0x1000>; | ||
451 | interrupt-parent = <&mpic>; | ||
452 | interrupts = <90 2>; | ||
453 | }; | ||
454 | |||
455 | sec_jr3: jr@4000 { | ||
456 | compatible = "fsl,sec-v4.0-job-ring"; | ||
457 | reg = <0x4000 0x1000>; | ||
458 | interrupt-parent = <&mpic>; | ||
459 | interrupts = <91 2>; | ||
460 | }; | ||
461 | |||
462 | rtic@6000 { | ||
463 | compatible = "fsl,sec-v4.0-rtic"; | ||
464 | #address-cells = <1>; | ||
465 | #size-cells = <1>; | ||
466 | reg = <0x6000 0x100>; | ||
467 | ranges = <0x0 0x6100 0xe00>; | ||
468 | |||
469 | rtic_a: rtic-a@0 { | ||
470 | compatible = "fsl,sec-v4.0-rtic-memory"; | ||
471 | reg = <0x00 0x20 0x100 0x80>; | ||
472 | }; | ||
473 | |||
474 | rtic_b: rtic-b@20 { | ||
475 | compatible = "fsl,sec-v4.0-rtic-memory"; | ||
476 | reg = <0x20 0x20 0x200 0x80>; | ||
477 | }; | ||
478 | |||
479 | rtic_c: rtic-c@40 { | ||
480 | compatible = "fsl,sec-v4.0-rtic-memory"; | ||
481 | reg = <0x40 0x20 0x300 0x80>; | ||
482 | }; | ||
483 | |||
484 | rtic_d: rtic-d@60 { | ||
485 | compatible = "fsl,sec-v4.0-rtic-memory"; | ||
486 | reg = <0x60 0x20 0x500 0x80>; | ||
487 | }; | ||
488 | }; | ||
489 | }; | ||
490 | |||
491 | sec_mon: sec_mon@314000 { | ||
492 | compatible = "fsl,sec-v4.0-mon"; | ||
493 | reg = <0x314000 0x1000>; | ||
494 | interrupt-parent = <&mpic>; | ||
495 | interrupts = <93 2>; | ||
496 | }; | ||
416 | }; | 497 | }; |
417 | 498 | ||
418 | rapidio0: rapidio@ffe0c0000 { | 499 | rapidio0: rapidio@ffe0c0000 { |
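[Editor's note] The p4080ds.dts eSPI hunk above is worth calling out: the controller moves to the documented fsl,mpc8536-espi binding (fsl,espi-num-chipselects replacing the ad-hoc espi,num-ss-bits and mode properties), and the flash child is described by its real part number instead of the fsl,espi-flash / linux,modalias pair. Condensed, the resulting node looks roughly like this; the partition offsets are illustrative and not taken from the patch:

	spi@110000 {
		#address-cells = <1>;
		#size-cells = <0>;
		compatible = "fsl,p4080-espi", "fsl,mpc8536-espi";
		reg = <0x110000 0x1000>;
		interrupts = <53 0x2>;
		interrupt-parent = <&mpic>;
		fsl,espi-num-chipselects = <4>;

		flash@0 {
			#address-cells = <1>;
			#size-cells = <1>;
			compatible = "spansion,s25sl12801";
			reg = <0>;			/* chip select 0 */
			spi-max-frequency = <40000000>;	/* input clock */

			partition@0 {			/* offsets illustrative */
				label = "u-boot";
				reg = <0x0 0x100000>;
			};
		};
	};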
diff --git a/arch/powerpc/boot/dts/pcm030.dts b/arch/powerpc/boot/dts/pcm030.dts index 8a4ec30b21ae..9e354997eb7e 100644 --- a/arch/powerpc/boot/dts/pcm030.dts +++ b/arch/powerpc/boot/dts/pcm030.dts | |||
@@ -12,246 +12,92 @@ | |||
12 | * option) any later version. | 12 | * option) any later version. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | /dts-v1/; | 15 | /include/ "mpc5200b.dtsi" |
16 | 16 | ||
17 | / { | 17 | / { |
18 | model = "phytec,pcm030"; | 18 | model = "phytec,pcm030"; |
19 | compatible = "phytec,pcm030"; | 19 | compatible = "phytec,pcm030"; |
20 | #address-cells = <1>; | ||
21 | #size-cells = <1>; | ||
22 | interrupt-parent = <&mpc5200_pic>; | ||
23 | |||
24 | cpus { | ||
25 | #address-cells = <1>; | ||
26 | #size-cells = <0>; | ||
27 | |||
28 | PowerPC,5200@0 { | ||
29 | device_type = "cpu"; | ||
30 | reg = <0>; | ||
31 | d-cache-line-size = <32>; | ||
32 | i-cache-line-size = <32>; | ||
33 | d-cache-size = <0x4000>; // L1, 16K | ||
34 | i-cache-size = <0x4000>; // L1, 16K | ||
35 | timebase-frequency = <0>; // from bootloader | ||
36 | bus-frequency = <0>; // from bootloader | ||
37 | clock-frequency = <0>; // from bootloader | ||
38 | }; | ||
39 | }; | ||
40 | |||
41 | memory { | ||
42 | device_type = "memory"; | ||
43 | reg = <0x00000000 0x04000000>; // 64MB | ||
44 | }; | ||
45 | 20 | ||
46 | soc5200@f0000000 { | 21 | soc5200@f0000000 { |
47 | #address-cells = <1>; | 22 | timer@600 { // General Purpose Timer |
48 | #size-cells = <1>; | ||
49 | compatible = "fsl,mpc5200b-immr"; | ||
50 | ranges = <0 0xf0000000 0x0000c000>; | ||
51 | bus-frequency = <0>; // from bootloader | ||
52 | system-frequency = <0>; // from bootloader | ||
53 | |||
54 | cdm@200 { | ||
55 | compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm"; | ||
56 | reg = <0x200 0x38>; | ||
57 | }; | ||
58 | |||
59 | mpc5200_pic: interrupt-controller@500 { | ||
60 | // 5200 interrupts are encoded into two levels; | ||
61 | interrupt-controller; | ||
62 | #interrupt-cells = <3>; | ||
63 | compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic"; | ||
64 | reg = <0x500 0x80>; | ||
65 | }; | ||
66 | |||
67 | timer@600 { // General Purpose Timer | ||
68 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
69 | reg = <0x600 0x10>; | ||
70 | interrupts = <1 9 0>; | ||
71 | fsl,has-wdt; | 23 | fsl,has-wdt; |
72 | }; | 24 | }; |
73 | 25 | ||
74 | timer@610 { // General Purpose Timer | ||
75 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
76 | reg = <0x610 0x10>; | ||
77 | interrupts = <1 10 0>; | ||
78 | }; | ||
79 | |||
80 | gpt2: timer@620 { // General Purpose Timer in GPIO mode | 26 | gpt2: timer@620 { // General Purpose Timer in GPIO mode |
81 | compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; | 27 | compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; |
82 | reg = <0x620 0x10>; | ||
83 | interrupts = <1 11 0>; | ||
84 | gpio-controller; | 28 | gpio-controller; |
85 | #gpio-cells = <2>; | 29 | #gpio-cells = <2>; |
86 | }; | 30 | }; |
87 | 31 | ||
88 | gpt3: timer@630 { // General Purpose Timer in GPIO mode | 32 | gpt3: timer@630 { // General Purpose Timer in GPIO mode |
89 | compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; | 33 | compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; |
90 | reg = <0x630 0x10>; | ||
91 | interrupts = <1 12 0>; | ||
92 | gpio-controller; | 34 | gpio-controller; |
93 | #gpio-cells = <2>; | 35 | #gpio-cells = <2>; |
94 | }; | 36 | }; |
95 | 37 | ||
96 | gpt4: timer@640 { // General Purpose Timer in GPIO mode | 38 | gpt4: timer@640 { // General Purpose Timer in GPIO mode |
97 | compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; | 39 | compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; |
98 | reg = <0x640 0x10>; | ||
99 | interrupts = <1 13 0>; | ||
100 | gpio-controller; | 40 | gpio-controller; |
101 | #gpio-cells = <2>; | 41 | #gpio-cells = <2>; |
102 | }; | 42 | }; |
103 | 43 | ||
104 | gpt5: timer@650 { // General Purpose Timer in GPIO mode | 44 | gpt5: timer@650 { // General Purpose Timer in GPIO mode |
105 | compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; | 45 | compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; |
106 | reg = <0x650 0x10>; | ||
107 | interrupts = <1 14 0>; | ||
108 | gpio-controller; | 46 | gpio-controller; |
109 | #gpio-cells = <2>; | 47 | #gpio-cells = <2>; |
110 | }; | 48 | }; |
111 | 49 | ||
112 | gpt6: timer@660 { // General Purpose Timer in GPIO mode | 50 | gpt6: timer@660 { // General Purpose Timer in GPIO mode |
113 | compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; | 51 | compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; |
114 | reg = <0x660 0x10>; | ||
115 | interrupts = <1 15 0>; | ||
116 | gpio-controller; | 52 | gpio-controller; |
117 | #gpio-cells = <2>; | 53 | #gpio-cells = <2>; |
118 | }; | 54 | }; |
119 | 55 | ||
120 | gpt7: timer@670 { // General Purpose Timer in GPIO mode | 56 | gpt7: timer@670 { // General Purpose Timer in GPIO mode |
121 | compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; | 57 | compatible = "fsl,mpc5200b-gpt-gpio","fsl,mpc5200-gpt-gpio"; |
122 | reg = <0x670 0x10>; | ||
123 | interrupts = <1 16 0>; | ||
124 | gpio-controller; | ||
125 | #gpio-cells = <2>; | ||
126 | }; | ||
127 | |||
128 | rtc@800 { // Real time clock | ||
129 | compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc"; | ||
130 | reg = <0x800 0x100>; | ||
131 | interrupts = <1 5 0 1 6 0>; | ||
132 | }; | ||
133 | |||
134 | can@900 { | ||
135 | compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan"; | ||
136 | interrupts = <2 17 0>; | ||
137 | reg = <0x900 0x80>; | ||
138 | }; | ||
139 | |||
140 | can@980 { | ||
141 | compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan"; | ||
142 | interrupts = <2 18 0>; | ||
143 | reg = <0x980 0x80>; | ||
144 | }; | ||
145 | |||
146 | gpio_simple: gpio@b00 { | ||
147 | compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio"; | ||
148 | reg = <0xb00 0x40>; | ||
149 | interrupts = <1 7 0>; | ||
150 | gpio-controller; | 58 | gpio-controller; |
151 | #gpio-cells = <2>; | 59 | #gpio-cells = <2>; |
152 | }; | 60 | }; |
153 | 61 | ||
154 | gpio_wkup: gpio@c00 { | 62 | psc@2000 { /* PSC1 in ac97 mode */ |
155 | compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup"; | ||
156 | reg = <0xc00 0x40>; | ||
157 | interrupts = <1 8 0 0 3 0>; | ||
158 | gpio-controller; | ||
159 | #gpio-cells = <2>; | ||
160 | }; | ||
161 | |||
162 | spi@f00 { | ||
163 | compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi"; | ||
164 | reg = <0xf00 0x20>; | ||
165 | interrupts = <2 13 0 2 14 0>; | ||
166 | }; | ||
167 | |||
168 | usb@1000 { | ||
169 | compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be"; | ||
170 | reg = <0x1000 0xff>; | ||
171 | interrupts = <2 6 0>; | ||
172 | }; | ||
173 | |||
174 | dma-controller@1200 { | ||
175 | compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm"; | ||
176 | reg = <0x1200 0x80>; | ||
177 | interrupts = <3 0 0 3 1 0 3 2 0 3 3 0 | ||
178 | 3 4 0 3 5 0 3 6 0 3 7 0 | ||
179 | 3 8 0 3 9 0 3 10 0 3 11 0 | ||
180 | 3 12 0 3 13 0 3 14 0 3 15 0>; | ||
181 | }; | ||
182 | |||
183 | xlb@1f00 { | ||
184 | compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb"; | ||
185 | reg = <0x1f00 0x100>; | ||
186 | }; | ||
187 | |||
188 | ac97@2000 { /* PSC1 in ac97 mode */ | ||
189 | compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97"; | 63 | compatible = "mpc5200b-psc-ac97","fsl,mpc5200b-psc-ac97"; |
190 | cell-index = <0>; | 64 | cell-index = <0>; |
191 | reg = <0x2000 0x100>; | ||
192 | interrupts = <2 1 0>; | ||
193 | }; | 65 | }; |
194 | 66 | ||
195 | /* PSC2 port is used by CAN1/2 */ | 67 | /* PSC2 port is used by CAN1/2 */ |
68 | psc@2200 { | ||
69 | status = "disabled"; | ||
70 | }; | ||
196 | 71 | ||
197 | serial@2400 { /* PSC3 in UART mode */ | 72 | psc@2400 { /* PSC3 in UART mode */ |
198 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 73 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
199 | cell-index = <2>; | ||
200 | reg = <0x2400 0x100>; | ||
201 | interrupts = <2 3 0>; | ||
202 | }; | 74 | }; |
203 | 75 | ||
204 | /* PSC4 is ??? */ | 76 | /* PSC4 is ??? */ |
77 | psc@2600 { | ||
78 | status = "disabled"; | ||
79 | }; | ||
205 | 80 | ||
206 | /* PSC5 is ??? */ | 81 | /* PSC5 is ??? */ |
82 | psc@2800 { | ||
83 | status = "disabled"; | ||
84 | }; | ||
207 | 85 | ||
208 | serial@2c00 { /* PSC6 in UART mode */ | 86 | psc@2c00 { /* PSC6 in UART mode */ |
209 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 87 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
210 | cell-index = <5>; | ||
211 | reg = <0x2c00 0x100>; | ||
212 | interrupts = <2 4 0>; | ||
213 | }; | 88 | }; |
214 | 89 | ||
215 | ethernet@3000 { | 90 | ethernet@3000 { |
216 | compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec"; | ||
217 | reg = <0x3000 0x400>; | ||
218 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
219 | interrupts = <2 5 0>; | ||
220 | phy-handle = <&phy0>; | 91 | phy-handle = <&phy0>; |
221 | }; | 92 | }; |
222 | 93 | ||
223 | mdio@3000 { | 94 | mdio@3000 { |
224 | #address-cells = <1>; | ||
225 | #size-cells = <0>; | ||
226 | compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio"; | ||
227 | reg = <0x3000 0x400>; // fec range, since we need to setup fec interrupts | ||
228 | interrupts = <2 5 0>; // these are for "mii command finished", not link changes & co. | ||
229 | |||
230 | phy0: ethernet-phy@0 { | 95 | phy0: ethernet-phy@0 { |
231 | reg = <0>; | 96 | reg = <0>; |
232 | }; | 97 | }; |
233 | }; | 98 | }; |
234 | 99 | ||
235 | ata@3a00 { | ||
236 | compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata"; | ||
237 | reg = <0x3a00 0x100>; | ||
238 | interrupts = <2 7 0>; | ||
239 | }; | ||
240 | |||
241 | i2c@3d00 { | ||
242 | #address-cells = <1>; | ||
243 | #size-cells = <0>; | ||
244 | compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c"; | ||
245 | reg = <0x3d00 0x40>; | ||
246 | interrupts = <2 15 0>; | ||
247 | }; | ||
248 | |||
249 | i2c@3d40 { | 100 | i2c@3d40 { |
250 | #address-cells = <1>; | ||
251 | #size-cells = <0>; | ||
252 | compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c"; | ||
253 | reg = <0x3d40 0x40>; | ||
254 | interrupts = <2 16 0>; | ||
255 | rtc@51 { | 101 | rtc@51 { |
256 | compatible = "nxp,pcf8563"; | 102 | compatible = "nxp,pcf8563"; |
257 | reg = <0x51>; | 103 | reg = <0x51>; |
@@ -259,6 +105,7 @@ | |||
259 | eeprom@52 { | 105 | eeprom@52 { |
260 | compatible = "catalyst,24c32"; | 106 | compatible = "catalyst,24c32"; |
261 | reg = <0x52>; | 107 | reg = <0x52>; |
108 | pagesize = <32>; | ||
262 | }; | 109 | }; |
263 | }; | 110 | }; |
264 | 111 | ||
@@ -269,12 +116,6 @@ | |||
269 | }; | 116 | }; |
270 | 117 | ||
271 | pci@f0000d00 { | 118 | pci@f0000d00 { |
272 | #interrupt-cells = <1>; | ||
273 | #size-cells = <2>; | ||
274 | #address-cells = <3>; | ||
275 | device_type = "pci"; | ||
276 | compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci"; | ||
277 | reg = <0xf0000d00 0x100>; | ||
278 | interrupt-map-mask = <0xf800 0 0 7>; | 119 | interrupt-map-mask = <0xf800 0 0 7>; |
279 | interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3 // 1st slot | 120 | interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3 // 1st slot |
280 | 0xc000 0 0 2 &mpc5200_pic 1 1 3 | 121 | 0xc000 0 0 2 &mpc5200_pic 1 1 3 |
@@ -285,11 +126,12 @@ | |||
285 | 0xc800 0 0 2 &mpc5200_pic 1 2 3 | 126 | 0xc800 0 0 2 &mpc5200_pic 1 2 3 |
286 | 0xc800 0 0 3 &mpc5200_pic 1 3 3 | 127 | 0xc800 0 0 3 &mpc5200_pic 1 3 3 |
287 | 0xc800 0 0 4 &mpc5200_pic 0 0 3>; | 128 | 0xc800 0 0 4 &mpc5200_pic 0 0 3>; |
288 | clock-frequency = <0>; // From boot loader | ||
289 | interrupts = <2 8 0 2 9 0 2 10 0>; | ||
290 | bus-range = <0 0>; | ||
291 | ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000 | 129 | ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000 |
292 | 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000 | 130 | 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000 |
293 | 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>; | 131 | 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>; |
294 | }; | 132 | }; |
133 | |||
134 | localbus { | ||
135 | status = "disabled"; | ||
136 | }; | ||
295 | }; | 137 | }; |
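[Editor's note] One small functional change rides along with the pcm030 conversion above: the I2C EEPROM gains pagesize = <32>, which lets the at24 driver batch writes into the device's 32-byte pages instead of falling back to its conservative byte-at-a-time default. The node, reproduced here for reference, is simply:

	eeprom@52 {
		compatible = "catalyst,24c32";
		reg = <0x52>;		/* I2C slave address */
		pagesize = <32>;	/* write-page size in bytes */
	};

The pcm032 tree below receives the same property and additionally replaces the invalid "at24,24c32" compatible with "catalyst,24c32".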
diff --git a/arch/powerpc/boot/dts/pcm032.dts b/arch/powerpc/boot/dts/pcm032.dts index 85d857a5d46e..1dd478bfff96 100644 --- a/arch/powerpc/boot/dts/pcm032.dts +++ b/arch/powerpc/boot/dts/pcm032.dts | |||
@@ -12,99 +12,37 @@ | |||
12 | * option) any later version. | 12 | * option) any later version. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | /dts-v1/; | 15 | /include/ "mpc5200b.dtsi" |
16 | 16 | ||
17 | / { | 17 | / { |
18 | model = "phytec,pcm032"; | 18 | model = "phytec,pcm032"; |
19 | compatible = "phytec,pcm032"; | 19 | compatible = "phytec,pcm032"; |
20 | #address-cells = <1>; | ||
21 | #size-cells = <1>; | ||
22 | interrupt-parent = <&mpc5200_pic>; | ||
23 | |||
24 | cpus { | ||
25 | #address-cells = <1>; | ||
26 | #size-cells = <0>; | ||
27 | |||
28 | PowerPC,5200@0 { | ||
29 | device_type = "cpu"; | ||
30 | reg = <0>; | ||
31 | d-cache-line-size = <32>; | ||
32 | i-cache-line-size = <32>; | ||
33 | d-cache-size = <0x4000>; // L1, 16K | ||
34 | i-cache-size = <0x4000>; // L1, 16K | ||
35 | timebase-frequency = <0>; // from bootloader | ||
36 | bus-frequency = <0>; // from bootloader | ||
37 | clock-frequency = <0>; // from bootloader | ||
38 | }; | ||
39 | }; | ||
40 | 20 | ||
41 | memory { | 21 | memory { |
42 | device_type = "memory"; | ||
43 | reg = <0x00000000 0x08000000>; // 128MB | 22 | reg = <0x00000000 0x08000000>; // 128MB |
44 | }; | 23 | }; |
45 | 24 | ||
46 | soc5200@f0000000 { | 25 | soc5200@f0000000 { |
47 | #address-cells = <1>; | 26 | timer@600 { // General Purpose Timer |
48 | #size-cells = <1>; | ||
49 | compatible = "fsl,mpc5200b-immr"; | ||
50 | ranges = <0 0xf0000000 0x0000c000>; | ||
51 | bus-frequency = <0>; // from bootloader | ||
52 | system-frequency = <0>; // from bootloader | ||
53 | |||
54 | cdm@200 { | ||
55 | compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm"; | ||
56 | reg = <0x200 0x38>; | ||
57 | }; | ||
58 | |||
59 | mpc5200_pic: interrupt-controller@500 { | ||
60 | // 5200 interrupts are encoded into two levels; | ||
61 | interrupt-controller; | ||
62 | #interrupt-cells = <3>; | ||
63 | compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic"; | ||
64 | reg = <0x500 0x80>; | ||
65 | }; | ||
66 | |||
67 | timer@600 { // General Purpose Timer | ||
68 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
69 | reg = <0x600 0x10>; | ||
70 | interrupts = <1 9 0>; | ||
71 | fsl,has-wdt; | 27 | fsl,has-wdt; |
72 | }; | 28 | }; |
73 | 29 | ||
74 | timer@610 { // General Purpose Timer | ||
75 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
76 | reg = <0x610 0x10>; | ||
77 | interrupts = <1 10 0>; | ||
78 | }; | ||
79 | |||
80 | gpt2: timer@620 { // General Purpose Timer in GPIO mode | 30 | gpt2: timer@620 { // General Purpose Timer in GPIO mode |
81 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
82 | reg = <0x620 0x10>; | ||
83 | interrupts = <1 11 0>; | ||
84 | gpio-controller; | 31 | gpio-controller; |
85 | #gpio-cells = <2>; | 32 | #gpio-cells = <2>; |
86 | }; | 33 | }; |
87 | 34 | ||
88 | gpt3: timer@630 { // General Purpose Timer in GPIO mode | 35 | gpt3: timer@630 { // General Purpose Timer in GPIO mode |
89 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
90 | reg = <0x630 0x10>; | ||
91 | interrupts = <1 12 0>; | ||
92 | gpio-controller; | 36 | gpio-controller; |
93 | #gpio-cells = <2>; | 37 | #gpio-cells = <2>; |
94 | }; | 38 | }; |
95 | 39 | ||
96 | gpt4: timer@640 { // General Purpose Timer in GPIO mode | 40 | gpt4: timer@640 { // General Purpose Timer in GPIO mode |
97 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
98 | reg = <0x640 0x10>; | ||
99 | interrupts = <1 13 0>; | ||
100 | gpio-controller; | 41 | gpio-controller; |
101 | #gpio-cells = <2>; | 42 | #gpio-cells = <2>; |
102 | }; | 43 | }; |
103 | 44 | ||
104 | gpt5: timer@650 { // General Purpose Timer in GPIO mode | 45 | gpt5: timer@650 { // General Purpose Timer in GPIO mode |
105 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
106 | reg = <0x650 0x10>; | ||
107 | interrupts = <1 14 0>; | ||
108 | gpio-controller; | 46 | gpio-controller; |
109 | #gpio-cells = <2>; | 47 | #gpio-cells = <2>; |
110 | }; | 48 | }; |
@@ -118,163 +56,62 @@ | |||
118 | }; | 56 | }; |
119 | 57 | ||
120 | gpt7: timer@670 { // General Purpose Timer in GPIO mode | 58 | gpt7: timer@670 { // General Purpose Timer in GPIO mode |
121 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
122 | reg = <0x670 0x10>; | ||
123 | interrupts = <1 16 0>; | ||
124 | gpio-controller; | 59 | gpio-controller; |
125 | #gpio-cells = <2>; | 60 | #gpio-cells = <2>; |
126 | }; | 61 | }; |
127 | 62 | ||
128 | rtc@800 { // Real time clock | 63 | psc@2000 { /* PSC1 is ac97 */ |
129 | compatible = "fsl,mpc5200b-rtc","fsl,mpc5200-rtc"; | ||
130 | reg = <0x800 0x100>; | ||
131 | interrupts = <1 5 0 1 6 0>; | ||
132 | }; | ||
133 | |||
134 | can@900 { | ||
135 | compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan"; | ||
136 | interrupts = <2 17 0>; | ||
137 | reg = <0x900 0x80>; | ||
138 | }; | ||
139 | |||
140 | can@980 { | ||
141 | compatible = "fsl,mpc5200b-mscan","fsl,mpc5200-mscan"; | ||
142 | interrupts = <2 18 0>; | ||
143 | reg = <0x980 0x80>; | ||
144 | }; | ||
145 | |||
146 | gpio_simple: gpio@b00 { | ||
147 | compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio"; | ||
148 | reg = <0xb00 0x40>; | ||
149 | interrupts = <1 7 0>; | ||
150 | gpio-controller; | ||
151 | #gpio-cells = <2>; | ||
152 | }; | ||
153 | |||
154 | gpio_wkup: gpio@c00 { | ||
155 | compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup"; | ||
156 | reg = <0xc00 0x40>; | ||
157 | interrupts = <1 8 0 0 3 0>; | ||
158 | gpio-controller; | ||
159 | #gpio-cells = <2>; | ||
160 | }; | ||
161 | |||
162 | spi@f00 { | ||
163 | compatible = "fsl,mpc5200b-spi","fsl,mpc5200-spi"; | ||
164 | reg = <0xf00 0x20>; | ||
165 | interrupts = <2 13 0 2 14 0>; | ||
166 | }; | ||
167 | |||
168 | usb@1000 { | ||
169 | compatible = "fsl,mpc5200b-ohci","fsl,mpc5200-ohci","ohci-be"; | ||
170 | reg = <0x1000 0xff>; | ||
171 | interrupts = <2 6 0>; | ||
172 | }; | ||
173 | |||
174 | dma-controller@1200 { | ||
175 | compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm"; | ||
176 | reg = <0x1200 0x80>; | ||
177 | interrupts = <3 0 0 3 1 0 3 2 0 3 3 0 | ||
178 | 3 4 0 3 5 0 3 6 0 3 7 0 | ||
179 | 3 8 0 3 9 0 3 10 0 3 11 0 | ||
180 | 3 12 0 3 13 0 3 14 0 3 15 0>; | ||
181 | }; | ||
182 | |||
183 | xlb@1f00 { | ||
184 | compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb"; | ||
185 | reg = <0x1f00 0x100>; | ||
186 | }; | ||
187 | |||
188 | ac97@2000 { /* PSC1 is ac97 */ | ||
189 | compatible = "fsl,mpc5200b-psc-ac97","fsl,mpc5200-psc-ac97"; | 64 | compatible = "fsl,mpc5200b-psc-ac97","fsl,mpc5200-psc-ac97"; |
190 | cell-index = <0>; | 65 | cell-index = <0>; |
191 | reg = <0x2000 0x100>; | ||
192 | interrupts = <2 1 0>; | ||
193 | }; | 66 | }; |
194 | 67 | ||
195 | /* PSC2 port is used by CAN1/2 */ | 68 | /* PSC2 port is used by CAN1/2 */ |
69 | psc@2200 { | ||
70 | status = "disabled"; | ||
71 | }; | ||
196 | 72 | ||
197 | serial@2400 { /* PSC3 in UART mode */ | 73 | psc@2400 { /* PSC3 in UART mode */ |
198 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 74 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
199 | cell-index = <2>; | ||
200 | reg = <0x2400 0x100>; | ||
201 | interrupts = <2 3 0>; | ||
202 | }; | 75 | }; |
203 | 76 | ||
204 | /* PSC4 is ??? */ | 77 | /* PSC4 is ??? */ |
78 | psc@2600 { | ||
79 | status = "disabled"; | ||
80 | }; | ||
205 | 81 | ||
206 | /* PSC5 is ??? */ | 82 | /* PSC5 is ??? */ |
83 | psc@2800 { | ||
84 | status = "disabled"; | ||
85 | }; | ||
207 | 86 | ||
208 | serial@2c00 { /* PSC6 in UART mode */ | 87 | psc@2c00 { /* PSC6 in UART mode */ |
209 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 88 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
210 | cell-index = <5>; | ||
211 | reg = <0x2c00 0x100>; | ||
212 | interrupts = <2 4 0>; | ||
213 | }; | 89 | }; |
214 | 90 | ||
215 | ethernet@3000 { | 91 | ethernet@3000 { |
216 | compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec"; | ||
217 | reg = <0x3000 0x400>; | ||
218 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
219 | interrupts = <2 5 0>; | ||
220 | phy-handle = <&phy0>; | 92 | phy-handle = <&phy0>; |
221 | }; | 93 | }; |
222 | 94 | ||
223 | mdio@3000 { | 95 | mdio@3000 { |
224 | #address-cells = <1>; | ||
225 | #size-cells = <0>; | ||
226 | compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio"; | ||
227 | reg = <0x3000 0x400>; // fec range, since we need to setup fec interrupts | ||
228 | interrupts = <2 5 0>; // these are for "mii command finished", not link changes & co. | ||
229 | |||
230 | phy0: ethernet-phy@0 { | 96 | phy0: ethernet-phy@0 { |
231 | reg = <0>; | 97 | reg = <0>; |
232 | }; | 98 | }; |
233 | }; | 99 | }; |
234 | 100 | ||
235 | ata@3a00 { | ||
236 | compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata"; | ||
237 | reg = <0x3a00 0x100>; | ||
238 | interrupts = <2 7 0>; | ||
239 | }; | ||
240 | |||
241 | i2c@3d00 { | ||
242 | #address-cells = <1>; | ||
243 | #size-cells = <0>; | ||
244 | compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c"; | ||
245 | reg = <0x3d00 0x40>; | ||
246 | interrupts = <2 15 0>; | ||
247 | }; | ||
248 | |||
249 | i2c@3d40 { | 101 | i2c@3d40 { |
250 | #address-cells = <1>; | ||
251 | #size-cells = <0>; | ||
252 | compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c"; | ||
253 | reg = <0x3d40 0x40>; | ||
254 | interrupts = <2 16 0>; | ||
255 | rtc@51 { | 102 | rtc@51 { |
256 | compatible = "nxp,pcf8563"; | 103 | compatible = "nxp,pcf8563"; |
257 | reg = <0x51>; | 104 | reg = <0x51>; |
258 | }; | 105 | }; |
259 | eeprom@52 { | 106 | eeprom@52 { |
260 | compatible = "at24,24c32"; | 107 | compatible = "catalyst,24c32"; |
261 | reg = <0x52>; | 108 | reg = <0x52>; |
109 | pagesize = <32>; | ||
262 | }; | 110 | }; |
263 | }; | 111 | }; |
264 | |||
265 | sram@8000 { | ||
266 | compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram"; | ||
267 | reg = <0x8000 0x4000>; | ||
268 | }; | ||
269 | }; | 112 | }; |
270 | 113 | ||
271 | pci@f0000d00 { | 114 | pci@f0000d00 { |
272 | #interrupt-cells = <1>; | ||
273 | #size-cells = <2>; | ||
274 | #address-cells = <3>; | ||
275 | device_type = "pci"; | ||
276 | compatible = "fsl,mpc5200b-pci","fsl,mpc5200-pci"; | ||
277 | reg = <0xf0000d00 0x100>; | ||
278 | interrupt-map-mask = <0xf800 0 0 7>; | 115 | interrupt-map-mask = <0xf800 0 0 7>; |
279 | interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3 // 1st slot | 116 | interrupt-map = <0xc000 0 0 1 &mpc5200_pic 0 0 3 // 1st slot |
280 | 0xc000 0 0 2 &mpc5200_pic 1 1 3 | 117 | 0xc000 0 0 2 &mpc5200_pic 1 1 3 |
@@ -285,20 +122,12 @@ | |||
285 | 0xc800 0 0 2 &mpc5200_pic 1 2 3 | 122 | 0xc800 0 0 2 &mpc5200_pic 1 2 3 |
286 | 0xc800 0 0 3 &mpc5200_pic 1 3 3 | 123 | 0xc800 0 0 3 &mpc5200_pic 1 3 3 |
287 | 0xc800 0 0 4 &mpc5200_pic 0 0 3>; | 124 | 0xc800 0 0 4 &mpc5200_pic 0 0 3>; |
288 | clock-frequency = <0>; // From boot loader | ||
289 | interrupts = <2 8 0 2 9 0 2 10 0>; | ||
290 | bus-range = <0 0>; | ||
291 | ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000 | 125 | ranges = <0x42000000 0 0x80000000 0x80000000 0 0x20000000 |
292 | 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000 | 126 | 0x02000000 0 0xa0000000 0xa0000000 0 0x10000000 |
293 | 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>; | 127 | 0x01000000 0 0x00000000 0xb0000000 0 0x01000000>; |
294 | }; | 128 | }; |
295 | 129 | ||
296 | localbus { | 130 | localbus { |
297 | compatible = "fsl,mpc5200b-lpb","fsl,mpc5200-lpb","simple-bus"; | ||
298 | |||
299 | #address-cells = <2>; | ||
300 | #size-cells = <1>; | ||
301 | |||
302 | ranges = <0 0 0xfe000000 0x02000000 | 131 | ranges = <0 0 0xfe000000 0x02000000 |
303 | 1 0 0xfc000000 0x02000000 | 132 | 1 0 0xfc000000 0x02000000 |
304 | 2 0 0xfbe00000 0x00200000 | 133 | 2 0 0xfbe00000 0x00200000 |
@@ -351,40 +180,39 @@ | |||
351 | bank-width = <2>; | 180 | bank-width = <2>; |
352 | }; | 181 | }; |
353 | 182 | ||
354 | /* | 183 | /* |
355 | * example snippets for FPGA | 184 | * example snippets for FPGA |
356 | * | 185 | * |
357 | * fpga@3,0 { | 186 | * fpga@3,0 { |
358 | * compatible = "fpga_driver"; | 187 | * compatible = "fpga_driver"; |
359 | * reg = <3 0 0x02000000>; | 188 | * reg = <3 0 0x02000000>; |
360 | * bank-width = <4>; | 189 | * bank-width = <4>; |
361 | * }; | 190 | * }; |
362 | * | 191 | * |
363 | * fpga@4,0 { | 192 | * fpga@4,0 { |
364 | * compatible = "fpga_driver"; | 193 | * compatible = "fpga_driver"; |
365 | * reg = <4 0 0x02000000>; | 194 | * reg = <4 0 0x02000000>; |
366 | * bank-width = <4>; | 195 | * bank-width = <4>; |
367 | * }; | 196 | * }; |
368 | */ | 197 | */ |
369 | 198 | ||
370 | /* | 199 | /* |
371 | * example snippets for free chipselects | 200 | * example snippets for free chipselects |
372 | * | 201 | * |
373 | * device@5,0 { | 202 | * device@5,0 { |
374 | * compatible = "custom_driver"; | 203 | * compatible = "custom_driver"; |
375 | * reg = <5 0 0x02000000>; | 204 | * reg = <5 0 0x02000000>; |
376 | * }; | 205 | * }; |
377 | * | 206 | * |
378 | * device@6,0 { | 207 | * device@6,0 { |
379 | * compatible = "custom_driver"; | 208 | * compatible = "custom_driver"; |
380 | * reg = <6 0 0x02000000>; | 209 | * reg = <6 0 0x02000000>; |
381 | * }; | 210 | * }; |
382 | * | 211 | * |
383 | * device@7,0 { | 212 | * device@7,0 { |
384 | * compatible = "custom_driver"; | 213 | * compatible = "custom_driver"; |
385 | * reg = <7 0 0x02000000>; | 214 | * reg = <7 0 0x02000000>; |
386 | * }; | 215 | * }; |
387 | */ | 216 | */ |
388 | }; | 217 | }; |
389 | }; | 218 | }; |
390 | |||
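The hunks above strip this MPC5200B board file down to a delta against the shared mpc5200b.dtsi: properties common to every board (reg, interrupts, compatible strings, the SoC ranges) move into the include, and the board file keeps only what is board-specific, such as the FEC's PHY wiring and the I2C RTC/EEPROM children. A minimal sketch of that override style follows — a hypothetical board fragment, with node contents assumed rather than taken from this patch:

/include/ "mpc5200b.dtsi"

/ {
	soc5200@f0000000 {
		ethernet@3000 {
			phy-handle = <&phy0>;	/* board-specific PHY wiring */
		};

		mdio@3000 {
			phy0: ethernet-phy@0 {	/* PHY at MDIO address 0 */
				reg = <0>;
			};
		};
	};
};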
diff --git a/arch/powerpc/boot/dts/redwood.dts b/arch/powerpc/boot/dts/redwood.dts index 81636c01d906..d86a3a498118 100644 --- a/arch/powerpc/boot/dts/redwood.dts +++ b/arch/powerpc/boot/dts/redwood.dts | |||
@@ -358,8 +358,28 @@ | |||
358 | 0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>; | 358 | 0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>; |
359 | }; | 359 | }; |
360 | 360 | ||
361 | MSI: ppc4xx-msi@400300000 { | ||
362 | compatible = "amcc,ppc4xx-msi", "ppc4xx-msi"; | ||
363 | reg = < 0x4 0x00300000 0x100 | ||
364 | 0x4 0x00300000 0x100>; | ||
365 | sdr-base = <0x3B0>; | ||
366 | msi-data = <0x00000000>; | ||
367 | msi-mask = <0x44440000>; | ||
368 | interrupt-count = <3>; | ||
369 | interrupts =<0 1 2 3>; | ||
370 | interrupt-parent = <&UIC0>; | ||
371 | #interrupt-cells = <1>; | ||
372 | #address-cells = <0>; | ||
373 | #size-cells = <0>; | ||
374 | interrupt-map = <0 &UIC0 0xC 1 | ||
375 | 1 &UIC0 0x0D 1 | ||
376 | 2 &UIC0 0x0E 1 | ||
377 | 3 &UIC0 0x0F 1>; | ||
378 | }; | ||
379 | |||
361 | }; | 380 | }; |
362 | 381 | ||
382 | |||
363 | chosen { | 383 | chosen { |
364 | linux,stdout-path = "/plb/opb/serial@ef600200"; | 384 | linux,stdout-path = "/plb/opb/serial@ef600200"; |
365 | }; | 385 | }; |
diff --git a/arch/powerpc/boot/dts/uc101.dts b/arch/powerpc/boot/dts/uc101.dts index 019264c62904..ba83d5488ec6 100644 --- a/arch/powerpc/boot/dts/uc101.dts +++ b/arch/powerpc/boot/dts/uc101.dts | |||
@@ -11,79 +11,24 @@ | |||
11 | * option) any later version. | 11 | * option) any later version. |
12 | */ | 12 | */ |
13 | 13 | ||
14 | /dts-v1/; | 14 | /include/ "mpc5200b.dtsi" |
15 | 15 | ||
16 | / { | 16 | / { |
17 | model = "manroland,uc101"; | 17 | model = "manroland,uc101"; |
18 | compatible = "manroland,uc101"; | 18 | compatible = "manroland,uc101"; |
19 | #address-cells = <1>; | ||
20 | #size-cells = <1>; | ||
21 | interrupt-parent = <&mpc5200_pic>; | ||
22 | |||
23 | cpus { | ||
24 | #address-cells = <1>; | ||
25 | #size-cells = <0>; | ||
26 | |||
27 | PowerPC,5200@0 { | ||
28 | device_type = "cpu"; | ||
29 | reg = <0>; | ||
30 | d-cache-line-size = <32>; | ||
31 | i-cache-line-size = <32>; | ||
32 | d-cache-size = <0x4000>; // L1, 16K | ||
33 | i-cache-size = <0x4000>; // L1, 16K | ||
34 | timebase-frequency = <0>; // from bootloader | ||
35 | bus-frequency = <0>; // from bootloader | ||
36 | clock-frequency = <0>; // from bootloader | ||
37 | }; | ||
38 | }; | ||
39 | |||
40 | memory { | ||
41 | device_type = "memory"; | ||
42 | reg = <0x00000000 0x04000000>; // 64MB | ||
43 | }; | ||
44 | 19 | ||
45 | soc5200@f0000000 { | 20 | soc5200@f0000000 { |
46 | #address-cells = <1>; | ||
47 | #size-cells = <1>; | ||
48 | compatible = "fsl,mpc5200b-immr"; | ||
49 | ranges = <0 0xf0000000 0x0000c000>; | ||
50 | reg = <0xf0000000 0x00000100>; | ||
51 | bus-frequency = <0>; // from bootloader | ||
52 | system-frequency = <0>; // from bootloader | ||
53 | |||
54 | cdm@200 { | ||
55 | compatible = "fsl,mpc5200b-cdm","fsl,mpc5200-cdm"; | ||
56 | reg = <0x200 0x38>; | ||
57 | }; | ||
58 | |||
59 | mpc5200_pic: interrupt-controller@500 { | ||
60 | // 5200 interrupts are encoded into two levels; | ||
61 | interrupt-controller; | ||
62 | #interrupt-cells = <3>; | ||
63 | compatible = "fsl,mpc5200b-pic","fsl,mpc5200-pic"; | ||
64 | reg = <0x500 0x80>; | ||
65 | }; | ||
66 | |||
67 | gpt0: timer@600 { // General Purpose Timer in GPIO mode | 21 | gpt0: timer@600 { // General Purpose Timer in GPIO mode |
68 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
69 | reg = <0x600 0x10>; | ||
70 | interrupts = <1 9 0>; | ||
71 | gpio-controller; | 22 | gpio-controller; |
72 | #gpio-cells = <2>; | 23 | #gpio-cells = <2>; |
73 | }; | 24 | }; |
74 | 25 | ||
75 | gpt1: timer@610 { // General Purpose Timer in GPIO mode | 26 | gpt1: timer@610 { // General Purpose Timer in GPIO mode |
76 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
77 | reg = <0x610 0x10>; | ||
78 | interrupts = <1 10 0>; | ||
79 | gpio-controller; | 27 | gpio-controller; |
80 | #gpio-cells = <2>; | 28 | #gpio-cells = <2>; |
81 | }; | 29 | }; |
82 | 30 | ||
83 | gpt2: timer@620 { // General Purpose Timer in GPIO mode | 31 | gpt2: timer@620 { // General Purpose Timer in GPIO mode |
84 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
85 | reg = <0x620 0x10>; | ||
86 | interrupts = <1 11 0>; | ||
87 | gpio-controller; | 32 | gpio-controller; |
88 | #gpio-cells = <2>; | 33 | #gpio-cells = <2>; |
89 | }; | 34 | }; |
@@ -97,118 +42,85 @@ | |||
97 | }; | 42 | }; |
98 | 43 | ||
99 | gpt4: timer@640 { // General Purpose Timer in GPIO mode | 44 | gpt4: timer@640 { // General Purpose Timer in GPIO mode |
100 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
101 | reg = <0x640 0x10>; | ||
102 | interrupts = <1 13 0>; | ||
103 | gpio-controller; | 45 | gpio-controller; |
104 | #gpio-cells = <2>; | 46 | #gpio-cells = <2>; |
105 | }; | 47 | }; |
106 | 48 | ||
107 | gpt5: timer@650 { // General Purpose Timer in GPIO mode | 49 | gpt5: timer@650 { // General Purpose Timer in GPIO mode |
108 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
109 | reg = <0x650 0x10>; | ||
110 | interrupts = <1 14 0>; | ||
111 | gpio-controller; | 50 | gpio-controller; |
112 | #gpio-cells = <2>; | 51 | #gpio-cells = <2>; |
113 | }; | 52 | }; |
114 | 53 | ||
115 | gpt6: timer@660 { // General Purpose Timer in GPIO mode | 54 | gpt6: timer@660 { // General Purpose Timer in GPIO mode |
116 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
117 | reg = <0x660 0x10>; | ||
118 | interrupts = <1 15 0>; | ||
119 | gpio-controller; | 55 | gpio-controller; |
120 | #gpio-cells = <2>; | 56 | #gpio-cells = <2>; |
121 | }; | 57 | }; |
122 | 58 | ||
123 | gpt7: timer@670 { // General Purpose Timer in GPIO mode | 59 | gpt7: timer@670 { // General Purpose Timer in GPIO mode |
124 | compatible = "fsl,mpc5200b-gpt","fsl,mpc5200-gpt"; | ||
125 | reg = <0x670 0x10>; | ||
126 | interrupts = <1 16 0>; | ||
127 | gpio-controller; | 60 | gpio-controller; |
128 | #gpio-cells = <2>; | 61 | #gpio-cells = <2>; |
129 | }; | 62 | }; |
130 | 63 | ||
131 | gpio_simple: gpio@b00 { | 64 | rtc@800 { |
132 | compatible = "fsl,mpc5200b-gpio","fsl,mpc5200-gpio"; | 65 | status = "disabled"; |
133 | reg = <0xb00 0x40>; | ||
134 | interrupts = <1 7 0>; | ||
135 | gpio-controller; | ||
136 | #gpio-cells = <2>; | ||
137 | }; | 66 | }; |
138 | 67 | ||
139 | gpio_wkup: gpio@c00 { | 68 | can@900 { |
140 | compatible = "fsl,mpc5200b-gpio-wkup","fsl,mpc5200-gpio-wkup"; | 69 | status = "disabled"; |
141 | reg = <0xc00 0x40>; | 70 | }; |
142 | interrupts = <1 8 0 0 3 0>; | 71 | |
143 | gpio-controller; | 72 | can@980 { |
144 | #gpio-cells = <2>; | 73 | status = "disabled"; |
145 | }; | 74 | }; |
146 | 75 | ||
147 | dma-controller@1200 { | 76 | spi@f00 { |
148 | compatible = "fsl,mpc5200b-bestcomm","fsl,mpc5200-bestcomm"; | 77 | status = "disabled"; |
149 | reg = <0x1200 0x80>; | ||
150 | interrupts = <3 0 0 3 1 0 3 2 0 3 3 0 | ||
151 | 3 4 0 3 5 0 3 6 0 3 7 0 | ||
152 | 3 8 0 3 9 0 3 10 0 3 11 0 | ||
153 | 3 12 0 3 13 0 3 14 0 3 15 0>; | ||
154 | }; | 78 | }; |
155 | 79 | ||
156 | xlb@1f00 { | 80 | usb@1000 { |
157 | compatible = "fsl,mpc5200b-xlb","fsl,mpc5200-xlb"; | 81 | status = "disabled"; |
158 | reg = <0x1f00 0x100>; | ||
159 | }; | 82 | }; |
160 | 83 | ||
161 | serial@2000 { /* PSC1 in UART mode */ | 84 | psc@2000 { // PSC1 |
162 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 85 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
163 | reg = <0x2000 0x100>; | ||
164 | interrupts = <2 1 0>; | ||
165 | }; | 86 | }; |
166 | 87 | ||
167 | serial@2200 { /* PSC2 in UART mode */ | 88 | psc@2200 { // PSC2 |
168 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 89 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
169 | reg = <0x2200 0x100>; | ||
170 | interrupts = <2 2 0>; | ||
171 | }; | 90 | }; |
172 | 91 | ||
173 | serial@2c00 { /* PSC6 in UART mode */ | 92 | psc@2400 { // PSC3 |
93 | status = "disabled"; | ||
94 | }; | ||
95 | |||
96 | psc@2600 { // PSC4 | ||
97 | status = "disabled"; | ||
98 | }; | ||
99 | |||
100 | psc@2800 { // PSC5 | ||
101 | status = "disabled"; | ||
102 | }; | ||
103 | |||
104 | psc@2c00 { // PSC6 | ||
174 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; | 105 | compatible = "fsl,mpc5200b-psc-uart","fsl,mpc5200-psc-uart"; |
175 | reg = <0x2c00 0x100>; | ||
176 | interrupts = <2 4 0>; | ||
177 | }; | 106 | }; |
178 | 107 | ||
179 | ethernet@3000 { | 108 | ethernet@3000 { |
180 | compatible = "fsl,mpc5200b-fec","fsl,mpc5200-fec"; | ||
181 | reg = <0x3000 0x400>; | ||
182 | local-mac-address = [ 00 00 00 00 00 00 ]; | ||
183 | interrupts = <2 5 0>; | ||
184 | phy-handle = <&phy0>; | 109 | phy-handle = <&phy0>; |
185 | }; | 110 | }; |
186 | 111 | ||
187 | mdio@3000 { | 112 | mdio@3000 { |
188 | #address-cells = <1>; | ||
189 | #size-cells = <0>; | ||
190 | compatible = "fsl,mpc5200b-mdio","fsl,mpc5200-mdio"; | ||
191 | reg = <0x3000 0x400>; // fec range, since we need to setup fec interrupts | ||
192 | interrupts = <2 5 0>; // these are for "mii command finished", not link changes & co. | ||
193 | |||
194 | phy0: ethernet-phy@0 { | 113 | phy0: ethernet-phy@0 { |
195 | compatible = "intel,lxt971"; | 114 | compatible = "intel,lxt971"; |
196 | reg = <0>; | 115 | reg = <0>; |
197 | }; | 116 | }; |
198 | }; | 117 | }; |
199 | 118 | ||
200 | ata@3a00 { | 119 | i2c@3d00 { |
201 | compatible = "fsl,mpc5200b-ata","fsl,mpc5200-ata"; | 120 | status = "disabled"; |
202 | reg = <0x3a00 0x100>; | ||
203 | interrupts = <2 7 0>; | ||
204 | }; | 121 | }; |
205 | 122 | ||
206 | i2c@3d40 { | 123 | i2c@3d40 { |
207 | #address-cells = <1>; | ||
208 | #size-cells = <0>; | ||
209 | compatible = "fsl,mpc5200b-i2c","fsl,mpc5200-i2c","fsl-i2c"; | ||
210 | reg = <0x3d40 0x40>; | ||
211 | interrupts = <2 16 0>; | ||
212 | fsl,preserve-clocking; | 124 | fsl,preserve-clocking; |
213 | clock-frequency = <400000>; | 125 | clock-frequency = <400000>; |
214 | 126 | ||
@@ -221,19 +133,13 @@ | |||
221 | reg = <0x51>; | 133 | reg = <0x51>; |
222 | }; | 134 | }; |
223 | }; | 135 | }; |
136 | }; | ||
224 | 137 | ||
225 | sram@8000 { | 138 | pci@f0000d00 { |
226 | compatible = "fsl,mpc5200b-sram","fsl,mpc5200-sram"; | 139 | status = "disabled"; |
227 | reg = <0x8000 0x4000>; | ||
228 | }; | ||
229 | }; | 140 | }; |
230 | 141 | ||
231 | localbus { | 142 | localbus { |
232 | compatible = "fsl,mpc5200b-lpb","fsl,mpc5200-lpb","simple-bus"; | ||
233 | |||
234 | #address-cells = <2>; | ||
235 | #size-cells = <1>; | ||
236 | |||
237 | ranges = <0 0 0xff800000 0x00800000 | 143 | ranges = <0 0 0xff800000 0x00800000 |
238 | 1 0 0x80000000 0x00800000 | 144 | 1 0 0x80000000 0x00800000 |
239 | 3 0 0x80000000 0x00800000>; | 145 | 3 0 0x80000000 0x00800000>; |
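uc101.dts follows the same include-and-override recipe and additionally switches off the peripherals this board does not wire up: a node inherited from mpc5200b.dtsi is disabled simply by overriding its status property in the board file. A minimal sketch of that pattern — hypothetical node selection, not taken from this patch:

/include/ "mpc5200b.dtsi"

/ {
	soc5200@f0000000 {
		can@900 {
			status = "disabled";	/* no CAN transceiver fitted */
		};

		usb@1000 {
			status = "disabled";	/* USB not brought out */
		};
	};

	pci@f0000d00 {
		status = "disabled";	/* PCI not routed on this board */
	};
};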
diff --git a/arch/powerpc/boot/epapr.c b/arch/powerpc/boot/epapr.c new file mode 100644 index 000000000000..06c1961bd124 --- /dev/null +++ b/arch/powerpc/boot/epapr.c | |||
@@ -0,0 +1,66 @@ | |||
1 | /* | ||
2 | * Bootwrapper for ePAPR compliant firmwares | ||
3 | * | ||
4 | * Copyright 2010 David Gibson <david@gibson.dropbear.id.au>, IBM Corporation. | ||
5 | * | ||
6 | * Based on earlier bootwrappers by: | ||
7 | * (c) Benjamin Herrenschmidt <benh@kernel.crashing.org>, IBM Corp,\ | ||
8 | * and | ||
9 | * Scott Wood <scottwood@freescale.com> | ||
10 | * Copyright (c) 2007 Freescale Semiconductor, Inc. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify it | ||
13 | * under the terms of the GNU General Public License version 2 as published | ||
14 | * by the Free Software Foundation. | ||
15 | */ | ||
16 | |||
17 | #include "ops.h" | ||
18 | #include "stdio.h" | ||
19 | #include "io.h" | ||
20 | #include <libfdt.h> | ||
21 | |||
22 | BSS_STACK(4096); | ||
23 | |||
24 | #define EPAPR_SMAGIC 0x65504150 | ||
25 | #define EPAPR_EMAGIC 0x45504150 | ||
26 | |||
27 | static unsigned epapr_magic; | ||
28 | static unsigned long ima_size; | ||
29 | static unsigned long fdt_addr; | ||
30 | |||
31 | static void platform_fixups(void) | ||
32 | { | ||
33 | if ((epapr_magic != EPAPR_EMAGIC) | ||
34 | && (epapr_magic != EPAPR_SMAGIC)) | ||
35 | fatal("r6 contained 0x%08x instead of ePAPR magic number\n", | ||
36 | epapr_magic); | ||
37 | |||
38 | if (ima_size < (unsigned long)_end) | ||
39 | printf("WARNING: Image loaded outside IMA!" | ||
40 | " (_end=%p, ima_size=0x%lx)\n", _end, ima_size); | ||
41 | if (ima_size < fdt_addr) | ||
42 | printf("WARNING: Device tree address is outside IMA!" | ||
43 | "(fdt_addr=0x%lx, ima_size=0x%lx)\n", fdt_addr, | ||
44 | ima_size); | ||
45 | if (ima_size < fdt_addr + fdt_totalsize((void *)fdt_addr)) | ||
46 | printf("WARNING: Device tree extends outside IMA!" | ||
47 | " (fdt_addr=0x%lx, size=0x%x, ima_size=0x%lx\n", | ||
48 | fdt_addr, fdt_totalsize((void *)fdt_addr), ima_size); | ||
49 | } | ||
50 | |||
51 | void platform_init(unsigned long r3, unsigned long r4, unsigned long r5, | ||
52 | unsigned long r6, unsigned long r7) | ||
53 | { | ||
54 | epapr_magic = r6; | ||
55 | ima_size = r7; | ||
56 | fdt_addr = r3; | ||
57 | |||
58 | /* FIXME: we should process reserve entries */ | ||
59 | |||
60 | simple_alloc_init(_end, ima_size - (unsigned long)_end, 32, 64); | ||
61 | |||
62 | fdt_init((void *)fdt_addr); | ||
63 | |||
64 | serial_console_init(); | ||
65 | platform_ops.fixups = platform_fixups; | ||
66 | } | ||
diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper index cb97e7511d7e..c74531af72c0 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper | |||
@@ -39,6 +39,7 @@ dts= | |||
39 | cacheit= | 39 | cacheit= |
40 | binary= | 40 | binary= |
41 | gzip=.gz | 41 | gzip=.gz |
42 | pie= | ||
42 | 43 | ||
43 | # cross-compilation prefix | 44 | # cross-compilation prefix |
44 | CROSS= | 45 | CROSS= |
@@ -157,9 +158,10 @@ pmac|chrp) | |||
157 | platformo=$object/of.o | 158 | platformo=$object/of.o |
158 | ;; | 159 | ;; |
159 | coff) | 160 | coff) |
160 | platformo=$object/of.o | 161 | platformo="$object/crt0.o $object/of.o" |
161 | lds=$object/zImage.coff.lds | 162 | lds=$object/zImage.coff.lds |
162 | link_address='0x500000' | 163 | link_address='0x500000' |
164 | pie= | ||
163 | ;; | 165 | ;; |
164 | miboot|uboot) | 166 | miboot|uboot) |
165 | # miboot and U-boot want just the bare bits, not an ELF binary | 167 | # miboot and U-boot want just the bare bits, not an ELF binary |
@@ -208,6 +210,7 @@ ps3) | |||
208 | ksection=.kernel:vmlinux.bin | 210 | ksection=.kernel:vmlinux.bin |
209 | isection=.kernel:initrd | 211 | isection=.kernel:initrd |
210 | link_address='' | 212 | link_address='' |
213 | pie= | ||
211 | ;; | 214 | ;; |
212 | ep88xc|ep405|ep8248e) | 215 | ep88xc|ep405|ep8248e) |
213 | platformo="$object/fixed-head.o $object/$platform.o" | 216 | platformo="$object/fixed-head.o $object/$platform.o" |
@@ -244,6 +247,10 @@ gamecube|wii) | |||
244 | treeboot-iss4xx-mpic) | 247 | treeboot-iss4xx-mpic) |
245 | platformo="$object/treeboot-iss4xx.o" | 248 | platformo="$object/treeboot-iss4xx.o" |
246 | ;; | 249 | ;; |
250 | epapr) | ||
251 | link_address='0x20000000' | ||
252 | pie=-pie | ||
253 | ;; | ||
247 | esac | 254 | esac |
248 | 255 | ||
249 | vmz="$tmpdir/`basename \"$kernel\"`.$ext" | 256 | vmz="$tmpdir/`basename \"$kernel\"`.$ext" |
@@ -251,7 +258,7 @@ if [ -z "$cacheit" -o ! -f "$vmz$gzip" -o "$vmz$gzip" -ot "$kernel" ]; then | |||
251 | ${CROSS}objcopy $objflags "$kernel" "$vmz.$$" | 258 | ${CROSS}objcopy $objflags "$kernel" "$vmz.$$" |
252 | 259 | ||
253 | if [ -n "$gzip" ]; then | 260 | if [ -n "$gzip" ]; then |
254 | gzip -f -9 "$vmz.$$" | 261 | gzip -n -f -9 "$vmz.$$" |
255 | fi | 262 | fi |
256 | 263 | ||
257 | if [ -n "$cacheit" ]; then | 264 | if [ -n "$cacheit" ]; then |
@@ -310,9 +317,9 @@ fi | |||
310 | 317 | ||
311 | if [ "$platform" != "miboot" ]; then | 318 | if [ "$platform" != "miboot" ]; then |
312 | if [ -n "$link_address" ] ; then | 319 | if [ -n "$link_address" ] ; then |
313 | text_start="-Ttext $link_address --defsym _start=$link_address" | 320 | text_start="-Ttext $link_address" |
314 | fi | 321 | fi |
315 | ${CROSS}ld -m elf32ppc -T $lds $text_start -o "$ofile" \ | 322 | ${CROSS}ld -m elf32ppc -T $lds $text_start $pie -o "$ofile" \ |
316 | $platformo $tmp $object/wrapper.a | 323 | $platformo $tmp $object/wrapper.a |
317 | rm $tmp | 324 | rm $tmp |
318 | fi | 325 | fi |
@@ -336,7 +343,7 @@ coff) | |||
336 | $objbin/hack-coff "$ofile" | 343 | $objbin/hack-coff "$ofile" |
337 | ;; | 344 | ;; |
338 | cuboot*) | 345 | cuboot*) |
339 | gzip -f -9 "$ofile" | 346 | gzip -n -f -9 "$ofile" |
340 | ${MKIMAGE} -A ppc -O linux -T kernel -C gzip -a "$base" -e "$entry" \ | 347 | ${MKIMAGE} -A ppc -O linux -T kernel -C gzip -a "$base" -e "$entry" \ |
341 | $uboot_version -d "$ofile".gz "$ofile" | 348 | $uboot_version -d "$ofile".gz "$ofile" |
342 | ;; | 349 | ;; |
@@ -383,6 +390,6 @@ ps3) | |||
383 | 390 | ||
384 | odir="$(dirname "$ofile.bin")" | 391 | odir="$(dirname "$ofile.bin")" |
385 | rm -f "$odir/otheros.bld" | 392 | rm -f "$odir/otheros.bld" |
386 | gzip --force -9 --stdout "$ofile.bin" > "$odir/otheros.bld" | 393 | gzip -n --force -9 --stdout "$ofile.bin" > "$odir/otheros.bld" |
387 | ;; | 394 | ;; |
388 | esac | 395 | esac |
diff --git a/arch/powerpc/boot/zImage.coff.lds.S b/arch/powerpc/boot/zImage.coff.lds.S index 856dc78b14ef..de4c9e3c9344 100644 --- a/arch/powerpc/boot/zImage.coff.lds.S +++ b/arch/powerpc/boot/zImage.coff.lds.S | |||
@@ -3,13 +3,13 @@ ENTRY(_zimage_start_opd) | |||
3 | EXTERN(_zimage_start_opd) | 3 | EXTERN(_zimage_start_opd) |
4 | SECTIONS | 4 | SECTIONS |
5 | { | 5 | { |
6 | _start = .; | ||
7 | .text : | 6 | .text : |
8 | { | 7 | { |
8 | _start = .; | ||
9 | *(.text) | 9 | *(.text) |
10 | *(.fixup) | 10 | *(.fixup) |
11 | _etext = .; | ||
11 | } | 12 | } |
12 | _etext = .; | ||
13 | . = ALIGN(4096); | 13 | . = ALIGN(4096); |
14 | .data : | 14 | .data : |
15 | { | 15 | { |
@@ -17,9 +17,7 @@ SECTIONS | |||
17 | *(.data*) | 17 | *(.data*) |
18 | *(__builtin_*) | 18 | *(__builtin_*) |
19 | *(.sdata*) | 19 | *(.sdata*) |
20 | __got2_start = .; | ||
21 | *(.got2) | 20 | *(.got2) |
22 | __got2_end = .; | ||
23 | 21 | ||
24 | _dtb_start = .; | 22 | _dtb_start = .; |
25 | *(.kernel:dtb) | 23 | *(.kernel:dtb) |
diff --git a/arch/powerpc/boot/zImage.lds.S b/arch/powerpc/boot/zImage.lds.S index 0962d62bdb50..2bd8731f1365 100644 --- a/arch/powerpc/boot/zImage.lds.S +++ b/arch/powerpc/boot/zImage.lds.S | |||
@@ -3,49 +3,64 @@ ENTRY(_zimage_start) | |||
3 | EXTERN(_zimage_start) | 3 | EXTERN(_zimage_start) |
4 | SECTIONS | 4 | SECTIONS |
5 | { | 5 | { |
6 | _start = .; | ||
7 | .text : | 6 | .text : |
8 | { | 7 | { |
8 | _start = .; | ||
9 | *(.text) | 9 | *(.text) |
10 | *(.fixup) | 10 | *(.fixup) |
11 | _etext = .; | ||
11 | } | 12 | } |
12 | _etext = .; | ||
13 | . = ALIGN(4096); | 13 | . = ALIGN(4096); |
14 | .data : | 14 | .data : |
15 | { | 15 | { |
16 | *(.rodata*) | 16 | *(.rodata*) |
17 | *(.data*) | 17 | *(.data*) |
18 | *(.sdata*) | 18 | *(.sdata*) |
19 | __got2_start = .; | ||
20 | *(.got2) | 19 | *(.got2) |
21 | __got2_end = .; | ||
22 | } | 20 | } |
21 | .dynsym : { *(.dynsym) } | ||
22 | .dynstr : { *(.dynstr) } | ||
23 | .dynamic : | ||
24 | { | ||
25 | __dynamic_start = .; | ||
26 | *(.dynamic) | ||
27 | } | ||
28 | .hash : { *(.hash) } | ||
29 | .interp : { *(.interp) } | ||
30 | .rela.dyn : { *(.rela*) } | ||
23 | 31 | ||
24 | . = ALIGN(8); | 32 | . = ALIGN(8); |
25 | _dtb_start = .; | 33 | .kernel:dtb : |
26 | .kernel:dtb : { *(.kernel:dtb) } | 34 | { |
27 | _dtb_end = .; | 35 | _dtb_start = .; |
28 | 36 | *(.kernel:dtb) | |
29 | . = ALIGN(4096); | 37 | _dtb_end = .; |
30 | _vmlinux_start = .; | 38 | } |
31 | .kernel:vmlinux.strip : { *(.kernel:vmlinux.strip) } | ||
32 | _vmlinux_end = .; | ||
33 | 39 | ||
34 | . = ALIGN(4096); | 40 | . = ALIGN(4096); |
35 | _initrd_start = .; | 41 | .kernel:vmlinux.strip : |
36 | .kernel:initrd : { *(.kernel:initrd) } | 42 | { |
37 | _initrd_end = .; | 43 | _vmlinux_start = .; |
44 | *(.kernel:vmlinux.strip) | ||
45 | _vmlinux_end = .; | ||
46 | } | ||
38 | 47 | ||
39 | . = ALIGN(4096); | 48 | . = ALIGN(4096); |
40 | _edata = .; | 49 | .kernel:initrd : |
50 | { | ||
51 | _initrd_start = .; | ||
52 | *(.kernel:initrd) | ||
53 | _initrd_end = .; | ||
54 | } | ||
41 | 55 | ||
42 | . = ALIGN(4096); | 56 | . = ALIGN(4096); |
43 | __bss_start = .; | ||
44 | .bss : | 57 | .bss : |
45 | { | 58 | { |
46 | *(.sbss) | 59 | _edata = .; |
47 | *(.bss) | 60 | __bss_start = .; |
61 | *(.sbss) | ||
62 | *(.bss) | ||
63 | *(COMMON) | ||
64 | _end = . ; | ||
48 | } | 65 | } |
49 | . = ALIGN(4096); | ||
50 | _end = . ; | ||
51 | } | 66 | } |
diff --git a/arch/powerpc/configs/40x/acadia_defconfig b/arch/powerpc/configs/40x/acadia_defconfig index 97fedceaa30b..4182c772340b 100644 --- a/arch/powerpc/configs/40x/acadia_defconfig +++ b/arch/powerpc/configs/40x/acadia_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_KALLSYMS_ALL=y | 9 | CONFIG_KALLSYMS_ALL=y |
10 | CONFIG_KALLSYMS_EXTRA_PASS=y | 10 | CONFIG_KALLSYMS_EXTRA_PASS=y |
11 | CONFIG_MODULES=y | 11 | CONFIG_MODULES=y |
diff --git a/arch/powerpc/configs/40x/ep405_defconfig b/arch/powerpc/configs/40x/ep405_defconfig index 33b3c24f4edd..2dbb293163f5 100644 --- a/arch/powerpc/configs/40x/ep405_defconfig +++ b/arch/powerpc/configs/40x/ep405_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_KALLSYMS_ALL=y | 9 | CONFIG_KALLSYMS_ALL=y |
10 | CONFIG_KALLSYMS_EXTRA_PASS=y | 10 | CONFIG_KALLSYMS_EXTRA_PASS=y |
11 | CONFIG_MODULES=y | 11 | CONFIG_MODULES=y |
diff --git a/arch/powerpc/configs/40x/hcu4_defconfig b/arch/powerpc/configs/40x/hcu4_defconfig index 4613079a0ab1..ebeb4accad65 100644 --- a/arch/powerpc/configs/40x/hcu4_defconfig +++ b/arch/powerpc/configs/40x/hcu4_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_KALLSYMS_ALL=y | 9 | CONFIG_KALLSYMS_ALL=y |
10 | CONFIG_KALLSYMS_EXTRA_PASS=y | 10 | CONFIG_KALLSYMS_EXTRA_PASS=y |
11 | CONFIG_MODULES=y | 11 | CONFIG_MODULES=y |
diff --git a/arch/powerpc/configs/40x/kilauea_defconfig b/arch/powerpc/configs/40x/kilauea_defconfig index 4e19ee7ce4ee..532ea9d93a15 100644 --- a/arch/powerpc/configs/40x/kilauea_defconfig +++ b/arch/powerpc/configs/40x/kilauea_defconfig | |||
@@ -5,13 +5,15 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_KALLSYMS_ALL=y | 9 | CONFIG_KALLSYMS_ALL=y |
10 | CONFIG_KALLSYMS_EXTRA_PASS=y | 10 | CONFIG_KALLSYMS_EXTRA_PASS=y |
11 | CONFIG_MODULES=y | 11 | CONFIG_MODULES=y |
12 | CONFIG_MODULE_UNLOAD=y | 12 | CONFIG_MODULE_UNLOAD=y |
13 | # CONFIG_BLK_DEV_BSG is not set | 13 | # CONFIG_BLK_DEV_BSG is not set |
14 | CONFIG_KILAUEA=y | 14 | CONFIG_KILAUEA=y |
15 | CONFIG_NO_HZ=y | ||
16 | CONFIG_HIGH_RES_TIMERS=y | ||
15 | # CONFIG_WALNUT is not set | 17 | # CONFIG_WALNUT is not set |
16 | CONFIG_SPARSE_IRQ=y | 18 | CONFIG_SPARSE_IRQ=y |
17 | CONFIG_PCI=y | 19 | CONFIG_PCI=y |
@@ -42,6 +44,9 @@ CONFIG_MTD_PHYSMAP_OF=y | |||
42 | CONFIG_MTD_NAND=y | 44 | CONFIG_MTD_NAND=y |
43 | CONFIG_MTD_NAND_NDFC=y | 45 | CONFIG_MTD_NAND_NDFC=y |
44 | CONFIG_PROC_DEVICETREE=y | 46 | CONFIG_PROC_DEVICETREE=y |
47 | CONFIG_PM=y | ||
48 | CONFIG_SUSPEND=y | ||
49 | CONFIG_PPC4xx_CPM=y | ||
45 | CONFIG_BLK_DEV_RAM=y | 50 | CONFIG_BLK_DEV_RAM=y |
46 | CONFIG_BLK_DEV_RAM_SIZE=35000 | 51 | CONFIG_BLK_DEV_RAM_SIZE=35000 |
47 | # CONFIG_MISC_DEVICES is not set | 52 | # CONFIG_MISC_DEVICES is not set |
diff --git a/arch/powerpc/configs/40x/makalu_defconfig b/arch/powerpc/configs/40x/makalu_defconfig index 651be09136fa..3c142ac1b344 100644 --- a/arch/powerpc/configs/40x/makalu_defconfig +++ b/arch/powerpc/configs/40x/makalu_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_KALLSYMS_ALL=y | 9 | CONFIG_KALLSYMS_ALL=y |
10 | CONFIG_KALLSYMS_EXTRA_PASS=y | 10 | CONFIG_KALLSYMS_EXTRA_PASS=y |
11 | CONFIG_MODULES=y | 11 | CONFIG_MODULES=y |
diff --git a/arch/powerpc/configs/40x/walnut_defconfig b/arch/powerpc/configs/40x/walnut_defconfig index ded455e18339..ff57d4828ffc 100644 --- a/arch/powerpc/configs/40x/walnut_defconfig +++ b/arch/powerpc/configs/40x/walnut_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_KALLSYMS_ALL=y | 9 | CONFIG_KALLSYMS_ALL=y |
10 | CONFIG_KALLSYMS_EXTRA_PASS=y | 10 | CONFIG_KALLSYMS_EXTRA_PASS=y |
11 | CONFIG_MODULES=y | 11 | CONFIG_MODULES=y |
diff --git a/arch/powerpc/configs/44x/arches_defconfig b/arch/powerpc/configs/44x/arches_defconfig index 63746a041d6b..3ed16d5c909d 100644 --- a/arch/powerpc/configs/44x/arches_defconfig +++ b/arch/powerpc/configs/44x/arches_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_MODULES=y | 9 | CONFIG_MODULES=y |
10 | CONFIG_MODULE_UNLOAD=y | 10 | CONFIG_MODULE_UNLOAD=y |
11 | # CONFIG_BLK_DEV_BSG is not set | 11 | # CONFIG_BLK_DEV_BSG is not set |
diff --git a/arch/powerpc/configs/44x/bamboo_defconfig b/arch/powerpc/configs/44x/bamboo_defconfig index f5f2a4e3e21b..b1b7d2c5c059 100644 --- a/arch/powerpc/configs/44x/bamboo_defconfig +++ b/arch/powerpc/configs/44x/bamboo_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_MODULES=y | 9 | CONFIG_MODULES=y |
10 | CONFIG_MODULE_UNLOAD=y | 10 | CONFIG_MODULE_UNLOAD=y |
11 | # CONFIG_BLK_DEV_BSG is not set | 11 | # CONFIG_BLK_DEV_BSG is not set |
diff --git a/arch/powerpc/configs/44x/bluestone_defconfig b/arch/powerpc/configs/44x/bluestone_defconfig new file mode 100644 index 000000000000..30a0a8e08fdd --- /dev/null +++ b/arch/powerpc/configs/44x/bluestone_defconfig | |||
@@ -0,0 +1,68 @@ | |||
1 | CONFIG_44x=y | ||
2 | CONFIG_EXPERIMENTAL=y | ||
3 | CONFIG_SYSVIPC=y | ||
4 | CONFIG_POSIX_MQUEUE=y | ||
5 | CONFIG_LOG_BUF_SHIFT=14 | ||
6 | CONFIG_BLK_DEV_INITRD=y | ||
7 | CONFIG_EXPERT=y | ||
8 | # CONFIG_VM_EVENT_COUNTERS is not set | ||
9 | # CONFIG_PCI_QUIRKS is not set | ||
10 | # CONFIG_COMPAT_BRK is not set | ||
11 | CONFIG_BLUESTONE=y | ||
12 | # CONFIG_EBONY is not set | ||
13 | # CONFIG_KVM_GUEST is not set | ||
14 | CONFIG_NO_HZ=y | ||
15 | CONFIG_HIGH_RES_TIMERS=y | ||
16 | CONFIG_SPARSE_IRQ=y | ||
17 | CONFIG_CMDLINE_BOOL=y | ||
18 | CONFIG_CMDLINE="" | ||
19 | CONFIG_NET=y | ||
20 | CONFIG_PACKET=y | ||
21 | CONFIG_UNIX=y | ||
22 | CONFIG_INET=y | ||
23 | CONFIG_IP_PNP=y | ||
24 | CONFIG_IP_PNP_DHCP=y | ||
25 | CONFIG_IP_PNP_BOOTP=y | ||
26 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
27 | CONFIG_CONNECTOR=y | ||
28 | CONFIG_MTD=y | ||
29 | CONFIG_MTD_PARTITIONS=y | ||
30 | CONFIG_MTD_CMDLINE_PARTS=y | ||
31 | CONFIG_MTD_OF_PARTS=y | ||
32 | CONFIG_MTD_CHAR=y | ||
33 | CONFIG_MTD_BLOCK=y | ||
34 | CONFIG_MTD_CFI=y | ||
35 | CONFIG_MTD_CFI_AMDSTD=y | ||
36 | CONFIG_MTD_PHYSMAP_OF=y | ||
37 | CONFIG_PROC_DEVICETREE=y | ||
38 | CONFIG_BLK_DEV_RAM=y | ||
39 | CONFIG_BLK_DEV_RAM_SIZE=35000 | ||
40 | CONFIG_NETDEVICES=y | ||
41 | CONFIG_NET_ETHERNET=y | ||
42 | CONFIG_IBM_NEW_EMAC=y | ||
43 | CONFIG_IBM_NEW_EMAC_RXB=256 | ||
44 | CONFIG_IBM_NEW_EMAC_TXB=256 | ||
45 | CONFIG_SERIAL_8250=y | ||
46 | CONFIG_SERIAL_8250_CONSOLE=y | ||
47 | CONFIG_SERIAL_8250_NR_UARTS=2 | ||
48 | CONFIG_SERIAL_8250_RUNTIME_UARTS=2 | ||
49 | CONFIG_SERIAL_8250_EXTENDED=y | ||
50 | CONFIG_SERIAL_8250_SHARE_IRQ=y | ||
51 | CONFIG_SERIAL_OF_PLATFORM=y | ||
52 | CONFIG_I2C=y | ||
53 | CONFIG_I2C_CHARDEV=y | ||
54 | CONFIG_I2C_IBM_IIC=y | ||
55 | CONFIG_SENSORS_AD7414=y | ||
56 | # CONFIG_HID_SUPPORT is not set | ||
57 | # CONFIG_USB_SUPPORT is not set | ||
58 | CONFIG_RTC_CLASS=y | ||
59 | CONFIG_RTC_DRV_M41T80=y | ||
60 | CONFIG_EXT2_FS=y | ||
61 | CONFIG_EXT3_FS=y | ||
62 | CONFIG_PROC_KCORE=y | ||
63 | CONFIG_TMPFS=y | ||
64 | CONFIG_CRAMFS=y | ||
65 | CONFIG_NFS_FS=y | ||
66 | CONFIG_NFS_V3=y | ||
67 | CONFIG_ROOT_NFS=y | ||
68 | CONFIG_NLS=y | ||
diff --git a/arch/powerpc/configs/44x/canyonlands_defconfig b/arch/powerpc/configs/44x/canyonlands_defconfig index 45c64d818b2a..a46942aac695 100644 --- a/arch/powerpc/configs/44x/canyonlands_defconfig +++ b/arch/powerpc/configs/44x/canyonlands_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_MODULES=y | 9 | CONFIG_MODULES=y |
10 | CONFIG_MODULE_UNLOAD=y | 10 | CONFIG_MODULE_UNLOAD=y |
11 | # CONFIG_BLK_DEV_BSG is not set | 11 | # CONFIG_BLK_DEV_BSG is not set |
@@ -42,6 +42,9 @@ CONFIG_MTD_PHYSMAP_OF=y | |||
42 | CONFIG_MTD_NAND=y | 42 | CONFIG_MTD_NAND=y |
43 | CONFIG_MTD_NAND_NDFC=y | 43 | CONFIG_MTD_NAND_NDFC=y |
44 | CONFIG_PROC_DEVICETREE=y | 44 | CONFIG_PROC_DEVICETREE=y |
45 | CONFIG_PM=y | ||
46 | CONFIG_SUSPEND=y | ||
47 | CONFIG_PPC4xx_CPM=y | ||
45 | CONFIG_BLK_DEV_RAM=y | 48 | CONFIG_BLK_DEV_RAM=y |
46 | CONFIG_BLK_DEV_RAM_SIZE=35000 | 49 | CONFIG_BLK_DEV_RAM_SIZE=35000 |
47 | # CONFIG_MISC_DEVICES is not set | 50 | # CONFIG_MISC_DEVICES is not set |
diff --git a/arch/powerpc/configs/44x/ebony_defconfig b/arch/powerpc/configs/44x/ebony_defconfig index fedd03fdf5d5..07d77e51f1ba 100644 --- a/arch/powerpc/configs/44x/ebony_defconfig +++ b/arch/powerpc/configs/44x/ebony_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_KALLSYMS_ALL=y | 9 | CONFIG_KALLSYMS_ALL=y |
10 | CONFIG_KALLSYMS_EXTRA_PASS=y | 10 | CONFIG_KALLSYMS_EXTRA_PASS=y |
11 | CONFIG_MODULES=y | 11 | CONFIG_MODULES=y |
diff --git a/arch/powerpc/configs/44x/eiger_defconfig b/arch/powerpc/configs/44x/eiger_defconfig index ebff7011282e..2ce7e9aff09e 100644 --- a/arch/powerpc/configs/44x/eiger_defconfig +++ b/arch/powerpc/configs/44x/eiger_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_MODULES=y | 9 | CONFIG_MODULES=y |
10 | CONFIG_MODULE_UNLOAD=y | 10 | CONFIG_MODULE_UNLOAD=y |
11 | # CONFIG_BLK_DEV_BSG is not set | 11 | # CONFIG_BLK_DEV_BSG is not set |
diff --git a/arch/powerpc/configs/44x/icon_defconfig b/arch/powerpc/configs/44x/icon_defconfig index 865e93fb41fd..18730ff9de7c 100644 --- a/arch/powerpc/configs/44x/icon_defconfig +++ b/arch/powerpc/configs/44x/icon_defconfig | |||
@@ -6,7 +6,7 @@ CONFIG_LOG_BUF_SHIFT=14 | |||
6 | CONFIG_SYSFS_DEPRECATED_V2=y | 6 | CONFIG_SYSFS_DEPRECATED_V2=y |
7 | CONFIG_BLK_DEV_INITRD=y | 7 | CONFIG_BLK_DEV_INITRD=y |
8 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 8 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
9 | CONFIG_EMBEDDED=y | 9 | CONFIG_EXPERT=y |
10 | CONFIG_MODULES=y | 10 | CONFIG_MODULES=y |
11 | CONFIG_MODULE_UNLOAD=y | 11 | CONFIG_MODULE_UNLOAD=y |
12 | # CONFIG_BLK_DEV_BSG is not set | 12 | # CONFIG_BLK_DEV_BSG is not set |
diff --git a/arch/powerpc/configs/44x/iss476-smp_defconfig b/arch/powerpc/configs/44x/iss476-smp_defconfig index 8ece4c774415..92f863ac8443 100644 --- a/arch/powerpc/configs/44x/iss476-smp_defconfig +++ b/arch/powerpc/configs/44x/iss476-smp_defconfig | |||
@@ -7,7 +7,7 @@ CONFIG_LOG_BUF_SHIFT=14 | |||
7 | CONFIG_SYSFS_DEPRECATED_V2=y | 7 | CONFIG_SYSFS_DEPRECATED_V2=y |
8 | CONFIG_BLK_DEV_INITRD=y | 8 | CONFIG_BLK_DEV_INITRD=y |
9 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 9 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
10 | CONFIG_EMBEDDED=y | 10 | CONFIG_EXPERT=y |
11 | CONFIG_KALLSYMS_ALL=y | 11 | CONFIG_KALLSYMS_ALL=y |
12 | CONFIG_KALLSYMS_EXTRA_PASS=y | 12 | CONFIG_KALLSYMS_EXTRA_PASS=y |
13 | CONFIG_PROFILING=y | 13 | CONFIG_PROFILING=y |
diff --git a/arch/powerpc/configs/44x/katmai_defconfig b/arch/powerpc/configs/44x/katmai_defconfig index 4ca9b4873c51..34c09144a699 100644 --- a/arch/powerpc/configs/44x/katmai_defconfig +++ b/arch/powerpc/configs/44x/katmai_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_MODULES=y | 9 | CONFIG_MODULES=y |
10 | CONFIG_MODULE_UNLOAD=y | 10 | CONFIG_MODULE_UNLOAD=y |
11 | # CONFIG_BLK_DEV_BSG is not set | 11 | # CONFIG_BLK_DEV_BSG is not set |
diff --git a/arch/powerpc/configs/44x/rainier_defconfig b/arch/powerpc/configs/44x/rainier_defconfig index e3b65d24207e..21c33faf61a2 100644 --- a/arch/powerpc/configs/44x/rainier_defconfig +++ b/arch/powerpc/configs/44x/rainier_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_MODULES=y | 9 | CONFIG_MODULES=y |
10 | CONFIG_MODULE_UNLOAD=y | 10 | CONFIG_MODULE_UNLOAD=y |
11 | # CONFIG_BLK_DEV_BSG is not set | 11 | # CONFIG_BLK_DEV_BSG is not set |
diff --git a/arch/powerpc/configs/44x/redwood_defconfig b/arch/powerpc/configs/44x/redwood_defconfig index 64cd0f3421a9..01cc2b1a7f9a 100644 --- a/arch/powerpc/configs/44x/redwood_defconfig +++ b/arch/powerpc/configs/44x/redwood_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_MODULES=y | 9 | CONFIG_MODULES=y |
10 | CONFIG_MODULE_UNLOAD=y | 10 | CONFIG_MODULE_UNLOAD=y |
11 | # CONFIG_BLK_DEV_BSG is not set | 11 | # CONFIG_BLK_DEV_BSG is not set |
diff --git a/arch/powerpc/configs/44x/sam440ep_defconfig b/arch/powerpc/configs/44x/sam440ep_defconfig index 01d03367917e..dfcffede16ad 100644 --- a/arch/powerpc/configs/44x/sam440ep_defconfig +++ b/arch/powerpc/configs/44x/sam440ep_defconfig | |||
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG=y | |||
6 | CONFIG_LOG_BUF_SHIFT=14 | 6 | CONFIG_LOG_BUF_SHIFT=14 |
7 | CONFIG_BLK_DEV_INITRD=y | 7 | CONFIG_BLK_DEV_INITRD=y |
8 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 8 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
9 | CONFIG_EMBEDDED=y | 9 | CONFIG_EXPERT=y |
10 | CONFIG_MODULES=y | 10 | CONFIG_MODULES=y |
11 | CONFIG_MODULE_UNLOAD=y | 11 | CONFIG_MODULE_UNLOAD=y |
12 | # CONFIG_BLK_DEV_BSG is not set | 12 | # CONFIG_BLK_DEV_BSG is not set |
diff --git a/arch/powerpc/configs/44x/sequoia_defconfig b/arch/powerpc/configs/44x/sequoia_defconfig index 89b2f9626137..47e399f2892f 100644 --- a/arch/powerpc/configs/44x/sequoia_defconfig +++ b/arch/powerpc/configs/44x/sequoia_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_MODULES=y | 9 | CONFIG_MODULES=y |
10 | CONFIG_MODULE_UNLOAD=y | 10 | CONFIG_MODULE_UNLOAD=y |
11 | # CONFIG_BLK_DEV_BSG is not set | 11 | # CONFIG_BLK_DEV_BSG is not set |
diff --git a/arch/powerpc/configs/44x/taishan_defconfig b/arch/powerpc/configs/44x/taishan_defconfig index e3386cf6f5b7..a6a002ed5681 100644 --- a/arch/powerpc/configs/44x/taishan_defconfig +++ b/arch/powerpc/configs/44x/taishan_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_MODULES=y | 9 | CONFIG_MODULES=y |
10 | CONFIG_MODULE_UNLOAD=y | 10 | CONFIG_MODULE_UNLOAD=y |
11 | # CONFIG_BLK_DEV_BSG is not set | 11 | # CONFIG_BLK_DEV_BSG is not set |
diff --git a/arch/powerpc/configs/44x/warp_defconfig b/arch/powerpc/configs/44x/warp_defconfig index 9c13b9dffafa..abf74dc1f79c 100644 --- a/arch/powerpc/configs/44x/warp_defconfig +++ b/arch/powerpc/configs/44x/warp_defconfig | |||
@@ -8,7 +8,7 @@ CONFIG_IKCONFIG_PROC=y | |||
8 | CONFIG_LOG_BUF_SHIFT=14 | 8 | CONFIG_LOG_BUF_SHIFT=14 |
9 | CONFIG_BLK_DEV_INITRD=y | 9 | CONFIG_BLK_DEV_INITRD=y |
10 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 10 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
11 | CONFIG_EMBEDDED=y | 11 | CONFIG_EXPERT=y |
12 | CONFIG_MODULES=y | 12 | CONFIG_MODULES=y |
13 | CONFIG_MODULE_UNLOAD=y | 13 | CONFIG_MODULE_UNLOAD=y |
14 | # CONFIG_BLK_DEV_BSG is not set | 14 | # CONFIG_BLK_DEV_BSG is not set |
@@ -47,6 +47,7 @@ CONFIG_MTD_NAND_NDFC=y | |||
47 | CONFIG_MTD_UBI=y | 47 | CONFIG_MTD_UBI=y |
48 | CONFIG_PROC_DEVICETREE=y | 48 | CONFIG_PROC_DEVICETREE=y |
49 | CONFIG_BLK_DEV_RAM=y | 49 | CONFIG_BLK_DEV_RAM=y |
50 | CONFIG_MISC_DEVICES=y | ||
50 | CONFIG_EEPROM_AT24=y | 51 | CONFIG_EEPROM_AT24=y |
51 | CONFIG_SCSI=y | 52 | CONFIG_SCSI=y |
52 | CONFIG_BLK_DEV_SD=y | 53 | CONFIG_BLK_DEV_SD=y |
diff --git a/arch/powerpc/configs/52xx/cm5200_defconfig b/arch/powerpc/configs/52xx/cm5200_defconfig index f234c4d0b15c..69b57daf402e 100644 --- a/arch/powerpc/configs/52xx/cm5200_defconfig +++ b/arch/powerpc/configs/52xx/cm5200_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | # CONFIG_SYSCTL_SYSCALL is not set | 7 | # CONFIG_SYSCTL_SYSCALL is not set |
8 | # CONFIG_KALLSYMS is not set | 8 | # CONFIG_KALLSYMS is not set |
9 | # CONFIG_EPOLL is not set | 9 | # CONFIG_EPOLL is not set |
diff --git a/arch/powerpc/configs/52xx/lite5200b_defconfig b/arch/powerpc/configs/52xx/lite5200b_defconfig index a4a795c80740..f3638ae0a627 100644 --- a/arch/powerpc/configs/52xx/lite5200b_defconfig +++ b/arch/powerpc/configs/52xx/lite5200b_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | # CONFIG_SYSCTL_SYSCALL is not set | 7 | # CONFIG_SYSCTL_SYSCALL is not set |
8 | # CONFIG_KALLSYMS is not set | 8 | # CONFIG_KALLSYMS is not set |
9 | # CONFIG_EPOLL is not set | 9 | # CONFIG_EPOLL is not set |
diff --git a/arch/powerpc/configs/52xx/motionpro_defconfig b/arch/powerpc/configs/52xx/motionpro_defconfig index 20d53a1aa7e4..0c7de9620ea6 100644 --- a/arch/powerpc/configs/52xx/motionpro_defconfig +++ b/arch/powerpc/configs/52xx/motionpro_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | # CONFIG_SYSCTL_SYSCALL is not set | 7 | # CONFIG_SYSCTL_SYSCALL is not set |
8 | # CONFIG_KALLSYMS is not set | 8 | # CONFIG_KALLSYMS is not set |
9 | # CONFIG_EPOLL is not set | 9 | # CONFIG_EPOLL is not set |
@@ -43,6 +43,7 @@ CONFIG_PROC_DEVICETREE=y | |||
43 | CONFIG_BLK_DEV_LOOP=y | 43 | CONFIG_BLK_DEV_LOOP=y |
44 | CONFIG_BLK_DEV_RAM=y | 44 | CONFIG_BLK_DEV_RAM=y |
45 | CONFIG_BLK_DEV_RAM_SIZE=32768 | 45 | CONFIG_BLK_DEV_RAM_SIZE=32768 |
46 | CONFIG_MISC_DEVICES=y | ||
46 | CONFIG_EEPROM_LEGACY=y | 47 | CONFIG_EEPROM_LEGACY=y |
47 | CONFIG_SCSI_TGT=y | 48 | CONFIG_SCSI_TGT=y |
48 | CONFIG_BLK_DEV_SD=y | 49 | CONFIG_BLK_DEV_SD=y |
diff --git a/arch/powerpc/configs/52xx/pcm030_defconfig b/arch/powerpc/configs/52xx/pcm030_defconfig index 6bd58338bf1a..22e719575c60 100644 --- a/arch/powerpc/configs/52xx/pcm030_defconfig +++ b/arch/powerpc/configs/52xx/pcm030_defconfig | |||
@@ -8,7 +8,7 @@ CONFIG_IKCONFIG=y | |||
8 | CONFIG_IKCONFIG_PROC=y | 8 | CONFIG_IKCONFIG_PROC=y |
9 | CONFIG_LOG_BUF_SHIFT=14 | 9 | CONFIG_LOG_BUF_SHIFT=14 |
10 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 10 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
11 | CONFIG_EMBEDDED=y | 11 | CONFIG_EXPERT=y |
12 | # CONFIG_SYSCTL_SYSCALL is not set | 12 | # CONFIG_SYSCTL_SYSCALL is not set |
13 | # CONFIG_VM_EVENT_COUNTERS is not set | 13 | # CONFIG_VM_EVENT_COUNTERS is not set |
14 | CONFIG_SLAB=y | 14 | CONFIG_SLAB=y |
@@ -85,7 +85,7 @@ CONFIG_USB_OHCI_HCD=m | |||
85 | CONFIG_USB_OHCI_HCD_PPC_OF_BE=y | 85 | CONFIG_USB_OHCI_HCD_PPC_OF_BE=y |
86 | # CONFIG_USB_OHCI_HCD_PCI is not set | 86 | # CONFIG_USB_OHCI_HCD_PCI is not set |
87 | CONFIG_USB_STORAGE=m | 87 | CONFIG_USB_STORAGE=m |
88 | CONFIG_RTC_CLASS=m | 88 | CONFIG_RTC_CLASS=y |
89 | CONFIG_RTC_DRV_PCF8563=m | 89 | CONFIG_RTC_DRV_PCF8563=m |
90 | CONFIG_EXT2_FS=m | 90 | CONFIG_EXT2_FS=m |
91 | CONFIG_EXT3_FS=m | 91 | CONFIG_EXT3_FS=m |
diff --git a/arch/powerpc/configs/52xx/tqm5200_defconfig b/arch/powerpc/configs/52xx/tqm5200_defconfig index 3a1f70292d9d..959cd2cfc275 100644 --- a/arch/powerpc/configs/52xx/tqm5200_defconfig +++ b/arch/powerpc/configs/52xx/tqm5200_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | # CONFIG_SYSCTL_SYSCALL is not set | 7 | # CONFIG_SYSCTL_SYSCALL is not set |
8 | # CONFIG_KALLSYMS is not set | 8 | # CONFIG_KALLSYMS is not set |
9 | # CONFIG_EPOLL is not set | 9 | # CONFIG_EPOLL is not set |
diff --git a/arch/powerpc/configs/83xx/asp8347_defconfig b/arch/powerpc/configs/83xx/asp8347_defconfig index eed42d8919e8..d2762d9dcb8e 100644 --- a/arch/powerpc/configs/83xx/asp8347_defconfig +++ b/arch/powerpc/configs/83xx/asp8347_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y | |||
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | # CONFIG_KALLSYMS is not set | 8 | # CONFIG_KALLSYMS is not set |
9 | CONFIG_MODULES=y | 9 | CONFIG_MODULES=y |
10 | CONFIG_MODULE_UNLOAD=y | 10 | CONFIG_MODULE_UNLOAD=y |
diff --git a/arch/powerpc/configs/83xx/kmeter1_defconfig b/arch/powerpc/configs/83xx/kmeter1_defconfig index e43ecb27dfd7..07e1bbadebfe 100644 --- a/arch/powerpc/configs/83xx/kmeter1_defconfig +++ b/arch/powerpc/configs/83xx/kmeter1_defconfig | |||
@@ -2,8 +2,9 @@ CONFIG_EXPERIMENTAL=y | |||
2 | # CONFIG_SWAP is not set | 2 | # CONFIG_SWAP is not set |
3 | CONFIG_SYSVIPC=y | 3 | CONFIG_SYSVIPC=y |
4 | CONFIG_POSIX_MQUEUE=y | 4 | CONFIG_POSIX_MQUEUE=y |
5 | CONFIG_SPARSE_IRQ=y | ||
5 | CONFIG_LOG_BUF_SHIFT=14 | 6 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
7 | # CONFIG_HOTPLUG is not set | 8 | # CONFIG_HOTPLUG is not set |
8 | CONFIG_SLAB=y | 9 | CONFIG_SLAB=y |
9 | CONFIG_MODULES=y | 10 | CONFIG_MODULES=y |
@@ -18,7 +19,6 @@ CONFIG_KMETER1=y | |||
18 | CONFIG_NO_HZ=y | 19 | CONFIG_NO_HZ=y |
19 | CONFIG_HIGH_RES_TIMERS=y | 20 | CONFIG_HIGH_RES_TIMERS=y |
20 | CONFIG_PREEMPT=y | 21 | CONFIG_PREEMPT=y |
21 | CONFIG_SPARSE_IRQ=y | ||
22 | # CONFIG_SECCOMP is not set | 22 | # CONFIG_SECCOMP is not set |
23 | CONFIG_NET=y | 23 | CONFIG_NET=y |
24 | CONFIG_PACKET=y | 24 | CONFIG_PACKET=y |
@@ -37,7 +37,6 @@ CONFIG_MTD=y | |||
37 | CONFIG_MTD_CONCAT=y | 37 | CONFIG_MTD_CONCAT=y |
38 | CONFIG_MTD_PARTITIONS=y | 38 | CONFIG_MTD_PARTITIONS=y |
39 | CONFIG_MTD_CMDLINE_PARTS=y | 39 | CONFIG_MTD_CMDLINE_PARTS=y |
40 | CONFIG_MTD_OF_PARTS=y | ||
41 | CONFIG_MTD_CHAR=y | 40 | CONFIG_MTD_CHAR=y |
42 | CONFIG_MTD_BLOCK=y | 41 | CONFIG_MTD_BLOCK=y |
43 | CONFIG_MTD_CFI=y | 42 | CONFIG_MTD_CFI=y |
@@ -49,13 +48,12 @@ CONFIG_MTD_UBI=y | |||
49 | CONFIG_MTD_UBI_GLUEBI=y | 48 | CONFIG_MTD_UBI_GLUEBI=y |
50 | CONFIG_MTD_UBI_DEBUG=y | 49 | CONFIG_MTD_UBI_DEBUG=y |
51 | CONFIG_PROC_DEVICETREE=y | 50 | CONFIG_PROC_DEVICETREE=y |
52 | # CONFIG_MISC_DEVICES is not set | ||
53 | CONFIG_NETDEVICES=y | 51 | CONFIG_NETDEVICES=y |
54 | CONFIG_DUMMY=y | 52 | CONFIG_DUMMY=y |
55 | CONFIG_TUN=y | 53 | CONFIG_TUN=y |
54 | CONFIG_MII=y | ||
56 | CONFIG_MARVELL_PHY=y | 55 | CONFIG_MARVELL_PHY=y |
57 | CONFIG_NET_ETHERNET=y | 56 | CONFIG_NET_ETHERNET=y |
58 | CONFIG_MII=y | ||
59 | CONFIG_UCC_GETH=y | 57 | CONFIG_UCC_GETH=y |
60 | # CONFIG_NETDEV_10000 is not set | 58 | # CONFIG_NETDEV_10000 is not set |
61 | CONFIG_WAN=y | 59 | CONFIG_WAN=y |
@@ -77,7 +75,6 @@ CONFIG_I2C_MPC=y | |||
77 | # CONFIG_USB_SUPPORT is not set | 75 | # CONFIG_USB_SUPPORT is not set |
78 | CONFIG_UIO=y | 76 | CONFIG_UIO=y |
79 | # CONFIG_DNOTIFY is not set | 77 | # CONFIG_DNOTIFY is not set |
80 | CONFIG_INOTIFY=y | ||
81 | CONFIG_TMPFS=y | 78 | CONFIG_TMPFS=y |
82 | CONFIG_JFFS2_FS=y | 79 | CONFIG_JFFS2_FS=y |
83 | CONFIG_NFS_FS=y | 80 | CONFIG_NFS_FS=y |
diff --git a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig index c2e6ab51d335..126ef1b08a01 100644 --- a/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc8313_rdb_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | # CONFIG_KALLSYMS is not set | 7 | # CONFIG_KALLSYMS is not set |
8 | CONFIG_MODULES=y | 8 | CONFIG_MODULES=y |
9 | CONFIG_MODULE_UNLOAD=y | 9 | CONFIG_MODULE_UNLOAD=y |
@@ -104,7 +104,6 @@ CONFIG_ROOT_NFS=y | |||
104 | CONFIG_PARTITION_ADVANCED=y | 104 | CONFIG_PARTITION_ADVANCED=y |
105 | CONFIG_DEBUG_KERNEL=y | 105 | CONFIG_DEBUG_KERNEL=y |
106 | CONFIG_DETECT_HUNG_TASK=y | 106 | CONFIG_DETECT_HUNG_TASK=y |
107 | # CONFIG_DEBUG_BUGVERBOSE is not set | ||
108 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | 107 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set |
109 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 108 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
110 | CONFIG_CRYPTO_PCBC=m | 109 | CONFIG_CRYPTO_PCBC=m |
diff --git a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig index 1d3b20065913..abcf00ad939e 100644 --- a/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc8315_rdb_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | # CONFIG_KALLSYMS is not set | 7 | # CONFIG_KALLSYMS is not set |
8 | CONFIG_MODULES=y | 8 | CONFIG_MODULES=y |
9 | CONFIG_MODULE_UNLOAD=y | 9 | CONFIG_MODULE_UNLOAD=y |
@@ -101,7 +101,6 @@ CONFIG_ROOT_NFS=y | |||
101 | CONFIG_PARTITION_ADVANCED=y | 101 | CONFIG_PARTITION_ADVANCED=y |
102 | CONFIG_DEBUG_KERNEL=y | 102 | CONFIG_DEBUG_KERNEL=y |
103 | CONFIG_DETECT_HUNG_TASK=y | 103 | CONFIG_DETECT_HUNG_TASK=y |
104 | # CONFIG_DEBUG_BUGVERBOSE is not set | ||
105 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | 104 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set |
106 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 105 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
107 | CONFIG_CRYPTO_PCBC=m | 106 | CONFIG_CRYPTO_PCBC=m |
diff --git a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig index 91fe73bd5ad2..a5699a1f7d0a 100644 --- a/arch/powerpc/configs/83xx/mpc832x_mds_defconfig +++ b/arch/powerpc/configs/83xx/mpc832x_mds_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | # CONFIG_KALLSYMS is not set | 7 | # CONFIG_KALLSYMS is not set |
8 | CONFIG_MODULES=y | 8 | CONFIG_MODULES=y |
9 | CONFIG_MODULE_UNLOAD=y | 9 | CONFIG_MODULE_UNLOAD=y |
diff --git a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig index 6d300f205604..b4da1a7e6449 100644 --- a/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc832x_rdb_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | # CONFIG_KALLSYMS is not set | 7 | # CONFIG_KALLSYMS is not set |
8 | CONFIG_MODULES=y | 8 | CONFIG_MODULES=y |
9 | CONFIG_MODULE_UNLOAD=y | 9 | CONFIG_MODULE_UNLOAD=y |
diff --git a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig index b236a67e01fe..291f8221d5a6 100644 --- a/arch/powerpc/configs/83xx/mpc834x_itx_defconfig +++ b/arch/powerpc/configs/83xx/mpc834x_itx_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | # CONFIG_KALLSYMS is not set | 7 | # CONFIG_KALLSYMS is not set |
8 | CONFIG_MODULES=y | 8 | CONFIG_MODULES=y |
9 | CONFIG_MODULE_UNLOAD=y | 9 | CONFIG_MODULE_UNLOAD=y |
diff --git a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig index 001dead3cde9..f8b228aaa03a 100644 --- a/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig +++ b/arch/powerpc/configs/83xx/mpc834x_itxgp_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | # CONFIG_KALLSYMS is not set | 7 | # CONFIG_KALLSYMS is not set |
8 | CONFIG_MODULES=y | 8 | CONFIG_MODULES=y |
9 | CONFIG_MODULE_UNLOAD=y | 9 | CONFIG_MODULE_UNLOAD=y |
diff --git a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig index 9dccefca00c3..99660c062191 100644 --- a/arch/powerpc/configs/83xx/mpc834x_mds_defconfig +++ b/arch/powerpc/configs/83xx/mpc834x_mds_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | # CONFIG_KALLSYMS is not set | 7 | # CONFIG_KALLSYMS is not set |
8 | CONFIG_MODULES=y | 8 | CONFIG_MODULES=y |
9 | CONFIG_MODULE_UNLOAD=y | 9 | CONFIG_MODULE_UNLOAD=y |
diff --git a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig index d4b165d7d294..10b5c4cd0e72 100644 --- a/arch/powerpc/configs/83xx/mpc836x_mds_defconfig +++ b/arch/powerpc/configs/83xx/mpc836x_mds_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | # CONFIG_KALLSYMS is not set | 7 | # CONFIG_KALLSYMS is not set |
8 | CONFIG_MODULES=y | 8 | CONFIG_MODULES=y |
9 | CONFIG_MODULE_UNLOAD=y | 9 | CONFIG_MODULE_UNLOAD=y |
diff --git a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig index 89ba67274bda..45925d701d2a 100644 --- a/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig +++ b/arch/powerpc/configs/83xx/mpc836x_rdk_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | # CONFIG_KALLSYMS is not set | 7 | # CONFIG_KALLSYMS is not set |
8 | CONFIG_MODULES=y | 8 | CONFIG_MODULES=y |
9 | CONFIG_MODULE_UNLOAD=y | 9 | CONFIG_MODULE_UNLOAD=y |
diff --git a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig index 2ea6b405046a..f367985be6f7 100644 --- a/arch/powerpc/configs/83xx/mpc837x_mds_defconfig +++ b/arch/powerpc/configs/83xx/mpc837x_mds_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | CONFIG_SLAB=y | 7 | CONFIG_SLAB=y |
8 | CONFIG_MODULES=y | 8 | CONFIG_MODULES=y |
9 | CONFIG_MODULE_UNLOAD=y | 9 | CONFIG_MODULE_UNLOAD=y |
diff --git a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig index bffe3c775030..414eda381591 100644 --- a/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig +++ b/arch/powerpc/configs/83xx/mpc837x_rdb_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | CONFIG_SLAB=y | 7 | CONFIG_SLAB=y |
8 | CONFIG_MODULES=y | 8 | CONFIG_MODULES=y |
9 | CONFIG_MODULE_UNLOAD=y | 9 | CONFIG_MODULE_UNLOAD=y |
diff --git a/arch/powerpc/configs/83xx/sbc834x_defconfig b/arch/powerpc/configs/83xx/sbc834x_defconfig index fa5c9eefc9ad..6d6463fe06fc 100644 --- a/arch/powerpc/configs/83xx/sbc834x_defconfig +++ b/arch/powerpc/configs/83xx/sbc834x_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | # CONFIG_KALLSYMS is not set | 7 | # CONFIG_KALLSYMS is not set |
8 | CONFIG_SLAB=y | 8 | CONFIG_SLAB=y |
9 | CONFIG_MODULES=y | 9 | CONFIG_MODULES=y |
diff --git a/arch/powerpc/configs/85xx/ksi8560_defconfig b/arch/powerpc/configs/85xx/ksi8560_defconfig index 385b1af37d75..8f7c1061891a 100644 --- a/arch/powerpc/configs/85xx/ksi8560_defconfig +++ b/arch/powerpc/configs/85xx/ksi8560_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y | |||
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | # CONFIG_BLK_DEV_BSG is not set | 8 | # CONFIG_BLK_DEV_BSG is not set |
9 | CONFIG_KSI8560=y | 9 | CONFIG_KSI8560=y |
10 | CONFIG_CPM2=y | 10 | CONFIG_CPM2=y |
diff --git a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig index 222b704c1f4b..11662c217ac0 100644 --- a/arch/powerpc/configs/85xx/mpc8540_ads_defconfig +++ b/arch/powerpc/configs/85xx/mpc8540_ads_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y | |||
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | # CONFIG_BLK_DEV_BSG is not set | 8 | # CONFIG_BLK_DEV_BSG is not set |
9 | CONFIG_MPC8540_ADS=y | 9 | CONFIG_MPC8540_ADS=y |
10 | CONFIG_NO_HZ=y | 10 | CONFIG_NO_HZ=y |
@@ -58,7 +58,6 @@ CONFIG_PARTITION_ADVANCED=y | |||
58 | CONFIG_DEBUG_KERNEL=y | 58 | CONFIG_DEBUG_KERNEL=y |
59 | CONFIG_DETECT_HUNG_TASK=y | 59 | CONFIG_DETECT_HUNG_TASK=y |
60 | CONFIG_DEBUG_MUTEXES=y | 60 | CONFIG_DEBUG_MUTEXES=y |
61 | # CONFIG_DEBUG_BUGVERBOSE is not set | ||
62 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | 61 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set |
63 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 62 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
64 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 63 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
diff --git a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig index 619702de9477..ebe9b30b0721 100644 --- a/arch/powerpc/configs/85xx/mpc8560_ads_defconfig +++ b/arch/powerpc/configs/85xx/mpc8560_ads_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y | |||
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | # CONFIG_BLK_DEV_BSG is not set | 8 | # CONFIG_BLK_DEV_BSG is not set |
9 | CONFIG_MPC8560_ADS=y | 9 | CONFIG_MPC8560_ADS=y |
10 | CONFIG_BINFMT_MISC=y | 10 | CONFIG_BINFMT_MISC=y |
@@ -59,7 +59,6 @@ CONFIG_PARTITION_ADVANCED=y | |||
59 | CONFIG_DEBUG_KERNEL=y | 59 | CONFIG_DEBUG_KERNEL=y |
60 | CONFIG_DETECT_HUNG_TASK=y | 60 | CONFIG_DETECT_HUNG_TASK=y |
61 | CONFIG_DEBUG_MUTEXES=y | 61 | CONFIG_DEBUG_MUTEXES=y |
62 | # CONFIG_DEBUG_BUGVERBOSE is not set | ||
63 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | 62 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set |
64 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 63 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
65 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 64 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
diff --git a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig index 6bf56e83f957..eb25229b387a 100644 --- a/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig +++ b/arch/powerpc/configs/85xx/mpc85xx_cds_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y | |||
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | # CONFIG_BLK_DEV_BSG is not set | 8 | # CONFIG_BLK_DEV_BSG is not set |
9 | CONFIG_MPC85xx_CDS=y | 9 | CONFIG_MPC85xx_CDS=y |
10 | CONFIG_NO_HZ=y | 10 | CONFIG_NO_HZ=y |
@@ -63,7 +63,6 @@ CONFIG_PARTITION_ADVANCED=y | |||
63 | CONFIG_DEBUG_KERNEL=y | 63 | CONFIG_DEBUG_KERNEL=y |
64 | CONFIG_DETECT_HUNG_TASK=y | 64 | CONFIG_DETECT_HUNG_TASK=y |
65 | CONFIG_DEBUG_MUTEXES=y | 65 | CONFIG_DEBUG_MUTEXES=y |
66 | # CONFIG_DEBUG_BUGVERBOSE is not set | ||
67 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | 66 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set |
68 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 67 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
69 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 68 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
diff --git a/arch/powerpc/configs/85xx/sbc8548_defconfig b/arch/powerpc/configs/85xx/sbc8548_defconfig index a9a17d055766..5b2b651dfb98 100644 --- a/arch/powerpc/configs/85xx/sbc8548_defconfig +++ b/arch/powerpc/configs/85xx/sbc8548_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y | |||
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | CONFIG_SLAB=y | 8 | CONFIG_SLAB=y |
9 | # CONFIG_BLK_DEV_BSG is not set | 9 | # CONFIG_BLK_DEV_BSG is not set |
10 | CONFIG_SBC8548=y | 10 | CONFIG_SBC8548=y |
diff --git a/arch/powerpc/configs/85xx/sbc8560_defconfig b/arch/powerpc/configs/85xx/sbc8560_defconfig index 820e32d8c42b..f7fdb0318e4c 100644 --- a/arch/powerpc/configs/85xx/sbc8560_defconfig +++ b/arch/powerpc/configs/85xx/sbc8560_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y | |||
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | CONFIG_SLAB=y | 8 | CONFIG_SLAB=y |
9 | # CONFIG_BLK_DEV_BSG is not set | 9 | # CONFIG_BLK_DEV_BSG is not set |
10 | CONFIG_SBC8560=y | 10 | CONFIG_SBC8560=y |
diff --git a/arch/powerpc/configs/85xx/socrates_defconfig b/arch/powerpc/configs/85xx/socrates_defconfig index b6db3f47af99..77506b5d5a41 100644 --- a/arch/powerpc/configs/85xx/socrates_defconfig +++ b/arch/powerpc/configs/85xx/socrates_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y | |||
4 | CONFIG_LOG_BUF_SHIFT=16 | 4 | CONFIG_LOG_BUF_SHIFT=16 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | # CONFIG_KALLSYMS is not set | 8 | # CONFIG_KALLSYMS is not set |
9 | # CONFIG_HOTPLUG is not set | 9 | # CONFIG_HOTPLUG is not set |
10 | # CONFIG_EPOLL is not set | 10 | # CONFIG_EPOLL is not set |
diff --git a/arch/powerpc/configs/85xx/stx_gp3_defconfig b/arch/powerpc/configs/85xx/stx_gp3_defconfig index 333a41bd2a68..5d4db154bf59 100644 --- a/arch/powerpc/configs/85xx/stx_gp3_defconfig +++ b/arch/powerpc/configs/85xx/stx_gp3_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y | |||
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | CONFIG_MODULES=y | 8 | CONFIG_MODULES=y |
9 | CONFIG_MODVERSIONS=y | 9 | CONFIG_MODVERSIONS=y |
10 | # CONFIG_BLK_DEV_BSG is not set | 10 | # CONFIG_BLK_DEV_BSG is not set |
diff --git a/arch/powerpc/configs/85xx/tqm8540_defconfig b/arch/powerpc/configs/85xx/tqm8540_defconfig index 33db352f847e..ddcb9f37fa1f 100644 --- a/arch/powerpc/configs/85xx/tqm8540_defconfig +++ b/arch/powerpc/configs/85xx/tqm8540_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y | |||
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | # CONFIG_KALLSYMS is not set | 8 | # CONFIG_KALLSYMS is not set |
9 | # CONFIG_HOTPLUG is not set | 9 | # CONFIG_HOTPLUG is not set |
10 | # CONFIG_EPOLL is not set | 10 | # CONFIG_EPOLL is not set |
diff --git a/arch/powerpc/configs/85xx/tqm8541_defconfig b/arch/powerpc/configs/85xx/tqm8541_defconfig index f0c20dfbd4d3..981abd6d4b57 100644 --- a/arch/powerpc/configs/85xx/tqm8541_defconfig +++ b/arch/powerpc/configs/85xx/tqm8541_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y | |||
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | # CONFIG_KALLSYMS is not set | 8 | # CONFIG_KALLSYMS is not set |
9 | # CONFIG_HOTPLUG is not set | 9 | # CONFIG_HOTPLUG is not set |
10 | # CONFIG_EPOLL is not set | 10 | # CONFIG_EPOLL is not set |
diff --git a/arch/powerpc/configs/85xx/tqm8548_defconfig b/arch/powerpc/configs/85xx/tqm8548_defconfig index a883450dcdfa..37b3d7227cdd 100644 --- a/arch/powerpc/configs/85xx/tqm8548_defconfig +++ b/arch/powerpc/configs/85xx/tqm8548_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y | |||
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | CONFIG_MODULES=y | 8 | CONFIG_MODULES=y |
9 | CONFIG_MODULE_UNLOAD=y | 9 | CONFIG_MODULE_UNLOAD=y |
10 | # CONFIG_BLK_DEV_BSG is not set | 10 | # CONFIG_BLK_DEV_BSG is not set |
diff --git a/arch/powerpc/configs/85xx/tqm8555_defconfig b/arch/powerpc/configs/85xx/tqm8555_defconfig index ff95f90dc171..3593b320c97c 100644 --- a/arch/powerpc/configs/85xx/tqm8555_defconfig +++ b/arch/powerpc/configs/85xx/tqm8555_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y | |||
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | # CONFIG_KALLSYMS is not set | 8 | # CONFIG_KALLSYMS is not set |
9 | # CONFIG_HOTPLUG is not set | 9 | # CONFIG_HOTPLUG is not set |
10 | # CONFIG_EPOLL is not set | 10 | # CONFIG_EPOLL is not set |
diff --git a/arch/powerpc/configs/85xx/tqm8560_defconfig b/arch/powerpc/configs/85xx/tqm8560_defconfig index 8d6c90ea4783..de413acc34d6 100644 --- a/arch/powerpc/configs/85xx/tqm8560_defconfig +++ b/arch/powerpc/configs/85xx/tqm8560_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y | |||
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | # CONFIG_KALLSYMS is not set | 8 | # CONFIG_KALLSYMS is not set |
9 | # CONFIG_HOTPLUG is not set | 9 | # CONFIG_HOTPLUG is not set |
10 | # CONFIG_EPOLL is not set | 10 | # CONFIG_EPOLL is not set |
diff --git a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig index f53efe4a0e0c..5ea3124518fd 100644 --- a/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig +++ b/arch/powerpc/configs/85xx/xes_mpc85xx_defconfig | |||
@@ -11,7 +11,7 @@ CONFIG_IKCONFIG_PROC=y | |||
11 | CONFIG_LOG_BUF_SHIFT=14 | 11 | CONFIG_LOG_BUF_SHIFT=14 |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 13 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
14 | CONFIG_EMBEDDED=y | 14 | CONFIG_EXPERT=y |
15 | CONFIG_KALLSYMS_ALL=y | 15 | CONFIG_KALLSYMS_ALL=y |
16 | CONFIG_KALLSYMS_EXTRA_PASS=y | 16 | CONFIG_KALLSYMS_EXTRA_PASS=y |
17 | CONFIG_MODULES=y | 17 | CONFIG_MODULES=y |
diff --git a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig index 432ebc28d25c..d41857a5152d 100644 --- a/arch/powerpc/configs/86xx/gef_ppc9a_defconfig +++ b/arch/powerpc/configs/86xx/gef_ppc9a_defconfig | |||
@@ -11,7 +11,7 @@ CONFIG_LOG_BUF_SHIFT=14 | |||
11 | CONFIG_RELAY=y | 11 | CONFIG_RELAY=y |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 13 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
14 | CONFIG_EMBEDDED=y | 14 | CONFIG_EXPERT=y |
15 | CONFIG_SLAB=y | 15 | CONFIG_SLAB=y |
16 | CONFIG_MODULES=y | 16 | CONFIG_MODULES=y |
17 | CONFIG_MODULE_UNLOAD=y | 17 | CONFIG_MODULE_UNLOAD=y |
@@ -85,6 +85,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m | |||
85 | CONFIG_BLK_DEV_NBD=m | 85 | CONFIG_BLK_DEV_NBD=m |
86 | CONFIG_BLK_DEV_RAM=y | 86 | CONFIG_BLK_DEV_RAM=y |
87 | CONFIG_BLK_DEV_RAM_SIZE=131072 | 87 | CONFIG_BLK_DEV_RAM_SIZE=131072 |
88 | CONFIG_MISC_DEVICES=y | ||
88 | CONFIG_DS1682=y | 89 | CONFIG_DS1682=y |
89 | CONFIG_IDE=y | 90 | CONFIG_IDE=y |
90 | CONFIG_BLK_DEV_IDECS=y | 91 | CONFIG_BLK_DEV_IDECS=y |
diff --git a/arch/powerpc/configs/86xx/gef_sbc310_defconfig b/arch/powerpc/configs/86xx/gef_sbc310_defconfig index ce5e919d9b55..38303ec11bcd 100644 --- a/arch/powerpc/configs/86xx/gef_sbc310_defconfig +++ b/arch/powerpc/configs/86xx/gef_sbc310_defconfig | |||
@@ -11,7 +11,7 @@ CONFIG_LOG_BUF_SHIFT=14 | |||
11 | CONFIG_RELAY=y | 11 | CONFIG_RELAY=y |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 13 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
14 | CONFIG_EMBEDDED=y | 14 | CONFIG_EXPERT=y |
15 | CONFIG_SLAB=y | 15 | CONFIG_SLAB=y |
16 | CONFIG_MODULES=y | 16 | CONFIG_MODULES=y |
17 | CONFIG_MODULE_UNLOAD=y | 17 | CONFIG_MODULE_UNLOAD=y |
@@ -85,6 +85,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m | |||
85 | CONFIG_BLK_DEV_NBD=m | 85 | CONFIG_BLK_DEV_NBD=m |
86 | CONFIG_BLK_DEV_RAM=y | 86 | CONFIG_BLK_DEV_RAM=y |
87 | CONFIG_BLK_DEV_RAM_SIZE=131072 | 87 | CONFIG_BLK_DEV_RAM_SIZE=131072 |
88 | CONFIG_MISC_DEVICES=y | ||
88 | CONFIG_DS1682=y | 89 | CONFIG_DS1682=y |
89 | CONFIG_IDE=y | 90 | CONFIG_IDE=y |
90 | CONFIG_BLK_DEV_IDECS=y | 91 | CONFIG_BLK_DEV_IDECS=y |
diff --git a/arch/powerpc/configs/86xx/gef_sbc610_defconfig b/arch/powerpc/configs/86xx/gef_sbc610_defconfig index 589e71e6dc1c..98533973d20f 100644 --- a/arch/powerpc/configs/86xx/gef_sbc610_defconfig +++ b/arch/powerpc/configs/86xx/gef_sbc610_defconfig | |||
@@ -11,7 +11,7 @@ CONFIG_LOG_BUF_SHIFT=14 | |||
11 | CONFIG_RELAY=y | 11 | CONFIG_RELAY=y |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 13 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
14 | CONFIG_EMBEDDED=y | 14 | CONFIG_EXPERT=y |
15 | CONFIG_SLAB=y | 15 | CONFIG_SLAB=y |
16 | CONFIG_MODULES=y | 16 | CONFIG_MODULES=y |
17 | CONFIG_MODULE_UNLOAD=y | 17 | CONFIG_MODULE_UNLOAD=y |
@@ -138,6 +138,7 @@ CONFIG_BLK_DEV_CRYPTOLOOP=m | |||
138 | CONFIG_BLK_DEV_NBD=m | 138 | CONFIG_BLK_DEV_NBD=m |
139 | CONFIG_BLK_DEV_RAM=y | 139 | CONFIG_BLK_DEV_RAM=y |
140 | CONFIG_BLK_DEV_RAM_SIZE=131072 | 140 | CONFIG_BLK_DEV_RAM_SIZE=131072 |
141 | CONFIG_MISC_DEVICES=y | ||
141 | CONFIG_DS1682=y | 142 | CONFIG_DS1682=y |
142 | CONFIG_BLK_DEV_SD=y | 143 | CONFIG_BLK_DEV_SD=y |
143 | CONFIG_CHR_DEV_ST=y | 144 | CONFIG_CHR_DEV_ST=y |
diff --git a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig index 321fb47096d9..036bfb2d18cd 100644 --- a/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig +++ b/arch/powerpc/configs/86xx/mpc8610_hpcd_defconfig | |||
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG_PROC=y | |||
6 | CONFIG_LOG_BUF_SHIFT=14 | 6 | CONFIG_LOG_BUF_SHIFT=14 |
7 | CONFIG_BLK_DEV_INITRD=y | 7 | CONFIG_BLK_DEV_INITRD=y |
8 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 8 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
9 | CONFIG_EMBEDDED=y | 9 | CONFIG_EXPERT=y |
10 | CONFIG_KALLSYMS_EXTRA_PASS=y | 10 | CONFIG_KALLSYMS_EXTRA_PASS=y |
11 | # CONFIG_ELF_CORE is not set | 11 | # CONFIG_ELF_CORE is not set |
12 | CONFIG_MODULES=y | 12 | CONFIG_MODULES=y |
diff --git a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig index b5e46399374e..f51c7ebc181e 100644 --- a/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig +++ b/arch/powerpc/configs/86xx/mpc8641_hpcn_defconfig | |||
@@ -10,7 +10,7 @@ CONFIG_IKCONFIG_PROC=y | |||
10 | CONFIG_LOG_BUF_SHIFT=14 | 10 | CONFIG_LOG_BUF_SHIFT=14 |
11 | CONFIG_BLK_DEV_INITRD=y | 11 | CONFIG_BLK_DEV_INITRD=y |
12 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 12 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
13 | CONFIG_EMBEDDED=y | 13 | CONFIG_EXPERT=y |
14 | CONFIG_KALLSYMS_ALL=y | 14 | CONFIG_KALLSYMS_ALL=y |
15 | CONFIG_KALLSYMS_EXTRA_PASS=y | 15 | CONFIG_KALLSYMS_EXTRA_PASS=y |
16 | CONFIG_MODULES=y | 16 | CONFIG_MODULES=y |
@@ -63,6 +63,7 @@ CONFIG_BLK_DEV_LOOP=y | |||
63 | CONFIG_BLK_DEV_NBD=y | 63 | CONFIG_BLK_DEV_NBD=y |
64 | CONFIG_BLK_DEV_RAM=y | 64 | CONFIG_BLK_DEV_RAM=y |
65 | CONFIG_BLK_DEV_RAM_SIZE=131072 | 65 | CONFIG_BLK_DEV_RAM_SIZE=131072 |
66 | CONFIG_MISC_DEVICES=y | ||
66 | CONFIG_EEPROM_LEGACY=y | 67 | CONFIG_EEPROM_LEGACY=y |
67 | CONFIG_BLK_DEV_SD=y | 68 | CONFIG_BLK_DEV_SD=y |
68 | CONFIG_CHR_DEV_ST=y | 69 | CONFIG_CHR_DEV_ST=y |
@@ -167,7 +168,6 @@ CONFIG_MAC_PARTITION=y | |||
167 | CONFIG_CRC_T10DIF=y | 168 | CONFIG_CRC_T10DIF=y |
168 | CONFIG_DEBUG_KERNEL=y | 169 | CONFIG_DEBUG_KERNEL=y |
169 | CONFIG_DETECT_HUNG_TASK=y | 170 | CONFIG_DETECT_HUNG_TASK=y |
170 | # CONFIG_DEBUG_BUGVERBOSE is not set | ||
171 | CONFIG_DEBUG_INFO=y | 171 | CONFIG_DEBUG_INFO=y |
172 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | 172 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set |
173 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 173 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
diff --git a/arch/powerpc/configs/86xx/sbc8641d_defconfig b/arch/powerpc/configs/86xx/sbc8641d_defconfig index 71145c3a64db..0a92ca045641 100644 --- a/arch/powerpc/configs/86xx/sbc8641d_defconfig +++ b/arch/powerpc/configs/86xx/sbc8641d_defconfig | |||
@@ -11,7 +11,7 @@ CONFIG_LOG_BUF_SHIFT=14 | |||
11 | CONFIG_RELAY=y | 11 | CONFIG_RELAY=y |
12 | CONFIG_BLK_DEV_INITRD=y | 12 | CONFIG_BLK_DEV_INITRD=y |
13 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 13 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
14 | CONFIG_EMBEDDED=y | 14 | CONFIG_EXPERT=y |
15 | CONFIG_SLAB=y | 15 | CONFIG_SLAB=y |
16 | CONFIG_MODULES=y | 16 | CONFIG_MODULES=y |
17 | CONFIG_MODULE_UNLOAD=y | 17 | CONFIG_MODULE_UNLOAD=y |
diff --git a/arch/powerpc/configs/adder875_defconfig b/arch/powerpc/configs/adder875_defconfig index ca84c7fc24d5..69128740c14d 100644 --- a/arch/powerpc/configs/adder875_defconfig +++ b/arch/powerpc/configs/adder875_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_EXPERIMENTAL=y | |||
4 | CONFIG_SYSVIPC=y | 4 | CONFIG_SYSVIPC=y |
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | # CONFIG_SYSCTL_SYSCALL is not set | 8 | # CONFIG_SYSCTL_SYSCALL is not set |
9 | # CONFIG_ELF_CORE is not set | 9 | # CONFIG_ELF_CORE is not set |
10 | # CONFIG_BASE_FULL is not set | 10 | # CONFIG_BASE_FULL is not set |
diff --git a/arch/powerpc/configs/c2k_defconfig b/arch/powerpc/configs/c2k_defconfig index f9e6a3ea5a64..2a84fd7f631c 100644 --- a/arch/powerpc/configs/c2k_defconfig +++ b/arch/powerpc/configs/c2k_defconfig | |||
@@ -132,8 +132,8 @@ CONFIG_NET_CLS_RSVP=m | |||
132 | CONFIG_NET_CLS_RSVP6=m | 132 | CONFIG_NET_CLS_RSVP6=m |
133 | CONFIG_NET_CLS_IND=y | 133 | CONFIG_NET_CLS_IND=y |
134 | CONFIG_BT=m | 134 | CONFIG_BT=m |
135 | CONFIG_BT_L2CAP=m | 135 | CONFIG_BT_L2CAP=y |
136 | CONFIG_BT_SCO=m | 136 | CONFIG_BT_SCO=y |
137 | CONFIG_BT_RFCOMM=m | 137 | CONFIG_BT_RFCOMM=m |
138 | CONFIG_BT_RFCOMM_TTY=y | 138 | CONFIG_BT_RFCOMM_TTY=y |
139 | CONFIG_BT_BNEP=m | 139 | CONFIG_BT_BNEP=m |
diff --git a/arch/powerpc/configs/e55xx_smp_defconfig b/arch/powerpc/configs/e55xx_smp_defconfig new file mode 100644 index 000000000000..d32283555b53 --- /dev/null +++ b/arch/powerpc/configs/e55xx_smp_defconfig | |||
@@ -0,0 +1,104 @@ | |||
1 | CONFIG_PPC64=y | ||
2 | CONFIG_PPC_BOOK3E_64=y | ||
3 | # CONFIG_VIRT_CPU_ACCOUNTING is not set | ||
4 | CONFIG_SMP=y | ||
5 | CONFIG_NR_CPUS=2 | ||
6 | CONFIG_EXPERIMENTAL=y | ||
7 | CONFIG_SYSVIPC=y | ||
8 | CONFIG_BSD_PROCESS_ACCT=y | ||
9 | CONFIG_SPARSE_IRQ=y | ||
10 | CONFIG_IKCONFIG=y | ||
11 | CONFIG_IKCONFIG_PROC=y | ||
12 | CONFIG_LOG_BUF_SHIFT=14 | ||
13 | CONFIG_BLK_DEV_INITRD=y | ||
14 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
15 | CONFIG_EXPERT=y | ||
16 | CONFIG_KALLSYMS_ALL=y | ||
17 | CONFIG_KALLSYMS_EXTRA_PASS=y | ||
18 | CONFIG_MODULES=y | ||
19 | CONFIG_MODULE_UNLOAD=y | ||
20 | CONFIG_MODULE_FORCE_UNLOAD=y | ||
21 | CONFIG_MODVERSIONS=y | ||
22 | # CONFIG_BLK_DEV_BSG is not set | ||
23 | CONFIG_P5020_DS=y | ||
24 | # CONFIG_PPC_OF_BOOT_TRAMPOLINE is not set | ||
25 | CONFIG_NO_HZ=y | ||
26 | CONFIG_HIGH_RES_TIMERS=y | ||
27 | CONFIG_BINFMT_MISC=m | ||
28 | # CONFIG_PCI is not set | ||
29 | CONFIG_NET=y | ||
30 | CONFIG_PACKET=y | ||
31 | CONFIG_UNIX=y | ||
32 | CONFIG_XFRM_USER=y | ||
33 | CONFIG_NET_KEY=y | ||
34 | CONFIG_INET=y | ||
35 | CONFIG_IP_MULTICAST=y | ||
36 | CONFIG_IP_ADVANCED_ROUTER=y | ||
37 | CONFIG_IP_MULTIPLE_TABLES=y | ||
38 | CONFIG_IP_ROUTE_MULTIPATH=y | ||
39 | CONFIG_IP_ROUTE_VERBOSE=y | ||
40 | CONFIG_IP_PNP=y | ||
41 | CONFIG_IP_PNP_DHCP=y | ||
42 | CONFIG_IP_PNP_BOOTP=y | ||
43 | CONFIG_IP_PNP_RARP=y | ||
44 | CONFIG_NET_IPIP=y | ||
45 | CONFIG_IP_MROUTE=y | ||
46 | CONFIG_IP_PIMSM_V1=y | ||
47 | CONFIG_IP_PIMSM_V2=y | ||
48 | CONFIG_ARPD=y | ||
49 | CONFIG_INET_ESP=y | ||
50 | # CONFIG_INET_XFRM_MODE_BEET is not set | ||
51 | # CONFIG_INET_LRO is not set | ||
52 | CONFIG_IPV6=y | ||
53 | CONFIG_IP_SCTP=m | ||
54 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | ||
55 | CONFIG_PROC_DEVICETREE=y | ||
56 | CONFIG_BLK_DEV_LOOP=y | ||
57 | CONFIG_BLK_DEV_RAM=y | ||
58 | CONFIG_BLK_DEV_RAM_SIZE=131072 | ||
59 | CONFIG_MISC_DEVICES=y | ||
60 | CONFIG_EEPROM_LEGACY=y | ||
61 | CONFIG_NETDEVICES=y | ||
62 | CONFIG_DUMMY=y | ||
63 | CONFIG_NET_ETHERNET=y | ||
64 | CONFIG_INPUT_FF_MEMLESS=m | ||
65 | # CONFIG_INPUT_MOUSEDEV is not set | ||
66 | # CONFIG_INPUT_KEYBOARD is not set | ||
67 | # CONFIG_INPUT_MOUSE is not set | ||
68 | CONFIG_SERIO_LIBPS2=y | ||
69 | CONFIG_SERIAL_8250=y | ||
70 | CONFIG_SERIAL_8250_CONSOLE=y | ||
71 | CONFIG_SERIAL_8250_EXTENDED=y | ||
72 | CONFIG_SERIAL_8250_MANY_PORTS=y | ||
73 | CONFIG_SERIAL_8250_DETECT_IRQ=y | ||
74 | CONFIG_SERIAL_8250_RSA=y | ||
75 | CONFIG_I2C=y | ||
76 | # CONFIG_HWMON is not set | ||
77 | CONFIG_VIDEO_OUTPUT_CONTROL=y | ||
78 | # CONFIG_HID_SUPPORT is not set | ||
79 | # CONFIG_USB_SUPPORT is not set | ||
80 | CONFIG_DMADEVICES=y | ||
81 | CONFIG_FSL_DMA=y | ||
82 | CONFIG_EXT2_FS=y | ||
83 | CONFIG_EXT3_FS=y | ||
84 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
85 | CONFIG_PROC_KCORE=y | ||
86 | CONFIG_TMPFS=y | ||
87 | # CONFIG_MISC_FILESYSTEMS is not set | ||
88 | CONFIG_PARTITION_ADVANCED=y | ||
89 | CONFIG_MAC_PARTITION=y | ||
90 | CONFIG_NLS=y | ||
91 | CONFIG_NLS_UTF8=m | ||
92 | CONFIG_CRC_T10DIF=y | ||
93 | CONFIG_CRC_ITU_T=m | ||
94 | CONFIG_FRAME_WARN=1024 | ||
95 | CONFIG_DEBUG_FS=y | ||
96 | CONFIG_DEBUG_KERNEL=y | ||
97 | CONFIG_DETECT_HUNG_TASK=y | ||
98 | CONFIG_DEBUG_INFO=y | ||
99 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
100 | CONFIG_SYSCTL_SYSCALL_CHECK=y | ||
101 | CONFIG_VIRQ_DEBUG=y | ||
102 | CONFIG_CRYPTO_PCBC=m | ||
103 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | ||
104 | CONFIG_CRYPTO_DEV_TALITOS=y | ||
diff --git a/arch/powerpc/configs/ep8248e_defconfig b/arch/powerpc/configs/ep8248e_defconfig index 2677b08199e7..fceffb3cffbe 100644 --- a/arch/powerpc/configs/ep8248e_defconfig +++ b/arch/powerpc/configs/ep8248e_defconfig | |||
@@ -2,7 +2,7 @@ CONFIG_SYSVIPC=y | |||
2 | CONFIG_IKCONFIG=y | 2 | CONFIG_IKCONFIG=y |
3 | CONFIG_IKCONFIG_PROC=y | 3 | CONFIG_IKCONFIG_PROC=y |
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_EMBEDDED=y | 5 | CONFIG_EXPERT=y |
6 | CONFIG_KALLSYMS_ALL=y | 6 | CONFIG_KALLSYMS_ALL=y |
7 | CONFIG_SLAB=y | 7 | CONFIG_SLAB=y |
8 | # CONFIG_IOSCHED_CFQ is not set | 8 | # CONFIG_IOSCHED_CFQ is not set |
diff --git a/arch/powerpc/configs/ep88xc_defconfig b/arch/powerpc/configs/ep88xc_defconfig index f9a3112e5442..219fd470ed22 100644 --- a/arch/powerpc/configs/ep88xc_defconfig +++ b/arch/powerpc/configs/ep88xc_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_EXPERIMENTAL=y | |||
4 | CONFIG_SYSVIPC=y | 4 | CONFIG_SYSVIPC=y |
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | # CONFIG_SYSCTL_SYSCALL is not set | 8 | # CONFIG_SYSCTL_SYSCALL is not set |
9 | # CONFIG_ELF_CORE is not set | 9 | # CONFIG_ELF_CORE is not set |
10 | # CONFIG_BASE_FULL is not set | 10 | # CONFIG_BASE_FULL is not set |
diff --git a/arch/powerpc/configs/gamecube_defconfig b/arch/powerpc/configs/gamecube_defconfig index fcf0a398cd66..e74d3a483705 100644 --- a/arch/powerpc/configs/gamecube_defconfig +++ b/arch/powerpc/configs/gamecube_defconfig | |||
@@ -6,7 +6,7 @@ CONFIG_IKCONFIG_PROC=y | |||
6 | CONFIG_LOG_BUF_SHIFT=14 | 6 | CONFIG_LOG_BUF_SHIFT=14 |
7 | CONFIG_BLK_DEV_INITRD=y | 7 | CONFIG_BLK_DEV_INITRD=y |
8 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 8 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
9 | CONFIG_EMBEDDED=y | 9 | CONFIG_EXPERT=y |
10 | # CONFIG_ELF_CORE is not set | 10 | # CONFIG_ELF_CORE is not set |
11 | CONFIG_PERF_COUNTERS=y | 11 | CONFIG_PERF_COUNTERS=y |
12 | # CONFIG_VM_EVENT_COUNTERS is not set | 12 | # CONFIG_VM_EVENT_COUNTERS is not set |
diff --git a/arch/powerpc/configs/holly_defconfig b/arch/powerpc/configs/holly_defconfig index b9b63a609525..94ebfee188db 100644 --- a/arch/powerpc/configs/holly_defconfig +++ b/arch/powerpc/configs/holly_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | CONFIG_MODULES=y | 7 | CONFIG_MODULES=y |
8 | # CONFIG_BLK_DEV_BSG is not set | 8 | # CONFIG_BLK_DEV_BSG is not set |
9 | # CONFIG_PPC_CHRP is not set | 9 | # CONFIG_PPC_CHRP is not set |
diff --git a/arch/powerpc/configs/linkstation_defconfig b/arch/powerpc/configs/linkstation_defconfig index f39d0cf876dd..8a874b999867 100644 --- a/arch/powerpc/configs/linkstation_defconfig +++ b/arch/powerpc/configs/linkstation_defconfig | |||
@@ -78,6 +78,7 @@ CONFIG_BLK_DEV_LOOP=y | |||
78 | CONFIG_BLK_DEV_RAM=y | 78 | CONFIG_BLK_DEV_RAM=y |
79 | CONFIG_BLK_DEV_RAM_COUNT=2 | 79 | CONFIG_BLK_DEV_RAM_COUNT=2 |
80 | CONFIG_BLK_DEV_RAM_SIZE=8192 | 80 | CONFIG_BLK_DEV_RAM_SIZE=8192 |
81 | CONFIG_MISC_DEVICES=y | ||
81 | CONFIG_EEPROM_LEGACY=m | 82 | CONFIG_EEPROM_LEGACY=m |
82 | CONFIG_BLK_DEV_SD=y | 83 | CONFIG_BLK_DEV_SD=y |
83 | CONFIG_CHR_DEV_SG=y | 84 | CONFIG_CHR_DEV_SG=y |
diff --git a/arch/powerpc/configs/mgcoge_defconfig b/arch/powerpc/configs/mgcoge_defconfig index c4ed255af18b..6cb588a7d425 100644 --- a/arch/powerpc/configs/mgcoge_defconfig +++ b/arch/powerpc/configs/mgcoge_defconfig | |||
@@ -1,16 +1,16 @@ | |||
1 | CONFIG_SYSVIPC=y | 1 | CONFIG_SYSVIPC=y |
2 | CONFIG_SPARSE_IRQ=y | ||
2 | CONFIG_IKCONFIG=y | 3 | CONFIG_IKCONFIG=y |
3 | CONFIG_IKCONFIG_PROC=y | 4 | CONFIG_IKCONFIG_PROC=y |
4 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
6 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
7 | CONFIG_KALLSYMS_ALL=y | 8 | CONFIG_KALLSYMS_ALL=y |
8 | CONFIG_SLAB=y | 9 | CONFIG_SLAB=y |
9 | # CONFIG_IOSCHED_CFQ is not set | 10 | # CONFIG_IOSCHED_CFQ is not set |
10 | CONFIG_PPC_82xx=y | 11 | CONFIG_PPC_82xx=y |
11 | CONFIG_MGCOGE=y | 12 | CONFIG_MGCOGE=y |
12 | CONFIG_BINFMT_MISC=y | 13 | CONFIG_BINFMT_MISC=y |
13 | CONFIG_SPARSE_IRQ=y | ||
14 | # CONFIG_SECCOMP is not set | 14 | # CONFIG_SECCOMP is not set |
15 | CONFIG_NET=y | 15 | CONFIG_NET=y |
16 | CONFIG_PACKET=y | 16 | CONFIG_PACKET=y |
@@ -30,7 +30,6 @@ CONFIG_MTD=y | |||
30 | CONFIG_MTD_CONCAT=y | 30 | CONFIG_MTD_CONCAT=y |
31 | CONFIG_MTD_PARTITIONS=y | 31 | CONFIG_MTD_PARTITIONS=y |
32 | CONFIG_MTD_CMDLINE_PARTS=y | 32 | CONFIG_MTD_CMDLINE_PARTS=y |
33 | CONFIG_MTD_OF_PARTS=y | ||
34 | CONFIG_MTD_CHAR=y | 33 | CONFIG_MTD_CHAR=y |
35 | CONFIG_MTD_BLKDEVS=y | 34 | CONFIG_MTD_BLKDEVS=y |
36 | CONFIG_MTD_CFI=y | 35 | CONFIG_MTD_CFI=y |
@@ -43,7 +42,6 @@ CONFIG_MTD_PHYSMAP_OF=y | |||
43 | CONFIG_PROC_DEVICETREE=y | 42 | CONFIG_PROC_DEVICETREE=y |
44 | CONFIG_BLK_DEV_LOOP=y | 43 | CONFIG_BLK_DEV_LOOP=y |
45 | CONFIG_BLK_DEV_RAM=y | 44 | CONFIG_BLK_DEV_RAM=y |
46 | # CONFIG_MISC_DEVICES is not set | ||
47 | # CONFIG_MACINTOSH_DRIVERS is not set | 45 | # CONFIG_MACINTOSH_DRIVERS is not set |
48 | CONFIG_NETDEVICES=y | 46 | CONFIG_NETDEVICES=y |
49 | CONFIG_FIXED_PHY=y | 47 | CONFIG_FIXED_PHY=y |
@@ -67,7 +65,6 @@ CONFIG_EXT2_FS=y | |||
67 | CONFIG_EXT3_FS=y | 65 | CONFIG_EXT3_FS=y |
68 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | 66 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set |
69 | # CONFIG_EXT3_FS_XATTR is not set | 67 | # CONFIG_EXT3_FS_XATTR is not set |
70 | CONFIG_INOTIFY=y | ||
71 | CONFIG_AUTOFS4_FS=y | 68 | CONFIG_AUTOFS4_FS=y |
72 | CONFIG_PROC_KCORE=y | 69 | CONFIG_PROC_KCORE=y |
73 | CONFIG_TMPFS=y | 70 | CONFIG_TMPFS=y |
@@ -88,13 +85,9 @@ CONFIG_DEBUG_FS=y | |||
88 | CONFIG_DEBUG_KERNEL=y | 85 | CONFIG_DEBUG_KERNEL=y |
89 | # CONFIG_SCHED_DEBUG is not set | 86 | # CONFIG_SCHED_DEBUG is not set |
90 | CONFIG_DEBUG_INFO=y | 87 | CONFIG_DEBUG_INFO=y |
91 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
92 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 88 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
93 | CONFIG_BDI_SWITCH=y | 89 | CONFIG_BDI_SWITCH=y |
94 | CONFIG_CRYPTO_CBC=y | ||
95 | CONFIG_CRYPTO_ECB=y | 90 | CONFIG_CRYPTO_ECB=y |
96 | CONFIG_CRYPTO_PCBC=y | 91 | CONFIG_CRYPTO_PCBC=y |
97 | CONFIG_CRYPTO_MD5=y | ||
98 | CONFIG_CRYPTO_DES=y | ||
99 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 92 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
100 | # CONFIG_CRYPTO_HW is not set | 93 | # CONFIG_CRYPTO_HW is not set |
diff --git a/arch/powerpc/configs/mgsuvd_defconfig b/arch/powerpc/configs/mgsuvd_defconfig deleted file mode 100644 index f276c7cf555b..000000000000 --- a/arch/powerpc/configs/mgsuvd_defconfig +++ /dev/null | |||
@@ -1,81 +0,0 @@ | |||
1 | CONFIG_PPC_8xx=y | ||
2 | CONFIG_EXPERIMENTAL=y | ||
3 | # CONFIG_SWAP is not set | ||
4 | CONFIG_SYSVIPC=y | ||
5 | CONFIG_BLK_DEV_INITRD=y | ||
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | ||
7 | CONFIG_EMBEDDED=y | ||
8 | # CONFIG_SYSCTL_SYSCALL is not set | ||
9 | # CONFIG_HOTPLUG is not set | ||
10 | # CONFIG_BUG is not set | ||
11 | # CONFIG_BASE_FULL is not set | ||
12 | # CONFIG_EPOLL is not set | ||
13 | # CONFIG_VM_EVENT_COUNTERS is not set | ||
14 | CONFIG_SLAB=y | ||
15 | # CONFIG_BLK_DEV_BSG is not set | ||
16 | CONFIG_PPC_MGSUVD=y | ||
17 | CONFIG_8xx_COPYBACK=y | ||
18 | CONFIG_8xx_CPU6=y | ||
19 | CONFIG_I2C_SPI_SMC1_UCODE_PATCH=y | ||
20 | CONFIG_HZ_1000=y | ||
21 | CONFIG_MATH_EMULATION=y | ||
22 | CONFIG_SPARSE_IRQ=y | ||
23 | # CONFIG_SECCOMP is not set | ||
24 | CONFIG_NET=y | ||
25 | CONFIG_PACKET=y | ||
26 | CONFIG_UNIX=y | ||
27 | CONFIG_INET=y | ||
28 | CONFIG_IP_MULTICAST=y | ||
29 | CONFIG_IP_PNP=y | ||
30 | CONFIG_SYN_COOKIES=y | ||
31 | # CONFIG_INET_LRO is not set | ||
32 | # CONFIG_IPV6 is not set | ||
33 | CONFIG_MTD=y | ||
34 | CONFIG_MTD_PARTITIONS=y | ||
35 | CONFIG_MTD_CMDLINE_PARTS=y | ||
36 | CONFIG_MTD_OF_PARTS=y | ||
37 | CONFIG_MTD_CHAR=y | ||
38 | CONFIG_MTD_BLOCK=y | ||
39 | CONFIG_MTD_CFI=y | ||
40 | CONFIG_MTD_CFI_ADV_OPTIONS=y | ||
41 | CONFIG_MTD_CFI_GEOMETRY=y | ||
42 | # CONFIG_MTD_MAP_BANK_WIDTH_4 is not set | ||
43 | CONFIG_MTD_CFI_INTELEXT=y | ||
44 | CONFIG_MTD_CFI_AMDSTD=y | ||
45 | CONFIG_MTD_CFI_STAA=y | ||
46 | CONFIG_MTD_PHYSMAP_OF=y | ||
47 | CONFIG_BLK_DEV_LOOP=y | ||
48 | CONFIG_BLK_DEV_RAM=y | ||
49 | # CONFIG_MISC_DEVICES is not set | ||
50 | CONFIG_NETDEVICES=y | ||
51 | CONFIG_FIXED_PHY=y | ||
52 | CONFIG_NET_ETHERNET=y | ||
53 | CONFIG_FS_ENET=y | ||
54 | # CONFIG_FS_ENET_HAS_FEC is not set | ||
55 | # CONFIG_NETDEV_1000 is not set | ||
56 | # CONFIG_NETDEV_10000 is not set | ||
57 | # CONFIG_INPUT is not set | ||
58 | # CONFIG_SERIO is not set | ||
59 | # CONFIG_VT is not set | ||
60 | CONFIG_SERIAL_CPM=y | ||
61 | CONFIG_SERIAL_CPM_CONSOLE=y | ||
62 | # CONFIG_LEGACY_PTYS is not set | ||
63 | CONFIG_GEN_RTC=y | ||
64 | # CONFIG_HWMON is not set | ||
65 | # CONFIG_USB_SUPPORT is not set | ||
66 | CONFIG_EXT2_FS=y | ||
67 | CONFIG_EXT2_FS_XATTR=y | ||
68 | CONFIG_EXT3_FS=y | ||
69 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | ||
70 | CONFIG_INOTIFY=y | ||
71 | CONFIG_TMPFS=y | ||
72 | CONFIG_JFFS2_FS=y | ||
73 | CONFIG_CRAMFS=y | ||
74 | CONFIG_NFS_FS=y | ||
75 | CONFIG_NFS_V3=y | ||
76 | CONFIG_ROOT_NFS=y | ||
77 | CONFIG_PARTITION_ADVANCED=y | ||
78 | CONFIG_CRC_CCITT=y | ||
79 | CONFIG_DEBUG_FS=y | ||
80 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | ||
81 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | ||
diff --git a/arch/powerpc/configs/mpc512x_defconfig b/arch/powerpc/configs/mpc512x_defconfig index 62db8a3df162..c02bbb2fddf8 100644 --- a/arch/powerpc/configs/mpc512x_defconfig +++ b/arch/powerpc/configs/mpc512x_defconfig | |||
@@ -61,6 +61,7 @@ CONFIG_BLK_DEV_RAM=y | |||
61 | CONFIG_BLK_DEV_RAM_COUNT=1 | 61 | CONFIG_BLK_DEV_RAM_COUNT=1 |
62 | CONFIG_BLK_DEV_RAM_SIZE=8192 | 62 | CONFIG_BLK_DEV_RAM_SIZE=8192 |
63 | CONFIG_BLK_DEV_XIP=y | 63 | CONFIG_BLK_DEV_XIP=y |
64 | CONFIG_MISC_DEVICES=y | ||
64 | CONFIG_EEPROM_AT24=y | 65 | CONFIG_EEPROM_AT24=y |
65 | CONFIG_SCSI=y | 66 | CONFIG_SCSI=y |
66 | # CONFIG_SCSI_PROC_FS is not set | 67 | # CONFIG_SCSI_PROC_FS is not set |
diff --git a/arch/powerpc/configs/mpc5200_defconfig b/arch/powerpc/configs/mpc5200_defconfig index 7376e27b8ed4..e63f537b854a 100644 --- a/arch/powerpc/configs/mpc5200_defconfig +++ b/arch/powerpc/configs/mpc5200_defconfig | |||
@@ -52,6 +52,7 @@ CONFIG_PROC_DEVICETREE=y | |||
52 | CONFIG_BLK_DEV_LOOP=y | 52 | CONFIG_BLK_DEV_LOOP=y |
53 | CONFIG_BLK_DEV_RAM=y | 53 | CONFIG_BLK_DEV_RAM=y |
54 | CONFIG_BLK_DEV_RAM_SIZE=32768 | 54 | CONFIG_BLK_DEV_RAM_SIZE=32768 |
55 | CONFIG_MISC_DEVICES=y | ||
55 | CONFIG_EEPROM_AT24=y | 56 | CONFIG_EEPROM_AT24=y |
56 | CONFIG_SCSI_TGT=y | 57 | CONFIG_SCSI_TGT=y |
57 | CONFIG_BLK_DEV_SD=y | 58 | CONFIG_BLK_DEV_SD=y |
diff --git a/arch/powerpc/configs/mpc7448_hpc2_defconfig b/arch/powerpc/configs/mpc7448_hpc2_defconfig index 3b9470883de5..75f0bbf0f6e8 100644 --- a/arch/powerpc/configs/mpc7448_hpc2_defconfig +++ b/arch/powerpc/configs/mpc7448_hpc2_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_SYSVIPC=y | |||
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | # CONFIG_BLK_DEV_BSG is not set | 8 | # CONFIG_BLK_DEV_BSG is not set |
9 | # CONFIG_PPC_CHRP is not set | 9 | # CONFIG_PPC_CHRP is not set |
10 | # CONFIG_PPC_PMAC is not set | 10 | # CONFIG_PPC_PMAC is not set |
diff --git a/arch/powerpc/configs/mpc8272_ads_defconfig b/arch/powerpc/configs/mpc8272_ads_defconfig index c7d68ff1a736..6a22400f73c1 100644 --- a/arch/powerpc/configs/mpc8272_ads_defconfig +++ b/arch/powerpc/configs/mpc8272_ads_defconfig | |||
@@ -2,7 +2,7 @@ CONFIG_SYSVIPC=y | |||
2 | CONFIG_IKCONFIG=y | 2 | CONFIG_IKCONFIG=y |
3 | CONFIG_IKCONFIG_PROC=y | 3 | CONFIG_IKCONFIG_PROC=y |
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_EMBEDDED=y | 5 | CONFIG_EXPERT=y |
6 | CONFIG_KALLSYMS_ALL=y | 6 | CONFIG_KALLSYMS_ALL=y |
7 | # CONFIG_PPC_CHRP is not set | 7 | # CONFIG_PPC_CHRP is not set |
8 | # CONFIG_PPC_PMAC is not set | 8 | # CONFIG_PPC_PMAC is not set |
diff --git a/arch/powerpc/configs/mpc83xx_defconfig b/arch/powerpc/configs/mpc83xx_defconfig index 5b1b10fd9740..5aac9a8bc53b 100644 --- a/arch/powerpc/configs/mpc83xx_defconfig +++ b/arch/powerpc/configs/mpc83xx_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_SYSVIPC=y | |||
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_BLK_DEV_INITRD=y | 4 | CONFIG_BLK_DEV_INITRD=y |
5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 5 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | CONFIG_SLAB=y | 7 | CONFIG_SLAB=y |
8 | CONFIG_MODULES=y | 8 | CONFIG_MODULES=y |
9 | CONFIG_MODULE_UNLOAD=y | 9 | CONFIG_MODULE_UNLOAD=y |
diff --git a/arch/powerpc/configs/mpc85xx_defconfig b/arch/powerpc/configs/mpc85xx_defconfig index c3b113b2ca31..96b89df7752a 100644 --- a/arch/powerpc/configs/mpc85xx_defconfig +++ b/arch/powerpc/configs/mpc85xx_defconfig | |||
@@ -10,7 +10,7 @@ CONFIG_IKCONFIG_PROC=y | |||
10 | CONFIG_LOG_BUF_SHIFT=14 | 10 | CONFIG_LOG_BUF_SHIFT=14 |
11 | CONFIG_BLK_DEV_INITRD=y | 11 | CONFIG_BLK_DEV_INITRD=y |
12 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 12 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
13 | CONFIG_EMBEDDED=y | 13 | CONFIG_EXPERT=y |
14 | CONFIG_KALLSYMS_ALL=y | 14 | CONFIG_KALLSYMS_ALL=y |
15 | CONFIG_KALLSYMS_EXTRA_PASS=y | 15 | CONFIG_KALLSYMS_EXTRA_PASS=y |
16 | CONFIG_MODULES=y | 16 | CONFIG_MODULES=y |
@@ -82,6 +82,7 @@ CONFIG_BLK_DEV_LOOP=y | |||
82 | CONFIG_BLK_DEV_NBD=y | 82 | CONFIG_BLK_DEV_NBD=y |
83 | CONFIG_BLK_DEV_RAM=y | 83 | CONFIG_BLK_DEV_RAM=y |
84 | CONFIG_BLK_DEV_RAM_SIZE=131072 | 84 | CONFIG_BLK_DEV_RAM_SIZE=131072 |
85 | CONFIG_MISC_DEVICES=y | ||
85 | CONFIG_EEPROM_LEGACY=y | 86 | CONFIG_EEPROM_LEGACY=y |
86 | CONFIG_BLK_DEV_SD=y | 87 | CONFIG_BLK_DEV_SD=y |
87 | CONFIG_CHR_DEV_ST=y | 88 | CONFIG_CHR_DEV_ST=y |
@@ -124,6 +125,9 @@ CONFIG_I2C_CPM=m | |||
124 | CONFIG_I2C_MPC=y | 125 | CONFIG_I2C_MPC=y |
125 | # CONFIG_HWMON is not set | 126 | # CONFIG_HWMON is not set |
126 | CONFIG_VIDEO_OUTPUT_CONTROL=y | 127 | CONFIG_VIDEO_OUTPUT_CONTROL=y |
128 | CONFIG_FB=y | ||
129 | CONFIG_FB_FSL_DIU=y | ||
130 | # CONFIG_VGA_CONSOLE is not set | ||
127 | CONFIG_SOUND=y | 131 | CONFIG_SOUND=y |
128 | CONFIG_SND=y | 132 | CONFIG_SND=y |
129 | # CONFIG_SND_SUPPORT_OLD_API is not set | 133 | # CONFIG_SND_SUPPORT_OLD_API is not set |
@@ -200,7 +204,6 @@ CONFIG_CRC_T10DIF=y | |||
200 | CONFIG_DEBUG_FS=y | 204 | CONFIG_DEBUG_FS=y |
201 | CONFIG_DEBUG_KERNEL=y | 205 | CONFIG_DEBUG_KERNEL=y |
202 | CONFIG_DETECT_HUNG_TASK=y | 206 | CONFIG_DETECT_HUNG_TASK=y |
203 | # CONFIG_DEBUG_BUGVERBOSE is not set | ||
204 | CONFIG_DEBUG_INFO=y | 207 | CONFIG_DEBUG_INFO=y |
205 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | 208 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set |
206 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 209 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
diff --git a/arch/powerpc/configs/mpc85xx_smp_defconfig b/arch/powerpc/configs/mpc85xx_smp_defconfig index a075da2ea3fb..de65841aa04e 100644 --- a/arch/powerpc/configs/mpc85xx_smp_defconfig +++ b/arch/powerpc/configs/mpc85xx_smp_defconfig | |||
@@ -12,7 +12,7 @@ CONFIG_IKCONFIG_PROC=y | |||
12 | CONFIG_LOG_BUF_SHIFT=14 | 12 | CONFIG_LOG_BUF_SHIFT=14 |
13 | CONFIG_BLK_DEV_INITRD=y | 13 | CONFIG_BLK_DEV_INITRD=y |
14 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 14 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
15 | CONFIG_EMBEDDED=y | 15 | CONFIG_EXPERT=y |
16 | CONFIG_KALLSYMS_ALL=y | 16 | CONFIG_KALLSYMS_ALL=y |
17 | CONFIG_KALLSYMS_EXTRA_PASS=y | 17 | CONFIG_KALLSYMS_EXTRA_PASS=y |
18 | CONFIG_MODULES=y | 18 | CONFIG_MODULES=y |
@@ -84,6 +84,7 @@ CONFIG_BLK_DEV_LOOP=y | |||
84 | CONFIG_BLK_DEV_NBD=y | 84 | CONFIG_BLK_DEV_NBD=y |
85 | CONFIG_BLK_DEV_RAM=y | 85 | CONFIG_BLK_DEV_RAM=y |
86 | CONFIG_BLK_DEV_RAM_SIZE=131072 | 86 | CONFIG_BLK_DEV_RAM_SIZE=131072 |
87 | CONFIG_MISC_DEVICES=y | ||
87 | CONFIG_EEPROM_LEGACY=y | 88 | CONFIG_EEPROM_LEGACY=y |
88 | CONFIG_BLK_DEV_SD=y | 89 | CONFIG_BLK_DEV_SD=y |
89 | CONFIG_CHR_DEV_ST=y | 90 | CONFIG_CHR_DEV_ST=y |
@@ -126,6 +127,9 @@ CONFIG_I2C_CPM=m | |||
126 | CONFIG_I2C_MPC=y | 127 | CONFIG_I2C_MPC=y |
127 | # CONFIG_HWMON is not set | 128 | # CONFIG_HWMON is not set |
128 | CONFIG_VIDEO_OUTPUT_CONTROL=y | 129 | CONFIG_VIDEO_OUTPUT_CONTROL=y |
130 | CONFIG_FB=y | ||
131 | CONFIG_FB_FSL_DIU=y | ||
132 | # CONFIG_VGA_CONSOLE is not set | ||
129 | CONFIG_SOUND=y | 133 | CONFIG_SOUND=y |
130 | CONFIG_SND=y | 134 | CONFIG_SND=y |
131 | # CONFIG_SND_SUPPORT_OLD_API is not set | 135 | # CONFIG_SND_SUPPORT_OLD_API is not set |
@@ -202,7 +206,6 @@ CONFIG_CRC_T10DIF=y | |||
202 | CONFIG_DEBUG_FS=y | 206 | CONFIG_DEBUG_FS=y |
203 | CONFIG_DEBUG_KERNEL=y | 207 | CONFIG_DEBUG_KERNEL=y |
204 | CONFIG_DETECT_HUNG_TASK=y | 208 | CONFIG_DETECT_HUNG_TASK=y |
205 | # CONFIG_DEBUG_BUGVERBOSE is not set | ||
206 | CONFIG_DEBUG_INFO=y | 209 | CONFIG_DEBUG_INFO=y |
207 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | 210 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set |
208 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 211 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
diff --git a/arch/powerpc/configs/mpc866_ads_defconfig b/arch/powerpc/configs/mpc866_ads_defconfig index 668215cae890..5c258823e694 100644 --- a/arch/powerpc/configs/mpc866_ads_defconfig +++ b/arch/powerpc/configs/mpc866_ads_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_EXPERIMENTAL=y | |||
4 | CONFIG_SYSVIPC=y | 4 | CONFIG_SYSVIPC=y |
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | # CONFIG_SYSCTL_SYSCALL is not set | 8 | # CONFIG_SYSCTL_SYSCALL is not set |
9 | # CONFIG_HOTPLUG is not set | 9 | # CONFIG_HOTPLUG is not set |
10 | # CONFIG_BUG is not set | 10 | # CONFIG_BUG is not set |
diff --git a/arch/powerpc/configs/mpc86xx_defconfig b/arch/powerpc/configs/mpc86xx_defconfig index 63b90d477889..a1cc8179e9fd 100644 --- a/arch/powerpc/configs/mpc86xx_defconfig +++ b/arch/powerpc/configs/mpc86xx_defconfig | |||
@@ -10,7 +10,7 @@ CONFIG_IKCONFIG_PROC=y | |||
10 | CONFIG_LOG_BUF_SHIFT=14 | 10 | CONFIG_LOG_BUF_SHIFT=14 |
11 | CONFIG_BLK_DEV_INITRD=y | 11 | CONFIG_BLK_DEV_INITRD=y |
12 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 12 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
13 | CONFIG_EMBEDDED=y | 13 | CONFIG_EXPERT=y |
14 | CONFIG_KALLSYMS_ALL=y | 14 | CONFIG_KALLSYMS_ALL=y |
15 | CONFIG_KALLSYMS_EXTRA_PASS=y | 15 | CONFIG_KALLSYMS_EXTRA_PASS=y |
16 | CONFIG_MODULES=y | 16 | CONFIG_MODULES=y |
@@ -66,6 +66,7 @@ CONFIG_BLK_DEV_LOOP=y | |||
66 | CONFIG_BLK_DEV_NBD=y | 66 | CONFIG_BLK_DEV_NBD=y |
67 | CONFIG_BLK_DEV_RAM=y | 67 | CONFIG_BLK_DEV_RAM=y |
68 | CONFIG_BLK_DEV_RAM_SIZE=131072 | 68 | CONFIG_BLK_DEV_RAM_SIZE=131072 |
69 | CONFIG_MISC_DEVICES=y | ||
69 | CONFIG_EEPROM_LEGACY=y | 70 | CONFIG_EEPROM_LEGACY=y |
70 | CONFIG_BLK_DEV_SD=y | 71 | CONFIG_BLK_DEV_SD=y |
71 | CONFIG_CHR_DEV_ST=y | 72 | CONFIG_CHR_DEV_ST=y |
@@ -170,7 +171,6 @@ CONFIG_MAC_PARTITION=y | |||
170 | CONFIG_CRC_T10DIF=y | 171 | CONFIG_CRC_T10DIF=y |
171 | CONFIG_DEBUG_KERNEL=y | 172 | CONFIG_DEBUG_KERNEL=y |
172 | CONFIG_DETECT_HUNG_TASK=y | 173 | CONFIG_DETECT_HUNG_TASK=y |
173 | # CONFIG_DEBUG_BUGVERBOSE is not set | ||
174 | CONFIG_DEBUG_INFO=y | 174 | CONFIG_DEBUG_INFO=y |
175 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | 175 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set |
176 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 176 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
diff --git a/arch/powerpc/configs/mpc885_ads_defconfig b/arch/powerpc/configs/mpc885_ads_defconfig index f9b83481b00e..9e146cdf63de 100644 --- a/arch/powerpc/configs/mpc885_ads_defconfig +++ b/arch/powerpc/configs/mpc885_ads_defconfig | |||
@@ -4,7 +4,7 @@ CONFIG_EXPERIMENTAL=y | |||
4 | CONFIG_SYSVIPC=y | 4 | CONFIG_SYSVIPC=y |
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 6 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
7 | CONFIG_EMBEDDED=y | 7 | CONFIG_EXPERT=y |
8 | # CONFIG_SYSCTL_SYSCALL is not set | 8 | # CONFIG_SYSCTL_SYSCALL is not set |
9 | # CONFIG_ELF_CORE is not set | 9 | # CONFIG_ELF_CORE is not set |
10 | # CONFIG_BASE_FULL is not set | 10 | # CONFIG_BASE_FULL is not set |
diff --git a/arch/powerpc/configs/pasemi_defconfig b/arch/powerpc/configs/pasemi_defconfig index edd2d54c8196..f4deb0b78cf0 100644 --- a/arch/powerpc/configs/pasemi_defconfig +++ b/arch/powerpc/configs/pasemi_defconfig | |||
@@ -59,6 +59,7 @@ CONFIG_PROC_DEVICETREE=y | |||
59 | CONFIG_BLK_DEV_LOOP=y | 59 | CONFIG_BLK_DEV_LOOP=y |
60 | CONFIG_BLK_DEV_RAM=y | 60 | CONFIG_BLK_DEV_RAM=y |
61 | CONFIG_BLK_DEV_RAM_SIZE=16384 | 61 | CONFIG_BLK_DEV_RAM_SIZE=16384 |
62 | CONFIG_MISC_DEVICES=y | ||
62 | CONFIG_EEPROM_LEGACY=y | 63 | CONFIG_EEPROM_LEGACY=y |
63 | CONFIG_IDE=y | 64 | CONFIG_IDE=y |
64 | CONFIG_BLK_DEV_IDECD=y | 65 | CONFIG_BLK_DEV_IDECD=y |
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig index ac4fc41035f6..f8b394a76ac3 100644 --- a/arch/powerpc/configs/pmac32_defconfig +++ b/arch/powerpc/configs/pmac32_defconfig | |||
@@ -112,8 +112,8 @@ CONFIG_IRDA_CACHE_LAST_LSAP=y | |||
112 | CONFIG_IRDA_FAST_RR=y | 112 | CONFIG_IRDA_FAST_RR=y |
113 | CONFIG_IRTTY_SIR=m | 113 | CONFIG_IRTTY_SIR=m |
114 | CONFIG_BT=m | 114 | CONFIG_BT=m |
115 | CONFIG_BT_L2CAP=m | 115 | CONFIG_BT_L2CAP=y |
116 | CONFIG_BT_SCO=m | 116 | CONFIG_BT_SCO=y |
117 | CONFIG_BT_RFCOMM=m | 117 | CONFIG_BT_RFCOMM=m |
118 | CONFIG_BT_RFCOMM_TTY=y | 118 | CONFIG_BT_RFCOMM_TTY=y |
119 | CONFIG_BT_BNEP=m | 119 | CONFIG_BT_BNEP=m |
diff --git a/arch/powerpc/configs/ppc40x_defconfig b/arch/powerpc/configs/ppc40x_defconfig index 93d7425ce6cd..bfd634b5ada7 100644 --- a/arch/powerpc/configs/ppc40x_defconfig +++ b/arch/powerpc/configs/ppc40x_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_KALLSYMS_ALL=y | 9 | CONFIG_KALLSYMS_ALL=y |
10 | CONFIG_KALLSYMS_EXTRA_PASS=y | 10 | CONFIG_KALLSYMS_EXTRA_PASS=y |
11 | CONFIG_MODULES=y | 11 | CONFIG_MODULES=y |
diff --git a/arch/powerpc/configs/ppc44x_defconfig b/arch/powerpc/configs/ppc44x_defconfig index cd446fba3fae..47133202a625 100644 --- a/arch/powerpc/configs/ppc44x_defconfig +++ b/arch/powerpc/configs/ppc44x_defconfig | |||
@@ -5,13 +5,14 @@ CONFIG_POSIX_MQUEUE=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_BLK_DEV_INITRD=y | 6 | CONFIG_BLK_DEV_INITRD=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | CONFIG_KALLSYMS_ALL=y | 9 | CONFIG_KALLSYMS_ALL=y |
10 | CONFIG_KALLSYMS_EXTRA_PASS=y | 10 | CONFIG_KALLSYMS_EXTRA_PASS=y |
11 | CONFIG_MODULES=y | 11 | CONFIG_MODULES=y |
12 | CONFIG_MODULE_UNLOAD=y | 12 | CONFIG_MODULE_UNLOAD=y |
13 | # CONFIG_BLK_DEV_BSG is not set | 13 | # CONFIG_BLK_DEV_BSG is not set |
14 | CONFIG_BAMBOO=y | 14 | CONFIG_BAMBOO=y |
15 | CONFIG_BLUESTONE=y | ||
15 | CONFIG_SAM440EP=y | 16 | CONFIG_SAM440EP=y |
16 | CONFIG_SEQUOIA=y | 17 | CONFIG_SEQUOIA=y |
17 | CONFIG_TAISHAN=y | 18 | CONFIG_TAISHAN=y |
@@ -97,14 +98,17 @@ CONFIG_USB_STORAGE=m | |||
97 | CONFIG_EXT2_FS=y | 98 | CONFIG_EXT2_FS=y |
98 | CONFIG_EXT3_FS=m | 99 | CONFIG_EXT3_FS=m |
99 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set | 100 | # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set |
100 | CONFIG_INOTIFY=y | ||
101 | CONFIG_VFAT_FS=m | 101 | CONFIG_VFAT_FS=m |
102 | CONFIG_PROC_KCORE=y | 102 | CONFIG_PROC_KCORE=y |
103 | CONFIG_TMPFS=y | 103 | CONFIG_TMPFS=y |
104 | CONFIG_JFFS2_FS=y | 104 | CONFIG_JFFS2_FS=y |
105 | CONFIG_UBIFS_FS=m | 105 | CONFIG_UBIFS_FS=m |
106 | CONFIG_UBIFS_FS_XATTR=y | 106 | CONFIG_UBIFS_FS_XATTR=y |
107 | CONFIG_LOGFS=m | ||
107 | CONFIG_CRAMFS=y | 108 | CONFIG_CRAMFS=y |
109 | CONFIG_SQUASHFS=m | ||
110 | CONFIG_SQUASHFS_XATTR=y | ||
111 | CONFIG_SQUASHFS_LZO=y | ||
108 | CONFIG_NFS_FS=y | 112 | CONFIG_NFS_FS=y |
109 | CONFIG_NFS_V3=y | 113 | CONFIG_NFS_V3=y |
110 | CONFIG_ROOT_NFS=y | 114 | CONFIG_ROOT_NFS=y |
@@ -116,11 +120,8 @@ CONFIG_DEBUG_KERNEL=y | |||
116 | CONFIG_DETECT_HUNG_TASK=y | 120 | CONFIG_DETECT_HUNG_TASK=y |
117 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | 121 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set |
118 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 122 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
119 | CONFIG_CRYPTO_CBC=y | ||
120 | CONFIG_CRYPTO_ECB=y | 123 | CONFIG_CRYPTO_ECB=y |
121 | CONFIG_CRYPTO_PCBC=y | 124 | CONFIG_CRYPTO_PCBC=y |
122 | CONFIG_CRYPTO_MD5=y | ||
123 | CONFIG_CRYPTO_DES=y | ||
124 | # CONFIG_CRYPTO_ANSI_CPRNG is not set | 125 | # CONFIG_CRYPTO_ANSI_CPRNG is not set |
125 | # CONFIG_CRYPTO_HW is not set | 126 | # CONFIG_CRYPTO_HW is not set |
126 | CONFIG_VIRTUALIZATION=y | 127 | CONFIG_VIRTUALIZATION=y |
diff --git a/arch/powerpc/configs/ppc64e_defconfig b/arch/powerpc/configs/ppc64e_defconfig index 04ae0740b6d0..7bd1763877ba 100644 --- a/arch/powerpc/configs/ppc64e_defconfig +++ b/arch/powerpc/configs/ppc64e_defconfig | |||
@@ -18,6 +18,7 @@ CONFIG_MODULES=y | |||
18 | CONFIG_MODULE_UNLOAD=y | 18 | CONFIG_MODULE_UNLOAD=y |
19 | CONFIG_MODVERSIONS=y | 19 | CONFIG_MODVERSIONS=y |
20 | CONFIG_MODULE_SRCVERSION_ALL=y | 20 | CONFIG_MODULE_SRCVERSION_ALL=y |
21 | CONFIG_P5020_DS=y | ||
21 | CONFIG_CPU_FREQ=y | 22 | CONFIG_CPU_FREQ=y |
22 | CONFIG_CPU_FREQ_GOV_POWERSAVE=y | 23 | CONFIG_CPU_FREQ_GOV_POWERSAVE=y |
23 | CONFIG_CPU_FREQ_GOV_USERSPACE=y | 24 | CONFIG_CPU_FREQ_GOV_USERSPACE=y |
@@ -256,7 +257,6 @@ CONFIG_HID_ZEROPLUS=y | |||
256 | CONFIG_USB=y | 257 | CONFIG_USB=y |
257 | CONFIG_USB_DEVICEFS=y | 258 | CONFIG_USB_DEVICEFS=y |
258 | CONFIG_USB_EHCI_HCD=y | 259 | CONFIG_USB_EHCI_HCD=y |
259 | CONFIG_USB_EHCI_TT_NEWSCHED=y | ||
260 | # CONFIG_USB_EHCI_HCD_PPC_OF is not set | 260 | # CONFIG_USB_EHCI_HCD_PPC_OF is not set |
261 | CONFIG_USB_OHCI_HCD=y | 261 | CONFIG_USB_OHCI_HCD=y |
262 | CONFIG_USB_STORAGE=m | 262 | CONFIG_USB_STORAGE=m |
@@ -290,7 +290,6 @@ CONFIG_JFS_POSIX_ACL=y | |||
290 | CONFIG_JFS_SECURITY=y | 290 | CONFIG_JFS_SECURITY=y |
291 | CONFIG_XFS_FS=m | 291 | CONFIG_XFS_FS=m |
292 | CONFIG_XFS_POSIX_ACL=y | 292 | CONFIG_XFS_POSIX_ACL=y |
293 | CONFIG_INOTIFY=y | ||
294 | CONFIG_AUTOFS4_FS=m | 293 | CONFIG_AUTOFS4_FS=m |
295 | CONFIG_ISO9660_FS=y | 294 | CONFIG_ISO9660_FS=y |
296 | CONFIG_UDF_FS=m | 295 | CONFIG_UDF_FS=m |
@@ -384,7 +383,6 @@ CONFIG_CRYPTO_TGR192=m | |||
384 | CONFIG_CRYPTO_WP512=m | 383 | CONFIG_CRYPTO_WP512=m |
385 | CONFIG_CRYPTO_AES=m | 384 | CONFIG_CRYPTO_AES=m |
386 | CONFIG_CRYPTO_ANUBIS=m | 385 | CONFIG_CRYPTO_ANUBIS=m |
387 | CONFIG_CRYPTO_ARC4=m | ||
388 | CONFIG_CRYPTO_BLOWFISH=m | 386 | CONFIG_CRYPTO_BLOWFISH=m |
389 | CONFIG_CRYPTO_CAST6=m | 387 | CONFIG_CRYPTO_CAST6=m |
390 | CONFIG_CRYPTO_KHAZAD=m | 388 | CONFIG_CRYPTO_KHAZAD=m |
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 9d64a6822d86..04360f9b0109 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig | |||
@@ -10,7 +10,6 @@ CONFIG_TASK_XACCT=y | |||
10 | CONFIG_TASK_IO_ACCOUNTING=y | 10 | CONFIG_TASK_IO_ACCOUNTING=y |
11 | CONFIG_AUDIT=y | 11 | CONFIG_AUDIT=y |
12 | CONFIG_CGROUPS=y | 12 | CONFIG_CGROUPS=y |
13 | CONFIG_CGROUP_NS=y | ||
14 | CONFIG_CGROUP_DEVICE=y | 13 | CONFIG_CGROUP_DEVICE=y |
15 | CONFIG_CGROUP_CPUACCT=y | 14 | CONFIG_CGROUP_CPUACCT=y |
16 | CONFIG_RESOURCE_COUNTERS=y | 15 | CONFIG_RESOURCE_COUNTERS=y |
@@ -351,8 +350,8 @@ CONFIG_VLSI_FIR=m | |||
351 | CONFIG_VIA_FIR=m | 350 | CONFIG_VIA_FIR=m |
352 | CONFIG_MCS_FIR=m | 351 | CONFIG_MCS_FIR=m |
353 | CONFIG_BT=m | 352 | CONFIG_BT=m |
354 | CONFIG_BT_L2CAP=m | 353 | CONFIG_BT_L2CAP=y |
355 | CONFIG_BT_SCO=m | 354 | CONFIG_BT_SCO=y |
356 | CONFIG_BT_RFCOMM=m | 355 | CONFIG_BT_RFCOMM=m |
357 | CONFIG_BT_RFCOMM_TTY=y | 356 | CONFIG_BT_RFCOMM_TTY=y |
358 | CONFIG_BT_BNEP=m | 357 | CONFIG_BT_BNEP=m |
@@ -398,6 +397,7 @@ CONFIG_BLK_DEV_RAM_SIZE=16384 | |||
398 | CONFIG_CDROM_PKTCDVD=m | 397 | CONFIG_CDROM_PKTCDVD=m |
399 | CONFIG_VIRTIO_BLK=m | 398 | CONFIG_VIRTIO_BLK=m |
400 | CONFIG_BLK_DEV_HD=y | 399 | CONFIG_BLK_DEV_HD=y |
400 | CONFIG_MISC_DEVICES=y | ||
401 | CONFIG_ENCLOSURE_SERVICES=m | 401 | CONFIG_ENCLOSURE_SERVICES=m |
402 | CONFIG_SENSORS_TSL2550=m | 402 | CONFIG_SENSORS_TSL2550=m |
403 | CONFIG_EEPROM_AT24=m | 403 | CONFIG_EEPROM_AT24=m |
diff --git a/arch/powerpc/configs/pq2fads_defconfig b/arch/powerpc/configs/pq2fads_defconfig index a4353bef31c5..baad8db21b61 100644 --- a/arch/powerpc/configs/pq2fads_defconfig +++ b/arch/powerpc/configs/pq2fads_defconfig | |||
@@ -3,7 +3,7 @@ CONFIG_IKCONFIG=y | |||
3 | CONFIG_IKCONFIG_PROC=y | 3 | CONFIG_IKCONFIG_PROC=y |
4 | CONFIG_LOG_BUF_SHIFT=14 | 4 | CONFIG_LOG_BUF_SHIFT=14 |
5 | CONFIG_BLK_DEV_INITRD=y | 5 | CONFIG_BLK_DEV_INITRD=y |
6 | CONFIG_EMBEDDED=y | 6 | CONFIG_EXPERT=y |
7 | CONFIG_KALLSYMS_ALL=y | 7 | CONFIG_KALLSYMS_ALL=y |
8 | # CONFIG_PPC_CHRP is not set | 8 | # CONFIG_PPC_CHRP is not set |
9 | # CONFIG_PPC_PMAC is not set | 9 | # CONFIG_PPC_PMAC is not set |
diff --git a/arch/powerpc/configs/ps3_defconfig b/arch/powerpc/configs/ps3_defconfig index 49cffe003657..185c292b0f1c 100644 --- a/arch/powerpc/configs/ps3_defconfig +++ b/arch/powerpc/configs/ps3_defconfig | |||
@@ -8,7 +8,7 @@ CONFIG_SYSVIPC=y | |||
8 | CONFIG_POSIX_MQUEUE=y | 8 | CONFIG_POSIX_MQUEUE=y |
9 | CONFIG_NAMESPACES=y | 9 | CONFIG_NAMESPACES=y |
10 | CONFIG_BLK_DEV_INITRD=y | 10 | CONFIG_BLK_DEV_INITRD=y |
11 | CONFIG_EMBEDDED=y | 11 | CONFIG_EXPERT=y |
12 | CONFIG_KALLSYMS_EXTRA_PASS=y | 12 | CONFIG_KALLSYMS_EXTRA_PASS=y |
13 | # CONFIG_PERF_EVENTS is not set | 13 | # CONFIG_PERF_EVENTS is not set |
14 | # CONFIG_COMPAT_BRK is not set | 14 | # CONFIG_COMPAT_BRK is not set |
@@ -52,8 +52,8 @@ CONFIG_IP_PNP_DHCP=y | |||
52 | # CONFIG_INET_DIAG is not set | 52 | # CONFIG_INET_DIAG is not set |
53 | CONFIG_IPV6=y | 53 | CONFIG_IPV6=y |
54 | CONFIG_BT=m | 54 | CONFIG_BT=m |
55 | CONFIG_BT_L2CAP=m | 55 | CONFIG_BT_L2CAP=y |
56 | CONFIG_BT_SCO=m | 56 | CONFIG_BT_SCO=y |
57 | CONFIG_BT_RFCOMM=m | 57 | CONFIG_BT_RFCOMM=m |
58 | CONFIG_BT_RFCOMM_TTY=y | 58 | CONFIG_BT_RFCOMM_TTY=y |
59 | CONFIG_BT_BNEP=m | 59 | CONFIG_BT_BNEP=m |
@@ -141,7 +141,7 @@ CONFIG_USB_EHCI_TT_NEWSCHED=y | |||
141 | # CONFIG_USB_EHCI_HCD_PPC_OF is not set | 141 | # CONFIG_USB_EHCI_HCD_PPC_OF is not set |
142 | CONFIG_USB_OHCI_HCD=m | 142 | CONFIG_USB_OHCI_HCD=m |
143 | CONFIG_USB_STORAGE=m | 143 | CONFIG_USB_STORAGE=m |
144 | CONFIG_RTC_CLASS=m | 144 | CONFIG_RTC_CLASS=y |
145 | CONFIG_RTC_DRV_PS3=m | 145 | CONFIG_RTC_DRV_PS3=m |
146 | CONFIG_EXT2_FS=m | 146 | CONFIG_EXT2_FS=m |
147 | CONFIG_EXT3_FS=m | 147 | CONFIG_EXT3_FS=m |
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index f87f0e15cfa7..80bc5de7ee1d 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig | |||
@@ -2,7 +2,7 @@ CONFIG_PPC64=y | |||
2 | CONFIG_ALTIVEC=y | 2 | CONFIG_ALTIVEC=y |
3 | CONFIG_VSX=y | 3 | CONFIG_VSX=y |
4 | CONFIG_SMP=y | 4 | CONFIG_SMP=y |
5 | CONFIG_NR_CPUS=128 | 5 | CONFIG_NR_CPUS=1024 |
6 | CONFIG_EXPERIMENTAL=y | 6 | CONFIG_EXPERIMENTAL=y |
7 | CONFIG_SYSVIPC=y | 7 | CONFIG_SYSVIPC=y |
8 | CONFIG_POSIX_MQUEUE=y | 8 | CONFIG_POSIX_MQUEUE=y |
@@ -15,7 +15,6 @@ CONFIG_AUDITSYSCALL=y | |||
15 | CONFIG_IKCONFIG=y | 15 | CONFIG_IKCONFIG=y |
16 | CONFIG_IKCONFIG_PROC=y | 16 | CONFIG_IKCONFIG_PROC=y |
17 | CONFIG_CGROUPS=y | 17 | CONFIG_CGROUPS=y |
18 | CONFIG_CGROUP_NS=y | ||
19 | CONFIG_CGROUP_FREEZER=y | 18 | CONFIG_CGROUP_FREEZER=y |
20 | CONFIG_CGROUP_DEVICE=y | 19 | CONFIG_CGROUP_DEVICE=y |
21 | CONFIG_CPUSETS=y | 20 | CONFIG_CPUSETS=y |
@@ -45,6 +44,8 @@ CONFIG_KEXEC=y | |||
45 | CONFIG_IRQ_ALL_CPUS=y | 44 | CONFIG_IRQ_ALL_CPUS=y |
46 | CONFIG_MEMORY_HOTPLUG=y | 45 | CONFIG_MEMORY_HOTPLUG=y |
47 | CONFIG_MEMORY_HOTREMOVE=y | 46 | CONFIG_MEMORY_HOTREMOVE=y |
47 | CONFIG_PPC_64K_PAGES=y | ||
48 | CONFIG_PPC_SUBPAGE_PROT=y | ||
48 | CONFIG_SCHED_SMT=y | 49 | CONFIG_SCHED_SMT=y |
49 | CONFIG_HOTPLUG_PCI=m | 50 | CONFIG_HOTPLUG_PCI=m |
50 | CONFIG_HOTPLUG_PCI_RPA=m | 51 | CONFIG_HOTPLUG_PCI_RPA=m |
@@ -144,12 +145,17 @@ CONFIG_SCSI_MULTI_LUN=y | |||
144 | CONFIG_SCSI_CONSTANTS=y | 145 | CONFIG_SCSI_CONSTANTS=y |
145 | CONFIG_SCSI_FC_ATTRS=y | 146 | CONFIG_SCSI_FC_ATTRS=y |
146 | CONFIG_SCSI_SAS_ATTRS=m | 147 | CONFIG_SCSI_SAS_ATTRS=m |
148 | CONFIG_SCSI_CXGB3_ISCSI=m | ||
149 | CONFIG_SCSI_CXGB4_ISCSI=m | ||
150 | CONFIG_SCSI_BNX2_ISCSI=m | ||
151 | CONFIG_BE2ISCSI=m | ||
147 | CONFIG_SCSI_IBMVSCSI=y | 152 | CONFIG_SCSI_IBMVSCSI=y |
148 | CONFIG_SCSI_IBMVFC=m | 153 | CONFIG_SCSI_IBMVFC=m |
149 | CONFIG_SCSI_SYM53C8XX_2=y | 154 | CONFIG_SCSI_SYM53C8XX_2=y |
150 | CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 | 155 | CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 |
151 | CONFIG_SCSI_IPR=y | 156 | CONFIG_SCSI_IPR=y |
152 | CONFIG_SCSI_QLA_FC=m | 157 | CONFIG_SCSI_QLA_FC=m |
158 | CONFIG_SCSI_QLA_ISCSI=m | ||
153 | CONFIG_SCSI_LPFC=m | 159 | CONFIG_SCSI_LPFC=m |
154 | CONFIG_ATA=y | 160 | CONFIG_ATA=y |
155 | # CONFIG_ATA_SFF is not set | 161 | # CONFIG_ATA_SFF is not set |
@@ -184,8 +190,10 @@ CONFIG_ACENIC_OMIT_TIGON_I=y | |||
184 | CONFIG_E1000=y | 190 | CONFIG_E1000=y |
185 | CONFIG_E1000E=y | 191 | CONFIG_E1000E=y |
186 | CONFIG_TIGON3=y | 192 | CONFIG_TIGON3=y |
193 | CONFIG_BNX2=m | ||
187 | CONFIG_CHELSIO_T1=m | 194 | CONFIG_CHELSIO_T1=m |
188 | CONFIG_CHELSIO_T3=m | 195 | CONFIG_CHELSIO_T3=m |
196 | CONFIG_CHELSIO_T4=m | ||
189 | CONFIG_EHEA=y | 197 | CONFIG_EHEA=y |
190 | CONFIG_IXGBE=m | 198 | CONFIG_IXGBE=m |
191 | CONFIG_IXGB=m | 199 | CONFIG_IXGB=m |
@@ -193,6 +201,8 @@ CONFIG_S2IO=m | |||
193 | CONFIG_MYRI10GE=m | 201 | CONFIG_MYRI10GE=m |
194 | CONFIG_NETXEN_NIC=m | 202 | CONFIG_NETXEN_NIC=m |
195 | CONFIG_MLX4_EN=m | 203 | CONFIG_MLX4_EN=m |
204 | CONFIG_QLGE=m | ||
205 | CONFIG_BE2NET=m | ||
196 | CONFIG_PPP=m | 206 | CONFIG_PPP=m |
197 | CONFIG_PPP_ASYNC=m | 207 | CONFIG_PPP_ASYNC=m |
198 | CONFIG_PPP_SYNC_TTY=m | 208 | CONFIG_PPP_SYNC_TTY=m |
@@ -252,6 +262,8 @@ CONFIG_INFINIBAND_USER_MAD=m | |||
252 | CONFIG_INFINIBAND_USER_ACCESS=m | 262 | CONFIG_INFINIBAND_USER_ACCESS=m |
253 | CONFIG_INFINIBAND_MTHCA=m | 263 | CONFIG_INFINIBAND_MTHCA=m |
254 | CONFIG_INFINIBAND_EHCA=m | 264 | CONFIG_INFINIBAND_EHCA=m |
265 | CONFIG_INFINIBAND_CXGB3=m | ||
266 | CONFIG_INFINIBAND_CXGB4=m | ||
255 | CONFIG_MLX4_INFINIBAND=m | 267 | CONFIG_MLX4_INFINIBAND=m |
256 | CONFIG_INFINIBAND_IPOIB=m | 268 | CONFIG_INFINIBAND_IPOIB=m |
257 | CONFIG_INFINIBAND_IPOIB_CM=y | 269 | CONFIG_INFINIBAND_IPOIB_CM=y |
@@ -311,9 +323,7 @@ CONFIG_DEBUG_KERNEL=y | |||
311 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set | 323 | # CONFIG_RCU_CPU_STALL_DETECTOR is not set |
312 | CONFIG_LATENCYTOP=y | 324 | CONFIG_LATENCYTOP=y |
313 | CONFIG_SYSCTL_SYSCALL_CHECK=y | 325 | CONFIG_SYSCTL_SYSCALL_CHECK=y |
314 | CONFIG_IRQSOFF_TRACER=y | ||
315 | CONFIG_SCHED_TRACER=y | 326 | CONFIG_SCHED_TRACER=y |
316 | CONFIG_STACK_TRACER=y | ||
317 | CONFIG_BLK_DEV_IO_TRACE=y | 327 | CONFIG_BLK_DEV_IO_TRACE=y |
318 | CONFIG_DEBUG_STACKOVERFLOW=y | 328 | CONFIG_DEBUG_STACKOVERFLOW=y |
319 | CONFIG_DEBUG_STACK_USAGE=y | 329 | CONFIG_DEBUG_STACK_USAGE=y |
diff --git a/arch/powerpc/configs/storcenter_defconfig b/arch/powerpc/configs/storcenter_defconfig index 4f0c10a62b9d..ebb2a66c99d3 100644 --- a/arch/powerpc/configs/storcenter_defconfig +++ b/arch/powerpc/configs/storcenter_defconfig | |||
@@ -1,7 +1,7 @@ | |||
1 | CONFIG_EXPERIMENTAL=y | 1 | CONFIG_EXPERIMENTAL=y |
2 | CONFIG_SYSVIPC=y | 2 | CONFIG_SYSVIPC=y |
3 | CONFIG_LOG_BUF_SHIFT=14 | 3 | CONFIG_LOG_BUF_SHIFT=14 |
4 | CONFIG_EMBEDDED=y | 4 | CONFIG_EXPERT=y |
5 | # CONFIG_KALLSYMS is not set | 5 | # CONFIG_KALLSYMS is not set |
6 | CONFIG_MODULES=y | 6 | CONFIG_MODULES=y |
7 | CONFIG_MODULE_UNLOAD=y | 7 | CONFIG_MODULE_UNLOAD=y |
diff --git a/arch/powerpc/configs/tqm8xx_defconfig b/arch/powerpc/configs/tqm8xx_defconfig index d0a5b6763880..8616fde0896f 100644 --- a/arch/powerpc/configs/tqm8xx_defconfig +++ b/arch/powerpc/configs/tqm8xx_defconfig | |||
@@ -5,7 +5,7 @@ CONFIG_SYSVIPC=y | |||
5 | CONFIG_LOG_BUF_SHIFT=14 | 5 | CONFIG_LOG_BUF_SHIFT=14 |
6 | CONFIG_SYSFS_DEPRECATED_V2=y | 6 | CONFIG_SYSFS_DEPRECATED_V2=y |
7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 7 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
8 | CONFIG_EMBEDDED=y | 8 | CONFIG_EXPERT=y |
9 | # CONFIG_SYSCTL_SYSCALL is not set | 9 | # CONFIG_SYSCTL_SYSCALL is not set |
10 | # CONFIG_ELF_CORE is not set | 10 | # CONFIG_ELF_CORE is not set |
11 | # CONFIG_BASE_FULL is not set | 11 | # CONFIG_BASE_FULL is not set |
diff --git a/arch/powerpc/configs/wii_defconfig b/arch/powerpc/configs/wii_defconfig index bb8ba75b7c68..175295fbf4f3 100644 --- a/arch/powerpc/configs/wii_defconfig +++ b/arch/powerpc/configs/wii_defconfig | |||
@@ -7,7 +7,7 @@ CONFIG_IKCONFIG_PROC=y | |||
7 | CONFIG_LOG_BUF_SHIFT=14 | 7 | CONFIG_LOG_BUF_SHIFT=14 |
8 | CONFIG_BLK_DEV_INITRD=y | 8 | CONFIG_BLK_DEV_INITRD=y |
9 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set | 9 | # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set |
10 | CONFIG_EMBEDDED=y | 10 | CONFIG_EXPERT=y |
11 | # CONFIG_ELF_CORE is not set | 11 | # CONFIG_ELF_CORE is not set |
12 | CONFIG_PERF_COUNTERS=y | 12 | CONFIG_PERF_COUNTERS=y |
13 | # CONFIG_VM_EVENT_COUNTERS is not set | 13 | # CONFIG_VM_EVENT_COUNTERS is not set |
diff --git a/arch/powerpc/include/asm/8xx_immap.h b/arch/powerpc/include/asm/8xx_immap.h index 4b0e15206006..bdf0563ba423 100644 --- a/arch/powerpc/include/asm/8xx_immap.h +++ b/arch/powerpc/include/asm/8xx_immap.h | |||
@@ -93,7 +93,7 @@ typedef struct mem_ctlr { | |||
93 | } memctl8xx_t; | 93 | } memctl8xx_t; |
94 | 94 | ||
95 | /*----------------------------------------------------------------------- | 95 | /*----------------------------------------------------------------------- |
96 | * BR - Memory Controler: Base Register 16-9 | 96 | * BR - Memory Controller: Base Register 16-9 |
97 | */ | 97 | */ |
98 | #define BR_BA_MSK 0xffff8000 /* Base Address Mask */ | 98 | #define BR_BA_MSK 0xffff8000 /* Base Address Mask */ |
99 | #define BR_AT_MSK 0x00007000 /* Address Type Mask */ | 99 | #define BR_AT_MSK 0x00007000 /* Address Type Mask */ |
@@ -110,7 +110,7 @@ typedef struct mem_ctlr { | |||
110 | #define BR_V 0x00000001 /* Bank Valid */ | 110 | #define BR_V 0x00000001 /* Bank Valid */ |
111 | 111 | ||
112 | /*----------------------------------------------------------------------- | 112 | /*----------------------------------------------------------------------- |
113 | * OR - Memory Controler: Option Register 16-11 | 113 | * OR - Memory Controller: Option Register 16-11 |
114 | */ | 114 | */ |
115 | #define OR_AM_MSK 0xffff8000 /* Address Mask Mask */ | 115 | #define OR_AM_MSK 0xffff8000 /* Address Mask Mask */ |
116 | #define OR_ATM_MSK 0x00007000 /* Address Type Mask Mask */ | 116 | #define OR_ATM_MSK 0x00007000 /* Address Type Mask Mask */ |
@@ -393,8 +393,8 @@ typedef struct fec { | |||
393 | uint fec_addr_low; /* lower 32 bits of station address */ | 393 | uint fec_addr_low; /* lower 32 bits of station address */ |
394 | ushort fec_addr_high; /* upper 16 bits of station address */ | 394 | ushort fec_addr_high; /* upper 16 bits of station address */ |
395 | ushort res1; /* reserved */ | 395 | ushort res1; /* reserved */ |
396 | uint fec_hash_table_high; /* upper 32-bits of hash table */ | 396 | uint fec_grp_hash_table_high; /* upper 32-bits of hash table */ |
397 | uint fec_hash_table_low; /* lower 32-bits of hash table */ | 397 | uint fec_grp_hash_table_low; /* lower 32-bits of hash table */ |
398 | uint fec_r_des_start; /* beginning of Rx descriptor ring */ | 398 | uint fec_r_des_start; /* beginning of Rx descriptor ring */ |
399 | uint fec_x_des_start; /* beginning of Tx descriptor ring */ | 399 | uint fec_x_des_start; /* beginning of Tx descriptor ring */ |
400 | uint fec_r_buff_size; /* Rx buffer size */ | 400 | uint fec_r_buff_size; /* Rx buffer size */ |
diff --git a/arch/powerpc/include/asm/bitops.h b/arch/powerpc/include/asm/bitops.h index 30964ae2d096..f18c6d9b9510 100644 --- a/arch/powerpc/include/asm/bitops.h +++ b/arch/powerpc/include/asm/bitops.h | |||
@@ -209,8 +209,8 @@ static __inline__ unsigned long ffz(unsigned long x) | |||
209 | return BITS_PER_LONG; | 209 | return BITS_PER_LONG; |
210 | 210 | ||
211 | /* | 211 | /* |
212 | * Calculate the bit position of the least signficant '1' bit in x | 212 | * Calculate the bit position of the least significant '1' bit in x |
213 | * (since x has been changed this will actually be the least signficant | 213 | * (since x has been changed this will actually be the least significant |
214 | * '0' bit in * the original x). Note: (x & -x) gives us a mask that | 214 | * '0' bit in * the original x). Note: (x & -x) gives us a mask that |
215 | * is the least significant * (RIGHT-most) 1-bit of the value in x. | 215 | * is the least significant * (RIGHT-most) 1-bit of the value in x. |
216 | */ | 216 | */ |
@@ -267,73 +267,70 @@ static __inline__ int fls64(__u64 x) | |||
267 | #include <asm-generic/bitops/fls64.h> | 267 | #include <asm-generic/bitops/fls64.h> |
268 | #endif /* __powerpc64__ */ | 268 | #endif /* __powerpc64__ */ |
269 | 269 | ||
270 | #ifdef CONFIG_PPC64 | ||
271 | unsigned int __arch_hweight8(unsigned int w); | ||
272 | unsigned int __arch_hweight16(unsigned int w); | ||
273 | unsigned int __arch_hweight32(unsigned int w); | ||
274 | unsigned long __arch_hweight64(__u64 w); | ||
275 | #include <asm-generic/bitops/const_hweight.h> | ||
276 | #else | ||
270 | #include <asm-generic/bitops/hweight.h> | 277 | #include <asm-generic/bitops/hweight.h> |
278 | #endif | ||
279 | |||
271 | #include <asm-generic/bitops/find.h> | 280 | #include <asm-generic/bitops/find.h> |
272 | 281 | ||
273 | /* Little-endian versions */ | 282 | /* Little-endian versions */ |
274 | 283 | ||
275 | static __inline__ int test_le_bit(unsigned long nr, | 284 | static __inline__ int test_bit_le(unsigned long nr, |
276 | __const__ unsigned long *addr) | 285 | __const__ void *addr) |
277 | { | 286 | { |
278 | __const__ unsigned char *tmp = (__const__ unsigned char *) addr; | 287 | __const__ unsigned char *tmp = (__const__ unsigned char *) addr; |
279 | return (tmp[nr >> 3] >> (nr & 7)) & 1; | 288 | return (tmp[nr >> 3] >> (nr & 7)) & 1; |
280 | } | 289 | } |
281 | 290 | ||
282 | #define __set_le_bit(nr, addr) \ | 291 | static inline void __set_bit_le(int nr, void *addr) |
283 | __set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | 292 | { |
284 | #define __clear_le_bit(nr, addr) \ | 293 | __set_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
285 | __clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | 294 | } |
286 | 295 | ||
287 | #define test_and_set_le_bit(nr, addr) \ | 296 | static inline void __clear_bit_le(int nr, void *addr) |
288 | test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | 297 | { |
289 | #define test_and_clear_le_bit(nr, addr) \ | 298 | __clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
290 | test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | 299 | } |
291 | 300 | ||
292 | #define __test_and_set_le_bit(nr, addr) \ | 301 | static inline int test_and_set_bit_le(int nr, void *addr) |
293 | __test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | 302 | { |
294 | #define __test_and_clear_le_bit(nr, addr) \ | 303 | return test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); |
295 | __test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, (addr)) | 304 | } |
296 | 305 | ||
297 | #define find_first_zero_le_bit(addr, size) generic_find_next_zero_le_bit((addr), (size), 0) | 306 | static inline int test_and_clear_bit_le(int nr, void *addr) |
298 | unsigned long generic_find_next_zero_le_bit(const unsigned long *addr, | 307 | { |
308 | return test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); | ||
309 | } | ||
310 | |||
311 | static inline int __test_and_set_bit_le(int nr, void *addr) | ||
312 | { | ||
313 | return __test_and_set_bit(nr ^ BITOP_LE_SWIZZLE, addr); | ||
314 | } | ||
315 | |||
316 | static inline int __test_and_clear_bit_le(int nr, void *addr) | ||
317 | { | ||
318 | return __test_and_clear_bit(nr ^ BITOP_LE_SWIZZLE, addr); | ||
319 | } | ||
320 | |||
321 | #define find_first_zero_bit_le(addr, size) \ | ||
322 | find_next_zero_bit_le((addr), (size), 0) | ||
323 | unsigned long find_next_zero_bit_le(const void *addr, | ||
299 | unsigned long size, unsigned long offset); | 324 | unsigned long size, unsigned long offset); |
300 | 325 | ||
301 | unsigned long generic_find_next_le_bit(const unsigned long *addr, | 326 | unsigned long find_next_bit_le(const void *addr, |
302 | unsigned long size, unsigned long offset); | 327 | unsigned long size, unsigned long offset); |
303 | /* Bitmap functions for the ext2 filesystem */ | 328 | /* Bitmap functions for the ext2 filesystem */ |
304 | 329 | ||
305 | #define ext2_set_bit(nr,addr) \ | ||
306 | __test_and_set_le_bit((nr), (unsigned long*)addr) | ||
307 | #define ext2_clear_bit(nr, addr) \ | ||
308 | __test_and_clear_le_bit((nr), (unsigned long*)addr) | ||
309 | |||
310 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 330 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
311 | test_and_set_le_bit((nr), (unsigned long*)addr) | 331 | test_and_set_bit_le((nr), (unsigned long*)addr) |
312 | #define ext2_clear_bit_atomic(lock, nr, addr) \ | 332 | #define ext2_clear_bit_atomic(lock, nr, addr) \ |
313 | test_and_clear_le_bit((nr), (unsigned long*)addr) | 333 | test_and_clear_bit_le((nr), (unsigned long*)addr) |
314 | |||
315 | #define ext2_test_bit(nr, addr) test_le_bit((nr),(unsigned long*)addr) | ||
316 | |||
317 | #define ext2_find_first_zero_bit(addr, size) \ | ||
318 | find_first_zero_le_bit((unsigned long*)addr, size) | ||
319 | #define ext2_find_next_zero_bit(addr, size, off) \ | ||
320 | generic_find_next_zero_le_bit((unsigned long*)addr, size, off) | ||
321 | |||
322 | #define ext2_find_next_bit(addr, size, off) \ | ||
323 | generic_find_next_le_bit((unsigned long *)addr, size, off) | ||
324 | /* Bitmap functions for the minix filesystem. */ | ||
325 | |||
326 | #define minix_test_and_set_bit(nr,addr) \ | ||
327 | __test_and_set_le_bit(nr, (unsigned long *)addr) | ||
328 | #define minix_set_bit(nr,addr) \ | ||
329 | __set_le_bit(nr, (unsigned long *)addr) | ||
330 | #define minix_test_and_clear_bit(nr,addr) \ | ||
331 | __test_and_clear_le_bit(nr, (unsigned long *)addr) | ||
332 | #define minix_test_bit(nr,addr) \ | ||
333 | test_le_bit(nr, (unsigned long *)addr) | ||
334 | |||
335 | #define minix_find_first_zero_bit(addr,size) \ | ||
336 | find_first_zero_le_bit((unsigned long *)addr, size) | ||
337 | 334 | ||
338 | #include <asm-generic/bitops/sched.h> | 335 | #include <asm-generic/bitops/sched.h> |
339 | 336 | ||
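Note: the *_bit_le() helpers introduced above implement little-endian bit numbering on top of the native (big-endian) word bitops by XOR-ing the bit number with BITOP_LE_SWIZZLE. The following is a minimal userspace sketch of that mapping, assuming the kernel's BITOP_LE_SWIZZLE of ((BITS_PER_LONG - 1) & ~0x7) on big-endian hosts; it is illustrative only, not the kernel code.

#include <stdio.h>
#include <string.h>

#define BITS_PER_LONG (8 * sizeof(unsigned long))

/* The swizzle is only needed on big-endian hosts such as classic powerpc;
 * detect endianness at runtime so the sketch also runs on x86. */
static unsigned long le_swizzle(void)
{
	unsigned long one = 1;
	return *(unsigned char *)&one ? 0 : ((BITS_PER_LONG - 1) & ~0x7UL);
}

/* Byte-wise little-endian test, same shape as test_bit_le() above. */
static int test_bit_le(unsigned long nr, const void *addr)
{
	const unsigned char *p = addr;
	return (p[nr >> 3] >> (nr & 7)) & 1;
}

/* Word-wise set using the swizzled bit number. */
static void set_bit_le(unsigned long nr, void *addr)
{
	unsigned long *p = addr;
	nr ^= le_swizzle();
	p[nr / BITS_PER_LONG] |= 1UL << (nr % BITS_PER_LONG);
}

int main(void)
{
	unsigned long map[2];
	memset(map, 0, sizeof(map));
	set_bit_le(9, map);
	printf("bit 9: %d, bit 10: %d\n", test_bit_le(9, map), test_bit_le(10, map));
	return 0;
}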
diff --git a/arch/powerpc/include/asm/checksum.h b/arch/powerpc/include/asm/checksum.h index 7cdf358337cf..ce0c28495f9a 100644 --- a/arch/powerpc/include/asm/checksum.h +++ b/arch/powerpc/include/asm/checksum.h | |||
@@ -52,12 +52,22 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum); | |||
52 | extern __wsum csum_partial_copy_generic(const void *src, void *dst, | 52 | extern __wsum csum_partial_copy_generic(const void *src, void *dst, |
53 | int len, __wsum sum, | 53 | int len, __wsum sum, |
54 | int *src_err, int *dst_err); | 54 | int *src_err, int *dst_err); |
55 | |||
56 | #ifdef __powerpc64__ | ||
57 | #define _HAVE_ARCH_COPY_AND_CSUM_FROM_USER | ||
58 | extern __wsum csum_and_copy_from_user(const void __user *src, void *dst, | ||
59 | int len, __wsum sum, int *err_ptr); | ||
60 | #define HAVE_CSUM_COPY_USER | ||
61 | extern __wsum csum_and_copy_to_user(const void *src, void __user *dst, | ||
62 | int len, __wsum sum, int *err_ptr); | ||
63 | #else | ||
55 | /* | 64 | /* |
56 | * the same as csum_partial, but copies from src to dst while it | 65 | * the same as csum_partial, but copies from src to dst while it |
57 | * checksums. | 66 | * checksums. |
58 | */ | 67 | */ |
59 | #define csum_partial_copy_from_user(src, dst, len, sum, errp) \ | 68 | #define csum_partial_copy_from_user(src, dst, len, sum, errp) \ |
60 | csum_partial_copy_generic((__force const void *)(src), (dst), (len), (sum), (errp), NULL) | 69 | csum_partial_copy_generic((__force const void *)(src), (dst), (len), (sum), (errp), NULL) |
70 | #endif | ||
61 | 71 | ||
62 | #define csum_partial_copy_nocheck(src, dst, len, sum) \ | 72 | #define csum_partial_copy_nocheck(src, dst, len, sum) \ |
63 | csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL) | 73 | csum_partial_copy_generic((src), (dst), (len), (sum), NULL, NULL) |
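Note: csum_partial() and the new csum_and_copy_*() declarations above compute the 16-bit one's-complement ("Internet") checksum, accumulating into a wider partial sum that is folded at the end. A rough userspace sketch of that underlying arithmetic (not the optimized powerpc copy-and-checksum assembly these declarations refer to):

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Accumulate 16-bit big-endian words into a 32-bit partial sum. */
static uint32_t csum_partial(const void *buf, size_t len, uint32_t sum)
{
	const uint8_t *p = buf;
	while (len > 1) {
		sum += (uint32_t)(p[0] << 8 | p[1]);
		p += 2;
		len -= 2;
	}
	if (len)				/* trailing odd byte */
		sum += (uint32_t)p[0] << 8;
	return sum;
}

/* Fold the carries back in and complement, as csum_fold() does. */
static uint16_t csum_fold(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	uint8_t data[] = { 0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46 };
	printf("checksum: 0x%04x\n", csum_fold(csum_partial(data, sizeof(data), 0)));
	return 0;
}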
diff --git a/arch/powerpc/include/asm/compat.h b/arch/powerpc/include/asm/compat.h index a11d4eac4f97..91010e8f8479 100644 --- a/arch/powerpc/include/asm/compat.h +++ b/arch/powerpc/include/asm/compat.h | |||
@@ -140,10 +140,10 @@ static inline void __user *arch_compat_alloc_user_space(long len) | |||
140 | unsigned long usp = regs->gpr[1]; | 140 | unsigned long usp = regs->gpr[1]; |
141 | 141 | ||
142 | /* | 142 | /* |
143 | * We cant access below the stack pointer in the 32bit ABI and | 143 | * We can't access below the stack pointer in the 32bit ABI and |
144 | * can access 288 bytes in the 64bit ABI | 144 | * can access 288 bytes in the 64bit ABI |
145 | */ | 145 | */ |
146 | if (!(test_thread_flag(TIF_32BIT))) | 146 | if (!is_32bit_task()) |
147 | usp -= 288; | 147 | usp -= 288; |
148 | 148 | ||
149 | return (void __user *) (usp - len); | 149 | return (void __user *) (usp - len); |
@@ -213,7 +213,7 @@ struct compat_shmid64_ds { | |||
213 | 213 | ||
214 | static inline int is_compat_task(void) | 214 | static inline int is_compat_task(void) |
215 | { | 215 | { |
216 | return test_thread_flag(TIF_32BIT); | 216 | return is_32bit_task(); |
217 | } | 217 | } |
218 | 218 | ||
219 | #endif /* __KERNEL__ */ | 219 | #endif /* __KERNEL__ */ |
diff --git a/arch/powerpc/include/asm/cpm.h b/arch/powerpc/include/asm/cpm.h index e50323fe941f..4398a6cdcf53 100644 --- a/arch/powerpc/include/asm/cpm.h +++ b/arch/powerpc/include/asm/cpm.h | |||
@@ -98,7 +98,7 @@ typedef struct cpm_buf_desc { | |||
98 | #define BD_SC_INTRPT (0x1000) /* Interrupt on change */ | 98 | #define BD_SC_INTRPT (0x1000) /* Interrupt on change */ |
99 | #define BD_SC_LAST (0x0800) /* Last buffer in frame */ | 99 | #define BD_SC_LAST (0x0800) /* Last buffer in frame */ |
100 | #define BD_SC_TC (0x0400) /* Transmit CRC */ | 100 | #define BD_SC_TC (0x0400) /* Transmit CRC */ |
101 | #define BD_SC_CM (0x0200) /* Continous mode */ | 101 | #define BD_SC_CM (0x0200) /* Continuous mode */ |
102 | #define BD_SC_ID (0x0100) /* Rec'd too many idles */ | 102 | #define BD_SC_ID (0x0100) /* Rec'd too many idles */ |
103 | #define BD_SC_P (0x0100) /* xmt preamble */ | 103 | #define BD_SC_P (0x0100) /* xmt preamble */ |
104 | #define BD_SC_BR (0x0020) /* Break received */ | 104 | #define BD_SC_BR (0x0020) /* Break received */ |
diff --git a/arch/powerpc/include/asm/cpm1.h b/arch/powerpc/include/asm/cpm1.h index bd07650dca56..8ee4211ca0c6 100644 --- a/arch/powerpc/include/asm/cpm1.h +++ b/arch/powerpc/include/asm/cpm1.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * | 4 | * |
5 | * This file contains structures and information for the communication | 5 | * This file contains structures and information for the communication |
6 | * processor channels. Some CPM control and status is available | 6 | * processor channels. Some CPM control and status is available |
7 | * throught the MPC8xx internal memory map. See immap.h for details. | 7 | * through the MPC8xx internal memory map. See immap.h for details. |
8 | * This file only contains what I need for the moment, not the total | 8 | * This file only contains what I need for the moment, not the total |
9 | * CPM capabilities. I (or someone else) will add definitions as they | 9 | * CPM capabilities. I (or someone else) will add definitions as they |
10 | * are needed. -- Dan | 10 | * are needed. -- Dan |
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h index 3a40a992e594..c0d842cfd012 100644 --- a/arch/powerpc/include/asm/cputable.h +++ b/arch/powerpc/include/asm/cputable.h | |||
@@ -154,8 +154,10 @@ extern const char *powerpc_base_platform; | |||
154 | #define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000) | 154 | #define CPU_FTR_NAP_DISABLE_L2_PR ASM_CONST(0x0000000000002000) |
155 | #define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000) | 155 | #define CPU_FTR_DUAL_PLL_750FX ASM_CONST(0x0000000000004000) |
156 | #define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000) | 156 | #define CPU_FTR_NO_DPM ASM_CONST(0x0000000000008000) |
157 | #define CPU_FTR_476_DD2 ASM_CONST(0x0000000000010000) | ||
157 | #define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000) | 158 | #define CPU_FTR_NEED_COHERENT ASM_CONST(0x0000000000020000) |
158 | #define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000) | 159 | #define CPU_FTR_NO_BTIC ASM_CONST(0x0000000000040000) |
160 | #define CPU_FTR_DEBUG_LVL_EXC ASM_CONST(0x0000000000080000) | ||
159 | #define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000) | 161 | #define CPU_FTR_NODSISRALIGN ASM_CONST(0x0000000000100000) |
160 | #define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000) | 162 | #define CPU_FTR_PPC_LE ASM_CONST(0x0000000000200000) |
161 | #define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000) | 163 | #define CPU_FTR_REAL_LE ASM_CONST(0x0000000000400000) |
@@ -177,33 +179,34 @@ extern const char *powerpc_base_platform; | |||
177 | #define LONG_ASM_CONST(x) 0 | 179 | #define LONG_ASM_CONST(x) 0 |
178 | #endif | 180 | #endif |
179 | 181 | ||
180 | #define CPU_FTR_SLB LONG_ASM_CONST(0x0000000100000000) | 182 | |
181 | #define CPU_FTR_16M_PAGE LONG_ASM_CONST(0x0000000200000000) | 183 | #define CPU_FTR_HVMODE_206 LONG_ASM_CONST(0x0000000800000000) |
182 | #define CPU_FTR_TLBIEL LONG_ASM_CONST(0x0000000400000000) | 184 | #define CPU_FTR_CFAR LONG_ASM_CONST(0x0000001000000000) |
183 | #define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000) | 185 | #define CPU_FTR_IABR LONG_ASM_CONST(0x0000002000000000) |
184 | #define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000) | 186 | #define CPU_FTR_MMCRA LONG_ASM_CONST(0x0000004000000000) |
185 | #define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000) | 187 | #define CPU_FTR_CTRL LONG_ASM_CONST(0x0000008000000000) |
186 | #define CPU_FTR_SMT LONG_ASM_CONST(0x0000010000000000) | 188 | #define CPU_FTR_SMT LONG_ASM_CONST(0x0000010000000000) |
187 | #define CPU_FTR_LOCKLESS_TLBIE LONG_ASM_CONST(0x0000040000000000) | ||
188 | #define CPU_FTR_CI_LARGE_PAGE LONG_ASM_CONST(0x0000100000000000) | ||
189 | #define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000200000000000) | 189 | #define CPU_FTR_PAUSE_ZERO LONG_ASM_CONST(0x0000200000000000) |
190 | #define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000) | 190 | #define CPU_FTR_PURR LONG_ASM_CONST(0x0000400000000000) |
191 | #define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000) | 191 | #define CPU_FTR_CELL_TB_BUG LONG_ASM_CONST(0x0000800000000000) |
192 | #define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000) | 192 | #define CPU_FTR_SPURR LONG_ASM_CONST(0x0001000000000000) |
193 | #define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000) | 193 | #define CPU_FTR_DSCR LONG_ASM_CONST(0x0002000000000000) |
194 | #define CPU_FTR_1T_SEGMENT LONG_ASM_CONST(0x0004000000000000) | ||
195 | #define CPU_FTR_NO_SLBIE_B LONG_ASM_CONST(0x0008000000000000) | ||
196 | #define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000) | 194 | #define CPU_FTR_VSX LONG_ASM_CONST(0x0010000000000000) |
197 | #define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000) | 195 | #define CPU_FTR_SAO LONG_ASM_CONST(0x0020000000000000) |
198 | #define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000) | 196 | #define CPU_FTR_CP_USE_DCBTZ LONG_ASM_CONST(0x0040000000000000) |
199 | #define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000) | 197 | #define CPU_FTR_UNALIGNED_LD_STD LONG_ASM_CONST(0x0080000000000000) |
200 | #define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0100000000000000) | 198 | #define CPU_FTR_ASYM_SMT LONG_ASM_CONST(0x0100000000000000) |
199 | #define CPU_FTR_STCX_CHECKS_ADDRESS LONG_ASM_CONST(0x0200000000000000) | ||
200 | #define CPU_FTR_POPCNTB LONG_ASM_CONST(0x0400000000000000) | ||
201 | #define CPU_FTR_POPCNTD LONG_ASM_CONST(0x0800000000000000) | ||
202 | #define CPU_FTR_ICSWX LONG_ASM_CONST(0x1000000000000000) | ||
201 | 203 | ||
202 | #ifndef __ASSEMBLY__ | 204 | #ifndef __ASSEMBLY__ |
203 | 205 | ||
204 | #define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_SLB | \ | 206 | #define CPU_FTR_PPCAS_ARCH_V2 (CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN) |
205 | CPU_FTR_TLBIEL | CPU_FTR_NOEXECUTE | \ | 207 | |
206 | CPU_FTR_NODSISRALIGN | CPU_FTR_16M_PAGE) | 208 | #define MMU_FTR_PPCAS_ARCH_V2 (MMU_FTR_SLB | MMU_FTR_TLBIEL | \ |
209 | MMU_FTR_16M_PAGE) | ||
207 | 210 | ||
208 | /* We only set the altivec features if the kernel was compiled with altivec | 211 | /* We only set the altivec features if the kernel was compiled with altivec |
209 | * support | 212 | * support |
@@ -378,10 +381,13 @@ extern const char *powerpc_base_platform; | |||
378 | #define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ | 381 | #define CPU_FTRS_E500_2 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ |
379 | CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \ | 382 | CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | \ |
380 | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) | 383 | CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) |
381 | #define CPU_FTRS_E500MC (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ | 384 | #define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ |
382 | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \ | ||
383 | CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ | 385 | CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ |
384 | CPU_FTR_DBELL) | 386 | CPU_FTR_DBELL) |
387 | #define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ | ||
388 | CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ | ||
389 | CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ | ||
390 | CPU_FTR_DEBUG_LVL_EXC) | ||
385 | #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) | 391 | #define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) |
386 | 392 | ||
387 | /* 64-bit CPUs */ | 393 | /* 64-bit CPUs */ |
@@ -392,46 +398,55 @@ extern const char *powerpc_base_platform; | |||
392 | CPU_FTR_MMCRA | CPU_FTR_CTRL) | 398 | CPU_FTR_MMCRA | CPU_FTR_CTRL) |
393 | #define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 399 | #define CPU_FTRS_POWER4 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
394 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 400 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
395 | CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ) | 401 | CPU_FTR_MMCRA | CPU_FTR_CP_USE_DCBTZ | \ |
402 | CPU_FTR_STCX_CHECKS_ADDRESS) | ||
396 | #define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 403 | #define CPU_FTRS_PPC970 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
397 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 404 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
398 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ | 405 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_CAN_NAP | CPU_FTR_MMCRA | \ |
399 | CPU_FTR_CP_USE_DCBTZ) | 406 | CPU_FTR_CP_USE_DCBTZ | CPU_FTR_STCX_CHECKS_ADDRESS) |
400 | #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 407 | #define CPU_FTRS_POWER5 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
401 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 408 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
402 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 409 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
403 | CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ | 410 | CPU_FTR_COHERENT_ICACHE | CPU_FTR_PURR | \ |
404 | CPU_FTR_PURR) | 411 | CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB) |
405 | #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 412 | #define CPU_FTRS_POWER6 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
406 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 413 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
407 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 414 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
408 | CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ | 415 | CPU_FTR_COHERENT_ICACHE | \ |
409 | CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ | 416 | CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ |
410 | CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD) | 417 | CPU_FTR_DSCR | CPU_FTR_UNALIGNED_LD_STD | \ |
418 | CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_CFAR) | ||
411 | #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 419 | #define CPU_FTRS_POWER7 (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
412 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 420 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | CPU_FTR_HVMODE_206 |\ |
413 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 421 | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
414 | CPU_FTR_COHERENT_ICACHE | CPU_FTR_LOCKLESS_TLBIE | \ | 422 | CPU_FTR_COHERENT_ICACHE | \ |
415 | CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ | 423 | CPU_FTR_PURR | CPU_FTR_SPURR | CPU_FTR_REAL_LE | \ |
416 | CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT) | 424 | CPU_FTR_DSCR | CPU_FTR_SAO | CPU_FTR_ASYM_SMT | \ |
425 | CPU_FTR_STCX_CHECKS_ADDRESS | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ | ||
426 | CPU_FTR_ICSWX | CPU_FTR_CFAR) | ||
417 | #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 427 | #define CPU_FTRS_CELL (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
418 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ | 428 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \ |
419 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ | 429 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \ |
420 | CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE | \ | 430 | CPU_FTR_PAUSE_ZERO | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \ |
421 | CPU_FTR_CELL_TB_BUG | CPU_FTR_CP_USE_DCBTZ | \ | ||
422 | CPU_FTR_UNALIGNED_LD_STD) | 431 | CPU_FTR_UNALIGNED_LD_STD) |
423 | #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ | 432 | #define CPU_FTRS_PA6T (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \ |
424 | CPU_FTR_PPCAS_ARCH_V2 | \ | 433 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | \ |
425 | CPU_FTR_ALTIVEC_COMP | CPU_FTR_CI_LARGE_PAGE | \ | 434 | CPU_FTR_PURR | CPU_FTR_REAL_LE) |
426 | CPU_FTR_PURR | CPU_FTR_REAL_LE | CPU_FTR_NO_SLBIE_B) | ||
427 | #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) | 435 | #define CPU_FTRS_COMPATIBLE (CPU_FTR_USE_TB | CPU_FTR_PPCAS_ARCH_V2) |
428 | 436 | ||
437 | #define CPU_FTRS_A2 (CPU_FTR_USE_TB | CPU_FTR_SMT | CPU_FTR_DBELL | \ | ||
438 | CPU_FTR_NOEXECUTE | CPU_FTR_NODSISRALIGN) | ||
439 | |||
429 | #ifdef __powerpc64__ | 440 | #ifdef __powerpc64__ |
441 | #ifdef CONFIG_PPC_BOOK3E | ||
442 | #define CPU_FTRS_POSSIBLE (CPU_FTRS_E5500 | CPU_FTRS_A2) | ||
443 | #else | ||
430 | #define CPU_FTRS_POSSIBLE \ | 444 | #define CPU_FTRS_POSSIBLE \ |
431 | (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ | 445 | (CPU_FTRS_POWER3 | CPU_FTRS_RS64 | CPU_FTRS_POWER4 | \ |
432 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \ | 446 | CPU_FTRS_PPC970 | CPU_FTRS_POWER5 | CPU_FTRS_POWER6 | \ |
433 | CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \ | 447 | CPU_FTRS_POWER7 | CPU_FTRS_CELL | CPU_FTRS_PA6T | \ |
434 | CPU_FTR_1T_SEGMENT | CPU_FTR_VSX) | 448 | CPU_FTR_VSX) |
449 | #endif | ||
435 | #else | 450 | #else |
436 | enum { | 451 | enum { |
437 | CPU_FTRS_POSSIBLE = | 452 | CPU_FTRS_POSSIBLE = |
@@ -458,23 +473,28 @@ enum { | |||
458 | CPU_FTRS_44X | CPU_FTRS_440x6 | | 473 | CPU_FTRS_44X | CPU_FTRS_440x6 | |
459 | #endif | 474 | #endif |
460 | #ifdef CONFIG_PPC_47x | 475 | #ifdef CONFIG_PPC_47x |
461 | CPU_FTRS_47X | | 476 | CPU_FTRS_47X | CPU_FTR_476_DD2 | |
462 | #endif | 477 | #endif |
463 | #ifdef CONFIG_E200 | 478 | #ifdef CONFIG_E200 |
464 | CPU_FTRS_E200 | | 479 | CPU_FTRS_E200 | |
465 | #endif | 480 | #endif |
466 | #ifdef CONFIG_E500 | 481 | #ifdef CONFIG_E500 |
467 | CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC | | 482 | CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC | |
483 | CPU_FTRS_E5500 | | ||
468 | #endif | 484 | #endif |
469 | 0, | 485 | 0, |
470 | }; | 486 | }; |
471 | #endif /* __powerpc64__ */ | 487 | #endif /* __powerpc64__ */ |
472 | 488 | ||
473 | #ifdef __powerpc64__ | 489 | #ifdef __powerpc64__ |
490 | #ifdef CONFIG_PPC_BOOK3E | ||
491 | #define CPU_FTRS_ALWAYS (CPU_FTRS_E5500 & CPU_FTRS_A2) | ||
492 | #else | ||
474 | #define CPU_FTRS_ALWAYS \ | 493 | #define CPU_FTRS_ALWAYS \ |
475 | (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \ | 494 | (CPU_FTRS_POWER3 & CPU_FTRS_RS64 & CPU_FTRS_POWER4 & \ |
476 | CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \ | 495 | CPU_FTRS_PPC970 & CPU_FTRS_POWER5 & CPU_FTRS_POWER6 & \ |
477 | CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE) | 496 | CPU_FTRS_POWER7 & CPU_FTRS_CELL & CPU_FTRS_PA6T & CPU_FTRS_POSSIBLE) |
497 | #endif | ||
478 | #else | 498 | #else |
479 | enum { | 499 | enum { |
480 | CPU_FTRS_ALWAYS = | 500 | CPU_FTRS_ALWAYS = |
@@ -505,6 +525,7 @@ enum { | |||
505 | #endif | 525 | #endif |
506 | #ifdef CONFIG_E500 | 526 | #ifdef CONFIG_E500 |
507 | CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC & | 527 | CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC & |
528 | CPU_FTRS_E5500 & | ||
508 | #endif | 529 | #endif |
509 | CPU_FTRS_POSSIBLE, | 530 | CPU_FTRS_POSSIBLE, |
510 | }; | 531 | }; |
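Note: CPU_FTRS_POSSIBLE and CPU_FTRS_ALWAYS above are the union and intersection of the feature masks for all CPUs the kernel is built for; cpu_has_feature() uses them so that tests against always-set or never-set bits fold to constants at compile time. A small standalone sketch of that mechanism, with made-up feature values:

#include <stdio.h>

#define FTR_A 0x1ul
#define FTR_B 0x2ul
#define FTR_C 0x4ul

#define CPU_FTRS_CPU1 (FTR_A | FTR_B)
#define CPU_FTRS_CPU2 (FTR_A | FTR_C)

#define CPU_FTRS_POSSIBLE (CPU_FTRS_CPU1 | CPU_FTRS_CPU2)	/* union */
#define CPU_FTRS_ALWAYS   (CPU_FTRS_CPU1 & CPU_FTRS_CPU2)	/* intersection */

static unsigned long cur_cpu_ftrs = CPU_FTRS_CPU2;		/* runtime value */

static int cpu_has_feature(unsigned long feature)
{
	if (CPU_FTRS_ALWAYS & feature)		/* always present: constant 1 */
		return 1;
	if (!(CPU_FTRS_POSSIBLE & feature))	/* never present: constant 0 */
		return 0;
	return (cur_cpu_ftrs & feature) != 0;	/* genuinely runtime */
}

int main(void)
{
	printf("A:%d B:%d C:%d\n", cpu_has_feature(FTR_A),
	       cpu_has_feature(FTR_B), cpu_has_feature(FTR_C));
	return 0;
}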
diff --git a/arch/powerpc/include/asm/cputhreads.h b/arch/powerpc/include/asm/cputhreads.h index a8e18447c62b..ce516e5eb0d3 100644 --- a/arch/powerpc/include/asm/cputhreads.h +++ b/arch/powerpc/include/asm/cputhreads.h | |||
@@ -37,16 +37,16 @@ extern cpumask_t threads_core_mask; | |||
37 | * This can typically be used for things like IPI for tlb invalidations | 37 | * This can typically be used for things like IPI for tlb invalidations |
38 | * since those need to be done only once per core/TLB | 38 | * since those need to be done only once per core/TLB |
39 | */ | 39 | */ |
40 | static inline cpumask_t cpu_thread_mask_to_cores(cpumask_t threads) | 40 | static inline cpumask_t cpu_thread_mask_to_cores(const struct cpumask *threads) |
41 | { | 41 | { |
42 | cpumask_t tmp, res; | 42 | cpumask_t tmp, res; |
43 | int i; | 43 | int i; |
44 | 44 | ||
45 | res = CPU_MASK_NONE; | 45 | cpumask_clear(&res); |
46 | for (i = 0; i < NR_CPUS; i += threads_per_core) { | 46 | for (i = 0; i < NR_CPUS; i += threads_per_core) { |
47 | cpus_shift_left(tmp, threads_core_mask, i); | 47 | cpumask_shift_left(&tmp, &threads_core_mask, i); |
48 | if (cpus_intersects(threads, tmp)) | 48 | if (cpumask_intersects(threads, &tmp)) |
49 | cpu_set(i, res); | 49 | cpumask_set_cpu(i, &res); |
50 | } | 50 | } |
51 | return res; | 51 | return res; |
52 | } | 52 | } |
@@ -58,25 +58,28 @@ static inline int cpu_nr_cores(void) | |||
58 | 58 | ||
59 | static inline cpumask_t cpu_online_cores_map(void) | 59 | static inline cpumask_t cpu_online_cores_map(void) |
60 | { | 60 | { |
61 | return cpu_thread_mask_to_cores(cpu_online_map); | 61 | return cpu_thread_mask_to_cores(cpu_online_mask); |
62 | } | 62 | } |
63 | 63 | ||
64 | static inline int cpu_thread_to_core(int cpu) | 64 | #ifdef CONFIG_SMP |
65 | { | 65 | int cpu_core_index_of_thread(int cpu); |
66 | return cpu >> threads_shift; | 66 | int cpu_first_thread_of_core(int core); |
67 | } | 67 | #else |
68 | static inline int cpu_core_index_of_thread(int cpu) { return cpu; } | ||
69 | static inline int cpu_first_thread_of_core(int core) { return core; } | ||
70 | #endif | ||
68 | 71 | ||
69 | static inline int cpu_thread_in_core(int cpu) | 72 | static inline int cpu_thread_in_core(int cpu) |
70 | { | 73 | { |
71 | return cpu & (threads_per_core - 1); | 74 | return cpu & (threads_per_core - 1); |
72 | } | 75 | } |
73 | 76 | ||
74 | static inline int cpu_first_thread_in_core(int cpu) | 77 | static inline int cpu_first_thread_sibling(int cpu) |
75 | { | 78 | { |
76 | return cpu & ~(threads_per_core - 1); | 79 | return cpu & ~(threads_per_core - 1); |
77 | } | 80 | } |
78 | 81 | ||
79 | static inline int cpu_last_thread_in_core(int cpu) | 82 | static inline int cpu_last_thread_sibling(int cpu) |
80 | { | 83 | { |
81 | return cpu | (threads_per_core - 1); | 84 | return cpu | (threads_per_core - 1); |
82 | } | 85 | } |
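Note: the thread/core helpers above (including the renamed *_sibling variants) are pure mask arithmetic on the CPU number, which works because threads_per_core is a power of two. A quick userspace illustration, assuming threads_per_core = 4:

#include <stdio.h>

static int threads_per_core = 4;	/* assumed example value */

static int cpu_thread_in_core(int cpu)       { return cpu & (threads_per_core - 1); }
static int cpu_first_thread_sibling(int cpu) { return cpu & ~(threads_per_core - 1); }
static int cpu_last_thread_sibling(int cpu)  { return cpu | (threads_per_core - 1); }

int main(void)
{
	for (int cpu = 0; cpu < 8; cpu++)
		printf("cpu %d: thread %d, siblings %d-%d\n", cpu,
		       cpu_thread_in_core(cpu),
		       cpu_first_thread_sibling(cpu),
		       cpu_last_thread_sibling(cpu));
	return 0;
}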
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h index 8bdc6a9e5773..1cf20bdfbeca 100644 --- a/arch/powerpc/include/asm/cputime.h +++ b/arch/powerpc/include/asm/cputime.h | |||
@@ -124,23 +124,23 @@ static inline u64 cputime64_to_jiffies64(const cputime_t ct) | |||
124 | } | 124 | } |
125 | 125 | ||
126 | /* | 126 | /* |
127 | * Convert cputime <-> milliseconds | 127 | * Convert cputime <-> microseconds |
128 | */ | 128 | */ |
129 | extern u64 __cputime_msec_factor; | 129 | extern u64 __cputime_msec_factor; |
130 | 130 | ||
131 | static inline unsigned long cputime_to_msecs(const cputime_t ct) | 131 | static inline unsigned long cputime_to_usecs(const cputime_t ct) |
132 | { | 132 | { |
133 | return mulhdu(ct, __cputime_msec_factor); | 133 | return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC; |
134 | } | 134 | } |
135 | 135 | ||
136 | static inline cputime_t msecs_to_cputime(const unsigned long ms) | 136 | static inline cputime_t usecs_to_cputime(const unsigned long us) |
137 | { | 137 | { |
138 | cputime_t ct; | 138 | cputime_t ct; |
139 | unsigned long sec; | 139 | unsigned long sec; |
140 | 140 | ||
141 | /* have to be a little careful about overflow */ | 141 | /* have to be a little careful about overflow */ |
142 | ct = ms % 1000; | 142 | ct = us % 1000000; |
143 | sec = ms / 1000; | 143 | sec = us / 1000000; |
144 | if (ct) { | 144 | if (ct) { |
145 | ct *= tb_ticks_per_sec; | 145 | ct *= tb_ticks_per_sec; |
146 | do_div(ct, 1000); | 146 | do_div(ct, 1000); |
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h index 0893ab9343a6..9c70d0ca96d4 100644 --- a/arch/powerpc/include/asm/dbell.h +++ b/arch/powerpc/include/asm/dbell.h | |||
@@ -27,9 +27,8 @@ enum ppc_dbell { | |||
27 | PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */ | 27 | PPC_G_DBELL_MC = 4, /* guest mcheck doorbell */ |
28 | }; | 28 | }; |
29 | 29 | ||
30 | extern void doorbell_message_pass(int target, int msg); | 30 | extern void doorbell_cause_ipi(int cpu, unsigned long data); |
31 | extern void doorbell_exception(struct pt_regs *regs); | 31 | extern void doorbell_exception(struct pt_regs *regs); |
32 | extern void doorbell_check_self(void); | ||
33 | extern void doorbell_setup_this_cpu(void); | 32 | extern void doorbell_setup_this_cpu(void); |
34 | 33 | ||
35 | static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag) | 34 | static inline void ppc_msgsnd(enum ppc_dbell type, u32 flags, u32 tag) |
diff --git a/arch/powerpc/include/asm/device.h b/arch/powerpc/include/asm/device.h index a3954e4fcbe2..16d25c0974be 100644 --- a/arch/powerpc/include/asm/device.h +++ b/arch/powerpc/include/asm/device.h | |||
@@ -9,6 +9,12 @@ | |||
9 | struct dma_map_ops; | 9 | struct dma_map_ops; |
10 | struct device_node; | 10 | struct device_node; |
11 | 11 | ||
12 | /* | ||
13 | * Arch extensions to struct device. | ||
14 | * | ||
15 | * When adding fields, consider macio_add_one_device in | ||
16 | * drivers/macintosh/macio_asic.c | ||
17 | */ | ||
12 | struct dev_archdata { | 18 | struct dev_archdata { |
13 | /* DMA operations on that device */ | 19 | /* DMA operations on that device */ |
14 | struct dma_map_ops *dma_ops; | 20 | struct dma_map_ops *dma_ops; |
diff --git a/arch/powerpc/include/asm/dma-mapping.h b/arch/powerpc/include/asm/dma-mapping.h index 8c9c6ad2004e..dd70fac57ec8 100644 --- a/arch/powerpc/include/asm/dma-mapping.h +++ b/arch/powerpc/include/asm/dma-mapping.h | |||
@@ -42,6 +42,7 @@ extern void __dma_free_coherent(size_t size, void *vaddr); | |||
42 | extern void __dma_sync(void *vaddr, size_t size, int direction); | 42 | extern void __dma_sync(void *vaddr, size_t size, int direction); |
43 | extern void __dma_sync_page(struct page *page, unsigned long offset, | 43 | extern void __dma_sync_page(struct page *page, unsigned long offset, |
44 | size_t size, int direction); | 44 | size_t size, int direction); |
45 | extern unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr); | ||
45 | 46 | ||
46 | #else /* ! CONFIG_NOT_COHERENT_CACHE */ | 47 | #else /* ! CONFIG_NOT_COHERENT_CACHE */ |
47 | /* | 48 | /* |
@@ -127,19 +128,7 @@ static inline int dma_supported(struct device *dev, u64 mask) | |||
127 | return dma_ops->dma_supported(dev, mask); | 128 | return dma_ops->dma_supported(dev, mask); |
128 | } | 129 | } |
129 | 130 | ||
130 | static inline int dma_set_mask(struct device *dev, u64 dma_mask) | 131 | extern int dma_set_mask(struct device *dev, u64 dma_mask); |
131 | { | ||
132 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | ||
133 | |||
134 | if (unlikely(dma_ops == NULL)) | ||
135 | return -EIO; | ||
136 | if (dma_ops->set_dma_mask != NULL) | ||
137 | return dma_ops->set_dma_mask(dev, dma_mask); | ||
138 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
139 | return -EIO; | ||
140 | *dev->dma_mask = dma_mask; | ||
141 | return 0; | ||
142 | } | ||
143 | 132 | ||
144 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, | 133 | static inline void *dma_alloc_coherent(struct device *dev, size_t size, |
145 | dma_addr_t *dma_handle, gfp_t flag) | 134 | dma_addr_t *dma_handle, gfp_t flag) |
@@ -210,6 +199,11 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr) | |||
210 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) | 199 | #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) |
211 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) | 200 | #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) |
212 | 201 | ||
202 | extern int dma_mmap_coherent(struct device *, struct vm_area_struct *, | ||
203 | void *, dma_addr_t, size_t); | ||
204 | #define ARCH_HAS_DMA_MMAP_COHERENT | ||
205 | |||
206 | |||
213 | static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, | 207 | static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size, |
214 | enum dma_data_direction direction) | 208 | enum dma_data_direction direction) |
215 | { | 209 | { |
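Note: dma_set_mask() becomes an out-of-line function in this hunk; callers keep the usual pattern of requesting the widest mask first and falling back. A hypothetical driver-probe fragment showing that calling convention (example_setup_dma is made up for illustration):

#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/errno.h>

static int example_setup_dma(struct device *dev)
{
	/* Prefer 64-bit DMA addressing, fall back to 32-bit. */
	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
	    dma_set_mask(dev, DMA_BIT_MASK(32)))
		return -EIO;	/* no usable DMA addressing mode */
	return 0;
}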
diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h index c376eda15313..2b917c69ed15 100644 --- a/arch/powerpc/include/asm/elf.h +++ b/arch/powerpc/include/asm/elf.h | |||
@@ -250,7 +250,7 @@ do { \ | |||
250 | * the 64bit ABI has never had these issues dont enable the workaround | 250 | * the 64bit ABI has never had these issues dont enable the workaround |
251 | * even if we have an executable stack. | 251 | * even if we have an executable stack. |
252 | */ | 252 | */ |
253 | # define elf_read_implies_exec(ex, exec_stk) (test_thread_flag(TIF_32BIT) ? \ | 253 | # define elf_read_implies_exec(ex, exec_stk) (is_32bit_task() ? \ |
254 | (exec_stk == EXSTACK_DEFAULT) : 0) | 254 | (exec_stk == EXSTACK_DEFAULT) : 0) |
255 | #else | 255 | #else |
256 | # define SET_PERSONALITY(ex) \ | 256 | # define SET_PERSONALITY(ex) \ |
diff --git a/arch/powerpc/include/asm/emulated_ops.h b/arch/powerpc/include/asm/emulated_ops.h index f0fb4fc1f6e6..45921672b97a 100644 --- a/arch/powerpc/include/asm/emulated_ops.h +++ b/arch/powerpc/include/asm/emulated_ops.h | |||
@@ -52,6 +52,10 @@ extern struct ppc_emulated { | |||
52 | #ifdef CONFIG_VSX | 52 | #ifdef CONFIG_VSX |
53 | struct ppc_emulated_entry vsx; | 53 | struct ppc_emulated_entry vsx; |
54 | #endif | 54 | #endif |
55 | #ifdef CONFIG_PPC64 | ||
56 | struct ppc_emulated_entry mfdscr; | ||
57 | struct ppc_emulated_entry mtdscr; | ||
58 | #endif | ||
55 | } ppc_emulated; | 59 | } ppc_emulated; |
56 | 60 | ||
57 | extern u32 ppc_warn_emulated; | 61 | extern u32 ppc_warn_emulated; |
diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h index 57c400071995..f5dfe3411f64 100644 --- a/arch/powerpc/include/asm/exception-64s.h +++ b/arch/powerpc/include/asm/exception-64s.h | |||
@@ -46,6 +46,7 @@ | |||
46 | #define EX_CCR 60 | 46 | #define EX_CCR 60 |
47 | #define EX_R3 64 | 47 | #define EX_R3 64 |
48 | #define EX_LR 72 | 48 | #define EX_LR 72 |
49 | #define EX_CFAR 80 | ||
49 | 50 | ||
50 | /* | 51 | /* |
51 | * We're short on space and time in the exception prolog, so we can't | 52 | * We're short on space and time in the exception prolog, so we can't |
@@ -56,30 +57,40 @@ | |||
56 | #define LOAD_HANDLER(reg, label) \ | 57 | #define LOAD_HANDLER(reg, label) \ |
57 | addi reg,reg,(label)-_stext; /* virt addr of handler ... */ | 58 | addi reg,reg,(label)-_stext; /* virt addr of handler ... */ |
58 | 59 | ||
59 | #define EXCEPTION_PROLOG_1(area) \ | 60 | /* Exception register prefixes */ |
60 | mfspr r13,SPRN_SPRG_PACA; /* get paca address into r13 */ \ | 61 | #define EXC_HV H |
62 | #define EXC_STD | ||
63 | |||
64 | #define EXCEPTION_PROLOG_1(area) \ | ||
65 | GET_PACA(r13); \ | ||
61 | std r9,area+EX_R9(r13); /* save r9 - r12 */ \ | 66 | std r9,area+EX_R9(r13); /* save r9 - r12 */ \ |
62 | std r10,area+EX_R10(r13); \ | 67 | std r10,area+EX_R10(r13); \ |
63 | std r11,area+EX_R11(r13); \ | 68 | std r11,area+EX_R11(r13); \ |
64 | std r12,area+EX_R12(r13); \ | 69 | std r12,area+EX_R12(r13); \ |
65 | mfspr r9,SPRN_SPRG_SCRATCH0; \ | 70 | BEGIN_FTR_SECTION_NESTED(66); \ |
71 | mfspr r10,SPRN_CFAR; \ | ||
72 | std r10,area+EX_CFAR(r13); \ | ||
73 | END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ | ||
74 | GET_SCRATCH0(r9); \ | ||
66 | std r9,area+EX_R13(r13); \ | 75 | std r9,area+EX_R13(r13); \ |
67 | mfcr r9 | 76 | mfcr r9 |
68 | 77 | ||
69 | #define EXCEPTION_PROLOG_PSERIES_1(label) \ | 78 | #define __EXCEPTION_PROLOG_PSERIES_1(label, h) \ |
70 | ld r12,PACAKBASE(r13); /* get high part of &label */ \ | 79 | ld r12,PACAKBASE(r13); /* get high part of &label */ \ |
71 | ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \ | 80 | ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \ |
72 | mfspr r11,SPRN_SRR0; /* save SRR0 */ \ | 81 | mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ |
73 | LOAD_HANDLER(r12,label) \ | 82 | LOAD_HANDLER(r12,label) \ |
74 | mtspr SPRN_SRR0,r12; \ | 83 | mtspr SPRN_##h##SRR0,r12; \ |
75 | mfspr r12,SPRN_SRR1; /* and SRR1 */ \ | 84 | mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ |
76 | mtspr SPRN_SRR1,r10; \ | 85 | mtspr SPRN_##h##SRR1,r10; \ |
77 | rfid; \ | 86 | h##rfid; \ |
78 | b . /* prevent speculative execution */ | 87 | b . /* prevent speculative execution */ |
88 | #define EXCEPTION_PROLOG_PSERIES_1(label, h) \ | ||
89 | __EXCEPTION_PROLOG_PSERIES_1(label, h) | ||
79 | 90 | ||
80 | #define EXCEPTION_PROLOG_PSERIES(area, label) \ | 91 | #define EXCEPTION_PROLOG_PSERIES(area, label, h) \ |
81 | EXCEPTION_PROLOG_1(area); \ | 92 | EXCEPTION_PROLOG_1(area); \ |
82 | EXCEPTION_PROLOG_PSERIES_1(label); | 93 | EXCEPTION_PROLOG_PSERIES_1(label, h); |
83 | 94 | ||
84 | /* | 95 | /* |
85 | * The common exception prolog is used for all except a few exceptions | 96 | * The common exception prolog is used for all except a few exceptions |
@@ -98,10 +109,11 @@ | |||
98 | beq- 1f; \ | 109 | beq- 1f; \ |
99 | ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ | 110 | ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ |
100 | 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ | 111 | 1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ |
101 | bge- cr1,2f; /* abort if it is */ \ | 112 | blt+ cr1,3f; /* abort if it is */ \ |
102 | b 3f; \ | 113 | li r1,(n); /* will be reloaded later */ \ |
103 | 2: li r1,(n); /* will be reloaded later */ \ | ||
104 | sth r1,PACA_TRAP_SAVE(r13); \ | 114 | sth r1,PACA_TRAP_SAVE(r13); \ |
115 | std r3,area+EX_R3(r13); \ | ||
116 | addi r3,r13,area; /* r3 -> where regs are saved*/ \ | ||
105 | b bad_stack; \ | 117 | b bad_stack; \ |
106 | 3: std r9,_CCR(r1); /* save CR in stackframe */ \ | 118 | 3: std r9,_CCR(r1); /* save CR in stackframe */ \ |
107 | std r11,_NIP(r1); /* save SRR0 in stackframe */ \ | 119 | std r11,_NIP(r1); /* save SRR0 in stackframe */ \ |
@@ -123,6 +135,10 @@ | |||
123 | std r9,GPR11(r1); \ | 135 | std r9,GPR11(r1); \ |
124 | std r10,GPR12(r1); \ | 136 | std r10,GPR12(r1); \ |
125 | std r11,GPR13(r1); \ | 137 | std r11,GPR13(r1); \ |
138 | BEGIN_FTR_SECTION_NESTED(66); \ | ||
139 | ld r10,area+EX_CFAR(r13); \ | ||
140 | std r10,ORIG_GPR3(r1); \ | ||
141 | END_FTR_SECTION_NESTED(CPU_FTR_CFAR, CPU_FTR_CFAR, 66); \ | ||
126 | ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ | 142 | ld r2,PACATOC(r13); /* get kernel TOC into r2 */ \ |
127 | mflr r9; /* save LR in stackframe */ \ | 143 | mflr r9; /* save LR in stackframe */ \ |
128 | std r9,_LINK(r1); \ | 144 | std r9,_LINK(r1); \ |
@@ -137,62 +153,68 @@ | |||
137 | li r10,0; \ | 153 | li r10,0; \ |
138 | ld r11,exception_marker@toc(r2); \ | 154 | ld r11,exception_marker@toc(r2); \ |
139 | std r10,RESULT(r1); /* clear regs->result */ \ | 155 | std r10,RESULT(r1); /* clear regs->result */ \ |
140 | std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ | 156 | std r11,STACK_FRAME_OVERHEAD-16(r1); /* mark the frame */ \ |
157 | ACCOUNT_STOLEN_TIME | ||
141 | 158 | ||
142 | /* | 159 | /* |
143 | * Exception vectors. | 160 | * Exception vectors. |
144 | */ | 161 | */ |
145 | #define STD_EXCEPTION_PSERIES(n, label) \ | 162 | #define STD_EXCEPTION_PSERIES(loc, vec, label) \ |
146 | . = n; \ | 163 | . = loc; \ |
147 | .globl label##_pSeries; \ | 164 | .globl label##_pSeries; \ |
148 | label##_pSeries: \ | 165 | label##_pSeries: \ |
149 | HMT_MEDIUM; \ | 166 | HMT_MEDIUM; \ |
150 | DO_KVM n; \ | 167 | DO_KVM vec; \ |
151 | mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \ | 168 | SET_SCRATCH0(r13); /* save r13 */ \ |
152 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) | 169 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_STD) |
153 | 170 | ||
154 | #define HSTD_EXCEPTION_PSERIES(n, label) \ | 171 | #define STD_EXCEPTION_HV(loc, vec, label) \ |
155 | . = n; \ | 172 | . = loc; \ |
156 | .globl label##_pSeries; \ | 173 | .globl label##_hv; \ |
157 | label##_pSeries: \ | 174 | label##_hv: \ |
158 | HMT_MEDIUM; \ | 175 | HMT_MEDIUM; \ |
159 | mtspr SPRN_SPRG_SCRATCH0,r20; /* save r20 */ \ | 176 | DO_KVM vec; \ |
160 | mfspr r20,SPRN_HSRR0; /* copy HSRR0 to SRR0 */ \ | 177 | SET_SCRATCH0(r13); /* save r13 */ \ |
161 | mtspr SPRN_SRR0,r20; \ | 178 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common, EXC_HV) |
162 | mfspr r20,SPRN_HSRR1; /* copy HSRR0 to SRR0 */ \ | ||
163 | mtspr SPRN_SRR1,r20; \ | ||
164 | mfspr r20,SPRN_SPRG_SCRATCH0; /* restore r20 */ \ | ||
165 | mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \ | ||
166 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, label##_common) | ||
167 | 179 | ||
168 | 180 | #define __MASKABLE_EXCEPTION_PSERIES(vec, label, h) \ | |
169 | #define MASKABLE_EXCEPTION_PSERIES(n, label) \ | ||
170 | . = n; \ | ||
171 | .globl label##_pSeries; \ | ||
172 | label##_pSeries: \ | ||
173 | HMT_MEDIUM; \ | 181 | HMT_MEDIUM; \ |
174 | DO_KVM n; \ | 182 | DO_KVM vec; \ |
175 | mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \ | 183 | SET_SCRATCH0(r13); /* save r13 */ \ |
176 | mfspr r13,SPRN_SPRG_PACA; /* get paca address into r13 */ \ | 184 | GET_PACA(r13); \ |
177 | std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \ | 185 | std r9,PACA_EXGEN+EX_R9(r13); /* save r9, r10 */ \ |
178 | std r10,PACA_EXGEN+EX_R10(r13); \ | 186 | std r10,PACA_EXGEN+EX_R10(r13); \ |
179 | lbz r10,PACASOFTIRQEN(r13); \ | 187 | lbz r10,PACASOFTIRQEN(r13); \ |
180 | mfcr r9; \ | 188 | mfcr r9; \ |
181 | cmpwi r10,0; \ | 189 | cmpwi r10,0; \ |
182 | beq masked_interrupt; \ | 190 | beq masked_##h##interrupt; \ |
183 | mfspr r10,SPRN_SPRG_SCRATCH0; \ | 191 | GET_SCRATCH0(r10); \ |
184 | std r10,PACA_EXGEN+EX_R13(r13); \ | 192 | std r10,PACA_EXGEN+EX_R13(r13); \ |
185 | std r11,PACA_EXGEN+EX_R11(r13); \ | 193 | std r11,PACA_EXGEN+EX_R11(r13); \ |
186 | std r12,PACA_EXGEN+EX_R12(r13); \ | 194 | std r12,PACA_EXGEN+EX_R12(r13); \ |
187 | ld r12,PACAKBASE(r13); /* get high part of &label */ \ | 195 | ld r12,PACAKBASE(r13); /* get high part of &label */ \ |
188 | ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \ | 196 | ld r10,PACAKMSR(r13); /* get MSR value for kernel */ \ |
189 | mfspr r11,SPRN_SRR0; /* save SRR0 */ \ | 197 | mfspr r11,SPRN_##h##SRR0; /* save SRR0 */ \ |
190 | LOAD_HANDLER(r12,label##_common) \ | 198 | LOAD_HANDLER(r12,label##_common) \ |
191 | mtspr SPRN_SRR0,r12; \ | 199 | mtspr SPRN_##h##SRR0,r12; \ |
192 | mfspr r12,SPRN_SRR1; /* and SRR1 */ \ | 200 | mfspr r12,SPRN_##h##SRR1; /* and SRR1 */ \ |
193 | mtspr SPRN_SRR1,r10; \ | 201 | mtspr SPRN_##h##SRR1,r10; \ |
194 | rfid; \ | 202 | h##rfid; \ |
195 | b . /* prevent speculative execution */ | 203 | b . /* prevent speculative execution */ |
204 | #define _MASKABLE_EXCEPTION_PSERIES(vec, label, h) \ | ||
205 | __MASKABLE_EXCEPTION_PSERIES(vec, label, h) | ||
206 | |||
207 | #define MASKABLE_EXCEPTION_PSERIES(loc, vec, label) \ | ||
208 | . = loc; \ | ||
209 | .globl label##_pSeries; \ | ||
210 | label##_pSeries: \ | ||
211 | _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_STD) | ||
212 | |||
213 | #define MASKABLE_EXCEPTION_HV(loc, vec, label) \ | ||
214 | . = loc; \ | ||
215 | .globl label##_hv; \ | ||
216 | label##_hv: \ | ||
217 | _MASKABLE_EXCEPTION_PSERIES(vec, label, EXC_HV) | ||
196 | 218 | ||
197 | #ifdef CONFIG_PPC_ISERIES | 219 | #ifdef CONFIG_PPC_ISERIES |
198 | #define DISABLE_INTS \ | 220 | #define DISABLE_INTS \ |
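For orientation, the reworked macros are meant to be invoked from the 64-bit server vector table roughly as follows; the vector addresses and handler labels are illustrative, not taken from this hunk. The point is that the _PSERIES variants keep SRR0/SRR1 and rfid while the new _HV variants use HSRR0/HSRR1 and hrfid:

        STD_EXCEPTION_PSERIES(0x300, 0x300, data_access)        /* SRR0/SRR1, rfid */
        STD_EXCEPTION_HV(0xe02, 0xe02, h_data_storage)          /* HSRR0/HSRR1, hrfid */
        MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer)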
diff --git a/arch/powerpc/include/asm/feature-fixups.h b/arch/powerpc/include/asm/feature-fixups.h index 96a7d067fbb2..9a67a38bf7b9 100644 --- a/arch/powerpc/include/asm/feature-fixups.h +++ b/arch/powerpc/include/asm/feature-fixups.h | |||
@@ -37,18 +37,21 @@ label##2: \ | |||
37 | .align 2; \ | 37 | .align 2; \ |
38 | label##3: | 38 | label##3: |
39 | 39 | ||
40 | #define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ | 40 | #define MAKE_FTR_SECTION_ENTRY(msk, val, label, sect) \ |
41 | label##4: \ | 41 | label##4: \ |
42 | .popsection; \ | 42 | .popsection; \ |
43 | .pushsection sect,"a"; \ | 43 | .pushsection sect,"a"; \ |
44 | .align 3; \ | 44 | .align 3; \ |
45 | label##5: \ | 45 | label##5: \ |
46 | FTR_ENTRY_LONG msk; \ | 46 | FTR_ENTRY_LONG msk; \ |
47 | FTR_ENTRY_LONG val; \ | 47 | FTR_ENTRY_LONG val; \ |
48 | FTR_ENTRY_OFFSET label##1b-label##5b; \ | 48 | FTR_ENTRY_OFFSET label##1b-label##5b; \ |
49 | FTR_ENTRY_OFFSET label##2b-label##5b; \ | 49 | FTR_ENTRY_OFFSET label##2b-label##5b; \ |
50 | FTR_ENTRY_OFFSET label##3b-label##5b; \ | 50 | FTR_ENTRY_OFFSET label##3b-label##5b; \ |
51 | FTR_ENTRY_OFFSET label##4b-label##5b; \ | 51 | FTR_ENTRY_OFFSET label##4b-label##5b; \ |
52 | .ifgt (label##4b- label##3b)-(label##2b- label##1b); \ | ||
53 | .error "Feature section else case larger than body"; \ | ||
54 | .endif; \ | ||
52 | .popsection; | 55 | .popsection; |
53 | 56 | ||
54 | 57 | ||
@@ -143,6 +146,19 @@ label##5: \ | |||
143 | 146 | ||
144 | #ifndef __ASSEMBLY__ | 147 | #ifndef __ASSEMBLY__ |
145 | 148 | ||
149 | #define ASM_FTR_IF(section_if, section_else, msk, val) \ | ||
150 | stringify_in_c(BEGIN_FTR_SECTION) \ | ||
151 | section_if "; " \ | ||
152 | stringify_in_c(FTR_SECTION_ELSE) \ | ||
153 | section_else "; " \ | ||
154 | stringify_in_c(ALT_FTR_SECTION_END((msk), (val))) | ||
155 | |||
156 | #define ASM_FTR_IFSET(section_if, section_else, msk) \ | ||
157 | ASM_FTR_IF(section_if, section_else, (msk), (msk)) | ||
158 | |||
159 | #define ASM_FTR_IFCLR(section_if, section_else, msk) \ | ||
160 | ASM_FTR_IF(section_if, section_else, (msk), 0) | ||
161 | |||
146 | #define ASM_MMU_FTR_IF(section_if, section_else, msk, val) \ | 162 | #define ASM_MMU_FTR_IF(section_if, section_else, msk, val) \ |
147 | stringify_in_c(BEGIN_MMU_FTR_SECTION) \ | 163 | stringify_in_c(BEGIN_MMU_FTR_SECTION) \ |
148 | section_if "; " \ | 164 | section_if "; " \ |
diff --git a/arch/powerpc/include/asm/firmware.h b/arch/powerpc/include/asm/firmware.h index 20778a405d7a..3a6c586c4e40 100644 --- a/arch/powerpc/include/asm/firmware.h +++ b/arch/powerpc/include/asm/firmware.h | |||
@@ -46,6 +46,8 @@ | |||
46 | #define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000) | 46 | #define FW_FEATURE_PS3_LV1 ASM_CONST(0x0000000000800000) |
47 | #define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000) | 47 | #define FW_FEATURE_BEAT ASM_CONST(0x0000000001000000) |
48 | #define FW_FEATURE_CMO ASM_CONST(0x0000000002000000) | 48 | #define FW_FEATURE_CMO ASM_CONST(0x0000000002000000) |
49 | #define FW_FEATURE_VPHN ASM_CONST(0x0000000004000000) | ||
50 | #define FW_FEATURE_XCMO ASM_CONST(0x0000000008000000) | ||
49 | 51 | ||
50 | #ifndef __ASSEMBLY__ | 52 | #ifndef __ASSEMBLY__ |
51 | 53 | ||
@@ -59,7 +61,7 @@ enum { | |||
59 | FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN | | 61 | FW_FEATURE_VIO | FW_FEATURE_RDMA | FW_FEATURE_LLAN | |
60 | FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR | | 62 | FW_FEATURE_BULK_REMOVE | FW_FEATURE_XDABR | |
61 | FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR | | 63 | FW_FEATURE_MULTITCE | FW_FEATURE_SPLPAR | FW_FEATURE_LPAR | |
62 | FW_FEATURE_CMO, | 64 | FW_FEATURE_CMO | FW_FEATURE_VPHN | FW_FEATURE_XCMO, |
63 | FW_FEATURE_PSERIES_ALWAYS = 0, | 65 | FW_FEATURE_PSERIES_ALWAYS = 0, |
64 | FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, | 66 | FW_FEATURE_ISERIES_POSSIBLE = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, |
65 | FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, | 67 | FW_FEATURE_ISERIES_ALWAYS = FW_FEATURE_ISERIES | FW_FEATURE_LPAR, |
diff --git a/arch/powerpc/include/asm/fsl_85xx_cache_sram.h b/arch/powerpc/include/asm/fsl_85xx_cache_sram.h new file mode 100644 index 000000000000..2af2bdc37b2e --- /dev/null +++ b/arch/powerpc/include/asm/fsl_85xx_cache_sram.h | |||
@@ -0,0 +1,48 @@ | |||
1 | /* | ||
2 | * Copyright 2009 Freescale Semiconductor, Inc. | ||
3 | * | ||
4 | * Cache SRAM handling for QorIQ platform | ||
5 | * | ||
6 | * Author: Vivek Mahajan <vivek.mahajan@freescale.com> | ||
7 | |||
8 | * This file is derived from the original work done | ||
9 | * by Sylvain Munaut for the Bestcomm SRAM allocator. | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License as published by the | ||
13 | * Free Software Foundation; either version 2 of the License, or (at your | ||
14 | * option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | */ | ||
25 | |||
26 | #ifndef __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__ | ||
27 | #define __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__ | ||
28 | |||
29 | #include <asm/rheap.h> | ||
30 | #include <linux/spinlock.h> | ||
31 | |||
32 | /* | ||
33 | * Cache-SRAM | ||
34 | */ | ||
35 | |||
36 | struct mpc85xx_cache_sram { | ||
37 | phys_addr_t base_phys; | ||
38 | void *base_virt; | ||
39 | unsigned int size; | ||
40 | rh_info_t *rh; | ||
41 | spinlock_t lock; | ||
42 | }; | ||
43 | |||
44 | extern void mpc85xx_cache_sram_free(void *ptr); | ||
45 | extern void *mpc85xx_cache_sram_alloc(unsigned int size, | ||
46 | phys_addr_t *phys, unsigned int align); | ||
47 | |||
48 | #endif /* __ASM_POWERPC_FSL_85XX_CACHE_SRAM_H__ */ ||
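As a usage sketch only (not part of this patch), a driver could carve a buffer out of the locked L2 cache-SRAM through the interface declared above; the size and alignment values here are arbitrary examples:

        phys_addr_t phys;
        void *buf = mpc85xx_cache_sram_alloc(4096, &phys, 4096);

        if (buf) {
                /* fast-path data or DMA descriptors can live in the SRAM window */
                mpc85xx_cache_sram_free(buf);
        }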
diff --git a/arch/powerpc/include/asm/immap_86xx.h b/arch/powerpc/include/asm/fsl_guts.h index 0f165e59c326..bebd12463ec9 100644 --- a/arch/powerpc/include/asm/immap_86xx.h +++ b/arch/powerpc/include/asm/fsl_guts.h | |||
@@ -1,5 +1,5 @@ | |||
1 | /** | 1 | /** |
2 | * MPC86xx Internal Memory Map | 2 | * Freescale 85xx and 86xx Global Utilities register set |
3 | * | 3 | * |
4 | * Authors: Jeff Brown | 4 | * Authors: Jeff Brown |
5 | * Timur Tabi <timur@freescale.com> | 5 | * Timur Tabi <timur@freescale.com> |
@@ -10,73 +10,112 @@ | |||
10 | * under the terms of the GNU General Public License as published by the | 10 | * under the terms of the GNU General Public License as published by the |
11 | * Free Software Foundation; either version 2 of the License, or (at your | 11 | * Free Software Foundation; either version 2 of the License, or (at your |
12 | * option) any later version. | 12 | * option) any later version. |
13 | * | ||
14 | * This header file defines structures for various 86xx SOC devices that are | ||
15 | * used by multiple source files. | ||
16 | */ | 13 | */ |
17 | 14 | ||
18 | #ifndef __ASM_POWERPC_IMMAP_86XX_H__ | 15 | #ifndef __ASM_POWERPC_FSL_GUTS_H__ |
19 | #define __ASM_POWERPC_IMMAP_86XX_H__ | 16 | #define __ASM_POWERPC_FSL_GUTS_H__ |
20 | #ifdef __KERNEL__ | 17 | #ifdef __KERNEL__ |
21 | 18 | ||
22 | /* Global Utility Registers */ | 19 | /* |
23 | struct ccsr_guts { | 20 | * These #ifdefs are safe because it's not possible to build a kernel that |
21 | * runs on e500 and e600 cores. | ||
22 | */ | ||
23 | |||
24 | #if !defined(CONFIG_PPC_85xx) && !defined(CONFIG_PPC_86xx) | ||
25 | #error Only 85xx and 86xx SOCs are supported | ||
26 | #endif | ||
27 | |||
28 | /** | ||
29 | * Global Utility Registers. | ||
30 | * | ||
31 | * Not all registers defined in this structure are available on all chips, so | ||
32 | * you are expected to know whether a given register actually exists on your | ||
33 | * chip before you access it. | ||
34 | * | ||
35 | * Also, some registers are similar on different chips but have slightly | ||
36 | * different names. In these cases, one name is chosen to avoid extraneous | ||
37 | * #ifdefs. | ||
38 | */ | ||
39 | #ifdef CONFIG_PPC_85xx | ||
40 | struct ccsr_guts_85xx { | ||
41 | #else | ||
42 | struct ccsr_guts_86xx { | ||
43 | #endif | ||
24 | __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ | 44 | __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ |
25 | __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ | 45 | __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ |
26 | __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ | 46 | __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ |
27 | __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ | 47 | __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ |
28 | __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ | 48 | __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ |
29 | u8 res1[0x20 - 0x14]; | 49 | __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */ |
50 | u8 res018[0x20 - 0x18]; | ||
30 | __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ | 51 | __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ |
31 | u8 res2[0x30 - 0x24]; | 52 | u8 res024[0x30 - 0x24]; |
32 | __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ | 53 | __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ |
33 | u8 res3[0x40 - 0x34]; | 54 | u8 res034[0x40 - 0x34]; |
34 | __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ | 55 | __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ |
35 | u8 res4[0x50 - 0x44]; | 56 | u8 res044[0x50 - 0x44]; |
36 | __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ | 57 | __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ |
37 | u8 res5[0x60 - 0x54]; | 58 | u8 res054[0x60 - 0x54]; |
38 | __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ | 59 | __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ |
39 | u8 res6[0x70 - 0x64]; | 60 | __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ |
61 | __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ | ||
62 | u8 res06c[0x70 - 0x6c]; | ||
40 | __be32 devdisr; /* 0x.0070 - Device Disable Control */ | 63 | __be32 devdisr; /* 0x.0070 - Device Disable Control */ |
41 | __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ | 64 | __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ |
42 | u8 res7[0x80 - 0x78]; | 65 | u8 res078[0x7c - 0x78]; |
66 | __be32 pmjcr; /* 0x.007c - Power Management Jog Control Register */ ||
43 | __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ | 67 | __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ |
44 | u8 res8[0x90 - 0x84]; | 68 | __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ |
69 | __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ | ||
70 | __be32 pmcdr; /* 0x.008c - Power management clock disable register */ ||
45 | __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ | 71 | __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ |
46 | __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ | 72 | __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ |
47 | u8 res9[0xA0 - 0x98]; | 73 | __be32 ectrstcr; /* 0x.0098 - Exception reset control register */ |
74 | __be32 autorstsr; /* 0x.009c - Automatic reset status register */ | ||
48 | __be32 pvr; /* 0x.00a0 - Processor Version Register */ | 75 | __be32 pvr; /* 0x.00a0 - Processor Version Register */ |
49 | __be32 svr; /* 0x.00a4 - System Version Register */ | 76 | __be32 svr; /* 0x.00a4 - System Version Register */ |
50 | u8 res10[0xB0 - 0xA8]; | 77 | u8 res0a8[0xb0 - 0xa8]; |
51 | __be32 rstcr; /* 0x.00b0 - Reset Control Register */ | 78 | __be32 rstcr; /* 0x.00b0 - Reset Control Register */ |
52 | u8 res11[0xC0 - 0xB4]; | 79 | u8 res0b4[0xc0 - 0xb4]; |
80 | #ifdef CONFIG_PPC_85xx | ||
81 | __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register */ | ||
82 | #else | ||
53 | __be32 elbcvselcr; /* 0x.00c0 - eLBC Voltage Select Ctrl Reg */ | 83 | __be32 elbcvselcr; /* 0x.00c0 - eLBC Voltage Select Ctrl Reg */ |
54 | u8 res12[0x800 - 0xC4]; | 84 | #endif |
85 | u8 res0c4[0x224 - 0xc4]; | ||
86 | __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ | ||
87 | __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ | ||
88 | u8 res22c[0x800 - 0x22c]; | ||
55 | __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ | 89 | __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ |
56 | u8 res13[0x900 - 0x804]; | 90 | u8 res804[0x900 - 0x804]; |
57 | __be32 ircr; /* 0x.0900 - Infrared Control Register */ | 91 | __be32 ircr; /* 0x.0900 - Infrared Control Register */ |
58 | u8 res14[0x908 - 0x904]; | 92 | u8 res904[0x908 - 0x904]; |
59 | __be32 dmacr; /* 0x.0908 - DMA Control Register */ | 93 | __be32 dmacr; /* 0x.0908 - DMA Control Register */ |
60 | u8 res15[0x914 - 0x90C]; | 94 | u8 res90c[0x914 - 0x90c]; |
61 | __be32 elbccr; /* 0x.0914 - eLBC Control Register */ | 95 | __be32 elbccr; /* 0x.0914 - eLBC Control Register */ |
62 | u8 res16[0xB20 - 0x918]; | 96 | u8 res918[0xb20 - 0x918]; |
63 | __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ | 97 | __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ |
64 | __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ | 98 | __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ |
65 | __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ | 99 | __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ |
66 | u8 res17[0xE00 - 0xB2C]; | 100 | u8 resb2c[0xe00 - 0xb2c]; |
67 | __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */ | 101 | __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */ |
68 | u8 res18[0xE10 - 0xE04]; | 102 | u8 rese04[0xe10 - 0xe04]; |
69 | __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ | 103 | __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ |
70 | u8 res19[0xE20 - 0xE14]; | 104 | u8 rese14[0xe20 - 0xe14]; |
71 | __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ | 105 | __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ |
72 | u8 res20[0xF04 - 0xE24]; | 106 | __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ |
107 | u8 rese28[0xf04 - 0xe28]; | ||
73 | __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ | 108 | __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ |
74 | __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 1 */ | 109 | __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 1 */ |
75 | u8 res21[0xF40 - 0xF0C]; | 110 | u8 resf0c[0xf2c - 0xf0c]; |
76 | __be32 srds2cr0; /* 0x.0f40 - SerDes1 Control Register 0 */ | 111 | __be32 itcr; /* 0x.0f2c - Internal transaction control register */ |
77 | __be32 srds2cr1; /* 0x.0f44 - SerDes1 Control Register 0 */ | 112 | u8 resf30[0xf40 - 0xf30]; |
113 | __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ | ||
114 | __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 1 */ ||
78 | } __attribute__ ((packed)); | 115 | } __attribute__ ((packed)); |
79 | 116 | ||
117 | #ifdef CONFIG_PPC_86xx | ||
118 | |||
80 | #define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */ | 119 | #define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */ |
81 | #define CCSR_GUTS_DMACR_DEV_IR 1 /* DMA controller/channel set to IR */ | 120 | #define CCSR_GUTS_DMACR_DEV_IR 1 /* DMA controller/channel set to IR */ |
82 | 121 | ||
@@ -93,7 +132,7 @@ struct ccsr_guts { | |||
93 | * ch: The channel on the DMA controller (0, 1, 2, or 3) | 132 | * ch: The channel on the DMA controller (0, 1, 2, or 3) |
94 | * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx) | 133 | * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx) |
95 | */ | 134 | */ |
96 | static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts, | 135 | static inline void guts_set_dmacr(struct ccsr_guts_86xx __iomem *guts, |
97 | unsigned int co, unsigned int ch, unsigned int device) | 136 | unsigned int co, unsigned int ch, unsigned int device) |
98 | { | 137 | { |
99 | unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch)); | 138 | unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch)); |
@@ -129,7 +168,7 @@ static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts, | |||
129 | * ch: The channel on the DMA controller (0, 1, 2, or 3) | 168 | * ch: The channel on the DMA controller (0, 1, 2, or 3) |
130 | * value: the new value for the bit (0 or 1) | 169 | * value: the new value for the bit (0 or 1) |
131 | */ | 170 | */ |
132 | static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts, | 171 | static inline void guts_set_pmuxcr_dma(struct ccsr_guts_86xx __iomem *guts, |
133 | unsigned int co, unsigned int ch, unsigned int value) | 172 | unsigned int co, unsigned int ch, unsigned int value) |
134 | { | 173 | { |
135 | if ((ch == 0) || (ch == 3)) { | 174 | if ((ch == 0) || (ch == 3)) { |
@@ -152,5 +191,7 @@ static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts, | |||
152 | #define CCSR_GUTS_CLKDVDR_SSICLK_MASK 0x000000FF | 191 | #define CCSR_GUTS_CLKDVDR_SSICLK_MASK 0x000000FF |
153 | #define CCSR_GUTS_CLKDVDR_SSICLK(x) ((x) & CCSR_GUTS_CLKDVDR_SSICLK_MASK) | 192 | #define CCSR_GUTS_CLKDVDR_SSICLK(x) ((x) & CCSR_GUTS_CLKDVDR_SSICLK_MASK) |
154 | 193 | ||
155 | #endif /* __ASM_POWERPC_IMMAP_86XX_H__ */ | 194 | #endif |
156 | #endif /* __KERNEL__ */ | 195 | |
196 | #endif | ||
197 | #endif | ||
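A hedged sketch of how the renamed 86xx structure pairs with the existing helper; guts_phys stands in for the physical address of the global-utilities block taken from the device tree and is not defined by this patch:

        struct ccsr_guts_86xx __iomem *guts;

        guts = ioremap(guts_phys, sizeof(struct ccsr_guts_86xx));
        if (guts) {
                /* route DMA controller 0, channel 0 to the SSI */
                guts_set_dmacr(guts, 0, 0, CCSR_GUTS_DMACR_DEV_SSI);
                iounmap(guts);
        }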
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h index 1b5a21041f9b..8a0b5ece8f76 100644 --- a/arch/powerpc/include/asm/fsl_lbc.h +++ b/arch/powerpc/include/asm/fsl_lbc.h | |||
@@ -1,9 +1,10 @@ | |||
1 | /* Freescale Local Bus Controller | 1 | /* Freescale Local Bus Controller |
2 | * | 2 | * |
3 | * Copyright (c) 2006-2007 Freescale Semiconductor | 3 | * Copyright © 2006-2007, 2010 Freescale Semiconductor |
4 | * | 4 | * |
5 | * Authors: Nick Spence <nick.spence@freescale.com>, | 5 | * Authors: Nick Spence <nick.spence@freescale.com>, |
6 | * Scott Wood <scottwood@freescale.com> | 6 | * Scott Wood <scottwood@freescale.com> |
7 | * Jack Lan <jack.lan@freescale.com> | ||
7 | * | 8 | * |
8 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 10 | * it under the terms of the GNU General Public License as published by |
@@ -26,6 +27,8 @@ | |||
26 | #include <linux/compiler.h> | 27 | #include <linux/compiler.h> |
27 | #include <linux/types.h> | 28 | #include <linux/types.h> |
28 | #include <linux/io.h> | 29 | #include <linux/io.h> |
30 | #include <linux/device.h> | ||
31 | #include <linux/spinlock.h> | ||
29 | 32 | ||
30 | struct fsl_lbc_bank { | 33 | struct fsl_lbc_bank { |
31 | __be32 br; /**< Base Register */ | 34 | __be32 br; /**< Base Register */ |
@@ -125,13 +128,23 @@ struct fsl_lbc_regs { | |||
125 | #define LTESR_ATMW 0x00800000 | 128 | #define LTESR_ATMW 0x00800000 |
126 | #define LTESR_ATMR 0x00400000 | 129 | #define LTESR_ATMR 0x00400000 |
127 | #define LTESR_CS 0x00080000 | 130 | #define LTESR_CS 0x00080000 |
131 | #define LTESR_UPM 0x00000002 | ||
128 | #define LTESR_CC 0x00000001 | 132 | #define LTESR_CC 0x00000001 |
129 | #define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC) | 133 | #define LTESR_NAND_MASK (LTESR_FCT | LTESR_PAR | LTESR_CC) |
134 | #define LTESR_MASK (LTESR_BM | LTESR_FCT | LTESR_PAR | LTESR_WP \ | ||
135 | | LTESR_ATMW | LTESR_ATMR | LTESR_CS | LTESR_UPM \ | ||
136 | | LTESR_CC) | ||
137 | #define LTESR_CLEAR 0xFFFFFFFF | ||
138 | #define LTECCR_CLEAR 0xFFFFFFFF | ||
139 | #define LTESR_STATUS LTESR_MASK | ||
140 | #define LTEIR_ENABLE LTESR_MASK | ||
141 | #define LTEDR_ENABLE 0x00000000 | ||
130 | __be32 ltedr; /**< Transfer Error Disable Register */ | 142 | __be32 ltedr; /**< Transfer Error Disable Register */ |
131 | __be32 lteir; /**< Transfer Error Interrupt Register */ | 143 | __be32 lteir; /**< Transfer Error Interrupt Register */ |
132 | __be32 lteatr; /**< Transfer Error Attributes Register */ | 144 | __be32 lteatr; /**< Transfer Error Attributes Register */ |
133 | __be32 ltear; /**< Transfer Error Address Register */ | 145 | __be32 ltear; /**< Transfer Error Address Register */ |
134 | u8 res6[0xC]; | 146 | __be32 lteccr; /**< Transfer Error ECC Register */ |
147 | u8 res6[0x8]; | ||
135 | __be32 lbcr; /**< Configuration Register */ | 148 | __be32 lbcr; /**< Configuration Register */ |
136 | #define LBCR_LDIS 0x80000000 | 149 | #define LBCR_LDIS 0x80000000 |
137 | #define LBCR_LDIS_SHIFT 31 | 150 | #define LBCR_LDIS_SHIFT 31 |
@@ -144,6 +157,8 @@ struct fsl_lbc_regs { | |||
144 | #define LBCR_EPAR_SHIFT 16 | 157 | #define LBCR_EPAR_SHIFT 16 |
145 | #define LBCR_BMT 0x0000FF00 | 158 | #define LBCR_BMT 0x0000FF00 |
146 | #define LBCR_BMT_SHIFT 8 | 159 | #define LBCR_BMT_SHIFT 8 |
160 | #define LBCR_BMTPS 0x0000000F | ||
161 | #define LBCR_BMTPS_SHIFT 0 | ||
147 | #define LBCR_INIT 0x00040000 | 162 | #define LBCR_INIT 0x00040000 |
148 | __be32 lcrr; /**< Clock Ratio Register */ | 163 | __be32 lcrr; /**< Clock Ratio Register */ |
149 | #define LCRR_DBYP 0x80000000 | 164 | #define LCRR_DBYP 0x80000000 |
@@ -235,6 +250,7 @@ struct fsl_upm { | |||
235 | int width; | 250 | int width; |
236 | }; | 251 | }; |
237 | 252 | ||
253 | extern u32 fsl_lbc_addr(phys_addr_t addr_base); | ||
238 | extern int fsl_lbc_find(phys_addr_t addr_base); | 254 | extern int fsl_lbc_find(phys_addr_t addr_base); |
239 | extern int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm); | 255 | extern int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm); |
240 | 256 | ||
@@ -265,7 +281,23 @@ static inline void fsl_upm_end_pattern(struct fsl_upm *upm) | |||
265 | cpu_relax(); | 281 | cpu_relax(); |
266 | } | 282 | } |
267 | 283 | ||
284 | /* overview of the fsl lbc controller */ | ||
285 | |||
286 | struct fsl_lbc_ctrl { | ||
287 | /* device info */ | ||
288 | struct device *dev; | ||
289 | struct fsl_lbc_regs __iomem *regs; | ||
290 | int irq; | ||
291 | wait_queue_head_t irq_wait; | ||
292 | spinlock_t lock; | ||
293 | void *nand; | ||
294 | |||
295 | /* status read from LTESR by irq handler */ | ||
296 | unsigned int irq_status; | ||
297 | }; | ||
298 | |||
268 | extern int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, | 299 | extern int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, |
269 | u32 mar); | 300 | u32 mar); |
301 | extern struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev; | ||
270 | 302 | ||
271 | #endif /* __ASM_FSL_LBC_H */ | 303 | #endif /* __ASM_FSL_LBC_H */ |
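For illustration only, a client such as the FSL UPM/NAND code could reach the shared controller through the new fsl_lbc_ctrl_dev export roughly like this (locking and error handling omitted; a sketch, not code from the patch):

        struct fsl_lbc_ctrl *ctrl = fsl_lbc_ctrl_dev;

        if (ctrl && ctrl->regs)
                out_be32(&ctrl->regs->lteir, LTEIR_ENABLE);  /* unmask transfer-error irqs */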
diff --git a/arch/powerpc/include/asm/fsldma.h b/arch/powerpc/include/asm/fsldma.h deleted file mode 100644 index debc5ed96d6e..000000000000 --- a/arch/powerpc/include/asm/fsldma.h +++ /dev/null | |||
@@ -1,137 +0,0 @@ | |||
1 | /* | ||
2 | * Freescale MPC83XX / MPC85XX DMA Controller | ||
3 | * | ||
4 | * Copyright (c) 2009 Ira W. Snyder <iws@ovro.caltech.edu> | ||
5 | * | ||
6 | * This file is licensed under the terms of the GNU General Public License | ||
7 | * version 2. This program is licensed "as is" without any warranty of any | ||
8 | * kind, whether express or implied. | ||
9 | */ | ||
10 | |||
11 | #ifndef __ARCH_POWERPC_ASM_FSLDMA_H__ | ||
12 | #define __ARCH_POWERPC_ASM_FSLDMA_H__ | ||
13 | |||
14 | #include <linux/slab.h> | ||
15 | #include <linux/dmaengine.h> | ||
16 | |||
17 | /* | ||
18 | * Definitions for the Freescale DMA controller's DMA_SLAVE implemention | ||
19 | * | ||
20 | * The Freescale DMA_SLAVE implementation was designed to handle many-to-many | ||
21 | * transfers. An example usage would be an accelerated copy between two | ||
22 | * scatterlists. Another example use would be an accelerated copy from | ||
23 | * multiple non-contiguous device buffers into a single scatterlist. | ||
24 | * | ||
25 | * A DMA_SLAVE transaction is defined by a struct fsl_dma_slave. This | ||
26 | * structure contains a list of hardware addresses that should be copied | ||
27 | * to/from the scatterlist passed into device_prep_slave_sg(). The structure | ||
28 | * also has some fields to enable hardware-specific features. | ||
29 | */ | ||
30 | |||
31 | /** | ||
32 | * struct fsl_dma_hw_addr | ||
33 | * @entry: linked list entry | ||
34 | * @address: the hardware address | ||
35 | * @length: length to transfer | ||
36 | * | ||
37 | * Holds a single physical hardware address / length pair for use | ||
38 | * with the DMAEngine DMA_SLAVE API. | ||
39 | */ | ||
40 | struct fsl_dma_hw_addr { | ||
41 | struct list_head entry; | ||
42 | |||
43 | dma_addr_t address; | ||
44 | size_t length; | ||
45 | }; | ||
46 | |||
47 | /** | ||
48 | * struct fsl_dma_slave | ||
49 | * @addresses: a linked list of struct fsl_dma_hw_addr structures | ||
50 | * @request_count: value for DMA request count | ||
51 | * @src_loop_size: setup and enable constant source-address DMA transfers | ||
52 | * @dst_loop_size: setup and enable constant destination address DMA transfers | ||
53 | * @external_start: enable externally started DMA transfers | ||
54 | * @external_pause: enable externally paused DMA transfers | ||
55 | * | ||
56 | * Holds a list of address / length pairs for use with the DMAEngine | ||
57 | * DMA_SLAVE API implementation for the Freescale DMA controller. | ||
58 | */ | ||
59 | struct fsl_dma_slave { | ||
60 | |||
61 | /* List of hardware address/length pairs */ | ||
62 | struct list_head addresses; | ||
63 | |||
64 | /* Support for extra controller features */ | ||
65 | unsigned int request_count; | ||
66 | unsigned int src_loop_size; | ||
67 | unsigned int dst_loop_size; | ||
68 | bool external_start; | ||
69 | bool external_pause; | ||
70 | }; | ||
71 | |||
72 | /** | ||
73 | * fsl_dma_slave_append - add an address/length pair to a struct fsl_dma_slave | ||
74 | * @slave: the &struct fsl_dma_slave to add to | ||
75 | * @address: the hardware address to add | ||
76 | * @length: the length of bytes to transfer from @address | ||
77 | * | ||
78 | * Add a hardware address/length pair to a struct fsl_dma_slave. Returns 0 on | ||
79 | * success, -ERRNO otherwise. | ||
80 | */ | ||
81 | static inline int fsl_dma_slave_append(struct fsl_dma_slave *slave, | ||
82 | dma_addr_t address, size_t length) | ||
83 | { | ||
84 | struct fsl_dma_hw_addr *addr; | ||
85 | |||
86 | addr = kzalloc(sizeof(*addr), GFP_ATOMIC); | ||
87 | if (!addr) | ||
88 | return -ENOMEM; | ||
89 | |||
90 | INIT_LIST_HEAD(&addr->entry); | ||
91 | addr->address = address; | ||
92 | addr->length = length; | ||
93 | |||
94 | list_add_tail(&addr->entry, &slave->addresses); | ||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | /** | ||
99 | * fsl_dma_slave_free - free a struct fsl_dma_slave | ||
100 | * @slave: the struct fsl_dma_slave to free | ||
101 | * | ||
102 | * Free a struct fsl_dma_slave and all associated address/length pairs | ||
103 | */ | ||
104 | static inline void fsl_dma_slave_free(struct fsl_dma_slave *slave) | ||
105 | { | ||
106 | struct fsl_dma_hw_addr *addr, *tmp; | ||
107 | |||
108 | if (slave) { | ||
109 | list_for_each_entry_safe(addr, tmp, &slave->addresses, entry) { | ||
110 | list_del(&addr->entry); | ||
111 | kfree(addr); | ||
112 | } | ||
113 | |||
114 | kfree(slave); | ||
115 | } | ||
116 | } | ||
117 | |||
118 | /** | ||
119 | * fsl_dma_slave_alloc - allocate a struct fsl_dma_slave | ||
120 | * @gfp: the flags to pass to kmalloc when allocating this structure | ||
121 | * | ||
122 | * Allocate a struct fsl_dma_slave for use by the DMA_SLAVE API. Returns a new | ||
123 | * struct fsl_dma_slave on success, or NULL on failure. | ||
124 | */ | ||
125 | static inline struct fsl_dma_slave *fsl_dma_slave_alloc(gfp_t gfp) | ||
126 | { | ||
127 | struct fsl_dma_slave *slave; | ||
128 | |||
129 | slave = kzalloc(sizeof(*slave), gfp); | ||
130 | if (!slave) | ||
131 | return NULL; | ||
132 | |||
133 | INIT_LIST_HEAD(&slave->addresses); | ||
134 | return slave; | ||
135 | } | ||
136 | |||
137 | #endif /* __ARCH_POWERPC_ASM_FSLDMA_H__ */ | ||
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index dde1296b8b41..169d039ed402 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h | |||
@@ -60,4 +60,18 @@ struct dyn_arch_ftrace { | |||
60 | 60 | ||
61 | #endif | 61 | #endif |
62 | 62 | ||
63 | #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__) | ||
64 | #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME | ||
65 | static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) | ||
66 | { | ||
67 | /* | ||
68 | * Compare the symbol name with the system call name. Skip the .sys or .SyS | ||
69 | * prefix from the symbol name and the sys prefix from the system call name and | ||
70 | * just match the rest. This is only needed on ppc64 since symbol names on | ||
71 | * 32bit do not start with a period so the generic function will work. | ||
72 | */ | ||
73 | return !strcmp(sym + 4, name + 3); | ||
74 | } | ||
75 | #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 && !__ASSEMBLY__ */ | ||
76 | |||
63 | #endif /* _ASM_POWERPC_FTRACE */ | 77 | #endif /* _ASM_POWERPC_FTRACE */ |
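Concretely, the match works like this (the symbol and syscall names below are just examples): sym + 4 skips ".SyS" or ".sys", name + 3 skips "sys", so both sides reduce to "_read":

        bool ok = arch_syscall_match_sym_name(".SyS_read", "sys_read");  /* true */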
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h index 7c589ef81fb0..c94e4a3fe2ef 100644 --- a/arch/powerpc/include/asm/futex.h +++ b/arch/powerpc/include/asm/futex.h | |||
@@ -30,7 +30,7 @@ | |||
30 | : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ | 30 | : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ |
31 | : "cr0", "memory") | 31 | : "cr0", "memory") |
32 | 32 | ||
33 | static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | 33 | static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) |
34 | { | 34 | { |
35 | int op = (encoded_op >> 28) & 7; | 35 | int op = (encoded_op >> 28) & 7; |
36 | int cmp = (encoded_op >> 24) & 15; | 36 | int cmp = (encoded_op >> 24) & 15; |
@@ -40,7 +40,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
40 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) | 40 | if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) |
41 | oparg = 1 << oparg; | 41 | oparg = 1 << oparg; |
42 | 42 | ||
43 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) | 43 | if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) |
44 | return -EFAULT; | 44 | return -EFAULT; |
45 | 45 | ||
46 | pagefault_disable(); | 46 | pagefault_disable(); |
@@ -82,35 +82,38 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) | |||
82 | } | 82 | } |
83 | 83 | ||
84 | static inline int | 84 | static inline int |
85 | futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) | 85 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, |
86 | u32 oldval, u32 newval) | ||
86 | { | 87 | { |
87 | int prev; | 88 | int ret = 0; |
89 | u32 prev; | ||
88 | 90 | ||
89 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) | 91 | if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) |
90 | return -EFAULT; | 92 | return -EFAULT; |
91 | 93 | ||
92 | __asm__ __volatile__ ( | 94 | __asm__ __volatile__ ( |
93 | PPC_RELEASE_BARRIER | 95 | PPC_RELEASE_BARRIER |
94 | "1: lwarx %0,0,%2 # futex_atomic_cmpxchg_inatomic\n\ | 96 | "1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ |
95 | cmpw 0,%0,%3\n\ | 97 | cmpw 0,%1,%4\n\ |
96 | bne- 3f\n" | 98 | bne- 3f\n" |
97 | PPC405_ERR77(0,%2) | 99 | PPC405_ERR77(0,%3) |
98 | "2: stwcx. %4,0,%2\n\ | 100 | "2: stwcx. %5,0,%3\n\ |
99 | bne- 1b\n" | 101 | bne- 1b\n" |
100 | PPC_ACQUIRE_BARRIER | 102 | PPC_ACQUIRE_BARRIER |
101 | "3: .section .fixup,\"ax\"\n\ | 103 | "3: .section .fixup,\"ax\"\n\ |
102 | 4: li %0,%5\n\ | 104 | 4: li %0,%6\n\ |
103 | b 3b\n\ | 105 | b 3b\n\ |
104 | .previous\n\ | 106 | .previous\n\ |
105 | .section __ex_table,\"a\"\n\ | 107 | .section __ex_table,\"a\"\n\ |
106 | .align 3\n\ | 108 | .align 3\n\ |
107 | " PPC_LONG "1b,4b,2b,4b\n\ | 109 | " PPC_LONG "1b,4b,2b,4b\n\ |
108 | .previous" \ | 110 | .previous" \ |
109 | : "=&r" (prev), "+m" (*uaddr) | 111 | : "+r" (ret), "=&r" (prev), "+m" (*uaddr) |
110 | : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT) | 112 | : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT) |
111 | : "cc", "memory"); | 113 | : "cc", "memory"); |
112 | 114 | ||
113 | return prev; | 115 | *uval = prev; |
116 | return ret; | ||
114 | } | 117 | } |
115 | 118 | ||
116 | #endif /* __KERNEL__ */ | 119 | #endif /* __KERNEL__ */ |
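A sketch of the new calling convention: the return value now carries only the fault status, while the previously observed word comes back through *uval. uaddr and the futex values below are placeholders:

        u32 curval;
        int ret;

        ret = futex_atomic_cmpxchg_inatomic(&curval, uaddr, 0, FUTEX_WAITERS);
        if (ret)
                return ret;             /* -EFAULT from the user access */
        /* curval now holds whatever was actually found at *uaddr */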
diff --git a/arch/powerpc/include/asm/highmem.h b/arch/powerpc/include/asm/highmem.h index d10d64a4be38..dbc264010d0b 100644 --- a/arch/powerpc/include/asm/highmem.h +++ b/arch/powerpc/include/asm/highmem.h | |||
@@ -60,9 +60,8 @@ extern pte_t *pkmap_page_table; | |||
60 | 60 | ||
61 | extern void *kmap_high(struct page *page); | 61 | extern void *kmap_high(struct page *page); |
62 | extern void kunmap_high(struct page *page); | 62 | extern void kunmap_high(struct page *page); |
63 | extern void *kmap_atomic_prot(struct page *page, enum km_type type, | 63 | extern void *kmap_atomic_prot(struct page *page, pgprot_t prot); |
64 | pgprot_t prot); | 64 | extern void __kunmap_atomic(void *kvaddr); |
65 | extern void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type); | ||
66 | 65 | ||
67 | static inline void *kmap(struct page *page) | 66 | static inline void *kmap(struct page *page) |
68 | { | 67 | { |
@@ -80,9 +79,9 @@ static inline void kunmap(struct page *page) | |||
80 | kunmap_high(page); | 79 | kunmap_high(page); |
81 | } | 80 | } |
82 | 81 | ||
83 | static inline void *kmap_atomic(struct page *page, enum km_type type) | 82 | static inline void *__kmap_atomic(struct page *page) |
84 | { | 83 | { |
85 | return kmap_atomic_prot(page, type, kmap_prot); | 84 | return kmap_atomic_prot(page, kmap_prot); |
86 | } | 85 | } |
87 | 86 | ||
88 | static inline struct page *kmap_atomic_to_page(void *ptr) | 87 | static inline struct page *kmap_atomic_to_page(void *ptr) |
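In terms of the declarations above, a caller of the reworked atomic-kmap path looks roughly like this (page, dst and len are assumed to exist in the caller; the per-type kmap slots are gone):

        void *vaddr = __kmap_atomic(page);

        memcpy(dst, vaddr, len);
        __kunmap_atomic(vaddr);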
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index de03ca58db5d..fd8201dddd4b 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h | |||
@@ -102,6 +102,7 @@ | |||
102 | #define H_ANDCOND (1UL<<(63-33)) | 102 | #define H_ANDCOND (1UL<<(63-33)) |
103 | #define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */ | 103 | #define H_ICACHE_INVALIDATE (1UL<<(63-40)) /* icbi, etc. (ignored for IO pages) */ |
104 | #define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */ | 104 | #define H_ICACHE_SYNCHRONIZE (1UL<<(63-41)) /* dcbst, icbi, etc (ignored for IO pages */ |
105 | #define H_COALESCE_CAND (1UL<<(63-42)) /* page is a good candidate for coalescing */ | ||
105 | #define H_ZERO_PAGE (1UL<<(63-48)) /* zero the page before mapping (ignored for IO pages) */ | 106 | #define H_ZERO_PAGE (1UL<<(63-48)) /* zero the page before mapping (ignored for IO pages) */ |
106 | #define H_COPY_PAGE (1UL<<(63-49)) | 107 | #define H_COPY_PAGE (1UL<<(63-49)) |
107 | #define H_N (1UL<<(63-61)) | 108 | #define H_N (1UL<<(63-61)) |
@@ -122,7 +123,7 @@ | |||
122 | #define H_DABRX_KERNEL (1UL<<(63-62)) | 123 | #define H_DABRX_KERNEL (1UL<<(63-62)) |
123 | #define H_DABRX_USER (1UL<<(63-63)) | 124 | #define H_DABRX_USER (1UL<<(63-63)) |
124 | 125 | ||
125 | /* Each control block has to be on a 4K bondary */ | 126 | /* Each control block has to be on a 4K boundary */ |
126 | #define H_CB_ALIGNMENT 4096 | 127 | #define H_CB_ALIGNMENT 4096 |
127 | 128 | ||
128 | /* pSeries hypervisor opcodes */ | 129 | /* pSeries hypervisor opcodes */ |
@@ -232,7 +233,10 @@ | |||
232 | #define H_GET_EM_PARMS 0x2B8 | 233 | #define H_GET_EM_PARMS 0x2B8 |
233 | #define H_SET_MPP 0x2D0 | 234 | #define H_SET_MPP 0x2D0 |
234 | #define H_GET_MPP 0x2D4 | 235 | #define H_GET_MPP 0x2D4 |
235 | #define MAX_HCALL_OPCODE H_GET_MPP | 236 | #define H_HOME_NODE_ASSOCIATIVITY 0x2EC |
237 | #define H_BEST_ENERGY 0x2F4 | ||
238 | #define H_GET_MPP_X 0x314 | ||
239 | #define MAX_HCALL_OPCODE H_GET_MPP_X | ||
236 | 240 | ||
237 | #ifndef __ASSEMBLY__ | 241 | #ifndef __ASSEMBLY__ |
238 | 242 | ||
@@ -310,6 +314,16 @@ struct hvcall_mpp_data { | |||
310 | 314 | ||
311 | int h_get_mpp(struct hvcall_mpp_data *); | 315 | int h_get_mpp(struct hvcall_mpp_data *); |
312 | 316 | ||
317 | struct hvcall_mpp_x_data { | ||
318 | unsigned long coalesced_bytes; | ||
319 | unsigned long pool_coalesced_bytes; | ||
320 | unsigned long pool_purr_cycles; | ||
321 | unsigned long pool_spurr_cycles; | ||
322 | unsigned long reserved[3]; | ||
323 | }; | ||
324 | |||
325 | int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data); | ||
326 | |||
313 | #ifdef CONFIG_PPC_PSERIES | 327 | #ifdef CONFIG_PPC_PSERIES |
314 | extern int CMO_PrPSP; | 328 | extern int CMO_PrPSP; |
315 | extern int CMO_SecPSP; | 329 | extern int CMO_SecPSP; |
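A minimal sketch of a consumer of the new H_GET_MPP_X accessor, assuming the usual 0-on-success hcall return convention:

        struct hvcall_mpp_x_data mpp_x;

        if (!h_get_mpp_x(&mpp_x))
                pr_info("coalesced bytes: %lu\n", mpp_x.coalesced_bytes);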
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h index bd100fcf40d0..bb712c9488b3 100644 --- a/arch/powerpc/include/asm/hw_irq.h +++ b/arch/powerpc/include/asm/hw_irq.h | |||
@@ -16,42 +16,57 @@ extern void timer_interrupt(struct pt_regs *); | |||
16 | #ifdef CONFIG_PPC64 | 16 | #ifdef CONFIG_PPC64 |
17 | #include <asm/paca.h> | 17 | #include <asm/paca.h> |
18 | 18 | ||
19 | static inline unsigned long local_get_flags(void) | 19 | static inline unsigned long arch_local_save_flags(void) |
20 | { | 20 | { |
21 | unsigned long flags; | 21 | unsigned long flags; |
22 | 22 | ||
23 | __asm__ __volatile__("lbz %0,%1(13)" | 23 | asm volatile( |
24 | : "=r" (flags) | 24 | "lbz %0,%1(13)" |
25 | : "i" (offsetof(struct paca_struct, soft_enabled))); | 25 | : "=r" (flags) |
26 | : "i" (offsetof(struct paca_struct, soft_enabled))); | ||
26 | 27 | ||
27 | return flags; | 28 | return flags; |
28 | } | 29 | } |
29 | 30 | ||
30 | static inline unsigned long raw_local_irq_disable(void) | 31 | static inline unsigned long arch_local_irq_disable(void) |
31 | { | 32 | { |
32 | unsigned long flags, zero; | 33 | unsigned long flags, zero; |
33 | 34 | ||
34 | __asm__ __volatile__("li %1,0; lbz %0,%2(13); stb %1,%2(13)" | 35 | asm volatile( |
35 | : "=r" (flags), "=&r" (zero) | 36 | "li %1,0; lbz %0,%2(13); stb %1,%2(13)" |
36 | : "i" (offsetof(struct paca_struct, soft_enabled)) | 37 | : "=r" (flags), "=&r" (zero) |
37 | : "memory"); | 38 | : "i" (offsetof(struct paca_struct, soft_enabled)) |
39 | : "memory"); | ||
38 | 40 | ||
39 | return flags; | 41 | return flags; |
40 | } | 42 | } |
41 | 43 | ||
42 | extern void raw_local_irq_restore(unsigned long); | 44 | extern void arch_local_irq_restore(unsigned long); |
43 | extern void iseries_handle_interrupts(void); | 45 | extern void iseries_handle_interrupts(void); |
44 | 46 | ||
45 | #define raw_local_irq_enable() raw_local_irq_restore(1) | 47 | static inline void arch_local_irq_enable(void) |
46 | #define raw_local_save_flags(flags) ((flags) = local_get_flags()) | 48 | { |
47 | #define raw_local_irq_save(flags) ((flags) = raw_local_irq_disable()) | 49 | arch_local_irq_restore(1); |
50 | } | ||
51 | |||
52 | static inline unsigned long arch_local_irq_save(void) | ||
53 | { | ||
54 | return arch_local_irq_disable(); | ||
55 | } | ||
56 | |||
57 | static inline bool arch_irqs_disabled_flags(unsigned long flags) | ||
58 | { | ||
59 | return flags == 0; | ||
60 | } | ||
48 | 61 | ||
49 | #define raw_irqs_disabled() (local_get_flags() == 0) | 62 | static inline bool arch_irqs_disabled(void) |
50 | #define raw_irqs_disabled_flags(flags) ((flags) == 0) | 63 | { |
64 | return arch_irqs_disabled_flags(arch_local_save_flags()); | ||
65 | } | ||
51 | 66 | ||
52 | #ifdef CONFIG_PPC_BOOK3E | 67 | #ifdef CONFIG_PPC_BOOK3E |
53 | #define __hard_irq_enable() __asm__ __volatile__("wrteei 1": : :"memory"); | 68 | #define __hard_irq_enable() asm volatile("wrteei 1" : : : "memory"); |
54 | #define __hard_irq_disable() __asm__ __volatile__("wrteei 0": : :"memory"); | 69 | #define __hard_irq_disable() asm volatile("wrteei 0" : : : "memory"); |
55 | #else | 70 | #else |
56 | #define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1) | 71 | #define __hard_irq_enable() __mtmsrd(mfmsr() | MSR_EE, 1) |
57 | #define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1) | 72 | #define __hard_irq_disable() __mtmsrd(mfmsr() & ~MSR_EE, 1) |
@@ -64,66 +79,70 @@ extern void iseries_handle_interrupts(void); | |||
64 | get_paca()->hard_enabled = 0; \ | 79 | get_paca()->hard_enabled = 0; \ |
65 | } while(0) | 80 | } while(0) |
66 | 81 | ||
67 | #else | 82 | #else /* CONFIG_PPC64 */ |
68 | 83 | ||
69 | #if defined(CONFIG_BOOKE) | ||
70 | #define SET_MSR_EE(x) mtmsr(x) | 84 | #define SET_MSR_EE(x) mtmsr(x) |
71 | #define raw_local_irq_restore(flags) __asm__ __volatile__("wrtee %0" : : "r" (flags) : "memory") | 85 | |
86 | static inline unsigned long arch_local_save_flags(void) | ||
87 | { | ||
88 | return mfmsr(); | ||
89 | } | ||
90 | |||
91 | static inline void arch_local_irq_restore(unsigned long flags) | ||
92 | { | ||
93 | #if defined(CONFIG_BOOKE) | ||
94 | asm volatile("wrtee %0" : : "r" (flags) : "memory"); | ||
72 | #else | 95 | #else |
73 | #define SET_MSR_EE(x) mtmsr(x) | 96 | mtmsr(flags); |
74 | #define raw_local_irq_restore(flags) mtmsr(flags) | ||
75 | #endif | 97 | #endif |
98 | } | ||
76 | 99 | ||
77 | static inline void raw_local_irq_disable(void) | 100 | static inline unsigned long arch_local_irq_save(void) |
78 | { | 101 | { |
102 | unsigned long flags = arch_local_save_flags(); | ||
79 | #ifdef CONFIG_BOOKE | 103 | #ifdef CONFIG_BOOKE |
80 | __asm__ __volatile__("wrteei 0": : :"memory"); | 104 | asm volatile("wrteei 0" : : : "memory"); |
81 | #else | 105 | #else |
82 | unsigned long msr; | 106 | SET_MSR_EE(flags & ~MSR_EE); |
83 | |||
84 | msr = mfmsr(); | ||
85 | SET_MSR_EE(msr & ~MSR_EE); | ||
86 | #endif | 107 | #endif |
108 | return flags; | ||
87 | } | 109 | } |
88 | 110 | ||
89 | static inline void raw_local_irq_enable(void) | 111 | static inline void arch_local_irq_disable(void) |
90 | { | 112 | { |
91 | #ifdef CONFIG_BOOKE | 113 | #ifdef CONFIG_BOOKE |
92 | __asm__ __volatile__("wrteei 1": : :"memory"); | 114 | asm volatile("wrteei 0" : : : "memory"); |
93 | #else | 115 | #else |
94 | unsigned long msr; | 116 | arch_local_irq_save(); |
95 | |||
96 | msr = mfmsr(); | ||
97 | SET_MSR_EE(msr | MSR_EE); | ||
98 | #endif | 117 | #endif |
99 | } | 118 | } |
100 | 119 | ||
101 | static inline void raw_local_irq_save_ptr(unsigned long *flags) | 120 | static inline void arch_local_irq_enable(void) |
102 | { | 121 | { |
103 | unsigned long msr; | ||
104 | msr = mfmsr(); | ||
105 | *flags = msr; | ||
106 | #ifdef CONFIG_BOOKE | 122 | #ifdef CONFIG_BOOKE |
107 | __asm__ __volatile__("wrteei 0": : :"memory"); | 123 | asm volatile("wrteei 1" : : : "memory"); |
108 | #else | 124 | #else |
109 | SET_MSR_EE(msr & ~MSR_EE); | 125 | unsigned long msr = mfmsr(); |
126 | SET_MSR_EE(msr | MSR_EE); | ||
110 | #endif | 127 | #endif |
111 | } | 128 | } |
112 | 129 | ||
113 | #define raw_local_save_flags(flags) ((flags) = mfmsr()) | 130 | static inline bool arch_irqs_disabled_flags(unsigned long flags) |
114 | #define raw_local_irq_save(flags) raw_local_irq_save_ptr(&flags) | ||
115 | #define raw_irqs_disabled() ((mfmsr() & MSR_EE) == 0) | ||
116 | #define raw_irqs_disabled_flags(flags) (((flags) & MSR_EE) == 0) | ||
117 | |||
118 | #define hard_irq_disable() raw_local_irq_disable() | ||
119 | |||
120 | static inline int irqs_disabled_flags(unsigned long flags) | ||
121 | { | 131 | { |
122 | return (flags & MSR_EE) == 0; | 132 | return (flags & MSR_EE) == 0; |
123 | } | 133 | } |
124 | 134 | ||
135 | static inline bool arch_irqs_disabled(void) | ||
136 | { | ||
137 | return arch_irqs_disabled_flags(arch_local_save_flags()); | ||
138 | } | ||
139 | |||
140 | #define hard_irq_disable() arch_local_irq_disable() | ||
141 | |||
125 | #endif /* CONFIG_PPC64 */ | 142 | #endif /* CONFIG_PPC64 */ |
126 | 143 | ||
144 | #define ARCH_IRQ_INIT_FLAGS IRQ_NOREQUEST | ||
145 | |||
127 | /* | 146 | /* |
128 | * interrupt-retrigger: should we handle this via lost interrupts and IPIs | 147 | * interrupt-retrigger: should we handle this via lost interrupts and IPIs |
129 | * or should we not care like we do now ? --BenH. | 148 | * or should we not care like we do now ? --BenH. |
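Callers are unchanged by this conversion; the generic helpers now resolve to the arch_local_* routines above, e.g.:

        unsigned long flags;

        local_irq_save(flags);
        /* critical section with interrupts (soft-)disabled on ppc64 */
        local_irq_restore(flags);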
diff --git a/arch/powerpc/include/asm/hydra.h b/arch/powerpc/include/asm/hydra.h index 1ad4eed07fbe..5b0c98bd46ab 100644 --- a/arch/powerpc/include/asm/hydra.h +++ b/arch/powerpc/include/asm/hydra.h | |||
@@ -10,7 +10,7 @@ | |||
10 | * | 10 | * |
11 | * © Copyright 1995 Apple Computer, Inc. All rights reserved. | 11 | * © Copyright 1995 Apple Computer, Inc. All rights reserved. |
12 | * | 12 | * |
13 | * It's available online from http://chrp.apple.com/MacTech.pdf. | 13 | * It's available online from http://www.cpu.lu/~mlan/ftp/MacTech.pdf |
14 | * You can obtain paper copies of this book from computer bookstores or by | 14 | * You can obtain paper copies of this book from computer bookstores or by |
15 | * writing Morgan Kaufmann Publishers, Inc., 340 Pine Street, Sixth Floor, San | 15 | * writing Morgan Kaufmann Publishers, Inc., 340 Pine Street, Sixth Floor, San |
16 | * Francisco, CA 94104. Reference ISBN 1-55860-393-X. | 16 | * Francisco, CA 94104. Reference ISBN 1-55860-393-X. |
diff --git a/arch/powerpc/include/asm/immap_qe.h b/arch/powerpc/include/asm/immap_qe.h index 4e10f508570a..0edb6842b13d 100644 --- a/arch/powerpc/include/asm/immap_qe.h +++ b/arch/powerpc/include/asm/immap_qe.h | |||
@@ -467,13 +467,22 @@ struct qe_immap { | |||
467 | extern struct qe_immap __iomem *qe_immr; | 467 | extern struct qe_immap __iomem *qe_immr; |
468 | extern phys_addr_t get_qe_base(void); | 468 | extern phys_addr_t get_qe_base(void); |
469 | 469 | ||
470 | static inline unsigned long immrbar_virt_to_phys(void *address) | 470 | /* |
471 | * Returns the offset within the QE address space of the given pointer. | ||
472 | * | ||
473 | * Note that the QE does not support 36-bit physical addresses, so if | ||
474 | * get_qe_base() returns a number above 4GB, the caller will probably fail. | ||
475 | */ | ||
476 | static inline phys_addr_t immrbar_virt_to_phys(void *address) | ||
471 | { | 477 | { |
472 | if ( ((u32)address >= (u32)qe_immr) && | 478 | void *q = (void *)qe_immr; |
473 | ((u32)address < ((u32)qe_immr + QE_IMMAP_SIZE)) ) | 479 | |
474 | return (unsigned long)(address - (u32)qe_immr + | 480 | /* Is it a MURAM address? */ |
475 | (u32)get_qe_base()); | 481 | if ((address >= q) && (address < (q + QE_IMMAP_SIZE))) |
476 | return (unsigned long)virt_to_phys(address); | 482 | return get_qe_base() + (address - q); |
483 | |||
484 | /* It's an address returned by kmalloc */ | ||
485 | return virt_to_phys(address); | ||
477 | } | 486 | } |
478 | 487 | ||
479 | #endif /* __KERNEL__ */ | 488 | #endif /* __KERNEL__ */ |
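Usage is unchanged apart from the wider return type; for example, a QE driver translating a buffer-descriptor pointer (bd is an assumed pointer obtained from the MURAM allocator or kmalloc):

        phys_addr_t bd_phys = immrbar_virt_to_phys(bd);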
diff --git a/arch/powerpc/platforms/cell/io-workarounds.h b/arch/powerpc/include/asm/io-workarounds.h index 6efc7782ebf2..fbae49286926 100644 --- a/arch/powerpc/platforms/cell/io-workarounds.h +++ b/arch/powerpc/include/asm/io-workarounds.h | |||
@@ -31,7 +31,6 @@ struct iowa_bus { | |||
31 | void *private; | 31 | void *private; |
32 | }; | 32 | }; |
33 | 33 | ||
34 | void __devinit io_workaround_init(void); | ||
35 | void __devinit iowa_register_bus(struct pci_controller *, struct ppc_pci_io *, | 34 | void __devinit iowa_register_bus(struct pci_controller *, struct ppc_pci_io *, |
36 | int (*)(struct iowa_bus *, void *), void *); | 35 | int (*)(struct iowa_bus *, void *), void *); |
37 | struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR); | 36 | struct iowa_bus *iowa_mem_find_bus(const PCI_IO_ADDR); |
diff --git a/arch/powerpc/include/asm/io.h b/arch/powerpc/include/asm/io.h index 001f2f11c19b..45698d55cd6a 100644 --- a/arch/powerpc/include/asm/io.h +++ b/arch/powerpc/include/asm/io.h | |||
@@ -2,6 +2,8 @@ | |||
2 | #define _ASM_POWERPC_IO_H | 2 | #define _ASM_POWERPC_IO_H |
3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
4 | 4 | ||
5 | #define ARCH_HAS_IOREMAP_WC | ||
6 | |||
5 | /* | 7 | /* |
6 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License | 9 | * modify it under the terms of the GNU General Public License |
@@ -481,10 +483,16 @@ __do_out_asm(_rec_outl, "stwbrx") | |||
481 | _memcpy_fromio(dst,PCI_FIX_ADDR(src),n) | 483 | _memcpy_fromio(dst,PCI_FIX_ADDR(src),n) |
482 | #endif /* !CONFIG_EEH */ | 484 | #endif /* !CONFIG_EEH */ |
483 | 485 | ||
484 | #ifdef CONFIG_PPC_INDIRECT_IO | 486 | #ifdef CONFIG_PPC_INDIRECT_PIO |
485 | #define DEF_PCI_HOOK(x) x | 487 | #define DEF_PCI_HOOK_pio(x) x |
488 | #else | ||
489 | #define DEF_PCI_HOOK_pio(x) NULL | ||
490 | #endif | ||
491 | |||
492 | #ifdef CONFIG_PPC_INDIRECT_MMIO | ||
493 | #define DEF_PCI_HOOK_mem(x) x | ||
486 | #else | 494 | #else |
487 | #define DEF_PCI_HOOK(x) NULL | 495 | #define DEF_PCI_HOOK_mem(x) NULL |
488 | #endif | 496 | #endif |
489 | 497 | ||
490 | /* Structure containing all the hooks */ | 498 | /* Structure containing all the hooks */ |
@@ -504,7 +512,7 @@ extern struct ppc_pci_io { | |||
504 | #define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \ | 512 | #define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \ |
505 | static inline ret name at \ | 513 | static inline ret name at \ |
506 | { \ | 514 | { \ |
507 | if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \ | 515 | if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \ |
508 | return ppc_pci_io.name al; \ | 516 | return ppc_pci_io.name al; \ |
509 | return __do_##name al; \ | 517 | return __do_##name al; \ |
510 | } | 518 | } |
@@ -512,7 +520,7 @@ static inline ret name at \ | |||
512 | #define DEF_PCI_AC_NORET(name, at, al, space, aa) \ | 520 | #define DEF_PCI_AC_NORET(name, at, al, space, aa) \ |
513 | static inline void name at \ | 521 | static inline void name at \ |
514 | { \ | 522 | { \ |
515 | if (DEF_PCI_HOOK(ppc_pci_io.name) != NULL) \ | 523 | if (DEF_PCI_HOOK_##space(ppc_pci_io.name) != NULL) \ |
516 | ppc_pci_io.name al; \ | 524 | ppc_pci_io.name al; \ |
517 | else \ | 525 | else \ |
518 | __do_##name al; \ | 526 | __do_##name al; \ |
@@ -616,12 +624,13 @@ static inline void iosync(void) | |||
616 | * * ioremap is the standard one and provides non-cacheable guarded mappings | 624 | * * ioremap is the standard one and provides non-cacheable guarded mappings |
617 | * and can be hooked by the platform via ppc_md | 625 | * and can be hooked by the platform via ppc_md |
618 | * | 626 | * |
619 | * * ioremap_flags allows to specify the page flags as an argument and can | 627 | * * ioremap_prot allows to specify the page flags as an argument and can |
620 | * also be hooked by the platform via ppc_md. ioremap_prot is the exact | 628 | * also be hooked by the platform via ppc_md. |
621 | * same thing as ioremap_flags. | ||
622 | * | 629 | * |
623 | * * ioremap_nocache is identical to ioremap | 630 | * * ioremap_nocache is identical to ioremap |
624 | * | 631 | * |
632 | * * ioremap_wc enables write combining | ||
633 | * | ||
625 | * * iounmap undoes such a mapping and can be hooked | 634 | * * iounmap undoes such a mapping and can be hooked |
626 | * | 635 | * |
627 | * * __ioremap_at (and the pending __iounmap_at) are low level functions to | 636 | * * __ioremap_at (and the pending __iounmap_at) are low level functions to |
@@ -629,7 +638,7 @@ static inline void iosync(void) | |||
629 | * currently be hooked. Must be page aligned. | 638 | * currently be hooked. Must be page aligned. |
630 | * | 639 | * |
631 | * * __ioremap is the low level implementation used by ioremap and | 640 | * * __ioremap is the low level implementation used by ioremap and |
632 | * ioremap_flags and cannot be hooked (but can be used by a hook on one | 641 | * ioremap_prot and cannot be hooked (but can be used by a hook on one |
633 | * of the previous ones) | 642 | * of the previous ones) |
634 | * | 643 | * |
635 | * * __ioremap_caller is the same as above but takes an explicit caller | 644 | * * __ioremap_caller is the same as above but takes an explicit caller |
@@ -640,10 +649,10 @@ static inline void iosync(void) | |||
640 | * | 649 | * |
641 | */ | 650 | */ |
642 | extern void __iomem *ioremap(phys_addr_t address, unsigned long size); | 651 | extern void __iomem *ioremap(phys_addr_t address, unsigned long size); |
643 | extern void __iomem *ioremap_flags(phys_addr_t address, unsigned long size, | 652 | extern void __iomem *ioremap_prot(phys_addr_t address, unsigned long size, |
644 | unsigned long flags); | 653 | unsigned long flags); |
654 | extern void __iomem *ioremap_wc(phys_addr_t address, unsigned long size); | ||
645 | #define ioremap_nocache(addr, size) ioremap((addr), (size)) | 655 | #define ioremap_nocache(addr, size) ioremap((addr), (size)) |
646 | #define ioremap_prot(addr, size, prot) ioremap_flags((addr), (size), (prot)) | ||
647 | 656 | ||
648 | extern void iounmap(volatile void __iomem *addr); | 657 | extern void iounmap(volatile void __iomem *addr); |
649 | 658 | ||
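A hedged sketch of the new write-combining mapping helper; fb_phys and fb_size are placeholders for something like a framebuffer aperture:

        void __iomem *fb = ioremap_wc(fb_phys, fb_size);

        if (fb) {
                /* streaming stores to the WC region go here */
                iounmap(fb);
        }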
diff --git a/arch/powerpc/include/asm/io_event_irq.h b/arch/powerpc/include/asm/io_event_irq.h new file mode 100644 index 000000000000..b1a9a1be3c21 --- /dev/null +++ b/arch/powerpc/include/asm/io_event_irq.h | |||
@@ -0,0 +1,54 @@ | |||
1 | /* | ||
2 | * Copyright 2010, 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #ifndef _ASM_POWERPC_IO_EVENT_IRQ_H | ||
11 | #define _ASM_POWERPC_IO_EVENT_IRQ_H | ||
12 | |||
13 | #include <linux/types.h> | ||
14 | #include <linux/notifier.h> | ||
15 | |||
16 | #define PSERIES_IOEI_RPC_MAX_LEN 216 | ||
17 | |||
18 | #define PSERIES_IOEI_TYPE_ERR_DETECTED 0x01 | ||
19 | #define PSERIES_IOEI_TYPE_ERR_RECOVERED 0x02 | ||
20 | #define PSERIES_IOEI_TYPE_EVENT 0x03 | ||
21 | #define PSERIES_IOEI_TYPE_RPC_PASS_THRU 0x04 | ||
22 | |||
23 | #define PSERIES_IOEI_SUBTYPE_NOT_APP 0x00 | ||
24 | #define PSERIES_IOEI_SUBTYPE_REBALANCE_REQ 0x01 | ||
25 | #define PSERIES_IOEI_SUBTYPE_NODE_ONLINE 0x03 | ||
26 | #define PSERIES_IOEI_SUBTYPE_NODE_OFFLINE 0x04 | ||
27 | #define PSERIES_IOEI_SUBTYPE_DUMP_SIZE_CHANGE 0x05 | ||
28 | #define PSERIES_IOEI_SUBTYPE_TORRENT_IRV_UPDATE 0x06 | ||
29 | #define PSERIES_IOEI_SUBTYPE_TORRENT_HFI_CFGED 0x07 | ||
30 | |||
31 | #define PSERIES_IOEI_SCOPE_NOT_APP 0x00 | ||
32 | #define PSERIES_IOEI_SCOPE_RIO_HUB 0x36 | ||
33 | #define PSERIES_IOEI_SCOPE_RIO_BRIDGE 0x37 | ||
34 | #define PSERIES_IOEI_SCOPE_PHB 0x38 | ||
35 | #define PSERIES_IOEI_SCOPE_EADS_GLOBAL 0x39 | ||
36 | #define PSERIES_IOEI_SCOPE_EADS_SLOT 0x3A | ||
37 | #define PSERIES_IOEI_SCOPE_TORRENT_HUB 0x3B | ||
38 | #define PSERIES_IOEI_SCOPE_SERVICE_PROC 0x51 | ||
39 | |||
40 | /* Platform Event Log Format, Version 6, data portion of IO event section */ ||
41 | struct pseries_io_event { | ||
42 | uint8_t event_type; /* 0x00 IO-Event Type */ | ||
43 | uint8_t rpc_data_len; /* 0x01 RPC data length */ | ||
44 | uint8_t scope; /* 0x02 Error/Event Scope */ | ||
45 | uint8_t event_subtype; /* 0x03 I/O-Event Sub-Type */ | ||
46 | uint32_t drc_index; /* 0x04 DRC Index */ | ||
47 | uint8_t rpc_data[PSERIES_IOEI_RPC_MAX_LEN]; | ||
48 | /* 0x08 RPC Data (0-216 bytes, */ | ||
49 | /* padded to 4 bytes alignment) */ | ||
50 | }; | ||
51 | |||
52 | extern struct atomic_notifier_head pseries_ioei_notifier_list; | ||
53 | |||
54 | #endif /* _ASM_POWERPC_IO_EVENT_IRQ_H */ | ||
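
The new header exports pseries_ioei_notifier_list, an atomic notifier chain that the pseries IO-event interrupt handler fires for each logged event. A hedged sketch of a consumer; the assumption (based only on this header) is that the notifier data pointer is the struct pseries_io_event payload:

    #include <linux/init.h>
    #include <linux/kernel.h>
    #include <linux/notifier.h>
    #include <asm/io_event_irq.h>

    static int demo_ioei_notify(struct notifier_block *nb,
    			    unsigned long action, void *data)
    {
    	struct pseries_io_event *ev = data;

    	if (ev->event_type == PSERIES_IOEI_TYPE_ERR_DETECTED)
    		pr_info("IO event: scope %#x subtype %#x drc %#x\n",
    			ev->scope, ev->event_subtype, ev->drc_index);
    	return NOTIFY_OK;
    }

    static struct notifier_block demo_ioei_nb = {
    	.notifier_call = demo_ioei_notify,
    };

    static int __init demo_ioei_init(void)
    {
    	atomic_notifier_chain_register(&pseries_ioei_notifier_list,
    				       &demo_ioei_nb);
    	return 0;
    }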
diff --git a/arch/powerpc/include/asm/ioctls.h b/arch/powerpc/include/asm/ioctls.h index 851920052e08..e9b78870aaab 100644 --- a/arch/powerpc/include/asm/ioctls.h +++ b/arch/powerpc/include/asm/ioctls.h | |||
@@ -94,7 +94,9 @@ | |||
94 | #define TIOCSRS485 0x542f | 94 | #define TIOCSRS485 0x542f |
95 | #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ | 95 | #define TIOCGPTN _IOR('T',0x30, unsigned int) /* Get Pty Number (of pty-mux device) */ |
96 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ | 96 | #define TIOCSPTLCK _IOW('T',0x31, int) /* Lock/unlock Pty */ |
97 | #define TIOCGDEV _IOR('T',0x32, unsigned int) /* Get primary device node of /dev/console */ | ||
97 | #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ | 98 | #define TIOCSIG _IOW('T',0x36, int) /* Generate signal on Pty slave */ |
99 | #define TIOCVHANGUP 0x5437 | ||
98 | 100 | ||
99 | #define TIOCSERCONFIG 0x5453 | 101 | #define TIOCSERCONFIG 0x5453 |
100 | #define TIOCSERGWILD 0x5454 | 102 | #define TIOCSERGWILD 0x5454 |
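
TIOCGDEV reports the device number of the tty backing /dev/console, and TIOCVHANGUP forces a hangup on the tty. A small user-space sketch of the former; note that older C libraries may not re-export TIOCGDEV, in which case it has to be taken from the kernel headers:

    #include <stdio.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>

    int main(void)
    {
    	unsigned int dev;
    	int fd = open("/dev/console", O_RDONLY);

    	if (fd < 0 || ioctl(fd, TIOCGDEV, &dev) < 0)
    		return 1;
    	/* dev is packed the way the kernel's new_encode_dev() packs it */
    	printf("console is %u:%u\n", (dev >> 8) & 0xfff,
    	       (dev & 0xff) | ((dev >> 12) & 0xfff00));
    	close(fd);
    	return 0;
    }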
diff --git a/arch/powerpc/include/asm/irq.h b/arch/powerpc/include/asm/irq.h index 67ab5fb7d153..1bff591f7f72 100644 --- a/arch/powerpc/include/asm/irq.h +++ b/arch/powerpc/include/asm/irq.h | |||
@@ -88,9 +88,6 @@ struct irq_host_ops { | |||
88 | /* Dispose of such a mapping */ | 88 | /* Dispose of such a mapping */ |
89 | void (*unmap)(struct irq_host *h, unsigned int virq); | 89 | void (*unmap)(struct irq_host *h, unsigned int virq); |
90 | 90 | ||
91 | /* Update of such a mapping */ | ||
92 | void (*remap)(struct irq_host *h, unsigned int virq, irq_hw_number_t hw); | ||
93 | |||
94 | /* Translate device-tree interrupt specifier from raw format coming | 91 | /* Translate device-tree interrupt specifier from raw format coming |
95 | * from the firmware to a irq_hw_number_t (interrupt line number) and | 92 | * from the firmware to a irq_hw_number_t (interrupt line number) and |
96 | * type (sense) that can be passed to set_irq_type(). In the absence | 93 | * type (sense) that can be passed to set_irq_type(). In the absence |
@@ -128,19 +125,10 @@ struct irq_host { | |||
128 | struct device_node *of_node; | 125 | struct device_node *of_node; |
129 | }; | 126 | }; |
130 | 127 | ||
131 | /* The main irq map itself is an array of NR_IRQ entries containing the | 128 | struct irq_data; |
132 | * associate host and irq number. An entry with a host of NULL is free. | 129 | extern irq_hw_number_t irqd_to_hwirq(struct irq_data *d); |
133 | * An entry can be allocated if it's free, the allocator always then sets | ||
134 | * hwirq first to the host's invalid irq number and then fills ops. | ||
135 | */ | ||
136 | struct irq_map_entry { | ||
137 | irq_hw_number_t hwirq; | ||
138 | struct irq_host *host; | ||
139 | }; | ||
140 | |||
141 | extern struct irq_map_entry irq_map[NR_IRQS]; | ||
142 | |||
143 | extern irq_hw_number_t virq_to_hw(unsigned int virq); | 130 | extern irq_hw_number_t virq_to_hw(unsigned int virq); |
131 | extern bool virq_is_host(unsigned int virq, struct irq_host *host); | ||
144 | 132 | ||
145 | /** | 133 | /** |
146 | * irq_alloc_host - Allocate a new irq_host data structure | 134 | * irq_alloc_host - Allocate a new irq_host data structure |
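
With the global irq_map[] array removed, the hardware interrupt number is reached through the generic struct irq_data via irqd_to_hwirq(), or through virq_to_hw() when only the virtual number is at hand. A sketch of an irq_chip callback updated to the new helper:

    #include <linux/kernel.h>
    #include <linux/irq.h>
    #include <asm/irq.h>

    static void demo_mask_irq(struct irq_data *d)
    {
    	irq_hw_number_t hw = irqd_to_hwirq(d);

    	/* previously: irq_hw_number_t hw = irq_map[d->irq].hwirq; */
    	pr_debug("masking hw irq %lu\n", (unsigned long)hw);
    	/* write the controller's mask register for 'hw' here */
    }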
diff --git a/arch/powerpc/include/asm/irqflags.h b/arch/powerpc/include/asm/irqflags.h index 5f68ecfdf516..b0b06d85788d 100644 --- a/arch/powerpc/include/asm/irqflags.h +++ b/arch/powerpc/include/asm/irqflags.h | |||
@@ -6,30 +6,50 @@ | |||
6 | 6 | ||
7 | #ifndef __ASSEMBLY__ | 7 | #ifndef __ASSEMBLY__ |
8 | /* | 8 | /* |
9 | * Get definitions for raw_local_save_flags(x), etc. | 9 | * Get definitions for arch_local_save_flags(x), etc. |
10 | */ | 10 | */ |
11 | #include <asm/hw_irq.h> | 11 | #include <asm/hw_irq.h> |
12 | 12 | ||
13 | #else | 13 | #else |
14 | #ifdef CONFIG_TRACE_IRQFLAGS | 14 | #ifdef CONFIG_TRACE_IRQFLAGS |
15 | #ifdef CONFIG_IRQSOFF_TRACER | ||
16 | /* | ||
17 | * Since the ftrace irqsoff latency trace checks CALLER_ADDR1, | ||
18 | * which is the stack frame here, we need to force a stack frame | ||
19 | * in case we came from user space. | ||
20 | */ | ||
21 | #define TRACE_WITH_FRAME_BUFFER(func) \ | ||
22 | mflr r0; \ | ||
23 | stdu r1, -32(r1); \ | ||
24 | std r0, 16(r1); \ | ||
25 | stdu r1, -32(r1); \ | ||
26 | bl func; \ | ||
27 | ld r1, 0(r1); \ | ||
28 | ld r1, 0(r1); | ||
29 | #else | ||
30 | #define TRACE_WITH_FRAME_BUFFER(func) \ | ||
31 | bl func; | ||
32 | #endif | ||
33 | |||
15 | /* | 34 | /* |
16 | * Most of the CPU's IRQ-state tracing is done from assembly code; we | 35 | * Most of the CPU's IRQ-state tracing is done from assembly code; we |
17 | * have to call a C function so call a wrapper that saves all the | 36 | * have to call a C function so call a wrapper that saves all the |
18 | * C-clobbered registers. | 37 | * C-clobbered registers. |
19 | */ | 38 | */ |
20 | #define TRACE_ENABLE_INTS bl .trace_hardirqs_on | 39 | #define TRACE_ENABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on) |
21 | #define TRACE_DISABLE_INTS bl .trace_hardirqs_off | 40 | #define TRACE_DISABLE_INTS TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off) |
22 | #define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip) \ | 41 | |
23 | cmpdi en,0; \ | 42 | #define TRACE_AND_RESTORE_IRQ_PARTIAL(en,skip) \ |
24 | bne 95f; \ | 43 | cmpdi en,0; \ |
25 | stb en,PACASOFTIRQEN(r13); \ | 44 | bne 95f; \ |
26 | bl .trace_hardirqs_off; \ | 45 | stb en,PACASOFTIRQEN(r13); \ |
27 | b skip; \ | 46 | TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_off) \ |
28 | 95: bl .trace_hardirqs_on; \ | 47 | b skip; \ |
48 | 95: TRACE_WITH_FRAME_BUFFER(.trace_hardirqs_on) \ | ||
29 | li en,1; | 49 | li en,1; |
30 | #define TRACE_AND_RESTORE_IRQ(en) \ | 50 | #define TRACE_AND_RESTORE_IRQ(en) \ |
31 | TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f); \ | 51 | TRACE_AND_RESTORE_IRQ_PARTIAL(en,96f); \ |
32 | stb en,PACASOFTIRQEN(r13); \ | 52 | stb en,PACASOFTIRQEN(r13); \ |
33 | 96: | 53 | 96: |
34 | #else | 54 | #else |
35 | #define TRACE_ENABLE_INTS | 55 | #define TRACE_ENABLE_INTS |
diff --git a/arch/powerpc/include/asm/kexec.h b/arch/powerpc/include/asm/kexec.h index 076327f2eff7..8a33698c61bd 100644 --- a/arch/powerpc/include/asm/kexec.h +++ b/arch/powerpc/include/asm/kexec.h | |||
@@ -76,7 +76,7 @@ extern void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)); | |||
76 | extern cpumask_t cpus_in_sr; | 76 | extern cpumask_t cpus_in_sr; |
77 | static inline int kexec_sr_activated(int cpu) | 77 | static inline int kexec_sr_activated(int cpu) |
78 | { | 78 | { |
79 | return cpu_isset(cpu,cpus_in_sr); | 79 | return cpumask_test_cpu(cpu, &cpus_in_sr); |
80 | } | 80 | } |
81 | 81 | ||
82 | struct kimage; | 82 | struct kimage; |
@@ -91,6 +91,7 @@ extern void machine_kexec_simple(struct kimage *image); | |||
91 | extern void crash_kexec_secondary(struct pt_regs *regs); | 91 | extern void crash_kexec_secondary(struct pt_regs *regs); |
92 | extern int overlaps_crashkernel(unsigned long start, unsigned long size); | 92 | extern int overlaps_crashkernel(unsigned long start, unsigned long size); |
93 | extern void reserve_crashkernel(void); | 93 | extern void reserve_crashkernel(void); |
94 | extern void machine_kexec_mask_interrupts(void); | ||
94 | 95 | ||
95 | #else /* !CONFIG_KEXEC */ | 96 | #else /* !CONFIG_KEXEC */ |
96 | static inline int kexec_sr_activated(int cpu) { return 0; } | 97 | static inline int kexec_sr_activated(int cpu) { return 0; } |
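
machine_kexec_mask_interrupts() is only declared here; its purpose is to quiesce the interrupt controllers before jumping into the kexec'd kernel. A rough, hedged sketch of what such a helper can look like (the real body added by this series lives elsewhere and may also EOI in-progress interrupts and disable lines):

    #include <linux/irq.h>

    /* sketch only: mask every mapped interrupt before kexec */
    void demo_kexec_mask_interrupts(void)
    {
    	unsigned int i;
    	struct irq_desc *desc;

    	for_each_irq_desc(i, desc) {
    		struct irq_chip *chip = irq_desc_get_chip(desc);

    		if (chip && chip->irq_mask)
    			chip->irq_mask(&desc->irq_data);
    	}
    }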
diff --git a/arch/powerpc/include/asm/kgdb.h b/arch/powerpc/include/asm/kgdb.h index edd217006d27..9db24e77b9f4 100644 --- a/arch/powerpc/include/asm/kgdb.h +++ b/arch/powerpc/include/asm/kgdb.h | |||
@@ -31,6 +31,7 @@ static inline void arch_kgdb_breakpoint(void) | |||
31 | asm(".long 0x7d821008"); /* twge r2, r2 */ | 31 | asm(".long 0x7d821008"); /* twge r2, r2 */ |
32 | } | 32 | } |
33 | #define CACHE_FLUSH_IS_SAFE 1 | 33 | #define CACHE_FLUSH_IS_SAFE 1 |
34 | #define DBG_MAX_REG_NUM 70 | ||
34 | 35 | ||
35 | /* The number bytes of registers we have to save depends on a few | 36 | /* The number bytes of registers we have to save depends on a few |
36 | * things. For 64bit we default to not including vector registers and | 37 | * things. For 64bit we default to not including vector registers and |
diff --git a/arch/powerpc/include/asm/kprobes.h b/arch/powerpc/include/asm/kprobes.h index d0e7701fa1f6..be0171afdc0f 100644 --- a/arch/powerpc/include/asm/kprobes.h +++ b/arch/powerpc/include/asm/kprobes.h | |||
@@ -50,7 +50,7 @@ typedef unsigned int kprobe_opcode_t; | |||
50 | * Handle cases where: | 50 | * Handle cases where: |
51 | * - User passes a <.symbol> or <module:.symbol> | 51 | * - User passes a <.symbol> or <module:.symbol> |
52 | * - User passes a <symbol> or <module:symbol> | 52 | * - User passes a <symbol> or <module:symbol> |
53 | * - User passes a non-existant symbol, kallsyms_lookup_name | 53 | * - User passes a non-existent symbol, kallsyms_lookup_name |
54 | * returns 0. Don't deref the NULL pointer in that case | 54 | * returns 0. Don't deref the NULL pointer in that case |
55 | */ | 55 | */ |
56 | #define kprobe_lookup_name(name, addr) \ | 56 | #define kprobe_lookup_name(name, addr) \ |
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h index 6c5547d82bbe..d2ca5ed3877b 100644 --- a/arch/powerpc/include/asm/kvm.h +++ b/arch/powerpc/include/asm/kvm.h | |||
@@ -45,6 +45,114 @@ struct kvm_regs { | |||
45 | __u64 gpr[32]; | 45 | __u64 gpr[32]; |
46 | }; | 46 | }; |
47 | 47 | ||
48 | #define KVM_SREGS_E_IMPL_NONE 0 | ||
49 | #define KVM_SREGS_E_IMPL_FSL 1 | ||
50 | |||
51 | #define KVM_SREGS_E_FSL_PIDn (1 << 0) /* PID1/PID2 */ | ||
52 | |||
53 | /* | ||
54 | * Feature bits indicate which sections of the sregs struct are valid, | ||
55 | * both in KVM_GET_SREGS and KVM_SET_SREGS. On KVM_SET_SREGS, registers | ||
56 | * corresponding to unset feature bits will not be modified. This allows | ||
57 | * restoring a checkpoint made without that feature, while keeping the | ||
58 | * default values of the new registers. | ||
59 | * | ||
60 | * KVM_SREGS_E_BASE contains: | ||
61 | * CSRR0/1 (refers to SRR2/3 on 40x) | ||
62 | * ESR | ||
63 | * DEAR | ||
64 | * MCSR | ||
65 | * TSR | ||
66 | * TCR | ||
67 | * DEC | ||
68 | * TB | ||
69 | * VRSAVE (USPRG0) | ||
70 | */ | ||
71 | #define KVM_SREGS_E_BASE (1 << 0) | ||
72 | |||
73 | /* | ||
74 | * KVM_SREGS_E_ARCH206 contains: | ||
75 | * | ||
76 | * PIR | ||
77 | * MCSRR0/1 | ||
78 | * DECAR | ||
79 | * IVPR | ||
80 | */ | ||
81 | #define KVM_SREGS_E_ARCH206 (1 << 1) | ||
82 | |||
83 | /* | ||
84 | * Contains EPCR, plus the upper half of 64-bit registers | ||
85 | * that are 32-bit on 32-bit implementations. | ||
86 | */ | ||
87 | #define KVM_SREGS_E_64 (1 << 2) | ||
88 | |||
89 | #define KVM_SREGS_E_SPRG8 (1 << 3) | ||
90 | #define KVM_SREGS_E_MCIVPR (1 << 4) | ||
91 | |||
92 | /* | ||
93 | * IVORs are used -- contains IVOR0-15, plus additional IVORs | ||
94 | * in combination with an appropriate feature bit. | ||
95 | */ | ||
96 | #define KVM_SREGS_E_IVOR (1 << 5) | ||
97 | |||
98 | /* | ||
99 | * Contains MAS0-4, MAS6-7, TLBnCFG, MMUCFG. | ||
100 | * Also TLBnPS if MMUCFG[MAVN] = 1. | ||
101 | */ | ||
102 | #define KVM_SREGS_E_ARCH206_MMU (1 << 6) | ||
103 | |||
104 | /* DBSR, DBCR, IAC, DAC, DVC */ | ||
105 | #define KVM_SREGS_E_DEBUG (1 << 7) | ||
106 | |||
107 | /* Enhanced debug -- DSRR0/1, SPRG9 */ | ||
108 | #define KVM_SREGS_E_ED (1 << 8) | ||
109 | |||
110 | /* Embedded Floating Point (SPE) -- IVOR32-34 if KVM_SREGS_E_IVOR */ | ||
111 | #define KVM_SREGS_E_SPE (1 << 9) | ||
112 | |||
113 | /* External Proxy (EXP) -- EPR */ | ||
114 | #define KVM_SREGS_EXP (1 << 10) | ||
115 | |||
116 | /* External PID (E.PD) -- EPSC/EPLC */ | ||
117 | #define KVM_SREGS_E_PD (1 << 11) | ||
118 | |||
119 | /* Processor Control (E.PC) -- IVOR36-37 if KVM_SREGS_E_IVOR */ | ||
120 | #define KVM_SREGS_E_PC (1 << 12) | ||
121 | |||
122 | /* Page table (E.PT) -- EPTCFG */ | ||
123 | #define KVM_SREGS_E_PT (1 << 13) | ||
124 | |||
125 | /* Embedded Performance Monitor (E.PM) -- IVOR35 if KVM_SREGS_E_IVOR */ | ||
126 | #define KVM_SREGS_E_PM (1 << 14) | ||
127 | |||
128 | /* | ||
129 | * Special updates: | ||
130 | * | ||
131 | * Some registers may change even while a vcpu is not running. | ||
132 | * To avoid losing these changes, by default these registers are | ||
133 | * not updated by KVM_SET_SREGS. To force an update, set the bit | ||
134 | * in u.e.update_special corresponding to the register to be updated. | ||
135 | * | ||
136 | * The update_special field is zero on return from KVM_GET_SREGS. | ||
137 | * | ||
138 | * When restoring a checkpoint, the caller can set update_special | ||
139 | * to 0xffffffff to ensure that everything is restored, even new features | ||
140 | * that the caller doesn't know about. | ||
141 | */ | ||
142 | #define KVM_SREGS_E_UPDATE_MCSR (1 << 0) | ||
143 | #define KVM_SREGS_E_UPDATE_TSR (1 << 1) | ||
144 | #define KVM_SREGS_E_UPDATE_DEC (1 << 2) | ||
145 | #define KVM_SREGS_E_UPDATE_DBSR (1 << 3) | ||
146 | |||
147 | /* | ||
148 | * In KVM_SET_SREGS, reserved/pad fields must be left untouched from a | ||
149 | * previous KVM_GET_REGS. | ||
150 | * | ||
151 | * Unless otherwise indicated, setting any register with KVM_SET_SREGS | ||
152 | * directly sets its value. It does not trigger any special semantics such | ||
153 | * as write-one-to-clear. Calling KVM_SET_SREGS on an unmodified struct | ||
154 | * just received from KVM_GET_SREGS is always a no-op. | ||
155 | */ | ||
48 | struct kvm_sregs { | 156 | struct kvm_sregs { |
49 | __u32 pvr; | 157 | __u32 pvr; |
50 | union { | 158 | union { |
@@ -62,6 +170,82 @@ struct kvm_sregs { | |||
62 | __u64 dbat[8]; | 170 | __u64 dbat[8]; |
63 | } ppc32; | 171 | } ppc32; |
64 | } s; | 172 | } s; |
173 | struct { | ||
174 | union { | ||
175 | struct { /* KVM_SREGS_E_IMPL_FSL */ | ||
176 | __u32 features; /* KVM_SREGS_E_FSL_ */ | ||
177 | __u32 svr; | ||
178 | __u64 mcar; | ||
179 | __u32 hid0; | ||
180 | |||
181 | /* KVM_SREGS_E_FSL_PIDn */ | ||
182 | __u32 pid1, pid2; | ||
183 | } fsl; | ||
184 | __u8 pad[256]; | ||
185 | } impl; | ||
186 | |||
187 | __u32 features; /* KVM_SREGS_E_ */ | ||
188 | __u32 impl_id; /* KVM_SREGS_E_IMPL_ */ | ||
189 | __u32 update_special; /* KVM_SREGS_E_UPDATE_ */ | ||
190 | __u32 pir; /* read-only */ | ||
191 | __u64 sprg8; | ||
192 | __u64 sprg9; /* E.ED */ | ||
193 | __u64 csrr0; | ||
194 | __u64 dsrr0; /* E.ED */ | ||
195 | __u64 mcsrr0; | ||
196 | __u32 csrr1; | ||
197 | __u32 dsrr1; /* E.ED */ | ||
198 | __u32 mcsrr1; | ||
199 | __u32 esr; | ||
200 | __u64 dear; | ||
201 | __u64 ivpr; | ||
202 | __u64 mcivpr; | ||
203 | __u64 mcsr; /* KVM_SREGS_E_UPDATE_MCSR */ | ||
204 | |||
205 | __u32 tsr; /* KVM_SREGS_E_UPDATE_TSR */ | ||
206 | __u32 tcr; | ||
207 | __u32 decar; | ||
208 | __u32 dec; /* KVM_SREGS_E_UPDATE_DEC */ | ||
209 | |||
210 | /* | ||
211 | * Userspace can read TB directly, but the | ||
212 | * value reported here is consistent with "dec". | ||
213 | * | ||
214 | * Read-only. | ||
215 | */ | ||
216 | __u64 tb; | ||
217 | |||
218 | __u32 dbsr; /* KVM_SREGS_E_UPDATE_DBSR */ | ||
219 | __u32 dbcr[3]; | ||
220 | __u32 iac[4]; | ||
221 | __u32 dac[2]; | ||
222 | __u32 dvc[2]; | ||
223 | __u8 num_iac; /* read-only */ | ||
224 | __u8 num_dac; /* read-only */ | ||
225 | __u8 num_dvc; /* read-only */ | ||
226 | __u8 pad; | ||
227 | |||
228 | __u32 epr; /* EXP */ | ||
229 | __u32 vrsave; /* a.k.a. USPRG0 */ | ||
230 | __u32 epcr; /* KVM_SREGS_E_64 */ | ||
231 | |||
232 | __u32 mas0; | ||
233 | __u32 mas1; | ||
234 | __u64 mas2; | ||
235 | __u64 mas7_3; | ||
236 | __u32 mas4; | ||
237 | __u32 mas6; | ||
238 | |||
239 | __u32 ivor_low[16]; /* IVOR0-15 */ | ||
240 | __u32 ivor_high[18]; /* IVOR32+, plus room to expand */ | ||
241 | |||
242 | __u32 mmucfg; /* read-only */ | ||
243 | __u32 eptcfg; /* E.PT, read-only */ | ||
244 | __u32 tlbcfg[4];/* read-only */ | ||
245 | __u32 tlbps[4]; /* read-only */ | ||
246 | |||
247 | __u32 eplc, epsc; /* E.PD */ | ||
248 | } e; | ||
65 | __u8 pad[1020]; | 249 | __u8 pad[1020]; |
66 | } u; | 250 | } u; |
67 | }; | 251 | }; |
@@ -86,5 +270,6 @@ struct kvm_guest_debug_arch { | |||
86 | 270 | ||
87 | #define KVM_INTERRUPT_SET -1U | 271 | #define KVM_INTERRUPT_SET -1U |
88 | #define KVM_INTERRUPT_UNSET -2U | 272 | #define KVM_INTERRUPT_UNSET -2U |
273 | #define KVM_INTERRUPT_SET_LEVEL -3U | ||
89 | 274 | ||
90 | #endif /* __LINUX_KVM_POWERPC_H */ | 275 | #endif /* __LINUX_KVM_POWERPC_H */ |
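
On KVM_SET_SREGS only the sections whose bits are set in u.e.features are written back, so a checkpoint taken without a given feature restores cleanly. A user-space sketch for a BookE guest that fetches the sregs, changes TCR, and writes back only the base section; vcpu_fd is assumed to be an already-created KVM vcpu file descriptor:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int demo_set_tcr(int vcpu_fd, __u32 new_tcr)
    {
    	struct kvm_sregs sregs;

    	if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
    		return -1;

    	sregs.u.e.tcr = new_tcr;
    	/* only the KVM_SREGS_E_BASE section is applied on SET */
    	sregs.u.e.features = KVM_SREGS_E_BASE;
    	sregs.u.e.update_special = 0;	/* no forced MCSR/TSR/DEC/DBSR update */

    	return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
    }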
diff --git a/arch/powerpc/include/asm/kvm_44x.h b/arch/powerpc/include/asm/kvm_44x.h index d22d39942a92..a0e57618ff33 100644 --- a/arch/powerpc/include/asm/kvm_44x.h +++ b/arch/powerpc/include/asm/kvm_44x.h | |||
@@ -61,7 +61,6 @@ static inline struct kvmppc_vcpu_44x *to_44x(struct kvm_vcpu *vcpu) | |||
61 | return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu); | 61 | return container_of(vcpu, struct kvmppc_vcpu_44x, vcpu); |
62 | } | 62 | } |
63 | 63 | ||
64 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 new_pid); | ||
65 | void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu); | 64 | void kvmppc_44x_tlb_put(struct kvm_vcpu *vcpu); |
66 | void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu); | 65 | void kvmppc_44x_tlb_load(struct kvm_vcpu *vcpu); |
67 | 66 | ||
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h index c5ea4cda34b3..0951b17f4eb5 100644 --- a/arch/powerpc/include/asm/kvm_asm.h +++ b/arch/powerpc/include/asm/kvm_asm.h | |||
@@ -58,6 +58,8 @@ | |||
58 | #define BOOK3S_INTERRUPT_INST_STORAGE 0x400 | 58 | #define BOOK3S_INTERRUPT_INST_STORAGE 0x400 |
59 | #define BOOK3S_INTERRUPT_INST_SEGMENT 0x480 | 59 | #define BOOK3S_INTERRUPT_INST_SEGMENT 0x480 |
60 | #define BOOK3S_INTERRUPT_EXTERNAL 0x500 | 60 | #define BOOK3S_INTERRUPT_EXTERNAL 0x500 |
61 | #define BOOK3S_INTERRUPT_EXTERNAL_LEVEL 0x501 | ||
62 | #define BOOK3S_INTERRUPT_EXTERNAL_HV 0x502 | ||
61 | #define BOOK3S_INTERRUPT_ALIGNMENT 0x600 | 63 | #define BOOK3S_INTERRUPT_ALIGNMENT 0x600 |
62 | #define BOOK3S_INTERRUPT_PROGRAM 0x700 | 64 | #define BOOK3S_INTERRUPT_PROGRAM 0x700 |
63 | #define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800 | 65 | #define BOOK3S_INTERRUPT_FP_UNAVAIL 0x800 |
@@ -84,7 +86,8 @@ | |||
84 | #define BOOK3S_IRQPRIO_EXTERNAL 13 | 86 | #define BOOK3S_IRQPRIO_EXTERNAL 13 |
85 | #define BOOK3S_IRQPRIO_DECREMENTER 14 | 87 | #define BOOK3S_IRQPRIO_DECREMENTER 14 |
86 | #define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 15 | 88 | #define BOOK3S_IRQPRIO_PERFORMANCE_MONITOR 15 |
87 | #define BOOK3S_IRQPRIO_MAX 16 | 89 | #define BOOK3S_IRQPRIO_EXTERNAL_LEVEL 16 |
90 | #define BOOK3S_IRQPRIO_MAX 17 | ||
88 | 91 | ||
89 | #define BOOK3S_HFLAG_DCBZ32 0x1 | 92 | #define BOOK3S_HFLAG_DCBZ32 0x1 |
90 | #define BOOK3S_HFLAG_SLB 0x2 | 93 | #define BOOK3S_HFLAG_SLB 0x2 |
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h index 8274a2d43925..d62e703f1214 100644 --- a/arch/powerpc/include/asm/kvm_book3s.h +++ b/arch/powerpc/include/asm/kvm_book3s.h | |||
@@ -38,15 +38,6 @@ struct kvmppc_slb { | |||
38 | bool class : 1; | 38 | bool class : 1; |
39 | }; | 39 | }; |
40 | 40 | ||
41 | struct kvmppc_sr { | ||
42 | u32 raw; | ||
43 | u32 vsid; | ||
44 | bool Ks : 1; | ||
45 | bool Kp : 1; | ||
46 | bool nx : 1; | ||
47 | bool valid : 1; | ||
48 | }; | ||
49 | |||
50 | struct kvmppc_bat { | 41 | struct kvmppc_bat { |
51 | u64 raw; | 42 | u64 raw; |
52 | u32 bepi; | 43 | u32 bepi; |
@@ -69,6 +60,13 @@ struct kvmppc_sid_map { | |||
69 | #define SID_MAP_NUM (1 << SID_MAP_BITS) | 60 | #define SID_MAP_NUM (1 << SID_MAP_BITS) |
70 | #define SID_MAP_MASK (SID_MAP_NUM - 1) | 61 | #define SID_MAP_MASK (SID_MAP_NUM - 1) |
71 | 62 | ||
63 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
64 | #define SID_CONTEXTS 1 | ||
65 | #else | ||
66 | #define SID_CONTEXTS 128 | ||
67 | #define VSID_POOL_SIZE (SID_CONTEXTS * 16) | ||
68 | #endif | ||
69 | |||
72 | struct kvmppc_vcpu_book3s { | 70 | struct kvmppc_vcpu_book3s { |
73 | struct kvm_vcpu vcpu; | 71 | struct kvm_vcpu vcpu; |
74 | struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; | 72 | struct kvmppc_book3s_shadow_vcpu *shadow_vcpu; |
@@ -79,20 +77,22 @@ struct kvmppc_vcpu_book3s { | |||
79 | u64 vsid; | 77 | u64 vsid; |
80 | } slb_shadow[64]; | 78 | } slb_shadow[64]; |
81 | u8 slb_shadow_max; | 79 | u8 slb_shadow_max; |
82 | struct kvmppc_sr sr[16]; | ||
83 | struct kvmppc_bat ibat[8]; | 80 | struct kvmppc_bat ibat[8]; |
84 | struct kvmppc_bat dbat[8]; | 81 | struct kvmppc_bat dbat[8]; |
85 | u64 hid[6]; | 82 | u64 hid[6]; |
86 | u64 gqr[8]; | 83 | u64 gqr[8]; |
87 | int slb_nr; | 84 | int slb_nr; |
88 | u32 dsisr; | ||
89 | u64 sdr1; | 85 | u64 sdr1; |
90 | u64 hior; | 86 | u64 hior; |
91 | u64 msr_mask; | 87 | u64 msr_mask; |
92 | u64 vsid_first; | ||
93 | u64 vsid_next; | 88 | u64 vsid_next; |
89 | #ifdef CONFIG_PPC_BOOK3S_32 | ||
90 | u32 vsid_pool[VSID_POOL_SIZE]; | ||
91 | #else | ||
92 | u64 vsid_first; | ||
94 | u64 vsid_max; | 93 | u64 vsid_max; |
95 | int context_id; | 94 | #endif |
95 | int context_id[SID_CONTEXTS]; | ||
96 | ulong prog_flags; /* flags to inject when giving a 700 trap */ | 96 | ulong prog_flags; /* flags to inject when giving a 700 trap */ |
97 | }; | 97 | }; |
98 | 98 | ||
@@ -131,9 +131,10 @@ extern void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, | |||
131 | bool upper, u32 val); | 131 | bool upper, u32 val); |
132 | extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); | 132 | extern void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr); |
133 | extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); | 133 | extern int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu); |
134 | extern pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn); | ||
134 | 135 | ||
135 | extern u32 kvmppc_trampoline_lowmem; | 136 | extern ulong kvmppc_trampoline_lowmem; |
136 | extern u32 kvmppc_trampoline_enter; | 137 | extern ulong kvmppc_trampoline_enter; |
137 | extern void kvmppc_rmcall(ulong srr0, ulong srr1); | 138 | extern void kvmppc_rmcall(ulong srr0, ulong srr1); |
138 | extern void kvmppc_load_up_fpu(void); | 139 | extern void kvmppc_load_up_fpu(void); |
139 | extern void kvmppc_load_up_altivec(void); | 140 | extern void kvmppc_load_up_altivec(void); |
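
The segment-register rework replaces the open-coded kvmppc_sr array with generic handling and splits VSID allocation: 32-bit Book3S draws guest VSIDs from a fixed per-context pool (vsid_pool[]), while 64-bit keeps a contiguous [vsid_first, vsid_max] range. An illustrative-only sketch of allocation; the real allocator in the Book3S MMU host code also handles exhaustion by flushing shadow segments:

    #include <asm/kvm_book3s.h>

    /* illustrative: hand out the next VSID, wraparound/flush omitted */
    static u64 demo_next_vsid(struct kvmppc_vcpu_book3s *vcpu_book3s)
    {
    #ifdef CONFIG_PPC_BOOK3S_32
    	return vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next++ %
    				      VSID_POOL_SIZE];
    #else
    	if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max)
    		vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
    	return vcpu_book3s->vsid_next++;
    #endif
    }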
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h index 36fdb3aff30b..d5a8a3861635 100644 --- a/arch/powerpc/include/asm/kvm_book3s_asm.h +++ b/arch/powerpc/include/asm/kvm_book3s_asm.h | |||
@@ -34,6 +34,7 @@ | |||
34 | (\intno == BOOK3S_INTERRUPT_DATA_SEGMENT) || \ | 34 | (\intno == BOOK3S_INTERRUPT_DATA_SEGMENT) || \ |
35 | (\intno == BOOK3S_INTERRUPT_INST_SEGMENT) || \ | 35 | (\intno == BOOK3S_INTERRUPT_INST_SEGMENT) || \ |
36 | (\intno == BOOK3S_INTERRUPT_EXTERNAL) || \ | 36 | (\intno == BOOK3S_INTERRUPT_EXTERNAL) || \ |
37 | (\intno == BOOK3S_INTERRUPT_EXTERNAL_HV) || \ | ||
37 | (\intno == BOOK3S_INTERRUPT_ALIGNMENT) || \ | 38 | (\intno == BOOK3S_INTERRUPT_ALIGNMENT) || \ |
38 | (\intno == BOOK3S_INTERRUPT_PROGRAM) || \ | 39 | (\intno == BOOK3S_INTERRUPT_PROGRAM) || \ |
39 | (\intno == BOOK3S_INTERRUPT_FP_UNAVAIL) || \ | 40 | (\intno == BOOK3S_INTERRUPT_FP_UNAVAIL) || \ |
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h index 7fea26fffb25..7a2a565f88c4 100644 --- a/arch/powerpc/include/asm/kvm_e500.h +++ b/arch/powerpc/include/asm/kvm_e500.h | |||
@@ -43,6 +43,7 @@ struct kvmppc_vcpu_e500 { | |||
43 | 43 | ||
44 | u32 host_pid[E500_PID_NUM]; | 44 | u32 host_pid[E500_PID_NUM]; |
45 | u32 pid[E500_PID_NUM]; | 45 | u32 pid[E500_PID_NUM]; |
46 | u32 svr; | ||
46 | 47 | ||
47 | u32 mas0; | 48 | u32 mas0; |
48 | u32 mas1; | 49 | u32 mas1; |
@@ -58,6 +59,7 @@ struct kvmppc_vcpu_e500 { | |||
58 | u32 hid1; | 59 | u32 hid1; |
59 | u32 tlb0cfg; | 60 | u32 tlb0cfg; |
60 | u32 tlb1cfg; | 61 | u32 tlb1cfg; |
62 | u64 mcar; | ||
61 | 63 | ||
62 | struct kvm_vcpu vcpu; | 64 | struct kvm_vcpu vcpu; |
63 | }; | 65 | }; |
diff --git a/arch/powerpc/include/asm/kvm_fpu.h b/arch/powerpc/include/asm/kvm_fpu.h index c3d4f0518a67..92daae132492 100644 --- a/arch/powerpc/include/asm/kvm_fpu.h +++ b/arch/powerpc/include/asm/kvm_fpu.h | |||
@@ -82,7 +82,7 @@ FPD_THREE_IN(fmadd) | |||
82 | FPD_THREE_IN(fnmsub) | 82 | FPD_THREE_IN(fnmsub) |
83 | FPD_THREE_IN(fnmadd) | 83 | FPD_THREE_IN(fnmadd) |
84 | 84 | ||
85 | extern void kvm_cvt_fd(u32 *from, u64 *to, u64 *fpscr); | 85 | extern void kvm_cvt_fd(u32 *from, u64 *to); |
86 | extern void kvm_cvt_df(u64 *from, u32 *to, u64 *fpscr); | 86 | extern void kvm_cvt_df(u64 *from, u32 *to); |
87 | 87 | ||
88 | #endif | 88 | #endif |
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h index b0b23c007d6e..186f150b9b89 100644 --- a/arch/powerpc/include/asm/kvm_host.h +++ b/arch/powerpc/include/asm/kvm_host.h | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
26 | #include <linux/types.h> | 26 | #include <linux/types.h> |
27 | #include <linux/kvm_types.h> | 27 | #include <linux/kvm_types.h> |
28 | #include <linux/kvm_para.h> | ||
28 | #include <asm/kvm_asm.h> | 29 | #include <asm/kvm_asm.h> |
29 | 30 | ||
30 | #define KVM_MAX_VCPUS 1 | 31 | #define KVM_MAX_VCPUS 1 |
@@ -41,12 +42,17 @@ | |||
41 | 42 | ||
42 | #define HPTEG_CACHE_NUM (1 << 15) | 43 | #define HPTEG_CACHE_NUM (1 << 15) |
43 | #define HPTEG_HASH_BITS_PTE 13 | 44 | #define HPTEG_HASH_BITS_PTE 13 |
45 | #define HPTEG_HASH_BITS_PTE_LONG 12 | ||
44 | #define HPTEG_HASH_BITS_VPTE 13 | 46 | #define HPTEG_HASH_BITS_VPTE 13 |
45 | #define HPTEG_HASH_BITS_VPTE_LONG 5 | 47 | #define HPTEG_HASH_BITS_VPTE_LONG 5 |
46 | #define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE) | 48 | #define HPTEG_HASH_NUM_PTE (1 << HPTEG_HASH_BITS_PTE) |
49 | #define HPTEG_HASH_NUM_PTE_LONG (1 << HPTEG_HASH_BITS_PTE_LONG) | ||
47 | #define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE) | 50 | #define HPTEG_HASH_NUM_VPTE (1 << HPTEG_HASH_BITS_VPTE) |
48 | #define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG) | 51 | #define HPTEG_HASH_NUM_VPTE_LONG (1 << HPTEG_HASH_BITS_VPTE_LONG) |
49 | 52 | ||
53 | /* Physical Address Mask - allowed range of real mode RAM access */ | ||
54 | #define KVM_PAM 0x0fffffffffffffffULL | ||
55 | |||
50 | struct kvm; | 56 | struct kvm; |
51 | struct kvm_run; | 57 | struct kvm_run; |
52 | struct kvm_vcpu; | 58 | struct kvm_vcpu; |
@@ -159,8 +165,10 @@ struct kvmppc_mmu { | |||
159 | 165 | ||
160 | struct hpte_cache { | 166 | struct hpte_cache { |
161 | struct hlist_node list_pte; | 167 | struct hlist_node list_pte; |
168 | struct hlist_node list_pte_long; | ||
162 | struct hlist_node list_vpte; | 169 | struct hlist_node list_vpte; |
163 | struct hlist_node list_vpte_long; | 170 | struct hlist_node list_vpte_long; |
171 | struct rcu_head rcu_head; | ||
164 | u64 host_va; | 172 | u64 host_va; |
165 | u64 pfn; | 173 | u64 pfn; |
166 | ulong slot; | 174 | ulong slot; |
@@ -210,28 +218,24 @@ struct kvm_vcpu_arch { | |||
210 | u32 cr; | 218 | u32 cr; |
211 | #endif | 219 | #endif |
212 | 220 | ||
213 | ulong msr; | ||
214 | #ifdef CONFIG_PPC_BOOK3S | 221 | #ifdef CONFIG_PPC_BOOK3S |
215 | ulong shadow_msr; | 222 | ulong shadow_msr; |
216 | ulong hflags; | 223 | ulong hflags; |
217 | ulong guest_owned_ext; | 224 | ulong guest_owned_ext; |
218 | #endif | 225 | #endif |
226 | u32 vrsave; /* also USPRG0 */ | ||
219 | u32 mmucr; | 227 | u32 mmucr; |
220 | ulong sprg0; | ||
221 | ulong sprg1; | ||
222 | ulong sprg2; | ||
223 | ulong sprg3; | ||
224 | ulong sprg4; | 228 | ulong sprg4; |
225 | ulong sprg5; | 229 | ulong sprg5; |
226 | ulong sprg6; | 230 | ulong sprg6; |
227 | ulong sprg7; | 231 | ulong sprg7; |
228 | ulong srr0; | ||
229 | ulong srr1; | ||
230 | ulong csrr0; | 232 | ulong csrr0; |
231 | ulong csrr1; | 233 | ulong csrr1; |
232 | ulong dsrr0; | 234 | ulong dsrr0; |
233 | ulong dsrr1; | 235 | ulong dsrr1; |
234 | ulong dear; | 236 | ulong mcsrr0; |
237 | ulong mcsrr1; | ||
238 | ulong mcsr; | ||
235 | ulong esr; | 239 | ulong esr; |
236 | u32 dec; | 240 | u32 dec; |
237 | u32 decar; | 241 | u32 decar; |
@@ -255,6 +259,7 @@ struct kvm_vcpu_arch { | |||
255 | u32 dbsr; | 259 | u32 dbsr; |
256 | 260 | ||
257 | #ifdef CONFIG_KVM_EXIT_TIMING | 261 | #ifdef CONFIG_KVM_EXIT_TIMING |
262 | struct mutex exit_timing_lock; | ||
258 | struct kvmppc_exit_timing timing_exit; | 263 | struct kvmppc_exit_timing timing_exit; |
259 | struct kvmppc_exit_timing timing_last_enter; | 264 | struct kvmppc_exit_timing timing_last_enter; |
260 | u32 last_exit_type; | 265 | u32 last_exit_type; |
@@ -290,12 +295,17 @@ struct kvm_vcpu_arch { | |||
290 | struct tasklet_struct tasklet; | 295 | struct tasklet_struct tasklet; |
291 | u64 dec_jiffies; | 296 | u64 dec_jiffies; |
292 | unsigned long pending_exceptions; | 297 | unsigned long pending_exceptions; |
298 | struct kvm_vcpu_arch_shared *shared; | ||
299 | unsigned long magic_page_pa; /* phys addr to map the magic page to */ | ||
300 | unsigned long magic_page_ea; /* effect. addr to map the magic page to */ | ||
293 | 301 | ||
294 | #ifdef CONFIG_PPC_BOOK3S | 302 | #ifdef CONFIG_PPC_BOOK3S |
295 | struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE]; | 303 | struct hlist_head hpte_hash_pte[HPTEG_HASH_NUM_PTE]; |
304 | struct hlist_head hpte_hash_pte_long[HPTEG_HASH_NUM_PTE_LONG]; | ||
296 | struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE]; | 305 | struct hlist_head hpte_hash_vpte[HPTEG_HASH_NUM_VPTE]; |
297 | struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG]; | 306 | struct hlist_head hpte_hash_vpte_long[HPTEG_HASH_NUM_VPTE_LONG]; |
298 | int hpte_cache_count; | 307 | int hpte_cache_count; |
308 | spinlock_t mmu_lock; | ||
299 | #endif | 309 | #endif |
300 | }; | 310 | }; |
301 | 311 | ||
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h index 2d48f6a63d0b..50533f9adf40 100644 --- a/arch/powerpc/include/asm/kvm_para.h +++ b/arch/powerpc/include/asm/kvm_para.h | |||
@@ -20,16 +20,153 @@ | |||
20 | #ifndef __POWERPC_KVM_PARA_H__ | 20 | #ifndef __POWERPC_KVM_PARA_H__ |
21 | #define __POWERPC_KVM_PARA_H__ | 21 | #define __POWERPC_KVM_PARA_H__ |
22 | 22 | ||
23 | #include <linux/types.h> | ||
24 | |||
25 | struct kvm_vcpu_arch_shared { | ||
26 | __u64 scratch1; | ||
27 | __u64 scratch2; | ||
28 | __u64 scratch3; | ||
29 | __u64 critical; /* Guest may not get interrupts if == r1 */ | ||
30 | __u64 sprg0; | ||
31 | __u64 sprg1; | ||
32 | __u64 sprg2; | ||
33 | __u64 sprg3; | ||
34 | __u64 srr0; | ||
35 | __u64 srr1; | ||
36 | __u64 dar; | ||
37 | __u64 msr; | ||
38 | __u32 dsisr; | ||
39 | __u32 int_pending; /* Tells the guest if we have an interrupt */ | ||
40 | __u32 sr[16]; | ||
41 | }; | ||
42 | |||
43 | #define KVM_SC_MAGIC_R0 0x4b564d21 /* "KVM!" */ | ||
44 | #define HC_VENDOR_KVM (42 << 16) | ||
45 | #define HC_EV_SUCCESS 0 | ||
46 | #define HC_EV_UNIMPLEMENTED 12 | ||
47 | |||
48 | #define KVM_FEATURE_MAGIC_PAGE 1 | ||
49 | |||
50 | #define KVM_MAGIC_FEAT_SR (1 << 0) | ||
51 | |||
23 | #ifdef __KERNEL__ | 52 | #ifdef __KERNEL__ |
24 | 53 | ||
54 | #ifdef CONFIG_KVM_GUEST | ||
55 | |||
56 | #include <linux/of.h> | ||
57 | |||
58 | static inline int kvm_para_available(void) | ||
59 | { | ||
60 | struct device_node *hyper_node; | ||
61 | |||
62 | hyper_node = of_find_node_by_path("/hypervisor"); | ||
63 | if (!hyper_node) | ||
64 | return 0; | ||
65 | |||
66 | if (!of_device_is_compatible(hyper_node, "linux,kvm")) | ||
67 | return 0; | ||
68 | |||
69 | return 1; | ||
70 | } | ||
71 | |||
72 | extern unsigned long kvm_hypercall(unsigned long *in, | ||
73 | unsigned long *out, | ||
74 | unsigned long nr); | ||
75 | |||
76 | #else | ||
77 | |||
25 | static inline int kvm_para_available(void) | 78 | static inline int kvm_para_available(void) |
26 | { | 79 | { |
27 | return 0; | 80 | return 0; |
28 | } | 81 | } |
29 | 82 | ||
83 | static unsigned long kvm_hypercall(unsigned long *in, | ||
84 | unsigned long *out, | ||
85 | unsigned long nr) | ||
86 | { | ||
87 | return HC_EV_UNIMPLEMENTED; | ||
88 | } | ||
89 | |||
90 | #endif | ||
91 | |||
92 | static inline long kvm_hypercall0_1(unsigned int nr, unsigned long *r2) | ||
93 | { | ||
94 | unsigned long in[8]; | ||
95 | unsigned long out[8]; | ||
96 | unsigned long r; | ||
97 | |||
98 | r = kvm_hypercall(in, out, nr | HC_VENDOR_KVM); | ||
99 | *r2 = out[0]; | ||
100 | |||
101 | return r; | ||
102 | } | ||
103 | |||
104 | static inline long kvm_hypercall0(unsigned int nr) | ||
105 | { | ||
106 | unsigned long in[8]; | ||
107 | unsigned long out[8]; | ||
108 | |||
109 | return kvm_hypercall(in, out, nr | HC_VENDOR_KVM); | ||
110 | } | ||
111 | |||
112 | static inline long kvm_hypercall1(unsigned int nr, unsigned long p1) | ||
113 | { | ||
114 | unsigned long in[8]; | ||
115 | unsigned long out[8]; | ||
116 | |||
117 | in[0] = p1; | ||
118 | return kvm_hypercall(in, out, nr | HC_VENDOR_KVM); | ||
119 | } | ||
120 | |||
121 | static inline long kvm_hypercall2(unsigned int nr, unsigned long p1, | ||
122 | unsigned long p2) | ||
123 | { | ||
124 | unsigned long in[8]; | ||
125 | unsigned long out[8]; | ||
126 | |||
127 | in[0] = p1; | ||
128 | in[1] = p2; | ||
129 | return kvm_hypercall(in, out, nr | HC_VENDOR_KVM); | ||
130 | } | ||
131 | |||
132 | static inline long kvm_hypercall3(unsigned int nr, unsigned long p1, | ||
133 | unsigned long p2, unsigned long p3) | ||
134 | { | ||
135 | unsigned long in[8]; | ||
136 | unsigned long out[8]; | ||
137 | |||
138 | in[0] = p1; | ||
139 | in[1] = p2; | ||
140 | in[2] = p3; | ||
141 | return kvm_hypercall(in, out, nr | HC_VENDOR_KVM); | ||
142 | } | ||
143 | |||
144 | static inline long kvm_hypercall4(unsigned int nr, unsigned long p1, | ||
145 | unsigned long p2, unsigned long p3, | ||
146 | unsigned long p4) | ||
147 | { | ||
148 | unsigned long in[8]; | ||
149 | unsigned long out[8]; | ||
150 | |||
151 | in[0] = p1; | ||
152 | in[1] = p2; | ||
153 | in[2] = p3; | ||
154 | in[3] = p4; | ||
155 | return kvm_hypercall(in, out, nr | HC_VENDOR_KVM); | ||
156 | } | ||
157 | |||
158 | |||
30 | static inline unsigned int kvm_arch_para_features(void) | 159 | static inline unsigned int kvm_arch_para_features(void) |
31 | { | 160 | { |
32 | return 0; | 161 | unsigned long r; |
162 | |||
163 | if (!kvm_para_available()) | ||
164 | return 0; | ||
165 | |||
166 | if(kvm_hypercall0_1(KVM_HC_FEATURES, &r)) | ||
167 | return 0; | ||
168 | |||
169 | return r; | ||
33 | } | 170 | } |
34 | 171 | ||
35 | #endif /* __KERNEL__ */ | 172 | #endif /* __KERNEL__ */ |
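
The guest side now probes the /hypervisor node and funnels every paravirt call through kvm_hypercall(), with the KVM vendor code folded into the hypercall number by the wrappers above. A sketch of feature detection followed by a magic-page map request; KVM_HC_FEATURES and KVM_HC_PPC_MAP_MAGIC_PAGE are assumed to come from the generic kvm_para hypercall numbers, which are not part of this header:

    #include <linux/errno.h>
    #include <linux/kvm_para.h>

    /* guest-side sketch: map the shared magic page at ea/pa */
    static int demo_enable_magic_page(unsigned long ea, unsigned long pa)
    {
    	if (!kvm_para_available())
    		return -ENODEV;

    	if (!(kvm_arch_para_features() & (1 << KVM_FEATURE_MAGIC_PAGE)))
    		return -ENODEV;

    	if (kvm_hypercall2(KVM_HC_PPC_MAP_MAGIC_PAGE, ea, pa) != HC_EV_SUCCESS)
    		return -EIO;

    	return 0;
    }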
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h index 18d139ec2d22..9345238edecf 100644 --- a/arch/powerpc/include/asm/kvm_ppc.h +++ b/arch/powerpc/include/asm/kvm_ppc.h | |||
@@ -61,6 +61,7 @@ extern int kvmppc_emulate_instruction(struct kvm_run *run, | |||
61 | struct kvm_vcpu *vcpu); | 61 | struct kvm_vcpu *vcpu); |
62 | extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); | 62 | extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu); |
63 | extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); | 63 | extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu); |
64 | extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb); | ||
64 | 65 | ||
65 | /* Core-specific hooks */ | 66 | /* Core-specific hooks */ |
66 | 67 | ||
@@ -107,6 +108,7 @@ extern int kvmppc_booke_init(void); | |||
107 | extern void kvmppc_booke_exit(void); | 108 | extern void kvmppc_booke_exit(void); |
108 | 109 | ||
109 | extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); | 110 | extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu); |
111 | extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu); | ||
110 | 112 | ||
111 | /* | 113 | /* |
112 | * Cuts out inst bits with ordering according to spec. | 114 | * Cuts out inst bits with ordering according to spec. |
@@ -141,4 +143,12 @@ static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value) | |||
141 | return r; | 143 | return r; |
142 | } | 144 | } |
143 | 145 | ||
146 | void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); | ||
147 | int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); | ||
148 | |||
149 | void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); | ||
150 | int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs); | ||
151 | |||
152 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid); | ||
153 | |||
144 | #endif /* __POWERPC_KVM_PPC_H__ */ | 154 | #endif /* __POWERPC_KVM_PPC_H__ */ |
diff --git a/arch/powerpc/include/asm/lppaca.h b/arch/powerpc/include/asm/lppaca.h index 14b592dfb4e8..e0298d26ce5d 100644 --- a/arch/powerpc/include/asm/lppaca.h +++ b/arch/powerpc/include/asm/lppaca.h | |||
@@ -33,9 +33,25 @@ | |||
33 | // | 33 | // |
34 | //---------------------------------------------------------------------------- | 34 | //---------------------------------------------------------------------------- |
35 | #include <linux/cache.h> | 35 | #include <linux/cache.h> |
36 | #include <linux/threads.h> | ||
36 | #include <asm/types.h> | 37 | #include <asm/types.h> |
37 | #include <asm/mmu.h> | 38 | #include <asm/mmu.h> |
38 | 39 | ||
40 | /* | ||
41 | * We only have to have statically allocated lppaca structs on | ||
42 | * legacy iSeries, which supports at most 64 cpus. | ||
43 | */ | ||
44 | #ifdef CONFIG_PPC_ISERIES | ||
45 | #if NR_CPUS < 64 | ||
46 | #define NR_LPPACAS NR_CPUS | ||
47 | #else | ||
48 | #define NR_LPPACAS 64 | ||
49 | #endif | ||
50 | #else /* not iSeries */ | ||
51 | #define NR_LPPACAS 1 | ||
52 | #endif | ||
53 | |||
54 | |||
39 | /* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k | 55 | /* The Hypervisor barfs if the lppaca crosses a page boundary. A 1k |
40 | * alignment is sufficient to prevent this */ | 56 | * alignment is sufficient to prevent this */ |
41 | struct lppaca { | 57 | struct lppaca { |
@@ -62,7 +78,10 @@ struct lppaca { | |||
62 | volatile u32 dyn_pir; // Dynamic ProcIdReg value x20-x23 | 78 | volatile u32 dyn_pir; // Dynamic ProcIdReg value x20-x23 |
63 | u32 dsei_data; // DSEI data x24-x27 | 79 | u32 dsei_data; // DSEI data x24-x27 |
64 | u64 sprg3; // SPRG3 value x28-x2F | 80 | u64 sprg3; // SPRG3 value x28-x2F |
65 | u8 reserved3[80]; // Reserved x30-x7F | 81 | u8 reserved3[40]; // Reserved x30-x57 |
82 | volatile u8 vphn_assoc_counts[8]; // Virtual processor home node | ||
83 | // associativity change counters x58-x5F | ||
84 | u8 reserved4[32]; // Reserved x60-x7F | ||
66 | 85 | ||
67 | //============================================================================= | 86 | //============================================================================= |
68 | // CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data | 87 | // CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data |
@@ -86,7 +105,7 @@ struct lppaca { | |||
86 | // processing of external interrupts. Note that PLIC will store the | 105 | // processing of external interrupts. Note that PLIC will store the |
87 | // XIRR directly into the xXirrValue field so that another XIRR will | 106 | // XIRR directly into the xXirrValue field so that another XIRR will |
88 | // not be presented until this one clears. The layout of the low | 107 | // not be presented until this one clears. The layout of the low |
89 | // 4-bytes of this Dword is upto SLIC - PLIC just checks whether the | 108 | // 4-bytes of this Dword is up to SLIC - PLIC just checks whether the |
90 | // entire Dword is zero or not. A non-zero value in the low order | 109 | // entire Dword is zero or not. A non-zero value in the low order |
91 | // 2-bytes will result in SLIC being granted the highest thread | 110 | // 2-bytes will result in SLIC being granted the highest thread |
92 | // priority upon return. A 0 will return to SLIC as medium priority. | 111 | // priority upon return. A 0 will return to SLIC as medium priority. |
@@ -153,6 +172,8 @@ struct lppaca { | |||
153 | 172 | ||
154 | extern struct lppaca lppaca[]; | 173 | extern struct lppaca lppaca[]; |
155 | 174 | ||
175 | #define lppaca_of(cpu) (*paca[cpu].lppaca_ptr) | ||
176 | |||
156 | /* | 177 | /* |
157 | * SLB shadow buffer structure as defined in the PAPR. The save_area | 178 | * SLB shadow buffer structure as defined in the PAPR. The save_area |
158 | * contains adjacent ESID and VSID pairs for each shadowed SLB. The | 179 | * contains adjacent ESID and VSID pairs for each shadowed SLB. The |
@@ -170,6 +191,35 @@ struct slb_shadow { | |||
170 | 191 | ||
171 | extern struct slb_shadow slb_shadow[]; | 192 | extern struct slb_shadow slb_shadow[]; |
172 | 193 | ||
194 | /* | ||
195 | * Layout of entries in the hypervisor's dispatch trace log buffer. | ||
196 | */ | ||
197 | struct dtl_entry { | ||
198 | u8 dispatch_reason; | ||
199 | u8 preempt_reason; | ||
200 | u16 processor_id; | ||
201 | u32 enqueue_to_dispatch_time; | ||
202 | u32 ready_to_enqueue_time; | ||
203 | u32 waiting_to_ready_time; | ||
204 | u64 timebase; | ||
205 | u64 fault_addr; | ||
206 | u64 srr0; | ||
207 | u64 srr1; | ||
208 | }; | ||
209 | |||
210 | #define DISPATCH_LOG_BYTES 4096 /* bytes per cpu */ | ||
211 | #define N_DISPATCH_LOG (DISPATCH_LOG_BYTES / sizeof(struct dtl_entry)) | ||
212 | |||
213 | extern struct kmem_cache *dtl_cache; | ||
214 | |||
215 | /* | ||
216 | * When CONFIG_VIRT_CPU_ACCOUNTING = y, the cpu accounting code controls | ||
217 | * reading from the dispatch trace log. If other code wants to consume | ||
218 | * DTL entries, it can set this pointer to a function that will get | ||
219 | * called once for each DTL entry that gets processed. | ||
220 | */ | ||
221 | extern void (*dtl_consumer)(struct dtl_entry *entry, u64 index); | ||
222 | |||
173 | #endif /* CONFIG_PPC_BOOK3S */ | 223 | #endif /* CONFIG_PPC_BOOK3S */ |
174 | #endif /* __KERNEL__ */ | 224 | #endif /* __KERNEL__ */ |
175 | #endif /* _ASM_POWERPC_LPPACA_H */ | 225 | #endif /* _ASM_POWERPC_LPPACA_H */ |
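
The dispatch trace log additions also expose a hook: code that wants the entries can point dtl_consumer at a callback, which the CONFIG_VIRT_CPU_ACCOUNTING read path then calls once per struct dtl_entry. A minimal sketch of a consumer counting long dispatch latencies; the threshold is arbitrary:

    #include <linux/kernel.h>
    #include <asm/lppaca.h>

    static unsigned long demo_slow_dispatches;

    /* called once per consumed DTL entry; index is the running entry count */
    static void demo_dtl_consume(struct dtl_entry *dtl, u64 index)
    {
    	if (dtl->enqueue_to_dispatch_time > 1000)	/* timebase ticks */
    		demo_slow_dispatches++;
    }

    static void demo_dtl_hook(void)
    {
    	dtl_consumer = demo_dtl_consume;
    }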
diff --git a/arch/powerpc/include/asm/machdep.h b/arch/powerpc/include/asm/machdep.h index adc8e6cdf339..47cacddb14cf 100644 --- a/arch/powerpc/include/asm/machdep.h +++ b/arch/powerpc/include/asm/machdep.h | |||
@@ -27,24 +27,7 @@ struct iommu_table; | |||
27 | struct rtc_time; | 27 | struct rtc_time; |
28 | struct file; | 28 | struct file; |
29 | struct pci_controller; | 29 | struct pci_controller; |
30 | #ifdef CONFIG_KEXEC | ||
31 | struct kimage; | 30 | struct kimage; |
32 | #endif | ||
33 | |||
34 | #ifdef CONFIG_SMP | ||
35 | struct smp_ops_t { | ||
36 | void (*message_pass)(int target, int msg); | ||
37 | int (*probe)(void); | ||
38 | void (*kick_cpu)(int nr); | ||
39 | void (*setup_cpu)(int nr); | ||
40 | void (*take_timebase)(void); | ||
41 | void (*give_timebase)(void); | ||
42 | int (*cpu_enable)(unsigned int nr); | ||
43 | int (*cpu_disable)(void); | ||
44 | void (*cpu_die)(unsigned int nr); | ||
45 | int (*cpu_bootable)(unsigned int nr); | ||
46 | }; | ||
47 | #endif | ||
48 | 31 | ||
49 | struct machdep_calls { | 32 | struct machdep_calls { |
50 | char *name; | 33 | char *name; |
@@ -72,7 +55,7 @@ struct machdep_calls { | |||
72 | int psize, int ssize); | 55 | int psize, int ssize); |
73 | void (*flush_hash_range)(unsigned long number, int local); | 56 | void (*flush_hash_range)(unsigned long number, int local); |
74 | 57 | ||
75 | /* special for kexec, to be called in real mode, linar mapping is | 58 | /* special for kexec, to be called in real mode, linear mapping is |
76 | * destroyed as well */ | 59 | * destroyed as well */ |
77 | void (*hpte_clear_all)(void); | 60 | void (*hpte_clear_all)(void); |
78 | 61 | ||
@@ -102,6 +85,9 @@ struct machdep_calls { | |||
102 | void (*pci_dma_dev_setup)(struct pci_dev *dev); | 85 | void (*pci_dma_dev_setup)(struct pci_dev *dev); |
103 | void (*pci_dma_bus_setup)(struct pci_bus *bus); | 86 | void (*pci_dma_bus_setup)(struct pci_bus *bus); |
104 | 87 | ||
88 | /* Platform set_dma_mask override */ | ||
89 | int (*dma_set_mask)(struct device *dev, u64 dma_mask); | ||
90 | |||
105 | int (*probe)(void); | 91 | int (*probe)(void); |
106 | void (*setup_arch)(void); /* Optional, may be NULL */ | 92 | void (*setup_arch)(void); /* Optional, may be NULL */ |
107 | void (*init_early)(void); | 93 | void (*init_early)(void); |
@@ -115,9 +101,6 @@ struct machdep_calls { | |||
115 | * If for some reason there is no irq, but the interrupt | 101 | * If for some reason there is no irq, but the interrupt |
116 | * shouldn't be counted as spurious, return NO_IRQ_IGNORE. */ | 102 | * shouldn't be counted as spurious, return NO_IRQ_IGNORE. */ |
117 | unsigned int (*get_irq)(void); | 103 | unsigned int (*get_irq)(void); |
118 | #ifdef CONFIG_KEXEC | ||
119 | void (*kexec_cpu_down)(int crash_shutdown, int secondary); | ||
120 | #endif | ||
121 | 104 | ||
122 | /* PCI stuff */ | 105 | /* PCI stuff */ |
123 | /* Called after scanning the bus, before allocating resources */ | 106 | /* Called after scanning the bus, before allocating resources */ |
@@ -234,11 +217,7 @@ struct machdep_calls { | |||
234 | void (*machine_shutdown)(void); | 217 | void (*machine_shutdown)(void); |
235 | 218 | ||
236 | #ifdef CONFIG_KEXEC | 219 | #ifdef CONFIG_KEXEC |
237 | /* Called to do the minimal shutdown needed to run a kexec'd kernel | 220 | void (*kexec_cpu_down)(int crash_shutdown, int secondary); |
238 | * to run successfully. | ||
239 | * XXX Should we move this one out of kexec scope? | ||
240 | */ | ||
241 | void (*machine_crash_shutdown)(struct pt_regs *regs); | ||
242 | 221 | ||
243 | /* Called to do what every setup is needed on image and the | 222 | /* Called to do what every setup is needed on image and the |
244 | * reboot code buffer. Returns 0 on success. | 223 | * reboot code buffer. Returns 0 on success. |
@@ -247,9 +226,6 @@ struct machdep_calls { | |||
247 | */ | 226 | */ |
248 | int (*machine_kexec_prepare)(struct kimage *image); | 227 | int (*machine_kexec_prepare)(struct kimage *image); |
249 | 228 | ||
250 | /* Called to handle any machine specific cleanup on image */ | ||
251 | void (*machine_kexec_cleanup)(struct kimage *image); | ||
252 | |||
253 | /* Called to perform the _real_ kexec. | 229 | /* Called to perform the _real_ kexec. |
254 | * Do NOT allocate memory or fail here. We are past the point of | 230 | * Do NOT allocate memory or fail here. We are past the point of |
255 | * no return. | 231 | * no return. |
@@ -276,7 +252,7 @@ struct machdep_calls { | |||
276 | 252 | ||
277 | extern void e500_idle(void); | 253 | extern void e500_idle(void); |
278 | extern void power4_idle(void); | 254 | extern void power4_idle(void); |
279 | extern void power4_cpu_offline_powersave(void); | 255 | extern void power7_idle(void); |
280 | extern void ppc6xx_idle(void); | 256 | extern void ppc6xx_idle(void); |
281 | extern void book3e_idle(void); | 257 | extern void book3e_idle(void); |
282 | 258 | ||
@@ -321,14 +297,6 @@ extern sys_ctrler_t sys_ctrler; | |||
321 | 297 | ||
322 | #endif /* CONFIG_PPC_PMAC */ | 298 | #endif /* CONFIG_PPC_PMAC */ |
323 | 299 | ||
324 | extern void setup_pci_ptrs(void); | ||
325 | |||
326 | #ifdef CONFIG_SMP | ||
327 | /* Poor default implementations */ | ||
328 | extern void __devinit smp_generic_give_timebase(void); | ||
329 | extern void __devinit smp_generic_take_timebase(void); | ||
330 | #endif /* CONFIG_SMP */ | ||
331 | |||
332 | 300 | ||
333 | /* Functions to produce codes on the leds. | 301 | /* Functions to produce codes on the leds. |
334 | * The SRC code should be unique for the message category and should | 302 | * The SRC code should be unique for the message category and should |
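
Among the machdep.h changes above, the new ppc_md.dma_set_mask hook lets a platform veto or adjust DMA masks before the generic code accepts them. A minimal sketch of a platform wiring it up; the platform name and the 40-bit limit are made up:

    #include <linux/module.h>
    #include <linux/device.h>
    #include <linux/dma-mapping.h>
    #include <asm/machdep.h>

    /* hypothetical platform: devices behind its PHB address at most 40 bits */
    static int demo_plat_dma_set_mask(struct device *dev, u64 dma_mask)
    {
    	if (dma_mask > DMA_BIT_MASK(40))
    		return -EIO;
    	*dev->dma_mask = dma_mask;
    	return 0;
    }

    define_machine(demo_plat) {
    	.name		= "demo-platform",
    	.dma_set_mask	= demo_plat_dma_set_mask,
    };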
diff --git a/arch/powerpc/include/asm/memblock.h b/arch/powerpc/include/asm/memblock.h index 3c29728b56b1..43efc345065e 100644 --- a/arch/powerpc/include/asm/memblock.h +++ b/arch/powerpc/include/asm/memblock.h | |||
@@ -5,11 +5,4 @@ | |||
5 | 5 | ||
6 | #define MEMBLOCK_DBG(fmt...) udbg_printf(fmt) | 6 | #define MEMBLOCK_DBG(fmt...) udbg_printf(fmt) |
7 | 7 | ||
8 | #ifdef CONFIG_PPC32 | ||
9 | extern phys_addr_t lowmem_end_addr; | ||
10 | #define MEMBLOCK_REAL_LIMIT lowmem_end_addr | ||
11 | #else | ||
12 | #define MEMBLOCK_REAL_LIMIT 0 | ||
13 | #endif | ||
14 | |||
15 | #endif /* _ASM_POWERPC_MEMBLOCK_H */ | 8 | #endif /* _ASM_POWERPC_MEMBLOCK_H */ |
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h index 87a1d787c5b6..3ea0f9a259d8 100644 --- a/arch/powerpc/include/asm/mmu-book3e.h +++ b/arch/powerpc/include/asm/mmu-book3e.h | |||
@@ -40,9 +40,10 @@ | |||
40 | 40 | ||
41 | /* MAS registers bit definitions */ | 41 | /* MAS registers bit definitions */ |
42 | 42 | ||
43 | #define MAS0_TLBSEL(x) ((x << 28) & 0x30000000) | 43 | #define MAS0_TLBSEL(x) (((x) << 28) & 0x30000000) |
44 | #define MAS0_ESEL(x) ((x << 16) & 0x0FFF0000) | 44 | #define MAS0_ESEL(x) (((x) << 16) & 0x0FFF0000) |
45 | #define MAS0_NV(x) ((x) & 0x00000FFF) | 45 | #define MAS0_NV(x) ((x) & 0x00000FFF) |
46 | #define MAS0_ESEL_MASK 0x0FFF0000 | ||
46 | #define MAS0_HES 0x00004000 | 47 | #define MAS0_HES 0x00004000 |
47 | #define MAS0_WQ_ALLWAYS 0x00000000 | 48 | #define MAS0_WQ_ALLWAYS 0x00000000 |
48 | #define MAS0_WQ_COND 0x00001000 | 49 | #define MAS0_WQ_COND 0x00001000 |
@@ -50,12 +51,12 @@ | |||
50 | 51 | ||
51 | #define MAS1_VALID 0x80000000 | 52 | #define MAS1_VALID 0x80000000 |
52 | #define MAS1_IPROT 0x40000000 | 53 | #define MAS1_IPROT 0x40000000 |
53 | #define MAS1_TID(x) ((x << 16) & 0x3FFF0000) | 54 | #define MAS1_TID(x) (((x) << 16) & 0x3FFF0000) |
54 | #define MAS1_IND 0x00002000 | 55 | #define MAS1_IND 0x00002000 |
55 | #define MAS1_TS 0x00001000 | 56 | #define MAS1_TS 0x00001000 |
56 | #define MAS1_TSIZE_MASK 0x00000f80 | 57 | #define MAS1_TSIZE_MASK 0x00000f80 |
57 | #define MAS1_TSIZE_SHIFT 7 | 58 | #define MAS1_TSIZE_SHIFT 7 |
58 | #define MAS1_TSIZE(x) ((x << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) | 59 | #define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) |
59 | 60 | ||
60 | #define MAS2_EPN 0xFFFFF000 | 61 | #define MAS2_EPN 0xFFFFF000 |
61 | #define MAS2_X0 0x00000040 | 62 | #define MAS2_X0 0x00000040 |
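
The extra parentheses added to the MAS0/MAS1 macros above matter once callers pass expressions instead of plain constants, because '<<' binds tighter than '&'. A small stand-alone illustration (values arbitrary):

    #include <assert.h>

    #define MAS0_ESEL_OLD(x)	((x << 16) & 0x0FFF0000)
    #define MAS0_ESEL_NEW(x)	(((x) << 16) & 0x0FFF0000)

    int main(void)
    {
    	unsigned int victim = 0x45;

    	/* old macro: the '&' argument is masked with (0x3f << 16),
    	 * not shifted, so the field encoding is wrong */
    	assert(MAS0_ESEL_OLD(victim & 0x3f) == (victim & 0x003F0000));
    	/* new macro: (victim & 0x3f) is shifted as intended */
    	assert(MAS0_ESEL_NEW(victim & 0x3f) == 0x00050000);
    	return 0;
    }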
@@ -114,6 +115,17 @@ | |||
114 | 115 | ||
115 | #define MAS7_RPN 0xFFFFFFFF | 116 | #define MAS7_RPN 0xFFFFFFFF |
116 | 117 | ||
118 | /* Bit definitions for MMUCFG */ | ||
119 | #define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */ | ||
120 | #define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */ | ||
121 | #define MMUCFG_MAVN_V2 0x00000001 /* v2.0 */ | ||
122 | #define MMUCFG_NTLBS 0x0000000c /* Number of TLBs */ | ||
123 | #define MMUCFG_PIDSIZE 0x000007c0 /* PID Reg Size */ | ||
124 | #define MMUCFG_TWC 0x00008000 /* TLB Write Conditional (v2.0) */ | ||
125 | #define MMUCFG_LRAT 0x00010000 /* LRAT Supported (v2.0) */ | ||
126 | #define MMUCFG_RASIZE 0x00fe0000 /* Real Addr Size */ | ||
127 | #define MMUCFG_LPIDSIZE 0x0f000000 /* LPID Reg Size */ | ||
128 | |||
117 | /* Bit definitions for MMUCSR0 */ | 129 | /* Bit definitions for MMUCSR0 */ |
118 | #define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */ | 130 | #define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */ |
119 | #define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */ | 131 | #define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */ |
@@ -126,6 +138,21 @@ | |||
126 | #define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */ | 138 | #define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */ |
127 | #define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */ | 139 | #define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */ |
128 | 140 | ||
141 | /* MMUCFG bits */ | ||
142 | #define MMUCFG_MAVN_NASK 0x00000003 | ||
143 | #define MMUCFG_MAVN_V1_0 0x00000000 | ||
144 | #define MMUCFG_MAVN_V2_0 0x00000001 | ||
145 | #define MMUCFG_NTLB_MASK 0x0000000c | ||
146 | #define MMUCFG_NTLB_SHIFT 2 | ||
147 | #define MMUCFG_PIDSIZE_MASK 0x000007c0 | ||
148 | #define MMUCFG_PIDSIZE_SHIFT 6 | ||
149 | #define MMUCFG_TWC 0x00008000 | ||
150 | #define MMUCFG_LRAT 0x00010000 | ||
151 | #define MMUCFG_RASIZE_MASK 0x00fe0000 | ||
152 | #define MMUCFG_RASIZE_SHIFT 17 | ||
153 | #define MMUCFG_LPIDSIZE_MASK 0x0f000000 | ||
154 | #define MMUCFG_LPIDSIZE_SHIFT 24 | ||
155 | |||
129 | /* TLBnCFG encoding */ | 156 | /* TLBnCFG encoding */ |
130 | #define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */ | 157 | #define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */ |
131 | #define TLBnCFG_HES 0x00002000 /* HW select supported */ | 158 | #define TLBnCFG_HES 0x00002000 /* HW select supported */ |
@@ -133,6 +160,10 @@ | |||
133 | #define TLBnCFG_GTWE 0x00010000 /* Guest can write */ | 160 | #define TLBnCFG_GTWE 0x00010000 /* Guest can write */ |
134 | #define TLBnCFG_IND 0x00020000 /* IND entries supported */ | 161 | #define TLBnCFG_IND 0x00020000 /* IND entries supported */ |
135 | #define TLBnCFG_PT 0x00040000 /* Can load from page table */ | 162 | #define TLBnCFG_PT 0x00040000 /* Can load from page table */ |
163 | #define TLBnCFG_MINSIZE 0x00f00000 /* Minimum Page Size (v1.0) */ | ||
164 | #define TLBnCFG_MINSIZE_SHIFT 20 | ||
165 | #define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */ | ||
166 | #define TLBnCFG_MAXSIZE_SHIFT 16 | ||
136 | #define TLBnCFG_ASSOC 0xff000000 /* Associativity */ | 167 | #define TLBnCFG_ASSOC 0xff000000 /* Associativity */ |
137 | 168 | ||
138 | /* TLBnPS encoding */ | 169 | /* TLBnPS encoding */ |
@@ -214,6 +245,10 @@ extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; | |||
214 | extern int mmu_linear_psize; | 245 | extern int mmu_linear_psize; |
215 | extern int mmu_vmemmap_psize; | 246 | extern int mmu_vmemmap_psize; |
216 | 247 | ||
248 | #ifdef CONFIG_PPC64 | ||
249 | extern unsigned long linear_map_top; | ||
250 | #endif | ||
251 | |||
217 | #endif /* !__ASSEMBLY__ */ | 252 | #endif /* !__ASSEMBLY__ */ |
218 | 253 | ||
219 | #endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */ | 254 | #endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */ |
diff --git a/arch/powerpc/include/asm/mmu-hash64.h b/arch/powerpc/include/asm/mmu-hash64.h index acac35d5b382..d865bd909c7d 100644 --- a/arch/powerpc/include/asm/mmu-hash64.h +++ b/arch/powerpc/include/asm/mmu-hash64.h | |||
@@ -27,7 +27,7 @@ | |||
27 | #define STE_VSID_SHIFT 12 | 27 | #define STE_VSID_SHIFT 12 |
28 | 28 | ||
29 | /* Location of cpu0's segment table */ | 29 | /* Location of cpu0's segment table */ |
30 | #define STAB0_PAGE 0x6 | 30 | #define STAB0_PAGE 0x8 |
31 | #define STAB0_OFFSET (STAB0_PAGE << 12) | 31 | #define STAB0_OFFSET (STAB0_PAGE << 12) |
32 | #define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START) | 32 | #define STAB0_PHYS_ADDR (STAB0_OFFSET + PHYSICAL_START) |
33 | 33 | ||
@@ -408,6 +408,7 @@ static inline void subpage_prot_init_new_context(struct mm_struct *mm) { } | |||
408 | #endif /* CONFIG_PPC_SUBPAGE_PROT */ | 408 | #endif /* CONFIG_PPC_SUBPAGE_PROT */ |
409 | 409 | ||
410 | typedef unsigned long mm_context_id_t; | 410 | typedef unsigned long mm_context_id_t; |
411 | struct spinlock; | ||
411 | 412 | ||
412 | typedef struct { | 413 | typedef struct { |
413 | mm_context_id_t id; | 414 | mm_context_id_t id; |
@@ -423,6 +424,11 @@ typedef struct { | |||
423 | #ifdef CONFIG_PPC_SUBPAGE_PROT | 424 | #ifdef CONFIG_PPC_SUBPAGE_PROT |
424 | struct subpage_prot_table spt; | 425 | struct subpage_prot_table spt; |
425 | #endif /* CONFIG_PPC_SUBPAGE_PROT */ | 426 | #endif /* CONFIG_PPC_SUBPAGE_PROT */ |
427 | #ifdef CONFIG_PPC_ICSWX | ||
428 | struct spinlock *cop_lockp; /* guard acop and cop_pid */ | ||
429 | unsigned long acop; /* mask of enabled coprocessor types */ | ||
430 | unsigned int cop_pid; /* pid value used with coprocessors */ | ||
431 | #endif /* CONFIG_PPC_ICSWX */ | ||
426 | } mm_context_t; | 432 | } mm_context_t; |
427 | 433 | ||
428 | 434 | ||
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h index 7ebf42ed84a2..4138b21ae80a 100644 --- a/arch/powerpc/include/asm/mmu.h +++ b/arch/powerpc/include/asm/mmu.h | |||
@@ -2,6 +2,8 @@ | |||
2 | #define _ASM_POWERPC_MMU_H_ | 2 | #define _ASM_POWERPC_MMU_H_ |
3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
4 | 4 | ||
5 | #include <linux/types.h> | ||
6 | |||
5 | #include <asm/asm-compat.h> | 7 | #include <asm/asm-compat.h> |
6 | #include <asm/feature-fixups.h> | 8 | #include <asm/feature-fixups.h> |
7 | 9 | ||
@@ -54,11 +56,6 @@ | |||
54 | */ | 56 | */ |
55 | #define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000) | 57 | #define MMU_FTR_NEED_DTLB_SW_LRU ASM_CONST(0x00200000) |
56 | 58 | ||
57 | /* This indicates that the processor uses the ISA 2.06 server tlbie | ||
58 | * mnemonics | ||
59 | */ | ||
60 | #define MMU_FTR_TLBIE_206 ASM_CONST(0x00400000) | ||
61 | |||
62 | /* Enable use of TLB reservation. Processor should support tlbsrx. | 59 | /* Enable use of TLB reservation. Processor should support tlbsrx. |
63 | * instruction and MAS0[WQ]. | 60 | * instruction and MAS0[WQ]. |
64 | */ | 61 | */ |
@@ -68,6 +65,53 @@ | |||
68 | */ | 65 | */ |
69 | #define MMU_FTR_USE_PAIRED_MAS ASM_CONST(0x01000000) | 66 | #define MMU_FTR_USE_PAIRED_MAS ASM_CONST(0x01000000) |
70 | 67 | ||
68 | /* MMU is SLB-based | ||
69 | */ | ||
70 | #define MMU_FTR_SLB ASM_CONST(0x02000000) | ||
71 | |||
72 | /* Support 16M large pages | ||
73 | */ | ||
74 | #define MMU_FTR_16M_PAGE ASM_CONST(0x04000000) | ||
75 | |||
76 | /* Supports TLBIEL variant | ||
77 | */ | ||
78 | #define MMU_FTR_TLBIEL ASM_CONST(0x08000000) | ||
79 | |||
80 | /* Supports tlbies w/o locking | ||
81 | */ | ||
82 | #define MMU_FTR_LOCKLESS_TLBIE ASM_CONST(0x10000000) | ||
83 | |||
84 | /* Large pages can be marked CI | ||
85 | */ | ||
86 | #define MMU_FTR_CI_LARGE_PAGE ASM_CONST(0x20000000) | ||
87 | |||
88 | /* 1T segments available | ||
89 | */ | ||
90 | #define MMU_FTR_1T_SEGMENT ASM_CONST(0x40000000) | ||
91 | |||
92 | /* Doesn't support the B bit (1T segment) in SLBIE | ||
93 | */ | ||
94 | #define MMU_FTR_NO_SLBIE_B ASM_CONST(0x80000000) | ||
95 | |||
96 | /* MMU feature bit sets for various CPUs */ | ||
97 | #define MMU_FTRS_DEFAULT_HPTE_ARCH_V2 \ | ||
98 | MMU_FTR_HPTE_TABLE | MMU_FTR_PPCAS_ARCH_V2 | ||
99 | #define MMU_FTRS_POWER4 MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | ||
100 | #define MMU_FTRS_PPC970 MMU_FTRS_POWER4 | ||
101 | #define MMU_FTRS_POWER5 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | ||
102 | #define MMU_FTRS_POWER6 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | ||
103 | #define MMU_FTRS_POWER7 MMU_FTRS_POWER4 | MMU_FTR_LOCKLESS_TLBIE | ||
104 | #define MMU_FTRS_CELL MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ | ||
105 | MMU_FTR_CI_LARGE_PAGE | ||
106 | #define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ | ||
107 | MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B | ||
108 | #define MMU_FTRS_A2 MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | \ | ||
109 | MMU_FTR_USE_TLBIVAX_BCAST | \ | ||
110 | MMU_FTR_LOCK_BCAST_INVAL | \ | ||
111 | MMU_FTR_USE_TLBRSRV | \ | ||
112 | MMU_FTR_USE_PAIRED_MAS | \ | ||
113 | MMU_FTR_TLBIEL | \ | ||
114 | MMU_FTR_16M_PAGE | ||
71 | #ifndef __ASSEMBLY__ | 115 | #ifndef __ASSEMBLY__ |
72 | #include <asm/cputable.h> | 116 | #include <asm/cputable.h> |
73 | 117 | ||
@@ -82,6 +126,16 @@ extern unsigned int __start___mmu_ftr_fixup, __stop___mmu_ftr_fixup; | |||
82 | extern void early_init_mmu(void); | 126 | extern void early_init_mmu(void); |
83 | extern void early_init_mmu_secondary(void); | 127 | extern void early_init_mmu_secondary(void); |
84 | 128 | ||
129 | extern void setup_initial_memory_limit(phys_addr_t first_memblock_base, | ||
130 | phys_addr_t first_memblock_size); | ||
131 | |||
132 | #ifdef CONFIG_PPC64 | ||
133 | /* This is our real memory area size on ppc64 server, on embedded, we | ||
134 | * make it match the size of our bolted TLB area | ||
135 | */ | ||
136 | extern u64 ppc64_rma_size; | ||
137 | #endif /* CONFIG_PPC64 */ | ||
138 | |||
85 | #endif /* !__ASSEMBLY__ */ | 139 | #endif /* !__ASSEMBLY__ */ |
86 | 140 | ||
87 | /* The kernel use the constants below to index in the page sizes array. | 141 | /* The kernel use the constants below to index in the page sizes array. |
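The block of MMU_FTR_* bits and MMU_FTRS_* sets added above moves feature knowledge from the CPU table onto the MMU side; later hunks (see the switch_mm() change in mmu_context.h) test them with mmu_has_feature() instead of cpu_has_feature(). A minimal sketch of such a test follows; mmu_has_feature() is assumed to be the MMU-side analogue of cpu_has_feature() and is not defined in this hunk.

    #include <asm/mmu.h>

    /* Hedged sketch: pick a segment size based on an MMU feature bit.
     * mmu_has_feature() is assumed here; only the MMU_FTR_* constants
     * come from the hunk above. */
    static inline int example_segment_shift(void)
    {
            if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                    return 40;      /* 1T segments available */
            return 28;              /* fall back to 256M segments */
    }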
diff --git a/arch/powerpc/include/asm/mmu_context.h b/arch/powerpc/include/asm/mmu_context.h index 81fb41289d6c..a73668a5f30d 100644 --- a/arch/powerpc/include/asm/mmu_context.h +++ b/arch/powerpc/include/asm/mmu_context.h | |||
@@ -32,6 +32,10 @@ extern void __destroy_context(unsigned long context_id); | |||
32 | extern void mmu_context_init(void); | 32 | extern void mmu_context_init(void); |
33 | #endif | 33 | #endif |
34 | 34 | ||
35 | extern void switch_cop(struct mm_struct *next); | ||
36 | extern int use_cop(unsigned long acop, struct mm_struct *mm); | ||
37 | extern void drop_cop(unsigned long acop, struct mm_struct *mm); | ||
38 | |||
35 | /* | 39 | /* |
36 | * switch_mm is the entry point called from the architecture independent | 40 | * switch_mm is the entry point called from the architecture independent |
37 | * code in kernel/sched.c | 41 | * code in kernel/sched.c |
@@ -55,6 +59,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
55 | if (prev == next) | 59 | if (prev == next) |
56 | return; | 60 | return; |
57 | 61 | ||
62 | #ifdef CONFIG_PPC_ICSWX | ||
63 | /* Switch coprocessor context only if prev or next uses a coprocessor */ | ||
64 | if (prev->context.acop || next->context.acop) | ||
65 | switch_cop(next); | ||
66 | #endif /* CONFIG_PPC_ICSWX */ | ||
67 | |||
58 | /* We must stop all altivec streams before changing the HW | 68 | /* We must stop all altivec streams before changing the HW |
59 | * context | 69 | * context |
60 | */ | 70 | */ |
@@ -67,7 +77,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, | |||
67 | * sub architectures. | 77 | * sub architectures. |
68 | */ | 78 | */ |
69 | #ifdef CONFIG_PPC_STD_MMU_64 | 79 | #ifdef CONFIG_PPC_STD_MMU_64 |
70 | if (cpu_has_feature(CPU_FTR_SLB)) | 80 | if (mmu_has_feature(MMU_FTR_SLB)) |
71 | switch_slb(tsk, next); | 81 | switch_slb(tsk, next); |
72 | else | 82 | else |
73 | switch_stab(tsk, next); | 83 | switch_stab(tsk, next); |
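The use_cop()/drop_cop() prototypes above pair with the acop/cop_pid fields added to mm_context_t in mmu-hash64.h: once an mm has a non-zero acop mask, the new switch_cop() hook in switch_mm() runs for it. Below is a hedged sketch of how a coprocessor driver might use the interface; the coprocessor type value is made up, and the function bodies live elsewhere in the series.

    #include <linux/mm.h>
    #include <asm/mmu_context.h>

    #define EXAMPLE_ACOP_TYPE   (1UL << 0)   /* illustrative type bit */

    static int example_attach_cop(struct mm_struct *mm)
    {
            /* Mark this address space as a coprocessor user so that
             * switch_mm() starts calling switch_cop() for it. */
            return use_cop(EXAMPLE_ACOP_TYPE, mm);
    }

    static void example_detach_cop(struct mm_struct *mm)
    {
            /* Once context.acop drops back to zero, the coprocessor
             * switch in switch_mm() is skipped again. */
            drop_cop(EXAMPLE_ACOP_TYPE, mm);
    }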
diff --git a/arch/powerpc/include/asm/mmzone.h b/arch/powerpc/include/asm/mmzone.h index aac87cbceb57..7b589178be46 100644 --- a/arch/powerpc/include/asm/mmzone.h +++ b/arch/powerpc/include/asm/mmzone.h | |||
@@ -33,15 +33,13 @@ extern int numa_cpu_lookup_table[]; | |||
33 | extern cpumask_var_t node_to_cpumask_map[]; | 33 | extern cpumask_var_t node_to_cpumask_map[]; |
34 | #ifdef CONFIG_MEMORY_HOTPLUG | 34 | #ifdef CONFIG_MEMORY_HOTPLUG |
35 | extern unsigned long max_pfn; | 35 | extern unsigned long max_pfn; |
36 | u64 memory_hotplug_max(void); | ||
37 | #else | ||
38 | #define memory_hotplug_max() memblock_end_of_DRAM() | ||
36 | #endif | 39 | #endif |
37 | 40 | ||
38 | /* | 41 | #else |
39 | * Following are macros that each numa implmentation must define. | 42 | #define memory_hotplug_max() memblock_end_of_DRAM() |
40 | */ | ||
41 | |||
42 | #define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn) | ||
43 | #define node_end_pfn(nid) (NODE_DATA(nid)->node_end_pfn) | ||
44 | |||
45 | #endif /* CONFIG_NEED_MULTIPLE_NODES */ | 43 | #endif /* CONFIG_NEED_MULTIPLE_NODES */ |
46 | 44 | ||
47 | #endif /* __KERNEL__ */ | 45 | #endif /* __KERNEL__ */ |
diff --git a/arch/powerpc/include/asm/mpic.h b/arch/powerpc/include/asm/mpic.h index e000cce8f6dd..df18989e78d4 100644 --- a/arch/powerpc/include/asm/mpic.h +++ b/arch/powerpc/include/asm/mpic.h | |||
@@ -3,7 +3,6 @@ | |||
3 | #ifdef __KERNEL__ | 3 | #ifdef __KERNEL__ |
4 | 4 | ||
5 | #include <linux/irq.h> | 5 | #include <linux/irq.h> |
6 | #include <linux/sysdev.h> | ||
7 | #include <asm/dcr.h> | 6 | #include <asm/dcr.h> |
8 | #include <asm/msi_bitmap.h> | 7 | #include <asm/msi_bitmap.h> |
9 | 8 | ||
@@ -263,6 +262,7 @@ struct mpic | |||
263 | #ifdef CONFIG_SMP | 262 | #ifdef CONFIG_SMP |
264 | struct irq_chip hc_ipi; | 263 | struct irq_chip hc_ipi; |
265 | #endif | 264 | #endif |
265 | struct irq_chip hc_tm; | ||
266 | const char *name; | 266 | const char *name; |
267 | /* Flags */ | 267 | /* Flags */ |
268 | unsigned int flags; | 268 | unsigned int flags; |
@@ -281,7 +281,7 @@ struct mpic | |||
281 | 281 | ||
282 | /* vector numbers used for internal sources (ipi/timers) */ | 282 | /* vector numbers used for internal sources (ipi/timers) */ |
283 | unsigned int ipi_vecs[4]; | 283 | unsigned int ipi_vecs[4]; |
284 | unsigned int timer_vecs[4]; | 284 | unsigned int timer_vecs[8]; |
285 | 285 | ||
286 | /* Spurious vector to program into unused sources */ | 286 | /* Spurious vector to program into unused sources */ |
287 | unsigned int spurious_vec; | 287 | unsigned int spurious_vec; |
@@ -320,8 +320,6 @@ struct mpic | |||
320 | /* link */ | 320 | /* link */ |
321 | struct mpic *next; | 321 | struct mpic *next; |
322 | 322 | ||
323 | struct sys_device sysdev; | ||
324 | |||
325 | #ifdef CONFIG_PM | 323 | #ifdef CONFIG_PM |
326 | struct mpic_irq_save *save_data; | 324 | struct mpic_irq_save *save_data; |
327 | #endif | 325 | #endif |
@@ -367,6 +365,12 @@ struct mpic | |||
367 | #define MPIC_SINGLE_DEST_CPU 0x00001000 | 365 | #define MPIC_SINGLE_DEST_CPU 0x00001000 |
368 | /* Enable CoreInt delivery of interrupts */ | 366 | /* Enable CoreInt delivery of interrupts */ |
369 | #define MPIC_ENABLE_COREINT 0x00002000 | 367 | #define MPIC_ENABLE_COREINT 0x00002000 |
368 | /* Disable resetting of the MPIC. | ||
369 | * NOTE: This flag trumps MPIC_WANTS_RESET. | ||
370 | */ | ||
371 | #define MPIC_NO_RESET 0x00004000 | ||
372 | /* Freescale MPIC (compatible includes "fsl,mpic") */ | ||
373 | #define MPIC_FSL 0x00008000 | ||
370 | 374 | ||
371 | /* MPIC HW modification ID */ | 375 | /* MPIC HW modification ID */ |
372 | #define MPIC_REGSET_MASK 0xf0000000 | 376 | #define MPIC_REGSET_MASK 0xf0000000 |
@@ -467,11 +471,11 @@ extern void mpic_request_ipis(void); | |||
467 | void smp_mpic_message_pass(int target, int msg); | 471 | void smp_mpic_message_pass(int target, int msg); |
468 | 472 | ||
469 | /* Unmask a specific virq */ | 473 | /* Unmask a specific virq */ |
470 | extern void mpic_unmask_irq(unsigned int irq); | 474 | extern void mpic_unmask_irq(struct irq_data *d); |
471 | /* Mask a specific virq */ | 475 | /* Mask a specific virq */ |
472 | extern void mpic_mask_irq(unsigned int irq); | 476 | extern void mpic_mask_irq(struct irq_data *d); |
473 | /* EOI a specific virq */ | 477 | /* EOI a specific virq */ |
474 | extern void mpic_end_irq(unsigned int irq); | 478 | extern void mpic_end_irq(struct irq_data *d); |
475 | 479 | ||
476 | /* Fetch interrupt from a given mpic */ | 480 | /* Fetch interrupt from a given mpic */ |
477 | extern unsigned int mpic_get_one_irq(struct mpic *mpic); | 481 | extern unsigned int mpic_get_one_irq(struct mpic *mpic); |
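mpic_unmask_irq(), mpic_mask_irq() and mpic_end_irq() now take a struct irq_data * instead of a bare virq number, matching the irq_chip callback signatures of the generic IRQ layer. A hedged sketch of a cascaded controller reusing the exported helpers directly as callbacks; the chip name is illustrative and a real user would likely need additional hooks.

    #include <linux/irq.h>
    #include <asm/mpic.h>

    /* Hedged sketch: with the new prototypes the exported MPIC helpers
     * can be dropped straight into an irq_chip. */
    static struct irq_chip example_mpic_wrapper_chip = {
            .name       = "example-mpic-wrap",
            .irq_mask   = mpic_mask_irq,
            .irq_unmask = mpic_unmask_irq,
            .irq_eoi    = mpic_end_irq,
    };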
diff --git a/arch/powerpc/include/asm/nvram.h b/arch/powerpc/include/asm/nvram.h index 850b72f27445..9d1aafe607c7 100644 --- a/arch/powerpc/include/asm/nvram.h +++ b/arch/powerpc/include/asm/nvram.h | |||
@@ -10,31 +10,7 @@ | |||
10 | #ifndef _ASM_POWERPC_NVRAM_H | 10 | #ifndef _ASM_POWERPC_NVRAM_H |
11 | #define _ASM_POWERPC_NVRAM_H | 11 | #define _ASM_POWERPC_NVRAM_H |
12 | 12 | ||
13 | #include <linux/errno.h> | 13 | /* Signatures for nvram partitions */ |
14 | |||
15 | #define NVRW_CNT 0x20 | ||
16 | #define NVRAM_HEADER_LEN 16 /* sizeof(struct nvram_header) */ | ||
17 | #define NVRAM_BLOCK_LEN 16 | ||
18 | #define NVRAM_MAX_REQ (2080/NVRAM_BLOCK_LEN) | ||
19 | #define NVRAM_MIN_REQ (1056/NVRAM_BLOCK_LEN) | ||
20 | |||
21 | #define NVRAM_AS0 0x74 | ||
22 | #define NVRAM_AS1 0x75 | ||
23 | #define NVRAM_DATA 0x77 | ||
24 | |||
25 | |||
26 | /* RTC Offsets */ | ||
27 | |||
28 | #define MOTO_RTC_SECONDS 0x1FF9 | ||
29 | #define MOTO_RTC_MINUTES 0x1FFA | ||
30 | #define MOTO_RTC_HOURS 0x1FFB | ||
31 | #define MOTO_RTC_DAY_OF_WEEK 0x1FFC | ||
32 | #define MOTO_RTC_DAY_OF_MONTH 0x1FFD | ||
33 | #define MOTO_RTC_MONTH 0x1FFE | ||
34 | #define MOTO_RTC_YEAR 0x1FFF | ||
35 | #define MOTO_RTC_CONTROLA 0x1FF8 | ||
36 | #define MOTO_RTC_CONTROLB 0x1FF9 | ||
37 | |||
38 | #define NVRAM_SIG_SP 0x02 /* support processor */ | 14 | #define NVRAM_SIG_SP 0x02 /* support processor */ |
39 | #define NVRAM_SIG_OF 0x50 /* open firmware config */ | 15 | #define NVRAM_SIG_OF 0x50 /* open firmware config */ |
40 | #define NVRAM_SIG_FW 0x51 /* general firmware */ | 16 | #define NVRAM_SIG_FW 0x51 /* general firmware */ |
@@ -49,32 +25,19 @@ | |||
49 | #define NVRAM_SIG_OS 0xa0 /* OS defined */ | 25 | #define NVRAM_SIG_OS 0xa0 /* OS defined */ |
50 | #define NVRAM_SIG_PANIC 0xa1 /* Apple OSX "panic" */ | 26 | #define NVRAM_SIG_PANIC 0xa1 /* Apple OSX "panic" */ |
51 | 27 | ||
52 | /* If change this size, then change the size of NVNAME_LEN */ | ||
53 | struct nvram_header { | ||
54 | unsigned char signature; | ||
55 | unsigned char checksum; | ||
56 | unsigned short length; | ||
57 | char name[12]; | ||
58 | }; | ||
59 | |||
60 | #ifdef __KERNEL__ | 28 | #ifdef __KERNEL__ |
61 | 29 | ||
30 | #include <linux/errno.h> | ||
62 | #include <linux/list.h> | 31 | #include <linux/list.h> |
63 | 32 | ||
64 | struct nvram_partition { | 33 | #ifdef CONFIG_PPC_PSERIES |
65 | struct list_head partition; | ||
66 | struct nvram_header header; | ||
67 | unsigned int index; | ||
68 | }; | ||
69 | |||
70 | |||
71 | extern int nvram_write_error_log(char * buff, int length, | 34 | extern int nvram_write_error_log(char * buff, int length, |
72 | unsigned int err_type, unsigned int err_seq); | 35 | unsigned int err_type, unsigned int err_seq); |
73 | extern int nvram_read_error_log(char * buff, int length, | 36 | extern int nvram_read_error_log(char * buff, int length, |
74 | unsigned int * err_type, unsigned int *err_seq); | 37 | unsigned int * err_type, unsigned int *err_seq); |
75 | extern int nvram_clear_error_log(void); | 38 | extern int nvram_clear_error_log(void); |
76 | |||
77 | extern int pSeries_nvram_init(void); | 39 | extern int pSeries_nvram_init(void); |
40 | #endif /* CONFIG_PPC_PSERIES */ | ||
78 | 41 | ||
79 | #ifdef CONFIG_MMIO_NVRAM | 42 | #ifdef CONFIG_MMIO_NVRAM |
80 | extern int mmio_nvram_init(void); | 43 | extern int mmio_nvram_init(void); |
@@ -85,6 +48,14 @@ static inline int mmio_nvram_init(void) | |||
85 | } | 48 | } |
86 | #endif | 49 | #endif |
87 | 50 | ||
51 | extern int __init nvram_scan_partitions(void); | ||
52 | extern loff_t nvram_create_partition(const char *name, int sig, | ||
53 | int req_size, int min_size); | ||
54 | extern int nvram_remove_partition(const char *name, int sig, | ||
55 | const char *exceptions[]); | ||
56 | extern int nvram_get_partition_size(loff_t data_index); | ||
57 | extern loff_t nvram_find_partition(const char *name, int sig, int *out_size); | ||
58 | |||
88 | #endif /* __KERNEL__ */ | 59 | #endif /* __KERNEL__ */ |
89 | 60 | ||
90 | /* PowerMac specific nvram stuffs */ | 61 | /* PowerMac specific nvram stuffs */ |
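The header now exports a generic NVRAM partition API (scan, create, remove, find, size) in place of the open-coded header and partition structures that moved into the implementation. A hedged usage sketch; the partition name and sizes are illustrative, and a zero return from nvram_find_partition() is assumed to mean "not found".

    #include <linux/kernel.h>
    #include <asm/nvram.h>

    /* Hedged sketch of the new partition API (name/sizes are made up;
     * only the prototypes come from this header). */
    static loff_t example_nvram_setup(void)
    {
            loff_t offset;
            int size;

            /* Reuse an existing OS-defined partition if present... */
            offset = nvram_find_partition("example-log", NVRAM_SIG_OS, &size);
            if (offset)
                    return offset;

            /* ...otherwise carve one out (requested and minimum sizes;
             * units as defined by the implementation). */
            return nvram_create_partition("example-log", NVRAM_SIG_OS,
                                          2048, 1024);
    }

nvram_scan_partitions() is presumably run once by platform init code before either lookup can succeed.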
diff --git a/arch/powerpc/include/asm/pSeries_reconfig.h b/arch/powerpc/include/asm/pSeries_reconfig.h index d4b4bfa26fb3..89d2f99c1bf4 100644 --- a/arch/powerpc/include/asm/pSeries_reconfig.h +++ b/arch/powerpc/include/asm/pSeries_reconfig.h | |||
@@ -18,13 +18,18 @@ | |||
18 | extern int pSeries_reconfig_notifier_register(struct notifier_block *); | 18 | extern int pSeries_reconfig_notifier_register(struct notifier_block *); |
19 | extern void pSeries_reconfig_notifier_unregister(struct notifier_block *); | 19 | extern void pSeries_reconfig_notifier_unregister(struct notifier_block *); |
20 | extern struct blocking_notifier_head pSeries_reconfig_chain; | 20 | extern struct blocking_notifier_head pSeries_reconfig_chain; |
21 | /* Not the best place to put this, will be fixed when we move some | ||
22 | * of the rtas suspend-me stuff to pseries */ | ||
23 | extern void pSeries_coalesce_init(void); | ||
21 | #else /* !CONFIG_PPC_PSERIES */ | 24 | #else /* !CONFIG_PPC_PSERIES */ |
22 | static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb) | 25 | static inline int pSeries_reconfig_notifier_register(struct notifier_block *nb) |
23 | { | 26 | { |
24 | return 0; | 27 | return 0; |
25 | } | 28 | } |
26 | static inline void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) { } | 29 | static inline void pSeries_reconfig_notifier_unregister(struct notifier_block *nb) { } |
30 | static inline void pSeries_coalesce_init(void) { } | ||
27 | #endif /* CONFIG_PPC_PSERIES */ | 31 | #endif /* CONFIG_PPC_PSERIES */ |
28 | 32 | ||
33 | |||
29 | #endif /* __KERNEL__ */ | 34 | #endif /* __KERNEL__ */ |
30 | #endif /* _PPC64_PSERIES_RECONFIG_H */ | 35 | #endif /* _PPC64_PSERIES_RECONFIG_H */ |
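pSeries_coalesce_init() gains a stub for non-pSeries builds, following the same pattern as the existing reconfig notifier stubs so callers need no #ifdef. Since the notifier registration is how most consumers use this header, here is a hedged sketch of that side; the action handling is left trivial because the action codes are not part of this diff.

    #include <linux/notifier.h>
    #include <asm/pSeries_reconfig.h>

    static int example_reconfig_notify(struct notifier_block *nb,
                                       unsigned long action, void *data)
    {
            /* React to device-tree reconfiguration events here. */
            return NOTIFY_OK;
    }

    static struct notifier_block example_reconfig_nb = {
            .notifier_call = example_reconfig_notify,
    };

    static int __init example_reconfig_init(void)
    {
            /* On !CONFIG_PPC_PSERIES the stub above makes this a no-op. */
            return pSeries_reconfig_notifier_register(&example_reconfig_nb);
    }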
diff --git a/arch/powerpc/include/asm/paca.h b/arch/powerpc/include/asm/paca.h index 1ff6662f7faf..74126765106a 100644 --- a/arch/powerpc/include/asm/paca.h +++ b/arch/powerpc/include/asm/paca.h | |||
@@ -85,14 +85,16 @@ struct paca_struct { | |||
85 | u8 kexec_state; /* set when kexec down has irqs off */ | 85 | u8 kexec_state; /* set when kexec down has irqs off */ |
86 | #ifdef CONFIG_PPC_STD_MMU_64 | 86 | #ifdef CONFIG_PPC_STD_MMU_64 |
87 | struct slb_shadow *slb_shadow_ptr; | 87 | struct slb_shadow *slb_shadow_ptr; |
88 | struct dtl_entry *dispatch_log; | ||
89 | struct dtl_entry *dispatch_log_end; | ||
88 | 90 | ||
89 | /* | 91 | /* |
90 | * Now, starting in cacheline 2, the exception save areas | 92 | * Now, starting in cacheline 2, the exception save areas |
91 | */ | 93 | */ |
92 | /* used for most interrupts/exceptions */ | 94 | /* used for most interrupts/exceptions */ |
93 | u64 exgen[10] __attribute__((aligned(0x80))); | 95 | u64 exgen[11] __attribute__((aligned(0x80))); |
94 | u64 exmc[10]; /* used for machine checks */ | 96 | u64 exmc[11]; /* used for machine checks */ |
95 | u64 exslb[10]; /* used for SLB/segment table misses | 97 | u64 exslb[11]; /* used for SLB/segment table misses |
96 | * on the linear mapping */ | 98 | * on the linear mapping */ |
97 | /* SLB related definitions */ | 99 | /* SLB related definitions */ |
98 | u16 vmalloc_sllp; | 100 | u16 vmalloc_sllp; |
@@ -104,7 +106,8 @@ struct paca_struct { | |||
104 | pgd_t *pgd; /* Current PGD */ | 106 | pgd_t *pgd; /* Current PGD */ |
105 | pgd_t *kernel_pgd; /* Kernel PGD */ | 107 | pgd_t *kernel_pgd; /* Kernel PGD */ |
106 | u64 exgen[8] __attribute__((aligned(0x80))); | 108 | u64 exgen[8] __attribute__((aligned(0x80))); |
107 | u64 extlb[EX_TLB_SIZE*3] __attribute__((aligned(0x80))); | 109 | /* We can have up to 3 levels of reentrancy in the TLB miss handler */ |
110 | u64 extlb[3][EX_TLB_SIZE / sizeof(u64)] __attribute__((aligned(0x80))); | ||
108 | u64 exmc[8]; /* used for machine checks */ | 111 | u64 exmc[8]; /* used for machine checks */ |
109 | u64 excrit[8]; /* used for crit interrupts */ | 112 | u64 excrit[8]; /* used for crit interrupts */ |
110 | u64 exdbg[8]; /* used for debug interrupts */ | 113 | u64 exdbg[8]; /* used for debug interrupts */ |
@@ -123,19 +126,25 @@ struct paca_struct { | |||
123 | struct task_struct *__current; /* Pointer to current */ | 126 | struct task_struct *__current; /* Pointer to current */ |
124 | u64 kstack; /* Saved Kernel stack addr */ | 127 | u64 kstack; /* Saved Kernel stack addr */ |
125 | u64 stab_rr; /* stab/slb round-robin counter */ | 128 | u64 stab_rr; /* stab/slb round-robin counter */ |
126 | u64 saved_r1; /* r1 save for RTAS calls */ | 129 | u64 saved_r1; /* r1 save for RTAS calls or PM */ |
127 | u64 saved_msr; /* MSR saved here by enter_rtas */ | 130 | u64 saved_msr; /* MSR saved here by enter_rtas */ |
128 | u16 trap_save; /* Used when bad stack is encountered */ | 131 | u16 trap_save; /* Used when bad stack is encountered */ |
129 | u8 soft_enabled; /* irq soft-enable flag */ | 132 | u8 soft_enabled; /* irq soft-enable flag */ |
130 | u8 hard_enabled; /* set if irqs are enabled in MSR */ | 133 | u8 hard_enabled; /* set if irqs are enabled in MSR */ |
131 | u8 io_sync; /* writel() needs spin_unlock sync */ | 134 | u8 io_sync; /* writel() needs spin_unlock sync */ |
132 | u8 perf_event_pending; /* PM interrupt while soft-disabled */ | 135 | u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disabled */ |
133 | 136 | ||
134 | /* Stuff for accurate time accounting */ | 137 | /* Stuff for accurate time accounting */ |
135 | u64 user_time; /* accumulated usermode TB ticks */ | 138 | u64 user_time; /* accumulated usermode TB ticks */ |
136 | u64 system_time; /* accumulated system TB ticks */ | 139 | u64 system_time; /* accumulated system TB ticks */ |
137 | u64 startpurr; /* PURR/TB value snapshot */ | 140 | u64 user_time_scaled; /* accumulated usermode SPURR ticks */ |
141 | u64 starttime; /* TB value snapshot */ | ||
142 | u64 starttime_user; /* TB value on exit to usermode */ | ||
138 | u64 startspurr; /* SPURR value snapshot */ | 143 | u64 startspurr; /* SPURR value snapshot */ |
144 | u64 utime_sspurr; /* ->user_time when ->startspurr set */ | ||
145 | u64 stolen_time; /* TB ticks taken by hypervisor */ | ||
146 | u64 dtl_ridx; /* read index in dispatch log */ | ||
147 | struct dtl_entry *dtl_curr; /* pointer corresponding to dtl_ridx */ | ||
139 | 148 | ||
140 | #ifdef CONFIG_KVM_BOOK3S_HANDLER | 149 | #ifdef CONFIG_KVM_BOOK3S_HANDLER |
141 | /* We use this to store guest state in */ | 150 | /* We use this to store guest state in */ |
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h index 53b64be40eb2..2cd664ef0a5e 100644 --- a/arch/powerpc/include/asm/page.h +++ b/arch/powerpc/include/asm/page.h | |||
@@ -100,8 +100,8 @@ extern phys_addr_t kernstart_addr; | |||
100 | #endif | 100 | #endif |
101 | 101 | ||
102 | #ifdef CONFIG_FLATMEM | 102 | #ifdef CONFIG_FLATMEM |
103 | #define ARCH_PFN_OFFSET (MEMORY_START >> PAGE_SHIFT) | 103 | #define ARCH_PFN_OFFSET ((unsigned long)(MEMORY_START >> PAGE_SHIFT)) |
104 | #define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < (ARCH_PFN_OFFSET + max_mapnr)) | 104 | #define pfn_valid(pfn) ((pfn) >= ARCH_PFN_OFFSET && (pfn) < max_mapnr) |
105 | #endif | 105 | #endif |
106 | 106 | ||
107 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) | 107 | #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) |
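ARCH_PFN_OFFSET now carries an explicit (unsigned long) cast. One plausible reason: MEMORY_START can be a 64-bit phys_addr_t on 32-bit parts with CONFIG_PHYS_64BIT, so without the cast every comparison against ARCH_PFN_OFFSET would be done in 64-bit arithmetic even though a pfn fits an unsigned long. A standalone (userspace) demo of the type effect; the address value is illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12
    typedef uint64_t phys_addr_t;           /* the CONFIG_PHYS_64BIT case */

    int main(void)
    {
            phys_addr_t memory_start = 0xc0000000ULL;   /* illustrative */
            unsigned long pfn_offset =
                    (unsigned long)(memory_start >> PAGE_SHIFT);

            /* The cast keeps pfn arithmetic in the native word size. */
            printf("ARCH_PFN_OFFSET = %#lx\n", pfn_offset);
            return 0;
    }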
diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h index 358ff14ea25e..9356262fd3cc 100644 --- a/arch/powerpc/include/asm/page_64.h +++ b/arch/powerpc/include/asm/page_64.h | |||
@@ -59,24 +59,7 @@ static __inline__ void clear_page(void *addr) | |||
59 | : "ctr", "memory"); | 59 | : "ctr", "memory"); |
60 | } | 60 | } |
61 | 61 | ||
62 | extern void copy_4K_page(void *to, void *from); | 62 | extern void copy_page(void *to, void *from); |
63 | |||
64 | #ifdef CONFIG_PPC_64K_PAGES | ||
65 | static inline void copy_page(void *to, void *from) | ||
66 | { | ||
67 | unsigned int i; | ||
68 | for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) { | ||
69 | copy_4K_page(to, from); | ||
70 | to += 4096; | ||
71 | from += 4096; | ||
72 | } | ||
73 | } | ||
74 | #else /* CONFIG_PPC_64K_PAGES */ | ||
75 | static inline void copy_page(void *to, void *from) | ||
76 | { | ||
77 | copy_4K_page(to, from); | ||
78 | } | ||
79 | #endif /* CONFIG_PPC_64K_PAGES */ | ||
80 | 63 | ||
81 | /* Log 2 of page table size */ | 64 | /* Log 2 of page table size */ |
82 | extern u64 ppc64_pft_size; | 65 | extern u64 ppc64_pft_size; |
@@ -130,7 +113,7 @@ extern void slice_set_user_psize(struct mm_struct *mm, unsigned int psize); | |||
130 | extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start, | 113 | extern void slice_set_range_psize(struct mm_struct *mm, unsigned long start, |
131 | unsigned long len, unsigned int psize); | 114 | unsigned long len, unsigned int psize); |
132 | 115 | ||
133 | #define slice_mm_new_context(mm) ((mm)->context.id == 0) | 116 | #define slice_mm_new_context(mm) ((mm)->context.id == MMU_NO_CONTEXT) |
134 | 117 | ||
135 | #endif /* __ASSEMBLY__ */ | 118 | #endif /* __ASSEMBLY__ */ |
136 | #else | 119 | #else |
@@ -163,13 +146,13 @@ do { \ | |||
163 | #endif /* !CONFIG_HUGETLB_PAGE */ | 146 | #endif /* !CONFIG_HUGETLB_PAGE */ |
164 | 147 | ||
165 | #define VM_DATA_DEFAULT_FLAGS \ | 148 | #define VM_DATA_DEFAULT_FLAGS \ |
166 | (test_thread_flag(TIF_32BIT) ? \ | 149 | (is_32bit_task() ? \ |
167 | VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64) | 150 | VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64) |
168 | 151 | ||
169 | /* | 152 | /* |
170 | * This is the default if a program doesn't have a PT_GNU_STACK | 153 | * This is the default if a program doesn't have a PT_GNU_STACK |
171 | * program header entry. The PPC64 ELF ABI has a non executable stack | 154 | * program header entry. The PPC64 ELF ABI has a non executable stack |
172 | * stack by default, so in the absense of a PT_GNU_STACK program header | 155 | * stack by default, so in the absence of a PT_GNU_STACK program header |
173 | * we turn execute permission off. | 156 | * we turn execute permission off. |
174 | */ | 157 | */ |
175 | #define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ | 158 | #define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ |
@@ -179,7 +162,7 @@ do { \ | |||
179 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) | 162 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) |
180 | 163 | ||
181 | #define VM_STACK_DEFAULT_FLAGS \ | 164 | #define VM_STACK_DEFAULT_FLAGS \ |
182 | (test_thread_flag(TIF_32BIT) ? \ | 165 | (is_32bit_task() ? \ |
183 | VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) | 166 | VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) |
184 | 167 | ||
185 | #include <asm-generic/getorder.h> | 168 | #include <asm-generic/getorder.h> |
diff --git a/arch/powerpc/include/asm/pasemi_dma.h b/arch/powerpc/include/asm/pasemi_dma.h index 19fd7933e2d9..eafa5a5f56de 100644 --- a/arch/powerpc/include/asm/pasemi_dma.h +++ b/arch/powerpc/include/asm/pasemi_dma.h | |||
@@ -522,7 +522,7 @@ extern void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size, | |||
522 | extern void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size, | 522 | extern void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size, |
523 | dma_addr_t *handle); | 523 | dma_addr_t *handle); |
524 | 524 | ||
525 | /* Routines to allocate flags (events) for channel syncronization */ | 525 | /* Routines to allocate flags (events) for channel synchronization */ |
526 | extern int pasemi_dma_alloc_flag(void); | 526 | extern int pasemi_dma_alloc_flag(void); |
527 | extern void pasemi_dma_free_flag(int flag); | 527 | extern void pasemi_dma_free_flag(int flag); |
528 | extern void pasemi_dma_set_flag(int flag); | 528 | extern void pasemi_dma_set_flag(int flag); |
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h index 51e9e6f90d12..b90dbf8e5cd9 100644 --- a/arch/powerpc/include/asm/pci-bridge.h +++ b/arch/powerpc/include/asm/pci-bridge.h | |||
@@ -106,7 +106,7 @@ struct pci_controller { | |||
106 | * Used for variants of PCI indirect handling and possible quirks: | 106 | * Used for variants of PCI indirect handling and possible quirks: |
107 | * SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1 | 107 | * SET_CFG_TYPE - used on 4xx or any PHB that does explicit type0/1 |
108 | * EXT_REG - provides access to PCI-e extended registers | 108 | * EXT_REG - provides access to PCI-e extended registers |
109 | * SURPRESS_PRIMARY_BUS - we surpress the setting of PCI_PRIMARY_BUS | 109 | * SURPRESS_PRIMARY_BUS - we suppress the setting of PCI_PRIMARY_BUS |
110 | * on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS | 110 | * on Freescale PCI-e controllers since they used the PCI_PRIMARY_BUS |
111 | * to determine which bus number to match on when generating type0 | 111 | * to determine which bus number to match on when generating type0 |
112 | * config cycles | 112 | * config cycles |
@@ -164,13 +164,23 @@ extern void setup_indirect_pci(struct pci_controller* hose, | |||
164 | resource_size_t cfg_addr, | 164 | resource_size_t cfg_addr, |
165 | resource_size_t cfg_data, u32 flags); | 165 | resource_size_t cfg_data, u32 flags); |
166 | 166 | ||
167 | #ifndef CONFIG_PPC64 | ||
168 | |||
169 | static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) | 167 | static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) |
170 | { | 168 | { |
171 | return bus->sysdata; | 169 | return bus->sysdata; |
172 | } | 170 | } |
173 | 171 | ||
172 | #ifndef CONFIG_PPC64 | ||
173 | |||
174 | static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) | ||
175 | { | ||
176 | struct pci_controller *host; | ||
177 | |||
178 | if (bus->self) | ||
179 | return pci_device_to_OF_node(bus->self); | ||
180 | host = pci_bus_to_host(bus); | ||
181 | return host ? host->dn : NULL; | ||
182 | } | ||
183 | |||
174 | static inline int isa_vaddr_is_ioport(void __iomem *address) | 184 | static inline int isa_vaddr_is_ioport(void __iomem *address) |
175 | { | 185 | { |
176 | /* No specific ISA handling on ppc32 at this stage, it | 186 | /* No specific ISA handling on ppc32 at this stage, it |
@@ -218,19 +228,10 @@ extern void * update_dn_pci_info(struct device_node *dn, void *data); | |||
218 | 228 | ||
219 | /* Get a device_node from a pci_dev. This code must be fast except | 229 | /* Get a device_node from a pci_dev. This code must be fast except |
220 | * in the case where the sysdata is incorrect and needs to be fixed | 230 | * in the case where the sysdata is incorrect and needs to be fixed |
221 | * up (this will only happen once). | 231 | * up (this will only happen once). */ |
222 | * In this case the sysdata will have been inherited from a PCI host | ||
223 | * bridge or a PCI-PCI bridge further up the tree, so it will point | ||
224 | * to a valid struct pci_dn, just not the one we want. | ||
225 | */ | ||
226 | static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev) | 232 | static inline struct device_node *pci_device_to_OF_node(struct pci_dev *dev) |
227 | { | 233 | { |
228 | struct device_node *dn = dev->sysdata; | 234 | return dev->dev.of_node ? dev->dev.of_node : fetch_dev_dn(dev); |
229 | struct pci_dn *pdn = dn->data; | ||
230 | |||
231 | if (pdn && pdn->devfn == dev->devfn && pdn->busno == dev->bus->number) | ||
232 | return dn; /* fast path. sysdata is good */ | ||
233 | return fetch_dev_dn(dev); | ||
234 | } | 235 | } |
235 | 236 | ||
236 | static inline int pci_device_from_OF_node(struct device_node *np, | 237 | static inline int pci_device_from_OF_node(struct device_node *np, |
@@ -248,7 +249,7 @@ static inline struct device_node *pci_bus_to_OF_node(struct pci_bus *bus) | |||
248 | if (bus->self) | 249 | if (bus->self) |
249 | return pci_device_to_OF_node(bus->self); | 250 | return pci_device_to_OF_node(bus->self); |
250 | else | 251 | else |
251 | return bus->sysdata; /* Must be root bus (PHB) */ | 252 | return bus->dev.of_node; /* Must be root bus (PHB) */ |
252 | } | 253 | } |
253 | 254 | ||
254 | /** Find the bus corresponding to the indicated device node */ | 255 | /** Find the bus corresponding to the indicated device node */ |
@@ -260,14 +261,6 @@ extern void pcibios_remove_pci_devices(struct pci_bus *bus); | |||
260 | /** Discover new pci devices under this bus, and add them */ | 261 | /** Discover new pci devices under this bus, and add them */ |
261 | extern void pcibios_add_pci_devices(struct pci_bus *bus); | 262 | extern void pcibios_add_pci_devices(struct pci_bus *bus); |
262 | 263 | ||
263 | static inline struct pci_controller *pci_bus_to_host(const struct pci_bus *bus) | ||
264 | { | ||
265 | struct device_node *busdn = bus->sysdata; | ||
266 | |||
267 | BUG_ON(busdn == NULL); | ||
268 | return PCI_DN(busdn)->phb; | ||
269 | } | ||
270 | |||
271 | 264 | ||
272 | extern void isa_bridge_find_early(struct pci_controller *hose); | 265 | extern void isa_bridge_find_early(struct pci_controller *hose); |
273 | 266 | ||
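pci_device_to_OF_node() now trusts dev->dev.of_node and only falls back to fetch_dev_dn(), and ppc32 gains the same pci_bus_to_OF_node() shape that ppc64 already had. A hedged driver-side sketch of the common lookup pattern; the property name is illustrative.

    #include <linux/pci.h>
    #include <linux/of.h>
    #include <asm/pci-bridge.h>

    static int example_read_dt_prop(struct pci_dev *pdev)
    {
            struct device_node *dn = pci_device_to_OF_node(pdev);
            const __be32 *prop;

            if (!dn)
                    return -ENODEV;  /* no of_node, fetch_dev_dn() failed too */

            /* "example,cell-index" is a made-up property name. */
            prop = of_get_property(dn, "example,cell-index", NULL);
            return prop ? be32_to_cpup(prop) : -ENODEV;
    }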
diff --git a/arch/powerpc/include/asm/pci.h b/arch/powerpc/include/asm/pci.h index a20a9ad2258b..7d7790954e02 100644 --- a/arch/powerpc/include/asm/pci.h +++ b/arch/powerpc/include/asm/pci.h | |||
@@ -201,7 +201,7 @@ extern void pci_resource_to_user(const struct pci_dev *dev, int bar, | |||
201 | extern void pcibios_setup_bus_devices(struct pci_bus *bus); | 201 | extern void pcibios_setup_bus_devices(struct pci_bus *bus); |
202 | extern void pcibios_setup_bus_self(struct pci_bus *bus); | 202 | extern void pcibios_setup_bus_self(struct pci_bus *bus); |
203 | extern void pcibios_setup_phb_io_space(struct pci_controller *hose); | 203 | extern void pcibios_setup_phb_io_space(struct pci_controller *hose); |
204 | extern void pcibios_scan_phb(struct pci_controller *hose, void *sysdata); | 204 | extern void pcibios_scan_phb(struct pci_controller *hose); |
205 | 205 | ||
206 | #endif /* __KERNEL__ */ | 206 | #endif /* __KERNEL__ */ |
207 | #endif /* __ASM_POWERPC_PCI_H */ | 207 | #endif /* __ASM_POWERPC_PCI_H */ |
diff --git a/arch/powerpc/include/asm/pgalloc.h b/arch/powerpc/include/asm/pgalloc.h index abe8532bd14e..bf301ac62f35 100644 --- a/arch/powerpc/include/asm/pgalloc.h +++ b/arch/powerpc/include/asm/pgalloc.h | |||
@@ -31,14 +31,29 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage) | |||
31 | #endif | 31 | #endif |
32 | 32 | ||
33 | #ifdef CONFIG_SMP | 33 | #ifdef CONFIG_SMP |
34 | extern void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift); | 34 | struct mmu_gather; |
35 | extern void pte_free_finish(void); | 35 | extern void tlb_remove_table(struct mmu_gather *, void *); |
36 | |||
37 | static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) | ||
38 | { | ||
39 | unsigned long pgf = (unsigned long)table; | ||
40 | BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); | ||
41 | pgf |= shift; | ||
42 | tlb_remove_table(tlb, (void *)pgf); | ||
43 | } | ||
44 | |||
45 | static inline void __tlb_remove_table(void *_table) | ||
46 | { | ||
47 | void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE); | ||
48 | unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE; | ||
49 | |||
50 | pgtable_free(table, shift); | ||
51 | } | ||
36 | #else /* CONFIG_SMP */ | 52 | #else /* CONFIG_SMP */ |
37 | static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift) | 53 | static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift) |
38 | { | 54 | { |
39 | pgtable_free(table, shift); | 55 | pgtable_free(table, shift); |
40 | } | 56 | } |
41 | static inline void pte_free_finish(void) { } | ||
42 | #endif /* !CONFIG_SMP */ | 57 | #endif /* !CONFIG_SMP */ |
43 | 58 | ||
44 | static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage, | 59 | static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *ptepage, |
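The SMP path above replaces the old pgtable_free_tlb()/pte_free_finish() pair with tlb_remove_table(): the page-table index size ("shift") is packed into the low bits of the table pointer on the way in and unpacked by __tlb_remove_table() on the way out, which works because the tables are aligned well beyond MAX_PGTABLE_INDEX_SIZE. A standalone (userspace) demo of the same tagging round-trip; the mask value is an assumption for the demo only.

    #include <assert.h>
    #include <stdlib.h>

    #define MAX_PGTABLE_INDEX_SIZE 0xf   /* assumed small all-ones mask */

    int main(void)
    {
            /* Page-table pages are strongly aligned, so the low bits
             * are free to carry the shift value. */
            void *table = aligned_alloc(4096, 4096);
            unsigned long pgf, shift = 9;

            assert(table != NULL);

            pgf = (unsigned long)table | shift;                    /* encode */

            void *out = (void *)(pgf & ~MAX_PGTABLE_INDEX_SIZE);   /* decode */
            unsigned int out_shift = pgf & MAX_PGTABLE_INDEX_SIZE;

            assert(out == table && out_shift == shift);
            free(table);
            return 0;
    }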
diff --git a/arch/powerpc/include/asm/pgtable-ppc32.h b/arch/powerpc/include/asm/pgtable-ppc32.h index a7db96f2b5c3..47edde8c3556 100644 --- a/arch/powerpc/include/asm/pgtable-ppc32.h +++ b/arch/powerpc/include/asm/pgtable-ppc32.h | |||
@@ -308,12 +308,8 @@ static inline void __ptep_set_access_flags(pte_t *ptep, pte_t entry) | |||
308 | #define pte_offset_kernel(dir, addr) \ | 308 | #define pte_offset_kernel(dir, addr) \ |
309 | ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr)) | 309 | ((pte_t *) pmd_page_vaddr(*(dir)) + pte_index(addr)) |
310 | #define pte_offset_map(dir, addr) \ | 310 | #define pte_offset_map(dir, addr) \ |
311 | ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE0) + pte_index(addr)) | 311 | ((pte_t *) kmap_atomic(pmd_page(*(dir))) + pte_index(addr)) |
312 | #define pte_offset_map_nested(dir, addr) \ | 312 | #define pte_unmap(pte) kunmap_atomic(pte) |
313 | ((pte_t *) kmap_atomic(pmd_page(*(dir)), KM_PTE1) + pte_index(addr)) | ||
314 | |||
315 | #define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0) | ||
316 | #define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1) | ||
317 | 313 | ||
318 | /* | 314 | /* |
319 | * Encode and decode a swap entry. | 315 | * Encode and decode a swap entry. |
diff --git a/arch/powerpc/include/asm/pgtable-ppc64.h b/arch/powerpc/include/asm/pgtable-ppc64.h index 49865045d56f..81576ee0cfb1 100644 --- a/arch/powerpc/include/asm/pgtable-ppc64.h +++ b/arch/powerpc/include/asm/pgtable-ppc64.h | |||
@@ -193,9 +193,7 @@ | |||
193 | (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) | 193 | (((pte_t *) pmd_page_vaddr(*(dir))) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))) |
194 | 194 | ||
195 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) | 195 | #define pte_offset_map(dir,addr) pte_offset_kernel((dir), (addr)) |
196 | #define pte_offset_map_nested(dir,addr) pte_offset_kernel((dir), (addr)) | ||
197 | #define pte_unmap(pte) do { } while(0) | 196 | #define pte_unmap(pte) do { } while(0) |
198 | #define pte_unmap_nested(pte) do { } while(0) | ||
199 | 197 | ||
200 | /* to find an entry in a kernel page-table-directory */ | 198 | /* to find an entry in a kernel page-table-directory */ |
201 | /* This now only contains the vmalloc pages */ | 199 | /* This now only contains the vmalloc pages */ |
@@ -259,21 +257,20 @@ static inline int __ptep_test_and_clear_young(struct mm_struct *mm, | |||
259 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, | 257 | static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, |
260 | pte_t *ptep) | 258 | pte_t *ptep) |
261 | { | 259 | { |
262 | unsigned long old; | ||
263 | 260 | ||
264 | if ((pte_val(*ptep) & _PAGE_RW) == 0) | 261 | if ((pte_val(*ptep) & _PAGE_RW) == 0) |
265 | return; | 262 | return; |
266 | old = pte_update(mm, addr, ptep, _PAGE_RW, 0); | 263 | |
264 | pte_update(mm, addr, ptep, _PAGE_RW, 0); | ||
267 | } | 265 | } |
268 | 266 | ||
269 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, | 267 | static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, |
270 | unsigned long addr, pte_t *ptep) | 268 | unsigned long addr, pte_t *ptep) |
271 | { | 269 | { |
272 | unsigned long old; | ||
273 | |||
274 | if ((pte_val(*ptep) & _PAGE_RW) == 0) | 270 | if ((pte_val(*ptep) & _PAGE_RW) == 0) |
275 | return; | 271 | return; |
276 | old = pte_update(mm, addr, ptep, _PAGE_RW, 1); | 272 | |
273 | pte_update(mm, addr, ptep, _PAGE_RW, 1); | ||
277 | } | 274 | } |
278 | 275 | ||
279 | /* | 276 | /* |
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h index 89f158731ce3..88b0bd925a8b 100644 --- a/arch/powerpc/include/asm/pgtable.h +++ b/arch/powerpc/include/asm/pgtable.h | |||
@@ -170,6 +170,7 @@ extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long addre | |||
170 | #define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ | 170 | #define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ |
171 | _PAGE_COHERENT | _PAGE_WRITETHRU)) | 171 | _PAGE_COHERENT | _PAGE_WRITETHRU)) |
172 | 172 | ||
173 | #define pgprot_writecombine pgprot_noncached_wc | ||
173 | 174 | ||
174 | struct file; | 175 | struct file; |
175 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, | 176 | extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, |
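pgprot_writecombine() is now provided as an alias for pgprot_noncached_wc(), which lets generic drivers that ask for write-combining mappings work unchanged on powerpc. A hedged sketch of the usual consumer, an mmap() handler for device memory; remap_pfn_range() and the vma fields are generic kernel API, and the pfn comes from the caller.

    #include <linux/mm.h>
    #include <asm/pgtable.h>

    static int example_mmap_wc(struct vm_area_struct *vma, unsigned long pfn)
    {
            /* Ask for a write-combining mapping of the device pages. */
            vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
            return remap_pfn_range(vma, vma->vm_start, pfn,
                                   vma->vm_end - vma->vm_start,
                                   vma->vm_page_prot);
    }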
diff --git a/arch/powerpc/include/asm/pmac_feature.h b/arch/powerpc/include/asm/pmac_feature.h index 00eedc5a4e61..10902c9375d0 100644 --- a/arch/powerpc/include/asm/pmac_feature.h +++ b/arch/powerpc/include/asm/pmac_feature.h | |||
@@ -53,8 +53,8 @@ | |||
53 | 53 | ||
54 | /* Here is the infamous series of OHare based machines | 54 | /* Here is the infamous series of OHare based machines |
55 | */ | 55 | */ |
56 | #define PMAC_TYPE_COMET 0x20 /* Beleived to be PowerBook 2400 */ | 56 | #define PMAC_TYPE_COMET 0x20 /* Believed to be PowerBook 2400 */ |
57 | #define PMAC_TYPE_HOOPER 0x21 /* Beleived to be PowerBook 3400 */ | 57 | #define PMAC_TYPE_HOOPER 0x21 /* Believed to be PowerBook 3400 */ |
58 | #define PMAC_TYPE_KANGA 0x22 /* PowerBook 3500 (first G3) */ | 58 | #define PMAC_TYPE_KANGA 0x22 /* PowerBook 3500 (first G3) */ |
59 | #define PMAC_TYPE_ALCHEMY 0x23 /* Alchemy motherboard base */ | 59 | #define PMAC_TYPE_ALCHEMY 0x23 /* Alchemy motherboard base */ |
60 | #define PMAC_TYPE_GAZELLE 0x24 /* Spartacus, some 5xxx/6xxx */ | 60 | #define PMAC_TYPE_GAZELLE 0x24 /* Spartacus, some 5xxx/6xxx */ |
diff --git a/arch/powerpc/include/asm/ppc-opcode.h b/arch/powerpc/include/asm/ppc-opcode.h index 43adc8b819ed..e472659d906c 100644 --- a/arch/powerpc/include/asm/ppc-opcode.h +++ b/arch/powerpc/include/asm/ppc-opcode.h | |||
@@ -36,9 +36,15 @@ | |||
36 | #define PPC_INST_NOP 0x60000000 | 36 | #define PPC_INST_NOP 0x60000000 |
37 | #define PPC_INST_POPCNTB 0x7c0000f4 | 37 | #define PPC_INST_POPCNTB 0x7c0000f4 |
38 | #define PPC_INST_POPCNTB_MASK 0xfc0007fe | 38 | #define PPC_INST_POPCNTB_MASK 0xfc0007fe |
39 | #define PPC_INST_POPCNTD 0x7c0003f4 | ||
40 | #define PPC_INST_POPCNTW 0x7c0002f4 | ||
39 | #define PPC_INST_RFCI 0x4c000066 | 41 | #define PPC_INST_RFCI 0x4c000066 |
40 | #define PPC_INST_RFDI 0x4c00004e | 42 | #define PPC_INST_RFDI 0x4c00004e |
41 | #define PPC_INST_RFMCI 0x4c00004c | 43 | #define PPC_INST_RFMCI 0x4c00004c |
44 | #define PPC_INST_MFSPR_DSCR 0x7c1102a6 | ||
45 | #define PPC_INST_MFSPR_DSCR_MASK 0xfc1fffff | ||
46 | #define PPC_INST_MTSPR_DSCR 0x7c1103a6 | ||
47 | #define PPC_INST_MTSPR_DSCR_MASK 0xfc1fffff | ||
42 | 48 | ||
43 | #define PPC_INST_STRING 0x7c00042a | 49 | #define PPC_INST_STRING 0x7c00042a |
44 | #define PPC_INST_STRING_MASK 0xfc0007fe | 50 | #define PPC_INST_STRING_MASK 0xfc0007fe |
@@ -54,6 +60,17 @@ | |||
54 | #define PPC_INST_TLBSRX_DOT 0x7c0006a5 | 60 | #define PPC_INST_TLBSRX_DOT 0x7c0006a5 |
55 | #define PPC_INST_XXLOR 0xf0000510 | 61 | #define PPC_INST_XXLOR 0xf0000510 |
56 | 62 | ||
63 | #define PPC_INST_NAP 0x4c000364 | ||
64 | #define PPC_INST_SLEEP 0x4c0003a4 | ||
65 | |||
66 | /* A2 specific instructions */ | ||
67 | #define PPC_INST_ERATWE 0x7c0001a6 | ||
68 | #define PPC_INST_ERATRE 0x7c000166 | ||
69 | #define PPC_INST_ERATILX 0x7c000066 | ||
70 | #define PPC_INST_ERATIVAX 0x7c000666 | ||
71 | #define PPC_INST_ERATSX 0x7c000126 | ||
72 | #define PPC_INST_ERATSX_DOT 0x7c000127 | ||
73 | |||
57 | /* macros to insert fields into opcodes */ | 74 | /* macros to insert fields into opcodes */ |
58 | #define __PPC_RA(a) (((a) & 0x1f) << 16) | 75 | #define __PPC_RA(a) (((a) & 0x1f) << 16) |
59 | #define __PPC_RB(b) (((b) & 0x1f) << 11) | 76 | #define __PPC_RB(b) (((b) & 0x1f) << 11) |
@@ -65,6 +82,8 @@ | |||
65 | #define __PPC_XT(s) __PPC_XS(s) | 82 | #define __PPC_XT(s) __PPC_XS(s) |
66 | #define __PPC_T_TLB(t) (((t) & 0x3) << 21) | 83 | #define __PPC_T_TLB(t) (((t) & 0x3) << 21) |
67 | #define __PPC_WC(w) (((w) & 0x3) << 21) | 84 | #define __PPC_WC(w) (((w) & 0x3) << 21) |
85 | #define __PPC_WS(w) (((w) & 0x1f) << 11) | ||
86 | |||
68 | /* | 87 | /* |
69 | * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a | 88 | * Only use the larx hint bit on 64bit CPUs. e500v1/v2 based CPUs will treat a |
70 | * larx with EH set as an illegal instruction. | 89 | * larx with EH set as an illegal instruction. |
@@ -88,6 +107,12 @@ | |||
88 | __PPC_RB(b) | __PPC_EH(eh)) | 107 | __PPC_RB(b) | __PPC_EH(eh)) |
89 | #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ | 108 | #define PPC_MSGSND(b) stringify_in_c(.long PPC_INST_MSGSND | \ |
90 | __PPC_RB(b)) | 109 | __PPC_RB(b)) |
110 | #define PPC_POPCNTB(a, s) stringify_in_c(.long PPC_INST_POPCNTB | \ | ||
111 | __PPC_RA(a) | __PPC_RS(s)) | ||
112 | #define PPC_POPCNTD(a, s) stringify_in_c(.long PPC_INST_POPCNTD | \ | ||
113 | __PPC_RA(a) | __PPC_RS(s)) | ||
114 | #define PPC_POPCNTW(a, s) stringify_in_c(.long PPC_INST_POPCNTW | \ | ||
115 | __PPC_RA(a) | __PPC_RS(s)) | ||
91 | #define PPC_RFCI stringify_in_c(.long PPC_INST_RFCI) | 116 | #define PPC_RFCI stringify_in_c(.long PPC_INST_RFCI) |
92 | #define PPC_RFDI stringify_in_c(.long PPC_INST_RFDI) | 117 | #define PPC_RFDI stringify_in_c(.long PPC_INST_RFDI) |
93 | #define PPC_RFMCI stringify_in_c(.long PPC_INST_RFMCI) | 118 | #define PPC_RFMCI stringify_in_c(.long PPC_INST_RFMCI) |
@@ -105,6 +130,21 @@ | |||
105 | #define PPC_TLBIVAX(a,b) stringify_in_c(.long PPC_INST_TLBIVAX | \ | 130 | #define PPC_TLBIVAX(a,b) stringify_in_c(.long PPC_INST_TLBIVAX | \ |
106 | __PPC_RA(a) | __PPC_RB(b)) | 131 | __PPC_RA(a) | __PPC_RB(b)) |
107 | 132 | ||
133 | #define PPC_ERATWE(s, a, w) stringify_in_c(.long PPC_INST_ERATWE | \ | ||
134 | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w)) | ||
135 | #define PPC_ERATRE(s, a, w) stringify_in_c(.long PPC_INST_ERATRE | \ | ||
136 | __PPC_RS(s) | __PPC_RA(a) | __PPC_WS(w)) | ||
137 | #define PPC_ERATILX(t, a, b) stringify_in_c(.long PPC_INST_ERATILX | \ | ||
138 | __PPC_T_TLB(t) | __PPC_RA(a) | \ | ||
139 | __PPC_RB(b)) | ||
140 | #define PPC_ERATIVAX(s, a, b) stringify_in_c(.long PPC_INST_ERATIVAX | \ | ||
141 | __PPC_RS(s) | __PPC_RA(a) | __PPC_RB(b)) | ||
142 | #define PPC_ERATSX(t, a, w) stringify_in_c(.long PPC_INST_ERATSX | \ | ||
143 | __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b)) | ||
144 | #define PPC_ERATSX_DOT(t, a, w) stringify_in_c(.long PPC_INST_ERATSX_DOT | \ | ||
145 | __PPC_RS(t) | __PPC_RA(a) | __PPC_RB(b)) | ||
146 | |||
147 | |||
108 | /* | 148 | /* |
109 | * Define what the VSX XX1 form instructions will look like, then add | 149 | * Define what the VSX XX1 form instructions will look like, then add |
110 | * the 128 bit load store instructions based on that. | 150 | * the 128 bit load store instructions based on that. |
@@ -118,4 +158,7 @@ | |||
118 | #define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \ | 158 | #define XXLOR(t, a, b) stringify_in_c(.long PPC_INST_XXLOR | \ |
119 | VSX_XX3((t), (a), (b))) | 159 | VSX_XX3((t), (a), (b))) |
120 | 160 | ||
161 | #define PPC_NAP stringify_in_c(.long PPC_INST_NAP) | ||
162 | #define PPC_SLEEP stringify_in_c(.long PPC_INST_SLEEP) | ||
163 | |||
121 | #endif /* _ASM_POWERPC_PPC_OPCODE_H */ | 164 | #endif /* _ASM_POWERPC_PPC_OPCODE_H */ |
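The PPC_INST_* values are base opcodes and the __PPC_R*()/__PPC_WS() helpers OR register and field numbers into them; the stringify_in_c() wrappers then emit the finished word as a .long so older assemblers need not know the mnemonic. A standalone (userspace) demo of one encoding; __PPC_RS() is not shown in this hunk, so its definition below (RS field at bit 21) is an assumption.

    #include <stdio.h>

    #define PPC_INST_POPCNTD  0x7c0003f4            /* from the hunk above */
    #define __PPC_RA(a)       (((a) & 0x1f) << 16)  /* from the hunk above */
    #define __PPC_RS(s)       (((s) & 0x1f) << 21)  /* assumed definition */

    int main(void)
    {
            /* popcntd r3,r4 : RA (destination) = 3, RS (source) = 4 */
            unsigned int insn = PPC_INST_POPCNTD | __PPC_RA(3) | __PPC_RS(4);

            printf("popcntd r3,r4 = 0x%08x\n", insn);
            return 0;
    }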
diff --git a/arch/powerpc/include/asm/ppc-pci.h b/arch/powerpc/include/asm/ppc-pci.h index 42fdff0e4b32..43268f15004e 100644 --- a/arch/powerpc/include/asm/ppc-pci.h +++ b/arch/powerpc/include/asm/ppc-pci.h | |||
@@ -28,8 +28,8 @@ extern void find_and_init_phbs(void); | |||
28 | extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */ | 28 | extern struct pci_dev *isa_bridge_pcidev; /* may be NULL if no ISA bus */ |
29 | 29 | ||
30 | /** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */ | 30 | /** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */ |
31 | #define BUID_HI(buid) ((buid) >> 32) | 31 | #define BUID_HI(buid) upper_32_bits(buid) |
32 | #define BUID_LO(buid) ((buid) & 0xffffffff) | 32 | #define BUID_LO(buid) lower_32_bits(buid) |
33 | 33 | ||
34 | /* PCI device_node operations */ | 34 | /* PCI device_node operations */ |
35 | struct device_node; | 35 | struct device_node; |
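BUID_HI()/BUID_LO() now use the generic upper_32_bits()/lower_32_bits() helpers rather than an open-coded shift and mask. A standalone (userspace) check that the split is the same; the helpers are re-expanded here only so the demo builds outside the kernel, and the BUID value is illustrative.

    #include <assert.h>
    #include <stdint.h>

    #define upper_32_bits(n) ((uint32_t)(((n) >> 16) >> 16))
    #define lower_32_bits(n) ((uint32_t)(n))

    int main(void)
    {
            uint64_t buid = 0x8000000200000001ULL;  /* illustrative BUID */

            assert(upper_32_bits(buid) == (uint32_t)(buid >> 32));
            assert(lower_32_bits(buid) == (uint32_t)(buid & 0xffffffff));
            return 0;
    }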
diff --git a/arch/powerpc/include/asm/ppc_asm.h b/arch/powerpc/include/asm/ppc_asm.h index 498fe09263d3..1b422381fc16 100644 --- a/arch/powerpc/include/asm/ppc_asm.h +++ b/arch/powerpc/include/asm/ppc_asm.h | |||
@@ -9,6 +9,7 @@ | |||
9 | #include <asm/asm-compat.h> | 9 | #include <asm/asm-compat.h> |
10 | #include <asm/processor.h> | 10 | #include <asm/processor.h> |
11 | #include <asm/ppc-opcode.h> | 11 | #include <asm/ppc-opcode.h> |
12 | #include <asm/firmware.h> | ||
12 | 13 | ||
13 | #ifndef __ASSEMBLY__ | 14 | #ifndef __ASSEMBLY__ |
14 | #error __FILE__ should only be used in assembler files | 15 | #error __FILE__ should only be used in assembler files |
@@ -26,17 +27,13 @@ | |||
26 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING | 27 | #ifndef CONFIG_VIRT_CPU_ACCOUNTING |
27 | #define ACCOUNT_CPU_USER_ENTRY(ra, rb) | 28 | #define ACCOUNT_CPU_USER_ENTRY(ra, rb) |
28 | #define ACCOUNT_CPU_USER_EXIT(ra, rb) | 29 | #define ACCOUNT_CPU_USER_EXIT(ra, rb) |
30 | #define ACCOUNT_STOLEN_TIME | ||
29 | #else | 31 | #else |
30 | #define ACCOUNT_CPU_USER_ENTRY(ra, rb) \ | 32 | #define ACCOUNT_CPU_USER_ENTRY(ra, rb) \ |
31 | beq 2f; /* if from kernel mode */ \ | 33 | beq 2f; /* if from kernel mode */ \ |
32 | BEGIN_FTR_SECTION; \ | 34 | MFTB(ra); /* get timebase */ \ |
33 | mfspr ra,SPRN_PURR; /* get processor util. reg */ \ | 35 | ld rb,PACA_STARTTIME_USER(r13); \ |
34 | END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ | 36 | std ra,PACA_STARTTIME(r13); \ |
35 | BEGIN_FTR_SECTION; \ | ||
36 | MFTB(ra); /* or get TB if no PURR */ \ | ||
37 | END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ | ||
38 | ld rb,PACA_STARTPURR(r13); \ | ||
39 | std ra,PACA_STARTPURR(r13); \ | ||
40 | subf rb,rb,ra; /* subtract start value */ \ | 37 | subf rb,rb,ra; /* subtract start value */ \ |
41 | ld ra,PACA_USER_TIME(r13); \ | 38 | ld ra,PACA_USER_TIME(r13); \ |
42 | add ra,ra,rb; /* add on to user time */ \ | 39 | add ra,ra,rb; /* add on to user time */ \ |
@@ -44,19 +41,34 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ | |||
44 | 2: | 41 | 2: |
45 | 42 | ||
46 | #define ACCOUNT_CPU_USER_EXIT(ra, rb) \ | 43 | #define ACCOUNT_CPU_USER_EXIT(ra, rb) \ |
47 | BEGIN_FTR_SECTION; \ | 44 | MFTB(ra); /* get timebase */ \ |
48 | mfspr ra,SPRN_PURR; /* get processor util. reg */ \ | 45 | ld rb,PACA_STARTTIME(r13); \ |
49 | END_FTR_SECTION_IFSET(CPU_FTR_PURR); \ | 46 | std ra,PACA_STARTTIME_USER(r13); \ |
50 | BEGIN_FTR_SECTION; \ | ||
51 | MFTB(ra); /* or get TB if no PURR */ \ | ||
52 | END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ | ||
53 | ld rb,PACA_STARTPURR(r13); \ | ||
54 | std ra,PACA_STARTPURR(r13); \ | ||
55 | subf rb,rb,ra; /* subtract start value */ \ | 47 | subf rb,rb,ra; /* subtract start value */ \ |
56 | ld ra,PACA_SYSTEM_TIME(r13); \ | 48 | ld ra,PACA_SYSTEM_TIME(r13); \ |
57 | add ra,ra,rb; /* add on to user time */ \ | 49 | add ra,ra,rb; /* add on to system time */ \ |
58 | std ra,PACA_SYSTEM_TIME(r13); | 50 | std ra,PACA_SYSTEM_TIME(r13) |
59 | #endif | 51 | |
52 | #ifdef CONFIG_PPC_SPLPAR | ||
53 | #define ACCOUNT_STOLEN_TIME \ | ||
54 | BEGIN_FW_FTR_SECTION; \ | ||
55 | beq 33f; \ | ||
56 | /* from user - see if there are any DTL entries to process */ \ | ||
57 | ld r10,PACALPPACAPTR(r13); /* get ptr to VPA */ \ | ||
58 | ld r11,PACA_DTL_RIDX(r13); /* get log read index */ \ | ||
59 | ld r10,LPPACA_DTLIDX(r10); /* get log write index */ \ | ||
60 | cmpd cr1,r11,r10; \ | ||
61 | beq+ cr1,33f; \ | ||
62 | bl .accumulate_stolen_time; \ | ||
63 | 33: \ | ||
64 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) | ||
65 | |||
66 | #else /* CONFIG_PPC_SPLPAR */ | ||
67 | #define ACCOUNT_STOLEN_TIME | ||
68 | |||
69 | #endif /* CONFIG_PPC_SPLPAR */ | ||
70 | |||
71 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | ||
60 | 72 | ||
61 | /* | 73 | /* |
62 | * Macros for storing registers into and loading registers from | 74 | * Macros for storing registers into and loading registers from |
@@ -158,6 +170,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_PURR); \ | |||
158 | #define HMT_MEDIUM or 2,2,2 | 170 | #define HMT_MEDIUM or 2,2,2 |
159 | #define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority | 171 | #define HMT_MEDIUM_HIGH or 5,5,5 # medium high priority |
160 | #define HMT_HIGH or 3,3,3 | 172 | #define HMT_HIGH or 3,3,3 |
173 | #define HMT_EXTRA_HIGH or 7,7,7 # power7 only | ||
161 | 174 | ||
162 | #ifdef __KERNEL__ | 175 | #ifdef __KERNEL__ |
163 | #ifdef CONFIG_PPC64 | 176 | #ifdef CONFIG_PPC64 |
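The rewritten ACCOUNT_CPU_USER_ENTRY/EXIT macros drop the PURR/TB feature sections and always snapshot the timebase into the new PACA_STARTTIME/PACA_STARTTIME_USER slots, matching the starttime and starttime_user fields added to struct paca_struct earlier in this diff. A hedged C restatement of what the entry macro computes (a paraphrase of the assembly, not code from the series); mftb_value stands for the timebase read.

    #include <asm/paca.h>

    /* On entry from user space: charge the ticks since the last exit
     * to user mode to user_time, and start the system-time interval. */
    static inline void account_user_entry_sketch(struct paca_struct *paca,
                                                 u64 mftb_value)
    {
            u64 delta = mftb_value - paca->starttime_user;

            paca->starttime = mftb_value;
            paca->user_time += delta;
    }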
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h index 19c05b0f74be..d50c2b6d9bc3 100644 --- a/arch/powerpc/include/asm/processor.h +++ b/arch/powerpc/include/asm/processor.h | |||
@@ -118,17 +118,16 @@ extern struct task_struct *last_task_used_spe; | |||
118 | #define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4)) | 118 | #define TASK_UNMAPPED_BASE_USER32 (PAGE_ALIGN(TASK_SIZE_USER32 / 4)) |
119 | #define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4)) | 119 | #define TASK_UNMAPPED_BASE_USER64 (PAGE_ALIGN(TASK_SIZE_USER64 / 4)) |
120 | 120 | ||
121 | #define TASK_UNMAPPED_BASE ((test_thread_flag(TIF_32BIT)) ? \ | 121 | #define TASK_UNMAPPED_BASE ((is_32bit_task()) ? \ |
122 | TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 ) | 122 | TASK_UNMAPPED_BASE_USER32 : TASK_UNMAPPED_BASE_USER64 ) |
123 | #endif | 123 | #endif |
124 | 124 | ||
125 | #ifdef __KERNEL__ | ||
126 | #ifdef __powerpc64__ | 125 | #ifdef __powerpc64__ |
127 | 126 | ||
128 | #define STACK_TOP_USER64 TASK_SIZE_USER64 | 127 | #define STACK_TOP_USER64 TASK_SIZE_USER64 |
129 | #define STACK_TOP_USER32 TASK_SIZE_USER32 | 128 | #define STACK_TOP_USER32 TASK_SIZE_USER32 |
130 | 129 | ||
131 | #define STACK_TOP (test_thread_flag(TIF_32BIT) ? \ | 130 | #define STACK_TOP (is_32bit_task() ? \ |
132 | STACK_TOP_USER32 : STACK_TOP_USER64) | 131 | STACK_TOP_USER32 : STACK_TOP_USER64) |
133 | 132 | ||
134 | #define STACK_TOP_MAX STACK_TOP_USER64 | 133 | #define STACK_TOP_MAX STACK_TOP_USER64 |
@@ -139,7 +138,6 @@ extern struct task_struct *last_task_used_spe; | |||
139 | #define STACK_TOP_MAX STACK_TOP | 138 | #define STACK_TOP_MAX STACK_TOP |
140 | 139 | ||
141 | #endif /* __powerpc64__ */ | 140 | #endif /* __powerpc64__ */ |
142 | #endif /* __KERNEL__ */ | ||
143 | 141 | ||
144 | typedef struct { | 142 | typedef struct { |
145 | unsigned long seg; | 143 | unsigned long seg; |
@@ -240,6 +238,10 @@ struct thread_struct { | |||
240 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER | 238 | #ifdef CONFIG_KVM_BOOK3S_32_HANDLER |
241 | void* kvm_shadow_vcpu; /* KVM internal data */ | 239 | void* kvm_shadow_vcpu; /* KVM internal data */ |
242 | #endif /* CONFIG_KVM_BOOK3S_32_HANDLER */ | 240 | #endif /* CONFIG_KVM_BOOK3S_32_HANDLER */ |
241 | #ifdef CONFIG_PPC64 | ||
242 | unsigned long dscr; | ||
243 | int dscr_inherit; | ||
244 | #endif | ||
243 | }; | 245 | }; |
244 | 246 | ||
245 | #define ARCH_MIN_TASKALIGN 16 | 247 | #define ARCH_MIN_TASKALIGN 16 |
diff --git a/arch/powerpc/include/asm/prom.h b/arch/powerpc/include/asm/prom.h index ae26f2efd089..c189aa5fe1f4 100644 --- a/arch/powerpc/include/asm/prom.h +++ b/arch/powerpc/include/asm/prom.h | |||
@@ -42,7 +42,7 @@ extern void pci_create_OF_bus_map(void); | |||
42 | 42 | ||
43 | /* Translate a DMA address from device space to CPU space */ | 43 | /* Translate a DMA address from device space to CPU space */ |
44 | extern u64 of_translate_dma_address(struct device_node *dev, | 44 | extern u64 of_translate_dma_address(struct device_node *dev, |
45 | const u32 *in_addr); | 45 | const __be32 *in_addr); |
46 | 46 | ||
47 | #ifdef CONFIG_PCI | 47 | #ifdef CONFIG_PCI |
48 | extern unsigned long pci_address_to_pio(phys_addr_t address); | 48 | extern unsigned long pci_address_to_pio(phys_addr_t address); |
@@ -63,9 +63,6 @@ struct device_node *of_get_cpu_node(int cpu, unsigned int *thread); | |||
63 | /* cache lookup */ | 63 | /* cache lookup */ |
64 | struct device_node *of_find_next_cache_node(struct device_node *np); | 64 | struct device_node *of_find_next_cache_node(struct device_node *np); |
65 | 65 | ||
66 | /* Get the MAC address */ | ||
67 | extern const void *of_get_mac_address(struct device_node *np); | ||
68 | |||
69 | #ifdef CONFIG_NUMA | 66 | #ifdef CONFIG_NUMA |
70 | extern int of_node_to_nid(struct device_node *device); | 67 | extern int of_node_to_nid(struct device_node *device); |
71 | #else | 68 | #else |
@@ -73,21 +70,6 @@ static inline int of_node_to_nid(struct device_node *device) { return 0; } | |||
73 | #endif | 70 | #endif |
74 | #define of_node_to_nid of_node_to_nid | 71 | #define of_node_to_nid of_node_to_nid |
75 | 72 | ||
76 | /** | ||
77 | * of_irq_map_pci - Resolve the interrupt for a PCI device | ||
78 | * @pdev: the device whose interrupt is to be resolved | ||
79 | * @out_irq: structure of_irq filled by this function | ||
80 | * | ||
81 | * This function resolves the PCI interrupt for a given PCI device. If a | ||
82 | * device-node exists for a given pci_dev, it will use normal OF tree | ||
83 | * walking. If not, it will implement standard swizzling and walk up the | ||
84 | * PCI tree until an device-node is found, at which point it will finish | ||
85 | * resolving using the OF tree walking. | ||
86 | */ | ||
87 | struct pci_dev; | ||
88 | struct of_irq; | ||
89 | extern int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq); | ||
90 | |||
91 | extern void of_instantiate_rtc(void); | 73 | extern void of_instantiate_rtc(void); |
92 | 74 | ||
93 | /* These includes are put at the bottom because they may contain things | 75 | /* These includes are put at the bottom because they may contain things |
diff --git a/arch/powerpc/include/asm/pte-common.h b/arch/powerpc/include/asm/pte-common.h index f2b370180a09..8d1569c29042 100644 --- a/arch/powerpc/include/asm/pte-common.h +++ b/arch/powerpc/include/asm/pte-common.h | |||
@@ -86,7 +86,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); | |||
86 | #define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1)) | 86 | #define PTE_RPN_MASK (~((1UL<<PTE_RPN_SHIFT)-1)) |
87 | #endif | 87 | #endif |
88 | 88 | ||
89 | /* _PAGE_CHG_MASK masks of bits that are to be preserved accross | 89 | /* _PAGE_CHG_MASK masks of bits that are to be preserved across |
90 | * pgprot changes | 90 | * pgprot changes |
91 | */ | 91 | */ |
92 | #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ | 92 | #define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_HPTEFLAGS | _PAGE_DIRTY | \ |
@@ -162,7 +162,7 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); | |||
162 | * on platforms where such control is possible. | 162 | * on platforms where such control is possible. |
163 | */ | 163 | */ |
164 | #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ | 164 | #if defined(CONFIG_KGDB) || defined(CONFIG_XMON) || defined(CONFIG_BDI_SWITCH) ||\ |
165 | defined(CONFIG_KPROBES) | 165 | defined(CONFIG_KPROBES) || defined(CONFIG_DYNAMIC_FTRACE) |
166 | #define PAGE_KERNEL_TEXT PAGE_KERNEL_X | 166 | #define PAGE_KERNEL_TEXT PAGE_KERNEL_X |
167 | #else | 167 | #else |
168 | #define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX | 168 | #define PAGE_KERNEL_TEXT PAGE_KERNEL_ROX |
@@ -171,6 +171,13 @@ extern unsigned long bad_call_to_PMD_PAGE_SIZE(void); | |||
171 | /* Make modules code happy. We don't set RO yet */ | 171 | /* Make modules code happy. We don't set RO yet */ |
172 | #define PAGE_KERNEL_EXEC PAGE_KERNEL_X | 172 | #define PAGE_KERNEL_EXEC PAGE_KERNEL_X |
173 | 173 | ||
174 | /* | ||
175 | * Don't just check for any non zero bits in __PAGE_USER, since for book3e | ||
176 | * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in | ||
177 | * _PAGE_USER. Need to explicitly match _PAGE_BAP_UR bit in that case too. | ||
178 | */ | ||
179 | #define pte_user(val) ((val & _PAGE_USER) == _PAGE_USER) | ||
180 | |||
174 | /* Advertise special mapping type for AGP */ | 181 | /* Advertise special mapping type for AGP */ |
175 | #define PAGE_AGP (PAGE_KERNEL_NC) | 182 | #define PAGE_AGP (PAGE_KERNEL_NC) |
176 | #define HAVE_PAGE_AGP | 183 | #define HAVE_PAGE_AGP |
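The pte_user() macro added above needs the full-mask compare the comment describes rather than a simple non-zero test. A minimal hedged sketch of a caller follows; the pte variable and the action taken are purely illustrative:

	/* Sketch: classify a PTE value obtained from some page-table walk */
	unsigned long flags = pte_val(pte);

	if (pte_user(flags))
		/* every bit of _PAGE_USER (incl. _PAGE_BAP_UR on book3e) is set */
		pr_debug("mapping is user-accessible\n");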
diff --git a/arch/powerpc/include/asm/pte-hash64-64k.h b/arch/powerpc/include/asm/pte-hash64-64k.h index c4490f9c67c4..59247e816ac5 100644 --- a/arch/powerpc/include/asm/pte-hash64-64k.h +++ b/arch/powerpc/include/asm/pte-hash64-64k.h | |||
@@ -22,7 +22,7 @@ | |||
22 | #define _PAGE_HASHPTE _PAGE_HPTE_SUB | 22 | #define _PAGE_HASHPTE _PAGE_HPTE_SUB |
23 | 23 | ||
24 | /* Note the full page bits must be in the same location as for normal | 24 | /* Note the full page bits must be in the same location as for normal |
25 | * 4k pages as the same asssembly will be used to insert 64K pages | 25 | * 4k pages as the same assembly will be used to insert 64K pages |
26 | * wether the kernel has CONFIG_PPC_64K_PAGES or not | 26 | * wether the kernel has CONFIG_PPC_64K_PAGES or not |
27 | */ | 27 | */ |
28 | #define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */ | 28 | #define _PAGE_F_SECOND 0x00008000 /* full page: hidx bits */ |
diff --git a/arch/powerpc/include/asm/ptrace.h b/arch/powerpc/include/asm/ptrace.h index 0175a676b34b..48223f9b8728 100644 --- a/arch/powerpc/include/asm/ptrace.h +++ b/arch/powerpc/include/asm/ptrace.h | |||
@@ -125,8 +125,10 @@ extern int ptrace_put_reg(struct task_struct *task, int regno, | |||
125 | #endif /* ! __powerpc64__ */ | 125 | #endif /* ! __powerpc64__ */ |
126 | #define TRAP(regs) ((regs)->trap & ~0xF) | 126 | #define TRAP(regs) ((regs)->trap & ~0xF) |
127 | #ifdef __powerpc64__ | 127 | #ifdef __powerpc64__ |
128 | #define NV_REG_POISON 0xdeadbeefdeadbeefUL | ||
128 | #define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1) | 129 | #define CHECK_FULL_REGS(regs) BUG_ON(regs->trap & 1) |
129 | #else | 130 | #else |
131 | #define NV_REG_POISON 0xdeadbeef | ||
130 | #define CHECK_FULL_REGS(regs) \ | 132 | #define CHECK_FULL_REGS(regs) \ |
131 | do { \ | 133 | do { \ |
132 | if ((regs)->trap & 1) \ | 134 | if ((regs)->trap & 1) \ |

diff --git a/arch/powerpc/include/asm/qe_ic.h b/arch/powerpc/include/asm/qe_ic.h index cf519663a791..f706164b0bd0 100644 --- a/arch/powerpc/include/asm/qe_ic.h +++ b/arch/powerpc/include/asm/qe_ic.h | |||
@@ -81,7 +81,7 @@ int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high); | |||
81 | static inline void qe_ic_cascade_low_ipic(unsigned int irq, | 81 | static inline void qe_ic_cascade_low_ipic(unsigned int irq, |
82 | struct irq_desc *desc) | 82 | struct irq_desc *desc) |
83 | { | 83 | { |
84 | struct qe_ic *qe_ic = desc->handler_data; | 84 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); |
85 | unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); | 85 | unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); |
86 | 86 | ||
87 | if (cascade_irq != NO_IRQ) | 87 | if (cascade_irq != NO_IRQ) |
@@ -91,7 +91,7 @@ static inline void qe_ic_cascade_low_ipic(unsigned int irq, | |||
91 | static inline void qe_ic_cascade_high_ipic(unsigned int irq, | 91 | static inline void qe_ic_cascade_high_ipic(unsigned int irq, |
92 | struct irq_desc *desc) | 92 | struct irq_desc *desc) |
93 | { | 93 | { |
94 | struct qe_ic *qe_ic = desc->handler_data; | 94 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); |
95 | unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); | 95 | unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); |
96 | 96 | ||
97 | if (cascade_irq != NO_IRQ) | 97 | if (cascade_irq != NO_IRQ) |
@@ -101,32 +101,35 @@ static inline void qe_ic_cascade_high_ipic(unsigned int irq, | |||
101 | static inline void qe_ic_cascade_low_mpic(unsigned int irq, | 101 | static inline void qe_ic_cascade_low_mpic(unsigned int irq, |
102 | struct irq_desc *desc) | 102 | struct irq_desc *desc) |
103 | { | 103 | { |
104 | struct qe_ic *qe_ic = desc->handler_data; | 104 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); |
105 | unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); | 105 | unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic); |
106 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
106 | 107 | ||
107 | if (cascade_irq != NO_IRQ) | 108 | if (cascade_irq != NO_IRQ) |
108 | generic_handle_irq(cascade_irq); | 109 | generic_handle_irq(cascade_irq); |
109 | 110 | ||
110 | desc->chip->eoi(irq); | 111 | chip->irq_eoi(&desc->irq_data); |
111 | } | 112 | } |
112 | 113 | ||
113 | static inline void qe_ic_cascade_high_mpic(unsigned int irq, | 114 | static inline void qe_ic_cascade_high_mpic(unsigned int irq, |
114 | struct irq_desc *desc) | 115 | struct irq_desc *desc) |
115 | { | 116 | { |
116 | struct qe_ic *qe_ic = desc->handler_data; | 117 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); |
117 | unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); | 118 | unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic); |
119 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
118 | 120 | ||
119 | if (cascade_irq != NO_IRQ) | 121 | if (cascade_irq != NO_IRQ) |
120 | generic_handle_irq(cascade_irq); | 122 | generic_handle_irq(cascade_irq); |
121 | 123 | ||
122 | desc->chip->eoi(irq); | 124 | chip->irq_eoi(&desc->irq_data); |
123 | } | 125 | } |
124 | 126 | ||
125 | static inline void qe_ic_cascade_muxed_mpic(unsigned int irq, | 127 | static inline void qe_ic_cascade_muxed_mpic(unsigned int irq, |
126 | struct irq_desc *desc) | 128 | struct irq_desc *desc) |
127 | { | 129 | { |
128 | struct qe_ic *qe_ic = desc->handler_data; | 130 | struct qe_ic *qe_ic = irq_desc_get_handler_data(desc); |
129 | unsigned int cascade_irq; | 131 | unsigned int cascade_irq; |
132 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
130 | 133 | ||
131 | cascade_irq = qe_ic_get_high_irq(qe_ic); | 134 | cascade_irq = qe_ic_get_high_irq(qe_ic); |
132 | if (cascade_irq == NO_IRQ) | 135 | if (cascade_irq == NO_IRQ) |
@@ -135,7 +138,7 @@ static inline void qe_ic_cascade_muxed_mpic(unsigned int irq, | |||
135 | if (cascade_irq != NO_IRQ) | 138 | if (cascade_irq != NO_IRQ) |
136 | generic_handle_irq(cascade_irq); | 139 | generic_handle_irq(cascade_irq); |
137 | 140 | ||
138 | desc->chip->eoi(irq); | 141 | chip->irq_eoi(&desc->irq_data); |
139 | } | 142 | } |
140 | 143 | ||
141 | #endif /* _ASM_POWERPC_QE_IC_H */ | 144 | #endif /* _ASM_POWERPC_QE_IC_H */ |
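The cascade helpers above now fetch the qe_ic through irq_desc_get_handler_data() and EOI via the irq_chip instead of touching desc->chip directly. A hedged sketch of roughly what the QE IC setup path does when hooking one of these cascades (the cascade_virq and qe_ic values are assumed to come from the platform's QE interrupt controller probe):

	/* Sketch only: attach handler data, then chain the cascade handler */
	irq_set_handler_data(cascade_virq, qe_ic);
	irq_set_chained_handler(cascade_virq, qe_ic_cascade_low_mpic);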
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h index ff0005eec7dd..c5cae0dd176c 100644 --- a/arch/powerpc/include/asm/reg.h +++ b/arch/powerpc/include/asm/reg.h | |||
@@ -99,17 +99,23 @@ | |||
99 | #define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */ | 99 | #define MSR_LE __MASK(MSR_LE_LG) /* Little Endian */ |
100 | 100 | ||
101 | #if defined(CONFIG_PPC_BOOK3S_64) | 101 | #if defined(CONFIG_PPC_BOOK3S_64) |
102 | #define MSR_64BIT MSR_SF | ||
103 | |||
102 | /* Server variant */ | 104 | /* Server variant */ |
103 | #define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV | 105 | #define MSR_ MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_ISF |MSR_HV |
104 | #define MSR_KERNEL MSR_ | MSR_SF | 106 | #define MSR_KERNEL MSR_ | MSR_64BIT |
105 | #define MSR_USER32 MSR_ | MSR_PR | MSR_EE | 107 | #define MSR_USER32 MSR_ | MSR_PR | MSR_EE |
106 | #define MSR_USER64 MSR_USER32 | MSR_SF | 108 | #define MSR_USER64 MSR_USER32 | MSR_64BIT |
107 | #elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx) | 109 | #elif defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_8xx) |
108 | /* Default MSR for kernel mode. */ | 110 | /* Default MSR for kernel mode. */ |
109 | #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR) | 111 | #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR) |
110 | #define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) | 112 | #define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) |
111 | #endif | 113 | #endif |
112 | 114 | ||
115 | #ifndef MSR_64BIT | ||
116 | #define MSR_64BIT 0 | ||
117 | #endif | ||
118 | |||
113 | /* Floating Point Status and Control Register (FPSCR) Fields */ | 119 | /* Floating Point Status and Control Register (FPSCR) Fields */ |
114 | #define FPSCR_FX 0x80000000 /* FPU exception summary */ | 120 | #define FPSCR_FX 0x80000000 /* FPU exception summary */ |
115 | #define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */ | 121 | #define FPSCR_FEX 0x40000000 /* FPU enabled exception summary */ |
@@ -170,8 +176,20 @@ | |||
170 | #define SPEFSCR_FRMC 0x00000003 /* Embedded FP rounding mode control */ | 176 | #define SPEFSCR_FRMC 0x00000003 /* Embedded FP rounding mode control */ |
171 | 177 | ||
172 | /* Special Purpose Registers (SPRNs)*/ | 178 | /* Special Purpose Registers (SPRNs)*/ |
179 | |||
180 | #ifdef CONFIG_40x | ||
181 | #define SPRN_PID 0x3B1 /* Process ID */ | ||
182 | #else | ||
183 | #define SPRN_PID 0x030 /* Process ID */ | ||
184 | #ifdef CONFIG_BOOKE | ||
185 | #define SPRN_PID0 SPRN_PID/* Process ID Register 0 */ | ||
186 | #endif | ||
187 | #endif | ||
188 | |||
173 | #define SPRN_CTR 0x009 /* Count Register */ | 189 | #define SPRN_CTR 0x009 /* Count Register */ |
174 | #define SPRN_DSCR 0x11 | 190 | #define SPRN_DSCR 0x11 |
191 | #define SPRN_CFAR 0x1c /* Come From Address Register */ | ||
192 | #define SPRN_ACOP 0x1F /* Available Coprocessor Register */ | ||
175 | #define SPRN_CTRLF 0x088 | 193 | #define SPRN_CTRLF 0x088 |
176 | #define SPRN_CTRLT 0x098 | 194 | #define SPRN_CTRLT 0x098 |
177 | #define CTRL_CT 0xc0000000 /* current thread */ | 195 | #define CTRL_CT 0xc0000000 /* current thread */ |
@@ -200,8 +218,43 @@ | |||
200 | #define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ | 218 | #define SPRN_TBWL 0x11C /* Time Base Lower Register (super, R/W) */ |
201 | #define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */ | 219 | #define SPRN_TBWU 0x11D /* Time Base Upper Register (super, R/W) */ |
202 | #define SPRN_SPURR 0x134 /* Scaled PURR */ | 220 | #define SPRN_SPURR 0x134 /* Scaled PURR */ |
221 | #define SPRN_HSPRG0 0x130 /* Hypervisor Scratch 0 */ | ||
222 | #define SPRN_HSPRG1 0x131 /* Hypervisor Scratch 1 */ | ||
223 | #define SPRN_HDSISR 0x132 | ||
224 | #define SPRN_HDAR 0x133 | ||
225 | #define SPRN_HDEC 0x136 /* Hypervisor Decrementer */ | ||
203 | #define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */ | 226 | #define SPRN_HIOR 0x137 /* 970 Hypervisor interrupt offset */ |
227 | #define SPRN_RMOR 0x138 /* Real mode offset register */ | ||
228 | #define SPRN_HRMOR 0x139 /* Real mode offset register */ | ||
229 | #define SPRN_HSRR0 0x13A /* Hypervisor Save/Restore 0 */ | ||
230 | #define SPRN_HSRR1 0x13B /* Hypervisor Save/Restore 1 */ | ||
204 | #define SPRN_LPCR 0x13E /* LPAR Control Register */ | 231 | #define SPRN_LPCR 0x13E /* LPAR Control Register */ |
232 | #define LPCR_VPM0 (1ul << (63-0)) | ||
233 | #define LPCR_VPM1 (1ul << (63-1)) | ||
234 | #define LPCR_ISL (1ul << (63-2)) | ||
235 | #define LPCR_DPFD_SH (63-11) | ||
236 | #define LPCR_VRMA_L (1ul << (63-12)) | ||
237 | #define LPCR_VRMA_LP0 (1ul << (63-15)) | ||
238 | #define LPCR_VRMA_LP1 (1ul << (63-16)) | ||
239 | #define LPCR_RMLS 0x1C000000 /* impl dependent rmo limit sel */ | ||
240 | #define LPCR_ILE 0x02000000 /* !HV irqs set MSR:LE */ | ||
241 | #define LPCR_PECE 0x00007000 /* powersave exit cause enable */ | ||
242 | #define LPCR_PECE0 0x00004000 /* ext. exceptions can cause exit */ | ||
243 | #define LPCR_PECE1 0x00002000 /* decrementer can cause exit */ | ||
244 | #define LPCR_PECE2 0x00001000 /* machine check etc can cause exit */ | ||
245 | #define LPCR_MER 0x00000800 /* Mediated External Exception */ | ||
246 | #define LPCR_LPES0 0x00000008 /* LPAR Env selector 0 */ | ||
247 | #define LPCR_LPES1 0x00000004 /* LPAR Env selector 1 */ | ||
248 | #define LPCR_RMI 0x00000002 /* real mode is cache inhibit */ | ||
249 | #define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */ | ||
250 | #define SPRN_LPID 0x13F /* Logical Partition Identifier */ | ||
251 | #define SPRN_HMER 0x150 /* Hardware m? error recovery */ | ||
252 | #define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */ | ||
253 | #define SPRN_HEIR 0x153 /* Hypervisor Emulated Instruction Register */ | ||
254 | #define SPRN_TLBINDEXR 0x154 /* P7 TLB control register */ | ||
255 | #define SPRN_TLBVPNR 0x155 /* P7 TLB control register */ | ||
256 | #define SPRN_TLBRPNR 0x156 /* P7 TLB control register */ | ||
257 | #define SPRN_TLBLPIDR 0x157 /* P7 TLB control register */ | ||
205 | #define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */ | 258 | #define SPRN_DBAT0L 0x219 /* Data BAT 0 Lower Register */ |
206 | #define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */ | 259 | #define SPRN_DBAT0U 0x218 /* Data BAT 0 Upper Register */ |
207 | #define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */ | 260 | #define SPRN_DBAT1L 0x21B /* Data BAT 1 Lower Register */ |
@@ -283,6 +336,7 @@ | |||
283 | #define HID0_NOPTI (1<<0) /* No-op dcbt and dcbst instr. */ | 336 | #define HID0_NOPTI (1<<0) /* No-op dcbt and dcbst instr. */ |
284 | 337 | ||
285 | #define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */ | 338 | #define SPRN_HID1 0x3F1 /* Hardware Implementation Register 1 */ |
339 | #ifdef CONFIG_6xx | ||
286 | #define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */ | 340 | #define HID1_EMCP (1<<31) /* 7450 Machine Check Pin Enable */ |
287 | #define HID1_DFS (1<<22) /* 7447A Dynamic Frequency Scaling */ | 341 | #define HID1_DFS (1<<22) /* 7447A Dynamic Frequency Scaling */ |
288 | #define HID1_PC0 (1<<16) /* 7450 PLL_CFG[0] */ | 342 | #define HID1_PC0 (1<<16) /* 7450 PLL_CFG[0] */ |
@@ -292,6 +346,7 @@ | |||
292 | #define HID1_SYNCBE (1<<11) /* 7450 ABE for sync, eieio */ | 346 | #define HID1_SYNCBE (1<<11) /* 7450 ABE for sync, eieio */ |
293 | #define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */ | 347 | #define HID1_ABE (1<<10) /* 7450 Address Broadcast Enable */ |
294 | #define HID1_PS (1<<16) /* 750FX PLL selection */ | 348 | #define HID1_PS (1<<16) /* 750FX PLL selection */ |
349 | #endif | ||
295 | #define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */ | 350 | #define SPRN_HID2 0x3F8 /* Hardware Implementation Register 2 */ |
296 | #define SPRN_HID2_GEKKO 0x398 /* Gekko HID2 Register */ | 351 | #define SPRN_HID2_GEKKO 0x398 /* Gekko HID2 Register */ |
297 | #define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */ | 352 | #define SPRN_IABR 0x3F2 /* Instruction Address Breakpoint Register */ |
@@ -422,16 +477,23 @@ | |||
422 | #define SPRN_SRR0 0x01A /* Save/Restore Register 0 */ | 477 | #define SPRN_SRR0 0x01A /* Save/Restore Register 0 */ |
423 | #define SPRN_SRR1 0x01B /* Save/Restore Register 1 */ | 478 | #define SPRN_SRR1 0x01B /* Save/Restore Register 1 */ |
424 | #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ | 479 | #define SRR1_WAKEMASK 0x00380000 /* reason for wakeup */ |
425 | #define SRR1_WAKERESET 0x00380000 /* System reset */ | ||
426 | #define SRR1_WAKESYSERR 0x00300000 /* System error */ | 480 | #define SRR1_WAKESYSERR 0x00300000 /* System error */ |
427 | #define SRR1_WAKEEE 0x00200000 /* External interrupt */ | 481 | #define SRR1_WAKEEE 0x00200000 /* External interrupt */ |
428 | #define SRR1_WAKEMT 0x00280000 /* mtctrl */ | 482 | #define SRR1_WAKEMT 0x00280000 /* mtctrl */ |
483 | #define SRR1_WAKEHMI 0x00280000 /* Hypervisor maintenance */ | ||
429 | #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ | 484 | #define SRR1_WAKEDEC 0x00180000 /* Decrementer interrupt */ |
430 | #define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ | 485 | #define SRR1_WAKETHERM 0x00100000 /* Thermal management interrupt */ |
486 | #define SRR1_WAKERESET 0x00100000 /* System reset */ | ||
487 | #define SRR1_WAKESTATE 0x00030000 /* Powersave exit mask [46:47] */ | ||
488 | #define SRR1_WS_DEEPEST 0x00030000 /* Some resources not maintained, | ||
489 | * may not be recoverable */ | ||
490 | #define SRR1_WS_DEEPER 0x00020000 /* Some resources not maintained */ | ||
491 | #define SRR1_WS_DEEP 0x00010000 /* All resources maintained */ | ||
431 | #define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */ | 492 | #define SRR1_PROGFPE 0x00100000 /* Floating Point Enabled */ |
432 | #define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */ | 493 | #define SRR1_PROGPRIV 0x00040000 /* Privileged instruction */ |
433 | #define SRR1_PROGTRAP 0x00020000 /* Trap */ | 494 | #define SRR1_PROGTRAP 0x00020000 /* Trap */ |
434 | #define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */ | 495 | #define SRR1_PROGADDR 0x00010000 /* SRR0 contains subsequent addr */ |
496 | |||
435 | #define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */ | 497 | #define SPRN_HSRR0 0x13A /* Save/Restore Register 0 */ |
436 | #define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */ | 498 | #define SPRN_HSRR1 0x13B /* Save/Restore Register 1 */ |
437 | 499 | ||
@@ -661,12 +723,15 @@ | |||
661 | * SPRG usage: | 723 | * SPRG usage: |
662 | * | 724 | * |
663 | * All 64-bit: | 725 | * All 64-bit: |
664 | * - SPRG1 stores PACA pointer | 726 | * - SPRG1 stores PACA pointer except 64-bit server in |
727 | * HV mode in which case it is HSPRG0 | ||
665 | * | 728 | * |
666 | * 64-bit server: | 729 | * 64-bit server: |
667 | * - SPRG0 unused (reserved for HV on Power4) | 730 | * - SPRG0 unused (reserved for HV on Power4) |
668 | * - SPRG2 scratch for exception vectors | 731 | * - SPRG2 scratch for exception vectors |
669 | * - SPRG3 unused (user visible) | 732 | * - SPRG3 unused (user visible) |
733 | * - HSPRG0 stores PACA in HV mode | ||
734 | * - HSPRG1 scratch for "HV" exceptions | ||
670 | * | 735 | * |
671 | * 64-bit embedded | 736 | * 64-bit embedded |
672 | * - SPRG0 generic exception scratch | 737 | * - SPRG0 generic exception scratch |
@@ -729,6 +794,41 @@ | |||
729 | 794 | ||
730 | #ifdef CONFIG_PPC_BOOK3S_64 | 795 | #ifdef CONFIG_PPC_BOOK3S_64 |
731 | #define SPRN_SPRG_SCRATCH0 SPRN_SPRG2 | 796 | #define SPRN_SPRG_SCRATCH0 SPRN_SPRG2 |
797 | #define SPRN_SPRG_HPACA SPRN_HSPRG0 | ||
798 | #define SPRN_SPRG_HSCRATCH0 SPRN_HSPRG1 | ||
799 | |||
800 | #define GET_PACA(rX) \ | ||
801 | BEGIN_FTR_SECTION_NESTED(66); \ | ||
802 | mfspr rX,SPRN_SPRG_PACA; \ | ||
803 | FTR_SECTION_ELSE_NESTED(66); \ | ||
804 | mfspr rX,SPRN_SPRG_HPACA; \ | ||
805 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) | ||
806 | |||
807 | #define SET_PACA(rX) \ | ||
808 | BEGIN_FTR_SECTION_NESTED(66); \ | ||
809 | mtspr SPRN_SPRG_PACA,rX; \ | ||
810 | FTR_SECTION_ELSE_NESTED(66); \ | ||
811 | mtspr SPRN_SPRG_HPACA,rX; \ | ||
812 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) | ||
813 | |||
814 | #define GET_SCRATCH0(rX) \ | ||
815 | BEGIN_FTR_SECTION_NESTED(66); \ | ||
816 | mfspr rX,SPRN_SPRG_SCRATCH0; \ | ||
817 | FTR_SECTION_ELSE_NESTED(66); \ | ||
818 | mfspr rX,SPRN_SPRG_HSCRATCH0; \ | ||
819 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) | ||
820 | |||
821 | #define SET_SCRATCH0(rX) \ | ||
822 | BEGIN_FTR_SECTION_NESTED(66); \ | ||
823 | mtspr SPRN_SPRG_SCRATCH0,rX; \ | ||
824 | FTR_SECTION_ELSE_NESTED(66); \ | ||
825 | mtspr SPRN_SPRG_HSCRATCH0,rX; \ | ||
826 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_HVMODE_206, 66) | ||
827 | |||
828 | #else /* CONFIG_PPC_BOOK3S_64 */ | ||
829 | #define GET_SCRATCH0(rX) mfspr rX,SPRN_SPRG_SCRATCH0 | ||
830 | #define SET_SCRATCH0(rX) mtspr SPRN_SPRG_SCRATCH0,rX | ||
831 | |||
732 | #endif | 832 | #endif |
733 | 833 | ||
734 | #ifdef CONFIG_PPC_BOOK3E_64 | 834 | #ifdef CONFIG_PPC_BOOK3E_64 |
@@ -738,6 +838,10 @@ | |||
738 | #define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2 | 838 | #define SPRN_SPRG_TLB_EXFRAME SPRN_SPRG2 |
739 | #define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6 | 839 | #define SPRN_SPRG_TLB_SCRATCH SPRN_SPRG6 |
740 | #define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0 | 840 | #define SPRN_SPRG_GEN_SCRATCH SPRN_SPRG0 |
841 | |||
842 | #define SET_PACA(rX) mtspr SPRN_SPRG_PACA,rX | ||
843 | #define GET_PACA(rX) mfspr rX,SPRN_SPRG_PACA | ||
844 | |||
741 | #endif | 845 | #endif |
742 | 846 | ||
743 | #ifdef CONFIG_PPC_BOOK3S_32 | 847 | #ifdef CONFIG_PPC_BOOK3S_32 |
@@ -788,6 +892,8 @@ | |||
788 | #define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 | 892 | #define SPRN_SPRG_SCRATCH1 SPRN_SPRG1 |
789 | #endif | 893 | #endif |
790 | 894 | ||
895 | |||
896 | |||
791 | /* | 897 | /* |
792 | * An mtfsf instruction with the L bit set. On CPUs that support this a | 898 | * An mtfsf instruction with the L bit set. On CPUs that support this a |
793 | * full 64bits of FPSCR is restored and on other CPUs the L bit is ignored. | 899 | * full 64bits of FPSCR is restored and on other CPUs the L bit is ignored. |
@@ -850,6 +956,8 @@ | |||
850 | #define PVR_7450 0x80000000 | 956 | #define PVR_7450 0x80000000 |
851 | #define PVR_8540 0x80200000 | 957 | #define PVR_8540 0x80200000 |
852 | #define PVR_8560 0x80200000 | 958 | #define PVR_8560 0x80200000 |
959 | #define PVR_VER_E500V1 0x8020 | ||
960 | #define PVR_VER_E500V2 0x8021 | ||
853 | /* | 961 | /* |
854 | * For the 8xx processors, all of them report the same PVR family for | 962 | * For the 8xx processors, all of them report the same PVR family for |
855 | * the PowerPC core. The various versions of these processors must be | 963 | * the PowerPC core. The various versions of these processors must be |
@@ -878,7 +986,10 @@ | |||
878 | #define PV_970 0x0039 | 986 | #define PV_970 0x0039 |
879 | #define PV_POWER5 0x003A | 987 | #define PV_POWER5 0x003A |
880 | #define PV_POWER5p 0x003B | 988 | #define PV_POWER5p 0x003B |
989 | #define PV_POWER7 0x003F | ||
881 | #define PV_970FX 0x003C | 990 | #define PV_970FX 0x003C |
991 | #define PV_POWER6 0x003E | ||
992 | #define PV_POWER7 0x003F | ||
882 | #define PV_630 0x0040 | 993 | #define PV_630 0x0040 |
883 | #define PV_630p 0x0041 | 994 | #define PV_630p 0x0041 |
884 | #define PV_970MP 0x0044 | 995 | #define PV_970MP 0x0044 |
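The GET_PACA/SET_SCRATCH0 macros introduced above choose between the normal SPRGs and the hypervisor HSPRGs through a feature section keyed off CPU_FTR_HVMODE_206, so one instruction sequence serves both modes. A hedged, purely illustrative sketch of the kind of exception-prologue assembly they are written for (not a real vector):

	SET_SCRATCH0(r13)	/* stash the caller's r13 in the scratch SPR */
	GET_PACA(r13)		/* r13 now points at this cpu's PACA */
	GET_SCRATCH0(r9)	/* recover the saved r13 into r9 when needed */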
diff --git a/arch/powerpc/include/asm/reg_a2.h b/arch/powerpc/include/asm/reg_a2.h new file mode 100644 index 000000000000..3d52a1132f3d --- /dev/null +++ b/arch/powerpc/include/asm/reg_a2.h | |||
@@ -0,0 +1,165 @@ | |||
1 | /* | ||
2 | * Register definitions specific to the A2 core | ||
3 | * | ||
4 | * Copyright (C) 2008 Ben. Herrenschmidt (benh@kernel.crashing.org), IBM Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef __ASM_POWERPC_REG_A2_H__ | ||
13 | #define __ASM_POWERPC_REG_A2_H__ | ||
14 | |||
15 | #define SPRN_TENSR 0x1b5 | ||
16 | #define SPRN_TENS 0x1b6 /* Thread ENable Set */ | ||
17 | #define SPRN_TENC 0x1b7 /* Thread ENable Clear */ | ||
18 | |||
19 | #define SPRN_A2_CCR0 0x3f0 /* Core Configuration Register 0 */ | ||
20 | #define SPRN_A2_CCR1 0x3f1 /* Core Configuration Register 1 */ | ||
21 | #define SPRN_A2_CCR2 0x3f2 /* Core Configuration Register 2 */ | ||
22 | #define SPRN_MMUCR0 0x3fc /* MMU Control Register 0 */ | ||
23 | #define SPRN_MMUCR1 0x3fd /* MMU Control Register 1 */ | ||
24 | #define SPRN_MMUCR2 0x3fe /* MMU Control Register 2 */ | ||
25 | #define SPRN_MMUCR3 0x3ff /* MMU Control Register 3 */ | ||
26 | |||
27 | #define SPRN_IAR 0x372 | ||
28 | |||
29 | #define SPRN_IUCR0 0x3f3 | ||
30 | #define IUCR0_ICBI_ACK 0x1000 | ||
31 | |||
32 | #define SPRN_XUCR0 0x3f6 /* Execution Unit Config Register 0 */ | ||
33 | |||
34 | #define A2_IERAT_SIZE 16 | ||
35 | #define A2_DERAT_SIZE 32 | ||
36 | |||
37 | /* A2 MMUCR0 bits */ | ||
38 | #define MMUCR0_ECL 0x80000000 /* Extended Class for TLB fills */ | ||
39 | #define MMUCR0_TID_NZ 0x40000000 /* TID is non-zero */ | ||
40 | #define MMUCR0_TS 0x10000000 /* Translation space for TLB fills */ | ||
41 | #define MMUCR0_TGS 0x20000000 /* Guest space for TLB fills */ | ||
42 | #define MMUCR0_TLBSEL 0x0c000000 /* TLB or ERAT target for TLB fills */ | ||
43 | #define MMUCR0_TLBSEL_U 0x00000000 /* TLBSEL = UTLB */ | ||
44 | #define MMUCR0_TLBSEL_I 0x08000000 /* TLBSEL = I-ERAT */ | ||
45 | #define MMUCR0_TLBSEL_D 0x0c000000 /* TLBSEL = D-ERAT */ | ||
46 | #define MMUCR0_LOCKSRSH 0x02000000 /* Use TLB lock on tlbsx. */ | ||
47 | #define MMUCR0_TID_MASK 0x000000ff /* TID field */ | ||
48 | |||
49 | /* A2 MMUCR1 bits */ | ||
50 | #define MMUCR1_IRRE 0x80000000 /* I-ERAT round robin enable */ | ||
51 | #define MMUCR1_DRRE 0x40000000 /* D-ERAT round robin enable */ | ||
52 | #define MMUCR1_REE 0x20000000 /* Reference Exception Enable*/ | ||
53 | #define MMUCR1_CEE 0x10000000 /* Change exception enable */ | ||
54 | #define MMUCR1_CSINV_ALL 0x00000000 /* Inval ERAT on all CS evts */ | ||
55 | #define MMUCR1_CSINV_NISYNC 0x04000000 /* Inval ERAT on all ex isync*/ | ||
56 | #define MMUCR1_CSINV_NEVER 0x0c000000 /* Don't inval ERAT on CS */ | ||
57 | #define MMUCR1_ICTID 0x00080000 /* IERAT class field as TID */ | ||
58 | #define MMUCR1_ITTID 0x00040000 /* IERAT thdid field as TID */ | ||
59 | #define MMUCR1_DCTID 0x00020000 /* DERAT class field as TID */ | ||
60 | #define MMUCR1_DTTID 0x00010000 /* DERAT thdid field as TID */ | ||
61 | #define MMUCR1_DCCD 0x00008000 /* DERAT class ignore */ | ||
62 | #define MMUCR1_TLBWE_BINV 0x00004000 /* back invalidate on tlbwe */ | ||
63 | |||
64 | /* A2 MMUCR2 bits */ | ||
65 | #define MMUCR2_PSSEL_SHIFT 4 | ||
66 | |||
67 | /* A2 MMUCR3 bits */ | ||
68 | #define MMUCR3_THID 0x0000000f /* Thread ID */ | ||
69 | |||
70 | /* *** ERAT TLB bits definitions */ | ||
71 | #define TLB0_EPN_MASK ASM_CONST(0xfffffffffffff000) | ||
72 | #define TLB0_CLASS_MASK ASM_CONST(0x0000000000000c00) | ||
73 | #define TLB0_CLASS_00 ASM_CONST(0x0000000000000000) | ||
74 | #define TLB0_CLASS_01 ASM_CONST(0x0000000000000400) | ||
75 | #define TLB0_CLASS_10 ASM_CONST(0x0000000000000800) | ||
76 | #define TLB0_CLASS_11 ASM_CONST(0x0000000000000c00) | ||
77 | #define TLB0_V ASM_CONST(0x0000000000000200) | ||
78 | #define TLB0_X ASM_CONST(0x0000000000000100) | ||
79 | #define TLB0_SIZE_MASK ASM_CONST(0x00000000000000f0) | ||
80 | #define TLB0_SIZE_4K ASM_CONST(0x0000000000000010) | ||
81 | #define TLB0_SIZE_64K ASM_CONST(0x0000000000000030) | ||
82 | #define TLB0_SIZE_1M ASM_CONST(0x0000000000000050) | ||
83 | #define TLB0_SIZE_16M ASM_CONST(0x0000000000000070) | ||
84 | #define TLB0_SIZE_1G ASM_CONST(0x00000000000000a0) | ||
85 | #define TLB0_THDID_MASK ASM_CONST(0x000000000000000f) | ||
86 | #define TLB0_THDID_0 ASM_CONST(0x0000000000000001) | ||
87 | #define TLB0_THDID_1 ASM_CONST(0x0000000000000002) | ||
88 | #define TLB0_THDID_2 ASM_CONST(0x0000000000000004) | ||
89 | #define TLB0_THDID_3 ASM_CONST(0x0000000000000008) | ||
90 | #define TLB0_THDID_ALL ASM_CONST(0x000000000000000f) | ||
91 | |||
92 | #define TLB1_RESVATTR ASM_CONST(0x00f0000000000000) | ||
93 | #define TLB1_U0 ASM_CONST(0x0008000000000000) | ||
94 | #define TLB1_U1 ASM_CONST(0x0004000000000000) | ||
95 | #define TLB1_U2 ASM_CONST(0x0002000000000000) | ||
96 | #define TLB1_U3 ASM_CONST(0x0001000000000000) | ||
97 | #define TLB1_R ASM_CONST(0x0000800000000000) | ||
98 | #define TLB1_C ASM_CONST(0x0000400000000000) | ||
99 | #define TLB1_RPN_MASK ASM_CONST(0x000003fffffff000) | ||
100 | #define TLB1_W ASM_CONST(0x0000000000000800) | ||
101 | #define TLB1_I ASM_CONST(0x0000000000000400) | ||
102 | #define TLB1_M ASM_CONST(0x0000000000000200) | ||
103 | #define TLB1_G ASM_CONST(0x0000000000000100) | ||
104 | #define TLB1_E ASM_CONST(0x0000000000000080) | ||
105 | #define TLB1_VF ASM_CONST(0x0000000000000040) | ||
106 | #define TLB1_UX ASM_CONST(0x0000000000000020) | ||
107 | #define TLB1_SX ASM_CONST(0x0000000000000010) | ||
108 | #define TLB1_UW ASM_CONST(0x0000000000000008) | ||
109 | #define TLB1_SW ASM_CONST(0x0000000000000004) | ||
110 | #define TLB1_UR ASM_CONST(0x0000000000000002) | ||
111 | #define TLB1_SR ASM_CONST(0x0000000000000001) | ||
112 | |||
113 | #ifdef CONFIG_PPC_EARLY_DEBUG_WSP | ||
114 | #define WSP_UART_PHYS 0xffc000c000 | ||
115 | /* This needs to be carefully chosen to hit a !0 congruence class | ||
116 | * in the TLB since we bolt it in way 3, which is already occupied | ||
117 | * by our linear mapping primary bolted entry in CC 0. | ||
118 | */ | ||
119 | #define WSP_UART_VIRT 0xf000000000001000 | ||
120 | #endif | ||
121 | |||
122 | /* A2 erativax attributes definitions */ | ||
123 | #define ERATIVAX_RS_IS_ALL 0x000 | ||
124 | #define ERATIVAX_RS_IS_TID 0x040 | ||
125 | #define ERATIVAX_RS_IS_CLASS 0x080 | ||
126 | #define ERATIVAX_RS_IS_FULLMATCH 0x0c0 | ||
127 | #define ERATIVAX_CLASS_00 0x000 | ||
128 | #define ERATIVAX_CLASS_01 0x010 | ||
129 | #define ERATIVAX_CLASS_10 0x020 | ||
130 | #define ERATIVAX_CLASS_11 0x030 | ||
131 | #define ERATIVAX_PSIZE_4K (TLB_PSIZE_4K >> 1) | ||
132 | #define ERATIVAX_PSIZE_64K (TLB_PSIZE_64K >> 1) | ||
133 | #define ERATIVAX_PSIZE_1M (TLB_PSIZE_1M >> 1) | ||
134 | #define ERATIVAX_PSIZE_16M (TLB_PSIZE_16M >> 1) | ||
135 | #define ERATIVAX_PSIZE_1G (TLB_PSIZE_1G >> 1) | ||
136 | |||
137 | /* A2 eratilx attributes definitions */ | ||
138 | #define ERATILX_T_ALL 0 | ||
139 | #define ERATILX_T_TID 1 | ||
140 | #define ERATILX_T_TGS 2 | ||
141 | #define ERATILX_T_FULLMATCH 3 | ||
142 | #define ERATILX_T_CLASS0 4 | ||
143 | #define ERATILX_T_CLASS1 5 | ||
144 | #define ERATILX_T_CLASS2 6 | ||
145 | #define ERATILX_T_CLASS3 7 | ||
146 | |||
147 | /* XUCR0 bits */ | ||
148 | #define XUCR0_TRACE_UM_T0 0x40000000 /* Thread 0 */ | ||
149 | #define XUCR0_TRACE_UM_T1 0x20000000 /* Thread 1 */ | ||
150 | #define XUCR0_TRACE_UM_T2 0x10000000 /* Thread 2 */ | ||
151 | #define XUCR0_TRACE_UM_T3 0x08000000 /* Thread 3 */ | ||
152 | |||
153 | /* A2 CCR0 register */ | ||
154 | #define A2_CCR0_PME_DISABLED 0x00000000 | ||
155 | #define A2_CCR0_PME_SLEEP 0x40000000 | ||
156 | #define A2_CCR0_PME_RVW 0x80000000 | ||
157 | #define A2_CCR0_PME_DISABLED2 0xc0000000 | ||
158 | |||
159 | /* A2 CCR2 register */ | ||
160 | #define A2_CCR2_ERAT_ONLY_MODE 0x00000001 | ||
161 | #define A2_CCR2_ENABLE_ICSWX 0x00000002 | ||
162 | #define A2_CCR2_ENABLE_PC 0x20000000 | ||
163 | #define A2_CCR2_ENABLE_TRACE 0x40000000 | ||
164 | |||
165 | #endif /* __ASM_POWERPC_REG_A2_H__ */ | ||
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h index 667a498eaee1..0f0ad9fa01c1 100644 --- a/arch/powerpc/include/asm/reg_booke.h +++ b/arch/powerpc/include/asm/reg_booke.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * Contains register definitions common to the Book E PowerPC | 2 | * Contains register definitions common to the Book E PowerPC |
3 | * specification. Notice that while the IBM-40x series of CPUs | 3 | * specification. Notice that while the IBM-40x series of CPUs |
4 | * are not true Book E PowerPCs, they borrowed a number of features | 4 | * are not true Book E PowerPCs, they borrowed a number of features |
5 | * before Book E was finalized, and are included here as well. Unfortunatly, | 5 | * before Book E was finalized, and are included here as well. Unfortunately, |
6 | * they sometimes used different locations than true Book E CPUs did. | 6 | * they sometimes used different locations than true Book E CPUs did. |
7 | * | 7 | * |
8 | * This program is free software; you can redistribute it and/or | 8 | * This program is free software; you can redistribute it and/or |
@@ -27,10 +27,12 @@ | |||
27 | #define MSR_CM (1<<31) /* Computation Mode (0=32-bit, 1=64-bit) */ | 27 | #define MSR_CM (1<<31) /* Computation Mode (0=32-bit, 1=64-bit) */ |
28 | 28 | ||
29 | #if defined(CONFIG_PPC_BOOK3E_64) | 29 | #if defined(CONFIG_PPC_BOOK3E_64) |
30 | #define MSR_64BIT MSR_CM | ||
31 | |||
30 | #define MSR_ MSR_ME | MSR_CE | 32 | #define MSR_ MSR_ME | MSR_CE |
31 | #define MSR_KERNEL MSR_ | MSR_CM | 33 | #define MSR_KERNEL MSR_ | MSR_64BIT |
32 | #define MSR_USER32 MSR_ | MSR_PR | MSR_EE | MSR_DE | 34 | #define MSR_USER32 MSR_ | MSR_PR | MSR_EE | MSR_DE |
33 | #define MSR_USER64 MSR_USER32 | MSR_CM | MSR_DE | 35 | #define MSR_USER64 MSR_USER32 | MSR_64BIT |
34 | #elif defined (CONFIG_40x) | 36 | #elif defined (CONFIG_40x) |
35 | #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE) | 37 | #define MSR_KERNEL (MSR_ME|MSR_RI|MSR_IR|MSR_DR|MSR_CE) |
36 | #define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) | 38 | #define MSR_USER (MSR_KERNEL|MSR_PR|MSR_EE) |
@@ -81,6 +83,10 @@ | |||
81 | #define SPRN_IVOR13 0x19D /* Interrupt Vector Offset Register 13 */ | 83 | #define SPRN_IVOR13 0x19D /* Interrupt Vector Offset Register 13 */ |
82 | #define SPRN_IVOR14 0x19E /* Interrupt Vector Offset Register 14 */ | 84 | #define SPRN_IVOR14 0x19E /* Interrupt Vector Offset Register 14 */ |
83 | #define SPRN_IVOR15 0x19F /* Interrupt Vector Offset Register 15 */ | 85 | #define SPRN_IVOR15 0x19F /* Interrupt Vector Offset Register 15 */ |
86 | #define SPRN_IVOR38 0x1B0 /* Interrupt Vector Offset Register 38 */ | ||
87 | #define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */ | ||
88 | #define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */ | ||
89 | #define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */ | ||
84 | #define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */ | 90 | #define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */ |
85 | #define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */ | 91 | #define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */ |
86 | #define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */ | 92 | #define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */ |
@@ -110,7 +116,7 @@ | |||
110 | #define SPRN_MAS2 0x272 /* MMU Assist Register 2 */ | 116 | #define SPRN_MAS2 0x272 /* MMU Assist Register 2 */ |
111 | #define SPRN_MAS3 0x273 /* MMU Assist Register 3 */ | 117 | #define SPRN_MAS3 0x273 /* MMU Assist Register 3 */ |
112 | #define SPRN_MAS4 0x274 /* MMU Assist Register 4 */ | 118 | #define SPRN_MAS4 0x274 /* MMU Assist Register 4 */ |
113 | #define SPRN_MAS5 0x275 /* MMU Assist Register 5 */ | 119 | #define SPRN_MAS5 0x153 /* MMU Assist Register 5 */ |
114 | #define SPRN_MAS6 0x276 /* MMU Assist Register 6 */ | 120 | #define SPRN_MAS6 0x276 /* MMU Assist Register 6 */ |
115 | #define SPRN_PID1 0x279 /* Process ID Register 1 */ | 121 | #define SPRN_PID1 0x279 /* Process ID Register 1 */ |
116 | #define SPRN_PID2 0x27A /* Process ID Register 2 */ | 122 | #define SPRN_PID2 0x27A /* Process ID Register 2 */ |
@@ -150,8 +156,6 @@ | |||
150 | * or IBM 40x. | 156 | * or IBM 40x. |
151 | */ | 157 | */ |
152 | #ifdef CONFIG_BOOKE | 158 | #ifdef CONFIG_BOOKE |
153 | #define SPRN_PID 0x030 /* Process ID */ | ||
154 | #define SPRN_PID0 SPRN_PID/* Process ID Register 0 */ | ||
155 | #define SPRN_CSRR0 0x03A /* Critical Save and Restore Register 0 */ | 159 | #define SPRN_CSRR0 0x03A /* Critical Save and Restore Register 0 */ |
156 | #define SPRN_CSRR1 0x03B /* Critical Save and Restore Register 1 */ | 160 | #define SPRN_CSRR1 0x03B /* Critical Save and Restore Register 1 */ |
157 | #define SPRN_DEAR 0x03D /* Data Error Address Register */ | 161 | #define SPRN_DEAR 0x03D /* Data Error Address Register */ |
@@ -168,7 +172,6 @@ | |||
168 | #define SPRN_TCR 0x154 /* Timer Control Register */ | 172 | #define SPRN_TCR 0x154 /* Timer Control Register */ |
169 | #endif /* Book E */ | 173 | #endif /* Book E */ |
170 | #ifdef CONFIG_40x | 174 | #ifdef CONFIG_40x |
171 | #define SPRN_PID 0x3B1 /* Process ID */ | ||
172 | #define SPRN_DBCR1 0x3BD /* Debug Control Register 1 */ | 175 | #define SPRN_DBCR1 0x3BD /* Debug Control Register 1 */ |
173 | #define SPRN_ESR 0x3D4 /* Exception Syndrome Register */ | 176 | #define SPRN_ESR 0x3D4 /* Exception Syndrome Register */ |
174 | #define SPRN_DEAR 0x3D5 /* Data Error Address Register */ | 177 | #define SPRN_DEAR 0x3D5 /* Data Error Address Register */ |
@@ -246,6 +249,20 @@ | |||
246 | store or cache line push */ | 249 | store or cache line push */ |
247 | #endif | 250 | #endif |
248 | 251 | ||
252 | /* Bit definitions for the HID1 */ | ||
253 | #ifdef CONFIG_E500 | ||
254 | /* e500v1/v2 */ | ||
255 | #define HID1_PLL_CFG_MASK 0xfc000000 /* PLL_CFG input pins */ | ||
256 | #define HID1_RFXE 0x00020000 /* Read fault exception enable */ | ||
257 | #define HID1_R1DPE 0x00008000 /* R1 data bus parity enable */ | ||
258 | #define HID1_R2DPE 0x00004000 /* R2 data bus parity enable */ | ||
259 | #define HID1_ASTME 0x00002000 /* Address bus streaming mode enable */ | ||
260 | #define HID1_ABE 0x00001000 /* Address broadcast enable */ | ||
261 | #define HID1_MPXTT 0x00000400 /* MPX re-map transfer type */ | ||
262 | #define HID1_ATS 0x00000080 /* Atomic status */ | ||
263 | #define HID1_MID_MASK 0x0000000f /* MID input pins */ | ||
264 | #endif | ||
265 | |||
249 | /* Bit definitions for the DBSR. */ | 266 | /* Bit definitions for the DBSR. */ |
250 | /* | 267 | /* |
251 | * DBSR bits which have conflicting definitions on true Book E versus IBM 40x. | 268 | * DBSR bits which have conflicting definitions on true Book E versus IBM 40x. |
diff --git a/arch/powerpc/include/asm/rio.h b/arch/powerpc/include/asm/rio.h index 0018bf80cb25..b1d2deceeedb 100644 --- a/arch/powerpc/include/asm/rio.h +++ b/arch/powerpc/include/asm/rio.h | |||
@@ -14,5 +14,10 @@ | |||
14 | #define ASM_PPC_RIO_H | 14 | #define ASM_PPC_RIO_H |
15 | 15 | ||
16 | extern void platform_rio_init(void); | 16 | extern void platform_rio_init(void); |
17 | #ifdef CONFIG_FSL_RIO | ||
18 | extern int fsl_rio_mcheck_exception(struct pt_regs *); | ||
19 | #else | ||
20 | static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) { return 0; } | ||
21 | #endif | ||
17 | 22 | ||
18 | #endif /* ASM_PPC_RIO_H */ | 23 | #endif /* ASM_PPC_RIO_H */ |
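The new fsl_rio_mcheck_exception() hook, with its stub when CONFIG_FSL_RIO is off, lets the machine check path give the RapidIO code a first chance at recovery without sprinkling #ifdefs at the call site. A hedged sketch of a caller (the surrounding machine check handler and its return convention are assumed):

	/* Sketch: inside a hypothetical e500 machine check handler */
	if (fsl_rio_mcheck_exception(regs))
		return 1;	/* RapidIO code claimed and fixed up the error */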
diff --git a/arch/powerpc/include/asm/rtas.h b/arch/powerpc/include/asm/rtas.h index 3d35f8ae377e..58625d1e7802 100644 --- a/arch/powerpc/include/asm/rtas.h +++ b/arch/powerpc/include/asm/rtas.h | |||
@@ -158,7 +158,50 @@ struct rtas_error_log { | |||
158 | unsigned long target:4; /* Target of failed operation */ | 158 | unsigned long target:4; /* Target of failed operation */ |
159 | unsigned long type:8; /* General event or error*/ | 159 | unsigned long type:8; /* General event or error*/ |
160 | unsigned long extended_log_length:32; /* length in bytes */ | 160 | unsigned long extended_log_length:32; /* length in bytes */ |
161 | unsigned char buffer[1]; | 161 | unsigned char buffer[1]; /* Start of extended log */ |
162 | /* Variable length. */ | ||
163 | }; | ||
164 | |||
165 | #define RTAS_V6EXT_LOG_FORMAT_EVENT_LOG 14 | ||
166 | |||
167 | #define RTAS_V6EXT_COMPANY_ID_IBM (('I' << 24) | ('B' << 16) | ('M' << 8)) | ||
168 | |||
169 | /* RTAS general extended event log, Version 6. The extended log starts | ||
170 | * from "buffer" field of struct rtas_error_log defined above. | ||
171 | */ | ||
172 | struct rtas_ext_event_log_v6 { | ||
173 | /* Byte 0 */ | ||
174 | uint32_t log_valid:1; /* 1:Log valid */ | ||
175 | uint32_t unrecoverable_error:1; /* 1:Unrecoverable error */ | ||
176 | uint32_t recoverable_error:1; /* 1:recoverable (correctable */ | ||
177 | /* or successfully retried) */ | ||
178 | uint32_t degraded_operation:1; /* 1:Unrecoverable err, bypassed*/ | ||
179 | /* - degraded operation (e.g. */ | ||
180 | /* CPU or mem taken off-line) */ | ||
181 | uint32_t predictive_error:1; | ||
182 | uint32_t new_log:1; /* 1:"New" log (Always 1 for */ | ||
183 | /* data returned from RTAS */ | ||
184 | uint32_t big_endian:1; /* 1: Big endian */ | ||
185 | uint32_t :1; /* reserved */ | ||
186 | /* Byte 1 */ | ||
187 | uint32_t :8; /* reserved */ | ||
188 | /* Byte 2 */ | ||
189 | uint32_t powerpc_format:1; /* Set to 1 (indicating log is */ | ||
190 | /* in PowerPC format */ | ||
191 | uint32_t :3; /* reserved */ | ||
192 | uint32_t log_format:4; /* Log format indicator. Define */ | ||
193 | /* format used for byte 12-2047 */ | ||
194 | /* Byte 3 */ | ||
195 | uint32_t :8; /* reserved */ | ||
196 | /* Byte 4-11 */ | ||
197 | uint8_t reserved[8]; /* reserved */ | ||
198 | /* Byte 12-15 */ | ||
199 | uint32_t company_id; /* Company ID of the company */ | ||
200 | /* that defines the format for */ | ||
201 | /* the vendor specific log type */ | ||
202 | /* Byte 16-end of log */ | ||
203 | uint8_t vendor_log[1]; /* Start of vendor specific log */ | ||
204 | /* Variable length. */ | ||
162 | }; | 205 | }; |
163 | 206 | ||
164 | /* | 207 | /* |
@@ -187,6 +230,7 @@ extern void rtas_progress(char *s, unsigned short hex); | |||
187 | extern void rtas_initialize(void); | 230 | extern void rtas_initialize(void); |
188 | extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); | 231 | extern int rtas_suspend_cpu(struct rtas_suspend_me_data *data); |
189 | extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); | 232 | extern int rtas_suspend_last_cpu(struct rtas_suspend_me_data *data); |
233 | extern int rtas_ibm_suspend_me(struct rtas_args *); | ||
190 | 234 | ||
191 | struct rtc_time; | 235 | struct rtc_time; |
192 | extern unsigned long rtas_get_boot_time(void); | 236 | extern unsigned long rtas_get_boot_time(void); |
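struct rtas_ext_event_log_v6 above describes the version-6 extended log that begins at the buffer field of struct rtas_error_log. A hedged sketch of walking from the fixed-length header to an IBM vendor log; the 'log' pointer and the parse_ibm_vendor_log() helper are hypothetical:

	struct rtas_ext_event_log_v6 *ext;

	if (log->extended_log_length) {
		ext = (struct rtas_ext_event_log_v6 *)log->buffer;
		if (ext->log_format == RTAS_V6EXT_LOG_FORMAT_EVENT_LOG &&
		    ext->company_id == RTAS_V6EXT_COMPANY_ID_IBM)
			parse_ibm_vendor_log(ext->vendor_log);	/* hypothetical */
	}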
diff --git a/arch/powerpc/include/asm/rwsem.h b/arch/powerpc/include/asm/rwsem.h index 8447d89fbe72..bb1e2cdeb9bf 100644 --- a/arch/powerpc/include/asm/rwsem.h +++ b/arch/powerpc/include/asm/rwsem.h | |||
@@ -13,11 +13,6 @@ | |||
13 | * by Paul Mackerras <paulus@samba.org>. | 13 | * by Paul Mackerras <paulus@samba.org>. |
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/list.h> | ||
17 | #include <linux/spinlock.h> | ||
18 | #include <asm/atomic.h> | ||
19 | #include <asm/system.h> | ||
20 | |||
21 | /* | 16 | /* |
22 | * the semaphore definition | 17 | * the semaphore definition |
23 | */ | 18 | */ |
@@ -33,47 +28,6 @@ | |||
33 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS | 28 | #define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS |
34 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) | 29 | #define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) |
35 | 30 | ||
36 | struct rw_semaphore { | ||
37 | long count; | ||
38 | spinlock_t wait_lock; | ||
39 | struct list_head wait_list; | ||
40 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
41 | struct lockdep_map dep_map; | ||
42 | #endif | ||
43 | }; | ||
44 | |||
45 | #ifdef CONFIG_DEBUG_LOCK_ALLOC | ||
46 | # define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname } | ||
47 | #else | ||
48 | # define __RWSEM_DEP_MAP_INIT(lockname) | ||
49 | #endif | ||
50 | |||
51 | #define __RWSEM_INITIALIZER(name) \ | ||
52 | { \ | ||
53 | RWSEM_UNLOCKED_VALUE, \ | ||
54 | __SPIN_LOCK_UNLOCKED((name).wait_lock), \ | ||
55 | LIST_HEAD_INIT((name).wait_list) \ | ||
56 | __RWSEM_DEP_MAP_INIT(name) \ | ||
57 | } | ||
58 | |||
59 | #define DECLARE_RWSEM(name) \ | ||
60 | struct rw_semaphore name = __RWSEM_INITIALIZER(name) | ||
61 | |||
62 | extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem); | ||
63 | extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem); | ||
64 | extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem); | ||
65 | extern struct rw_semaphore *rwsem_downgrade_wake(struct rw_semaphore *sem); | ||
66 | |||
67 | extern void __init_rwsem(struct rw_semaphore *sem, const char *name, | ||
68 | struct lock_class_key *key); | ||
69 | |||
70 | #define init_rwsem(sem) \ | ||
71 | do { \ | ||
72 | static struct lock_class_key __key; \ | ||
73 | \ | ||
74 | __init_rwsem((sem), #sem, &__key); \ | ||
75 | } while (0) | ||
76 | |||
77 | /* | 31 | /* |
78 | * lock for reading | 32 | * lock for reading |
79 | */ | 33 | */ |
@@ -174,10 +128,5 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem) | |||
174 | return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); | 128 | return atomic_long_add_return(delta, (atomic_long_t *)&sem->count); |
175 | } | 129 | } |
176 | 130 | ||
177 | static inline int rwsem_is_locked(struct rw_semaphore *sem) | ||
178 | { | ||
179 | return sem->count != 0; | ||
180 | } | ||
181 | |||
182 | #endif /* __KERNEL__ */ | 131 | #endif /* __KERNEL__ */ |
183 | #endif /* _ASM_POWERPC_RWSEM_H */ | 132 | #endif /* _ASM_POWERPC_RWSEM_H */ |
diff --git a/arch/powerpc/include/asm/scom.h b/arch/powerpc/include/asm/scom.h new file mode 100644 index 000000000000..0cabfd7bc2d1 --- /dev/null +++ b/arch/powerpc/include/asm/scom.h | |||
@@ -0,0 +1,156 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Benjamin Herrenschmidt, IBM Corp | ||
3 | * <benh@kernel.crashing.org> | ||
4 | * and David Gibson, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
14 | * the GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | ||
20 | |||
21 | #ifndef _ASM_POWERPC_SCOM_H | ||
22 | #define _ASM_POWERPC_SCOM_H | ||
23 | |||
24 | #ifdef __KERNEL__ | ||
25 | #ifndef __ASSEMBLY__ | ||
26 | #ifdef CONFIG_PPC_SCOM | ||
27 | |||
28 | /* | ||
29 | * The SCOM bus is a sideband bus used for accessing various internal | ||
30 | * registers of the processor or the chipset. The implementation details | ||
31 | * differ between processors and platforms, and the access method as | ||
32 | * well. | ||
33 | * | ||
34 | * This API allows one to "map" ranges of SCOM register numbers associated | ||
35 | * with a given SCOM controller. The latter must be represented by a | ||
36 | * device node, though some implementations might support NULL if there | ||
37 | * is no possible ambiguity. | ||
38 | * | ||
39 | * Then, scom_read/scom_write can be used to access registers inside | ||
40 | * that range. The argument passed is a register number relative to | ||
41 | * the beginning of the range mapped. | ||
42 | */ | ||
43 | |||
44 | typedef void *scom_map_t; | ||
45 | |||
46 | /* Value for an invalid SCOM map */ | ||
47 | #define SCOM_MAP_INVALID (NULL) | ||
48 | |||
49 | /* The scom_controller data structure is what the platform passes | ||
50 | * to the core code in scom_init; it provides the actual implementation | ||
51 | * of all the SCOM functions. | ||
52 | */ | ||
53 | struct scom_controller { | ||
54 | scom_map_t (*map)(struct device_node *ctrl_dev, u64 reg, u64 count); | ||
55 | void (*unmap)(scom_map_t map); | ||
56 | |||
57 | u64 (*read)(scom_map_t map, u32 reg); | ||
58 | void (*write)(scom_map_t map, u32 reg, u64 value); | ||
59 | }; | ||
60 | |||
61 | extern const struct scom_controller *scom_controller; | ||
62 | |||
63 | /** | ||
64 | * scom_init - Initialize the SCOM backend, called by the platform | ||
65 | * @controller: The platform SCOM controller | ||
66 | */ | ||
67 | static inline void scom_init(const struct scom_controller *controller) | ||
68 | { | ||
69 | scom_controller = controller; | ||
70 | } | ||
71 | |||
72 | /** | ||
73 | * scom_map_ok - Test if a SCOM mapping is successful | ||
74 | * @map: The result of scom_map to test | ||
75 | */ | ||
76 | static inline int scom_map_ok(scom_map_t map) | ||
77 | { | ||
78 | return map != SCOM_MAP_INVALID; | ||
79 | } | ||
80 | |||
81 | /** | ||
82 | * scom_map - Map a block of SCOM registers | ||
83 | * @ctrl_dev: Device node of the SCOM controller | ||
84 | * some implementations allow NULL here | ||
85 | * @reg: first SCOM register to map | ||
86 | * @count: Number of SCOM registers to map | ||
87 | */ | ||
88 | |||
89 | static inline scom_map_t scom_map(struct device_node *ctrl_dev, | ||
90 | u64 reg, u64 count) | ||
91 | { | ||
92 | return scom_controller->map(ctrl_dev, reg, count); | ||
93 | } | ||
94 | |||
95 | /** | ||
96 | * scom_find_parent - Find the SCOM controller for a device | ||
97 | * @dev: OF node of the device | ||
98 | * | ||
99 | * This is not meant for general usage, but in combination with | ||
100 | * scom_map() allows mapping registers not represented by the | ||
101 | * device's own scom-reg property. Useful for applying HW workarounds | ||
102 | * on things not properly represented in the device-tree for example. | ||
103 | */ | ||
104 | struct device_node *scom_find_parent(struct device_node *dev); | ||
105 | |||
106 | |||
107 | /** | ||
108 | * scom_map_device - Map a device's block of SCOM registers | ||
109 | * @dev: OF node of the device | ||
110 | * @index: Register bank index (index in "scom-reg" property) | ||
111 | * | ||
112 | * This function will use the device-tree binding for SCOM which | ||
113 | * is to follow "scom-parent" properties until it finds a node with | ||
114 | * a "scom-controller" property to find the controller. It will then | ||
115 | * use the "scom-reg" property which is made of reg/count pairs, | ||
116 | * each of them having a size defined by the controller's #scom-cells | ||
117 | * property | ||
118 | */ | ||
119 | extern scom_map_t scom_map_device(struct device_node *dev, int index); | ||
120 | |||
121 | |||
122 | /** | ||
123 | * scom_unmap - Unmap a block of SCOM registers | ||
124 | * @map: Result of scom_map is to be unmapped | ||
125 | */ | ||
126 | static inline void scom_unmap(scom_map_t map) | ||
127 | { | ||
128 | if (scom_map_ok(map)) | ||
129 | scom_controller->unmap(map); | ||
130 | } | ||
131 | |||
132 | /** | ||
133 | * scom_read - Read a SCOM register | ||
134 | * @map: Result of scom_map | ||
135 | * @reg: Register index within that map | ||
136 | */ | ||
137 | static inline u64 scom_read(scom_map_t map, u32 reg) | ||
138 | { | ||
139 | return scom_controller->read(map, reg); | ||
140 | } | ||
141 | |||
142 | /** | ||
143 | * scom_write - Write to a SCOM register | ||
144 | * @map: Result of scom_map | ||
145 | * @reg: Register index within that map | ||
146 | * @value: Value to write | ||
147 | */ | ||
148 | static inline void scom_write(scom_map_t map, u32 reg, u64 value) | ||
149 | { | ||
150 | scom_controller->write(map, reg, value); | ||
151 | } | ||
152 | |||
153 | #endif /* CONFIG_PPC_SCOM */ | ||
154 | #endif /* __ASSEMBLY__ */ | ||
155 | #endif /* __KERNEL__ */ | ||
156 | #endif /* _ASM_POWERPC_SCOM_H */ | ||
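Putting the SCOM API above together, a hedged usage sketch: map a device's first scom-reg bank, read-modify-write one register, then unmap. The device_node pointer, register offset and bit value are made up for illustration.

	scom_map_t map = scom_map_device(dev, 0);	/* 'dev' assumed from OF probe */

	if (scom_map_ok(map)) {
		u64 val;

		val = scom_read(map, 0x10);		/* offset within the mapped bank */
		scom_write(map, 0x10, val | 0x1);	/* illustrative bit set */
		scom_unmap(map);
	}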
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 66e237bbe15f..11eb404b5606 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h | |||
@@ -20,6 +20,7 @@ | |||
20 | #include <linux/threads.h> | 20 | #include <linux/threads.h> |
21 | #include <linux/cpumask.h> | 21 | #include <linux/cpumask.h> |
22 | #include <linux/kernel.h> | 22 | #include <linux/kernel.h> |
23 | #include <linux/irqreturn.h> | ||
23 | 24 | ||
24 | #ifndef __ASSEMBLY__ | 25 | #ifndef __ASSEMBLY__ |
25 | 26 | ||
@@ -29,22 +30,41 @@ | |||
29 | #include <asm/percpu.h> | 30 | #include <asm/percpu.h> |
30 | 31 | ||
31 | extern int boot_cpuid; | 32 | extern int boot_cpuid; |
33 | extern int boot_cpu_count; | ||
32 | 34 | ||
33 | extern void cpu_die(void); | 35 | extern void cpu_die(void); |
34 | 36 | ||
35 | #ifdef CONFIG_SMP | 37 | #ifdef CONFIG_SMP |
36 | 38 | ||
37 | extern void smp_send_debugger_break(int cpu); | 39 | struct smp_ops_t { |
38 | extern void smp_message_recv(int); | 40 | void (*message_pass)(int cpu, int msg); |
41 | #ifdef CONFIG_PPC_SMP_MUXED_IPI | ||
42 | void (*cause_ipi)(int cpu, unsigned long data); | ||
43 | #endif | ||
44 | int (*probe)(void); | ||
45 | int (*kick_cpu)(int nr); | ||
46 | void (*setup_cpu)(int nr); | ||
47 | void (*bringup_done)(void); | ||
48 | void (*take_timebase)(void); | ||
49 | void (*give_timebase)(void); | ||
50 | int (*cpu_disable)(void); | ||
51 | void (*cpu_die)(unsigned int nr); | ||
52 | int (*cpu_bootable)(unsigned int nr); | ||
53 | }; | ||
54 | |||
55 | extern void smp_send_debugger_break(void); | ||
56 | extern void start_secondary_resume(void); | ||
57 | extern void __devinit smp_generic_give_timebase(void); | ||
58 | extern void __devinit smp_generic_take_timebase(void); | ||
39 | 59 | ||
40 | DECLARE_PER_CPU(unsigned int, cpu_pvr); | 60 | DECLARE_PER_CPU(unsigned int, cpu_pvr); |
41 | 61 | ||
42 | #ifdef CONFIG_HOTPLUG_CPU | 62 | #ifdef CONFIG_HOTPLUG_CPU |
43 | extern void fixup_irqs(const struct cpumask *map); | 63 | extern void migrate_irqs(void); |
44 | int generic_cpu_disable(void); | 64 | int generic_cpu_disable(void); |
45 | int generic_cpu_enable(unsigned int cpu); | ||
46 | void generic_cpu_die(unsigned int cpu); | 65 | void generic_cpu_die(unsigned int cpu); |
47 | void generic_mach_cpu_die(void); | 66 | void generic_mach_cpu_die(void); |
67 | void generic_set_cpu_dead(unsigned int cpu); | ||
48 | #endif | 68 | #endif |
49 | 69 | ||
50 | #ifdef CONFIG_PPC64 | 70 | #ifdef CONFIG_PPC64 |
@@ -92,13 +112,16 @@ extern int cpu_to_core_id(int cpu); | |||
92 | #define PPC_MSG_CALL_FUNC_SINGLE 2 | 112 | #define PPC_MSG_CALL_FUNC_SINGLE 2 |
93 | #define PPC_MSG_DEBUGGER_BREAK 3 | 113 | #define PPC_MSG_DEBUGGER_BREAK 3 |
94 | 114 | ||
95 | /* | 115 | /* for irq controllers that have dedicated ipis per message (4) */ |
96 | * irq controllers that have dedicated ipis per message and don't | ||
97 | * need additional code in the action handler may use this | ||
98 | */ | ||
99 | extern int smp_request_message_ipi(int virq, int message); | 116 | extern int smp_request_message_ipi(int virq, int message); |
100 | extern const char *smp_ipi_name[]; | 117 | extern const char *smp_ipi_name[]; |
101 | 118 | ||
119 | /* for irq controllers with only a single ipi */ | ||
120 | extern void smp_muxed_ipi_set_data(int cpu, unsigned long data); | ||
121 | extern void smp_muxed_ipi_message_pass(int cpu, int msg); | ||
122 | extern void smp_muxed_ipi_resend(void); | ||
123 | extern irqreturn_t smp_ipi_demux(void); | ||
124 | |||
102 | void smp_init_iSeries(void); | 125 | void smp_init_iSeries(void); |
103 | void smp_init_pSeries(void); | 126 | void smp_init_pSeries(void); |
104 | void smp_init_cell(void); | 127 | void smp_init_cell(void); |
@@ -148,7 +171,7 @@ extern int smt_enabled_at_boot; | |||
148 | 171 | ||
149 | extern int smp_mpic_probe(void); | 172 | extern int smp_mpic_probe(void); |
150 | extern void smp_mpic_setup_cpu(int cpu); | 173 | extern void smp_mpic_setup_cpu(int cpu); |
151 | extern void smp_generic_kick_cpu(int nr); | 174 | extern int smp_generic_kick_cpu(int nr); |
152 | 175 | ||
153 | extern void smp_generic_give_timebase(void); | 176 | extern void smp_generic_give_timebase(void); |
154 | extern void smp_generic_take_timebase(void); | 177 | extern void smp_generic_take_timebase(void); |
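The new struct smp_ops_t and the muxed-IPI helpers above split platforms into those with one hardware IPI per message and those with a single multiplexed IPI. A hedged sketch of the single-IPI style; my_cause_ipi() is a hypothetical platform function and the ops table is illustrative only:

	static void my_cause_ipi(int cpu, unsigned long data)
	{
		/* hypothetical: poke the platform's doorbell/MPIC to interrupt 'cpu' */
	}

	static irqreturn_t my_ipi_action(int irq, void *dev_id)
	{
		/* demultiplex messages queued by smp_muxed_ipi_message_pass() */
		return smp_ipi_demux();
	}

	static struct smp_ops_t my_smp_ops = {
		.message_pass	= smp_muxed_ipi_message_pass,
		.cause_ipi	= my_cause_ipi,
		.probe		= smp_mpic_probe,
		.kick_cpu	= smp_generic_kick_cpu,
		.setup_cpu	= smp_mpic_setup_cpu,
	};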
diff --git a/arch/powerpc/include/asm/spu.h b/arch/powerpc/include/asm/spu.h index 0ab8d869e3d6..0c8b35d75232 100644 --- a/arch/powerpc/include/asm/spu.h +++ b/arch/powerpc/include/asm/spu.h | |||
@@ -203,14 +203,6 @@ void spu_irq_setaffinity(struct spu *spu, int cpu); | |||
203 | void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa, | 203 | void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa, |
204 | void *code, int code_size); | 204 | void *code, int code_size); |
205 | 205 | ||
206 | #ifdef CONFIG_KEXEC | ||
207 | void crash_register_spus(struct list_head *list); | ||
208 | #else | ||
209 | static inline void crash_register_spus(struct list_head *list) | ||
210 | { | ||
211 | } | ||
212 | #endif | ||
213 | |||
214 | extern void spu_invalidate_slbs(struct spu *spu); | 206 | extern void spu_invalidate_slbs(struct spu *spu); |
215 | extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm); | 207 | extern void spu_associate_mm(struct spu *spu, struct mm_struct *mm); |
216 | int spu_64k_pages_available(void); | 208 | int spu_64k_pages_available(void); |
diff --git a/arch/powerpc/include/asm/spu_priv1.h b/arch/powerpc/include/asm/spu_priv1.h index 25020a34ce7f..d8f5c60f61c1 100644 --- a/arch/powerpc/include/asm/spu_priv1.h +++ b/arch/powerpc/include/asm/spu_priv1.h | |||
@@ -223,7 +223,7 @@ spu_disable_spu (struct spu_context *ctx) | |||
223 | } | 223 | } |
224 | 224 | ||
225 | /* | 225 | /* |
226 | * The declarations folowing are put here for convenience | 226 | * The declarations following are put here for convenience |
227 | * and only intended to be used by the platform setup code. | 227 | * and only intended to be used by the platform setup code. |
228 | */ | 228 | */ |
229 | 229 | ||
diff --git a/arch/powerpc/include/asm/suspend.h b/arch/powerpc/include/asm/suspend.h deleted file mode 100644 index c6efc3466aa6..000000000000 --- a/arch/powerpc/include/asm/suspend.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ASM_POWERPC_SUSPEND_H | ||
2 | #define __ASM_POWERPC_SUSPEND_H | ||
3 | |||
4 | static inline int arch_prepare_suspend(void) { return 0; } | ||
5 | |||
6 | #endif /* __ASM_POWERPC_SUSPEND_H */ | ||
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index 23913e902fc3..b54b2add07be 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h | |||
@@ -15,6 +15,11 @@ | |||
15 | 15 | ||
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | 17 | ||
18 | /* ftrace syscalls requires exporting the sys_call_table */ | ||
19 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
20 | extern const unsigned long *sys_call_table; | ||
21 | #endif /* CONFIG_FTRACE_SYSCALLS */ | ||
22 | |||
18 | static inline long syscall_get_nr(struct task_struct *task, | 23 | static inline long syscall_get_nr(struct task_struct *task, |
19 | struct pt_regs *regs) | 24 | struct pt_regs *regs) |
20 | { | 25 | { |
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h index 3d212669a130..f6736b7da463 100644 --- a/arch/powerpc/include/asm/systbl.h +++ b/arch/powerpc/include/asm/systbl.h | |||
@@ -329,3 +329,28 @@ COMPAT_SYS(rt_tgsigqueueinfo) | |||
329 | SYSCALL(fanotify_init) | 329 | SYSCALL(fanotify_init) |
330 | COMPAT_SYS(fanotify_mark) | 330 | COMPAT_SYS(fanotify_mark) |
331 | SYSCALL_SPU(prlimit64) | 331 | SYSCALL_SPU(prlimit64) |
332 | SYSCALL_SPU(socket) | ||
333 | SYSCALL_SPU(bind) | ||
334 | SYSCALL_SPU(connect) | ||
335 | SYSCALL_SPU(listen) | ||
336 | SYSCALL_SPU(accept) | ||
337 | SYSCALL_SPU(getsockname) | ||
338 | SYSCALL_SPU(getpeername) | ||
339 | SYSCALL_SPU(socketpair) | ||
340 | SYSCALL_SPU(send) | ||
341 | SYSCALL_SPU(sendto) | ||
342 | COMPAT_SYS_SPU(recv) | ||
343 | COMPAT_SYS_SPU(recvfrom) | ||
344 | SYSCALL_SPU(shutdown) | ||
345 | COMPAT_SYS_SPU(setsockopt) | ||
346 | COMPAT_SYS_SPU(getsockopt) | ||
347 | COMPAT_SYS_SPU(sendmsg) | ||
348 | COMPAT_SYS_SPU(recvmsg) | ||
349 | COMPAT_SYS_SPU(recvmmsg) | ||
350 | SYSCALL_SPU(accept4) | ||
351 | SYSCALL_SPU(name_to_handle_at) | ||
352 | COMPAT_SYS_SPU(open_by_handle_at) | ||
353 | COMPAT_SYS_SPU(clock_adjtime) | ||
354 | SYSCALL_SPU(syncfs) | ||
355 | COMPAT_SYS_SPU(sendmmsg) | ||
356 | SYSCALL_SPU(setns) | ||
diff --git a/arch/powerpc/include/asm/system.h b/arch/powerpc/include/asm/system.h index 6c294acac848..2dc595dda03b 100644 --- a/arch/powerpc/include/asm/system.h +++ b/arch/powerpc/include/asm/system.h | |||
@@ -154,8 +154,8 @@ extern void enable_kernel_spe(void); | |||
154 | extern void giveup_spe(struct task_struct *); | 154 | extern void giveup_spe(struct task_struct *); |
155 | extern void load_up_spe(struct task_struct *); | 155 | extern void load_up_spe(struct task_struct *); |
156 | extern int fix_alignment(struct pt_regs *); | 156 | extern int fix_alignment(struct pt_regs *); |
157 | extern void cvt_fd(float *from, double *to, struct thread_struct *thread); | 157 | extern void cvt_fd(float *from, double *to); |
158 | extern void cvt_df(double *from, float *to, struct thread_struct *thread); | 158 | extern void cvt_df(double *from, float *to); |
159 | 159 | ||
160 | #ifndef CONFIG_SMP | 160 | #ifndef CONFIG_SMP |
161 | extern void discard_lazy_cpu_state(void); | 161 | extern void discard_lazy_cpu_state(void); |
@@ -219,8 +219,6 @@ extern int mem_init_done; /* set on boot once kmalloc can be called */ | |||
219 | extern int init_bootmem_done; /* set once bootmem is available */ | 219 | extern int init_bootmem_done; /* set once bootmem is available */ |
220 | extern phys_addr_t memory_limit; | 220 | extern phys_addr_t memory_limit; |
221 | extern unsigned long klimit; | 221 | extern unsigned long klimit; |
222 | |||
223 | extern void *alloc_maybe_bootmem(size_t size, gfp_t mask); | ||
224 | extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); | 222 | extern void *zalloc_maybe_bootmem(size_t size, gfp_t mask); |
225 | 223 | ||
226 | extern int powersave_nap; /* set if nap mode can be used in idle loop */ | 224 | extern int powersave_nap; /* set if nap mode can be used in idle loop */ |
@@ -542,10 +540,6 @@ extern void reloc_got2(unsigned long); | |||
542 | 540 | ||
543 | #define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) | 541 | #define PTRRELOC(x) ((typeof(x)) add_reloc_offset((unsigned long)(x))) |
544 | 542 | ||
545 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
546 | extern void account_system_vtime(struct task_struct *); | ||
547 | #endif | ||
548 | |||
549 | extern struct dentry *powerpc_debugfs_root; | 543 | extern struct dentry *powerpc_debugfs_root; |
550 | 544 | ||
551 | #endif /* __KERNEL__ */ | 545 | #endif /* __KERNEL__ */ |
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 65eb85976a03..836f231ec1f0 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h | |||
@@ -72,7 +72,7 @@ struct thread_info { | |||
72 | 72 | ||
73 | #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR | 73 | #define __HAVE_ARCH_THREAD_INFO_ALLOCATOR |
74 | 74 | ||
75 | extern struct thread_info *alloc_thread_info(struct task_struct *tsk); | 75 | extern struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node); |
76 | extern void free_thread_info(struct thread_info *ti); | 76 | extern void free_thread_info(struct thread_info *ti); |
77 | 77 | ||
78 | #endif /* THREAD_SHIFT < PAGE_SHIFT */ | 78 | #endif /* THREAD_SHIFT < PAGE_SHIFT */ |
@@ -110,7 +110,8 @@ static inline struct thread_info *current_thread_info(void) | |||
110 | #define TIF_NOERROR 12 /* Force successful syscall return */ | 110 | #define TIF_NOERROR 12 /* Force successful syscall return */ |
111 | #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ | 111 | #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ |
112 | #define TIF_FREEZE 14 /* Freezing for suspend */ | 112 | #define TIF_FREEZE 14 /* Freezing for suspend */ |
113 | #define TIF_RUNLATCH 15 /* Is the runlatch enabled? */ | 113 | #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ |
114 | #define TIF_RUNLATCH 16 /* Is the runlatch enabled? */ | ||
114 | 115 | ||
115 | /* as above, but as bit values */ | 116 | /* as above, but as bit values */ |
116 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 117 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) |
@@ -127,8 +128,10 @@ static inline struct thread_info *current_thread_info(void) | |||
127 | #define _TIF_NOERROR (1<<TIF_NOERROR) | 128 | #define _TIF_NOERROR (1<<TIF_NOERROR) |
128 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) | 129 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) |
129 | #define _TIF_FREEZE (1<<TIF_FREEZE) | 130 | #define _TIF_FREEZE (1<<TIF_FREEZE) |
131 | #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) | ||
130 | #define _TIF_RUNLATCH (1<<TIF_RUNLATCH) | 132 | #define _TIF_RUNLATCH (1<<TIF_RUNLATCH) |
131 | #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP) | 133 | #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ |
134 | _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) | ||
132 | 135 | ||
133 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ | 136 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ |
134 | _TIF_NOTIFY_RESUME) | 137 | _TIF_NOTIFY_RESUME) |
@@ -139,10 +142,12 @@ static inline struct thread_info *current_thread_info(void) | |||
139 | #define TLF_NAPPING 0 /* idle thread enabled NAP mode */ | 142 | #define TLF_NAPPING 0 /* idle thread enabled NAP mode */ |
140 | #define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */ | 143 | #define TLF_SLEEPING 1 /* suspend code enabled SLEEP mode */ |
141 | #define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */ | 144 | #define TLF_RESTORE_SIGMASK 2 /* Restore signal mask in do_signal */ |
145 | #define TLF_LAZY_MMU 3 /* tlb_batch is active */ | ||
142 | 146 | ||
143 | #define _TLF_NAPPING (1 << TLF_NAPPING) | 147 | #define _TLF_NAPPING (1 << TLF_NAPPING) |
144 | #define _TLF_SLEEPING (1 << TLF_SLEEPING) | 148 | #define _TLF_SLEEPING (1 << TLF_SLEEPING) |
145 | #define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK) | 149 | #define _TLF_RESTORE_SIGMASK (1 << TLF_RESTORE_SIGMASK) |
150 | #define _TLF_LAZY_MMU (1 << TLF_LAZY_MMU) | ||
146 | 151 | ||
147 | #ifndef __ASSEMBLY__ | 152 | #ifndef __ASSEMBLY__ |
148 | #define HAVE_SET_RESTORE_SIGMASK 1 | 153 | #define HAVE_SET_RESTORE_SIGMASK 1 |
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h index dc779dfcf258..fe6f7c2c9c68 100644 --- a/arch/powerpc/include/asm/time.h +++ b/arch/powerpc/include/asm/time.h | |||
@@ -34,7 +34,6 @@ extern void to_tm(int tim, struct rtc_time * tm); | |||
34 | extern void GregorianDay(struct rtc_time *tm); | 34 | extern void GregorianDay(struct rtc_time *tm); |
35 | 35 | ||
36 | extern void generic_calibrate_decr(void); | 36 | extern void generic_calibrate_decr(void); |
37 | extern void snapshot_timebase(void); | ||
38 | 37 | ||
39 | extern void set_dec_cpu6(unsigned int val); | 38 | extern void set_dec_cpu6(unsigned int val); |
40 | 39 | ||
@@ -212,12 +211,8 @@ struct cpu_usage { | |||
212 | DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array); | 211 | DECLARE_PER_CPU(struct cpu_usage, cpu_usage_array); |
213 | 212 | ||
214 | #if defined(CONFIG_VIRT_CPU_ACCOUNTING) | 213 | #if defined(CONFIG_VIRT_CPU_ACCOUNTING) |
215 | extern void calculate_steal_time(void); | ||
216 | extern void snapshot_timebases(void); | ||
217 | #define account_process_vtime(tsk) account_process_tick(tsk, 0) | 214 | #define account_process_vtime(tsk) account_process_tick(tsk, 0) |
218 | #else | 215 | #else |
219 | #define calculate_steal_time() do { } while (0) | ||
220 | #define snapshot_timebases() do { } while (0) | ||
221 | #define account_process_vtime(tsk) do { } while (0) | 216 | #define account_process_vtime(tsk) do { } while (0) |
222 | #endif | 217 | #endif |
223 | 218 | ||
diff --git a/arch/powerpc/include/asm/tlbflush.h b/arch/powerpc/include/asm/tlbflush.h index d50a380b2b6f..81143fcbd113 100644 --- a/arch/powerpc/include/asm/tlbflush.h +++ b/arch/powerpc/include/asm/tlbflush.h | |||
@@ -79,6 +79,8 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm) | |||
79 | 79 | ||
80 | #elif defined(CONFIG_PPC_STD_MMU_64) | 80 | #elif defined(CONFIG_PPC_STD_MMU_64) |
81 | 81 | ||
82 | #define MMU_NO_CONTEXT 0 | ||
83 | |||
82 | /* | 84 | /* |
83 | * TLB flushing for 64-bit hash-MMU CPUs | 85 | * TLB flushing for 64-bit hash-MMU CPUs |
84 | */ | 86 | */ |
diff --git a/arch/powerpc/include/asm/topology.h b/arch/powerpc/include/asm/topology.h index afe4aaa65c3b..7ef0d90defc8 100644 --- a/arch/powerpc/include/asm/topology.h +++ b/arch/powerpc/include/asm/topology.h | |||
@@ -106,9 +106,22 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev, | |||
106 | int nid) | 106 | int nid) |
107 | { | 107 | { |
108 | } | 108 | } |
109 | |||
110 | #endif /* CONFIG_NUMA */ | 109 | #endif /* CONFIG_NUMA */ |
111 | 110 | ||
111 | #if defined(CONFIG_NUMA) && defined(CONFIG_PPC_SPLPAR) | ||
112 | extern int start_topology_update(void); | ||
113 | extern int stop_topology_update(void); | ||
114 | #else | ||
115 | static inline int start_topology_update(void) | ||
116 | { | ||
117 | return 0; | ||
118 | } | ||
119 | static inline int stop_topology_update(void) | ||
120 | { | ||
121 | return 0; | ||
122 | } | ||
123 | #endif /* CONFIG_NUMA && CONFIG_PPC_SPLPAR */ | ||
124 | |||
112 | #include <asm-generic/topology.h> | 125 | #include <asm-generic/topology.h> |
113 | 126 | ||
114 | #ifdef CONFIG_SMP | 127 | #ifdef CONFIG_SMP |
diff --git a/arch/powerpc/include/asm/types.h b/arch/powerpc/include/asm/types.h index a5aea0ca34e9..8947b9827bc4 100644 --- a/arch/powerpc/include/asm/types.h +++ b/arch/powerpc/include/asm/types.h | |||
@@ -44,13 +44,6 @@ typedef struct { | |||
44 | 44 | ||
45 | typedef __vector128 vector128; | 45 | typedef __vector128 vector128; |
46 | 46 | ||
47 | #if defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT) | ||
48 | typedef u64 dma_addr_t; | ||
49 | #else | ||
50 | typedef u32 dma_addr_t; | ||
51 | #endif | ||
52 | typedef u64 dma64_addr_t; | ||
53 | |||
54 | typedef struct { | 47 | typedef struct { |
55 | unsigned long entry; | 48 | unsigned long entry; |
56 | unsigned long toc; | 49 | unsigned long toc; |
diff --git a/arch/powerpc/include/asm/udbg.h b/arch/powerpc/include/asm/udbg.h index 11ae699135ba..58580e94a2bb 100644 --- a/arch/powerpc/include/asm/udbg.h +++ b/arch/powerpc/include/asm/udbg.h | |||
@@ -52,6 +52,7 @@ extern void __init udbg_init_44x_as1(void); | |||
52 | extern void __init udbg_init_40x_realmode(void); | 52 | extern void __init udbg_init_40x_realmode(void); |
53 | extern void __init udbg_init_cpm(void); | 53 | extern void __init udbg_init_cpm(void); |
54 | extern void __init udbg_init_usbgecko(void); | 54 | extern void __init udbg_init_usbgecko(void); |
55 | extern void __init udbg_init_wsp(void); | ||
55 | 56 | ||
56 | #endif /* __KERNEL__ */ | 57 | #endif /* __KERNEL__ */ |
57 | #endif /* _ASM_POWERPC_UDBG_H */ | 58 | #endif /* _ASM_POWERPC_UDBG_H */ |
diff --git a/arch/powerpc/include/asm/uninorth.h b/arch/powerpc/include/asm/uninorth.h index f737732c3861..d12b11d7641e 100644 --- a/arch/powerpc/include/asm/uninorth.h +++ b/arch/powerpc/include/asm/uninorth.h | |||
@@ -60,7 +60,7 @@ | |||
60 | * | 60 | * |
61 | * Obviously, the GART is not cache coherent and so any change to it | 61 | * Obviously, the GART is not cache coherent and so any change to it |
62 | * must be flushed to memory (or maybe just make the GART space non | 62 | * must be flushed to memory (or maybe just make the GART space non |
63 | * cachable). AGP memory itself doens't seem to be cache coherent neither. | 63 | * cachable). AGP memory itself doesn't seem to be cache coherent neither. |
64 | * | 64 | * |
65 | * In order to invalidate the GART (which is probably necessary to inval | 65 | * In order to invalidate the GART (which is probably necessary to inval |
66 | * the bridge internal TLBs), the following sequence has to be written, | 66 | * the bridge internal TLBs), the following sequence has to be written, |
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h index 597e6f9d094a..b8b3f599362b 100644 --- a/arch/powerpc/include/asm/unistd.h +++ b/arch/powerpc/include/asm/unistd.h | |||
@@ -348,10 +348,35 @@ | |||
348 | #define __NR_fanotify_init 323 | 348 | #define __NR_fanotify_init 323 |
349 | #define __NR_fanotify_mark 324 | 349 | #define __NR_fanotify_mark 324 |
350 | #define __NR_prlimit64 325 | 350 | #define __NR_prlimit64 325 |
351 | #define __NR_socket 326 | ||
352 | #define __NR_bind 327 | ||
353 | #define __NR_connect 328 | ||
354 | #define __NR_listen 329 | ||
355 | #define __NR_accept 330 | ||
356 | #define __NR_getsockname 331 | ||
357 | #define __NR_getpeername 332 | ||
358 | #define __NR_socketpair 333 | ||
359 | #define __NR_send 334 | ||
360 | #define __NR_sendto 335 | ||
361 | #define __NR_recv 336 | ||
362 | #define __NR_recvfrom 337 | ||
363 | #define __NR_shutdown 338 | ||
364 | #define __NR_setsockopt 339 | ||
365 | #define __NR_getsockopt 340 | ||
366 | #define __NR_sendmsg 341 | ||
367 | #define __NR_recvmsg 342 | ||
368 | #define __NR_recvmmsg 343 | ||
369 | #define __NR_accept4 344 | ||
370 | #define __NR_name_to_handle_at 345 | ||
371 | #define __NR_open_by_handle_at 346 | ||
372 | #define __NR_clock_adjtime 347 | ||
373 | #define __NR_syncfs 348 | ||
374 | #define __NR_sendmmsg 349 | ||
375 | #define __NR_setns 350 | ||
351 | 376 | ||
352 | #ifdef __KERNEL__ | 377 | #ifdef __KERNEL__ |
353 | 378 | ||
354 | #define __NR_syscalls 326 | 379 | #define __NR_syscalls 351 |
355 | 380 | ||
356 | #define __NR__exit __NR_exit | 381 | #define __NR__exit __NR_exit |
357 | #define NR_syscalls __NR_syscalls | 382 | #define NR_syscalls __NR_syscalls |
diff --git a/arch/powerpc/include/asm/vdso_datapage.h b/arch/powerpc/include/asm/vdso_datapage.h index 08679c5319b8..b73a8199f161 100644 --- a/arch/powerpc/include/asm/vdso_datapage.h +++ b/arch/powerpc/include/asm/vdso_datapage.h | |||
@@ -57,7 +57,7 @@ struct vdso_data { | |||
57 | } version; | 57 | } version; |
58 | 58 | ||
59 | /* Note about the platform flags: it now only contains the lpar | 59 | /* Note about the platform flags: it now only contains the lpar |
60 | * bit. The actual platform number is dead and burried | 60 | * bit. The actual platform number is dead and buried |
61 | */ | 61 | */ |
62 | __u32 platform; /* Platform flags 0x18 */ | 62 | __u32 platform; /* Platform flags 0x18 */ |
63 | __u32 processor; /* Processor type 0x1C */ | 63 | __u32 processor; /* Processor type 0x1C */ |
@@ -116,9 +116,7 @@ struct vdso_data { | |||
116 | 116 | ||
117 | #endif /* CONFIG_PPC64 */ | 117 | #endif /* CONFIG_PPC64 */ |
118 | 118 | ||
119 | #ifdef __KERNEL__ | ||
120 | extern struct vdso_data *vdso_data; | 119 | extern struct vdso_data *vdso_data; |
121 | #endif | ||
122 | 120 | ||
123 | #endif /* __ASSEMBLY__ */ | 121 | #endif /* __ASSEMBLY__ */ |
124 | 122 | ||
diff --git a/arch/powerpc/include/asm/wsp.h b/arch/powerpc/include/asm/wsp.h new file mode 100644 index 000000000000..c7dc83088a33 --- /dev/null +++ b/arch/powerpc/include/asm/wsp.h | |||
@@ -0,0 +1,14 @@ | |||
1 | /* | ||
2 | * Copyright 2011 Michael Ellerman, IBM Corp. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | #ifndef __ASM_POWERPC_WSP_H | ||
10 | #define __ASM_POWERPC_WSP_H | ||
11 | |||
12 | extern int wsp_get_chip_id(struct device_node *dn); | ||
13 | |||
14 | #endif /* __ASM_POWERPC_WSP_H */ | ||
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h new file mode 100644 index 000000000000..b183a4062011 --- /dev/null +++ b/arch/powerpc/include/asm/xics.h | |||
@@ -0,0 +1,142 @@ | |||
1 | /* | ||
2 | * Common definitions across all variants of ICP and ICS interrupt | ||
3 | * controllers. | ||
4 | */ | ||
5 | |||
6 | #ifndef _XICS_H | ||
7 | #define _XICS_H | ||
8 | |||
9 | #include <linux/interrupt.h> | ||
10 | |||
11 | #define XICS_IPI 2 | ||
12 | #define XICS_IRQ_SPURIOUS 0 | ||
13 | |||
14 | /* Want a priority other than 0. Various HW issues require this. */ | ||
15 | #define DEFAULT_PRIORITY 5 | ||
16 | |||
17 | /* | ||
18 | * Mark IPIs as higher priority so we can take them inside interrupts that | ||
19 | * arent marked IRQF_DISABLED | ||
20 | */ | ||
21 | #define IPI_PRIORITY 4 | ||
22 | |||
23 | /* The least favored priority */ | ||
24 | #define LOWEST_PRIORITY 0xFF | ||
25 | |||
26 | /* The number of priorities defined above */ | ||
27 | #define MAX_NUM_PRIORITIES 3 | ||
28 | |||
29 | /* Native ICP */ | ||
30 | extern int icp_native_init(void); | ||
31 | |||
32 | /* PAPR ICP */ | ||
33 | extern int icp_hv_init(void); | ||
34 | |||
35 | /* ICP ops */ | ||
36 | struct icp_ops { | ||
37 | unsigned int (*get_irq)(void); | ||
38 | void (*eoi)(struct irq_data *d); | ||
39 | void (*set_priority)(unsigned char prio); | ||
40 | void (*teardown_cpu)(void); | ||
41 | void (*flush_ipi)(void); | ||
42 | #ifdef CONFIG_SMP | ||
43 | void (*cause_ipi)(int cpu, unsigned long data); | ||
44 | irq_handler_t ipi_action; | ||
45 | #endif | ||
46 | }; | ||
47 | |||
48 | extern const struct icp_ops *icp_ops; | ||
49 | |||
50 | /* Native ICS */ | ||
51 | extern int ics_native_init(void); | ||
52 | |||
53 | /* RTAS ICS */ | ||
54 | extern int ics_rtas_init(void); | ||
55 | |||
56 | /* ICS instance, hooked up to chip_data of an irq */ | ||
57 | struct ics { | ||
58 | struct list_head link; | ||
59 | int (*map)(struct ics *ics, unsigned int virq); | ||
60 | void (*mask_unknown)(struct ics *ics, unsigned long vec); | ||
61 | long (*get_server)(struct ics *ics, unsigned long vec); | ||
62 | int (*host_match)(struct ics *ics, struct device_node *node); | ||
63 | char data[]; | ||
64 | }; | ||
65 | |||
66 | /* Commons */ | ||
67 | extern unsigned int xics_default_server; | ||
68 | extern unsigned int xics_default_distrib_server; | ||
69 | extern unsigned int xics_interrupt_server_size; | ||
70 | extern struct irq_host *xics_host; | ||
71 | |||
72 | struct xics_cppr { | ||
73 | unsigned char stack[MAX_NUM_PRIORITIES]; | ||
74 | int index; | ||
75 | }; | ||
76 | |||
77 | DECLARE_PER_CPU(struct xics_cppr, xics_cppr); | ||
78 | |||
79 | static inline void xics_push_cppr(unsigned int vec) | ||
80 | { | ||
81 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
82 | |||
83 | if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1)) | ||
84 | return; | ||
85 | |||
86 | if (vec == XICS_IPI) | ||
87 | os_cppr->stack[++os_cppr->index] = IPI_PRIORITY; | ||
88 | else | ||
89 | os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY; | ||
90 | } | ||
91 | |||
92 | static inline unsigned char xics_pop_cppr(void) | ||
93 | { | ||
94 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
95 | |||
96 | if (WARN_ON(os_cppr->index < 1)) | ||
97 | return LOWEST_PRIORITY; | ||
98 | |||
99 | return os_cppr->stack[--os_cppr->index]; | ||
100 | } | ||
101 | |||
102 | static inline void xics_set_base_cppr(unsigned char cppr) | ||
103 | { | ||
104 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
105 | |||
106 | /* we only really want to set the priority when there's | ||
107 | * just one cppr value on the stack | ||
108 | */ | ||
109 | WARN_ON(os_cppr->index != 0); | ||
110 | |||
111 | os_cppr->stack[0] = cppr; | ||
112 | } | ||
113 | |||
114 | static inline unsigned char xics_cppr_top(void) | ||
115 | { | ||
116 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
117 | |||
118 | return os_cppr->stack[os_cppr->index]; | ||
119 | } | ||
120 | |||
121 | DECLARE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message); | ||
122 | |||
123 | extern void xics_init(void); | ||
124 | extern void xics_setup_cpu(void); | ||
125 | extern void xics_update_irq_servers(void); | ||
126 | extern void xics_set_cpu_giq(unsigned int gserver, unsigned int join); | ||
127 | extern void xics_mask_unknown_vec(unsigned int vec); | ||
128 | extern irqreturn_t xics_ipi_dispatch(int cpu); | ||
129 | extern int xics_smp_probe(void); | ||
130 | extern void xics_register_ics(struct ics *ics); | ||
131 | extern void xics_teardown_cpu(void); | ||
132 | extern void xics_kexec_teardown_cpu(int secondary); | ||
133 | extern void xics_migrate_irqs_away(void); | ||
134 | #ifdef CONFIG_SMP | ||
135 | extern int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, | ||
136 | unsigned int strict_check); | ||
137 | #else | ||
138 | #define xics_get_irq_server(virq, cpumask, strict_check) (xics_default_server) | ||
139 | #endif | ||
140 | |||
141 | |||
142 | #endif /* _XICS_H */ | ||
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 1dda70129141..e8b981897d44 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile | |||
@@ -29,18 +29,23 @@ endif | |||
29 | obj-y := cputable.o ptrace.o syscalls.o \ | 29 | obj-y := cputable.o ptrace.o syscalls.o \ |
30 | irq.o align.o signal_32.o pmc.o vdso.o \ | 30 | irq.o align.o signal_32.o pmc.o vdso.o \ |
31 | init_task.o process.o systbl.o idle.o \ | 31 | init_task.o process.o systbl.o idle.o \ |
32 | signal.o sysfs.o cacheinfo.o | 32 | signal.o sysfs.o cacheinfo.o time.o \ |
33 | obj-y += vdso32/ | 33 | prom.o traps.o setup-common.o \ |
34 | udbg.o misc.o io.o dma.o \ | ||
35 | misc_$(CONFIG_WORD_SIZE).o vdso32/ | ||
34 | obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ | 36 | obj-$(CONFIG_PPC64) += setup_64.o sys_ppc32.o \ |
35 | signal_64.o ptrace32.o \ | 37 | signal_64.o ptrace32.o \ |
36 | paca.o nvram_64.o firmware.o | 38 | paca.o nvram_64.o firmware.o |
37 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o | 39 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o |
38 | obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o | 40 | obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_ppc970.o cpu_setup_pa6t.o |
41 | obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power7.o | ||
39 | obj64-$(CONFIG_RELOCATABLE) += reloc_64.o | 42 | obj64-$(CONFIG_RELOCATABLE) += reloc_64.o |
40 | obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o | 43 | obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o |
44 | obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o | ||
41 | obj-$(CONFIG_PPC64) += vdso64/ | 45 | obj-$(CONFIG_PPC64) += vdso64/ |
42 | obj-$(CONFIG_ALTIVEC) += vecemu.o | 46 | obj-$(CONFIG_ALTIVEC) += vecemu.o |
43 | obj-$(CONFIG_PPC_970_NAP) += idle_power4.o | 47 | obj-$(CONFIG_PPC_970_NAP) += idle_power4.o |
48 | obj-$(CONFIG_PPC_P7_NAP) += idle_power7.o | ||
44 | obj-$(CONFIG_PPC_OF) += of_platform.o prom_parse.o | 49 | obj-$(CONFIG_PPC_OF) += of_platform.o prom_parse.o |
45 | obj-$(CONFIG_PPC_CLOCK) += clock.o | 50 | obj-$(CONFIG_PPC_CLOCK) += clock.o |
46 | procfs-y := proc_powerpc.o | 51 | procfs-y := proc_powerpc.o |
@@ -55,7 +60,9 @@ obj-$(CONFIG_IBMVIO) += vio.o | |||
55 | obj-$(CONFIG_IBMEBUS) += ibmebus.o | 60 | obj-$(CONFIG_IBMEBUS) += ibmebus.o |
56 | obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o | 61 | obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o |
57 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | 62 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o |
63 | ifeq ($(CONFIG_PPC32),y) | ||
58 | obj-$(CONFIG_E500) += idle_e500.o | 64 | obj-$(CONFIG_E500) += idle_e500.o |
65 | endif | ||
59 | obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o | 66 | obj-$(CONFIG_6xx) += idle_6xx.o l2cr_6xx.o cpu_setup_6xx.o |
60 | obj-$(CONFIG_TAU) += tau_6xx.o | 67 | obj-$(CONFIG_TAU) += tau_6xx.o |
61 | obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o | 68 | obj-$(CONFIG_HIBERNATION) += swsusp.o suspend.o |
@@ -67,20 +74,16 @@ endif | |||
67 | obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o | 74 | obj64-$(CONFIG_HIBERNATION) += swsusp_asm64.o |
68 | obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o | 75 | obj-$(CONFIG_MODULES) += module.o module_$(CONFIG_WORD_SIZE).o |
69 | obj-$(CONFIG_44x) += cpu_setup_44x.o | 76 | obj-$(CONFIG_44x) += cpu_setup_44x.o |
70 | obj-$(CONFIG_FSL_BOOKE) += cpu_setup_fsl_booke.o dbell.o | 77 | obj-$(CONFIG_PPC_FSL_BOOK3E) += cpu_setup_fsl_booke.o dbell.o |
71 | obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o | 78 | obj-$(CONFIG_PPC_BOOK3E_64) += dbell.o |
72 | 79 | ||
73 | extra-y := head_$(CONFIG_WORD_SIZE).o | 80 | extra-y := head_$(CONFIG_WORD_SIZE).o |
74 | extra-$(CONFIG_PPC_BOOK3E_32) := head_new_booke.o | ||
75 | extra-$(CONFIG_40x) := head_40x.o | 81 | extra-$(CONFIG_40x) := head_40x.o |
76 | extra-$(CONFIG_44x) := head_44x.o | 82 | extra-$(CONFIG_44x) := head_44x.o |
77 | extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o | 83 | extra-$(CONFIG_FSL_BOOKE) := head_fsl_booke.o |
78 | extra-$(CONFIG_8xx) := head_8xx.o | 84 | extra-$(CONFIG_8xx) := head_8xx.o |
79 | extra-y += vmlinux.lds | 85 | extra-y += vmlinux.lds |
80 | 86 | ||
81 | obj-y += time.o prom.o traps.o setup-common.o \ | ||
82 | udbg.o misc.o io.o dma.o \ | ||
83 | misc_$(CONFIG_WORD_SIZE).o | ||
84 | obj-$(CONFIG_PPC32) += entry_32.o setup_32.o | 87 | obj-$(CONFIG_PPC32) += entry_32.o setup_32.o |
85 | obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o | 88 | obj-$(CONFIG_PPC64) += dma-iommu.o iommu.o |
86 | obj-$(CONFIG_KGDB) += kgdb.o | 89 | obj-$(CONFIG_KGDB) += kgdb.o |
@@ -102,8 +105,11 @@ obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o \ | |||
102 | obj-$(CONFIG_AUDIT) += audit.o | 105 | obj-$(CONFIG_AUDIT) += audit.o |
103 | obj64-$(CONFIG_AUDIT) += compat_audit.o | 106 | obj64-$(CONFIG_AUDIT) += compat_audit.o |
104 | 107 | ||
108 | obj-$(CONFIG_PPC_IO_WORKAROUNDS) += io-workarounds.o | ||
109 | |||
105 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 110 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
106 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 111 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
112 | obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o | ||
107 | obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o | 113 | obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o |
108 | 114 | ||
109 | obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o | 115 | obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o |
@@ -127,6 +133,8 @@ ifneq ($(CONFIG_XMON)$(CONFIG_KEXEC),) | |||
127 | obj-y += ppc_save_regs.o | 133 | obj-y += ppc_save_regs.o |
128 | endif | 134 | endif |
129 | 135 | ||
136 | obj-$(CONFIG_KVM_GUEST) += kvm.o kvm_emul.o | ||
137 | |||
130 | # Disable GCOV in odd or sensitive code | 138 | # Disable GCOV in odd or sensitive code |
131 | GCOV_PROFILE_prom_init.o := n | 139 | GCOV_PROFILE_prom_init.o := n |
132 | GCOV_PROFILE_ftrace.o := n | 140 | GCOV_PROFILE_ftrace.o := n |
diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index b876e989220b..8184ee97e484 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c | |||
@@ -889,7 +889,7 @@ int fix_alignment(struct pt_regs *regs) | |||
889 | #ifdef CONFIG_PPC_FPU | 889 | #ifdef CONFIG_PPC_FPU |
890 | preempt_disable(); | 890 | preempt_disable(); |
891 | enable_kernel_fp(); | 891 | enable_kernel_fp(); |
892 | cvt_df(&data.dd, (float *)&data.v[4], ¤t->thread); | 892 | cvt_df(&data.dd, (float *)&data.v[4]); |
893 | preempt_enable(); | 893 | preempt_enable(); |
894 | #else | 894 | #else |
895 | return 0; | 895 | return 0; |
@@ -933,7 +933,7 @@ int fix_alignment(struct pt_regs *regs) | |||
933 | #ifdef CONFIG_PPC_FPU | 933 | #ifdef CONFIG_PPC_FPU |
934 | preempt_disable(); | 934 | preempt_disable(); |
935 | enable_kernel_fp(); | 935 | enable_kernel_fp(); |
936 | cvt_fd((float *)&data.v[4], &data.dd, ¤t->thread); | 936 | cvt_fd((float *)&data.v[4], &data.dd); |
937 | preempt_enable(); | 937 | preempt_enable(); |
938 | #else | 938 | #else |
939 | return 0; | 939 | return 0; |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 1c0607ddccc0..36e1c8a29be8 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -48,11 +48,11 @@ | |||
48 | #ifdef CONFIG_PPC_ISERIES | 48 | #ifdef CONFIG_PPC_ISERIES |
49 | #include <asm/iseries/alpaca.h> | 49 | #include <asm/iseries/alpaca.h> |
50 | #endif | 50 | #endif |
51 | #ifdef CONFIG_KVM | 51 | #if defined(CONFIG_KVM) || defined(CONFIG_KVM_GUEST) |
52 | #include <linux/kvm_host.h> | 52 | #include <linux/kvm_host.h> |
53 | #ifndef CONFIG_BOOKE | ||
54 | #include <asm/kvm_book3s.h> | ||
55 | #endif | 53 | #endif |
54 | #if defined(CONFIG_KVM) && defined(CONFIG_PPC_BOOK3S) | ||
55 | #include <asm/kvm_book3s.h> | ||
56 | #endif | 56 | #endif |
57 | 57 | ||
58 | #ifdef CONFIG_PPC32 | 58 | #ifdef CONFIG_PPC32 |
@@ -61,7 +61,7 @@ | |||
61 | #endif | 61 | #endif |
62 | #endif | 62 | #endif |
63 | 63 | ||
64 | #if defined(CONFIG_FSL_BOOKE) | 64 | #if defined(CONFIG_PPC_FSL_BOOK3E) |
65 | #include "../mm/mmu_decl.h" | 65 | #include "../mm/mmu_decl.h" |
66 | #endif | 66 | #endif |
67 | 67 | ||
@@ -74,6 +74,7 @@ int main(void) | |||
74 | DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); | 74 | DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context)); |
75 | DEFINE(SIGSEGV, SIGSEGV); | 75 | DEFINE(SIGSEGV, SIGSEGV); |
76 | DEFINE(NMI_MASK, NMI_MASK); | 76 | DEFINE(NMI_MASK, NMI_MASK); |
77 | DEFINE(THREAD_DSCR, offsetof(struct thread_struct, dscr)); | ||
77 | #else | 78 | #else |
78 | DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); | 79 | DEFINE(THREAD_INFO, offsetof(struct task_struct, stack)); |
79 | #endif /* CONFIG_PPC64 */ | 80 | #endif /* CONFIG_PPC64 */ |
@@ -181,17 +182,19 @@ int main(void) | |||
181 | offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid)); | 182 | offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid)); |
182 | DEFINE(SLBSHADOW_STACKESID, | 183 | DEFINE(SLBSHADOW_STACKESID, |
183 | offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid)); | 184 | offsetof(struct slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid)); |
185 | DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area)); | ||
184 | DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); | 186 | DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0)); |
185 | DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); | 187 | DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1)); |
186 | DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); | 188 | DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int)); |
187 | DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); | 189 | DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int)); |
188 | DEFINE(SLBSHADOW_SAVEAREA, offsetof(struct slb_shadow, save_area)); | 190 | DEFINE(LPPACA_DTLIDX, offsetof(struct lppaca, dtl_idx)); |
191 | DEFINE(PACA_DTL_RIDX, offsetof(struct paca_struct, dtl_ridx)); | ||
189 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 192 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
190 | DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); | 193 | DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); |
191 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); | 194 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); |
192 | DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); | 195 | DEFINE(PACAKEXECSTATE, offsetof(struct paca_struct, kexec_state)); |
193 | DEFINE(PACA_STARTPURR, offsetof(struct paca_struct, startpurr)); | 196 | DEFINE(PACA_STARTTIME, offsetof(struct paca_struct, starttime)); |
194 | DEFINE(PACA_STARTSPURR, offsetof(struct paca_struct, startspurr)); | 197 | DEFINE(PACA_STARTTIME_USER, offsetof(struct paca_struct, starttime_user)); |
195 | DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); | 198 | DEFINE(PACA_USER_TIME, offsetof(struct paca_struct, user_time)); |
196 | DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); | 199 | DEFINE(PACA_SYSTEM_TIME, offsetof(struct paca_struct, system_time)); |
197 | DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); | 200 | DEFINE(PACA_TRAP_SAVE, offsetof(struct paca_struct, trap_save)); |
@@ -207,7 +210,6 @@ int main(void) | |||
207 | DEFINE(RTASENTRY, offsetof(struct rtas_t, entry)); | 210 | DEFINE(RTASENTRY, offsetof(struct rtas_t, entry)); |
208 | 211 | ||
209 | /* Interrupt register frame */ | 212 | /* Interrupt register frame */ |
210 | DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD); | ||
211 | DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE); | 213 | DEFINE(INT_FRAME_SIZE, STACK_INT_FRAME_SIZE); |
212 | DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); | 214 | DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs)); |
213 | #ifdef CONFIG_PPC64 | 215 | #ifdef CONFIG_PPC64 |
@@ -394,12 +396,14 @@ int main(void) | |||
394 | DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); | 396 | DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); |
395 | DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); | 397 | DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); |
396 | DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); | 398 | DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); |
397 | DEFINE(VCPU_MSR, offsetof(struct kvm_vcpu, arch.msr)); | 399 | DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); |
398 | DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4)); | 400 | DEFINE(VCPU_SPRG4, offsetof(struct kvm_vcpu, arch.sprg4)); |
399 | DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5)); | 401 | DEFINE(VCPU_SPRG5, offsetof(struct kvm_vcpu, arch.sprg5)); |
400 | DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6)); | 402 | DEFINE(VCPU_SPRG6, offsetof(struct kvm_vcpu, arch.sprg6)); |
401 | DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7)); | 403 | DEFINE(VCPU_SPRG7, offsetof(struct kvm_vcpu, arch.sprg7)); |
402 | DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); | 404 | DEFINE(VCPU_SHADOW_PID, offsetof(struct kvm_vcpu, arch.shadow_pid)); |
405 | DEFINE(VCPU_SHARED, offsetof(struct kvm_vcpu, arch.shared)); | ||
406 | DEFINE(VCPU_SHARED_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); | ||
403 | 407 | ||
404 | /* book3s */ | 408 | /* book3s */ |
405 | #ifdef CONFIG_PPC_BOOK3S | 409 | #ifdef CONFIG_PPC_BOOK3S |
@@ -464,11 +468,27 @@ int main(void) | |||
464 | DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); | 468 | DEFINE(VCPU_FAULT_ESR, offsetof(struct kvm_vcpu, arch.fault_esr)); |
465 | #endif /* CONFIG_PPC_BOOK3S */ | 469 | #endif /* CONFIG_PPC_BOOK3S */ |
466 | #endif | 470 | #endif |
471 | |||
472 | #ifdef CONFIG_KVM_GUEST | ||
473 | DEFINE(KVM_MAGIC_SCRATCH1, offsetof(struct kvm_vcpu_arch_shared, | ||
474 | scratch1)); | ||
475 | DEFINE(KVM_MAGIC_SCRATCH2, offsetof(struct kvm_vcpu_arch_shared, | ||
476 | scratch2)); | ||
477 | DEFINE(KVM_MAGIC_SCRATCH3, offsetof(struct kvm_vcpu_arch_shared, | ||
478 | scratch3)); | ||
479 | DEFINE(KVM_MAGIC_INT, offsetof(struct kvm_vcpu_arch_shared, | ||
480 | int_pending)); | ||
481 | DEFINE(KVM_MAGIC_MSR, offsetof(struct kvm_vcpu_arch_shared, msr)); | ||
482 | DEFINE(KVM_MAGIC_CRITICAL, offsetof(struct kvm_vcpu_arch_shared, | ||
483 | critical)); | ||
484 | DEFINE(KVM_MAGIC_SR, offsetof(struct kvm_vcpu_arch_shared, sr)); | ||
485 | #endif | ||
486 | |||
467 | #ifdef CONFIG_44x | 487 | #ifdef CONFIG_44x |
468 | DEFINE(PGD_T_LOG2, PGD_T_LOG2); | 488 | DEFINE(PGD_T_LOG2, PGD_T_LOG2); |
469 | DEFINE(PTE_T_LOG2, PTE_T_LOG2); | 489 | DEFINE(PTE_T_LOG2, PTE_T_LOG2); |
470 | #endif | 490 | #endif |
471 | #ifdef CONFIG_FSL_BOOKE | 491 | #ifdef CONFIG_PPC_FSL_BOOK3E |
472 | DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam)); | 492 | DEFINE(TLBCAM_SIZE, sizeof(struct tlbcam)); |
473 | DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0)); | 493 | DEFINE(TLBCAM_MAS0, offsetof(struct tlbcam, MAS0)); |
474 | DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1)); | 494 | DEFINE(TLBCAM_MAS1, offsetof(struct tlbcam, MAS1)); |
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 625942ae5585..60b3e377b1e4 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c | |||
@@ -99,7 +99,7 @@ void __init btext_prepare_BAT(void) | |||
99 | 99 | ||
100 | /* This function can be used to enable the early boot text when doing | 100 | /* This function can be used to enable the early boot text when doing |
101 | * OF booting or within bootx init. It must be followed by a btext_unmap() | 101 | * OF booting or within bootx init. It must be followed by a btext_unmap() |
102 | * call before the logical address becomes unuseable | 102 | * call before the logical address becomes unusable |
103 | */ | 103 | */ |
104 | void __init btext_setup_display(int width, int height, int depth, int pitch, | 104 | void __init btext_setup_display(int width, int height, int depth, int pitch, |
105 | unsigned long address) | 105 | unsigned long address) |
diff --git a/arch/powerpc/kernel/cpu_setup_44x.S b/arch/powerpc/kernel/cpu_setup_44x.S index 7d606f89a839..e32b4a9a2c22 100644 --- a/arch/powerpc/kernel/cpu_setup_44x.S +++ b/arch/powerpc/kernel/cpu_setup_44x.S | |||
@@ -35,6 +35,7 @@ _GLOBAL(__setup_cpu_440grx) | |||
35 | _GLOBAL(__setup_cpu_460ex) | 35 | _GLOBAL(__setup_cpu_460ex) |
36 | _GLOBAL(__setup_cpu_460gt) | 36 | _GLOBAL(__setup_cpu_460gt) |
37 | _GLOBAL(__setup_cpu_460sx) | 37 | _GLOBAL(__setup_cpu_460sx) |
38 | _GLOBAL(__setup_cpu_apm821xx) | ||
38 | mflr r4 | 39 | mflr r4 |
39 | bl __init_fpu_44x | 40 | bl __init_fpu_44x |
40 | bl __fixup_440A_mcheck | 41 | bl __fixup_440A_mcheck |
diff --git a/arch/powerpc/kernel/cpu_setup_6xx.S b/arch/powerpc/kernel/cpu_setup_6xx.S index 55cba4a8a959..f8cd9fba4d35 100644 --- a/arch/powerpc/kernel/cpu_setup_6xx.S +++ b/arch/powerpc/kernel/cpu_setup_6xx.S | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <asm/mmu.h> | 18 | #include <asm/mmu.h> |
19 | 19 | ||
20 | _GLOBAL(__setup_cpu_603) | 20 | _GLOBAL(__setup_cpu_603) |
21 | mflr r4 | 21 | mflr r5 |
22 | BEGIN_MMU_FTR_SECTION | 22 | BEGIN_MMU_FTR_SECTION |
23 | li r10,0 | 23 | li r10,0 |
24 | mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */ | 24 | mtspr SPRN_SPRG_603_LRU,r10 /* init SW LRU tracking */ |
@@ -27,60 +27,60 @@ BEGIN_FTR_SECTION | |||
27 | bl __init_fpu_registers | 27 | bl __init_fpu_registers |
28 | END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE) | 28 | END_FTR_SECTION_IFCLR(CPU_FTR_FPU_UNAVAILABLE) |
29 | bl setup_common_caches | 29 | bl setup_common_caches |
30 | mtlr r4 | 30 | mtlr r5 |
31 | blr | 31 | blr |
32 | _GLOBAL(__setup_cpu_604) | 32 | _GLOBAL(__setup_cpu_604) |
33 | mflr r4 | 33 | mflr r5 |
34 | bl setup_common_caches | 34 | bl setup_common_caches |
35 | bl setup_604_hid0 | 35 | bl setup_604_hid0 |
36 | mtlr r4 | 36 | mtlr r5 |
37 | blr | 37 | blr |
38 | _GLOBAL(__setup_cpu_750) | 38 | _GLOBAL(__setup_cpu_750) |
39 | mflr r4 | 39 | mflr r5 |
40 | bl __init_fpu_registers | 40 | bl __init_fpu_registers |
41 | bl setup_common_caches | 41 | bl setup_common_caches |
42 | bl setup_750_7400_hid0 | 42 | bl setup_750_7400_hid0 |
43 | mtlr r4 | 43 | mtlr r5 |
44 | blr | 44 | blr |
45 | _GLOBAL(__setup_cpu_750cx) | 45 | _GLOBAL(__setup_cpu_750cx) |
46 | mflr r4 | 46 | mflr r5 |
47 | bl __init_fpu_registers | 47 | bl __init_fpu_registers |
48 | bl setup_common_caches | 48 | bl setup_common_caches |
49 | bl setup_750_7400_hid0 | 49 | bl setup_750_7400_hid0 |
50 | bl setup_750cx | 50 | bl setup_750cx |
51 | mtlr r4 | 51 | mtlr r5 |
52 | blr | 52 | blr |
53 | _GLOBAL(__setup_cpu_750fx) | 53 | _GLOBAL(__setup_cpu_750fx) |
54 | mflr r4 | 54 | mflr r5 |
55 | bl __init_fpu_registers | 55 | bl __init_fpu_registers |
56 | bl setup_common_caches | 56 | bl setup_common_caches |
57 | bl setup_750_7400_hid0 | 57 | bl setup_750_7400_hid0 |
58 | bl setup_750fx | 58 | bl setup_750fx |
59 | mtlr r4 | 59 | mtlr r5 |
60 | blr | 60 | blr |
61 | _GLOBAL(__setup_cpu_7400) | 61 | _GLOBAL(__setup_cpu_7400) |
62 | mflr r4 | 62 | mflr r5 |
63 | bl __init_fpu_registers | 63 | bl __init_fpu_registers |
64 | bl setup_7400_workarounds | 64 | bl setup_7400_workarounds |
65 | bl setup_common_caches | 65 | bl setup_common_caches |
66 | bl setup_750_7400_hid0 | 66 | bl setup_750_7400_hid0 |
67 | mtlr r4 | 67 | mtlr r5 |
68 | blr | 68 | blr |
69 | _GLOBAL(__setup_cpu_7410) | 69 | _GLOBAL(__setup_cpu_7410) |
70 | mflr r4 | 70 | mflr r5 |
71 | bl __init_fpu_registers | 71 | bl __init_fpu_registers |
72 | bl setup_7410_workarounds | 72 | bl setup_7410_workarounds |
73 | bl setup_common_caches | 73 | bl setup_common_caches |
74 | bl setup_750_7400_hid0 | 74 | bl setup_750_7400_hid0 |
75 | li r3,0 | 75 | li r3,0 |
76 | mtspr SPRN_L2CR2,r3 | 76 | mtspr SPRN_L2CR2,r3 |
77 | mtlr r4 | 77 | mtlr r5 |
78 | blr | 78 | blr |
79 | _GLOBAL(__setup_cpu_745x) | 79 | _GLOBAL(__setup_cpu_745x) |
80 | mflr r4 | 80 | mflr r5 |
81 | bl setup_common_caches | 81 | bl setup_common_caches |
82 | bl setup_745x_specifics | 82 | bl setup_745x_specifics |
83 | mtlr r4 | 83 | mtlr r5 |
84 | blr | 84 | blr |
85 | 85 | ||
86 | /* Enable caches for 603's, 604, 750 & 7400 */ | 86 | /* Enable caches for 603's, 604, 750 & 7400 */ |
@@ -194,10 +194,10 @@ setup_750cx: | |||
194 | cror 4*cr0+eq,4*cr0+eq,4*cr1+eq | 194 | cror 4*cr0+eq,4*cr0+eq,4*cr1+eq |
195 | cror 4*cr0+eq,4*cr0+eq,4*cr2+eq | 195 | cror 4*cr0+eq,4*cr0+eq,4*cr2+eq |
196 | bnelr | 196 | bnelr |
197 | lwz r6,CPU_SPEC_FEATURES(r5) | 197 | lwz r6,CPU_SPEC_FEATURES(r4) |
198 | li r7,CPU_FTR_CAN_NAP | 198 | li r7,CPU_FTR_CAN_NAP |
199 | andc r6,r6,r7 | 199 | andc r6,r6,r7 |
200 | stw r6,CPU_SPEC_FEATURES(r5) | 200 | stw r6,CPU_SPEC_FEATURES(r4) |
201 | blr | 201 | blr |
202 | 202 | ||
203 | /* 750fx specific | 203 | /* 750fx specific |
@@ -225,12 +225,12 @@ BEGIN_FTR_SECTION | |||
225 | andis. r11,r11,L3CR_L3E@h | 225 | andis. r11,r11,L3CR_L3E@h |
226 | beq 1f | 226 | beq 1f |
227 | END_FTR_SECTION_IFSET(CPU_FTR_L3CR) | 227 | END_FTR_SECTION_IFSET(CPU_FTR_L3CR) |
228 | lwz r6,CPU_SPEC_FEATURES(r5) | 228 | lwz r6,CPU_SPEC_FEATURES(r4) |
229 | andi. r0,r6,CPU_FTR_L3_DISABLE_NAP | 229 | andi. r0,r6,CPU_FTR_L3_DISABLE_NAP |
230 | beq 1f | 230 | beq 1f |
231 | li r7,CPU_FTR_CAN_NAP | 231 | li r7,CPU_FTR_CAN_NAP |
232 | andc r6,r6,r7 | 232 | andc r6,r6,r7 |
233 | stw r6,CPU_SPEC_FEATURES(r5) | 233 | stw r6,CPU_SPEC_FEATURES(r4) |
234 | 1: | 234 | 1: |
235 | mfspr r11,SPRN_HID0 | 235 | mfspr r11,SPRN_HID0 |
236 | 236 | ||
diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S new file mode 100644 index 000000000000..7f818feaa7a5 --- /dev/null +++ b/arch/powerpc/kernel/cpu_setup_a2.S | |||
@@ -0,0 +1,114 @@ | |||
1 | /* | ||
2 | * A2 specific assembly support code | ||
3 | * | ||
4 | * Copyright 2009 Ben Herrenschmidt, IBM Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <asm/asm-offsets.h> | ||
13 | #include <asm/ppc_asm.h> | ||
14 | #include <asm/ppc-opcode.h> | ||
15 | #include <asm/processor.h> | ||
16 | #include <asm/reg_a2.h> | ||
17 | #include <asm/reg.h> | ||
18 | #include <asm/thread_info.h> | ||
19 | |||
20 | /* | ||
21 | * Disable thdid and class fields in ERATs to bump PID to full 14 bits capacity. | ||
22 | * This also prevents external LPID accesses but that isn't a problem when not a | ||
23 | * guest. Under PV, this setting will be ignored and MMUCR will return the right | ||
24 | * number of PID bits we can use. | ||
25 | */ | ||
26 | #define MMUCR1_EXTEND_PID \ | ||
27 | (MMUCR1_ICTID | MMUCR1_ITTID | MMUCR1_DCTID | \ | ||
28 | MMUCR1_DTTID | MMUCR1_DCCD) | ||
29 | |||
30 | /* | ||
31 | * Use extended PIDs if enabled. | ||
32 | * Don't clear the ERATs on context sync events and enable I & D LRU. | ||
33 | * Enable ERAT back invalidate when tlbwe overwrites an entry. | ||
34 | */ | ||
35 | #define INITIAL_MMUCR1 \ | ||
36 | (MMUCR1_EXTEND_PID | MMUCR1_CSINV_NEVER | MMUCR1_IRRE | \ | ||
37 | MMUCR1_DRRE | MMUCR1_TLBWE_BINV) | ||
38 | |||
39 | _GLOBAL(__setup_cpu_a2) | ||
40 | /* Some of these are actually thread local and some are | ||
41 | * core local but doing it always won't hurt | ||
42 | */ | ||
43 | |||
44 | #ifdef CONFIG_PPC_WSP_COPRO | ||
45 | /* Make sure ACOP starts out as zero */ | ||
46 | li r3,0 | ||
47 | mtspr SPRN_ACOP,r3 | ||
48 | |||
49 | /* Enable icswx instruction */ | ||
50 | mfspr r3,SPRN_A2_CCR2 | ||
51 | ori r3,r3,A2_CCR2_ENABLE_ICSWX | ||
52 | mtspr SPRN_A2_CCR2,r3 | ||
53 | |||
54 | /* Unmask all CTs in HACOP */ | ||
55 | li r3,-1 | ||
56 | mtspr SPRN_HACOP,r3 | ||
57 | #endif /* CONFIG_PPC_WSP_COPRO */ | ||
58 | |||
59 | /* Enable doorbell */ | ||
60 | mfspr r3,SPRN_A2_CCR2 | ||
61 | oris r3,r3,A2_CCR2_ENABLE_PC@h | ||
62 | mtspr SPRN_A2_CCR2,r3 | ||
63 | isync | ||
64 | |||
65 | /* Setup CCR0 to disable power saving for now as it's busted | ||
66 | * in the current implementations. Setup CCR1 to wake on | ||
67 | * interrupts normally (we write the default value but who | ||
68 | * knows what FW may have clobbered...) | ||
69 | */ | ||
70 | li r3,0 | ||
71 | mtspr SPRN_A2_CCR0, r3 | ||
72 | LOAD_REG_IMMEDIATE(r3,0x0f0f0f0f) | ||
73 | mtspr SPRN_A2_CCR1, r3 | ||
74 | |||
75 | /* Initialise MMUCR1 */ | ||
76 | lis r3,INITIAL_MMUCR1@h | ||
77 | ori r3,r3,INITIAL_MMUCR1@l | ||
78 | mtspr SPRN_MMUCR1,r3 | ||
79 | |||
80 | /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */ | ||
81 | LOAD_REG_IMMEDIATE(r3, 0x000a7531) | ||
82 | mtspr SPRN_MMUCR2,r3 | ||
83 | |||
84 | /* Set MMUCR3 to write all thids bit to the TLB */ | ||
85 | LOAD_REG_IMMEDIATE(r3, 0x0000000f) | ||
86 | mtspr SPRN_MMUCR3,r3 | ||
87 | |||
88 | /* Don't do ERAT stuff if running guest mode */ | ||
89 | mfmsr r3 | ||
90 | andis. r0,r3,MSR_GS@h | ||
91 | bne 1f | ||
92 | |||
93 | /* Now set the I-ERAT watermark to 15 */ | ||
94 | lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h | ||
95 | mtspr SPRN_MMUCR0, r4 | ||
96 | li r4,A2_IERAT_SIZE-1 | ||
97 | PPC_ERATWE(r4,r4,3) | ||
98 | |||
99 | /* Now set the D-ERAT watermark to 31 */ | ||
100 | lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h | ||
101 | mtspr SPRN_MMUCR0, r4 | ||
102 | li r4,A2_DERAT_SIZE-1 | ||
103 | PPC_ERATWE(r4,r4,3) | ||
104 | |||
105 | /* And invalidate the beast just in case. That won't get rid of | ||
106 | * a bolted entry though it will be in LRU and so will go away eventually | ||
107 | * but let's not bother for now | ||
108 | */ | ||
109 | PPC_ERATILX(0,0,0) | ||
110 | 1: | ||
111 | blr | ||
112 | |||
113 | _GLOBAL(__restore_cpu_a2) | ||
114 | b __setup_cpu_a2 | ||
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S index 0adb50ad8031..8053db02b85e 100644 --- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S +++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S | |||
@@ -51,6 +51,7 @@ _GLOBAL(__e500_dcache_setup) | |||
51 | isync | 51 | isync |
52 | blr | 52 | blr |
53 | 53 | ||
54 | #ifdef CONFIG_PPC32 | ||
54 | _GLOBAL(__setup_cpu_e200) | 55 | _GLOBAL(__setup_cpu_e200) |
55 | /* enable dedicated debug exception handling resources (Debug APU) */ | 56 | /* enable dedicated debug exception handling resources (Debug APU) */ |
56 | mfspr r3,SPRN_HID0 | 57 | mfspr r3,SPRN_HID0 |
@@ -63,6 +64,12 @@ _GLOBAL(__setup_cpu_e500v2) | |||
63 | bl __e500_icache_setup | 64 | bl __e500_icache_setup |
64 | bl __e500_dcache_setup | 65 | bl __e500_dcache_setup |
65 | bl __setup_e500_ivors | 66 | bl __setup_e500_ivors |
67 | #ifdef CONFIG_FSL_RIO | ||
68 | /* Ensure that RFXE is set */ | ||
69 | mfspr r3,SPRN_HID1 | ||
70 | oris r3,r3,HID1_RFXE@h | ||
71 | mtspr SPRN_HID1,r3 | ||
72 | #endif | ||
66 | mtlr r4 | 73 | mtlr r4 |
67 | blr | 74 | blr |
68 | _GLOBAL(__setup_cpu_e500mc) | 75 | _GLOBAL(__setup_cpu_e500mc) |
@@ -72,3 +79,20 @@ _GLOBAL(__setup_cpu_e500mc) | |||
72 | bl __setup_e500mc_ivors | 79 | bl __setup_e500mc_ivors |
73 | mtlr r4 | 80 | mtlr r4 |
74 | blr | 81 | blr |
82 | #endif | ||
83 | /* Right now, restore and setup are the same thing */ | ||
84 | _GLOBAL(__restore_cpu_e5500) | ||
85 | _GLOBAL(__setup_cpu_e5500) | ||
86 | mflr r4 | ||
87 | bl __e500_icache_setup | ||
88 | bl __e500_dcache_setup | ||
89 | #ifdef CONFIG_PPC_BOOK3E_64 | ||
90 | bl .__setup_base_ivors | ||
91 | bl .setup_perfmon_ivor | ||
92 | bl .setup_doorbell_ivors | ||
93 | bl .setup_ehv_ivors | ||
94 | #else | ||
95 | bl __setup_e500mc_ivors | ||
96 | #endif | ||
97 | mtlr r4 | ||
98 | blr | ||
diff --git a/arch/powerpc/kernel/cpu_setup_power7.S b/arch/powerpc/kernel/cpu_setup_power7.S new file mode 100644 index 000000000000..4f9a93fcfe07 --- /dev/null +++ b/arch/powerpc/kernel/cpu_setup_power7.S | |||
@@ -0,0 +1,91 @@ | |||
1 | /* | ||
2 | * This file contains low level CPU setup functions. | ||
3 | * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * as published by the Free Software Foundation; either version | ||
8 | * 2 of the License, or (at your option) any later version. | ||
9 | * | ||
10 | */ | ||
11 | |||
12 | #include <asm/processor.h> | ||
13 | #include <asm/page.h> | ||
14 | #include <asm/cputable.h> | ||
15 | #include <asm/ppc_asm.h> | ||
16 | #include <asm/asm-offsets.h> | ||
17 | #include <asm/cache.h> | ||
18 | |||
19 | /* Entry: r3 = crap, r4 = ptr to cputable entry | ||
20 | * | ||
21 | * Note that we can be called twice for pseudo-PVRs | ||
22 | */ | ||
23 | _GLOBAL(__setup_cpu_power7) | ||
24 | mflr r11 | ||
25 | bl __init_hvmode_206 | ||
26 | mtlr r11 | ||
27 | beqlr | ||
28 | li r0,0 | ||
29 | mtspr SPRN_LPID,r0 | ||
30 | bl __init_LPCR | ||
31 | bl __init_TLB | ||
32 | mtlr r11 | ||
33 | blr | ||
34 | |||
35 | _GLOBAL(__restore_cpu_power7) | ||
36 | mflr r11 | ||
37 | mfmsr r3 | ||
38 | rldicl. r0,r3,4,63 | ||
39 | beqlr | ||
40 | li r0,0 | ||
41 | mtspr SPRN_LPID,r0 | ||
42 | bl __init_LPCR | ||
43 | bl __init_TLB | ||
44 | mtlr r11 | ||
45 | blr | ||
46 | |||
47 | __init_hvmode_206: | ||
48 | /* Disable CPU_FTR_HVMODE_206 and exit if MSR:HV is not set */ | ||
49 | mfmsr r3 | ||
50 | rldicl. r0,r3,4,63 | ||
51 | bnelr | ||
52 | ld r5,CPU_SPEC_FEATURES(r4) | ||
53 | LOAD_REG_IMMEDIATE(r6,CPU_FTR_HVMODE_206) | ||
54 | xor r5,r5,r6 | ||
55 | std r5,CPU_SPEC_FEATURES(r4) | ||
56 | blr | ||
57 | |||
58 | __init_LPCR: | ||
59 | /* Setup a sane LPCR: | ||
60 | * | ||
61 | * LPES = 0b01 (HSRR0/1 used for 0x500) | ||
62 | * PECE = 0b111 | ||
63 | * DPFD = 4 | ||
64 | * | ||
65 | * Other bits untouched for now | ||
66 | */ | ||
67 | mfspr r3,SPRN_LPCR | ||
68 | ori r3,r3,(LPCR_LPES0|LPCR_LPES1) | ||
69 | xori r3,r3, LPCR_LPES0 | ||
70 | ori r3,r3,(LPCR_PECE0|LPCR_PECE1|LPCR_PECE2) | ||
71 | li r5,7 | ||
72 | sldi r5,r5,LPCR_DPFD_SH | ||
73 | andc r3,r3,r5 | ||
74 | li r5,4 | ||
75 | sldi r5,r5,LPCR_DPFD_SH | ||
76 | or r3,r3,r5 | ||
77 | mtspr SPRN_LPCR,r3 | ||
78 | isync | ||
79 | blr | ||
80 | |||
81 | __init_TLB: | ||
82 | /* Clear the TLB */ | ||
83 | li r6,128 | ||
84 | mtctr r6 | ||
85 | li r7,0xc00 /* IS field = 0b11 */ | ||
86 | ptesync | ||
87 | 2: tlbiel r7 | ||
88 | addi r7,r7,0x1000 | ||
89 | bdnz 2b | ||
90 | ptesync | ||
91 | 1: blr | ||
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 1f9123f412ec..9fb933248ab6 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
@@ -48,6 +48,7 @@ extern void __setup_cpu_440x5(unsigned long offset, struct cpu_spec* spec); | |||
48 | extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec); | 48 | extern void __setup_cpu_460ex(unsigned long offset, struct cpu_spec* spec); |
49 | extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec); | 49 | extern void __setup_cpu_460gt(unsigned long offset, struct cpu_spec* spec); |
50 | extern void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec); | 50 | extern void __setup_cpu_460sx(unsigned long offset, struct cpu_spec *spec); |
51 | extern void __setup_cpu_apm821xx(unsigned long offset, struct cpu_spec *spec); | ||
51 | extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); | 52 | extern void __setup_cpu_603(unsigned long offset, struct cpu_spec* spec); |
52 | extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); | 53 | extern void __setup_cpu_604(unsigned long offset, struct cpu_spec* spec); |
53 | extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); | 54 | extern void __setup_cpu_750(unsigned long offset, struct cpu_spec* spec); |
@@ -61,11 +62,17 @@ extern void __setup_cpu_745x(unsigned long offset, struct cpu_spec* spec); | |||
61 | extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); | 62 | extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); |
62 | extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec); | 63 | extern void __setup_cpu_ppc970MP(unsigned long offset, struct cpu_spec* spec); |
63 | extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec); | 64 | extern void __setup_cpu_pa6t(unsigned long offset, struct cpu_spec* spec); |
65 | extern void __setup_cpu_a2(unsigned long offset, struct cpu_spec* spec); | ||
64 | extern void __restore_cpu_pa6t(void); | 66 | extern void __restore_cpu_pa6t(void); |
65 | extern void __restore_cpu_ppc970(void); | 67 | extern void __restore_cpu_ppc970(void); |
66 | extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec); | 68 | extern void __setup_cpu_power7(unsigned long offset, struct cpu_spec* spec); |
67 | extern void __restore_cpu_power7(void); | 69 | extern void __restore_cpu_power7(void); |
70 | extern void __restore_cpu_a2(void); | ||
68 | #endif /* CONFIG_PPC64 */ | 71 | #endif /* CONFIG_PPC64 */ |
72 | #if defined(CONFIG_E500) | ||
73 | extern void __setup_cpu_e5500(unsigned long offset, struct cpu_spec* spec); | ||
74 | extern void __restore_cpu_e5500(void); | ||
75 | #endif /* CONFIG_E500 */ | ||
69 | 76 | ||
70 | /* This table only contains "desktop" CPUs, it needs to be filled with embedded | 77 | /* This table only contains "desktop" CPUs, it needs to be filled with embedded |
71 | * ones as well... | 78 | * ones as well... |
@@ -111,7 +118,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
111 | .pmc_type = PPC_PMC_IBM, | 118 | .pmc_type = PPC_PMC_IBM, |
112 | .oprofile_cpu_type = "ppc64/power3", | 119 | .oprofile_cpu_type = "ppc64/power3", |
113 | .oprofile_type = PPC_OPROFILE_RS64, | 120 | .oprofile_type = PPC_OPROFILE_RS64, |
114 | .machine_check = machine_check_generic, | ||
115 | .platform = "power3", | 121 | .platform = "power3", |
116 | }, | 122 | }, |
117 | { /* Power3+ */ | 123 | { /* Power3+ */ |
@@ -127,7 +133,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
127 | .pmc_type = PPC_PMC_IBM, | 133 | .pmc_type = PPC_PMC_IBM, |
128 | .oprofile_cpu_type = "ppc64/power3", | 134 | .oprofile_cpu_type = "ppc64/power3", |
129 | .oprofile_type = PPC_OPROFILE_RS64, | 135 | .oprofile_type = PPC_OPROFILE_RS64, |
130 | .machine_check = machine_check_generic, | ||
131 | .platform = "power3", | 136 | .platform = "power3", |
132 | }, | 137 | }, |
133 | { /* Northstar */ | 138 | { /* Northstar */ |
@@ -143,7 +148,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
143 | .pmc_type = PPC_PMC_IBM, | 148 | .pmc_type = PPC_PMC_IBM, |
144 | .oprofile_cpu_type = "ppc64/rs64", | 149 | .oprofile_cpu_type = "ppc64/rs64", |
145 | .oprofile_type = PPC_OPROFILE_RS64, | 150 | .oprofile_type = PPC_OPROFILE_RS64, |
146 | .machine_check = machine_check_generic, | ||
147 | .platform = "rs64", | 151 | .platform = "rs64", |
148 | }, | 152 | }, |
149 | { /* Pulsar */ | 153 | { /* Pulsar */ |
@@ -159,7 +163,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
159 | .pmc_type = PPC_PMC_IBM, | 163 | .pmc_type = PPC_PMC_IBM, |
160 | .oprofile_cpu_type = "ppc64/rs64", | 164 | .oprofile_cpu_type = "ppc64/rs64", |
161 | .oprofile_type = PPC_OPROFILE_RS64, | 165 | .oprofile_type = PPC_OPROFILE_RS64, |
162 | .machine_check = machine_check_generic, | ||
163 | .platform = "rs64", | 166 | .platform = "rs64", |
164 | }, | 167 | }, |
165 | { /* I-star */ | 168 | { /* I-star */ |
@@ -175,7 +178,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
175 | .pmc_type = PPC_PMC_IBM, | 178 | .pmc_type = PPC_PMC_IBM, |
176 | .oprofile_cpu_type = "ppc64/rs64", | 179 | .oprofile_cpu_type = "ppc64/rs64", |
177 | .oprofile_type = PPC_OPROFILE_RS64, | 180 | .oprofile_type = PPC_OPROFILE_RS64, |
178 | .machine_check = machine_check_generic, | ||
179 | .platform = "rs64", | 181 | .platform = "rs64", |
180 | }, | 182 | }, |
181 | { /* S-star */ | 183 | { /* S-star */ |
@@ -191,7 +193,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
191 | .pmc_type = PPC_PMC_IBM, | 193 | .pmc_type = PPC_PMC_IBM, |
192 | .oprofile_cpu_type = "ppc64/rs64", | 194 | .oprofile_cpu_type = "ppc64/rs64", |
193 | .oprofile_type = PPC_OPROFILE_RS64, | 195 | .oprofile_type = PPC_OPROFILE_RS64, |
194 | .machine_check = machine_check_generic, | ||
195 | .platform = "rs64", | 196 | .platform = "rs64", |
196 | }, | 197 | }, |
197 | { /* Power4 */ | 198 | { /* Power4 */ |
@@ -200,14 +201,13 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
200 | .cpu_name = "POWER4 (gp)", | 201 | .cpu_name = "POWER4 (gp)", |
201 | .cpu_features = CPU_FTRS_POWER4, | 202 | .cpu_features = CPU_FTRS_POWER4, |
202 | .cpu_user_features = COMMON_USER_POWER4, | 203 | .cpu_user_features = COMMON_USER_POWER4, |
203 | .mmu_features = MMU_FTR_HPTE_TABLE, | 204 | .mmu_features = MMU_FTRS_POWER4, |
204 | .icache_bsize = 128, | 205 | .icache_bsize = 128, |
205 | .dcache_bsize = 128, | 206 | .dcache_bsize = 128, |
206 | .num_pmcs = 8, | 207 | .num_pmcs = 8, |
207 | .pmc_type = PPC_PMC_IBM, | 208 | .pmc_type = PPC_PMC_IBM, |
208 | .oprofile_cpu_type = "ppc64/power4", | 209 | .oprofile_cpu_type = "ppc64/power4", |
209 | .oprofile_type = PPC_OPROFILE_POWER4, | 210 | .oprofile_type = PPC_OPROFILE_POWER4, |
210 | .machine_check = machine_check_generic, | ||
211 | .platform = "power4", | 211 | .platform = "power4", |
212 | }, | 212 | }, |
213 | { /* Power4+ */ | 213 | { /* Power4+ */ |
@@ -216,14 +216,13 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
216 | .cpu_name = "POWER4+ (gq)", | 216 | .cpu_name = "POWER4+ (gq)", |
217 | .cpu_features = CPU_FTRS_POWER4, | 217 | .cpu_features = CPU_FTRS_POWER4, |
218 | .cpu_user_features = COMMON_USER_POWER4, | 218 | .cpu_user_features = COMMON_USER_POWER4, |
219 | .mmu_features = MMU_FTR_HPTE_TABLE, | 219 | .mmu_features = MMU_FTRS_POWER4, |
220 | .icache_bsize = 128, | 220 | .icache_bsize = 128, |
221 | .dcache_bsize = 128, | 221 | .dcache_bsize = 128, |
222 | .num_pmcs = 8, | 222 | .num_pmcs = 8, |
223 | .pmc_type = PPC_PMC_IBM, | 223 | .pmc_type = PPC_PMC_IBM, |
224 | .oprofile_cpu_type = "ppc64/power4", | 224 | .oprofile_cpu_type = "ppc64/power4", |
225 | .oprofile_type = PPC_OPROFILE_POWER4, | 225 | .oprofile_type = PPC_OPROFILE_POWER4, |
226 | .machine_check = machine_check_generic, | ||
227 | .platform = "power4", | 226 | .platform = "power4", |
228 | }, | 227 | }, |
229 | { /* PPC970 */ | 228 | { /* PPC970 */ |
@@ -233,7 +232,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
233 | .cpu_features = CPU_FTRS_PPC970, | 232 | .cpu_features = CPU_FTRS_PPC970, |
234 | .cpu_user_features = COMMON_USER_POWER4 | | 233 | .cpu_user_features = COMMON_USER_POWER4 | |
235 | PPC_FEATURE_HAS_ALTIVEC_COMP, | 234 | PPC_FEATURE_HAS_ALTIVEC_COMP, |
236 | .mmu_features = MMU_FTR_HPTE_TABLE, | 235 | .mmu_features = MMU_FTRS_PPC970, |
237 | .icache_bsize = 128, | 236 | .icache_bsize = 128, |
238 | .dcache_bsize = 128, | 237 | .dcache_bsize = 128, |
239 | .num_pmcs = 8, | 238 | .num_pmcs = 8, |
@@ -242,7 +241,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
242 | .cpu_restore = __restore_cpu_ppc970, | 241 | .cpu_restore = __restore_cpu_ppc970, |
243 | .oprofile_cpu_type = "ppc64/970", | 242 | .oprofile_cpu_type = "ppc64/970", |
244 | .oprofile_type = PPC_OPROFILE_POWER4, | 243 | .oprofile_type = PPC_OPROFILE_POWER4, |
245 | .machine_check = machine_check_generic, | ||
246 | .platform = "ppc970", | 244 | .platform = "ppc970", |
247 | }, | 245 | }, |
248 | { /* PPC970FX */ | 246 | { /* PPC970FX */ |
@@ -252,7 +250,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
252 | .cpu_features = CPU_FTRS_PPC970, | 250 | .cpu_features = CPU_FTRS_PPC970, |
253 | .cpu_user_features = COMMON_USER_POWER4 | | 251 | .cpu_user_features = COMMON_USER_POWER4 | |
254 | PPC_FEATURE_HAS_ALTIVEC_COMP, | 252 | PPC_FEATURE_HAS_ALTIVEC_COMP, |
255 | .mmu_features = MMU_FTR_HPTE_TABLE, | 253 | .mmu_features = MMU_FTRS_PPC970, |
256 | .icache_bsize = 128, | 254 | .icache_bsize = 128, |
257 | .dcache_bsize = 128, | 255 | .dcache_bsize = 128, |
258 | .num_pmcs = 8, | 256 | .num_pmcs = 8, |
@@ -261,7 +259,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
261 | .cpu_restore = __restore_cpu_ppc970, | 259 | .cpu_restore = __restore_cpu_ppc970, |
262 | .oprofile_cpu_type = "ppc64/970", | 260 | .oprofile_cpu_type = "ppc64/970", |
263 | .oprofile_type = PPC_OPROFILE_POWER4, | 261 | .oprofile_type = PPC_OPROFILE_POWER4, |
264 | .machine_check = machine_check_generic, | ||
265 | .platform = "ppc970", | 262 | .platform = "ppc970", |
266 | }, | 263 | }, |
267 | { /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */ | 264 | { /* PPC970MP DD1.0 - no DEEPNAP, use regular 970 init */ |
@@ -280,7 +277,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
280 | .cpu_restore = __restore_cpu_ppc970, | 277 | .cpu_restore = __restore_cpu_ppc970, |
281 | .oprofile_cpu_type = "ppc64/970MP", | 278 | .oprofile_cpu_type = "ppc64/970MP", |
282 | .oprofile_type = PPC_OPROFILE_POWER4, | 279 | .oprofile_type = PPC_OPROFILE_POWER4, |
283 | .machine_check = machine_check_generic, | ||
284 | .platform = "ppc970", | 280 | .platform = "ppc970", |
285 | }, | 281 | }, |
286 | { /* PPC970MP */ | 282 | { /* PPC970MP */ |
@@ -290,7 +286,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
290 | .cpu_features = CPU_FTRS_PPC970, | 286 | .cpu_features = CPU_FTRS_PPC970, |
291 | .cpu_user_features = COMMON_USER_POWER4 | | 287 | .cpu_user_features = COMMON_USER_POWER4 | |
292 | PPC_FEATURE_HAS_ALTIVEC_COMP, | 288 | PPC_FEATURE_HAS_ALTIVEC_COMP, |
293 | .mmu_features = MMU_FTR_HPTE_TABLE, | 289 | .mmu_features = MMU_FTRS_PPC970, |
294 | .icache_bsize = 128, | 290 | .icache_bsize = 128, |
295 | .dcache_bsize = 128, | 291 | .dcache_bsize = 128, |
296 | .num_pmcs = 8, | 292 | .num_pmcs = 8, |
@@ -299,7 +295,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
299 | .cpu_restore = __restore_cpu_ppc970, | 295 | .cpu_restore = __restore_cpu_ppc970, |
300 | .oprofile_cpu_type = "ppc64/970MP", | 296 | .oprofile_cpu_type = "ppc64/970MP", |
301 | .oprofile_type = PPC_OPROFILE_POWER4, | 297 | .oprofile_type = PPC_OPROFILE_POWER4, |
302 | .machine_check = machine_check_generic, | ||
303 | .platform = "ppc970", | 298 | .platform = "ppc970", |
304 | }, | 299 | }, |
305 | { /* PPC970GX */ | 300 | { /* PPC970GX */ |
@@ -309,7 +304,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
309 | .cpu_features = CPU_FTRS_PPC970, | 304 | .cpu_features = CPU_FTRS_PPC970, |
310 | .cpu_user_features = COMMON_USER_POWER4 | | 305 | .cpu_user_features = COMMON_USER_POWER4 | |
311 | PPC_FEATURE_HAS_ALTIVEC_COMP, | 306 | PPC_FEATURE_HAS_ALTIVEC_COMP, |
312 | .mmu_features = MMU_FTR_HPTE_TABLE, | 307 | .mmu_features = MMU_FTRS_PPC970, |
313 | .icache_bsize = 128, | 308 | .icache_bsize = 128, |
314 | .dcache_bsize = 128, | 309 | .dcache_bsize = 128, |
315 | .num_pmcs = 8, | 310 | .num_pmcs = 8, |
@@ -317,7 +312,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
317 | .cpu_setup = __setup_cpu_ppc970, | 312 | .cpu_setup = __setup_cpu_ppc970, |
318 | .oprofile_cpu_type = "ppc64/970", | 313 | .oprofile_cpu_type = "ppc64/970", |
319 | .oprofile_type = PPC_OPROFILE_POWER4, | 314 | .oprofile_type = PPC_OPROFILE_POWER4, |
320 | .machine_check = machine_check_generic, | ||
321 | .platform = "ppc970", | 315 | .platform = "ppc970", |
322 | }, | 316 | }, |
323 | { /* Power5 GR */ | 317 | { /* Power5 GR */ |
@@ -326,7 +320,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
326 | .cpu_name = "POWER5 (gr)", | 320 | .cpu_name = "POWER5 (gr)", |
327 | .cpu_features = CPU_FTRS_POWER5, | 321 | .cpu_features = CPU_FTRS_POWER5, |
328 | .cpu_user_features = COMMON_USER_POWER5, | 322 | .cpu_user_features = COMMON_USER_POWER5, |
329 | .mmu_features = MMU_FTR_HPTE_TABLE, | 323 | .mmu_features = MMU_FTRS_POWER5, |
330 | .icache_bsize = 128, | 324 | .icache_bsize = 128, |
331 | .dcache_bsize = 128, | 325 | .dcache_bsize = 128, |
332 | .num_pmcs = 6, | 326 | .num_pmcs = 6, |
@@ -338,7 +332,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
338 | */ | 332 | */ |
339 | .oprofile_mmcra_sihv = MMCRA_SIHV, | 333 | .oprofile_mmcra_sihv = MMCRA_SIHV, |
340 | .oprofile_mmcra_sipr = MMCRA_SIPR, | 334 | .oprofile_mmcra_sipr = MMCRA_SIPR, |
341 | .machine_check = machine_check_generic, | ||
342 | .platform = "power5", | 335 | .platform = "power5", |
343 | }, | 336 | }, |
344 | { /* Power5++ */ | 337 | { /* Power5++ */ |
@@ -347,7 +340,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
347 | .cpu_name = "POWER5+ (gs)", | 340 | .cpu_name = "POWER5+ (gs)", |
348 | .cpu_features = CPU_FTRS_POWER5, | 341 | .cpu_features = CPU_FTRS_POWER5, |
349 | .cpu_user_features = COMMON_USER_POWER5_PLUS, | 342 | .cpu_user_features = COMMON_USER_POWER5_PLUS, |
350 | .mmu_features = MMU_FTR_HPTE_TABLE, | 343 | .mmu_features = MMU_FTRS_POWER5, |
351 | .icache_bsize = 128, | 344 | .icache_bsize = 128, |
352 | .dcache_bsize = 128, | 345 | .dcache_bsize = 128, |
353 | .num_pmcs = 6, | 346 | .num_pmcs = 6, |
@@ -355,7 +348,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
355 | .oprofile_type = PPC_OPROFILE_POWER4, | 348 | .oprofile_type = PPC_OPROFILE_POWER4, |
356 | .oprofile_mmcra_sihv = MMCRA_SIHV, | 349 | .oprofile_mmcra_sihv = MMCRA_SIHV, |
357 | .oprofile_mmcra_sipr = MMCRA_SIPR, | 350 | .oprofile_mmcra_sipr = MMCRA_SIPR, |
358 | .machine_check = machine_check_generic, | ||
359 | .platform = "power5+", | 351 | .platform = "power5+", |
360 | }, | 352 | }, |
361 | { /* Power5 GS */ | 353 | { /* Power5 GS */ |
@@ -364,7 +356,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
364 | .cpu_name = "POWER5+ (gs)", | 356 | .cpu_name = "POWER5+ (gs)", |
365 | .cpu_features = CPU_FTRS_POWER5, | 357 | .cpu_features = CPU_FTRS_POWER5, |
366 | .cpu_user_features = COMMON_USER_POWER5_PLUS, | 358 | .cpu_user_features = COMMON_USER_POWER5_PLUS, |
367 | .mmu_features = MMU_FTR_HPTE_TABLE, | 359 | .mmu_features = MMU_FTRS_POWER5, |
368 | .icache_bsize = 128, | 360 | .icache_bsize = 128, |
369 | .dcache_bsize = 128, | 361 | .dcache_bsize = 128, |
370 | .num_pmcs = 6, | 362 | .num_pmcs = 6, |
@@ -373,7 +365,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
373 | .oprofile_type = PPC_OPROFILE_POWER4, | 365 | .oprofile_type = PPC_OPROFILE_POWER4, |
374 | .oprofile_mmcra_sihv = MMCRA_SIHV, | 366 | .oprofile_mmcra_sihv = MMCRA_SIHV, |
375 | .oprofile_mmcra_sipr = MMCRA_SIPR, | 367 | .oprofile_mmcra_sipr = MMCRA_SIPR, |
376 | .machine_check = machine_check_generic, | ||
377 | .platform = "power5+", | 368 | .platform = "power5+", |
378 | }, | 369 | }, |
379 | { /* POWER6 in P5+ mode; 2.04-compliant processor */ | 370 | { /* POWER6 in P5+ mode; 2.04-compliant processor */ |
@@ -382,10 +373,9 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
382 | .cpu_name = "POWER5+", | 373 | .cpu_name = "POWER5+", |
383 | .cpu_features = CPU_FTRS_POWER5, | 374 | .cpu_features = CPU_FTRS_POWER5, |
384 | .cpu_user_features = COMMON_USER_POWER5_PLUS, | 375 | .cpu_user_features = COMMON_USER_POWER5_PLUS, |
385 | .mmu_features = MMU_FTR_HPTE_TABLE, | 376 | .mmu_features = MMU_FTRS_POWER5, |
386 | .icache_bsize = 128, | 377 | .icache_bsize = 128, |
387 | .dcache_bsize = 128, | 378 | .dcache_bsize = 128, |
388 | .machine_check = machine_check_generic, | ||
389 | .oprofile_cpu_type = "ppc64/ibm-compat-v1", | 379 | .oprofile_cpu_type = "ppc64/ibm-compat-v1", |
390 | .oprofile_type = PPC_OPROFILE_POWER4, | 380 | .oprofile_type = PPC_OPROFILE_POWER4, |
391 | .platform = "power5+", | 381 | .platform = "power5+", |
@@ -397,7 +387,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
397 | .cpu_features = CPU_FTRS_POWER6, | 387 | .cpu_features = CPU_FTRS_POWER6, |
398 | .cpu_user_features = COMMON_USER_POWER6 | | 388 | .cpu_user_features = COMMON_USER_POWER6 | |
399 | PPC_FEATURE_POWER6_EXT, | 389 | PPC_FEATURE_POWER6_EXT, |
400 | .mmu_features = MMU_FTR_HPTE_TABLE, | 390 | .mmu_features = MMU_FTRS_POWER6, |
401 | .icache_bsize = 128, | 391 | .icache_bsize = 128, |
402 | .dcache_bsize = 128, | 392 | .dcache_bsize = 128, |
403 | .num_pmcs = 6, | 393 | .num_pmcs = 6, |
@@ -408,7 +398,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
408 | .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR, | 398 | .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR, |
409 | .oprofile_mmcra_clear = POWER6_MMCRA_THRM | | 399 | .oprofile_mmcra_clear = POWER6_MMCRA_THRM | |
410 | POWER6_MMCRA_OTHER, | 400 | POWER6_MMCRA_OTHER, |
411 | .machine_check = machine_check_generic, | ||
412 | .platform = "power6x", | 401 | .platform = "power6x", |
413 | }, | 402 | }, |
414 | { /* 2.05-compliant processor, i.e. Power6 "architected" mode */ | 403 | { /* 2.05-compliant processor, i.e. Power6 "architected" mode */ |
@@ -417,10 +406,9 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
417 | .cpu_name = "POWER6 (architected)", | 406 | .cpu_name = "POWER6 (architected)", |
418 | .cpu_features = CPU_FTRS_POWER6, | 407 | .cpu_features = CPU_FTRS_POWER6, |
419 | .cpu_user_features = COMMON_USER_POWER6, | 408 | .cpu_user_features = COMMON_USER_POWER6, |
420 | .mmu_features = MMU_FTR_HPTE_TABLE, | 409 | .mmu_features = MMU_FTRS_POWER6, |
421 | .icache_bsize = 128, | 410 | .icache_bsize = 128, |
422 | .dcache_bsize = 128, | 411 | .dcache_bsize = 128, |
423 | .machine_check = machine_check_generic, | ||
424 | .oprofile_cpu_type = "ppc64/ibm-compat-v1", | 412 | .oprofile_cpu_type = "ppc64/ibm-compat-v1", |
425 | .oprofile_type = PPC_OPROFILE_POWER4, | 413 | .oprofile_type = PPC_OPROFILE_POWER4, |
426 | .platform = "power6", | 414 | .platform = "power6", |
@@ -431,13 +419,13 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
431 | .cpu_name = "POWER7 (architected)", | 419 | .cpu_name = "POWER7 (architected)", |
432 | .cpu_features = CPU_FTRS_POWER7, | 420 | .cpu_features = CPU_FTRS_POWER7, |
433 | .cpu_user_features = COMMON_USER_POWER7, | 421 | .cpu_user_features = COMMON_USER_POWER7, |
434 | .mmu_features = MMU_FTR_HPTE_TABLE | | 422 | .mmu_features = MMU_FTRS_POWER7, |
435 | MMU_FTR_TLBIE_206, | ||
436 | .icache_bsize = 128, | 423 | .icache_bsize = 128, |
437 | .dcache_bsize = 128, | 424 | .dcache_bsize = 128, |
438 | .machine_check = machine_check_generic, | ||
439 | .oprofile_type = PPC_OPROFILE_POWER4, | 425 | .oprofile_type = PPC_OPROFILE_POWER4, |
440 | .oprofile_cpu_type = "ppc64/ibm-compat-v1", | 426 | .oprofile_cpu_type = "ppc64/ibm-compat-v1", |
427 | .cpu_setup = __setup_cpu_power7, | ||
428 | .cpu_restore = __restore_cpu_power7, | ||
441 | .platform = "power7", | 429 | .platform = "power7", |
442 | }, | 430 | }, |
443 | { /* Power7 */ | 431 | { /* Power7 */ |
@@ -446,21 +434,33 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
446 | .cpu_name = "POWER7 (raw)", | 434 | .cpu_name = "POWER7 (raw)", |
447 | .cpu_features = CPU_FTRS_POWER7, | 435 | .cpu_features = CPU_FTRS_POWER7, |
448 | .cpu_user_features = COMMON_USER_POWER7, | 436 | .cpu_user_features = COMMON_USER_POWER7, |
449 | .mmu_features = MMU_FTR_HPTE_TABLE | | 437 | .mmu_features = MMU_FTRS_POWER7, |
450 | MMU_FTR_TLBIE_206, | ||
451 | .icache_bsize = 128, | 438 | .icache_bsize = 128, |
452 | .dcache_bsize = 128, | 439 | .dcache_bsize = 128, |
453 | .num_pmcs = 6, | 440 | .num_pmcs = 6, |
454 | .pmc_type = PPC_PMC_IBM, | 441 | .pmc_type = PPC_PMC_IBM, |
442 | .oprofile_cpu_type = "ppc64/power7", | ||
443 | .oprofile_type = PPC_OPROFILE_POWER4, | ||
455 | .cpu_setup = __setup_cpu_power7, | 444 | .cpu_setup = __setup_cpu_power7, |
456 | .cpu_restore = __restore_cpu_power7, | 445 | .cpu_restore = __restore_cpu_power7, |
446 | .platform = "power7", | ||
447 | }, | ||
448 | { /* Power7+ */ | ||
449 | .pvr_mask = 0xffff0000, | ||
450 | .pvr_value = 0x004A0000, | ||
451 | .cpu_name = "POWER7+ (raw)", | ||
452 | .cpu_features = CPU_FTRS_POWER7, | ||
453 | .cpu_user_features = COMMON_USER_POWER7, | ||
454 | .mmu_features = MMU_FTRS_POWER7, | ||
455 | .icache_bsize = 128, | ||
456 | .dcache_bsize = 128, | ||
457 | .num_pmcs = 6, | ||
458 | .pmc_type = PPC_PMC_IBM, | ||
457 | .oprofile_cpu_type = "ppc64/power7", | 459 | .oprofile_cpu_type = "ppc64/power7", |
458 | .oprofile_type = PPC_OPROFILE_POWER4, | 460 | .oprofile_type = PPC_OPROFILE_POWER4, |
459 | .oprofile_mmcra_sihv = POWER6_MMCRA_SIHV, | 461 | .cpu_setup = __setup_cpu_power7, |
460 | .oprofile_mmcra_sipr = POWER6_MMCRA_SIPR, | 462 | .cpu_restore = __restore_cpu_power7, |
461 | .oprofile_mmcra_clear = POWER6_MMCRA_THRM | | 463 | .platform = "power7+", |
462 | POWER6_MMCRA_OTHER, | ||
463 | .platform = "power7", | ||
464 | }, | 464 | }, |
465 | { /* Cell Broadband Engine */ | 465 | { /* Cell Broadband Engine */ |
466 | .pvr_mask = 0xffff0000, | 466 | .pvr_mask = 0xffff0000, |
@@ -470,14 +470,13 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
470 | .cpu_user_features = COMMON_USER_PPC64 | | 470 | .cpu_user_features = COMMON_USER_PPC64 | |
471 | PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP | | 471 | PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP | |
472 | PPC_FEATURE_SMT, | 472 | PPC_FEATURE_SMT, |
473 | .mmu_features = MMU_FTR_HPTE_TABLE, | 473 | .mmu_features = MMU_FTRS_CELL, |
474 | .icache_bsize = 128, | 474 | .icache_bsize = 128, |
475 | .dcache_bsize = 128, | 475 | .dcache_bsize = 128, |
476 | .num_pmcs = 4, | 476 | .num_pmcs = 4, |
477 | .pmc_type = PPC_PMC_IBM, | 477 | .pmc_type = PPC_PMC_IBM, |
478 | .oprofile_cpu_type = "ppc64/cell-be", | 478 | .oprofile_cpu_type = "ppc64/cell-be", |
479 | .oprofile_type = PPC_OPROFILE_CELL, | 479 | .oprofile_type = PPC_OPROFILE_CELL, |
480 | .machine_check = machine_check_generic, | ||
481 | .platform = "ppc-cell-be", | 480 | .platform = "ppc-cell-be", |
482 | }, | 481 | }, |
483 | { /* PA Semi PA6T */ | 482 | { /* PA Semi PA6T */ |
@@ -486,7 +485,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
486 | .cpu_name = "PA6T", | 485 | .cpu_name = "PA6T", |
487 | .cpu_features = CPU_FTRS_PA6T, | 486 | .cpu_features = CPU_FTRS_PA6T, |
488 | .cpu_user_features = COMMON_USER_PA6T, | 487 | .cpu_user_features = COMMON_USER_PA6T, |
489 | .mmu_features = MMU_FTR_HPTE_TABLE, | 488 | .mmu_features = MMU_FTRS_PA6T, |
490 | .icache_bsize = 64, | 489 | .icache_bsize = 64, |
491 | .dcache_bsize = 64, | 490 | .dcache_bsize = 64, |
492 | .num_pmcs = 6, | 491 | .num_pmcs = 6, |
@@ -495,7 +494,6 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
495 | .cpu_restore = __restore_cpu_pa6t, | 494 | .cpu_restore = __restore_cpu_pa6t, |
496 | .oprofile_cpu_type = "ppc64/pa6t", | 495 | .oprofile_cpu_type = "ppc64/pa6t", |
497 | .oprofile_type = PPC_OPROFILE_PA6T, | 496 | .oprofile_type = PPC_OPROFILE_PA6T, |
498 | .machine_check = machine_check_generic, | ||
499 | .platform = "pa6t", | 497 | .platform = "pa6t", |
500 | }, | 498 | }, |
501 | { /* default match */ | 499 | { /* default match */ |
@@ -504,12 +502,11 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
504 | .cpu_name = "POWER4 (compatible)", | 502 | .cpu_name = "POWER4 (compatible)", |
505 | .cpu_features = CPU_FTRS_COMPATIBLE, | 503 | .cpu_features = CPU_FTRS_COMPATIBLE, |
506 | .cpu_user_features = COMMON_USER_PPC64, | 504 | .cpu_user_features = COMMON_USER_PPC64, |
507 | .mmu_features = MMU_FTR_HPTE_TABLE, | 505 | .mmu_features = MMU_FTRS_DEFAULT_HPTE_ARCH_V2, |
508 | .icache_bsize = 128, | 506 | .icache_bsize = 128, |
509 | .dcache_bsize = 128, | 507 | .dcache_bsize = 128, |
510 | .num_pmcs = 6, | 508 | .num_pmcs = 6, |
511 | .pmc_type = PPC_PMC_IBM, | 509 | .pmc_type = PPC_PMC_IBM, |
512 | .machine_check = machine_check_generic, | ||
513 | .platform = "power4", | 510 | .platform = "power4", |
514 | } | 511 | } |
515 | #endif /* CONFIG_PPC_BOOK3S_64 */ | 512 | #endif /* CONFIG_PPC_BOOK3S_64 */ |
@@ -1805,11 +1802,25 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1805 | .machine_check = machine_check_440A, | 1802 | .machine_check = machine_check_440A, |
1806 | .platform = "ppc440", | 1803 | .platform = "ppc440", |
1807 | }, | 1804 | }, |
1808 | { /* 476 core */ | 1805 | { /* 464 in APM821xx */ |
1809 | .pvr_mask = 0xffff0000, | 1806 | .pvr_mask = 0xffffff00, |
1810 | .pvr_value = 0x11a50000, | 1807 | .pvr_value = 0x12C41C80, |
1808 | .cpu_name = "APM821XX", | ||
1809 | .cpu_features = CPU_FTRS_44X, | ||
1810 | .cpu_user_features = COMMON_USER_BOOKE | | ||
1811 | PPC_FEATURE_HAS_FPU, | ||
1812 | .mmu_features = MMU_FTR_TYPE_44x, | ||
1813 | .icache_bsize = 32, | ||
1814 | .dcache_bsize = 32, | ||
1815 | .cpu_setup = __setup_cpu_apm821xx, | ||
1816 | .machine_check = machine_check_440A, | ||
1817 | .platform = "ppc440", | ||
1818 | }, | ||
1819 | { /* 476 DD2 core */ | ||
1820 | .pvr_mask = 0xffffffff, | ||
1821 | .pvr_value = 0x11a52080, | ||
1811 | .cpu_name = "476", | 1822 | .cpu_name = "476", |
1812 | .cpu_features = CPU_FTRS_47X, | 1823 | .cpu_features = CPU_FTRS_47X | CPU_FTR_476_DD2, |
1813 | .cpu_user_features = COMMON_USER_BOOKE | | 1824 | .cpu_user_features = COMMON_USER_BOOKE | |
1814 | PPC_FEATURE_HAS_FPU, | 1825 | PPC_FEATURE_HAS_FPU, |
1815 | .mmu_features = MMU_FTR_TYPE_47x | | 1826 | .mmu_features = MMU_FTR_TYPE_47x | |
@@ -1833,6 +1844,20 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1833 | .machine_check = machine_check_47x, | 1844 | .machine_check = machine_check_47x, |
1834 | .platform = "ppc470", | 1845 | .platform = "ppc470", |
1835 | }, | 1846 | }, |
1847 | { /* 476 others */ | ||
1848 | .pvr_mask = 0xffff0000, | ||
1849 | .pvr_value = 0x11a50000, | ||
1850 | .cpu_name = "476", | ||
1851 | .cpu_features = CPU_FTRS_47X, | ||
1852 | .cpu_user_features = COMMON_USER_BOOKE | | ||
1853 | PPC_FEATURE_HAS_FPU, | ||
1854 | .mmu_features = MMU_FTR_TYPE_47x | | ||
1855 | MMU_FTR_USE_TLBIVAX_BCAST | MMU_FTR_LOCK_BCAST_INVAL, | ||
1856 | .icache_bsize = 32, | ||
1857 | .dcache_bsize = 128, | ||
1858 | .machine_check = machine_check_47x, | ||
1859 | .platform = "ppc470", | ||
1860 | }, | ||
1836 | { /* default match */ | 1861 | { /* default match */ |
1837 | .pvr_mask = 0x00000000, | 1862 | .pvr_mask = 0x00000000, |
1838 | .pvr_value = 0x00000000, | 1863 | .pvr_value = 0x00000000, |
@@ -1891,7 +1916,9 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1891 | .platform = "ppc5554", | 1916 | .platform = "ppc5554", |
1892 | } | 1917 | } |
1893 | #endif /* CONFIG_E200 */ | 1918 | #endif /* CONFIG_E200 */ |
1919 | #endif /* CONFIG_PPC32 */ | ||
1894 | #ifdef CONFIG_E500 | 1920 | #ifdef CONFIG_E500 |
1921 | #ifdef CONFIG_PPC32 | ||
1895 | { /* e500 */ | 1922 | { /* e500 */ |
1896 | .pvr_mask = 0xffff0000, | 1923 | .pvr_mask = 0xffff0000, |
1897 | .pvr_value = 0x80200000, | 1924 | .pvr_value = 0x80200000, |
@@ -1946,6 +1973,26 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1946 | .machine_check = machine_check_e500mc, | 1973 | .machine_check = machine_check_e500mc, |
1947 | .platform = "ppce500mc", | 1974 | .platform = "ppce500mc", |
1948 | }, | 1975 | }, |
1976 | #endif /* CONFIG_PPC32 */ | ||
1977 | { /* e5500 */ | ||
1978 | .pvr_mask = 0xffff0000, | ||
1979 | .pvr_value = 0x80240000, | ||
1980 | .cpu_name = "e5500", | ||
1981 | .cpu_features = CPU_FTRS_E5500, | ||
1982 | .cpu_user_features = COMMON_USER_BOOKE | PPC_FEATURE_HAS_FPU, | ||
1983 | .mmu_features = MMU_FTR_TYPE_FSL_E | MMU_FTR_BIG_PHYS | | ||
1984 | MMU_FTR_USE_TLBILX, | ||
1985 | .icache_bsize = 64, | ||
1986 | .dcache_bsize = 64, | ||
1987 | .num_pmcs = 4, | ||
1988 | .oprofile_cpu_type = "ppc/e500mc", | ||
1989 | .oprofile_type = PPC_OPROFILE_FSL_EMB, | ||
1990 | .cpu_setup = __setup_cpu_e5500, | ||
1991 | .cpu_restore = __restore_cpu_e5500, | ||
1992 | .machine_check = machine_check_e500mc, | ||
1993 | .platform = "ppce5500", | ||
1994 | }, | ||
1995 | #ifdef CONFIG_PPC32 | ||
1949 | { /* default match */ | 1996 | { /* default match */ |
1950 | .pvr_mask = 0x00000000, | 1997 | .pvr_mask = 0x00000000, |
1951 | .pvr_value = 0x00000000, | 1998 | .pvr_value = 0x00000000, |
@@ -1960,10 +2007,25 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1960 | .machine_check = machine_check_e500, | 2007 | .machine_check = machine_check_e500, |
1961 | .platform = "powerpc", | 2008 | .platform = "powerpc", |
1962 | } | 2009 | } |
1963 | #endif /* CONFIG_E500 */ | ||
1964 | #endif /* CONFIG_PPC32 */ | 2010 | #endif /* CONFIG_PPC32 */ |
2011 | #endif /* CONFIG_E500 */ | ||
1965 | 2012 | ||
1966 | #ifdef CONFIG_PPC_BOOK3E_64 | 2013 | #ifdef CONFIG_PPC_A2 |
2014 | { /* Standard A2 (>= DD2) + FPU core */ | ||
2015 | .pvr_mask = 0xffff0000, | ||
2016 | .pvr_value = 0x00480000, | ||
2017 | .cpu_name = "A2 (>= DD2)", | ||
2018 | .cpu_features = CPU_FTRS_A2, | ||
2019 | .cpu_user_features = COMMON_USER_PPC64, | ||
2020 | .mmu_features = MMU_FTRS_A2, | ||
2021 | .icache_bsize = 64, | ||
2022 | .dcache_bsize = 64, | ||
2023 | .num_pmcs = 0, | ||
2024 | .cpu_setup = __setup_cpu_a2, | ||
2025 | .cpu_restore = __restore_cpu_a2, | ||
2026 | .machine_check = machine_check_generic, | ||
2027 | .platform = "ppca2", | ||
2028 | }, | ||
1967 | { /* This is a default entry to get going, to be replaced by | 2029 | { /* This is a default entry to get going, to be replaced by |
1968 | * a real one at some stage | 2030 | * a real one at some stage |
1969 | */ | 2031 | */ |
@@ -1984,7 +2046,7 @@ static struct cpu_spec __initdata cpu_specs[] = { | |||
1984 | .machine_check = machine_check_generic, | 2046 | .machine_check = machine_check_generic, |
1985 | .platform = "power6", | 2047 | .platform = "power6", |
1986 | }, | 2048 | }, |
1987 | #endif | 2049 | #endif /* CONFIG_PPC_A2 */ |
1988 | }; | 2050 | }; |
1989 | 2051 | ||
1990 | static struct cpu_spec the_cpu_spec; | 2052 | static struct cpu_spec the_cpu_spec; |
@@ -2048,8 +2110,8 @@ static void __init setup_cpu_spec(unsigned long offset, struct cpu_spec *s) | |||
2048 | * pointer on ppc64 and booke as we are running at 0 in real mode | 2110 | * pointer on ppc64 and booke as we are running at 0 in real mode |
2049 | * on ppc64 and reloc_offset is always 0 on booke. | 2111 | * on ppc64 and reloc_offset is always 0 on booke. |
2050 | */ | 2112 | */ |
2051 | if (s->cpu_setup) { | 2113 | if (t->cpu_setup) { |
2052 | s->cpu_setup(offset, s); | 2114 | t->cpu_setup(offset, t); |
2053 | } | 2115 | } |
2054 | #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ | 2116 | #endif /* CONFIG_PPC64 || CONFIG_BOOKE */ |
2055 | } | 2117 | } |
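
Each cpu_specs[] entry above is selected by masking the processor version register: identify_cpu() walks the table and takes the first entry whose masked PVR matches, so more specific entries must precede looser ones. That is why the new full-mask "476 DD2" entry is placed before the 0xffff0000 "476 others" entry. A minimal sketch of that matching step; the helper name is invented here, and the real identify_cpu() in this file also records the result in the_cpu_spec and runs setup_cpu_spec():

    static struct cpu_spec * __init find_matching_spec(unsigned int pvr)
    {
        struct cpu_spec *s = cpu_specs;
        int i;

        for (i = 0; i < ARRAY_SIZE(cpu_specs); i++, s++)
            if ((pvr & s->pvr_mask) == s->pvr_value)
                return s;       /* first match wins */
        return NULL;    /* not reached: the table ends with an all-zero mask */
    }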
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 4457382f8667..4e6ee944495a 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c | |||
@@ -48,7 +48,7 @@ int crashing_cpu = -1; | |||
48 | static cpumask_t cpus_in_crash = CPU_MASK_NONE; | 48 | static cpumask_t cpus_in_crash = CPU_MASK_NONE; |
49 | cpumask_t cpus_in_sr = CPU_MASK_NONE; | 49 | cpumask_t cpus_in_sr = CPU_MASK_NONE; |
50 | 50 | ||
51 | #define CRASH_HANDLER_MAX 2 | 51 | #define CRASH_HANDLER_MAX 3 |
52 | /* NULL terminated list of shutdown handles */ | 52 | /* NULL terminated list of shutdown handles */ |
53 | static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1]; | 53 | static crash_shutdown_t crash_shutdown_handles[CRASH_HANDLER_MAX+1]; |
54 | static DEFINE_SPINLOCK(crash_handlers_lock); | 54 | static DEFINE_SPINLOCK(crash_handlers_lock); |
@@ -64,9 +64,9 @@ void crash_ipi_callback(struct pt_regs *regs) | |||
64 | return; | 64 | return; |
65 | 65 | ||
66 | hard_irq_disable(); | 66 | hard_irq_disable(); |
67 | if (!cpu_isset(cpu, cpus_in_crash)) | 67 | if (!cpumask_test_cpu(cpu, &cpus_in_crash)) |
68 | crash_save_cpu(regs, cpu); | 68 | crash_save_cpu(regs, cpu); |
69 | cpu_set(cpu, cpus_in_crash); | 69 | cpumask_set_cpu(cpu, &cpus_in_crash); |
70 | 70 | ||
71 | /* | 71 | /* |
72 | * Entered via soft-reset - could be the kdump | 72 | * Entered via soft-reset - could be the kdump |
@@ -77,8 +77,8 @@ void crash_ipi_callback(struct pt_regs *regs) | |||
77 | * Tell the kexec CPU that entered via soft-reset and ready | 77 | * Tell the kexec CPU that entered via soft-reset and ready |
78 | * to go down. | 78 | * to go down. |
79 | */ | 79 | */ |
80 | if (cpu_isset(cpu, cpus_in_sr)) { | 80 | if (cpumask_test_cpu(cpu, &cpus_in_sr)) { |
81 | cpu_clear(cpu, cpus_in_sr); | 81 | cpumask_clear_cpu(cpu, &cpus_in_sr); |
82 | atomic_inc(&enter_on_soft_reset); | 82 | atomic_inc(&enter_on_soft_reset); |
83 | } | 83 | } |
84 | 84 | ||
@@ -87,7 +87,7 @@ void crash_ipi_callback(struct pt_regs *regs) | |||
87 | * This barrier is needed to make sure that all CPUs are stopped. | 87 | * This barrier is needed to make sure that all CPUs are stopped. |
88 | * If not, soft-reset will be invoked to bring other CPUs. | 88 | * If not, soft-reset will be invoked to bring other CPUs. |
89 | */ | 89 | */ |
90 | while (!cpu_isset(crashing_cpu, cpus_in_crash)) | 90 | while (!cpumask_test_cpu(crashing_cpu, &cpus_in_crash)) |
91 | cpu_relax(); | 91 | cpu_relax(); |
92 | 92 | ||
93 | if (ppc_md.kexec_cpu_down) | 93 | if (ppc_md.kexec_cpu_down) |
@@ -109,7 +109,7 @@ static void crash_soft_reset_check(int cpu) | |||
109 | { | 109 | { |
110 | unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ | 110 | unsigned int ncpus = num_online_cpus() - 1;/* Excluding the panic cpu */ |
111 | 111 | ||
112 | cpu_clear(cpu, cpus_in_sr); | 112 | cpumask_clear_cpu(cpu, &cpus_in_sr); |
113 | while (atomic_read(&enter_on_soft_reset) != ncpus) | 113 | while (atomic_read(&enter_on_soft_reset) != ncpus) |
114 | cpu_relax(); | 114 | cpu_relax(); |
115 | } | 115 | } |
@@ -125,14 +125,14 @@ static void crash_kexec_prepare_cpus(int cpu) | |||
125 | smp_wmb(); | 125 | smp_wmb(); |
126 | 126 | ||
127 | /* | 127 | /* |
128 | * FIXME: Until we will have the way to stop other CPUSs reliabally, | 128 | * FIXME: Until we will have the way to stop other CPUs reliably, |
129 | * the crash CPU will send an IPI and wait for other CPUs to | 129 | * the crash CPU will send an IPI and wait for other CPUs to |
130 | * respond. | 130 | * respond. |
131 | * Delay of at least 10 seconds. | 131 | * Delay of at least 10 seconds. |
132 | */ | 132 | */ |
133 | printk(KERN_EMERG "Sending IPI to other cpus...\n"); | 133 | printk(KERN_EMERG "Sending IPI to other cpus...\n"); |
134 | msecs = 10000; | 134 | msecs = 10000; |
135 | while ((cpus_weight(cpus_in_crash) < ncpus) && (--msecs > 0)) { | 135 | while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) { |
136 | cpu_relax(); | 136 | cpu_relax(); |
137 | mdelay(1); | 137 | mdelay(1); |
138 | } | 138 | } |
@@ -144,52 +144,24 @@ static void crash_kexec_prepare_cpus(int cpu) | |||
144 | * user to do soft reset such that we get all. | 144 | * user to do soft reset such that we get all. |
145 | * Soft-reset will be used until better mechanism is implemented. | 145 | * Soft-reset will be used until better mechanism is implemented. |
146 | */ | 146 | */ |
147 | if (cpus_weight(cpus_in_crash) < ncpus) { | 147 | if (cpumask_weight(&cpus_in_crash) < ncpus) { |
148 | printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n", | 148 | printk(KERN_EMERG "done waiting: %d cpu(s) not responding\n", |
149 | ncpus - cpus_weight(cpus_in_crash)); | 149 | ncpus - cpumask_weight(&cpus_in_crash)); |
150 | printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n"); | 150 | printk(KERN_EMERG "Activate soft-reset to stop other cpu(s)\n"); |
151 | cpus_in_sr = CPU_MASK_NONE; | 151 | cpumask_clear(&cpus_in_sr); |
152 | atomic_set(&enter_on_soft_reset, 0); | 152 | atomic_set(&enter_on_soft_reset, 0); |
153 | while (cpus_weight(cpus_in_crash) < ncpus) | 153 | while (cpumask_weight(&cpus_in_crash) < ncpus) |
154 | cpu_relax(); | 154 | cpu_relax(); |
155 | } | 155 | } |
156 | /* | 156 | /* |
157 | * Make sure all CPUs are entered via soft-reset if the kdump is | 157 | * Make sure all CPUs are entered via soft-reset if the kdump is |
158 | * invoked using soft-reset. | 158 | * invoked using soft-reset. |
159 | */ | 159 | */ |
160 | if (cpu_isset(cpu, cpus_in_sr)) | 160 | if (cpumask_test_cpu(cpu, &cpus_in_sr)) |
161 | crash_soft_reset_check(cpu); | 161 | crash_soft_reset_check(cpu); |
162 | /* Leave the IPI callback set */ | 162 | /* Leave the IPI callback set */ |
163 | } | 163 | } |
164 | 164 | ||
165 | /* wait for all the CPUs to hit real mode but timeout if they don't come in */ | ||
166 | #ifdef CONFIG_PPC_STD_MMU_64 | ||
167 | static void crash_kexec_wait_realmode(int cpu) | ||
168 | { | ||
169 | unsigned int msecs; | ||
170 | int i; | ||
171 | |||
172 | msecs = 10000; | ||
173 | for (i=0; i < NR_CPUS && msecs > 0; i++) { | ||
174 | if (i == cpu) | ||
175 | continue; | ||
176 | |||
177 | while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) { | ||
178 | barrier(); | ||
179 | if (!cpu_possible(i)) { | ||
180 | break; | ||
181 | } | ||
182 | if (!cpu_online(i)) { | ||
183 | break; | ||
184 | } | ||
185 | msecs--; | ||
186 | mdelay(1); | ||
187 | } | ||
188 | } | ||
189 | mb(); | ||
190 | } | ||
191 | #endif | ||
192 | |||
193 | /* | 165 | /* |
194 | * This function will be called by secondary cpus or by kexec cpu | 166 | * This function will be called by secondary cpus or by kexec cpu |
195 | * if soft-reset is activated to stop some CPUs. | 167 | * if soft-reset is activated to stop some CPUs. |
@@ -210,7 +182,7 @@ void crash_kexec_secondary(struct pt_regs *regs) | |||
210 | * exited using 'x'(exit and recover) or | 182 | * exited using 'x'(exit and recover) or |
211 | * kexec_should_crash() failed for all running tasks. | 183 | * kexec_should_crash() failed for all running tasks. |
212 | */ | 184 | */ |
213 | cpu_clear(cpu, cpus_in_sr); | 185 | cpumask_clear_cpu(cpu, &cpus_in_sr); |
214 | local_irq_restore(flags); | 186 | local_irq_restore(flags); |
215 | return; | 187 | return; |
216 | } | 188 | } |
@@ -224,7 +196,7 @@ void crash_kexec_secondary(struct pt_regs *regs) | |||
224 | * then start kexec boot. | 196 | * then start kexec boot. |
225 | */ | 197 | */ |
226 | crash_soft_reset_check(cpu); | 198 | crash_soft_reset_check(cpu); |
227 | cpu_set(crashing_cpu, cpus_in_crash); | 199 | cpumask_set_cpu(crashing_cpu, &cpus_in_crash); |
228 | if (ppc_md.kexec_cpu_down) | 200 | if (ppc_md.kexec_cpu_down) |
229 | ppc_md.kexec_cpu_down(1, 0); | 201 | ppc_md.kexec_cpu_down(1, 0); |
230 | machine_kexec(kexec_crash_image); | 202 | machine_kexec(kexec_crash_image); |
@@ -233,7 +205,8 @@ void crash_kexec_secondary(struct pt_regs *regs) | |||
233 | crash_ipi_callback(regs); | 205 | crash_ipi_callback(regs); |
234 | } | 206 | } |
235 | 207 | ||
236 | #else | 208 | #else /* ! CONFIG_SMP */ |
209 | |||
237 | static void crash_kexec_prepare_cpus(int cpu) | 210 | static void crash_kexec_prepare_cpus(int cpu) |
238 | { | 211 | { |
239 | /* | 212 | /* |
@@ -251,75 +224,39 @@ static void crash_kexec_prepare_cpus(int cpu) | |||
251 | 224 | ||
252 | void crash_kexec_secondary(struct pt_regs *regs) | 225 | void crash_kexec_secondary(struct pt_regs *regs) |
253 | { | 226 | { |
254 | cpus_in_sr = CPU_MASK_NONE; | 227 | cpumask_clear(&cpus_in_sr); |
255 | } | 228 | } |
256 | #endif | 229 | #endif /* CONFIG_SMP */ |
257 | #ifdef CONFIG_SPU_BASE | ||
258 | |||
259 | #include <asm/spu.h> | ||
260 | #include <asm/spu_priv1.h> | ||
261 | |||
262 | struct crash_spu_info { | ||
263 | struct spu *spu; | ||
264 | u32 saved_spu_runcntl_RW; | ||
265 | u32 saved_spu_status_R; | ||
266 | u32 saved_spu_npc_RW; | ||
267 | u64 saved_mfc_sr1_RW; | ||
268 | u64 saved_mfc_dar; | ||
269 | u64 saved_mfc_dsisr; | ||
270 | }; | ||
271 | 230 | ||
272 | #define CRASH_NUM_SPUS 16 /* Enough for current hardware */ | 231 | /* wait for all the CPUs to hit real mode but timeout if they don't come in */ |
273 | static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS]; | 232 | #if defined(CONFIG_SMP) && defined(CONFIG_PPC_STD_MMU_64) |
274 | 233 | static void crash_kexec_wait_realmode(int cpu) | |
275 | static void crash_kexec_stop_spus(void) | ||
276 | { | 234 | { |
277 | struct spu *spu; | 235 | unsigned int msecs; |
278 | int i; | 236 | int i; |
279 | u64 tmp; | ||
280 | 237 | ||
281 | for (i = 0; i < CRASH_NUM_SPUS; i++) { | 238 | msecs = 10000; |
282 | if (!crash_spu_info[i].spu) | 239 | for (i=0; i < nr_cpu_ids && msecs > 0; i++) { |
283 | continue; | 240 | if (i == cpu) |
284 | |||
285 | spu = crash_spu_info[i].spu; | ||
286 | |||
287 | crash_spu_info[i].saved_spu_runcntl_RW = | ||
288 | in_be32(&spu->problem->spu_runcntl_RW); | ||
289 | crash_spu_info[i].saved_spu_status_R = | ||
290 | in_be32(&spu->problem->spu_status_R); | ||
291 | crash_spu_info[i].saved_spu_npc_RW = | ||
292 | in_be32(&spu->problem->spu_npc_RW); | ||
293 | |||
294 | crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu); | ||
295 | crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu); | ||
296 | tmp = spu_mfc_sr1_get(spu); | ||
297 | crash_spu_info[i].saved_mfc_sr1_RW = tmp; | ||
298 | |||
299 | tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK; | ||
300 | spu_mfc_sr1_set(spu, tmp); | ||
301 | |||
302 | __delay(200); | ||
303 | } | ||
304 | } | ||
305 | |||
306 | void crash_register_spus(struct list_head *list) | ||
307 | { | ||
308 | struct spu *spu; | ||
309 | |||
310 | list_for_each_entry(spu, list, full_list) { | ||
311 | if (WARN_ON(spu->number >= CRASH_NUM_SPUS)) | ||
312 | continue; | 241 | continue; |
313 | 242 | ||
314 | crash_spu_info[spu->number].spu = spu; | 243 | while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE) { |
244 | barrier(); | ||
245 | if (!cpu_possible(i)) { | ||
246 | break; | ||
247 | } | ||
248 | if (!cpu_online(i)) { | ||
249 | break; | ||
250 | } | ||
251 | msecs--; | ||
252 | mdelay(1); | ||
253 | } | ||
315 | } | 254 | } |
255 | mb(); | ||
316 | } | 256 | } |
317 | |||
318 | #else | 257 | #else |
319 | static inline void crash_kexec_stop_spus(void) | 258 | static inline void crash_kexec_wait_realmode(int cpu) {} |
320 | { | 259 | #endif /* CONFIG_SMP && CONFIG_PPC_STD_MMU_64 */ |
321 | } | ||
322 | #endif /* CONFIG_SPU_BASE */ | ||
323 | 260 | ||
324 | /* | 261 | /* |
325 | * Register a function to be called on shutdown. Only use this if you | 262 | * Register a function to be called on shutdown. Only use this if you |
@@ -409,23 +346,10 @@ void default_machine_crash_shutdown(struct pt_regs *regs) | |||
409 | crashing_cpu = smp_processor_id(); | 346 | crashing_cpu = smp_processor_id(); |
410 | crash_save_cpu(regs, crashing_cpu); | 347 | crash_save_cpu(regs, crashing_cpu); |
411 | crash_kexec_prepare_cpus(crashing_cpu); | 348 | crash_kexec_prepare_cpus(crashing_cpu); |
412 | cpu_set(crashing_cpu, cpus_in_crash); | 349 | cpumask_set_cpu(crashing_cpu, &cpus_in_crash); |
413 | #if defined(CONFIG_PPC_STD_MMU_64) && defined(CONFIG_SMP) | ||
414 | crash_kexec_wait_realmode(crashing_cpu); | 350 | crash_kexec_wait_realmode(crashing_cpu); |
415 | #endif | ||
416 | |||
417 | for_each_irq(i) { | ||
418 | struct irq_desc *desc = irq_to_desc(i); | ||
419 | |||
420 | if (!desc || !desc->chip || !desc->chip->eoi) | ||
421 | continue; | ||
422 | |||
423 | if (desc->status & IRQ_INPROGRESS) | ||
424 | desc->chip->eoi(i); | ||
425 | 351 | ||
426 | if (!(desc->status & IRQ_DISABLED)) | 352 | machine_kexec_mask_interrupts(); |
427 | desc->chip->shutdown(i); | ||
428 | } | ||
429 | 353 | ||
430 | /* | 354 | /* |
431 | * Call registered shutdown routines safely. Swap out | 355 | * Call registered shutdown routines safely. Swap out |
@@ -450,8 +374,6 @@ void default_machine_crash_shutdown(struct pt_regs *regs) | |||
450 | crash_shutdown_cpu = -1; | 374 | crash_shutdown_cpu = -1; |
451 | __debugger_fault_handler = old_handler; | 375 | __debugger_fault_handler = old_handler; |
452 | 376 | ||
453 | crash_kexec_stop_spus(); | ||
454 | |||
455 | if (ppc_md.kexec_cpu_down) | 377 | if (ppc_md.kexec_cpu_down) |
456 | ppc_md.kexec_cpu_down(1, 0); | 378 | ppc_md.kexec_cpu_down(1, 0); |
457 | } | 379 | } |
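
Most of the crash.c churn above is a mechanical conversion from the old by-value cpumask macros (cpu_set, cpu_clear, cpu_isset, cpus_weight, assignment from CPU_MASK_NONE) to the pointer-based API (cpumask_set_cpu, cpumask_clear_cpu, cpumask_test_cpu, cpumask_weight, cpumask_clear). A minimal sketch of the new usage, with helper names invented for the example:

    #include <linux/cpumask.h>
    #include <asm/processor.h>      /* cpu_relax() */

    static cpumask_t cpus_in_crash = CPU_MASK_NONE;

    static void mark_cpu_in_crash(int cpu)
    {
        if (!cpumask_test_cpu(cpu, &cpus_in_crash))     /* was cpu_isset()   */
            cpumask_set_cpu(cpu, &cpus_in_crash);       /* was cpu_set()     */
    }

    static void wait_for_cpus(unsigned int ncpus)
    {
        while (cpumask_weight(&cpus_in_crash) < ncpus)  /* was cpus_weight() */
            cpu_relax();
    }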
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 8e05c16344e4..424afb6b8fba 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <asm/prom.h> | 19 | #include <asm/prom.h> |
20 | #include <asm/firmware.h> | 20 | #include <asm/firmware.h> |
21 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
22 | #include <asm/rtas.h> | ||
22 | 23 | ||
23 | #ifdef DEBUG | 24 | #ifdef DEBUG |
24 | #include <asm/udbg.h> | 25 | #include <asm/udbg.h> |
@@ -27,9 +28,6 @@ | |||
27 | #define DBG(fmt...) | 28 | #define DBG(fmt...) |
28 | #endif | 29 | #endif |
29 | 30 | ||
30 | /* Stores the physical address of elf header of crash image. */ | ||
31 | unsigned long long elfcorehdr_addr = ELFCORE_ADDR_MAX; | ||
32 | |||
33 | #ifndef CONFIG_RELOCATABLE | 31 | #ifndef CONFIG_RELOCATABLE |
34 | void __init reserve_kdump_trampoline(void) | 32 | void __init reserve_kdump_trampoline(void) |
35 | { | 33 | { |
@@ -71,20 +69,6 @@ void __init setup_kdump_trampoline(void) | |||
71 | } | 69 | } |
72 | #endif /* CONFIG_RELOCATABLE */ | 70 | #endif /* CONFIG_RELOCATABLE */ |
73 | 71 | ||
74 | /* | ||
75 | * Note: elfcorehdr_addr is not just limited to vmcore. It is also used by | ||
76 | * is_kdump_kernel() to determine if we are booting after a panic. Hence | ||
77 | * ifdef it under CONFIG_CRASH_DUMP and not CONFIG_PROC_VMCORE. | ||
78 | */ | ||
79 | static int __init parse_elfcorehdr(char *p) | ||
80 | { | ||
81 | if (p) | ||
82 | elfcorehdr_addr = memparse(p, &p); | ||
83 | |||
84 | return 1; | ||
85 | } | ||
86 | __setup("elfcorehdr=", parse_elfcorehdr); | ||
87 | |||
88 | static int __init parse_savemaxmem(char *p) | 72 | static int __init parse_savemaxmem(char *p) |
89 | { | 73 | { |
90 | if (p) | 74 | if (p) |
@@ -141,3 +125,35 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | |||
141 | 125 | ||
142 | return csize; | 126 | return csize; |
143 | } | 127 | } |
128 | |||
129 | #ifdef CONFIG_PPC_RTAS | ||
130 | /* | ||
131 | * The crashkernel region will almost always overlap the RTAS region, so | ||
132 | * we have to be careful when shrinking the crashkernel region. | ||
133 | */ | ||
134 | void crash_free_reserved_phys_range(unsigned long begin, unsigned long end) | ||
135 | { | ||
136 | unsigned long addr; | ||
137 | const u32 *basep, *sizep; | ||
138 | unsigned int rtas_start = 0, rtas_end = 0; | ||
139 | |||
140 | basep = of_get_property(rtas.dev, "linux,rtas-base", NULL); | ||
141 | sizep = of_get_property(rtas.dev, "rtas-size", NULL); | ||
142 | |||
143 | if (basep && sizep) { | ||
144 | rtas_start = *basep; | ||
145 | rtas_end = *basep + *sizep; | ||
146 | } | ||
147 | |||
148 | for (addr = begin; addr < end; addr += PAGE_SIZE) { | ||
149 | /* Does this page overlap with the RTAS region? */ | ||
150 | if (addr <= rtas_end && ((addr + PAGE_SIZE) > rtas_start)) | ||
151 | continue; | ||
152 | |||
153 | ClearPageReserved(pfn_to_page(addr >> PAGE_SHIFT)); | ||
154 | init_page_count(pfn_to_page(addr >> PAGE_SHIFT)); | ||
155 | free_page((unsigned long)__va(addr)); | ||
156 | totalram_pages++; | ||
157 | } | ||
158 | } | ||
159 | #endif | ||
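
The crash_free_reserved_phys_range() added above frees the shrunk crashkernel region page by page and leaves any page that touches the RTAS region reserved. The skip test is the usual interval-overlap check; in generic half-open form (names invented here) it reads:

    #include <linux/types.h>

    /* true when [a_start, a_end) and [b_start, b_end) intersect */
    static inline bool ranges_overlap(unsigned long a_start, unsigned long a_end,
                                      unsigned long b_start, unsigned long b_end)
    {
        return a_start < b_end && a_end > b_start;
    }

The hunk itself uses "addr <= rtas_end", so it is slightly more conservative than the half-open form and also keeps the page that starts exactly at the end of the RTAS region.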
diff --git a/arch/powerpc/kernel/dbell.c b/arch/powerpc/kernel/dbell.c index 3307a52d797f..2cc451aaaca7 100644 --- a/arch/powerpc/kernel/dbell.c +++ b/arch/powerpc/kernel/dbell.c | |||
@@ -13,84 +13,35 @@ | |||
13 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
14 | #include <linux/smp.h> | 14 | #include <linux/smp.h> |
15 | #include <linux/threads.h> | 15 | #include <linux/threads.h> |
16 | #include <linux/percpu.h> | 16 | #include <linux/hardirq.h> |
17 | 17 | ||
18 | #include <asm/dbell.h> | 18 | #include <asm/dbell.h> |
19 | #include <asm/irq_regs.h> | 19 | #include <asm/irq_regs.h> |
20 | 20 | ||
21 | #ifdef CONFIG_SMP | 21 | #ifdef CONFIG_SMP |
22 | struct doorbell_cpu_info { | ||
23 | unsigned long messages; /* current messages bits */ | ||
24 | unsigned int tag; /* tag value */ | ||
25 | }; | ||
26 | |||
27 | static DEFINE_PER_CPU(struct doorbell_cpu_info, doorbell_cpu_info); | ||
28 | |||
29 | void doorbell_setup_this_cpu(void) | 22 | void doorbell_setup_this_cpu(void) |
30 | { | 23 | { |
31 | struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); | 24 | unsigned long tag = mfspr(SPRN_PIR) & 0x3fff; |
32 | 25 | ||
33 | info->messages = 0; | 26 | smp_muxed_ipi_set_data(smp_processor_id(), tag); |
34 | info->tag = mfspr(SPRN_PIR) & 0x3fff; | ||
35 | } | 27 | } |
36 | 28 | ||
37 | void doorbell_message_pass(int target, int msg) | 29 | void doorbell_cause_ipi(int cpu, unsigned long data) |
38 | { | 30 | { |
39 | struct doorbell_cpu_info *info; | 31 | ppc_msgsnd(PPC_DBELL, 0, data); |
40 | int i; | ||
41 | |||
42 | if (target < NR_CPUS) { | ||
43 | info = &per_cpu(doorbell_cpu_info, target); | ||
44 | set_bit(msg, &info->messages); | ||
45 | ppc_msgsnd(PPC_DBELL, 0, info->tag); | ||
46 | } | ||
47 | else if (target == MSG_ALL_BUT_SELF) { | ||
48 | for_each_online_cpu(i) { | ||
49 | if (i == smp_processor_id()) | ||
50 | continue; | ||
51 | info = &per_cpu(doorbell_cpu_info, i); | ||
52 | set_bit(msg, &info->messages); | ||
53 | ppc_msgsnd(PPC_DBELL, 0, info->tag); | ||
54 | } | ||
55 | } | ||
56 | else { /* target == MSG_ALL */ | ||
57 | for_each_online_cpu(i) { | ||
58 | info = &per_cpu(doorbell_cpu_info, i); | ||
59 | set_bit(msg, &info->messages); | ||
60 | } | ||
61 | ppc_msgsnd(PPC_DBELL, PPC_DBELL_MSG_BRDCAST, 0); | ||
62 | } | ||
63 | } | 32 | } |
64 | 33 | ||
65 | void doorbell_exception(struct pt_regs *regs) | 34 | void doorbell_exception(struct pt_regs *regs) |
66 | { | 35 | { |
67 | struct pt_regs *old_regs = set_irq_regs(regs); | 36 | struct pt_regs *old_regs = set_irq_regs(regs); |
68 | struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); | ||
69 | int msg; | ||
70 | 37 | ||
71 | /* Warning: regs can be NULL when called from irq enable */ | 38 | irq_enter(); |
72 | 39 | ||
73 | if (!info->messages || (num_online_cpus() < 2)) | 40 | smp_ipi_demux(); |
74 | goto out; | ||
75 | 41 | ||
76 | for (msg = 0; msg < 4; msg++) | 42 | irq_exit(); |
77 | if (test_and_clear_bit(msg, &info->messages)) | ||
78 | smp_message_recv(msg); | ||
79 | |||
80 | out: | ||
81 | set_irq_regs(old_regs); | 43 | set_irq_regs(old_regs); |
82 | } | 44 | } |
83 | |||
84 | void doorbell_check_self(void) | ||
85 | { | ||
86 | struct doorbell_cpu_info *info = &__get_cpu_var(doorbell_cpu_info); | ||
87 | |||
88 | if (!info->messages) | ||
89 | return; | ||
90 | |||
91 | ppc_msgsnd(PPC_DBELL, 0, info->tag); | ||
92 | } | ||
93 | |||
94 | #else /* CONFIG_SMP */ | 45 | #else /* CONFIG_SMP */ |
95 | void doorbell_exception(struct pt_regs *regs) | 46 | void doorbell_exception(struct pt_regs *regs) |
96 | { | 47 | { |
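
With this change a doorbell IPI no longer carries its own per-CPU message bitmask: doorbell_setup_this_cpu() registers the CPU's PIR-derived tag as its muxed-IPI data, doorbell_cause_ipi() sends that tag with msgsnd, and the exception handler just wraps smp_ipi_demux() in irq_enter()/irq_exit(). The demux itself follows roughly the shape of the loop removed above; the real smp_ipi_demux() lives in shared SMP code and may differ in detail:

    static void demux_ipi_messages(unsigned long *messages)
    {
        int msg;

        for (msg = 0; msg < 4; msg++)
            if (test_and_clear_bit(msg, messages))
                smp_message_recv(msg);
    }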
diff --git a/arch/powerpc/kernel/dma-iommu.c b/arch/powerpc/kernel/dma-iommu.c index 37771a518119..e7554154a6de 100644 --- a/arch/powerpc/kernel/dma-iommu.c +++ b/arch/powerpc/kernel/dma-iommu.c | |||
@@ -19,7 +19,7 @@ static void *dma_iommu_alloc_coherent(struct device *dev, size_t size, | |||
19 | dma_addr_t *dma_handle, gfp_t flag) | 19 | dma_addr_t *dma_handle, gfp_t flag) |
20 | { | 20 | { |
21 | return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, | 21 | return iommu_alloc_coherent(dev, get_iommu_table_base(dev), size, |
22 | dma_handle, device_to_mask(dev), flag, | 22 | dma_handle, dev->coherent_dma_mask, flag, |
23 | dev_to_node(dev)); | 23 | dev_to_node(dev)); |
24 | } | 24 | } |
25 | 25 | ||
@@ -74,16 +74,17 @@ static int dma_iommu_dma_supported(struct device *dev, u64 mask) | |||
74 | { | 74 | { |
75 | struct iommu_table *tbl = get_iommu_table_base(dev); | 75 | struct iommu_table *tbl = get_iommu_table_base(dev); |
76 | 76 | ||
77 | if (!tbl || tbl->it_offset > mask) { | 77 | if (!tbl) { |
78 | printk(KERN_INFO | 78 | dev_info(dev, "Warning: IOMMU dma not supported: mask 0x%08llx" |
79 | "Warning: IOMMU offset too big for device mask\n"); | 79 | ", table unavailable\n", mask); |
80 | if (tbl) | 80 | return 0; |
81 | printk(KERN_INFO | 81 | } |
82 | "mask: 0x%08llx, table offset: 0x%08lx\n", | 82 | |
83 | mask, tbl->it_offset); | 83 | if ((tbl->it_offset + tbl->it_size) > (mask >> IOMMU_PAGE_SHIFT)) { |
84 | else | 84 | dev_info(dev, "Warning: IOMMU window too big for device mask\n"); |
85 | printk(KERN_INFO "mask: 0x%08llx, table unavailable\n", | 85 | dev_info(dev, "mask: 0x%08llx, table end: 0x%08lx\n", |
86 | mask); | 86 | mask, (tbl->it_offset + tbl->it_size) << |
87 | IOMMU_PAGE_SHIFT); | ||
87 | return 0; | 88 | return 0; |
88 | } else | 89 | } else |
89 | return 1; | 90 | return 1; |
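
The rewritten dma_iommu_dma_supported() check now compares like units: it_offset and it_size count IOMMU pages, while the DMA mask is a byte address, hence the mask >> IOMMU_PAGE_SHIFT. A worked fragment, assuming 4 KiB IOMMU pages (IOMMU_PAGE_SHIFT == 12) purely for illustration:

    u64 mask = DMA_BIT_MASK(32);            /* 0xffffffff            */
    unsigned long limit = mask >> 12;       /* 0xfffff               */

    /* window of 0x80000 pages (2 GiB) at offset 0:
     *      0x0 + 0x80000  <= 0xfffff  ->  dma_iommu_dma_supported() returns 1
     * window of 0x200000 pages (8 GiB) at offset 0:
     *      0x0 + 0x200000 >  0xfffff  ->  returns 0, the mask cannot reach
     *      the end of the window
     */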
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c index 84d6367ec003..d238c082c3c5 100644 --- a/arch/powerpc/kernel/dma.c +++ b/arch/powerpc/kernel/dma.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/memblock.h> | 12 | #include <linux/memblock.h> |
13 | #include <asm/bug.h> | 13 | #include <asm/bug.h> |
14 | #include <asm/abs_addr.h> | 14 | #include <asm/abs_addr.h> |
15 | #include <asm/machdep.h> | ||
15 | 16 | ||
16 | /* | 17 | /* |
17 | * Generic direct DMA implementation | 18 | * Generic direct DMA implementation |
@@ -89,7 +90,7 @@ static int dma_direct_dma_supported(struct device *dev, u64 mask) | |||
89 | /* Could be improved so platforms can set the limit in case | 90 | /* Could be improved so platforms can set the limit in case |
90 | * they have limited DMA windows | 91 | * they have limited DMA windows |
91 | */ | 92 | */ |
92 | return mask >= (memblock_end_of_DRAM() - 1); | 93 | return mask >= get_dma_offset(dev) + (memblock_end_of_DRAM() - 1); |
93 | #else | 94 | #else |
94 | return 1; | 95 | return 1; |
95 | #endif | 96 | #endif |
@@ -154,6 +155,23 @@ EXPORT_SYMBOL(dma_direct_ops); | |||
154 | 155 | ||
155 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) | 156 | #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) |
156 | 157 | ||
158 | int dma_set_mask(struct device *dev, u64 dma_mask) | ||
159 | { | ||
160 | struct dma_map_ops *dma_ops = get_dma_ops(dev); | ||
161 | |||
162 | if (ppc_md.dma_set_mask) | ||
163 | return ppc_md.dma_set_mask(dev, dma_mask); | ||
164 | if (unlikely(dma_ops == NULL)) | ||
165 | return -EIO; | ||
166 | if (dma_ops->set_dma_mask != NULL) | ||
167 | return dma_ops->set_dma_mask(dev, dma_mask); | ||
168 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
169 | return -EIO; | ||
170 | *dev->dma_mask = dma_mask; | ||
171 | return 0; | ||
172 | } | ||
173 | EXPORT_SYMBOL(dma_set_mask); | ||
174 | |||
157 | static int __init dma_init(void) | 175 | static int __init dma_init(void) |
158 | { | 176 | { |
159 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); | 177 | dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); |
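
The dma_set_mask() added in this hunk gives the platform (ppc_md.dma_set_mask) and then the bus dma_map_ops a chance to veto or adjust a mask before the generic dma_supported() fallback; callers are unchanged. A typical driver-side use, illustrative only, with "pdev" standing for the driver's struct pci_dev:

    if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) &&
        dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
        return -EIO;    /* neither a 64-bit nor a 32-bit mask was accepted */

dma_set_mask() returns 0 on success, so the second call only runs when the 64-bit request is refused.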
@@ -161,3 +179,21 @@ static int __init dma_init(void) | |||
161 | return 0; | 179 | return 0; |
162 | } | 180 | } |
163 | fs_initcall(dma_init); | 181 | fs_initcall(dma_init); |
182 | |||
183 | int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma, | ||
184 | void *cpu_addr, dma_addr_t handle, size_t size) | ||
185 | { | ||
186 | unsigned long pfn; | ||
187 | |||
188 | #ifdef CONFIG_NOT_COHERENT_CACHE | ||
189 | vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); | ||
190 | pfn = __dma_get_coherent_pfn((unsigned long)cpu_addr); | ||
191 | #else | ||
192 | pfn = page_to_pfn(virt_to_page(cpu_addr)); | ||
193 | #endif | ||
194 | return remap_pfn_range(vma, vma->vm_start, | ||
195 | pfn + vma->vm_pgoff, | ||
196 | vma->vm_end - vma->vm_start, | ||
197 | vma->vm_page_prot); | ||
198 | } | ||
199 | EXPORT_SYMBOL_GPL(dma_mmap_coherent); | ||
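
The new dma_mmap_coherent() lets a driver map a coherent buffer into user space with the correct page protection on non-coherent-cache platforms. A sketch of a caller; struct mydev and its fields are invented for the example, and the buffer is assumed to come from dma_alloc_coherent():

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/dma-mapping.h>

    struct mydev {
        struct device   *dev;
        void            *cpu_addr;      /* from dma_alloc_coherent() */
        dma_addr_t      dma_handle;
    };

    static int mydev_mmap(struct file *file, struct vm_area_struct *vma)
    {
        struct mydev *d = file->private_data;

        return dma_mmap_coherent(d->dev, vma, d->cpu_addr, d->dma_handle,
                                 vma->vm_end - vma->vm_start);
    }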
diff --git a/arch/powerpc/kernel/e500-pmu.c b/arch/powerpc/kernel/e500-pmu.c index 7c07de0d8943..b150b510510f 100644 --- a/arch/powerpc/kernel/e500-pmu.c +++ b/arch/powerpc/kernel/e500-pmu.c | |||
@@ -126,4 +126,4 @@ static int init_e500_pmu(void) | |||
126 | return register_fsl_emb_pmu(&e500_pmu); | 126 | return register_fsl_emb_pmu(&e500_pmu); |
127 | } | 127 | } |
128 | 128 | ||
129 | arch_initcall(init_e500_pmu); | 129 | early_initcall(init_e500_pmu); |
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index ed4aeb96398b..56212bc0ab08 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/asm-offsets.h> | 31 | #include <asm/asm-offsets.h> |
32 | #include <asm/unistd.h> | 32 | #include <asm/unistd.h> |
33 | #include <asm/ftrace.h> | 33 | #include <asm/ftrace.h> |
34 | #include <asm/ptrace.h> | ||
34 | 35 | ||
35 | #undef SHOW_SYSCALLS | 36 | #undef SHOW_SYSCALLS |
36 | #undef SHOW_SYSCALLS_TASK | 37 | #undef SHOW_SYSCALLS_TASK |
@@ -879,7 +880,18 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x) | |||
879 | */ | 880 | */ |
880 | andi. r10,r9,MSR_EE | 881 | andi. r10,r9,MSR_EE |
881 | beq 1f | 882 | beq 1f |
883 | /* | ||
884 | * Since the ftrace irqsoff latency trace checks CALLER_ADDR1, | ||
885 | * which is the stack frame here, we need to force a stack frame | ||
886 | * in case we came from user space. | ||
887 | */ | ||
888 | stwu r1,-32(r1) | ||
889 | mflr r0 | ||
890 | stw r0,4(r1) | ||
891 | stwu r1,-32(r1) | ||
882 | bl trace_hardirqs_on | 892 | bl trace_hardirqs_on |
893 | lwz r1,0(r1) | ||
894 | lwz r1,0(r1) | ||
883 | lwz r9,_MSR(r1) | 895 | lwz r9,_MSR(r1) |
884 | 1: | 896 | 1: |
885 | #endif /* CONFIG_TRACE_IRQFLAGS */ | 897 | #endif /* CONFIG_TRACE_IRQFLAGS */ |
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index 42e9d908914a..d834425186ae 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -97,6 +97,24 @@ system_call_common: | |||
97 | addi r9,r1,STACK_FRAME_OVERHEAD | 97 | addi r9,r1,STACK_FRAME_OVERHEAD |
98 | ld r11,exception_marker@toc(r2) | 98 | ld r11,exception_marker@toc(r2) |
99 | std r11,-16(r9) /* "regshere" marker */ | 99 | std r11,-16(r9) /* "regshere" marker */ |
100 | #if defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR) | ||
101 | BEGIN_FW_FTR_SECTION | ||
102 | beq 33f | ||
103 | /* if from user, see if there are any DTL entries to process */ | ||
104 | ld r10,PACALPPACAPTR(r13) /* get ptr to VPA */ | ||
105 | ld r11,PACA_DTL_RIDX(r13) /* get log read index */ | ||
106 | ld r10,LPPACA_DTLIDX(r10) /* get log write index */ | ||
107 | cmpd cr1,r11,r10 | ||
108 | beq+ cr1,33f | ||
109 | bl .accumulate_stolen_time | ||
110 | REST_GPR(0,r1) | ||
111 | REST_4GPRS(3,r1) | ||
112 | REST_2GPRS(7,r1) | ||
113 | addi r9,r1,STACK_FRAME_OVERHEAD | ||
114 | 33: | ||
115 | END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR) | ||
116 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING && CONFIG_PPC_SPLPAR */ | ||
117 | |||
100 | #ifdef CONFIG_TRACE_IRQFLAGS | 118 | #ifdef CONFIG_TRACE_IRQFLAGS |
101 | bl .trace_hardirqs_on | 119 | bl .trace_hardirqs_on |
102 | REST_GPR(0,r1) | 120 | REST_GPR(0,r1) |
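[Editor's note] The new SPLPAR block above compares the dispatch trace log read index kept in the PACA with the write index in the VPA and, when they differ, calls accumulate_stolen_time() before returning to the normal syscall path. A rough kernel-context C sketch of the same check; the field names dtl_ridx (PACA) and dtl_idx (lppaca) are my reading of the shared-processor layout and are not taken from this hunk.

	static inline void check_stolen_time(void)
	{
		struct lppaca *vpa = local_paca->lppaca_ptr;

		/* write index moved past our read index => unprocessed entries */
		if (firmware_has_feature(FW_FEATURE_SPLPAR) &&
		    local_paca->dtl_ridx != vpa->dtl_idx)
			accumulate_stolen_time();
	}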
@@ -202,7 +220,9 @@ syscall_exit: | |||
202 | bge- syscall_error | 220 | bge- syscall_error |
203 | syscall_error_cont: | 221 | syscall_error_cont: |
204 | ld r7,_NIP(r1) | 222 | ld r7,_NIP(r1) |
223 | BEGIN_FTR_SECTION | ||
205 | stdcx. r0,0,r1 /* to clear the reservation */ | 224 | stdcx. r0,0,r1 /* to clear the reservation */ |
225 | END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) | ||
206 | andi. r6,r8,MSR_PR | 226 | andi. r6,r8,MSR_PR |
207 | ld r4,_LINK(r1) | 227 | ld r4,_LINK(r1) |
208 | /* | 228 | /* |
@@ -401,6 +421,12 @@ BEGIN_FTR_SECTION | |||
401 | std r24,THREAD_VRSAVE(r3) | 421 | std r24,THREAD_VRSAVE(r3) |
402 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 422 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
403 | #endif /* CONFIG_ALTIVEC */ | 423 | #endif /* CONFIG_ALTIVEC */ |
424 | #ifdef CONFIG_PPC64 | ||
425 | BEGIN_FTR_SECTION | ||
426 | mfspr r25,SPRN_DSCR | ||
427 | std r25,THREAD_DSCR(r3) | ||
428 | END_FTR_SECTION_IFSET(CPU_FTR_DSCR) | ||
429 | #endif | ||
404 | and. r0,r0,r22 | 430 | and. r0,r0,r22 |
405 | beq+ 1f | 431 | beq+ 1f |
406 | andc r22,r22,r0 | 432 | andc r22,r22,r0 |
@@ -419,6 +445,17 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
419 | sync | 445 | sync |
420 | #endif /* CONFIG_SMP */ | 446 | #endif /* CONFIG_SMP */ |
421 | 447 | ||
448 | /* | ||
449 | * If we optimise away the clear of the reservation in system | ||
450 | * calls because we know the CPU tracks the address of the | ||
451 | * reservation, then we need to clear it here to cover the | ||
452 | * case that the kernel context switch path has no larx | ||
453 | * instructions. | ||
454 | */ | ||
455 | BEGIN_FTR_SECTION | ||
456 | ldarx r6,0,r1 | ||
457 | END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS) | ||
458 | |||
422 | addi r6,r4,-THREAD /* Convert THREAD to 'current' */ | 459 | addi r6,r4,-THREAD /* Convert THREAD to 'current' */ |
423 | std r6,PACACURRENT(r13) /* Set new 'current' */ | 460 | std r6,PACACURRENT(r13) /* Set new 'current' */ |
424 | 461 | ||
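[Editor's note] Both hunks above revolve around the reservation set by a load-and-reserve instruction: when CPU_FTR_STCX_CHECKS_ADDRESS lets the syscall-exit stdcx. be patched out, the context-switch path must destroy any stale reservation itself, here with a dummy ldarx. As background, a minimal user-space example of the lwarx/stwcx. pair such a reservation belongs to, an atomic increment that only builds for powerpc targets:

	#include <stdio.h>

	static int atomic_inc(int *p)
	{
		int tmp;
		__asm__ __volatile__(
	"1:	lwarx	%0,0,%2\n"	/* load word and reserve */
	"	addi	%0,%0,1\n"
	"	stwcx.	%0,0,%2\n"	/* store only if reservation intact */
	"	bne-	1b\n"		/* lost the reservation: retry */
		: "=&r" (tmp), "+m" (*p)
		: "r" (p)
		: "cc", "memory");
		return tmp;
	}

	int main(void)
	{
		int v = 41;
		printf("%d\n", atomic_inc(&v));	/* prints 42 */
		return 0;
	}

A reservation left dangling by a loop like this and carried across a context switch could let an unrelated stwcx. in the next task succeed spuriously; that is what both the stdcx. and the dummy ldarx guard against.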
@@ -431,10 +468,10 @@ BEGIN_FTR_SECTION | |||
431 | FTR_SECTION_ELSE_NESTED(95) | 468 | FTR_SECTION_ELSE_NESTED(95) |
432 | clrrdi r6,r8,40 /* get its 1T ESID */ | 469 | clrrdi r6,r8,40 /* get its 1T ESID */ |
433 | clrrdi r9,r1,40 /* get current sp 1T ESID */ | 470 | clrrdi r9,r1,40 /* get current sp 1T ESID */ |
434 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_1T_SEGMENT, 95) | 471 | ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95) |
435 | FTR_SECTION_ELSE | 472 | FTR_SECTION_ELSE |
436 | b 2f | 473 | b 2f |
437 | ALT_FTR_SECTION_END_IFSET(CPU_FTR_SLB) | 474 | ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB) |
438 | clrldi. r0,r6,2 /* is new ESID c00000000? */ | 475 | clrldi. r0,r6,2 /* is new ESID c00000000? */ |
439 | cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ | 476 | cmpd cr1,r6,r9 /* or is new ESID the same as current ESID? */ |
440 | cror eq,4*cr1+eq,eq | 477 | cror eq,4*cr1+eq,eq |
@@ -448,7 +485,7 @@ BEGIN_FTR_SECTION | |||
448 | li r9,MMU_SEGSIZE_1T /* insert B field */ | 485 | li r9,MMU_SEGSIZE_1T /* insert B field */ |
449 | oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h | 486 | oris r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h |
450 | rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0 | 487 | rldimi r7,r9,SLB_VSID_SSIZE_SHIFT,0 |
451 | END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) | 488 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) |
452 | 489 | ||
453 | /* Update the last bolted SLB. No write barriers are needed | 490 | /* Update the last bolted SLB. No write barriers are needed |
454 | * here, provided we only update the current CPU's SLB shadow | 491 | * here, provided we only update the current CPU's SLB shadow |
@@ -460,7 +497,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) | |||
460 | std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */ | 497 | std r7,SLBSHADOW_STACKVSID(r9) /* Save VSID */ |
461 | std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */ | 498 | std r0,SLBSHADOW_STACKESID(r9) /* Save ESID */ |
462 | 499 | ||
463 | /* No need to check for CPU_FTR_NO_SLBIE_B here, since when | 500 | /* No need to check for MMU_FTR_NO_SLBIE_B here, since when |
464 | * we have 1TB segments, the only CPUs known to have the errata | 501 | * we have 1TB segments, the only CPUs known to have the errata |
465 | * only support less than 1TB of system memory and we'll never | 502 | * only support less than 1TB of system memory and we'll never |
466 | * actually hit this code path. | 503 | * actually hit this code path. |
@@ -491,6 +528,15 @@ BEGIN_FTR_SECTION | |||
491 | mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */ | 528 | mtspr SPRN_VRSAVE,r0 /* if G4, restore VRSAVE reg */ |
492 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | 529 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) |
493 | #endif /* CONFIG_ALTIVEC */ | 530 | #endif /* CONFIG_ALTIVEC */ |
531 | #ifdef CONFIG_PPC64 | ||
532 | BEGIN_FTR_SECTION | ||
533 | ld r0,THREAD_DSCR(r4) | ||
534 | cmpd r0,r25 | ||
535 | beq 1f | ||
536 | mtspr SPRN_DSCR,r0 | ||
537 | 1: | ||
538 | END_FTR_SECTION_IFSET(CPU_FTR_DSCR) | ||
539 | #endif | ||
494 | 540 | ||
495 | /* r3-r13 are destroyed -- Cort */ | 541 | /* r3-r13 are destroyed -- Cort */ |
496 | REST_8GPRS(14, r1) | 542 | REST_8GPRS(14, r1) |
@@ -576,7 +622,16 @@ ALT_FW_FTR_SECTION_END_IFCLR(FW_FEATURE_ISERIES) | |||
576 | andi. r0,r3,MSR_RI | 622 | andi. r0,r3,MSR_RI |
577 | beq- unrecov_restore | 623 | beq- unrecov_restore |
578 | 624 | ||
625 | /* | ||
626 | * Clear the reservation. If we know the CPU tracks the address of | ||
627 | * the reservation then we can potentially save some cycles and use | ||
628 | * a larx. On POWER6 and POWER7 this is significantly faster. | ||
629 | */ | ||
630 | BEGIN_FTR_SECTION | ||
579 | stdcx. r0,0,r1 /* to clear the reservation */ | 631 | stdcx. r0,0,r1 /* to clear the reservation */ |
632 | FTR_SECTION_ELSE | ||
633 | ldarx r4,0,r1 | ||
634 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS) | ||
580 | 635 | ||
581 | /* | 636 | /* |
582 | * Clear RI before restoring r13. If we are returning to | 637 | * Clear RI before restoring r13. If we are returning to |
@@ -798,7 +853,7 @@ _GLOBAL(enter_rtas) | |||
798 | 853 | ||
799 | _STATIC(rtas_return_loc) | 854 | _STATIC(rtas_return_loc) |
800 | /* relocation is off at this point */ | 855 | /* relocation is off at this point */ |
801 | mfspr r4,SPRN_SPRG_PACA /* Get PACA */ | 856 | GET_PACA(r4) |
802 | clrldi r4,r4,2 /* convert to realmode address */ | 857 | clrldi r4,r4,2 /* convert to realmode address */ |
803 | 858 | ||
804 | bcl 20,31,$+4 | 859 | bcl 20,31,$+4 |
@@ -829,7 +884,7 @@ _STATIC(rtas_restore_regs) | |||
829 | REST_8GPRS(14, r1) /* Restore the non-volatiles */ | 884 | REST_8GPRS(14, r1) /* Restore the non-volatiles */ |
830 | REST_10GPRS(22, r1) /* ditto */ | 885 | REST_10GPRS(22, r1) /* ditto */ |
831 | 886 | ||
832 | mfspr r13,SPRN_SPRG_PACA | 887 | GET_PACA(r13) |
833 | 888 | ||
834 | ld r4,_CCR(r1) | 889 | ld r4,_CCR(r1) |
835 | mtcr r4 | 890 | mtcr r4 |
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S index 5c43063d2506..d24d4400cc79 100644 --- a/arch/powerpc/kernel/exceptions-64e.S +++ b/arch/powerpc/kernel/exceptions-64e.S | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/cputable.h> | 17 | #include <asm/cputable.h> |
18 | #include <asm/setup.h> | 18 | #include <asm/setup.h> |
19 | #include <asm/thread_info.h> | 19 | #include <asm/thread_info.h> |
20 | #include <asm/reg_a2.h> | ||
20 | #include <asm/exception-64e.h> | 21 | #include <asm/exception-64e.h> |
21 | #include <asm/bug.h> | 22 | #include <asm/bug.h> |
22 | #include <asm/irqflags.h> | 23 | #include <asm/irqflags.h> |
@@ -252,9 +253,6 @@ exception_marker: | |||
252 | .balign 0x1000 | 253 | .balign 0x1000 |
253 | .globl interrupt_base_book3e | 254 | .globl interrupt_base_book3e |
254 | interrupt_base_book3e: /* fake trap */ | 255 | interrupt_base_book3e: /* fake trap */ |
255 | /* Note: If real debug exceptions are supported by the HW, the vector | ||
256 | * below will have to be patched up to point to an appropriate handler | ||
257 | */ | ||
258 | EXCEPTION_STUB(0x000, machine_check) /* 0x0200 */ | 256 | EXCEPTION_STUB(0x000, machine_check) /* 0x0200 */ |
259 | EXCEPTION_STUB(0x020, critical_input) /* 0x0580 */ | 257 | EXCEPTION_STUB(0x020, critical_input) /* 0x0580 */ |
260 | EXCEPTION_STUB(0x040, debug_crit) /* 0x0d00 */ | 258 | EXCEPTION_STUB(0x040, debug_crit) /* 0x0d00 */ |
@@ -271,8 +269,13 @@ interrupt_base_book3e: /* fake trap */ | |||
271 | EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */ | 269 | EXCEPTION_STUB(0x1a0, watchdog) /* 0x09f0 */ |
272 | EXCEPTION_STUB(0x1c0, data_tlb_miss) | 270 | EXCEPTION_STUB(0x1c0, data_tlb_miss) |
273 | EXCEPTION_STUB(0x1e0, instruction_tlb_miss) | 271 | EXCEPTION_STUB(0x1e0, instruction_tlb_miss) |
272 | EXCEPTION_STUB(0x260, perfmon) | ||
274 | EXCEPTION_STUB(0x280, doorbell) | 273 | EXCEPTION_STUB(0x280, doorbell) |
275 | EXCEPTION_STUB(0x2a0, doorbell_crit) | 274 | EXCEPTION_STUB(0x2a0, doorbell_crit) |
275 | EXCEPTION_STUB(0x2c0, guest_doorbell) | ||
276 | EXCEPTION_STUB(0x2e0, guest_doorbell_crit) | ||
277 | EXCEPTION_STUB(0x300, hypercall) | ||
278 | EXCEPTION_STUB(0x320, ehpriv) | ||
276 | 279 | ||
277 | .globl interrupt_end_book3e | 280 | .globl interrupt_end_book3e |
278 | interrupt_end_book3e: | 281 | interrupt_end_book3e: |
@@ -379,7 +382,7 @@ interrupt_end_book3e: | |||
379 | mfspr r13,SPRN_SPRG_PACA /* get our PACA */ | 382 | mfspr r13,SPRN_SPRG_PACA /* get our PACA */ |
380 | b system_call_common | 383 | b system_call_common |
381 | 384 | ||
382 | /* Auxillary Processor Unavailable Interrupt */ | 385 | /* Auxiliary Processor Unavailable Interrupt */ |
383 | START_EXCEPTION(ap_unavailable); | 386 | START_EXCEPTION(ap_unavailable); |
384 | NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) | 387 | NORMAL_EXCEPTION_PROLOG(0xf20, PROLOG_ADDITION_NONE) |
385 | EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP) | 388 | EXCEPTION_COMMON(0xf20, PACA_EXGEN, INTS_KEEP) |
@@ -454,6 +457,70 @@ interrupt_end_book3e: | |||
454 | kernel_dbg_exc: | 457 | kernel_dbg_exc: |
455 | b . /* NYI */ | 458 | b . /* NYI */ |
456 | 459 | ||
460 | /* Debug exception as a debug interrupt*/ | ||
461 | START_EXCEPTION(debug_debug); | ||
462 | DBG_EXCEPTION_PROLOG(0xd00, PROLOG_ADDITION_2REGS) | ||
463 | |||
464 | /* | ||
465 | * If there is a single step or branch-taken exception in an | ||
466 | * exception entry sequence, it was probably meant to apply to | ||
467 | * the code where the exception occurred (since exception entry | ||
468 | * doesn't turn off DE automatically). We simulate the effect | ||
469 | * of turning off DE on entry to an exception handler by turning | ||
470 | * off DE in the DSRR1 value and clearing the debug status. | ||
471 | */ | ||
472 | |||
473 | mfspr r14,SPRN_DBSR /* check single-step/branch taken */ | ||
474 | andis. r15,r14,DBSR_IC@h | ||
475 | beq+ 1f | ||
476 | |||
477 | LOAD_REG_IMMEDIATE(r14,interrupt_base_book3e) | ||
478 | LOAD_REG_IMMEDIATE(r15,interrupt_end_book3e) | ||
479 | cmpld cr0,r10,r14 | ||
480 | cmpld cr1,r10,r15 | ||
481 | blt+ cr0,1f | ||
482 | bge+ cr1,1f | ||
483 | |||
484 | /* here it looks like we got an inappropriate debug exception. */ | ||
485 | lis r14,DBSR_IC@h /* clear the IC event */ | ||
486 | rlwinm r11,r11,0,~MSR_DE /* clear DE in the DSRR1 value */ | ||
487 | mtspr SPRN_DBSR,r14 | ||
488 | mtspr SPRN_DSRR1,r11 | ||
489 | lwz r10,PACA_EXDBG+EX_CR(r13) /* restore registers */ | ||
490 | ld r1,PACA_EXDBG+EX_R1(r13) | ||
491 | ld r14,PACA_EXDBG+EX_R14(r13) | ||
492 | ld r15,PACA_EXDBG+EX_R15(r13) | ||
493 | mtcr r10 | ||
494 | ld r10,PACA_EXDBG+EX_R10(r13) /* restore registers */ | ||
495 | ld r11,PACA_EXDBG+EX_R11(r13) | ||
496 | mfspr r13,SPRN_SPRG_DBG_SCRATCH | ||
497 | rfdi | ||
498 | |||
499 | /* Normal debug exception */ | ||
500 | /* XXX We only handle coming from userspace for now since we can't | ||
501 | * quite save properly an interrupted kernel state yet | ||
502 | */ | ||
503 | 1: andi. r14,r11,MSR_PR; /* check for userspace again */ | ||
504 | beq kernel_dbg_exc; /* if from kernel mode */ | ||
505 | |||
506 | /* Now we mash up things to make it look like we are coming on a | ||
507 | * normal exception | ||
508 | */ | ||
509 | mfspr r15,SPRN_SPRG_DBG_SCRATCH | ||
510 | mtspr SPRN_SPRG_GEN_SCRATCH,r15 | ||
511 | mfspr r14,SPRN_DBSR | ||
512 | EXCEPTION_COMMON(0xd00, PACA_EXDBG, INTS_DISABLE_ALL) | ||
513 | std r14,_DSISR(r1) | ||
514 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
515 | mr r4,r14 | ||
516 | ld r14,PACA_EXDBG+EX_R14(r13) | ||
517 | ld r15,PACA_EXDBG+EX_R15(r13) | ||
518 | bl .save_nvgprs | ||
519 | bl .DebugException | ||
520 | b .ret_from_except | ||
521 | |||
522 | MASKABLE_EXCEPTION(0x260, perfmon, .performance_monitor_exception, ACK_NONE) | ||
523 | |||
457 | /* Doorbell interrupt */ | 524 | /* Doorbell interrupt */ |
458 | MASKABLE_EXCEPTION(0x2070, doorbell, .doorbell_exception, ACK_NONE) | 525 | MASKABLE_EXCEPTION(0x2070, doorbell, .doorbell_exception, ACK_NONE) |
459 | 526 | ||
@@ -468,6 +535,11 @@ kernel_dbg_exc: | |||
468 | // b ret_from_crit_except | 535 | // b ret_from_crit_except |
469 | b . | 536 | b . |
470 | 537 | ||
538 | MASKABLE_EXCEPTION(0x2c0, guest_doorbell, .unknown_exception, ACK_NONE) | ||
539 | MASKABLE_EXCEPTION(0x2e0, guest_doorbell_crit, .unknown_exception, ACK_NONE) | ||
540 | MASKABLE_EXCEPTION(0x310, hypercall, .unknown_exception, ACK_NONE) | ||
541 | MASKABLE_EXCEPTION(0x320, ehpriv, .unknown_exception, ACK_NONE) | ||
542 | |||
471 | 543 | ||
472 | /* | 544 | /* |
473 | * An interrupt came in while soft-disabled; clear EE in SRR1, | 545 | * An interrupt came in while soft-disabled; clear EE in SRR1, |
@@ -587,7 +659,12 @@ fast_exception_return: | |||
587 | BAD_STACK_TRAMPOLINE(0x000) | 659 | BAD_STACK_TRAMPOLINE(0x000) |
588 | BAD_STACK_TRAMPOLINE(0x100) | 660 | BAD_STACK_TRAMPOLINE(0x100) |
589 | BAD_STACK_TRAMPOLINE(0x200) | 661 | BAD_STACK_TRAMPOLINE(0x200) |
662 | BAD_STACK_TRAMPOLINE(0x260) | ||
663 | BAD_STACK_TRAMPOLINE(0x2c0) | ||
664 | BAD_STACK_TRAMPOLINE(0x2e0) | ||
590 | BAD_STACK_TRAMPOLINE(0x300) | 665 | BAD_STACK_TRAMPOLINE(0x300) |
666 | BAD_STACK_TRAMPOLINE(0x310) | ||
667 | BAD_STACK_TRAMPOLINE(0x320) | ||
591 | BAD_STACK_TRAMPOLINE(0x400) | 668 | BAD_STACK_TRAMPOLINE(0x400) |
592 | BAD_STACK_TRAMPOLINE(0x500) | 669 | BAD_STACK_TRAMPOLINE(0x500) |
593 | BAD_STACK_TRAMPOLINE(0x600) | 670 | BAD_STACK_TRAMPOLINE(0x600) |
@@ -864,8 +941,23 @@ have_hes: | |||
864 | * that will have to be made dependent on whether we are running under | 941 | * that will have to be made dependent on whether we are running under |
865 | * a hypervisor I suppose. | 942 | * a hypervisor I suppose. |
866 | */ | 943 | */ |
867 | ori r3,r3,MAS0_HES | MAS0_WQ_ALLWAYS | 944 | |
868 | mtspr SPRN_MAS0,r3 | 945 | /* BEWARE, MAGIC |
946 | * This code is called as an ordinary function on the boot CPU. But to | ||
947 | * avoid duplication, this code is also used in SCOM bringup of | ||
948 | * secondary CPUs. We read the code between the initial_tlb_code_start | ||
949 | * and initial_tlb_code_end labels one instruction at a time and RAM it | ||
950 | * into the new core via SCOM. That doesn't process branches, so there | ||
951 | * must be none between those two labels. It also means if this code | ||
952 | * ever takes any parameters, the SCOM code must also be updated to | ||
953 | * provide them. | ||
954 | */ | ||
955 | .globl a2_tlbinit_code_start | ||
956 | a2_tlbinit_code_start: | ||
957 | |||
958 | ori r11,r3,MAS0_WQ_ALLWAYS | ||
959 | oris r11,r11,MAS0_ESEL(3)@h /* Use way 3: workaround A2 erratum 376 */ | ||
960 | mtspr SPRN_MAS0,r11 | ||
869 | lis r3,(MAS1_VALID | MAS1_IPROT)@h | 961 | lis r3,(MAS1_VALID | MAS1_IPROT)@h |
870 | ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT | 962 | ori r3,r3,BOOK3E_PAGESZ_1GB << MAS1_TSIZE_SHIFT |
871 | mtspr SPRN_MAS1,r3 | 963 | mtspr SPRN_MAS1,r3 |
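[Editor's note] The "BEWARE, MAGIC" comment above constrains everything between a2_tlbinit_code_start and a2_tlbinit_code_end to be branch-free, because secondary bringup feeds those instructions to the new core one at a time over SCOM. A hedged sketch of the kind of loop that implies; scom_ram_instruction() is a hypothetical helper standing in for whatever the real SCOM bringup code provides, while the two label symbols are the real ones declared in the hunk.

	extern unsigned int a2_tlbinit_code_start[];
	extern unsigned int a2_tlbinit_code_end[];

	/* hypothetical: write one instruction into the target core via SCOM */
	extern int scom_ram_instruction(int cpu, unsigned int insn);

	static int ram_tlbinit_code(int cpu)
	{
		unsigned int *insn;

		for (insn = a2_tlbinit_code_start; insn < a2_tlbinit_code_end; insn++) {
			/* one instruction at a time: a relative branch taken
			 * here would never be followed, hence the rule */
			int rc = scom_ram_instruction(cpu, *insn);
			if (rc)
				return rc;
		}
		return 0;
	}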
@@ -879,18 +971,86 @@ have_hes: | |||
879 | /* Write the TLB entry */ | 971 | /* Write the TLB entry */ |
880 | tlbwe | 972 | tlbwe |
881 | 973 | ||
974 | .globl a2_tlbinit_after_linear_map | ||
975 | a2_tlbinit_after_linear_map: | ||
976 | |||
882 | /* Now we branch the new virtual address mapped by this entry */ | 977 | /* Now we branch the new virtual address mapped by this entry */ |
883 | LOAD_REG_IMMEDIATE(r3,1f) | 978 | LOAD_REG_IMMEDIATE(r3,1f) |
884 | mtctr r3 | 979 | mtctr r3 |
885 | bctr | 980 | bctr |
886 | 981 | ||
887 | 1: /* We are now running at PAGE_OFFSET, clean the TLB of everything | 982 | 1: /* We are now running at PAGE_OFFSET, clean the TLB of everything |
888 | * else (XXX we should scan for bolted crap from the firmware too) | 983 | * else (including IPROTed things left by firmware) |
984 | * r4 = TLBnCFG | ||
985 | * r3 = current address (more or less) | ||
889 | */ | 986 | */ |
987 | |||
988 | li r5,0 | ||
989 | mtspr SPRN_MAS6,r5 | ||
990 | tlbsx 0,r3 | ||
991 | |||
992 | rlwinm r9,r4,0,TLBnCFG_N_ENTRY | ||
993 | rlwinm r10,r4,8,0xff | ||
994 | addi r10,r10,-1 /* Get inner loop mask */ | ||
995 | |||
996 | li r3,1 | ||
997 | |||
998 | mfspr r5,SPRN_MAS1 | ||
999 | rlwinm r5,r5,0,(~(MAS1_VALID|MAS1_IPROT)) | ||
1000 | |||
1001 | mfspr r6,SPRN_MAS2 | ||
1002 | rldicr r6,r6,0,51 /* Extract EPN */ | ||
1003 | |||
1004 | mfspr r7,SPRN_MAS0 | ||
1005 | rlwinm r7,r7,0,0xffff0fff /* Clear HES and WQ */ | ||
1006 | |||
1007 | rlwinm r8,r7,16,0xfff /* Extract ESEL */ | ||
1008 | |||
1009 | 2: add r4,r3,r8 | ||
1010 | and r4,r4,r10 | ||
1011 | |||
1012 | rlwimi r7,r4,16,MAS0_ESEL_MASK | ||
1013 | |||
1014 | mtspr SPRN_MAS0,r7 | ||
1015 | mtspr SPRN_MAS1,r5 | ||
1016 | mtspr SPRN_MAS2,r6 | ||
1017 | tlbwe | ||
1018 | |||
1019 | addi r3,r3,1 | ||
1020 | and. r4,r3,r10 | ||
1021 | |||
1022 | bne 3f | ||
1023 | addis r6,r6,(1<<30)@h | ||
1024 | 3: | ||
1025 | cmpw r3,r9 | ||
1026 | blt 2b | ||
1027 | |||
1028 | .globl a2_tlbinit_after_iprot_flush | ||
1029 | a2_tlbinit_after_iprot_flush: | ||
1030 | |||
1031 | #ifdef CONFIG_PPC_EARLY_DEBUG_WSP | ||
1032 | /* Now establish early debug mappings if applicable */ | ||
1033 | /* Restore the MAS0 we used for linear mapping load */ | ||
1034 | mtspr SPRN_MAS0,r11 | ||
1035 | |||
1036 | lis r3,(MAS1_VALID | MAS1_IPROT)@h | ||
1037 | ori r3,r3,(BOOK3E_PAGESZ_4K << MAS1_TSIZE_SHIFT) | ||
1038 | mtspr SPRN_MAS1,r3 | ||
1039 | LOAD_REG_IMMEDIATE(r3, WSP_UART_VIRT | MAS2_I | MAS2_G) | ||
1040 | mtspr SPRN_MAS2,r3 | ||
1041 | LOAD_REG_IMMEDIATE(r3, WSP_UART_PHYS | MAS3_SR | MAS3_SW) | ||
1042 | mtspr SPRN_MAS7_MAS3,r3 | ||
1043 | /* re-use the MAS8 value from the linear mapping */ | ||
1044 | tlbwe | ||
1045 | #endif /* CONFIG_PPC_EARLY_DEBUG_WSP */ | ||
1046 | |||
890 | PPC_TLBILX(0,0,0) | 1047 | PPC_TLBILX(0,0,0) |
891 | sync | 1048 | sync |
892 | isync | 1049 | isync |
893 | 1050 | ||
1051 | .globl a2_tlbinit_code_end | ||
1052 | a2_tlbinit_code_end: | ||
1053 | |||
894 | /* We translate LR and return */ | 1054 | /* We translate LR and return */ |
895 | mflr r3 | 1055 | mflr r3 |
896 | tovirt(r3,r3) | 1056 | tovirt(r3,r3) |
@@ -1040,3 +1200,33 @@ _GLOBAL(__setup_base_ivors) | |||
1040 | sync | 1200 | sync |
1041 | 1201 | ||
1042 | blr | 1202 | blr |
1203 | |||
1204 | _GLOBAL(setup_perfmon_ivor) | ||
1205 | SET_IVOR(35, 0x260) /* Performance Monitor */ | ||
1206 | blr | ||
1207 | |||
1208 | _GLOBAL(setup_doorbell_ivors) | ||
1209 | SET_IVOR(36, 0x280) /* Processor Doorbell */ | ||
1210 | SET_IVOR(37, 0x2a0) /* Processor Doorbell Crit */ | ||
1211 | |||
1212 | /* Check MMUCFG[LPIDSIZE] to determine if we have category E.HV */ | ||
1213 | mfspr r10,SPRN_MMUCFG | ||
1214 | rlwinm. r10,r10,0,MMUCFG_LPIDSIZE | ||
1215 | beqlr | ||
1216 | |||
1217 | SET_IVOR(38, 0x2c0) /* Guest Processor Doorbell */ | ||
1218 | SET_IVOR(39, 0x2e0) /* Guest Processor Doorbell Crit/MC */ | ||
1219 | blr | ||
1220 | |||
1221 | _GLOBAL(setup_ehv_ivors) | ||
1222 | /* | ||
1223 | * We may be running as a guest and lack E.HV even on a chip | ||
1224 | * that normally has it. | ||
1225 | */ | ||
1226 | mfspr r10,SPRN_MMUCFG | ||
1227 | rlwinm. r10,r10,0,MMUCFG_LPIDSIZE | ||
1228 | beqlr | ||
1229 | |||
1230 | SET_IVOR(40, 0x300) /* Embedded Hypervisor System Call */ | ||
1231 | SET_IVOR(41, 0x320) /* Embedded Hypervisor Privilege */ | ||
1232 | blr | ||
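[Editor's note] setup_doorbell_ivors and setup_ehv_ivors above both gate the guest-doorbell and hypervisor IVORs on MMUCFG[LPIDSIZE], treating a zero field as "no embedded-hypervisor (E.HV) category". The same probe restated as a hedged C helper; the helper name is illustrative, while SPRN_MMUCFG and MMUCFG_LPIDSIZE are the constants the assembly uses.

	#include <linux/types.h>
	#include <asm/reg.h>

	static inline bool cpu_has_ehv_category(void)
	{
		/* LPIDSIZE == 0 means no partition IDs, hence no E.HV */
		return (mfspr(SPRN_MMUCFG) & MMUCFG_LPIDSIZE) != 0;
	}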
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S index f53029a01554..a85f4874cba7 100644 --- a/arch/powerpc/kernel/exceptions-64s.S +++ b/arch/powerpc/kernel/exceptions-64s.S | |||
@@ -5,7 +5,7 @@ | |||
5 | * handling and other fixed offset specific things. | 5 | * handling and other fixed offset specific things. |
6 | * | 6 | * |
7 | * This file is meant to be #included from head_64.S due to | 7 | * This file is meant to be #included from head_64.S due to |
8 | * position dependant assembly. | 8 | * position dependent assembly. |
9 | * | 9 | * |
10 | * Most of this originates from head_64.S and thus has the same | 10 | * Most of this originates from head_64.S and thus has the same |
11 | * copyright history. | 11 | * copyright history. |
@@ -13,6 +13,7 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #include <asm/exception-64s.h> | 15 | #include <asm/exception-64s.h> |
16 | #include <asm/ptrace.h> | ||
16 | 17 | ||
17 | /* | 18 | /* |
18 | * We layout physical memory as follows: | 19 | * We layout physical memory as follows: |
@@ -36,23 +37,51 @@ | |||
36 | .globl __start_interrupts | 37 | .globl __start_interrupts |
37 | __start_interrupts: | 38 | __start_interrupts: |
38 | 39 | ||
39 | STD_EXCEPTION_PSERIES(0x100, system_reset) | 40 | .globl system_reset_pSeries; |
41 | system_reset_pSeries: | ||
42 | HMT_MEDIUM; | ||
43 | DO_KVM 0x100; | ||
44 | SET_SCRATCH0(r13) | ||
45 | #ifdef CONFIG_PPC_P7_NAP | ||
46 | BEGIN_FTR_SECTION | ||
47 | /* Running native on arch 2.06 or later, check if we are | ||
48 | * waking up from nap. We only handle no state loss and | ||
49 | * supervisor state loss. We do -not- handle hypervisor | ||
50 | * state loss at this time. | ||
51 | */ | ||
52 | mfspr r13,SPRN_SRR1 | ||
53 | rlwinm r13,r13,47-31,30,31 | ||
54 | cmpwi cr0,r13,1 | ||
55 | bne 1f | ||
56 | b .power7_wakeup_noloss | ||
57 | 1: cmpwi cr0,r13,2 | ||
58 | bne 1f | ||
59 | b .power7_wakeup_loss | ||
60 | /* Total loss of HV state is fatal, we could try to use the | ||
61 | * PIR to locate a PACA, then use an emergency stack etc... | ||
62 | * but for now, let's just stay stuck here | ||
63 | */ | ||
64 | 1: cmpwi cr0,r13,3 | ||
65 | beq . | ||
66 | END_FTR_SECTION_IFSET(CPU_FTR_HVMODE_206) | ||
67 | #endif /* CONFIG_PPC_P7_NAP */ | ||
68 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD) | ||
40 | 69 | ||
41 | . = 0x200 | 70 | . = 0x200 |
42 | _machine_check_pSeries: | 71 | _machine_check_pSeries: |
43 | HMT_MEDIUM | 72 | HMT_MEDIUM |
44 | DO_KVM 0x200 | 73 | DO_KVM 0x200 |
45 | mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ | 74 | SET_SCRATCH0(r13) |
46 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | 75 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD) |
47 | 76 | ||
48 | . = 0x300 | 77 | . = 0x300 |
49 | .globl data_access_pSeries | 78 | .globl data_access_pSeries |
50 | data_access_pSeries: | 79 | data_access_pSeries: |
51 | HMT_MEDIUM | 80 | HMT_MEDIUM |
52 | DO_KVM 0x300 | 81 | DO_KVM 0x300 |
53 | mtspr SPRN_SPRG_SCRATCH0,r13 | 82 | SET_SCRATCH0(r13) |
54 | BEGIN_FTR_SECTION | 83 | BEGIN_FTR_SECTION |
55 | mfspr r13,SPRN_SPRG_PACA | 84 | GET_PACA(r13) |
56 | std r9,PACA_EXSLB+EX_R9(r13) | 85 | std r9,PACA_EXSLB+EX_R9(r13) |
57 | std r10,PACA_EXSLB+EX_R10(r13) | 86 | std r10,PACA_EXSLB+EX_R10(r13) |
58 | mfspr r10,SPRN_DAR | 87 | mfspr r10,SPRN_DAR |
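[Editor's note] In the reworked 0x100 vector above, rlwinm r13,r13,47-31,30,31 isolates the two SRR1 bits that encode why the thread woke from nap, and the compares against 1, 2 and 3 select the no-loss, supervisor-loss and (unhandled) hypervisor-loss paths. A hedged C restatement of that extraction; the helper name is illustrative.

	static inline unsigned int power7_wakeup_reason(unsigned long srr1)
	{
		/* the same two bits rlwinm r13,r13,47-31,30,31 leaves in the
		 * low bits: 1 = no state loss, 2 = supervisor state lost,
		 * 3 = hypervisor state lost (treated as fatal above) */
		return (srr1 >> 16) & 3;
	}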
@@ -66,22 +95,22 @@ BEGIN_FTR_SECTION | |||
66 | std r11,PACA_EXGEN+EX_R11(r13) | 95 | std r11,PACA_EXGEN+EX_R11(r13) |
67 | ld r11,PACA_EXSLB+EX_R9(r13) | 96 | ld r11,PACA_EXSLB+EX_R9(r13) |
68 | std r12,PACA_EXGEN+EX_R12(r13) | 97 | std r12,PACA_EXGEN+EX_R12(r13) |
69 | mfspr r12,SPRN_SPRG_SCRATCH0 | 98 | GET_SCRATCH0(r12) |
70 | std r10,PACA_EXGEN+EX_R10(r13) | 99 | std r10,PACA_EXGEN+EX_R10(r13) |
71 | std r11,PACA_EXGEN+EX_R9(r13) | 100 | std r11,PACA_EXGEN+EX_R9(r13) |
72 | std r12,PACA_EXGEN+EX_R13(r13) | 101 | std r12,PACA_EXGEN+EX_R13(r13) |
73 | EXCEPTION_PROLOG_PSERIES_1(data_access_common) | 102 | EXCEPTION_PROLOG_PSERIES_1(data_access_common, EXC_STD) |
74 | FTR_SECTION_ELSE | 103 | FTR_SECTION_ELSE |
75 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common) | 104 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD) |
76 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB) | 105 | ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB) |
77 | 106 | ||
78 | . = 0x380 | 107 | . = 0x380 |
79 | .globl data_access_slb_pSeries | 108 | .globl data_access_slb_pSeries |
80 | data_access_slb_pSeries: | 109 | data_access_slb_pSeries: |
81 | HMT_MEDIUM | 110 | HMT_MEDIUM |
82 | DO_KVM 0x380 | 111 | DO_KVM 0x380 |
83 | mtspr SPRN_SPRG_SCRATCH0,r13 | 112 | SET_SCRATCH0(r13) |
84 | mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */ | 113 | GET_PACA(r13) |
85 | std r3,PACA_EXSLB+EX_R3(r13) | 114 | std r3,PACA_EXSLB+EX_R3(r13) |
86 | mfspr r3,SPRN_DAR | 115 | mfspr r3,SPRN_DAR |
87 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | 116 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ |
@@ -94,7 +123,7 @@ data_access_slb_pSeries: | |||
94 | std r10,PACA_EXSLB+EX_R10(r13) | 123 | std r10,PACA_EXSLB+EX_R10(r13) |
95 | std r11,PACA_EXSLB+EX_R11(r13) | 124 | std r11,PACA_EXSLB+EX_R11(r13) |
96 | std r12,PACA_EXSLB+EX_R12(r13) | 125 | std r12,PACA_EXSLB+EX_R12(r13) |
97 | mfspr r10,SPRN_SPRG_SCRATCH0 | 126 | GET_SCRATCH0(r10) |
98 | std r10,PACA_EXSLB+EX_R13(r13) | 127 | std r10,PACA_EXSLB+EX_R13(r13) |
99 | mfspr r12,SPRN_SRR1 /* and SRR1 */ | 128 | mfspr r12,SPRN_SRR1 /* and SRR1 */ |
100 | #ifndef CONFIG_RELOCATABLE | 129 | #ifndef CONFIG_RELOCATABLE |
@@ -112,15 +141,15 @@ data_access_slb_pSeries: | |||
112 | bctr | 141 | bctr |
113 | #endif | 142 | #endif |
114 | 143 | ||
115 | STD_EXCEPTION_PSERIES(0x400, instruction_access) | 144 | STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access) |
116 | 145 | ||
117 | . = 0x480 | 146 | . = 0x480 |
118 | .globl instruction_access_slb_pSeries | 147 | .globl instruction_access_slb_pSeries |
119 | instruction_access_slb_pSeries: | 148 | instruction_access_slb_pSeries: |
120 | HMT_MEDIUM | 149 | HMT_MEDIUM |
121 | DO_KVM 0x480 | 150 | DO_KVM 0x480 |
122 | mtspr SPRN_SPRG_SCRATCH0,r13 | 151 | SET_SCRATCH0(r13) |
123 | mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */ | 152 | GET_PACA(r13) |
124 | std r3,PACA_EXSLB+EX_R3(r13) | 153 | std r3,PACA_EXSLB+EX_R3(r13) |
125 | mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ | 154 | mfspr r3,SPRN_SRR0 /* SRR0 is faulting address */ |
126 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ | 155 | std r9,PACA_EXSLB+EX_R9(r13) /* save r9 - r12 */ |
@@ -133,7 +162,7 @@ instruction_access_slb_pSeries: | |||
133 | std r10,PACA_EXSLB+EX_R10(r13) | 162 | std r10,PACA_EXSLB+EX_R10(r13) |
134 | std r11,PACA_EXSLB+EX_R11(r13) | 163 | std r11,PACA_EXSLB+EX_R11(r13) |
135 | std r12,PACA_EXSLB+EX_R12(r13) | 164 | std r12,PACA_EXSLB+EX_R12(r13) |
136 | mfspr r10,SPRN_SPRG_SCRATCH0 | 165 | GET_SCRATCH0(r10) |
137 | std r10,PACA_EXSLB+EX_R13(r13) | 166 | std r10,PACA_EXSLB+EX_R13(r13) |
138 | mfspr r12,SPRN_SRR1 /* and SRR1 */ | 167 | mfspr r12,SPRN_SRR1 /* and SRR1 */ |
139 | #ifndef CONFIG_RELOCATABLE | 168 | #ifndef CONFIG_RELOCATABLE |
@@ -146,13 +175,29 @@ instruction_access_slb_pSeries: | |||
146 | bctr | 175 | bctr |
147 | #endif | 176 | #endif |
148 | 177 | ||
149 | MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt) | 178 | /* We open code these as we can't have a ". = x" (even with |
150 | STD_EXCEPTION_PSERIES(0x600, alignment) | 179 | * x = "." within a feature section |
151 | STD_EXCEPTION_PSERIES(0x700, program_check) | 180 | */ |
152 | STD_EXCEPTION_PSERIES(0x800, fp_unavailable) | 181 | . = 0x500; |
153 | MASKABLE_EXCEPTION_PSERIES(0x900, decrementer) | 182 | .globl hardware_interrupt_pSeries; |
154 | STD_EXCEPTION_PSERIES(0xa00, trap_0a) | 183 | .globl hardware_interrupt_hv; |
155 | STD_EXCEPTION_PSERIES(0xb00, trap_0b) | 184 | hardware_interrupt_pSeries: |
185 | hardware_interrupt_hv: | ||
186 | BEGIN_FTR_SECTION | ||
187 | _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD) | ||
188 | FTR_SECTION_ELSE | ||
189 | _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV) | ||
190 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_HVMODE_206) | ||
191 | |||
192 | STD_EXCEPTION_PSERIES(0x600, 0x600, alignment) | ||
193 | STD_EXCEPTION_PSERIES(0x700, 0x700, program_check) | ||
194 | STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable) | ||
195 | |||
196 | MASKABLE_EXCEPTION_PSERIES(0x900, 0x900, decrementer) | ||
197 | MASKABLE_EXCEPTION_HV(0x980, 0x980, decrementer) | ||
198 | |||
199 | STD_EXCEPTION_PSERIES(0xa00, 0xa00, trap_0a) | ||
200 | STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b) | ||
156 | 201 | ||
157 | . = 0xc00 | 202 | . = 0xc00 |
158 | .globl system_call_pSeries | 203 | .globl system_call_pSeries |
@@ -164,13 +209,13 @@ BEGIN_FTR_SECTION | |||
164 | beq- 1f | 209 | beq- 1f |
165 | END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) | 210 | END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) |
166 | mr r9,r13 | 211 | mr r9,r13 |
167 | mfspr r13,SPRN_SPRG_PACA | 212 | GET_PACA(r13) |
168 | mfspr r11,SPRN_SRR0 | 213 | mfspr r11,SPRN_SRR0 |
169 | ld r12,PACAKBASE(r13) | ||
170 | ld r10,PACAKMSR(r13) | ||
171 | LOAD_HANDLER(r12, system_call_entry) | ||
172 | mtspr SPRN_SRR0,r12 | ||
173 | mfspr r12,SPRN_SRR1 | 214 | mfspr r12,SPRN_SRR1 |
215 | ld r10,PACAKBASE(r13) | ||
216 | LOAD_HANDLER(r10, system_call_entry) | ||
217 | mtspr SPRN_SRR0,r10 | ||
218 | ld r10,PACAKMSR(r13) | ||
174 | mtspr SPRN_SRR1,r10 | 219 | mtspr SPRN_SRR1,r10 |
175 | rfid | 220 | rfid |
176 | b . /* prevent speculative execution */ | 221 | b . /* prevent speculative execution */ |
@@ -182,8 +227,21 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) | |||
182 | rfid /* return to userspace */ | 227 | rfid /* return to userspace */ |
183 | b . | 228 | b . |
184 | 229 | ||
185 | STD_EXCEPTION_PSERIES(0xd00, single_step) | 230 | STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step) |
186 | STD_EXCEPTION_PSERIES(0xe00, trap_0e) | 231 | |
232 | /* At 0xe??? we have a bunch of hypervisor exceptions, we branch | ||
233 | * out of line to handle them | ||
234 | */ | ||
235 | . = 0xe00 | ||
236 | b h_data_storage_hv | ||
237 | . = 0xe20 | ||
238 | b h_instr_storage_hv | ||
239 | . = 0xe40 | ||
240 | b emulation_assist_hv | ||
241 | . = 0xe50 | ||
242 | b hmi_exception_hv | ||
243 | . = 0xe60 | ||
244 | b hmi_exception_hv | ||
187 | 245 | ||
188 | /* We need to deal with the Altivec unavailable exception | 246 | /* We need to deal with the Altivec unavailable exception |
189 | * here which is at 0xf20, thus in the middle of the | 247 | * here which is at 0xf20, thus in the middle of the |
@@ -192,39 +250,42 @@ END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE) | |||
192 | */ | 250 | */ |
193 | performance_monitor_pSeries_1: | 251 | performance_monitor_pSeries_1: |
194 | . = 0xf00 | 252 | . = 0xf00 |
195 | DO_KVM 0xf00 | ||
196 | b performance_monitor_pSeries | 253 | b performance_monitor_pSeries |
197 | 254 | ||
198 | altivec_unavailable_pSeries_1: | 255 | altivec_unavailable_pSeries_1: |
199 | . = 0xf20 | 256 | . = 0xf20 |
200 | DO_KVM 0xf20 | ||
201 | b altivec_unavailable_pSeries | 257 | b altivec_unavailable_pSeries |
202 | 258 | ||
203 | vsx_unavailable_pSeries_1: | 259 | vsx_unavailable_pSeries_1: |
204 | . = 0xf40 | 260 | . = 0xf40 |
205 | DO_KVM 0xf40 | ||
206 | b vsx_unavailable_pSeries | 261 | b vsx_unavailable_pSeries |
207 | 262 | ||
208 | #ifdef CONFIG_CBE_RAS | 263 | #ifdef CONFIG_CBE_RAS |
209 | HSTD_EXCEPTION_PSERIES(0x1200, cbe_system_error) | 264 | STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error) |
210 | #endif /* CONFIG_CBE_RAS */ | 265 | #endif /* CONFIG_CBE_RAS */ |
211 | STD_EXCEPTION_PSERIES(0x1300, instruction_breakpoint) | 266 | STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint) |
212 | #ifdef CONFIG_CBE_RAS | 267 | #ifdef CONFIG_CBE_RAS |
213 | HSTD_EXCEPTION_PSERIES(0x1600, cbe_maintenance) | 268 | STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance) |
214 | #endif /* CONFIG_CBE_RAS */ | 269 | #endif /* CONFIG_CBE_RAS */ |
215 | STD_EXCEPTION_PSERIES(0x1700, altivec_assist) | 270 | STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist) |
216 | #ifdef CONFIG_CBE_RAS | 271 | #ifdef CONFIG_CBE_RAS |
217 | HSTD_EXCEPTION_PSERIES(0x1800, cbe_thermal) | 272 | STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal) |
218 | #endif /* CONFIG_CBE_RAS */ | 273 | #endif /* CONFIG_CBE_RAS */ |
219 | 274 | ||
220 | . = 0x3000 | 275 | . = 0x3000 |
221 | 276 | ||
222 | /*** pSeries interrupt support ***/ | 277 | /*** Out of line interrupts support ***/ |
278 | |||
279 | /* moved from 0xe00 */ | ||
280 | STD_EXCEPTION_HV(., 0xe00, h_data_storage) | ||
281 | STD_EXCEPTION_HV(., 0xe20, h_instr_storage) | ||
282 | STD_EXCEPTION_HV(., 0xe40, emulation_assist) | ||
283 | STD_EXCEPTION_HV(., 0xe60, hmi_exception) /* need to flush cache ? */ | ||
223 | 284 | ||
224 | /* moved from 0xf00 */ | 285 | /* moved from 0xf00 */ |
225 | STD_EXCEPTION_PSERIES(., performance_monitor) | 286 | STD_EXCEPTION_PSERIES(., 0xf00, performance_monitor) |
226 | STD_EXCEPTION_PSERIES(., altivec_unavailable) | 287 | STD_EXCEPTION_PSERIES(., 0xf20, altivec_unavailable) |
227 | STD_EXCEPTION_PSERIES(., vsx_unavailable) | 288 | STD_EXCEPTION_PSERIES(., 0xf40, vsx_unavailable) |
228 | 289 | ||
229 | /* | 290 | /* |
230 | * An interrupt came in while soft-disabled; clear EE in SRR1, | 291 | * An interrupt came in while soft-disabled; clear EE in SRR1, |
@@ -239,17 +300,30 @@ masked_interrupt: | |||
239 | rotldi r10,r10,16 | 300 | rotldi r10,r10,16 |
240 | mtspr SPRN_SRR1,r10 | 301 | mtspr SPRN_SRR1,r10 |
241 | ld r10,PACA_EXGEN+EX_R10(r13) | 302 | ld r10,PACA_EXGEN+EX_R10(r13) |
242 | mfspr r13,SPRN_SPRG_SCRATCH0 | 303 | GET_SCRATCH0(r13) |
243 | rfid | 304 | rfid |
244 | b . | 305 | b . |
245 | 306 | ||
307 | masked_Hinterrupt: | ||
308 | stb r10,PACAHARDIRQEN(r13) | ||
309 | mtcrf 0x80,r9 | ||
310 | ld r9,PACA_EXGEN+EX_R9(r13) | ||
311 | mfspr r10,SPRN_HSRR1 | ||
312 | rldicl r10,r10,48,1 /* clear MSR_EE */ | ||
313 | rotldi r10,r10,16 | ||
314 | mtspr SPRN_HSRR1,r10 | ||
315 | ld r10,PACA_EXGEN+EX_R10(r13) | ||
316 | GET_SCRATCH0(r13) | ||
317 | hrfid | ||
318 | b . | ||
319 | |||
246 | .align 7 | 320 | .align 7 |
247 | do_stab_bolted_pSeries: | 321 | do_stab_bolted_pSeries: |
248 | std r11,PACA_EXSLB+EX_R11(r13) | 322 | std r11,PACA_EXSLB+EX_R11(r13) |
249 | std r12,PACA_EXSLB+EX_R12(r13) | 323 | std r12,PACA_EXSLB+EX_R12(r13) |
250 | mfspr r10,SPRN_SPRG_SCRATCH0 | 324 | GET_SCRATCH0(r10) |
251 | std r10,PACA_EXSLB+EX_R13(r13) | 325 | std r10,PACA_EXSLB+EX_R13(r13) |
252 | EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted) | 326 | EXCEPTION_PROLOG_PSERIES_1(.do_stab_bolted, EXC_STD) |
253 | 327 | ||
254 | #ifdef CONFIG_PPC_PSERIES | 328 | #ifdef CONFIG_PPC_PSERIES |
255 | /* | 329 | /* |
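[Editor's note] The new masked_Hinterrupt above (like the existing masked_interrupt) clears MSR_EE in the saved HSRR1 with a rotate/mask/rotate sequence instead of loading a mask constant. A small self-contained check of what rldicl r10,r10,48,1 followed by rotldi r10,r10,16 computes: it is simply value & ~MSR_EE, verified here on an example MSR image (MSR_EE is defined locally for the demo; the real define lives in asm/reg.h).

	#include <stdio.h>
	#include <stdint.h>

	#define MSR_EE	UINT64_C(0x8000)	/* external interrupt enable */

	static uint64_t clear_ee(uint64_t msr)
	{
		uint64_t v = (msr << 48) | (msr >> 16);	/* rotate left 48 */
		v &= ~(UINT64_C(1) << 63);		/* rldicl ...,1: drop IBM bit 0 */
		return (v << 16) | (v >> 48);		/* rotldi 16 back */
	}

	int main(void)
	{
		uint64_t msr = UINT64_C(0x8000000000009032);	/* example image */

		printf("rotate trick: %#llx, plain mask: %#llx\n",
		       (unsigned long long)clear_ee(msr),
		       (unsigned long long)(msr & ~MSR_EE));
		return 0;
	}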
@@ -259,15 +333,15 @@ do_stab_bolted_pSeries: | |||
259 | .align 7 | 333 | .align 7 |
260 | system_reset_fwnmi: | 334 | system_reset_fwnmi: |
261 | HMT_MEDIUM | 335 | HMT_MEDIUM |
262 | mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ | 336 | SET_SCRATCH0(r13) /* save r13 */ |
263 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) | 337 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD) |
264 | 338 | ||
265 | .globl machine_check_fwnmi | 339 | .globl machine_check_fwnmi |
266 | .align 7 | 340 | .align 7 |
267 | machine_check_fwnmi: | 341 | machine_check_fwnmi: |
268 | HMT_MEDIUM | 342 | HMT_MEDIUM |
269 | mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */ | 343 | SET_SCRATCH0(r13) /* save r13 */ |
270 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common) | 344 | EXCEPTION_PROLOG_PSERIES(PACA_EXMC, machine_check_common, EXC_STD) |
271 | 345 | ||
272 | #endif /* CONFIG_PPC_PSERIES */ | 346 | #endif /* CONFIG_PPC_PSERIES */ |
273 | 347 | ||
@@ -281,7 +355,7 @@ slb_miss_user_pseries: | |||
281 | std r10,PACA_EXGEN+EX_R10(r13) | 355 | std r10,PACA_EXGEN+EX_R10(r13) |
282 | std r11,PACA_EXGEN+EX_R11(r13) | 356 | std r11,PACA_EXGEN+EX_R11(r13) |
283 | std r12,PACA_EXGEN+EX_R12(r13) | 357 | std r12,PACA_EXGEN+EX_R12(r13) |
284 | mfspr r10,SPRG_SCRATCH0 | 358 | GET_SCRATCH0(r10) |
285 | ld r11,PACA_EXSLB+EX_R9(r13) | 359 | ld r11,PACA_EXSLB+EX_R9(r13) |
286 | ld r12,PACA_EXSLB+EX_R3(r13) | 360 | ld r12,PACA_EXSLB+EX_R3(r13) |
287 | std r10,PACA_EXGEN+EX_R13(r13) | 361 | std r10,PACA_EXGEN+EX_R13(r13) |
@@ -299,6 +373,12 @@ slb_miss_user_pseries: | |||
299 | b . /* prevent spec. execution */ | 373 | b . /* prevent spec. execution */ |
300 | #endif /* __DISABLED__ */ | 374 | #endif /* __DISABLED__ */ |
301 | 375 | ||
376 | /* KVM's trampoline code needs to be close to the interrupt handlers */ | ||
377 | |||
378 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER | ||
379 | #include "../kvm/book3s_rmhandlers.S" | ||
380 | #endif | ||
381 | |||
302 | .align 7 | 382 | .align 7 |
303 | .globl __end_interrupts | 383 | .globl __end_interrupts |
304 | __end_interrupts: | 384 | __end_interrupts: |
@@ -335,6 +415,8 @@ machine_check_common: | |||
335 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) | 415 | STD_EXCEPTION_COMMON(0xb00, trap_0b, .unknown_exception) |
336 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) | 416 | STD_EXCEPTION_COMMON(0xd00, single_step, .single_step_exception) |
337 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) | 417 | STD_EXCEPTION_COMMON(0xe00, trap_0e, .unknown_exception) |
418 | STD_EXCEPTION_COMMON(0xe40, emulation_assist, .program_check_exception) | ||
419 | STD_EXCEPTION_COMMON(0xe60, hmi_exception, .unknown_exception) | ||
338 | STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) | 420 | STD_EXCEPTION_COMMON_IDLE(0xf00, performance_monitor, .performance_monitor_exception) |
339 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) | 421 | STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, .instruction_breakpoint_exception) |
340 | #ifdef CONFIG_ALTIVEC | 422 | #ifdef CONFIG_ALTIVEC |
@@ -379,9 +461,24 @@ bad_stack: | |||
379 | std r12,_XER(r1) | 461 | std r12,_XER(r1) |
380 | SAVE_GPR(0,r1) | 462 | SAVE_GPR(0,r1) |
381 | SAVE_GPR(2,r1) | 463 | SAVE_GPR(2,r1) |
382 | SAVE_4GPRS(3,r1) | 464 | ld r10,EX_R3(r3) |
383 | SAVE_2GPRS(7,r1) | 465 | std r10,GPR3(r1) |
384 | SAVE_10GPRS(12,r1) | 466 | SAVE_GPR(4,r1) |
467 | SAVE_4GPRS(5,r1) | ||
468 | ld r9,EX_R9(r3) | ||
469 | ld r10,EX_R10(r3) | ||
470 | SAVE_2GPRS(9,r1) | ||
471 | ld r9,EX_R11(r3) | ||
472 | ld r10,EX_R12(r3) | ||
473 | ld r11,EX_R13(r3) | ||
474 | std r9,GPR11(r1) | ||
475 | std r10,GPR12(r1) | ||
476 | std r11,GPR13(r1) | ||
477 | BEGIN_FTR_SECTION | ||
478 | ld r10,EX_CFAR(r3) | ||
479 | std r10,ORIG_GPR3(r1) | ||
480 | END_FTR_SECTION_IFSET(CPU_FTR_CFAR) | ||
481 | SAVE_8GPRS(14,r1) | ||
385 | SAVE_10GPRS(22,r1) | 482 | SAVE_10GPRS(22,r1) |
386 | lhz r12,PACA_TRAP_SAVE(r13) | 483 | lhz r12,PACA_TRAP_SAVE(r13) |
387 | std r12,_TRAP(r1) | 484 | std r12,_TRAP(r1) |
@@ -390,6 +487,9 @@ bad_stack: | |||
390 | li r12,0 | 487 | li r12,0 |
391 | std r12,0(r11) | 488 | std r12,0(r11) |
392 | ld r2,PACATOC(r13) | 489 | ld r2,PACATOC(r13) |
490 | ld r11,exception_marker@toc(r2) | ||
491 | std r12,RESULT(r1) | ||
492 | std r11,STACK_FRAME_OVERHEAD-16(r1) | ||
393 | 1: addi r3,r1,STACK_FRAME_OVERHEAD | 493 | 1: addi r3,r1,STACK_FRAME_OVERHEAD |
394 | bl .kernel_bad_stack | 494 | bl .kernel_bad_stack |
395 | b 1b | 495 | b 1b |
@@ -412,6 +512,19 @@ data_access_common: | |||
412 | li r5,0x300 | 512 | li r5,0x300 |
413 | b .do_hash_page /* Try to handle as hpte fault */ | 513 | b .do_hash_page /* Try to handle as hpte fault */ |
414 | 514 | ||
515 | .align 7 | ||
516 | .globl h_data_storage_common | ||
517 | h_data_storage_common: | ||
518 | mfspr r10,SPRN_HDAR | ||
519 | std r10,PACA_EXGEN+EX_DAR(r13) | ||
520 | mfspr r10,SPRN_HDSISR | ||
521 | stw r10,PACA_EXGEN+EX_DSISR(r13) | ||
522 | EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN) | ||
523 | bl .save_nvgprs | ||
524 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
525 | bl .unknown_exception | ||
526 | b .ret_from_except | ||
527 | |||
415 | .align 7 | 528 | .align 7 |
416 | .globl instruction_access_common | 529 | .globl instruction_access_common |
417 | instruction_access_common: | 530 | instruction_access_common: |
@@ -421,6 +534,8 @@ instruction_access_common: | |||
421 | li r5,0x400 | 534 | li r5,0x400 |
422 | b .do_hash_page /* Try to handle as hpte fault */ | 535 | b .do_hash_page /* Try to handle as hpte fault */ |
423 | 536 | ||
537 | STD_EXCEPTION_COMMON(0xe20, h_instr_storage, .unknown_exception) | ||
538 | |||
424 | /* | 539 | /* |
425 | * Here is the common SLB miss user that is used when going to virtual | 540 | * Here is the common SLB miss user that is used when going to virtual |
426 | * mode for SLB misses, that is currently not used | 541 | * mode for SLB misses, that is currently not used |
@@ -743,7 +858,7 @@ _STATIC(do_hash_page) | |||
743 | BEGIN_FTR_SECTION | 858 | BEGIN_FTR_SECTION |
744 | andis. r0,r4,0x0020 /* Is it a segment table fault? */ | 859 | andis. r0,r4,0x0020 /* Is it a segment table fault? */ |
745 | bne- do_ste_alloc /* If so handle it */ | 860 | bne- do_ste_alloc /* If so handle it */ |
746 | END_FTR_SECTION_IFCLR(CPU_FTR_SLB) | 861 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB) |
747 | 862 | ||
748 | clrrdi r11,r1,THREAD_SHIFT | 863 | clrrdi r11,r1,THREAD_SHIFT |
749 | lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ | 864 | lwz r0,TI_PREEMPT(r11) /* If we're in an "NMI" */ |
@@ -818,12 +933,12 @@ END_FW_FTR_SECTION_IFCLR(FW_FEATURE_ISERIES) | |||
818 | 933 | ||
819 | /* | 934 | /* |
820 | * hash_page couldn't handle it, set soft interrupt enable back | 935 | * hash_page couldn't handle it, set soft interrupt enable back |
821 | * to what it was before the trap. Note that .raw_local_irq_restore | 936 | * to what it was before the trap. Note that .arch_local_irq_restore |
822 | * handles any interrupts pending at this point. | 937 | * handles any interrupts pending at this point. |
823 | */ | 938 | */ |
824 | ld r3,SOFTE(r1) | 939 | ld r3,SOFTE(r1) |
825 | TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) | 940 | TRACE_AND_RESTORE_IRQ_PARTIAL(r3, 11f) |
826 | bl .raw_local_irq_restore | 941 | bl .arch_local_irq_restore |
827 | b 11f | 942 | b 11f |
828 | 943 | ||
829 | /* We have a data breakpoint exception - handle it */ | 944 | /* We have a data breakpoint exception - handle it */ |
@@ -970,20 +1085,6 @@ _GLOBAL(do_stab_bolted) | |||
970 | rfid | 1085 | rfid |
971 | b . /* prevent speculative execution */ | 1086 | b . /* prevent speculative execution */ |
972 | 1087 | ||
973 | /* | ||
974 | * Space for CPU0's segment table. | ||
975 | * | ||
976 | * On iSeries, the hypervisor must fill in at least one entry before | ||
977 | * we get control (with relocate on). The address is given to the hv | ||
978 | * as a page number (see xLparMap below), so this must be at a | ||
979 | * fixed address (the linker can't compute (u64)&initial_stab >> | ||
980 | * PAGE_SHIFT). | ||
981 | */ | ||
982 | . = STAB0_OFFSET /* 0x6000 */ | ||
983 | .globl initial_stab | ||
984 | initial_stab: | ||
985 | .space 4096 | ||
986 | |||
987 | #ifdef CONFIG_PPC_PSERIES | 1088 | #ifdef CONFIG_PPC_PSERIES |
988 | /* | 1089 | /* |
989 | * Data area reserved for FWNMI option. | 1090 | * Data area reserved for FWNMI option. |
@@ -1020,3 +1121,17 @@ xLparMap: | |||
1020 | #ifdef CONFIG_PPC_PSERIES | 1121 | #ifdef CONFIG_PPC_PSERIES |
1021 | . = 0x8000 | 1122 | . = 0x8000 |
1022 | #endif /* CONFIG_PPC_PSERIES */ | 1123 | #endif /* CONFIG_PPC_PSERIES */ |
1124 | |||
1125 | /* | ||
1126 | * Space for CPU0's segment table. | ||
1127 | * | ||
1128 | * On iSeries, the hypervisor must fill in at least one entry before | ||
1129 | * we get control (with relocate on). The address is given to the hv | ||
1130 | * as a page number (see xLparMap above), so this must be at a | ||
1131 | * fixed address (the linker can't compute (u64)&initial_stab >> | ||
1132 | * PAGE_SHIFT). | ||
1133 | */ | ||
1134 | . = STAB0_OFFSET /* 0x8000 */ | ||
1135 | .globl initial_stab | ||
1136 | initial_stab: | ||
1137 | .space 4096 | ||
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S index fc8f5b14019c..de369558bf0a 100644 --- a/arch/powerpc/kernel/fpu.S +++ b/arch/powerpc/kernel/fpu.S | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <asm/thread_info.h> | 23 | #include <asm/thread_info.h> |
24 | #include <asm/ppc_asm.h> | 24 | #include <asm/ppc_asm.h> |
25 | #include <asm/asm-offsets.h> | 25 | #include <asm/asm-offsets.h> |
26 | #include <asm/ptrace.h> | ||
26 | 27 | ||
27 | #ifdef CONFIG_VSX | 28 | #ifdef CONFIG_VSX |
28 | #define REST_32FPVSRS(n,c,base) \ | 29 | #define REST_32FPVSRS(n,c,base) \ |
@@ -163,24 +164,14 @@ END_FTR_SECTION_IFSET(CPU_FTR_VSX) | |||
163 | /* | 164 | /* |
164 | * These are used in the alignment trap handler when emulating | 165 | * These are used in the alignment trap handler when emulating |
165 | * single-precision loads and stores. | 166 | * single-precision loads and stores. |
166 | * We restore and save the fpscr so the task gets the same result | ||
167 | * and exceptions as if the cpu had performed the load or store. | ||
168 | */ | 167 | */ |
169 | 168 | ||
170 | _GLOBAL(cvt_fd) | 169 | _GLOBAL(cvt_fd) |
171 | lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */ | ||
172 | MTFSF_L(0) | ||
173 | lfs 0,0(r3) | 170 | lfs 0,0(r3) |
174 | stfd 0,0(r4) | 171 | stfd 0,0(r4) |
175 | mffs 0 | ||
176 | stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */ | ||
177 | blr | 172 | blr |
178 | 173 | ||
179 | _GLOBAL(cvt_df) | 174 | _GLOBAL(cvt_df) |
180 | lfd 0,THREAD_FPSCR(r5) /* load up fpscr value */ | ||
181 | MTFSF_L(0) | ||
182 | lfd 0,0(r3) | 175 | lfd 0,0(r3) |
183 | stfs 0,0(r4) | 176 | stfs 0,0(r4) |
184 | mffs 0 | ||
185 | stfd 0,THREAD_FPSCR(r5) /* save new fpscr value */ | ||
186 | blr | 177 | blr |
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index ce1f3e44c24f..bf99cfa6bbfe 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/cacheflush.h> | 22 | #include <asm/cacheflush.h> |
23 | #include <asm/code-patching.h> | 23 | #include <asm/code-patching.h> |
24 | #include <asm/ftrace.h> | 24 | #include <asm/ftrace.h> |
25 | #include <asm/syscall.h> | ||
25 | 26 | ||
26 | 27 | ||
27 | #ifdef CONFIG_DYNAMIC_FTRACE | 28 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -600,3 +601,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
600 | } | 601 | } |
601 | } | 602 | } |
602 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 603 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
604 | |||
605 | #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) | ||
606 | unsigned long __init arch_syscall_addr(int nr) | ||
607 | { | ||
608 | return sys_call_table[nr*2]; | ||
609 | } | ||
610 | #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */ | ||
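[Editor's note] arch_syscall_addr() above indexes with nr*2 because the 64-bit sys_call_table carries two entries per syscall, the native 64-bit handler followed by the 32-bit/compat one. A hedged illustration of that layout; the compat accessor is hypothetical and only spelled out to make the interleaving concrete.

	/*
	 * Schematic PPC64 layout (systbl.S emits pairs of entries):
	 *
	 *	index 2*nr     -> 64-bit handler for syscall nr
	 *	index 2*nr + 1 -> 32-bit/compat handler for syscall nr
	 */
	extern const unsigned long sys_call_table[];

	static unsigned long arch_compat_syscall_addr(int nr)	/* hypothetical */
	{
		return sys_call_table[nr * 2 + 1];
	}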
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 98c4b29a56f4..ba250d505e07 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S | |||
@@ -805,19 +805,6 @@ _ENTRY(copy_and_flush) | |||
805 | blr | 805 | blr |
806 | 806 | ||
807 | #ifdef CONFIG_SMP | 807 | #ifdef CONFIG_SMP |
808 | #ifdef CONFIG_GEMINI | ||
809 | .globl __secondary_start_gemini | ||
810 | __secondary_start_gemini: | ||
811 | mfspr r4,SPRN_HID0 | ||
812 | ori r4,r4,HID0_ICFI | ||
813 | li r3,0 | ||
814 | ori r3,r3,HID0_ICE | ||
815 | andc r4,r4,r3 | ||
816 | mtspr SPRN_HID0,r4 | ||
817 | sync | ||
818 | b __secondary_start | ||
819 | #endif /* CONFIG_GEMINI */ | ||
820 | |||
821 | .globl __secondary_start_mpc86xx | 808 | .globl __secondary_start_mpc86xx |
822 | __secondary_start_mpc86xx: | 809 | __secondary_start_mpc86xx: |
823 | mfspr r3, SPRN_PIR | 810 | mfspr r3, SPRN_PIR |
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S index a90625f9b485..a91626d87fc9 100644 --- a/arch/powerpc/kernel/head_40x.S +++ b/arch/powerpc/kernel/head_40x.S | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <asm/thread_info.h> | 40 | #include <asm/thread_info.h> |
41 | #include <asm/ppc_asm.h> | 41 | #include <asm/ppc_asm.h> |
42 | #include <asm/asm-offsets.h> | 42 | #include <asm/asm-offsets.h> |
43 | #include <asm/ptrace.h> | ||
43 | 44 | ||
44 | /* As with the other PowerPC ports, it is expected that when code | 45 | /* As with the other PowerPC ports, it is expected that when code |
45 | * execution begins here, the following registers contain valid, yet | 46 | * execution begins here, the following registers contain valid, yet |
@@ -765,7 +766,7 @@ DataAccess: | |||
765 | * miss get to this point to load the TLB. | 766 | * miss get to this point to load the TLB. |
766 | * r10 - TLB_TAG value | 767 | * r10 - TLB_TAG value |
767 | * r11 - Linux PTE | 768 | * r11 - Linux PTE |
768 | * r12, r9 - avilable to use | 769 | * r12, r9 - available to use |
769 | * PID - loaded with proper value when we get here | 770 | * PID - loaded with proper value when we get here |
770 | * Upon exit, we reload everything and RFI. | 771 | * Upon exit, we reload everything and RFI. |
771 | * Actually, it will fit now, but oh well.....a common place | 772 | * Actually, it will fit now, but oh well.....a common place |
@@ -923,11 +924,7 @@ initial_mmu: | |||
923 | mtspr SPRN_PID,r0 | 924 | mtspr SPRN_PID,r0 |
924 | sync | 925 | sync |
925 | 926 | ||
926 | /* Configure and load two entries into TLB slots 62 and 63. | 927 | /* Configure and load one entry into TLB slots 63 */ |
927 | * In case we are pinning TLBs, these are reserved in by the | ||
928 | * other TLB functions. If not reserving, then it doesn't | ||
929 | * matter where they are loaded. | ||
930 | */ | ||
931 | clrrwi r4,r4,10 /* Mask off the real page number */ | 928 | clrrwi r4,r4,10 /* Mask off the real page number */ |
932 | ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ | 929 | ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */ |
933 | 930 | ||
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S index 562305b40a8e..5e12b741ba5f 100644 --- a/arch/powerpc/kernel/head_44x.S +++ b/arch/powerpc/kernel/head_44x.S | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <asm/thread_info.h> | 37 | #include <asm/thread_info.h> |
38 | #include <asm/ppc_asm.h> | 38 | #include <asm/ppc_asm.h> |
39 | #include <asm/asm-offsets.h> | 39 | #include <asm/asm-offsets.h> |
40 | #include <asm/ptrace.h> | ||
40 | #include <asm/synch.h> | 41 | #include <asm/synch.h> |
41 | #include "head_booke.h" | 42 | #include "head_booke.h" |
42 | 43 | ||
@@ -177,7 +178,7 @@ interrupt_base: | |||
177 | NORMAL_EXCEPTION_PROLOG | 178 | NORMAL_EXCEPTION_PROLOG |
178 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) | 179 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) |
179 | 180 | ||
180 | /* Auxillary Processor Unavailable Interrupt */ | 181 | /* Auxiliary Processor Unavailable Interrupt */ |
181 | EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) | 182 | EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) |
182 | 183 | ||
183 | /* Decrementer Interrupt */ | 184 | /* Decrementer Interrupt */ |
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index c571cd3c1453..ba504099844a 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S | |||
@@ -38,8 +38,9 @@ | |||
38 | #include <asm/page_64.h> | 38 | #include <asm/page_64.h> |
39 | #include <asm/irqflags.h> | 39 | #include <asm/irqflags.h> |
40 | #include <asm/kvm_book3s_asm.h> | 40 | #include <asm/kvm_book3s_asm.h> |
41 | #include <asm/ptrace.h> | ||
41 | 42 | ||
42 | /* The physical memory is layed out such that the secondary processor | 43 | /* The physical memory is laid out such that the secondary processor |
43 | * spin code sits at 0x0000...0x00ff. On server, the vectors follow | 44 | * spin code sits at 0x0000...0x00ff. On server, the vectors follow |
44 | * using the layout described in exceptions-64s.S | 45 | * using the layout described in exceptions-64s.S |
45 | */ | 46 | */ |
@@ -96,7 +97,7 @@ __secondary_hold_acknowledge: | |||
96 | .llong hvReleaseData-KERNELBASE | 97 | .llong hvReleaseData-KERNELBASE |
97 | #endif /* CONFIG_PPC_ISERIES */ | 98 | #endif /* CONFIG_PPC_ISERIES */ |
98 | 99 | ||
99 | #ifdef CONFIG_CRASH_DUMP | 100 | #ifdef CONFIG_RELOCATABLE |
100 | /* This flag is set to 1 by a loader if the kernel should run | 101 | /* This flag is set to 1 by a loader if the kernel should run |
101 | * at the loaded address instead of the linked address. This | 102 | * at the loaded address instead of the linked address. This |
102 | * is used by kexec-tools to keep the the kdump kernel in the | 103 | * is used by kexec-tools to keep the the kdump kernel in the |
@@ -146,6 +147,8 @@ __secondary_hold: | |||
146 | mtctr r4 | 147 | mtctr r4 |
147 | mr r3,r24 | 148 | mr r3,r24 |
148 | li r4,0 | 149 | li r4,0 |
150 | /* Make sure that patched code is visible */ | ||
151 | isync | ||
149 | bctr | 152 | bctr |
150 | #else | 153 | #else |
151 | BUG_OPCODE | 154 | BUG_OPCODE |
@@ -166,12 +169,6 @@ exception_marker: | |||
166 | #include "exceptions-64s.S" | 169 | #include "exceptions-64s.S" |
167 | #endif | 170 | #endif |
168 | 171 | ||
169 | /* KVM trampoline code needs to be close to the interrupt handlers */ | ||
170 | |||
171 | #ifdef CONFIG_KVM_BOOK3S_64_HANDLER | ||
172 | #include "../kvm/book3s_rmhandlers.S" | ||
173 | #endif | ||
174 | |||
175 | _GLOBAL(generic_secondary_thread_init) | 172 | _GLOBAL(generic_secondary_thread_init) |
176 | mr r24,r3 | 173 | mr r24,r3 |
177 | 174 | ||
@@ -221,19 +218,25 @@ generic_secondary_common_init: | |||
221 | */ | 218 | */ |
222 | LOAD_REG_ADDR(r13, paca) /* Load paca pointer */ | 219 | LOAD_REG_ADDR(r13, paca) /* Load paca pointer */ |
223 | ld r13,0(r13) /* Get base vaddr of paca array */ | 220 | ld r13,0(r13) /* Get base vaddr of paca array */ |
221 | #ifndef CONFIG_SMP | ||
222 | addi r13,r13,PACA_SIZE /* know r13 if used accidentally */ | ||
223 | b .kexec_wait /* wait for next kernel if !SMP */ | ||
224 | #else | ||
225 | LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */ | ||
226 | lwz r7,0(r7) /* also the max paca allocated */ | ||
224 | li r5,0 /* logical cpu id */ | 227 | li r5,0 /* logical cpu id */ |
225 | 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ | 228 | 1: lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ |
226 | cmpw r6,r24 /* Compare to our id */ | 229 | cmpw r6,r24 /* Compare to our id */ |
227 | beq 2f | 230 | beq 2f |
228 | addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ | 231 | addi r13,r13,PACA_SIZE /* Loop to next PACA on miss */ |
229 | addi r5,r5,1 | 232 | addi r5,r5,1 |
230 | cmpwi r5,NR_CPUS | 233 | cmpw r5,r7 /* Check if more pacas exist */ |
231 | blt 1b | 234 | blt 1b |
232 | 235 | ||
233 | mr r3,r24 /* not found, copy phys to r3 */ | 236 | mr r3,r24 /* not found, copy phys to r3 */ |
234 | b .kexec_wait /* next kernel might do better */ | 237 | b .kexec_wait /* next kernel might do better */ |
235 | 238 | ||
236 | 2: mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG */ | 239 | 2: SET_PACA(r13) |
237 | #ifdef CONFIG_PPC_BOOK3E | 240 | #ifdef CONFIG_PPC_BOOK3E |
238 | addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */ | 241 | addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */ |
239 | mtspr SPRN_SPRG_TLB_EXFRAME,r12 | 242 | mtspr SPRN_SPRG_TLB_EXFRAME,r12 |
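[Editor's note] The reworked secondary-CPU search above now walks only the nr_cpu_ids pacas that were actually allocated (rather than a fixed NR_CPUS), falling through to kexec_wait when the hardware id is not found. A hedged C restatement of that search; the helper name is illustrative, while paca, nr_cpu_ids and hw_cpu_id are the symbols the assembly consults.

	static int find_logical_cpu(unsigned int hw_id)
	{
		unsigned int cpu;

		for (cpu = 0; cpu < nr_cpu_ids; cpu++)
			if (paca[cpu].hw_cpu_id == hw_id)
				return cpu;	/* logical id, as left in r24 */
		return -1;			/* not found: fall back to kexec_wait */
	}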
@@ -241,34 +244,39 @@ generic_secondary_common_init: | |||
241 | 244 | ||
242 | /* From now on, r24 is expected to be logical cpuid */ | 245 | /* From now on, r24 is expected to be logical cpuid */ |
243 | mr r24,r5 | 246 | mr r24,r5 |
244 | 3: HMT_LOW | ||
245 | lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ | ||
246 | /* start. */ | ||
247 | |||
248 | #ifndef CONFIG_SMP | ||
249 | b 3b /* Never go on non-SMP */ | ||
250 | #else | ||
251 | cmpwi 0,r23,0 | ||
252 | beq 3b /* Loop until told to go */ | ||
253 | |||
254 | sync /* order paca.run and cur_cpu_spec */ | ||
255 | 247 | ||
256 | /* See if we need to call a cpu state restore handler */ | 248 | /* See if we need to call a cpu state restore handler */ |
257 | LOAD_REG_ADDR(r23, cur_cpu_spec) | 249 | LOAD_REG_ADDR(r23, cur_cpu_spec) |
258 | ld r23,0(r23) | 250 | ld r23,0(r23) |
259 | ld r23,CPU_SPEC_RESTORE(r23) | 251 | ld r23,CPU_SPEC_RESTORE(r23) |
260 | cmpdi 0,r23,0 | 252 | cmpdi 0,r23,0 |
261 | beq 4f | 253 | beq 3f |
262 | ld r23,0(r23) | 254 | ld r23,0(r23) |
263 | mtctr r23 | 255 | mtctr r23 |
264 | bctrl | 256 | bctrl |
265 | 257 | ||
266 | 4: /* Create a temp kernel stack for use before relocation is on. */ | 258 | 3: LOAD_REG_ADDR(r3, boot_cpu_count) /* Decrement boot_cpu_count */ |
259 | lwarx r4,0,r3 | ||
260 | subi r4,r4,1 | ||
261 | stwcx. r4,0,r3 | ||
262 | bne 3b | ||
263 | isync | ||
264 | |||
265 | 4: HMT_LOW | ||
266 | lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ | ||
267 | /* start. */ | ||
268 | cmpwi 0,r23,0 | ||
269 | beq 4b /* Loop until told to go */ | ||
270 | |||
271 | sync /* order paca.run and cur_cpu_spec */ | ||
272 | isync /* In case code patching happened */ | ||
273 | |||
274 | /* Create a temp kernel stack for use before relocation is on. */ | ||
267 | ld r1,PACAEMERGSP(r13) | 275 | ld r1,PACAEMERGSP(r13) |
268 | subi r1,r1,STACK_FRAME_OVERHEAD | 276 | subi r1,r1,STACK_FRAME_OVERHEAD |
269 | 277 | ||
270 | b __secondary_start | 278 | b __secondary_start |
271 | #endif | 279 | #endif /* SMP */ |
272 | 280 | ||
273 | /* | 281 | /* |
274 | * Turn the MMU off. | 282 | * Turn the MMU off. |
@@ -390,12 +398,10 @@ _STATIC(__after_prom_start) | |||
390 | /* process relocations for the final address of the kernel */ | 398 | /* process relocations for the final address of the kernel */ |
391 | lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ | 399 | lis r25,PAGE_OFFSET@highest /* compute virtual base of kernel */ |
392 | sldi r25,r25,32 | 400 | sldi r25,r25,32 |
393 | #ifdef CONFIG_CRASH_DUMP | ||
394 | lwz r7,__run_at_load-_stext(r26) | 401 | lwz r7,__run_at_load-_stext(r26) |
395 | cmplwi cr0,r7,1 /* kdump kernel ? - stay where we are */ | 402 | cmplwi cr0,r7,1 /* flagged to stay where we are ? */ |
396 | bne 1f | 403 | bne 1f |
397 | add r25,r25,r26 | 404 | add r25,r25,r26 |
398 | #endif | ||
399 | 1: mr r3,r25 | 405 | 1: mr r3,r25 |
400 | bl .relocate | 406 | bl .relocate |
401 | #endif | 407 | #endif |
@@ -541,7 +547,14 @@ _GLOBAL(pmac_secondary_start) | |||
541 | ld r4,0(r4) /* Get base vaddr of paca array */ | 547 | ld r4,0(r4) /* Get base vaddr of paca array */ |
542 | mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ | 548 | mulli r13,r24,PACA_SIZE /* Calculate vaddr of right paca */ |
543 | add r13,r13,r4 /* for this processor. */ | 549 | add r13,r13,r4 /* for this processor. */ |
544 | mtspr SPRN_SPRG_PACA,r13 /* Save vaddr of paca in an SPRG*/ | 550 | SET_PACA(r13) /* Save vaddr of paca in an SPRG*/ |
551 | |||
552 | /* Mark interrupts soft and hard disabled (they might be enabled | ||
553 | * in the PACA when doing hotplug) | ||
554 | */ | ||
555 | li r0,0 | ||
556 | stb r0,PACASOFTIRQEN(r13) | ||
557 | stb r0,PACAHARDIRQEN(r13) | ||
545 | 558 | ||
546 | /* Create a temp kernel stack for use before relocation is on. */ | 559 | /* Create a temp kernel stack for use before relocation is on. */ |
547 | ld r1,PACAEMERGSP(r13) | 560 | ld r1,PACAEMERGSP(r13) |
@@ -645,7 +658,7 @@ _GLOBAL(enable_64b_mode) | |||
645 | oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ | 658 | oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ |
646 | mtmsr r11 | 659 | mtmsr r11 |
647 | #else /* CONFIG_PPC_BOOK3E */ | 660 | #else /* CONFIG_PPC_BOOK3E */ |
648 | li r12,(MSR_SF | MSR_ISF)@highest | 661 | li r12,(MSR_64BIT | MSR_ISF)@highest |
649 | sldi r12,r12,48 | 662 | sldi r12,r12,48 |
650 | or r11,r11,r12 | 663 | or r11,r11,r12 |
651 | mtmsrd r11 | 664 | mtmsrd r11 |
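
A rough C rendering of the secondary-CPU paca search in the head_64.S hunks above, which is now bounded by nr_cpu_ids (the number of pacas actually allocated) rather than NR_CPUS, with a miss falling through to kexec_wait. This is an illustrative stand-in, not kernel code; the struct, the hardware-id values and the helper name are invented for the sketch:

    #include <stdio.h>

    struct paca_struct { int hw_cpu_id; };          /* stand-in for the real paca */

    #define NR_CPUS 32
    static struct paca_struct paca[NR_CPUS];
    static int nr_cpu_ids = 4;                      /* only four pacas were allocated */

    /* Return the logical CPU whose paca matches hw_id, or -1 (the kexec_wait case). */
    static int secondary_find_paca(int hw_id)
    {
        for (int cpu = 0; cpu < nr_cpu_ids; cpu++)  /* bounded by nr_cpu_ids, not NR_CPUS */
            if (paca[cpu].hw_cpu_id == hw_id)
                return cpu;
        return -1;                                  /* no paca: wait for the next kernel */
    }

    int main(void)
    {
        for (int i = 0; i < nr_cpu_ids; i++)
            paca[i].hw_cpu_id = i * 2;              /* pretend hardware ids are 0,2,4,6 */
        printf("hw 4 -> logical cpu %d\n", secondary_find_paca(4));
        printf("hw 5 -> logical cpu %d\n", secondary_find_paca(5));
        return 0;
    }
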
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S index 1f1a04b5c2a4..1cbf64e6b416 100644 --- a/arch/powerpc/kernel/head_8xx.S +++ b/arch/powerpc/kernel/head_8xx.S | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <asm/thread_info.h> | 29 | #include <asm/thread_info.h> |
30 | #include <asm/ppc_asm.h> | 30 | #include <asm/ppc_asm.h> |
31 | #include <asm/asm-offsets.h> | 31 | #include <asm/asm-offsets.h> |
32 | #include <asm/ptrace.h> | ||
32 | 33 | ||
33 | /* Macro to make the code more readable. */ | 34 | /* Macro to make the code more readable. */ |
34 | #ifdef CONFIG_8xx_CPU6 | 35 | #ifdef CONFIG_8xx_CPU6 |
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S index 4faeba247854..5ecf54cfa7d4 100644 --- a/arch/powerpc/kernel/head_fsl_booke.S +++ b/arch/powerpc/kernel/head_fsl_booke.S | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <asm/ppc_asm.h> | 41 | #include <asm/ppc_asm.h> |
42 | #include <asm/asm-offsets.h> | 42 | #include <asm/asm-offsets.h> |
43 | #include <asm/cache.h> | 43 | #include <asm/cache.h> |
44 | #include <asm/ptrace.h> | ||
44 | #include "head_booke.h" | 45 | #include "head_booke.h" |
45 | 46 | ||
46 | /* As with the other PowerPC ports, it is expected that when code | 47 | /* As with the other PowerPC ports, it is expected that when code |
@@ -152,8 +153,11 @@ _ENTRY(__early_start) | |||
152 | /* Check to see if we're the second processor, and jump | 153 | /* Check to see if we're the second processor, and jump |
153 | * to the secondary_start code if so | 154 | * to the secondary_start code if so |
154 | */ | 155 | */ |
155 | mfspr r24,SPRN_PIR | 156 | lis r24, boot_cpuid@h |
156 | cmpwi r24,0 | 157 | ori r24, r24, boot_cpuid@l |
158 | lwz r24, 0(r24) | ||
159 | cmpwi r24, -1 | ||
160 | mfspr r24,SPRN_PIR | ||
157 | bne __secondary_start | 161 | bne __secondary_start |
158 | #endif | 162 | #endif |
159 | 163 | ||
@@ -175,6 +179,9 @@ _ENTRY(__early_start) | |||
175 | li r0,0 | 179 | li r0,0 |
176 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) | 180 | stwu r0,THREAD_SIZE-STACK_FRAME_OVERHEAD(r1) |
177 | 181 | ||
182 | rlwinm r22,r1,0,0,31-THREAD_SHIFT /* current thread_info */ | ||
183 | stw r24, TI_CPU(r22) | ||
184 | |||
178 | bl early_init | 185 | bl early_init |
179 | 186 | ||
180 | #ifdef CONFIG_RELOCATABLE | 187 | #ifdef CONFIG_RELOCATABLE |
@@ -319,7 +326,7 @@ interrupt_base: | |||
319 | NORMAL_EXCEPTION_PROLOG | 326 | NORMAL_EXCEPTION_PROLOG |
320 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) | 327 | EXC_XFER_EE_LITE(0x0c00, DoSyscall) |
321 | 328 | ||
322 | /* Auxillary Processor Unavailable Interrupt */ | 329 | /* Auxiliary Processor Unavailable Interrupt */ |
323 | EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) | 330 | EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) |
324 | 331 | ||
325 | /* Decrementer Interrupt */ | 332 | /* Decrementer Interrupt */ |
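
The new rlwinm/stw pair in the __early_start hunk above recovers the current thread_info by masking the low THREAD_SHIFT bits off the stack pointer, then records the CPU number at TI_CPU. A hedged C sketch of that masking trick; the THREAD_SIZE value and the one-field struct below are assumptions for illustration, not the kernel's definitions:

    #include <stdio.h>
    #include <stdint.h>

    #define THREAD_SIZE (16 * 1024)                 /* assumed power-of-two stack size */

    struct thread_info { int cpu; };                /* only the field the sketch needs */

    /* thread_info sits at the base of the stack: clear the low bits of the SP. */
    static struct thread_info *current_thread_info(uintptr_t sp)
    {
        return (struct thread_info *)(sp & ~(uintptr_t)(THREAD_SIZE - 1));
    }

    int main(void)
    {
        static _Alignas(THREAD_SIZE) unsigned char stack[THREAD_SIZE];
        uintptr_t sp = (uintptr_t)stack + THREAD_SIZE - 64;   /* somewhere near the top */

        struct thread_info *ti = current_thread_info(sp);
        ti->cpu = 1;                                /* analogous to "stw r24, TI_CPU(r22)" */
        printf("thread_info at %p, cpu=%d\n", (void *)ti, ti->cpu);
        return 0;
    }
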
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c index 9b626cfffce1..28581f1ad2c0 100644 --- a/arch/powerpc/kernel/ibmebus.c +++ b/arch/powerpc/kernel/ibmebus.c | |||
@@ -162,13 +162,10 @@ static int ibmebus_create_device(struct device_node *dn) | |||
162 | dev->dev.bus = &ibmebus_bus_type; | 162 | dev->dev.bus = &ibmebus_bus_type; |
163 | dev->dev.archdata.dma_ops = &ibmebus_dma_ops; | 163 | dev->dev.archdata.dma_ops = &ibmebus_dma_ops; |
164 | 164 | ||
165 | ret = of_device_register(dev); | 165 | ret = of_device_add(dev); |
166 | if (ret) { | 166 | if (ret) |
167 | of_device_free(dev); | 167 | platform_device_put(dev); |
168 | return ret; | 168 | return ret; |
169 | } | ||
170 | |||
171 | return 0; | ||
172 | } | 169 | } |
173 | 170 | ||
174 | static int ibmebus_create_devices(const struct of_device_id *matches) | 171 | static int ibmebus_create_devices(const struct of_device_id *matches) |
@@ -204,13 +201,14 @@ int ibmebus_register_driver(struct of_platform_driver *drv) | |||
204 | /* If the driver uses devices that ibmebus doesn't know, add them */ | 201 | /* If the driver uses devices that ibmebus doesn't know, add them */ |
205 | ibmebus_create_devices(drv->driver.of_match_table); | 202 | ibmebus_create_devices(drv->driver.of_match_table); |
206 | 203 | ||
207 | return of_register_driver(drv, &ibmebus_bus_type); | 204 | drv->driver.bus = &ibmebus_bus_type; |
205 | return driver_register(&drv->driver); | ||
208 | } | 206 | } |
209 | EXPORT_SYMBOL(ibmebus_register_driver); | 207 | EXPORT_SYMBOL(ibmebus_register_driver); |
210 | 208 | ||
211 | void ibmebus_unregister_driver(struct of_platform_driver *drv) | 209 | void ibmebus_unregister_driver(struct of_platform_driver *drv) |
212 | { | 210 | { |
213 | of_unregister_driver(drv); | 211 | driver_unregister(&drv->driver); |
214 | } | 212 | } |
215 | EXPORT_SYMBOL(ibmebus_unregister_driver); | 213 | EXPORT_SYMBOL(ibmebus_unregister_driver); |
216 | 214 | ||
@@ -311,15 +309,410 @@ static ssize_t ibmebus_store_remove(struct bus_type *bus, | |||
311 | } | 309 | } |
312 | } | 310 | } |
313 | 311 | ||
312 | |||
314 | static struct bus_attribute ibmebus_bus_attrs[] = { | 313 | static struct bus_attribute ibmebus_bus_attrs[] = { |
315 | __ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe), | 314 | __ATTR(probe, S_IWUSR, NULL, ibmebus_store_probe), |
316 | __ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove), | 315 | __ATTR(remove, S_IWUSR, NULL, ibmebus_store_remove), |
317 | __ATTR_NULL | 316 | __ATTR_NULL |
318 | }; | 317 | }; |
319 | 318 | ||
319 | static int ibmebus_bus_bus_match(struct device *dev, struct device_driver *drv) | ||
320 | { | ||
321 | const struct of_device_id *matches = drv->of_match_table; | ||
322 | |||
323 | if (!matches) | ||
324 | return 0; | ||
325 | |||
326 | return of_match_device(matches, dev) != NULL; | ||
327 | } | ||
328 | |||
329 | static int ibmebus_bus_device_probe(struct device *dev) | ||
330 | { | ||
331 | int error = -ENODEV; | ||
332 | struct of_platform_driver *drv; | ||
333 | struct platform_device *of_dev; | ||
334 | const struct of_device_id *match; | ||
335 | |||
336 | drv = to_of_platform_driver(dev->driver); | ||
337 | of_dev = to_platform_device(dev); | ||
338 | |||
339 | if (!drv->probe) | ||
340 | return error; | ||
341 | |||
342 | of_dev_get(of_dev); | ||
343 | |||
344 | match = of_match_device(drv->driver.of_match_table, dev); | ||
345 | if (match) | ||
346 | error = drv->probe(of_dev, match); | ||
347 | if (error) | ||
348 | of_dev_put(of_dev); | ||
349 | |||
350 | return error; | ||
351 | } | ||
352 | |||
353 | static int ibmebus_bus_device_remove(struct device *dev) | ||
354 | { | ||
355 | struct platform_device *of_dev = to_platform_device(dev); | ||
356 | struct of_platform_driver *drv = to_of_platform_driver(dev->driver); | ||
357 | |||
358 | if (dev->driver && drv->remove) | ||
359 | drv->remove(of_dev); | ||
360 | return 0; | ||
361 | } | ||
362 | |||
363 | static void ibmebus_bus_device_shutdown(struct device *dev) | ||
364 | { | ||
365 | struct platform_device *of_dev = to_platform_device(dev); | ||
366 | struct of_platform_driver *drv = to_of_platform_driver(dev->driver); | ||
367 | |||
368 | if (dev->driver && drv->shutdown) | ||
369 | drv->shutdown(of_dev); | ||
370 | } | ||
371 | |||
372 | /* | ||
373 | * ibmebus_bus_device_attrs | ||
374 | */ | ||
375 | static ssize_t devspec_show(struct device *dev, | ||
376 | struct device_attribute *attr, char *buf) | ||
377 | { | ||
378 | struct platform_device *ofdev; | ||
379 | |||
380 | ofdev = to_platform_device(dev); | ||
381 | return sprintf(buf, "%s\n", ofdev->dev.of_node->full_name); | ||
382 | } | ||
383 | |||
384 | static ssize_t name_show(struct device *dev, | ||
385 | struct device_attribute *attr, char *buf) | ||
386 | { | ||
387 | struct platform_device *ofdev; | ||
388 | |||
389 | ofdev = to_platform_device(dev); | ||
390 | return sprintf(buf, "%s\n", ofdev->dev.of_node->name); | ||
391 | } | ||
392 | |||
393 | static ssize_t modalias_show(struct device *dev, | ||
394 | struct device_attribute *attr, char *buf) | ||
395 | { | ||
396 | ssize_t len = of_device_get_modalias(dev, buf, PAGE_SIZE - 2); | ||
397 | buf[len] = '\n'; | ||
398 | buf[len+1] = 0; | ||
399 | return len+1; | ||
400 | } | ||
401 | |||
402 | struct device_attribute ibmebus_bus_device_attrs[] = { | ||
403 | __ATTR_RO(devspec), | ||
404 | __ATTR_RO(name), | ||
405 | __ATTR_RO(modalias), | ||
406 | __ATTR_NULL | ||
407 | }; | ||
408 | |||
409 | #ifdef CONFIG_PM_SLEEP | ||
410 | static int ibmebus_bus_legacy_suspend(struct device *dev, pm_message_t mesg) | ||
411 | { | ||
412 | struct platform_device *of_dev = to_platform_device(dev); | ||
413 | struct of_platform_driver *drv = to_of_platform_driver(dev->driver); | ||
414 | int ret = 0; | ||
415 | |||
416 | if (dev->driver && drv->suspend) | ||
417 | ret = drv->suspend(of_dev, mesg); | ||
418 | return ret; | ||
419 | } | ||
420 | |||
421 | static int ibmebus_bus_legacy_resume(struct device *dev) | ||
422 | { | ||
423 | struct platform_device *of_dev = to_platform_device(dev); | ||
424 | struct of_platform_driver *drv = to_of_platform_driver(dev->driver); | ||
425 | int ret = 0; | ||
426 | |||
427 | if (dev->driver && drv->resume) | ||
428 | ret = drv->resume(of_dev); | ||
429 | return ret; | ||
430 | } | ||
431 | |||
432 | static int ibmebus_bus_pm_prepare(struct device *dev) | ||
433 | { | ||
434 | struct device_driver *drv = dev->driver; | ||
435 | int ret = 0; | ||
436 | |||
437 | if (drv && drv->pm && drv->pm->prepare) | ||
438 | ret = drv->pm->prepare(dev); | ||
439 | |||
440 | return ret; | ||
441 | } | ||
442 | |||
443 | static void ibmebus_bus_pm_complete(struct device *dev) | ||
444 | { | ||
445 | struct device_driver *drv = dev->driver; | ||
446 | |||
447 | if (drv && drv->pm && drv->pm->complete) | ||
448 | drv->pm->complete(dev); | ||
449 | } | ||
450 | |||
451 | #ifdef CONFIG_SUSPEND | ||
452 | |||
453 | static int ibmebus_bus_pm_suspend(struct device *dev) | ||
454 | { | ||
455 | struct device_driver *drv = dev->driver; | ||
456 | int ret = 0; | ||
457 | |||
458 | if (!drv) | ||
459 | return 0; | ||
460 | |||
461 | if (drv->pm) { | ||
462 | if (drv->pm->suspend) | ||
463 | ret = drv->pm->suspend(dev); | ||
464 | } else { | ||
465 | ret = ibmebus_bus_legacy_suspend(dev, PMSG_SUSPEND); | ||
466 | } | ||
467 | |||
468 | return ret; | ||
469 | } | ||
470 | |||
471 | static int ibmebus_bus_pm_suspend_noirq(struct device *dev) | ||
472 | { | ||
473 | struct device_driver *drv = dev->driver; | ||
474 | int ret = 0; | ||
475 | |||
476 | if (!drv) | ||
477 | return 0; | ||
478 | |||
479 | if (drv->pm) { | ||
480 | if (drv->pm->suspend_noirq) | ||
481 | ret = drv->pm->suspend_noirq(dev); | ||
482 | } | ||
483 | |||
484 | return ret; | ||
485 | } | ||
486 | |||
487 | static int ibmebus_bus_pm_resume(struct device *dev) | ||
488 | { | ||
489 | struct device_driver *drv = dev->driver; | ||
490 | int ret = 0; | ||
491 | |||
492 | if (!drv) | ||
493 | return 0; | ||
494 | |||
495 | if (drv->pm) { | ||
496 | if (drv->pm->resume) | ||
497 | ret = drv->pm->resume(dev); | ||
498 | } else { | ||
499 | ret = ibmebus_bus_legacy_resume(dev); | ||
500 | } | ||
501 | |||
502 | return ret; | ||
503 | } | ||
504 | |||
505 | static int ibmebus_bus_pm_resume_noirq(struct device *dev) | ||
506 | { | ||
507 | struct device_driver *drv = dev->driver; | ||
508 | int ret = 0; | ||
509 | |||
510 | if (!drv) | ||
511 | return 0; | ||
512 | |||
513 | if (drv->pm) { | ||
514 | if (drv->pm->resume_noirq) | ||
515 | ret = drv->pm->resume_noirq(dev); | ||
516 | } | ||
517 | |||
518 | return ret; | ||
519 | } | ||
520 | |||
521 | #else /* !CONFIG_SUSPEND */ | ||
522 | |||
523 | #define ibmebus_bus_pm_suspend NULL | ||
524 | #define ibmebus_bus_pm_resume NULL | ||
525 | #define ibmebus_bus_pm_suspend_noirq NULL | ||
526 | #define ibmebus_bus_pm_resume_noirq NULL | ||
527 | |||
528 | #endif /* !CONFIG_SUSPEND */ | ||
529 | |||
530 | #ifdef CONFIG_HIBERNATE_CALLBACKS | ||
531 | |||
532 | static int ibmebus_bus_pm_freeze(struct device *dev) | ||
533 | { | ||
534 | struct device_driver *drv = dev->driver; | ||
535 | int ret = 0; | ||
536 | |||
537 | if (!drv) | ||
538 | return 0; | ||
539 | |||
540 | if (drv->pm) { | ||
541 | if (drv->pm->freeze) | ||
542 | ret = drv->pm->freeze(dev); | ||
543 | } else { | ||
544 | ret = ibmebus_bus_legacy_suspend(dev, PMSG_FREEZE); | ||
545 | } | ||
546 | |||
547 | return ret; | ||
548 | } | ||
549 | |||
550 | static int ibmebus_bus_pm_freeze_noirq(struct device *dev) | ||
551 | { | ||
552 | struct device_driver *drv = dev->driver; | ||
553 | int ret = 0; | ||
554 | |||
555 | if (!drv) | ||
556 | return 0; | ||
557 | |||
558 | if (drv->pm) { | ||
559 | if (drv->pm->freeze_noirq) | ||
560 | ret = drv->pm->freeze_noirq(dev); | ||
561 | } | ||
562 | |||
563 | return ret; | ||
564 | } | ||
565 | |||
566 | static int ibmebus_bus_pm_thaw(struct device *dev) | ||
567 | { | ||
568 | struct device_driver *drv = dev->driver; | ||
569 | int ret = 0; | ||
570 | |||
571 | if (!drv) | ||
572 | return 0; | ||
573 | |||
574 | if (drv->pm) { | ||
575 | if (drv->pm->thaw) | ||
576 | ret = drv->pm->thaw(dev); | ||
577 | } else { | ||
578 | ret = ibmebus_bus_legacy_resume(dev); | ||
579 | } | ||
580 | |||
581 | return ret; | ||
582 | } | ||
583 | |||
584 | static int ibmebus_bus_pm_thaw_noirq(struct device *dev) | ||
585 | { | ||
586 | struct device_driver *drv = dev->driver; | ||
587 | int ret = 0; | ||
588 | |||
589 | if (!drv) | ||
590 | return 0; | ||
591 | |||
592 | if (drv->pm) { | ||
593 | if (drv->pm->thaw_noirq) | ||
594 | ret = drv->pm->thaw_noirq(dev); | ||
595 | } | ||
596 | |||
597 | return ret; | ||
598 | } | ||
599 | |||
600 | static int ibmebus_bus_pm_poweroff(struct device *dev) | ||
601 | { | ||
602 | struct device_driver *drv = dev->driver; | ||
603 | int ret = 0; | ||
604 | |||
605 | if (!drv) | ||
606 | return 0; | ||
607 | |||
608 | if (drv->pm) { | ||
609 | if (drv->pm->poweroff) | ||
610 | ret = drv->pm->poweroff(dev); | ||
611 | } else { | ||
612 | ret = ibmebus_bus_legacy_suspend(dev, PMSG_HIBERNATE); | ||
613 | } | ||
614 | |||
615 | return ret; | ||
616 | } | ||
617 | |||
618 | static int ibmebus_bus_pm_poweroff_noirq(struct device *dev) | ||
619 | { | ||
620 | struct device_driver *drv = dev->driver; | ||
621 | int ret = 0; | ||
622 | |||
623 | if (!drv) | ||
624 | return 0; | ||
625 | |||
626 | if (drv->pm) { | ||
627 | if (drv->pm->poweroff_noirq) | ||
628 | ret = drv->pm->poweroff_noirq(dev); | ||
629 | } | ||
630 | |||
631 | return ret; | ||
632 | } | ||
633 | |||
634 | static int ibmebus_bus_pm_restore(struct device *dev) | ||
635 | { | ||
636 | struct device_driver *drv = dev->driver; | ||
637 | int ret = 0; | ||
638 | |||
639 | if (!drv) | ||
640 | return 0; | ||
641 | |||
642 | if (drv->pm) { | ||
643 | if (drv->pm->restore) | ||
644 | ret = drv->pm->restore(dev); | ||
645 | } else { | ||
646 | ret = ibmebus_bus_legacy_resume(dev); | ||
647 | } | ||
648 | |||
649 | return ret; | ||
650 | } | ||
651 | |||
652 | static int ibmebus_bus_pm_restore_noirq(struct device *dev) | ||
653 | { | ||
654 | struct device_driver *drv = dev->driver; | ||
655 | int ret = 0; | ||
656 | |||
657 | if (!drv) | ||
658 | return 0; | ||
659 | |||
660 | if (drv->pm) { | ||
661 | if (drv->pm->restore_noirq) | ||
662 | ret = drv->pm->restore_noirq(dev); | ||
663 | } | ||
664 | |||
665 | return ret; | ||
666 | } | ||
667 | |||
668 | #else /* !CONFIG_HIBERNATE_CALLBACKS */ | ||
669 | |||
670 | #define ibmebus_bus_pm_freeze NULL | ||
671 | #define ibmebus_bus_pm_thaw NULL | ||
672 | #define ibmebus_bus_pm_poweroff NULL | ||
673 | #define ibmebus_bus_pm_restore NULL | ||
674 | #define ibmebus_bus_pm_freeze_noirq NULL | ||
675 | #define ibmebus_bus_pm_thaw_noirq NULL | ||
676 | #define ibmebus_bus_pm_poweroff_noirq NULL | ||
677 | #define ibmebus_bus_pm_restore_noirq NULL | ||
678 | |||
679 | #endif /* !CONFIG_HIBERNATE_CALLBACKS */ | ||
680 | |||
681 | static struct dev_pm_ops ibmebus_bus_dev_pm_ops = { | ||
682 | .prepare = ibmebus_bus_pm_prepare, | ||
683 | .complete = ibmebus_bus_pm_complete, | ||
684 | .suspend = ibmebus_bus_pm_suspend, | ||
685 | .resume = ibmebus_bus_pm_resume, | ||
686 | .freeze = ibmebus_bus_pm_freeze, | ||
687 | .thaw = ibmebus_bus_pm_thaw, | ||
688 | .poweroff = ibmebus_bus_pm_poweroff, | ||
689 | .restore = ibmebus_bus_pm_restore, | ||
690 | .suspend_noirq = ibmebus_bus_pm_suspend_noirq, | ||
691 | .resume_noirq = ibmebus_bus_pm_resume_noirq, | ||
692 | .freeze_noirq = ibmebus_bus_pm_freeze_noirq, | ||
693 | .thaw_noirq = ibmebus_bus_pm_thaw_noirq, | ||
694 | .poweroff_noirq = ibmebus_bus_pm_poweroff_noirq, | ||
695 | .restore_noirq = ibmebus_bus_pm_restore_noirq, | ||
696 | }; | ||
697 | |||
698 | #define IBMEBUS_BUS_PM_OPS_PTR (&ibmebus_bus_dev_pm_ops) | ||
699 | |||
700 | #else /* !CONFIG_PM_SLEEP */ | ||
701 | |||
702 | #define IBMEBUS_BUS_PM_OPS_PTR NULL | ||
703 | |||
704 | #endif /* !CONFIG_PM_SLEEP */ | ||
705 | |||
320 | struct bus_type ibmebus_bus_type = { | 706 | struct bus_type ibmebus_bus_type = { |
707 | .name = "ibmebus", | ||
321 | .uevent = of_device_uevent, | 708 | .uevent = of_device_uevent, |
322 | .bus_attrs = ibmebus_bus_attrs | 709 | .bus_attrs = ibmebus_bus_attrs, |
710 | .match = ibmebus_bus_bus_match, | ||
711 | .probe = ibmebus_bus_device_probe, | ||
712 | .remove = ibmebus_bus_device_remove, | ||
713 | .shutdown = ibmebus_bus_device_shutdown, | ||
714 | .dev_attrs = ibmebus_bus_device_attrs, | ||
715 | .pm = IBMEBUS_BUS_PM_OPS_PTR, | ||
323 | }; | 716 | }; |
324 | EXPORT_SYMBOL(ibmebus_bus_type); | 717 | EXPORT_SYMBOL(ibmebus_bus_type); |
325 | 718 | ||
@@ -329,7 +722,7 @@ static int __init ibmebus_bus_init(void) | |||
329 | 722 | ||
330 | printk(KERN_INFO "IBM eBus Device Driver\n"); | 723 | printk(KERN_INFO "IBM eBus Device Driver\n"); |
331 | 724 | ||
332 | err = of_bus_type_init(&ibmebus_bus_type, "ibmebus"); | 725 | err = bus_register(&ibmebus_bus_type); |
333 | if (err) { | 726 | if (err) { |
334 | printk(KERN_ERR "%s: failed to register IBM eBus.\n", | 727 | printk(KERN_ERR "%s: failed to register IBM eBus.\n", |
335 | __func__); | 728 | __func__); |
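
The new ibmebus PM callbacks above all follow one dispatch shape: prefer the driver's dev_pm_ops hook when it exists, otherwise fall back to the legacy of_platform suspend/resume, and do nothing when no driver is bound. A minimal sketch of that pattern with simplified stand-in types (these are not the kernel's structures):

    #include <stdio.h>

    struct dev_pm_ops { int (*suspend)(void *dev); };
    struct device_driver { const struct dev_pm_ops *pm; };
    struct device { struct device_driver *driver; };

    static int legacy_suspend(void *dev)  { (void)dev; printf("legacy suspend\n"); return 0; }
    static int pm_ops_suspend(void *dev)  { (void)dev; printf("dev_pm_ops suspend\n"); return 0; }

    static int bus_pm_suspend(struct device *dev)
    {
        struct device_driver *drv = dev->driver;

        if (!drv)
            return 0;                               /* nothing bound: nothing to do */
        if (drv->pm)                                /* new-style driver */
            return drv->pm->suspend ? drv->pm->suspend(dev) : 0;
        return legacy_suspend(dev);                 /* old-style of_platform driver */
    }

    int main(void)
    {
        struct dev_pm_ops ops = { .suspend = pm_ops_suspend };
        struct device_driver legacy = { .pm = NULL }, modern = { .pm = &ops };
        struct device d1 = { .driver = &legacy }, d2 = { .driver = &modern };

        bus_pm_suspend(&d1);
        bus_pm_suspend(&d2);
        return 0;
    }
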
diff --git a/arch/powerpc/kernel/idle_power4.S b/arch/powerpc/kernel/idle_power4.S index 5328709eeedc..ba3195478600 100644 --- a/arch/powerpc/kernel/idle_power4.S +++ b/arch/powerpc/kernel/idle_power4.S | |||
@@ -53,24 +53,3 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
53 | isync | 53 | isync |
54 | b 1b | 54 | b 1b |
55 | 55 | ||
56 | _GLOBAL(power4_cpu_offline_powersave) | ||
57 | /* Go to NAP now */ | ||
58 | mfmsr r7 | ||
59 | rldicl r0,r7,48,1 | ||
60 | rotldi r0,r0,16 | ||
61 | mtmsrd r0,1 /* hard-disable interrupts */ | ||
62 | li r0,1 | ||
63 | li r6,0 | ||
64 | stb r0,PACAHARDIRQEN(r13) /* we'll hard-enable shortly */ | ||
65 | stb r6,PACASOFTIRQEN(r13) /* soft-disable irqs */ | ||
66 | BEGIN_FTR_SECTION | ||
67 | DSSALL | ||
68 | sync | ||
69 | END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | ||
70 | ori r7,r7,MSR_EE | ||
71 | oris r7,r7,MSR_POW@h | ||
72 | sync | ||
73 | isync | ||
74 | mtmsrd r7 | ||
75 | isync | ||
76 | blr | ||
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S new file mode 100644 index 000000000000..f8f0bc7f1d4f --- /dev/null +++ b/arch/powerpc/kernel/idle_power7.S | |||
@@ -0,0 +1,97 @@ | |||
1 | /* | ||
2 | * This file contains the power_save function for Power7 CPUs. ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/threads.h> | ||
11 | #include <asm/processor.h> | ||
12 | #include <asm/page.h> | ||
13 | #include <asm/cputable.h> | ||
14 | #include <asm/thread_info.h> | ||
15 | #include <asm/ppc_asm.h> | ||
16 | #include <asm/asm-offsets.h> | ||
17 | #include <asm/ppc-opcode.h> | ||
18 | |||
19 | #undef DEBUG | ||
20 | |||
21 | .text | ||
22 | |||
23 | _GLOBAL(power7_idle) | ||
24 | /* Now check if user or arch enabled NAP mode */ | ||
25 | LOAD_REG_ADDRBASE(r3,powersave_nap) | ||
26 | lwz r4,ADDROFF(powersave_nap)(r3) | ||
27 | cmpwi 0,r4,0 | ||
28 | beqlr | ||
29 | |||
30 | /* NAP is a state loss, we create a regs frame on the | ||
31 | * stack, fill it up with the state we care about and | ||
32 | * stick a pointer to it in PACAR1. We really only | ||
33 | * need to save PC, some CR bits and the NV GPRs, | ||
34 | * but for now an interrupt frame will do. | ||
35 | */ | ||
36 | mflr r0 | ||
37 | std r0,16(r1) | ||
38 | stdu r1,-INT_FRAME_SIZE(r1) | ||
39 | std r0,_LINK(r1) | ||
40 | std r0,_NIP(r1) | ||
41 | |||
42 | #ifndef CONFIG_SMP | ||
43 | /* Make sure FPU, VSX etc... are flushed as we may lose | ||
44 | * state when going to nap mode | ||
45 | */ | ||
46 | bl .discard_lazy_cpu_state | ||
47 | #endif /* CONFIG_SMP */ | ||
48 | |||
49 | /* Hard disable interrupts */ | ||
50 | mfmsr r9 | ||
51 | rldicl r9,r9,48,1 | ||
52 | rotldi r9,r9,16 | ||
53 | mtmsrd r9,1 /* hard-disable interrupts */ | ||
54 | li r0,0 | ||
55 | stb r0,PACASOFTIRQEN(r13) /* we'll hard-enable shortly */ | ||
56 | stb r0,PACAHARDIRQEN(r13) | ||
57 | |||
58 | /* Continue saving state */ | ||
59 | SAVE_GPR(2, r1) | ||
60 | SAVE_NVGPRS(r1) | ||
61 | mfcr r3 | ||
62 | std r3,_CCR(r1) | ||
63 | std r9,_MSR(r1) | ||
64 | std r1,PACAR1(r13) | ||
65 | |||
66 | /* Magic NAP mode enter sequence */ | ||
67 | std r0,0(r1) | ||
68 | ptesync | ||
69 | ld r0,0(r1) | ||
70 | 1: cmp cr0,r0,r0 | ||
71 | bne 1b | ||
72 | PPC_NAP | ||
73 | b . | ||
74 | |||
75 | _GLOBAL(power7_wakeup_loss) | ||
76 | GET_PACA(r13) | ||
77 | ld r1,PACAR1(r13) | ||
78 | REST_NVGPRS(r1) | ||
79 | REST_GPR(2, r1) | ||
80 | ld r3,_CCR(r1) | ||
81 | ld r4,_MSR(r1) | ||
82 | ld r5,_NIP(r1) | ||
83 | addi r1,r1,INT_FRAME_SIZE | ||
84 | mtcr r3 | ||
85 | mtspr SPRN_SRR1,r4 | ||
86 | mtspr SPRN_SRR0,r5 | ||
87 | rfid | ||
88 | |||
89 | _GLOBAL(power7_wakeup_noloss) | ||
90 | GET_PACA(r13) | ||
91 | ld r1,PACAR1(r13) | ||
92 | ld r4,_MSR(r1) | ||
93 | ld r5,_NIP(r1) | ||
94 | addi r1,r1,INT_FRAME_SIZE | ||
95 | mtspr SPRN_SRR1,r4 | ||
96 | mtspr SPRN_SRR0,r5 | ||
97 | rfid | ||
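
power7_idle above builds an interrupt-style frame on the stack, saves the non-volatile GPRs, CR and MSR into it, parks the frame pointer in PACAR1 and then naps; power7_wakeup_loss later reloads that frame and returns through SRR0/SRR1. Purely as a userspace analogy for the save-context / resume-at-a-known-point shape (not how the kernel implements it), setjmp/longjmp behaves similarly:

    #include <stdio.h>
    #include <setjmp.h>

    /* The saved context plays the role of the frame power7_idle parks in PACAR1. */
    static jmp_buf sleep_ctx;

    static void enter_nap(void)
    {
        printf("napping...\n");
        /* Waking with state lost: jump back to the saved context, like power7_wakeup_loss. */
        longjmp(sleep_ctx, 1);
    }

    int main(void)
    {
        if (setjmp(sleep_ctx) == 0)     /* record the resume point, i.e. "build the frame" */
            enter_nap();
        else
            printf("woke up, saved context restored\n");
        return 0;
    }
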
diff --git a/arch/powerpc/platforms/cell/io-workarounds.c b/arch/powerpc/kernel/io-workarounds.c index 5c1118e31940..ffafaea3d261 100644 --- a/arch/powerpc/platforms/cell/io-workarounds.c +++ b/arch/powerpc/kernel/io-workarounds.c | |||
@@ -17,8 +17,7 @@ | |||
17 | #include <asm/machdep.h> | 17 | #include <asm/machdep.h> |
18 | #include <asm/pgtable.h> | 18 | #include <asm/pgtable.h> |
19 | #include <asm/ppc-pci.h> | 19 | #include <asm/ppc-pci.h> |
20 | 20 | #include <asm/io-workarounds.h> | |
21 | #include "io-workarounds.h" | ||
22 | 21 | ||
23 | #define IOWA_MAX_BUS 8 | 22 | #define IOWA_MAX_BUS 8 |
24 | 23 | ||
@@ -145,7 +144,19 @@ static void __iomem *iowa_ioremap(phys_addr_t addr, unsigned long size, | |||
145 | return res; | 144 | return res; |
146 | } | 145 | } |
147 | 146 | ||
148 | /* Regist new bus to support workaround */ | 147 | /* Enable IO workaround */ |
148 | static void __devinit io_workaround_init(void) | ||
149 | { | ||
150 | static int io_workaround_inited; | ||
151 | |||
152 | if (io_workaround_inited) | ||
153 | return; | ||
154 | ppc_pci_io = iowa_pci_io; | ||
155 | ppc_md.ioremap = iowa_ioremap; | ||
156 | io_workaround_inited = 1; | ||
157 | } | ||
158 | |||
159 | /* Register new bus to support workaround */ | ||
149 | void __devinit iowa_register_bus(struct pci_controller *phb, | 160 | void __devinit iowa_register_bus(struct pci_controller *phb, |
150 | struct ppc_pci_io *ops, | 161 | struct ppc_pci_io *ops, |
151 | int (*initfunc)(struct iowa_bus *, void *), void *data) | 162 | int (*initfunc)(struct iowa_bus *, void *), void *data) |
@@ -153,6 +164,8 @@ void __devinit iowa_register_bus(struct pci_controller *phb, | |||
153 | struct iowa_bus *bus; | 164 | struct iowa_bus *bus; |
154 | struct device_node *np = phb->dn; | 165 | struct device_node *np = phb->dn; |
155 | 166 | ||
167 | io_workaround_init(); | ||
168 | |||
156 | if (iowa_bus_count >= IOWA_MAX_BUS) { | 169 | if (iowa_bus_count >= IOWA_MAX_BUS) { |
157 | pr_err("IOWA:Too many pci bridges, " | 170 | pr_err("IOWA:Too many pci bridges, " |
158 | "workarounds disabled for %s\n", np->full_name); | 171 | "workarounds disabled for %s\n", np->full_name); |
@@ -162,6 +175,7 @@ void __devinit iowa_register_bus(struct pci_controller *phb, | |||
162 | bus = &iowa_busses[iowa_bus_count]; | 175 | bus = &iowa_busses[iowa_bus_count]; |
163 | bus->phb = phb; | 176 | bus->phb = phb; |
164 | bus->ops = ops; | 177 | bus->ops = ops; |
178 | bus->private = data; | ||
165 | 179 | ||
166 | if (initfunc) | 180 | if (initfunc) |
167 | if ((*initfunc)(bus, data)) | 181 | if ((*initfunc)(bus, data)) |
@@ -172,14 +186,3 @@ void __devinit iowa_register_bus(struct pci_controller *phb, | |||
172 | pr_debug("IOWA:[%d]Add bus, %s.\n", iowa_bus_count-1, np->full_name); | 186 | pr_debug("IOWA:[%d]Add bus, %s.\n", iowa_bus_count-1, np->full_name); |
173 | } | 187 | } |
174 | 188 | ||
175 | /* enable IO workaround */ | ||
176 | void __devinit io_workaround_init(void) | ||
177 | { | ||
178 | static int io_workaround_inited; | ||
179 | |||
180 | if (io_workaround_inited) | ||
181 | return; | ||
182 | ppc_pci_io = iowa_pci_io; | ||
183 | ppc_md.ioremap = iowa_ioremap; | ||
184 | io_workaround_inited = 1; | ||
185 | } | ||
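
With the move above, io_workaround_init() becomes a static one-shot helper that the first iowa_register_bus() call runs lazily, so platforms no longer need to invoke it explicitly. A minimal sketch of that guard pattern; the hook-installation function and bus names are invented for illustration:

    #include <stdio.h>

    /* Stand-in for patching ppc_pci_io / ppc_md.ioremap in the real code. */
    static void install_io_hooks(void) { printf("I/O workaround hooks installed\n"); }

    /* One-shot lazy initializer: first call does the work, later calls are no-ops. */
    static void io_workaround_init(void)
    {
        static int inited;
        if (inited)
            return;
        install_io_hooks();
        inited = 1;
    }

    static void iowa_register_bus(const char *name)
    {
        io_workaround_init();            /* safe to call on every registration */
        printf("registered bus %s\n", name);
    }

    int main(void)
    {
        iowa_register_bus("phb0");
        iowa_register_bus("phb1");       /* second call skips the init */
        return 0;
    }
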
diff --git a/arch/powerpc/kernel/iommu.c b/arch/powerpc/kernel/iommu.c index d5839179ec77..961bb03413f3 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c | |||
@@ -311,8 +311,9 @@ int iommu_map_sg(struct device *dev, struct iommu_table *tbl, | |||
311 | /* Handle failure */ | 311 | /* Handle failure */ |
312 | if (unlikely(entry == DMA_ERROR_CODE)) { | 312 | if (unlikely(entry == DMA_ERROR_CODE)) { |
313 | if (printk_ratelimit()) | 313 | if (printk_ratelimit()) |
314 | printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx" | 314 | dev_info(dev, "iommu_alloc failed, tbl %p " |
315 | " npages %lx\n", tbl, vaddr, npages); | 315 | "vaddr %lx npages %lu\n", tbl, vaddr, |
316 | npages); | ||
316 | goto failure; | 317 | goto failure; |
317 | } | 318 | } |
318 | 319 | ||
@@ -579,9 +580,9 @@ dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl, | |||
579 | attrs); | 580 | attrs); |
580 | if (dma_handle == DMA_ERROR_CODE) { | 581 | if (dma_handle == DMA_ERROR_CODE) { |
581 | if (printk_ratelimit()) { | 582 | if (printk_ratelimit()) { |
582 | printk(KERN_INFO "iommu_alloc failed, " | 583 | dev_info(dev, "iommu_alloc failed, tbl %p " |
583 | "tbl %p vaddr %p npages %d\n", | 584 | "vaddr %p npages %d\n", tbl, vaddr, |
584 | tbl, vaddr, npages); | 585 | npages); |
585 | } | 586 | } |
586 | } else | 587 | } else |
587 | dma_handle |= (uaddr & ~IOMMU_PAGE_MASK); | 588 | dma_handle |= (uaddr & ~IOMMU_PAGE_MASK); |
@@ -627,7 +628,8 @@ void *iommu_alloc_coherent(struct device *dev, struct iommu_table *tbl, | |||
627 | * the tce tables. | 628 | * the tce tables. |
628 | */ | 629 | */ |
629 | if (order >= IOMAP_MAX_ORDER) { | 630 | if (order >= IOMAP_MAX_ORDER) { |
630 | printk("iommu_alloc_consistent size too large: 0x%lx\n", size); | 631 | dev_info(dev, "iommu_alloc_consistent size too large: 0x%lx\n", |
632 | size); | ||
631 | return NULL; | 633 | return NULL; |
632 | } | 634 | } |
633 | 635 | ||
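
The iommu.c hunks above switch bare printk()s to device-prefixed dev_info() calls while keeping them behind printk_ratelimit(). A toy sketch of the same idea; the one-message-per-second limiter below is a crude stand-in for the kernel's ratelimiting, and the device name is made up:

    #include <stdio.h>
    #include <time.h>

    /* Crude stand-in for printk_ratelimit(): at most one message per second. */
    static int ratelimit(void)
    {
        static time_t last;
        time_t now = time(NULL);
        if (now == last)
            return 0;
        last = now;
        return 1;
    }

    /* Device-prefixed message, like dev_info(dev, ...). */
    static void dev_info_like(const char *dev, const char *msg, unsigned long npages)
    {
        if (ratelimit())
            printf("%s: %s, npages %lu\n", dev, msg, npages);
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            dev_info_like("0000:00:1f.0", "iommu_alloc failed", 16);  /* only one line prints */
        return 0;
    }
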
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 4a65386995d7..5b428e308666 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -66,7 +66,6 @@ | |||
66 | #include <asm/ptrace.h> | 66 | #include <asm/ptrace.h> |
67 | #include <asm/machdep.h> | 67 | #include <asm/machdep.h> |
68 | #include <asm/udbg.h> | 68 | #include <asm/udbg.h> |
69 | #include <asm/dbell.h> | ||
70 | #include <asm/smp.h> | 69 | #include <asm/smp.h> |
71 | 70 | ||
72 | #ifdef CONFIG_PPC64 | 71 | #ifdef CONFIG_PPC64 |
@@ -116,7 +115,7 @@ static inline notrace void set_soft_enabled(unsigned long enable) | |||
116 | : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); | 115 | : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled))); |
117 | } | 116 | } |
118 | 117 | ||
119 | notrace void raw_local_irq_restore(unsigned long en) | 118 | notrace void arch_local_irq_restore(unsigned long en) |
120 | { | 119 | { |
121 | /* | 120 | /* |
122 | * get_paca()->soft_enabled = en; | 121 | * get_paca()->soft_enabled = en; |
@@ -160,7 +159,8 @@ notrace void raw_local_irq_restore(unsigned long en) | |||
160 | 159 | ||
161 | #if defined(CONFIG_BOOKE) && defined(CONFIG_SMP) | 160 | #if defined(CONFIG_BOOKE) && defined(CONFIG_SMP) |
162 | /* Check for pending doorbell interrupts and resend to ourself */ | 161 | /* Check for pending doorbell interrupts and resend to ourself */ |
163 | doorbell_check_self(); | 162 | if (cpu_has_feature(CPU_FTR_DBELL)) |
163 | smp_muxed_ipi_resend(); | ||
164 | #endif | 164 | #endif |
165 | 165 | ||
166 | /* | 166 | /* |
@@ -192,10 +192,10 @@ notrace void raw_local_irq_restore(unsigned long en) | |||
192 | 192 | ||
193 | __hard_irq_enable(); | 193 | __hard_irq_enable(); |
194 | } | 194 | } |
195 | EXPORT_SYMBOL(raw_local_irq_restore); | 195 | EXPORT_SYMBOL(arch_local_irq_restore); |
196 | #endif /* CONFIG_PPC64 */ | 196 | #endif /* CONFIG_PPC64 */ |
197 | 197 | ||
198 | static int show_other_interrupts(struct seq_file *p, int prec) | 198 | int arch_show_interrupts(struct seq_file *p, int prec) |
199 | { | 199 | { |
200 | int j; | 200 | int j; |
201 | 201 | ||
@@ -231,63 +231,6 @@ static int show_other_interrupts(struct seq_file *p, int prec) | |||
231 | return 0; | 231 | return 0; |
232 | } | 232 | } |
233 | 233 | ||
234 | int show_interrupts(struct seq_file *p, void *v) | ||
235 | { | ||
236 | unsigned long flags, any_count = 0; | ||
237 | int i = *(loff_t *) v, j, prec; | ||
238 | struct irqaction *action; | ||
239 | struct irq_desc *desc; | ||
240 | |||
241 | if (i > nr_irqs) | ||
242 | return 0; | ||
243 | |||
244 | for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec) | ||
245 | j *= 10; | ||
246 | |||
247 | if (i == nr_irqs) | ||
248 | return show_other_interrupts(p, prec); | ||
249 | |||
250 | /* print header */ | ||
251 | if (i == 0) { | ||
252 | seq_printf(p, "%*s", prec + 8, ""); | ||
253 | for_each_online_cpu(j) | ||
254 | seq_printf(p, "CPU%-8d", j); | ||
255 | seq_putc(p, '\n'); | ||
256 | } | ||
257 | |||
258 | desc = irq_to_desc(i); | ||
259 | if (!desc) | ||
260 | return 0; | ||
261 | |||
262 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
263 | for_each_online_cpu(j) | ||
264 | any_count |= kstat_irqs_cpu(i, j); | ||
265 | action = desc->action; | ||
266 | if (!action && !any_count) | ||
267 | goto out; | ||
268 | |||
269 | seq_printf(p, "%*d: ", prec, i); | ||
270 | for_each_online_cpu(j) | ||
271 | seq_printf(p, "%10u ", kstat_irqs_cpu(i, j)); | ||
272 | |||
273 | if (desc->chip) | ||
274 | seq_printf(p, " %-16s", desc->chip->name); | ||
275 | else | ||
276 | seq_printf(p, " %-16s", "None"); | ||
277 | seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge"); | ||
278 | |||
279 | if (action) { | ||
280 | seq_printf(p, " %s", action->name); | ||
281 | while ((action = action->next) != NULL) | ||
282 | seq_printf(p, ", %s", action->name); | ||
283 | } | ||
284 | |||
285 | seq_putc(p, '\n'); | ||
286 | out: | ||
287 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | /* | 234 | /* |
292 | * /proc/stat helpers | 235 | * /proc/stat helpers |
293 | */ | 236 | */ |
@@ -303,30 +246,37 @@ u64 arch_irq_stat_cpu(unsigned int cpu) | |||
303 | } | 246 | } |
304 | 247 | ||
305 | #ifdef CONFIG_HOTPLUG_CPU | 248 | #ifdef CONFIG_HOTPLUG_CPU |
306 | void fixup_irqs(const struct cpumask *map) | 249 | void migrate_irqs(void) |
307 | { | 250 | { |
308 | struct irq_desc *desc; | 251 | struct irq_desc *desc; |
309 | unsigned int irq; | 252 | unsigned int irq; |
310 | static int warned; | 253 | static int warned; |
311 | cpumask_var_t mask; | 254 | cpumask_var_t mask; |
255 | const struct cpumask *map = cpu_online_mask; | ||
312 | 256 | ||
313 | alloc_cpumask_var(&mask, GFP_KERNEL); | 257 | alloc_cpumask_var(&mask, GFP_KERNEL); |
314 | 258 | ||
315 | for_each_irq(irq) { | 259 | for_each_irq(irq) { |
260 | struct irq_data *data; | ||
261 | struct irq_chip *chip; | ||
262 | |||
316 | desc = irq_to_desc(irq); | 263 | desc = irq_to_desc(irq); |
317 | if (!desc) | 264 | if (!desc) |
318 | continue; | 265 | continue; |
319 | 266 | ||
320 | if (desc->status & IRQ_PER_CPU) | 267 | data = irq_desc_get_irq_data(desc); |
268 | if (irqd_is_per_cpu(data)) | ||
321 | continue; | 269 | continue; |
322 | 270 | ||
323 | cpumask_and(mask, desc->affinity, map); | 271 | chip = irq_data_get_irq_chip(data); |
272 | |||
273 | cpumask_and(mask, data->affinity, map); | ||
324 | if (cpumask_any(mask) >= nr_cpu_ids) { | 274 | if (cpumask_any(mask) >= nr_cpu_ids) { |
325 | printk("Breaking affinity for irq %i\n", irq); | 275 | printk("Breaking affinity for irq %i\n", irq); |
326 | cpumask_copy(mask, map); | 276 | cpumask_copy(mask, map); |
327 | } | 277 | } |
328 | if (desc->chip->set_affinity) | 278 | if (chip->irq_set_affinity) |
329 | desc->chip->set_affinity(irq, mask); | 279 | chip->irq_set_affinity(data, mask, true); |
330 | else if (desc->action && !(warned++)) | 280 | else if (desc->action && !(warned++)) |
331 | printk("Cannot set affinity for irq %i\n", irq); | 281 | printk("Cannot set affinity for irq %i\n", irq); |
332 | } | 282 | } |
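
migrate_irqs() above walks every descriptor, skips per-CPU interrupts, intersects the recorded affinity with the online-CPU mask, and breaks affinity back to all online CPUs when the intersection is empty before handing the result to the chip's irq_set_affinity. A small C model of that decision using a plain bitmask (illustrative only, not the kernel's cpumask API):

    #include <stdio.h>

    typedef unsigned int cpumask_t;                  /* one bit per CPU, sketch only */

    /* Intersect an irq's affinity with the online mask; fall back to all online CPUs. */
    static cpumask_t fixup_affinity(cpumask_t affinity, cpumask_t online, int irq)
    {
        cpumask_t mask = affinity & online;
        if (mask == 0) {
            printf("Breaking affinity for irq %d\n", irq);
            mask = online;
        }
        return mask;
    }

    int main(void)
    {
        cpumask_t online = 0x5;                      /* CPUs 0 and 2 online */
        printf("irq 17 -> %#x\n", fixup_affinity(0x4, online, 17));  /* stays on CPU 2 */
        printf("irq 18 -> %#x\n", fixup_affinity(0x2, online, 18));  /* CPU 1 offline: broken */
        return 0;
    }
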
@@ -345,17 +295,20 @@ static inline void handle_one_irq(unsigned int irq) | |||
345 | unsigned long saved_sp_limit; | 295 | unsigned long saved_sp_limit; |
346 | struct irq_desc *desc; | 296 | struct irq_desc *desc; |
347 | 297 | ||
298 | desc = irq_to_desc(irq); | ||
299 | if (!desc) | ||
300 | return; | ||
301 | |||
348 | /* Switch to the irq stack to handle this */ | 302 | /* Switch to the irq stack to handle this */ |
349 | curtp = current_thread_info(); | 303 | curtp = current_thread_info(); |
350 | irqtp = hardirq_ctx[smp_processor_id()]; | 304 | irqtp = hardirq_ctx[smp_processor_id()]; |
351 | 305 | ||
352 | if (curtp == irqtp) { | 306 | if (curtp == irqtp) { |
353 | /* We're already on the irq stack, just handle it */ | 307 | /* We're already on the irq stack, just handle it */ |
354 | generic_handle_irq(irq); | 308 | desc->handle_irq(irq, desc); |
355 | return; | 309 | return; |
356 | } | 310 | } |
357 | 311 | ||
358 | desc = irq_to_desc(irq); | ||
359 | saved_sp_limit = current->thread.ksp_limit; | 312 | saved_sp_limit = current->thread.ksp_limit; |
360 | 313 | ||
361 | irqtp->task = curtp->task; | 314 | irqtp->task = curtp->task; |
@@ -447,24 +400,28 @@ struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly; | |||
447 | void exc_lvl_ctx_init(void) | 400 | void exc_lvl_ctx_init(void) |
448 | { | 401 | { |
449 | struct thread_info *tp; | 402 | struct thread_info *tp; |
450 | int i, hw_cpu; | 403 | int i, cpu_nr; |
451 | 404 | ||
452 | for_each_possible_cpu(i) { | 405 | for_each_possible_cpu(i) { |
453 | hw_cpu = get_hard_smp_processor_id(i); | 406 | #ifdef CONFIG_PPC64 |
454 | memset((void *)critirq_ctx[hw_cpu], 0, THREAD_SIZE); | 407 | cpu_nr = i; |
455 | tp = critirq_ctx[hw_cpu]; | 408 | #else |
456 | tp->cpu = i; | 409 | cpu_nr = get_hard_smp_processor_id(i); |
410 | #endif | ||
411 | memset((void *)critirq_ctx[cpu_nr], 0, THREAD_SIZE); | ||
412 | tp = critirq_ctx[cpu_nr]; | ||
413 | tp->cpu = cpu_nr; | ||
457 | tp->preempt_count = 0; | 414 | tp->preempt_count = 0; |
458 | 415 | ||
459 | #ifdef CONFIG_BOOKE | 416 | #ifdef CONFIG_BOOKE |
460 | memset((void *)dbgirq_ctx[hw_cpu], 0, THREAD_SIZE); | 417 | memset((void *)dbgirq_ctx[cpu_nr], 0, THREAD_SIZE); |
461 | tp = dbgirq_ctx[hw_cpu]; | 418 | tp = dbgirq_ctx[cpu_nr]; |
462 | tp->cpu = i; | 419 | tp->cpu = cpu_nr; |
463 | tp->preempt_count = 0; | 420 | tp->preempt_count = 0; |
464 | 421 | ||
465 | memset((void *)mcheckirq_ctx[hw_cpu], 0, THREAD_SIZE); | 422 | memset((void *)mcheckirq_ctx[cpu_nr], 0, THREAD_SIZE); |
466 | tp = mcheckirq_ctx[hw_cpu]; | 423 | tp = mcheckirq_ctx[cpu_nr]; |
467 | tp->cpu = i; | 424 | tp->cpu = cpu_nr; |
468 | tp->preempt_count = HARDIRQ_OFFSET; | 425 | tp->preempt_count = HARDIRQ_OFFSET; |
469 | #endif | 426 | #endif |
470 | } | 427 | } |
@@ -527,20 +484,41 @@ void do_softirq(void) | |||
527 | * IRQ controller and virtual interrupts | 484 | * IRQ controller and virtual interrupts |
528 | */ | 485 | */ |
529 | 486 | ||
487 | /* The main irq map itself is an array of NR_IRQS entries containing the ||
488 | * associated host and irq number. An entry with a host of NULL is free. ||
489 | * An entry can be allocated if it's free, the allocator always then sets | ||
490 | * hwirq first to the host's invalid irq number and then fills ops. | ||
491 | */ | ||
492 | struct irq_map_entry { | ||
493 | irq_hw_number_t hwirq; | ||
494 | struct irq_host *host; | ||
495 | }; | ||
496 | |||
530 | static LIST_HEAD(irq_hosts); | 497 | static LIST_HEAD(irq_hosts); |
531 | static DEFINE_RAW_SPINLOCK(irq_big_lock); | 498 | static DEFINE_RAW_SPINLOCK(irq_big_lock); |
532 | static unsigned int revmap_trees_allocated; | ||
533 | static DEFINE_MUTEX(revmap_trees_mutex); | 499 | static DEFINE_MUTEX(revmap_trees_mutex); |
534 | struct irq_map_entry irq_map[NR_IRQS]; | 500 | static struct irq_map_entry irq_map[NR_IRQS]; |
535 | static unsigned int irq_virq_count = NR_IRQS; | 501 | static unsigned int irq_virq_count = NR_IRQS; |
536 | static struct irq_host *irq_default_host; | 502 | static struct irq_host *irq_default_host; |
537 | 503 | ||
504 | irq_hw_number_t irqd_to_hwirq(struct irq_data *d) | ||
505 | { | ||
506 | return irq_map[d->irq].hwirq; | ||
507 | } | ||
508 | EXPORT_SYMBOL_GPL(irqd_to_hwirq); | ||
509 | |||
538 | irq_hw_number_t virq_to_hw(unsigned int virq) | 510 | irq_hw_number_t virq_to_hw(unsigned int virq) |
539 | { | 511 | { |
540 | return irq_map[virq].hwirq; | 512 | return irq_map[virq].hwirq; |
541 | } | 513 | } |
542 | EXPORT_SYMBOL_GPL(virq_to_hw); | 514 | EXPORT_SYMBOL_GPL(virq_to_hw); |
543 | 515 | ||
516 | bool virq_is_host(unsigned int virq, struct irq_host *host) | ||
517 | { | ||
518 | return irq_map[virq].host == host; | ||
519 | } | ||
520 | EXPORT_SYMBOL_GPL(virq_is_host); | ||
521 | |||
544 | static int default_irq_host_match(struct irq_host *h, struct device_node *np) | 522 | static int default_irq_host_match(struct irq_host *h, struct device_node *np) |
545 | { | 523 | { |
546 | return h->of_node != NULL && h->of_node == np; | 524 | return h->of_node != NULL && h->of_node == np; |
@@ -561,7 +539,7 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, | |||
561 | /* Allocate structure and revmap table if using linear mapping */ | 539 | /* Allocate structure and revmap table if using linear mapping */ |
562 | if (revmap_type == IRQ_HOST_MAP_LINEAR) | 540 | if (revmap_type == IRQ_HOST_MAP_LINEAR) |
563 | size += revmap_arg * sizeof(unsigned int); | 541 | size += revmap_arg * sizeof(unsigned int); |
564 | host = zalloc_maybe_bootmem(size, GFP_KERNEL); | 542 | host = kzalloc(size, GFP_KERNEL); |
565 | if (host == NULL) | 543 | if (host == NULL) |
566 | return NULL; | 544 | return NULL; |
567 | 545 | ||
@@ -582,13 +560,8 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, | |||
582 | if (revmap_type == IRQ_HOST_MAP_LEGACY) { | 560 | if (revmap_type == IRQ_HOST_MAP_LEGACY) { |
583 | if (irq_map[0].host != NULL) { | 561 | if (irq_map[0].host != NULL) { |
584 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); | 562 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); |
585 | /* If we are early boot, we can't free the structure, | 563 | of_node_put(host->of_node); |
586 | * too bad... | 564 | kfree(host); |
587 | * this will be fixed once slab is made available early | ||
588 | * instead of the current cruft | ||
589 | */ | ||
590 | if (mem_init_done) | ||
591 | kfree(host); | ||
592 | return NULL; | 565 | return NULL; |
593 | } | 566 | } |
594 | irq_map[0].host = host; | 567 | irq_map[0].host = host; |
@@ -609,14 +582,14 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, | |||
609 | irq_map[i].host = host; | 582 | irq_map[i].host = host; |
610 | smp_wmb(); | 583 | smp_wmb(); |
611 | 584 | ||
612 | /* Clear norequest flags */ | ||
613 | irq_to_desc(i)->status &= ~IRQ_NOREQUEST; | ||
614 | |||
615 | /* Legacy flags are left to default at this point, | 585 | /* Legacy flags are left to default at this point, |
616 | * one can then use irq_create_mapping() to | 586 | * one can then use irq_create_mapping() to |
617 | * explicitly change them | 587 | * explicitly change them |
618 | */ | 588 | */ |
619 | ops->map(host, i, i); | 589 | ops->map(host, i, i); |
590 | |||
591 | /* Clear norequest flags */ | ||
592 | irq_clear_status_flags(i, IRQ_NOREQUEST); | ||
620 | } | 593 | } |
621 | break; | 594 | break; |
622 | case IRQ_HOST_MAP_LINEAR: | 595 | case IRQ_HOST_MAP_LINEAR: |
@@ -627,6 +600,9 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, | |||
627 | smp_wmb(); | 600 | smp_wmb(); |
628 | host->revmap_data.linear.revmap = rmap; | 601 | host->revmap_data.linear.revmap = rmap; |
629 | break; | 602 | break; |
603 | case IRQ_HOST_MAP_TREE: | ||
604 | INIT_RADIX_TREE(&host->revmap_data.tree, GFP_KERNEL); | ||
605 | break; | ||
630 | default: | 606 | default: |
631 | break; | 607 | break; |
632 | } | 608 | } |
@@ -676,17 +652,14 @@ void irq_set_virq_count(unsigned int count) | |||
676 | static int irq_setup_virq(struct irq_host *host, unsigned int virq, | 652 | static int irq_setup_virq(struct irq_host *host, unsigned int virq, |
677 | irq_hw_number_t hwirq) | 653 | irq_hw_number_t hwirq) |
678 | { | 654 | { |
679 | struct irq_desc *desc; | 655 | int res; |
680 | 656 | ||
681 | desc = irq_to_desc_alloc_node(virq, 0); | 657 | res = irq_alloc_desc_at(virq, 0); |
682 | if (!desc) { | 658 | if (res != virq) { |
683 | pr_debug("irq: -> allocating desc failed\n"); | 659 | pr_debug("irq: -> allocating desc failed\n"); |
684 | goto error; | 660 | goto error; |
685 | } | 661 | } |
686 | 662 | ||
687 | /* Clear IRQ_NOREQUEST flag */ | ||
688 | desc->status &= ~IRQ_NOREQUEST; | ||
689 | |||
690 | /* map it */ | 663 | /* map it */ |
691 | smp_wmb(); | 664 | smp_wmb(); |
692 | irq_map[virq].hwirq = hwirq; | 665 | irq_map[virq].hwirq = hwirq; |
@@ -694,11 +667,15 @@ static int irq_setup_virq(struct irq_host *host, unsigned int virq, | |||
694 | 667 | ||
695 | if (host->ops->map(host, virq, hwirq)) { | 668 | if (host->ops->map(host, virq, hwirq)) { |
696 | pr_debug("irq: -> mapping failed, freeing\n"); | 669 | pr_debug("irq: -> mapping failed, freeing\n"); |
697 | goto error; | 670 | goto errdesc; |
698 | } | 671 | } |
699 | 672 | ||
673 | irq_clear_status_flags(virq, IRQ_NOREQUEST); | ||
674 | |||
700 | return 0; | 675 | return 0; |
701 | 676 | ||
677 | errdesc: | ||
678 | irq_free_descs(virq, 1); | ||
702 | error: | 679 | error: |
703 | irq_free_virt(virq, 1); | 680 | irq_free_virt(virq, 1); |
704 | return -1; | 681 | return -1; |
@@ -746,13 +723,9 @@ unsigned int irq_create_mapping(struct irq_host *host, | |||
746 | } | 723 | } |
747 | pr_debug("irq: -> using host @%p\n", host); | 724 | pr_debug("irq: -> using host @%p\n", host); |
748 | 725 | ||
749 | /* Check if mapping already exist, if it does, call | 726 | /* Check if mapping already exists */ |
750 | * host->ops->map() to update the flags | ||
751 | */ | ||
752 | virq = irq_find_mapping(host, hwirq); | 727 | virq = irq_find_mapping(host, hwirq); |
753 | if (virq != NO_IRQ) { | 728 | if (virq != NO_IRQ) { |
754 | if (host->ops->remap) | ||
755 | host->ops->remap(host, virq, hwirq); | ||
756 | pr_debug("irq: -> existing mapping on virq %d\n", virq); | 729 | pr_debug("irq: -> existing mapping on virq %d\n", virq); |
757 | return virq; | 730 | return virq; |
758 | } | 731 | } |
@@ -818,8 +791,8 @@ unsigned int irq_create_of_mapping(struct device_node *controller, | |||
818 | 791 | ||
819 | /* Set type if specified and different than the current one */ | 792 | /* Set type if specified and different than the current one */ |
820 | if (type != IRQ_TYPE_NONE && | 793 | if (type != IRQ_TYPE_NONE && |
821 | type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK)) | 794 | type != (irqd_get_trigger_type(irq_get_irq_data(virq)))) |
822 | set_irq_type(virq, type); | 795 | irq_set_irq_type(virq, type); |
823 | return virq; | 796 | return virq; |
824 | } | 797 | } |
825 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); | 798 | EXPORT_SYMBOL_GPL(irq_create_of_mapping); |
@@ -833,16 +806,17 @@ void irq_dispose_mapping(unsigned int virq) | |||
833 | return; | 806 | return; |
834 | 807 | ||
835 | host = irq_map[virq].host; | 808 | host = irq_map[virq].host; |
836 | WARN_ON (host == NULL); | 809 | if (WARN_ON(host == NULL)) |
837 | if (host == NULL) | ||
838 | return; | 810 | return; |
839 | 811 | ||
840 | /* Never unmap legacy interrupts */ | 812 | /* Never unmap legacy interrupts */ |
841 | if (host->revmap_type == IRQ_HOST_MAP_LEGACY) | 813 | if (host->revmap_type == IRQ_HOST_MAP_LEGACY) |
842 | return; | 814 | return; |
843 | 815 | ||
816 | irq_set_status_flags(virq, IRQ_NOREQUEST); | ||
817 | |||
844 | /* remove chip and handler */ | 818 | /* remove chip and handler */ |
845 | set_irq_chip_and_handler(virq, NULL, NULL); | 819 | irq_set_chip_and_handler(virq, NULL, NULL); |
846 | 820 | ||
847 | /* Make sure it's completed */ | 821 | /* Make sure it's completed */ |
848 | synchronize_irq(virq); | 822 | synchronize_irq(virq); |
@@ -860,13 +834,6 @@ void irq_dispose_mapping(unsigned int virq) | |||
860 | host->revmap_data.linear.revmap[hwirq] = NO_IRQ; | 834 | host->revmap_data.linear.revmap[hwirq] = NO_IRQ; |
861 | break; | 835 | break; |
862 | case IRQ_HOST_MAP_TREE: | 836 | case IRQ_HOST_MAP_TREE: |
863 | /* | ||
864 | * Check if radix tree allocated yet, if not then nothing to | ||
865 | * remove. | ||
866 | */ | ||
867 | smp_rmb(); | ||
868 | if (revmap_trees_allocated < 1) | ||
869 | break; | ||
870 | mutex_lock(&revmap_trees_mutex); | 837 | mutex_lock(&revmap_trees_mutex); |
871 | radix_tree_delete(&host->revmap_data.tree, hwirq); | 838 | radix_tree_delete(&host->revmap_data.tree, hwirq); |
872 | mutex_unlock(&revmap_trees_mutex); | 839 | mutex_unlock(&revmap_trees_mutex); |
@@ -877,9 +844,7 @@ void irq_dispose_mapping(unsigned int virq) | |||
877 | smp_mb(); | 844 | smp_mb(); |
878 | irq_map[virq].hwirq = host->inval_irq; | 845 | irq_map[virq].hwirq = host->inval_irq; |
879 | 846 | ||
880 | /* Set some flags */ | 847 | irq_free_descs(virq, 1); |
881 | irq_to_desc(virq)->status |= IRQ_NOREQUEST; | ||
882 | |||
883 | /* Free it */ | 848 | /* Free it */ |
884 | irq_free_virt(virq, 1); | 849 | irq_free_virt(virq, 1); |
885 | } | 850 | } |
@@ -924,21 +889,17 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, | |||
924 | struct irq_map_entry *ptr; | 889 | struct irq_map_entry *ptr; |
925 | unsigned int virq; | 890 | unsigned int virq; |
926 | 891 | ||
927 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); | 892 | if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_TREE)) |
928 | |||
929 | /* | ||
930 | * Check if the radix tree exists and has bee initialized. | ||
931 | * If not, we fallback to slow mode | ||
932 | */ | ||
933 | if (revmap_trees_allocated < 2) | ||
934 | return irq_find_mapping(host, hwirq); | 893 | return irq_find_mapping(host, hwirq); |
935 | 894 | ||
936 | /* Now try to resolve */ | ||
937 | /* | 895 | /* |
938 | * No rcu_read_lock(ing) needed, the ptr returned can't go under us | 896 | * The ptr returned references the static global irq_map. |
939 | * as it's referencing an entry in the static irq_map table. | 897 | * but freeing an irq can delete nodes along the path to |
898 | * do the lookup via call_rcu. | ||
940 | */ | 899 | */ |
900 | rcu_read_lock(); | ||
941 | ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); | 901 | ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); |
902 | rcu_read_unlock(); | ||
942 | 903 | ||
943 | /* | 904 | /* |
944 | * If found in radix tree, then fine. | 905 | * If found in radix tree, then fine. |
@@ -956,16 +917,7 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, | |||
956 | void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, | 917 | void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq, |
957 | irq_hw_number_t hwirq) | 918 | irq_hw_number_t hwirq) |
958 | { | 919 | { |
959 | 920 | if (WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE)) | |
960 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE); | ||
961 | |||
962 | /* | ||
963 | * Check if the radix tree exists yet. | ||
964 | * If not, then the irq will be inserted into the tree when it gets | ||
965 | * initialized. | ||
966 | */ | ||
967 | smp_rmb(); | ||
968 | if (revmap_trees_allocated < 1) | ||
969 | return; | 921 | return; |
970 | 922 | ||
971 | if (virq != NO_IRQ) { | 923 | if (virq != NO_IRQ) { |
@@ -981,7 +933,8 @@ unsigned int irq_linear_revmap(struct irq_host *host, | |||
981 | { | 933 | { |
982 | unsigned int *revmap; | 934 | unsigned int *revmap; |
983 | 935 | ||
984 | WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR); | 936 | if (WARN_ON_ONCE(host->revmap_type != IRQ_HOST_MAP_LINEAR)) |
937 | return irq_find_mapping(host, hwirq); | ||
985 | 938 | ||
986 | /* Check revmap bounds */ | 939 | /* Check revmap bounds */ |
987 | if (unlikely(hwirq >= host->revmap_data.linear.size)) | 940 | if (unlikely(hwirq >= host->revmap_data.linear.size)) |
@@ -1054,14 +1007,23 @@ void irq_free_virt(unsigned int virq, unsigned int count) | |||
1054 | WARN_ON (virq < NUM_ISA_INTERRUPTS); | 1007 | WARN_ON (virq < NUM_ISA_INTERRUPTS); |
1055 | WARN_ON (count == 0 || (virq + count) > irq_virq_count); | 1008 | WARN_ON (count == 0 || (virq + count) > irq_virq_count); |
1056 | 1009 | ||
1010 | if (virq < NUM_ISA_INTERRUPTS) { | ||
1011 | if (virq + count < NUM_ISA_INTERRUPTS) | ||
1012 | return; | ||
1013 | count -= NUM_ISA_INTERRUPTS - virq; ||
1014 | virq = NUM_ISA_INTERRUPTS; | ||
1015 | } | ||
1016 | |||
1017 | if (count > irq_virq_count || virq > irq_virq_count - count) { | ||
1018 | if (virq > irq_virq_count) | ||
1019 | return; | ||
1020 | count = irq_virq_count - virq; | ||
1021 | } | ||
1022 | |||
1057 | raw_spin_lock_irqsave(&irq_big_lock, flags); | 1023 | raw_spin_lock_irqsave(&irq_big_lock, flags); |
1058 | for (i = virq; i < (virq + count); i++) { | 1024 | for (i = virq; i < (virq + count); i++) { |
1059 | struct irq_host *host; | 1025 | struct irq_host *host; |
1060 | 1026 | ||
1061 | if (i < NUM_ISA_INTERRUPTS || | ||
1062 | (virq + count) > irq_virq_count) | ||
1063 | continue; | ||
1064 | |||
1065 | host = irq_map[i].host; | 1027 | host = irq_map[i].host; |
1066 | irq_map[i].hwirq = host->inval_irq; | 1028 | irq_map[i].hwirq = host->inval_irq; |
1067 | smp_wmb(); | 1029 | smp_wmb(); |
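
The new checks at the top of irq_free_virt() clamp the requested range before the loop: any ISA portion is trimmed off the front and the tail is limited to irq_virq_count, instead of skipping out-of-range entries one at a time inside the loop. A sketch of that bounds handling, written on the assumption that the count adjustment is meant to subtract the trimmed ISA part:

    #include <stdio.h>

    #define NUM_ISA_INTERRUPTS 16
    static unsigned int irq_virq_count = 512;

    /* Clamp [virq, virq+count) to the freeable range; return 0 if nothing remains. */
    static int clamp_range(unsigned int *virq, unsigned int *count)
    {
        if (*virq < NUM_ISA_INTERRUPTS) {
            if (*virq + *count < NUM_ISA_INTERRUPTS)
                return 0;                            /* entirely ISA: nothing to free */
            *count -= NUM_ISA_INTERRUPTS - *virq;
            *virq = NUM_ISA_INTERRUPTS;
        }
        if (*count > irq_virq_count || *virq > irq_virq_count - *count) {
            if (*virq > irq_virq_count)
                return 0;                            /* starts past the end */
            *count = irq_virq_count - *virq;         /* trim the tail */
        }
        return 1;
    }

    int main(void)
    {
        unsigned int virq = 10, count = 20;
        if (clamp_range(&virq, &count))
            printf("free virqs [%u, %u)\n", virq, virq + count);   /* prints [16, 30) */
        return 0;
    }
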
@@ -1072,82 +1034,21 @@ void irq_free_virt(unsigned int virq, unsigned int count) | |||
1072 | 1034 | ||
1073 | int arch_early_irq_init(void) | 1035 | int arch_early_irq_init(void) |
1074 | { | 1036 | { |
1075 | struct irq_desc *desc; | ||
1076 | int i; | ||
1077 | |||
1078 | for (i = 0; i < NR_IRQS; i++) { | ||
1079 | desc = irq_to_desc(i); | ||
1080 | if (desc) | ||
1081 | desc->status |= IRQ_NOREQUEST; | ||
1082 | } | ||
1083 | |||
1084 | return 0; | ||
1085 | } | ||
1086 | |||
1087 | int arch_init_chip_data(struct irq_desc *desc, int node) | ||
1088 | { | ||
1089 | desc->status |= IRQ_NOREQUEST; | ||
1090 | return 0; | 1037 | return 0; |
1091 | } | 1038 | } |
1092 | 1039 | ||
1093 | /* We need to create the radix trees late */ | ||
1094 | static int irq_late_init(void) | ||
1095 | { | ||
1096 | struct irq_host *h; | ||
1097 | unsigned int i; | ||
1098 | |||
1099 | /* | ||
1100 | * No mutual exclusion with respect to accessors of the tree is needed | ||
1101 | * here as the synchronization is done via the state variable | ||
1102 | * revmap_trees_allocated. | ||
1103 | */ | ||
1104 | list_for_each_entry(h, &irq_hosts, link) { | ||
1105 | if (h->revmap_type == IRQ_HOST_MAP_TREE) | ||
1106 | INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL); | ||
1107 | } | ||
1108 | |||
1109 | /* | ||
1110 | * Make sure the radix trees inits are visible before setting | ||
1111 | * the flag | ||
1112 | */ | ||
1113 | smp_wmb(); | ||
1114 | revmap_trees_allocated = 1; | ||
1115 | |||
1116 | /* | ||
1117 | * Insert the reverse mapping for those interrupts already present | ||
1118 | * in irq_map[]. | ||
1119 | */ | ||
1120 | mutex_lock(&revmap_trees_mutex); | ||
1121 | for (i = 0; i < irq_virq_count; i++) { | ||
1122 | if (irq_map[i].host && | ||
1123 | (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE)) | ||
1124 | radix_tree_insert(&irq_map[i].host->revmap_data.tree, | ||
1125 | irq_map[i].hwirq, &irq_map[i]); | ||
1126 | } | ||
1127 | mutex_unlock(&revmap_trees_mutex); | ||
1128 | |||
1129 | /* | ||
1130 | * Make sure the radix trees insertions are visible before setting | ||
1131 | * the flag | ||
1132 | */ | ||
1133 | smp_wmb(); | ||
1134 | revmap_trees_allocated = 2; | ||
1135 | |||
1136 | return 0; | ||
1137 | } | ||
1138 | arch_initcall(irq_late_init); | ||
1139 | |||
1140 | #ifdef CONFIG_VIRQ_DEBUG | 1040 | #ifdef CONFIG_VIRQ_DEBUG |
1141 | static int virq_debug_show(struct seq_file *m, void *private) | 1041 | static int virq_debug_show(struct seq_file *m, void *private) |
1142 | { | 1042 | { |
1143 | unsigned long flags; | 1043 | unsigned long flags; |
1144 | struct irq_desc *desc; | 1044 | struct irq_desc *desc; |
1145 | const char *p; | 1045 | const char *p; |
1146 | char none[] = "none"; | 1046 | static const char none[] = "none"; |
1047 | void *data; | ||
1147 | int i; | 1048 | int i; |
1148 | 1049 | ||
1149 | seq_printf(m, "%-5s %-7s %-15s %s\n", "virq", "hwirq", | 1050 | seq_printf(m, "%-5s %-7s %-15s %-18s %s\n", "virq", "hwirq", |
1150 | "chip name", "host name"); | 1051 | "chip name", "chip data", "host name"); |
1151 | 1052 | ||
1152 | for (i = 1; i < nr_irqs; i++) { | 1053 | for (i = 1; i < nr_irqs; i++) { |
1153 | desc = irq_to_desc(i); | 1054 | desc = irq_to_desc(i); |
@@ -1157,15 +1058,21 @@ static int virq_debug_show(struct seq_file *m, void *private) | |||
1157 | raw_spin_lock_irqsave(&desc->lock, flags); | 1058 | raw_spin_lock_irqsave(&desc->lock, flags); |
1158 | 1059 | ||
1159 | if (desc->action && desc->action->handler) { | 1060 | if (desc->action && desc->action->handler) { |
1061 | struct irq_chip *chip; | ||
1062 | |||
1160 | seq_printf(m, "%5d ", i); | 1063 | seq_printf(m, "%5d ", i); |
1161 | seq_printf(m, "0x%05lx ", virq_to_hw(i)); | 1064 | seq_printf(m, "0x%05lx ", irq_map[i].hwirq); |
1162 | 1065 | ||
1163 | if (desc->chip && desc->chip->name) | 1066 | chip = irq_desc_get_chip(desc); |
1164 | p = desc->chip->name; | 1067 | if (chip && chip->name) |
1068 | p = chip->name; | ||
1165 | else | 1069 | else |
1166 | p = none; | 1070 | p = none; |
1167 | seq_printf(m, "%-15s ", p); | 1071 | seq_printf(m, "%-15s ", p); |
1168 | 1072 | ||
1073 | data = irq_desc_get_chip_data(desc); | ||
1074 | seq_printf(m, "0x%16p ", data); | ||
1075 | |||
1169 | if (irq_map[i].host && irq_map[i].host->of_node) | 1076 | if (irq_map[i].host && irq_map[i].host->of_node) |
1170 | p = irq_map[i].host->of_node->full_name; | 1077 | p = irq_map[i].host->of_node->full_name; |
1171 | else | 1078 | else |
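For reference, virq_debug_show() now emits an extra "chip data" column between the chip name and the host name. A minimal userspace model of the new row layout (the sample values are invented; the real function walks irq descriptors under their locks):

    #include <stdio.h>

    struct row {
        int virq;
        unsigned long hwirq;
        const char *chip;
        void *chip_data;
        const char *host;
    };

    int main(void)
    {
        struct row r = { 16, 0x1a, "MPIC", (void *)0xdeadbeefUL, "/soc/pic" };

        printf("%-5s %-7s %-15s %-18s %s\n",
               "virq", "hwirq", "chip name", "chip data", "host name");
        printf("%5d 0x%05lx %-15s 0x%16p %s\n",
               r.virq, r.hwirq, r.chip ? r.chip : "none",
               r.chip_data, r.host ? r.host : "none");
        return 0;
    }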
diff --git a/arch/powerpc/kernel/kgdb.c b/arch/powerpc/kernel/kgdb.c index 7f61a3ac787c..76a6e40a6f7c 100644 --- a/arch/powerpc/kernel/kgdb.c +++ b/arch/powerpc/kernel/kgdb.c | |||
@@ -109,7 +109,7 @@ static int kgdb_call_nmi_hook(struct pt_regs *regs) | |||
109 | #ifdef CONFIG_SMP | 109 | #ifdef CONFIG_SMP |
110 | void kgdb_roundup_cpus(unsigned long flags) | 110 | void kgdb_roundup_cpus(unsigned long flags) |
111 | { | 111 | { |
112 | smp_send_debugger_break(MSG_ALL_BUT_SELF); | 112 | smp_send_debugger_break(); |
113 | } | 113 | } |
114 | #endif | 114 | #endif |
115 | 115 | ||
@@ -142,7 +142,7 @@ static int kgdb_singlestep(struct pt_regs *regs) | |||
142 | return 0; | 142 | return 0; |
143 | 143 | ||
144 | /* | 144 | /* |
145 | * On Book E and perhaps other processsors, singlestep is handled on | 145 | * On Book E and perhaps other processors, singlestep is handled on |
146 | * the critical exception stack. This causes current_thread_info() | 146 | * the critical exception stack. This causes current_thread_info() |
147 | * to fail, since it locates the thread_info by masking off | 147 | * to fail, since it locates the thread_info by masking off |
148 | * the low bits of the current stack pointer. We work around | 148 | * the low bits of the current stack pointer. We work around |
@@ -194,40 +194,6 @@ static int kgdb_dabr_match(struct pt_regs *regs) | |||
194 | ptr = (unsigned long *)ptr32; \ | 194 | ptr = (unsigned long *)ptr32; \ |
195 | } while (0) | 195 | } while (0) |
196 | 196 | ||
197 | |||
198 | void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs) | ||
199 | { | ||
200 | unsigned long *ptr = gdb_regs; | ||
201 | int reg; | ||
202 | |||
203 | memset(gdb_regs, 0, NUMREGBYTES); | ||
204 | |||
205 | for (reg = 0; reg < 32; reg++) | ||
206 | PACK64(ptr, regs->gpr[reg]); | ||
207 | |||
208 | #ifdef CONFIG_FSL_BOOKE | ||
209 | #ifdef CONFIG_SPE | ||
210 | for (reg = 0; reg < 32; reg++) | ||
211 | PACK64(ptr, current->thread.evr[reg]); | ||
212 | #else | ||
213 | ptr += 32; | ||
214 | #endif | ||
215 | #else | ||
216 | /* fp registers not used by kernel, leave zero */ | ||
217 | ptr += 32 * 8 / sizeof(long); | ||
218 | #endif | ||
219 | |||
220 | PACK64(ptr, regs->nip); | ||
221 | PACK64(ptr, regs->msr); | ||
222 | PACK32(ptr, regs->ccr); | ||
223 | PACK64(ptr, regs->link); | ||
224 | PACK64(ptr, regs->ctr); | ||
225 | PACK32(ptr, regs->xer); | ||
226 | |||
227 | BUG_ON((unsigned long)ptr > | ||
228 | (unsigned long)(((void *)gdb_regs) + NUMREGBYTES)); | ||
229 | } | ||
230 | |||
231 | void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) | 197 | void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) |
232 | { | 198 | { |
233 | struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp + | 199 | struct pt_regs *regs = (struct pt_regs *)(p->thread.ksp + |
@@ -271,44 +237,140 @@ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) | |||
271 | (unsigned long)(((void *)gdb_regs) + NUMREGBYTES)); | 237 | (unsigned long)(((void *)gdb_regs) + NUMREGBYTES)); |
272 | } | 238 | } |
273 | 239 | ||
274 | #define UNPACK64(dest, ptr) do { dest = *(ptr++); } while (0) | 240 | #define GDB_SIZEOF_REG sizeof(unsigned long) |
241 | #define GDB_SIZEOF_REG_U32 sizeof(u32) | ||
275 | 242 | ||
276 | #define UNPACK32(dest, ptr) do { \ | 243 | #ifdef CONFIG_FSL_BOOKE |
277 | u32 *ptr32; \ | 244 | #define GDB_SIZEOF_FLOAT_REG sizeof(unsigned long) |
278 | ptr32 = (u32 *)ptr; \ | 245 | #else |
279 | dest = *(ptr32++); \ | 246 | #define GDB_SIZEOF_FLOAT_REG sizeof(u64) |
280 | ptr = (unsigned long *)ptr32; \ | 247 | #endif |
281 | } while (0) | ||
282 | 248 | ||
283 | void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs) | 249 | struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = |
284 | { | 250 | { |
285 | unsigned long *ptr = gdb_regs; | 251 | { "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[0]) }, |
286 | int reg; | 252 | { "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[1]) }, |
287 | 253 | { "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[2]) }, | |
288 | for (reg = 0; reg < 32; reg++) | 254 | { "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[3]) }, |
289 | UNPACK64(regs->gpr[reg], ptr); | 255 | { "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[4]) }, |
256 | { "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[5]) }, | ||
257 | { "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[6]) }, | ||
258 | { "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[7]) }, | ||
259 | { "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[8]) }, | ||
260 | { "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[9]) }, | ||
261 | { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[10]) }, | ||
262 | { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[11]) }, | ||
263 | { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[12]) }, | ||
264 | { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[13]) }, | ||
265 | { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[14]) }, | ||
266 | { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[15]) }, | ||
267 | { "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[16]) }, | ||
268 | { "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[17]) }, | ||
269 | { "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[18]) }, | ||
270 | { "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[19]) }, | ||
271 | { "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[20]) }, | ||
272 | { "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[21]) }, | ||
273 | { "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[22]) }, | ||
274 | { "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[23]) }, | ||
275 | { "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[24]) }, | ||
276 | { "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[25]) }, | ||
277 | { "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[26]) }, | ||
278 | { "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[27]) }, | ||
279 | { "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[28]) }, | ||
280 | { "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[29]) }, | ||
281 | { "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[30]) }, | ||
282 | { "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, gpr[31]) }, | ||
283 | |||
284 | { "f0", GDB_SIZEOF_FLOAT_REG, 0 }, | ||
285 | { "f1", GDB_SIZEOF_FLOAT_REG, 1 }, | ||
286 | { "f2", GDB_SIZEOF_FLOAT_REG, 2 }, | ||
287 | { "f3", GDB_SIZEOF_FLOAT_REG, 3 }, | ||
288 | { "f4", GDB_SIZEOF_FLOAT_REG, 4 }, | ||
289 | { "f5", GDB_SIZEOF_FLOAT_REG, 5 }, | ||
290 | { "f6", GDB_SIZEOF_FLOAT_REG, 6 }, | ||
291 | { "f7", GDB_SIZEOF_FLOAT_REG, 7 }, | ||
292 | { "f8", GDB_SIZEOF_FLOAT_REG, 8 }, | ||
293 | { "f9", GDB_SIZEOF_FLOAT_REG, 9 }, | ||
294 | { "f10", GDB_SIZEOF_FLOAT_REG, 10 }, | ||
295 | { "f11", GDB_SIZEOF_FLOAT_REG, 11 }, | ||
296 | { "f12", GDB_SIZEOF_FLOAT_REG, 12 }, | ||
297 | { "f13", GDB_SIZEOF_FLOAT_REG, 13 }, | ||
298 | { "f14", GDB_SIZEOF_FLOAT_REG, 14 }, | ||
299 | { "f15", GDB_SIZEOF_FLOAT_REG, 15 }, | ||
300 | { "f16", GDB_SIZEOF_FLOAT_REG, 16 }, | ||
301 | { "f17", GDB_SIZEOF_FLOAT_REG, 17 }, | ||
302 | { "f18", GDB_SIZEOF_FLOAT_REG, 18 }, | ||
303 | { "f19", GDB_SIZEOF_FLOAT_REG, 19 }, | ||
304 | { "f20", GDB_SIZEOF_FLOAT_REG, 20 }, | ||
305 | { "f21", GDB_SIZEOF_FLOAT_REG, 21 }, | ||
306 | { "f22", GDB_SIZEOF_FLOAT_REG, 22 }, | ||
307 | { "f23", GDB_SIZEOF_FLOAT_REG, 23 }, | ||
308 | { "f24", GDB_SIZEOF_FLOAT_REG, 24 }, | ||
309 | { "f25", GDB_SIZEOF_FLOAT_REG, 25 }, | ||
310 | { "f26", GDB_SIZEOF_FLOAT_REG, 26 }, | ||
311 | { "f27", GDB_SIZEOF_FLOAT_REG, 27 }, | ||
312 | { "f28", GDB_SIZEOF_FLOAT_REG, 28 }, | ||
313 | { "f29", GDB_SIZEOF_FLOAT_REG, 29 }, | ||
314 | { "f30", GDB_SIZEOF_FLOAT_REG, 30 }, | ||
315 | { "f31", GDB_SIZEOF_FLOAT_REG, 31 }, | ||
316 | |||
317 | { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, nip) }, | ||
318 | { "msr", GDB_SIZEOF_REG, offsetof(struct pt_regs, msr) }, | ||
319 | { "cr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ccr) }, | ||
320 | { "lr", GDB_SIZEOF_REG, offsetof(struct pt_regs, link) }, | ||
321 | { "ctr", GDB_SIZEOF_REG_U32, offsetof(struct pt_regs, ctr) }, | ||
322 | { "xer", GDB_SIZEOF_REG, offsetof(struct pt_regs, xer) }, | ||
323 | }; | ||
290 | 324 | ||
291 | #ifdef CONFIG_FSL_BOOKE | 325 | char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) |
292 | #ifdef CONFIG_SPE | 326 | { |
293 | for (reg = 0; reg < 32; reg++) | 327 | if (regno >= DBG_MAX_REG_NUM || regno < 0) |
294 | UNPACK64(current->thread.evr[reg], ptr); | 328 | return NULL; |
329 | |||
330 | if (regno < 32 || regno >= 64) | ||
331 | /* First 0 -> 31 gpr registers */ | ||
332 | /* pc, msr, lr... registers 64 -> 69 */ | ||
333 | memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, | ||
334 | dbg_reg_def[regno].size); | ||
335 | |||
336 | if (regno >= 32 && regno < 64) { | ||
337 | /* FP registers 32 -> 63 */ | ||
338 | #if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE) | ||
339 | if (current) | ||
340 | memcpy(mem, ¤t->thread.evr[regno-32], | ||
341 | dbg_reg_def[regno].size); | ||
295 | #else | 342 | #else |
296 | ptr += 32; | 343 | /* fp registers not used by kernel, leave zero */ |
344 | memset(mem, 0, dbg_reg_def[regno].size); | ||
297 | #endif | 345 | #endif |
346 | } | ||
347 | |||
348 | return dbg_reg_def[regno].name; | ||
349 | } | ||
350 | |||
351 | int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) | ||
352 | { | ||
353 | if (regno >= DBG_MAX_REG_NUM || regno < 0) | ||
354 | return -EINVAL; | ||
355 | |||
356 | if (regno < 32 || regno >= 64) | ||
357 | /* First 0 -> 31 gpr registers */ | ||
358 | /* pc, msr, lr... registers 64 -> 69 */ | ||
359 | memcpy((void *)regs + dbg_reg_def[regno].offset, mem, | ||
360 | dbg_reg_def[regno].size); | ||
361 | |||
362 | if (regno >= 32 && regno < 64) { | ||
363 | /* FP registers 32 -> 63 */ | ||
364 | #if defined(CONFIG_FSL_BOOKE) && defined(CONFIG_SPE) | ||
365 | memcpy(¤t->thread.evr[regno-32], mem, | ||
366 | dbg_reg_def[regno].size); | ||
298 | #else | 367 | #else |
299 | /* fp registers not used by kernel, leave zero */ | 368 | /* fp registers not used by kernel, leave zero */ |
300 | ptr += 32 * 8 / sizeof(int); | 369 | return 0; |
301 | #endif | 370 | #endif |
371 | } | ||
302 | 372 | ||
303 | UNPACK64(regs->nip, ptr); | 373 | return 0; |
304 | UNPACK64(regs->msr, ptr); | ||
305 | UNPACK32(regs->ccr, ptr); | ||
306 | UNPACK64(regs->link, ptr); | ||
307 | UNPACK64(regs->ctr, ptr); | ||
308 | UNPACK32(regs->xer, ptr); | ||
309 | |||
310 | BUG_ON((unsigned long)ptr > | ||
311 | (unsigned long)(((void *)gdb_regs) + NUMREGBYTES)); | ||
312 | } | 374 | } |
313 | 375 | ||
314 | void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) | 376 | void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) |
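The kgdb rework above replaces the hand-rolled PACK64/UNPACK64 packing with a table of { name, size, offset } entries, so dbg_get_reg()/dbg_set_reg() reduce to a bounded memcpy at an offsetof()-derived position. A standalone sketch of that idea, using an invented two-field frame instead of the real struct pt_regs:

    #include <stdio.h>
    #include <string.h>
    #include <stddef.h>

    struct fake_regs {                       /* stand-in for struct pt_regs */
        unsigned long nip;
        unsigned long msr;
    };

    struct reg_def {
        const char *name;
        size_t size;
        size_t offset;
    };

    static const struct reg_def reg_def[] = {
        { "pc",  sizeof(unsigned long), offsetof(struct fake_regs, nip) },
        { "msr", sizeof(unsigned long), offsetof(struct fake_regs, msr) },
    };

    /* Copy register 'regno' out of the frame; returns its name or NULL. */
    static const char *get_reg(int regno, void *mem, struct fake_regs *regs)
    {
        if (regno < 0 || regno >= (int)(sizeof(reg_def) / sizeof(reg_def[0])))
            return NULL;
        memcpy(mem, (char *)regs + reg_def[regno].offset, reg_def[regno].size);
        return reg_def[regno].name;
    }

    int main(void)
    {
        struct fake_regs regs = { .nip = 0xc0000000UL, .msr = 0x9032UL };
        unsigned long val = 0;
        const char *name = get_reg(0, &val, &regs);

        printf("%s = 0x%lx\n", name, val);
        return 0;
    }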
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c new file mode 100644 index 000000000000..b06bdae04064 --- /dev/null +++ b/arch/powerpc/kernel/kvm.c | |||
@@ -0,0 +1,596 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved. | ||
3 | * | ||
4 | * Authors: | ||
5 | * Alexander Graf <agraf@suse.de> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License, version 2, as | ||
9 | * published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
19 | */ | ||
20 | |||
21 | #include <linux/kvm_host.h> | ||
22 | #include <linux/init.h> | ||
23 | #include <linux/kvm_para.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/of.h> | ||
26 | |||
27 | #include <asm/reg.h> | ||
28 | #include <asm/sections.h> | ||
29 | #include <asm/cacheflush.h> | ||
30 | #include <asm/disassemble.h> | ||
31 | |||
32 | #define KVM_MAGIC_PAGE (-4096L) | ||
33 | #define magic_var(x) KVM_MAGIC_PAGE + offsetof(struct kvm_vcpu_arch_shared, x) | ||
34 | |||
35 | #define KVM_INST_LWZ 0x80000000 | ||
36 | #define KVM_INST_STW 0x90000000 | ||
37 | #define KVM_INST_LD 0xe8000000 | ||
38 | #define KVM_INST_STD 0xf8000000 | ||
39 | #define KVM_INST_NOP 0x60000000 | ||
40 | #define KVM_INST_B 0x48000000 | ||
41 | #define KVM_INST_B_MASK 0x03ffffff | ||
42 | #define KVM_INST_B_MAX 0x01ffffff | ||
43 | |||
44 | #define KVM_MASK_RT 0x03e00000 | ||
45 | #define KVM_RT_30 0x03c00000 | ||
46 | #define KVM_MASK_RB 0x0000f800 | ||
47 | #define KVM_INST_MFMSR 0x7c0000a6 | ||
48 | #define KVM_INST_MFSPR_SPRG0 0x7c1042a6 | ||
49 | #define KVM_INST_MFSPR_SPRG1 0x7c1142a6 | ||
50 | #define KVM_INST_MFSPR_SPRG2 0x7c1242a6 | ||
51 | #define KVM_INST_MFSPR_SPRG3 0x7c1342a6 | ||
52 | #define KVM_INST_MFSPR_SRR0 0x7c1a02a6 | ||
53 | #define KVM_INST_MFSPR_SRR1 0x7c1b02a6 | ||
54 | #define KVM_INST_MFSPR_DAR 0x7c1302a6 | ||
55 | #define KVM_INST_MFSPR_DSISR 0x7c1202a6 | ||
56 | |||
57 | #define KVM_INST_MTSPR_SPRG0 0x7c1043a6 | ||
58 | #define KVM_INST_MTSPR_SPRG1 0x7c1143a6 | ||
59 | #define KVM_INST_MTSPR_SPRG2 0x7c1243a6 | ||
60 | #define KVM_INST_MTSPR_SPRG3 0x7c1343a6 | ||
61 | #define KVM_INST_MTSPR_SRR0 0x7c1a03a6 | ||
62 | #define KVM_INST_MTSPR_SRR1 0x7c1b03a6 | ||
63 | #define KVM_INST_MTSPR_DAR 0x7c1303a6 | ||
64 | #define KVM_INST_MTSPR_DSISR 0x7c1203a6 | ||
65 | |||
66 | #define KVM_INST_TLBSYNC 0x7c00046c | ||
67 | #define KVM_INST_MTMSRD_L0 0x7c000164 | ||
68 | #define KVM_INST_MTMSRD_L1 0x7c010164 | ||
69 | #define KVM_INST_MTMSR 0x7c000124 | ||
70 | |||
71 | #define KVM_INST_WRTEEI_0 0x7c000146 | ||
72 | #define KVM_INST_WRTEEI_1 0x7c008146 | ||
73 | |||
74 | #define KVM_INST_MTSRIN 0x7c0001e4 | ||
75 | |||
76 | static bool kvm_patching_worked = true; | ||
77 | static char kvm_tmp[1024 * 1024]; | ||
78 | static int kvm_tmp_index; | ||
79 | |||
80 | static inline void kvm_patch_ins(u32 *inst, u32 new_inst) | ||
81 | { | ||
82 | *inst = new_inst; | ||
83 | flush_icache_range((ulong)inst, (ulong)inst + 4); | ||
84 | } | ||
85 | |||
86 | static void kvm_patch_ins_ll(u32 *inst, long addr, u32 rt) | ||
87 | { | ||
88 | #ifdef CONFIG_64BIT | ||
89 | kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc)); | ||
90 | #else | ||
91 | kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000fffc)); | ||
92 | #endif | ||
93 | } | ||
94 | |||
95 | static void kvm_patch_ins_ld(u32 *inst, long addr, u32 rt) | ||
96 | { | ||
97 | #ifdef CONFIG_64BIT | ||
98 | kvm_patch_ins(inst, KVM_INST_LD | rt | (addr & 0x0000fffc)); | ||
99 | #else | ||
100 | kvm_patch_ins(inst, KVM_INST_LWZ | rt | ((addr + 4) & 0x0000fffc)); | ||
101 | #endif | ||
102 | } | ||
103 | |||
104 | static void kvm_patch_ins_lwz(u32 *inst, long addr, u32 rt) | ||
105 | { | ||
106 | kvm_patch_ins(inst, KVM_INST_LWZ | rt | (addr & 0x0000ffff)); | ||
107 | } | ||
108 | |||
109 | static void kvm_patch_ins_std(u32 *inst, long addr, u32 rt) | ||
110 | { | ||
111 | #ifdef CONFIG_64BIT | ||
112 | kvm_patch_ins(inst, KVM_INST_STD | rt | (addr & 0x0000fffc)); | ||
113 | #else | ||
114 | kvm_patch_ins(inst, KVM_INST_STW | rt | ((addr + 4) & 0x0000fffc)); | ||
115 | #endif | ||
116 | } | ||
117 | |||
118 | static void kvm_patch_ins_stw(u32 *inst, long addr, u32 rt) | ||
119 | { | ||
120 | kvm_patch_ins(inst, KVM_INST_STW | rt | (addr & 0x0000fffc)); | ||
121 | } | ||
122 | |||
123 | static void kvm_patch_ins_nop(u32 *inst) | ||
124 | { | ||
125 | kvm_patch_ins(inst, KVM_INST_NOP); | ||
126 | } | ||
127 | |||
128 | static void kvm_patch_ins_b(u32 *inst, int addr) | ||
129 | { | ||
130 | #if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S) | ||
131 | /* On relocatable kernels interrupts handlers and our code | ||
132 | can be in different regions, so we don't patch them */ | ||
133 | |||
134 | extern u32 __end_interrupts; | ||
135 | if ((ulong)inst < (ulong)&__end_interrupts) | ||
136 | return; | ||
137 | #endif | ||
138 | |||
139 | kvm_patch_ins(inst, KVM_INST_B | (addr & KVM_INST_B_MASK)); | ||
140 | } | ||
141 | |||
142 | static u32 *kvm_alloc(int len) | ||
143 | { | ||
144 | u32 *p; | ||
145 | |||
146 | if ((kvm_tmp_index + len) > ARRAY_SIZE(kvm_tmp)) { | ||
147 | printk(KERN_ERR "KVM: No more space (%d + %d)\n", | ||
148 | kvm_tmp_index, len); | ||
149 | kvm_patching_worked = false; | ||
150 | return NULL; | ||
151 | } | ||
152 | |||
153 | p = (void*)&kvm_tmp[kvm_tmp_index]; | ||
154 | kvm_tmp_index += len; | ||
155 | |||
156 | return p; | ||
157 | } | ||
158 | |||
159 | extern u32 kvm_emulate_mtmsrd_branch_offs; | ||
160 | extern u32 kvm_emulate_mtmsrd_reg_offs; | ||
161 | extern u32 kvm_emulate_mtmsrd_orig_ins_offs; | ||
162 | extern u32 kvm_emulate_mtmsrd_len; | ||
163 | extern u32 kvm_emulate_mtmsrd[]; | ||
164 | |||
165 | static void kvm_patch_ins_mtmsrd(u32 *inst, u32 rt) | ||
166 | { | ||
167 | u32 *p; | ||
168 | int distance_start; | ||
169 | int distance_end; | ||
170 | ulong next_inst; | ||
171 | |||
172 | p = kvm_alloc(kvm_emulate_mtmsrd_len * 4); | ||
173 | if (!p) | ||
174 | return; | ||
175 | |||
176 | /* Find out where we are and put everything there */ | ||
177 | distance_start = (ulong)p - (ulong)inst; | ||
178 | next_inst = ((ulong)inst + 4); | ||
179 | distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsrd_branch_offs]; | ||
180 | |||
181 | /* Make sure we only write valid b instructions */ | ||
182 | if (distance_start > KVM_INST_B_MAX) { | ||
183 | kvm_patching_worked = false; | ||
184 | return; | ||
185 | } | ||
186 | |||
187 | /* Modify the chunk to fit the invocation */ | ||
188 | memcpy(p, kvm_emulate_mtmsrd, kvm_emulate_mtmsrd_len * 4); | ||
189 | p[kvm_emulate_mtmsrd_branch_offs] |= distance_end & KVM_INST_B_MASK; | ||
190 | switch (get_rt(rt)) { | ||
191 | case 30: | ||
192 | kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs], | ||
193 | magic_var(scratch2), KVM_RT_30); | ||
194 | break; | ||
195 | case 31: | ||
196 | kvm_patch_ins_ll(&p[kvm_emulate_mtmsrd_reg_offs], | ||
197 | magic_var(scratch1), KVM_RT_30); | ||
198 | break; | ||
199 | default: | ||
200 | p[kvm_emulate_mtmsrd_reg_offs] |= rt; | ||
201 | break; | ||
202 | } | ||
203 | |||
204 | p[kvm_emulate_mtmsrd_orig_ins_offs] = *inst; | ||
205 | flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsrd_len * 4); | ||
206 | |||
207 | /* Patch the invocation */ | ||
208 | kvm_patch_ins_b(inst, distance_start); | ||
209 | } | ||
210 | |||
211 | extern u32 kvm_emulate_mtmsr_branch_offs; | ||
212 | extern u32 kvm_emulate_mtmsr_reg1_offs; | ||
213 | extern u32 kvm_emulate_mtmsr_reg2_offs; | ||
214 | extern u32 kvm_emulate_mtmsr_orig_ins_offs; | ||
215 | extern u32 kvm_emulate_mtmsr_len; | ||
216 | extern u32 kvm_emulate_mtmsr[]; | ||
217 | |||
218 | static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt) | ||
219 | { | ||
220 | u32 *p; | ||
221 | int distance_start; | ||
222 | int distance_end; | ||
223 | ulong next_inst; | ||
224 | |||
225 | p = kvm_alloc(kvm_emulate_mtmsr_len * 4); | ||
226 | if (!p) | ||
227 | return; | ||
228 | |||
229 | /* Find out where we are and put everything there */ | ||
230 | distance_start = (ulong)p - (ulong)inst; | ||
231 | next_inst = ((ulong)inst + 4); | ||
232 | distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs]; | ||
233 | |||
234 | /* Make sure we only write valid b instructions */ | ||
235 | if (distance_start > KVM_INST_B_MAX) { | ||
236 | kvm_patching_worked = false; | ||
237 | return; | ||
238 | } | ||
239 | |||
240 | /* Modify the chunk to fit the invocation */ | ||
241 | memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4); | ||
242 | p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK; | ||
243 | |||
244 | /* Make clobbered registers work too */ | ||
245 | switch (get_rt(rt)) { | ||
246 | case 30: | ||
247 | kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs], | ||
248 | magic_var(scratch2), KVM_RT_30); | ||
249 | kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs], | ||
250 | magic_var(scratch2), KVM_RT_30); | ||
251 | break; | ||
252 | case 31: | ||
253 | kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs], | ||
254 | magic_var(scratch1), KVM_RT_30); | ||
255 | kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs], | ||
256 | magic_var(scratch1), KVM_RT_30); | ||
257 | break; | ||
258 | default: | ||
259 | p[kvm_emulate_mtmsr_reg1_offs] |= rt; | ||
260 | p[kvm_emulate_mtmsr_reg2_offs] |= rt; | ||
261 | break; | ||
262 | } | ||
263 | |||
264 | p[kvm_emulate_mtmsr_orig_ins_offs] = *inst; | ||
265 | flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4); | ||
266 | |||
267 | /* Patch the invocation */ | ||
268 | kvm_patch_ins_b(inst, distance_start); | ||
269 | } | ||
270 | |||
271 | #ifdef CONFIG_BOOKE | ||
272 | |||
273 | extern u32 kvm_emulate_wrteei_branch_offs; | ||
274 | extern u32 kvm_emulate_wrteei_ee_offs; | ||
275 | extern u32 kvm_emulate_wrteei_len; | ||
276 | extern u32 kvm_emulate_wrteei[]; | ||
277 | |||
278 | static void kvm_patch_ins_wrteei(u32 *inst) | ||
279 | { | ||
280 | u32 *p; | ||
281 | int distance_start; | ||
282 | int distance_end; | ||
283 | ulong next_inst; | ||
284 | |||
285 | p = kvm_alloc(kvm_emulate_wrteei_len * 4); | ||
286 | if (!p) | ||
287 | return; | ||
288 | |||
289 | /* Find out where we are and put everything there */ | ||
290 | distance_start = (ulong)p - (ulong)inst; | ||
291 | next_inst = ((ulong)inst + 4); | ||
292 | distance_end = next_inst - (ulong)&p[kvm_emulate_wrteei_branch_offs]; | ||
293 | |||
294 | /* Make sure we only write valid b instructions */ | ||
295 | if (distance_start > KVM_INST_B_MAX) { | ||
296 | kvm_patching_worked = false; | ||
297 | return; | ||
298 | } | ||
299 | |||
300 | /* Modify the chunk to fit the invocation */ | ||
301 | memcpy(p, kvm_emulate_wrteei, kvm_emulate_wrteei_len * 4); | ||
302 | p[kvm_emulate_wrteei_branch_offs] |= distance_end & KVM_INST_B_MASK; | ||
303 | p[kvm_emulate_wrteei_ee_offs] |= (*inst & MSR_EE); | ||
304 | flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrteei_len * 4); | ||
305 | |||
306 | /* Patch the invocation */ | ||
307 | kvm_patch_ins_b(inst, distance_start); | ||
308 | } | ||
309 | |||
310 | #endif | ||
311 | |||
312 | #ifdef CONFIG_PPC_BOOK3S_32 | ||
313 | |||
314 | extern u32 kvm_emulate_mtsrin_branch_offs; | ||
315 | extern u32 kvm_emulate_mtsrin_reg1_offs; | ||
316 | extern u32 kvm_emulate_mtsrin_reg2_offs; | ||
317 | extern u32 kvm_emulate_mtsrin_orig_ins_offs; | ||
318 | extern u32 kvm_emulate_mtsrin_len; | ||
319 | extern u32 kvm_emulate_mtsrin[]; | ||
320 | |||
321 | static void kvm_patch_ins_mtsrin(u32 *inst, u32 rt, u32 rb) | ||
322 | { | ||
323 | u32 *p; | ||
324 | int distance_start; | ||
325 | int distance_end; | ||
326 | ulong next_inst; | ||
327 | |||
328 | p = kvm_alloc(kvm_emulate_mtsrin_len * 4); | ||
329 | if (!p) | ||
330 | return; | ||
331 | |||
332 | /* Find out where we are and put everything there */ | ||
333 | distance_start = (ulong)p - (ulong)inst; | ||
334 | next_inst = ((ulong)inst + 4); | ||
335 | distance_end = next_inst - (ulong)&p[kvm_emulate_mtsrin_branch_offs]; | ||
336 | |||
337 | /* Make sure we only write valid b instructions */ | ||
338 | if (distance_start > KVM_INST_B_MAX) { | ||
339 | kvm_patching_worked = false; | ||
340 | return; | ||
341 | } | ||
342 | |||
343 | /* Modify the chunk to fit the invocation */ | ||
344 | memcpy(p, kvm_emulate_mtsrin, kvm_emulate_mtsrin_len * 4); | ||
345 | p[kvm_emulate_mtsrin_branch_offs] |= distance_end & KVM_INST_B_MASK; | ||
346 | p[kvm_emulate_mtsrin_reg1_offs] |= (rb << 10); | ||
347 | p[kvm_emulate_mtsrin_reg2_offs] |= rt; | ||
348 | p[kvm_emulate_mtsrin_orig_ins_offs] = *inst; | ||
349 | flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtsrin_len * 4); | ||
350 | |||
351 | /* Patch the invocation */ | ||
352 | kvm_patch_ins_b(inst, distance_start); | ||
353 | } | ||
354 | |||
355 | #endif | ||
356 | |||
357 | static void kvm_map_magic_page(void *data) | ||
358 | { | ||
359 | u32 *features = data; | ||
360 | |||
361 | ulong in[8]; | ||
362 | ulong out[8]; | ||
363 | |||
364 | in[0] = KVM_MAGIC_PAGE; | ||
365 | in[1] = KVM_MAGIC_PAGE; | ||
366 | |||
367 | kvm_hypercall(in, out, HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE); | ||
368 | |||
369 | *features = out[0]; | ||
370 | } | ||
371 | |||
372 | static void kvm_check_ins(u32 *inst, u32 features) | ||
373 | { | ||
374 | u32 _inst = *inst; | ||
375 | u32 inst_no_rt = _inst & ~KVM_MASK_RT; | ||
376 | u32 inst_rt = _inst & KVM_MASK_RT; | ||
377 | |||
378 | switch (inst_no_rt) { | ||
379 | /* Loads */ | ||
380 | case KVM_INST_MFMSR: | ||
381 | kvm_patch_ins_ld(inst, magic_var(msr), inst_rt); | ||
382 | break; | ||
383 | case KVM_INST_MFSPR_SPRG0: | ||
384 | kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt); | ||
385 | break; | ||
386 | case KVM_INST_MFSPR_SPRG1: | ||
387 | kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt); | ||
388 | break; | ||
389 | case KVM_INST_MFSPR_SPRG2: | ||
390 | kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt); | ||
391 | break; | ||
392 | case KVM_INST_MFSPR_SPRG3: | ||
393 | kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt); | ||
394 | break; | ||
395 | case KVM_INST_MFSPR_SRR0: | ||
396 | kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt); | ||
397 | break; | ||
398 | case KVM_INST_MFSPR_SRR1: | ||
399 | kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt); | ||
400 | break; | ||
401 | case KVM_INST_MFSPR_DAR: | ||
402 | kvm_patch_ins_ld(inst, magic_var(dar), inst_rt); | ||
403 | break; | ||
404 | case KVM_INST_MFSPR_DSISR: | ||
405 | kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt); | ||
406 | break; | ||
407 | |||
408 | /* Stores */ | ||
409 | case KVM_INST_MTSPR_SPRG0: | ||
410 | kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt); | ||
411 | break; | ||
412 | case KVM_INST_MTSPR_SPRG1: | ||
413 | kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt); | ||
414 | break; | ||
415 | case KVM_INST_MTSPR_SPRG2: | ||
416 | kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt); | ||
417 | break; | ||
418 | case KVM_INST_MTSPR_SPRG3: | ||
419 | kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt); | ||
420 | break; | ||
421 | case KVM_INST_MTSPR_SRR0: | ||
422 | kvm_patch_ins_std(inst, magic_var(srr0), inst_rt); | ||
423 | break; | ||
424 | case KVM_INST_MTSPR_SRR1: | ||
425 | kvm_patch_ins_std(inst, magic_var(srr1), inst_rt); | ||
426 | break; | ||
427 | case KVM_INST_MTSPR_DAR: | ||
428 | kvm_patch_ins_std(inst, magic_var(dar), inst_rt); | ||
429 | break; | ||
430 | case KVM_INST_MTSPR_DSISR: | ||
431 | kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt); | ||
432 | break; | ||
433 | |||
434 | /* Nops */ | ||
435 | case KVM_INST_TLBSYNC: | ||
436 | kvm_patch_ins_nop(inst); | ||
437 | break; | ||
438 | |||
439 | /* Rewrites */ | ||
440 | case KVM_INST_MTMSRD_L1: | ||
441 | kvm_patch_ins_mtmsrd(inst, inst_rt); | ||
442 | break; | ||
443 | case KVM_INST_MTMSR: | ||
444 | case KVM_INST_MTMSRD_L0: | ||
445 | kvm_patch_ins_mtmsr(inst, inst_rt); | ||
446 | break; | ||
447 | } | ||
448 | |||
449 | switch (inst_no_rt & ~KVM_MASK_RB) { | ||
450 | #ifdef CONFIG_PPC_BOOK3S_32 | ||
451 | case KVM_INST_MTSRIN: | ||
452 | if (features & KVM_MAGIC_FEAT_SR) { | ||
453 | u32 inst_rb = _inst & KVM_MASK_RB; | ||
454 | kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb); | ||
455 | } | ||
456 | break; | ||
458 | #endif | ||
459 | } | ||
460 | |||
461 | switch (_inst) { | ||
462 | #ifdef CONFIG_BOOKE | ||
463 | case KVM_INST_WRTEEI_0: | ||
464 | case KVM_INST_WRTEEI_1: | ||
465 | kvm_patch_ins_wrteei(inst); | ||
466 | break; | ||
467 | #endif | ||
468 | } | ||
469 | } | ||
470 | |||
471 | static void kvm_use_magic_page(void) | ||
472 | { | ||
473 | u32 *p; | ||
474 | u32 *start, *end; | ||
475 | u32 tmp; | ||
476 | u32 features; | ||
477 | |||
478 | /* Tell the host to map the magic page to -4096 on all CPUs */ | ||
479 | on_each_cpu(kvm_map_magic_page, &features, 1); | ||
480 | |||
481 | /* Quick self-test to see if the mapping works */ | ||
482 | if (__get_user(tmp, (u32*)KVM_MAGIC_PAGE)) { | ||
483 | kvm_patching_worked = false; | ||
484 | return; | ||
485 | } | ||
486 | |||
487 | /* Now loop through all code and find instructions */ | ||
488 | start = (void*)_stext; | ||
489 | end = (void*)_etext; | ||
490 | |||
491 | for (p = start; p < end; p++) | ||
492 | kvm_check_ins(p, features); | ||
493 | |||
494 | printk(KERN_INFO "KVM: Live patching for a fast VM %s\n", | ||
495 | kvm_patching_worked ? "worked" : "failed"); | ||
496 | } | ||
497 | |||
498 | unsigned long kvm_hypercall(unsigned long *in, | ||
499 | unsigned long *out, | ||
500 | unsigned long nr) | ||
501 | { | ||
502 | unsigned long register r0 asm("r0"); | ||
503 | unsigned long register r3 asm("r3") = in[0]; | ||
504 | unsigned long register r4 asm("r4") = in[1]; | ||
505 | unsigned long register r5 asm("r5") = in[2]; | ||
506 | unsigned long register r6 asm("r6") = in[3]; | ||
507 | unsigned long register r7 asm("r7") = in[4]; | ||
508 | unsigned long register r8 asm("r8") = in[5]; | ||
509 | unsigned long register r9 asm("r9") = in[6]; | ||
510 | unsigned long register r10 asm("r10") = in[7]; | ||
511 | unsigned long register r11 asm("r11") = nr; | ||
512 | unsigned long register r12 asm("r12"); | ||
513 | |||
514 | asm volatile("bl kvm_hypercall_start" | ||
515 | : "=r"(r0), "=r"(r3), "=r"(r4), "=r"(r5), "=r"(r6), | ||
516 | "=r"(r7), "=r"(r8), "=r"(r9), "=r"(r10), "=r"(r11), | ||
517 | "=r"(r12) | ||
518 | : "r"(r3), "r"(r4), "r"(r5), "r"(r6), "r"(r7), "r"(r8), | ||
519 | "r"(r9), "r"(r10), "r"(r11) | ||
520 | : "memory", "cc", "xer", "ctr", "lr"); | ||
521 | |||
522 | out[0] = r4; | ||
523 | out[1] = r5; | ||
524 | out[2] = r6; | ||
525 | out[3] = r7; | ||
526 | out[4] = r8; | ||
527 | out[5] = r9; | ||
528 | out[6] = r10; | ||
529 | out[7] = r11; | ||
530 | |||
531 | return r3; | ||
532 | } | ||
533 | EXPORT_SYMBOL_GPL(kvm_hypercall); | ||
534 | |||
535 | static int kvm_para_setup(void) | ||
536 | { | ||
537 | extern u32 kvm_hypercall_start; | ||
538 | struct device_node *hyper_node; | ||
539 | u32 *insts; | ||
540 | int len, i; | ||
541 | |||
542 | hyper_node = of_find_node_by_path("/hypervisor"); | ||
543 | if (!hyper_node) | ||
544 | return -1; | ||
545 | |||
546 | insts = (u32*)of_get_property(hyper_node, "hcall-instructions", &len); | ||
547 | if (len % 4) | ||
548 | return -1; | ||
549 | if (len > (4 * 4)) | ||
550 | return -1; | ||
551 | |||
552 | for (i = 0; i < (len / 4); i++) | ||
553 | kvm_patch_ins(&(&kvm_hypercall_start)[i], insts[i]); | ||
554 | |||
555 | return 0; | ||
556 | } | ||
557 | |||
558 | static __init void kvm_free_tmp(void) | ||
559 | { | ||
560 | unsigned long start, end; | ||
561 | |||
562 | start = (ulong)&kvm_tmp[kvm_tmp_index + (PAGE_SIZE - 1)] & PAGE_MASK; | ||
563 | end = (ulong)&kvm_tmp[ARRAY_SIZE(kvm_tmp)] & PAGE_MASK; | ||
564 | |||
565 | /* Free the tmp space we don't need */ | ||
566 | for (; start < end; start += PAGE_SIZE) { | ||
567 | ClearPageReserved(virt_to_page(start)); | ||
568 | init_page_count(virt_to_page(start)); | ||
569 | free_page(start); | ||
570 | totalram_pages++; | ||
571 | } | ||
572 | } | ||
573 | |||
574 | static int __init kvm_guest_init(void) | ||
575 | { | ||
576 | if (!kvm_para_available()) | ||
577 | goto free_tmp; | ||
578 | |||
579 | if (kvm_para_setup()) | ||
580 | goto free_tmp; | ||
581 | |||
582 | if (kvm_para_has_feature(KVM_FEATURE_MAGIC_PAGE)) | ||
583 | kvm_use_magic_page(); | ||
584 | |||
585 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
586 | /* Enable napping */ | ||
587 | powersave_nap = 1; | ||
588 | #endif | ||
589 | |||
590 | free_tmp: | ||
591 | kvm_free_tmp(); | ||
592 | |||
593 | return 0; | ||
594 | } | ||
595 | |||
596 | postcore_initcall(kvm_guest_init); | ||
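kvm_patch_ins_b() above only rewrites a call site when the out-of-line emulation chunk is reachable with a plain "b": the distance is masked into the low 26 bits of the opcode and patching is refused beyond KVM_INST_B_MAX. A standalone sketch of that encoding step (the constants are copied from the defines above; like the kernel code, it only range-checks the forward direction):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdbool.h>

    #define INST_B      0x48000000u   /* unconditional branch opcode */
    #define INST_B_MASK 0x03ffffffu   /* displacement field */
    #define INST_B_MAX  0x01ffffffu   /* largest reachable forward offset */

    static bool encode_branch(long distance, uint32_t *inst)
    {
        if (distance > (long)INST_B_MAX)
            return false;             /* chunk too far away, leave the site alone */
        *inst = INST_B | ((uint32_t)distance & INST_B_MASK);
        return true;
    }

    int main(void)
    {
        uint32_t inst;

        if (encode_branch(0x1000, &inst))
            printf("patched instruction: 0x%08x\n", inst);   /* 0x48001000 */
        return 0;
    }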
diff --git a/arch/powerpc/kernel/kvm_emul.S b/arch/powerpc/kernel/kvm_emul.S new file mode 100644 index 000000000000..f2b1b2523e61 --- /dev/null +++ b/arch/powerpc/kernel/kvm_emul.S | |||
@@ -0,0 +1,302 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License, version 2, as | ||
4 | * published by the Free Software Foundation. | ||
5 | * | ||
6 | * This program is distributed in the hope that it will be useful, | ||
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
9 | * GNU General Public License for more details. | ||
10 | * | ||
11 | * You should have received a copy of the GNU General Public License | ||
12 | * along with this program; if not, write to the Free Software | ||
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | ||
14 | * | ||
15 | * Copyright SUSE Linux Products GmbH 2010 | ||
16 | * | ||
17 | * Authors: Alexander Graf <agraf@suse.de> | ||
18 | */ | ||
19 | |||
20 | #include <asm/ppc_asm.h> | ||
21 | #include <asm/kvm_asm.h> | ||
22 | #include <asm/reg.h> | ||
23 | #include <asm/page.h> | ||
24 | #include <asm/asm-offsets.h> | ||
25 | |||
26 | /* Hypercall entry point. Will be patched with device tree instructions. */ | ||
27 | |||
28 | .global kvm_hypercall_start | ||
29 | kvm_hypercall_start: | ||
30 | li r3, -1 | ||
31 | nop | ||
32 | nop | ||
33 | nop | ||
34 | blr | ||
35 | |||
36 | #define KVM_MAGIC_PAGE (-4096) | ||
37 | |||
38 | #ifdef CONFIG_64BIT | ||
39 | #define LL64(reg, offs, reg2) ld reg, (offs)(reg2) | ||
40 | #define STL64(reg, offs, reg2) std reg, (offs)(reg2) | ||
41 | #else | ||
42 | #define LL64(reg, offs, reg2) lwz reg, (offs + 4)(reg2) | ||
43 | #define STL64(reg, offs, reg2) stw reg, (offs + 4)(reg2) | ||
44 | #endif | ||
45 | |||
46 | #define SCRATCH_SAVE \ | ||
47 | /* Enable critical section. We are critical if \ | ||
48 | shared->critical == r1 */ \ | ||
49 | STL64(r1, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0); \ | ||
50 | \ | ||
51 | /* Save state */ \ | ||
52 | PPC_STL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \ | ||
53 | PPC_STL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \ | ||
54 | mfcr r31; \ | ||
55 | stw r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0); | ||
56 | |||
57 | #define SCRATCH_RESTORE \ | ||
58 | /* Restore state */ \ | ||
59 | PPC_LL r31, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH1)(0); \ | ||
60 | lwz r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH3)(0); \ | ||
61 | mtcr r30; \ | ||
62 | PPC_LL r30, (KVM_MAGIC_PAGE + KVM_MAGIC_SCRATCH2)(0); \ | ||
63 | \ | ||
64 | /* Disable critical section. We are critical if \ | ||
65 | shared->critical == r1 and r2 is always != r1 */ \ | ||
66 | STL64(r2, KVM_MAGIC_PAGE + KVM_MAGIC_CRITICAL, 0); | ||
67 | |||
68 | .global kvm_emulate_mtmsrd | ||
69 | kvm_emulate_mtmsrd: | ||
70 | |||
71 | SCRATCH_SAVE | ||
72 | |||
73 | /* Put MSR & ~(MSR_EE|MSR_RI) in r31 */ | ||
74 | LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
75 | lis r30, (~(MSR_EE | MSR_RI))@h | ||
76 | ori r30, r30, (~(MSR_EE | MSR_RI))@l | ||
77 | and r31, r31, r30 | ||
78 | |||
79 | /* OR the register's (MSR_EE|MSR_RI) on MSR */ | ||
80 | kvm_emulate_mtmsrd_reg: | ||
81 | ori r30, r0, 0 | ||
82 | andi. r30, r30, (MSR_EE|MSR_RI) | ||
83 | or r31, r31, r30 | ||
84 | |||
85 | /* Put MSR back into magic page */ | ||
86 | STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
87 | |||
88 | /* Check if we have to fetch an interrupt */ | ||
89 | lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0) | ||
90 | cmpwi r31, 0 | ||
91 | beq+ no_check | ||
92 | |||
93 | /* Check if we may trigger an interrupt */ | ||
94 | andi. r30, r30, MSR_EE | ||
95 | beq no_check | ||
96 | |||
97 | SCRATCH_RESTORE | ||
98 | |||
99 | /* Nag hypervisor */ | ||
100 | kvm_emulate_mtmsrd_orig_ins: | ||
101 | tlbsync | ||
102 | |||
103 | b kvm_emulate_mtmsrd_branch | ||
104 | |||
105 | no_check: | ||
106 | |||
107 | SCRATCH_RESTORE | ||
108 | |||
109 | /* Go back to caller */ | ||
110 | kvm_emulate_mtmsrd_branch: | ||
111 | b . | ||
112 | kvm_emulate_mtmsrd_end: | ||
113 | |||
114 | .global kvm_emulate_mtmsrd_branch_offs | ||
115 | kvm_emulate_mtmsrd_branch_offs: | ||
116 | .long (kvm_emulate_mtmsrd_branch - kvm_emulate_mtmsrd) / 4 | ||
117 | |||
118 | .global kvm_emulate_mtmsrd_reg_offs | ||
119 | kvm_emulate_mtmsrd_reg_offs: | ||
120 | .long (kvm_emulate_mtmsrd_reg - kvm_emulate_mtmsrd) / 4 | ||
121 | |||
122 | .global kvm_emulate_mtmsrd_orig_ins_offs | ||
123 | kvm_emulate_mtmsrd_orig_ins_offs: | ||
124 | .long (kvm_emulate_mtmsrd_orig_ins - kvm_emulate_mtmsrd) / 4 | ||
125 | |||
126 | .global kvm_emulate_mtmsrd_len | ||
127 | kvm_emulate_mtmsrd_len: | ||
128 | .long (kvm_emulate_mtmsrd_end - kvm_emulate_mtmsrd) / 4 | ||
129 | |||
130 | |||
131 | #define MSR_SAFE_BITS (MSR_EE | MSR_CE | MSR_ME | MSR_RI) | ||
132 | #define MSR_CRITICAL_BITS ~MSR_SAFE_BITS | ||
133 | |||
134 | .global kvm_emulate_mtmsr | ||
135 | kvm_emulate_mtmsr: | ||
136 | |||
137 | SCRATCH_SAVE | ||
138 | |||
139 | /* Fetch old MSR in r31 */ | ||
140 | LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
141 | |||
142 | /* Find the changed bits between old and new MSR */ | ||
143 | kvm_emulate_mtmsr_reg1: | ||
144 | ori r30, r0, 0 | ||
145 | xor r31, r30, r31 | ||
146 | |||
147 | /* Check if we need to really do mtmsr */ | ||
148 | LOAD_REG_IMMEDIATE(r30, MSR_CRITICAL_BITS) | ||
149 | and. r31, r31, r30 | ||
150 | |||
151 | /* No critical bits changed? Maybe we can stay in the guest. */ | ||
152 | beq maybe_stay_in_guest | ||
153 | |||
154 | do_mtmsr: | ||
155 | |||
156 | SCRATCH_RESTORE | ||
157 | |||
158 | /* Just fire off the mtmsr if it's critical */ | ||
159 | kvm_emulate_mtmsr_orig_ins: | ||
160 | mtmsr r0 | ||
161 | |||
162 | b kvm_emulate_mtmsr_branch | ||
163 | |||
164 | maybe_stay_in_guest: | ||
165 | |||
166 | /* Get the target register in r30 */ | ||
167 | kvm_emulate_mtmsr_reg2: | ||
168 | ori r30, r0, 0 | ||
169 | |||
170 | /* Check if we have to fetch an interrupt */ | ||
171 | lwz r31, (KVM_MAGIC_PAGE + KVM_MAGIC_INT)(0) | ||
172 | cmpwi r31, 0 | ||
173 | beq+ no_mtmsr | ||
174 | |||
175 | /* Check if we may trigger an interrupt */ | ||
176 | andi. r31, r30, MSR_EE | ||
177 | beq no_mtmsr | ||
178 | |||
179 | b do_mtmsr | ||
180 | |||
181 | no_mtmsr: | ||
182 | |||
183 | /* Put MSR into magic page because we don't call mtmsr */ | ||
184 | STL64(r30, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
185 | |||
186 | SCRATCH_RESTORE | ||
187 | |||
188 | /* Go back to caller */ | ||
189 | kvm_emulate_mtmsr_branch: | ||
190 | b . | ||
191 | kvm_emulate_mtmsr_end: | ||
192 | |||
193 | .global kvm_emulate_mtmsr_branch_offs | ||
194 | kvm_emulate_mtmsr_branch_offs: | ||
195 | .long (kvm_emulate_mtmsr_branch - kvm_emulate_mtmsr) / 4 | ||
196 | |||
197 | .global kvm_emulate_mtmsr_reg1_offs | ||
198 | kvm_emulate_mtmsr_reg1_offs: | ||
199 | .long (kvm_emulate_mtmsr_reg1 - kvm_emulate_mtmsr) / 4 | ||
200 | |||
201 | .global kvm_emulate_mtmsr_reg2_offs | ||
202 | kvm_emulate_mtmsr_reg2_offs: | ||
203 | .long (kvm_emulate_mtmsr_reg2 - kvm_emulate_mtmsr) / 4 | ||
204 | |||
205 | .global kvm_emulate_mtmsr_orig_ins_offs | ||
206 | kvm_emulate_mtmsr_orig_ins_offs: | ||
207 | .long (kvm_emulate_mtmsr_orig_ins - kvm_emulate_mtmsr) / 4 | ||
208 | |||
209 | .global kvm_emulate_mtmsr_len | ||
210 | kvm_emulate_mtmsr_len: | ||
211 | .long (kvm_emulate_mtmsr_end - kvm_emulate_mtmsr) / 4 | ||
212 | |||
213 | |||
214 | |||
215 | .global kvm_emulate_wrteei | ||
216 | kvm_emulate_wrteei: | ||
217 | |||
218 | SCRATCH_SAVE | ||
219 | |||
220 | /* Fetch old MSR in r31 */ | ||
221 | LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
222 | |||
223 | /* Remove MSR_EE from old MSR */ | ||
224 | li r30, 0 | ||
225 | ori r30, r30, MSR_EE | ||
226 | andc r31, r31, r30 | ||
227 | |||
228 | /* OR new MSR_EE onto the old MSR */ | ||
229 | kvm_emulate_wrteei_ee: | ||
230 | ori r31, r31, 0 | ||
231 | |||
232 | /* Write new MSR value back */ | ||
233 | STL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
234 | |||
235 | SCRATCH_RESTORE | ||
236 | |||
237 | /* Go back to caller */ | ||
238 | kvm_emulate_wrteei_branch: | ||
239 | b . | ||
240 | kvm_emulate_wrteei_end: | ||
241 | |||
242 | .global kvm_emulate_wrteei_branch_offs | ||
243 | kvm_emulate_wrteei_branch_offs: | ||
244 | .long (kvm_emulate_wrteei_branch - kvm_emulate_wrteei) / 4 | ||
245 | |||
246 | .global kvm_emulate_wrteei_ee_offs | ||
247 | kvm_emulate_wrteei_ee_offs: | ||
248 | .long (kvm_emulate_wrteei_ee - kvm_emulate_wrteei) / 4 | ||
249 | |||
250 | .global kvm_emulate_wrteei_len | ||
251 | kvm_emulate_wrteei_len: | ||
252 | .long (kvm_emulate_wrteei_end - kvm_emulate_wrteei) / 4 | ||
253 | |||
254 | |||
255 | .global kvm_emulate_mtsrin | ||
256 | kvm_emulate_mtsrin: | ||
257 | |||
258 | SCRATCH_SAVE | ||
259 | |||
260 | LL64(r31, KVM_MAGIC_PAGE + KVM_MAGIC_MSR, 0) | ||
261 | andi. r31, r31, MSR_DR | MSR_IR | ||
262 | beq kvm_emulate_mtsrin_reg1 | ||
263 | |||
264 | SCRATCH_RESTORE | ||
265 | |||
266 | kvm_emulate_mtsrin_orig_ins: | ||
267 | nop | ||
268 | b kvm_emulate_mtsrin_branch | ||
269 | |||
270 | kvm_emulate_mtsrin_reg1: | ||
271 | /* rX >> 26 */ | ||
272 | rlwinm r30,r0,6,26,29 | ||
273 | |||
274 | kvm_emulate_mtsrin_reg2: | ||
275 | stw r0, (KVM_MAGIC_PAGE + KVM_MAGIC_SR)(r30) | ||
276 | |||
277 | SCRATCH_RESTORE | ||
278 | |||
279 | /* Go back to caller */ | ||
280 | kvm_emulate_mtsrin_branch: | ||
281 | b . | ||
282 | kvm_emulate_mtsrin_end: | ||
283 | |||
284 | .global kvm_emulate_mtsrin_branch_offs | ||
285 | kvm_emulate_mtsrin_branch_offs: | ||
286 | .long (kvm_emulate_mtsrin_branch - kvm_emulate_mtsrin) / 4 | ||
287 | |||
288 | .global kvm_emulate_mtsrin_reg1_offs | ||
289 | kvm_emulate_mtsrin_reg1_offs: | ||
290 | .long (kvm_emulate_mtsrin_reg1 - kvm_emulate_mtsrin) / 4 | ||
291 | |||
292 | .global kvm_emulate_mtsrin_reg2_offs | ||
293 | kvm_emulate_mtsrin_reg2_offs: | ||
294 | .long (kvm_emulate_mtsrin_reg2 - kvm_emulate_mtsrin) / 4 | ||
295 | |||
296 | .global kvm_emulate_mtsrin_orig_ins_offs | ||
297 | kvm_emulate_mtsrin_orig_ins_offs: | ||
298 | .long (kvm_emulate_mtsrin_orig_ins - kvm_emulate_mtsrin) / 4 | ||
299 | |||
300 | .global kvm_emulate_mtsrin_len | ||
301 | kvm_emulate_mtsrin_len: | ||
302 | .long (kvm_emulate_mtsrin_end - kvm_emulate_mtsrin) / 4 | ||
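The *_offs words exported by kvm_emul.S are word indexes into each emulation template; the C side in kvm.c memcpy()s the template into its scratch buffer and then patches individual slots by index. A userspace model of that pattern (the template contents and indexes here are dummies, not the real chunk):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    static const uint32_t tmpl[4] = {
        0x60000000,                /* nop                           */
        0x60000000,                /* slot for the register move    */
        0x60000000,                /* slot for the original insn    */
        0x48000000,                /* slot for the return branch    */
    };
    static const uint32_t reg_offs    = 1;   /* = (label - start) / 4 in the .S */
    static const uint32_t branch_offs = 3;

    int main(void)
    {
        uint32_t chunk[4];

        memcpy(chunk, tmpl, sizeof(chunk));
        chunk[reg_offs]    |= 30;        /* encode the target register   */
        chunk[branch_offs] |= 0x2000;    /* branch back to the call site */

        for (int i = 0; i < 4; i++)
            printf("chunk[%d] = 0x%08x\n", i, chunk[i]);
        return 0;
    }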
diff --git a/arch/powerpc/kernel/l2cr_6xx.S b/arch/powerpc/kernel/l2cr_6xx.S index 2a2f3c3f6d80..97ec8557f974 100644 --- a/arch/powerpc/kernel/l2cr_6xx.S +++ b/arch/powerpc/kernel/l2cr_6xx.S | |||
@@ -151,7 +151,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
151 | /**** Might be a good idea to set L2DO here - to prevent instructions | 151 | /**** Might be a good idea to set L2DO here - to prevent instructions |
152 | from getting into the cache. But since we invalidate | 152 | from getting into the cache. But since we invalidate |
153 | the next time we enable the cache it doesn't really matter. | 153 | the next time we enable the cache it doesn't really matter. |
154 | Don't do this unless you accomodate all processor variations. | 154 | Don't do this unless you accommodate all processor variations. |
155 | The bit moved on the 7450..... | 155 | The bit moved on the 7450..... |
156 | ****/ | 156 | ****/ |
157 | 157 | ||
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c index c1fd0f9658fd..2b97b80d6d7d 100644 --- a/arch/powerpc/kernel/legacy_serial.c +++ b/arch/powerpc/kernel/legacy_serial.c | |||
@@ -52,14 +52,14 @@ static int __init add_legacy_port(struct device_node *np, int want_index, | |||
52 | phys_addr_t taddr, unsigned long irq, | 52 | phys_addr_t taddr, unsigned long irq, |
53 | upf_t flags, int irq_check_parent) | 53 | upf_t flags, int irq_check_parent) |
54 | { | 54 | { |
55 | const u32 *clk, *spd; | 55 | const __be32 *clk, *spd; |
56 | u32 clock = BASE_BAUD * 16; | 56 | u32 clock = BASE_BAUD * 16; |
57 | int index; | 57 | int index; |
58 | 58 | ||
59 | /* get clock freq. if present */ | 59 | /* get clock freq. if present */ |
60 | clk = of_get_property(np, "clock-frequency", NULL); | 60 | clk = of_get_property(np, "clock-frequency", NULL); |
61 | if (clk && *clk) | 61 | if (clk && *clk) |
62 | clock = *clk; | 62 | clock = be32_to_cpup(clk); |
63 | 63 | ||
64 | /* get default speed if present */ | 64 | /* get default speed if present */ |
65 | spd = of_get_property(np, "current-speed", NULL); | 65 | spd = of_get_property(np, "current-speed", NULL); |
@@ -109,7 +109,7 @@ static int __init add_legacy_port(struct device_node *np, int want_index, | |||
109 | legacy_serial_infos[index].taddr = taddr; | 109 | legacy_serial_infos[index].taddr = taddr; |
110 | legacy_serial_infos[index].np = of_node_get(np); | 110 | legacy_serial_infos[index].np = of_node_get(np); |
111 | legacy_serial_infos[index].clock = clock; | 111 | legacy_serial_infos[index].clock = clock; |
112 | legacy_serial_infos[index].speed = spd ? *spd : 0; | 112 | legacy_serial_infos[index].speed = spd ? be32_to_cpup(spd) : 0; |
113 | legacy_serial_infos[index].irq_check_parent = irq_check_parent; | 113 | legacy_serial_infos[index].irq_check_parent = irq_check_parent; |
114 | 114 | ||
115 | printk(KERN_DEBUG "Found legacy serial port %d for %s\n", | 115 | printk(KERN_DEBUG "Found legacy serial port %d for %s\n", |
@@ -168,7 +168,7 @@ static int __init add_legacy_soc_port(struct device_node *np, | |||
168 | static int __init add_legacy_isa_port(struct device_node *np, | 168 | static int __init add_legacy_isa_port(struct device_node *np, |
169 | struct device_node *isa_brg) | 169 | struct device_node *isa_brg) |
170 | { | 170 | { |
171 | const u32 *reg; | 171 | const __be32 *reg; |
172 | const char *typep; | 172 | const char *typep; |
173 | int index = -1; | 173 | int index = -1; |
174 | u64 taddr; | 174 | u64 taddr; |
@@ -181,7 +181,7 @@ static int __init add_legacy_isa_port(struct device_node *np, | |||
181 | return -1; | 181 | return -1; |
182 | 182 | ||
183 | /* Verify it's an IO port, we don't support anything else */ | 183 | /* Verify it's an IO port, we don't support anything else */ |
184 | if (!(reg[0] & 0x00000001)) | 184 | if (!(be32_to_cpu(reg[0]) & 0x00000001)) |
185 | return -1; | 185 | return -1; |
186 | 186 | ||
187 | /* Now look for an "ibm,aix-loc" property that gives us ordering | 187 | /* Now look for an "ibm,aix-loc" property that gives us ordering |
@@ -202,7 +202,7 @@ static int __init add_legacy_isa_port(struct device_node *np, | |||
202 | taddr = 0; | 202 | taddr = 0; |
203 | 203 | ||
204 | /* Add port, irq will be dealt with later */ | 204 | /* Add port, irq will be dealt with later */ |
205 | return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr, | 205 | return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]), taddr, |
206 | NO_IRQ, UPF_BOOT_AUTOCONF, 0); | 206 | NO_IRQ, UPF_BOOT_AUTOCONF, 0); |
207 | 207 | ||
208 | } | 208 | } |
@@ -251,9 +251,9 @@ static int __init add_legacy_pci_port(struct device_node *np, | |||
251 | * we get to their "reg" property | 251 | * we get to their "reg" property |
252 | */ | 252 | */ |
253 | if (np != pci_dev) { | 253 | if (np != pci_dev) { |
254 | const u32 *reg = of_get_property(np, "reg", NULL); | 254 | const __be32 *reg = of_get_property(np, "reg", NULL); |
255 | if (reg && (*reg < 4)) | 255 | if (reg && (be32_to_cpup(reg) < 4)) |
256 | index = lindex = *reg; | 256 | index = lindex = be32_to_cpup(reg); |
257 | } | 257 | } |
258 | 258 | ||
259 | /* Local index means it's the Nth port in the PCI chip. Unfortunately | 259 | /* Local index means it's the Nth port in the PCI chip. Unfortunately |
@@ -330,9 +330,11 @@ void __init find_legacy_serial_ports(void) | |||
330 | if (!parent) | 330 | if (!parent) |
331 | continue; | 331 | continue; |
332 | if (of_match_node(legacy_serial_parents, parent) != NULL) { | 332 | if (of_match_node(legacy_serial_parents, parent) != NULL) { |
333 | index = add_legacy_soc_port(np, np); | 333 | if (of_device_is_available(np)) { |
334 | if (index >= 0 && np == stdout) | 334 | index = add_legacy_soc_port(np, np); |
335 | legacy_serial_console = index; | 335 | if (index >= 0 && np == stdout) |
336 | legacy_serial_console = index; | ||
337 | } | ||
336 | } | 338 | } |
337 | of_node_put(parent); | 339 | of_node_put(parent); |
338 | } | 340 | } |
@@ -507,7 +509,7 @@ static int __init check_legacy_serial_console(void) | |||
507 | struct device_node *prom_stdout = NULL; | 509 | struct device_node *prom_stdout = NULL; |
508 | int i, speed = 0, offset = 0; | 510 | int i, speed = 0, offset = 0; |
509 | const char *name; | 511 | const char *name; |
510 | const u32 *spd; | 512 | const __be32 *spd; |
511 | 513 | ||
512 | DBG(" -> check_legacy_serial_console()\n"); | 514 | DBG(" -> check_legacy_serial_console()\n"); |
513 | 515 | ||
@@ -547,7 +549,7 @@ static int __init check_legacy_serial_console(void) | |||
547 | } | 549 | } |
548 | spd = of_get_property(prom_stdout, "current-speed", NULL); | 550 | spd = of_get_property(prom_stdout, "current-speed", NULL); |
549 | if (spd) | 551 | if (spd) |
550 | speed = *spd; | 552 | speed = be32_to_cpup(spd); |
551 | 553 | ||
552 | if (strcmp(name, "serial") != 0) | 554 | if (strcmp(name, "serial") != 0) |
553 | goto not_found; | 555 | goto not_found; |
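The legacy_serial.c changes read the "clock-frequency", "current-speed" and "reg" cells through be32_to_cpup() because flattened device tree properties are stored big-endian regardless of the CPU's endianness. A tiny illustration of the conversion (the sample cell value is invented):

    #include <stdio.h>
    #include <stdint.h>

    /* userspace stand-in for be32_to_cpup() */
    static uint32_t be32_read(const uint8_t *p)
    {
        return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
               ((uint32_t)p[2] << 8)  |  (uint32_t)p[3];
    }

    int main(void)
    {
        /* a "clock-frequency" cell for 1843200 Hz as laid out in the FDT */
        const uint8_t cell[4] = { 0x00, 0x1c, 0x20, 0x00 };

        printf("clock = %u Hz\n", be32_read(cell));
        return 0;
    }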
diff --git a/arch/powerpc/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c index 50362b6ef6e9..84daabe2fcba 100644 --- a/arch/powerpc/kernel/lparcfg.c +++ b/arch/powerpc/kernel/lparcfg.c | |||
@@ -56,7 +56,7 @@ static unsigned long get_purr(void) | |||
56 | 56 | ||
57 | for_each_possible_cpu(cpu) { | 57 | for_each_possible_cpu(cpu) { |
58 | if (firmware_has_feature(FW_FEATURE_ISERIES)) | 58 | if (firmware_has_feature(FW_FEATURE_ISERIES)) |
59 | sum_purr += lppaca[cpu].emulated_time_base; | 59 | sum_purr += lppaca_of(cpu).emulated_time_base; |
60 | else { | 60 | else { |
61 | struct cpu_usage *cu; | 61 | struct cpu_usage *cu; |
62 | 62 | ||
@@ -132,34 +132,6 @@ static int iseries_lparcfg_data(struct seq_file *m, void *v) | |||
132 | /* | 132 | /* |
133 | * Methods used to fetch LPAR data when running on a pSeries platform. | 133 | * Methods used to fetch LPAR data when running on a pSeries platform. |
134 | */ | 134 | */ |
135 | /** | ||
136 | * h_get_mpp | ||
137 | * H_GET_MPP hcall returns info in 7 parms | ||
138 | */ | ||
139 | int h_get_mpp(struct hvcall_mpp_data *mpp_data) | ||
140 | { | ||
141 | int rc; | ||
142 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | ||
143 | |||
144 | rc = plpar_hcall9(H_GET_MPP, retbuf); | ||
145 | |||
146 | mpp_data->entitled_mem = retbuf[0]; | ||
147 | mpp_data->mapped_mem = retbuf[1]; | ||
148 | |||
149 | mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff; | ||
150 | mpp_data->pool_num = retbuf[2] & 0xffff; | ||
151 | |||
152 | mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff; | ||
153 | mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff; | ||
154 | mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff; | ||
155 | |||
156 | mpp_data->pool_size = retbuf[4]; | ||
157 | mpp_data->loan_request = retbuf[5]; | ||
158 | mpp_data->backing_mem = retbuf[6]; | ||
159 | |||
160 | return rc; | ||
161 | } | ||
162 | EXPORT_SYMBOL(h_get_mpp); | ||
163 | 135 | ||
164 | struct hvcall_ppp_data { | 136 | struct hvcall_ppp_data { |
165 | u64 entitlement; | 137 | u64 entitlement; |
@@ -262,8 +234,8 @@ static void parse_ppp_data(struct seq_file *m) | |||
262 | seq_printf(m, "system_active_processors=%d\n", | 234 | seq_printf(m, "system_active_processors=%d\n", |
263 | ppp_data.active_system_procs); | 235 | ppp_data.active_system_procs); |
264 | 236 | ||
265 | /* pool related entries are apropriate for shared configs */ | 237 | /* pool related entries are appropriate for shared configs */ |
266 | if (lppaca[0].shared_proc) { | 238 | if (lppaca_of(0).shared_proc) { |
267 | unsigned long pool_idle_time, pool_procs; | 239 | unsigned long pool_idle_time, pool_procs; |
268 | 240 | ||
269 | seq_printf(m, "pool=%d\n", ppp_data.pool_num); | 241 | seq_printf(m, "pool=%d\n", ppp_data.pool_num); |
@@ -345,6 +317,30 @@ static void parse_mpp_data(struct seq_file *m) | |||
345 | seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem); | 317 | seq_printf(m, "backing_memory=%ld bytes\n", mpp_data.backing_mem); |
346 | } | 318 | } |
347 | 319 | ||
320 | /** | ||
321 | * parse_mpp_x_data | ||
322 | * Parse out data returned from h_get_mpp_x | ||
323 | */ | ||
324 | static void parse_mpp_x_data(struct seq_file *m) | ||
325 | { | ||
326 | struct hvcall_mpp_x_data mpp_x_data; | ||
327 | |||
328 | if (!firmware_has_feature(FW_FEATURE_XCMO)) | ||
329 | return; | ||
330 | if (h_get_mpp_x(&mpp_x_data)) | ||
331 | return; | ||
332 | |||
333 | seq_printf(m, "coalesced_bytes=%ld\n", mpp_x_data.coalesced_bytes); | ||
334 | |||
335 | if (mpp_x_data.pool_coalesced_bytes) | ||
336 | seq_printf(m, "pool_coalesced_bytes=%ld\n", | ||
337 | mpp_x_data.pool_coalesced_bytes); | ||
338 | if (mpp_x_data.pool_purr_cycles) | ||
339 | seq_printf(m, "coalesce_pool_purr=%ld\n", mpp_x_data.pool_purr_cycles); | ||
340 | if (mpp_x_data.pool_spurr_cycles) | ||
341 | seq_printf(m, "coalesce_pool_spurr=%ld\n", mpp_x_data.pool_spurr_cycles); | ||
342 | } | ||
343 | |||
348 | #define SPLPAR_CHARACTERISTICS_TOKEN 20 | 344 | #define SPLPAR_CHARACTERISTICS_TOKEN 20 |
349 | #define SPLPAR_MAXLENGTH 1026*(sizeof(char)) | 345 | #define SPLPAR_MAXLENGTH 1026*(sizeof(char)) |
350 | 346 | ||
@@ -460,8 +456,8 @@ static void pseries_cmo_data(struct seq_file *m) | |||
460 | return; | 456 | return; |
461 | 457 | ||
462 | for_each_possible_cpu(cpu) { | 458 | for_each_possible_cpu(cpu) { |
463 | cmo_faults += lppaca[cpu].cmo_faults; | 459 | cmo_faults += lppaca_of(cpu).cmo_faults; |
464 | cmo_fault_time += lppaca[cpu].cmo_fault_time; | 460 | cmo_fault_time += lppaca_of(cpu).cmo_fault_time; |
465 | } | 461 | } |
466 | 462 | ||
467 | seq_printf(m, "cmo_faults=%lu\n", cmo_faults); | 463 | seq_printf(m, "cmo_faults=%lu\n", cmo_faults); |
@@ -479,8 +475,8 @@ static void splpar_dispatch_data(struct seq_file *m) | |||
479 | unsigned long dispatch_dispersions = 0; | 475 | unsigned long dispatch_dispersions = 0; |
480 | 476 | ||
481 | for_each_possible_cpu(cpu) { | 477 | for_each_possible_cpu(cpu) { |
482 | dispatches += lppaca[cpu].yield_count; | 478 | dispatches += lppaca_of(cpu).yield_count; |
483 | dispatch_dispersions += lppaca[cpu].dispersion_count; | 479 | dispatch_dispersions += lppaca_of(cpu).dispersion_count; |
484 | } | 480 | } |
485 | 481 | ||
486 | seq_printf(m, "dispatches=%lu\n", dispatches); | 482 | seq_printf(m, "dispatches=%lu\n", dispatches); |
@@ -520,6 +516,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) | |||
520 | parse_system_parameter_string(m); | 516 | parse_system_parameter_string(m); |
521 | parse_ppp_data(m); | 517 | parse_ppp_data(m); |
522 | parse_mpp_data(m); | 518 | parse_mpp_data(m); |
519 | parse_mpp_x_data(m); | ||
523 | pseries_cmo_data(m); | 520 | pseries_cmo_data(m); |
524 | splpar_dispatch_data(m); | 521 | splpar_dispatch_data(m); |
525 | 522 | ||
@@ -545,7 +542,7 @@ static int pseries_lparcfg_data(struct seq_file *m, void *v) | |||
545 | seq_printf(m, "partition_potential_processors=%d\n", | 542 | seq_printf(m, "partition_potential_processors=%d\n", |
546 | partition_potential_processors); | 543 | partition_potential_processors); |
547 | 544 | ||
548 | seq_printf(m, "shared_processor_mode=%d\n", lppaca[0].shared_proc); | 545 | seq_printf(m, "shared_processor_mode=%d\n", lppaca_of(0).shared_proc); |
549 | 546 | ||
550 | seq_printf(m, "slb_size=%d\n", mmu_slb_size); | 547 | seq_printf(m, "slb_size=%d\n", mmu_slb_size); |
551 | 548 | ||
@@ -780,6 +777,7 @@ static const struct file_operations lparcfg_fops = { | |||
780 | .write = lparcfg_write, | 777 | .write = lparcfg_write, |
781 | .open = lparcfg_open, | 778 | .open = lparcfg_open, |
782 | .release = single_release, | 779 | .release = single_release, |
780 | .llseek = seq_lseek, | ||
783 | }; | 781 | }; |
784 | 782 | ||
785 | static int __init lparcfg_init(void) | 783 | static int __init lparcfg_init(void) |
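The parse_mpp_x_data() addition above emits extra memory-coalescing statistics when the XCMO firmware feature is present. A hedged sketch of the structure those seq_printf() calls rely on; the field names come from the uses above, while the types and the location of the real definition (presumably next to hvcall_mpp_data in asm/hvcall.h) are assumptions:

	/* Hypothetical sketch -- not the authoritative definition. */
	struct hvcall_mpp_x_data {
		unsigned long coalesced_bytes;      /* bytes coalesced for this partition */
		unsigned long pool_coalesced_bytes; /* bytes coalesced across the pool */
		unsigned long pool_purr_cycles;     /* PURR cycles spent coalescing */
		unsigned long pool_spurr_cycles;    /* SPURR cycles spent coalescing */
	};

	/* Resulting lparcfg lines (values illustrative):
	 *   coalesced_bytes=1048576
	 *   pool_coalesced_bytes=4194304
	 *   coalesce_pool_purr=52314
	 *   coalesce_pool_spurr=49870
	 */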
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index dd6c141f1662..7ee50f0547cb 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c | |||
@@ -14,16 +14,41 @@ | |||
14 | #include <linux/threads.h> | 14 | #include <linux/threads.h> |
15 | #include <linux/memblock.h> | 15 | #include <linux/memblock.h> |
16 | #include <linux/of.h> | 16 | #include <linux/of.h> |
17 | #include <linux/irq.h> | ||
18 | #include <linux/ftrace.h> | ||
19 | |||
17 | #include <asm/machdep.h> | 20 | #include <asm/machdep.h> |
18 | #include <asm/prom.h> | 21 | #include <asm/prom.h> |
19 | #include <asm/sections.h> | 22 | #include <asm/sections.h> |
20 | 23 | ||
24 | void machine_kexec_mask_interrupts(void) { | ||
25 | unsigned int i; | ||
26 | |||
27 | for_each_irq(i) { | ||
28 | struct irq_desc *desc = irq_to_desc(i); | ||
29 | struct irq_chip *chip; | ||
30 | |||
31 | if (!desc) | ||
32 | continue; | ||
33 | |||
34 | chip = irq_desc_get_chip(desc); | ||
35 | if (!chip) | ||
36 | continue; | ||
37 | |||
38 | if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data)) | ||
39 | chip->irq_eoi(&desc->irq_data); | ||
40 | |||
41 | if (chip->irq_mask) | ||
42 | chip->irq_mask(&desc->irq_data); | ||
43 | |||
44 | if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data)) | ||
45 | chip->irq_disable(&desc->irq_data); | ||
46 | } | ||
47 | } | ||
48 | |||
21 | void machine_crash_shutdown(struct pt_regs *regs) | 49 | void machine_crash_shutdown(struct pt_regs *regs) |
22 | { | 50 | { |
23 | if (ppc_md.machine_crash_shutdown) | 51 | default_machine_crash_shutdown(regs); |
24 | ppc_md.machine_crash_shutdown(regs); | ||
25 | else | ||
26 | default_machine_crash_shutdown(regs); | ||
27 | } | 52 | } |
28 | 53 | ||
29 | /* | 54 | /* |
@@ -41,8 +66,6 @@ int machine_kexec_prepare(struct kimage *image) | |||
41 | 66 | ||
42 | void machine_kexec_cleanup(struct kimage *image) | 67 | void machine_kexec_cleanup(struct kimage *image) |
43 | { | 68 | { |
44 | if (ppc_md.machine_kexec_cleanup) | ||
45 | ppc_md.machine_kexec_cleanup(image); | ||
46 | } | 69 | } |
47 | 70 | ||
48 | void arch_crash_save_vmcoreinfo(void) | 71 | void arch_crash_save_vmcoreinfo(void) |
@@ -63,11 +86,17 @@ void arch_crash_save_vmcoreinfo(void) | |||
63 | */ | 86 | */ |
64 | void machine_kexec(struct kimage *image) | 87 | void machine_kexec(struct kimage *image) |
65 | { | 88 | { |
89 | int save_ftrace_enabled; | ||
90 | |||
91 | save_ftrace_enabled = __ftrace_enabled_save(); | ||
92 | |||
66 | if (ppc_md.machine_kexec) | 93 | if (ppc_md.machine_kexec) |
67 | ppc_md.machine_kexec(image); | 94 | ppc_md.machine_kexec(image); |
68 | else | 95 | else |
69 | default_machine_kexec(image); | 96 | default_machine_kexec(image); |
70 | 97 | ||
98 | __ftrace_enabled_restore(save_ftrace_enabled); | ||
99 | |||
71 | /* Fall back to normal restart if we're still alive. */ | 100 | /* Fall back to normal restart if we're still alive. */ |
72 | machine_restart(NULL); | 101 | machine_restart(NULL); |
73 | for(;;); | 102 | for(;;); |
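machine_kexec() now brackets the hand-off with the generic ftrace helpers from <linux/ftrace.h>, so function tracing cannot fire while the new image is copied over the running kernel. A minimal sketch of the same save/disable/restore pattern, assuming only those stock helpers (the wrapper itself is illustrative):

	#include <linux/ftrace.h>

	static void run_untraced(void (*fn)(void))
	{
		/* __ftrace_enabled_save() clears ftrace_enabled and returns the
		 * previous value; the restore call reinstates it afterwards. */
		int saved = __ftrace_enabled_save();

		fn();

		__ftrace_enabled_restore(saved);
	}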
diff --git a/arch/powerpc/kernel/machine_kexec_32.c b/arch/powerpc/kernel/machine_kexec_32.c index ae63a964b858..e63f2e7d2efb 100644 --- a/arch/powerpc/kernel/machine_kexec_32.c +++ b/arch/powerpc/kernel/machine_kexec_32.c | |||
@@ -39,6 +39,10 @@ void default_machine_kexec(struct kimage *image) | |||
39 | /* Interrupts aren't acceptable while we reboot */ | 39 | /* Interrupts aren't acceptable while we reboot */ |
40 | local_irq_disable(); | 40 | local_irq_disable(); |
41 | 41 | ||
42 | /* mask each interrupt so we are in a more sane state for the | ||
43 | * kexec kernel */ | ||
44 | machine_kexec_mask_interrupts(); | ||
45 | |||
42 | page_list = image->head; | 46 | page_list = image->head; |
43 | 47 | ||
44 | /* we need both effective and real address here */ | 48 | /* we need both effective and real address here */ |
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S index 2d29752cbe16..b69463ec2010 100644 --- a/arch/powerpc/kernel/misc.S +++ b/arch/powerpc/kernel/misc.S | |||
@@ -122,8 +122,3 @@ _GLOBAL(longjmp) | |||
122 | mtlr r0 | 122 | mtlr r0 |
123 | mr r3,r4 | 123 | mr r3,r4 |
124 | blr | 124 | blr |
125 | |||
126 | _GLOBAL(__setup_cpu_power7) | ||
127 | _GLOBAL(__restore_cpu_power7) | ||
128 | /* place holder */ | ||
129 | blr | ||
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index a7a570dcdd57..998a10028608 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/processor.h> | 30 | #include <asm/processor.h> |
31 | #include <asm/kexec.h> | 31 | #include <asm/kexec.h> |
32 | #include <asm/bug.h> | 32 | #include <asm/bug.h> |
33 | #include <asm/ptrace.h> | ||
33 | 34 | ||
34 | .text | 35 | .text |
35 | 36 | ||
@@ -693,6 +694,17 @@ _GLOBAL(kernel_thread) | |||
693 | addi r1,r1,16 | 694 | addi r1,r1,16 |
694 | blr | 695 | blr |
695 | 696 | ||
697 | #ifdef CONFIG_SMP | ||
698 | _GLOBAL(start_secondary_resume) | ||
699 | /* Reset stack */ | ||
700 | rlwinm r1,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ | ||
701 | addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD | ||
702 | li r3,0 | ||
703 | stw r3,0(r1) /* Zero the stack frame pointer */ | ||
704 | bl start_secondary | ||
705 | b . | ||
706 | #endif /* CONFIG_SMP */ | ||
707 | |||
696 | /* | 708 | /* |
697 | * This routine is just here to keep GCC happy - sigh... | 709 | * This routine is just here to keep GCC happy - sigh... |
698 | */ | 710 | */ |
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S index e5144906a56d..e89df59cdc5a 100644 --- a/arch/powerpc/kernel/misc_64.S +++ b/arch/powerpc/kernel/misc_64.S | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <asm/cputable.h> | 25 | #include <asm/cputable.h> |
26 | #include <asm/thread_info.h> | 26 | #include <asm/thread_info.h> |
27 | #include <asm/kexec.h> | 27 | #include <asm/kexec.h> |
28 | #include <asm/ptrace.h> | ||
28 | 29 | ||
29 | .text | 30 | .text |
30 | 31 | ||
@@ -461,7 +462,8 @@ _GLOBAL(disable_kernel_fp) | |||
461 | * wait for the flag to change, indicating this kernel is going away but | 462 | * wait for the flag to change, indicating this kernel is going away but |
462 | * the slave code for the next one is at addresses 0 to 100. | 463 | * the slave code for the next one is at addresses 0 to 100. |
463 | * | 464 | * |
464 | * This is used by all slaves. | 465 | * This is used by all slaves, even those that did not find a matching |
466 | * paca in the secondary startup code. | ||
465 | * | 467 | * |
466 | * Physical (hardware) cpu id should be in r3. | 468 | * Physical (hardware) cpu id should be in r3. |
467 | */ | 469 | */ |
@@ -470,10 +472,6 @@ _GLOBAL(kexec_wait) | |||
470 | 1: mflr r5 | 472 | 1: mflr r5 |
471 | addi r5,r5,kexec_flag-1b | 473 | addi r5,r5,kexec_flag-1b |
472 | 474 | ||
473 | li r4,KEXEC_STATE_REAL_MODE | ||
474 | stb r4,PACAKEXECSTATE(r13) | ||
475 | SYNC | ||
476 | |||
477 | 99: HMT_LOW | 475 | 99: HMT_LOW |
478 | #ifdef CONFIG_KEXEC /* use no memory without kexec */ | 476 | #ifdef CONFIG_KEXEC /* use no memory without kexec */ |
479 | lwz r4,0(r5) | 477 | lwz r4,0(r5) |
@@ -498,11 +496,17 @@ kexec_flag: | |||
498 | * | 496 | * |
499 | * get phys id from paca | 497 | * get phys id from paca |
500 | * switch to real mode | 498 | * switch to real mode |
499 | * mark the paca as no longer used | ||
501 | * join other cpus in kexec_wait(phys_id) | 500 | * join other cpus in kexec_wait(phys_id) |
502 | */ | 501 | */ |
503 | _GLOBAL(kexec_smp_wait) | 502 | _GLOBAL(kexec_smp_wait) |
504 | lhz r3,PACAHWCPUID(r13) | 503 | lhz r3,PACAHWCPUID(r13) |
505 | bl real_mode | 504 | bl real_mode |
505 | |||
506 | li r4,KEXEC_STATE_REAL_MODE | ||
507 | stb r4,PACAKEXECSTATE(r13) | ||
508 | SYNC | ||
509 | |||
506 | b .kexec_wait | 510 | b .kexec_wait |
507 | 511 | ||
508 | /* | 512 | /* |
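Moving the KEXEC_STATE_REAL_MODE store out of kexec_wait and into kexec_smp_wait means a secondary CPU only advertises real mode after real_mode() has actually switched it over. That matters because the CPU driving the kexec spins on that per-paca state before it starts overwriting memory; a hypothetical sketch of such a wait loop, with the field and constant names assumed from the kexec code rather than taken from this hunk:

	/* Sketch only -- the real wait lives in the 64-bit kexec path. */
	static void wait_for_real_mode_secondaries(void)
	{
		int i;

		for_each_online_cpu(i) {
			if (i == smp_processor_id())
				continue;
			/* Spin until this CPU reports it has reached real mode. */
			while (paca[i].kexec_state < KEXEC_STATE_REAL_MODE)
				cpu_relax();
		}
	}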
diff --git a/arch/powerpc/kernel/mpc7450-pmu.c b/arch/powerpc/kernel/mpc7450-pmu.c index 09d72028f317..2cc5e0301d0b 100644 --- a/arch/powerpc/kernel/mpc7450-pmu.c +++ b/arch/powerpc/kernel/mpc7450-pmu.c | |||
@@ -414,4 +414,4 @@ static int init_mpc7450_pmu(void) | |||
414 | return register_power_pmu(&mpc7450_pmu); | 414 | return register_power_pmu(&mpc7450_pmu); |
415 | } | 415 | } |
416 | 416 | ||
417 | arch_initcall(init_mpc7450_pmu); | 417 | early_initcall(init_mpc7450_pmu); |
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index 9cf197f01e94..bec1e930ed73 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c | |||
@@ -34,15 +34,26 @@ | |||
34 | 34 | ||
35 | #undef DEBUG_NVRAM | 35 | #undef DEBUG_NVRAM |
36 | 36 | ||
37 | static struct nvram_partition * nvram_part; | 37 | #define NVRAM_HEADER_LEN sizeof(struct nvram_header) |
38 | static long nvram_error_log_index = -1; | 38 | #define NVRAM_BLOCK_LEN NVRAM_HEADER_LEN |
39 | static long nvram_error_log_size = 0; | 39 | |
40 | /* If you change this size, also change the size of NVNAME_LEN */ | ||
41 | struct nvram_header { | ||
42 | unsigned char signature; | ||
43 | unsigned char checksum; | ||
44 | unsigned short length; | ||
45 | /* Terminating null required only for names < 12 chars. */ | ||
46 | char name[12]; | ||
47 | }; | ||
40 | 48 | ||
41 | struct err_log_info { | 49 | struct nvram_partition { |
42 | int error_type; | 50 | struct list_head partition; |
43 | unsigned int seq_num; | 51 | struct nvram_header header; |
52 | unsigned int index; | ||
44 | }; | 53 | }; |
45 | 54 | ||
55 | static LIST_HEAD(nvram_partitions); | ||
56 | |||
46 | static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin) | 57 | static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin) |
47 | { | 58 | { |
48 | int size; | 59 | int size; |
@@ -186,14 +197,12 @@ static struct miscdevice nvram_dev = { | |||
186 | #ifdef DEBUG_NVRAM | 197 | #ifdef DEBUG_NVRAM |
187 | static void __init nvram_print_partitions(char * label) | 198 | static void __init nvram_print_partitions(char * label) |
188 | { | 199 | { |
189 | struct list_head * p; | ||
190 | struct nvram_partition * tmp_part; | 200 | struct nvram_partition * tmp_part; |
191 | 201 | ||
192 | printk(KERN_WARNING "--------%s---------\n", label); | 202 | printk(KERN_WARNING "--------%s---------\n", label); |
193 | printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n"); | 203 | printk(KERN_WARNING "indx\t\tsig\tchks\tlen\tname\n"); |
194 | list_for_each(p, &nvram_part->partition) { | 204 | list_for_each_entry(tmp_part, &nvram_partitions, partition) { |
195 | tmp_part = list_entry(p, struct nvram_partition, partition); | 205 | printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%12s\n", |
196 | printk(KERN_WARNING "%4d \t%02x\t%02x\t%d\t%s\n", | ||
197 | tmp_part->index, tmp_part->header.signature, | 206 | tmp_part->index, tmp_part->header.signature, |
198 | tmp_part->header.checksum, tmp_part->header.length, | 207 | tmp_part->header.checksum, tmp_part->header.length, |
199 | tmp_part->header.name); | 208 | tmp_part->header.name); |
@@ -228,95 +237,136 @@ static unsigned char __init nvram_checksum(struct nvram_header *p) | |||
228 | return c_sum; | 237 | return c_sum; |
229 | } | 238 | } |
230 | 239 | ||
231 | static int __init nvram_remove_os_partition(void) | 240 | /* |
241 | * Per the criteria passed via nvram_remove_partition(), should this | ||
242 | * partition be removed? 1=remove, 0=keep | ||
243 | */ | ||
244 | static int nvram_can_remove_partition(struct nvram_partition *part, | ||
245 | const char *name, int sig, const char *exceptions[]) | ||
246 | { | ||
247 | if (part->header.signature != sig) | ||
248 | return 0; | ||
249 | if (name) { | ||
250 | if (strncmp(name, part->header.name, 12)) | ||
251 | return 0; | ||
252 | } else if (exceptions) { | ||
253 | const char **except; | ||
254 | for (except = exceptions; *except; except++) { | ||
255 | if (!strncmp(*except, part->header.name, 12)) | ||
256 | return 0; | ||
257 | } | ||
258 | } | ||
259 | return 1; | ||
260 | } | ||
261 | |||
262 | /** | ||
263 | * nvram_remove_partition - Remove one or more partitions in nvram | ||
264 | * @name: name of the partition to remove, or NULL for a | ||
265 | * signature only match | ||
266 | * @sig: signature of the partition(s) to remove | ||
267 | * @exceptions: When removing all partitions with a matching signature, | ||
268 | * leave these alone. | ||
269 | */ | ||
270 | |||
271 | int __init nvram_remove_partition(const char *name, int sig, | ||
272 | const char *exceptions[]) | ||
232 | { | 273 | { |
233 | struct list_head *i; | 274 | struct nvram_partition *part, *prev, *tmp; |
234 | struct list_head *j; | ||
235 | struct nvram_partition * part; | ||
236 | struct nvram_partition * cur_part; | ||
237 | int rc; | 275 | int rc; |
238 | 276 | ||
239 | list_for_each(i, &nvram_part->partition) { | 277 | list_for_each_entry(part, &nvram_partitions, partition) { |
240 | part = list_entry(i, struct nvram_partition, partition); | 278 | if (!nvram_can_remove_partition(part, name, sig, exceptions)) |
241 | if (part->header.signature != NVRAM_SIG_OS) | ||
242 | continue; | 279 | continue; |
243 | 280 | ||
244 | /* Make os partition a free partition */ | 281 | /* Make partition a free partition */ |
245 | part->header.signature = NVRAM_SIG_FREE; | 282 | part->header.signature = NVRAM_SIG_FREE; |
246 | sprintf(part->header.name, "wwwwwwwwwwww"); | 283 | strncpy(part->header.name, "wwwwwwwwwwww", 12); |
247 | part->header.checksum = nvram_checksum(&part->header); | 284 | part->header.checksum = nvram_checksum(&part->header); |
248 | |||
249 | /* Merge contiguous free partitions backwards */ | ||
250 | list_for_each_prev(j, &part->partition) { | ||
251 | cur_part = list_entry(j, struct nvram_partition, partition); | ||
252 | if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) { | ||
253 | break; | ||
254 | } | ||
255 | |||
256 | part->header.length += cur_part->header.length; | ||
257 | part->header.checksum = nvram_checksum(&part->header); | ||
258 | part->index = cur_part->index; | ||
259 | |||
260 | list_del(&cur_part->partition); | ||
261 | kfree(cur_part); | ||
262 | j = &part->partition; /* fixup our loop */ | ||
263 | } | ||
264 | |||
265 | /* Merge contiguous free partitions forwards */ | ||
266 | list_for_each(j, &part->partition) { | ||
267 | cur_part = list_entry(j, struct nvram_partition, partition); | ||
268 | if (cur_part == nvram_part || cur_part->header.signature != NVRAM_SIG_FREE) { | ||
269 | break; | ||
270 | } | ||
271 | |||
272 | part->header.length += cur_part->header.length; | ||
273 | part->header.checksum = nvram_checksum(&part->header); | ||
274 | |||
275 | list_del(&cur_part->partition); | ||
276 | kfree(cur_part); | ||
277 | j = &part->partition; /* fixup our loop */ | ||
278 | } | ||
279 | |||
280 | rc = nvram_write_header(part); | 285 | rc = nvram_write_header(part); |
281 | if (rc <= 0) { | 286 | if (rc <= 0) { |
282 | printk(KERN_ERR "nvram_remove_os_partition: nvram_write failed (%d)\n", rc); | 287 | printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc); |
283 | return rc; | 288 | return rc; |
284 | } | 289 | } |
290 | } | ||
285 | 291 | ||
292 | /* Merge contiguous ones */ | ||
293 | prev = NULL; | ||
294 | list_for_each_entry_safe(part, tmp, &nvram_partitions, partition) { | ||
295 | if (part->header.signature != NVRAM_SIG_FREE) { | ||
296 | prev = NULL; | ||
297 | continue; | ||
298 | } | ||
299 | if (prev) { | ||
300 | prev->header.length += part->header.length; | ||
301 | prev->header.checksum = nvram_checksum(&prev->header); | ||
302 | rc = nvram_write_header(prev); | ||
303 | if (rc <= 0) { | ||
304 | printk(KERN_ERR "nvram_remove_partition: nvram_write failed (%d)\n", rc); | ||
305 | return rc; | ||
306 | } | ||
307 | list_del(&part->partition); | ||
308 | kfree(part); | ||
309 | } else | ||
310 | prev = part; | ||
286 | } | 311 | } |
287 | 312 | ||
288 | return 0; | 313 | return 0; |
289 | } | 314 | } |
290 | 315 | ||
291 | /* nvram_create_os_partition | 316 | /** |
317 | * nvram_create_partition - Create a partition in nvram | ||
318 | * @name: name of the partition to create | ||
319 | * @sig: signature of the partition to create | ||
320 | * @req_size: size of data to allocate in bytes | ||
321 | * @min_size: minimum acceptable size (0 means req_size) | ||
292 | * | 322 | * |
293 | * Create a OS linux partition to buffer error logs. | 323 | * Returns a negative error code or a positive nvram index |
294 | * Will create a partition starting at the first free | 324 | * of the beginning of the data area of the newly created |
295 | * space found if space has enough room. | 325 | * partition. If you provided a min_size smaller than req_size |
326 | * you need to query for the actual size yourself after the | ||
327 | * call using nvram_get_partition_size(). | ||
296 | */ | 328 | */ |
297 | static int __init nvram_create_os_partition(void) | 329 | loff_t __init nvram_create_partition(const char *name, int sig, |
330 | int req_size, int min_size) | ||
298 | { | 331 | { |
299 | struct nvram_partition *part; | 332 | struct nvram_partition *part; |
300 | struct nvram_partition *new_part; | 333 | struct nvram_partition *new_part; |
301 | struct nvram_partition *free_part = NULL; | 334 | struct nvram_partition *free_part = NULL; |
302 | int seq_init[2] = { 0, 0 }; | 335 | static char nv_init_vals[16]; |
303 | loff_t tmp_index; | 336 | loff_t tmp_index; |
304 | long size = 0; | 337 | long size = 0; |
305 | int rc; | 338 | int rc; |
306 | 339 | ||
340 | /* Convert sizes from bytes to blocks */ | ||
341 | req_size = _ALIGN_UP(req_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN; | ||
342 | min_size = _ALIGN_UP(min_size, NVRAM_BLOCK_LEN) / NVRAM_BLOCK_LEN; | ||
343 | |||
344 | /* If no minimum size specified, make it the same as the | ||
345 | * requested size | ||
346 | */ | ||
347 | if (min_size == 0) | ||
348 | min_size = req_size; | ||
349 | if (min_size > req_size) | ||
350 | return -EINVAL; | ||
351 | |||
352 | /* Now add one block to each for the header */ | ||
353 | req_size += 1; | ||
354 | min_size += 1; | ||
355 | |||
307 | /* Find a free partition that will give us the maximum needed size | 356 | /* Find a free partition that will give us the maximum needed size |
308 | If can't find one that will give us the minimum size needed */ | 357 | If can't find one that will give us the minimum size needed */ |
309 | list_for_each_entry(part, &nvram_part->partition, partition) { | 358 | list_for_each_entry(part, &nvram_partitions, partition) { |
310 | if (part->header.signature != NVRAM_SIG_FREE) | 359 | if (part->header.signature != NVRAM_SIG_FREE) |
311 | continue; | 360 | continue; |
312 | 361 | ||
313 | if (part->header.length >= NVRAM_MAX_REQ) { | 362 | if (part->header.length >= req_size) { |
314 | size = NVRAM_MAX_REQ; | 363 | size = req_size; |
315 | free_part = part; | 364 | free_part = part; |
316 | break; | 365 | break; |
317 | } | 366 | } |
318 | if (!size && part->header.length >= NVRAM_MIN_REQ) { | 367 | if (part->header.length > size && |
319 | size = NVRAM_MIN_REQ; | 368 | part->header.length >= min_size) { |
369 | size = part->header.length; | ||
320 | free_part = part; | 370 | free_part = part; |
321 | } | 371 | } |
322 | } | 372 | } |
@@ -326,136 +376,95 @@ static int __init nvram_create_os_partition(void) | |||
326 | /* Create our OS partition */ | 376 | /* Create our OS partition */ |
327 | new_part = kmalloc(sizeof(*new_part), GFP_KERNEL); | 377 | new_part = kmalloc(sizeof(*new_part), GFP_KERNEL); |
328 | if (!new_part) { | 378 | if (!new_part) { |
329 | printk(KERN_ERR "nvram_create_os_partition: kmalloc failed\n"); | 379 | pr_err("nvram_create_os_partition: kmalloc failed\n"); |
330 | return -ENOMEM; | 380 | return -ENOMEM; |
331 | } | 381 | } |
332 | 382 | ||
333 | new_part->index = free_part->index; | 383 | new_part->index = free_part->index; |
334 | new_part->header.signature = NVRAM_SIG_OS; | 384 | new_part->header.signature = sig; |
335 | new_part->header.length = size; | 385 | new_part->header.length = size; |
336 | strcpy(new_part->header.name, "ppc64,linux"); | 386 | strncpy(new_part->header.name, name, 12); |
337 | new_part->header.checksum = nvram_checksum(&new_part->header); | 387 | new_part->header.checksum = nvram_checksum(&new_part->header); |
338 | 388 | ||
339 | rc = nvram_write_header(new_part); | 389 | rc = nvram_write_header(new_part); |
340 | if (rc <= 0) { | 390 | if (rc <= 0) { |
341 | printk(KERN_ERR "nvram_create_os_partition: nvram_write_header " | 391 | pr_err("nvram_create_os_partition: nvram_write_header " |
342 | "failed (%d)\n", rc); | ||
343 | return rc; | ||
344 | } | ||
345 | |||
346 | /* make sure and initialize to zero the sequence number and the error | ||
347 | type logged */ | ||
348 | tmp_index = new_part->index + NVRAM_HEADER_LEN; | ||
349 | rc = ppc_md.nvram_write((char *)&seq_init, sizeof(seq_init), &tmp_index); | ||
350 | if (rc <= 0) { | ||
351 | printk(KERN_ERR "nvram_create_os_partition: nvram_write " | ||
352 | "failed (%d)\n", rc); | 392 | "failed (%d)\n", rc); |
353 | return rc; | 393 | return rc; |
354 | } | 394 | } |
355 | |||
356 | nvram_error_log_index = new_part->index + NVRAM_HEADER_LEN; | ||
357 | nvram_error_log_size = ((part->header.length - 1) * | ||
358 | NVRAM_BLOCK_LEN) - sizeof(struct err_log_info); | ||
359 | |||
360 | list_add_tail(&new_part->partition, &free_part->partition); | 395 | list_add_tail(&new_part->partition, &free_part->partition); |
361 | 396 | ||
362 | if (free_part->header.length <= size) { | 397 | /* Adjust or remove the partition we stole the space from */ |
398 | if (free_part->header.length > size) { | ||
399 | free_part->index += size * NVRAM_BLOCK_LEN; | ||
400 | free_part->header.length -= size; | ||
401 | free_part->header.checksum = nvram_checksum(&free_part->header); | ||
402 | rc = nvram_write_header(free_part); | ||
403 | if (rc <= 0) { | ||
404 | pr_err("nvram_create_os_partition: nvram_write_header " | ||
405 | "failed (%d)\n", rc); | ||
406 | return rc; | ||
407 | } | ||
408 | } else { | ||
363 | list_del(&free_part->partition); | 409 | list_del(&free_part->partition); |
364 | kfree(free_part); | 410 | kfree(free_part); |
365 | return 0; | ||
366 | } | 411 | } |
367 | 412 | ||
368 | /* Adjust the partition we stole the space from */ | 413 | /* Clear the new partition */ |
369 | free_part->index += size * NVRAM_BLOCK_LEN; | 414 | for (tmp_index = new_part->index + NVRAM_HEADER_LEN; |
370 | free_part->header.length -= size; | 415 | tmp_index < ((size - 1) * NVRAM_BLOCK_LEN); |
371 | free_part->header.checksum = nvram_checksum(&free_part->header); | 416 | tmp_index += NVRAM_BLOCK_LEN) { |
372 | 417 | rc = ppc_md.nvram_write(nv_init_vals, NVRAM_BLOCK_LEN, &tmp_index); | |
373 | rc = nvram_write_header(free_part); | 418 | if (rc <= 0) { |
374 | if (rc <= 0) { | 419 | pr_err("nvram_create_partition: nvram_write failed (%d)\n", rc); |
375 | printk(KERN_ERR "nvram_create_os_partition: nvram_write_header " | 420 | return rc; |
376 | "failed (%d)\n", rc); | 421 | } |
377 | return rc; | ||
378 | } | 422 | } |
379 | 423 | ||
380 | return 0; | 424 | return new_part->index + NVRAM_HEADER_LEN; |
381 | } | 425 | } |
382 | 426 | ||
383 | 427 | /** | |
384 | /* nvram_setup_partition | 428 | * nvram_get_partition_size - Get the data size of an nvram partition |
385 | * | 429 | * @data_index: This is the offset of the start of the data of |
386 | * This will setup the partition we need for buffering the | 430 | * the partition. The same value that is returned by |
387 | * error logs and cleanup partitions if needed. | 431 | * nvram_create_partition(). |
388 | * | ||
389 | * The general strategy is the following: | ||
390 | * 1.) If there is ppc64,linux partition large enough then use it. | ||
391 | * 2.) If there is not a ppc64,linux partition large enough, search | ||
392 | * for a free partition that is large enough. | ||
393 | * 3.) If there is not a free partition large enough remove | ||
394 | * _all_ OS partitions and consolidate the space. | ||
395 | * 4.) Will first try getting a chunk that will satisfy the maximum | ||
396 | * error log size (NVRAM_MAX_REQ). | ||
397 | * 5.) If the max chunk cannot be allocated then try finding a chunk | ||
398 | * that will satisfy the minum needed (NVRAM_MIN_REQ). | ||
399 | */ | 432 | */ |
400 | static int __init nvram_setup_partition(void) | 433 | int nvram_get_partition_size(loff_t data_index) |
401 | { | 434 | { |
402 | struct list_head * p; | 435 | struct nvram_partition *part; |
403 | struct nvram_partition * part; | 436 | |
404 | int rc; | 437 | list_for_each_entry(part, &nvram_partitions, partition) { |
405 | 438 | if (part->index + NVRAM_HEADER_LEN == data_index) | |
406 | /* For now, we don't do any of this on pmac, until I | 439 | return (part->header.length - 1) * NVRAM_BLOCK_LEN; |
407 | * have figured out if it's worth killing some unused stuffs | 440 | } |
408 | * in our nvram, as Apple defined partitions use pretty much | 441 | return -1; |
409 | * all of the space | 442 | } |
410 | */ | ||
411 | if (machine_is(powermac)) | ||
412 | return -ENOSPC; | ||
413 | |||
414 | /* see if we have an OS partition that meets our needs. | ||
415 | will try getting the max we need. If not we'll delete | ||
416 | partitions and try again. */ | ||
417 | list_for_each(p, &nvram_part->partition) { | ||
418 | part = list_entry(p, struct nvram_partition, partition); | ||
419 | if (part->header.signature != NVRAM_SIG_OS) | ||
420 | continue; | ||
421 | 443 | ||
422 | if (strcmp(part->header.name, "ppc64,linux")) | ||
423 | continue; | ||
424 | 444 | ||
425 | if (part->header.length >= NVRAM_MIN_REQ) { | 445 | /** |
426 | /* found our partition */ | 446 | * nvram_find_partition - Find an nvram partition by signature and name |
427 | nvram_error_log_index = part->index + NVRAM_HEADER_LEN; | 447 | * @name: Name of the partition or NULL for any name |
428 | nvram_error_log_size = ((part->header.length - 1) * | 448 | * @sig: Signature to test against |
429 | NVRAM_BLOCK_LEN) - sizeof(struct err_log_info); | 449 | * @out_size: if non-NULL, returns the size of the data part of the partition |
430 | return 0; | 450 | */ |
451 | loff_t nvram_find_partition(const char *name, int sig, int *out_size) | ||
452 | { | ||
453 | struct nvram_partition *p; | ||
454 | |||
455 | list_for_each_entry(p, &nvram_partitions, partition) { | ||
456 | if (p->header.signature == sig && | ||
457 | (!name || !strncmp(p->header.name, name, 12))) { | ||
458 | if (out_size) | ||
459 | *out_size = (p->header.length - 1) * | ||
460 | NVRAM_BLOCK_LEN; | ||
461 | return p->index + NVRAM_HEADER_LEN; | ||
431 | } | 462 | } |
432 | } | 463 | } |
433 | |||
434 | /* try creating a partition with the free space we have */ | ||
435 | rc = nvram_create_os_partition(); | ||
436 | if (!rc) { | ||
437 | return 0; | ||
438 | } | ||
439 | |||
440 | /* need to free up some space */ | ||
441 | rc = nvram_remove_os_partition(); | ||
442 | if (rc) { | ||
443 | return rc; | ||
444 | } | ||
445 | |||
446 | /* create a partition in this new space */ | ||
447 | rc = nvram_create_os_partition(); | ||
448 | if (rc) { | ||
449 | printk(KERN_ERR "nvram_create_os_partition: Could not find a " | ||
450 | "NVRAM partition large enough\n"); | ||
451 | return rc; | ||
452 | } | ||
453 | |||
454 | return 0; | 464 | return 0; |
455 | } | 465 | } |
456 | 466 | ||
457 | 467 | int __init nvram_scan_partitions(void) | |
458 | static int __init nvram_scan_partitions(void) | ||
459 | { | 468 | { |
460 | loff_t cur_index = 0; | 469 | loff_t cur_index = 0; |
461 | struct nvram_header phead; | 470 | struct nvram_header phead; |
@@ -465,7 +474,7 @@ static int __init nvram_scan_partitions(void) | |||
465 | int total_size; | 474 | int total_size; |
466 | int err; | 475 | int err; |
467 | 476 | ||
468 | if (ppc_md.nvram_size == NULL) | 477 | if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0) |
469 | return -ENODEV; | 478 | return -ENODEV; |
470 | total_size = ppc_md.nvram_size(); | 479 | total_size = ppc_md.nvram_size(); |
471 | 480 | ||
@@ -512,12 +521,16 @@ static int __init nvram_scan_partitions(void) | |||
512 | 521 | ||
513 | memcpy(&tmp_part->header, &phead, NVRAM_HEADER_LEN); | 522 | memcpy(&tmp_part->header, &phead, NVRAM_HEADER_LEN); |
514 | tmp_part->index = cur_index; | 523 | tmp_part->index = cur_index; |
515 | list_add_tail(&tmp_part->partition, &nvram_part->partition); | 524 | list_add_tail(&tmp_part->partition, &nvram_partitions); |
516 | 525 | ||
517 | cur_index += phead.length * NVRAM_BLOCK_LEN; | 526 | cur_index += phead.length * NVRAM_BLOCK_LEN; |
518 | } | 527 | } |
519 | err = 0; | 528 | err = 0; |
520 | 529 | ||
530 | #ifdef DEBUG_NVRAM | ||
531 | nvram_print_partitions("NVRAM Partitions"); | ||
532 | #endif | ||
533 | |||
521 | out: | 534 | out: |
522 | kfree(header); | 535 | kfree(header); |
523 | return err; | 536 | return err; |
@@ -525,9 +538,10 @@ static int __init nvram_scan_partitions(void) | |||
525 | 538 | ||
526 | static int __init nvram_init(void) | 539 | static int __init nvram_init(void) |
527 | { | 540 | { |
528 | int error; | ||
529 | int rc; | 541 | int rc; |
530 | 542 | ||
543 | BUILD_BUG_ON(NVRAM_BLOCK_LEN != 16); | ||
544 | |||
531 | if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0) | 545 | if (ppc_md.nvram_size == NULL || ppc_md.nvram_size() <= 0) |
532 | return -ENODEV; | 546 | return -ENODEV; |
533 | 547 | ||
@@ -537,29 +551,6 @@ static int __init nvram_init(void) | |||
537 | return rc; | 551 | return rc; |
538 | } | 552 | } |
539 | 553 | ||
540 | /* initialize our anchor for the nvram partition list */ | ||
541 | nvram_part = kmalloc(sizeof(struct nvram_partition), GFP_KERNEL); | ||
542 | if (!nvram_part) { | ||
543 | printk(KERN_ERR "nvram_init: Failed kmalloc\n"); | ||
544 | return -ENOMEM; | ||
545 | } | ||
546 | INIT_LIST_HEAD(&nvram_part->partition); | ||
547 | |||
548 | /* Get all the NVRAM partitions */ | ||
549 | error = nvram_scan_partitions(); | ||
550 | if (error) { | ||
551 | printk(KERN_ERR "nvram_init: Failed nvram_scan_partitions\n"); | ||
552 | return error; | ||
553 | } | ||
554 | |||
555 | if(nvram_setup_partition()) | ||
556 | printk(KERN_WARNING "nvram_init: Could not find nvram partition" | ||
557 | " for nvram buffered error logging.\n"); | ||
558 | |||
559 | #ifdef DEBUG_NVRAM | ||
560 | nvram_print_partitions("NVRAM Partitions"); | ||
561 | #endif | ||
562 | |||
563 | return rc; | 554 | return rc; |
564 | } | 555 | } |
565 | 556 | ||
@@ -568,135 +559,6 @@ void __exit nvram_cleanup(void) | |||
568 | misc_deregister( &nvram_dev ); | 559 | misc_deregister( &nvram_dev ); |
569 | } | 560 | } |
570 | 561 | ||
571 | |||
572 | #ifdef CONFIG_PPC_PSERIES | ||
573 | |||
574 | /* nvram_write_error_log | ||
575 | * | ||
576 | * We need to buffer the error logs into nvram to ensure that we have | ||
577 | * the failure information to decode. If we have a severe error there | ||
578 | * is no way to guarantee that the OS or the machine is in a state to | ||
579 | * get back to user land and write the error to disk. For example if | ||
580 | * the SCSI device driver causes a Machine Check by writing to a bad | ||
581 | * IO address, there is no way of guaranteeing that the device driver | ||
582 | * is in any state that is would also be able to write the error data | ||
583 | * captured to disk, thus we buffer it in NVRAM for analysis on the | ||
584 | * next boot. | ||
585 | * | ||
586 | * In NVRAM the partition containing the error log buffer will looks like: | ||
587 | * Header (in bytes): | ||
588 | * +-----------+----------+--------+------------+------------------+ | ||
589 | * | signature | checksum | length | name | data | | ||
590 | * |0 |1 |2 3|4 15|16 length-1| | ||
591 | * +-----------+----------+--------+------------+------------------+ | ||
592 | * | ||
593 | * The 'data' section would look like (in bytes): | ||
594 | * +--------------+------------+-----------------------------------+ | ||
595 | * | event_logged | sequence # | error log | | ||
596 | * |0 3|4 7|8 nvram_error_log_size-1| | ||
597 | * +--------------+------------+-----------------------------------+ | ||
598 | * | ||
599 | * event_logged: 0 if event has not been logged to syslog, 1 if it has | ||
600 | * sequence #: The unique sequence # for each event. (until it wraps) | ||
601 | * error log: The error log from event_scan | ||
602 | */ | ||
603 | int nvram_write_error_log(char * buff, int length, | ||
604 | unsigned int err_type, unsigned int error_log_cnt) | ||
605 | { | ||
606 | int rc; | ||
607 | loff_t tmp_index; | ||
608 | struct err_log_info info; | ||
609 | |||
610 | if (nvram_error_log_index == -1) { | ||
611 | return -ESPIPE; | ||
612 | } | ||
613 | |||
614 | if (length > nvram_error_log_size) { | ||
615 | length = nvram_error_log_size; | ||
616 | } | ||
617 | |||
618 | info.error_type = err_type; | ||
619 | info.seq_num = error_log_cnt; | ||
620 | |||
621 | tmp_index = nvram_error_log_index; | ||
622 | |||
623 | rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index); | ||
624 | if (rc <= 0) { | ||
625 | printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc); | ||
626 | return rc; | ||
627 | } | ||
628 | |||
629 | rc = ppc_md.nvram_write(buff, length, &tmp_index); | ||
630 | if (rc <= 0) { | ||
631 | printk(KERN_ERR "nvram_write_error_log: Failed nvram_write (%d)\n", rc); | ||
632 | return rc; | ||
633 | } | ||
634 | |||
635 | return 0; | ||
636 | } | ||
637 | |||
638 | /* nvram_read_error_log | ||
639 | * | ||
640 | * Reads nvram for error log for at most 'length' | ||
641 | */ | ||
642 | int nvram_read_error_log(char * buff, int length, | ||
643 | unsigned int * err_type, unsigned int * error_log_cnt) | ||
644 | { | ||
645 | int rc; | ||
646 | loff_t tmp_index; | ||
647 | struct err_log_info info; | ||
648 | |||
649 | if (nvram_error_log_index == -1) | ||
650 | return -1; | ||
651 | |||
652 | if (length > nvram_error_log_size) | ||
653 | length = nvram_error_log_size; | ||
654 | |||
655 | tmp_index = nvram_error_log_index; | ||
656 | |||
657 | rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index); | ||
658 | if (rc <= 0) { | ||
659 | printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc); | ||
660 | return rc; | ||
661 | } | ||
662 | |||
663 | rc = ppc_md.nvram_read(buff, length, &tmp_index); | ||
664 | if (rc <= 0) { | ||
665 | printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc); | ||
666 | return rc; | ||
667 | } | ||
668 | |||
669 | *error_log_cnt = info.seq_num; | ||
670 | *err_type = info.error_type; | ||
671 | |||
672 | return 0; | ||
673 | } | ||
674 | |||
675 | /* This doesn't actually zero anything, but it sets the event_logged | ||
676 | * word to tell that this event is safely in syslog. | ||
677 | */ | ||
678 | int nvram_clear_error_log(void) | ||
679 | { | ||
680 | loff_t tmp_index; | ||
681 | int clear_word = ERR_FLAG_ALREADY_LOGGED; | ||
682 | int rc; | ||
683 | |||
684 | if (nvram_error_log_index == -1) | ||
685 | return -1; | ||
686 | |||
687 | tmp_index = nvram_error_log_index; | ||
688 | |||
689 | rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index); | ||
690 | if (rc <= 0) { | ||
691 | printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc); | ||
692 | return rc; | ||
693 | } | ||
694 | |||
695 | return 0; | ||
696 | } | ||
697 | |||
698 | #endif /* CONFIG_PPC_PSERIES */ | ||
699 | |||
700 | module_init(nvram_init); | 562 | module_init(nvram_init); |
701 | module_exit(nvram_cleanup); | 563 | module_exit(nvram_cleanup); |
702 | MODULE_LICENSE("GPL"); | 564 | MODULE_LICENSE("GPL"); |
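With the pseries error-log plumbing removed from this file, platform code is expected to manage its own nvram area through the generic helpers defined above (nvram_remove_partition, nvram_create_partition, nvram_find_partition, nvram_get_partition_size). A hedged sketch of the intended calling sequence; the partition name, sizes and the surrounding function are illustrative, only the helper signatures come from the code above:

	static int __init example_claim_nvram(void)
	{
		static const char *keep[] = { "ppc64,linux", NULL };
		loff_t data;
		int size;

		/* Drop stale OS-signature partitions except the one we keep;
		 * adjacent free space is merged by the helper itself. */
		nvram_remove_partition(NULL, NVRAM_SIG_OS, keep);

		/* Reuse an existing partition if one is already there... */
		data = nvram_find_partition("ppc64,linux", NVRAM_SIG_OS, &size);
		if (data == 0) {
			/* ...otherwise carve one out of free space: ideally 2048
			 * bytes, but accept down to 1024 (both rounded up to
			 * 16-byte blocks internally). */
			data = nvram_create_partition("ppc64,linux", NVRAM_SIG_OS,
						      2048, 1024);
			if (data < 0)
				return data;
			size = nvram_get_partition_size(data);
		}

		/* 'data' is the nvram offset of the usable area and 'size' its
		 * length, ready for ppc_md.nvram_read()/nvram_write(). */
		return 0;
	}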
diff --git a/arch/powerpc/kernel/of_platform.c b/arch/powerpc/kernel/of_platform.c index b2c363ef38ad..24582181b6ec 100644 --- a/arch/powerpc/kernel/of_platform.c +++ b/arch/powerpc/kernel/of_platform.c | |||
@@ -36,8 +36,7 @@ | |||
36 | * lacking some bits needed here. | 36 | * lacking some bits needed here. |
37 | */ | 37 | */ |
38 | 38 | ||
39 | static int __devinit of_pci_phb_probe(struct platform_device *dev, | 39 | static int __devinit of_pci_phb_probe(struct platform_device *dev) |
40 | const struct of_device_id *match) | ||
41 | { | 40 | { |
42 | struct pci_controller *phb; | 41 | struct pci_controller *phb; |
43 | 42 | ||
@@ -74,7 +73,7 @@ static int __devinit of_pci_phb_probe(struct platform_device *dev, | |||
74 | #endif /* CONFIG_EEH */ | 73 | #endif /* CONFIG_EEH */ |
75 | 74 | ||
76 | /* Scan the bus */ | 75 | /* Scan the bus */ |
77 | pcibios_scan_phb(phb, dev->dev.of_node); | 76 | pcibios_scan_phb(phb); |
78 | if (phb->bus == NULL) | 77 | if (phb->bus == NULL) |
79 | return -ENXIO; | 78 | return -ENXIO; |
80 | 79 | ||
@@ -104,7 +103,7 @@ static struct of_device_id of_pci_phb_ids[] = { | |||
104 | {} | 103 | {} |
105 | }; | 104 | }; |
106 | 105 | ||
107 | static struct of_platform_driver of_pci_phb_driver = { | 106 | static struct platform_driver of_pci_phb_driver = { |
108 | .probe = of_pci_phb_probe, | 107 | .probe = of_pci_phb_probe, |
109 | .driver = { | 108 | .driver = { |
110 | .name = "of-pci", | 109 | .name = "of-pci", |
@@ -115,7 +114,7 @@ static struct of_platform_driver of_pci_phb_driver = { | |||
115 | 114 | ||
116 | static __init int of_pci_phb_init(void) | 115 | static __init int of_pci_phb_init(void) |
117 | { | 116 | { |
118 | return of_register_platform_driver(&of_pci_phb_driver); | 117 | return platform_driver_register(&of_pci_phb_driver); |
119 | } | 118 | } |
120 | 119 | ||
121 | device_initcall(of_pci_phb_init); | 120 | device_initcall(of_pci_phb_init); |
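The PHB driver above is converted from the old of_platform_driver to a plain platform_driver: probe() loses its of_device_id argument and registration goes through platform_driver_register(). A minimal sketch of the resulting pattern for any similar bus driver; every name here is hypothetical, only the API shape mirrors the conversion above:

	#include <linux/init.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static const struct of_device_id foo_match[] = {
		{ .compatible = "example,foo", },
		{}
	};

	static int __devinit foo_probe(struct platform_device *pdev)
	{
		/* The matched firmware node is still reachable here. */
		struct device_node *np = pdev->dev.of_node;

		return np ? 0 : -ENODEV;
	}

	static struct platform_driver foo_driver = {
		.probe	= foo_probe,
		.driver	= {
			.name = "foo",
			.of_match_table = foo_match,
		},
	};

	static int __init foo_init(void)
	{
		return platform_driver_register(&foo_driver);
	}
	device_initcall(foo_init);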
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index d0a26f1770fe..efeb88184182 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * 2 of the License, or (at your option) any later version. | 7 | * 2 of the License, or (at your option) any later version. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/threads.h> | 10 | #include <linux/smp.h> |
11 | #include <linux/module.h> | 11 | #include <linux/module.h> |
12 | #include <linux/memblock.h> | 12 | #include <linux/memblock.h> |
13 | 13 | ||
@@ -36,7 +36,7 @@ extern unsigned long __toc_start; | |||
36 | * will suffice to ensure that it doesn't cross a page boundary. | 36 | * will suffice to ensure that it doesn't cross a page boundary. |
37 | */ | 37 | */ |
38 | struct lppaca lppaca[] = { | 38 | struct lppaca lppaca[] = { |
39 | [0 ... (NR_CPUS-1)] = { | 39 | [0 ... (NR_LPPACAS-1)] = { |
40 | .desc = 0xd397d781, /* "LpPa" */ | 40 | .desc = 0xd397d781, /* "LpPa" */ |
41 | .size = sizeof(struct lppaca), | 41 | .size = sizeof(struct lppaca), |
42 | .dyn_proc_status = 2, | 42 | .dyn_proc_status = 2, |
@@ -49,6 +49,54 @@ struct lppaca lppaca[] = { | |||
49 | }, | 49 | }, |
50 | }; | 50 | }; |
51 | 51 | ||
52 | static struct lppaca *extra_lppacas; | ||
53 | static long __initdata lppaca_size; | ||
54 | |||
55 | static void allocate_lppacas(int nr_cpus, unsigned long limit) | ||
56 | { | ||
57 | if (nr_cpus <= NR_LPPACAS) | ||
58 | return; | ||
59 | |||
60 | lppaca_size = PAGE_ALIGN(sizeof(struct lppaca) * | ||
61 | (nr_cpus - NR_LPPACAS)); | ||
62 | extra_lppacas = __va(memblock_alloc_base(lppaca_size, | ||
63 | PAGE_SIZE, limit)); | ||
64 | } | ||
65 | |||
66 | static struct lppaca *new_lppaca(int cpu) | ||
67 | { | ||
68 | struct lppaca *lp; | ||
69 | |||
70 | if (cpu < NR_LPPACAS) | ||
71 | return &lppaca[cpu]; | ||
72 | |||
73 | lp = extra_lppacas + (cpu - NR_LPPACAS); | ||
74 | *lp = lppaca[0]; | ||
75 | |||
76 | return lp; | ||
77 | } | ||
78 | |||
79 | static void free_lppacas(void) | ||
80 | { | ||
81 | long new_size = 0, nr; | ||
82 | |||
83 | if (!lppaca_size) | ||
84 | return; | ||
85 | nr = num_possible_cpus() - NR_LPPACAS; | ||
86 | if (nr > 0) | ||
87 | new_size = PAGE_ALIGN(nr * sizeof(struct lppaca)); | ||
88 | if (new_size >= lppaca_size) | ||
89 | return; | ||
90 | |||
91 | memblock_free(__pa(extra_lppacas) + new_size, lppaca_size - new_size); | ||
92 | lppaca_size = new_size; | ||
93 | } | ||
94 | |||
95 | #else | ||
96 | |||
97 | static inline void allocate_lppacas(int nr_cpus, unsigned long limit) { } | ||
98 | static inline void free_lppacas(void) { } | ||
99 | |||
52 | #endif /* CONFIG_PPC_BOOK3S */ | 100 | #endif /* CONFIG_PPC_BOOK3S */ |
53 | 101 | ||
54 | #ifdef CONFIG_PPC_STD_MMU_64 | 102 | #ifdef CONFIG_PPC_STD_MMU_64 |
@@ -88,7 +136,7 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) | |||
88 | unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL; | 136 | unsigned long kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL; |
89 | 137 | ||
90 | #ifdef CONFIG_PPC_BOOK3S | 138 | #ifdef CONFIG_PPC_BOOK3S |
91 | new_paca->lppaca_ptr = &lppaca[cpu]; | 139 | new_paca->lppaca_ptr = new_lppaca(cpu); |
92 | #else | 140 | #else |
93 | new_paca->kernel_pgd = swapper_pg_dir; | 141 | new_paca->kernel_pgd = swapper_pg_dir; |
94 | #endif | 142 | #endif |
@@ -108,18 +156,29 @@ void __init initialise_paca(struct paca_struct *new_paca, int cpu) | |||
108 | /* Put the paca pointer into r13 and SPRG_PACA */ | 156 | /* Put the paca pointer into r13 and SPRG_PACA */ |
109 | void setup_paca(struct paca_struct *new_paca) | 157 | void setup_paca(struct paca_struct *new_paca) |
110 | { | 158 | { |
159 | /* Setup r13 */ | ||
111 | local_paca = new_paca; | 160 | local_paca = new_paca; |
112 | mtspr(SPRN_SPRG_PACA, local_paca); | 161 | |
113 | #ifdef CONFIG_PPC_BOOK3E | 162 | #ifdef CONFIG_PPC_BOOK3E |
163 | /* On Book3E, initialize the TLB miss exception frames */ | ||
114 | mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb); | 164 | mtspr(SPRN_SPRG_TLB_EXFRAME, local_paca->extlb); |
165 | #else | ||
166 | /* In HV mode, we setup both HPACA and PACA to avoid problems | ||
167 | * if we do a GET_PACA() before the feature fixups have been | ||
168 | * applied | ||
169 | */ | ||
170 | if (cpu_has_feature(CPU_FTR_HVMODE_206)) | ||
171 | mtspr(SPRN_SPRG_HPACA, local_paca); | ||
115 | #endif | 172 | #endif |
173 | mtspr(SPRN_SPRG_PACA, local_paca); | ||
174 | |||
116 | } | 175 | } |
117 | 176 | ||
118 | static int __initdata paca_size; | 177 | static int __initdata paca_size; |
119 | 178 | ||
120 | void __init allocate_pacas(void) | 179 | void __init allocate_pacas(void) |
121 | { | 180 | { |
122 | int nr_cpus, cpu, limit; | 181 | int cpu, limit; |
123 | 182 | ||
124 | /* | 183 | /* |
125 | * We can't take SLB misses on the paca, and we want to access them | 184 | * We can't take SLB misses on the paca, and we want to access them |
@@ -127,25 +186,22 @@ void __init allocate_pacas(void) | |||
127 | * the first segment. On iSeries they must be within the area mapped | 186 | * the first segment. On iSeries they must be within the area mapped |
128 | * by the HV, which is HvPagesToMap * HVPAGESIZE bytes. | 187 | * by the HV, which is HvPagesToMap * HVPAGESIZE bytes. |
129 | */ | 188 | */ |
130 | limit = min(0x10000000ULL, memblock.rmo_size); | 189 | limit = min(0x10000000ULL, ppc64_rma_size); |
131 | if (firmware_has_feature(FW_FEATURE_ISERIES)) | 190 | if (firmware_has_feature(FW_FEATURE_ISERIES)) |
132 | limit = min(limit, HvPagesToMap * HVPAGESIZE); | 191 | limit = min(limit, HvPagesToMap * HVPAGESIZE); |
133 | 192 | ||
134 | nr_cpus = NR_CPUS; | 193 | paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); |
135 | /* On iSeries we know we can never have more than 64 cpus */ | ||
136 | if (firmware_has_feature(FW_FEATURE_ISERIES)) | ||
137 | nr_cpus = min(64, nr_cpus); | ||
138 | |||
139 | paca_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpus); | ||
140 | 194 | ||
141 | paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit)); | 195 | paca = __va(memblock_alloc_base(paca_size, PAGE_SIZE, limit)); |
142 | memset(paca, 0, paca_size); | 196 | memset(paca, 0, paca_size); |
143 | 197 | ||
144 | printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n", | 198 | printk(KERN_DEBUG "Allocated %u bytes for %d pacas at %p\n", |
145 | paca_size, nr_cpus, paca); | 199 | paca_size, nr_cpu_ids, paca); |
200 | |||
201 | allocate_lppacas(nr_cpu_ids, limit); | ||
146 | 202 | ||
147 | /* Can't use for_each_*_cpu, as they aren't functional yet */ | 203 | /* Can't use for_each_*_cpu, as they aren't functional yet */ |
148 | for (cpu = 0; cpu < nr_cpus; cpu++) | 204 | for (cpu = 0; cpu < nr_cpu_ids; cpu++) |
149 | initialise_paca(&paca[cpu], cpu); | 205 | initialise_paca(&paca[cpu], cpu); |
150 | } | 206 | } |
151 | 207 | ||
@@ -153,7 +209,7 @@ void __init free_unused_pacas(void) | |||
153 | { | 209 | { |
154 | int new_size; | 210 | int new_size; |
155 | 211 | ||
156 | new_size = PAGE_ALIGN(sizeof(struct paca_struct) * num_possible_cpus()); | 212 | new_size = PAGE_ALIGN(sizeof(struct paca_struct) * nr_cpu_ids); |
157 | 213 | ||
158 | if (new_size >= paca_size) | 214 | if (new_size >= paca_size) |
159 | return; | 215 | return; |
@@ -164,4 +220,6 @@ void __init free_unused_pacas(void) | |||
164 | paca_size - new_size); | 220 | paca_size - new_size); |
165 | 221 | ||
166 | paca_size = new_size; | 222 | paca_size = new_size; |
223 | |||
224 | free_lppacas(); | ||
167 | } | 225 | } |
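Only the first NR_LPPACAS lppaca entries remain statically allocated; CPUs above that get theirs from memblock via new_lppaca() and reach it through paca->lppaca_ptr. That is why the lparcfg hunks earlier in this series switch from lppaca[cpu] to lppaca_of(cpu). The accessor this implies is assumed to look like the one-liner below (the real definition belongs in asm/lppaca.h):

	/* Assumed shape of the accessor -- dereference the per-cpu paca's
	 * lppaca pointer instead of indexing the static array directly. */
	#define lppaca_of(cpu)	(*paca[cpu].lppaca_ptr)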
diff --git a/arch/powerpc/kernel/pci-common.c b/arch/powerpc/kernel/pci-common.c index 9021c4ad4bbd..893af2a9cd03 100644 --- a/arch/powerpc/kernel/pci-common.c +++ b/arch/powerpc/kernel/pci-common.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <linux/init.h> | 22 | #include <linux/init.h> |
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/of_address.h> | 24 | #include <linux/of_address.h> |
25 | #include <linux/of_pci.h> | ||
25 | #include <linux/mm.h> | 26 | #include <linux/mm.h> |
26 | #include <linux/list.h> | 27 | #include <linux/list.h> |
27 | #include <linux/syscalls.h> | 28 | #include <linux/syscalls.h> |
@@ -260,7 +261,7 @@ int pci_read_irq_line(struct pci_dev *pci_dev) | |||
260 | 261 | ||
261 | virq = irq_create_mapping(NULL, line); | 262 | virq = irq_create_mapping(NULL, line); |
262 | if (virq != NO_IRQ) | 263 | if (virq != NO_IRQ) |
263 | set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); | 264 | irq_set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); |
264 | } else { | 265 | } else { |
265 | pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", | 266 | pr_debug(" Got one, spec %d cells (0x%08x 0x%08x...) on %s\n", |
266 | oirq.size, oirq.specifier[0], oirq.specifier[1], | 267 | oirq.size, oirq.specifier[0], oirq.specifier[1], |
@@ -1090,8 +1091,6 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) | |||
1090 | bus->number, bus->self ? pci_name(bus->self) : "PHB"); | 1091 | bus->number, bus->self ? pci_name(bus->self) : "PHB"); |
1091 | 1092 | ||
1092 | list_for_each_entry(dev, &bus->devices, bus_list) { | 1093 | list_for_each_entry(dev, &bus->devices, bus_list) { |
1093 | struct dev_archdata *sd = &dev->dev.archdata; | ||
1094 | |||
1095 | /* Cardbus can call us to add new devices to a bus, so ignore | 1094 | /* Cardbus can call us to add new devices to a bus, so ignore |
1096 | * those who are already fully discovered | 1095 | * those who are already fully discovered |
1097 | */ | 1096 | */ |
@@ -1107,7 +1106,7 @@ void __devinit pcibios_setup_bus_devices(struct pci_bus *bus) | |||
1107 | set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); | 1106 | set_dev_node(&dev->dev, pcibus_to_node(dev->bus)); |
1108 | 1107 | ||
1109 | /* Hook up default DMA ops */ | 1108 | /* Hook up default DMA ops */ |
1110 | sd->dma_ops = pci_dma_ops; | 1109 | set_dma_ops(&dev->dev, pci_dma_ops); |
1111 | set_dma_offset(&dev->dev, PCI_DRAM_OFFSET); | 1110 | set_dma_offset(&dev->dev, PCI_DRAM_OFFSET); |
1112 | 1111 | ||
1113 | /* Additional platform DMA/iommu setup */ | 1112 | /* Additional platform DMA/iommu setup */ |
@@ -1689,13 +1688,8 @@ int early_find_capability(struct pci_controller *hose, int bus, int devfn, | |||
1689 | /** | 1688 | /** |
1690 | * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus | 1689 | * pci_scan_phb - Given a pci_controller, setup and scan the PCI bus |
1691 | * @hose: Pointer to the PCI host controller instance structure | 1690 | * @hose: Pointer to the PCI host controller instance structure |
1692 | * @sysdata: value to use for sysdata pointer. ppc32 and ppc64 differ here | ||
1693 | * | ||
1694 | * Note: the 'data' pointer is a temporary measure. As 32 and 64 bit | ||
1695 | * pci code gets merged, this parameter should become unnecessary because | ||
1696 | * both will use the same value. | ||
1697 | */ | 1691 | */ |
1698 | void __devinit pcibios_scan_phb(struct pci_controller *hose, void *sysdata) | 1692 | void __devinit pcibios_scan_phb(struct pci_controller *hose) |
1699 | { | 1693 | { |
1700 | struct pci_bus *bus; | 1694 | struct pci_bus *bus; |
1701 | struct device_node *node = hose->dn; | 1695 | struct device_node *node = hose->dn; |
@@ -1705,13 +1699,13 @@ void __devinit pcibios_scan_phb(struct pci_controller *hose, void *sysdata) | |||
1705 | node ? node->full_name : "<NO NAME>"); | 1699 | node ? node->full_name : "<NO NAME>"); |
1706 | 1700 | ||
1707 | /* Create an empty bus for the toplevel */ | 1701 | /* Create an empty bus for the toplevel */ |
1708 | bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, | 1702 | bus = pci_create_bus(hose->parent, hose->first_busno, hose->ops, hose); |
1709 | sysdata); | ||
1710 | if (bus == NULL) { | 1703 | if (bus == NULL) { |
1711 | pr_err("Failed to create bus for PCI domain %04x\n", | 1704 | pr_err("Failed to create bus for PCI domain %04x\n", |
1712 | hose->global_number); | 1705 | hose->global_number); |
1713 | return; | 1706 | return; |
1714 | } | 1707 | } |
1708 | bus->dev.of_node = of_node_get(node); | ||
1715 | bus->secondary = hose->first_busno; | 1709 | bus->secondary = hose->first_busno; |
1716 | hose->bus = bus; | 1710 | hose->bus = bus; |
1717 | 1711 | ||
diff --git a/arch/powerpc/kernel/pci_32.c b/arch/powerpc/kernel/pci_32.c index e7db5b48004a..bedb370459f2 100644 --- a/arch/powerpc/kernel/pci_32.c +++ b/arch/powerpc/kernel/pci_32.c | |||
@@ -381,7 +381,7 @@ static int __init pcibios_init(void) | |||
381 | if (pci_assign_all_buses) | 381 | if (pci_assign_all_buses) |
382 | hose->first_busno = next_busno; | 382 | hose->first_busno = next_busno; |
383 | hose->last_busno = 0xff; | 383 | hose->last_busno = 0xff; |
384 | pcibios_scan_phb(hose, hose); | 384 | pcibios_scan_phb(hose); |
385 | pci_bus_add_devices(hose->bus); | 385 | pci_bus_add_devices(hose->bus); |
386 | if (pci_assign_all_buses || next_busno <= hose->last_busno) | 386 | if (pci_assign_all_buses || next_busno <= hose->last_busno) |
387 | next_busno = hose->last_busno + pcibios_assign_bus_offset; | 387 | next_busno = hose->last_busno + pcibios_assign_bus_offset; |
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index d43fc65749c1..fc6452b6be9f 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c | |||
@@ -64,7 +64,7 @@ static int __init pcibios_init(void) | |||
64 | 64 | ||
65 | /* Scan all of the recorded PCI controllers. */ | 65 | /* Scan all of the recorded PCI controllers. */ |
66 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { | 66 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { |
67 | pcibios_scan_phb(hose, hose->dn); | 67 | pcibios_scan_phb(hose); |
68 | pci_bus_add_devices(hose->bus); | 68 | pci_bus_add_devices(hose->bus); |
69 | } | 69 | } |
70 | 70 | ||
@@ -193,8 +193,7 @@ int __devinit pcibios_map_io_space(struct pci_bus *bus) | |||
193 | hose->io_resource.start += io_virt_offset; | 193 | hose->io_resource.start += io_virt_offset; |
194 | hose->io_resource.end += io_virt_offset; | 194 | hose->io_resource.end += io_virt_offset; |
195 | 195 | ||
196 | pr_debug(" hose->io_resource=0x%016llx...0x%016llx\n", | 196 | pr_debug(" hose->io_resource=%pR\n", &hose->io_resource); |
197 | hose->io_resource.start, hose->io_resource.end); | ||
198 | 197 | ||
199 | return 0; | 198 | return 0; |
200 | } | 199 | } |
@@ -243,10 +242,10 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus, | |||
243 | break; | 242 | break; |
244 | bus = NULL; | 243 | bus = NULL; |
245 | } | 244 | } |
246 | if (bus == NULL || bus->sysdata == NULL) | 245 | if (bus == NULL || bus->dev.of_node == NULL) |
247 | return -ENODEV; | 246 | return -ENODEV; |
248 | 247 | ||
249 | hose_node = (struct device_node *)bus->sysdata; | 248 | hose_node = bus->dev.of_node; |
250 | hose = PCI_DN(hose_node)->phb; | 249 | hose = PCI_DN(hose_node)->phb; |
251 | 250 | ||
252 | switch (which) { | 251 | switch (which) { |
diff --git a/arch/powerpc/kernel/pci_dn.c b/arch/powerpc/kernel/pci_dn.c index d56b35ee7f74..6baabc13306a 100644 --- a/arch/powerpc/kernel/pci_dn.c +++ b/arch/powerpc/kernel/pci_dn.c | |||
@@ -43,10 +43,9 @@ void * __devinit update_dn_pci_info(struct device_node *dn, void *data) | |||
43 | const u32 *regs; | 43 | const u32 *regs; |
44 | struct pci_dn *pdn; | 44 | struct pci_dn *pdn; |
45 | 45 | ||
46 | pdn = alloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL); | 46 | pdn = zalloc_maybe_bootmem(sizeof(*pdn), GFP_KERNEL); |
47 | if (pdn == NULL) | 47 | if (pdn == NULL) |
48 | return NULL; | 48 | return NULL; |
49 | memset(pdn, 0, sizeof(*pdn)); | ||
50 | dn->data = pdn; | 49 | dn->data = pdn; |
51 | pdn->node = dn; | 50 | pdn->node = dn; |
52 | pdn->phb = phb; | 51 | pdn->phb = phb; |
@@ -161,7 +160,7 @@ static void *is_devfn_node(struct device_node *dn, void *data) | |||
161 | /* | 160 | /* |
162 | * This is the "slow" path for looking up a device_node from a | 161 | * This is the "slow" path for looking up a device_node from a |
163 | * pci_dev. It will hunt for the device under its parent's | 162 | * pci_dev. It will hunt for the device under its parent's |
164 | * phb and then update sysdata for a future fastpath. | 163 | * phb and then update of_node pointer. |
165 | * | 164 | * |
166 | * It may also do fixups on the actual device since this happens | 165 | * It may also do fixups on the actual device since this happens |
167 | * on the first read/write. | 166 | * on the first read/write. |
@@ -170,16 +169,22 @@ static void *is_devfn_node(struct device_node *dn, void *data) | |||
170 | * In this case it may probe for real hardware ("just in case") | 169 | * In this case it may probe for real hardware ("just in case") |
171 | * and add a device_node to the device tree if necessary. | 170 | * and add a device_node to the device tree if necessary. |
172 | * | 171 | * |
172 | * Is this function necessary anymore now that dev->dev.of_node is | ||
173 | * used to store the node pointer? | ||
174 | * | ||
173 | */ | 175 | */ |
174 | struct device_node *fetch_dev_dn(struct pci_dev *dev) | 176 | struct device_node *fetch_dev_dn(struct pci_dev *dev) |
175 | { | 177 | { |
176 | struct device_node *orig_dn = dev->sysdata; | 178 | struct pci_controller *phb = dev->sysdata; |
177 | struct device_node *dn; | 179 | struct device_node *dn; |
178 | unsigned long searchval = (dev->bus->number << 8) | dev->devfn; | 180 | unsigned long searchval = (dev->bus->number << 8) | dev->devfn; |
179 | 181 | ||
180 | dn = traverse_pci_devices(orig_dn, is_devfn_node, (void *)searchval); | 182 | if (WARN_ON(!phb)) |
183 | return NULL; | ||
184 | |||
185 | dn = traverse_pci_devices(phb->dn, is_devfn_node, (void *)searchval); | ||
181 | if (dn) | 186 | if (dn) |
182 | dev->sysdata = dn; | 187 | dev->dev.of_node = dn; |
183 | return dn; | 188 | return dn; |
184 | } | 189 | } |
185 | EXPORT_SYMBOL(fetch_dev_dn); | 190 | EXPORT_SYMBOL(fetch_dev_dn); |
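
The slow path above finds the device_node under the host bridge by matching the encoded (bus << 8) | devfn value and then caches the result, now in dev.of_node rather than sysdata, so later accesses take the fast path. A minimal user-space sketch of that match-and-cache idea; the types and names below are simplified placeholders, not the kernel's pci_dn structures:

#include <stdio.h>

/* Toy device-tree node list keyed by (bus << 8) | devfn, standing in for
 * the traversal done by traverse_pci_devices()/is_devfn_node(). */
struct node {
        unsigned long busdevfn;
        const char *name;
        struct node *next;
};

static struct node *find_node(struct node *head, unsigned long key)
{
        for (; head; head = head->next)
                if (head->busdevfn == key)
                        return head;    /* caller would cache this pointer */
        return NULL;
}

int main(void)
{
        struct node eth = { (0x02 << 8) | 0x08, "ethernet@2,1", NULL };
        struct node bridge = { (0x01 << 8) | 0x00, "pci-bridge", &eth };

        unsigned int bus = 0x02, devfn = 0x08;
        struct node *dn = find_node(&bridge, (bus << 8) | devfn);

        printf("found: %s\n", dn ? dn->name : "(none)");
        return 0;
}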
diff --git a/arch/powerpc/kernel/pci_of_scan.c b/arch/powerpc/kernel/pci_of_scan.c index e751506323b4..1e89a72fd030 100644 --- a/arch/powerpc/kernel/pci_of_scan.c +++ b/arch/powerpc/kernel/pci_of_scan.c | |||
@@ -135,7 +135,7 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, | |||
135 | pr_debug(" create device, devfn: %x, type: %s\n", devfn, type); | 135 | pr_debug(" create device, devfn: %x, type: %s\n", devfn, type); |
136 | 136 | ||
137 | dev->bus = bus; | 137 | dev->bus = bus; |
138 | dev->sysdata = node; | 138 | dev->dev.of_node = of_node_get(node); |
139 | dev->dev.parent = bus->bridge; | 139 | dev->dev.parent = bus->bridge; |
140 | dev->dev.bus = &pci_bus_type; | 140 | dev->dev.bus = &pci_bus_type; |
141 | dev->devfn = devfn; | 141 | dev->devfn = devfn; |
@@ -238,7 +238,7 @@ void __devinit of_scan_pci_bridge(struct device_node *node, | |||
238 | bus->primary = dev->bus->number; | 238 | bus->primary = dev->bus->number; |
239 | bus->subordinate = busrange[1]; | 239 | bus->subordinate = busrange[1]; |
240 | bus->bridge_ctl = 0; | 240 | bus->bridge_ctl = 0; |
241 | bus->sysdata = node; | 241 | bus->dev.of_node = of_node_get(node); |
242 | 242 | ||
243 | /* parse ranges property */ | 243 | /* parse ranges property */ |
244 | /* PCI #address-cells == 3 and #size-cells == 2 always */ | 244 | /* PCI #address-cells == 3 and #size-cells == 2 always */ |
diff --git a/arch/powerpc/kernel/perf_callchain.c b/arch/powerpc/kernel/perf_callchain.c index 95ad9dad298e..d05ae4204bbf 100644 --- a/arch/powerpc/kernel/perf_callchain.c +++ b/arch/powerpc/kernel/perf_callchain.c | |||
@@ -23,18 +23,6 @@ | |||
23 | #include "ppc32.h" | 23 | #include "ppc32.h" |
24 | #endif | 24 | #endif |
25 | 25 | ||
26 | /* | ||
27 | * Store another value in a callchain_entry. | ||
28 | */ | ||
29 | static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip) | ||
30 | { | ||
31 | unsigned int nr = entry->nr; | ||
32 | |||
33 | if (nr < PERF_MAX_STACK_DEPTH) { | ||
34 | entry->ip[nr] = ip; | ||
35 | entry->nr = nr + 1; | ||
36 | } | ||
37 | } | ||
38 | 26 | ||
39 | /* | 27 | /* |
40 | * Is sp valid as the address of the next kernel stack frame after prev_sp? | 28 | * Is sp valid as the address of the next kernel stack frame after prev_sp? |
@@ -58,8 +46,8 @@ static int valid_next_sp(unsigned long sp, unsigned long prev_sp) | |||
58 | return 0; | 46 | return 0; |
59 | } | 47 | } |
60 | 48 | ||
61 | static void perf_callchain_kernel(struct pt_regs *regs, | 49 | void |
62 | struct perf_callchain_entry *entry) | 50 | perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) |
63 | { | 51 | { |
64 | unsigned long sp, next_sp; | 52 | unsigned long sp, next_sp; |
65 | unsigned long next_ip; | 53 | unsigned long next_ip; |
@@ -69,8 +57,7 @@ static void perf_callchain_kernel(struct pt_regs *regs, | |||
69 | 57 | ||
70 | lr = regs->link; | 58 | lr = regs->link; |
71 | sp = regs->gpr[1]; | 59 | sp = regs->gpr[1]; |
72 | callchain_store(entry, PERF_CONTEXT_KERNEL); | 60 | perf_callchain_store(entry, regs->nip); |
73 | callchain_store(entry, regs->nip); | ||
74 | 61 | ||
75 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) | 62 | if (!validate_sp(sp, current, STACK_FRAME_OVERHEAD)) |
76 | return; | 63 | return; |
@@ -89,7 +76,7 @@ static void perf_callchain_kernel(struct pt_regs *regs, | |||
89 | next_ip = regs->nip; | 76 | next_ip = regs->nip; |
90 | lr = regs->link; | 77 | lr = regs->link; |
91 | level = 0; | 78 | level = 0; |
92 | callchain_store(entry, PERF_CONTEXT_KERNEL); | 79 | perf_callchain_store(entry, PERF_CONTEXT_KERNEL); |
93 | 80 | ||
94 | } else { | 81 | } else { |
95 | if (level == 0) | 82 | if (level == 0) |
@@ -111,7 +98,7 @@ static void perf_callchain_kernel(struct pt_regs *regs, | |||
111 | ++level; | 98 | ++level; |
112 | } | 99 | } |
113 | 100 | ||
114 | callchain_store(entry, next_ip); | 101 | perf_callchain_store(entry, next_ip); |
115 | if (!valid_next_sp(next_sp, sp)) | 102 | if (!valid_next_sp(next_sp, sp)) |
116 | return; | 103 | return; |
117 | sp = next_sp; | 104 | sp = next_sp; |
@@ -233,8 +220,8 @@ static int sane_signal_64_frame(unsigned long sp) | |||
233 | puc == (unsigned long) &sf->uc; | 220 | puc == (unsigned long) &sf->uc; |
234 | } | 221 | } |
235 | 222 | ||
236 | static void perf_callchain_user_64(struct pt_regs *regs, | 223 | static void perf_callchain_user_64(struct perf_callchain_entry *entry, |
237 | struct perf_callchain_entry *entry) | 224 | struct pt_regs *regs) |
238 | { | 225 | { |
239 | unsigned long sp, next_sp; | 226 | unsigned long sp, next_sp; |
240 | unsigned long next_ip; | 227 | unsigned long next_ip; |
@@ -246,8 +233,7 @@ static void perf_callchain_user_64(struct pt_regs *regs, | |||
246 | next_ip = regs->nip; | 233 | next_ip = regs->nip; |
247 | lr = regs->link; | 234 | lr = regs->link; |
248 | sp = regs->gpr[1]; | 235 | sp = regs->gpr[1]; |
249 | callchain_store(entry, PERF_CONTEXT_USER); | 236 | perf_callchain_store(entry, next_ip); |
250 | callchain_store(entry, next_ip); | ||
251 | 237 | ||
252 | for (;;) { | 238 | for (;;) { |
253 | fp = (unsigned long __user *) sp; | 239 | fp = (unsigned long __user *) sp; |
@@ -276,14 +262,14 @@ static void perf_callchain_user_64(struct pt_regs *regs, | |||
276 | read_user_stack_64(&uregs[PT_R1], &sp)) | 262 | read_user_stack_64(&uregs[PT_R1], &sp)) |
277 | return; | 263 | return; |
278 | level = 0; | 264 | level = 0; |
279 | callchain_store(entry, PERF_CONTEXT_USER); | 265 | perf_callchain_store(entry, PERF_CONTEXT_USER); |
280 | callchain_store(entry, next_ip); | 266 | perf_callchain_store(entry, next_ip); |
281 | continue; | 267 | continue; |
282 | } | 268 | } |
283 | 269 | ||
284 | if (level == 0) | 270 | if (level == 0) |
285 | next_ip = lr; | 271 | next_ip = lr; |
286 | callchain_store(entry, next_ip); | 272 | perf_callchain_store(entry, next_ip); |
287 | ++level; | 273 | ++level; |
288 | sp = next_sp; | 274 | sp = next_sp; |
289 | } | 275 | } |
@@ -315,8 +301,8 @@ static int read_user_stack_32(unsigned int __user *ptr, unsigned int *ret) | |||
315 | return __get_user_inatomic(*ret, ptr); | 301 | return __get_user_inatomic(*ret, ptr); |
316 | } | 302 | } |
317 | 303 | ||
318 | static inline void perf_callchain_user_64(struct pt_regs *regs, | 304 | static inline void perf_callchain_user_64(struct perf_callchain_entry *entry, |
319 | struct perf_callchain_entry *entry) | 305 | struct pt_regs *regs) |
320 | { | 306 | { |
321 | } | 307 | } |
322 | 308 | ||
@@ -435,8 +421,8 @@ static unsigned int __user *signal_frame_32_regs(unsigned int sp, | |||
435 | return mctx->mc_gregs; | 421 | return mctx->mc_gregs; |
436 | } | 422 | } |
437 | 423 | ||
438 | static void perf_callchain_user_32(struct pt_regs *regs, | 424 | static void perf_callchain_user_32(struct perf_callchain_entry *entry, |
439 | struct perf_callchain_entry *entry) | 425 | struct pt_regs *regs) |
440 | { | 426 | { |
441 | unsigned int sp, next_sp; | 427 | unsigned int sp, next_sp; |
442 | unsigned int next_ip; | 428 | unsigned int next_ip; |
@@ -447,8 +433,7 @@ static void perf_callchain_user_32(struct pt_regs *regs, | |||
447 | next_ip = regs->nip; | 433 | next_ip = regs->nip; |
448 | lr = regs->link; | 434 | lr = regs->link; |
449 | sp = regs->gpr[1]; | 435 | sp = regs->gpr[1]; |
450 | callchain_store(entry, PERF_CONTEXT_USER); | 436 | perf_callchain_store(entry, next_ip); |
451 | callchain_store(entry, next_ip); | ||
452 | 437 | ||
453 | while (entry->nr < PERF_MAX_STACK_DEPTH) { | 438 | while (entry->nr < PERF_MAX_STACK_DEPTH) { |
454 | fp = (unsigned int __user *) (unsigned long) sp; | 439 | fp = (unsigned int __user *) (unsigned long) sp; |
@@ -470,45 +455,24 @@ static void perf_callchain_user_32(struct pt_regs *regs, | |||
470 | read_user_stack_32(&uregs[PT_R1], &sp)) | 455 | read_user_stack_32(&uregs[PT_R1], &sp)) |
471 | return; | 456 | return; |
472 | level = 0; | 457 | level = 0; |
473 | callchain_store(entry, PERF_CONTEXT_USER); | 458 | perf_callchain_store(entry, PERF_CONTEXT_USER); |
474 | callchain_store(entry, next_ip); | 459 | perf_callchain_store(entry, next_ip); |
475 | continue; | 460 | continue; |
476 | } | 461 | } |
477 | 462 | ||
478 | if (level == 0) | 463 | if (level == 0) |
479 | next_ip = lr; | 464 | next_ip = lr; |
480 | callchain_store(entry, next_ip); | 465 | perf_callchain_store(entry, next_ip); |
481 | ++level; | 466 | ++level; |
482 | sp = next_sp; | 467 | sp = next_sp; |
483 | } | 468 | } |
484 | } | 469 | } |
485 | 470 | ||
486 | /* | 471 | void |
487 | * Since we can't get PMU interrupts inside a PMU interrupt handler, | 472 | perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) |
488 | * we don't need separate irq and nmi entries here. | ||
489 | */ | ||
490 | static DEFINE_PER_CPU(struct perf_callchain_entry, cpu_perf_callchain); | ||
491 | |||
492 | struct perf_callchain_entry *perf_callchain(struct pt_regs *regs) | ||
493 | { | 473 | { |
494 | struct perf_callchain_entry *entry = &__get_cpu_var(cpu_perf_callchain); | 474 | if (current_is_64bit()) |
495 | 475 | perf_callchain_user_64(entry, regs); | |
496 | entry->nr = 0; | 476 | else |
497 | 477 | perf_callchain_user_32(entry, regs); | |
498 | if (!user_mode(regs)) { | ||
499 | perf_callchain_kernel(regs, entry); | ||
500 | if (current->mm) | ||
501 | regs = task_pt_regs(current); | ||
502 | else | ||
503 | regs = NULL; | ||
504 | } | ||
505 | |||
506 | if (regs) { | ||
507 | if (current_is_64bit()) | ||
508 | perf_callchain_user_64(regs, entry); | ||
509 | else | ||
510 | perf_callchain_user_32(regs, entry); | ||
511 | } | ||
512 | |||
513 | return entry; | ||
514 | } | 478 | } |
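
The removed callchain_store() helper shows the contract the architecture code now gets from the generic perf_callchain_store(): an instruction pointer is appended only while the entry has room, and extra frames are silently dropped. A small stand-alone sketch of that bounded append, with a placeholder depth limit in place of PERF_MAX_STACK_DEPTH:

#include <stdio.h>

#define MAX_STACK_DEPTH 4       /* placeholder for PERF_MAX_STACK_DEPTH */

struct callchain_entry {
        unsigned int nr;
        unsigned long long ip[MAX_STACK_DEPTH];
};

/* Append one address, dropping it silently once the entry is full. */
static void callchain_store(struct callchain_entry *entry, unsigned long long ip)
{
        if (entry->nr < MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}

int main(void)
{
        struct callchain_entry entry = { 0 };

        for (unsigned long long ip = 0x1000; ip < 0x1006; ip++)
                callchain_store(&entry, ip);

        printf("stored %u frames\n", entry.nr);  /* prints 4; extras dropped */
        return 0;
}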
diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c index d301a30445e0..822f63008ae1 100644 --- a/arch/powerpc/kernel/perf_event.c +++ b/arch/powerpc/kernel/perf_event.c | |||
@@ -398,10 +398,32 @@ static int check_excludes(struct perf_event **ctrs, unsigned int cflags[], | |||
398 | return 0; | 398 | return 0; |
399 | } | 399 | } |
400 | 400 | ||
401 | static u64 check_and_compute_delta(u64 prev, u64 val) | ||
402 | { | ||
403 | u64 delta = (val - prev) & 0xfffffffful; | ||
404 | |||
405 | /* | ||
406 | * POWER7 can roll back counter values, if the new value is smaller | ||
407 | * than the previous value it will cause the delta and the counter to | ||
408 | have bogus values unless we rolled a counter over. If a counter is | ||
409 | * rolled back, it will be smaller, but within 256, which is the maximum | ||
410 | number of events to roll back at once. If we detect a rollback | ||
411 | * return 0. This can lead to a small lack of precision in the | ||
412 | * counters. | ||
413 | */ | ||
414 | if (prev > val && (prev - val) < 256) | ||
415 | delta = 0; | ||
416 | |||
417 | return delta; | ||
418 | } | ||
419 | |||
401 | static void power_pmu_read(struct perf_event *event) | 420 | static void power_pmu_read(struct perf_event *event) |
402 | { | 421 | { |
403 | s64 val, delta, prev; | 422 | s64 val, delta, prev; |
404 | 423 | ||
424 | if (event->hw.state & PERF_HES_STOPPED) | ||
425 | return; | ||
426 | |||
405 | if (!event->hw.idx) | 427 | if (!event->hw.idx) |
406 | return; | 428 | return; |
407 | /* | 429 | /* |
@@ -413,10 +435,11 @@ static void power_pmu_read(struct perf_event *event) | |||
413 | prev = local64_read(&event->hw.prev_count); | 435 | prev = local64_read(&event->hw.prev_count); |
414 | barrier(); | 436 | barrier(); |
415 | val = read_pmc(event->hw.idx); | 437 | val = read_pmc(event->hw.idx); |
438 | delta = check_and_compute_delta(prev, val); | ||
439 | if (!delta) | ||
440 | return; | ||
416 | } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); | 441 | } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); |
417 | 442 | ||
418 | /* The counters are only 32 bits wide */ | ||
419 | delta = (val - prev) & 0xfffffffful; | ||
420 | local64_add(delta, &event->count); | 443 | local64_add(delta, &event->count); |
421 | local64_sub(delta, &event->hw.period_left); | 444 | local64_sub(delta, &event->hw.period_left); |
422 | } | 445 | } |
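
The new check_and_compute_delta() above keeps the old 32-bit wrap-around arithmetic but drops the delta when the counter appears to have stepped backwards by less than 256, which the comment attributes to POWER7 rolling back speculative events. A self-contained sketch of the same arithmetic, compiled as ordinary user-space C rather than kernel code:

#include <stdint.h>
#include <stdio.h>

/* 32-bit PMC delta: take the difference modulo 2^32, but treat a backwards
 * step of less than 256 as a rollback and return 0 instead. */
static uint64_t check_and_compute_delta(uint64_t prev, uint64_t val)
{
        uint64_t delta = (val - prev) & 0xffffffffull;

        if (prev > val && (prev - val) < 256)
                delta = 0;      /* speculative rollback, ignore this reading */

        return delta;
}

int main(void)
{
        /* normal forward progress */
        printf("%llu\n", (unsigned long long)check_and_compute_delta(10, 50));         /* 40 */
        /* counter wrapped past 2^32 */
        printf("%llu\n", (unsigned long long)check_and_compute_delta(0xffffff00, 16)); /* 272 */
        /* small backwards step: treated as a rollback */
        printf("%llu\n", (unsigned long long)check_and_compute_delta(100, 90));        /* 0 */
        return 0;
}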
@@ -446,8 +469,9 @@ static void freeze_limited_counters(struct cpu_hw_events *cpuhw, | |||
446 | val = (event->hw.idx == 5) ? pmc5 : pmc6; | 469 | val = (event->hw.idx == 5) ? pmc5 : pmc6; |
447 | prev = local64_read(&event->hw.prev_count); | 470 | prev = local64_read(&event->hw.prev_count); |
448 | event->hw.idx = 0; | 471 | event->hw.idx = 0; |
449 | delta = (val - prev) & 0xfffffffful; | 472 | delta = check_and_compute_delta(prev, val); |
450 | local64_add(delta, &event->count); | 473 | if (delta) |
474 | local64_add(delta, &event->count); | ||
451 | } | 475 | } |
452 | } | 476 | } |
453 | 477 | ||
@@ -455,14 +479,16 @@ static void thaw_limited_counters(struct cpu_hw_events *cpuhw, | |||
455 | unsigned long pmc5, unsigned long pmc6) | 479 | unsigned long pmc5, unsigned long pmc6) |
456 | { | 480 | { |
457 | struct perf_event *event; | 481 | struct perf_event *event; |
458 | u64 val; | 482 | u64 val, prev; |
459 | int i; | 483 | int i; |
460 | 484 | ||
461 | for (i = 0; i < cpuhw->n_limited; ++i) { | 485 | for (i = 0; i < cpuhw->n_limited; ++i) { |
462 | event = cpuhw->limited_counter[i]; | 486 | event = cpuhw->limited_counter[i]; |
463 | event->hw.idx = cpuhw->limited_hwidx[i]; | 487 | event->hw.idx = cpuhw->limited_hwidx[i]; |
464 | val = (event->hw.idx == 5) ? pmc5 : pmc6; | 488 | val = (event->hw.idx == 5) ? pmc5 : pmc6; |
465 | local64_set(&event->hw.prev_count, val); | 489 | prev = local64_read(&event->hw.prev_count); |
490 | if (check_and_compute_delta(prev, val)) | ||
491 | local64_set(&event->hw.prev_count, val); | ||
466 | perf_event_update_userpage(event); | 492 | perf_event_update_userpage(event); |
467 | } | 493 | } |
468 | } | 494 | } |
@@ -517,7 +543,7 @@ static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0) | |||
517 | * Disable all events to prevent PMU interrupts and to allow | 543 | * Disable all events to prevent PMU interrupts and to allow |
518 | * events to be added or removed. | 544 | * events to be added or removed. |
519 | */ | 545 | */ |
520 | void hw_perf_disable(void) | 546 | static void power_pmu_disable(struct pmu *pmu) |
521 | { | 547 | { |
522 | struct cpu_hw_events *cpuhw; | 548 | struct cpu_hw_events *cpuhw; |
523 | unsigned long flags; | 549 | unsigned long flags; |
@@ -565,7 +591,7 @@ void hw_perf_disable(void) | |||
565 | * If we were previously disabled and events were added, then | 591 | * If we were previously disabled and events were added, then |
566 | * put the new config on the PMU. | 592 | * put the new config on the PMU. |
567 | */ | 593 | */ |
568 | void hw_perf_enable(void) | 594 | static void power_pmu_enable(struct pmu *pmu) |
569 | { | 595 | { |
570 | struct perf_event *event; | 596 | struct perf_event *event; |
571 | struct cpu_hw_events *cpuhw; | 597 | struct cpu_hw_events *cpuhw; |
@@ -672,6 +698,8 @@ void hw_perf_enable(void) | |||
672 | } | 698 | } |
673 | local64_set(&event->hw.prev_count, val); | 699 | local64_set(&event->hw.prev_count, val); |
674 | event->hw.idx = idx; | 700 | event->hw.idx = idx; |
701 | if (event->hw.state & PERF_HES_STOPPED) | ||
702 | val = 0; | ||
675 | write_pmc(idx, val); | 703 | write_pmc(idx, val); |
676 | perf_event_update_userpage(event); | 704 | perf_event_update_userpage(event); |
677 | } | 705 | } |
@@ -727,7 +755,7 @@ static int collect_events(struct perf_event *group, int max_count, | |||
727 | * re-enable the PMU in order to get hw_perf_enable to do the | 755 | * re-enable the PMU in order to get hw_perf_enable to do the |
728 | * actual work of reconfiguring the PMU. | 756 | * actual work of reconfiguring the PMU. |
729 | */ | 757 | */ |
730 | static int power_pmu_enable(struct perf_event *event) | 758 | static int power_pmu_add(struct perf_event *event, int ef_flags) |
731 | { | 759 | { |
732 | struct cpu_hw_events *cpuhw; | 760 | struct cpu_hw_events *cpuhw; |
733 | unsigned long flags; | 761 | unsigned long flags; |
@@ -735,7 +763,7 @@ static int power_pmu_enable(struct perf_event *event) | |||
735 | int ret = -EAGAIN; | 763 | int ret = -EAGAIN; |
736 | 764 | ||
737 | local_irq_save(flags); | 765 | local_irq_save(flags); |
738 | perf_disable(); | 766 | perf_pmu_disable(event->pmu); |
739 | 767 | ||
740 | /* | 768 | /* |
741 | * Add the event to the list (if there is room) | 769 | * Add the event to the list (if there is room) |
@@ -749,9 +777,12 @@ static int power_pmu_enable(struct perf_event *event) | |||
749 | cpuhw->events[n0] = event->hw.config; | 777 | cpuhw->events[n0] = event->hw.config; |
750 | cpuhw->flags[n0] = event->hw.event_base; | 778 | cpuhw->flags[n0] = event->hw.event_base; |
751 | 779 | ||
780 | if (!(ef_flags & PERF_EF_START)) | ||
781 | event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
782 | |||
752 | /* | 783 | /* |
753 | * If group events scheduling transaction was started, | 784 | * If group events scheduling transaction was started, |
754 | * skip the schedulability test here, it will be peformed | 785 | * skip the schedulability test here, it will be performed |
755 | * at commit time(->commit_txn) as a whole | 786 | * at commit time(->commit_txn) as a whole |
756 | */ | 787 | */ |
757 | if (cpuhw->group_flag & PERF_EVENT_TXN) | 788 | if (cpuhw->group_flag & PERF_EVENT_TXN) |
@@ -769,7 +800,7 @@ nocheck: | |||
769 | 800 | ||
770 | ret = 0; | 801 | ret = 0; |
771 | out: | 802 | out: |
772 | perf_enable(); | 803 | perf_pmu_enable(event->pmu); |
773 | local_irq_restore(flags); | 804 | local_irq_restore(flags); |
774 | return ret; | 805 | return ret; |
775 | } | 806 | } |
@@ -777,14 +808,14 @@ nocheck: | |||
777 | /* | 808 | /* |
778 | * Remove an event from the PMU. | 809 | * Remove an event from the PMU. |
779 | */ | 810 | */ |
780 | static void power_pmu_disable(struct perf_event *event) | 811 | static void power_pmu_del(struct perf_event *event, int ef_flags) |
781 | { | 812 | { |
782 | struct cpu_hw_events *cpuhw; | 813 | struct cpu_hw_events *cpuhw; |
783 | long i; | 814 | long i; |
784 | unsigned long flags; | 815 | unsigned long flags; |
785 | 816 | ||
786 | local_irq_save(flags); | 817 | local_irq_save(flags); |
787 | perf_disable(); | 818 | perf_pmu_disable(event->pmu); |
788 | 819 | ||
789 | power_pmu_read(event); | 820 | power_pmu_read(event); |
790 | 821 | ||
@@ -821,34 +852,60 @@ static void power_pmu_disable(struct perf_event *event) | |||
821 | cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); | 852 | cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE); |
822 | } | 853 | } |
823 | 854 | ||
824 | perf_enable(); | 855 | perf_pmu_enable(event->pmu); |
825 | local_irq_restore(flags); | 856 | local_irq_restore(flags); |
826 | } | 857 | } |
827 | 858 | ||
828 | /* | 859 | /* |
829 | * Re-enable interrupts on a event after they were throttled | 860 | * The POWER PMU does not support disabling individual counters; instead, |
830 | * because they were coming too fast. | 861 | * program the counter to its max value and ignore the interrupts. |
831 | */ | 862 | */ |
832 | static void power_pmu_unthrottle(struct perf_event *event) | 863 | |
864 | static void power_pmu_start(struct perf_event *event, int ef_flags) | ||
865 | { | ||
866 | unsigned long flags; | ||
867 | s64 left; | ||
868 | |||
869 | if (!event->hw.idx || !event->hw.sample_period) | ||
870 | return; | ||
871 | |||
872 | if (!(event->hw.state & PERF_HES_STOPPED)) | ||
873 | return; | ||
874 | |||
875 | if (ef_flags & PERF_EF_RELOAD) | ||
876 | WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); | ||
877 | |||
878 | local_irq_save(flags); | ||
879 | perf_pmu_disable(event->pmu); | ||
880 | |||
881 | event->hw.state = 0; | ||
882 | left = local64_read(&event->hw.period_left); | ||
883 | write_pmc(event->hw.idx, left); | ||
884 | |||
885 | perf_event_update_userpage(event); | ||
886 | perf_pmu_enable(event->pmu); | ||
887 | local_irq_restore(flags); | ||
888 | } | ||
889 | |||
890 | static void power_pmu_stop(struct perf_event *event, int ef_flags) | ||
833 | { | 891 | { |
834 | s64 val, left; | ||
835 | unsigned long flags; | 892 | unsigned long flags; |
836 | 893 | ||
837 | if (!event->hw.idx || !event->hw.sample_period) | 894 | if (!event->hw.idx || !event->hw.sample_period) |
838 | return; | 895 | return; |
896 | |||
897 | if (event->hw.state & PERF_HES_STOPPED) | ||
898 | return; | ||
899 | |||
839 | local_irq_save(flags); | 900 | local_irq_save(flags); |
840 | perf_disable(); | 901 | perf_pmu_disable(event->pmu); |
902 | |||
841 | power_pmu_read(event); | 903 | power_pmu_read(event); |
842 | left = event->hw.sample_period; | 904 | event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; |
843 | event->hw.last_period = left; | 905 | write_pmc(event->hw.idx, 0); |
844 | val = 0; | 906 | |
845 | if (left < 0x80000000L) | ||
846 | val = 0x80000000L - left; | ||
847 | write_pmc(event->hw.idx, val); | ||
848 | local64_set(&event->hw.prev_count, val); | ||
849 | local64_set(&event->hw.period_left, left); | ||
850 | perf_event_update_userpage(event); | 907 | perf_event_update_userpage(event); |
851 | perf_enable(); | 908 | perf_pmu_enable(event->pmu); |
852 | local_irq_restore(flags); | 909 | local_irq_restore(flags); |
853 | } | 910 | } |
854 | 911 | ||
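
The new power_pmu_start()/power_pmu_stop() callbacks above replace unthrottle(): stop reads the event, marks it PERF_HES_STOPPED | PERF_HES_UPTODATE and parks the PMC at zero, while start reloads period_left into the counter and clears the state. A rough user-space model of those state transitions; the flag values and field names here are placeholders, not the kernel's definitions:

#include <stdio.h>

#define PERF_HES_STOPPED   0x1  /* counter is not counting (placeholder value) */
#define PERF_HES_UPTODATE  0x2  /* event count reflects the hardware (placeholder) */

struct hw_event {
        int state;
        long long pmc;          /* stand-in for the hardware counter register */
        long long period_left;
};

static void pmu_stop(struct hw_event *ev)
{
        if (ev->state & PERF_HES_STOPPED)
                return;                                 /* already stopped */
        ev->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        ev->pmc = 0;                                    /* park the counter at zero */
}

static void pmu_start(struct hw_event *ev)
{
        if (!(ev->state & PERF_HES_STOPPED))
                return;                                 /* already running */
        ev->state = 0;
        ev->pmc = ev->period_left;                      /* resume from the saved period */
}

int main(void)
{
        struct hw_event ev = { .state = PERF_HES_STOPPED | PERF_HES_UPTODATE,
                               .period_left = 1000 };

        pmu_start(&ev);
        printf("running, pmc=%lld\n", ev.pmc);
        pmu_stop(&ev);
        printf("stopped, state=%#x\n", (unsigned)ev.state);
        return 0;
}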
@@ -857,10 +914,11 @@ static void power_pmu_unthrottle(struct perf_event *event) | |||
857 | * Set the flag to make pmu::enable() not perform the | 914 | * Set the flag to make pmu::enable() not perform the |
858 | * schedulability test, it will be performed at commit time | 915 | * schedulability test, it will be performed at commit time |
859 | */ | 916 | */ |
860 | void power_pmu_start_txn(const struct pmu *pmu) | 917 | void power_pmu_start_txn(struct pmu *pmu) |
861 | { | 918 | { |
862 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 919 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
863 | 920 | ||
921 | perf_pmu_disable(pmu); | ||
864 | cpuhw->group_flag |= PERF_EVENT_TXN; | 922 | cpuhw->group_flag |= PERF_EVENT_TXN; |
865 | cpuhw->n_txn_start = cpuhw->n_events; | 923 | cpuhw->n_txn_start = cpuhw->n_events; |
866 | } | 924 | } |
@@ -870,11 +928,12 @@ void power_pmu_start_txn(const struct pmu *pmu) | |||
870 | * Clear the flag and pmu::enable() will perform the | 928 | * Clear the flag and pmu::enable() will perform the |
871 | * schedulability test. | 929 | * schedulability test. |
872 | */ | 930 | */ |
873 | void power_pmu_cancel_txn(const struct pmu *pmu) | 931 | void power_pmu_cancel_txn(struct pmu *pmu) |
874 | { | 932 | { |
875 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); | 933 | struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events); |
876 | 934 | ||
877 | cpuhw->group_flag &= ~PERF_EVENT_TXN; | 935 | cpuhw->group_flag &= ~PERF_EVENT_TXN; |
936 | perf_pmu_enable(pmu); | ||
878 | } | 937 | } |
879 | 938 | ||
880 | /* | 939 | /* |
@@ -882,7 +941,7 @@ void power_pmu_cancel_txn(const struct pmu *pmu) | |||
882 | * Perform the group schedulability test as a whole | 941 | * Perform the group schedulability test as a whole |
883 | * Return 0 if success | 942 | * Return 0 if success |
884 | */ | 943 | */ |
885 | int power_pmu_commit_txn(const struct pmu *pmu) | 944 | int power_pmu_commit_txn(struct pmu *pmu) |
886 | { | 945 | { |
887 | struct cpu_hw_events *cpuhw; | 946 | struct cpu_hw_events *cpuhw; |
888 | long i, n; | 947 | long i, n; |
@@ -901,19 +960,10 @@ int power_pmu_commit_txn(const struct pmu *pmu) | |||
901 | cpuhw->event[i]->hw.config = cpuhw->events[i]; | 960 | cpuhw->event[i]->hw.config = cpuhw->events[i]; |
902 | 961 | ||
903 | cpuhw->group_flag &= ~PERF_EVENT_TXN; | 962 | cpuhw->group_flag &= ~PERF_EVENT_TXN; |
963 | perf_pmu_enable(pmu); | ||
904 | return 0; | 964 | return 0; |
905 | } | 965 | } |
906 | 966 | ||
907 | struct pmu power_pmu = { | ||
908 | .enable = power_pmu_enable, | ||
909 | .disable = power_pmu_disable, | ||
910 | .read = power_pmu_read, | ||
911 | .unthrottle = power_pmu_unthrottle, | ||
912 | .start_txn = power_pmu_start_txn, | ||
913 | .cancel_txn = power_pmu_cancel_txn, | ||
914 | .commit_txn = power_pmu_commit_txn, | ||
915 | }; | ||
916 | |||
917 | /* | 967 | /* |
918 | * Return 1 if we might be able to put event on a limited PMC, | 968 | * Return 1 if we might be able to put event on a limited PMC, |
919 | * or 0 if not. | 969 | * or 0 if not. |
@@ -1014,7 +1064,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) | |||
1014 | return 0; | 1064 | return 0; |
1015 | } | 1065 | } |
1016 | 1066 | ||
1017 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 1067 | static int power_pmu_event_init(struct perf_event *event) |
1018 | { | 1068 | { |
1019 | u64 ev; | 1069 | u64 ev; |
1020 | unsigned long flags; | 1070 | unsigned long flags; |
@@ -1026,25 +1076,27 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
1026 | struct cpu_hw_events *cpuhw; | 1076 | struct cpu_hw_events *cpuhw; |
1027 | 1077 | ||
1028 | if (!ppmu) | 1078 | if (!ppmu) |
1029 | return ERR_PTR(-ENXIO); | 1079 | return -ENOENT; |
1080 | |||
1030 | switch (event->attr.type) { | 1081 | switch (event->attr.type) { |
1031 | case PERF_TYPE_HARDWARE: | 1082 | case PERF_TYPE_HARDWARE: |
1032 | ev = event->attr.config; | 1083 | ev = event->attr.config; |
1033 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) | 1084 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) |
1034 | return ERR_PTR(-EOPNOTSUPP); | 1085 | return -EOPNOTSUPP; |
1035 | ev = ppmu->generic_events[ev]; | 1086 | ev = ppmu->generic_events[ev]; |
1036 | break; | 1087 | break; |
1037 | case PERF_TYPE_HW_CACHE: | 1088 | case PERF_TYPE_HW_CACHE: |
1038 | err = hw_perf_cache_event(event->attr.config, &ev); | 1089 | err = hw_perf_cache_event(event->attr.config, &ev); |
1039 | if (err) | 1090 | if (err) |
1040 | return ERR_PTR(err); | 1091 | return err; |
1041 | break; | 1092 | break; |
1042 | case PERF_TYPE_RAW: | 1093 | case PERF_TYPE_RAW: |
1043 | ev = event->attr.config; | 1094 | ev = event->attr.config; |
1044 | break; | 1095 | break; |
1045 | default: | 1096 | default: |
1046 | return ERR_PTR(-EINVAL); | 1097 | return -ENOENT; |
1047 | } | 1098 | } |
1099 | |||
1048 | event->hw.config_base = ev; | 1100 | event->hw.config_base = ev; |
1049 | event->hw.idx = 0; | 1101 | event->hw.idx = 0; |
1050 | 1102 | ||
@@ -1063,7 +1115,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
1063 | * XXX we should check if the task is an idle task. | 1115 | * XXX we should check if the task is an idle task. |
1064 | */ | 1116 | */ |
1065 | flags = 0; | 1117 | flags = 0; |
1066 | if (event->ctx->task) | 1118 | if (event->attach_state & PERF_ATTACH_TASK) |
1067 | flags |= PPMU_ONLY_COUNT_RUN; | 1119 | flags |= PPMU_ONLY_COUNT_RUN; |
1068 | 1120 | ||
1069 | /* | 1121 | /* |
@@ -1081,7 +1133,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
1081 | */ | 1133 | */ |
1082 | ev = normal_pmc_alternative(ev, flags); | 1134 | ev = normal_pmc_alternative(ev, flags); |
1083 | if (!ev) | 1135 | if (!ev) |
1084 | return ERR_PTR(-EINVAL); | 1136 | return -EINVAL; |
1085 | } | 1137 | } |
1086 | } | 1138 | } |
1087 | 1139 | ||
@@ -1095,19 +1147,19 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
1095 | n = collect_events(event->group_leader, ppmu->n_counter - 1, | 1147 | n = collect_events(event->group_leader, ppmu->n_counter - 1, |
1096 | ctrs, events, cflags); | 1148 | ctrs, events, cflags); |
1097 | if (n < 0) | 1149 | if (n < 0) |
1098 | return ERR_PTR(-EINVAL); | 1150 | return -EINVAL; |
1099 | } | 1151 | } |
1100 | events[n] = ev; | 1152 | events[n] = ev; |
1101 | ctrs[n] = event; | 1153 | ctrs[n] = event; |
1102 | cflags[n] = flags; | 1154 | cflags[n] = flags; |
1103 | if (check_excludes(ctrs, cflags, n, 1)) | 1155 | if (check_excludes(ctrs, cflags, n, 1)) |
1104 | return ERR_PTR(-EINVAL); | 1156 | return -EINVAL; |
1105 | 1157 | ||
1106 | cpuhw = &get_cpu_var(cpu_hw_events); | 1158 | cpuhw = &get_cpu_var(cpu_hw_events); |
1107 | err = power_check_constraints(cpuhw, events, cflags, n + 1); | 1159 | err = power_check_constraints(cpuhw, events, cflags, n + 1); |
1108 | put_cpu_var(cpu_hw_events); | 1160 | put_cpu_var(cpu_hw_events); |
1109 | if (err) | 1161 | if (err) |
1110 | return ERR_PTR(-EINVAL); | 1162 | return -EINVAL; |
1111 | 1163 | ||
1112 | event->hw.config = events[n]; | 1164 | event->hw.config = events[n]; |
1113 | event->hw.event_base = cflags[n]; | 1165 | event->hw.event_base = cflags[n]; |
@@ -1132,11 +1184,23 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
1132 | } | 1184 | } |
1133 | event->destroy = hw_perf_event_destroy; | 1185 | event->destroy = hw_perf_event_destroy; |
1134 | 1186 | ||
1135 | if (err) | 1187 | return err; |
1136 | return ERR_PTR(err); | ||
1137 | return &power_pmu; | ||
1138 | } | 1188 | } |
1139 | 1189 | ||
1190 | struct pmu power_pmu = { | ||
1191 | .pmu_enable = power_pmu_enable, | ||
1192 | .pmu_disable = power_pmu_disable, | ||
1193 | .event_init = power_pmu_event_init, | ||
1194 | .add = power_pmu_add, | ||
1195 | .del = power_pmu_del, | ||
1196 | .start = power_pmu_start, | ||
1197 | .stop = power_pmu_stop, | ||
1198 | .read = power_pmu_read, | ||
1199 | .start_txn = power_pmu_start_txn, | ||
1200 | .cancel_txn = power_pmu_cancel_txn, | ||
1201 | .commit_txn = power_pmu_commit_txn, | ||
1202 | }; | ||
1203 | |||
1140 | /* | 1204 | /* |
1141 | * A counter has overflowed; update its count and record | 1205 | * A counter has overflowed; update its count and record |
1142 | * things if requested. Note that interrupts are hard-disabled | 1206 | * things if requested. Note that interrupts are hard-disabled |
@@ -1149,9 +1213,14 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
1149 | s64 prev, delta, left; | 1213 | s64 prev, delta, left; |
1150 | int record = 0; | 1214 | int record = 0; |
1151 | 1215 | ||
1216 | if (event->hw.state & PERF_HES_STOPPED) { | ||
1217 | write_pmc(event->hw.idx, 0); | ||
1218 | return; | ||
1219 | } | ||
1220 | |||
1152 | /* we don't have to worry about interrupts here */ | 1221 | /* we don't have to worry about interrupts here */ |
1153 | prev = local64_read(&event->hw.prev_count); | 1222 | prev = local64_read(&event->hw.prev_count); |
1154 | delta = (val - prev) & 0xfffffffful; | 1223 | delta = check_and_compute_delta(prev, val); |
1155 | local64_add(delta, &event->count); | 1224 | local64_add(delta, &event->count); |
1156 | 1225 | ||
1157 | /* | 1226 | /* |
@@ -1166,11 +1235,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
1166 | if (left <= 0) | 1235 | if (left <= 0) |
1167 | left = period; | 1236 | left = period; |
1168 | record = 1; | 1237 | record = 1; |
1238 | event->hw.last_period = event->hw.sample_period; | ||
1169 | } | 1239 | } |
1170 | if (left < 0x80000000LL) | 1240 | if (left < 0x80000000LL) |
1171 | val = 0x80000000LL - left; | 1241 | val = 0x80000000LL - left; |
1172 | } | 1242 | } |
1173 | 1243 | ||
1244 | write_pmc(event->hw.idx, val); | ||
1245 | local64_set(&event->hw.prev_count, val); | ||
1246 | local64_set(&event->hw.period_left, left); | ||
1247 | perf_event_update_userpage(event); | ||
1248 | |||
1174 | /* | 1249 | /* |
1175 | * Finally record data if requested. | 1250 | * Finally record data if requested. |
1176 | */ | 1251 | */ |
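
The reload value written just above follows the usual PMC convention: on a 32-bit counter that raises an interrupt when it wraps past 0x80000000, writing 0x80000000 - left makes the next interrupt arrive after exactly 'left' more events (assuming left < 0x80000000; otherwise the counter is simply started from 0). A small worked example:

#include <stdio.h>

int main(void)
{
        long long left = 100000;                /* events until the next sample */
        long long val = 0;

        if (left < 0x80000000LL)
                val = 0x80000000LL - left;      /* counter overflows after 'left' events */

        printf("write %#llx; overflow after %lld events\n",
               (unsigned long long)val, 0x80000000LL - val);
        return 0;
}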
@@ -1183,23 +1258,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
1183 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) | 1258 | if (event->attr.sample_type & PERF_SAMPLE_ADDR) |
1184 | perf_get_data_addr(regs, &data.addr); | 1259 | perf_get_data_addr(regs, &data.addr); |
1185 | 1260 | ||
1186 | if (perf_event_overflow(event, nmi, &data, regs)) { | 1261 | if (perf_event_overflow(event, nmi, &data, regs)) |
1187 | /* | 1262 | power_pmu_stop(event, 0); |
1188 | * Interrupts are coming too fast - throttle them | ||
1189 | * by setting the event to 0, so it will be | ||
1190 | * at least 2^30 cycles until the next interrupt | ||
1191 | * (assuming each event counts at most 2 counts | ||
1192 | * per cycle). | ||
1193 | */ | ||
1194 | val = 0; | ||
1195 | left = ~0ULL >> 1; | ||
1196 | } | ||
1197 | } | 1263 | } |
1198 | |||
1199 | write_pmc(event->hw.idx, val); | ||
1200 | local64_set(&event->hw.prev_count, val); | ||
1201 | local64_set(&event->hw.period_left, left); | ||
1202 | perf_event_update_userpage(event); | ||
1203 | } | 1264 | } |
1204 | 1265 | ||
1205 | /* | 1266 | /* |
@@ -1231,6 +1292,28 @@ unsigned long perf_instruction_pointer(struct pt_regs *regs) | |||
1231 | return ip; | 1292 | return ip; |
1232 | } | 1293 | } |
1233 | 1294 | ||
1295 | static bool pmc_overflow(unsigned long val) | ||
1296 | { | ||
1297 | if ((int)val < 0) | ||
1298 | return true; | ||
1299 | |||
1300 | /* | ||
1301 | * Events on POWER7 can roll back if a speculative event doesn't | ||
1302 | * eventually complete. Unfortunately in some rare cases they will | ||
1303 | * raise a performance monitor exception. We need to catch this to | ||
1304 | * ensure we reset the PMC. In all cases the PMC will be 256 or fewer | ||
1305 | * cycles from overflow. | ||
1306 | * | ||
1307 | * We only do this if the first pass fails to find any overflowing | ||
1308 | * PMCs because a user might set a period of less than 256 and we | ||
1309 | * don't want to mistakenly reset them. | ||
1310 | */ | ||
1311 | if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) | ||
1312 | return true; | ||
1313 | |||
1314 | return false; | ||
1315 | } | ||
1316 | |||
1234 | /* | 1317 | /* |
1235 | * Performance monitor interrupt stuff | 1318 | * Performance monitor interrupt stuff |
1236 | */ | 1319 | */ |
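
pmc_overflow() above treats a counter as overflowed either when its sign bit is set or, to cover the POWER7 rollback case described in the comment, when it sits within 256 counts of 0x80000000. An illustrative stand-alone version of that test; the POWER7 check is gated by a plain flag here instead of __is_processor(PV_POWER7):

#include <stdbool.h>
#include <stdio.h>

/* A 32-bit PMC has overflowed when bit 31 is set; on POWER7-style rollback
 * it may also sit just below the overflow point after raising an exception. */
static bool pmc_overflow(unsigned long val, bool is_power7)
{
        if ((int)val < 0)                       /* bit 31 set: genuine overflow */
                return true;

        if (is_power7 && (0x80000000 - val) <= 256)
                return true;                    /* rolled back just short of overflow */

        return false;
}

int main(void)
{
        printf("%d\n", pmc_overflow(0x80000001ul, false)); /* 1: overflowed */
        printf("%d\n", pmc_overflow(0x7fffff80ul, true));  /* 1: within 256 on POWER7 */
        printf("%d\n", pmc_overflow(0x7fffff80ul, false)); /* 0: not yet overflowed */
        return 0;
}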
@@ -1278,7 +1361,7 @@ static void perf_event_interrupt(struct pt_regs *regs) | |||
1278 | if (is_limited_pmc(i + 1)) | 1361 | if (is_limited_pmc(i + 1)) |
1279 | continue; | 1362 | continue; |
1280 | val = read_pmc(i + 1); | 1363 | val = read_pmc(i + 1); |
1281 | if ((int)val < 0) | 1364 | if (pmc_overflow(val)) |
1282 | write_pmc(i + 1, 0); | 1365 | write_pmc(i + 1, 0); |
1283 | } | 1366 | } |
1284 | } | 1367 | } |
@@ -1342,6 +1425,7 @@ int register_power_pmu(struct power_pmu *pmu) | |||
1342 | freeze_events_kernel = MMCR0_FCHV; | 1425 | freeze_events_kernel = MMCR0_FCHV; |
1343 | #endif /* CONFIG_PPC64 */ | 1426 | #endif /* CONFIG_PPC64 */ |
1344 | 1427 | ||
1428 | perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW); | ||
1345 | perf_cpu_notifier(power_pmu_notifier); | 1429 | perf_cpu_notifier(power_pmu_notifier); |
1346 | 1430 | ||
1347 | return 0; | 1431 | return 0; |
diff --git a/arch/powerpc/kernel/perf_event_fsl_emb.c b/arch/powerpc/kernel/perf_event_fsl_emb.c index 1ba45471ae43..b0dc8f7069cd 100644 --- a/arch/powerpc/kernel/perf_event_fsl_emb.c +++ b/arch/powerpc/kernel/perf_event_fsl_emb.c | |||
@@ -156,6 +156,9 @@ static void fsl_emb_pmu_read(struct perf_event *event) | |||
156 | { | 156 | { |
157 | s64 val, delta, prev; | 157 | s64 val, delta, prev; |
158 | 158 | ||
159 | if (event->hw.state & PERF_HES_STOPPED) | ||
160 | return; | ||
161 | |||
159 | /* | 162 | /* |
160 | * Performance monitor interrupts come even when interrupts | 163 | * Performance monitor interrupts come even when interrupts |
161 | * are soft-disabled, as long as interrupts are hard-enabled. | 164 | * are soft-disabled, as long as interrupts are hard-enabled. |
@@ -177,7 +180,7 @@ static void fsl_emb_pmu_read(struct perf_event *event) | |||
177 | * Disable all events to prevent PMU interrupts and to allow | 180 | * Disable all events to prevent PMU interrupts and to allow |
178 | * events to be added or removed. | 181 | * events to be added or removed. |
179 | */ | 182 | */ |
180 | void hw_perf_disable(void) | 183 | static void fsl_emb_pmu_disable(struct pmu *pmu) |
181 | { | 184 | { |
182 | struct cpu_hw_events *cpuhw; | 185 | struct cpu_hw_events *cpuhw; |
183 | unsigned long flags; | 186 | unsigned long flags; |
@@ -216,7 +219,7 @@ void hw_perf_disable(void) | |||
216 | * If we were previously disabled and events were added, then | 219 | * If we were previously disabled and events were added, then |
217 | * put the new config on the PMU. | 220 | * put the new config on the PMU. |
218 | */ | 221 | */ |
219 | void hw_perf_enable(void) | 222 | static void fsl_emb_pmu_enable(struct pmu *pmu) |
220 | { | 223 | { |
221 | struct cpu_hw_events *cpuhw; | 224 | struct cpu_hw_events *cpuhw; |
222 | unsigned long flags; | 225 | unsigned long flags; |
@@ -262,8 +265,8 @@ static int collect_events(struct perf_event *group, int max_count, | |||
262 | return n; | 265 | return n; |
263 | } | 266 | } |
264 | 267 | ||
265 | /* perf must be disabled, context locked on entry */ | 268 | /* context locked on entry */ |
266 | static int fsl_emb_pmu_enable(struct perf_event *event) | 269 | static int fsl_emb_pmu_add(struct perf_event *event, int flags) |
267 | { | 270 | { |
268 | struct cpu_hw_events *cpuhw; | 271 | struct cpu_hw_events *cpuhw; |
269 | int ret = -EAGAIN; | 272 | int ret = -EAGAIN; |
@@ -271,6 +274,7 @@ static int fsl_emb_pmu_enable(struct perf_event *event) | |||
271 | u64 val; | 274 | u64 val; |
272 | int i; | 275 | int i; |
273 | 276 | ||
277 | perf_pmu_disable(event->pmu); | ||
274 | cpuhw = &get_cpu_var(cpu_hw_events); | 278 | cpuhw = &get_cpu_var(cpu_hw_events); |
275 | 279 | ||
276 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) | 280 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) |
@@ -301,6 +305,12 @@ static int fsl_emb_pmu_enable(struct perf_event *event) | |||
301 | val = 0x80000000L - left; | 305 | val = 0x80000000L - left; |
302 | } | 306 | } |
303 | local64_set(&event->hw.prev_count, val); | 307 | local64_set(&event->hw.prev_count, val); |
308 | |||
309 | if (!(flags & PERF_EF_START)) { | ||
310 | event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
311 | val = 0; | ||
312 | } | ||
313 | |||
304 | write_pmc(i, val); | 314 | write_pmc(i, val); |
305 | perf_event_update_userpage(event); | 315 | perf_event_update_userpage(event); |
306 | 316 | ||
@@ -310,15 +320,17 @@ static int fsl_emb_pmu_enable(struct perf_event *event) | |||
310 | ret = 0; | 320 | ret = 0; |
311 | out: | 321 | out: |
312 | put_cpu_var(cpu_hw_events); | 322 | put_cpu_var(cpu_hw_events); |
323 | perf_pmu_enable(event->pmu); | ||
313 | return ret; | 324 | return ret; |
314 | } | 325 | } |
315 | 326 | ||
316 | /* perf must be disabled, context locked on entry */ | 327 | /* context locked on entry */ |
317 | static void fsl_emb_pmu_disable(struct perf_event *event) | 328 | static void fsl_emb_pmu_del(struct perf_event *event, int flags) |
318 | { | 329 | { |
319 | struct cpu_hw_events *cpuhw; | 330 | struct cpu_hw_events *cpuhw; |
320 | int i = event->hw.idx; | 331 | int i = event->hw.idx; |
321 | 332 | ||
333 | perf_pmu_disable(event->pmu); | ||
322 | if (i < 0) | 334 | if (i < 0) |
323 | goto out; | 335 | goto out; |
324 | 336 | ||
@@ -346,44 +358,57 @@ static void fsl_emb_pmu_disable(struct perf_event *event) | |||
346 | cpuhw->n_events--; | 358 | cpuhw->n_events--; |
347 | 359 | ||
348 | out: | 360 | out: |
361 | perf_pmu_enable(event->pmu); | ||
349 | put_cpu_var(cpu_hw_events); | 362 | put_cpu_var(cpu_hw_events); |
350 | } | 363 | } |
351 | 364 | ||
352 | /* | 365 | static void fsl_emb_pmu_start(struct perf_event *event, int ef_flags) |
353 | * Re-enable interrupts on a event after they were throttled | ||
354 | * because they were coming too fast. | ||
355 | * | ||
356 | * Context is locked on entry, but perf is not disabled. | ||
357 | */ | ||
358 | static void fsl_emb_pmu_unthrottle(struct perf_event *event) | ||
359 | { | 366 | { |
360 | s64 val, left; | ||
361 | unsigned long flags; | 367 | unsigned long flags; |
368 | s64 left; | ||
362 | 369 | ||
363 | if (event->hw.idx < 0 || !event->hw.sample_period) | 370 | if (event->hw.idx < 0 || !event->hw.sample_period) |
364 | return; | 371 | return; |
372 | |||
373 | if (!(event->hw.state & PERF_HES_STOPPED)) | ||
374 | return; | ||
375 | |||
376 | if (ef_flags & PERF_EF_RELOAD) | ||
377 | WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE)); | ||
378 | |||
365 | local_irq_save(flags); | 379 | local_irq_save(flags); |
366 | perf_disable(); | 380 | perf_pmu_disable(event->pmu); |
367 | fsl_emb_pmu_read(event); | 381 | |
368 | left = event->hw.sample_period; | 382 | event->hw.state = 0; |
369 | event->hw.last_period = left; | 383 | left = local64_read(&event->hw.period_left); |
370 | val = 0; | 384 | write_pmc(event->hw.idx, left); |
371 | if (left < 0x80000000L) | 385 | |
372 | val = 0x80000000L - left; | ||
373 | write_pmc(event->hw.idx, val); | ||
374 | local64_set(&event->hw.prev_count, val); | ||
375 | local64_set(&event->hw.period_left, left); | ||
376 | perf_event_update_userpage(event); | 386 | perf_event_update_userpage(event); |
377 | perf_enable(); | 387 | perf_pmu_enable(event->pmu); |
378 | local_irq_restore(flags); | 388 | local_irq_restore(flags); |
379 | } | 389 | } |
380 | 390 | ||
381 | static struct pmu fsl_emb_pmu = { | 391 | static void fsl_emb_pmu_stop(struct perf_event *event, int ef_flags) |
382 | .enable = fsl_emb_pmu_enable, | 392 | { |
383 | .disable = fsl_emb_pmu_disable, | 393 | unsigned long flags; |
384 | .read = fsl_emb_pmu_read, | 394 | |
385 | .unthrottle = fsl_emb_pmu_unthrottle, | 395 | if (event->hw.idx < 0 || !event->hw.sample_period) |
386 | }; | 396 | return; |
397 | |||
398 | if (event->hw.state & PERF_HES_STOPPED) | ||
399 | return; | ||
400 | |||
401 | local_irq_save(flags); | ||
402 | perf_pmu_disable(event->pmu); | ||
403 | |||
404 | fsl_emb_pmu_read(event); | ||
405 | event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
406 | write_pmc(event->hw.idx, 0); | ||
407 | |||
408 | perf_event_update_userpage(event); | ||
409 | perf_pmu_enable(event->pmu); | ||
410 | local_irq_restore(flags); | ||
411 | } | ||
387 | 412 | ||
388 | /* | 413 | /* |
389 | * Release the PMU if this is the last perf_event. | 414 | * Release the PMU if this is the last perf_event. |
@@ -428,7 +453,7 @@ static int hw_perf_cache_event(u64 config, u64 *eventp) | |||
428 | return 0; | 453 | return 0; |
429 | } | 454 | } |
430 | 455 | ||
431 | const struct pmu *hw_perf_event_init(struct perf_event *event) | 456 | static int fsl_emb_pmu_event_init(struct perf_event *event) |
432 | { | 457 | { |
433 | u64 ev; | 458 | u64 ev; |
434 | struct perf_event *events[MAX_HWEVENTS]; | 459 | struct perf_event *events[MAX_HWEVENTS]; |
@@ -441,14 +466,14 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
441 | case PERF_TYPE_HARDWARE: | 466 | case PERF_TYPE_HARDWARE: |
442 | ev = event->attr.config; | 467 | ev = event->attr.config; |
443 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) | 468 | if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0) |
444 | return ERR_PTR(-EOPNOTSUPP); | 469 | return -EOPNOTSUPP; |
445 | ev = ppmu->generic_events[ev]; | 470 | ev = ppmu->generic_events[ev]; |
446 | break; | 471 | break; |
447 | 472 | ||
448 | case PERF_TYPE_HW_CACHE: | 473 | case PERF_TYPE_HW_CACHE: |
449 | err = hw_perf_cache_event(event->attr.config, &ev); | 474 | err = hw_perf_cache_event(event->attr.config, &ev); |
450 | if (err) | 475 | if (err) |
451 | return ERR_PTR(err); | 476 | return err; |
452 | break; | 477 | break; |
453 | 478 | ||
454 | case PERF_TYPE_RAW: | 479 | case PERF_TYPE_RAW: |
@@ -456,12 +481,12 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
456 | break; | 481 | break; |
457 | 482 | ||
458 | default: | 483 | default: |
459 | return ERR_PTR(-EINVAL); | 484 | return -ENOENT; |
460 | } | 485 | } |
461 | 486 | ||
462 | event->hw.config = ppmu->xlate_event(ev); | 487 | event->hw.config = ppmu->xlate_event(ev); |
463 | if (!(event->hw.config & FSL_EMB_EVENT_VALID)) | 488 | if (!(event->hw.config & FSL_EMB_EVENT_VALID)) |
464 | return ERR_PTR(-EINVAL); | 489 | return -EINVAL; |
465 | 490 | ||
466 | /* | 491 | /* |
467 | * If this is in a group, check if it can go on with all the | 492 | * If this is in a group, check if it can go on with all the |
@@ -473,7 +498,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
473 | n = collect_events(event->group_leader, | 498 | n = collect_events(event->group_leader, |
474 | ppmu->n_counter - 1, events); | 499 | ppmu->n_counter - 1, events); |
475 | if (n < 0) | 500 | if (n < 0) |
476 | return ERR_PTR(-EINVAL); | 501 | return -EINVAL; |
477 | } | 502 | } |
478 | 503 | ||
479 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { | 504 | if (event->hw.config & FSL_EMB_EVENT_RESTRICTED) { |
@@ -484,7 +509,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
484 | } | 509 | } |
485 | 510 | ||
486 | if (num_restricted >= ppmu->n_restricted) | 511 | if (num_restricted >= ppmu->n_restricted) |
487 | return ERR_PTR(-EINVAL); | 512 | return -EINVAL; |
488 | } | 513 | } |
489 | 514 | ||
490 | event->hw.idx = -1; | 515 | event->hw.idx = -1; |
@@ -497,7 +522,7 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
497 | if (event->attr.exclude_kernel) | 522 | if (event->attr.exclude_kernel) |
498 | event->hw.config_base |= PMLCA_FCS; | 523 | event->hw.config_base |= PMLCA_FCS; |
499 | if (event->attr.exclude_idle) | 524 | if (event->attr.exclude_idle) |
500 | return ERR_PTR(-ENOTSUPP); | 525 | return -ENOTSUPP; |
501 | 526 | ||
502 | event->hw.last_period = event->hw.sample_period; | 527 | event->hw.last_period = event->hw.sample_period; |
503 | local64_set(&event->hw.period_left, event->hw.last_period); | 528 | local64_set(&event->hw.period_left, event->hw.last_period); |
@@ -523,11 +548,20 @@ const struct pmu *hw_perf_event_init(struct perf_event *event) | |||
523 | } | 548 | } |
524 | event->destroy = hw_perf_event_destroy; | 549 | event->destroy = hw_perf_event_destroy; |
525 | 550 | ||
526 | if (err) | 551 | return err; |
527 | return ERR_PTR(err); | ||
528 | return &fsl_emb_pmu; | ||
529 | } | 552 | } |
530 | 553 | ||
554 | static struct pmu fsl_emb_pmu = { | ||
555 | .pmu_enable = fsl_emb_pmu_enable, | ||
556 | .pmu_disable = fsl_emb_pmu_disable, | ||
557 | .event_init = fsl_emb_pmu_event_init, | ||
558 | .add = fsl_emb_pmu_add, | ||
559 | .del = fsl_emb_pmu_del, | ||
560 | .start = fsl_emb_pmu_start, | ||
561 | .stop = fsl_emb_pmu_stop, | ||
562 | .read = fsl_emb_pmu_read, | ||
563 | }; | ||
564 | |||
531 | /* | 565 | /* |
532 | * A counter has overflowed; update its count and record | 566 | * A counter has overflowed; update its count and record |
533 | * things if requested. Note that interrupts are hard-disabled | 567 | * things if requested. Note that interrupts are hard-disabled |
@@ -540,6 +574,11 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
540 | s64 prev, delta, left; | 574 | s64 prev, delta, left; |
541 | int record = 0; | 575 | int record = 0; |
542 | 576 | ||
577 | if (event->hw.state & PERF_HES_STOPPED) { | ||
578 | write_pmc(event->hw.idx, 0); | ||
579 | return; | ||
580 | } | ||
581 | |||
543 | /* we don't have to worry about interrupts here */ | 582 | /* we don't have to worry about interrupts here */ |
544 | prev = local64_read(&event->hw.prev_count); | 583 | prev = local64_read(&event->hw.prev_count); |
545 | delta = (val - prev) & 0xfffffffful; | 584 | delta = (val - prev) & 0xfffffffful; |
@@ -557,11 +596,17 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
557 | if (left <= 0) | 596 | if (left <= 0) |
558 | left = period; | 597 | left = period; |
559 | record = 1; | 598 | record = 1; |
599 | event->hw.last_period = event->hw.sample_period; | ||
560 | } | 600 | } |
561 | if (left < 0x80000000LL) | 601 | if (left < 0x80000000LL) |
562 | val = 0x80000000LL - left; | 602 | val = 0x80000000LL - left; |
563 | } | 603 | } |
564 | 604 | ||
605 | write_pmc(event->hw.idx, val); | ||
606 | local64_set(&event->hw.prev_count, val); | ||
607 | local64_set(&event->hw.period_left, left); | ||
608 | perf_event_update_userpage(event); | ||
609 | |||
565 | /* | 610 | /* |
566 | * Finally record data if requested. | 611 | * Finally record data if requested. |
567 | */ | 612 | */ |
@@ -571,23 +616,9 @@ static void record_and_restart(struct perf_event *event, unsigned long val, | |||
571 | perf_sample_data_init(&data, 0); | 616 | perf_sample_data_init(&data, 0); |
572 | data.period = event->hw.last_period; | 617 | data.period = event->hw.last_period; |
573 | 618 | ||
574 | if (perf_event_overflow(event, nmi, &data, regs)) { | 619 | if (perf_event_overflow(event, nmi, &data, regs)) |
575 | /* | 620 | fsl_emb_pmu_stop(event, 0); |
576 | * Interrupts are coming too fast - throttle them | ||
577 | * by setting the event to 0, so it will be | ||
578 | * at least 2^30 cycles until the next interrupt | ||
579 | * (assuming each event counts at most 2 counts | ||
580 | * per cycle). | ||
581 | */ | ||
582 | val = 0; | ||
583 | left = ~0ULL >> 1; | ||
584 | } | ||
585 | } | 621 | } |
586 | |||
587 | write_pmc(event->hw.idx, val); | ||
588 | local64_set(&event->hw.prev_count, val); | ||
589 | local64_set(&event->hw.period_left, left); | ||
590 | perf_event_update_userpage(event); | ||
591 | } | 622 | } |
592 | 623 | ||
593 | static void perf_event_interrupt(struct pt_regs *regs) | 624 | static void perf_event_interrupt(struct pt_regs *regs) |
@@ -651,5 +682,7 @@ int register_fsl_emb_pmu(struct fsl_emb_pmu *pmu) | |||
651 | pr_info("%s performance monitor hardware support registered\n", | 682 | pr_info("%s performance monitor hardware support registered\n", |
652 | pmu->name); | 683 | pmu->name); |
653 | 684 | ||
685 | perf_pmu_register(&fsl_emb_pmu, "cpu", PERF_TYPE_RAW); | ||
686 | |||
654 | return 0; | 687 | return 0; |
655 | } | 688 | } |
diff --git a/arch/powerpc/kernel/power4-pmu.c b/arch/powerpc/kernel/power4-pmu.c index 2a361cdda635..ead8b3c2649e 100644 --- a/arch/powerpc/kernel/power4-pmu.c +++ b/arch/powerpc/kernel/power4-pmu.c | |||
@@ -613,4 +613,4 @@ static int init_power4_pmu(void) | |||
613 | return register_power_pmu(&power4_pmu); | 613 | return register_power_pmu(&power4_pmu); |
614 | } | 614 | } |
615 | 615 | ||
616 | arch_initcall(init_power4_pmu); | 616 | early_initcall(init_power4_pmu); |
diff --git a/arch/powerpc/kernel/power5+-pmu.c b/arch/powerpc/kernel/power5+-pmu.c index 199de527d411..eca0ac595cb6 100644 --- a/arch/powerpc/kernel/power5+-pmu.c +++ b/arch/powerpc/kernel/power5+-pmu.c | |||
@@ -682,4 +682,4 @@ static int init_power5p_pmu(void) | |||
682 | return register_power_pmu(&power5p_pmu); | 682 | return register_power_pmu(&power5p_pmu); |
683 | } | 683 | } |
684 | 684 | ||
685 | arch_initcall(init_power5p_pmu); | 685 | early_initcall(init_power5p_pmu); |
diff --git a/arch/powerpc/kernel/power5-pmu.c b/arch/powerpc/kernel/power5-pmu.c index 98b6a729a9dd..d5ff0f64a5e6 100644 --- a/arch/powerpc/kernel/power5-pmu.c +++ b/arch/powerpc/kernel/power5-pmu.c | |||
@@ -621,4 +621,4 @@ static int init_power5_pmu(void) | |||
621 | return register_power_pmu(&power5_pmu); | 621 | return register_power_pmu(&power5_pmu); |
622 | } | 622 | } |
623 | 623 | ||
624 | arch_initcall(init_power5_pmu); | 624 | early_initcall(init_power5_pmu); |
diff --git a/arch/powerpc/kernel/power6-pmu.c b/arch/powerpc/kernel/power6-pmu.c index 84a607bda8fb..31603927e376 100644 --- a/arch/powerpc/kernel/power6-pmu.c +++ b/arch/powerpc/kernel/power6-pmu.c | |||
@@ -544,4 +544,4 @@ static int init_power6_pmu(void) | |||
544 | return register_power_pmu(&power6_pmu); | 544 | return register_power_pmu(&power6_pmu); |
545 | } | 545 | } |
546 | 546 | ||
547 | arch_initcall(init_power6_pmu); | 547 | early_initcall(init_power6_pmu); |
diff --git a/arch/powerpc/kernel/power7-pmu.c b/arch/powerpc/kernel/power7-pmu.c index 852f7b7f6b40..593740fcb799 100644 --- a/arch/powerpc/kernel/power7-pmu.c +++ b/arch/powerpc/kernel/power7-pmu.c | |||
@@ -369,4 +369,4 @@ static int init_power7_pmu(void) | |||
369 | return register_power_pmu(&power7_pmu); | 369 | return register_power_pmu(&power7_pmu); |
370 | } | 370 | } |
371 | 371 | ||
372 | arch_initcall(init_power7_pmu); | 372 | early_initcall(init_power7_pmu); |
diff --git a/arch/powerpc/kernel/ppc970-pmu.c b/arch/powerpc/kernel/ppc970-pmu.c index 8eff48e20dba..9a6e093858fe 100644 --- a/arch/powerpc/kernel/ppc970-pmu.c +++ b/arch/powerpc/kernel/ppc970-pmu.c | |||
@@ -169,9 +169,11 @@ static int p970_marked_instr_event(u64 event) | |||
169 | switch (unit) { | 169 | switch (unit) { |
170 | case PM_VPU: | 170 | case PM_VPU: |
171 | mask = 0x4c; /* byte 0 bits 2,3,6 */ | 171 | mask = 0x4c; /* byte 0 bits 2,3,6 */ |
172 | break; | ||
172 | case PM_LSU0: | 173 | case PM_LSU0: |
173 | /* byte 2 bits 0,2,3,4,6; all of byte 1 */ | 174 | /* byte 2 bits 0,2,3,4,6; all of byte 1 */ |
174 | mask = 0x085dff00; | 175 | mask = 0x085dff00; |
176 | break; | ||
175 | case PM_LSU1L: | 177 | case PM_LSU1L: |
176 | mask = 0x50 << 24; /* byte 3 bits 4,6 */ | 178 | mask = 0x50 << 24; /* byte 3 bits 4,6 */ |
177 | break; | 179 | break; |
@@ -492,4 +494,4 @@ static int init_ppc970_pmu(void) | |||
492 | return register_power_pmu(&ppc970_pmu); | 494 | return register_power_pmu(&ppc970_pmu); |
493 | } | 495 | } |
494 | 496 | ||
495 | arch_initcall(init_ppc970_pmu); | 497 | early_initcall(init_ppc970_pmu); |
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index ab3e392ac63c..7d28f540200c 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c | |||
@@ -54,7 +54,6 @@ extern void single_step_exception(struct pt_regs *regs); | |||
54 | extern int sys_sigreturn(struct pt_regs *regs); | 54 | extern int sys_sigreturn(struct pt_regs *regs); |
55 | 55 | ||
56 | EXPORT_SYMBOL(clear_pages); | 56 | EXPORT_SYMBOL(clear_pages); |
57 | EXPORT_SYMBOL(copy_page); | ||
58 | EXPORT_SYMBOL(ISA_DMA_THRESHOLD); | 57 | EXPORT_SYMBOL(ISA_DMA_THRESHOLD); |
59 | EXPORT_SYMBOL(DMA_MODE_READ); | 58 | EXPORT_SYMBOL(DMA_MODE_READ); |
60 | EXPORT_SYMBOL(DMA_MODE_WRITE); | 59 | EXPORT_SYMBOL(DMA_MODE_WRITE); |
@@ -88,9 +87,7 @@ EXPORT_SYMBOL(__copy_tofrom_user); | |||
88 | EXPORT_SYMBOL(__clear_user); | 87 | EXPORT_SYMBOL(__clear_user); |
89 | EXPORT_SYMBOL(__strncpy_from_user); | 88 | EXPORT_SYMBOL(__strncpy_from_user); |
90 | EXPORT_SYMBOL(__strnlen_user); | 89 | EXPORT_SYMBOL(__strnlen_user); |
91 | #ifdef CONFIG_PPC64 | 90 | EXPORT_SYMBOL(copy_page); |
92 | EXPORT_SYMBOL(copy_4K_page); | ||
93 | #endif | ||
94 | 91 | ||
95 | #if defined(CONFIG_PCI) && defined(CONFIG_PPC32) | 92 | #if defined(CONFIG_PCI) && defined(CONFIG_PPC32) |
96 | EXPORT_SYMBOL(isa_io_base); | 93 | EXPORT_SYMBOL(isa_io_base); |
@@ -186,3 +183,10 @@ EXPORT_SYMBOL(__mtdcr); | |||
186 | EXPORT_SYMBOL(__mfdcr); | 183 | EXPORT_SYMBOL(__mfdcr); |
187 | #endif | 184 | #endif |
188 | EXPORT_SYMBOL(empty_zero_page); | 185 | EXPORT_SYMBOL(empty_zero_page); |
186 | |||
187 | #ifdef CONFIG_PPC64 | ||
188 | EXPORT_SYMBOL(__arch_hweight8); | ||
189 | EXPORT_SYMBOL(__arch_hweight16); | ||
190 | EXPORT_SYMBOL(__arch_hweight32); | ||
191 | EXPORT_SYMBOL(__arch_hweight64); | ||
192 | #endif | ||
diff --git a/arch/powerpc/kernel/ppc_save_regs.S b/arch/powerpc/kernel/ppc_save_regs.S index 5113bd2285e1..1b1787d52896 100644 --- a/arch/powerpc/kernel/ppc_save_regs.S +++ b/arch/powerpc/kernel/ppc_save_regs.S | |||
@@ -11,10 +11,11 @@ | |||
11 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
12 | #include <asm/ppc_asm.h> | 12 | #include <asm/ppc_asm.h> |
13 | #include <asm/asm-offsets.h> | 13 | #include <asm/asm-offsets.h> |
14 | #include <asm/ptrace.h> | ||
14 | 15 | ||
15 | /* | 16 | /* |
16 | * Grab the register values as they are now. | 17 | * Grab the register values as they are now. |
17 | * This won't do a particularily good job because we really | 18 | * This won't do a particularly good job because we really |
18 | * want our caller's caller's registers, and our caller has | 19 | * want our caller's caller's registers, and our caller has |
19 | * already executed its prologue. | 20 | * already executed its prologue. |
20 | * ToDo: We could reach back into the caller's save area to do | 21 | * ToDo: We could reach back into the caller's save area to do |
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c index b1c648a36b03..91e52df3d81d 100644 --- a/arch/powerpc/kernel/process.c +++ b/arch/powerpc/kernel/process.c | |||
@@ -353,6 +353,7 @@ static void switch_booke_debug_regs(struct thread_struct *new_thread) | |||
353 | prime_debug_regs(new_thread); | 353 | prime_debug_regs(new_thread); |
354 | } | 354 | } |
355 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ | 355 | #else /* !CONFIG_PPC_ADV_DEBUG_REGS */ |
356 | #ifndef CONFIG_HAVE_HW_BREAKPOINT | ||
356 | static void set_debug_reg_defaults(struct thread_struct *thread) | 357 | static void set_debug_reg_defaults(struct thread_struct *thread) |
357 | { | 358 | { |
358 | if (thread->dabr) { | 359 | if (thread->dabr) { |
@@ -360,6 +361,7 @@ static void set_debug_reg_defaults(struct thread_struct *thread) | |||
360 | set_dabr(0); | 361 | set_dabr(0); |
361 | } | 362 | } |
362 | } | 363 | } |
364 | #endif /* !CONFIG_HAVE_HW_BREAKPOINT */ | ||
363 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | 365 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
364 | 366 | ||
365 | int set_dabr(unsigned long dabr) | 367 | int set_dabr(unsigned long dabr) |
@@ -393,6 +395,9 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
393 | struct thread_struct *new_thread, *old_thread; | 395 | struct thread_struct *new_thread, *old_thread; |
394 | unsigned long flags; | 396 | unsigned long flags; |
395 | struct task_struct *last; | 397 | struct task_struct *last; |
398 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
399 | struct ppc64_tlb_batch *batch; | ||
400 | #endif | ||
396 | 401 | ||
397 | #ifdef CONFIG_SMP | 402 | #ifdef CONFIG_SMP |
398 | /* avoid complexity of lazy save/restore of fpu | 403 | /* avoid complexity of lazy save/restore of fpu |
@@ -511,13 +516,22 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
511 | old_thread->accum_tb += (current_tb - start_tb); | 516 | old_thread->accum_tb += (current_tb - start_tb); |
512 | new_thread->start_tb = current_tb; | 517 | new_thread->start_tb = current_tb; |
513 | } | 518 | } |
514 | #endif | 519 | #endif /* CONFIG_PPC64 */ |
520 | |||
521 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
522 | batch = &__get_cpu_var(ppc64_tlb_batch); | ||
523 | if (batch->active) { | ||
524 | current_thread_info()->local_flags |= _TLF_LAZY_MMU; | ||
525 | if (batch->index) | ||
526 | __flush_tlb_pending(batch); | ||
527 | batch->active = 0; | ||
528 | } | ||
529 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
515 | 530 | ||
516 | local_irq_save(flags); | 531 | local_irq_save(flags); |
517 | 532 | ||
518 | account_system_vtime(current); | 533 | account_system_vtime(current); |
519 | account_process_vtime(current); | 534 | account_process_vtime(current); |
520 | calculate_steal_time(); | ||
521 | 535 | ||
522 | /* | 536 | /* |
523 | * We can't take a PMU exception inside _switch() since there is a | 537 | * We can't take a PMU exception inside _switch() since there is a |
@@ -527,6 +541,14 @@ struct task_struct *__switch_to(struct task_struct *prev, | |||
527 | hard_irq_disable(); | 541 | hard_irq_disable(); |
528 | last = _switch(old_thread, new_thread); | 542 | last = _switch(old_thread, new_thread); |
529 | 543 | ||
544 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
545 | if (current_thread_info()->local_flags & _TLF_LAZY_MMU) { | ||
546 | current_thread_info()->local_flags &= ~_TLF_LAZY_MMU; | ||
547 | batch = &__get_cpu_var(ppc64_tlb_batch); | ||
548 | batch->active = 1; | ||
549 | } | ||
550 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
551 | |||
530 | local_irq_restore(flags); | 552 | local_irq_restore(flags); |
531 | 553 | ||
532 | return last; | 554 | return last; |
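Taken together, the two CONFIG_PPC_BOOK3S_64 hunks above bracket _switch() so that a lazy hash-table flush batch is never left active across the stack switch: queued invalidations are drained, batching is turned off, and a thread flag remembers to re-arm it afterwards. A minimal restatement of that pattern (same names as the hunks; the helper functions themselves are illustrative, not part of the patch):

    /* Suspend lazy MMU batching before _switch(), resume it afterwards. */
    static inline void suspend_lazy_mmu(struct ppc64_tlb_batch *batch)
    {
            if (!batch->active)
                    return;
            current_thread_info()->local_flags |= _TLF_LAZY_MMU;  /* remember */
            if (batch->index)
                    __flush_tlb_pending(batch);  /* drain queued invalidations */
            batch->active = 0;                   /* no batching across the switch */
    }

    static inline void resume_lazy_mmu(void)
    {
            if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
                    current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
                    __get_cpu_var(ppc64_tlb_batch).active = 1;  /* re-arm */
            }
    }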
@@ -632,7 +654,7 @@ void show_regs(struct pt_regs * regs) | |||
632 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 654 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
633 | printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); | 655 | printk("DEAR: "REG", ESR: "REG"\n", regs->dar, regs->dsisr); |
634 | #else | 656 | #else |
635 | printk("DAR: "REG", DSISR: "REG"\n", regs->dar, regs->dsisr); | 657 | printk("DAR: "REG", DSISR: %08lx\n", regs->dar, regs->dsisr); |
636 | #endif | 658 | #endif |
637 | printk("TASK = %p[%d] '%s' THREAD: %p", | 659 | printk("TASK = %p[%d] '%s' THREAD: %p", |
638 | current, task_pid_nr(current), current->comm, task_thread_info(current)); | 660 | current, task_pid_nr(current), current->comm, task_thread_info(current)); |
@@ -671,11 +693,11 @@ void flush_thread(void) | |||
671 | { | 693 | { |
672 | discard_lazy_cpu_state(); | 694 | discard_lazy_cpu_state(); |
673 | 695 | ||
674 | #ifdef CONFIG_HAVE_HW_BREAKPOINTS | 696 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
675 | flush_ptrace_hw_breakpoint(current); | 697 | flush_ptrace_hw_breakpoint(current); |
676 | #else /* CONFIG_HAVE_HW_BREAKPOINTS */ | 698 | #else /* CONFIG_HAVE_HW_BREAKPOINT */ |
677 | set_debug_reg_defaults(¤t->thread); | 699 | set_debug_reg_defaults(¤t->thread); |
678 | #endif /* CONFIG_HAVE_HW_BREAKPOINTS */ | 700 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
679 | } | 701 | } |
680 | 702 | ||
681 | void | 703 | void |
@@ -701,6 +723,8 @@ void prepare_to_copy(struct task_struct *tsk) | |||
701 | /* | 723 | /* |
702 | * Copy a thread.. | 724 | * Copy a thread.. |
703 | */ | 725 | */ |
726 | extern unsigned long dscr_default; /* defined in arch/powerpc/kernel/sysfs.c */ | ||
727 | |||
704 | int copy_thread(unsigned long clone_flags, unsigned long usp, | 728 | int copy_thread(unsigned long clone_flags, unsigned long usp, |
705 | unsigned long unused, struct task_struct *p, | 729 | unsigned long unused, struct task_struct *p, |
706 | struct pt_regs *regs) | 730 | struct pt_regs *regs) |
@@ -754,11 +778,11 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
754 | _ALIGN_UP(sizeof(struct thread_info), 16); | 778 | _ALIGN_UP(sizeof(struct thread_info), 16); |
755 | 779 | ||
756 | #ifdef CONFIG_PPC_STD_MMU_64 | 780 | #ifdef CONFIG_PPC_STD_MMU_64 |
757 | if (cpu_has_feature(CPU_FTR_SLB)) { | 781 | if (mmu_has_feature(MMU_FTR_SLB)) { |
758 | unsigned long sp_vsid; | 782 | unsigned long sp_vsid; |
759 | unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; | 783 | unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; |
760 | 784 | ||
761 | if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) | 785 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) |
762 | sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) | 786 | sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T) |
763 | << SLB_VSID_SHIFT_1T; | 787 | << SLB_VSID_SHIFT_1T; |
764 | else | 788 | else |
@@ -768,6 +792,20 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, | |||
768 | p->thread.ksp_vsid = sp_vsid; | 792 | p->thread.ksp_vsid = sp_vsid; |
769 | } | 793 | } |
770 | #endif /* CONFIG_PPC_STD_MMU_64 */ | 794 | #endif /* CONFIG_PPC_STD_MMU_64 */ |
795 | #ifdef CONFIG_PPC64 | ||
796 | if (cpu_has_feature(CPU_FTR_DSCR)) { | ||
797 | if (current->thread.dscr_inherit) { | ||
798 | p->thread.dscr_inherit = 1; | ||
799 | p->thread.dscr = current->thread.dscr; | ||
800 | } else if (0 != dscr_default) { | ||
801 | p->thread.dscr_inherit = 1; | ||
802 | p->thread.dscr = dscr_default; | ||
803 | } else { | ||
804 | p->thread.dscr_inherit = 0; | ||
805 | p->thread.dscr = 0; | ||
806 | } | ||
807 | } | ||
808 | #endif | ||
771 | 809 | ||
772 | /* | 810 | /* |
773 | * The PPC64 ABI makes use of a TOC to contain function | 811 | * The PPC64 ABI makes use of a TOC to contain function |
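The CPU_FTR_DSCR block above decides the child's Data Stream Control Register at fork time in three steps: inherit the parent's explicit value, else fall back to the system-wide default exported from sysfs.c, else leave the firmware value alone. The same decision, restated with comments for clarity (dscr_default is the sysfs-controlled default named in the hunk):

    if (cpu_has_feature(CPU_FTR_DSCR)) {
            if (current->thread.dscr_inherit) {
                    /* parent set a DSCR explicitly: child inherits it */
                    p->thread.dscr_inherit = 1;
                    p->thread.dscr = current->thread.dscr;
            } else if (dscr_default != 0) {
                    /* no per-thread value, but an administrator default exists */
                    p->thread.dscr_inherit = 1;
                    p->thread.dscr = dscr_default;
            } else {
                    /* keep whatever firmware/boot left in the register */
                    p->thread.dscr_inherit = 0;
                    p->thread.dscr = 0;
            }
    }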
@@ -1217,11 +1255,11 @@ void __ppc64_runlatch_off(void) | |||
1217 | 1255 | ||
1218 | static struct kmem_cache *thread_info_cache; | 1256 | static struct kmem_cache *thread_info_cache; |
1219 | 1257 | ||
1220 | struct thread_info *alloc_thread_info(struct task_struct *tsk) | 1258 | struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node) |
1221 | { | 1259 | { |
1222 | struct thread_info *ti; | 1260 | struct thread_info *ti; |
1223 | 1261 | ||
1224 | ti = kmem_cache_alloc(thread_info_cache, GFP_KERNEL); | 1262 | ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node); |
1225 | if (unlikely(ti == NULL)) | 1263 | if (unlikely(ti == NULL)) |
1226 | return NULL; | 1264 | return NULL; |
1227 | #ifdef CONFIG_DEBUG_STACK_USAGE | 1265 | #ifdef CONFIG_DEBUG_STACK_USAGE |
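The alloc_thread_info_node() change is mechanical: the caller now tells the slab allocator which NUMA node should back the new thread_info/kernel stack. Spelled out (NUMA_NO_NODE, i.e. -1, means "any node is fine"):

    /* Node-aware allocation as used in the hunk above. */
    ti = kmem_cache_alloc_node(thread_info_cache, GFP_KERNEL, node);
    if (unlikely(ti == NULL))
            return NULL;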
@@ -1298,14 +1336,3 @@ unsigned long randomize_et_dyn(unsigned long base) | |||
1298 | 1336 | ||
1299 | return ret; | 1337 | return ret; |
1300 | } | 1338 | } |
1301 | |||
1302 | #ifdef CONFIG_SMP | ||
1303 | int arch_sd_sibling_asym_packing(void) | ||
1304 | { | ||
1305 | if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { | ||
1306 | printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); | ||
1307 | return SD_ASYM_PACKING; | ||
1308 | } | ||
1309 | return 0; | ||
1310 | } | ||
1311 | #endif | ||
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index fed9bf6187d1..8c3112a57cf2 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c | |||
@@ -66,7 +66,9 @@ | |||
66 | int __initdata iommu_is_off; | 66 | int __initdata iommu_is_off; |
67 | int __initdata iommu_force_on; | 67 | int __initdata iommu_force_on; |
68 | unsigned long tce_alloc_start, tce_alloc_end; | 68 | unsigned long tce_alloc_start, tce_alloc_end; |
69 | u64 ppc64_rma_size; | ||
69 | #endif | 70 | #endif |
71 | static phys_addr_t first_memblock_size; | ||
70 | 72 | ||
71 | static int __init early_parse_mem(char *p) | 73 | static int __init early_parse_mem(char *p) |
72 | { | 74 | { |
@@ -80,11 +82,29 @@ static int __init early_parse_mem(char *p) | |||
80 | } | 82 | } |
81 | early_param("mem", early_parse_mem); | 83 | early_param("mem", early_parse_mem); |
82 | 84 | ||
85 | /* | ||
86 | * overlaps_initrd - check for overlap with page aligned extension of | ||
87 | * initrd. | ||
88 | */ | ||
89 | static inline int overlaps_initrd(unsigned long start, unsigned long size) | ||
90 | { | ||
91 | #ifdef CONFIG_BLK_DEV_INITRD | ||
92 | if (!initrd_start) | ||
93 | return 0; | ||
94 | |||
95 | return (start + size) > _ALIGN_DOWN(initrd_start, PAGE_SIZE) && | ||
96 | start <= _ALIGN_UP(initrd_end, PAGE_SIZE); | ||
97 | #else | ||
98 | return 0; | ||
99 | #endif | ||
100 | } | ||
101 | |||
83 | /** | 102 | /** |
84 | * move_device_tree - move tree to an unused area, if needed. | 103 | * move_device_tree - move tree to an unused area, if needed. |
85 | * | 104 | * |
86 | * The device tree may be allocated beyond our memory limit, or inside the | 105 | * The device tree may be allocated beyond our memory limit, or inside the |
87 | * crash kernel region for kdump. If so, move it out of the way. | 106 | * crash kernel region for kdump, or within the page aligned range of initrd. |
107 | * If so, move it out of the way. | ||
88 | */ | 108 | */ |
89 | static void __init move_device_tree(void) | 109 | static void __init move_device_tree(void) |
90 | { | 110 | { |
@@ -96,9 +116,10 @@ static void __init move_device_tree(void) | |||
96 | start = __pa(initial_boot_params); | 116 | start = __pa(initial_boot_params); |
97 | size = be32_to_cpu(initial_boot_params->totalsize); | 117 | size = be32_to_cpu(initial_boot_params->totalsize); |
98 | 118 | ||
99 | if ((memory_limit && (start + size) > memory_limit) || | 119 | if ((memory_limit && (start + size) > PHYSICAL_START + memory_limit) || |
100 | overlaps_crashkernel(start, size)) { | 120 | overlaps_crashkernel(start, size) || |
101 | p = __va(memblock_alloc_base(size, PAGE_SIZE, memblock.rmo_size)); | 121 | overlaps_initrd(start, size)) { |
122 | p = __va(memblock_alloc(size, PAGE_SIZE)); | ||
102 | memcpy(p, initial_boot_params, size); | 123 | memcpy(p, initial_boot_params, size); |
103 | initial_boot_params = (struct boot_param_header *)p; | 124 | initial_boot_params = (struct boot_param_header *)p; |
104 | DBG("Moved device tree to 0x%p\n", p); | 125 | DBG("Moved device tree to 0x%p\n", p); |
@@ -122,18 +143,19 @@ static void __init move_device_tree(void) | |||
122 | */ | 143 | */ |
123 | static struct ibm_pa_feature { | 144 | static struct ibm_pa_feature { |
124 | unsigned long cpu_features; /* CPU_FTR_xxx bit */ | 145 | unsigned long cpu_features; /* CPU_FTR_xxx bit */ |
146 | unsigned long mmu_features; /* MMU_FTR_xxx bit */ | ||
125 | unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */ | 147 | unsigned int cpu_user_ftrs; /* PPC_FEATURE_xxx bit */ |
126 | unsigned char pabyte; /* byte number in ibm,pa-features */ | 148 | unsigned char pabyte; /* byte number in ibm,pa-features */ |
127 | unsigned char pabit; /* bit number (big-endian) */ | 149 | unsigned char pabit; /* bit number (big-endian) */ |
128 | unsigned char invert; /* if 1, pa bit set => clear feature */ | 150 | unsigned char invert; /* if 1, pa bit set => clear feature */ |
129 | } ibm_pa_features[] __initdata = { | 151 | } ibm_pa_features[] __initdata = { |
130 | {0, PPC_FEATURE_HAS_MMU, 0, 0, 0}, | 152 | {0, 0, PPC_FEATURE_HAS_MMU, 0, 0, 0}, |
131 | {0, PPC_FEATURE_HAS_FPU, 0, 1, 0}, | 153 | {0, 0, PPC_FEATURE_HAS_FPU, 0, 1, 0}, |
132 | {CPU_FTR_SLB, 0, 0, 2, 0}, | 154 | {0, MMU_FTR_SLB, 0, 0, 2, 0}, |
133 | {CPU_FTR_CTRL, 0, 0, 3, 0}, | 155 | {CPU_FTR_CTRL, 0, 0, 0, 3, 0}, |
134 | {CPU_FTR_NOEXECUTE, 0, 0, 6, 0}, | 156 | {CPU_FTR_NOEXECUTE, 0, 0, 0, 6, 0}, |
135 | {CPU_FTR_NODSISRALIGN, 0, 1, 1, 1}, | 157 | {CPU_FTR_NODSISRALIGN, 0, 0, 1, 1, 1}, |
136 | {CPU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, | 158 | {0, MMU_FTR_CI_LARGE_PAGE, 0, 1, 2, 0}, |
137 | {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, | 159 | {CPU_FTR_REAL_LE, PPC_FEATURE_TRUE_LE, 5, 0, 0}, |
138 | }; | 160 | }; |
139 | 161 | ||
@@ -165,9 +187,11 @@ static void __init scan_features(unsigned long node, unsigned char *ftrs, | |||
165 | if (bit ^ fp->invert) { | 187 | if (bit ^ fp->invert) { |
166 | cur_cpu_spec->cpu_features |= fp->cpu_features; | 188 | cur_cpu_spec->cpu_features |= fp->cpu_features; |
167 | cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs; | 189 | cur_cpu_spec->cpu_user_features |= fp->cpu_user_ftrs; |
190 | cur_cpu_spec->mmu_features |= fp->mmu_features; | ||
168 | } else { | 191 | } else { |
169 | cur_cpu_spec->cpu_features &= ~fp->cpu_features; | 192 | cur_cpu_spec->cpu_features &= ~fp->cpu_features; |
170 | cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs; | 193 | cur_cpu_spec->cpu_user_features &= ~fp->cpu_user_ftrs; |
194 | cur_cpu_spec->mmu_features &= ~fp->mmu_features; | ||
171 | } | 195 | } |
172 | } | 196 | } |
173 | } | 197 | } |
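Each ibm_pa_features[] entry now carries both a CPU and an MMU feature mask, and scan_features() applies whichever is non-zero when the corresponding ibm,pa-features bit is present. The bit lookup itself (done in the part of scan_features() outside this hunk) is roughly (pa_bytes[pabyte] >> (7 - pabit)) & 1, with pabit counted big-endian within the byte. A worked example for the SLB entry, using a made-up property byte:

    /* Hypothetical ibm,pa-features byte 0 = 0x24 (binary 0010 0100). */
    unsigned char pa_byte0 = 0x24;
    int bit = (pa_byte0 >> (7 - 2)) & 1;   /* entry {0, MMU_FTR_SLB, 0, 0, 2, 0} -> 1 */
    if (bit)                               /* invert == 0, so a set bit enables it */
            cur_cpu_spec->mmu_features |= MMU_FTR_SLB;
    else
            cur_cpu_spec->mmu_features &= ~MMU_FTR_SLB;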
@@ -267,13 +291,13 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
267 | const char *uname, int depth, | 291 | const char *uname, int depth, |
268 | void *data) | 292 | void *data) |
269 | { | 293 | { |
270 | static int logical_cpuid = 0; | ||
271 | char *type = of_get_flat_dt_prop(node, "device_type", NULL); | 294 | char *type = of_get_flat_dt_prop(node, "device_type", NULL); |
272 | const u32 *prop; | 295 | const u32 *prop; |
273 | const u32 *intserv; | 296 | const u32 *intserv; |
274 | int i, nthreads; | 297 | int i, nthreads; |
275 | unsigned long len; | 298 | unsigned long len; |
276 | int found = 0; | 299 | int found = -1; |
300 | int found_thread = 0; | ||
277 | 301 | ||
278 | /* We are scanning "cpu" nodes only */ | 302 | /* We are scanning "cpu" nodes only */ |
279 | if (type == NULL || strcmp(type, "cpu") != 0) | 303 | if (type == NULL || strcmp(type, "cpu") != 0) |
@@ -297,11 +321,10 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
297 | * version 2 of the kexec param format adds the phys cpuid of | 321 | * version 2 of the kexec param format adds the phys cpuid of |
298 | * booted proc. | 322 | * booted proc. |
299 | */ | 323 | */ |
300 | if (initial_boot_params && initial_boot_params->version >= 2) { | 324 | if (initial_boot_params->version >= 2) { |
301 | if (intserv[i] == | 325 | if (intserv[i] == initial_boot_params->boot_cpuid_phys) { |
302 | initial_boot_params->boot_cpuid_phys) { | 326 | found = boot_cpu_count; |
303 | found = 1; | 327 | found_thread = i; |
304 | break; | ||
305 | } | 328 | } |
306 | } else { | 329 | } else { |
307 | /* | 330 | /* |
@@ -310,23 +333,20 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
310 | * off secondary threads. | 333 | * off secondary threads. |
311 | */ | 334 | */ |
312 | if (of_get_flat_dt_prop(node, | 335 | if (of_get_flat_dt_prop(node, |
313 | "linux,boot-cpu", NULL) != NULL) { | 336 | "linux,boot-cpu", NULL) != NULL) |
314 | found = 1; | 337 | found = boot_cpu_count; |
315 | break; | ||
316 | } | ||
317 | } | 338 | } |
318 | |||
319 | #ifdef CONFIG_SMP | 339 | #ifdef CONFIG_SMP |
320 | /* logical cpu id is always 0 on UP kernels */ | 340 | /* logical cpu id is always 0 on UP kernels */ |
321 | logical_cpuid++; | 341 | boot_cpu_count++; |
322 | #endif | 342 | #endif |
323 | } | 343 | } |
324 | 344 | ||
325 | if (found) { | 345 | if (found >= 0) { |
326 | DBG("boot cpu: logical %d physical %d\n", logical_cpuid, | 346 | DBG("boot cpu: logical %d physical %d\n", found, |
327 | intserv[i]); | 347 | intserv[found_thread]); |
328 | boot_cpuid = logical_cpuid; | 348 | boot_cpuid = found; |
329 | set_hard_smp_processor_id(boot_cpuid, intserv[i]); | 349 | set_hard_smp_processor_id(found, intserv[found_thread]); |
330 | 350 | ||
331 | /* | 351 | /* |
332 | * PAPR defines "logical" PVR values for cpus that | 352 | * PAPR defines "logical" PVR values for cpus that |
@@ -363,10 +383,15 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
363 | return 0; | 383 | return 0; |
364 | } | 384 | } |
365 | 385 | ||
366 | void __init early_init_dt_scan_chosen_arch(unsigned long node) | 386 | int __init early_init_dt_scan_chosen_ppc(unsigned long node, const char *uname, |
387 | int depth, void *data) | ||
367 | { | 388 | { |
368 | unsigned long *lprop; | 389 | unsigned long *lprop; |
369 | 390 | ||
391 | /* Use common scan routine to determine if this is the chosen node */ | ||
392 | if (early_init_dt_scan_chosen(node, uname, depth, data) == 0) | ||
393 | return 0; | ||
394 | |||
370 | #ifdef CONFIG_PPC64 | 395 | #ifdef CONFIG_PPC64 |
371 | /* check if iommu is forced on or off */ | 396 | /* check if iommu is forced on or off */ |
372 | if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL) | 397 | if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL) |
@@ -398,6 +423,9 @@ void __init early_init_dt_scan_chosen_arch(unsigned long node) | |||
398 | if (lprop) | 423 | if (lprop) |
399 | crashk_res.end = crashk_res.start + *lprop - 1; | 424 | crashk_res.end = crashk_res.start + *lprop - 1; |
400 | #endif | 425 | #endif |
426 | |||
427 | /* break now */ | ||
428 | return 1; | ||
401 | } | 429 | } |
402 | 430 | ||
403 | #ifdef CONFIG_PPC_PSERIES | 431 | #ifdef CONFIG_PPC_PSERIES |
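The chosen-node rework above relies on the of_scan_flat_dt() callback contract: a callback returns 0 to keep walking the flattened tree and non-zero to stop, which is why early_init_dt_scan_chosen_ppc() first defers to the generic early_init_dt_scan_chosen() and only returns 1 once the chosen node has been handled. A minimal sketch of a callback written against that contract (illustrative, not from the patch):

    static int __init scan_example(unsigned long node, const char *uname,
                                   int depth, void *data)
    {
            if (depth != 1 || strcmp(uname, "chosen") != 0)
                    return 0;        /* not the node we want: keep scanning */

            /* ... read properties via of_get_flat_dt_prop(node, "...", NULL) ... */

            return 1;                /* handled: break out of the scan */
    }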
@@ -492,7 +520,7 @@ static int __init early_init_dt_scan_memory_ppc(unsigned long node, | |||
492 | 520 | ||
493 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) | 521 | void __init early_init_dt_add_memory_arch(u64 base, u64 size) |
494 | { | 522 | { |
495 | #if defined(CONFIG_PPC64) | 523 | #ifdef CONFIG_PPC64 |
496 | if (iommu_is_off) { | 524 | if (iommu_is_off) { |
497 | if (base >= 0x80000000ul) | 525 | if (base >= 0x80000000ul) |
498 | return; | 526 | return; |
@@ -500,15 +528,22 @@ void __init early_init_dt_add_memory_arch(u64 base, u64 size) | |||
500 | size = 0x80000000ul - base; | 528 | size = 0x80000000ul - base; |
501 | } | 529 | } |
502 | #endif | 530 | #endif |
531 | /* Keep track of the beginning of memory -and- the size of | ||
532 | * the very first block in the device-tree as it represents | ||
533 | * the RMA on ppc64 server | ||
534 | */ | ||
535 | if (base < memstart_addr) { | ||
536 | memstart_addr = base; | ||
537 | first_memblock_size = size; | ||
538 | } | ||
503 | 539 | ||
540 | /* Add the chunk to the MEMBLOCK list */ | ||
504 | memblock_add(base, size); | 541 | memblock_add(base, size); |
505 | |||
506 | memstart_addr = min((u64)memstart_addr, base); | ||
507 | } | 542 | } |
508 | 543 | ||
509 | u64 __init early_init_dt_alloc_memory_arch(u64 size, u64 align) | 544 | void * __init early_init_dt_alloc_memory_arch(u64 size, u64 align) |
510 | { | 545 | { |
511 | return memblock_alloc(size, align); | 546 | return __va(memblock_alloc(size, align)); |
512 | } | 547 | } |
513 | 548 | ||
514 | #ifdef CONFIG_BLK_DEV_INITRD | 549 | #ifdef CONFIG_BLK_DEV_INITRD |
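early_init_dt_add_memory_arch() now remembers the base and size of the lowest memory block seen while the memory nodes are scanned, because on ppc64 server that first block is the RMA (real mode area) the kernel may touch before the MMU is fully configured; a later hunk passes it to setup_initial_memory_limit(). Schematically, with made-up block addresses:

    /* Blocks reported by the device tree, in scan order:
     *   (base 0x00000000, size 256MB) -> memstart_addr = 0, first_memblock_size = 256MB
     *   (base 0x10000000, size 768MB) -> no change, 0x10000000 is not lower
     * afterwards: setup_initial_memory_limit(0x00000000, 256MB)
     */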
@@ -539,7 +574,9 @@ static void __init early_reserve_mem(void) | |||
539 | #ifdef CONFIG_BLK_DEV_INITRD | 574 | #ifdef CONFIG_BLK_DEV_INITRD |
540 | /* then reserve the initrd, if any */ | 575 | /* then reserve the initrd, if any */ |
541 | if (initrd_start && (initrd_end > initrd_start)) | 576 | if (initrd_start && (initrd_end > initrd_start)) |
542 | memblock_reserve(__pa(initrd_start), initrd_end - initrd_start); | 577 | memblock_reserve(_ALIGN_DOWN(__pa(initrd_start), PAGE_SIZE), |
578 | _ALIGN_UP(initrd_end, PAGE_SIZE) - | ||
579 | _ALIGN_DOWN(initrd_start, PAGE_SIZE)); | ||
543 | #endif /* CONFIG_BLK_DEV_INITRD */ | 580 | #endif /* CONFIG_BLK_DEV_INITRD */ |
544 | 581 | ||
545 | #ifdef CONFIG_PPC32 | 582 | #ifdef CONFIG_PPC32 |
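The initrd reservation above is widened to whole pages so that the page-aligned window tested by overlaps_initrd() earlier in this file is actually covered by the memblock reservation. A quick worked example of the rounding, with made-up physical addresses and 4K pages:

    /* _ALIGN_DOWN(0x02000800, 0x1000) = 0x02000000
     * _ALIGN_UP  (0x02100200, 0x1000) = 0x02101000
     * old reservation: 0x02000800..0x02100200  (0x0ffa00 bytes)
     * new reservation: 0x02000000..0x02101000  (0x101000 bytes)
     */
    memblock_reserve(0x02000000, 0x02101000 - 0x02000000);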
@@ -655,7 +692,6 @@ static void __init phyp_dump_reserve_mem(void) | |||
655 | static inline void __init phyp_dump_reserve_mem(void) {} | 692 | static inline void __init phyp_dump_reserve_mem(void) {} |
656 | #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ | 693 | #endif /* CONFIG_PHYP_DUMP && CONFIG_PPC_RTAS */ |
657 | 694 | ||
658 | |||
659 | void __init early_init_devtree(void *params) | 695 | void __init early_init_devtree(void *params) |
660 | { | 696 | { |
661 | phys_addr_t limit; | 697 | phys_addr_t limit; |
@@ -671,7 +707,7 @@ void __init early_init_devtree(void *params) | |||
671 | #endif | 707 | #endif |
672 | 708 | ||
673 | #ifdef CONFIG_PHYP_DUMP | 709 | #ifdef CONFIG_PHYP_DUMP |
674 | /* scan tree to see if dump occured during last boot */ | 710 | /* scan tree to see if dump occurred during last boot */ |
675 | of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL); | 711 | of_scan_flat_dt(early_init_dt_scan_phyp_dump, NULL); |
676 | #endif | 712 | #endif |
677 | 713 | ||
@@ -679,12 +715,14 @@ void __init early_init_devtree(void *params) | |||
679 | * device-tree, including the platform type, initrd location and | 715 | * device-tree, including the platform type, initrd location and |
680 | * size, TCE reserve, and more ... | 716 | * size, TCE reserve, and more ... |
681 | */ | 717 | */ |
682 | of_scan_flat_dt(early_init_dt_scan_chosen, NULL); | 718 | of_scan_flat_dt(early_init_dt_scan_chosen_ppc, cmd_line); |
683 | 719 | ||
684 | /* Scan memory nodes and rebuild MEMBLOCKs */ | 720 | /* Scan memory nodes and rebuild MEMBLOCKs */ |
685 | memblock_init(); | 721 | memblock_init(); |
722 | |||
686 | of_scan_flat_dt(early_init_dt_scan_root, NULL); | 723 | of_scan_flat_dt(early_init_dt_scan_root, NULL); |
687 | of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); | 724 | of_scan_flat_dt(early_init_dt_scan_memory_ppc, NULL); |
725 | setup_initial_memory_limit(memstart_addr, first_memblock_size); | ||
688 | 726 | ||
689 | /* Save command line for /proc/cmdline and then parse parameters */ | 727 | /* Save command line for /proc/cmdline and then parse parameters */ |
690 | strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); | 728 | strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE); |
@@ -726,7 +764,7 @@ void __init early_init_devtree(void *params) | |||
726 | 764 | ||
727 | DBG("Scanning CPUs ...\n"); | 765 | DBG("Scanning CPUs ...\n"); |
728 | 766 | ||
729 | /* Retreive CPU related informations from the flat tree | 767 | /* Retrieve CPU related informations from the flat tree |
730 | * (altivec support, boot CPU ID, ...) | 768 | * (altivec support, boot CPU ID, ...) |
731 | */ | 769 | */ |
732 | of_scan_flat_dt(early_init_dt_scan_cpus, NULL); | 770 | of_scan_flat_dt(early_init_dt_scan_cpus, NULL); |
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index 941ff4dbc567..c016033ba78d 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -335,6 +335,7 @@ static void __init prom_printf(const char *format, ...) | |||
335 | const char *p, *q, *s; | 335 | const char *p, *q, *s; |
336 | va_list args; | 336 | va_list args; |
337 | unsigned long v; | 337 | unsigned long v; |
338 | long vs; | ||
338 | struct prom_t *_prom = &RELOC(prom); | 339 | struct prom_t *_prom = &RELOC(prom); |
339 | 340 | ||
340 | va_start(args, format); | 341 | va_start(args, format); |
@@ -368,12 +369,35 @@ static void __init prom_printf(const char *format, ...) | |||
368 | v = va_arg(args, unsigned long); | 369 | v = va_arg(args, unsigned long); |
369 | prom_print_hex(v); | 370 | prom_print_hex(v); |
370 | break; | 371 | break; |
372 | case 'd': | ||
373 | ++q; | ||
374 | vs = va_arg(args, int); | ||
375 | if (vs < 0) { | ||
376 | prom_print(RELOC("-")); | ||
377 | vs = -vs; | ||
378 | } | ||
379 | prom_print_dec(vs); | ||
380 | break; | ||
371 | case 'l': | 381 | case 'l': |
372 | ++q; | 382 | ++q; |
373 | if (*q == 'u') { /* '%lu' */ | 383 | if (*q == 0) |
384 | break; | ||
385 | else if (*q == 'x') { | ||
386 | ++q; | ||
387 | v = va_arg(args, unsigned long); | ||
388 | prom_print_hex(v); | ||
389 | } else if (*q == 'u') { /* '%lu' */ | ||
374 | ++q; | 390 | ++q; |
375 | v = va_arg(args, unsigned long); | 391 | v = va_arg(args, unsigned long); |
376 | prom_print_dec(v); | 392 | prom_print_dec(v); |
393 | } else if (*q == 'd') { /* %ld */ | ||
394 | ++q; | ||
395 | vs = va_arg(args, long); | ||
396 | if (vs < 0) { | ||
397 | prom_print(RELOC("-")); | ||
398 | vs = -vs; | ||
399 | } | ||
400 | prom_print_dec(vs); | ||
377 | } | 401 | } |
378 | break; | 402 | break; |
379 | } | 403 | } |
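With the additions above, prom_printf() now understands %d, %lx and %ld alongside the existing %s, %x and %lu, so early boot code can print signed values and longs in hex without open-coding the conversion. Hypothetical call sites, purely for illustration:

    prom_printf("boot cpu hwid: %d\n", hwid);                      /* signed int */
    prom_printf("initrd at 0x%lx, %lu bytes\n", initrd_addr, initrd_len);
    prom_printf("memory delta: %ld\n", (long)(new_top - old_top)); /* signed long */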
@@ -676,8 +700,10 @@ static void __init early_cmdline_parse(void) | |||
676 | #endif /* CONFIG_PCI_MSI */ | 700 | #endif /* CONFIG_PCI_MSI */ |
677 | #ifdef CONFIG_PPC_SMLPAR | 701 | #ifdef CONFIG_PPC_SMLPAR |
678 | #define OV5_CMO 0x80 /* Cooperative Memory Overcommitment */ | 702 | #define OV5_CMO 0x80 /* Cooperative Memory Overcommitment */ |
703 | #define OV5_XCMO 0x40 /* Page Coalescing */ | ||
679 | #else | 704 | #else |
680 | #define OV5_CMO 0x00 | 705 | #define OV5_CMO 0x00 |
706 | #define OV5_XCMO 0x00 | ||
681 | #endif | 707 | #endif |
682 | #define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */ | 708 | #define OV5_TYPE1_AFFINITY 0x80 /* Type 1 NUMA affinity */ |
683 | 709 | ||
@@ -732,7 +758,7 @@ static unsigned char ibm_architecture_vec[] = { | |||
732 | OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | | 758 | OV5_LPAR | OV5_SPLPAR | OV5_LARGE_PAGES | OV5_DRCONF_MEMORY | |
733 | OV5_DONATE_DEDICATE_CPU | OV5_MSI, | 759 | OV5_DONATE_DEDICATE_CPU | OV5_MSI, |
734 | 0, | 760 | 0, |
735 | OV5_CMO, | 761 | OV5_CMO | OV5_XCMO, |
736 | OV5_TYPE1_AFFINITY, | 762 | OV5_TYPE1_AFFINITY, |
737 | 0, | 763 | 0, |
738 | 0, | 764 | 0, |
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c index 88334af038e5..47187cc2cf00 100644 --- a/arch/powerpc/kernel/prom_parse.c +++ b/arch/powerpc/kernel/prom_parse.c | |||
@@ -2,95 +2,11 @@ | |||
2 | 2 | ||
3 | #include <linux/kernel.h> | 3 | #include <linux/kernel.h> |
4 | #include <linux/string.h> | 4 | #include <linux/string.h> |
5 | #include <linux/pci_regs.h> | ||
6 | #include <linux/module.h> | 5 | #include <linux/module.h> |
7 | #include <linux/ioport.h> | 6 | #include <linux/ioport.h> |
8 | #include <linux/etherdevice.h> | 7 | #include <linux/etherdevice.h> |
9 | #include <linux/of_address.h> | 8 | #include <linux/of_address.h> |
10 | #include <asm/prom.h> | 9 | #include <asm/prom.h> |
11 | #include <asm/pci-bridge.h> | ||
12 | |||
13 | #ifdef CONFIG_PCI | ||
14 | int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) | ||
15 | { | ||
16 | struct device_node *dn, *ppnode; | ||
17 | struct pci_dev *ppdev; | ||
18 | u32 lspec; | ||
19 | u32 laddr[3]; | ||
20 | u8 pin; | ||
21 | int rc; | ||
22 | |||
23 | /* Check if we have a device node, if yes, fallback to standard OF | ||
24 | * parsing | ||
25 | */ | ||
26 | dn = pci_device_to_OF_node(pdev); | ||
27 | if (dn) { | ||
28 | rc = of_irq_map_one(dn, 0, out_irq); | ||
29 | if (!rc) | ||
30 | return rc; | ||
31 | } | ||
32 | |||
33 | /* Ok, we don't, time to have fun. Let's start by building up an | ||
34 | * interrupt spec. we assume #interrupt-cells is 1, which is standard | ||
35 | * for PCI. If you do different, then don't use that routine. | ||
36 | */ | ||
37 | rc = pci_read_config_byte(pdev, PCI_INTERRUPT_PIN, &pin); | ||
38 | if (rc != 0) | ||
39 | return rc; | ||
40 | /* No pin, exit */ | ||
41 | if (pin == 0) | ||
42 | return -ENODEV; | ||
43 | |||
44 | /* Now we walk up the PCI tree */ | ||
45 | lspec = pin; | ||
46 | for (;;) { | ||
47 | /* Get the pci_dev of our parent */ | ||
48 | ppdev = pdev->bus->self; | ||
49 | |||
50 | /* Ouch, it's a host bridge... */ | ||
51 | if (ppdev == NULL) { | ||
52 | #ifdef CONFIG_PPC64 | ||
53 | ppnode = pci_bus_to_OF_node(pdev->bus); | ||
54 | #else | ||
55 | struct pci_controller *host; | ||
56 | host = pci_bus_to_host(pdev->bus); | ||
57 | ppnode = host ? host->dn : NULL; | ||
58 | #endif | ||
59 | /* No node for host bridge ? give up */ | ||
60 | if (ppnode == NULL) | ||
61 | return -EINVAL; | ||
62 | } else | ||
63 | /* We found a P2P bridge, check if it has a node */ | ||
64 | ppnode = pci_device_to_OF_node(ppdev); | ||
65 | |||
66 | /* Ok, we have found a parent with a device-node, hand over to | ||
67 | * the OF parsing code. | ||
68 | * We build a unit address from the linux device to be used for | ||
69 | * resolution. Note that we use the linux bus number which may | ||
70 | * not match your firmware bus numbering. | ||
71 | * Fortunately, in most cases, interrupt-map-mask doesn't include | ||
72 | * the bus number as part of the matching. | ||
73 | * You should still be careful about that though if you intend | ||
74 | * to rely on this function (you ship a firmware that doesn't | ||
75 | * create device nodes for all PCI devices). | ||
76 | */ | ||
77 | if (ppnode) | ||
78 | break; | ||
79 | |||
80 | /* We can only get here if we hit a P2P bridge with no node, | ||
81 | * let's do standard swizzling and try again | ||
82 | */ | ||
83 | lspec = pci_swizzle_interrupt_pin(pdev, lspec); | ||
84 | pdev = ppdev; | ||
85 | } | ||
86 | |||
87 | laddr[0] = (pdev->bus->number << 16) | ||
88 | | (pdev->devfn << 8); | ||
89 | laddr[1] = laddr[2] = 0; | ||
90 | return of_irq_map_raw(ppnode, &lspec, 1, laddr, out_irq); | ||
91 | } | ||
92 | EXPORT_SYMBOL_GPL(of_irq_map_pci); | ||
93 | #endif /* CONFIG_PCI */ | ||
94 | 10 | ||
95 | void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, | 11 | void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, |
96 | unsigned long *busno, unsigned long *phys, unsigned long *size) | 12 | unsigned long *busno, unsigned long *phys, unsigned long *size) |
@@ -117,41 +33,3 @@ void of_parse_dma_window(struct device_node *dn, const void *dma_window_prop, | |||
117 | cells = prop ? *(u32 *)prop : of_n_size_cells(dn); | 33 | cells = prop ? *(u32 *)prop : of_n_size_cells(dn); |
118 | *size = of_read_number(dma_window, cells); | 34 | *size = of_read_number(dma_window, cells); |
119 | } | 35 | } |
120 | |||
121 | /** | ||
122 | * Search the device tree for the best MAC address to use. 'mac-address' is | ||
123 | * checked first, because that is supposed to contain to "most recent" MAC | ||
124 | * address. If that isn't set, then 'local-mac-address' is checked next, | ||
125 | * because that is the default address. If that isn't set, then the obsolete | ||
126 | * 'address' is checked, just in case we're using an old device tree. | ||
127 | * | ||
128 | * Note that the 'address' property is supposed to contain a virtual address of | ||
129 | * the register set, but some DTS files have redefined that property to be the | ||
130 | * MAC address. | ||
131 | * | ||
132 | * All-zero MAC addresses are rejected, because those could be properties that | ||
133 | * exist in the device tree, but were not set by U-Boot. For example, the | ||
134 | * DTS could define 'mac-address' and 'local-mac-address', with zero MAC | ||
135 | * addresses. Some older U-Boots only initialized 'local-mac-address'. In | ||
136 | * this case, the real MAC is in 'local-mac-address', and 'mac-address' exists | ||
137 | * but is all zeros. | ||
138 | */ | ||
139 | const void *of_get_mac_address(struct device_node *np) | ||
140 | { | ||
141 | struct property *pp; | ||
142 | |||
143 | pp = of_find_property(np, "mac-address", NULL); | ||
144 | if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value)) | ||
145 | return pp->value; | ||
146 | |||
147 | pp = of_find_property(np, "local-mac-address", NULL); | ||
148 | if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value)) | ||
149 | return pp->value; | ||
150 | |||
151 | pp = of_find_property(np, "address", NULL); | ||
152 | if (pp && (pp->length == 6) && is_valid_ether_addr(pp->value)) | ||
153 | return pp->value; | ||
154 | |||
155 | return NULL; | ||
156 | } | ||
157 | EXPORT_SYMBOL(of_get_mac_address); | ||
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index 11f3cd9c832f..cb22024f2b42 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/signal.h> | 29 | #include <linux/signal.h> |
30 | #include <linux/seccomp.h> | 30 | #include <linux/seccomp.h> |
31 | #include <linux/audit.h> | 31 | #include <linux/audit.h> |
32 | #include <trace/syscall.h> | ||
32 | #ifdef CONFIG_PPC32 | 33 | #ifdef CONFIG_PPC32 |
33 | #include <linux/module.h> | 34 | #include <linux/module.h> |
34 | #endif | 35 | #endif |
@@ -40,6 +41,9 @@ | |||
40 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
41 | #include <asm/system.h> | 42 | #include <asm/system.h> |
42 | 43 | ||
44 | #define CREATE_TRACE_POINTS | ||
45 | #include <trace/events/syscalls.h> | ||
46 | |||
43 | /* | 47 | /* |
44 | * The parameter save area on the stack is used to store arguments being passed | 48 | * The parameter save area on the stack is used to store arguments being passed |
45 | * to callee function and is located at fixed offset from stack pointer. | 49 | * to callee function and is located at fixed offset from stack pointer. |
@@ -229,12 +233,16 @@ static int gpr_get(struct task_struct *target, const struct user_regset *regset, | |||
229 | unsigned int pos, unsigned int count, | 233 | unsigned int pos, unsigned int count, |
230 | void *kbuf, void __user *ubuf) | 234 | void *kbuf, void __user *ubuf) |
231 | { | 235 | { |
232 | int ret; | 236 | int i, ret; |
233 | 237 | ||
234 | if (target->thread.regs == NULL) | 238 | if (target->thread.regs == NULL) |
235 | return -EIO; | 239 | return -EIO; |
236 | 240 | ||
237 | CHECK_FULL_REGS(target->thread.regs); | 241 | if (!FULL_REGS(target->thread.regs)) { |
242 | /* We have a partial register set. Fill 14-31 with bogus values */ | ||
243 | for (i = 14; i < 32; i++) | ||
244 | target->thread.regs->gpr[i] = NV_REG_POISON; | ||
245 | } | ||
238 | 246 | ||
239 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, | 247 | ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, |
240 | target->thread.regs, | 248 | target->thread.regs, |
@@ -459,7 +467,7 @@ static int vr_set(struct task_struct *target, const struct user_regset *regset, | |||
459 | #ifdef CONFIG_VSX | 467 | #ifdef CONFIG_VSX |
460 | /* | 468 | /* |
461 | * Currently to set and and get all the vsx state, you need to call | 469 | * Currently to set and and get all the vsx state, you need to call |
462 | * the fp and VMX calls aswell. This only get/sets the lower 32 | 470 | * the fp and VMX calls as well. This only get/sets the lower 32 |
463 | * 128bit VSX registers. | 471 | * 128bit VSX registers. |
464 | */ | 472 | */ |
465 | 473 | ||
@@ -641,11 +649,16 @@ static int gpr32_get(struct task_struct *target, | |||
641 | compat_ulong_t *k = kbuf; | 649 | compat_ulong_t *k = kbuf; |
642 | compat_ulong_t __user *u = ubuf; | 650 | compat_ulong_t __user *u = ubuf; |
643 | compat_ulong_t reg; | 651 | compat_ulong_t reg; |
652 | int i; | ||
644 | 653 | ||
645 | if (target->thread.regs == NULL) | 654 | if (target->thread.regs == NULL) |
646 | return -EIO; | 655 | return -EIO; |
647 | 656 | ||
648 | CHECK_FULL_REGS(target->thread.regs); | 657 | if (!FULL_REGS(target->thread.regs)) { |
658 | /* We have a partial register set. Fill 14-31 with bogus values */ | ||
659 | for (i = 14; i < 32; i++) | ||
660 | target->thread.regs->gpr[i] = NV_REG_POISON; | ||
661 | } | ||
649 | 662 | ||
650 | pos /= sizeof(reg); | 663 | pos /= sizeof(reg); |
651 | count /= sizeof(reg); | 664 | count /= sizeof(reg); |
@@ -924,12 +937,16 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
924 | if (data && !(data & DABR_TRANSLATION)) | 937 | if (data && !(data & DABR_TRANSLATION)) |
925 | return -EIO; | 938 | return -EIO; |
926 | #ifdef CONFIG_HAVE_HW_BREAKPOINT | 939 | #ifdef CONFIG_HAVE_HW_BREAKPOINT |
940 | if (ptrace_get_breakpoints(task) < 0) | ||
941 | return -ESRCH; | ||
942 | |||
927 | bp = thread->ptrace_bps[0]; | 943 | bp = thread->ptrace_bps[0]; |
928 | if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) { | 944 | if ((!data) || !(data & (DABR_DATA_WRITE | DABR_DATA_READ))) { |
929 | if (bp) { | 945 | if (bp) { |
930 | unregister_hw_breakpoint(bp); | 946 | unregister_hw_breakpoint(bp); |
931 | thread->ptrace_bps[0] = NULL; | 947 | thread->ptrace_bps[0] = NULL; |
932 | } | 948 | } |
949 | ptrace_put_breakpoints(task); | ||
933 | return 0; | 950 | return 0; |
934 | } | 951 | } |
935 | if (bp) { | 952 | if (bp) { |
@@ -939,9 +956,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
939 | (DABR_DATA_WRITE | DABR_DATA_READ), | 956 | (DABR_DATA_WRITE | DABR_DATA_READ), |
940 | &attr.bp_type); | 957 | &attr.bp_type); |
941 | ret = modify_user_hw_breakpoint(bp, &attr); | 958 | ret = modify_user_hw_breakpoint(bp, &attr); |
942 | if (ret) | 959 | if (ret) { |
960 | ptrace_put_breakpoints(task); | ||
943 | return ret; | 961 | return ret; |
962 | } | ||
944 | thread->ptrace_bps[0] = bp; | 963 | thread->ptrace_bps[0] = bp; |
964 | ptrace_put_breakpoints(task); | ||
945 | thread->dabr = data; | 965 | thread->dabr = data; |
946 | return 0; | 966 | return 0; |
947 | } | 967 | } |
@@ -956,9 +976,12 @@ int ptrace_set_debugreg(struct task_struct *task, unsigned long addr, | |||
956 | ptrace_triggered, task); | 976 | ptrace_triggered, task); |
957 | if (IS_ERR(bp)) { | 977 | if (IS_ERR(bp)) { |
958 | thread->ptrace_bps[0] = NULL; | 978 | thread->ptrace_bps[0] = NULL; |
979 | ptrace_put_breakpoints(task); | ||
959 | return PTR_ERR(bp); | 980 | return PTR_ERR(bp); |
960 | } | 981 | } |
961 | 982 | ||
983 | ptrace_put_breakpoints(task); | ||
984 | |||
962 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ | 985 | #endif /* CONFIG_HAVE_HW_BREAKPOINT */ |
963 | 986 | ||
964 | /* Move contents to the DABR register */ | 987 | /* Move contents to the DABR register */ |
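The ptrace_get_breakpoints()/ptrace_put_breakpoints() pair added above pins the child's breakpoint context while thread->ptrace_bps[0] is being modified, so it cannot disappear under the caller; note that every early return between the two calls now drops the reference first. The bracketing pattern, reduced to its shape (update_breakpoint() is a placeholder for the DABR work in the hunks, not a real function):

    if (ptrace_get_breakpoints(task) < 0)
            return -ESRCH;                  /* no breakpoint context to pin */

    ret = update_breakpoint(task, data);    /* placeholder: register/modify/remove bp */

    ptrace_put_breakpoints(task);           /* drop the reference on every path */
    return ret;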
@@ -1316,6 +1339,10 @@ static int set_dac_range(struct task_struct *child, | |||
1316 | static long ppc_set_hwdebug(struct task_struct *child, | 1339 | static long ppc_set_hwdebug(struct task_struct *child, |
1317 | struct ppc_hw_breakpoint *bp_info) | 1340 | struct ppc_hw_breakpoint *bp_info) |
1318 | { | 1341 | { |
1342 | #ifndef CONFIG_PPC_ADV_DEBUG_REGS | ||
1343 | unsigned long dabr; | ||
1344 | #endif | ||
1345 | |||
1319 | if (bp_info->version != 1) | 1346 | if (bp_info->version != 1) |
1320 | return -ENOTSUPP; | 1347 | return -ENOTSUPP; |
1321 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 1348 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
@@ -1353,11 +1380,10 @@ static long ppc_set_hwdebug(struct task_struct *child, | |||
1353 | /* | 1380 | /* |
1354 | * We only support one data breakpoint | 1381 | * We only support one data breakpoint |
1355 | */ | 1382 | */ |
1356 | if (((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0) || | 1383 | if ((bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_RW) == 0 || |
1357 | ((bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0) || | 1384 | (bp_info->trigger_type & ~PPC_BREAKPOINT_TRIGGER_RW) != 0 || |
1358 | (bp_info->trigger_type != PPC_BREAKPOINT_TRIGGER_WRITE) || | 1385 | bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT || |
1359 | (bp_info->addr_mode != PPC_BREAKPOINT_MODE_EXACT) || | 1386 | bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE) |
1360 | (bp_info->condition_mode != PPC_BREAKPOINT_CONDITION_NONE)) | ||
1361 | return -EINVAL; | 1387 | return -EINVAL; |
1362 | 1388 | ||
1363 | if (child->thread.dabr) | 1389 | if (child->thread.dabr) |
@@ -1366,7 +1392,14 @@ static long ppc_set_hwdebug(struct task_struct *child, | |||
1366 | if ((unsigned long)bp_info->addr >= TASK_SIZE) | 1392 | if ((unsigned long)bp_info->addr >= TASK_SIZE) |
1367 | return -EIO; | 1393 | return -EIO; |
1368 | 1394 | ||
1369 | child->thread.dabr = (unsigned long)bp_info->addr; | 1395 | dabr = (unsigned long)bp_info->addr & ~7UL; |
1396 | dabr |= DABR_TRANSLATION; | ||
1397 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) | ||
1398 | dabr |= DABR_DATA_READ; | ||
1399 | if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) | ||
1400 | dabr |= DABR_DATA_WRITE; | ||
1401 | |||
1402 | child->thread.dabr = dabr; | ||
1370 | 1403 | ||
1371 | return 1; | 1404 | return 1; |
1372 | #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */ | 1405 | #endif /* !CONFIG_PPC_ADV_DEBUG_DVCS */ |
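ppc_set_hwdebug() now encodes the request into a DABR value rather than stashing the raw address. A worked example, assuming the usual encodings DABR_TRANSLATION = 0x4, DABR_DATA_READ = 0x1, DABR_DATA_WRITE = 0x2, and a made-up address:

    /* Watch writes to 0x10001234:
     *   dabr  = 0x10001234 & ~7UL        = 0x10001230   (doubleword aligned)
     *   dabr |= DABR_TRANSLATION (0x4)   = 0x10001234
     *   dabr |= DABR_DATA_WRITE  (0x2)   = 0x10001236
     * so child->thread.dabr ends up as 0x10001236.
     */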
@@ -1406,37 +1439,42 @@ static long ppc_del_hwdebug(struct task_struct *child, long addr, long data) | |||
1406 | * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, | 1439 | * Here are the old "legacy" powerpc specific getregs/setregs ptrace calls, |
1407 | * we mark them as obsolete now, they will be removed in a future version | 1440 | * we mark them as obsolete now, they will be removed in a future version |
1408 | */ | 1441 | */ |
1409 | static long arch_ptrace_old(struct task_struct *child, long request, long addr, | 1442 | static long arch_ptrace_old(struct task_struct *child, long request, |
1410 | long data) | 1443 | unsigned long addr, unsigned long data) |
1411 | { | 1444 | { |
1445 | void __user *datavp = (void __user *) data; | ||
1446 | |||
1412 | switch (request) { | 1447 | switch (request) { |
1413 | case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */ | 1448 | case PPC_PTRACE_GETREGS: /* Get GPRs 0 - 31. */ |
1414 | return copy_regset_to_user(child, &user_ppc_native_view, | 1449 | return copy_regset_to_user(child, &user_ppc_native_view, |
1415 | REGSET_GPR, 0, 32 * sizeof(long), | 1450 | REGSET_GPR, 0, 32 * sizeof(long), |
1416 | (void __user *) data); | 1451 | datavp); |
1417 | 1452 | ||
1418 | case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */ | 1453 | case PPC_PTRACE_SETREGS: /* Set GPRs 0 - 31. */ |
1419 | return copy_regset_from_user(child, &user_ppc_native_view, | 1454 | return copy_regset_from_user(child, &user_ppc_native_view, |
1420 | REGSET_GPR, 0, 32 * sizeof(long), | 1455 | REGSET_GPR, 0, 32 * sizeof(long), |
1421 | (const void __user *) data); | 1456 | datavp); |
1422 | 1457 | ||
1423 | case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */ | 1458 | case PPC_PTRACE_GETFPREGS: /* Get FPRs 0 - 31. */ |
1424 | return copy_regset_to_user(child, &user_ppc_native_view, | 1459 | return copy_regset_to_user(child, &user_ppc_native_view, |
1425 | REGSET_FPR, 0, 32 * sizeof(double), | 1460 | REGSET_FPR, 0, 32 * sizeof(double), |
1426 | (void __user *) data); | 1461 | datavp); |
1427 | 1462 | ||
1428 | case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */ | 1463 | case PPC_PTRACE_SETFPREGS: /* Set FPRs 0 - 31. */ |
1429 | return copy_regset_from_user(child, &user_ppc_native_view, | 1464 | return copy_regset_from_user(child, &user_ppc_native_view, |
1430 | REGSET_FPR, 0, 32 * sizeof(double), | 1465 | REGSET_FPR, 0, 32 * sizeof(double), |
1431 | (const void __user *) data); | 1466 | datavp); |
1432 | } | 1467 | } |
1433 | 1468 | ||
1434 | return -EPERM; | 1469 | return -EPERM; |
1435 | } | 1470 | } |
1436 | 1471 | ||
1437 | long arch_ptrace(struct task_struct *child, long request, long addr, long data) | 1472 | long arch_ptrace(struct task_struct *child, long request, |
1473 | unsigned long addr, unsigned long data) | ||
1438 | { | 1474 | { |
1439 | int ret = -EPERM; | 1475 | int ret = -EPERM; |
1476 | void __user *datavp = (void __user *) data; | ||
1477 | unsigned long __user *datalp = datavp; | ||
1440 | 1478 | ||
1441 | switch (request) { | 1479 | switch (request) { |
1442 | /* read the word at location addr in the USER area. */ | 1480 | /* read the word at location addr in the USER area. */ |
@@ -1446,11 +1484,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1446 | ret = -EIO; | 1484 | ret = -EIO; |
1447 | /* convert to index and check */ | 1485 | /* convert to index and check */ |
1448 | #ifdef CONFIG_PPC32 | 1486 | #ifdef CONFIG_PPC32 |
1449 | index = (unsigned long) addr >> 2; | 1487 | index = addr >> 2; |
1450 | if ((addr & 3) || (index > PT_FPSCR) | 1488 | if ((addr & 3) || (index > PT_FPSCR) |
1451 | || (child->thread.regs == NULL)) | 1489 | || (child->thread.regs == NULL)) |
1452 | #else | 1490 | #else |
1453 | index = (unsigned long) addr >> 3; | 1491 | index = addr >> 3; |
1454 | if ((addr & 7) || (index > PT_FPSCR)) | 1492 | if ((addr & 7) || (index > PT_FPSCR)) |
1455 | #endif | 1493 | #endif |
1456 | break; | 1494 | break; |
@@ -1463,7 +1501,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1463 | tmp = ((unsigned long *)child->thread.fpr) | 1501 | tmp = ((unsigned long *)child->thread.fpr) |
1464 | [TS_FPRWIDTH * (index - PT_FPR0)]; | 1502 | [TS_FPRWIDTH * (index - PT_FPR0)]; |
1465 | } | 1503 | } |
1466 | ret = put_user(tmp,(unsigned long __user *) data); | 1504 | ret = put_user(tmp, datalp); |
1467 | break; | 1505 | break; |
1468 | } | 1506 | } |
1469 | 1507 | ||
@@ -1474,11 +1512,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1474 | ret = -EIO; | 1512 | ret = -EIO; |
1475 | /* convert to index and check */ | 1513 | /* convert to index and check */ |
1476 | #ifdef CONFIG_PPC32 | 1514 | #ifdef CONFIG_PPC32 |
1477 | index = (unsigned long) addr >> 2; | 1515 | index = addr >> 2; |
1478 | if ((addr & 3) || (index > PT_FPSCR) | 1516 | if ((addr & 3) || (index > PT_FPSCR) |
1479 | || (child->thread.regs == NULL)) | 1517 | || (child->thread.regs == NULL)) |
1480 | #else | 1518 | #else |
1481 | index = (unsigned long) addr >> 3; | 1519 | index = addr >> 3; |
1482 | if ((addr & 7) || (index > PT_FPSCR)) | 1520 | if ((addr & 7) || (index > PT_FPSCR)) |
1483 | #endif | 1521 | #endif |
1484 | break; | 1522 | break; |
@@ -1525,11 +1563,11 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1525 | dbginfo.features = 0; | 1563 | dbginfo.features = 0; |
1526 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ | 1564 | #endif /* CONFIG_PPC_ADV_DEBUG_REGS */ |
1527 | 1565 | ||
1528 | if (!access_ok(VERIFY_WRITE, data, | 1566 | if (!access_ok(VERIFY_WRITE, datavp, |
1529 | sizeof(struct ppc_debug_info))) | 1567 | sizeof(struct ppc_debug_info))) |
1530 | return -EFAULT; | 1568 | return -EFAULT; |
1531 | ret = __copy_to_user((struct ppc_debug_info __user *)data, | 1569 | ret = __copy_to_user(datavp, &dbginfo, |
1532 | &dbginfo, sizeof(struct ppc_debug_info)) ? | 1570 | sizeof(struct ppc_debug_info)) ? |
1533 | -EFAULT : 0; | 1571 | -EFAULT : 0; |
1534 | break; | 1572 | break; |
1535 | } | 1573 | } |
@@ -1537,11 +1575,10 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1537 | case PPC_PTRACE_SETHWDEBUG: { | 1575 | case PPC_PTRACE_SETHWDEBUG: { |
1538 | struct ppc_hw_breakpoint bp_info; | 1576 | struct ppc_hw_breakpoint bp_info; |
1539 | 1577 | ||
1540 | if (!access_ok(VERIFY_READ, data, | 1578 | if (!access_ok(VERIFY_READ, datavp, |
1541 | sizeof(struct ppc_hw_breakpoint))) | 1579 | sizeof(struct ppc_hw_breakpoint))) |
1542 | return -EFAULT; | 1580 | return -EFAULT; |
1543 | ret = __copy_from_user(&bp_info, | 1581 | ret = __copy_from_user(&bp_info, datavp, |
1544 | (struct ppc_hw_breakpoint __user *)data, | ||
1545 | sizeof(struct ppc_hw_breakpoint)) ? | 1582 | sizeof(struct ppc_hw_breakpoint)) ? |
1546 | -EFAULT : 0; | 1583 | -EFAULT : 0; |
1547 | if (!ret) | 1584 | if (!ret) |
@@ -1560,11 +1597,9 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1560 | if (addr > 0) | 1597 | if (addr > 0) |
1561 | break; | 1598 | break; |
1562 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | 1599 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS |
1563 | ret = put_user(child->thread.dac1, | 1600 | ret = put_user(child->thread.dac1, datalp); |
1564 | (unsigned long __user *)data); | ||
1565 | #else | 1601 | #else |
1566 | ret = put_user(child->thread.dabr, | 1602 | ret = put_user(child->thread.dabr, datalp); |
1567 | (unsigned long __user *)data); | ||
1568 | #endif | 1603 | #endif |
1569 | break; | 1604 | break; |
1570 | } | 1605 | } |
@@ -1580,7 +1615,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1580 | return copy_regset_to_user(child, &user_ppc_native_view, | 1615 | return copy_regset_to_user(child, &user_ppc_native_view, |
1581 | REGSET_GPR, | 1616 | REGSET_GPR, |
1582 | 0, sizeof(struct pt_regs), | 1617 | 0, sizeof(struct pt_regs), |
1583 | (void __user *) data); | 1618 | datavp); |
1584 | 1619 | ||
1585 | #ifdef CONFIG_PPC64 | 1620 | #ifdef CONFIG_PPC64 |
1586 | case PTRACE_SETREGS64: | 1621 | case PTRACE_SETREGS64: |
@@ -1589,19 +1624,19 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1589 | return copy_regset_from_user(child, &user_ppc_native_view, | 1624 | return copy_regset_from_user(child, &user_ppc_native_view, |
1590 | REGSET_GPR, | 1625 | REGSET_GPR, |
1591 | 0, sizeof(struct pt_regs), | 1626 | 0, sizeof(struct pt_regs), |
1592 | (const void __user *) data); | 1627 | datavp); |
1593 | 1628 | ||
1594 | case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */ | 1629 | case PTRACE_GETFPREGS: /* Get the child FPU state (FPR0...31 + FPSCR) */ |
1595 | return copy_regset_to_user(child, &user_ppc_native_view, | 1630 | return copy_regset_to_user(child, &user_ppc_native_view, |
1596 | REGSET_FPR, | 1631 | REGSET_FPR, |
1597 | 0, sizeof(elf_fpregset_t), | 1632 | 0, sizeof(elf_fpregset_t), |
1598 | (void __user *) data); | 1633 | datavp); |
1599 | 1634 | ||
1600 | case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */ | 1635 | case PTRACE_SETFPREGS: /* Set the child FPU state (FPR0...31 + FPSCR) */ |
1601 | return copy_regset_from_user(child, &user_ppc_native_view, | 1636 | return copy_regset_from_user(child, &user_ppc_native_view, |
1602 | REGSET_FPR, | 1637 | REGSET_FPR, |
1603 | 0, sizeof(elf_fpregset_t), | 1638 | 0, sizeof(elf_fpregset_t), |
1604 | (const void __user *) data); | 1639 | datavp); |
1605 | 1640 | ||
1606 | #ifdef CONFIG_ALTIVEC | 1641 | #ifdef CONFIG_ALTIVEC |
1607 | case PTRACE_GETVRREGS: | 1642 | case PTRACE_GETVRREGS: |
@@ -1609,40 +1644,40 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data) | |||
1609 | REGSET_VMX, | 1644 | REGSET_VMX, |
1610 | 0, (33 * sizeof(vector128) + | 1645 | 0, (33 * sizeof(vector128) + |
1611 | sizeof(u32)), | 1646 | sizeof(u32)), |
1612 | (void __user *) data); | 1647 | datavp); |
1613 | 1648 | ||
1614 | case PTRACE_SETVRREGS: | 1649 | case PTRACE_SETVRREGS: |
1615 | return copy_regset_from_user(child, &user_ppc_native_view, | 1650 | return copy_regset_from_user(child, &user_ppc_native_view, |
1616 | REGSET_VMX, | 1651 | REGSET_VMX, |
1617 | 0, (33 * sizeof(vector128) + | 1652 | 0, (33 * sizeof(vector128) + |
1618 | sizeof(u32)), | 1653 | sizeof(u32)), |
1619 | (const void __user *) data); | 1654 | datavp); |
1620 | #endif | 1655 | #endif |
1621 | #ifdef CONFIG_VSX | 1656 | #ifdef CONFIG_VSX |
1622 | case PTRACE_GETVSRREGS: | 1657 | case PTRACE_GETVSRREGS: |
1623 | return copy_regset_to_user(child, &user_ppc_native_view, | 1658 | return copy_regset_to_user(child, &user_ppc_native_view, |
1624 | REGSET_VSX, | 1659 | REGSET_VSX, |
1625 | 0, 32 * sizeof(double), | 1660 | 0, 32 * sizeof(double), |
1626 | (void __user *) data); | 1661 | datavp); |
1627 | 1662 | ||
1628 | case PTRACE_SETVSRREGS: | 1663 | case PTRACE_SETVSRREGS: |
1629 | return copy_regset_from_user(child, &user_ppc_native_view, | 1664 | return copy_regset_from_user(child, &user_ppc_native_view, |
1630 | REGSET_VSX, | 1665 | REGSET_VSX, |
1631 | 0, 32 * sizeof(double), | 1666 | 0, 32 * sizeof(double), |
1632 | (const void __user *) data); | 1667 | datavp); |
1633 | #endif | 1668 | #endif |
1634 | #ifdef CONFIG_SPE | 1669 | #ifdef CONFIG_SPE |
1635 | case PTRACE_GETEVRREGS: | 1670 | case PTRACE_GETEVRREGS: |
1636 | /* Get the child spe register state. */ | 1671 | /* Get the child spe register state. */ |
1637 | return copy_regset_to_user(child, &user_ppc_native_view, | 1672 | return copy_regset_to_user(child, &user_ppc_native_view, |
1638 | REGSET_SPE, 0, 35 * sizeof(u32), | 1673 | REGSET_SPE, 0, 35 * sizeof(u32), |
1639 | (void __user *) data); | 1674 | datavp); |
1640 | 1675 | ||
1641 | case PTRACE_SETEVRREGS: | 1676 | case PTRACE_SETEVRREGS: |
1642 | /* Set the child spe register state. */ | 1677 | /* Set the child spe register state. */ |
1643 | return copy_regset_from_user(child, &user_ppc_native_view, | 1678 | return copy_regset_from_user(child, &user_ppc_native_view, |
1644 | REGSET_SPE, 0, 35 * sizeof(u32), | 1679 | REGSET_SPE, 0, 35 * sizeof(u32), |
1645 | (const void __user *) data); | 1680 | datavp); |
1646 | #endif | 1681 | #endif |
1647 | 1682 | ||
1648 | /* Old reverse args ptrace callss */ | 1683 | /* Old reverse args ptrace callss */ |
@@ -1679,9 +1714,12 @@ long do_syscall_trace_enter(struct pt_regs *regs) | |||
1679 | */ | 1714 | */ |
1680 | ret = -1L; | 1715 | ret = -1L; |
1681 | 1716 | ||
1717 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) | ||
1718 | trace_sys_enter(regs, regs->gpr[0]); | ||
1719 | |||
1682 | if (unlikely(current->audit_context)) { | 1720 | if (unlikely(current->audit_context)) { |
1683 | #ifdef CONFIG_PPC64 | 1721 | #ifdef CONFIG_PPC64 |
1684 | if (!test_thread_flag(TIF_32BIT)) | 1722 | if (!is_32bit_task()) |
1685 | audit_syscall_entry(AUDIT_ARCH_PPC64, | 1723 | audit_syscall_entry(AUDIT_ARCH_PPC64, |
1686 | regs->gpr[0], | 1724 | regs->gpr[0], |
1687 | regs->gpr[3], regs->gpr[4], | 1725 | regs->gpr[3], regs->gpr[4], |
@@ -1707,6 +1745,9 @@ void do_syscall_trace_leave(struct pt_regs *regs) | |||
1707 | audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, | 1745 | audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, |
1708 | regs->result); | 1746 | regs->result); |
1709 | 1747 | ||
1748 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) | ||
1749 | trace_sys_exit(regs, regs->result); | ||
1750 | |||
1710 | step = test_thread_flag(TIF_SINGLESTEP); | 1751 | step = test_thread_flag(TIF_SINGLESTEP); |
1711 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) | 1752 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) |
1712 | tracehook_report_syscall_exit(regs, step); | 1753 | tracehook_report_syscall_exit(regs, step); |
diff --git a/arch/powerpc/kernel/ptrace32.c b/arch/powerpc/kernel/ptrace32.c index 8a6daf4129f6..69c4be917d07 100644 --- a/arch/powerpc/kernel/ptrace32.c +++ b/arch/powerpc/kernel/ptrace32.c | |||
@@ -280,7 +280,11 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
280 | /* We only support one DABR and no IABRS at the moment */ | 280 | /* We only support one DABR and no IABRS at the moment */ |
281 | if (addr > 0) | 281 | if (addr > 0) |
282 | break; | 282 | break; |
283 | #ifdef CONFIG_PPC_ADV_DEBUG_REGS | ||
284 | ret = put_user(child->thread.dac1, (u32 __user *)data); | ||
285 | #else | ||
283 | ret = put_user(child->thread.dabr, (u32 __user *)data); | 286 | ret = put_user(child->thread.dabr, (u32 __user *)data); |
287 | #endif | ||
284 | break; | 288 | break; |
285 | } | 289 | } |
286 | 290 | ||
@@ -312,6 +316,9 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | |||
312 | case PTRACE_SET_DEBUGREG: | 316 | case PTRACE_SET_DEBUGREG: |
313 | case PTRACE_SYSCALL: | 317 | case PTRACE_SYSCALL: |
314 | case PTRACE_CONT: | 318 | case PTRACE_CONT: |
319 | case PPC_PTRACE_GETHWDBGINFO: | ||
320 | case PPC_PTRACE_SETHWDEBUG: | ||
321 | case PPC_PTRACE_DELHWDEBUG: | ||
315 | ret = arch_ptrace(child, request, addr, data); | 322 | ret = arch_ptrace(child, request, addr, data); |
316 | break; | 323 | break; |
317 | 324 | ||
diff --git a/arch/powerpc/kernel/rtas-rtc.c b/arch/powerpc/kernel/rtas-rtc.c index 77578c093dda..c57c19358a26 100644 --- a/arch/powerpc/kernel/rtas-rtc.c +++ b/arch/powerpc/kernel/rtas-rtc.c | |||
@@ -4,6 +4,7 @@ | |||
4 | #include <linux/init.h> | 4 | #include <linux/init.h> |
5 | #include <linux/rtc.h> | 5 | #include <linux/rtc.h> |
6 | #include <linux/delay.h> | 6 | #include <linux/delay.h> |
7 | #include <linux/ratelimit.h> | ||
7 | #include <asm/prom.h> | 8 | #include <asm/prom.h> |
8 | #include <asm/rtas.h> | 9 | #include <asm/rtas.h> |
9 | #include <asm/time.h> | 10 | #include <asm/time.h> |
@@ -29,9 +30,10 @@ unsigned long __init rtas_get_boot_time(void) | |||
29 | } | 30 | } |
30 | } while (wait_time && (get_tb() < max_wait_tb)); | 31 | } while (wait_time && (get_tb() < max_wait_tb)); |
31 | 32 | ||
32 | if (error != 0 && printk_ratelimit()) { | 33 | if (error != 0) { |
33 | printk(KERN_WARNING "error: reading the clock failed (%d)\n", | 34 | printk_ratelimited(KERN_WARNING |
34 | error); | 35 | "error: reading the clock failed (%d)\n", |
36 | error); | ||
35 | return 0; | 37 | return 0; |
36 | } | 38 | } |
37 | 39 | ||
@@ -55,19 +57,21 @@ void rtas_get_rtc_time(struct rtc_time *rtc_tm) | |||
55 | 57 | ||
56 | wait_time = rtas_busy_delay_time(error); | 58 | wait_time = rtas_busy_delay_time(error); |
57 | if (wait_time) { | 59 | if (wait_time) { |
58 | if (in_interrupt() && printk_ratelimit()) { | 60 | if (in_interrupt()) { |
59 | memset(rtc_tm, 0, sizeof(struct rtc_time)); | 61 | memset(rtc_tm, 0, sizeof(struct rtc_time)); |
60 | printk(KERN_WARNING "error: reading clock" | 62 | printk_ratelimited(KERN_WARNING |
61 | " would delay interrupt\n"); | 63 | "error: reading clock " |
64 | "would delay interrupt\n"); | ||
62 | return; /* delay not allowed */ | 65 | return; /* delay not allowed */ |
63 | } | 66 | } |
64 | msleep(wait_time); | 67 | msleep(wait_time); |
65 | } | 68 | } |
66 | } while (wait_time && (get_tb() < max_wait_tb)); | 69 | } while (wait_time && (get_tb() < max_wait_tb)); |
67 | 70 | ||
68 | if (error != 0 && printk_ratelimit()) { | 71 | if (error != 0) { |
69 | printk(KERN_WARNING "error: reading the clock failed (%d)\n", | 72 | printk_ratelimited(KERN_WARNING |
70 | error); | 73 | "error: reading the clock failed (%d)\n", |
74 | error); | ||
71 | return; | 75 | return; |
72 | } | 76 | } |
73 | 77 | ||
@@ -99,9 +103,10 @@ int rtas_set_rtc_time(struct rtc_time *tm) | |||
99 | } | 103 | } |
100 | } while (wait_time && (get_tb() < max_wait_tb)); | 104 | } while (wait_time && (get_tb() < max_wait_tb)); |
101 | 105 | ||
102 | if (error != 0 && printk_ratelimit()) | 106 | if (error != 0) |
103 | printk(KERN_WARNING "error: setting the clock failed (%d)\n", | 107 | printk_ratelimited(KERN_WARNING |
104 | error); | 108 | "error: setting the clock failed (%d)\n", |
109 | error); | ||
105 | 110 | ||
106 | return 0; | 111 | return 0; |
107 | } | 112 | } |
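
Throughout rtas-rtc.c the "error && printk_ratelimit()" test is folded into printk_ratelimited(). Besides being shorter, this changes behaviour subtly: in the old form the early return was skipped whenever the ratelimit suppressed the message, so a failed RTAS call could still fall through; in the new form the error path is always taken and only the log message is throttled. A minimal sketch of the two forms (illustrative, not part of the patch):

	/* before: bailing out is tied to the ratelimit decision */
	if (error != 0 && printk_ratelimit()) {
		printk(KERN_WARNING "error: reading the clock failed (%d)\n",
		       error);
		return 0;
	}

	/* after: always bail out on error; only the message is ratelimited */
	if (error != 0) {
		printk_ratelimited(KERN_WARNING
				   "error: reading the clock failed (%d)\n",
				   error);
		return 0;
	}
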
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 41048de3c6c3..271ff6318eda 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c | |||
@@ -41,6 +41,8 @@ | |||
41 | #include <asm/atomic.h> | 41 | #include <asm/atomic.h> |
42 | #include <asm/time.h> | 42 | #include <asm/time.h> |
43 | #include <asm/mmu.h> | 43 | #include <asm/mmu.h> |
44 | #include <asm/topology.h> | ||
45 | #include <asm/pSeries_reconfig.h> | ||
44 | 46 | ||
45 | struct rtas_t rtas = { | 47 | struct rtas_t rtas = { |
46 | .lock = __ARCH_SPIN_LOCK_UNLOCKED | 48 | .lock = __ARCH_SPIN_LOCK_UNLOCKED |
@@ -493,7 +495,7 @@ unsigned int rtas_busy_delay(int status) | |||
493 | 495 | ||
494 | might_sleep(); | 496 | might_sleep(); |
495 | ms = rtas_busy_delay_time(status); | 497 | ms = rtas_busy_delay_time(status); |
496 | if (ms) | 498 | if (ms && need_resched()) |
497 | msleep(ms); | 499 | msleep(ms); |
498 | 500 | ||
499 | return ms; | 501 | return ms; |
@@ -713,6 +715,7 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w | |||
713 | int cpu; | 715 | int cpu; |
714 | 716 | ||
715 | slb_set_size(SLB_MIN_SIZE); | 717 | slb_set_size(SLB_MIN_SIZE); |
718 | stop_topology_update(); | ||
716 | printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id()); | 719 | printk(KERN_DEBUG "calling ibm,suspend-me on cpu %i\n", smp_processor_id()); |
717 | 720 | ||
718 | while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) && | 721 | while (rc == H_MULTI_THREADS_ACTIVE && !atomic_read(&data->done) && |
@@ -728,6 +731,8 @@ static int __rtas_suspend_last_cpu(struct rtas_suspend_me_data *data, int wake_w | |||
728 | rc = atomic_read(&data->error); | 731 | rc = atomic_read(&data->error); |
729 | 732 | ||
730 | atomic_set(&data->error, rc); | 733 | atomic_set(&data->error, rc); |
734 | start_topology_update(); | ||
735 | pSeries_coalesce_init(); | ||
731 | 736 | ||
732 | if (wake_when_done) { | 737 | if (wake_when_done) { |
733 | atomic_set(&data->done, 1); | 738 | atomic_set(&data->done, 1); |
@@ -805,7 +810,7 @@ static void rtas_percpu_suspend_me(void *info) | |||
805 | __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); | 810 | __rtas_suspend_cpu((struct rtas_suspend_me_data *)info, 1); |
806 | } | 811 | } |
807 | 812 | ||
808 | static int rtas_ibm_suspend_me(struct rtas_args *args) | 813 | int rtas_ibm_suspend_me(struct rtas_args *args) |
809 | { | 814 | { |
810 | long state; | 815 | long state; |
811 | long rc; | 816 | long rc; |
@@ -855,7 +860,7 @@ static int rtas_ibm_suspend_me(struct rtas_args *args) | |||
855 | return atomic_read(&data.error); | 860 | return atomic_read(&data.error); |
856 | } | 861 | } |
857 | #else /* CONFIG_PPC_PSERIES */ | 862 | #else /* CONFIG_PPC_PSERIES */ |
858 | static int rtas_ibm_suspend_me(struct rtas_args *args) | 863 | int rtas_ibm_suspend_me(struct rtas_args *args) |
859 | { | 864 | { |
860 | return -ENOSYS; | 865 | return -ENOSYS; |
861 | } | 866 | } |
@@ -969,7 +974,7 @@ void __init rtas_initialize(void) | |||
969 | */ | 974 | */ |
970 | #ifdef CONFIG_PPC64 | 975 | #ifdef CONFIG_PPC64 |
971 | if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) { | 976 | if (machine_is(pseries) && firmware_has_feature(FW_FEATURE_LPAR)) { |
972 | rtas_region = min(memblock.rmo_size, RTAS_INSTANTIATE_MAX); | 977 | rtas_region = min(ppc64_rma_size, RTAS_INSTANTIATE_MAX); |
973 | ibm_suspend_me_token = rtas_token("ibm,suspend-me"); | 978 | ibm_suspend_me_token = rtas_token("ibm,suspend-me"); |
974 | } | 979 | } |
975 | #endif | 980 | #endif |
diff --git a/arch/powerpc/kernel/rtas_flash.c b/arch/powerpc/kernel/rtas_flash.c index 67a84d8f118d..bf5f5ce3a7bd 100644 --- a/arch/powerpc/kernel/rtas_flash.c +++ b/arch/powerpc/kernel/rtas_flash.c | |||
@@ -256,31 +256,16 @@ static ssize_t rtas_flash_read(struct file *file, char __user *buf, | |||
256 | struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); | 256 | struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); |
257 | struct rtas_update_flash_t *uf; | 257 | struct rtas_update_flash_t *uf; |
258 | char msg[RTAS_MSG_MAXLEN]; | 258 | char msg[RTAS_MSG_MAXLEN]; |
259 | int msglen; | ||
260 | 259 | ||
261 | uf = (struct rtas_update_flash_t *) dp->data; | 260 | uf = dp->data; |
262 | 261 | ||
263 | if (!strcmp(dp->name, FIRMWARE_FLASH_NAME)) { | 262 | if (!strcmp(dp->name, FIRMWARE_FLASH_NAME)) { |
264 | get_flash_status_msg(uf->status, msg); | 263 | get_flash_status_msg(uf->status, msg); |
265 | } else { /* FIRMWARE_UPDATE_NAME */ | 264 | } else { /* FIRMWARE_UPDATE_NAME */ |
266 | sprintf(msg, "%d\n", uf->status); | 265 | sprintf(msg, "%d\n", uf->status); |
267 | } | 266 | } |
268 | msglen = strlen(msg); | ||
269 | if (msglen > count) | ||
270 | msglen = count; | ||
271 | |||
272 | if (ppos && *ppos != 0) | ||
273 | return 0; /* be cheap */ | ||
274 | |||
275 | if (!access_ok(VERIFY_WRITE, buf, msglen)) | ||
276 | return -EINVAL; | ||
277 | 267 | ||
278 | if (copy_to_user(buf, msg, msglen)) | 268 | return simple_read_from_buffer(buf, count, ppos, msg, strlen(msg)); |
279 | return -EFAULT; | ||
280 | |||
281 | if (ppos) | ||
282 | *ppos = msglen; | ||
283 | return msglen; | ||
284 | } | 269 | } |
285 | 270 | ||
286 | /* constructor for flash_block_cache */ | 271 | /* constructor for flash_block_cache */ |
@@ -394,26 +379,13 @@ static ssize_t manage_flash_read(struct file *file, char __user *buf, | |||
394 | char msg[RTAS_MSG_MAXLEN]; | 379 | char msg[RTAS_MSG_MAXLEN]; |
395 | int msglen; | 380 | int msglen; |
396 | 381 | ||
397 | args_buf = (struct rtas_manage_flash_t *) dp->data; | 382 | args_buf = dp->data; |
398 | if (args_buf == NULL) | 383 | if (args_buf == NULL) |
399 | return 0; | 384 | return 0; |
400 | 385 | ||
401 | msglen = sprintf(msg, "%d\n", args_buf->status); | 386 | msglen = sprintf(msg, "%d\n", args_buf->status); |
402 | if (msglen > count) | ||
403 | msglen = count; | ||
404 | 387 | ||
405 | if (ppos && *ppos != 0) | 388 | return simple_read_from_buffer(buf, count, ppos, msg, msglen); |
406 | return 0; /* be cheap */ | ||
407 | |||
408 | if (!access_ok(VERIFY_WRITE, buf, msglen)) | ||
409 | return -EINVAL; | ||
410 | |||
411 | if (copy_to_user(buf, msg, msglen)) | ||
412 | return -EFAULT; | ||
413 | |||
414 | if (ppos) | ||
415 | *ppos = msglen; | ||
416 | return msglen; | ||
417 | } | 389 | } |
418 | 390 | ||
419 | static ssize_t manage_flash_write(struct file *file, const char __user *buf, | 391 | static ssize_t manage_flash_write(struct file *file, const char __user *buf, |
@@ -495,24 +467,11 @@ static ssize_t validate_flash_read(struct file *file, char __user *buf, | |||
495 | char msg[RTAS_MSG_MAXLEN]; | 467 | char msg[RTAS_MSG_MAXLEN]; |
496 | int msglen; | 468 | int msglen; |
497 | 469 | ||
498 | args_buf = (struct rtas_validate_flash_t *) dp->data; | 470 | args_buf = dp->data; |
499 | 471 | ||
500 | if (ppos && *ppos != 0) | ||
501 | return 0; /* be cheap */ | ||
502 | |||
503 | msglen = get_validate_flash_msg(args_buf, msg); | 472 | msglen = get_validate_flash_msg(args_buf, msg); |
504 | if (msglen > count) | ||
505 | msglen = count; | ||
506 | |||
507 | if (!access_ok(VERIFY_WRITE, buf, msglen)) | ||
508 | return -EINVAL; | ||
509 | |||
510 | if (copy_to_user(buf, msg, msglen)) | ||
511 | return -EFAULT; | ||
512 | 473 | ||
513 | if (ppos) | 474 | return simple_read_from_buffer(buf, count, ppos, msg, msglen); |
514 | *ppos = msglen; | ||
515 | return msglen; | ||
516 | } | 475 | } |
517 | 476 | ||
518 | static ssize_t validate_flash_write(struct file *file, const char __user *buf, | 477 | static ssize_t validate_flash_write(struct file *file, const char __user *buf, |
@@ -716,6 +675,7 @@ static const struct file_operations rtas_flash_operations = { | |||
716 | .write = rtas_flash_write, | 675 | .write = rtas_flash_write, |
717 | .open = rtas_excl_open, | 676 | .open = rtas_excl_open, |
718 | .release = rtas_flash_release, | 677 | .release = rtas_flash_release, |
678 | .llseek = default_llseek, | ||
719 | }; | 679 | }; |
720 | 680 | ||
721 | static const struct file_operations manage_flash_operations = { | 681 | static const struct file_operations manage_flash_operations = { |
@@ -724,6 +684,7 @@ static const struct file_operations manage_flash_operations = { | |||
724 | .write = manage_flash_write, | 684 | .write = manage_flash_write, |
725 | .open = rtas_excl_open, | 685 | .open = rtas_excl_open, |
726 | .release = rtas_excl_release, | 686 | .release = rtas_excl_release, |
687 | .llseek = default_llseek, | ||
727 | }; | 688 | }; |
728 | 689 | ||
729 | static const struct file_operations validate_flash_operations = { | 690 | static const struct file_operations validate_flash_operations = { |
@@ -732,6 +693,7 @@ static const struct file_operations validate_flash_operations = { | |||
732 | .write = validate_flash_write, | 693 | .write = validate_flash_write, |
733 | .open = rtas_excl_open, | 694 | .open = rtas_excl_open, |
734 | .release = validate_flash_release, | 695 | .release = validate_flash_release, |
696 | .llseek = default_llseek, | ||
735 | }; | 697 | }; |
736 | 698 | ||
737 | static int __init rtas_flash_init(void) | 699 | static int __init rtas_flash_init(void) |
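
The three read handlers above drop their hand-rolled *ppos / access_ok() / copy_to_user() sequences in favour of simple_read_from_buffer(), which clamps the length to count, copies from a kernel buffer, advances *ppos and returns the number of bytes copied (or -EFAULT). The explicit .llseek = default_llseek entries make the previously implicit seek behaviour explicit. A sketch of the resulting handler shape, with a hypothetical status value standing in for the rtas_flash state:

#include <linux/kernel.h>
#include <linux/fs.h>

static ssize_t status_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	char msg[16];
	int msglen = sprintf(msg, "%d\n", 42);	/* hypothetical status value */

	/* handles count clamping, copy_to_user() and *ppos bookkeeping */
	return simple_read_from_buffer(buf, count, ppos, msg, msglen);
}
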
diff --git a/arch/powerpc/kernel/rtasd.c b/arch/powerpc/kernel/rtasd.c index 638883e23e3a..67f6c3b51357 100644 --- a/arch/powerpc/kernel/rtasd.c +++ b/arch/powerpc/kernel/rtasd.c | |||
@@ -160,7 +160,7 @@ static int log_rtas_len(char * buf) | |||
160 | /* rtas fixed header */ | 160 | /* rtas fixed header */ |
161 | len = 8; | 161 | len = 8; |
162 | err = (struct rtas_error_log *)buf; | 162 | err = (struct rtas_error_log *)buf; |
163 | if (err->extended_log_length) { | 163 | if (err->extended && err->extended_log_length) { |
164 | 164 | ||
165 | /* extended header */ | 165 | /* extended header */ |
166 | len += err->extended_log_length; | 166 | len += err->extended_log_length; |
@@ -354,6 +354,7 @@ static const struct file_operations proc_rtas_log_operations = { | |||
354 | .poll = rtas_log_poll, | 354 | .poll = rtas_log_poll, |
355 | .open = rtas_log_open, | 355 | .open = rtas_log_open, |
356 | .release = rtas_log_release, | 356 | .release = rtas_log_release, |
357 | .llseek = noop_llseek, | ||
357 | }; | 358 | }; |
358 | 359 | ||
359 | static int enable_surveillance(int timeout) | 360 | static int enable_surveillance(int timeout) |
@@ -411,7 +412,8 @@ static void rtas_event_scan(struct work_struct *w) | |||
411 | 412 | ||
412 | get_online_cpus(); | 413 | get_online_cpus(); |
413 | 414 | ||
414 | cpu = cpumask_next(smp_processor_id(), cpu_online_mask); | 415 | /* raw_ OK because just using CPU as starting point. */ |
416 | cpu = cpumask_next(raw_smp_processor_id(), cpu_online_mask); | ||
415 | if (cpu >= nr_cpu_ids) { | 417 | if (cpu >= nr_cpu_ids) { |
416 | cpu = cpumask_first(cpu_online_mask); | 418 | cpu = cpumask_first(cpu_online_mask); |
417 | 419 | ||
@@ -463,7 +465,7 @@ static void start_event_scan(void) | |||
463 | pr_debug("rtasd: will sleep for %d milliseconds\n", | 465 | pr_debug("rtasd: will sleep for %d milliseconds\n", |
464 | (30000 / rtas_event_scan_rate)); | 466 | (30000 / rtas_event_scan_rate)); |
465 | 467 | ||
466 | /* Retreive errors from nvram if any */ | 468 | /* Retrieve errors from nvram if any */ |
467 | retreive_nvram_error_log(); | 469 | retreive_nvram_error_log(); |
468 | 470 | ||
469 | schedule_delayed_work_on(cpumask_first(cpu_online_mask), | 471 | schedule_delayed_work_on(cpumask_first(cpu_online_mask), |
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 9d4882a46647..79fca2651b65 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
@@ -381,7 +381,7 @@ static void __init cpu_init_thread_core_maps(int tpc) | |||
381 | int i; | 381 | int i; |
382 | 382 | ||
383 | threads_per_core = tpc; | 383 | threads_per_core = tpc; |
384 | threads_core_mask = CPU_MASK_NONE; | 384 | cpumask_clear(&threads_core_mask); |
385 | 385 | ||
386 | /* This implementation only supports power of 2 number of threads | 386 | /* This implementation only supports power of 2 number of threads |
387 | * for simplicity and performance | 387 | * for simplicity and performance |
@@ -390,7 +390,7 @@ static void __init cpu_init_thread_core_maps(int tpc) | |||
390 | BUG_ON(tpc != (1 << threads_shift)); | 390 | BUG_ON(tpc != (1 << threads_shift)); |
391 | 391 | ||
392 | for (i = 0; i < tpc; i++) | 392 | for (i = 0; i < tpc; i++) |
393 | cpu_set(i, threads_core_mask); | 393 | cpumask_set_cpu(i, &threads_core_mask); |
394 | 394 | ||
395 | printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n", | 395 | printk(KERN_INFO "CPU maps initialized for %d thread%s per core\n", |
396 | tpc, tpc > 1 ? "s" : ""); | 396 | tpc, tpc > 1 ? "s" : ""); |
@@ -404,7 +404,7 @@ static void __init cpu_init_thread_core_maps(int tpc) | |||
404 | * cpu_present_mask | 404 | * cpu_present_mask |
405 | * | 405 | * |
406 | * Having the possible map set up early allows us to restrict allocations | 406 | * Having the possible map set up early allows us to restrict allocations |
407 | * of things like irqstacks to num_possible_cpus() rather than NR_CPUS. | 407 | * of things like irqstacks to nr_cpu_ids rather than NR_CPUS. |
408 | * | 408 | * |
409 | * We do not initialize the online map here; cpus set their own bits in | 409 | * We do not initialize the online map here; cpus set their own bits in |
410 | * cpu_online_mask as they come up. | 410 | * cpu_online_mask as they come up. |
@@ -424,7 +424,7 @@ void __init smp_setup_cpu_maps(void) | |||
424 | 424 | ||
425 | DBG("smp_setup_cpu_maps()\n"); | 425 | DBG("smp_setup_cpu_maps()\n"); |
426 | 426 | ||
427 | while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) { | 427 | while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < nr_cpu_ids) { |
428 | const int *intserv; | 428 | const int *intserv; |
429 | int j, len; | 429 | int j, len; |
430 | 430 | ||
@@ -443,7 +443,7 @@ void __init smp_setup_cpu_maps(void) | |||
443 | intserv = &cpu; /* assume logical == phys */ | 443 | intserv = &cpu; /* assume logical == phys */ |
444 | } | 444 | } |
445 | 445 | ||
446 | for (j = 0; j < nthreads && cpu < NR_CPUS; j++) { | 446 | for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { |
447 | DBG(" thread %d -> cpu %d (hard id %d)\n", | 447 | DBG(" thread %d -> cpu %d (hard id %d)\n", |
448 | j, cpu, intserv[j]); | 448 | j, cpu, intserv[j]); |
449 | set_cpu_present(cpu, true); | 449 | set_cpu_present(cpu, true); |
@@ -483,12 +483,12 @@ void __init smp_setup_cpu_maps(void) | |||
483 | if (cpu_has_feature(CPU_FTR_SMT)) | 483 | if (cpu_has_feature(CPU_FTR_SMT)) |
484 | maxcpus *= nthreads; | 484 | maxcpus *= nthreads; |
485 | 485 | ||
486 | if (maxcpus > NR_CPUS) { | 486 | if (maxcpus > nr_cpu_ids) { |
487 | printk(KERN_WARNING | 487 | printk(KERN_WARNING |
488 | "Partition configured for %d cpus, " | 488 | "Partition configured for %d cpus, " |
489 | "operating system maximum is %d.\n", | 489 | "operating system maximum is %d.\n", |
490 | maxcpus, NR_CPUS); | 490 | maxcpus, nr_cpu_ids); |
491 | maxcpus = NR_CPUS; | 491 | maxcpus = nr_cpu_ids; |
492 | } else | 492 | } else |
493 | printk(KERN_INFO "Partition configured for %d cpus.\n", | 493 | printk(KERN_INFO "Partition configured for %d cpus.\n", |
494 | maxcpus); | 494 | maxcpus); |
@@ -509,6 +509,9 @@ void __init smp_setup_cpu_maps(void) | |||
509 | */ | 509 | */ |
510 | cpu_init_thread_core_maps(nthreads); | 510 | cpu_init_thread_core_maps(nthreads); |
511 | 511 | ||
512 | /* Now that possible cpus are set, set nr_cpu_ids for later use */ | ||
513 | setup_nr_cpu_ids(); | ||
514 | |||
512 | free_unused_pacas(); | 515 | free_unused_pacas(); |
513 | } | 516 | } |
514 | #endif /* CONFIG_SMP */ | 517 | #endif /* CONFIG_SMP */ |
@@ -599,6 +602,10 @@ int check_legacy_ioport(unsigned long base_port) | |||
599 | * name instead */ | 602 | * name instead */ |
600 | if (!np) | 603 | if (!np) |
601 | np = of_find_node_by_name(NULL, "8042"); | 604 | np = of_find_node_by_name(NULL, "8042"); |
605 | if (np) { | ||
606 | of_i8042_kbd_irq = 1; | ||
607 | of_i8042_aux_irq = 12; | ||
608 | } | ||
602 | break; | 609 | break; |
603 | case FDC_BASE: /* FDC1 */ | 610 | case FDC_BASE: /* FDC1 */ |
604 | np = of_find_node_by_type(NULL, "fdc"); | 611 | np = of_find_node_by_type(NULL, "fdc"); |
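
The setup-common.c hunks are a straight conversion from the old value-based cpumask API (CPU_MASK_NONE, cpu_set()) to the pointer-based cpumask_clear()/cpumask_set_cpu() accessors, and from the compile-time NR_CPUS bound to nr_cpu_ids, which is fixed once the possible map is known (hence the new setup_nr_cpu_ids() call). A condensed sketch of the idiom (illustrative only, not the patched function itself):

#include <linux/cpumask.h>

static cpumask_t threads_core_mask;

static void init_thread_core_mask(int tpc)
{
	int i;

	cpumask_clear(&threads_core_mask);		/* was: = CPU_MASK_NONE */
	for (i = 0; i < tpc && i < nr_cpu_ids; i++)	/* bound by nr_cpu_ids, not NR_CPUS */
		cpumask_set_cpu(i, &threads_core_mask);	/* was: cpu_set(i, ...) */
}
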
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index 93666f9cabf1..620d792b52e4 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -46,8 +46,9 @@ | |||
46 | 46 | ||
47 | extern void bootx_init(unsigned long r4, unsigned long phys); | 47 | extern void bootx_init(unsigned long r4, unsigned long phys); |
48 | 48 | ||
49 | int boot_cpuid; | 49 | int boot_cpuid = -1; |
50 | EXPORT_SYMBOL_GPL(boot_cpuid); | 50 | EXPORT_SYMBOL_GPL(boot_cpuid); |
51 | int __initdata boot_cpu_count; | ||
51 | int boot_cpuid_phys; | 52 | int boot_cpuid_phys; |
52 | 53 | ||
53 | int smp_hw_index[NR_CPUS]; | 54 | int smp_hw_index[NR_CPUS]; |
@@ -246,7 +247,7 @@ static void __init irqstack_early_init(void) | |||
246 | unsigned int i; | 247 | unsigned int i; |
247 | 248 | ||
248 | /* interrupt stacks must be in lowmem, we get that for free on ppc32 | 249 | /* interrupt stacks must be in lowmem, we get that for free on ppc32 |
249 | * as the memblock is limited to lowmem by MEMBLOCK_REAL_LIMIT */ | 250 | * as the memblock is limited to lowmem by default */ |
250 | for_each_possible_cpu(i) { | 251 | for_each_possible_cpu(i) { |
251 | softirq_ctx[i] = (struct thread_info *) | 252 | softirq_ctx[i] = (struct thread_info *) |
252 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); | 253 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); |
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index e72690ec9b87..a88bf2713d41 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -62,6 +62,7 @@ | |||
62 | #include <asm/udbg.h> | 62 | #include <asm/udbg.h> |
63 | #include <asm/kexec.h> | 63 | #include <asm/kexec.h> |
64 | #include <asm/mmu_context.h> | 64 | #include <asm/mmu_context.h> |
65 | #include <asm/code-patching.h> | ||
65 | 66 | ||
66 | #include "setup.h" | 67 | #include "setup.h" |
67 | 68 | ||
@@ -72,6 +73,7 @@ | |||
72 | #endif | 73 | #endif |
73 | 74 | ||
74 | int boot_cpuid = 0; | 75 | int boot_cpuid = 0; |
76 | int __initdata boot_cpu_count; | ||
75 | u64 ppc64_pft_size; | 77 | u64 ppc64_pft_size; |
76 | 78 | ||
77 | /* Pick defaults since we might want to patch instructions | 79 | /* Pick defaults since we might want to patch instructions |
@@ -233,6 +235,7 @@ void early_setup_secondary(void) | |||
233 | void smp_release_cpus(void) | 235 | void smp_release_cpus(void) |
234 | { | 236 | { |
235 | unsigned long *ptr; | 237 | unsigned long *ptr; |
238 | int i; | ||
236 | 239 | ||
237 | DBG(" -> smp_release_cpus()\n"); | 240 | DBG(" -> smp_release_cpus()\n"); |
238 | 241 | ||
@@ -245,7 +248,16 @@ void smp_release_cpus(void) | |||
245 | ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop | 248 | ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop |
246 | - PHYSICAL_START); | 249 | - PHYSICAL_START); |
247 | *ptr = __pa(generic_secondary_smp_init); | 250 | *ptr = __pa(generic_secondary_smp_init); |
248 | mb(); | 251 | |
252 | /* And wait a bit for them to catch up */ | ||
253 | for (i = 0; i < 100000; i++) { | ||
254 | mb(); | ||
255 | HMT_low(); | ||
256 | if (boot_cpu_count == 0) | ||
257 | break; | ||
258 | udelay(1); | ||
259 | } | ||
260 | DBG("boot_cpu_count = %d\n", boot_cpu_count); | ||
249 | 261 | ||
250 | DBG(" <- smp_release_cpus()\n"); | 262 | DBG(" <- smp_release_cpus()\n"); |
251 | } | 263 | } |
@@ -423,22 +435,35 @@ void __init setup_system(void) | |||
423 | DBG(" <- setup_system()\n"); | 435 | DBG(" <- setup_system()\n"); |
424 | } | 436 | } |
425 | 437 | ||
426 | static u64 slb0_limit(void) | 438 | /* This returns the limit below which memory accesses to the linear |
439 | * mapping are guaranteed not to cause a TLB or SLB miss. This is | ||
440 | * used to allocate interrupt or emergency stacks for which our | ||
441 | * exception entry path doesn't deal with being interrupted. | ||
442 | */ | ||
443 | static u64 safe_stack_limit(void) | ||
427 | { | 444 | { |
428 | if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) { | 445 | #ifdef CONFIG_PPC_BOOK3E |
446 | /* Freescale BookE bolts the entire linear mapping */ | ||
447 | if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) | ||
448 | return linear_map_top; | ||
449 | /* Other BookE, we assume the first GB is bolted */ | ||
450 | return 1ul << 30; | ||
451 | #else | ||
452 | /* BookS, the first segment is bolted */ | ||
453 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) | ||
429 | return 1UL << SID_SHIFT_1T; | 454 | return 1UL << SID_SHIFT_1T; |
430 | } | ||
431 | return 1UL << SID_SHIFT; | 455 | return 1UL << SID_SHIFT; |
456 | #endif | ||
432 | } | 457 | } |
433 | 458 | ||
434 | static void __init irqstack_early_init(void) | 459 | static void __init irqstack_early_init(void) |
435 | { | 460 | { |
436 | u64 limit = slb0_limit(); | 461 | u64 limit = safe_stack_limit(); |
437 | unsigned int i; | 462 | unsigned int i; |
438 | 463 | ||
439 | /* | 464 | /* |
440 | * interrupt stacks must be under 256MB, we cannot afford to take | 465 | * Interrupt stacks must be in the first segment since we |
441 | * SLB misses on them. | 466 | * cannot afford to take SLB misses on them. |
442 | */ | 467 | */ |
443 | for_each_possible_cpu(i) { | 468 | for_each_possible_cpu(i) { |
444 | softirq_ctx[i] = (struct thread_info *) | 469 | softirq_ctx[i] = (struct thread_info *) |
@@ -453,6 +478,9 @@ static void __init irqstack_early_init(void) | |||
453 | #ifdef CONFIG_PPC_BOOK3E | 478 | #ifdef CONFIG_PPC_BOOK3E |
454 | static void __init exc_lvl_early_init(void) | 479 | static void __init exc_lvl_early_init(void) |
455 | { | 480 | { |
481 | extern unsigned int interrupt_base_book3e; | ||
482 | extern unsigned int exc_debug_debug_book3e; | ||
483 | |||
456 | unsigned int i; | 484 | unsigned int i; |
457 | 485 | ||
458 | for_each_possible_cpu(i) { | 486 | for_each_possible_cpu(i) { |
@@ -463,6 +491,10 @@ static void __init exc_lvl_early_init(void) | |||
463 | mcheckirq_ctx[i] = (struct thread_info *) | 491 | mcheckirq_ctx[i] = (struct thread_info *) |
464 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); | 492 | __va(memblock_alloc(THREAD_SIZE, THREAD_SIZE)); |
465 | } | 493 | } |
494 | |||
495 | if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) | ||
496 | patch_branch(&interrupt_base_book3e + (0x040 / 4) + 1, | ||
497 | (unsigned long)&exc_debug_debug_book3e, 0); | ||
466 | } | 498 | } |
467 | #else | 499 | #else |
468 | #define exc_lvl_early_init() | 500 | #define exc_lvl_early_init() |
@@ -486,7 +518,7 @@ static void __init emergency_stack_init(void) | |||
486 | * bringup, we need to get at them in real mode. This means they | 518 | * bringup, we need to get at them in real mode. This means they |
487 | * must also be within the RMO region. | 519 | * must also be within the RMO region. |
488 | */ | 520 | */ |
489 | limit = min(slb0_limit(), memblock.rmo_size); | 521 | limit = min(safe_stack_limit(), ppc64_rma_size); |
490 | 522 | ||
491 | for_each_possible_cpu(i) { | 523 | for_each_possible_cpu(i) { |
492 | unsigned long sp; | 524 | unsigned long sp; |
@@ -497,9 +529,8 @@ static void __init emergency_stack_init(void) | |||
497 | } | 529 | } |
498 | 530 | ||
499 | /* | 531 | /* |
500 | * Called into from start_kernel, after lock_kernel has been called. | 532 | * Called into from start_kernel this initializes bootmem, which is used |
501 | * Initializes bootmem, which is unsed to manage page allocation until | 533 | * to manage page allocation until mem_init is called. |
502 | * mem_init is called. | ||
503 | */ | 534 | */ |
504 | void __init setup_arch(char **cmdline_p) | 535 | void __init setup_arch(char **cmdline_p) |
505 | { | 536 | { |
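
safe_stack_limit() generalises the old slb0_limit(): it returns the size of the region that is always bolted in the MMU, below which interrupt stacks must live; emergency stacks are additionally clamped to the RMA via min(safe_stack_limit(), ppc64_rma_size). A standalone restatement of the limits chosen above (the shift values 28 and 40 are assumptions taken from the 256MB and 1T segment sizes, not from this hunk):

#include <stdio.h>

int main(void)
{
	unsigned long long limit_256m_seg = 1ULL << 28;	/* SID_SHIFT, Book3S     */
	unsigned long long limit_1t_seg   = 1ULL << 40;	/* SID_SHIFT_1T, Book3S  */
	unsigned long long limit_book3e   = 1ULL << 30;	/* non-FSL Book3E: 1GB   */

	printf("bolted region: %lluMB / %lluGB / %lluGB\n",
	       limit_256m_seg >> 20, limit_1t_seg >> 30, limit_book3e >> 30);
	return 0;
}
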
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index b96a3a010c26..78b76dc54dfb 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c | |||
@@ -25,6 +25,7 @@ | |||
25 | #include <linux/errno.h> | 25 | #include <linux/errno.h> |
26 | #include <linux/elf.h> | 26 | #include <linux/elf.h> |
27 | #include <linux/ptrace.h> | 27 | #include <linux/ptrace.h> |
28 | #include <linux/ratelimit.h> | ||
28 | #ifdef CONFIG_PPC64 | 29 | #ifdef CONFIG_PPC64 |
29 | #include <linux/syscalls.h> | 30 | #include <linux/syscalls.h> |
30 | #include <linux/compat.h> | 31 | #include <linux/compat.h> |
@@ -892,11 +893,12 @@ badframe: | |||
892 | printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n", | 893 | printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n", |
893 | regs, frame, newsp); | 894 | regs, frame, newsp); |
894 | #endif | 895 | #endif |
895 | if (show_unhandled_signals && printk_ratelimit()) | 896 | if (show_unhandled_signals) |
896 | printk(KERN_INFO "%s[%d]: bad frame in handle_rt_signal32: " | 897 | printk_ratelimited(KERN_INFO |
897 | "%p nip %08lx lr %08lx\n", | 898 | "%s[%d]: bad frame in handle_rt_signal32: " |
898 | current->comm, current->pid, | 899 | "%p nip %08lx lr %08lx\n", |
899 | addr, regs->nip, regs->link); | 900 | current->comm, current->pid, |
901 | addr, regs->nip, regs->link); | ||
900 | 902 | ||
901 | force_sigsegv(sig, current); | 903 | force_sigsegv(sig, current); |
902 | return 0; | 904 | return 0; |
@@ -1058,11 +1060,12 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, | |||
1058 | return 0; | 1060 | return 0; |
1059 | 1061 | ||
1060 | bad: | 1062 | bad: |
1061 | if (show_unhandled_signals && printk_ratelimit()) | 1063 | if (show_unhandled_signals) |
1062 | printk(KERN_INFO "%s[%d]: bad frame in sys_rt_sigreturn: " | 1064 | printk_ratelimited(KERN_INFO |
1063 | "%p nip %08lx lr %08lx\n", | 1065 | "%s[%d]: bad frame in sys_rt_sigreturn: " |
1064 | current->comm, current->pid, | 1066 | "%p nip %08lx lr %08lx\n", |
1065 | rt_sf, regs->nip, regs->link); | 1067 | current->comm, current->pid, |
1068 | rt_sf, regs->nip, regs->link); | ||
1066 | 1069 | ||
1067 | force_sig(SIGSEGV, current); | 1070 | force_sig(SIGSEGV, current); |
1068 | return 0; | 1071 | return 0; |
@@ -1149,12 +1152,12 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
1149 | * We kill the task with a SIGSEGV in this situation. | 1152 | * We kill the task with a SIGSEGV in this situation. |
1150 | */ | 1153 | */ |
1151 | if (do_setcontext(ctx, regs, 1)) { | 1154 | if (do_setcontext(ctx, regs, 1)) { |
1152 | if (show_unhandled_signals && printk_ratelimit()) | 1155 | if (show_unhandled_signals) |
1153 | printk(KERN_INFO "%s[%d]: bad frame in " | 1156 | printk_ratelimited(KERN_INFO "%s[%d]: bad frame in " |
1154 | "sys_debug_setcontext: %p nip %08lx " | 1157 | "sys_debug_setcontext: %p nip %08lx " |
1155 | "lr %08lx\n", | 1158 | "lr %08lx\n", |
1156 | current->comm, current->pid, | 1159 | current->comm, current->pid, |
1157 | ctx, regs->nip, regs->link); | 1160 | ctx, regs->nip, regs->link); |
1158 | 1161 | ||
1159 | force_sig(SIGSEGV, current); | 1162 | force_sig(SIGSEGV, current); |
1160 | goto out; | 1163 | goto out; |
@@ -1236,11 +1239,12 @@ badframe: | |||
1236 | printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n", | 1239 | printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n", |
1237 | regs, frame, newsp); | 1240 | regs, frame, newsp); |
1238 | #endif | 1241 | #endif |
1239 | if (show_unhandled_signals && printk_ratelimit()) | 1242 | if (show_unhandled_signals) |
1240 | printk(KERN_INFO "%s[%d]: bad frame in handle_signal32: " | 1243 | printk_ratelimited(KERN_INFO |
1241 | "%p nip %08lx lr %08lx\n", | 1244 | "%s[%d]: bad frame in handle_signal32: " |
1242 | current->comm, current->pid, | 1245 | "%p nip %08lx lr %08lx\n", |
1243 | frame, regs->nip, regs->link); | 1246 | current->comm, current->pid, |
1247 | frame, regs->nip, regs->link); | ||
1244 | 1248 | ||
1245 | force_sigsegv(sig, current); | 1249 | force_sigsegv(sig, current); |
1246 | return 0; | 1250 | return 0; |
@@ -1288,11 +1292,12 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, | |||
1288 | return 0; | 1292 | return 0; |
1289 | 1293 | ||
1290 | badframe: | 1294 | badframe: |
1291 | if (show_unhandled_signals && printk_ratelimit()) | 1295 | if (show_unhandled_signals) |
1292 | printk(KERN_INFO "%s[%d]: bad frame in sys_sigreturn: " | 1296 | printk_ratelimited(KERN_INFO |
1293 | "%p nip %08lx lr %08lx\n", | 1297 | "%s[%d]: bad frame in sys_sigreturn: " |
1294 | current->comm, current->pid, | 1298 | "%p nip %08lx lr %08lx\n", |
1295 | addr, regs->nip, regs->link); | 1299 | current->comm, current->pid, |
1300 | addr, regs->nip, regs->link); | ||
1296 | 1301 | ||
1297 | force_sig(SIGSEGV, current); | 1302 | force_sig(SIGSEGV, current); |
1298 | return 0; | 1303 | return 0; |
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 27c4a4584f80..e91c736cc842 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/elf.h> | 24 | #include <linux/elf.h> |
25 | #include <linux/ptrace.h> | 25 | #include <linux/ptrace.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/ratelimit.h> | ||
27 | 28 | ||
28 | #include <asm/sigcontext.h> | 29 | #include <asm/sigcontext.h> |
29 | #include <asm/ucontext.h> | 30 | #include <asm/ucontext.h> |
@@ -380,10 +381,10 @@ badframe: | |||
380 | printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n", | 381 | printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n", |
381 | regs, uc, &uc->uc_mcontext); | 382 | regs, uc, &uc->uc_mcontext); |
382 | #endif | 383 | #endif |
383 | if (show_unhandled_signals && printk_ratelimit()) | 384 | if (show_unhandled_signals) |
384 | printk(regs->msr & MSR_SF ? fmt64 : fmt32, | 385 | printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, |
385 | current->comm, current->pid, "rt_sigreturn", | 386 | current->comm, current->pid, "rt_sigreturn", |
386 | (long)uc, regs->nip, regs->link); | 387 | (long)uc, regs->nip, regs->link); |
387 | 388 | ||
388 | force_sig(SIGSEGV, current); | 389 | force_sig(SIGSEGV, current); |
389 | return 0; | 390 | return 0; |
@@ -468,10 +469,10 @@ badframe: | |||
468 | printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n", | 469 | printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n", |
469 | regs, frame, newsp); | 470 | regs, frame, newsp); |
470 | #endif | 471 | #endif |
471 | if (show_unhandled_signals && printk_ratelimit()) | 472 | if (show_unhandled_signals) |
472 | printk(regs->msr & MSR_SF ? fmt64 : fmt32, | 473 | printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, |
473 | current->comm, current->pid, "setup_rt_frame", | 474 | current->comm, current->pid, "setup_rt_frame", |
474 | (long)frame, regs->nip, regs->link); | 475 | (long)frame, regs->nip, regs->link); |
475 | 476 | ||
476 | force_sigsegv(signr, current); | 477 | force_sigsegv(signr, current); |
477 | return 0; | 478 | return 0; |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 0008bc58e826..8ebc6700b98d 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -57,6 +57,25 @@ | |||
57 | #define DBG(fmt...) | 57 | #define DBG(fmt...) |
58 | #endif | 58 | #endif |
59 | 59 | ||
60 | |||
61 | /* Store all idle threads, this can be reused instead of creating | ||
62 | * a new thread. Also avoids complicated thread destroy functionality | ||
63 | * for idle threads. | ||
64 | */ | ||
65 | #ifdef CONFIG_HOTPLUG_CPU | ||
66 | /* | ||
67 | * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is | ||
68 | * removed after init for !CONFIG_HOTPLUG_CPU. | ||
69 | */ | ||
70 | static DEFINE_PER_CPU(struct task_struct *, idle_thread_array); | ||
71 | #define get_idle_for_cpu(x) (per_cpu(idle_thread_array, x)) | ||
72 | #define set_idle_for_cpu(x, p) (per_cpu(idle_thread_array, x) = (p)) | ||
73 | #else | ||
74 | static struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata ; | ||
75 | #define get_idle_for_cpu(x) (idle_thread_array[(x)]) | ||
76 | #define set_idle_for_cpu(x, p) (idle_thread_array[(x)] = (p)) | ||
77 | #endif | ||
78 | |||
60 | struct thread_info *secondary_ti; | 79 | struct thread_info *secondary_ti; |
61 | 80 | ||
62 | DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); | 81 | DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); |
@@ -76,7 +95,7 @@ int smt_enabled_at_boot = 1; | |||
76 | static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; | 95 | static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; |
77 | 96 | ||
78 | #ifdef CONFIG_PPC64 | 97 | #ifdef CONFIG_PPC64 |
79 | void __devinit smp_generic_kick_cpu(int nr) | 98 | int __devinit smp_generic_kick_cpu(int nr) |
80 | { | 99 | { |
81 | BUG_ON(nr < 0 || nr >= NR_CPUS); | 100 | BUG_ON(nr < 0 || nr >= NR_CPUS); |
82 | 101 | ||
@@ -87,37 +106,10 @@ void __devinit smp_generic_kick_cpu(int nr) | |||
87 | */ | 106 | */ |
88 | paca[nr].cpu_start = 1; | 107 | paca[nr].cpu_start = 1; |
89 | smp_mb(); | 108 | smp_mb(); |
90 | } | ||
91 | #endif | ||
92 | 109 | ||
93 | void smp_message_recv(int msg) | 110 | return 0; |
94 | { | ||
95 | switch(msg) { | ||
96 | case PPC_MSG_CALL_FUNCTION: | ||
97 | generic_smp_call_function_interrupt(); | ||
98 | break; | ||
99 | case PPC_MSG_RESCHEDULE: | ||
100 | /* we notice need_resched on exit */ | ||
101 | break; | ||
102 | case PPC_MSG_CALL_FUNC_SINGLE: | ||
103 | generic_smp_call_function_single_interrupt(); | ||
104 | break; | ||
105 | case PPC_MSG_DEBUGGER_BREAK: | ||
106 | if (crash_ipi_function_ptr) { | ||
107 | crash_ipi_function_ptr(get_irq_regs()); | ||
108 | break; | ||
109 | } | ||
110 | #ifdef CONFIG_DEBUGGER | ||
111 | debugger_ipi(get_irq_regs()); | ||
112 | break; | ||
113 | #endif /* CONFIG_DEBUGGER */ | ||
114 | /* FALLTHROUGH */ | ||
115 | default: | ||
116 | printk("SMP %d: smp_message_recv(): unknown msg %d\n", | ||
117 | smp_processor_id(), msg); | ||
118 | break; | ||
119 | } | ||
120 | } | 111 | } |
112 | #endif | ||
121 | 113 | ||
122 | static irqreturn_t call_function_action(int irq, void *data) | 114 | static irqreturn_t call_function_action(int irq, void *data) |
123 | { | 115 | { |
@@ -127,7 +119,7 @@ static irqreturn_t call_function_action(int irq, void *data) | |||
127 | 119 | ||
128 | static irqreturn_t reschedule_action(int irq, void *data) | 120 | static irqreturn_t reschedule_action(int irq, void *data) |
129 | { | 121 | { |
130 | /* we just need the return path side effect of checking need_resched */ | 122 | scheduler_ipi(); |
131 | return IRQ_HANDLED; | 123 | return IRQ_HANDLED; |
132 | } | 124 | } |
133 | 125 | ||
@@ -139,7 +131,15 @@ static irqreturn_t call_function_single_action(int irq, void *data) | |||
139 | 131 | ||
140 | static irqreturn_t debug_ipi_action(int irq, void *data) | 132 | static irqreturn_t debug_ipi_action(int irq, void *data) |
141 | { | 133 | { |
142 | smp_message_recv(PPC_MSG_DEBUGGER_BREAK); | 134 | if (crash_ipi_function_ptr) { |
135 | crash_ipi_function_ptr(get_irq_regs()); | ||
136 | return IRQ_HANDLED; | ||
137 | } | ||
138 | |||
139 | #ifdef CONFIG_DEBUGGER | ||
140 | debugger_ipi(get_irq_regs()); | ||
141 | #endif /* CONFIG_DEBUGGER */ | ||
142 | |||
143 | return IRQ_HANDLED; | 143 | return IRQ_HANDLED; |
144 | } | 144 | } |
145 | 145 | ||
@@ -178,6 +178,66 @@ int smp_request_message_ipi(int virq, int msg) | |||
178 | return err; | 178 | return err; |
179 | } | 179 | } |
180 | 180 | ||
181 | #ifdef CONFIG_PPC_SMP_MUXED_IPI | ||
182 | struct cpu_messages { | ||
183 | int messages; /* current messages */ | ||
184 | unsigned long data; /* data for cause ipi */ | ||
185 | }; | ||
186 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message); | ||
187 | |||
188 | void smp_muxed_ipi_set_data(int cpu, unsigned long data) | ||
189 | { | ||
190 | struct cpu_messages *info = &per_cpu(ipi_message, cpu); | ||
191 | |||
192 | info->data = data; | ||
193 | } | ||
194 | |||
195 | void smp_muxed_ipi_message_pass(int cpu, int msg) | ||
196 | { | ||
197 | struct cpu_messages *info = &per_cpu(ipi_message, cpu); | ||
198 | char *message = (char *)&info->messages; | ||
199 | |||
200 | message[msg] = 1; | ||
201 | mb(); | ||
202 | smp_ops->cause_ipi(cpu, info->data); | ||
203 | } | ||
204 | |||
205 | void smp_muxed_ipi_resend(void) | ||
206 | { | ||
207 | struct cpu_messages *info = &__get_cpu_var(ipi_message); | ||
208 | |||
209 | if (info->messages) | ||
210 | smp_ops->cause_ipi(smp_processor_id(), info->data); | ||
211 | } | ||
212 | |||
213 | irqreturn_t smp_ipi_demux(void) | ||
214 | { | ||
215 | struct cpu_messages *info = &__get_cpu_var(ipi_message); | ||
216 | unsigned int all; | ||
217 | |||
218 | mb(); /* order any irq clear */ | ||
219 | |||
220 | do { | ||
221 | all = xchg_local(&info->messages, 0); | ||
222 | |||
223 | #ifdef __BIG_ENDIAN | ||
224 | if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNCTION))) | ||
225 | generic_smp_call_function_interrupt(); | ||
226 | if (all & (1 << (24 - 8 * PPC_MSG_RESCHEDULE))) | ||
227 | scheduler_ipi(); | ||
228 | if (all & (1 << (24 - 8 * PPC_MSG_CALL_FUNC_SINGLE))) | ||
229 | generic_smp_call_function_single_interrupt(); | ||
230 | if (all & (1 << (24 - 8 * PPC_MSG_DEBUGGER_BREAK))) | ||
231 | debug_ipi_action(0, NULL); | ||
232 | #else | ||
233 | #error Unsupported ENDIAN | ||
234 | #endif | ||
235 | } while (info->messages); | ||
236 | |||
237 | return IRQ_HANDLED; | ||
238 | } | ||
239 | #endif /* CONFIG_PPC_SMP_MUXED_IPI */ | ||
240 | |||
181 | void smp_send_reschedule(int cpu) | 241 | void smp_send_reschedule(int cpu) |
182 | { | 242 | { |
183 | if (likely(smp_ops)) | 243 | if (likely(smp_ops)) |
@@ -197,11 +257,18 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask) | |||
197 | smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); | 257 | smp_ops->message_pass(cpu, PPC_MSG_CALL_FUNCTION); |
198 | } | 258 | } |
199 | 259 | ||
200 | #ifdef CONFIG_DEBUGGER | 260 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) |
201 | void smp_send_debugger_break(int cpu) | 261 | void smp_send_debugger_break(void) |
202 | { | 262 | { |
203 | if (likely(smp_ops)) | 263 | int cpu; |
204 | smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK); | 264 | int me = raw_smp_processor_id(); |
265 | |||
266 | if (unlikely(!smp_ops)) | ||
267 | return; | ||
268 | |||
269 | for_each_online_cpu(cpu) | ||
270 | if (cpu != me) | ||
271 | smp_ops->message_pass(cpu, PPC_MSG_DEBUGGER_BREAK); | ||
205 | } | 272 | } |
206 | #endif | 273 | #endif |
207 | 274 | ||
@@ -209,9 +276,9 @@ void smp_send_debugger_break(int cpu) | |||
209 | void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) | 276 | void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) |
210 | { | 277 | { |
211 | crash_ipi_function_ptr = crash_ipi_callback; | 278 | crash_ipi_function_ptr = crash_ipi_callback; |
212 | if (crash_ipi_callback && smp_ops) { | 279 | if (crash_ipi_callback) { |
213 | mb(); | 280 | mb(); |
214 | smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK); | 281 | smp_send_debugger_break(); |
215 | } | 282 | } |
216 | } | 283 | } |
217 | #endif | 284 | #endif |
@@ -238,23 +305,6 @@ static void __devinit smp_store_cpu_info(int id) | |||
238 | per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); | 305 | per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); |
239 | } | 306 | } |
240 | 307 | ||
241 | static void __init smp_create_idle(unsigned int cpu) | ||
242 | { | ||
243 | struct task_struct *p; | ||
244 | |||
245 | /* create a process for the processor */ | ||
246 | p = fork_idle(cpu); | ||
247 | if (IS_ERR(p)) | ||
248 | panic("failed fork for CPU %u: %li", cpu, PTR_ERR(p)); | ||
249 | #ifdef CONFIG_PPC64 | ||
250 | paca[cpu].__current = p; | ||
251 | paca[cpu].kstack = (unsigned long) task_thread_info(p) | ||
252 | + THREAD_SIZE - STACK_FRAME_OVERHEAD; | ||
253 | #endif | ||
254 | current_set[cpu] = task_thread_info(p); | ||
255 | task_thread_info(p)->cpu = cpu; | ||
256 | } | ||
257 | |||
258 | void __init smp_prepare_cpus(unsigned int max_cpus) | 308 | void __init smp_prepare_cpus(unsigned int max_cpus) |
259 | { | 309 | { |
260 | unsigned int cpu; | 310 | unsigned int cpu; |
@@ -288,10 +338,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
288 | max_cpus = NR_CPUS; | 338 | max_cpus = NR_CPUS; |
289 | else | 339 | else |
290 | max_cpus = 1; | 340 | max_cpus = 1; |
291 | |||
292 | for_each_possible_cpu(cpu) | ||
293 | if (cpu != boot_cpuid) | ||
294 | smp_create_idle(cpu); | ||
295 | } | 341 | } |
296 | 342 | ||
297 | void __devinit smp_prepare_boot_cpu(void) | 343 | void __devinit smp_prepare_boot_cpu(void) |
@@ -305,7 +351,7 @@ void __devinit smp_prepare_boot_cpu(void) | |||
305 | 351 | ||
306 | #ifdef CONFIG_HOTPLUG_CPU | 352 | #ifdef CONFIG_HOTPLUG_CPU |
307 | /* State of each CPU during hotplug phases */ | 353 | /* State of each CPU during hotplug phases */ |
308 | DEFINE_PER_CPU(int, cpu_state) = { 0 }; | 354 | static DEFINE_PER_CPU(int, cpu_state) = { 0 }; |
309 | 355 | ||
310 | int generic_cpu_disable(void) | 356 | int generic_cpu_disable(void) |
311 | { | 357 | { |
@@ -317,30 +363,8 @@ int generic_cpu_disable(void) | |||
317 | set_cpu_online(cpu, false); | 363 | set_cpu_online(cpu, false); |
318 | #ifdef CONFIG_PPC64 | 364 | #ifdef CONFIG_PPC64 |
319 | vdso_data->processorCount--; | 365 | vdso_data->processorCount--; |
320 | fixup_irqs(cpu_online_mask); | ||
321 | #endif | ||
322 | return 0; | ||
323 | } | ||
324 | |||
325 | int generic_cpu_enable(unsigned int cpu) | ||
326 | { | ||
327 | /* Do the normal bootup if we haven't | ||
328 | * already bootstrapped. */ | ||
329 | if (system_state != SYSTEM_RUNNING) | ||
330 | return -ENOSYS; | ||
331 | |||
332 | /* get the target out of it's holding state */ | ||
333 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; | ||
334 | smp_wmb(); | ||
335 | |||
336 | while (!cpu_online(cpu)) | ||
337 | cpu_relax(); | ||
338 | |||
339 | #ifdef CONFIG_PPC64 | ||
340 | fixup_irqs(cpu_online_mask); | ||
341 | /* counter the irq disable in fixup_irqs */ | ||
342 | local_irq_enable(); | ||
343 | #endif | 366 | #endif |
367 | migrate_irqs(); | ||
344 | return 0; | 368 | return 0; |
345 | } | 369 | } |
346 | 370 | ||
@@ -362,37 +386,89 @@ void generic_mach_cpu_die(void) | |||
362 | unsigned int cpu; | 386 | unsigned int cpu; |
363 | 387 | ||
364 | local_irq_disable(); | 388 | local_irq_disable(); |
389 | idle_task_exit(); | ||
365 | cpu = smp_processor_id(); | 390 | cpu = smp_processor_id(); |
366 | printk(KERN_DEBUG "CPU%d offline\n", cpu); | 391 | printk(KERN_DEBUG "CPU%d offline\n", cpu); |
367 | __get_cpu_var(cpu_state) = CPU_DEAD; | 392 | __get_cpu_var(cpu_state) = CPU_DEAD; |
368 | smp_wmb(); | 393 | smp_wmb(); |
369 | while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) | 394 | while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE) |
370 | cpu_relax(); | 395 | cpu_relax(); |
371 | set_cpu_online(cpu, true); | 396 | } |
372 | local_irq_enable(); | 397 | |
398 | void generic_set_cpu_dead(unsigned int cpu) | ||
399 | { | ||
400 | per_cpu(cpu_state, cpu) = CPU_DEAD; | ||
373 | } | 401 | } |
374 | #endif | 402 | #endif |
375 | 403 | ||
376 | static int __devinit cpu_enable(unsigned int cpu) | 404 | struct create_idle { |
405 | struct work_struct work; | ||
406 | struct task_struct *idle; | ||
407 | struct completion done; | ||
408 | int cpu; | ||
409 | }; | ||
410 | |||
411 | static void __cpuinit do_fork_idle(struct work_struct *work) | ||
377 | { | 412 | { |
378 | if (smp_ops && smp_ops->cpu_enable) | 413 | struct create_idle *c_idle = |
379 | return smp_ops->cpu_enable(cpu); | 414 | container_of(work, struct create_idle, work); |
380 | 415 | ||
381 | return -ENOSYS; | 416 | c_idle->idle = fork_idle(c_idle->cpu); |
417 | complete(&c_idle->done); | ||
382 | } | 418 | } |
383 | 419 | ||
384 | int __cpuinit __cpu_up(unsigned int cpu) | 420 | static int __cpuinit create_idle(unsigned int cpu) |
385 | { | 421 | { |
386 | int c; | 422 | struct thread_info *ti; |
423 | struct create_idle c_idle = { | ||
424 | .cpu = cpu, | ||
425 | .done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done), | ||
426 | }; | ||
427 | INIT_WORK_ONSTACK(&c_idle.work, do_fork_idle); | ||
387 | 428 | ||
388 | secondary_ti = current_set[cpu]; | 429 | c_idle.idle = get_idle_for_cpu(cpu); |
389 | if (!cpu_enable(cpu)) | 430 | |
390 | return 0; | 431 | /* We can't use kernel_thread since we must avoid to |
432 | * reschedule the child. We use a workqueue because | ||
433 | * we want to fork from a kernel thread, not whatever | ||
434 | * userspace process happens to be trying to online us. | ||
435 | */ | ||
436 | if (!c_idle.idle) { | ||
437 | schedule_work(&c_idle.work); | ||
438 | wait_for_completion(&c_idle.done); | ||
439 | } else | ||
440 | init_idle(c_idle.idle, cpu); | ||
441 | if (IS_ERR(c_idle.idle)) { | ||
442 | pr_err("Failed fork for CPU %u: %li", cpu, PTR_ERR(c_idle.idle)); | ||
443 | return PTR_ERR(c_idle.idle); | ||
444 | } | ||
445 | ti = task_thread_info(c_idle.idle); | ||
446 | |||
447 | #ifdef CONFIG_PPC64 | ||
448 | paca[cpu].__current = c_idle.idle; | ||
449 | paca[cpu].kstack = (unsigned long)ti + THREAD_SIZE - STACK_FRAME_OVERHEAD; | ||
450 | #endif | ||
451 | ti->cpu = cpu; | ||
452 | current_set[cpu] = ti; | ||
453 | |||
454 | return 0; | ||
455 | } | ||
456 | |||
457 | int __cpuinit __cpu_up(unsigned int cpu) | ||
458 | { | ||
459 | int rc, c; | ||
391 | 460 | ||
392 | if (smp_ops == NULL || | 461 | if (smp_ops == NULL || |
393 | (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) | 462 | (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) |
394 | return -EINVAL; | 463 | return -EINVAL; |
395 | 464 | ||
465 | /* Make sure we have an idle thread */ | ||
466 | rc = create_idle(cpu); | ||
467 | if (rc) | ||
468 | return rc; | ||
469 | |||
470 | secondary_ti = current_set[cpu]; | ||
471 | |||
396 | /* Make sure callin-map entry is 0 (can be left over from a CPU | 472 | /* Make sure callin-map entry is 0 (can be left over from a CPU |
397 | * hotplug | 473 | * hotplug |
398 | */ | 474 | */ |
@@ -406,7 +482,11 @@ int __cpuinit __cpu_up(unsigned int cpu) | |||
406 | 482 | ||
407 | /* wake up cpus */ | 483 | /* wake up cpus */ |
408 | DBG("smp: kicking cpu %d\n", cpu); | 484 | DBG("smp: kicking cpu %d\n", cpu); |
409 | smp_ops->kick_cpu(cpu); | 485 | rc = smp_ops->kick_cpu(cpu); |
486 | if (rc) { | ||
487 | pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc); | ||
488 | return rc; | ||
489 | } | ||
410 | 490 | ||
411 | /* | 491 | /* |
412 | * wait to see if the cpu made a callin (is actually up). | 492 | * wait to see if the cpu made a callin (is actually up). |
@@ -466,6 +546,19 @@ out: | |||
466 | return id; | 546 | return id; |
467 | } | 547 | } |
468 | 548 | ||
549 | /* Helper routines for cpu to core mapping */ | ||
550 | int cpu_core_index_of_thread(int cpu) | ||
551 | { | ||
552 | return cpu >> threads_shift; | ||
553 | } | ||
554 | EXPORT_SYMBOL_GPL(cpu_core_index_of_thread); | ||
555 | |||
556 | int cpu_first_thread_of_core(int core) | ||
557 | { | ||
558 | return core << threads_shift; | ||
559 | } | ||
560 | EXPORT_SYMBOL_GPL(cpu_first_thread_of_core); | ||
561 | |||
469 | /* Must be called when no change can occur to cpu_present_mask, | 562 | /* Must be called when no change can occur to cpu_present_mask, |
470 | * i.e. during cpu online or offline. | 563 | * i.e. during cpu online or offline. |
471 | */ | 564 | */ |
@@ -489,7 +582,7 @@ static struct device_node *cpu_to_l2cache(int cpu) | |||
489 | } | 582 | } |
490 | 583 | ||
491 | /* Activate a secondary processor. */ | 584 | /* Activate a secondary processor. */ |
492 | int __devinit start_secondary(void *unused) | 585 | void __devinit start_secondary(void *unused) |
493 | { | 586 | { |
494 | unsigned int cpu = smp_processor_id(); | 587 | unsigned int cpu = smp_processor_id(); |
495 | struct device_node *l2_cache; | 588 | struct device_node *l2_cache; |
@@ -508,16 +601,17 @@ int __devinit start_secondary(void *unused) | |||
508 | if (smp_ops->take_timebase) | 601 | if (smp_ops->take_timebase) |
509 | smp_ops->take_timebase(); | 602 | smp_ops->take_timebase(); |
510 | 603 | ||
511 | if (system_state > SYSTEM_BOOTING) | ||
512 | snapshot_timebase(); | ||
513 | |||
514 | secondary_cpu_time_init(); | 604 | secondary_cpu_time_init(); |
515 | 605 | ||
606 | #ifdef CONFIG_PPC64 | ||
607 | if (system_state == SYSTEM_RUNNING) | ||
608 | vdso_data->processorCount++; | ||
609 | #endif | ||
516 | ipi_call_lock(); | 610 | ipi_call_lock(); |
517 | notify_cpu_starting(cpu); | 611 | notify_cpu_starting(cpu); |
518 | set_cpu_online(cpu, true); | 612 | set_cpu_online(cpu, true); |
519 | /* Update sibling maps */ | 613 | /* Update sibling maps */ |
520 | base = cpu_first_thread_in_core(cpu); | 614 | base = cpu_first_thread_sibling(cpu); |
521 | for (i = 0; i < threads_per_core; i++) { | 615 | for (i = 0; i < threads_per_core; i++) { |
522 | if (cpu_is_offline(base + i)) | 616 | if (cpu_is_offline(base + i)) |
523 | continue; | 617 | continue; |
@@ -548,7 +642,8 @@ int __devinit start_secondary(void *unused) | |||
548 | local_irq_enable(); | 642 | local_irq_enable(); |
549 | 643 | ||
550 | cpu_idle(); | 644 | cpu_idle(); |
551 | return 0; | 645 | |
646 | BUG(); | ||
552 | } | 647 | } |
553 | 648 | ||
554 | int setup_profiling_timer(unsigned int multiplier) | 649 | int setup_profiling_timer(unsigned int multiplier) |
@@ -565,7 +660,7 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
565 | * so we pin us down to CPU 0 for a short while | 660 | * so we pin us down to CPU 0 for a short while |
566 | */ | 661 | */ |
567 | alloc_cpumask_var(&old_mask, GFP_NOWAIT); | 662 | alloc_cpumask_var(&old_mask, GFP_NOWAIT); |
568 | cpumask_copy(old_mask, ¤t->cpus_allowed); | 663 | cpumask_copy(old_mask, tsk_cpus_allowed(current)); |
569 | set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid)); | 664 | set_cpus_allowed_ptr(current, cpumask_of(boot_cpuid)); |
570 | 665 | ||
571 | if (smp_ops && smp_ops->setup_cpu) | 666 | if (smp_ops && smp_ops->setup_cpu) |
@@ -575,9 +670,20 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
575 | 670 | ||
576 | free_cpumask_var(old_mask); | 671 | free_cpumask_var(old_mask); |
577 | 672 | ||
578 | snapshot_timebases(); | 673 | if (smp_ops && smp_ops->bringup_done) |
674 | smp_ops->bringup_done(); | ||
579 | 675 | ||
580 | dump_numa_cpu_topology(); | 676 | dump_numa_cpu_topology(); |
677 | |||
678 | } | ||
679 | |||
680 | int arch_sd_sibling_asym_packing(void) | ||
681 | { | ||
682 | if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { | ||
683 | printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n"); | ||
684 | return SD_ASYM_PACKING; | ||
685 | } | ||
686 | return 0; | ||
581 | } | 687 | } |
582 | 688 | ||
583 | #ifdef CONFIG_HOTPLUG_CPU | 689 | #ifdef CONFIG_HOTPLUG_CPU |
@@ -596,7 +702,7 @@ int __cpu_disable(void) | |||
596 | return err; | 702 | return err; |
597 | 703 | ||
598 | /* Update sibling maps */ | 704 | /* Update sibling maps */ |
599 | base = cpu_first_thread_in_core(cpu); | 705 | base = cpu_first_thread_sibling(cpu); |
600 | for (i = 0; i < threads_per_core; i++) { | 706 | for (i = 0; i < threads_per_core; i++) { |
601 | cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i)); | 707 | cpumask_clear_cpu(cpu, cpu_sibling_mask(base + i)); |
602 | cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu)); | 708 | cpumask_clear_cpu(base + i, cpu_sibling_mask(cpu)); |
@@ -643,5 +749,9 @@ void cpu_die(void) | |||
643 | { | 749 | { |
644 | if (ppc_md.cpu_die) | 750 | if (ppc_md.cpu_die) |
645 | ppc_md.cpu_die(); | 751 | ppc_md.cpu_die(); |
752 | |||
753 | /* If we return, we re-enter start_secondary */ | ||
754 | start_secondary_resume(); | ||
646 | } | 755 | } |
756 | |||
647 | #endif | 757 | #endif |
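
The new CONFIG_PPC_SMP_MUXED_IPI code multiplexes the four PPC_MSG_* message types over a single hardware IPI: the sender sets one byte of a per-cpu 32-bit word and calls smp_ops->cause_ipi(), and the receiver xchg_local()s the whole word back to zero and decodes whichever bytes were set. The "1 << (24 - 8 * msg)" tests in smp_ipi_demux() only work because byte index msg maps to that bit position on a big-endian CPU, which is why the little-endian case is a #error. A runnable userspace illustration of that encoding (the PPC_MSG_* numbering 0..3 is restated here as an assumption):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t messages = 0;
	unsigned char *bytes = (unsigned char *)&messages;
	int msg = 1;				/* e.g. PPC_MSG_RESCHEDULE */

	bytes[msg] = 1;				/* what the sender side does */

	/* On a big-endian CPU the word now reads 0x00010000, i.e. exactly
	 * bit (24 - 8*msg); on a little-endian host this demo prints
	 * 0x00000100 instead, which is why the demux code refuses to build
	 * there. */
	printf("messages           = 0x%08x\n", messages);
	printf("big-endian test bit = 0x%08x\n", 1u << (24 - 8 * msg));
	return 0;
}
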
diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c index 560c96119501..aa17b76dd427 100644 --- a/arch/powerpc/kernel/swsusp.c +++ b/arch/powerpc/kernel/swsusp.c | |||
@@ -10,7 +10,6 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <asm/suspend.h> | ||
14 | #include <asm/system.h> | 13 | #include <asm/system.h> |
15 | #include <asm/current.h> | 14 | #include <asm/current.h> |
16 | #include <asm/mmu_context.h> | 15 | #include <asm/mmu_context.h> |
diff --git a/arch/powerpc/kernel/swsusp_32.S b/arch/powerpc/kernel/swsusp_32.S index b0754e237438..ba4dee3d233f 100644 --- a/arch/powerpc/kernel/swsusp_32.S +++ b/arch/powerpc/kernel/swsusp_32.S | |||
@@ -143,7 +143,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) | |||
143 | 143 | ||
144 | /* Disable MSR:DR to make sure we don't take a TLB or | 144 | /* Disable MSR:DR to make sure we don't take a TLB or |
145 | * hash miss during the copy, as our hash table will | 145 | * hash miss during the copy, as our hash table will |
146 | * for a while be unuseable. For .text, we assume we are | 146 | * for a while be unusable. For .text, we assume we are |
147 | * covered by a BAT. This works only for non-G5 at this | 147 | * covered by a BAT. This works only for non-G5 at this |
148 | * point. G5 will need a better approach, possibly using | 148 | * point. G5 will need a better approach, possibly using |
149 | * a small temporary hash table filled with large mappings, | 149 | * a small temporary hash table filled with large mappings, |
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c index b1b6043a56c4..4e5bf1edc0f2 100644 --- a/arch/powerpc/kernel/sys_ppc32.c +++ b/arch/powerpc/kernel/sys_ppc32.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/resource.h> | 23 | #include <linux/resource.h> |
24 | #include <linux/times.h> | 24 | #include <linux/times.h> |
25 | #include <linux/smp.h> | 25 | #include <linux/smp.h> |
26 | #include <linux/smp_lock.h> | ||
27 | #include <linux/sem.h> | 26 | #include <linux/sem.h> |
28 | #include <linux/msg.h> | 27 | #include <linux/msg.h> |
29 | #include <linux/shm.h> | 28 | #include <linux/shm.h> |
diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c index c0d8c2006bf4..f0f2199e64e1 100644 --- a/arch/powerpc/kernel/sysfs.c +++ b/arch/powerpc/kernel/sysfs.c | |||
@@ -182,6 +182,41 @@ static SYSDEV_ATTR(mmcra, 0600, show_mmcra, store_mmcra); | |||
182 | static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL); | 182 | static SYSDEV_ATTR(spurr, 0600, show_spurr, NULL); |
183 | static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr); | 183 | static SYSDEV_ATTR(dscr, 0600, show_dscr, store_dscr); |
184 | static SYSDEV_ATTR(purr, 0600, show_purr, store_purr); | 184 | static SYSDEV_ATTR(purr, 0600, show_purr, store_purr); |
185 | |||
186 | unsigned long dscr_default = 0; | ||
187 | EXPORT_SYMBOL(dscr_default); | ||
188 | |||
189 | static ssize_t show_dscr_default(struct sysdev_class *class, | ||
190 | struct sysdev_class_attribute *attr, char *buf) | ||
191 | { | ||
192 | return sprintf(buf, "%lx\n", dscr_default); | ||
193 | } | ||
194 | |||
195 | static ssize_t __used store_dscr_default(struct sysdev_class *class, | ||
196 | struct sysdev_class_attribute *attr, const char *buf, | ||
197 | size_t count) | ||
198 | { | ||
199 | unsigned long val; | ||
200 | int ret = 0; | ||
201 | |||
202 | ret = sscanf(buf, "%lx", &val); | ||
203 | if (ret != 1) | ||
204 | return -EINVAL; | ||
205 | dscr_default = val; | ||
206 | |||
207 | return count; | ||
208 | } | ||
209 | |||
210 | static SYSDEV_CLASS_ATTR(dscr_default, 0600, | ||
211 | show_dscr_default, store_dscr_default); | ||
212 | |||
213 | static void sysfs_create_dscr_default(void) | ||
214 | { | ||
215 | int err = 0; | ||
216 | if (cpu_has_feature(CPU_FTR_DSCR)) | ||
217 | err = sysfs_create_file(&cpu_sysdev_class.kset.kobj, | ||
218 | &attr_dscr_default.attr); | ||
219 | } | ||
185 | #endif /* CONFIG_PPC64 */ | 220 | #endif /* CONFIG_PPC64 */ |
186 | 221 | ||
187 | #ifdef HAS_PPC_PMC_PA6T | 222 | #ifdef HAS_PPC_PMC_PA6T |
@@ -617,6 +652,9 @@ static int __init topology_init(void) | |||
617 | if (cpu_online(cpu)) | 652 | if (cpu_online(cpu)) |
618 | register_cpu_online(cpu); | 653 | register_cpu_online(cpu); |
619 | } | 654 | } |
655 | #ifdef CONFIG_PPC64 | ||
656 | sysfs_create_dscr_default(); | ||
657 | #endif /* CONFIG_PPC64 */ | ||
620 | 658 | ||
621 | return 0; | 659 | return 0; |
622 | } | 660 | } |
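
The sysfs.c hunk above adds a writable, system-wide default DSCR value exposed as a cpu sysdev-class attribute; show_dscr_default() prints it in hex and store_dscr_default() parses hex input. The sketch below is a hedged user-space illustration only: the sysfs path is an assumption based on the cpu sysdev class (typically /sys/devices/system/cpu/dscr_default on kernels of this era), the written value 0x8 is made up, and writing requires root since the attribute is created 0600.

    /* Hedged sketch: read and optionally update the new dscr_default
     * attribute from user space.  Path and sample value are assumptions. */
    #include <stdio.h>

    static const char *path = "/sys/devices/system/cpu/dscr_default";

    int main(void)
    {
        unsigned long dscr;
        FILE *f = fopen(path, "r");

        if (!f || fscanf(f, "%lx", &dscr) != 1) {   /* value is shown in hex */
            perror(path);
            return 1;
        }
        fclose(f);
        printf("current default DSCR: 0x%lx\n", dscr);

        f = fopen(path, "w");                       /* needs root; mode 0600 */
        if (f) {
            fprintf(f, "%lx\n", 0x8UL);             /* hypothetical new value */
            fclose(f);
        }
        return 0;
    }
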
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index 8533b3b83f5d..f33acfd872ad 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -53,7 +53,7 @@ | |||
53 | #include <linux/posix-timers.h> | 53 | #include <linux/posix-timers.h> |
54 | #include <linux/irq.h> | 54 | #include <linux/irq.h> |
55 | #include <linux/delay.h> | 55 | #include <linux/delay.h> |
56 | #include <linux/perf_event.h> | 56 | #include <linux/irq_work.h> |
57 | #include <asm/trace.h> | 57 | #include <asm/trace.h> |
58 | 58 | ||
59 | #include <asm/io.h> | 59 | #include <asm/io.h> |
@@ -155,16 +155,15 @@ EXPORT_SYMBOL_GPL(rtc_lock); | |||
155 | 155 | ||
156 | static u64 tb_to_ns_scale __read_mostly; | 156 | static u64 tb_to_ns_scale __read_mostly; |
157 | static unsigned tb_to_ns_shift __read_mostly; | 157 | static unsigned tb_to_ns_shift __read_mostly; |
158 | static unsigned long boot_tb __read_mostly; | 158 | static u64 boot_tb __read_mostly; |
159 | 159 | ||
160 | extern struct timezone sys_tz; | 160 | extern struct timezone sys_tz; |
161 | static long timezone_offset; | 161 | static long timezone_offset; |
162 | 162 | ||
163 | unsigned long ppc_proc_freq; | 163 | unsigned long ppc_proc_freq; |
164 | EXPORT_SYMBOL(ppc_proc_freq); | 164 | EXPORT_SYMBOL_GPL(ppc_proc_freq); |
165 | unsigned long ppc_tb_freq; | 165 | unsigned long ppc_tb_freq; |
166 | 166 | EXPORT_SYMBOL_GPL(ppc_tb_freq); | |
167 | static DEFINE_PER_CPU(u64, last_jiffy); | ||
168 | 167 | ||
169 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | 168 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING |
170 | /* | 169 | /* |
@@ -185,6 +184,8 @@ DEFINE_PER_CPU(unsigned long, cputime_scaled_last_delta); | |||
185 | 184 | ||
186 | cputime_t cputime_one_jiffy; | 185 | cputime_t cputime_one_jiffy; |
187 | 186 | ||
187 | void (*dtl_consumer)(struct dtl_entry *, u64); | ||
188 | |||
188 | static void calc_cputime_factors(void) | 189 | static void calc_cputime_factors(void) |
189 | { | 190 | { |
190 | struct div_result res; | 191 | struct div_result res; |
@@ -200,62 +201,171 @@ static void calc_cputime_factors(void) | |||
200 | } | 201 | } |
201 | 202 | ||
202 | /* | 203 | /* |
203 | * Read the PURR on systems that have it, otherwise the timebase. | 204 | * Read the SPURR on systems that have it, otherwise the PURR, |
205 | * or if that doesn't exist return the timebase value passed in. | ||
204 | */ | 206 | */ |
205 | static u64 read_purr(void) | 207 | static u64 read_spurr(u64 tb) |
206 | { | 208 | { |
209 | if (cpu_has_feature(CPU_FTR_SPURR)) | ||
210 | return mfspr(SPRN_SPURR); | ||
207 | if (cpu_has_feature(CPU_FTR_PURR)) | 211 | if (cpu_has_feature(CPU_FTR_PURR)) |
208 | return mfspr(SPRN_PURR); | 212 | return mfspr(SPRN_PURR); |
209 | return mftb(); | 213 | return tb; |
210 | } | 214 | } |
211 | 215 | ||
216 | #ifdef CONFIG_PPC_SPLPAR | ||
217 | |||
212 | /* | 218 | /* |
213 | * Read the SPURR on systems that have it, otherwise the purr | 219 | * Scan the dispatch trace log and count up the stolen time. |
220 | * Should be called with interrupts disabled. | ||
214 | */ | 221 | */ |
215 | static u64 read_spurr(u64 purr) | 222 | static u64 scan_dispatch_log(u64 stop_tb) |
216 | { | 223 | { |
217 | /* | 224 | u64 i = local_paca->dtl_ridx; |
218 | * cpus without PURR won't have a SPURR | 225 | struct dtl_entry *dtl = local_paca->dtl_curr; |
219 | * We already know the former when we use this, so tell gcc | 226 | struct dtl_entry *dtl_end = local_paca->dispatch_log_end; |
227 | struct lppaca *vpa = local_paca->lppaca_ptr; | ||
228 | u64 tb_delta; | ||
229 | u64 stolen = 0; | ||
230 | u64 dtb; | ||
231 | |||
232 | if (!dtl) | ||
233 | return 0; | ||
234 | |||
235 | if (i == vpa->dtl_idx) | ||
236 | return 0; | ||
237 | while (i < vpa->dtl_idx) { | ||
238 | if (dtl_consumer) | ||
239 | dtl_consumer(dtl, i); | ||
240 | dtb = dtl->timebase; | ||
241 | tb_delta = dtl->enqueue_to_dispatch_time + | ||
242 | dtl->ready_to_enqueue_time; | ||
243 | barrier(); | ||
244 | if (i + N_DISPATCH_LOG < vpa->dtl_idx) { | ||
245 | /* buffer has overflowed */ | ||
246 | i = vpa->dtl_idx - N_DISPATCH_LOG; | ||
247 | dtl = local_paca->dispatch_log + (i % N_DISPATCH_LOG); | ||
248 | continue; | ||
249 | } | ||
250 | if (dtb > stop_tb) | ||
251 | break; | ||
252 | stolen += tb_delta; | ||
253 | ++i; | ||
254 | ++dtl; | ||
255 | if (dtl == dtl_end) | ||
256 | dtl = local_paca->dispatch_log; | ||
257 | } | ||
258 | local_paca->dtl_ridx = i; | ||
259 | local_paca->dtl_curr = dtl; | ||
260 | return stolen; | ||
261 | } | ||
262 | |||
263 | /* | ||
264 | * Accumulate stolen time by scanning the dispatch trace log. | ||
265 | * Called on entry from user mode. | ||
266 | */ | ||
267 | void accumulate_stolen_time(void) | ||
268 | { | ||
269 | u64 sst, ust; | ||
270 | |||
271 | u8 save_soft_enabled = local_paca->soft_enabled; | ||
272 | u8 save_hard_enabled = local_paca->hard_enabled; | ||
273 | |||
274 | /* We are called early in the exception entry, before | ||
275 | * soft/hard_enabled are sync'ed to the expected state | ||
276 | * for the exception. We are hard disabled but the PACA | ||
277 | * needs to reflect that so various debug stuff doesn't | ||
278 | * complain | ||
220 | */ | 279 | */ |
221 | if (cpu_has_feature(CPU_FTR_PURR) && cpu_has_feature(CPU_FTR_SPURR)) | 280 | local_paca->soft_enabled = 0; |
222 | return mfspr(SPRN_SPURR); | 281 | local_paca->hard_enabled = 0; |
223 | return purr; | 282 | |
283 | sst = scan_dispatch_log(local_paca->starttime_user); | ||
284 | ust = scan_dispatch_log(local_paca->starttime); | ||
285 | local_paca->system_time -= sst; | ||
286 | local_paca->user_time -= ust; | ||
287 | local_paca->stolen_time += ust + sst; | ||
288 | |||
289 | local_paca->soft_enabled = save_soft_enabled; | ||
290 | local_paca->hard_enabled = save_hard_enabled; | ||
291 | } | ||
292 | |||
293 | static inline u64 calculate_stolen_time(u64 stop_tb) | ||
294 | { | ||
295 | u64 stolen = 0; | ||
296 | |||
297 | if (get_paca()->dtl_ridx != get_paca()->lppaca_ptr->dtl_idx) { | ||
298 | stolen = scan_dispatch_log(stop_tb); | ||
299 | get_paca()->system_time -= stolen; | ||
300 | } | ||
301 | |||
302 | stolen += get_paca()->stolen_time; | ||
303 | get_paca()->stolen_time = 0; | ||
304 | return stolen; | ||
224 | } | 305 | } |
225 | 306 | ||
307 | #else /* CONFIG_PPC_SPLPAR */ | ||
308 | static inline u64 calculate_stolen_time(u64 stop_tb) | ||
309 | { | ||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | #endif /* CONFIG_PPC_SPLPAR */ | ||
314 | |||
226 | /* | 315 | /* |
227 | * Account time for a transition between system, hard irq | 316 | * Account time for a transition between system, hard irq |
228 | * or soft irq state. | 317 | * or soft irq state. |
229 | */ | 318 | */ |
230 | void account_system_vtime(struct task_struct *tsk) | 319 | void account_system_vtime(struct task_struct *tsk) |
231 | { | 320 | { |
232 | u64 now, nowscaled, delta, deltascaled, sys_time; | 321 | u64 now, nowscaled, delta, deltascaled; |
233 | unsigned long flags; | 322 | unsigned long flags; |
323 | u64 stolen, udelta, sys_scaled, user_scaled; | ||
234 | 324 | ||
235 | local_irq_save(flags); | 325 | local_irq_save(flags); |
236 | now = read_purr(); | 326 | now = mftb(); |
237 | nowscaled = read_spurr(now); | 327 | nowscaled = read_spurr(now); |
238 | delta = now - get_paca()->startpurr; | 328 | get_paca()->system_time += now - get_paca()->starttime; |
329 | get_paca()->starttime = now; | ||
239 | deltascaled = nowscaled - get_paca()->startspurr; | 330 | deltascaled = nowscaled - get_paca()->startspurr; |
240 | get_paca()->startpurr = now; | ||
241 | get_paca()->startspurr = nowscaled; | 331 | get_paca()->startspurr = nowscaled; |
242 | if (!in_interrupt()) { | 332 | |
243 | /* deltascaled includes both user and system time. | 333 | stolen = calculate_stolen_time(now); |
244 | * Hence scale it based on the purr ratio to estimate | 334 | |
245 | * the system time */ | 335 | delta = get_paca()->system_time; |
246 | sys_time = get_paca()->system_time; | 336 | get_paca()->system_time = 0; |
247 | if (get_paca()->user_time) | 337 | udelta = get_paca()->user_time - get_paca()->utime_sspurr; |
248 | deltascaled = deltascaled * sys_time / | 338 | get_paca()->utime_sspurr = get_paca()->user_time; |
249 | (sys_time + get_paca()->user_time); | 339 | |
250 | delta += sys_time; | 340 | /* |
251 | get_paca()->system_time = 0; | 341 | * Because we don't read the SPURR on every kernel entry/exit, |
342 | * deltascaled includes both user and system SPURR ticks. | ||
343 | * Apportion these ticks to system SPURR ticks and user | ||
344 | * SPURR ticks in the same ratio as the system time (delta) | ||
345 | * and user time (udelta) values obtained from the timebase | ||
346 | * over the same interval. The system ticks get accounted here; | ||
347 | * the user ticks get saved up in paca->user_time_scaled to be | ||
348 | * used by account_process_tick. | ||
349 | */ | ||
350 | sys_scaled = delta; | ||
351 | user_scaled = udelta; | ||
352 | if (deltascaled != delta + udelta) { | ||
353 | if (udelta) { | ||
354 | sys_scaled = deltascaled * delta / (delta + udelta); | ||
355 | user_scaled = deltascaled - sys_scaled; | ||
356 | } else { | ||
357 | sys_scaled = deltascaled; | ||
358 | } | ||
359 | } | ||
360 | get_paca()->user_time_scaled += user_scaled; | ||
361 | |||
362 | if (in_interrupt() || idle_task(smp_processor_id()) != tsk) { | ||
363 | account_system_time(tsk, 0, delta, sys_scaled); | ||
364 | if (stolen) | ||
365 | account_steal_time(stolen); | ||
366 | } else { | ||
367 | account_idle_time(delta + stolen); | ||
252 | } | 368 | } |
253 | if (in_irq() || idle_task(smp_processor_id()) != tsk) | ||
254 | account_system_time(tsk, 0, delta, deltascaled); | ||
255 | else | ||
256 | account_idle_time(delta); | ||
257 | __get_cpu_var(cputime_last_delta) = delta; | ||
258 | __get_cpu_var(cputime_scaled_last_delta) = deltascaled; | ||
259 | local_irq_restore(flags); | 369 | local_irq_restore(flags); |
260 | } | 370 | } |
261 | EXPORT_SYMBOL_GPL(account_system_vtime); | 371 | EXPORT_SYMBOL_GPL(account_system_vtime); |
@@ -265,125 +375,26 @@ EXPORT_SYMBOL_GPL(account_system_vtime); | |||
265 | * by the exception entry and exit code to the generic process | 375 | * by the exception entry and exit code to the generic process |
266 | * user and system time records. | 376 | * user and system time records. |
267 | * Must be called with interrupts disabled. | 377 | * Must be called with interrupts disabled. |
378 | * Assumes that account_system_vtime() has been called recently | ||
379 | * (i.e. since the last entry from usermode) so that | ||
380 | * get_paca()->user_time_scaled is up to date. | ||
268 | */ | 381 | */ |
269 | void account_process_tick(struct task_struct *tsk, int user_tick) | 382 | void account_process_tick(struct task_struct *tsk, int user_tick) |
270 | { | 383 | { |
271 | cputime_t utime, utimescaled; | 384 | cputime_t utime, utimescaled; |
272 | 385 | ||
273 | utime = get_paca()->user_time; | 386 | utime = get_paca()->user_time; |
387 | utimescaled = get_paca()->user_time_scaled; | ||
274 | get_paca()->user_time = 0; | 388 | get_paca()->user_time = 0; |
275 | utimescaled = cputime_to_scaled(utime); | 389 | get_paca()->user_time_scaled = 0; |
390 | get_paca()->utime_sspurr = 0; | ||
276 | account_user_time(tsk, utime, utimescaled); | 391 | account_user_time(tsk, utime, utimescaled); |
277 | } | 392 | } |
278 | 393 | ||
279 | /* | ||
280 | * Stuff for accounting stolen time. | ||
281 | */ | ||
282 | struct cpu_purr_data { | ||
283 | int initialized; /* thread is running */ | ||
284 | u64 tb; /* last TB value read */ | ||
285 | u64 purr; /* last PURR value read */ | ||
286 | u64 spurr; /* last SPURR value read */ | ||
287 | }; | ||
288 | |||
289 | /* | ||
290 | * Each entry in the cpu_purr_data array is manipulated only by its | ||
291 | * "owner" cpu -- usually in the timer interrupt but also occasionally | ||
292 | * in process context for cpu online. As long as cpus do not touch | ||
293 | * each others' cpu_purr_data, disabling local interrupts is | ||
294 | * sufficient to serialize accesses. | ||
295 | */ | ||
296 | static DEFINE_PER_CPU(struct cpu_purr_data, cpu_purr_data); | ||
297 | |||
298 | static void snapshot_tb_and_purr(void *data) | ||
299 | { | ||
300 | unsigned long flags; | ||
301 | struct cpu_purr_data *p = &__get_cpu_var(cpu_purr_data); | ||
302 | |||
303 | local_irq_save(flags); | ||
304 | p->tb = get_tb_or_rtc(); | ||
305 | p->purr = mfspr(SPRN_PURR); | ||
306 | wmb(); | ||
307 | p->initialized = 1; | ||
308 | local_irq_restore(flags); | ||
309 | } | ||
310 | |||
311 | /* | ||
312 | * Called during boot when all cpus have come up. | ||
313 | */ | ||
314 | void snapshot_timebases(void) | ||
315 | { | ||
316 | if (!cpu_has_feature(CPU_FTR_PURR)) | ||
317 | return; | ||
318 | on_each_cpu(snapshot_tb_and_purr, NULL, 1); | ||
319 | } | ||
320 | |||
321 | /* | ||
322 | * Must be called with interrupts disabled. | ||
323 | */ | ||
324 | void calculate_steal_time(void) | ||
325 | { | ||
326 | u64 tb, purr; | ||
327 | s64 stolen; | ||
328 | struct cpu_purr_data *pme; | ||
329 | |||
330 | pme = &__get_cpu_var(cpu_purr_data); | ||
331 | if (!pme->initialized) | ||
332 | return; /* !CPU_FTR_PURR or early in early boot */ | ||
333 | tb = mftb(); | ||
334 | purr = mfspr(SPRN_PURR); | ||
335 | stolen = (tb - pme->tb) - (purr - pme->purr); | ||
336 | if (stolen > 0) { | ||
337 | if (idle_task(smp_processor_id()) != current) | ||
338 | account_steal_time(stolen); | ||
339 | else | ||
340 | account_idle_time(stolen); | ||
341 | } | ||
342 | pme->tb = tb; | ||
343 | pme->purr = purr; | ||
344 | } | ||
345 | |||
346 | #ifdef CONFIG_PPC_SPLPAR | ||
347 | /* | ||
348 | * Must be called before the cpu is added to the online map when | ||
349 | * a cpu is being brought up at runtime. | ||
350 | */ | ||
351 | static void snapshot_purr(void) | ||
352 | { | ||
353 | struct cpu_purr_data *pme; | ||
354 | unsigned long flags; | ||
355 | |||
356 | if (!cpu_has_feature(CPU_FTR_PURR)) | ||
357 | return; | ||
358 | local_irq_save(flags); | ||
359 | pme = &__get_cpu_var(cpu_purr_data); | ||
360 | pme->tb = mftb(); | ||
361 | pme->purr = mfspr(SPRN_PURR); | ||
362 | pme->initialized = 1; | ||
363 | local_irq_restore(flags); | ||
364 | } | ||
365 | |||
366 | #endif /* CONFIG_PPC_SPLPAR */ | ||
367 | |||
368 | #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ | 394 | #else /* ! CONFIG_VIRT_CPU_ACCOUNTING */ |
369 | #define calc_cputime_factors() | 395 | #define calc_cputime_factors() |
370 | #define calculate_steal_time() do { } while (0) | ||
371 | #endif | ||
372 | |||
373 | #if !(defined(CONFIG_VIRT_CPU_ACCOUNTING) && defined(CONFIG_PPC_SPLPAR)) | ||
374 | #define snapshot_purr() do { } while (0) | ||
375 | #endif | 396 | #endif |
376 | 397 | ||
377 | /* | ||
378 | * Called when a cpu comes up after the system has finished booting, | ||
379 | * i.e. as a result of a hotplug cpu action. | ||
380 | */ | ||
381 | void snapshot_timebase(void) | ||
382 | { | ||
383 | __get_cpu_var(last_jiffy) = get_tb_or_rtc(); | ||
384 | snapshot_purr(); | ||
385 | } | ||
386 | |||
387 | void __delay(unsigned long loops) | 398 | void __delay(unsigned long loops) |
388 | { | 399 | { |
389 | unsigned long start; | 400 | unsigned long start; |
@@ -493,60 +504,60 @@ void __init iSeries_time_init_early(void) | |||
493 | } | 504 | } |
494 | #endif /* CONFIG_PPC_ISERIES */ | 505 | #endif /* CONFIG_PPC_ISERIES */ |
495 | 506 | ||
496 | #ifdef CONFIG_PERF_EVENTS | 507 | #ifdef CONFIG_IRQ_WORK |
497 | 508 | ||
498 | /* | 509 | /* |
499 | * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable... | 510 | * 64-bit uses a byte in the PACA, 32-bit uses a per-cpu variable... |
500 | */ | 511 | */ |
501 | #ifdef CONFIG_PPC64 | 512 | #ifdef CONFIG_PPC64 |
502 | static inline unsigned long test_perf_event_pending(void) | 513 | static inline unsigned long test_irq_work_pending(void) |
503 | { | 514 | { |
504 | unsigned long x; | 515 | unsigned long x; |
505 | 516 | ||
506 | asm volatile("lbz %0,%1(13)" | 517 | asm volatile("lbz %0,%1(13)" |
507 | : "=r" (x) | 518 | : "=r" (x) |
508 | : "i" (offsetof(struct paca_struct, perf_event_pending))); | 519 | : "i" (offsetof(struct paca_struct, irq_work_pending))); |
509 | return x; | 520 | return x; |
510 | } | 521 | } |
511 | 522 | ||
512 | static inline void set_perf_event_pending_flag(void) | 523 | static inline void set_irq_work_pending_flag(void) |
513 | { | 524 | { |
514 | asm volatile("stb %0,%1(13)" : : | 525 | asm volatile("stb %0,%1(13)" : : |
515 | "r" (1), | 526 | "r" (1), |
516 | "i" (offsetof(struct paca_struct, perf_event_pending))); | 527 | "i" (offsetof(struct paca_struct, irq_work_pending))); |
517 | } | 528 | } |
518 | 529 | ||
519 | static inline void clear_perf_event_pending(void) | 530 | static inline void clear_irq_work_pending(void) |
520 | { | 531 | { |
521 | asm volatile("stb %0,%1(13)" : : | 532 | asm volatile("stb %0,%1(13)" : : |
522 | "r" (0), | 533 | "r" (0), |
523 | "i" (offsetof(struct paca_struct, perf_event_pending))); | 534 | "i" (offsetof(struct paca_struct, irq_work_pending))); |
524 | } | 535 | } |
525 | 536 | ||
526 | #else /* 32-bit */ | 537 | #else /* 32-bit */ |
527 | 538 | ||
528 | DEFINE_PER_CPU(u8, perf_event_pending); | 539 | DEFINE_PER_CPU(u8, irq_work_pending); |
529 | 540 | ||
530 | #define set_perf_event_pending_flag() __get_cpu_var(perf_event_pending) = 1 | 541 | #define set_irq_work_pending_flag() __get_cpu_var(irq_work_pending) = 1 |
531 | #define test_perf_event_pending() __get_cpu_var(perf_event_pending) | 542 | #define test_irq_work_pending() __get_cpu_var(irq_work_pending) |
532 | #define clear_perf_event_pending() __get_cpu_var(perf_event_pending) = 0 | 543 | #define clear_irq_work_pending() __get_cpu_var(irq_work_pending) = 0 |
533 | 544 | ||
534 | #endif /* 32 vs 64 bit */ | 545 | #endif /* 32 vs 64 bit */ |
535 | 546 | ||
536 | void set_perf_event_pending(void) | 547 | void set_irq_work_pending(void) |
537 | { | 548 | { |
538 | preempt_disable(); | 549 | preempt_disable(); |
539 | set_perf_event_pending_flag(); | 550 | set_irq_work_pending_flag(); |
540 | set_dec(1); | 551 | set_dec(1); |
541 | preempt_enable(); | 552 | preempt_enable(); |
542 | } | 553 | } |
543 | 554 | ||
544 | #else /* CONFIG_PERF_EVENTS */ | 555 | #else /* CONFIG_IRQ_WORK */ |
545 | 556 | ||
546 | #define test_perf_event_pending() 0 | 557 | #define test_irq_work_pending() 0 |
547 | #define clear_perf_event_pending() | 558 | #define clear_irq_work_pending() |
548 | 559 | ||
549 | #endif /* CONFIG_PERF_EVENTS */ | 560 | #endif /* CONFIG_IRQ_WORK */ |
550 | 561 | ||
551 | /* | 562 | /* |
552 | * For iSeries shared processors, we have to let the hypervisor | 563 | * For iSeries shared processors, we have to let the hypervisor |
@@ -569,14 +580,21 @@ void timer_interrupt(struct pt_regs * regs) | |||
569 | struct clock_event_device *evt = &decrementer->event; | 580 | struct clock_event_device *evt = &decrementer->event; |
570 | u64 now; | 581 | u64 now; |
571 | 582 | ||
583 | /* Ensure a positive value is written to the decrementer, or else | ||
584 | * some CPUs will continue to take decrementer exceptions. | ||
585 | */ | ||
586 | set_dec(DECREMENTER_MAX); | ||
587 | |||
588 | /* Some implementations of hotplug will get timer interrupts while | ||
589 | * offline, just ignore these | ||
590 | */ | ||
591 | if (!cpu_online(smp_processor_id())) | ||
592 | return; | ||
593 | |||
572 | trace_timer_interrupt_entry(regs); | 594 | trace_timer_interrupt_entry(regs); |
573 | 595 | ||
574 | __get_cpu_var(irq_stat).timer_irqs++; | 596 | __get_cpu_var(irq_stat).timer_irqs++; |
575 | 597 | ||
576 | /* Ensure a positive value is written to the decrementer, or else | ||
577 | * some CPUs will continuue to take decrementer exceptions */ | ||
578 | set_dec(DECREMENTER_MAX); | ||
579 | |||
580 | #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) | 598 | #if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) |
581 | if (atomic_read(&ppc_n_lost_interrupts) != 0) | 599 | if (atomic_read(&ppc_n_lost_interrupts) != 0) |
582 | do_IRQ(regs); | 600 | do_IRQ(regs); |
@@ -585,11 +603,9 @@ void timer_interrupt(struct pt_regs * regs) | |||
585 | old_regs = set_irq_regs(regs); | 603 | old_regs = set_irq_regs(regs); |
586 | irq_enter(); | 604 | irq_enter(); |
587 | 605 | ||
588 | calculate_steal_time(); | 606 | if (test_irq_work_pending()) { |
589 | 607 | clear_irq_work_pending(); | |
590 | if (test_perf_event_pending()) { | 608 | irq_work_run(); |
591 | clear_perf_event_pending(); | ||
592 | perf_event_do_pending(); | ||
593 | } | 609 | } |
594 | 610 | ||
595 | #ifdef CONFIG_PPC_ISERIES | 611 | #ifdef CONFIG_PPC_ISERIES |
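
The rewritten account_system_vtime() above reads the SPURR only at state transitions, so one SPURR delta covers both user and system time and has to be apportioned in the same ratio as the timebase-derived system (delta) and user (udelta) intervals. The stand-alone sketch below reproduces just that arithmetic with invented sample numbers to show how the split comes out.

    /* Minimal sketch of the SPURR apportioning in account_system_vtime();
     * the tick counts below are made up for illustration. */
    #include <stdio.h>

    int main(void)
    {
        unsigned long long delta = 3000;        /* system time, timebase ticks */
        unsigned long long udelta = 1000;       /* user time, timebase ticks */
        unsigned long long deltascaled = 3200;  /* SPURR ticks over the same span */
        unsigned long long sys_scaled = delta, user_scaled = udelta;

        if (deltascaled != delta + udelta) {
            if (udelta) {
                sys_scaled = deltascaled * delta / (delta + udelta);
                user_scaled = deltascaled - sys_scaled;
            } else {
                sys_scaled = deltascaled;
            }
        }
        /* 3200 * 3000 / 4000 = 2400 system SPURR ticks, 800 user SPURR ticks */
        printf("sys_scaled=%llu user_scaled=%llu\n", sys_scaled, user_scaled);
        return 0;
    }
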
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index a45a63c3a0c7..1a0141426cda 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/bug.h> | 34 | #include <linux/bug.h> |
35 | #include <linux/kdebug.h> | 35 | #include <linux/kdebug.h> |
36 | #include <linux/debugfs.h> | 36 | #include <linux/debugfs.h> |
37 | #include <linux/ratelimit.h> | ||
37 | 38 | ||
38 | #include <asm/emulated_ops.h> | 39 | #include <asm/emulated_ops.h> |
39 | #include <asm/pgtable.h> | 40 | #include <asm/pgtable.h> |
@@ -55,6 +56,7 @@ | |||
55 | #endif | 56 | #endif |
56 | #include <asm/kexec.h> | 57 | #include <asm/kexec.h> |
57 | #include <asm/ppc-opcode.h> | 58 | #include <asm/ppc-opcode.h> |
59 | #include <asm/rio.h> | ||
58 | 60 | ||
59 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | 61 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) |
60 | int (*__debugger)(struct pt_regs *regs) __read_mostly; | 62 | int (*__debugger)(struct pt_regs *regs) __read_mostly; |
@@ -143,7 +145,6 @@ int die(const char *str, struct pt_regs *regs, long err) | |||
143 | #endif | 145 | #endif |
144 | printk("%s\n", ppc_md.name ? ppc_md.name : ""); | 146 | printk("%s\n", ppc_md.name ? ppc_md.name : ""); |
145 | 147 | ||
146 | sysfs_printk_last_file(); | ||
147 | if (notify_die(DIE_OOPS, str, regs, err, 255, | 148 | if (notify_die(DIE_OOPS, str, regs, err, 255, |
148 | SIGSEGV) == NOTIFY_STOP) | 149 | SIGSEGV) == NOTIFY_STOP) |
149 | return 1; | 150 | return 1; |
@@ -197,12 +198,11 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) | |||
197 | if (die("Exception in kernel mode", regs, signr)) | 198 | if (die("Exception in kernel mode", regs, signr)) |
198 | return; | 199 | return; |
199 | } else if (show_unhandled_signals && | 200 | } else if (show_unhandled_signals && |
200 | unhandled_signal(current, signr) && | 201 | unhandled_signal(current, signr)) { |
201 | printk_ratelimit()) { | 202 | printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, |
202 | printk(regs->msr & MSR_SF ? fmt64 : fmt32, | 203 | current->comm, current->pid, signr, |
203 | current->comm, current->pid, signr, | 204 | addr, regs->nip, regs->link, code); |
204 | addr, regs->nip, regs->link, code); | 205 | } |
205 | } | ||
206 | 206 | ||
207 | memset(&info, 0, sizeof(info)); | 207 | memset(&info, 0, sizeof(info)); |
208 | info.si_signo = signr; | 208 | info.si_signo = signr; |
@@ -221,7 +221,7 @@ void system_reset_exception(struct pt_regs *regs) | |||
221 | } | 221 | } |
222 | 222 | ||
223 | #ifdef CONFIG_KEXEC | 223 | #ifdef CONFIG_KEXEC |
224 | cpu_set(smp_processor_id(), cpus_in_sr); | 224 | cpumask_set_cpu(smp_processor_id(), &cpus_in_sr); |
225 | #endif | 225 | #endif |
226 | 226 | ||
227 | die("System Reset", regs, SIGABRT); | 227 | die("System Reset", regs, SIGABRT); |
@@ -425,6 +425,12 @@ int machine_check_e500mc(struct pt_regs *regs) | |||
425 | unsigned long reason = mcsr; | 425 | unsigned long reason = mcsr; |
426 | int recoverable = 1; | 426 | int recoverable = 1; |
427 | 427 | ||
428 | if (reason & MCSR_LD) { | ||
429 | recoverable = fsl_rio_mcheck_exception(regs); | ||
430 | if (recoverable == 1) | ||
431 | goto silent_out; | ||
432 | } | ||
433 | |||
428 | printk("Machine check in kernel mode.\n"); | 434 | printk("Machine check in kernel mode.\n"); |
429 | printk("Caused by (from MCSR=%lx): ", reason); | 435 | printk("Caused by (from MCSR=%lx): ", reason); |
430 | 436 | ||
@@ -500,6 +506,7 @@ int machine_check_e500mc(struct pt_regs *regs) | |||
500 | reason & MCSR_MEA ? "Effective" : "Physical", addr); | 506 | reason & MCSR_MEA ? "Effective" : "Physical", addr); |
501 | } | 507 | } |
502 | 508 | ||
509 | silent_out: | ||
503 | mtspr(SPRN_MCSR, mcsr); | 510 | mtspr(SPRN_MCSR, mcsr); |
504 | return mfspr(SPRN_MCSR) == 0 && recoverable; | 511 | return mfspr(SPRN_MCSR) == 0 && recoverable; |
505 | } | 512 | } |
@@ -508,6 +515,11 @@ int machine_check_e500(struct pt_regs *regs) | |||
508 | { | 515 | { |
509 | unsigned long reason = get_mc_reason(regs); | 516 | unsigned long reason = get_mc_reason(regs); |
510 | 517 | ||
518 | if (reason & MCSR_BUS_RBERR) { | ||
519 | if (fsl_rio_mcheck_exception(regs)) | ||
520 | return 1; | ||
521 | } | ||
522 | |||
511 | printk("Machine check in kernel mode.\n"); | 523 | printk("Machine check in kernel mode.\n"); |
512 | printk("Caused by (from MCSR=%lx): ", reason); | 524 | printk("Caused by (from MCSR=%lx): ", reason); |
513 | 525 | ||
@@ -538,6 +550,11 @@ int machine_check_e500(struct pt_regs *regs) | |||
538 | 550 | ||
539 | return 0; | 551 | return 0; |
540 | } | 552 | } |
553 | |||
554 | int machine_check_generic(struct pt_regs *regs) | ||
555 | { | ||
556 | return 0; | ||
557 | } | ||
541 | #elif defined(CONFIG_E200) | 558 | #elif defined(CONFIG_E200) |
542 | int machine_check_e200(struct pt_regs *regs) | 559 | int machine_check_e200(struct pt_regs *regs) |
543 | { | 560 | { |
@@ -621,12 +638,6 @@ void machine_check_exception(struct pt_regs *regs) | |||
621 | if (recover > 0) | 638 | if (recover > 0) |
622 | return; | 639 | return; |
623 | 640 | ||
624 | if (user_mode(regs)) { | ||
625 | regs->msr |= MSR_RI; | ||
626 | _exception(SIGBUS, regs, BUS_ADRERR, regs->nip); | ||
627 | return; | ||
628 | } | ||
629 | |||
630 | #if defined(CONFIG_8xx) && defined(CONFIG_PCI) | 641 | #if defined(CONFIG_8xx) && defined(CONFIG_PCI) |
631 | /* the qspan pci read routines can cause machine checks -- Cort | 642 | /* the qspan pci read routines can cause machine checks -- Cort |
632 | * | 643 | * |
@@ -638,16 +649,12 @@ void machine_check_exception(struct pt_regs *regs) | |||
638 | return; | 649 | return; |
639 | #endif | 650 | #endif |
640 | 651 | ||
641 | if (debugger_fault_handler(regs)) { | 652 | if (debugger_fault_handler(regs)) |
642 | regs->msr |= MSR_RI; | ||
643 | return; | 653 | return; |
644 | } | ||
645 | 654 | ||
646 | if (check_io_access(regs)) | 655 | if (check_io_access(regs)) |
647 | return; | 656 | return; |
648 | 657 | ||
649 | if (debugger_fault_handler(regs)) | ||
650 | return; | ||
651 | die("Machine check", regs, SIGBUS); | 658 | die("Machine check", regs, SIGBUS); |
652 | 659 | ||
653 | /* Must die if the interrupt is not recoverable */ | 660 | /* Must die if the interrupt is not recoverable */ |
@@ -914,6 +921,26 @@ static int emulate_instruction(struct pt_regs *regs) | |||
914 | return emulate_isel(regs, instword); | 921 | return emulate_isel(regs, instword); |
915 | } | 922 | } |
916 | 923 | ||
924 | #ifdef CONFIG_PPC64 | ||
925 | /* Emulate the mfspr rD, DSCR. */ | ||
926 | if (((instword & PPC_INST_MFSPR_DSCR_MASK) == PPC_INST_MFSPR_DSCR) && | ||
927 | cpu_has_feature(CPU_FTR_DSCR)) { | ||
928 | PPC_WARN_EMULATED(mfdscr, regs); | ||
929 | rd = (instword >> 21) & 0x1f; | ||
930 | regs->gpr[rd] = mfspr(SPRN_DSCR); | ||
931 | return 0; | ||
932 | } | ||
933 | /* Emulate the mtspr DSCR, rD. */ | ||
934 | if (((instword & PPC_INST_MTSPR_DSCR_MASK) == PPC_INST_MTSPR_DSCR) && | ||
935 | cpu_has_feature(CPU_FTR_DSCR)) { | ||
936 | PPC_WARN_EMULATED(mtdscr, regs); | ||
937 | rd = (instword >> 21) & 0x1f; | ||
938 | mtspr(SPRN_DSCR, regs->gpr[rd]); | ||
939 | current->thread.dscr_inherit = 1; | ||
940 | return 0; | ||
941 | } | ||
942 | #endif | ||
943 | |||
917 | return -EINVAL; | 944 | return -EINVAL; |
918 | } | 945 | } |
919 | 946 | ||
@@ -964,7 +991,7 @@ void __kprobes program_check_exception(struct pt_regs *regs) | |||
964 | * ESR_DST (!?) or 0. In the process of chasing this with the | 991 | * ESR_DST (!?) or 0. In the process of chasing this with the |
965 | * hardware people - not sure if it can happen on any illegal | 992 | * hardware people - not sure if it can happen on any illegal |
966 | * instruction or only on FP instructions, whether there is a | 993 | * instruction or only on FP instructions, whether there is a |
967 | * pattern to occurences etc. -dgibson 31/Mar/2003 */ | 994 | * pattern to occurrences etc. -dgibson 31/Mar/2003 */ |
968 | switch (do_mathemu(regs)) { | 995 | switch (do_mathemu(regs)) { |
969 | case 0: | 996 | case 0: |
970 | emulate_single_step(regs); | 997 | emulate_single_step(regs); |
@@ -1315,9 +1342,8 @@ void altivec_assist_exception(struct pt_regs *regs) | |||
1315 | } else { | 1342 | } else { |
1316 | /* didn't recognize the instruction */ | 1343 | /* didn't recognize the instruction */ |
1317 | /* XXX quick hack for now: set the non-Java bit in the VSCR */ | 1344 | /* XXX quick hack for now: set the non-Java bit in the VSCR */ |
1318 | if (printk_ratelimit()) | 1345 | printk_ratelimited(KERN_ERR "Unrecognized altivec instruction " |
1319 | printk(KERN_ERR "Unrecognized altivec instruction " | 1346 | "in %s at %lx\n", current->comm, regs->nip); |
1320 | "in %s at %lx\n", current->comm, regs->nip); | ||
1321 | current->thread.vscr.u[3] |= 0x10000; | 1347 | current->thread.vscr.u[3] |= 0x10000; |
1322 | } | 1348 | } |
1323 | } | 1349 | } |
@@ -1511,15 +1537,18 @@ struct ppc_emulated ppc_emulated = { | |||
1511 | #ifdef CONFIG_VSX | 1537 | #ifdef CONFIG_VSX |
1512 | WARN_EMULATED_SETUP(vsx), | 1538 | WARN_EMULATED_SETUP(vsx), |
1513 | #endif | 1539 | #endif |
1540 | #ifdef CONFIG_PPC64 | ||
1541 | WARN_EMULATED_SETUP(mfdscr), | ||
1542 | WARN_EMULATED_SETUP(mtdscr), | ||
1543 | #endif | ||
1514 | }; | 1544 | }; |
1515 | 1545 | ||
1516 | u32 ppc_warn_emulated; | 1546 | u32 ppc_warn_emulated; |
1517 | 1547 | ||
1518 | void ppc_warn_emulated_print(const char *type) | 1548 | void ppc_warn_emulated_print(const char *type) |
1519 | { | 1549 | { |
1520 | if (printk_ratelimit()) | 1550 | pr_warn_ratelimited("%s used emulated %s instruction\n", current->comm, |
1521 | pr_warning("%s used emulated %s instruction\n", current->comm, | 1551 | type); |
1522 | type); | ||
1523 | } | 1552 | } |
1524 | 1553 | ||
1525 | static int __init ppc_warn_emulated_init(void) | 1554 | static int __init ppc_warn_emulated_init(void) |
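
The traps.c hunk above emulates mfspr/mtspr of the DSCR by masking the faulting instruction word and pulling the GPR operand out with (instword >> 21) & 0x1f. The sketch below shows only that field extraction; the sample word is an assumed encoding of "mfspr r5, DSCR" (SPR 17) and should be treated as illustrative, not as the kernel's PPC_INST_* constant.

    /* Hedged sketch of the operand decode used by the DSCR emulation above:
     * the GPR field is the five bits recovered by (instword >> 21) & 0x1f. */
    #include <stdio.h>

    int main(void)
    {
        unsigned int instword = 0x7cb102a6;     /* assumed: mfspr r5, DSCR */
        unsigned int rd = (instword >> 21) & 0x1f;

        printf("GPR operand: r%u\n", rd);       /* prints r5 */
        return 0;
    }
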
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index e39cad83c884..23d65abbedce 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c | |||
@@ -62,6 +62,8 @@ void __init udbg_early_init(void) | |||
62 | udbg_init_cpm(); | 62 | udbg_init_cpm(); |
63 | #elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO) | 63 | #elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO) |
64 | udbg_init_usbgecko(); | 64 | udbg_init_usbgecko(); |
65 | #elif defined(CONFIG_PPC_EARLY_DEBUG_WSP) | ||
66 | udbg_init_wsp(); | ||
65 | #endif | 67 | #endif |
66 | 68 | ||
67 | #ifdef CONFIG_PPC_EARLY_DEBUG | 69 | #ifdef CONFIG_PPC_EARLY_DEBUG |
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index b4b167b33643..6837f839ab78 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * udbg for NS16550 compatable serial ports | 2 | * udbg for NS16550 compatible serial ports |
3 | * | 3 | * |
4 | * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp | 4 | * Copyright (C) 2001-2005 PPC 64 Team, IBM Corp |
5 | * | 5 | * |
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/types.h> | 11 | #include <linux/types.h> |
12 | #include <asm/udbg.h> | 12 | #include <asm/udbg.h> |
13 | #include <asm/io.h> | 13 | #include <asm/io.h> |
14 | #include <asm/reg_a2.h> | ||
14 | 15 | ||
15 | extern u8 real_readb(volatile u8 __iomem *addr); | 16 | extern u8 real_readb(volatile u8 __iomem *addr); |
16 | extern void real_writeb(u8 data, volatile u8 __iomem *addr); | 17 | extern void real_writeb(u8 data, volatile u8 __iomem *addr); |
@@ -298,3 +299,53 @@ void __init udbg_init_40x_realmode(void) | |||
298 | udbg_getc_poll = NULL; | 299 | udbg_getc_poll = NULL; |
299 | } | 300 | } |
300 | #endif /* CONFIG_PPC_EARLY_DEBUG_40x */ | 301 | #endif /* CONFIG_PPC_EARLY_DEBUG_40x */ |
302 | |||
303 | #ifdef CONFIG_PPC_EARLY_DEBUG_WSP | ||
304 | static void udbg_wsp_flush(void) | ||
305 | { | ||
306 | if (udbg_comport) { | ||
307 | while ((readb(&udbg_comport->lsr) & LSR_THRE) == 0) | ||
308 | /* wait for idle */; | ||
309 | } | ||
310 | } | ||
311 | |||
312 | static void udbg_wsp_putc(char c) | ||
313 | { | ||
314 | if (udbg_comport) { | ||
315 | if (c == '\n') | ||
316 | udbg_wsp_putc('\r'); | ||
317 | udbg_wsp_flush(); | ||
318 | writeb(c, &udbg_comport->thr); eieio(); | ||
319 | } | ||
320 | } | ||
321 | |||
322 | static int udbg_wsp_getc(void) | ||
323 | { | ||
324 | if (udbg_comport) { | ||
325 | while ((readb(&udbg_comport->lsr) & LSR_DR) == 0) | ||
326 | ; /* wait for char */ | ||
327 | return readb(&udbg_comport->rbr); | ||
328 | } | ||
329 | return -1; | ||
330 | } | ||
331 | |||
332 | static int udbg_wsp_getc_poll(void) | ||
333 | { | ||
334 | if (udbg_comport) | ||
335 | if (readb(&udbg_comport->lsr) & LSR_DR) | ||
336 | return readb(&udbg_comport->rbr); | ||
337 | return -1; | ||
338 | } | ||
339 | |||
340 | void __init udbg_init_wsp(void) | ||
341 | { | ||
342 | udbg_comport = (struct NS16550 __iomem *)WSP_UART_VIRT; | ||
343 | |||
344 | udbg_init_uart(udbg_comport, 57600, 50000000); | ||
345 | |||
346 | udbg_putc = udbg_wsp_putc; | ||
347 | udbg_flush = udbg_wsp_flush; | ||
348 | udbg_getc = udbg_wsp_getc; | ||
349 | udbg_getc_poll = udbg_wsp_getc_poll; | ||
350 | } | ||
351 | #endif /* CONFIG_PPC_EARLY_DEBUG_WSP */ | ||
diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 13002fe206e7..142ab1008c3b 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c | |||
@@ -159,7 +159,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma) | |||
159 | { | 159 | { |
160 | int i; | 160 | int i; |
161 | 161 | ||
162 | if (!vma || test_thread_flag(TIF_32BIT)) { | 162 | if (!vma || is_32bit_task()) { |
163 | printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase); | 163 | printk("vDSO32 @ %016lx:\n", (unsigned long)vdso32_kbase); |
164 | for (i=0; i<vdso32_pages; i++) { | 164 | for (i=0; i<vdso32_pages; i++) { |
165 | struct page *pg = virt_to_page(vdso32_kbase + | 165 | struct page *pg = virt_to_page(vdso32_kbase + |
@@ -170,7 +170,7 @@ static void dump_vdso_pages(struct vm_area_struct * vma) | |||
170 | dump_one_vdso_page(pg, upg); | 170 | dump_one_vdso_page(pg, upg); |
171 | } | 171 | } |
172 | } | 172 | } |
173 | if (!vma || !test_thread_flag(TIF_32BIT)) { | 173 | if (!vma || !is_32bit_task()) { |
174 | printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase); | 174 | printk("vDSO64 @ %016lx:\n", (unsigned long)vdso64_kbase); |
175 | for (i=0; i<vdso64_pages; i++) { | 175 | for (i=0; i<vdso64_pages; i++) { |
176 | struct page *pg = virt_to_page(vdso64_kbase + | 176 | struct page *pg = virt_to_page(vdso64_kbase + |
@@ -200,7 +200,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | |||
200 | return 0; | 200 | return 0; |
201 | 201 | ||
202 | #ifdef CONFIG_PPC64 | 202 | #ifdef CONFIG_PPC64 |
203 | if (test_thread_flag(TIF_32BIT)) { | 203 | if (is_32bit_task()) { |
204 | vdso_pagelist = vdso32_pagelist; | 204 | vdso_pagelist = vdso32_pagelist; |
205 | vdso_pages = vdso32_pages; | 205 | vdso_pages = vdso32_pages; |
206 | vdso_base = VDSO32_MBASE; | 206 | vdso_base = VDSO32_MBASE; |
@@ -820,17 +820,17 @@ static int __init vdso_init(void) | |||
820 | } | 820 | } |
821 | arch_initcall(vdso_init); | 821 | arch_initcall(vdso_init); |
822 | 822 | ||
823 | int in_gate_area_no_task(unsigned long addr) | 823 | int in_gate_area_no_mm(unsigned long addr) |
824 | { | 824 | { |
825 | return 0; | 825 | return 0; |
826 | } | 826 | } |
827 | 827 | ||
828 | int in_gate_area(struct task_struct *task, unsigned long addr) | 828 | int in_gate_area(struct mm_struct *mm, unsigned long addr) |
829 | { | 829 | { |
830 | return 0; | 830 | return 0; |
831 | } | 831 | } |
832 | 832 | ||
833 | struct vm_area_struct *get_gate_vma(struct task_struct *tsk) | 833 | struct vm_area_struct *get_gate_vma(struct mm_struct *mm) |
834 | { | 834 | { |
835 | return NULL; | 835 | return NULL; |
836 | } | 836 | } |
diff --git a/arch/powerpc/kernel/vdso32/Makefile b/arch/powerpc/kernel/vdso32/Makefile index 51ead52141bd..9a7946c41738 100644 --- a/arch/powerpc/kernel/vdso32/Makefile +++ b/arch/powerpc/kernel/vdso32/Makefile | |||
@@ -14,10 +14,10 @@ obj-vdso32 := $(addprefix $(obj)/, $(obj-vdso32)) | |||
14 | 14 | ||
15 | GCOV_PROFILE := n | 15 | GCOV_PROFILE := n |
16 | 16 | ||
17 | EXTRA_CFLAGS := -shared -fno-common -fno-builtin | 17 | ccflags-y := -shared -fno-common -fno-builtin |
18 | EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ | 18 | ccflags-y += -nostdlib -Wl,-soname=linux-vdso32.so.1 \ |
19 | $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) | 19 | $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) |
20 | EXTRA_AFLAGS := -D__VDSO32__ -s | 20 | asflags-y := -D__VDSO32__ -s |
21 | 21 | ||
22 | obj-y += vdso32_wrapper.o | 22 | obj-y += vdso32_wrapper.o |
23 | extra-y += vdso32.lds | 23 | extra-y += vdso32.lds |
diff --git a/arch/powerpc/kernel/vdso32/sigtramp.S b/arch/powerpc/kernel/vdso32/sigtramp.S index 68d49dd71dcc..cf0c9c9c24f9 100644 --- a/arch/powerpc/kernel/vdso32/sigtramp.S +++ b/arch/powerpc/kernel/vdso32/sigtramp.S | |||
@@ -19,7 +19,7 @@ | |||
19 | 19 | ||
20 | /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from | 20 | /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from |
21 | the return address to get an address in the middle of the presumed | 21 | the return address to get an address in the middle of the presumed |
22 | call instruction. Since we don't have a call here, we artifically | 22 | call instruction. Since we don't have a call here, we artificially |
23 | extend the range covered by the unwind info by adding a nop before | 23 | extend the range covered by the unwind info by adding a nop before |
24 | the real start. */ | 24 | the real start. */ |
25 | nop | 25 | nop |
diff --git a/arch/powerpc/kernel/vdso64/Makefile b/arch/powerpc/kernel/vdso64/Makefile index 79da65d44a2a..8c500d8622e4 100644 --- a/arch/powerpc/kernel/vdso64/Makefile +++ b/arch/powerpc/kernel/vdso64/Makefile | |||
@@ -9,10 +9,10 @@ obj-vdso64 := $(addprefix $(obj)/, $(obj-vdso64)) | |||
9 | 9 | ||
10 | GCOV_PROFILE := n | 10 | GCOV_PROFILE := n |
11 | 11 | ||
12 | EXTRA_CFLAGS := -shared -fno-common -fno-builtin | 12 | ccflags-y := -shared -fno-common -fno-builtin |
13 | EXTRA_CFLAGS += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ | 13 | ccflags-y += -nostdlib -Wl,-soname=linux-vdso64.so.1 \ |
14 | $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) | 14 | $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) |
15 | EXTRA_AFLAGS := -D__VDSO64__ -s | 15 | asflags-y := -D__VDSO64__ -s |
16 | 16 | ||
17 | obj-y += vdso64_wrapper.o | 17 | obj-y += vdso64_wrapper.o |
18 | extra-y += vdso64.lds | 18 | extra-y += vdso64.lds |
diff --git a/arch/powerpc/kernel/vdso64/sigtramp.S b/arch/powerpc/kernel/vdso64/sigtramp.S index 59eb59bb4082..45ea281e9a21 100644 --- a/arch/powerpc/kernel/vdso64/sigtramp.S +++ b/arch/powerpc/kernel/vdso64/sigtramp.S | |||
@@ -20,7 +20,7 @@ | |||
20 | 20 | ||
21 | /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from | 21 | /* The nop here is a hack. The dwarf2 unwind routines subtract 1 from |
22 | the return address to get an address in the middle of the presumed | 22 | the return address to get an address in the middle of the presumed |
23 | call instruction. Since we don't have a call here, we artifically | 23 | call instruction. Since we don't have a call here, we artificially |
24 | extend the range covered by the unwind info by padding before the | 24 | extend the range covered by the unwind info by padding before the |
25 | real start. */ | 25 | real start. */ |
26 | nop | 26 | nop |
diff --git a/arch/powerpc/kernel/vector.S b/arch/powerpc/kernel/vector.S index fe460482fa68..4d5a3edff49e 100644 --- a/arch/powerpc/kernel/vector.S +++ b/arch/powerpc/kernel/vector.S | |||
@@ -5,6 +5,7 @@ | |||
5 | #include <asm/cputable.h> | 5 | #include <asm/cputable.h> |
6 | #include <asm/thread_info.h> | 6 | #include <asm/thread_info.h> |
7 | #include <asm/page.h> | 7 | #include <asm/page.h> |
8 | #include <asm/ptrace.h> | ||
8 | 9 | ||
9 | /* | 10 | /* |
10 | * load_up_altivec(unused, unused, tsk) | 11 | * load_up_altivec(unused, unused, tsk) |
@@ -101,7 +102,7 @@ _GLOBAL(giveup_altivec) | |||
101 | MTMSRD(r5) /* enable use of VMX now */ | 102 | MTMSRD(r5) /* enable use of VMX now */ |
102 | isync | 103 | isync |
103 | PPC_LCMPI 0,r3,0 | 104 | PPC_LCMPI 0,r3,0 |
104 | beqlr- /* if no previous owner, done */ | 105 | beqlr /* if no previous owner, done */ |
105 | addi r3,r3,THREAD /* want THREAD of task */ | 106 | addi r3,r3,THREAD /* want THREAD of task */ |
106 | PPC_LL r5,PT_REGS(r3) | 107 | PPC_LL r5,PT_REGS(r3) |
107 | PPC_LCMPI 0,r5,0 | 108 | PPC_LCMPI 0,r5,0 |
diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index fa3469ddaef8..1b695fdc362b 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c | |||
@@ -238,9 +238,7 @@ static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size) | |||
238 | * memory in this pool does not change. | 238 | * memory in this pool does not change. |
239 | */ | 239 | */ |
240 | if (spare_needed && reserve_freed) { | 240 | if (spare_needed && reserve_freed) { |
241 | tmp = min(spare_needed, min(reserve_freed, | 241 | tmp = min3(spare_needed, reserve_freed, (viodev->cmo.entitled - VIO_CMO_MIN_ENT)); |
242 | (viodev->cmo.entitled - | ||
243 | VIO_CMO_MIN_ENT))); | ||
244 | 242 | ||
245 | vio_cmo.spare += tmp; | 243 | vio_cmo.spare += tmp; |
246 | viodev->cmo.entitled -= tmp; | 244 | viodev->cmo.entitled -= tmp; |
@@ -602,6 +600,11 @@ static void vio_dma_iommu_unmap_sg(struct device *dev, | |||
602 | vio_cmo_dealloc(viodev, alloc_size); | 600 | vio_cmo_dealloc(viodev, alloc_size); |
603 | } | 601 | } |
604 | 602 | ||
603 | static int vio_dma_iommu_dma_supported(struct device *dev, u64 mask) | ||
604 | { | ||
605 | return dma_iommu_ops.dma_supported(dev, mask); | ||
606 | } | ||
607 | |||
605 | struct dma_map_ops vio_dma_mapping_ops = { | 608 | struct dma_map_ops vio_dma_mapping_ops = { |
606 | .alloc_coherent = vio_dma_iommu_alloc_coherent, | 609 | .alloc_coherent = vio_dma_iommu_alloc_coherent, |
607 | .free_coherent = vio_dma_iommu_free_coherent, | 610 | .free_coherent = vio_dma_iommu_free_coherent, |
@@ -609,6 +612,7 @@ struct dma_map_ops vio_dma_mapping_ops = { | |||
609 | .unmap_sg = vio_dma_iommu_unmap_sg, | 612 | .unmap_sg = vio_dma_iommu_unmap_sg, |
610 | .map_page = vio_dma_iommu_map_page, | 613 | .map_page = vio_dma_iommu_map_page, |
611 | .unmap_page = vio_dma_iommu_unmap_page, | 614 | .unmap_page = vio_dma_iommu_unmap_page, |
615 | .dma_supported = vio_dma_iommu_dma_supported, | ||
612 | 616 | ||
613 | }; | 617 | }; |
614 | 618 | ||
@@ -860,8 +864,7 @@ static void vio_cmo_bus_remove(struct vio_dev *viodev) | |||
860 | 864 | ||
861 | static void vio_cmo_set_dma_ops(struct vio_dev *viodev) | 865 | static void vio_cmo_set_dma_ops(struct vio_dev *viodev) |
862 | { | 866 | { |
863 | vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported; | 867 | set_dma_ops(&viodev->dev, &vio_dma_mapping_ops); |
864 | viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops; | ||
865 | } | 868 | } |
866 | 869 | ||
867 | /** | 870 | /** |
@@ -1184,7 +1187,12 @@ EXPORT_SYMBOL(vio_unregister_driver); | |||
1184 | /* vio_dev refcount hit 0 */ | 1187 | /* vio_dev refcount hit 0 */ |
1185 | static void __devinit vio_dev_release(struct device *dev) | 1188 | static void __devinit vio_dev_release(struct device *dev) |
1186 | { | 1189 | { |
1187 | /* XXX should free TCE table */ | 1190 | struct iommu_table *tbl = get_iommu_table_base(dev); |
1191 | |||
1192 | /* iSeries uses a common table for all vio devices */ | ||
1193 | if (!firmware_has_feature(FW_FEATURE_ISERIES) && tbl) | ||
1194 | iommu_free_table(tbl, dev->of_node ? | ||
1195 | dev->of_node->full_name : dev_name(dev)); | ||
1188 | of_node_put(dev->of_node); | 1196 | of_node_put(dev->of_node); |
1189 | kfree(to_vio_dev(dev)); | 1197 | kfree(to_vio_dev(dev)); |
1190 | } | 1198 | } |
@@ -1241,7 +1249,7 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) | |||
1241 | if (firmware_has_feature(FW_FEATURE_CMO)) | 1249 | if (firmware_has_feature(FW_FEATURE_CMO)) |
1242 | vio_cmo_set_dma_ops(viodev); | 1250 | vio_cmo_set_dma_ops(viodev); |
1243 | else | 1251 | else |
1244 | viodev->dev.archdata.dma_ops = &dma_iommu_ops; | 1252 | set_dma_ops(&viodev->dev, &dma_iommu_ops); |
1245 | set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev)); | 1253 | set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev)); |
1246 | set_dev_node(&viodev->dev, of_node_to_nid(of_node)); | 1254 | set_dev_node(&viodev->dev, of_node_to_nid(of_node)); |
1247 | 1255 | ||
@@ -1249,13 +1257,16 @@ struct vio_dev *vio_register_device_node(struct device_node *of_node) | |||
1249 | viodev->dev.parent = &vio_bus_device.dev; | 1257 | viodev->dev.parent = &vio_bus_device.dev; |
1250 | viodev->dev.bus = &vio_bus_type; | 1258 | viodev->dev.bus = &vio_bus_type; |
1251 | viodev->dev.release = vio_dev_release; | 1259 | viodev->dev.release = vio_dev_release; |
1260 | /* needed to ensure proper operation of coherent allocations | ||
1261 | * later, in case driver doesn't set it explicitly */ | ||
1262 | dma_set_mask(&viodev->dev, DMA_BIT_MASK(64)); | ||
1263 | dma_set_coherent_mask(&viodev->dev, DMA_BIT_MASK(64)); | ||
1252 | 1264 | ||
1253 | /* register with generic device framework */ | 1265 | /* register with generic device framework */ |
1254 | if (device_register(&viodev->dev)) { | 1266 | if (device_register(&viodev->dev)) { |
1255 | printk(KERN_ERR "%s: failed to register device %s\n", | 1267 | printk(KERN_ERR "%s: failed to register device %s\n", |
1256 | __func__, dev_name(&viodev->dev)); | 1268 | __func__, dev_name(&viodev->dev)); |
1257 | /* XXX free TCE table */ | 1269 | put_device(&viodev->dev); |
1258 | kfree(viodev); | ||
1259 | return NULL; | 1270 | return NULL; |
1260 | } | 1271 | } |
1261 | 1272 | ||
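
The vio_cmo_dealloc() change above is purely a readability cleanup: the nested min() calls become a single min3(), which picks the same value for any three operands. A minimal stand-alone comparison, with "headroom" standing in for (viodev->cmo.entitled - VIO_CMO_MIN_ENT) and invented numbers:

    /* Sketch showing the old nested-min form and min3 agree. */
    #include <stdio.h>

    #define MIN(a, b)      ((a) < (b) ? (a) : (b))
    #define MIN3(a, b, c)  MIN((a), MIN((b), (c)))

    int main(void)
    {
        unsigned long spare_needed = 64, reserve_freed = 48, headroom = 80;
        unsigned long old_way = MIN(spare_needed, MIN(reserve_freed, headroom));
        unsigned long new_way = MIN3(spare_needed, reserve_freed, headroom);

        printf("old=%lu new=%lu\n", old_way, new_way);   /* both print 48 */
        return 0;
    }
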
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 8a0deefac08d..920276c0f6a1 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S | |||
@@ -160,7 +160,7 @@ SECTIONS | |||
160 | INIT_RAM_FS | 160 | INIT_RAM_FS |
161 | } | 161 | } |
162 | 162 | ||
163 | PERCPU(PAGE_SIZE) | 163 | PERCPU_SECTION(L1_CACHE_BYTES) |
164 | 164 | ||
165 | . = ALIGN(8); | 165 | . = ALIGN(8); |
166 | .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) { | 166 | .machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) { |
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c index 73c0a3f64ed1..da3a1225c0ac 100644 --- a/arch/powerpc/kvm/44x.c +++ b/arch/powerpc/kvm/44x.c | |||
@@ -43,7 +43,7 @@ int kvmppc_core_check_processor_compat(void) | |||
43 | { | 43 | { |
44 | int r; | 44 | int r; |
45 | 45 | ||
46 | if (strcmp(cur_cpu_spec->platform, "ppc440") == 0) | 46 | if (strncmp(cur_cpu_spec->platform, "ppc440", 6) == 0) |
47 | r = 0; | 47 | r = 0; |
48 | else | 48 | else |
49 | r = -ENOTSUPP; | 49 | r = -ENOTSUPP; |
@@ -72,6 +72,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | |||
72 | /* Since the guest can directly access the timebase, it must know the | 72 | /* Since the guest can directly access the timebase, it must know the |
73 | * real timebase frequency. Accordingly, it must see the state of | 73 | * real timebase frequency. Accordingly, it must see the state of |
74 | * CCR1[TCS]. */ | 74 | * CCR1[TCS]. */ |
75 | /* XXX CCR1 doesn't exist on all 440 SoCs. */ | ||
75 | vcpu->arch.ccr1 = mfspr(SPRN_CCR1); | 76 | vcpu->arch.ccr1 = mfspr(SPRN_CCR1); |
76 | 77 | ||
77 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) | 78 | for (i = 0; i < ARRAY_SIZE(vcpu_44x->shadow_refs); i++) |
@@ -106,6 +107,16 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, | |||
106 | return 0; | 107 | return 0; |
107 | } | 108 | } |
108 | 109 | ||
110 | void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
111 | { | ||
112 | kvmppc_get_sregs_ivor(vcpu, sregs); | ||
113 | } | ||
114 | |||
115 | int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
116 | { | ||
117 | return kvmppc_set_sregs_ivor(vcpu, sregs); | ||
118 | } | ||
119 | |||
109 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | 120 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) |
110 | { | 121 | { |
111 | struct kvmppc_vcpu_44x *vcpu_44x; | 122 | struct kvmppc_vcpu_44x *vcpu_44x; |
@@ -123,8 +134,14 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
123 | if (err) | 134 | if (err) |
124 | goto free_vcpu; | 135 | goto free_vcpu; |
125 | 136 | ||
137 | vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); | ||
138 | if (!vcpu->arch.shared) | ||
139 | goto uninit_vcpu; | ||
140 | |||
126 | return vcpu; | 141 | return vcpu; |
127 | 142 | ||
143 | uninit_vcpu: | ||
144 | kvm_vcpu_uninit(vcpu); | ||
128 | free_vcpu: | 145 | free_vcpu: |
129 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); | 146 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); |
130 | out: | 147 | out: |
@@ -135,6 +152,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |||
135 | { | 152 | { |
136 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | 153 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); |
137 | 154 | ||
155 | free_page((unsigned long)vcpu->arch.shared); | ||
138 | kvm_vcpu_uninit(vcpu); | 156 | kvm_vcpu_uninit(vcpu); |
139 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); | 157 | kmem_cache_free(kvm_vcpu_cache, vcpu_44x); |
140 | } | 158 | } |
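
The kvm/44x.c change above allocates a zeroed shared page during vcpu creation and extends the existing goto-unwind error path with a new uninit_vcpu label, so a failure releases exactly what was already set up; vcpu_free gains the matching free_page(). The user-space analogue below is a hedged sketch of that unwind pattern only; all names and sizes in it are illustrative.

    /* Hedged, user-space analogue of the goto-unwind pattern extended above. */
    #include <stdio.h>
    #include <stdlib.h>

    struct vcpu_demo {
        char *core_state;
        char *shared_page;
    };

    static struct vcpu_demo *vcpu_demo_create(void)
    {
        struct vcpu_demo *v = malloc(sizeof(*v));
        if (!v)
            goto out;

        v->core_state = malloc(64);
        if (!v->core_state)
            goto free_vcpu;

        v->shared_page = calloc(1, 4096);   /* mirrors __get_free_page(__GFP_ZERO) */
        if (!v->shared_page)
            goto uninit_vcpu;

        return v;

    uninit_vcpu:
        free(v->core_state);
    free_vcpu:
        free(v);
    out:
        return NULL;
    }

    int main(void)
    {
        struct vcpu_demo *v = vcpu_demo_create();
        printf("create %s\n", v ? "succeeded" : "failed");
        if (v) {
            free(v->shared_page);           /* pairs with the create path */
            free(v->core_state);
            free(v);
        }
        return 0;
    }
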
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c index 65ea083a5b27..549bb2c9a47a 100644 --- a/arch/powerpc/kvm/44x_emulate.c +++ b/arch/powerpc/kvm/44x_emulate.c | |||
@@ -158,7 +158,6 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
158 | emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); | 158 | emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); |
159 | } | 159 | } |
160 | 160 | ||
161 | kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); | ||
162 | return emulated; | 161 | return emulated; |
163 | } | 162 | } |
164 | 163 | ||
@@ -179,7 +178,6 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
179 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); | 178 | emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); |
180 | } | 179 | } |
181 | 180 | ||
182 | kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); | ||
183 | return emulated; | 181 | return emulated; |
184 | } | 182 | } |
185 | 183 | ||
diff --git a/arch/powerpc/kvm/44x_tlb.c b/arch/powerpc/kvm/44x_tlb.c index 9b9b5cdea840..5f3cff83e089 100644 --- a/arch/powerpc/kvm/44x_tlb.c +++ b/arch/powerpc/kvm/44x_tlb.c | |||
@@ -47,6 +47,7 @@ | |||
47 | #ifdef DEBUG | 47 | #ifdef DEBUG |
48 | void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) | 48 | void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu) |
49 | { | 49 | { |
50 | struct kvmppc_vcpu_44x *vcpu_44x = to_44x(vcpu); | ||
50 | struct kvmppc_44x_tlbe *tlbe; | 51 | struct kvmppc_44x_tlbe *tlbe; |
51 | int i; | 52 | int i; |
52 | 53 | ||
@@ -221,14 +222,14 @@ gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index, | |||
221 | 222 | ||
222 | int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) | 223 | int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) |
223 | { | 224 | { |
224 | unsigned int as = !!(vcpu->arch.msr & MSR_IS); | 225 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); |
225 | 226 | ||
226 | return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); | 227 | return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); |
227 | } | 228 | } |
228 | 229 | ||
229 | int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) | 230 | int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) |
230 | { | 231 | { |
231 | unsigned int as = !!(vcpu->arch.msr & MSR_DS); | 232 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); |
232 | 233 | ||
233 | return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); | 234 | return kvmppc_44x_tlb_index(vcpu, eaddr, vcpu->arch.pid, as); |
234 | } | 235 | } |
@@ -354,7 +355,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr, | |||
354 | 355 | ||
355 | stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf); | 356 | stlbe.word1 = (hpaddr & 0xfffffc00) | ((hpaddr >> 32) & 0xf); |
356 | stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags, | 357 | stlbe.word2 = kvmppc_44x_tlb_shadow_attrib(flags, |
357 | vcpu->arch.msr & MSR_PR); | 358 | vcpu->arch.shared->msr & MSR_PR); |
358 | stlbe.tid = !(asid & 0xff); | 359 | stlbe.tid = !(asid & 0xff); |
359 | 360 | ||
360 | /* Keep track of the reference so we can properly release it later. */ | 361 | /* Keep track of the reference so we can properly release it later. */ |
@@ -423,7 +424,7 @@ static int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, | |||
423 | 424 | ||
424 | /* Does it match current guest AS? */ | 425 | /* Does it match current guest AS? */ |
425 | /* XXX what about IS != DS? */ | 426 | /* XXX what about IS != DS? */ |
426 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS)) | 427 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS)) |
427 | return 0; | 428 | return 0; |
428 | 429 | ||
429 | gpa = get_tlb_raddr(tlbe); | 430 | gpa = get_tlb_raddr(tlbe); |
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile index d45c818a384c..4d6863823f69 100644 --- a/arch/powerpc/kvm/Makefile +++ b/arch/powerpc/kvm/Makefile | |||
@@ -4,7 +4,7 @@ | |||
4 | 4 | ||
5 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror | 5 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror |
6 | 6 | ||
7 | EXTRA_CFLAGS += -Ivirt/kvm -Iarch/powerpc/kvm | 7 | ccflags-y := -Ivirt/kvm -Iarch/powerpc/kvm |
8 | 8 | ||
9 | common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) | 9 | common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o) |
10 | 10 | ||
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c index a3cef30d1d42..0f95b5cce033 100644 --- a/arch/powerpc/kvm/book3s.c +++ b/arch/powerpc/kvm/book3s.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/kvm_host.h> | 17 | #include <linux/kvm_host.h> |
18 | #include <linux/err.h> | 18 | #include <linux/err.h> |
19 | #include <linux/slab.h> | 19 | #include <linux/slab.h> |
20 | #include "trace.h" | ||
20 | 21 | ||
21 | #include <asm/reg.h> | 22 | #include <asm/reg.h> |
22 | #include <asm/cputable.h> | 23 | #include <asm/cputable.h> |
@@ -35,7 +36,6 @@ | |||
35 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU | 36 | #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU |
36 | 37 | ||
37 | /* #define EXIT_DEBUG */ | 38 | /* #define EXIT_DEBUG */ |
38 | /* #define EXIT_DEBUG_SIMPLE */ | ||
39 | /* #define DEBUG_EXT */ | 39 | /* #define DEBUG_EXT */ |
40 | 40 | ||
41 | static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, | 41 | static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, |
@@ -105,65 +105,71 @@ void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | |||
105 | kvmppc_giveup_ext(vcpu, MSR_VSX); | 105 | kvmppc_giveup_ext(vcpu, MSR_VSX); |
106 | } | 106 | } |
107 | 107 | ||
108 | #if defined(EXIT_DEBUG) | ||
109 | static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu) | ||
110 | { | ||
111 | u64 jd = mftb() - vcpu->arch.dec_jiffies; | ||
112 | return vcpu->arch.dec - jd; | ||
113 | } | ||
114 | #endif | ||
115 | |||
116 | static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) | 108 | static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu) |
117 | { | 109 | { |
118 | vcpu->arch.shadow_msr = vcpu->arch.msr; | 110 | ulong smsr = vcpu->arch.shared->msr; |
111 | |||
119 | /* Guest MSR values */ | 112 | /* Guest MSR values */ |
120 | vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | | 113 | smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_DE; |
121 | MSR_BE | MSR_DE; | ||
122 | /* Process MSR values */ | 114 | /* Process MSR values */ |
123 | vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | | 115 | smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE; |
124 | MSR_EE; | ||
125 | /* External providers the guest reserved */ | 116 | /* External providers the guest reserved */ |
126 | vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext); | 117 | smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext); |
127 | /* 64-bit Process MSR values */ | 118 | /* 64-bit Process MSR values */ |
128 | #ifdef CONFIG_PPC_BOOK3S_64 | 119 | #ifdef CONFIG_PPC_BOOK3S_64 |
129 | vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV; | 120 | smsr |= MSR_ISF | MSR_HV; |
130 | #endif | 121 | #endif |
122 | vcpu->arch.shadow_msr = smsr; | ||
131 | } | 123 | } |
132 | 124 | ||
133 | void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) | 125 | void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) |
134 | { | 126 | { |
135 | ulong old_msr = vcpu->arch.msr; | 127 | ulong old_msr = vcpu->arch.shared->msr; |
136 | 128 | ||
137 | #ifdef EXIT_DEBUG | 129 | #ifdef EXIT_DEBUG |
138 | printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); | 130 | printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr); |
139 | #endif | 131 | #endif |
140 | 132 | ||
141 | msr &= to_book3s(vcpu)->msr_mask; | 133 | msr &= to_book3s(vcpu)->msr_mask; |
142 | vcpu->arch.msr = msr; | 134 | vcpu->arch.shared->msr = msr; |
143 | kvmppc_recalc_shadow_msr(vcpu); | 135 | kvmppc_recalc_shadow_msr(vcpu); |
144 | 136 | ||
145 | if (msr & (MSR_WE|MSR_POW)) { | 137 | if (msr & MSR_POW) { |
146 | if (!vcpu->arch.pending_exceptions) { | 138 | if (!vcpu->arch.pending_exceptions) { |
147 | kvm_vcpu_block(vcpu); | 139 | kvm_vcpu_block(vcpu); |
148 | vcpu->stat.halt_wakeup++; | 140 | vcpu->stat.halt_wakeup++; |
141 | |||
142 | /* Unset POW bit after we woke up */ | ||
143 | msr &= ~MSR_POW; | ||
144 | vcpu->arch.shared->msr = msr; | ||
149 | } | 145 | } |
150 | } | 146 | } |
151 | 147 | ||
152 | if ((vcpu->arch.msr & (MSR_PR|MSR_IR|MSR_DR)) != | 148 | if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) != |
153 | (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { | 149 | (old_msr & (MSR_PR|MSR_IR|MSR_DR))) { |
154 | kvmppc_mmu_flush_segments(vcpu); | 150 | kvmppc_mmu_flush_segments(vcpu); |
155 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); | 151 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); |
152 | |||
153 | /* Preload magic page segment when in kernel mode */ | ||
154 | if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) { | ||
155 | struct kvm_vcpu_arch *a = &vcpu->arch; | ||
156 | |||
157 | if (msr & MSR_DR) | ||
158 | kvmppc_mmu_map_segment(vcpu, a->magic_page_ea); | ||
159 | else | ||
160 | kvmppc_mmu_map_segment(vcpu, a->magic_page_pa); | ||
161 | } | ||
156 | } | 162 | } |
157 | 163 | ||
158 | /* Preload FPU if it's enabled */ | 164 | /* Preload FPU if it's enabled */ |
159 | if (vcpu->arch.msr & MSR_FP) | 165 | if (vcpu->arch.shared->msr & MSR_FP) |
160 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); | 166 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
161 | } | 167 | } |
162 | 168 | ||
163 | void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) | 169 | void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags) |
164 | { | 170 | { |
165 | vcpu->arch.srr0 = kvmppc_get_pc(vcpu); | 171 | vcpu->arch.shared->srr0 = kvmppc_get_pc(vcpu); |
166 | vcpu->arch.srr1 = vcpu->arch.msr | flags; | 172 | vcpu->arch.shared->srr1 = vcpu->arch.shared->msr | flags; |
167 | kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec); | 173 | kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec); |
168 | vcpu->arch.mmu.reset_msr(vcpu); | 174 | vcpu->arch.mmu.reset_msr(vcpu); |
169 | } | 175 | } |
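The rewritten kvmppc_recalc_shadow_msr() builds the MSR the host really runs with in a local variable and stores it once: guest-controlled debug/FP-exception bits are kept, host-critical bits are forced on, and any facility already handed to the guest (guest_owned_ext) keeps its guest setting. A hedged userspace sketch of that three-step composition; the bit values and mask names below are placeholders, not the real MSR encodings:

#include <stdint.h>
#include <stdio.h>

/* Placeholder bit values; real MSR encodings differ per CPU family. */
#define MSR_FE0 (1u << 11)
#define MSR_FE1 (1u << 8)
#define MSR_SE  (1u << 10)
#define MSR_BE  (1u << 9)
#define MSR_EE  (1u << 15)
#define MSR_PR  (1u << 14)
#define MSR_IR  (1u << 5)
#define MSR_DR  (1u << 4)
#define MSR_ME  (1u << 12)
#define MSR_RI  (1u << 1)
#define MSR_FP  (1u << 13)

/* Bits the guest may control in the MSR the host actually runs with. */
#define GUEST_VISIBLE (MSR_FE0 | MSR_FE1 | MSR_SE | MSR_BE)
/* Bits the host always forces on so it keeps control of the machine. */
#define HOST_FORCED   (MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE)

static uint64_t recalc_shadow_msr(uint64_t guest_msr, uint64_t guest_owned_ext)
{
	uint64_t smsr = guest_msr & GUEST_VISIBLE;

	smsr |= HOST_FORCED;
	/* Facilities (e.g. FP) handed to the guest keep their guest bit. */
	smsr |= guest_msr & guest_owned_ext;
	return smsr;
}

int main(void)
{
	uint64_t guest_msr = MSR_EE | MSR_FP | MSR_SE;

	printf("shadow msr = 0x%llx\n",
	       (unsigned long long)recalc_shadow_msr(guest_msr, MSR_FP));
	return 0;
}

Computing into a local and assigning once also keeps the vcpu's shadow_msr from ever being observed in a half-built state.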
@@ -180,6 +186,7 @@ static int kvmppc_book3s_vec2irqprio(unsigned int vec) | |||
180 | case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break; | 186 | case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break; |
181 | case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break; | 187 | case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break; |
182 | case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break; | 188 | case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break; |
189 | case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break; | ||
183 | case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break; | 190 | case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break; |
184 | case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break; | 191 | case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break; |
185 | case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break; | 192 | case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break; |
@@ -199,6 +206,9 @@ static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu, | |||
199 | { | 206 | { |
200 | clear_bit(kvmppc_book3s_vec2irqprio(vec), | 207 | clear_bit(kvmppc_book3s_vec2irqprio(vec), |
201 | &vcpu->arch.pending_exceptions); | 208 | &vcpu->arch.pending_exceptions); |
209 | |||
210 | if (!vcpu->arch.pending_exceptions) | ||
211 | vcpu->arch.shared->int_pending = 0; | ||
202 | } | 212 | } |
203 | 213 | ||
204 | void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) | 214 | void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec) |
@@ -226,7 +236,7 @@ void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu) | |||
226 | 236 | ||
227 | int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) | 237 | int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu) |
228 | { | 238 | { |
229 | return test_bit(BOOK3S_INTERRUPT_DECREMENTER >> 7, &vcpu->arch.pending_exceptions); | 239 | return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions); |
230 | } | 240 | } |
231 | 241 | ||
232 | void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) | 242 | void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) |
@@ -237,13 +247,19 @@ void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) | |||
237 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, | 247 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, |
238 | struct kvm_interrupt *irq) | 248 | struct kvm_interrupt *irq) |
239 | { | 249 | { |
240 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); | 250 | unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL; |
251 | |||
252 | if (irq->irq == KVM_INTERRUPT_SET_LEVEL) | ||
253 | vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL; | ||
254 | |||
255 | kvmppc_book3s_queue_irqprio(vcpu, vec); | ||
241 | } | 256 | } |
242 | 257 | ||
243 | void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, | 258 | void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, |
244 | struct kvm_interrupt *irq) | 259 | struct kvm_interrupt *irq) |
245 | { | 260 | { |
246 | kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); | 261 | kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL); |
262 | kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL); | ||
247 | } | 263 | } |
248 | 264 | ||
249 | int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) | 265 | int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) |
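The external-interrupt hunks introduce a second priority for level-triggered lines: KVM_INTERRUPT_SET_LEVEL queues EXTERNAL_LEVEL instead of EXTERNAL, and a dequeue clears both flavours since userspace may have injected either. A small sketch of that bookkeeping, using stand-in enum values rather than the real priority numbers:

#include <stdio.h>

/* Simplified stand-ins for the priority numbers and ioctl arguments. */
enum irqprio { PRIO_EXTERNAL, PRIO_EXTERNAL_LEVEL, PRIO_MAX };
enum { KVM_INTERRUPT_SET, KVM_INTERRUPT_SET_LEVEL, KVM_INTERRUPT_UNSET };

static unsigned long pending;

static void queue_external(int irq_kind)
{
	int prio = PRIO_EXTERNAL;

	/* Level-triggered lines get their own priority so userspace,
	 * not interrupt delivery, is what clears them. */
	if (irq_kind == KVM_INTERRUPT_SET_LEVEL)
		prio = PRIO_EXTERNAL_LEVEL;
	pending |= 1ul << prio;
}

static void dequeue_external(void)
{
	/* An UNSET must drop both flavours. */
	pending &= ~((1ul << PRIO_EXTERNAL) | (1ul << PRIO_EXTERNAL_LEVEL));
}

int main(void)
{
	queue_external(KVM_INTERRUPT_SET_LEVEL);
	printf("pending after set: 0x%lx\n", pending);
	dequeue_external();
	printf("pending after unset: 0x%lx\n", pending);
	return 0;
}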
@@ -251,14 +267,29 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) | |||
251 | int deliver = 1; | 267 | int deliver = 1; |
252 | int vec = 0; | 268 | int vec = 0; |
253 | ulong flags = 0ULL; | 269 | ulong flags = 0ULL; |
270 | ulong crit_raw = vcpu->arch.shared->critical; | ||
271 | ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); | ||
272 | bool crit; | ||
273 | |||
274 | /* Truncate crit indicators in 32 bit mode */ | ||
275 | if (!(vcpu->arch.shared->msr & MSR_SF)) { | ||
276 | crit_raw &= 0xffffffff; | ||
277 | crit_r1 &= 0xffffffff; | ||
278 | } | ||
279 | |||
280 | /* Critical section when crit == r1 */ | ||
281 | crit = (crit_raw == crit_r1); | ||
282 | /* ... and we're in supervisor mode */ | ||
283 | crit = crit && !(vcpu->arch.shared->msr & MSR_PR); | ||
254 | 284 | ||
255 | switch (priority) { | 285 | switch (priority) { |
256 | case BOOK3S_IRQPRIO_DECREMENTER: | 286 | case BOOK3S_IRQPRIO_DECREMENTER: |
257 | deliver = vcpu->arch.msr & MSR_EE; | 287 | deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; |
258 | vec = BOOK3S_INTERRUPT_DECREMENTER; | 288 | vec = BOOK3S_INTERRUPT_DECREMENTER; |
259 | break; | 289 | break; |
260 | case BOOK3S_IRQPRIO_EXTERNAL: | 290 | case BOOK3S_IRQPRIO_EXTERNAL: |
261 | deliver = vcpu->arch.msr & MSR_EE; | 291 | case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: |
292 | deliver = (vcpu->arch.shared->msr & MSR_EE) && !crit; | ||
262 | vec = BOOK3S_INTERRUPT_EXTERNAL; | 293 | vec = BOOK3S_INTERRUPT_EXTERNAL; |
263 | break; | 294 | break; |
264 | case BOOK3S_IRQPRIO_SYSTEM_RESET: | 295 | case BOOK3S_IRQPRIO_SYSTEM_RESET: |
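The new "crit" check in kvmppc_book3s_irqprio_deliver() holds back decrementer and external interrupts while the guest is inside a paravirtual critical section: the guest writes r1 into the shared page's critical field, and delivery is suppressed as long as that value still equals the live r1 and the guest is in supervisor mode, with both values truncated to 32 bits when MSR[SF] is clear. A standalone sketch of that predicate, again with placeholder bit positions:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define MSR_SF (1ull << 63)   /* 64-bit mode; placeholder position */
#define MSR_PR (1ull << 14)   /* problem (user) state; placeholder position */
#define MSR_EE (1ull << 15)   /* external interrupts enabled; placeholder */

/*
 * The guest marks a critical section by copying r1 into the shared
 * page's critical field; while the two match (and the guest is in
 * supervisor mode) asynchronous interrupts are held back.
 */
static bool in_critical_section(uint64_t msr, uint64_t crit_raw, uint64_t r1)
{
	/* In 32-bit mode only the low word of both values is meaningful. */
	if (!(msr & MSR_SF)) {
		crit_raw &= 0xffffffffull;
		r1 &= 0xffffffffull;
	}
	return crit_raw == r1 && !(msr & MSR_PR);
}

static bool can_deliver_external(uint64_t msr, uint64_t crit_raw, uint64_t r1)
{
	return (msr & MSR_EE) && !in_critical_section(msr, crit_raw, r1);
}

int main(void)
{
	uint64_t r1 = 0xc0000000deadbe00ull;

	printf("deliver (in crit)?  %d\n",
	       (int)can_deliver_external(MSR_SF | MSR_EE, r1, r1));
	printf("deliver (no crit)?  %d\n",
	       (int)can_deliver_external(MSR_SF | MSR_EE, 0, r1));
	return 0;
}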
@@ -320,9 +351,27 @@ int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority) | |||
320 | return deliver; | 351 | return deliver; |
321 | } | 352 | } |
322 | 353 | ||
354 | /* | ||
355 | * This function determines if an irqprio should be cleared once issued. | ||
356 | */ | ||
357 | static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority) | ||
358 | { | ||
359 | switch (priority) { | ||
360 | case BOOK3S_IRQPRIO_DECREMENTER: | ||
361 | /* DEC interrupts get cleared by mtdec */ | ||
362 | return false; | ||
363 | case BOOK3S_IRQPRIO_EXTERNAL_LEVEL: | ||
364 | /* External interrupts get cleared by userspace */ | ||
365 | return false; | ||
366 | } | ||
367 | |||
368 | return true; | ||
369 | } | ||
370 | |||
323 | void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) | 371 | void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) |
324 | { | 372 | { |
325 | unsigned long *pending = &vcpu->arch.pending_exceptions; | 373 | unsigned long *pending = &vcpu->arch.pending_exceptions; |
374 | unsigned long old_pending = vcpu->arch.pending_exceptions; | ||
326 | unsigned int priority; | 375 | unsigned int priority; |
327 | 376 | ||
328 | #ifdef EXIT_DEBUG | 377 | #ifdef EXIT_DEBUG |
@@ -332,8 +381,7 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) | |||
332 | priority = __ffs(*pending); | 381 | priority = __ffs(*pending); |
333 | while (priority < BOOK3S_IRQPRIO_MAX) { | 382 | while (priority < BOOK3S_IRQPRIO_MAX) { |
334 | if (kvmppc_book3s_irqprio_deliver(vcpu, priority) && | 383 | if (kvmppc_book3s_irqprio_deliver(vcpu, priority) && |
335 | (priority != BOOK3S_IRQPRIO_DECREMENTER)) { | 384 | clear_irqprio(vcpu, priority)) { |
336 | /* DEC interrupts get cleared by mtdec */ | ||
337 | clear_bit(priority, &vcpu->arch.pending_exceptions); | 385 | clear_bit(priority, &vcpu->arch.pending_exceptions); |
338 | break; | 386 | break; |
339 | } | 387 | } |
@@ -342,6 +390,12 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) | |||
342 | BITS_PER_BYTE * sizeof(*pending), | 390 | BITS_PER_BYTE * sizeof(*pending), |
343 | priority + 1); | 391 | priority + 1); |
344 | } | 392 | } |
393 | |||
394 | /* Tell the guest about our interrupt status */ | ||
395 | if (*pending) | ||
396 | vcpu->arch.shared->int_pending = 1; | ||
397 | else if (old_pending) | ||
398 | vcpu->arch.shared->int_pending = 0; | ||
345 | } | 399 | } |
346 | 400 | ||
347 | void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) | 401 | void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) |
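After the delivery loop, the patch mirrors the pending state into the shared page so the guest can poll int_pending without a hypercall; the field is only written when its value actually changes. A trivial sketch of that update, with a stand-in struct in place of the real shared page:

#include <stdio.h>

/* Stand-in for the shared-page field the guest polls. */
struct shared { int int_pending; };

static void deliver_pending(unsigned long *pending, struct shared *sh,
			    unsigned long old_pending)
{
	/* ... the priority loop would run here, clearing delivered bits ... */

	/* Tell the guest whether anything is still outstanding, but only
	 * touch the field when the answer changed, to keep writes cheap. */
	if (*pending)
		sh->int_pending = 1;
	else if (old_pending)
		sh->int_pending = 0;
}

int main(void)
{
	struct shared sh = { 0 };
	unsigned long pending = 0, old = 1;

	deliver_pending(&pending, &sh, old);
	printf("int_pending = %d\n", sh.int_pending);
	return 0;
}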
@@ -398,6 +452,25 @@ void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr) | |||
398 | } | 452 | } |
399 | } | 453 | } |
400 | 454 | ||
455 | pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) | ||
456 | { | ||
457 | ulong mp_pa = vcpu->arch.magic_page_pa; | ||
458 | |||
459 | /* Magic page override */ | ||
460 | if (unlikely(mp_pa) && | ||
461 | unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) == | ||
462 | ((mp_pa & PAGE_MASK) & KVM_PAM))) { | ||
463 | ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK; | ||
464 | pfn_t pfn; | ||
465 | |||
466 | pfn = (pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT; | ||
467 | get_page(pfn_to_page(pfn)); | ||
468 | return pfn; | ||
469 | } | ||
470 | |||
471 | return gfn_to_pfn(vcpu->kvm, gfn); | ||
472 | } | ||
473 | |||
401 | /* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To | 474 | /* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To |
402 | * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to | 475 | * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to |
403 | * emulate 32 bytes dcbz length. | 476 | * emulate 32 bytes dcbz length. |
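kvmppc_gfn_to_pfn() adds a "magic page" override: when the faulting guest frame is the one the guest registered as its paravirtual page, the host returns the pfn backing its own shared struct instead of resolving the gfn through a memslot. A hedged userspace model of that redirect; slot_gfn_to_pfn() and shared_page_pfn() are hypothetical stand-ins for gfn_to_pfn() and the shared-page lookup, and the masking is simplified:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_MASK  (~((1ull << PAGE_SHIFT) - 1))

typedef uint64_t gfn_t;
typedef uint64_t pfn_t;

/* Hypothetical stand-ins for the real helpers. */
static pfn_t slot_gfn_to_pfn(gfn_t gfn) { return gfn + 0x100000; }
static pfn_t shared_page_pfn(void)      { return 0x42; }

/*
 * If the guest mapped the paravirtual magic page at mp_pa, a fault on
 * that frame is redirected to the host page backing the shared struct
 * instead of whatever memslot the address would normally fall into.
 */
static pfn_t gfn_to_pfn_with_magic(gfn_t gfn, uint64_t mp_pa)
{
	if (mp_pa && (gfn << PAGE_SHIFT) == (mp_pa & PAGE_MASK))
		return shared_page_pfn();

	return slot_gfn_to_pfn(gfn);
}

int main(void)
{
	uint64_t mp_pa = 0x3000;

	printf("magic  -> pfn 0x%llx\n",
	       (unsigned long long)gfn_to_pfn_with_magic(0x3, mp_pa));
	printf("normal -> pfn 0x%llx\n",
	       (unsigned long long)gfn_to_pfn_with_magic(0x5, mp_pa));
	return 0;
}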
@@ -415,8 +488,10 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) | |||
415 | int i; | 488 | int i; |
416 | 489 | ||
417 | hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT); | 490 | hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT); |
418 | if (is_error_page(hpage)) | 491 | if (is_error_page(hpage)) { |
492 | kvm_release_page_clean(hpage); | ||
419 | return; | 493 | return; |
494 | } | ||
420 | 495 | ||
421 | hpage_offset = pte->raddr & ~PAGE_MASK; | 496 | hpage_offset = pte->raddr & ~PAGE_MASK; |
422 | hpage_offset &= ~0xFFFULL; | 497 | hpage_offset &= ~0xFFFULL; |
@@ -437,14 +512,14 @@ static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte) | |||
437 | static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, | 512 | static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data, |
438 | struct kvmppc_pte *pte) | 513 | struct kvmppc_pte *pte) |
439 | { | 514 | { |
440 | int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR)); | 515 | int relocated = (vcpu->arch.shared->msr & (data ? MSR_DR : MSR_IR)); |
441 | int r; | 516 | int r; |
442 | 517 | ||
443 | if (relocated) { | 518 | if (relocated) { |
444 | r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data); | 519 | r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data); |
445 | } else { | 520 | } else { |
446 | pte->eaddr = eaddr; | 521 | pte->eaddr = eaddr; |
447 | pte->raddr = eaddr & 0xffffffff; | 522 | pte->raddr = eaddr & KVM_PAM; |
448 | pte->vpage = VSID_REAL | eaddr >> 12; | 523 | pte->vpage = VSID_REAL | eaddr >> 12; |
449 | pte->may_read = true; | 524 | pte->may_read = true; |
450 | pte->may_write = true; | 525 | pte->may_write = true; |
@@ -533,6 +608,13 @@ mmio: | |||
533 | 608 | ||
534 | static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) | 609 | static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn) |
535 | { | 610 | { |
611 | ulong mp_pa = vcpu->arch.magic_page_pa; | ||
612 | |||
613 | if (unlikely(mp_pa) && | ||
614 | unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) { | ||
615 | return 1; | ||
616 | } | ||
617 | |||
536 | return kvm_is_visible_gfn(vcpu->kvm, gfn); | 618 | return kvm_is_visible_gfn(vcpu->kvm, gfn); |
537 | } | 619 | } |
538 | 620 | ||
@@ -545,8 +627,8 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
545 | int page_found = 0; | 627 | int page_found = 0; |
546 | struct kvmppc_pte pte; | 628 | struct kvmppc_pte pte; |
547 | bool is_mmio = false; | 629 | bool is_mmio = false; |
548 | bool dr = (vcpu->arch.msr & MSR_DR) ? true : false; | 630 | bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false; |
549 | bool ir = (vcpu->arch.msr & MSR_IR) ? true : false; | 631 | bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false; |
550 | u64 vsid; | 632 | u64 vsid; |
551 | 633 | ||
552 | relocated = data ? dr : ir; | 634 | relocated = data ? dr : ir; |
@@ -558,12 +640,12 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
558 | pte.may_execute = true; | 640 | pte.may_execute = true; |
559 | pte.may_read = true; | 641 | pte.may_read = true; |
560 | pte.may_write = true; | 642 | pte.may_write = true; |
561 | pte.raddr = eaddr & 0xffffffff; | 643 | pte.raddr = eaddr & KVM_PAM; |
562 | pte.eaddr = eaddr; | 644 | pte.eaddr = eaddr; |
563 | pte.vpage = eaddr >> 12; | 645 | pte.vpage = eaddr >> 12; |
564 | } | 646 | } |
565 | 647 | ||
566 | switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { | 648 | switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { |
567 | case 0: | 649 | case 0: |
568 | pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); | 650 | pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12)); |
569 | break; | 651 | break; |
@@ -571,7 +653,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
571 | case MSR_IR: | 653 | case MSR_IR: |
572 | vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); | 654 | vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid); |
573 | 655 | ||
574 | if ((vcpu->arch.msr & (MSR_DR|MSR_IR)) == MSR_DR) | 656 | if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR) |
575 | pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12)); | 657 | pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12)); |
576 | else | 658 | else |
577 | pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12)); | 659 | pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12)); |
@@ -594,20 +676,23 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
594 | 676 | ||
595 | if (page_found == -ENOENT) { | 677 | if (page_found == -ENOENT) { |
596 | /* Page not found in guest PTE entries */ | 678 | /* Page not found in guest PTE entries */ |
597 | vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); | 679 | vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); |
598 | to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr; | 680 | vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr; |
599 | vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); | 681 | vcpu->arch.shared->msr |= |
682 | (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); | ||
600 | kvmppc_book3s_queue_irqprio(vcpu, vec); | 683 | kvmppc_book3s_queue_irqprio(vcpu, vec); |
601 | } else if (page_found == -EPERM) { | 684 | } else if (page_found == -EPERM) { |
602 | /* Storage protection */ | 685 | /* Storage protection */ |
603 | vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); | 686 | vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); |
604 | to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE; | 687 | vcpu->arch.shared->dsisr = |
605 | to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT; | 688 | to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE; |
606 | vcpu->arch.msr |= (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); | 689 | vcpu->arch.shared->dsisr |= DSISR_PROTFAULT; |
690 | vcpu->arch.shared->msr |= | ||
691 | (to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL); | ||
607 | kvmppc_book3s_queue_irqprio(vcpu, vec); | 692 | kvmppc_book3s_queue_irqprio(vcpu, vec); |
608 | } else if (page_found == -EINVAL) { | 693 | } else if (page_found == -EINVAL) { |
609 | /* Page not found in guest SLB */ | 694 | /* Page not found in guest SLB */ |
610 | vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); | 695 | vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); |
611 | kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); | 696 | kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80); |
612 | } else if (!is_mmio && | 697 | } else if (!is_mmio && |
613 | kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { | 698 | kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) { |
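The page-fault hunk now records the fault details (DAR, DSISR, the SRR1 fault bits folded into the MSR) in the shared page rather than in private vcpu/book3s fields, so the guest can read them without trapping. A compact sketch of how the three failure cases populate those fields; the DSISR/SRR1 constants here are placeholders, only the ENOENT/EPERM structure mirrors the hunk:

#include <stdint.h>
#include <stdio.h>
#include <errno.h>

/* Simplified stand-in for the registers now living in the shared page. */
struct shared_regs {
	uint64_t msr;
	uint64_t dar;    /* faulting data address */
	uint32_t dsisr;  /* fault reason bits */
};

#define DSISR_NOHPTE    0x40000000u            /* placeholder values */
#define DSISR_PROTFAULT 0x08000000u
#define SRR1_FAULT_BITS 0x00000000f8000000ull

static void report_fault(struct shared_regs *sh, int page_found,
			 uint64_t fault_dar, uint32_t fault_dsisr,
			 uint64_t shadow_srr1)
{
	sh->dar = fault_dar;

	if (page_found == -ENOENT) {
		/* No guest PTE: hand the raw DSISR to the guest. */
		sh->dsisr = fault_dsisr;
	} else if (page_found == -EPERM) {
		/* Guest PTE exists but forbids the access. */
		sh->dsisr = fault_dsisr & ~DSISR_NOHPTE;
		sh->dsisr |= DSISR_PROTFAULT;
	}
	sh->msr |= shadow_srr1 & SRR1_FAULT_BITS;
	/* ...followed by queueing the storage interrupt for delivery. */
}

int main(void)
{
	struct shared_regs sh = { 0 };

	report_fault(&sh, -EPERM, 0xdead0000, DSISR_NOHPTE, 0x40000000ull);
	printf("dar=0x%llx dsisr=0x%x\n",
	       (unsigned long long)sh.dar, sh.dsisr);
	return 0;
}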
@@ -695,9 +780,11 @@ static int kvmppc_read_inst(struct kvm_vcpu *vcpu) | |||
695 | 780 | ||
696 | ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); | 781 | ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false); |
697 | if (ret == -ENOENT) { | 782 | if (ret == -ENOENT) { |
698 | vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1); | 783 | ulong msr = vcpu->arch.shared->msr; |
699 | vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0); | 784 | |
700 | vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0); | 785 | msr = kvmppc_set_field(msr, 33, 33, 1); |
786 | msr = kvmppc_set_field(msr, 34, 36, 0); | ||
787 | vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0); | ||
701 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE); | 788 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE); |
702 | return EMULATE_AGAIN; | 789 | return EMULATE_AGAIN; |
703 | } | 790 | } |
@@ -736,7 +823,7 @@ static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr, | |||
736 | if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) | 823 | if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) |
737 | return RESUME_GUEST; | 824 | return RESUME_GUEST; |
738 | 825 | ||
739 | if (!(vcpu->arch.msr & msr)) { | 826 | if (!(vcpu->arch.shared->msr & msr)) { |
740 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | 827 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
741 | return RESUME_GUEST; | 828 | return RESUME_GUEST; |
742 | } | 829 | } |
@@ -796,16 +883,8 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
796 | 883 | ||
797 | run->exit_reason = KVM_EXIT_UNKNOWN; | 884 | run->exit_reason = KVM_EXIT_UNKNOWN; |
798 | run->ready_for_interrupt_injection = 1; | 885 | run->ready_for_interrupt_injection = 1; |
799 | #ifdef EXIT_DEBUG | 886 | |
800 | printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n", | 887 | trace_kvm_book3s_exit(exit_nr, vcpu); |
801 | exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu), | ||
802 | kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1); | ||
803 | #elif defined (EXIT_DEBUG_SIMPLE) | ||
804 | if ((exit_nr != 0x900) && (exit_nr != 0x500)) | ||
805 | printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n", | ||
806 | exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu), | ||
807 | vcpu->arch.msr); | ||
808 | #endif | ||
809 | kvm_resched(vcpu); | 888 | kvm_resched(vcpu); |
810 | switch (exit_nr) { | 889 | switch (exit_nr) { |
811 | case BOOK3S_INTERRUPT_INST_STORAGE: | 890 | case BOOK3S_INTERRUPT_INST_STORAGE: |
@@ -836,9 +915,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
836 | kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); | 915 | kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); |
837 | r = RESUME_GUEST; | 916 | r = RESUME_GUEST; |
838 | } else { | 917 | } else { |
839 | vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000; | 918 | vcpu->arch.shared->msr |= |
919 | to_svcpu(vcpu)->shadow_srr1 & 0x58000000; | ||
840 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | 920 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
841 | kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL); | ||
842 | r = RESUME_GUEST; | 921 | r = RESUME_GUEST; |
843 | } | 922 | } |
844 | break; | 923 | break; |
@@ -861,17 +940,16 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
861 | if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) { | 940 | if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) { |
862 | r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); | 941 | r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr); |
863 | } else { | 942 | } else { |
864 | vcpu->arch.dear = dar; | 943 | vcpu->arch.shared->dar = dar; |
865 | to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr; | 944 | vcpu->arch.shared->dsisr = to_svcpu(vcpu)->fault_dsisr; |
866 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | 945 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
867 | kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFUL); | ||
868 | r = RESUME_GUEST; | 946 | r = RESUME_GUEST; |
869 | } | 947 | } |
870 | break; | 948 | break; |
871 | } | 949 | } |
872 | case BOOK3S_INTERRUPT_DATA_SEGMENT: | 950 | case BOOK3S_INTERRUPT_DATA_SEGMENT: |
873 | if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { | 951 | if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) { |
874 | vcpu->arch.dear = kvmppc_get_fault_dar(vcpu); | 952 | vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu); |
875 | kvmppc_book3s_queue_irqprio(vcpu, | 953 | kvmppc_book3s_queue_irqprio(vcpu, |
876 | BOOK3S_INTERRUPT_DATA_SEGMENT); | 954 | BOOK3S_INTERRUPT_DATA_SEGMENT); |
877 | } | 955 | } |
@@ -904,7 +982,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
904 | program_interrupt: | 982 | program_interrupt: |
905 | flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull; | 983 | flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull; |
906 | 984 | ||
907 | if (vcpu->arch.msr & MSR_PR) { | 985 | if (vcpu->arch.shared->msr & MSR_PR) { |
908 | #ifdef EXIT_DEBUG | 986 | #ifdef EXIT_DEBUG |
909 | printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); | 987 | printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu)); |
910 | #endif | 988 | #endif |
@@ -941,10 +1019,10 @@ program_interrupt: | |||
941 | break; | 1019 | break; |
942 | } | 1020 | } |
943 | case BOOK3S_INTERRUPT_SYSCALL: | 1021 | case BOOK3S_INTERRUPT_SYSCALL: |
944 | // XXX make user settable | ||
945 | if (vcpu->arch.osi_enabled && | 1022 | if (vcpu->arch.osi_enabled && |
946 | (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) && | 1023 | (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) && |
947 | (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) { | 1024 | (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) { |
1025 | /* MOL hypercalls */ | ||
948 | u64 *gprs = run->osi.gprs; | 1026 | u64 *gprs = run->osi.gprs; |
949 | int i; | 1027 | int i; |
950 | 1028 | ||
@@ -953,8 +1031,13 @@ program_interrupt: | |||
953 | gprs[i] = kvmppc_get_gpr(vcpu, i); | 1031 | gprs[i] = kvmppc_get_gpr(vcpu, i); |
954 | vcpu->arch.osi_needed = 1; | 1032 | vcpu->arch.osi_needed = 1; |
955 | r = RESUME_HOST_NV; | 1033 | r = RESUME_HOST_NV; |
956 | 1034 | } else if (!(vcpu->arch.shared->msr & MSR_PR) && | |
1035 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { | ||
1036 | /* KVM PV hypercalls */ | ||
1037 | kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); | ||
1038 | r = RESUME_GUEST; | ||
957 | } else { | 1039 | } else { |
1040 | /* Guest syscalls */ | ||
958 | vcpu->stat.syscall_exits++; | 1041 | vcpu->stat.syscall_exits++; |
959 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | 1042 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
960 | r = RESUME_GUEST; | 1043 | r = RESUME_GUEST; |
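The syscall-exit hunk now distinguishes three cases: a MOL/OSI hypercall (magic values in r3/r4, bounced to userspace), a KVM paravirtual hypercall (magic value in r0, only honoured when the guest is in kernel mode), and an ordinary guest syscall reflected back as an interrupt. A sketch of that classification; the MSR_PR bit position and the magic constant below are placeholders, not the real definitions:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

#define MSR_PR          (1u << 14)   /* placeholder bit position */
#define KVM_SC_MAGIC_R0 0x6b564d21u  /* placeholder magic value */

enum action { HANDLE_OSI, HANDLE_KVM_PV, REFLECT_TO_GUEST };

/*
 * Three ways out of a guest 'sc': an OSI hypercall (r3/r4 magics,
 * handled in userspace), a KVM paravirtual hypercall (r0 magic, only
 * from guest kernel mode), or an ordinary syscall reflected to the
 * guest's own handler.
 */
static enum action classify_syscall(uint64_t msr, uint32_t r0,
				    bool osi_match)
{
	if (osi_match)
		return HANDLE_OSI;
	if (!(msr & MSR_PR) && r0 == KVM_SC_MAGIC_R0)
		return HANDLE_KVM_PV;
	return REFLECT_TO_GUEST;
}

int main(void)
{
	printf("%d\n", classify_syscall(0, KVM_SC_MAGIC_R0, false));
	printf("%d\n", classify_syscall(MSR_PR, KVM_SC_MAGIC_R0, false));
	return 0;
}

The MSR_PR check is what keeps guest user space from issuing host hypercalls directly.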
@@ -989,9 +1072,9 @@ program_interrupt: | |||
989 | } | 1072 | } |
990 | case BOOK3S_INTERRUPT_ALIGNMENT: | 1073 | case BOOK3S_INTERRUPT_ALIGNMENT: |
991 | if (kvmppc_read_inst(vcpu) == EMULATE_DONE) { | 1074 | if (kvmppc_read_inst(vcpu) == EMULATE_DONE) { |
992 | to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu, | 1075 | vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu, |
993 | kvmppc_get_last_inst(vcpu)); | 1076 | kvmppc_get_last_inst(vcpu)); |
994 | vcpu->arch.dear = kvmppc_alignment_dar(vcpu, | 1077 | vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu, |
995 | kvmppc_get_last_inst(vcpu)); | 1078 | kvmppc_get_last_inst(vcpu)); |
996 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); | 1079 | kvmppc_book3s_queue_irqprio(vcpu, exit_nr); |
997 | } | 1080 | } |
@@ -1031,9 +1114,7 @@ program_interrupt: | |||
1031 | } | 1114 | } |
1032 | } | 1115 | } |
1033 | 1116 | ||
1034 | #ifdef EXIT_DEBUG | 1117 | trace_kvm_book3s_reenter(r, vcpu); |
1035 | printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n", vcpu, kvmppc_get_pc(vcpu), r); | ||
1036 | #endif | ||
1037 | 1118 | ||
1038 | return r; | 1119 | return r; |
1039 | } | 1120 | } |
@@ -1052,17 +1133,18 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
1052 | regs->ctr = kvmppc_get_ctr(vcpu); | 1133 | regs->ctr = kvmppc_get_ctr(vcpu); |
1053 | regs->lr = kvmppc_get_lr(vcpu); | 1134 | regs->lr = kvmppc_get_lr(vcpu); |
1054 | regs->xer = kvmppc_get_xer(vcpu); | 1135 | regs->xer = kvmppc_get_xer(vcpu); |
1055 | regs->msr = vcpu->arch.msr; | 1136 | regs->msr = vcpu->arch.shared->msr; |
1056 | regs->srr0 = vcpu->arch.srr0; | 1137 | regs->srr0 = vcpu->arch.shared->srr0; |
1057 | regs->srr1 = vcpu->arch.srr1; | 1138 | regs->srr1 = vcpu->arch.shared->srr1; |
1058 | regs->pid = vcpu->arch.pid; | 1139 | regs->pid = vcpu->arch.pid; |
1059 | regs->sprg0 = vcpu->arch.sprg0; | 1140 | regs->sprg0 = vcpu->arch.shared->sprg0; |
1060 | regs->sprg1 = vcpu->arch.sprg1; | 1141 | regs->sprg1 = vcpu->arch.shared->sprg1; |
1061 | regs->sprg2 = vcpu->arch.sprg2; | 1142 | regs->sprg2 = vcpu->arch.shared->sprg2; |
1062 | regs->sprg3 = vcpu->arch.sprg3; | 1143 | regs->sprg3 = vcpu->arch.shared->sprg3; |
1063 | regs->sprg5 = vcpu->arch.sprg4; | 1144 | regs->sprg4 = vcpu->arch.sprg4; |
1064 | regs->sprg6 = vcpu->arch.sprg5; | 1145 | regs->sprg5 = vcpu->arch.sprg5; |
1065 | regs->sprg7 = vcpu->arch.sprg6; | 1146 | regs->sprg6 = vcpu->arch.sprg6; |
1147 | regs->sprg7 = vcpu->arch.sprg7; | ||
1066 | 1148 | ||
1067 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 1149 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
1068 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); | 1150 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); |
@@ -1080,15 +1162,16 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
1080 | kvmppc_set_lr(vcpu, regs->lr); | 1162 | kvmppc_set_lr(vcpu, regs->lr); |
1081 | kvmppc_set_xer(vcpu, regs->xer); | 1163 | kvmppc_set_xer(vcpu, regs->xer); |
1082 | kvmppc_set_msr(vcpu, regs->msr); | 1164 | kvmppc_set_msr(vcpu, regs->msr); |
1083 | vcpu->arch.srr0 = regs->srr0; | 1165 | vcpu->arch.shared->srr0 = regs->srr0; |
1084 | vcpu->arch.srr1 = regs->srr1; | 1166 | vcpu->arch.shared->srr1 = regs->srr1; |
1085 | vcpu->arch.sprg0 = regs->sprg0; | 1167 | vcpu->arch.shared->sprg0 = regs->sprg0; |
1086 | vcpu->arch.sprg1 = regs->sprg1; | 1168 | vcpu->arch.shared->sprg1 = regs->sprg1; |
1087 | vcpu->arch.sprg2 = regs->sprg2; | 1169 | vcpu->arch.shared->sprg2 = regs->sprg2; |
1088 | vcpu->arch.sprg3 = regs->sprg3; | 1170 | vcpu->arch.shared->sprg3 = regs->sprg3; |
1089 | vcpu->arch.sprg5 = regs->sprg4; | 1171 | vcpu->arch.sprg4 = regs->sprg4; |
1090 | vcpu->arch.sprg6 = regs->sprg5; | 1172 | vcpu->arch.sprg5 = regs->sprg5; |
1091 | vcpu->arch.sprg7 = regs->sprg6; | 1173 | vcpu->arch.sprg6 = regs->sprg6; |
1174 | vcpu->arch.sprg7 = regs->sprg7; | ||
1092 | 1175 | ||
1093 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 1176 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
1094 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); | 1177 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); |
@@ -1111,10 +1194,9 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | |||
1111 | sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv; | 1194 | sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv; |
1112 | } | 1195 | } |
1113 | } else { | 1196 | } else { |
1114 | for (i = 0; i < 16; i++) { | 1197 | for (i = 0; i < 16; i++) |
1115 | sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw; | 1198 | sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i]; |
1116 | sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw; | 1199 | |
1117 | } | ||
1118 | for (i = 0; i < 8; i++) { | 1200 | for (i = 0; i < 8; i++) { |
1119 | sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; | 1201 | sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw; |
1120 | sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; | 1202 | sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw; |
@@ -1225,13 +1307,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
1225 | struct kvmppc_vcpu_book3s *vcpu_book3s; | 1307 | struct kvmppc_vcpu_book3s *vcpu_book3s; |
1226 | struct kvm_vcpu *vcpu; | 1308 | struct kvm_vcpu *vcpu; |
1227 | int err = -ENOMEM; | 1309 | int err = -ENOMEM; |
1310 | unsigned long p; | ||
1228 | 1311 | ||
1229 | vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s)); | 1312 | vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s)); |
1230 | if (!vcpu_book3s) | 1313 | if (!vcpu_book3s) |
1231 | goto out; | 1314 | goto out; |
1232 | 1315 | ||
1233 | memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s)); | ||
1234 | |||
1235 | vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *) | 1316 | vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *) |
1236 | kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL); | 1317 | kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL); |
1237 | if (!vcpu_book3s->shadow_vcpu) | 1318 | if (!vcpu_book3s->shadow_vcpu) |
@@ -1242,6 +1323,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
1242 | if (err) | 1323 | if (err) |
1243 | goto free_shadow_vcpu; | 1324 | goto free_shadow_vcpu; |
1244 | 1325 | ||
1326 | p = __get_free_page(GFP_KERNEL|__GFP_ZERO); | ||
1327 | /* the real shared page fills the last 4k of our page */ | ||
1328 | vcpu->arch.shared = (void*)(p + PAGE_SIZE - 4096); | ||
1329 | if (!p) | ||
1330 | goto uninit_vcpu; | ||
1331 | |||
1245 | vcpu->arch.host_retip = kvm_return_point; | 1332 | vcpu->arch.host_retip = kvm_return_point; |
1246 | vcpu->arch.host_msr = mfmsr(); | 1333 | vcpu->arch.host_msr = mfmsr(); |
1247 | #ifdef CONFIG_PPC_BOOK3S_64 | 1334 | #ifdef CONFIG_PPC_BOOK3S_64 |
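vcpu creation now allocates one host page and places the shared struct in its last 4 KiB: the guest only ever maps a 4 KiB magic page, so on hosts with larger pages the shared data must sit entirely inside the final 4 KiB of the allocation. A userspace model of that placement, with calloc() standing in for __get_free_page() and an arbitrary 64 KiB host page size as the assumption:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define HOST_PAGE_SIZE 65536   /* assumed 64 KiB host page for illustration */
#define SHARED_SIZE    4096    /* the guest-visible magic page is 4 KiB */

struct shared_regs { uint64_t msr, srr0, srr1; /* ... */ };

/*
 * Place the shared struct in the *last* 4 KiB of the allocated page so a
 * single 4 KiB guest mapping covers all of it; with 4 KiB host pages the
 * offset is simply zero.
 */
static struct shared_regs *alloc_shared(void **page_out)
{
	void *page = calloc(1, HOST_PAGE_SIZE);

	if (!page)
		return NULL;
	*page_out = page;
	return (struct shared_regs *)((char *)page + HOST_PAGE_SIZE
				      - SHARED_SIZE);
}

int main(void)
{
	void *page;
	struct shared_regs *sh = alloc_shared(&page);

	if (!sh)
		return 1;
	printf("page %p, shared struct at %p\n", page, (void *)sh);
	free(page);   /* mirrors free_page((ulong)shared & PAGE_MASK) */
	return 0;
}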
@@ -1268,10 +1355,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
1268 | 1355 | ||
1269 | err = kvmppc_mmu_init(vcpu); | 1356 | err = kvmppc_mmu_init(vcpu); |
1270 | if (err < 0) | 1357 | if (err < 0) |
1271 | goto free_shadow_vcpu; | 1358 | goto uninit_vcpu; |
1272 | 1359 | ||
1273 | return vcpu; | 1360 | return vcpu; |
1274 | 1361 | ||
1362 | uninit_vcpu: | ||
1363 | kvm_vcpu_uninit(vcpu); | ||
1275 | free_shadow_vcpu: | 1364 | free_shadow_vcpu: |
1276 | kfree(vcpu_book3s->shadow_vcpu); | 1365 | kfree(vcpu_book3s->shadow_vcpu); |
1277 | free_vcpu: | 1366 | free_vcpu: |
@@ -1284,6 +1373,7 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |||
1284 | { | 1373 | { |
1285 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | 1374 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); |
1286 | 1375 | ||
1376 | free_page((unsigned long)vcpu->arch.shared & PAGE_MASK); | ||
1287 | kvm_vcpu_uninit(vcpu); | 1377 | kvm_vcpu_uninit(vcpu); |
1288 | kfree(vcpu_book3s->shadow_vcpu); | 1378 | kfree(vcpu_book3s->shadow_vcpu); |
1289 | vfree(vcpu_book3s); | 1379 | vfree(vcpu_book3s); |
@@ -1346,7 +1436,7 @@ int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
1346 | local_irq_enable(); | 1436 | local_irq_enable(); |
1347 | 1437 | ||
1348 | /* Preload FPU if it's enabled */ | 1438 | /* Preload FPU if it's enabled */ |
1349 | if (vcpu->arch.msr & MSR_FP) | 1439 | if (vcpu->arch.shared->msr & MSR_FP) |
1350 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); | 1440 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
1351 | 1441 | ||
1352 | ret = __kvmppc_vcpu_entry(kvm_run, vcpu); | 1442 | ret = __kvmppc_vcpu_entry(kvm_run, vcpu); |
diff --git a/arch/powerpc/kvm/book3s_32_mmu.c b/arch/powerpc/kvm/book3s_32_mmu.c index 3292d76101d2..c8cefdd15fd8 100644 --- a/arch/powerpc/kvm/book3s_32_mmu.c +++ b/arch/powerpc/kvm/book3s_32_mmu.c | |||
@@ -58,14 +58,39 @@ static inline bool check_debug_ip(struct kvm_vcpu *vcpu) | |||
58 | #endif | 58 | #endif |
59 | } | 59 | } |
60 | 60 | ||
61 | static inline u32 sr_vsid(u32 sr_raw) | ||
62 | { | ||
63 | return sr_raw & 0x0fffffff; | ||
64 | } | ||
65 | |||
66 | static inline bool sr_valid(u32 sr_raw) | ||
67 | { | ||
68 | return (sr_raw & 0x80000000) ? false : true; | ||
69 | } | ||
70 | |||
71 | static inline bool sr_ks(u32 sr_raw) | ||
72 | { | ||
73 | return (sr_raw & 0x40000000) ? true: false; | ||
74 | } | ||
75 | |||
76 | static inline bool sr_kp(u32 sr_raw) | ||
77 | { | ||
78 | return (sr_raw & 0x20000000) ? true: false; | ||
79 | } | ||
80 | |||
81 | static inline bool sr_nx(u32 sr_raw) | ||
82 | { | ||
83 | return (sr_raw & 0x10000000) ? true: false; | ||
84 | } | ||
85 | |||
61 | static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, | 86 | static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, |
62 | struct kvmppc_pte *pte, bool data); | 87 | struct kvmppc_pte *pte, bool data); |
63 | static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | 88 | static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, |
64 | u64 *vsid); | 89 | u64 *vsid); |
65 | 90 | ||
66 | static struct kvmppc_sr *find_sr(struct kvmppc_vcpu_book3s *vcpu_book3s, gva_t eaddr) | 91 | static u32 find_sr(struct kvm_vcpu *vcpu, gva_t eaddr) |
67 | { | 92 | { |
68 | return &vcpu_book3s->sr[(eaddr >> 28) & 0xf]; | 93 | return vcpu->arch.shared->sr[(eaddr >> 28) & 0xf]; |
69 | } | 94 | } |
70 | 95 | ||
71 | static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, | 96 | static u64 kvmppc_mmu_book3s_32_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, |
@@ -87,7 +112,7 @@ static void kvmppc_mmu_book3s_32_reset_msr(struct kvm_vcpu *vcpu) | |||
87 | } | 112 | } |
88 | 113 | ||
89 | static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s, | 114 | static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3s, |
90 | struct kvmppc_sr *sre, gva_t eaddr, | 115 | u32 sre, gva_t eaddr, |
91 | bool primary) | 116 | bool primary) |
92 | { | 117 | { |
93 | u32 page, hash, pteg, htabmask; | 118 | u32 page, hash, pteg, htabmask; |
@@ -96,7 +121,7 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3 | |||
96 | page = (eaddr & 0x0FFFFFFF) >> 12; | 121 | page = (eaddr & 0x0FFFFFFF) >> 12; |
97 | htabmask = ((vcpu_book3s->sdr1 & 0x1FF) << 16) | 0xFFC0; | 122 | htabmask = ((vcpu_book3s->sdr1 & 0x1FF) << 16) | 0xFFC0; |
98 | 123 | ||
99 | hash = ((sre->vsid ^ page) << 6); | 124 | hash = ((sr_vsid(sre) ^ page) << 6); |
100 | if (!primary) | 125 | if (!primary) |
101 | hash = ~hash; | 126 | hash = ~hash; |
102 | hash &= htabmask; | 127 | hash &= htabmask; |
@@ -104,8 +129,8 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3 | |||
104 | pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash; | 129 | pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash; |
105 | 130 | ||
106 | dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n", | 131 | dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n", |
107 | vcpu_book3s->vcpu.arch.pc, eaddr, vcpu_book3s->sdr1, pteg, | 132 | kvmppc_get_pc(&vcpu_book3s->vcpu), eaddr, vcpu_book3s->sdr1, pteg, |
108 | sre->vsid); | 133 | sr_vsid(sre)); |
109 | 134 | ||
110 | r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT); | 135 | r = gfn_to_hva(vcpu_book3s->vcpu.kvm, pteg >> PAGE_SHIFT); |
111 | if (kvm_is_error_hva(r)) | 136 | if (kvm_is_error_hva(r)) |
@@ -113,10 +138,9 @@ static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvmppc_vcpu_book3s *vcpu_book3 | |||
113 | return r | (pteg & ~PAGE_MASK); | 138 | return r | (pteg & ~PAGE_MASK); |
114 | } | 139 | } |
115 | 140 | ||
116 | static u32 kvmppc_mmu_book3s_32_get_ptem(struct kvmppc_sr *sre, gva_t eaddr, | 141 | static u32 kvmppc_mmu_book3s_32_get_ptem(u32 sre, gva_t eaddr, bool primary) |
117 | bool primary) | ||
118 | { | 142 | { |
119 | return ((eaddr & 0x0fffffff) >> 22) | (sre->vsid << 7) | | 143 | return ((eaddr & 0x0fffffff) >> 22) | (sr_vsid(sre) << 7) | |
120 | (primary ? 0 : 0x40) | 0x80000000; | 144 | (primary ? 0 : 0x40) | 0x80000000; |
121 | } | 145 | } |
122 | 146 | ||
@@ -133,7 +157,7 @@ static int kvmppc_mmu_book3s_32_xlate_bat(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
133 | else | 157 | else |
134 | bat = &vcpu_book3s->ibat[i]; | 158 | bat = &vcpu_book3s->ibat[i]; |
135 | 159 | ||
136 | if (vcpu->arch.msr & MSR_PR) { | 160 | if (vcpu->arch.shared->msr & MSR_PR) { |
137 | if (!bat->vp) | 161 | if (!bat->vp) |
138 | continue; | 162 | continue; |
139 | } else { | 163 | } else { |
@@ -180,17 +204,17 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
180 | bool primary) | 204 | bool primary) |
181 | { | 205 | { |
182 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | 206 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); |
183 | struct kvmppc_sr *sre; | 207 | u32 sre; |
184 | hva_t ptegp; | 208 | hva_t ptegp; |
185 | u32 pteg[16]; | 209 | u32 pteg[16]; |
186 | u32 ptem = 0; | 210 | u32 ptem = 0; |
187 | int i; | 211 | int i; |
188 | int found = 0; | 212 | int found = 0; |
189 | 213 | ||
190 | sre = find_sr(vcpu_book3s, eaddr); | 214 | sre = find_sr(vcpu, eaddr); |
191 | 215 | ||
192 | dprintk_pte("SR 0x%lx: vsid=0x%x, raw=0x%x\n", eaddr >> 28, | 216 | dprintk_pte("SR 0x%lx: vsid=0x%x, raw=0x%x\n", eaddr >> 28, |
193 | sre->vsid, sre->raw); | 217 | sr_vsid(sre), sre); |
194 | 218 | ||
195 | pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); | 219 | pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); |
196 | 220 | ||
@@ -214,8 +238,8 @@ static int kvmppc_mmu_book3s_32_xlate_pte(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
214 | pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF); | 238 | pte->raddr = (pteg[i+1] & ~(0xFFFULL)) | (eaddr & 0xFFF); |
215 | pp = pteg[i+1] & 3; | 239 | pp = pteg[i+1] & 3; |
216 | 240 | ||
217 | if ((sre->Kp && (vcpu->arch.msr & MSR_PR)) || | 241 | if ((sr_kp(sre) && (vcpu->arch.shared->msr & MSR_PR)) || |
218 | (sre->Ks && !(vcpu->arch.msr & MSR_PR))) | 242 | (sr_ks(sre) && !(vcpu->arch.shared->msr & MSR_PR))) |
219 | pp |= 4; | 243 | pp |= 4; |
220 | 244 | ||
221 | pte->may_write = false; | 245 | pte->may_write = false; |
@@ -269,7 +293,7 @@ no_page_found: | |||
269 | dprintk_pte("KVM MMU: No PTE found (sdr1=0x%llx ptegp=0x%lx)\n", | 293 | dprintk_pte("KVM MMU: No PTE found (sdr1=0x%llx ptegp=0x%lx)\n", |
270 | to_book3s(vcpu)->sdr1, ptegp); | 294 | to_book3s(vcpu)->sdr1, ptegp); |
271 | for (i=0; i<16; i+=2) { | 295 | for (i=0; i<16; i+=2) { |
272 | dprintk_pte(" %02d: 0x%x - 0x%x (0x%llx)\n", | 296 | dprintk_pte(" %02d: 0x%x - 0x%x (0x%x)\n", |
273 | i, pteg[i], pteg[i+1], ptem); | 297 | i, pteg[i], pteg[i+1], ptem); |
274 | } | 298 | } |
275 | } | 299 | } |
@@ -281,8 +305,24 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
281 | struct kvmppc_pte *pte, bool data) | 305 | struct kvmppc_pte *pte, bool data) |
282 | { | 306 | { |
283 | int r; | 307 | int r; |
308 | ulong mp_ea = vcpu->arch.magic_page_ea; | ||
284 | 309 | ||
285 | pte->eaddr = eaddr; | 310 | pte->eaddr = eaddr; |
311 | |||
312 | /* Magic page override */ | ||
313 | if (unlikely(mp_ea) && | ||
314 | unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && | ||
315 | !(vcpu->arch.shared->msr & MSR_PR)) { | ||
316 | pte->vpage = kvmppc_mmu_book3s_32_ea_to_vp(vcpu, eaddr, data); | ||
317 | pte->raddr = vcpu->arch.magic_page_pa | (pte->raddr & 0xfff); | ||
318 | pte->raddr &= KVM_PAM; | ||
319 | pte->may_execute = true; | ||
320 | pte->may_read = true; | ||
321 | pte->may_write = true; | ||
322 | |||
323 | return 0; | ||
324 | } | ||
325 | |||
286 | r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data); | 326 | r = kvmppc_mmu_book3s_32_xlate_bat(vcpu, eaddr, pte, data); |
287 | if (r < 0) | 327 | if (r < 0) |
288 | r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true); | 328 | r = kvmppc_mmu_book3s_32_xlate_pte(vcpu, eaddr, pte, data, true); |
@@ -295,30 +335,13 @@ static int kvmppc_mmu_book3s_32_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
295 | 335 | ||
296 | static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum) | 336 | static u32 kvmppc_mmu_book3s_32_mfsrin(struct kvm_vcpu *vcpu, u32 srnum) |
297 | { | 337 | { |
298 | return to_book3s(vcpu)->sr[srnum].raw; | 338 | return vcpu->arch.shared->sr[srnum]; |
299 | } | 339 | } |
300 | 340 | ||
301 | static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, | 341 | static void kvmppc_mmu_book3s_32_mtsrin(struct kvm_vcpu *vcpu, u32 srnum, |
302 | ulong value) | 342 | ulong value) |
303 | { | 343 | { |
304 | struct kvmppc_sr *sre; | 344 | vcpu->arch.shared->sr[srnum] = value; |
305 | |||
306 | sre = &to_book3s(vcpu)->sr[srnum]; | ||
307 | |||
308 | /* Flush any left-over shadows from the previous SR */ | ||
309 | |||
310 | /* XXX Not necessary? */ | ||
311 | /* kvmppc_mmu_pte_flush(vcpu, ((u64)sre->vsid) << 28, 0xf0000000ULL); */ | ||
312 | |||
313 | /* And then put in the new SR */ | ||
314 | sre->raw = value; | ||
315 | sre->vsid = (value & 0x0fffffff); | ||
316 | sre->valid = (value & 0x80000000) ? false : true; | ||
317 | sre->Ks = (value & 0x40000000) ? true : false; | ||
318 | sre->Kp = (value & 0x20000000) ? true : false; | ||
319 | sre->nx = (value & 0x10000000) ? true : false; | ||
320 | |||
321 | /* Map the new segment */ | ||
322 | kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT); | 345 | kvmppc_mmu_map_segment(vcpu, srnum << SID_SHIFT); |
323 | } | 346 | } |
324 | 347 | ||
@@ -331,19 +354,19 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
331 | u64 *vsid) | 354 | u64 *vsid) |
332 | { | 355 | { |
333 | ulong ea = esid << SID_SHIFT; | 356 | ulong ea = esid << SID_SHIFT; |
334 | struct kvmppc_sr *sr; | 357 | u32 sr; |
335 | u64 gvsid = esid; | 358 | u64 gvsid = esid; |
336 | 359 | ||
337 | if (vcpu->arch.msr & (MSR_DR|MSR_IR)) { | 360 | if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { |
338 | sr = find_sr(to_book3s(vcpu), ea); | 361 | sr = find_sr(vcpu, ea); |
339 | if (sr->valid) | 362 | if (sr_valid(sr)) |
340 | gvsid = sr->vsid; | 363 | gvsid = sr_vsid(sr); |
341 | } | 364 | } |
342 | 365 | ||
343 | /* In case we only have one of MSR_IR or MSR_DR set, let's put | 366 | /* In case we only have one of MSR_IR or MSR_DR set, let's put |
344 | that in the real-mode context (and hope RM doesn't access | 367 | that in the real-mode context (and hope RM doesn't access |
345 | high memory) */ | 368 | high memory) */ |
346 | switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { | 369 | switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { |
347 | case 0: | 370 | case 0: |
348 | *vsid = VSID_REAL | esid; | 371 | *vsid = VSID_REAL | esid; |
349 | break; | 372 | break; |
@@ -354,8 +377,8 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
354 | *vsid = VSID_REAL_DR | gvsid; | 377 | *vsid = VSID_REAL_DR | gvsid; |
355 | break; | 378 | break; |
356 | case MSR_DR|MSR_IR: | 379 | case MSR_DR|MSR_IR: |
357 | if (sr->valid) | 380 | if (sr_valid(sr)) |
358 | *vsid = sr->vsid; | 381 | *vsid = sr_vsid(sr); |
359 | else | 382 | else |
360 | *vsid = VSID_BAT | gvsid; | 383 | *vsid = VSID_BAT | gvsid; |
361 | break; | 384 | break; |
@@ -363,7 +386,7 @@ static int kvmppc_mmu_book3s_32_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
363 | BUG(); | 386 | BUG(); |
364 | } | 387 | } |
365 | 388 | ||
366 | if (vcpu->arch.msr & MSR_PR) | 389 | if (vcpu->arch.shared->msr & MSR_PR) |
367 | *vsid |= VSID_PR; | 390 | *vsid |= VSID_PR; |
368 | 391 | ||
369 | return 0; | 392 | return 0; |
diff --git a/arch/powerpc/kvm/book3s_32_mmu_host.c b/arch/powerpc/kvm/book3s_32_mmu_host.c index 0b51ef872c1e..9fecbfbce773 100644 --- a/arch/powerpc/kvm/book3s_32_mmu_host.c +++ b/arch/powerpc/kvm/book3s_32_mmu_host.c | |||
@@ -19,7 +19,6 @@ | |||
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/kvm_host.h> | 21 | #include <linux/kvm_host.h> |
22 | #include <linux/hash.h> | ||
23 | 22 | ||
24 | #include <asm/kvm_ppc.h> | 23 | #include <asm/kvm_ppc.h> |
25 | #include <asm/kvm_book3s.h> | 24 | #include <asm/kvm_book3s.h> |
@@ -77,7 +76,14 @@ void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) | |||
77 | * a hash, so we don't waste cycles on looping */ | 76 | * a hash, so we don't waste cycles on looping */ |
78 | static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) | 77 | static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) |
79 | { | 78 | { |
80 | return hash_64(gvsid, SID_MAP_BITS); | 79 | return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^ |
80 | ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^ | ||
81 | ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^ | ||
82 | ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^ | ||
83 | ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^ | ||
84 | ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^ | ||
85 | ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^ | ||
86 | ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK)); | ||
81 | } | 87 | } |
82 | 88 | ||
83 | 89 | ||
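The replacement for hash_64() in kvmppc_sid_hash() folds the guest VSID into a map index by XOR-ing successive fixed-width slices, avoiding the multiply that hash_64() needs; the unrolled XOR chain in the hunk is equivalent to the loop below. Sketch only, with a placeholder table size (SID_MAP_BITS = 9 is an assumption, not necessarily the kernel's value on every configuration):

#include <stdint.h>
#include <stdio.h>

/* Placeholder sizing: a 2^9-entry map, so 9 bits per fold step. */
#define SID_MAP_BITS 9
#define SID_MAP_MASK ((1u << SID_MAP_BITS) - 1)

/*
 * Fold a 64-bit guest VSID down to a map index by XOR-ing successive
 * SID_MAP_BITS-wide slices; no multiply needed, which is convenient
 * for code that may run in constrained (real-mode) contexts.
 */
static uint16_t sid_hash(uint64_t gvsid)
{
	uint16_t h = 0;
	int i;

	for (i = 0; i < 8; i++)
		h ^= (gvsid >> (SID_MAP_BITS * i)) & SID_MAP_MASK;
	return h;
}

int main(void)
{
	printf("hash(0x1234567) = 0x%x\n", sid_hash(0x1234567ull));
	return 0;
}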
@@ -86,7 +92,7 @@ static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) | |||
86 | struct kvmppc_sid_map *map; | 92 | struct kvmppc_sid_map *map; |
87 | u16 sid_map_mask; | 93 | u16 sid_map_mask; |
88 | 94 | ||
89 | if (vcpu->arch.msr & MSR_PR) | 95 | if (vcpu->arch.shared->msr & MSR_PR) |
90 | gvsid |= VSID_PR; | 96 | gvsid |= VSID_PR; |
91 | 97 | ||
92 | sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); | 98 | sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); |
@@ -147,8 +153,8 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) | |||
147 | struct hpte_cache *pte; | 153 | struct hpte_cache *pte; |
148 | 154 | ||
149 | /* Get host physical address for gpa */ | 155 | /* Get host physical address for gpa */ |
150 | hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); | 156 | hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); |
151 | if (kvm_is_error_hva(hpaddr)) { | 157 | if (is_error_pfn(hpaddr)) { |
152 | printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", | 158 | printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", |
153 | orig_pte->eaddr); | 159 | orig_pte->eaddr); |
154 | return -EINVAL; | 160 | return -EINVAL; |
@@ -253,7 +259,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) | |||
253 | u16 sid_map_mask; | 259 | u16 sid_map_mask; |
254 | static int backwards_map = 0; | 260 | static int backwards_map = 0; |
255 | 261 | ||
256 | if (vcpu->arch.msr & MSR_PR) | 262 | if (vcpu->arch.shared->msr & MSR_PR) |
257 | gvsid |= VSID_PR; | 263 | gvsid |= VSID_PR; |
258 | 264 | ||
259 | /* We might get collisions that trap in preceding order, so let's | 265 | /* We might get collisions that trap in preceding order, so let's |
@@ -269,18 +275,15 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) | |||
269 | backwards_map = !backwards_map; | 275 | backwards_map = !backwards_map; |
270 | 276 | ||
271 | /* Uh-oh ... out of mappings. Let's flush! */ | 277 | /* Uh-oh ... out of mappings. Let's flush! */ |
272 | if (vcpu_book3s->vsid_next >= vcpu_book3s->vsid_max) { | 278 | if (vcpu_book3s->vsid_next >= VSID_POOL_SIZE) { |
273 | vcpu_book3s->vsid_next = vcpu_book3s->vsid_first; | 279 | vcpu_book3s->vsid_next = 0; |
274 | memset(vcpu_book3s->sid_map, 0, | 280 | memset(vcpu_book3s->sid_map, 0, |
275 | sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); | 281 | sizeof(struct kvmppc_sid_map) * SID_MAP_NUM); |
276 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | 282 | kvmppc_mmu_pte_flush(vcpu, 0, 0); |
277 | kvmppc_mmu_flush_segments(vcpu); | 283 | kvmppc_mmu_flush_segments(vcpu); |
278 | } | 284 | } |
279 | map->host_vsid = vcpu_book3s->vsid_next; | 285 | map->host_vsid = vcpu_book3s->vsid_pool[vcpu_book3s->vsid_next]; |
280 | 286 | vcpu_book3s->vsid_next++; | |
281 | /* Would have to be 111 to be completely aligned with the rest of | ||
282 | Linux, but that is just way too little space! */ | ||
283 | vcpu_book3s->vsid_next+=1; | ||
284 | 287 | ||
285 | map->guest_vsid = gvsid; | 288 | map->guest_vsid = gvsid; |
286 | map->valid = true; | 289 | map->valid = true; |
@@ -327,40 +330,38 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) | |||
327 | 330 | ||
328 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) | 331 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) |
329 | { | 332 | { |
333 | int i; | ||
334 | |||
330 | kvmppc_mmu_hpte_destroy(vcpu); | 335 | kvmppc_mmu_hpte_destroy(vcpu); |
331 | preempt_disable(); | 336 | preempt_disable(); |
332 | __destroy_context(to_book3s(vcpu)->context_id); | 337 | for (i = 0; i < SID_CONTEXTS; i++) |
338 | __destroy_context(to_book3s(vcpu)->context_id[i]); | ||
333 | preempt_enable(); | 339 | preempt_enable(); |
334 | } | 340 | } |
335 | 341 | ||
336 | /* From mm/mmu_context_hash32.c */ | 342 | /* From mm/mmu_context_hash32.c */ |
337 | #define CTX_TO_VSID(ctx) (((ctx) * (897 * 16)) & 0xffffff) | 343 | #define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + (id * 0x111)) & 0xffffff) |
338 | 344 | ||
339 | int kvmppc_mmu_init(struct kvm_vcpu *vcpu) | 345 | int kvmppc_mmu_init(struct kvm_vcpu *vcpu) |
340 | { | 346 | { |
341 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); | 347 | struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu); |
342 | int err; | 348 | int err; |
343 | ulong sdr1; | 349 | ulong sdr1; |
350 | int i; | ||
351 | int j; | ||
344 | 352 | ||
345 | err = __init_new_context(); | 353 | for (i = 0; i < SID_CONTEXTS; i++) { |
346 | if (err < 0) | 354 | err = __init_new_context(); |
347 | return -1; | 355 | if (err < 0) |
348 | vcpu3s->context_id = err; | 356 | goto init_fail; |
349 | 357 | vcpu3s->context_id[i] = err; | |
350 | vcpu3s->vsid_max = CTX_TO_VSID(vcpu3s->context_id + 1) - 1; | ||
351 | vcpu3s->vsid_first = CTX_TO_VSID(vcpu3s->context_id); | ||
352 | |||
353 | #if 0 /* XXX still doesn't guarantee uniqueness */ | ||
354 | /* We could collide with the Linux vsid space because the vsid | ||
355 | * wraps around at 24 bits. We're safe if we do our own space | ||
356 | * though, so let's always set the highest bit. */ | ||
357 | 358 | ||
358 | vcpu3s->vsid_max |= 0x00800000; | 359 | /* Remember context id for this combination */ |
359 | vcpu3s->vsid_first |= 0x00800000; | 360 | for (j = 0; j < 16; j++) |
360 | #endif | 361 | vcpu3s->vsid_pool[(i * 16) + j] = CTX_TO_VSID(err, j); |
361 | BUG_ON(vcpu3s->vsid_max < vcpu3s->vsid_first); | 362 | } |
362 | 363 | ||
363 | vcpu3s->vsid_next = vcpu3s->vsid_first; | 364 | vcpu3s->vsid_next = 0; |
364 | 365 | ||
365 | /* Remember where the HTAB is */ | 366 | /* Remember where the HTAB is */ |
366 | asm ( "mfsdr1 %0" : "=r"(sdr1) ); | 367 | asm ( "mfsdr1 %0" : "=r"(sdr1) ); |
@@ -370,4 +371,14 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu) | |||
370 | kvmppc_mmu_hpte_init(vcpu); | 371 | kvmppc_mmu_hpte_init(vcpu); |
371 | 372 | ||
372 | return 0; | 373 | return 0; |
374 | |||
375 | init_fail: | ||
376 | for (j = 0; j < i; j++) { | ||
377 | if (!vcpu3s->context_id[j]) | ||
378 | continue; | ||
379 | |||
380 | __destroy_context(to_book3s(vcpu)->context_id[j]); | ||
381 | } | ||
382 | |||
383 | return -1; | ||
373 | } | 384 | } |
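The rework of kvmppc_mmu_init()/create_sid_map() replaces the single [vsid_first, vsid_max] range with a pool built from several MMU contexts: each context contributes 16 VSIDs via the same CTX_TO_VSID arithmetic the 32-bit hash MMU uses, and allocation simply walks the pool, flushing and wrapping when it runs dry. A sketch of that pool, with SID_CONTEXTS and the context IDs chosen arbitrarily for illustration (the CTX_TO_VSID formula itself is taken from the hunk above):

#include <stdio.h>
#include <stdint.h>

#define SID_CONTEXTS   4                    /* placeholder count */
#define VSID_POOL_SIZE (SID_CONTEXTS * 16)

/* Turn a context into 16 VSIDs; the + id*0x111 term spreads the segments
 * of one context apart, matching mm/mmu_context_hash32.c. */
#define CTX_TO_VSID(c, id) ((((c) * (897 * 16)) + ((id) * 0x111)) & 0xffffff)

static uint32_t vsid_pool[VSID_POOL_SIZE];
static int vsid_next;

static void fill_pool(const int *context_id)
{
	for (int i = 0; i < SID_CONTEXTS; i++)
		for (int j = 0; j < 16; j++)
			vsid_pool[i * 16 + j] = CTX_TO_VSID(context_id[i], j);
	vsid_next = 0;
}

static uint32_t alloc_vsid(void)
{
	if (vsid_next >= VSID_POOL_SIZE)
		vsid_next = 0;   /* the real code also flushes shadow state */
	return vsid_pool[vsid_next++];
}

int main(void)
{
	int ctx[SID_CONTEXTS] = { 10, 11, 12, 13 };

	fill_pool(ctx);
	printf("first vsid = 0x%x, second = 0x%x\n",
	       alloc_vsid(), alloc_vsid());
	return 0;
}

The error path in the hunk tears down only the contexts that were successfully created, which is why the pool is filled context by context rather than all at once.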
diff --git a/arch/powerpc/kvm/book3s_64_mmu.c b/arch/powerpc/kvm/book3s_64_mmu.c index 4025ea26b3c1..d7889ef3211e 100644 --- a/arch/powerpc/kvm/book3s_64_mmu.c +++ b/arch/powerpc/kvm/book3s_64_mmu.c | |||
@@ -163,6 +163,22 @@ static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, | |||
163 | bool found = false; | 163 | bool found = false; |
164 | bool perm_err = false; | 164 | bool perm_err = false; |
165 | int second = 0; | 165 | int second = 0; |
166 | ulong mp_ea = vcpu->arch.magic_page_ea; | ||
167 | |||
168 | /* Magic page override */ | ||
169 | if (unlikely(mp_ea) && | ||
170 | unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && | ||
171 | !(vcpu->arch.shared->msr & MSR_PR)) { | ||
172 | gpte->eaddr = eaddr; | ||
173 | gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); | ||
174 | gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff); | ||
175 | gpte->raddr &= KVM_PAM; | ||
176 | gpte->may_execute = true; | ||
177 | gpte->may_read = true; | ||
178 | gpte->may_write = true; | ||
179 | |||
180 | return 0; | ||
181 | } | ||
166 | 182 | ||
167 | slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr); | 183 | slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu_book3s, eaddr); |
168 | if (!slbe) | 184 | if (!slbe) |
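
Note: the block added at the top of the 64-bit xlate path short-circuits translation when the access hits the guest's magic page. A hedged sketch of just the match condition; the MSR_PR bit value is the usual Linux definition and is used here only for the demo.

    #include <stdio.h>
    #include <stdint.h>

    /* The access matches when it falls in the same 4k page as the magic
     * page EA and the guest is not in problem state (MSR_PR clear). */
    #define MSR_PR (1ULL << 14)

    static int is_magic_page_hit(uint64_t eaddr, uint64_t mp_ea, uint64_t msr)
    {
        return mp_ea &&
               ((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) &&
               !(msr & MSR_PR);
    }

    int main(void)
    {
        printf("%d\n", is_magic_page_hit(0xdead0123, 0xdead0000, 0));      /* 1 */
        printf("%d\n", is_magic_page_hit(0xdead0123, 0xdead0000, MSR_PR)); /* 0 */
        return 0;
    }
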
@@ -180,9 +196,9 @@ do_second: | |||
180 | goto no_page_found; | 196 | goto no_page_found; |
181 | } | 197 | } |
182 | 198 | ||
183 | if ((vcpu->arch.msr & MSR_PR) && slbe->Kp) | 199 | if ((vcpu->arch.shared->msr & MSR_PR) && slbe->Kp) |
184 | key = 4; | 200 | key = 4; |
185 | else if (!(vcpu->arch.msr & MSR_PR) && slbe->Ks) | 201 | else if (!(vcpu->arch.shared->msr & MSR_PR) && slbe->Ks) |
186 | key = 4; | 202 | key = 4; |
187 | 203 | ||
188 | for (i=0; i<16; i+=2) { | 204 | for (i=0; i<16; i+=2) { |
@@ -381,7 +397,7 @@ static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu) | |||
381 | for (i = 1; i < vcpu_book3s->slb_nr; i++) | 397 | for (i = 1; i < vcpu_book3s->slb_nr; i++) |
382 | vcpu_book3s->slb[i].valid = false; | 398 | vcpu_book3s->slb[i].valid = false; |
383 | 399 | ||
384 | if (vcpu->arch.msr & MSR_IR) { | 400 | if (vcpu->arch.shared->msr & MSR_IR) { |
385 | kvmppc_mmu_flush_segments(vcpu); | 401 | kvmppc_mmu_flush_segments(vcpu); |
386 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); | 402 | kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)); |
387 | } | 403 | } |
@@ -445,14 +461,15 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
445 | ulong ea = esid << SID_SHIFT; | 461 | ulong ea = esid << SID_SHIFT; |
446 | struct kvmppc_slb *slb; | 462 | struct kvmppc_slb *slb; |
447 | u64 gvsid = esid; | 463 | u64 gvsid = esid; |
464 | ulong mp_ea = vcpu->arch.magic_page_ea; | ||
448 | 465 | ||
449 | if (vcpu->arch.msr & (MSR_DR|MSR_IR)) { | 466 | if (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { |
450 | slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea); | 467 | slb = kvmppc_mmu_book3s_64_find_slbe(to_book3s(vcpu), ea); |
451 | if (slb) | 468 | if (slb) |
452 | gvsid = slb->vsid; | 469 | gvsid = slb->vsid; |
453 | } | 470 | } |
454 | 471 | ||
455 | switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) { | 472 | switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) { |
456 | case 0: | 473 | case 0: |
457 | *vsid = VSID_REAL | esid; | 474 | *vsid = VSID_REAL | esid; |
458 | break; | 475 | break; |
@@ -464,7 +481,7 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
464 | break; | 481 | break; |
465 | case MSR_DR|MSR_IR: | 482 | case MSR_DR|MSR_IR: |
466 | if (!slb) | 483 | if (!slb) |
467 | return -ENOENT; | 484 | goto no_slb; |
468 | 485 | ||
469 | *vsid = gvsid; | 486 | *vsid = gvsid; |
470 | break; | 487 | break; |
@@ -473,10 +490,21 @@ static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, | |||
473 | break; | 490 | break; |
474 | } | 491 | } |
475 | 492 | ||
476 | if (vcpu->arch.msr & MSR_PR) | 493 | if (vcpu->arch.shared->msr & MSR_PR) |
477 | *vsid |= VSID_PR; | 494 | *vsid |= VSID_PR; |
478 | 495 | ||
479 | return 0; | 496 | return 0; |
497 | |||
498 | no_slb: | ||
499 | /* Catch magic page case */ | ||
500 | if (unlikely(mp_ea) && | ||
501 | unlikely(esid == (mp_ea >> SID_SHIFT)) && | ||
502 | !(vcpu->arch.shared->msr & MSR_PR)) { | ||
503 | *vsid = VSID_REAL | esid; | ||
504 | return 0; | ||
505 | } | ||
506 | |||
507 | return -EINVAL; | ||
480 | } | 508 | } |
481 | 509 | ||
482 | static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu) | 510 | static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu) |
diff --git a/arch/powerpc/kvm/book3s_64_mmu_host.c b/arch/powerpc/kvm/book3s_64_mmu_host.c index 384179a5002b..fa2f08434ba5 100644 --- a/arch/powerpc/kvm/book3s_64_mmu_host.c +++ b/arch/powerpc/kvm/book3s_64_mmu_host.c | |||
@@ -20,7 +20,6 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/kvm_host.h> | 22 | #include <linux/kvm_host.h> |
23 | #include <linux/hash.h> | ||
24 | 23 | ||
25 | #include <asm/kvm_ppc.h> | 24 | #include <asm/kvm_ppc.h> |
26 | #include <asm/kvm_book3s.h> | 25 | #include <asm/kvm_book3s.h> |
@@ -28,24 +27,9 @@ | |||
28 | #include <asm/machdep.h> | 27 | #include <asm/machdep.h> |
29 | #include <asm/mmu_context.h> | 28 | #include <asm/mmu_context.h> |
30 | #include <asm/hw_irq.h> | 29 | #include <asm/hw_irq.h> |
30 | #include "trace.h" | ||
31 | 31 | ||
32 | #define PTE_SIZE 12 | 32 | #define PTE_SIZE 12 |
33 | #define VSID_ALL 0 | ||
34 | |||
35 | /* #define DEBUG_MMU */ | ||
36 | /* #define DEBUG_SLB */ | ||
37 | |||
38 | #ifdef DEBUG_MMU | ||
39 | #define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__) | ||
40 | #else | ||
41 | #define dprintk_mmu(a, ...) do { } while(0) | ||
42 | #endif | ||
43 | |||
44 | #ifdef DEBUG_SLB | ||
45 | #define dprintk_slb(a, ...) printk(KERN_INFO a, __VA_ARGS__) | ||
46 | #else | ||
47 | #define dprintk_slb(a, ...) do { } while(0) | ||
48 | #endif | ||
49 | 33 | ||
50 | void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) | 34 | void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) |
51 | { | 35 | { |
@@ -58,34 +42,39 @@ void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) | |||
58 | * a hash, so we don't waste cycles on looping */ | 42 | * a hash, so we don't waste cycles on looping */ |
59 | static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) | 43 | static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid) |
60 | { | 44 | { |
61 | return hash_64(gvsid, SID_MAP_BITS); | 45 | return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^ |
46 | ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^ | ||
47 | ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^ | ||
48 | ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^ | ||
49 | ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^ | ||
50 | ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^ | ||
51 | ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^ | ||
52 | ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK)); | ||
62 | } | 53 | } |
63 | 54 | ||
55 | |||
64 | static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) | 56 | static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid) |
65 | { | 57 | { |
66 | struct kvmppc_sid_map *map; | 58 | struct kvmppc_sid_map *map; |
67 | u16 sid_map_mask; | 59 | u16 sid_map_mask; |
68 | 60 | ||
69 | if (vcpu->arch.msr & MSR_PR) | 61 | if (vcpu->arch.shared->msr & MSR_PR) |
70 | gvsid |= VSID_PR; | 62 | gvsid |= VSID_PR; |
71 | 63 | ||
72 | sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); | 64 | sid_map_mask = kvmppc_sid_hash(vcpu, gvsid); |
73 | map = &to_book3s(vcpu)->sid_map[sid_map_mask]; | 65 | map = &to_book3s(vcpu)->sid_map[sid_map_mask]; |
74 | if (map->guest_vsid == gvsid) { | 66 | if (map->valid && (map->guest_vsid == gvsid)) { |
75 | dprintk_slb("SLB: Searching: 0x%llx -> 0x%llx\n", | 67 | trace_kvm_book3s_slb_found(gvsid, map->host_vsid); |
76 | gvsid, map->host_vsid); | ||
77 | return map; | 68 | return map; |
78 | } | 69 | } |
79 | 70 | ||
80 | map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; | 71 | map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask]; |
81 | if (map->guest_vsid == gvsid) { | 72 | if (map->valid && (map->guest_vsid == gvsid)) { |
82 | dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n", | 73 | trace_kvm_book3s_slb_found(gvsid, map->host_vsid); |
83 | gvsid, map->host_vsid); | ||
84 | return map; | 74 | return map; |
85 | } | 75 | } |
86 | 76 | ||
87 | dprintk_slb("SLB: Searching %d/%d: 0x%llx -> not found\n", | 77 | trace_kvm_book3s_slb_fail(sid_map_mask, gvsid); |
88 | sid_map_mask, SID_MAP_MASK - sid_map_mask, gvsid); | ||
89 | return NULL; | 78 | return NULL; |
90 | } | 79 | } |
91 | 80 | ||
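
Note: find_sid_vsid() now also requires map->valid, and kvmppc_sid_hash() open-codes an XOR fold instead of calling hash_64(). The unrolled eight-term XOR in the patch is equivalent to the loop below; the SID_MAP_BITS value here is an assumption for the demo rather than a figure quoted from the headers.

    #include <stdio.h>
    #include <stdint.h>

    #define SID_MAP_BITS  9
    #define SID_MAP_MASK  ((1 << SID_MAP_BITS) - 1)

    /* Fold the 64-bit gvsid into SID_MAP_BITS-wide chunks and XOR them. */
    static uint16_t sid_hash(uint64_t gvsid)
    {
        uint16_t h = 0;

        for (int i = 0; i < 8; i++)
            h ^= (gvsid >> (SID_MAP_BITS * i)) & SID_MAP_MASK;
        return h;
    }

    int main(void)
    {
        printf("hash(0x123456789abcdef) = 0x%x\n", sid_hash(0x123456789abcdefULL));
        return 0;
    }
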
@@ -101,18 +90,13 @@ int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte) | |||
101 | struct kvmppc_sid_map *map; | 90 | struct kvmppc_sid_map *map; |
102 | 91 | ||
103 | /* Get host physical address for gpa */ | 92 | /* Get host physical address for gpa */ |
104 | hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT); | 93 | hpaddr = kvmppc_gfn_to_pfn(vcpu, orig_pte->raddr >> PAGE_SHIFT); |
105 | if (kvm_is_error_hva(hpaddr)) { | 94 | if (is_error_pfn(hpaddr)) { |
106 | printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); | 95 | printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n", orig_pte->eaddr); |
107 | return -EINVAL; | 96 | return -EINVAL; |
108 | } | 97 | } |
109 | hpaddr <<= PAGE_SHIFT; | 98 | hpaddr <<= PAGE_SHIFT; |
110 | #if PAGE_SHIFT == 12 | 99 | hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK); |
111 | #elif PAGE_SHIFT == 16 | ||
112 | hpaddr |= orig_pte->raddr & 0xf000; | ||
113 | #else | ||
114 | #error Unknown page size | ||
115 | #endif | ||
116 | 100 | ||
117 | /* and write the mapping ea -> hpa into the pt */ | 101 | /* and write the mapping ea -> hpa into the pt */ |
118 | vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); | 102 | vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid); |
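
Note: the replacement for the PAGE_SHIFT #if ladder keeps whatever guest-real-address bits sit between the 4k HPTE granularity and the host page boundary, so it degenerates to a no-op on 4k hosts and reproduces the old "raddr & 0xf000" on 64k hosts. A standalone sketch, with page shifts chosen purely for illustration:

    #include <stdio.h>
    #include <stdint.h>

    static uint64_t fixup(uint64_t pfn, uint64_t raddr, unsigned page_shift)
    {
        uint64_t page_mask = ~((1ULL << page_shift) - 1);
        uint64_t hpaddr = pfn << page_shift;

        /* keep raddr bits above 4k but below the host page size */
        hpaddr |= raddr & (~0xfffULL & ~page_mask);
        return hpaddr;
    }

    int main(void)
    {
        printf("4k:  0x%llx\n", (unsigned long long)fixup(0x1234, 0xabcd5678, 12));
        printf("64k: 0x%llx\n", (unsigned long long)fixup(0x1234, 0xabcd5678, 16));
        return 0;
    }
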
@@ -161,10 +145,7 @@ map_again: | |||
161 | } else { | 145 | } else { |
162 | struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu); | 146 | struct hpte_cache *pte = kvmppc_mmu_hpte_cache_next(vcpu); |
163 | 147 | ||
164 | dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n", | 148 | trace_kvm_book3s_64_mmu_map(rflags, hpteg, va, hpaddr, orig_pte); |
165 | ((rflags & HPTE_R_PP) == 3) ? '-' : 'w', | ||
166 | (rflags & HPTE_R_N) ? '-' : 'x', | ||
167 | orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr); | ||
168 | 149 | ||
169 | /* The ppc_md code may give us a secondary entry even though we | 150 | /* The ppc_md code may give us a secondary entry even though we |
170 | asked for a primary. Fix up. */ | 151 | asked for a primary. Fix up. */ |
@@ -191,7 +172,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) | |||
191 | u16 sid_map_mask; | 172 | u16 sid_map_mask; |
192 | static int backwards_map = 0; | 173 | static int backwards_map = 0; |
193 | 174 | ||
194 | if (vcpu->arch.msr & MSR_PR) | 175 | if (vcpu->arch.shared->msr & MSR_PR) |
195 | gvsid |= VSID_PR; | 176 | gvsid |= VSID_PR; |
196 | 177 | ||
197 | /* We might get collisions that trap in preceding order, so let's | 178 | /* We might get collisions that trap in preceding order, so let's |
@@ -219,8 +200,7 @@ static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid) | |||
219 | map->guest_vsid = gvsid; | 200 | map->guest_vsid = gvsid; |
220 | map->valid = true; | 201 | map->valid = true; |
221 | 202 | ||
222 | dprintk_slb("SLB: New mapping at %d: 0x%llx -> 0x%llx\n", | 203 | trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid); |
223 | sid_map_mask, gvsid, map->host_vsid); | ||
224 | 204 | ||
225 | return map; | 205 | return map; |
226 | } | 206 | } |
@@ -292,7 +272,7 @@ int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr) | |||
292 | to_svcpu(vcpu)->slb[slb_index].esid = slb_esid; | 272 | to_svcpu(vcpu)->slb[slb_index].esid = slb_esid; |
293 | to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid; | 273 | to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid; |
294 | 274 | ||
295 | dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid); | 275 | trace_kvm_book3s_slbmte(slb_vsid, slb_esid); |
296 | 276 | ||
297 | return 0; | 277 | return 0; |
298 | } | 278 | } |
@@ -306,7 +286,7 @@ void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu) | |||
306 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) | 286 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) |
307 | { | 287 | { |
308 | kvmppc_mmu_hpte_destroy(vcpu); | 288 | kvmppc_mmu_hpte_destroy(vcpu); |
309 | __destroy_context(to_book3s(vcpu)->context_id); | 289 | __destroy_context(to_book3s(vcpu)->context_id[0]); |
310 | } | 290 | } |
311 | 291 | ||
312 | int kvmppc_mmu_init(struct kvm_vcpu *vcpu) | 292 | int kvmppc_mmu_init(struct kvm_vcpu *vcpu) |
@@ -317,10 +297,10 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu) | |||
317 | err = __init_new_context(); | 297 | err = __init_new_context(); |
318 | if (err < 0) | 298 | if (err < 0) |
319 | return -1; | 299 | return -1; |
320 | vcpu3s->context_id = err; | 300 | vcpu3s->context_id[0] = err; |
321 | 301 | ||
322 | vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1; | 302 | vcpu3s->vsid_max = ((vcpu3s->context_id[0] + 1) << USER_ESID_BITS) - 1; |
323 | vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS; | 303 | vcpu3s->vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; |
324 | vcpu3s->vsid_next = vcpu3s->vsid_first; | 304 | vcpu3s->vsid_next = vcpu3s->vsid_first; |
325 | 305 | ||
326 | kvmppc_mmu_hpte_init(vcpu); | 306 | kvmppc_mmu_hpte_init(vcpu); |
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c index c85f906038ce..466846557089 100644 --- a/arch/powerpc/kvm/book3s_emulate.c +++ b/arch/powerpc/kvm/book3s_emulate.c | |||
@@ -73,8 +73,8 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
73 | switch (get_xop(inst)) { | 73 | switch (get_xop(inst)) { |
74 | case OP_19_XOP_RFID: | 74 | case OP_19_XOP_RFID: |
75 | case OP_19_XOP_RFI: | 75 | case OP_19_XOP_RFI: |
76 | kvmppc_set_pc(vcpu, vcpu->arch.srr0); | 76 | kvmppc_set_pc(vcpu, vcpu->arch.shared->srr0); |
77 | kvmppc_set_msr(vcpu, vcpu->arch.srr1); | 77 | kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); |
78 | *advance = 0; | 78 | *advance = 0; |
79 | break; | 79 | break; |
80 | 80 | ||
@@ -86,14 +86,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
86 | case 31: | 86 | case 31: |
87 | switch (get_xop(inst)) { | 87 | switch (get_xop(inst)) { |
88 | case OP_31_XOP_MFMSR: | 88 | case OP_31_XOP_MFMSR: |
89 | kvmppc_set_gpr(vcpu, get_rt(inst), vcpu->arch.msr); | 89 | kvmppc_set_gpr(vcpu, get_rt(inst), |
90 | vcpu->arch.shared->msr); | ||
90 | break; | 91 | break; |
91 | case OP_31_XOP_MTMSRD: | 92 | case OP_31_XOP_MTMSRD: |
92 | { | 93 | { |
93 | ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst)); | 94 | ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst)); |
94 | if (inst & 0x10000) { | 95 | if (inst & 0x10000) { |
95 | vcpu->arch.msr &= ~(MSR_RI | MSR_EE); | 96 | vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE); |
96 | vcpu->arch.msr |= rs & (MSR_RI | MSR_EE); | 97 | vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE); |
97 | } else | 98 | } else |
98 | kvmppc_set_msr(vcpu, rs); | 99 | kvmppc_set_msr(vcpu, rs); |
99 | break; | 100 | break; |
@@ -204,14 +205,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
204 | ra = kvmppc_get_gpr(vcpu, get_ra(inst)); | 205 | ra = kvmppc_get_gpr(vcpu, get_ra(inst)); |
205 | 206 | ||
206 | addr = (ra + rb) & ~31ULL; | 207 | addr = (ra + rb) & ~31ULL; |
207 | if (!(vcpu->arch.msr & MSR_SF)) | 208 | if (!(vcpu->arch.shared->msr & MSR_SF)) |
208 | addr &= 0xffffffff; | 209 | addr &= 0xffffffff; |
209 | vaddr = addr; | 210 | vaddr = addr; |
210 | 211 | ||
211 | r = kvmppc_st(vcpu, &addr, 32, zeros, true); | 212 | r = kvmppc_st(vcpu, &addr, 32, zeros, true); |
212 | if ((r == -ENOENT) || (r == -EPERM)) { | 213 | if ((r == -ENOENT) || (r == -EPERM)) { |
213 | *advance = 0; | 214 | *advance = 0; |
214 | vcpu->arch.dear = vaddr; | 215 | vcpu->arch.shared->dar = vaddr; |
215 | to_svcpu(vcpu)->fault_dar = vaddr; | 216 | to_svcpu(vcpu)->fault_dar = vaddr; |
216 | 217 | ||
217 | dsisr = DSISR_ISSTORE; | 218 | dsisr = DSISR_ISSTORE; |
@@ -220,7 +221,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
220 | else if (r == -EPERM) | 221 | else if (r == -EPERM) |
221 | dsisr |= DSISR_PROTFAULT; | 222 | dsisr |= DSISR_PROTFAULT; |
222 | 223 | ||
223 | to_book3s(vcpu)->dsisr = dsisr; | 224 | vcpu->arch.shared->dsisr = dsisr; |
224 | to_svcpu(vcpu)->fault_dsisr = dsisr; | 225 | to_svcpu(vcpu)->fault_dsisr = dsisr; |
225 | 226 | ||
226 | kvmppc_book3s_queue_irqprio(vcpu, | 227 | kvmppc_book3s_queue_irqprio(vcpu, |
@@ -263,7 +264,7 @@ void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper, | |||
263 | } | 264 | } |
264 | } | 265 | } |
265 | 266 | ||
266 | static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn) | 267 | static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn) |
267 | { | 268 | { |
268 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | 269 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); |
269 | struct kvmppc_bat *bat; | 270 | struct kvmppc_bat *bat; |
@@ -285,35 +286,7 @@ static u32 kvmppc_read_bat(struct kvm_vcpu *vcpu, int sprn) | |||
285 | BUG(); | 286 | BUG(); |
286 | } | 287 | } |
287 | 288 | ||
288 | if (sprn % 2) | 289 | return bat; |
289 | return bat->raw >> 32; | ||
290 | else | ||
291 | return bat->raw; | ||
292 | } | ||
293 | |||
294 | static void kvmppc_write_bat(struct kvm_vcpu *vcpu, int sprn, u32 val) | ||
295 | { | ||
296 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); | ||
297 | struct kvmppc_bat *bat; | ||
298 | |||
299 | switch (sprn) { | ||
300 | case SPRN_IBAT0U ... SPRN_IBAT3L: | ||
301 | bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2]; | ||
302 | break; | ||
303 | case SPRN_IBAT4U ... SPRN_IBAT7L: | ||
304 | bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)]; | ||
305 | break; | ||
306 | case SPRN_DBAT0U ... SPRN_DBAT3L: | ||
307 | bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2]; | ||
308 | break; | ||
309 | case SPRN_DBAT4U ... SPRN_DBAT7L: | ||
310 | bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)]; | ||
311 | break; | ||
312 | default: | ||
313 | BUG(); | ||
314 | } | ||
315 | |||
316 | kvmppc_set_bat(vcpu, bat, !(sprn % 2), val); | ||
317 | } | 290 | } |
318 | 291 | ||
319 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | 292 | int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) |
@@ -326,10 +299,10 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
326 | to_book3s(vcpu)->sdr1 = spr_val; | 299 | to_book3s(vcpu)->sdr1 = spr_val; |
327 | break; | 300 | break; |
328 | case SPRN_DSISR: | 301 | case SPRN_DSISR: |
329 | to_book3s(vcpu)->dsisr = spr_val; | 302 | vcpu->arch.shared->dsisr = spr_val; |
330 | break; | 303 | break; |
331 | case SPRN_DAR: | 304 | case SPRN_DAR: |
332 | vcpu->arch.dear = spr_val; | 305 | vcpu->arch.shared->dar = spr_val; |
333 | break; | 306 | break; |
334 | case SPRN_HIOR: | 307 | case SPRN_HIOR: |
335 | to_book3s(vcpu)->hior = spr_val; | 308 | to_book3s(vcpu)->hior = spr_val; |
@@ -338,12 +311,16 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
338 | case SPRN_IBAT4U ... SPRN_IBAT7L: | 311 | case SPRN_IBAT4U ... SPRN_IBAT7L: |
339 | case SPRN_DBAT0U ... SPRN_DBAT3L: | 312 | case SPRN_DBAT0U ... SPRN_DBAT3L: |
340 | case SPRN_DBAT4U ... SPRN_DBAT7L: | 313 | case SPRN_DBAT4U ... SPRN_DBAT7L: |
341 | kvmppc_write_bat(vcpu, sprn, (u32)spr_val); | 314 | { |
315 | struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn); | ||
316 | |||
317 | kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val); | ||
342 | /* BAT writes happen so rarely that we're ok to flush | 318 | /* BAT writes happen so rarely that we're ok to flush |
343 | * everything here */ | 319 | * everything here */ |
344 | kvmppc_mmu_pte_flush(vcpu, 0, 0); | 320 | kvmppc_mmu_pte_flush(vcpu, 0, 0); |
345 | kvmppc_mmu_flush_segments(vcpu); | 321 | kvmppc_mmu_flush_segments(vcpu); |
346 | break; | 322 | break; |
323 | } | ||
347 | case SPRN_HID0: | 324 | case SPRN_HID0: |
348 | to_book3s(vcpu)->hid[0] = spr_val; | 325 | to_book3s(vcpu)->hid[0] = spr_val; |
349 | break; | 326 | break; |
@@ -433,16 +410,24 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
433 | case SPRN_IBAT4U ... SPRN_IBAT7L: | 410 | case SPRN_IBAT4U ... SPRN_IBAT7L: |
434 | case SPRN_DBAT0U ... SPRN_DBAT3L: | 411 | case SPRN_DBAT0U ... SPRN_DBAT3L: |
435 | case SPRN_DBAT4U ... SPRN_DBAT7L: | 412 | case SPRN_DBAT4U ... SPRN_DBAT7L: |
436 | kvmppc_set_gpr(vcpu, rt, kvmppc_read_bat(vcpu, sprn)); | 413 | { |
414 | struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn); | ||
415 | |||
416 | if (sprn % 2) | ||
417 | kvmppc_set_gpr(vcpu, rt, bat->raw >> 32); | ||
418 | else | ||
419 | kvmppc_set_gpr(vcpu, rt, bat->raw); | ||
420 | |||
437 | break; | 421 | break; |
422 | } | ||
438 | case SPRN_SDR1: | 423 | case SPRN_SDR1: |
439 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); | 424 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); |
440 | break; | 425 | break; |
441 | case SPRN_DSISR: | 426 | case SPRN_DSISR: |
442 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->dsisr); | 427 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr); |
443 | break; | 428 | break; |
444 | case SPRN_DAR: | 429 | case SPRN_DAR: |
445 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); | 430 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); |
446 | break; | 431 | break; |
447 | case SPRN_HIOR: | 432 | case SPRN_HIOR: |
448 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior); | 433 | kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior); |
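
Note: both the mtspr and mfspr paths now share kvmppc_find_bat() and apply the odd/even SPR rule themselves: an odd SPR number addresses the high word of the cached 64-bit raw value, an even one the low word. A small sketch of that selection; the SPR numbers below are placeholders for the demo.

    #include <stdio.h>
    #include <stdint.h>

    static uint32_t read_bat_half(uint64_t raw, int sprn)
    {
        return (sprn % 2) ? (uint32_t)(raw >> 32) : (uint32_t)raw;
    }

    int main(void)
    {
        uint64_t raw = 0x11112222aaaabbbbULL;

        printf("odd  SPR -> 0x%08x\n", read_bat_half(raw, 0x211));
        printf("even SPR -> 0x%08x\n", read_bat_half(raw, 0x210));
        return 0;
    }
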
diff --git a/arch/powerpc/kvm/book3s_mmu_hpte.c b/arch/powerpc/kvm/book3s_mmu_hpte.c index 4868d4a7ebc5..79751d8dd131 100644 --- a/arch/powerpc/kvm/book3s_mmu_hpte.c +++ b/arch/powerpc/kvm/book3s_mmu_hpte.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include <linux/kvm_host.h> | 21 | #include <linux/kvm_host.h> |
22 | #include <linux/hash.h> | 22 | #include <linux/hash.h> |
23 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
24 | #include "trace.h" | ||
24 | 25 | ||
25 | #include <asm/kvm_ppc.h> | 26 | #include <asm/kvm_ppc.h> |
26 | #include <asm/kvm_book3s.h> | 27 | #include <asm/kvm_book3s.h> |
@@ -30,14 +31,6 @@ | |||
30 | 31 | ||
31 | #define PTE_SIZE 12 | 32 | #define PTE_SIZE 12 |
32 | 33 | ||
33 | /* #define DEBUG_MMU */ | ||
34 | |||
35 | #ifdef DEBUG_MMU | ||
36 | #define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__) | ||
37 | #else | ||
38 | #define dprintk_mmu(a, ...) do { } while(0) | ||
39 | #endif | ||
40 | |||
41 | static struct kmem_cache *hpte_cache; | 34 | static struct kmem_cache *hpte_cache; |
42 | 35 | ||
43 | static inline u64 kvmppc_mmu_hash_pte(u64 eaddr) | 36 | static inline u64 kvmppc_mmu_hash_pte(u64 eaddr) |
@@ -45,6 +38,12 @@ static inline u64 kvmppc_mmu_hash_pte(u64 eaddr) | |||
45 | return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE); | 38 | return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE); |
46 | } | 39 | } |
47 | 40 | ||
41 | static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr) | ||
42 | { | ||
43 | return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE, | ||
44 | HPTEG_HASH_BITS_PTE_LONG); | ||
45 | } | ||
46 | |||
48 | static inline u64 kvmppc_mmu_hash_vpte(u64 vpage) | 47 | static inline u64 kvmppc_mmu_hash_vpte(u64 vpage) |
49 | { | 48 | { |
50 | return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE); | 49 | return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE); |
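
Note: kvmppc_mmu_hash_pte_long() keys on the effective address with the segment bits masked off (0x0ffff000), which lets the 32-bit flush-without-segment case walk a single bucket list instead of iterating over all sixteen segments (see the kvmppc_mmu_pte_flush_long() hunk further down). A rough standalone sketch; the hash width and multiplier merely stand in for HPTEG_HASH_BITS_PTE_LONG and the kernel's hash_64().

    #include <stdio.h>
    #include <stdint.h>

    #define HASH_BITS_PTE_LONG 12

    /* same idea as hash_64(): multiply by a large odd constant, take top bits */
    static uint64_t hash_64_demo(uint64_t val, unsigned bits)
    {
        return (val * 0x61C8864680B583EBULL) >> (64 - bits);
    }

    static uint64_t hash_pte_long(uint64_t eaddr)
    {
        return hash_64_demo((eaddr & 0x0ffff000) >> 12, HASH_BITS_PTE_LONG);
    }

    int main(void)
    {
        /* the same EA in two different segments lands in the same bucket */
        printf("0x%llx\n", (unsigned long long)hash_pte_long(0x10345000));
        printf("0x%llx\n", (unsigned long long)hash_pte_long(0x20345000));
        return 0;
    }
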
@@ -60,77 +59,128 @@ void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte) | |||
60 | { | 59 | { |
61 | u64 index; | 60 | u64 index; |
62 | 61 | ||
62 | trace_kvm_book3s_mmu_map(pte); | ||
63 | |||
64 | spin_lock(&vcpu->arch.mmu_lock); | ||
65 | |||
63 | /* Add to ePTE list */ | 66 | /* Add to ePTE list */ |
64 | index = kvmppc_mmu_hash_pte(pte->pte.eaddr); | 67 | index = kvmppc_mmu_hash_pte(pte->pte.eaddr); |
65 | hlist_add_head(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]); | 68 | hlist_add_head_rcu(&pte->list_pte, &vcpu->arch.hpte_hash_pte[index]); |
69 | |||
70 | /* Add to ePTE_long list */ | ||
71 | index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr); | ||
72 | hlist_add_head_rcu(&pte->list_pte_long, | ||
73 | &vcpu->arch.hpte_hash_pte_long[index]); | ||
66 | 74 | ||
67 | /* Add to vPTE list */ | 75 | /* Add to vPTE list */ |
68 | index = kvmppc_mmu_hash_vpte(pte->pte.vpage); | 76 | index = kvmppc_mmu_hash_vpte(pte->pte.vpage); |
69 | hlist_add_head(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]); | 77 | hlist_add_head_rcu(&pte->list_vpte, &vcpu->arch.hpte_hash_vpte[index]); |
70 | 78 | ||
71 | /* Add to vPTE_long list */ | 79 | /* Add to vPTE_long list */ |
72 | index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage); | 80 | index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage); |
73 | hlist_add_head(&pte->list_vpte_long, | 81 | hlist_add_head_rcu(&pte->list_vpte_long, |
74 | &vcpu->arch.hpte_hash_vpte_long[index]); | 82 | &vcpu->arch.hpte_hash_vpte_long[index]); |
83 | |||
84 | spin_unlock(&vcpu->arch.mmu_lock); | ||
85 | } | ||
86 | |||
87 | static void free_pte_rcu(struct rcu_head *head) | ||
88 | { | ||
89 | struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head); | ||
90 | kmem_cache_free(hpte_cache, pte); | ||
75 | } | 91 | } |
76 | 92 | ||
77 | static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) | 93 | static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte) |
78 | { | 94 | { |
79 | dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n", | 95 | trace_kvm_book3s_mmu_invalidate(pte); |
80 | pte->pte.eaddr, pte->pte.vpage, pte->host_va); | ||
81 | 96 | ||
82 | /* Different for 32 and 64 bit */ | 97 | /* Different for 32 and 64 bit */ |
83 | kvmppc_mmu_invalidate_pte(vcpu, pte); | 98 | kvmppc_mmu_invalidate_pte(vcpu, pte); |
84 | 99 | ||
100 | spin_lock(&vcpu->arch.mmu_lock); | ||
101 | |||
102 | /* pte already invalidated in between? */ | ||
103 | if (hlist_unhashed(&pte->list_pte)) { | ||
104 | spin_unlock(&vcpu->arch.mmu_lock); | ||
105 | return; | ||
106 | } | ||
107 | |||
108 | hlist_del_init_rcu(&pte->list_pte); | ||
109 | hlist_del_init_rcu(&pte->list_pte_long); | ||
110 | hlist_del_init_rcu(&pte->list_vpte); | ||
111 | hlist_del_init_rcu(&pte->list_vpte_long); | ||
112 | |||
85 | if (pte->pte.may_write) | 113 | if (pte->pte.may_write) |
86 | kvm_release_pfn_dirty(pte->pfn); | 114 | kvm_release_pfn_dirty(pte->pfn); |
87 | else | 115 | else |
88 | kvm_release_pfn_clean(pte->pfn); | 116 | kvm_release_pfn_clean(pte->pfn); |
89 | 117 | ||
90 | hlist_del(&pte->list_pte); | 118 | spin_unlock(&vcpu->arch.mmu_lock); |
91 | hlist_del(&pte->list_vpte); | ||
92 | hlist_del(&pte->list_vpte_long); | ||
93 | 119 | ||
94 | vcpu->arch.hpte_cache_count--; | 120 | vcpu->arch.hpte_cache_count--; |
95 | kmem_cache_free(hpte_cache, pte); | 121 | call_rcu(&pte->rcu_head, free_pte_rcu); |
96 | } | 122 | } |
97 | 123 | ||
98 | static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu) | 124 | static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu) |
99 | { | 125 | { |
100 | struct hpte_cache *pte; | 126 | struct hpte_cache *pte; |
101 | struct hlist_node *node, *tmp; | 127 | struct hlist_node *node; |
102 | int i; | 128 | int i; |
103 | 129 | ||
130 | rcu_read_lock(); | ||
131 | |||
104 | for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { | 132 | for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { |
105 | struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i]; | 133 | struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i]; |
106 | 134 | ||
107 | hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long) | 135 | hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) |
108 | invalidate_pte(vcpu, pte); | 136 | invalidate_pte(vcpu, pte); |
109 | } | 137 | } |
138 | |||
139 | rcu_read_unlock(); | ||
110 | } | 140 | } |
111 | 141 | ||
112 | static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea) | 142 | static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea) |
113 | { | 143 | { |
114 | struct hlist_head *list; | 144 | struct hlist_head *list; |
115 | struct hlist_node *node, *tmp; | 145 | struct hlist_node *node; |
116 | struct hpte_cache *pte; | 146 | struct hpte_cache *pte; |
117 | 147 | ||
118 | /* Find the list of entries in the map */ | 148 | /* Find the list of entries in the map */ |
119 | list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)]; | 149 | list = &vcpu->arch.hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)]; |
120 | 150 | ||
151 | rcu_read_lock(); | ||
152 | |||
121 | /* Check the list for matching entries and invalidate */ | 153 | /* Check the list for matching entries and invalidate */ |
122 | hlist_for_each_entry_safe(pte, node, tmp, list, list_pte) | 154 | hlist_for_each_entry_rcu(pte, node, list, list_pte) |
123 | if ((pte->pte.eaddr & ~0xfffUL) == guest_ea) | 155 | if ((pte->pte.eaddr & ~0xfffUL) == guest_ea) |
124 | invalidate_pte(vcpu, pte); | 156 | invalidate_pte(vcpu, pte); |
157 | |||
158 | rcu_read_unlock(); | ||
125 | } | 159 | } |
126 | 160 | ||
127 | void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask) | 161 | static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea) |
128 | { | 162 | { |
129 | u64 i; | 163 | struct hlist_head *list; |
164 | struct hlist_node *node; | ||
165 | struct hpte_cache *pte; | ||
130 | 166 | ||
131 | dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n", | 167 | /* Find the list of entries in the map */ |
132 | vcpu->arch.hpte_cache_count, guest_ea, ea_mask); | 168 | list = &vcpu->arch.hpte_hash_pte_long[ |
169 | kvmppc_mmu_hash_pte_long(guest_ea)]; | ||
133 | 170 | ||
171 | rcu_read_lock(); | ||
172 | |||
173 | /* Check the list for matching entries and invalidate */ | ||
174 | hlist_for_each_entry_rcu(pte, node, list, list_pte_long) | ||
175 | if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea) | ||
176 | invalidate_pte(vcpu, pte); | ||
177 | |||
178 | rcu_read_unlock(); | ||
179 | } | ||
180 | |||
181 | void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask) | ||
182 | { | ||
183 | trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask); | ||
134 | guest_ea &= ea_mask; | 184 | guest_ea &= ea_mask; |
135 | 185 | ||
136 | switch (ea_mask) { | 186 | switch (ea_mask) { |
@@ -138,9 +188,7 @@ void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask) | |||
138 | kvmppc_mmu_pte_flush_page(vcpu, guest_ea); | 188 | kvmppc_mmu_pte_flush_page(vcpu, guest_ea); |
139 | break; | 189 | break; |
140 | case 0x0ffff000: | 190 | case 0x0ffff000: |
141 | /* 32-bit flush w/o segment, go through all possible segments */ | 191 | kvmppc_mmu_pte_flush_long(vcpu, guest_ea); |
142 | for (i = 0; i < 0x100000000ULL; i += 0x10000000ULL) | ||
143 | kvmppc_mmu_pte_flush(vcpu, guest_ea | i, ~0xfffUL); | ||
144 | break; | 192 | break; |
145 | case 0: | 193 | case 0: |
146 | /* Doing a complete flush -> start from scratch */ | 194 | /* Doing a complete flush -> start from scratch */ |
@@ -156,39 +204,46 @@ void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask) | |||
156 | static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp) | 204 | static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp) |
157 | { | 205 | { |
158 | struct hlist_head *list; | 206 | struct hlist_head *list; |
159 | struct hlist_node *node, *tmp; | 207 | struct hlist_node *node; |
160 | struct hpte_cache *pte; | 208 | struct hpte_cache *pte; |
161 | u64 vp_mask = 0xfffffffffULL; | 209 | u64 vp_mask = 0xfffffffffULL; |
162 | 210 | ||
163 | list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)]; | 211 | list = &vcpu->arch.hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)]; |
164 | 212 | ||
213 | rcu_read_lock(); | ||
214 | |||
165 | /* Check the list for matching entries and invalidate */ | 215 | /* Check the list for matching entries and invalidate */ |
166 | hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte) | 216 | hlist_for_each_entry_rcu(pte, node, list, list_vpte) |
167 | if ((pte->pte.vpage & vp_mask) == guest_vp) | 217 | if ((pte->pte.vpage & vp_mask) == guest_vp) |
168 | invalidate_pte(vcpu, pte); | 218 | invalidate_pte(vcpu, pte); |
219 | |||
220 | rcu_read_unlock(); | ||
169 | } | 221 | } |
170 | 222 | ||
171 | /* Flush with mask 0xffffff000 */ | 223 | /* Flush with mask 0xffffff000 */ |
172 | static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) | 224 | static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp) |
173 | { | 225 | { |
174 | struct hlist_head *list; | 226 | struct hlist_head *list; |
175 | struct hlist_node *node, *tmp; | 227 | struct hlist_node *node; |
176 | struct hpte_cache *pte; | 228 | struct hpte_cache *pte; |
177 | u64 vp_mask = 0xffffff000ULL; | 229 | u64 vp_mask = 0xffffff000ULL; |
178 | 230 | ||
179 | list = &vcpu->arch.hpte_hash_vpte_long[ | 231 | list = &vcpu->arch.hpte_hash_vpte_long[ |
180 | kvmppc_mmu_hash_vpte_long(guest_vp)]; | 232 | kvmppc_mmu_hash_vpte_long(guest_vp)]; |
181 | 233 | ||
234 | rcu_read_lock(); | ||
235 | |||
182 | /* Check the list for matching entries and invalidate */ | 236 | /* Check the list for matching entries and invalidate */ |
183 | hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long) | 237 | hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) |
184 | if ((pte->pte.vpage & vp_mask) == guest_vp) | 238 | if ((pte->pte.vpage & vp_mask) == guest_vp) |
185 | invalidate_pte(vcpu, pte); | 239 | invalidate_pte(vcpu, pte); |
240 | |||
241 | rcu_read_unlock(); | ||
186 | } | 242 | } |
187 | 243 | ||
188 | void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) | 244 | void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) |
189 | { | 245 | { |
190 | dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n", | 246 | trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask); |
191 | vcpu->arch.hpte_cache_count, guest_vp, vp_mask); | ||
192 | guest_vp &= vp_mask; | 247 | guest_vp &= vp_mask; |
193 | 248 | ||
194 | switch(vp_mask) { | 249 | switch(vp_mask) { |
@@ -206,21 +261,24 @@ void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask) | |||
206 | 261 | ||
207 | void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) | 262 | void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end) |
208 | { | 263 | { |
209 | struct hlist_node *node, *tmp; | 264 | struct hlist_node *node; |
210 | struct hpte_cache *pte; | 265 | struct hpte_cache *pte; |
211 | int i; | 266 | int i; |
212 | 267 | ||
213 | dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n", | 268 | trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end); |
214 | vcpu->arch.hpte_cache_count, pa_start, pa_end); | 269 | |
270 | rcu_read_lock(); | ||
215 | 271 | ||
216 | for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { | 272 | for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) { |
217 | struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i]; | 273 | struct hlist_head *list = &vcpu->arch.hpte_hash_vpte_long[i]; |
218 | 274 | ||
219 | hlist_for_each_entry_safe(pte, node, tmp, list, list_vpte_long) | 275 | hlist_for_each_entry_rcu(pte, node, list, list_vpte_long) |
220 | if ((pte->pte.raddr >= pa_start) && | 276 | if ((pte->pte.raddr >= pa_start) && |
221 | (pte->pte.raddr < pa_end)) | 277 | (pte->pte.raddr < pa_end)) |
222 | invalidate_pte(vcpu, pte); | 278 | invalidate_pte(vcpu, pte); |
223 | } | 279 | } |
280 | |||
281 | rcu_read_unlock(); | ||
224 | } | 282 | } |
225 | 283 | ||
226 | struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu) | 284 | struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu) |
@@ -254,11 +312,15 @@ int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu) | |||
254 | /* init hpte lookup hashes */ | 312 | /* init hpte lookup hashes */ |
255 | kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte, | 313 | kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte, |
256 | ARRAY_SIZE(vcpu->arch.hpte_hash_pte)); | 314 | ARRAY_SIZE(vcpu->arch.hpte_hash_pte)); |
315 | kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_pte_long, | ||
316 | ARRAY_SIZE(vcpu->arch.hpte_hash_pte_long)); | ||
257 | kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte, | 317 | kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte, |
258 | ARRAY_SIZE(vcpu->arch.hpte_hash_vpte)); | 318 | ARRAY_SIZE(vcpu->arch.hpte_hash_vpte)); |
259 | kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long, | 319 | kvmppc_mmu_hpte_init_hash(vcpu->arch.hpte_hash_vpte_long, |
260 | ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long)); | 320 | ARRAY_SIZE(vcpu->arch.hpte_hash_vpte_long)); |
261 | 321 | ||
322 | spin_lock_init(&vcpu->arch.mmu_lock); | ||
323 | |||
262 | return 0; | 324 | return 0; |
263 | } | 325 | } |
264 | 326 | ||
diff --git a/arch/powerpc/kvm/book3s_paired_singles.c b/arch/powerpc/kvm/book3s_paired_singles.c index 474f2e24050a..7b0ee96c1bed 100644 --- a/arch/powerpc/kvm/book3s_paired_singles.c +++ b/arch/powerpc/kvm/book3s_paired_singles.c | |||
@@ -159,20 +159,21 @@ | |||
159 | 159 | ||
160 | static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) | 160 | static inline void kvmppc_sync_qpr(struct kvm_vcpu *vcpu, int rt) |
161 | { | 161 | { |
162 | kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt], &vcpu->arch.fpscr); | 162 | kvm_cvt_df(&vcpu->arch.fpr[rt], &vcpu->arch.qpr[rt]); |
163 | } | 163 | } |
164 | 164 | ||
165 | static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) | 165 | static void kvmppc_inject_pf(struct kvm_vcpu *vcpu, ulong eaddr, bool is_store) |
166 | { | 166 | { |
167 | u64 dsisr; | 167 | u64 dsisr; |
168 | struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; | ||
168 | 169 | ||
169 | vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 36, 0); | 170 | shared->msr = kvmppc_set_field(shared->msr, 33, 36, 0); |
170 | vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0); | 171 | shared->msr = kvmppc_set_field(shared->msr, 42, 47, 0); |
171 | vcpu->arch.dear = eaddr; | 172 | shared->dar = eaddr; |
172 | /* Page Fault */ | 173 | /* Page Fault */ |
173 | dsisr = kvmppc_set_field(0, 33, 33, 1); | 174 | dsisr = kvmppc_set_field(0, 33, 33, 1); |
174 | if (is_store) | 175 | if (is_store) |
175 | to_book3s(vcpu)->dsisr = kvmppc_set_field(dsisr, 38, 38, 1); | 176 | shared->dsisr = kvmppc_set_field(dsisr, 38, 38, 1); |
176 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); | 177 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE); |
177 | } | 178 | } |
178 | 179 | ||
@@ -204,7 +205,7 @@ static int kvmppc_emulate_fpr_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
204 | /* put in registers */ | 205 | /* put in registers */ |
205 | switch (ls_type) { | 206 | switch (ls_type) { |
206 | case FPU_LS_SINGLE: | 207 | case FPU_LS_SINGLE: |
207 | kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs], &vcpu->arch.fpscr); | 208 | kvm_cvt_fd((u32*)tmp, &vcpu->arch.fpr[rs]); |
208 | vcpu->arch.qpr[rs] = *((u32*)tmp); | 209 | vcpu->arch.qpr[rs] = *((u32*)tmp); |
209 | break; | 210 | break; |
210 | case FPU_LS_DOUBLE: | 211 | case FPU_LS_DOUBLE: |
@@ -230,7 +231,7 @@ static int kvmppc_emulate_fpr_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
230 | 231 | ||
231 | switch (ls_type) { | 232 | switch (ls_type) { |
232 | case FPU_LS_SINGLE: | 233 | case FPU_LS_SINGLE: |
233 | kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp, &vcpu->arch.fpscr); | 234 | kvm_cvt_df(&vcpu->arch.fpr[rs], (u32*)tmp); |
234 | val = *((u32*)tmp); | 235 | val = *((u32*)tmp); |
235 | len = sizeof(u32); | 236 | len = sizeof(u32); |
236 | break; | 237 | break; |
@@ -296,7 +297,7 @@ static int kvmppc_emulate_psq_load(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
296 | emulated = EMULATE_DONE; | 297 | emulated = EMULATE_DONE; |
297 | 298 | ||
298 | /* put in registers */ | 299 | /* put in registers */ |
299 | kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs], &vcpu->arch.fpscr); | 300 | kvm_cvt_fd(&tmp[0], &vcpu->arch.fpr[rs]); |
300 | vcpu->arch.qpr[rs] = tmp[1]; | 301 | vcpu->arch.qpr[rs] = tmp[1]; |
301 | 302 | ||
302 | dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0], | 303 | dprintk(KERN_INFO "KVM: PSQ_LD [0x%x, 0x%x] at 0x%lx (%d)\n", tmp[0], |
@@ -314,7 +315,7 @@ static int kvmppc_emulate_psq_store(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
314 | u32 tmp[2]; | 315 | u32 tmp[2]; |
315 | int len = w ? sizeof(u32) : sizeof(u64); | 316 | int len = w ? sizeof(u32) : sizeof(u64); |
316 | 317 | ||
317 | kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0], &vcpu->arch.fpscr); | 318 | kvm_cvt_df(&vcpu->arch.fpr[rs], &tmp[0]); |
318 | tmp[1] = vcpu->arch.qpr[rs]; | 319 | tmp[1] = vcpu->arch.qpr[rs]; |
319 | 320 | ||
320 | r = kvmppc_st(vcpu, &addr, len, tmp, true); | 321 | r = kvmppc_st(vcpu, &addr, len, tmp, true); |
@@ -516,9 +517,9 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc, | |||
516 | WARN_ON(rc); | 517 | WARN_ON(rc); |
517 | 518 | ||
518 | /* PS0 */ | 519 | /* PS0 */ |
519 | kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr); | 520 | kvm_cvt_df(&fpr[reg_in1], &ps0_in1); |
520 | kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr); | 521 | kvm_cvt_df(&fpr[reg_in2], &ps0_in2); |
521 | kvm_cvt_df(&fpr[reg_in3], &ps0_in3, &vcpu->arch.fpscr); | 522 | kvm_cvt_df(&fpr[reg_in3], &ps0_in3); |
522 | 523 | ||
523 | if (scalar & SCALAR_LOW) | 524 | if (scalar & SCALAR_LOW) |
524 | ps0_in2 = qpr[reg_in2]; | 525 | ps0_in2 = qpr[reg_in2]; |
@@ -529,7 +530,7 @@ static int kvmppc_ps_three_in(struct kvm_vcpu *vcpu, bool rc, | |||
529 | ps0_in1, ps0_in2, ps0_in3, ps0_out); | 530 | ps0_in1, ps0_in2, ps0_in3, ps0_out); |
530 | 531 | ||
531 | if (!(scalar & SCALAR_NO_PS0)) | 532 | if (!(scalar & SCALAR_NO_PS0)) |
532 | kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr); | 533 | kvm_cvt_fd(&ps0_out, &fpr[reg_out]); |
533 | 534 | ||
534 | /* PS1 */ | 535 | /* PS1 */ |
535 | ps1_in1 = qpr[reg_in1]; | 536 | ps1_in1 = qpr[reg_in1]; |
@@ -566,12 +567,12 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc, | |||
566 | WARN_ON(rc); | 567 | WARN_ON(rc); |
567 | 568 | ||
568 | /* PS0 */ | 569 | /* PS0 */ |
569 | kvm_cvt_df(&fpr[reg_in1], &ps0_in1, &vcpu->arch.fpscr); | 570 | kvm_cvt_df(&fpr[reg_in1], &ps0_in1); |
570 | 571 | ||
571 | if (scalar & SCALAR_LOW) | 572 | if (scalar & SCALAR_LOW) |
572 | ps0_in2 = qpr[reg_in2]; | 573 | ps0_in2 = qpr[reg_in2]; |
573 | else | 574 | else |
574 | kvm_cvt_df(&fpr[reg_in2], &ps0_in2, &vcpu->arch.fpscr); | 575 | kvm_cvt_df(&fpr[reg_in2], &ps0_in2); |
575 | 576 | ||
576 | func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2); | 577 | func(&vcpu->arch.fpscr, &ps0_out, &ps0_in1, &ps0_in2); |
577 | 578 | ||
@@ -579,7 +580,7 @@ static int kvmppc_ps_two_in(struct kvm_vcpu *vcpu, bool rc, | |||
579 | dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n", | 580 | dprintk(KERN_INFO "PS2 ps0 -> f(0x%x, 0x%x) = 0x%x\n", |
580 | ps0_in1, ps0_in2, ps0_out); | 581 | ps0_in1, ps0_in2, ps0_out); |
581 | 582 | ||
582 | kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr); | 583 | kvm_cvt_fd(&ps0_out, &fpr[reg_out]); |
583 | } | 584 | } |
584 | 585 | ||
585 | /* PS1 */ | 586 | /* PS1 */ |
@@ -615,13 +616,13 @@ static int kvmppc_ps_one_in(struct kvm_vcpu *vcpu, bool rc, | |||
615 | WARN_ON(rc); | 616 | WARN_ON(rc); |
616 | 617 | ||
617 | /* PS0 */ | 618 | /* PS0 */ |
618 | kvm_cvt_df(&fpr[reg_in], &ps0_in, &vcpu->arch.fpscr); | 619 | kvm_cvt_df(&fpr[reg_in], &ps0_in); |
619 | func(&vcpu->arch.fpscr, &ps0_out, &ps0_in); | 620 | func(&vcpu->arch.fpscr, &ps0_out, &ps0_in); |
620 | 621 | ||
621 | dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n", | 622 | dprintk(KERN_INFO "PS1 ps0 -> f(0x%x) = 0x%x\n", |
622 | ps0_in, ps0_out); | 623 | ps0_in, ps0_out); |
623 | 624 | ||
624 | kvm_cvt_fd(&ps0_out, &fpr[reg_out], &vcpu->arch.fpscr); | 625 | kvm_cvt_fd(&ps0_out, &fpr[reg_out]); |
625 | 626 | ||
626 | /* PS1 */ | 627 | /* PS1 */ |
627 | ps1_in = qpr[reg_in]; | 628 | ps1_in = qpr[reg_in]; |
@@ -658,7 +659,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
658 | if (!kvmppc_inst_is_paired_single(vcpu, inst)) | 659 | if (!kvmppc_inst_is_paired_single(vcpu, inst)) |
659 | return EMULATE_FAIL; | 660 | return EMULATE_FAIL; |
660 | 661 | ||
661 | if (!(vcpu->arch.msr & MSR_FP)) { | 662 | if (!(vcpu->arch.shared->msr & MSR_FP)) { |
662 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL); | 663 | kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL); |
663 | return EMULATE_AGAIN; | 664 | return EMULATE_AGAIN; |
664 | } | 665 | } |
@@ -671,7 +672,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
671 | #ifdef DEBUG | 672 | #ifdef DEBUG |
672 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { | 673 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { |
673 | u32 f; | 674 | u32 f; |
674 | kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr); | 675 | kvm_cvt_df(&vcpu->arch.fpr[i], &f); |
675 | dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n", | 676 | dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n", |
676 | i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]); | 677 | i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]); |
677 | } | 678 | } |
@@ -796,8 +797,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
796 | vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; | 797 | vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; |
797 | /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ | 798 | /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ |
798 | kvm_cvt_df(&vcpu->arch.fpr[ax_rb], | 799 | kvm_cvt_df(&vcpu->arch.fpr[ax_rb], |
799 | &vcpu->arch.qpr[ax_rd], | 800 | &vcpu->arch.qpr[ax_rd]); |
800 | &vcpu->arch.fpscr); | ||
801 | break; | 801 | break; |
802 | case OP_4X_PS_MERGE01: | 802 | case OP_4X_PS_MERGE01: |
803 | WARN_ON(rcomp); | 803 | WARN_ON(rcomp); |
@@ -808,19 +808,16 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
808 | WARN_ON(rcomp); | 808 | WARN_ON(rcomp); |
809 | /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ | 809 | /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ |
810 | kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], | 810 | kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], |
811 | &vcpu->arch.fpr[ax_rd], | 811 | &vcpu->arch.fpr[ax_rd]); |
812 | &vcpu->arch.fpscr); | ||
813 | /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ | 812 | /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ |
814 | kvm_cvt_df(&vcpu->arch.fpr[ax_rb], | 813 | kvm_cvt_df(&vcpu->arch.fpr[ax_rb], |
815 | &vcpu->arch.qpr[ax_rd], | 814 | &vcpu->arch.qpr[ax_rd]); |
816 | &vcpu->arch.fpscr); | ||
817 | break; | 815 | break; |
818 | case OP_4X_PS_MERGE11: | 816 | case OP_4X_PS_MERGE11: |
819 | WARN_ON(rcomp); | 817 | WARN_ON(rcomp); |
820 | /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ | 818 | /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ |
821 | kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], | 819 | kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], |
822 | &vcpu->arch.fpr[ax_rd], | 820 | &vcpu->arch.fpr[ax_rd]); |
823 | &vcpu->arch.fpscr); | ||
824 | vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; | 821 | vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; |
825 | break; | 822 | break; |
826 | } | 823 | } |
@@ -1255,7 +1252,7 @@ int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
1255 | #ifdef DEBUG | 1252 | #ifdef DEBUG |
1256 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { | 1253 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { |
1257 | u32 f; | 1254 | u32 f; |
1258 | kvm_cvt_df(&vcpu->arch.fpr[i], &f, &vcpu->arch.fpscr); | 1255 | kvm_cvt_df(&vcpu->arch.fpr[i], &f); |
1259 | dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f); | 1256 | dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f); |
1260 | } | 1257 | } |
1261 | #endif | 1258 | #endif |
diff --git a/arch/powerpc/kvm/book3s_rmhandlers.S b/arch/powerpc/kvm/book3s_rmhandlers.S index 506d5c316c96..1a1b34487e71 100644 --- a/arch/powerpc/kvm/book3s_rmhandlers.S +++ b/arch/powerpc/kvm/book3s_rmhandlers.S | |||
@@ -35,9 +35,7 @@ | |||
35 | 35 | ||
36 | #if defined(CONFIG_PPC_BOOK3S_64) | 36 | #if defined(CONFIG_PPC_BOOK3S_64) |
37 | 37 | ||
38 | #define LOAD_SHADOW_VCPU(reg) \ | 38 | #define LOAD_SHADOW_VCPU(reg) GET_PACA(reg) |
39 | mfspr reg, SPRN_SPRG_PACA | ||
40 | |||
41 | #define SHADOW_VCPU_OFF PACA_KVM_SVCPU | 39 | #define SHADOW_VCPU_OFF PACA_KVM_SVCPU |
42 | #define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR) | 40 | #define MSR_NOIRQ MSR_KERNEL & ~(MSR_IR | MSR_DR) |
43 | #define FUNC(name) GLUE(.,name) | 41 | #define FUNC(name) GLUE(.,name) |
@@ -72,7 +70,7 @@ | |||
72 | .global kvmppc_trampoline_\intno | 70 | .global kvmppc_trampoline_\intno |
73 | kvmppc_trampoline_\intno: | 71 | kvmppc_trampoline_\intno: |
74 | 72 | ||
75 | mtspr SPRN_SPRG_SCRATCH0, r13 /* Save r13 */ | 73 | SET_SCRATCH0(r13) /* Save r13 */ |
76 | 74 | ||
77 | /* | 75 | /* |
78 | * First thing to do is to find out if we're coming | 76 | * First thing to do is to find out if we're coming |
@@ -91,7 +89,7 @@ kvmppc_trampoline_\intno: | |||
91 | lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) | 89 | lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) |
92 | mtcr r12 | 90 | mtcr r12 |
93 | PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) | 91 | PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) |
94 | mfspr r13, SPRN_SPRG_SCRATCH0 /* r13 = original r13 */ | 92 | GET_SCRATCH0(r13) /* r13 = original r13 */ |
95 | b kvmppc_resume_\intno /* Get back original handler */ | 93 | b kvmppc_resume_\intno /* Get back original handler */ |
96 | 94 | ||
97 | /* Now we know we're handling a KVM guest */ | 95 | /* Now we know we're handling a KVM guest */ |
@@ -114,6 +112,9 @@ INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_MACHINE_CHECK | |||
114 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE | 112 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_DATA_STORAGE |
115 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE | 113 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_INST_STORAGE |
116 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL | 114 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL |
115 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
116 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_EXTERNAL_HV | ||
117 | #endif | ||
117 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT | 118 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_ALIGNMENT |
118 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM | 119 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_PROGRAM |
119 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL | 120 | INTERRUPT_TRAMPOLINE BOOK3S_INTERRUPT_FP_UNAVAIL |
@@ -158,7 +159,7 @@ kvmppc_handler_skip_ins: | |||
158 | lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) | 159 | lwz r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) |
159 | mtcr r12 | 160 | mtcr r12 |
160 | PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) | 161 | PPC_LL r12, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) |
161 | mfspr r13, SPRN_SPRG_SCRATCH0 | 162 | GET_SCRATCH0(r13) |
162 | 163 | ||
163 | /* And get back into the code */ | 164 | /* And get back into the code */ |
164 | RFI | 165 | RFI |
@@ -202,8 +203,25 @@ _GLOBAL(kvmppc_rmcall) | |||
202 | 203 | ||
203 | #if defined(CONFIG_PPC_BOOK3S_32) | 204 | #if defined(CONFIG_PPC_BOOK3S_32) |
204 | #define STACK_LR INT_FRAME_SIZE+4 | 205 | #define STACK_LR INT_FRAME_SIZE+4 |
206 | |||
207 | /* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */ | ||
208 | #define MSR_EXT_START \ | ||
209 | PPC_STL r20, _NIP(r1); \ | ||
210 | mfmsr r20; \ | ||
211 | LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \ | ||
212 | andc r3,r20,r3; /* Disable DR,EE */ \ | ||
213 | mtmsr r3; \ | ||
214 | sync | ||
215 | |||
216 | #define MSR_EXT_END \ | ||
217 | mtmsr r20; /* Enable DR,EE */ \ | ||
218 | sync; \ | ||
219 | PPC_LL r20, _NIP(r1) | ||
220 | |||
205 | #elif defined(CONFIG_PPC_BOOK3S_64) | 221 | #elif defined(CONFIG_PPC_BOOK3S_64) |
206 | #define STACK_LR _LINK | 222 | #define STACK_LR _LINK |
223 | #define MSR_EXT_START | ||
224 | #define MSR_EXT_END | ||
207 | #endif | 225 | #endif |
208 | 226 | ||
209 | /* | 227 | /* |
@@ -215,19 +233,12 @@ _GLOBAL(kvmppc_load_up_ ## what); \ | |||
215 | PPC_STLU r1, -INT_FRAME_SIZE(r1); \ | 233 | PPC_STLU r1, -INT_FRAME_SIZE(r1); \ |
216 | mflr r3; \ | 234 | mflr r3; \ |
217 | PPC_STL r3, STACK_LR(r1); \ | 235 | PPC_STL r3, STACK_LR(r1); \ |
218 | PPC_STL r20, _NIP(r1); \ | 236 | MSR_EXT_START; \ |
219 | mfmsr r20; \ | ||
220 | LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE); \ | ||
221 | andc r3,r20,r3; /* Disable DR,EE */ \ | ||
222 | mtmsr r3; \ | ||
223 | sync; \ | ||
224 | \ | 237 | \ |
225 | bl FUNC(load_up_ ## what); \ | 238 | bl FUNC(load_up_ ## what); \ |
226 | \ | 239 | \ |
227 | mtmsr r20; /* Enable DR,EE */ \ | 240 | MSR_EXT_END; \ |
228 | sync; \ | ||
229 | PPC_LL r3, STACK_LR(r1); \ | 241 | PPC_LL r3, STACK_LR(r1); \ |
230 | PPC_LL r20, _NIP(r1); \ | ||
231 | mtlr r3; \ | 242 | mtlr r3; \ |
232 | addi r1, r1, INT_FRAME_SIZE; \ | 243 | addi r1, r1, INT_FRAME_SIZE; \ |
233 | blr | 244 | blr |
@@ -242,10 +253,10 @@ define_load_up(vsx) | |||
242 | 253 | ||
243 | .global kvmppc_trampoline_lowmem | 254 | .global kvmppc_trampoline_lowmem |
244 | kvmppc_trampoline_lowmem: | 255 | kvmppc_trampoline_lowmem: |
245 | .long kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START | 256 | PPC_LONG kvmppc_handler_lowmem_trampoline - CONFIG_KERNEL_START |
246 | 257 | ||
247 | .global kvmppc_trampoline_enter | 258 | .global kvmppc_trampoline_enter |
248 | kvmppc_trampoline_enter: | 259 | kvmppc_trampoline_enter: |
249 | .long kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START | 260 | PPC_LONG kvmppc_handler_trampoline_enter - CONFIG_KERNEL_START |
250 | 261 | ||
251 | #include "book3s_segment.S" | 262 | #include "book3s_segment.S" |
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S index 7c52ed0b7051..451264274b8c 100644 --- a/arch/powerpc/kvm/book3s_segment.S +++ b/arch/powerpc/kvm/book3s_segment.S | |||
@@ -155,14 +155,20 @@ kvmppc_handler_trampoline_exit: | |||
155 | PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13) | 155 | PPC_LL r2, (SHADOW_VCPU_OFF + SVCPU_HOST_R2)(r13) |
156 | 156 | ||
157 | /* Save guest PC and MSR */ | 157 | /* Save guest PC and MSR */ |
158 | mfsrr0 r3 | 158 | andi. r0,r12,0x2 |
159 | beq 1f | ||
160 | mfspr r3,SPRN_HSRR0 | ||
161 | mfspr r4,SPRN_HSRR1 | ||
162 | andi. r12,r12,0x3ffd | ||
163 | b 2f | ||
164 | 1: mfsrr0 r3 | ||
159 | mfsrr1 r4 | 165 | mfsrr1 r4 |
160 | 166 | 2: | |
161 | PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13) | 167 | PPC_STL r3, (SHADOW_VCPU_OFF + SVCPU_PC)(r13) |
162 | PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13) | 168 | PPC_STL r4, (SHADOW_VCPU_OFF + SVCPU_SHADOW_SRR1)(r13) |
163 | 169 | ||
164 | /* Get scratch'ed off registers */ | 170 | /* Get scratch'ed off registers */ |
165 | mfspr r9, SPRN_SPRG_SCRATCH0 | 171 | GET_SCRATCH0(r9) |
166 | PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) | 172 | PPC_LL r8, (SHADOW_VCPU_OFF + SVCPU_SCRATCH0)(r13) |
167 | lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) | 173 | lwz r7, (SHADOW_VCPU_OFF + SVCPU_SCRATCH1)(r13) |
168 | 174 | ||
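
Note: the exit path now tests bit 1 of the trap number: hypervisor-routed vectors are delivered through HSRR0/HSRR1, so the guest PC and MSR must be read from that pair and the bit masked off the trap value before the common handling continues. A plain-C sketch of the dispatch; the vector numbers are illustrative.

    #include <stdio.h>

    static void save_guest_state(unsigned trap)
    {
        if (trap & 0x2)
            printf("trap 0x%x: read HSRR0/HSRR1, continue with trap 0x%x\n",
                   trap, trap & 0x3ffd);
        else
            printf("trap 0x%x: read SRR0/SRR1\n", trap);
    }

    int main(void)
    {
        save_guest_state(0x500);  /* external interrupt    -> SRR  pair */
        save_guest_state(0x502);  /* HV external interrupt -> HSRR pair */
        return 0;
    }
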
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c index 8d4e35f5372c..8462b3a1c1c7 100644 --- a/arch/powerpc/kvm/booke.c +++ b/arch/powerpc/kvm/booke.c | |||
@@ -62,9 +62,10 @@ void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu) | |||
62 | { | 62 | { |
63 | int i; | 63 | int i; |
64 | 64 | ||
65 | printk("pc: %08lx msr: %08lx\n", vcpu->arch.pc, vcpu->arch.msr); | 65 | printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr); |
66 | printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); | 66 | printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr); |
67 | printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1); | 67 | printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0, |
68 | vcpu->arch.shared->srr1); | ||
68 | 69 | ||
69 | printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); | 70 | printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions); |
70 | 71 | ||
@@ -130,13 +131,19 @@ void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu) | |||
130 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, | 131 | void kvmppc_core_queue_external(struct kvm_vcpu *vcpu, |
131 | struct kvm_interrupt *irq) | 132 | struct kvm_interrupt *irq) |
132 | { | 133 | { |
133 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL); | 134 | unsigned int prio = BOOKE_IRQPRIO_EXTERNAL; |
135 | |||
136 | if (irq->irq == KVM_INTERRUPT_SET_LEVEL) | ||
137 | prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL; | ||
138 | |||
139 | kvmppc_booke_queue_irqprio(vcpu, prio); | ||
134 | } | 140 | } |
135 | 141 | ||
136 | void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, | 142 | void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu, |
137 | struct kvm_interrupt *irq) | 143 | struct kvm_interrupt *irq) |
138 | { | 144 | { |
139 | clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); | 145 | clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions); |
146 | clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); | ||
140 | } | 147 | } |
141 | 148 | ||
142 | /* Deliver the interrupt of the corresponding priority, if possible. */ | 149 | /* Deliver the interrupt of the corresponding priority, if possible. */ |
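
Note: KVM_INTERRUPT_SET_LEVEL now queues under a separate BOOKE_IRQPRIO_EXTERNAL_LEVEL priority, which the delivery code in the next hunk folds back into the normal external vector while keeping the pending bit set, so the interrupt re-fires until userspace lowers the line. A minimal sketch of that behaviour with made-up priority numbers:

    #include <stdio.h>

    enum { IRQPRIO_EXTERNAL = 4, IRQPRIO_EXTERNAL_LEVEL = 5 };

    static unsigned long pending;

    static void deliver(int prio)
    {
        int keep_irq = 0;

        if (prio == IRQPRIO_EXTERNAL_LEVEL) {
            prio = IRQPRIO_EXTERNAL;   /* delivered through the same vector */
            keep_irq = 1;
        }
        if (!keep_irq)
            pending &= ~(1UL << prio);
        printf("delivered prio %d, pending=%#lx\n", prio, pending);
    }

    int main(void)
    {
        pending |= 1UL << IRQPRIO_EXTERNAL_LEVEL;
        deliver(IRQPRIO_EXTERNAL_LEVEL);   /* level: stays pending */
        pending |= 1UL << IRQPRIO_EXTERNAL;
        deliver(IRQPRIO_EXTERNAL);         /* edge: cleared */
        return 0;
    }
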
@@ -146,6 +153,26 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
146 | int allowed = 0; | 153 | int allowed = 0; |
147 | ulong uninitialized_var(msr_mask); | 154 | ulong uninitialized_var(msr_mask); |
148 | bool update_esr = false, update_dear = false; | 155 | bool update_esr = false, update_dear = false; |
156 | ulong crit_raw = vcpu->arch.shared->critical; | ||
157 | ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); | ||
158 | bool crit; | ||
159 | bool keep_irq = false; | ||
160 | |||
161 | /* Truncate crit indicators in 32 bit mode */ | ||
162 | if (!(vcpu->arch.shared->msr & MSR_SF)) { | ||
163 | crit_raw &= 0xffffffff; | ||
164 | crit_r1 &= 0xffffffff; | ||
165 | } | ||
166 | |||
167 | /* Critical section when crit == r1 */ | ||
168 | crit = (crit_raw == crit_r1); | ||
169 | /* ... and we're in supervisor mode */ | ||
170 | crit = crit && !(vcpu->arch.shared->msr & MSR_PR); | ||
171 | |||
172 | if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) { | ||
173 | priority = BOOKE_IRQPRIO_EXTERNAL; | ||
174 | keep_irq = true; | ||
175 | } | ||
149 | 176 | ||
150 | switch (priority) { | 177 | switch (priority) { |
151 | case BOOKE_IRQPRIO_DTLB_MISS: | 178 | case BOOKE_IRQPRIO_DTLB_MISS: |
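The hunk above adds two pieces of delivery policy: a "critical section" test that compares the marker the guest registered in the shared page against r1 (truncated to 32 bits when the guest is not in 64-bit mode, and only honoured in supervisor state), and a fold of the level-triggered pseudo-priority back onto BOOKE_IRQPRIO_EXTERNAL with keep_irq set so the pending bit survives delivery. A minimal user-space sketch of the critical-section predicate; the MSR bit values below are illustrative stand-ins, not taken from asm/reg.h:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative bit positions; the real MSR_SF/MSR_PR come from the kernel headers. */
#define MSR_SF (1ULL << 63)   /* 64-bit mode */
#define MSR_PR (1ULL << 14)   /* problem (user) state */

static bool guest_in_critical(uint64_t msr, uint64_t crit_raw, uint64_t r1)
{
	/* A 32-bit guest only registers/compares the low word. */
	if (!(msr & MSR_SF)) {
		crit_raw &= 0xffffffffULL;
		r1 &= 0xffffffffULL;
	}
	/* Critical: the registered marker equals r1 and the guest is in supervisor mode. */
	return crit_raw == r1 && !(msr & MSR_PR);
}

int main(void)
{
	printf("%d\n", guest_in_critical(MSR_SF, 0x1000, 0x1000));          /* 1 */
	printf("%d\n", guest_in_critical(MSR_SF | MSR_PR, 0x1000, 0x1000)); /* 0: user mode */
	printf("%d\n", guest_in_critical(0, 0x100000001ULL, 0x1));          /* 1: low words match */
	return 0;
}
```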
@@ -169,36 +196,38 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
169 | break; | 196 | break; |
170 | case BOOKE_IRQPRIO_CRITICAL: | 197 | case BOOKE_IRQPRIO_CRITICAL: |
171 | case BOOKE_IRQPRIO_WATCHDOG: | 198 | case BOOKE_IRQPRIO_WATCHDOG: |
172 | allowed = vcpu->arch.msr & MSR_CE; | 199 | allowed = vcpu->arch.shared->msr & MSR_CE; |
173 | msr_mask = MSR_ME; | 200 | msr_mask = MSR_ME; |
174 | break; | 201 | break; |
175 | case BOOKE_IRQPRIO_MACHINE_CHECK: | 202 | case BOOKE_IRQPRIO_MACHINE_CHECK: |
176 | allowed = vcpu->arch.msr & MSR_ME; | 203 | allowed = vcpu->arch.shared->msr & MSR_ME; |
177 | msr_mask = 0; | 204 | msr_mask = 0; |
178 | break; | 205 | break; |
179 | case BOOKE_IRQPRIO_EXTERNAL: | 206 | case BOOKE_IRQPRIO_EXTERNAL: |
180 | case BOOKE_IRQPRIO_DECREMENTER: | 207 | case BOOKE_IRQPRIO_DECREMENTER: |
181 | case BOOKE_IRQPRIO_FIT: | 208 | case BOOKE_IRQPRIO_FIT: |
182 | allowed = vcpu->arch.msr & MSR_EE; | 209 | allowed = vcpu->arch.shared->msr & MSR_EE; |
210 | allowed = allowed && !crit; | ||
183 | msr_mask = MSR_CE|MSR_ME|MSR_DE; | 211 | msr_mask = MSR_CE|MSR_ME|MSR_DE; |
184 | break; | 212 | break; |
185 | case BOOKE_IRQPRIO_DEBUG: | 213 | case BOOKE_IRQPRIO_DEBUG: |
186 | allowed = vcpu->arch.msr & MSR_DE; | 214 | allowed = vcpu->arch.shared->msr & MSR_DE; |
187 | msr_mask = MSR_ME; | 215 | msr_mask = MSR_ME; |
188 | break; | 216 | break; |
189 | } | 217 | } |
190 | 218 | ||
191 | if (allowed) { | 219 | if (allowed) { |
192 | vcpu->arch.srr0 = vcpu->arch.pc; | 220 | vcpu->arch.shared->srr0 = vcpu->arch.pc; |
193 | vcpu->arch.srr1 = vcpu->arch.msr; | 221 | vcpu->arch.shared->srr1 = vcpu->arch.shared->msr; |
194 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; | 222 | vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; |
195 | if (update_esr == true) | 223 | if (update_esr == true) |
196 | vcpu->arch.esr = vcpu->arch.queued_esr; | 224 | vcpu->arch.esr = vcpu->arch.queued_esr; |
197 | if (update_dear == true) | 225 | if (update_dear == true) |
198 | vcpu->arch.dear = vcpu->arch.queued_dear; | 226 | vcpu->arch.shared->dar = vcpu->arch.queued_dear; |
199 | kvmppc_set_msr(vcpu, vcpu->arch.msr & msr_mask); | 227 | kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask); |
200 | 228 | ||
201 | clear_bit(priority, &vcpu->arch.pending_exceptions); | 229 | if (!keep_irq) |
230 | clear_bit(priority, &vcpu->arch.pending_exceptions); | ||
202 | } | 231 | } |
203 | 232 | ||
204 | return allowed; | 233 | return allowed; |
@@ -208,6 +237,7 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, | |||
208 | void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) | 237 | void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) |
209 | { | 238 | { |
210 | unsigned long *pending = &vcpu->arch.pending_exceptions; | 239 | unsigned long *pending = &vcpu->arch.pending_exceptions; |
240 | unsigned long old_pending = vcpu->arch.pending_exceptions; | ||
211 | unsigned int priority; | 241 | unsigned int priority; |
212 | 242 | ||
213 | priority = __ffs(*pending); | 243 | priority = __ffs(*pending); |
@@ -219,6 +249,12 @@ void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu) | |||
219 | BITS_PER_BYTE * sizeof(*pending), | 249 | BITS_PER_BYTE * sizeof(*pending), |
220 | priority + 1); | 250 | priority + 1); |
221 | } | 251 | } |
252 | |||
253 | /* Tell the guest about our interrupt status */ | ||
254 | if (*pending) | ||
255 | vcpu->arch.shared->int_pending = 1; | ||
256 | else if (old_pending) | ||
257 | vcpu->arch.shared->int_pending = 0; | ||
222 | } | 258 | } |
223 | 259 | ||
224 | /** | 260 | /** |
@@ -265,7 +301,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
265 | break; | 301 | break; |
266 | 302 | ||
267 | case BOOKE_INTERRUPT_PROGRAM: | 303 | case BOOKE_INTERRUPT_PROGRAM: |
268 | if (vcpu->arch.msr & MSR_PR) { | 304 | if (vcpu->arch.shared->msr & MSR_PR) { |
269 | /* Program traps generated by user-level software must be handled | 305 | /* Program traps generated by user-level software must be handled |
270 | * by the guest kernel. */ | 306 | * by the guest kernel. */ |
271 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); | 307 | kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); |
@@ -337,7 +373,15 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
337 | break; | 373 | break; |
338 | 374 | ||
339 | case BOOKE_INTERRUPT_SYSCALL: | 375 | case BOOKE_INTERRUPT_SYSCALL: |
340 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL); | 376 | if (!(vcpu->arch.shared->msr & MSR_PR) && |
377 | (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { | ||
378 | /* KVM PV hypercalls */ | ||
379 | kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu)); | ||
380 | r = RESUME_GUEST; | ||
381 | } else { | ||
382 | /* Guest syscalls */ | ||
383 | kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL); | ||
384 | } | ||
341 | kvmppc_account_exit(vcpu, SYSCALL_EXITS); | 385 | kvmppc_account_exit(vcpu, SYSCALL_EXITS); |
342 | r = RESUME_GUEST; | 386 | r = RESUME_GUEST; |
343 | break; | 387 | break; |
@@ -466,15 +510,19 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
466 | /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ | 510 | /* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ |
467 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) | 511 | int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) |
468 | { | 512 | { |
513 | int i; | ||
514 | |||
469 | vcpu->arch.pc = 0; | 515 | vcpu->arch.pc = 0; |
470 | vcpu->arch.msr = 0; | 516 | vcpu->arch.shared->msr = 0; |
471 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ | 517 | kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ |
472 | 518 | ||
473 | vcpu->arch.shadow_pid = 1; | 519 | vcpu->arch.shadow_pid = 1; |
474 | 520 | ||
475 | /* Eye-catching number so we know if the guest takes an interrupt | 521 | /* Eye-catching numbers so we know if the guest takes an interrupt |
476 | * before it's programmed its own IVPR. */ | 522 | * before it's programmed its own IVPR/IVORs. */ |
477 | vcpu->arch.ivpr = 0x55550000; | 523 | vcpu->arch.ivpr = 0x55550000; |
524 | for (i = 0; i < BOOKE_IRQPRIO_MAX; i++) | ||
525 | vcpu->arch.ivor[i] = 0x7700 | i * 4; | ||
478 | 526 | ||
479 | kvmppc_init_timing_stats(vcpu); | 527 | kvmppc_init_timing_stats(vcpu); |
480 | 528 | ||
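kvm_arch_vcpu_setup() now seeds every IVOR, not just IVPR, with an eye-catching pattern (0x7700 | i * 4) so an interrupt taken before the guest programs its own vectors lands at an obviously bogus offset. A trivial sketch of the values that formula produces, assuming BOOKE_IRQPRIO_MAX is 20 as in the booke.h hunk further down:

```c
#include <stdio.h>

#define BOOKE_IRQPRIO_MAX 20   /* assumed to match the booke.h change below */

int main(void)
{
	/* Same eye-catcher formula as the vcpu setup code above. */
	for (int i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		printf("ivor[%2d] = 0x%04x\n", i, 0x7700 | (i * 4));
	return 0;
}
```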
@@ -490,17 +538,18 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
490 | regs->ctr = vcpu->arch.ctr; | 538 | regs->ctr = vcpu->arch.ctr; |
491 | regs->lr = vcpu->arch.lr; | 539 | regs->lr = vcpu->arch.lr; |
492 | regs->xer = kvmppc_get_xer(vcpu); | 540 | regs->xer = kvmppc_get_xer(vcpu); |
493 | regs->msr = vcpu->arch.msr; | 541 | regs->msr = vcpu->arch.shared->msr; |
494 | regs->srr0 = vcpu->arch.srr0; | 542 | regs->srr0 = vcpu->arch.shared->srr0; |
495 | regs->srr1 = vcpu->arch.srr1; | 543 | regs->srr1 = vcpu->arch.shared->srr1; |
496 | regs->pid = vcpu->arch.pid; | 544 | regs->pid = vcpu->arch.pid; |
497 | regs->sprg0 = vcpu->arch.sprg0; | 545 | regs->sprg0 = vcpu->arch.shared->sprg0; |
498 | regs->sprg1 = vcpu->arch.sprg1; | 546 | regs->sprg1 = vcpu->arch.shared->sprg1; |
499 | regs->sprg2 = vcpu->arch.sprg2; | 547 | regs->sprg2 = vcpu->arch.shared->sprg2; |
500 | regs->sprg3 = vcpu->arch.sprg3; | 548 | regs->sprg3 = vcpu->arch.shared->sprg3; |
501 | regs->sprg5 = vcpu->arch.sprg4; | 549 | regs->sprg4 = vcpu->arch.sprg4; |
502 | regs->sprg6 = vcpu->arch.sprg5; | 550 | regs->sprg5 = vcpu->arch.sprg5; |
503 | regs->sprg7 = vcpu->arch.sprg6; | 551 | regs->sprg6 = vcpu->arch.sprg6; |
552 | regs->sprg7 = vcpu->arch.sprg7; | ||
504 | 553 | ||
505 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 554 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
506 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); | 555 | regs->gpr[i] = kvmppc_get_gpr(vcpu, i); |
@@ -518,15 +567,17 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
518 | vcpu->arch.lr = regs->lr; | 567 | vcpu->arch.lr = regs->lr; |
519 | kvmppc_set_xer(vcpu, regs->xer); | 568 | kvmppc_set_xer(vcpu, regs->xer); |
520 | kvmppc_set_msr(vcpu, regs->msr); | 569 | kvmppc_set_msr(vcpu, regs->msr); |
521 | vcpu->arch.srr0 = regs->srr0; | 570 | vcpu->arch.shared->srr0 = regs->srr0; |
522 | vcpu->arch.srr1 = regs->srr1; | 571 | vcpu->arch.shared->srr1 = regs->srr1; |
523 | vcpu->arch.sprg0 = regs->sprg0; | 572 | kvmppc_set_pid(vcpu, regs->pid); |
524 | vcpu->arch.sprg1 = regs->sprg1; | 573 | vcpu->arch.shared->sprg0 = regs->sprg0; |
525 | vcpu->arch.sprg2 = regs->sprg2; | 574 | vcpu->arch.shared->sprg1 = regs->sprg1; |
526 | vcpu->arch.sprg3 = regs->sprg3; | 575 | vcpu->arch.shared->sprg2 = regs->sprg2; |
527 | vcpu->arch.sprg5 = regs->sprg4; | 576 | vcpu->arch.shared->sprg3 = regs->sprg3; |
528 | vcpu->arch.sprg6 = regs->sprg5; | 577 | vcpu->arch.sprg4 = regs->sprg4; |
529 | vcpu->arch.sprg7 = regs->sprg6; | 578 | vcpu->arch.sprg5 = regs->sprg5; |
579 | vcpu->arch.sprg6 = regs->sprg6; | ||
580 | vcpu->arch.sprg7 = regs->sprg7; | ||
530 | 581 | ||
531 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) | 582 | for (i = 0; i < ARRAY_SIZE(regs->gpr); i++) |
532 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); | 583 | kvmppc_set_gpr(vcpu, i, regs->gpr[i]); |
@@ -534,16 +585,165 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | |||
534 | return 0; | 585 | return 0; |
535 | } | 586 | } |
536 | 587 | ||
588 | static void get_sregs_base(struct kvm_vcpu *vcpu, | ||
589 | struct kvm_sregs *sregs) | ||
590 | { | ||
591 | u64 tb = get_tb(); | ||
592 | |||
593 | sregs->u.e.features |= KVM_SREGS_E_BASE; | ||
594 | |||
595 | sregs->u.e.csrr0 = vcpu->arch.csrr0; | ||
596 | sregs->u.e.csrr1 = vcpu->arch.csrr1; | ||
597 | sregs->u.e.mcsr = vcpu->arch.mcsr; | ||
598 | sregs->u.e.esr = vcpu->arch.esr; | ||
599 | sregs->u.e.dear = vcpu->arch.shared->dar; | ||
600 | sregs->u.e.tsr = vcpu->arch.tsr; | ||
601 | sregs->u.e.tcr = vcpu->arch.tcr; | ||
602 | sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); | ||
603 | sregs->u.e.tb = tb; | ||
604 | sregs->u.e.vrsave = vcpu->arch.vrsave; | ||
605 | } | ||
606 | |||
607 | static int set_sregs_base(struct kvm_vcpu *vcpu, | ||
608 | struct kvm_sregs *sregs) | ||
609 | { | ||
610 | if (!(sregs->u.e.features & KVM_SREGS_E_BASE)) | ||
611 | return 0; | ||
612 | |||
613 | vcpu->arch.csrr0 = sregs->u.e.csrr0; | ||
614 | vcpu->arch.csrr1 = sregs->u.e.csrr1; | ||
615 | vcpu->arch.mcsr = sregs->u.e.mcsr; | ||
616 | vcpu->arch.esr = sregs->u.e.esr; | ||
617 | vcpu->arch.shared->dar = sregs->u.e.dear; | ||
618 | vcpu->arch.vrsave = sregs->u.e.vrsave; | ||
619 | vcpu->arch.tcr = sregs->u.e.tcr; | ||
620 | |||
621 | if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) | ||
622 | vcpu->arch.dec = sregs->u.e.dec; | ||
623 | |||
624 | kvmppc_emulate_dec(vcpu); | ||
625 | |||
626 | if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) { | ||
627 | /* | ||
628 | * FIXME: existing KVM timer handling is incomplete. | ||
629 | * TSR cannot be read by the guest, and its value in | ||
630 | * vcpu->arch is always zero. For now, just handle | ||
631 | * the case where the caller is trying to inject a | ||
632 | * decrementer interrupt. | ||
633 | */ | ||
634 | |||
635 | if ((sregs->u.e.tsr & TSR_DIS) && | ||
636 | (vcpu->arch.tcr & TCR_DIE)) | ||
637 | kvmppc_core_queue_dec(vcpu); | ||
638 | } | ||
639 | |||
640 | return 0; | ||
641 | } | ||
642 | |||
643 | static void get_sregs_arch206(struct kvm_vcpu *vcpu, | ||
644 | struct kvm_sregs *sregs) | ||
645 | { | ||
646 | sregs->u.e.features |= KVM_SREGS_E_ARCH206; | ||
647 | |||
648 | sregs->u.e.pir = 0; | ||
649 | sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0; | ||
650 | sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1; | ||
651 | sregs->u.e.decar = vcpu->arch.decar; | ||
652 | sregs->u.e.ivpr = vcpu->arch.ivpr; | ||
653 | } | ||
654 | |||
655 | static int set_sregs_arch206(struct kvm_vcpu *vcpu, | ||
656 | struct kvm_sregs *sregs) | ||
657 | { | ||
658 | if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206)) | ||
659 | return 0; | ||
660 | |||
661 | if (sregs->u.e.pir != 0) | ||
662 | return -EINVAL; | ||
663 | |||
664 | vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0; | ||
665 | vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1; | ||
666 | vcpu->arch.decar = sregs->u.e.decar; | ||
667 | vcpu->arch.ivpr = sregs->u.e.ivpr; | ||
668 | |||
669 | return 0; | ||
670 | } | ||
671 | |||
672 | void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
673 | { | ||
674 | sregs->u.e.features |= KVM_SREGS_E_IVOR; | ||
675 | |||
676 | sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]; | ||
677 | sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]; | ||
678 | sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]; | ||
679 | sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]; | ||
680 | sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]; | ||
681 | sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]; | ||
682 | sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]; | ||
683 | sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]; | ||
684 | sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]; | ||
685 | sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]; | ||
686 | sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; | ||
687 | sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; | ||
688 | sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; | ||
689 | sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; | ||
690 | sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; | ||
691 | sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; | ||
692 | } | ||
693 | |||
694 | int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
695 | { | ||
696 | if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) | ||
697 | return 0; | ||
698 | |||
699 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0]; | ||
700 | vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1]; | ||
701 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2]; | ||
702 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3]; | ||
703 | vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4]; | ||
704 | vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5]; | ||
705 | vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6]; | ||
706 | vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7]; | ||
707 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8]; | ||
708 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9]; | ||
709 | vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10]; | ||
710 | vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11]; | ||
711 | vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12]; | ||
712 | vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13]; | ||
713 | vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14]; | ||
714 | vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15]; | ||
715 | |||
716 | return 0; | ||
717 | } | ||
718 | |||
537 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | 719 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
538 | struct kvm_sregs *sregs) | 720 | struct kvm_sregs *sregs) |
539 | { | 721 | { |
540 | return -ENOTSUPP; | 722 | sregs->pvr = vcpu->arch.pvr; |
723 | |||
724 | get_sregs_base(vcpu, sregs); | ||
725 | get_sregs_arch206(vcpu, sregs); | ||
726 | kvmppc_core_get_sregs(vcpu, sregs); | ||
727 | return 0; | ||
541 | } | 728 | } |
542 | 729 | ||
543 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | 730 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, |
544 | struct kvm_sregs *sregs) | 731 | struct kvm_sregs *sregs) |
545 | { | 732 | { |
546 | return -ENOTSUPP; | 733 | int ret; |
734 | |||
735 | if (vcpu->arch.pvr != sregs->pvr) | ||
736 | return -EINVAL; | ||
737 | |||
738 | ret = set_sregs_base(vcpu, sregs); | ||
739 | if (ret < 0) | ||
740 | return ret; | ||
741 | |||
742 | ret = set_sregs_arch206(vcpu, sregs); | ||
743 | if (ret < 0) | ||
744 | return ret; | ||
745 | |||
746 | return kvmppc_core_set_sregs(vcpu, sregs); | ||
547 | } | 747 | } |
548 | 748 | ||
549 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | 749 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
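The new get/set sregs path is built around per-feature blocks: the getters OR feature bits into sregs->u.e.features, and each setter consumes a block only when its bit is present (with a hard -EINVAL on PVR or PIR mismatches). A rough user-space model of that contract; the feature bits and the two-field register set here are invented for illustration and are much smaller than the real KVM_SREGS_E_* layout:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for KVM_SREGS_E_BASE / KVM_SREGS_E_ARCH206. */
#define F_BASE    (1u << 0)
#define F_ARCH206 (1u << 1)

struct sregs  { uint32_t features; uint64_t csrr0; uint64_t ivpr; };
struct vstate { uint64_t csrr0; uint64_t ivpr; };

static void get_sregs(const struct vstate *v, struct sregs *s)
{
	s->features |= F_BASE;      s->csrr0 = v->csrr0;
	s->features |= F_ARCH206;   s->ivpr  = v->ivpr;
}

static int set_sregs(struct vstate *v, const struct sregs *s)
{
	if (s->features & F_BASE)      v->csrr0 = s->csrr0;
	if (s->features & F_ARCH206)   v->ivpr  = s->ivpr;
	return 0;   /* blocks without their feature bit are simply ignored */
}

int main(void)
{
	struct vstate v = { .csrr0 = 0x100, .ivpr = 0x55550000 };
	struct sregs s = { 0 };

	get_sregs(&v, &s);
	s.csrr0 = 0x200;
	s.features = F_BASE;   /* only the base block is marked valid by userspace */
	set_sregs(&v, &s);

	printf("csrr0=%#llx ivpr=%#llx\n",
	       (unsigned long long)v.csrr0, (unsigned long long)v.ivpr);
	return 0;
}
```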
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h index d59bcca1f9d8..492bb7030358 100644 --- a/arch/powerpc/kvm/booke.h +++ b/arch/powerpc/kvm/booke.h | |||
@@ -46,7 +46,9 @@ | |||
46 | #define BOOKE_IRQPRIO_FIT 17 | 46 | #define BOOKE_IRQPRIO_FIT 17 |
47 | #define BOOKE_IRQPRIO_DECREMENTER 18 | 47 | #define BOOKE_IRQPRIO_DECREMENTER 18 |
48 | #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 | 48 | #define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 |
49 | #define BOOKE_IRQPRIO_MAX 19 | 49 | /* Internal pseudo-irqprio for level triggered externals */ |
50 | #define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20 | ||
51 | #define BOOKE_IRQPRIO_MAX 20 | ||
50 | 52 | ||
51 | extern unsigned long kvmppc_booke_handlers; | 53 | extern unsigned long kvmppc_booke_handlers; |
52 | 54 | ||
@@ -54,12 +56,12 @@ extern unsigned long kvmppc_booke_handlers; | |||
54 | * changing. */ | 56 | * changing. */ |
55 | static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) | 57 | static inline void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr) |
56 | { | 58 | { |
57 | if ((new_msr & MSR_PR) != (vcpu->arch.msr & MSR_PR)) | 59 | if ((new_msr & MSR_PR) != (vcpu->arch.shared->msr & MSR_PR)) |
58 | kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); | 60 | kvmppc_mmu_priv_switch(vcpu, new_msr & MSR_PR); |
59 | 61 | ||
60 | vcpu->arch.msr = new_msr; | 62 | vcpu->arch.shared->msr = new_msr; |
61 | 63 | ||
62 | if (vcpu->arch.msr & MSR_WE) { | 64 | if (vcpu->arch.shared->msr & MSR_WE) { |
63 | kvm_vcpu_block(vcpu); | 65 | kvm_vcpu_block(vcpu); |
64 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); | 66 | kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); |
65 | }; | 67 | }; |
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c index cbc790ee1928..1260f5f24c0c 100644 --- a/arch/powerpc/kvm/booke_emulate.c +++ b/arch/powerpc/kvm/booke_emulate.c | |||
@@ -31,8 +31,8 @@ | |||
31 | 31 | ||
32 | static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) | 32 | static void kvmppc_emul_rfi(struct kvm_vcpu *vcpu) |
33 | { | 33 | { |
34 | vcpu->arch.pc = vcpu->arch.srr0; | 34 | vcpu->arch.pc = vcpu->arch.shared->srr0; |
35 | kvmppc_set_msr(vcpu, vcpu->arch.srr1); | 35 | kvmppc_set_msr(vcpu, vcpu->arch.shared->srr1); |
36 | } | 36 | } |
37 | 37 | ||
38 | int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | 38 | int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, |
@@ -62,7 +62,7 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
62 | 62 | ||
63 | case OP_31_XOP_MFMSR: | 63 | case OP_31_XOP_MFMSR: |
64 | rt = get_rt(inst); | 64 | rt = get_rt(inst); |
65 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.msr); | 65 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); |
66 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); | 66 | kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); |
67 | break; | 67 | break; |
68 | 68 | ||
@@ -74,13 +74,13 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, | |||
74 | 74 | ||
75 | case OP_31_XOP_WRTEE: | 75 | case OP_31_XOP_WRTEE: |
76 | rs = get_rs(inst); | 76 | rs = get_rs(inst); |
77 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | 77 | vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) |
78 | | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); | 78 | | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); |
79 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | 79 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); |
80 | break; | 80 | break; |
81 | 81 | ||
82 | case OP_31_XOP_WRTEEI: | 82 | case OP_31_XOP_WRTEEI: |
83 | vcpu->arch.msr = (vcpu->arch.msr & ~MSR_EE) | 83 | vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) |
84 | | (inst & MSR_EE); | 84 | | (inst & MSR_EE); |
85 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); | 85 | kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); |
86 | break; | 86 | break; |
@@ -105,7 +105,7 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
105 | 105 | ||
106 | switch (sprn) { | 106 | switch (sprn) { |
107 | case SPRN_DEAR: | 107 | case SPRN_DEAR: |
108 | vcpu->arch.dear = spr_val; break; | 108 | vcpu->arch.shared->dar = spr_val; break; |
109 | case SPRN_ESR: | 109 | case SPRN_ESR: |
110 | vcpu->arch.esr = spr_val; break; | 110 | vcpu->arch.esr = spr_val; break; |
111 | case SPRN_DBCR0: | 111 | case SPRN_DBCR0: |
@@ -200,7 +200,7 @@ int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
200 | case SPRN_IVPR: | 200 | case SPRN_IVPR: |
201 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break; | 201 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break; |
202 | case SPRN_DEAR: | 202 | case SPRN_DEAR: |
203 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dear); break; | 203 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break; |
204 | case SPRN_ESR: | 204 | case SPRN_ESR: |
205 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break; | 205 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.esr); break; |
206 | case SPRN_DBCR0: | 206 | case SPRN_DBCR0: |
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S index 380a78cf484d..b58ccae95904 100644 --- a/arch/powerpc/kvm/booke_interrupts.S +++ b/arch/powerpc/kvm/booke_interrupts.S | |||
@@ -380,7 +380,6 @@ lightweight_exit: | |||
380 | * because host interrupt handlers would get confused. */ | 380 | * because host interrupt handlers would get confused. */ |
381 | lwz r1, VCPU_GPR(r1)(r4) | 381 | lwz r1, VCPU_GPR(r1)(r4) |
382 | 382 | ||
383 | /* XXX handle USPRG0 */ | ||
384 | /* Host interrupt handlers may have clobbered these guest-readable | 383 | /* Host interrupt handlers may have clobbered these guest-readable |
385 | * SPRGs, so we need to reload them here with the guest's values. */ | 384 | * SPRGs, so we need to reload them here with the guest's values. */ |
386 | lwz r3, VCPU_SPRG4(r4) | 385 | lwz r3, VCPU_SPRG4(r4) |
@@ -415,7 +414,8 @@ lightweight_exit: | |||
415 | lwz r8, VCPU_GPR(r8)(r4) | 414 | lwz r8, VCPU_GPR(r8)(r4) |
416 | lwz r3, VCPU_PC(r4) | 415 | lwz r3, VCPU_PC(r4) |
417 | mtsrr0 r3 | 416 | mtsrr0 r3 |
418 | lwz r3, VCPU_MSR(r4) | 417 | lwz r3, VCPU_SHARED(r4) |
418 | lwz r3, (VCPU_SHARED_MSR + 4)(r3) | ||
419 | oris r3, r3, KVMPPC_MSR_MASK@h | 419 | oris r3, r3, KVMPPC_MSR_MASK@h |
420 | ori r3, r3, KVMPPC_MSR_MASK@l | 420 | ori r3, r3, KVMPPC_MSR_MASK@l |
421 | mtsrr1 r3 | 421 | mtsrr1 r3 |
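lightweight_exit now reloads the guest MSR from the shared page, and since shared->msr is a u64 while this is 32-bit BookE assembly on a big-endian host, the interesting low word sits at byte offset VCPU_SHARED_MSR + 4. A small endianness sketch of that offset rule; it runs anywhere, but the "+4 holds the low word" property only holds on big-endian builds like the ones this code targets:

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint64_t msr = 0x123456789abcdef0ULL;   /* arbitrary example value */
	unsigned char b[8];
	uint32_t at0, at4;

	memcpy(b, &msr, sizeof(msr));
	memcpy(&at0, b, 4);
	memcpy(&at4, b + 4, 4);

	/* On a big-endian 32-bit host (the case the asm above targets),
	 * the low 32 bits of the u64 are the word at byte offset 4. */
	printf("word at +0: 0x%08x, word at +4: 0x%08x\n", at0, at4);
	return 0;
}
```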
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c index e8a00b0c4449..318dbc61ba44 100644 --- a/arch/powerpc/kvm/e500.c +++ b/arch/powerpc/kvm/e500.c | |||
@@ -63,6 +63,7 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) | |||
63 | 63 | ||
64 | /* Registers init */ | 64 | /* Registers init */ |
65 | vcpu->arch.pvr = mfspr(SPRN_PVR); | 65 | vcpu->arch.pvr = mfspr(SPRN_PVR); |
66 | vcpu_e500->svr = mfspr(SPRN_SVR); | ||
66 | 67 | ||
67 | /* Since booke kvm only support one core, update all vcpus' PIR to 0 */ | 68 | /* Since booke kvm only support one core, update all vcpus' PIR to 0 */ |
68 | vcpu->vcpu_id = 0; | 69 | vcpu->vcpu_id = 0; |
@@ -96,6 +97,81 @@ int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu, | |||
96 | return 0; | 97 | return 0; |
97 | } | 98 | } |
98 | 99 | ||
100 | void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
101 | { | ||
102 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
103 | |||
104 | sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_SPE | | ||
105 | KVM_SREGS_E_PM; | ||
106 | sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL; | ||
107 | |||
108 | sregs->u.e.impl.fsl.features = 0; | ||
109 | sregs->u.e.impl.fsl.svr = vcpu_e500->svr; | ||
110 | sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0; | ||
111 | sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar; | ||
112 | |||
113 | sregs->u.e.mas0 = vcpu_e500->mas0; | ||
114 | sregs->u.e.mas1 = vcpu_e500->mas1; | ||
115 | sregs->u.e.mas2 = vcpu_e500->mas2; | ||
116 | sregs->u.e.mas7_3 = ((u64)vcpu_e500->mas7 << 32) | vcpu_e500->mas3; | ||
117 | sregs->u.e.mas4 = vcpu_e500->mas4; | ||
118 | sregs->u.e.mas6 = vcpu_e500->mas6; | ||
119 | |||
120 | sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG); | ||
121 | sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg; | ||
122 | sregs->u.e.tlbcfg[1] = vcpu_e500->tlb1cfg; | ||
123 | sregs->u.e.tlbcfg[2] = 0; | ||
124 | sregs->u.e.tlbcfg[3] = 0; | ||
125 | |||
126 | sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; | ||
127 | sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; | ||
128 | sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; | ||
129 | sregs->u.e.ivor_high[3] = | ||
130 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; | ||
131 | |||
132 | kvmppc_get_sregs_ivor(vcpu, sregs); | ||
133 | } | ||
134 | |||
135 | int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | ||
136 | { | ||
137 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
138 | |||
139 | if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { | ||
140 | vcpu_e500->svr = sregs->u.e.impl.fsl.svr; | ||
141 | vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0; | ||
142 | vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar; | ||
143 | } | ||
144 | |||
145 | if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) { | ||
146 | vcpu_e500->mas0 = sregs->u.e.mas0; | ||
147 | vcpu_e500->mas1 = sregs->u.e.mas1; | ||
148 | vcpu_e500->mas2 = sregs->u.e.mas2; | ||
149 | vcpu_e500->mas7 = sregs->u.e.mas7_3 >> 32; | ||
150 | vcpu_e500->mas3 = (u32)sregs->u.e.mas7_3; | ||
151 | vcpu_e500->mas4 = sregs->u.e.mas4; | ||
152 | vcpu_e500->mas6 = sregs->u.e.mas6; | ||
153 | } | ||
154 | |||
155 | if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) | ||
156 | return 0; | ||
157 | |||
158 | if (sregs->u.e.features & KVM_SREGS_E_SPE) { | ||
159 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = | ||
160 | sregs->u.e.ivor_high[0]; | ||
161 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = | ||
162 | sregs->u.e.ivor_high[1]; | ||
163 | vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = | ||
164 | sregs->u.e.ivor_high[2]; | ||
165 | } | ||
166 | |||
167 | if (sregs->u.e.features & KVM_SREGS_E_PM) { | ||
168 | vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = | ||
169 | sregs->u.e.ivor_high[3]; | ||
170 | } | ||
171 | |||
172 | return kvmppc_set_sregs_ivor(vcpu, sregs); | ||
173 | } | ||
174 | |||
99 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | 175 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) |
100 | { | 176 | { |
101 | struct kvmppc_vcpu_e500 *vcpu_e500; | 177 | struct kvmppc_vcpu_e500 *vcpu_e500; |
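The e500 sregs code above exposes MAS7 and MAS3 to userspace as a single 64-bit mas7_3 field, packing MAS7 into the high word on the way out and splitting it again on the way in. A standalone sketch of that pack/unpack arithmetic:

```c
#include <stdint.h>
#include <stdio.h>

static uint64_t pack_mas7_3(uint32_t mas7, uint32_t mas3)
{
	return ((uint64_t)mas7 << 32) | mas3;
}

static void unpack_mas7_3(uint64_t mas7_3, uint32_t *mas7, uint32_t *mas3)
{
	*mas7 = mas7_3 >> 32;
	*mas3 = (uint32_t)mas7_3;
}

int main(void)
{
	uint32_t mas7 = 0x0000000f, mas3 = 0x12345675, m7, m3;
	uint64_t packed = pack_mas7_3(mas7, mas3);

	unpack_mas7_3(packed, &m7, &m3);
	printf("packed=%#llx mas7=%#x mas3=%#x\n",
	       (unsigned long long)packed, m7, m3);
	return 0;
}
```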
@@ -117,8 +193,14 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |||
117 | if (err) | 193 | if (err) |
118 | goto uninit_vcpu; | 194 | goto uninit_vcpu; |
119 | 195 | ||
196 | vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); | ||
197 | if (!vcpu->arch.shared) | ||
198 | goto uninit_tlb; | ||
199 | |||
120 | return vcpu; | 200 | return vcpu; |
121 | 201 | ||
202 | uninit_tlb: | ||
203 | kvmppc_e500_tlb_uninit(vcpu_e500); | ||
122 | uninit_vcpu: | 204 | uninit_vcpu: |
123 | kvm_vcpu_uninit(vcpu); | 205 | kvm_vcpu_uninit(vcpu); |
124 | free_vcpu: | 206 | free_vcpu: |
@@ -131,8 +213,9 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |||
131 | { | 213 | { |
132 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | 214 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); |
133 | 215 | ||
134 | kvmppc_e500_tlb_uninit(vcpu_e500); | 216 | free_page((unsigned long)vcpu->arch.shared); |
135 | kvm_vcpu_uninit(vcpu); | 217 | kvm_vcpu_uninit(vcpu); |
218 | kvmppc_e500_tlb_uninit(vcpu_e500); | ||
136 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); | 219 | kmem_cache_free(kvm_vcpu_cache, vcpu_e500); |
137 | } | 220 | } |
138 | 221 | ||
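vcpu create now allocates a zeroed shared page after the TLB is set up, with a new uninit_tlb error label, and the free path releases the page before the vcpu and TLB teardown. A generic user-space sketch of the goto-based unwind pattern used here; the resource names and allocation sizes are invented, only the shape of the error handling mirrors the code above:

```c
#include <stdio.h>
#include <stdlib.h>

struct vcpu { void *tlb; void *shared; };

static struct vcpu *vcpu_create(void)
{
	struct vcpu *v = calloc(1, sizeof(*v));
	if (!v)
		return NULL;

	v->tlb = malloc(64);            /* stand-in for TLB init */
	if (!v->tlb)
		goto free_vcpu;

	v->shared = calloc(1, 4096);    /* stand-in for the zeroed shared page */
	if (!v->shared)
		goto uninit_tlb;

	return v;

uninit_tlb:                             /* error path unwinds what was set up so far */
	free(v->tlb);
free_vcpu:
	free(v);
	return NULL;
}

static void vcpu_free(struct vcpu *v)
{
	free(v->shared);
	free(v->tlb);
	free(v);
}

int main(void)
{
	struct vcpu *v = vcpu_create();

	printf("created: %s\n", v ? "yes" : "no");
	if (v)
		vcpu_free(v);
	return 0;
}
```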
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c index 8e3edfbc9634..69cd665a0caf 100644 --- a/arch/powerpc/kvm/e500_emulate.c +++ b/arch/powerpc/kvm/e500_emulate.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. | 2 | * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * Author: Yu Liu, <yu.liu@freescale.com> | 4 | * Author: Yu Liu, <yu.liu@freescale.com> |
5 | * | 5 | * |
@@ -78,8 +78,7 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) | |||
78 | 78 | ||
79 | switch (sprn) { | 79 | switch (sprn) { |
80 | case SPRN_PID: | 80 | case SPRN_PID: |
81 | vcpu_e500->pid[0] = vcpu->arch.shadow_pid = | 81 | kvmppc_set_pid(vcpu, spr_val); |
82 | vcpu->arch.pid = spr_val; | ||
83 | break; | 82 | break; |
84 | case SPRN_PID1: | 83 | case SPRN_PID1: |
85 | vcpu_e500->pid[1] = spr_val; break; | 84 | vcpu_e500->pid[1] = spr_val; break; |
@@ -175,6 +174,8 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) | |||
175 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break; | 174 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break; |
176 | case SPRN_HID1: | 175 | case SPRN_HID1: |
177 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break; | 176 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break; |
177 | case SPRN_SVR: | ||
178 | kvmppc_set_gpr(vcpu, rt, vcpu_e500->svr); break; | ||
178 | 179 | ||
179 | case SPRN_MMUCSR0: | 180 | case SPRN_MMUCSR0: |
180 | kvmppc_set_gpr(vcpu, rt, 0); break; | 181 | kvmppc_set_gpr(vcpu, rt, 0); break; |
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c index 21011e12caeb..b18fe353397d 100644 --- a/arch/powerpc/kvm/e500_tlb.c +++ b/arch/powerpc/kvm/e500_tlb.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2008 Freescale Semiconductor, Inc. All rights reserved. | 2 | * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. |
3 | * | 3 | * |
4 | * Author: Yu Liu, yu.liu@freescale.com | 4 | * Author: Yu Liu, yu.liu@freescale.com |
5 | * | 5 | * |
@@ -24,6 +24,7 @@ | |||
24 | #include "../mm/mmu_decl.h" | 24 | #include "../mm/mmu_decl.h" |
25 | #include "e500_tlb.h" | 25 | #include "e500_tlb.h" |
26 | #include "trace.h" | 26 | #include "trace.h" |
27 | #include "timing.h" | ||
27 | 28 | ||
28 | #define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1) | 29 | #define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1) |
29 | 30 | ||
@@ -226,8 +227,7 @@ static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
226 | 227 | ||
227 | kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel); | 228 | kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel); |
228 | stlbe->mas1 = 0; | 229 | stlbe->mas1 = 0; |
229 | trace_kvm_stlb_inval(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2, | 230 | trace_kvm_stlb_inval(index_of(tlbsel, esel)); |
230 | stlbe->mas3, stlbe->mas7); | ||
231 | } | 231 | } |
232 | 232 | ||
233 | static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, | 233 | static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500, |
@@ -298,7 +298,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
298 | /* Get reference to new page. */ | 298 | /* Get reference to new page. */ |
299 | new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn); | 299 | new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn); |
300 | if (is_error_page(new_page)) { | 300 | if (is_error_page(new_page)) { |
301 | printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", gfn); | 301 | printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n", |
302 | (long)gfn); | ||
302 | kvm_release_page_clean(new_page); | 303 | kvm_release_page_clean(new_page); |
303 | return; | 304 | return; |
304 | } | 305 | } |
@@ -314,10 +315,10 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, | |||
314 | | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID; | 315 | | MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID; |
315 | stlbe->mas2 = (gvaddr & MAS2_EPN) | 316 | stlbe->mas2 = (gvaddr & MAS2_EPN) |
316 | | e500_shadow_mas2_attrib(gtlbe->mas2, | 317 | | e500_shadow_mas2_attrib(gtlbe->mas2, |
317 | vcpu_e500->vcpu.arch.msr & MSR_PR); | 318 | vcpu_e500->vcpu.arch.shared->msr & MSR_PR); |
318 | stlbe->mas3 = (hpaddr & MAS3_RPN) | 319 | stlbe->mas3 = (hpaddr & MAS3_RPN) |
319 | | e500_shadow_mas3_attrib(gtlbe->mas3, | 320 | | e500_shadow_mas3_attrib(gtlbe->mas3, |
320 | vcpu_e500->vcpu.arch.msr & MSR_PR); | 321 | vcpu_e500->vcpu.arch.shared->msr & MSR_PR); |
321 | stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN; | 322 | stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN; |
322 | 323 | ||
323 | trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2, | 324 | trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2, |
@@ -506,6 +507,7 @@ int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb) | |||
506 | vcpu_e500->mas7 = 0; | 507 | vcpu_e500->mas7 = 0; |
507 | } | 508 | } |
508 | 509 | ||
510 | kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS); | ||
509 | return EMULATE_DONE; | 511 | return EMULATE_DONE; |
510 | } | 512 | } |
511 | 513 | ||
@@ -571,33 +573,34 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) | |||
571 | write_host_tlbe(vcpu_e500, stlbsel, sesel); | 573 | write_host_tlbe(vcpu_e500, stlbsel, sesel); |
572 | } | 574 | } |
573 | 575 | ||
576 | kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS); | ||
574 | return EMULATE_DONE; | 577 | return EMULATE_DONE; |
575 | } | 578 | } |
576 | 579 | ||
577 | int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) | 580 | int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) |
578 | { | 581 | { |
579 | unsigned int as = !!(vcpu->arch.msr & MSR_IS); | 582 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); |
580 | 583 | ||
581 | return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); | 584 | return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); |
582 | } | 585 | } |
583 | 586 | ||
584 | int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) | 587 | int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) |
585 | { | 588 | { |
586 | unsigned int as = !!(vcpu->arch.msr & MSR_DS); | 589 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); |
587 | 590 | ||
588 | return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); | 591 | return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as); |
589 | } | 592 | } |
590 | 593 | ||
591 | void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) | 594 | void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu) |
592 | { | 595 | { |
593 | unsigned int as = !!(vcpu->arch.msr & MSR_IS); | 596 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); |
594 | 597 | ||
595 | kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as); | 598 | kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as); |
596 | } | 599 | } |
597 | 600 | ||
598 | void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) | 601 | void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu) |
599 | { | 602 | { |
600 | unsigned int as = !!(vcpu->arch.msr & MSR_DS); | 603 | unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS); |
601 | 604 | ||
602 | kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as); | 605 | kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as); |
603 | } | 606 | } |
@@ -672,6 +675,14 @@ int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu, | |||
672 | return -1; | 675 | return -1; |
673 | } | 676 | } |
674 | 677 | ||
678 | void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid) | ||
679 | { | ||
680 | struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); | ||
681 | |||
682 | vcpu_e500->pid[0] = vcpu->arch.shadow_pid = | ||
683 | vcpu->arch.pid = pid; | ||
684 | } | ||
685 | |||
675 | void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500) | 686 | void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500) |
676 | { | 687 | { |
677 | struct tlbe *tlbe; | 688 | struct tlbe *tlbe; |
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h index d28e3010a5e2..458946b4775d 100644 --- a/arch/powerpc/kvm/e500_tlb.h +++ b/arch/powerpc/kvm/e500_tlb.h | |||
@@ -171,7 +171,7 @@ static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu, | |||
171 | 171 | ||
172 | /* Does it match current guest AS? */ | 172 | /* Does it match current guest AS? */ |
173 | /* XXX what about IS != DS? */ | 173 | /* XXX what about IS != DS? */ |
174 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.msr & MSR_IS)) | 174 | if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS)) |
175 | return 0; | 175 | return 0; |
176 | 176 | ||
177 | gpa = get_tlb_raddr(tlbe); | 177 | gpa = get_tlb_raddr(tlbe); |
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c index 4568ec386c2a..141dce3c6810 100644 --- a/arch/powerpc/kvm/emulate.c +++ b/arch/powerpc/kvm/emulate.c | |||
@@ -114,6 +114,12 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu) | |||
114 | } | 114 | } |
115 | } | 115 | } |
116 | 116 | ||
117 | u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb) | ||
118 | { | ||
119 | u64 jd = tb - vcpu->arch.dec_jiffies; | ||
120 | return vcpu->arch.dec - jd; | ||
121 | } | ||
122 | |||
117 | /* XXX to do: | 123 | /* XXX to do: |
118 | * lhax | 124 | * lhax |
119 | * lhaux | 125 | * lhaux |
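The kvmppc_get_dec() helper added in this hunk factors out the decrementer read-back: the value the guest sees is whatever it programmed minus the timebase ticks elapsed since the write. A standalone restatement of that arithmetic, using plain parameters in place of the vcpu fields:

```c
#include <stdint.h>
#include <stdio.h>

/* dec: value the guest wrote; dec_jiffies: timebase when it was written;
 * tb: current timebase. Mirrors the helper factored out above. */
static uint32_t get_dec(uint32_t dec, uint64_t dec_jiffies, uint64_t tb)
{
	uint64_t elapsed = tb - dec_jiffies;

	return dec - (uint32_t)elapsed;   /* wraps, as the hardware register would */
}

int main(void)
{
	printf("%u\n", get_dec(1000, 5000, 5600));   /* 400 ticks remaining */
	printf("%u\n", get_dec(100, 0, 400));        /* already wrapped past zero */
	return 0;
}
```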
@@ -145,7 +151,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
145 | /* this default type might be overwritten by subcategories */ | 151 | /* this default type might be overwritten by subcategories */ |
146 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); | 152 | kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); |
147 | 153 | ||
148 | pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); | 154 | pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); |
149 | 155 | ||
150 | switch (get_op(inst)) { | 156 | switch (get_op(inst)) { |
151 | case OP_TRAP: | 157 | case OP_TRAP: |
@@ -242,9 +248,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
242 | 248 | ||
243 | switch (sprn) { | 249 | switch (sprn) { |
244 | case SPRN_SRR0: | 250 | case SPRN_SRR0: |
245 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break; | 251 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0); |
252 | break; | ||
246 | case SPRN_SRR1: | 253 | case SPRN_SRR1: |
247 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break; | 254 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1); |
255 | break; | ||
248 | case SPRN_PVR: | 256 | case SPRN_PVR: |
249 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; | 257 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; |
250 | case SPRN_PIR: | 258 | case SPRN_PIR: |
@@ -261,23 +269,24 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
261 | kvmppc_set_gpr(vcpu, rt, get_tb()); break; | 269 | kvmppc_set_gpr(vcpu, rt, get_tb()); break; |
262 | 270 | ||
263 | case SPRN_SPRG0: | 271 | case SPRN_SPRG0: |
264 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break; | 272 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0); |
273 | break; | ||
265 | case SPRN_SPRG1: | 274 | case SPRN_SPRG1: |
266 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break; | 275 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1); |
276 | break; | ||
267 | case SPRN_SPRG2: | 277 | case SPRN_SPRG2: |
268 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break; | 278 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2); |
279 | break; | ||
269 | case SPRN_SPRG3: | 280 | case SPRN_SPRG3: |
270 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break; | 281 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3); |
282 | break; | ||
271 | /* Note: SPRG4-7 are user-readable, so we don't get | 283 | /* Note: SPRG4-7 are user-readable, so we don't get |
272 | * a trap. */ | 284 | * a trap. */ |
273 | 285 | ||
274 | case SPRN_DEC: | 286 | case SPRN_DEC: |
275 | { | 287 | { |
276 | u64 jd = get_tb() - vcpu->arch.dec_jiffies; | 288 | kvmppc_set_gpr(vcpu, rt, |
277 | kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd); | 289 | kvmppc_get_dec(vcpu, get_tb())); |
278 | pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", | ||
279 | vcpu->arch.dec, jd, | ||
280 | kvmppc_get_gpr(vcpu, rt)); | ||
281 | break; | 290 | break; |
282 | } | 291 | } |
283 | default: | 292 | default: |
@@ -288,6 +297,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
288 | } | 297 | } |
289 | break; | 298 | break; |
290 | } | 299 | } |
300 | kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); | ||
291 | break; | 301 | break; |
292 | 302 | ||
293 | case OP_31_XOP_STHX: | 303 | case OP_31_XOP_STHX: |
@@ -320,9 +330,11 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
320 | rs = get_rs(inst); | 330 | rs = get_rs(inst); |
321 | switch (sprn) { | 331 | switch (sprn) { |
322 | case SPRN_SRR0: | 332 | case SPRN_SRR0: |
323 | vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break; | 333 | vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs); |
334 | break; | ||
324 | case SPRN_SRR1: | 335 | case SPRN_SRR1: |
325 | vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break; | 336 | vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs); |
337 | break; | ||
326 | 338 | ||
327 | /* XXX We need to context-switch the timebase for | 339 | /* XXX We need to context-switch the timebase for |
328 | * watchdog and FIT. */ | 340 | * watchdog and FIT. */ |
@@ -337,13 +349,17 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
337 | break; | 349 | break; |
338 | 350 | ||
339 | case SPRN_SPRG0: | 351 | case SPRN_SPRG0: |
340 | vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break; | 352 | vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs); |
353 | break; | ||
341 | case SPRN_SPRG1: | 354 | case SPRN_SPRG1: |
342 | vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break; | 355 | vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs); |
356 | break; | ||
343 | case SPRN_SPRG2: | 357 | case SPRN_SPRG2: |
344 | vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break; | 358 | vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs); |
359 | break; | ||
345 | case SPRN_SPRG3: | 360 | case SPRN_SPRG3: |
346 | vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break; | 361 | vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs); |
362 | break; | ||
347 | 363 | ||
348 | default: | 364 | default: |
349 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); | 365 | emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); |
@@ -351,6 +367,7 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) | |||
351 | printk("mtspr: unknown spr %x\n", sprn); | 367 | printk("mtspr: unknown spr %x\n", sprn); |
352 | break; | 368 | break; |
353 | } | 369 | } |
370 | kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); | ||
354 | break; | 371 | break; |
355 | 372 | ||
356 | case OP_31_XOP_DCBI: | 373 | case OP_31_XOP_DCBI: |
diff --git a/arch/powerpc/kvm/fpu.S b/arch/powerpc/kvm/fpu.S index cb34bbe16113..bf68d597549e 100644 --- a/arch/powerpc/kvm/fpu.S +++ b/arch/powerpc/kvm/fpu.S | |||
@@ -273,19 +273,11 @@ FPD_THREE_IN(fnmsub) | |||
273 | FPD_THREE_IN(fnmadd) | 273 | FPD_THREE_IN(fnmadd) |
274 | 274 | ||
275 | _GLOBAL(kvm_cvt_fd) | 275 | _GLOBAL(kvm_cvt_fd) |
276 | lfd 0,0(r5) /* load up fpscr value */ | ||
277 | MTFSF_L(0) | ||
278 | lfs 0,0(r3) | 276 | lfs 0,0(r3) |
279 | stfd 0,0(r4) | 277 | stfd 0,0(r4) |
280 | mffs 0 | ||
281 | stfd 0,0(r5) /* save new fpscr value */ | ||
282 | blr | 278 | blr |
283 | 279 | ||
284 | _GLOBAL(kvm_cvt_df) | 280 | _GLOBAL(kvm_cvt_df) |
285 | lfd 0,0(r5) /* load up fpscr value */ | ||
286 | MTFSF_L(0) | ||
287 | lfd 0,0(r3) | 281 | lfd 0,0(r3) |
288 | stfs 0,0(r4) | 282 | stfs 0,0(r4) |
289 | mffs 0 | ||
290 | stfd 0,0(r5) /* save new fpscr value */ | ||
291 | blr | 283 | blr |
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c index 72a4ad86ee91..616dd516ca1f 100644 --- a/arch/powerpc/kvm/powerpc.c +++ b/arch/powerpc/kvm/powerpc.c | |||
@@ -38,9 +38,56 @@ | |||
38 | 38 | ||
39 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) | 39 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *v) |
40 | { | 40 | { |
41 | return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions); | 41 | return !(v->arch.shared->msr & MSR_WE) || |
42 | !!(v->arch.pending_exceptions); | ||
42 | } | 43 | } |
43 | 44 | ||
45 | int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) | ||
46 | { | ||
47 | int nr = kvmppc_get_gpr(vcpu, 11); | ||
48 | int r; | ||
49 | unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); | ||
50 | unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); | ||
51 | unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); | ||
52 | unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); | ||
53 | unsigned long r2 = 0; | ||
54 | |||
55 | if (!(vcpu->arch.shared->msr & MSR_SF)) { | ||
56 | /* 32 bit mode */ | ||
57 | param1 &= 0xffffffff; | ||
58 | param2 &= 0xffffffff; | ||
59 | param3 &= 0xffffffff; | ||
60 | param4 &= 0xffffffff; | ||
61 | } | ||
62 | |||
63 | switch (nr) { | ||
64 | case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE: | ||
65 | { | ||
66 | vcpu->arch.magic_page_pa = param1; | ||
67 | vcpu->arch.magic_page_ea = param2; | ||
68 | |||
69 | r2 = KVM_MAGIC_FEAT_SR; | ||
70 | |||
71 | r = HC_EV_SUCCESS; | ||
72 | break; | ||
73 | } | ||
74 | case HC_VENDOR_KVM | KVM_HC_FEATURES: | ||
75 | r = HC_EV_SUCCESS; | ||
76 | #if defined(CONFIG_PPC_BOOK3S) /* XXX Missing magic page on BookE */ | ||
77 | r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); | ||
78 | #endif | ||
79 | |||
80 | /* Second return value is in r4 */ | ||
81 | break; | ||
82 | default: | ||
83 | r = HC_EV_UNIMPLEMENTED; | ||
84 | break; | ||
85 | } | ||
86 | |||
87 | kvmppc_set_gpr(vcpu, 4, r2); | ||
88 | |||
89 | return r; | ||
90 | } | ||
44 | 91 | ||
45 | int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) | 92 | int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu) |
46 | { | 93 | { |
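kvmppc_kvm_pv() takes the hypercall number from r11 and up to four parameters from r3-r6, truncates them to 32 bits when the guest is not in 64-bit mode, and hands back a primary return code (placed in r3 by the caller) plus a secondary value in r4. A reduced user-space model of that calling convention; the HC_* numbers and the MSR_SF bit value below are placeholders, the real ones live in the kernel headers:

```c
#include <stdint.h>
#include <stdio.h>

/* Placeholder constants for illustration only. */
#define MSR_SF            (1ULL << 63)
#define HC_EV_SUCCESS        0
#define HC_EV_UNIMPLEMENTED 12
#define HC_MAP_MAGIC_PAGE    1

struct pvstate { uint64_t magic_pa, magic_ea; };

static int handle_pv(struct pvstate *s, uint64_t msr, int nr,
		     uint64_t p1, uint64_t p2, uint64_t *r4)
{
	if (!(msr & MSR_SF)) {          /* 32-bit guest: only the low words count */
		p1 &= 0xffffffffULL;
		p2 &= 0xffffffffULL;
	}

	switch (nr) {
	case HC_MAP_MAGIC_PAGE:
		s->magic_pa = p1;
		s->magic_ea = p2;
		*r4 = 0;                /* secondary return value, handed back in r4 */
		return HC_EV_SUCCESS;
	default:
		return HC_EV_UNIMPLEMENTED;
	}
}

int main(void)
{
	struct pvstate s = { 0 };
	uint64_t r4 = 0;
	/* msr = 0 models a 32-bit guest, so the upper half of param1 is dropped. */
	int r3 = handle_pv(&s, 0, HC_MAP_MAGIC_PAGE, 0x1ffff1000ULL, 0x2000, &r4);

	printf("r3=%d r4=%llu magic_pa=%#llx\n", r3,
	       (unsigned long long)r4, (unsigned long long)s.magic_pa);
	return 0;
}
```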
@@ -98,18 +145,12 @@ void kvm_arch_check_processor_compat(void *rtn) | |||
98 | *(int *)rtn = kvmppc_core_check_processor_compat(); | 145 | *(int *)rtn = kvmppc_core_check_processor_compat(); |
99 | } | 146 | } |
100 | 147 | ||
101 | struct kvm *kvm_arch_create_vm(void) | 148 | int kvm_arch_init_vm(struct kvm *kvm) |
102 | { | 149 | { |
103 | struct kvm *kvm; | 150 | return 0; |
104 | |||
105 | kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL); | ||
106 | if (!kvm) | ||
107 | return ERR_PTR(-ENOMEM); | ||
108 | |||
109 | return kvm; | ||
110 | } | 151 | } |
111 | 152 | ||
112 | static void kvmppc_free_vcpus(struct kvm *kvm) | 153 | void kvm_arch_destroy_vm(struct kvm *kvm) |
113 | { | 154 | { |
114 | unsigned int i; | 155 | unsigned int i; |
115 | struct kvm_vcpu *vcpu; | 156 | struct kvm_vcpu *vcpu; |
@@ -129,24 +170,22 @@ void kvm_arch_sync_events(struct kvm *kvm) | |||
129 | { | 170 | { |
130 | } | 171 | } |
131 | 172 | ||
132 | void kvm_arch_destroy_vm(struct kvm *kvm) | ||
133 | { | ||
134 | kvmppc_free_vcpus(kvm); | ||
135 | kvm_free_physmem(kvm); | ||
136 | cleanup_srcu_struct(&kvm->srcu); | ||
137 | kfree(kvm); | ||
138 | } | ||
139 | |||
140 | int kvm_dev_ioctl_check_extension(long ext) | 173 | int kvm_dev_ioctl_check_extension(long ext) |
141 | { | 174 | { |
142 | int r; | 175 | int r; |
143 | 176 | ||
144 | switch (ext) { | 177 | switch (ext) { |
178 | #ifdef CONFIG_BOOKE | ||
179 | case KVM_CAP_PPC_BOOKE_SREGS: | ||
180 | #else | ||
145 | case KVM_CAP_PPC_SEGSTATE: | 181 | case KVM_CAP_PPC_SEGSTATE: |
182 | #endif | ||
146 | case KVM_CAP_PPC_PAIRED_SINGLES: | 183 | case KVM_CAP_PPC_PAIRED_SINGLES: |
147 | case KVM_CAP_PPC_UNSET_IRQ: | 184 | case KVM_CAP_PPC_UNSET_IRQ: |
185 | case KVM_CAP_PPC_IRQ_LEVEL: | ||
148 | case KVM_CAP_ENABLE_CAP: | 186 | case KVM_CAP_ENABLE_CAP: |
149 | case KVM_CAP_PPC_OSI: | 187 | case KVM_CAP_PPC_OSI: |
188 | case KVM_CAP_PPC_GET_PVINFO: | ||
150 | r = 1; | 189 | r = 1; |
151 | break; | 190 | break; |
152 | case KVM_CAP_COALESCED_MMIO: | 191 | case KVM_CAP_COALESCED_MMIO: |
@@ -249,6 +288,10 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) | |||
249 | tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu); | 288 | tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu); |
250 | vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; | 289 | vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; |
251 | 290 | ||
291 | #ifdef CONFIG_KVM_EXIT_TIMING | ||
292 | mutex_init(&vcpu->arch.exit_timing_lock); | ||
293 | #endif | ||
294 | |||
252 | return 0; | 295 | return 0; |
253 | } | 296 | } |
254 | 297 | ||
@@ -259,12 +302,25 @@ void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) | |||
259 | 302 | ||
260 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | 303 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
261 | { | 304 | { |
305 | #ifdef CONFIG_BOOKE | ||
306 | /* | ||
307 | * vrsave (formerly usprg0) isn't used by Linux, but may | ||
308 | * be used by the guest. | ||
309 | * | ||
310 | * On non-booke this is associated with Altivec and | ||
311 | * is handled by code in book3s.c. | ||
312 | */ | ||
313 | mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); | ||
314 | #endif | ||
262 | kvmppc_core_vcpu_load(vcpu, cpu); | 315 | kvmppc_core_vcpu_load(vcpu, cpu); |
263 | } | 316 | } |
264 | 317 | ||
265 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | 318 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) |
266 | { | 319 | { |
267 | kvmppc_core_vcpu_put(vcpu); | 320 | kvmppc_core_vcpu_put(vcpu); |
321 | #ifdef CONFIG_BOOKE | ||
322 | vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); | ||
323 | #endif | ||
268 | } | 324 | } |
269 | 325 | ||
270 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | 326 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
@@ -534,16 +590,54 @@ out: | |||
534 | return r; | 590 | return r; |
535 | } | 591 | } |
536 | 592 | ||
593 | static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo) | ||
594 | { | ||
595 | u32 inst_lis = 0x3c000000; | ||
596 | u32 inst_ori = 0x60000000; | ||
597 | u32 inst_nop = 0x60000000; | ||
598 | u32 inst_sc = 0x44000002; | ||
599 | u32 inst_imm_mask = 0xffff; | ||
600 | |||
601 | /* | ||
602 | * The hypercall to get into KVM from within guest context is as | ||
603 | * follows: | ||
604 | * | ||
605 | * lis r0, r0, KVM_SC_MAGIC_R0@h | ||
606 | * ori r0, KVM_SC_MAGIC_R0@l | ||
607 | * sc | ||
608 | * nop | ||
609 | */ | ||
610 | pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask); | ||
611 | pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask); | ||
612 | pvinfo->hcall[2] = inst_sc; | ||
613 | pvinfo->hcall[3] = inst_nop; | ||
614 | |||
615 | return 0; | ||
616 | } | ||
617 | |||
537 | long kvm_arch_vm_ioctl(struct file *filp, | 618 | long kvm_arch_vm_ioctl(struct file *filp, |
538 | unsigned int ioctl, unsigned long arg) | 619 | unsigned int ioctl, unsigned long arg) |
539 | { | 620 | { |
621 | void __user *argp = (void __user *)arg; | ||
540 | long r; | 622 | long r; |
541 | 623 | ||
542 | switch (ioctl) { | 624 | switch (ioctl) { |
625 | case KVM_PPC_GET_PVINFO: { | ||
626 | struct kvm_ppc_pvinfo pvinfo; | ||
627 | memset(&pvinfo, 0, sizeof(pvinfo)); | ||
628 | r = kvm_vm_ioctl_get_pvinfo(&pvinfo); | ||
629 | if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) { | ||
630 | r = -EFAULT; | ||
631 | goto out; | ||
632 | } | ||
633 | |||
634 | break; | ||
635 | } | ||
543 | default: | 636 | default: |
544 | r = -ENOTTY; | 637 | r = -ENOTTY; |
545 | } | 638 | } |
546 | 639 | ||
640 | out: | ||
547 | return r; | 641 | return r; |
548 | } | 642 | } |
549 | 643 | ||
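kvm_vm_ioctl_get_pvinfo() hands userspace the four instruction words a guest should patch in to issue the hypercall: lis r0 with the high half of the magic value, ori r0,r0 with the low half, then sc and a nop. A sketch that assembles those words the same way; the magic constant below is a placeholder, not necessarily the kernel's KVM_SC_MAGIC_R0:

```c
#include <stdint.h>
#include <stdio.h>

#define SC_MAGIC 0x4b564d21u   /* placeholder; the real KVM_SC_MAGIC_R0 is in kvm_para.h */

int main(void)
{
	const uint32_t inst_lis = 0x3c000000;   /* lis r0, 0        */
	const uint32_t inst_ori = 0x60000000;   /* ori r0, r0, 0    */
	const uint32_t inst_sc  = 0x44000002;   /* sc               */
	const uint32_t inst_nop = 0x60000000;   /* ori 0,0,0 == nop */
	uint32_t hcall[4];

	hcall[0] = inst_lis | ((SC_MAGIC >> 16) & 0xffff);   /* lis r0, MAGIC@h     */
	hcall[1] = inst_ori | (SC_MAGIC & 0xffff);           /* ori r0, r0, MAGIC@l */
	hcall[2] = inst_sc;
	hcall[3] = inst_nop;

	for (int i = 0; i < 4; i++)
		printf("hcall[%d] = 0x%08x\n", i, hcall[i]);
	return 0;
}
```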
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c index 46fa04f12a9b..319177df9587 100644 --- a/arch/powerpc/kvm/timing.c +++ b/arch/powerpc/kvm/timing.c | |||
@@ -34,9 +34,8 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) | |||
34 | { | 34 | { |
35 | int i; | 35 | int i; |
36 | 36 | ||
37 | /* pause guest execution to avoid concurrent updates */ | 37 | /* Take a lock to avoid concurrent updates */ |
38 | local_irq_disable(); | 38 | mutex_lock(&vcpu->arch.exit_timing_lock); |
39 | mutex_lock(&vcpu->mutex); | ||
40 | 39 | ||
41 | vcpu->arch.last_exit_type = 0xDEAD; | 40 | vcpu->arch.last_exit_type = 0xDEAD; |
42 | for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) { | 41 | for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) { |
@@ -50,8 +49,7 @@ void kvmppc_init_timing_stats(struct kvm_vcpu *vcpu) | |||
50 | vcpu->arch.timing_exit.tv64 = 0; | 49 | vcpu->arch.timing_exit.tv64 = 0; |
51 | vcpu->arch.timing_last_enter.tv64 = 0; | 50 | vcpu->arch.timing_last_enter.tv64 = 0; |
52 | 51 | ||
53 | mutex_unlock(&vcpu->mutex); | 52 | mutex_unlock(&vcpu->arch.exit_timing_lock); |
54 | local_irq_enable(); | ||
55 | } | 53 | } |
56 | 54 | ||
57 | static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type) | 55 | static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type) |
@@ -67,6 +65,8 @@ static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type) | |||
67 | return; | 65 | return; |
68 | } | 66 | } |
69 | 67 | ||
68 | mutex_lock(&vcpu->arch.exit_timing_lock); | ||
69 | |||
70 | vcpu->arch.timing_count_type[type]++; | 70 | vcpu->arch.timing_count_type[type]++; |
71 | 71 | ||
72 | /* sum */ | 72 | /* sum */ |
@@ -95,6 +95,8 @@ static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type) | |||
95 | vcpu->arch.timing_min_duration[type] = duration; | 95 | vcpu->arch.timing_min_duration[type] = duration; |
96 | if (unlikely(duration > vcpu->arch.timing_max_duration[type])) | 96 | if (unlikely(duration > vcpu->arch.timing_max_duration[type])) |
97 | vcpu->arch.timing_max_duration[type] = duration; | 97 | vcpu->arch.timing_max_duration[type] = duration; |
98 | |||
99 | mutex_unlock(&vcpu->arch.exit_timing_lock); | ||
98 | } | 100 | } |
99 | 101 | ||
100 | void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) | 102 | void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) |
@@ -149,17 +151,30 @@ static int kvmppc_exit_timing_show(struct seq_file *m, void *private) | |||
149 | { | 151 | { |
150 | struct kvm_vcpu *vcpu = m->private; | 152 | struct kvm_vcpu *vcpu = m->private; |
151 | int i; | 153 | int i; |
154 | u64 min, max, sum, sum_quad; | ||
152 | 155 | ||
153 | seq_printf(m, "%s", "type count min max sum sum_squared\n"); | 156 | seq_printf(m, "%s", "type count min max sum sum_squared\n"); |
154 | 157 | ||
158 | |||
155 | for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) { | 159 | for (i = 0; i < __NUMBER_OF_KVM_EXIT_TYPES; i++) { |
160 | |||
161 | min = vcpu->arch.timing_min_duration[i]; | ||
162 | do_div(min, tb_ticks_per_usec); | ||
163 | max = vcpu->arch.timing_max_duration[i]; | ||
164 | do_div(max, tb_ticks_per_usec); | ||
165 | sum = vcpu->arch.timing_sum_duration[i]; | ||
166 | do_div(sum, tb_ticks_per_usec); | ||
167 | sum_quad = vcpu->arch.timing_sum_quad_duration[i]; | ||
168 | do_div(sum_quad, tb_ticks_per_usec); | ||
169 | |||
156 | seq_printf(m, "%12s %10d %10lld %10lld %20lld %20lld\n", | 170 | seq_printf(m, "%12s %10d %10lld %10lld %20lld %20lld\n", |
157 | kvm_exit_names[i], | 171 | kvm_exit_names[i], |
158 | vcpu->arch.timing_count_type[i], | 172 | vcpu->arch.timing_count_type[i], |
159 | vcpu->arch.timing_min_duration[i], | 173 | min, |
160 | vcpu->arch.timing_max_duration[i], | 174 | max, |
161 | vcpu->arch.timing_sum_duration[i], | 175 | sum, |
162 | vcpu->arch.timing_sum_quad_duration[i]); | 176 | sum_quad); |
177 | |||
163 | } | 178 | } |
164 | return 0; | 179 | return 0; |
165 | } | 180 | } |
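
Note: the debugfs handler above now converts raw timebase ticks to microseconds before printing. A minimal userspace sketch of that conversion (assuming, as in the kernel, that tb_ticks_per_usec is the timebase frequency divided by one million; do_div() divides a u64 in place and returns the remainder):

#include <stdint.h>
#include <stdio.h>

static uint64_t ticks_to_usec(uint64_t ticks, uint64_t tb_ticks_per_usec)
{
	/* do_div(ticks, tb_ticks_per_usec) in the kernel; plain division here */
	return ticks / tb_ticks_per_usec;
}

int main(void)
{
	uint64_t tb_ticks_per_usec = 512;	/* e.g. a 512 MHz timebase */
	uint64_t min_ticks = 10240;		/* hypothetical minimum exit duration */

	printf("min exit duration: %llu us\n",
	       (unsigned long long)ticks_to_usec(min_ticks, tb_ticks_per_usec));
	return 0;
}
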
diff --git a/arch/powerpc/kvm/trace.h b/arch/powerpc/kvm/trace.h index a8e840018052..3aca1b042b8c 100644 --- a/arch/powerpc/kvm/trace.h +++ b/arch/powerpc/kvm/trace.h | |||
@@ -98,6 +98,245 @@ TRACE_EVENT(kvm_gtlb_write, | |||
98 | __entry->word1, __entry->word2) | 98 | __entry->word1, __entry->word2) |
99 | ); | 99 | ); |
100 | 100 | ||
101 | |||
102 | /************************************************************************* | ||
103 | * Book3S trace points * | ||
104 | *************************************************************************/ | ||
105 | |||
106 | #ifdef CONFIG_PPC_BOOK3S | ||
107 | |||
108 | TRACE_EVENT(kvm_book3s_exit, | ||
109 | TP_PROTO(unsigned int exit_nr, struct kvm_vcpu *vcpu), | ||
110 | TP_ARGS(exit_nr, vcpu), | ||
111 | |||
112 | TP_STRUCT__entry( | ||
113 | __field( unsigned int, exit_nr ) | ||
114 | __field( unsigned long, pc ) | ||
115 | __field( unsigned long, msr ) | ||
116 | __field( unsigned long, dar ) | ||
117 | __field( unsigned long, srr1 ) | ||
118 | ), | ||
119 | |||
120 | TP_fast_assign( | ||
121 | __entry->exit_nr = exit_nr; | ||
122 | __entry->pc = kvmppc_get_pc(vcpu); | ||
123 | __entry->dar = kvmppc_get_fault_dar(vcpu); | ||
124 | __entry->msr = vcpu->arch.shared->msr; | ||
125 | __entry->srr1 = to_svcpu(vcpu)->shadow_srr1; | ||
126 | ), | ||
127 | |||
128 | TP_printk("exit=0x%x | pc=0x%lx | msr=0x%lx | dar=0x%lx | srr1=0x%lx", | ||
129 | __entry->exit_nr, __entry->pc, __entry->msr, __entry->dar, | ||
130 | __entry->srr1) | ||
131 | ); | ||
132 | |||
133 | TRACE_EVENT(kvm_book3s_reenter, | ||
134 | TP_PROTO(int r, struct kvm_vcpu *vcpu), | ||
135 | TP_ARGS(r, vcpu), | ||
136 | |||
137 | TP_STRUCT__entry( | ||
138 | __field( unsigned int, r ) | ||
139 | __field( unsigned long, pc ) | ||
140 | ), | ||
141 | |||
142 | TP_fast_assign( | ||
143 | __entry->r = r; | ||
144 | __entry->pc = kvmppc_get_pc(vcpu); | ||
145 | ), | ||
146 | |||
147 | TP_printk("reentry r=%d | pc=0x%lx", __entry->r, __entry->pc) | ||
148 | ); | ||
149 | |||
150 | #ifdef CONFIG_PPC_BOOK3S_64 | ||
151 | |||
152 | TRACE_EVENT(kvm_book3s_64_mmu_map, | ||
153 | TP_PROTO(int rflags, ulong hpteg, ulong va, pfn_t hpaddr, | ||
154 | struct kvmppc_pte *orig_pte), | ||
155 | TP_ARGS(rflags, hpteg, va, hpaddr, orig_pte), | ||
156 | |||
157 | TP_STRUCT__entry( | ||
158 | __field( unsigned char, flag_w ) | ||
159 | __field( unsigned char, flag_x ) | ||
160 | __field( unsigned long, eaddr ) | ||
161 | __field( unsigned long, hpteg ) | ||
162 | __field( unsigned long, va ) | ||
163 | __field( unsigned long long, vpage ) | ||
164 | __field( unsigned long, hpaddr ) | ||
165 | ), | ||
166 | |||
167 | TP_fast_assign( | ||
168 | __entry->flag_w = ((rflags & HPTE_R_PP) == 3) ? '-' : 'w'; | ||
169 | __entry->flag_x = (rflags & HPTE_R_N) ? '-' : 'x'; | ||
170 | __entry->eaddr = orig_pte->eaddr; | ||
171 | __entry->hpteg = hpteg; | ||
172 | __entry->va = va; | ||
173 | __entry->vpage = orig_pte->vpage; | ||
174 | __entry->hpaddr = hpaddr; | ||
175 | ), | ||
176 | |||
177 | TP_printk("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx", | ||
178 | __entry->flag_w, __entry->flag_x, __entry->eaddr, | ||
179 | __entry->hpteg, __entry->va, __entry->vpage, __entry->hpaddr) | ||
180 | ); | ||
181 | |||
182 | #endif /* CONFIG_PPC_BOOK3S_64 */ | ||
183 | |||
184 | TRACE_EVENT(kvm_book3s_mmu_map, | ||
185 | TP_PROTO(struct hpte_cache *pte), | ||
186 | TP_ARGS(pte), | ||
187 | |||
188 | TP_STRUCT__entry( | ||
189 | __field( u64, host_va ) | ||
190 | __field( u64, pfn ) | ||
191 | __field( ulong, eaddr ) | ||
192 | __field( u64, vpage ) | ||
193 | __field( ulong, raddr ) | ||
194 | __field( int, flags ) | ||
195 | ), | ||
196 | |||
197 | TP_fast_assign( | ||
198 | __entry->host_va = pte->host_va; | ||
199 | __entry->pfn = pte->pfn; | ||
200 | __entry->eaddr = pte->pte.eaddr; | ||
201 | __entry->vpage = pte->pte.vpage; | ||
202 | __entry->raddr = pte->pte.raddr; | ||
203 | __entry->flags = (pte->pte.may_read ? 0x4 : 0) | | ||
204 | (pte->pte.may_write ? 0x2 : 0) | | ||
205 | (pte->pte.may_execute ? 0x1 : 0); | ||
206 | ), | ||
207 | |||
208 | TP_printk("Map: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", | ||
209 | __entry->host_va, __entry->pfn, __entry->eaddr, | ||
210 | __entry->vpage, __entry->raddr, __entry->flags) | ||
211 | ); | ||
212 | |||
213 | TRACE_EVENT(kvm_book3s_mmu_invalidate, | ||
214 | TP_PROTO(struct hpte_cache *pte), | ||
215 | TP_ARGS(pte), | ||
216 | |||
217 | TP_STRUCT__entry( | ||
218 | __field( u64, host_va ) | ||
219 | __field( u64, pfn ) | ||
220 | __field( ulong, eaddr ) | ||
221 | __field( u64, vpage ) | ||
222 | __field( ulong, raddr ) | ||
223 | __field( int, flags ) | ||
224 | ), | ||
225 | |||
226 | TP_fast_assign( | ||
227 | __entry->host_va = pte->host_va; | ||
228 | __entry->pfn = pte->pfn; | ||
229 | __entry->eaddr = pte->pte.eaddr; | ||
230 | __entry->vpage = pte->pte.vpage; | ||
231 | __entry->raddr = pte->pte.raddr; | ||
232 | __entry->flags = (pte->pte.may_read ? 0x4 : 0) | | ||
233 | (pte->pte.may_write ? 0x2 : 0) | | ||
234 | (pte->pte.may_execute ? 0x1 : 0); | ||
235 | ), | ||
236 | |||
237 | TP_printk("Flush: hva=%llx pfn=%llx ea=%lx vp=%llx ra=%lx [%x]", | ||
238 | __entry->host_va, __entry->pfn, __entry->eaddr, | ||
239 | __entry->vpage, __entry->raddr, __entry->flags) | ||
240 | ); | ||
241 | |||
242 | TRACE_EVENT(kvm_book3s_mmu_flush, | ||
243 | TP_PROTO(const char *type, struct kvm_vcpu *vcpu, unsigned long long p1, | ||
244 | unsigned long long p2), | ||
245 | TP_ARGS(type, vcpu, p1, p2), | ||
246 | |||
247 | TP_STRUCT__entry( | ||
248 | __field( int, count ) | ||
249 | __field( unsigned long long, p1 ) | ||
250 | __field( unsigned long long, p2 ) | ||
251 | __field( const char *, type ) | ||
252 | ), | ||
253 | |||
254 | TP_fast_assign( | ||
255 | __entry->count = vcpu->arch.hpte_cache_count; | ||
256 | __entry->p1 = p1; | ||
257 | __entry->p2 = p2; | ||
258 | __entry->type = type; | ||
259 | ), | ||
260 | |||
261 | TP_printk("Flush %d %sPTEs: %llx - %llx", | ||
262 | __entry->count, __entry->type, __entry->p1, __entry->p2) | ||
263 | ); | ||
264 | |||
265 | TRACE_EVENT(kvm_book3s_slb_found, | ||
266 | TP_PROTO(unsigned long long gvsid, unsigned long long hvsid), | ||
267 | TP_ARGS(gvsid, hvsid), | ||
268 | |||
269 | TP_STRUCT__entry( | ||
270 | __field( unsigned long long, gvsid ) | ||
271 | __field( unsigned long long, hvsid ) | ||
272 | ), | ||
273 | |||
274 | TP_fast_assign( | ||
275 | __entry->gvsid = gvsid; | ||
276 | __entry->hvsid = hvsid; | ||
277 | ), | ||
278 | |||
279 | TP_printk("%llx -> %llx", __entry->gvsid, __entry->hvsid) | ||
280 | ); | ||
281 | |||
282 | TRACE_EVENT(kvm_book3s_slb_fail, | ||
283 | TP_PROTO(u16 sid_map_mask, unsigned long long gvsid), | ||
284 | TP_ARGS(sid_map_mask, gvsid), | ||
285 | |||
286 | TP_STRUCT__entry( | ||
287 | __field( unsigned short, sid_map_mask ) | ||
288 | __field( unsigned long long, gvsid ) | ||
289 | ), | ||
290 | |||
291 | TP_fast_assign( | ||
292 | __entry->sid_map_mask = sid_map_mask; | ||
293 | __entry->gvsid = gvsid; | ||
294 | ), | ||
295 | |||
296 | TP_printk("%x/%x: %llx", __entry->sid_map_mask, | ||
297 | SID_MAP_MASK - __entry->sid_map_mask, __entry->gvsid) | ||
298 | ); | ||
299 | |||
300 | TRACE_EVENT(kvm_book3s_slb_map, | ||
301 | TP_PROTO(u16 sid_map_mask, unsigned long long gvsid, | ||
302 | unsigned long long hvsid), | ||
303 | TP_ARGS(sid_map_mask, gvsid, hvsid), | ||
304 | |||
305 | TP_STRUCT__entry( | ||
306 | __field( unsigned short, sid_map_mask ) | ||
307 | __field( unsigned long long, guest_vsid ) | ||
308 | __field( unsigned long long, host_vsid ) | ||
309 | ), | ||
310 | |||
311 | TP_fast_assign( | ||
312 | __entry->sid_map_mask = sid_map_mask; | ||
313 | __entry->guest_vsid = gvsid; | ||
314 | __entry->host_vsid = hvsid; | ||
315 | ), | ||
316 | |||
317 | TP_printk("%x: %llx -> %llx", __entry->sid_map_mask, | ||
318 | __entry->guest_vsid, __entry->host_vsid) | ||
319 | ); | ||
320 | |||
321 | TRACE_EVENT(kvm_book3s_slbmte, | ||
322 | TP_PROTO(u64 slb_vsid, u64 slb_esid), | ||
323 | TP_ARGS(slb_vsid, slb_esid), | ||
324 | |||
325 | TP_STRUCT__entry( | ||
326 | __field( u64, slb_vsid ) | ||
327 | __field( u64, slb_esid ) | ||
328 | ), | ||
329 | |||
330 | TP_fast_assign( | ||
331 | __entry->slb_vsid = slb_vsid; | ||
332 | __entry->slb_esid = slb_esid; | ||
333 | ), | ||
334 | |||
335 | TP_printk("%llx, %llx", __entry->slb_vsid, __entry->slb_esid) | ||
336 | ); | ||
337 | |||
338 | #endif /* CONFIG_PPC_BOOK3S */ | ||
339 | |||
101 | #endif /* _TRACE_KVM_H */ | 340 | #endif /* _TRACE_KVM_H */ |
102 | 341 | ||
103 | /* This part must be outside protection */ | 342 | /* This part must be outside protection */ |
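
Note: the kvm_book3s_mmu_map and kvm_book3s_mmu_invalidate events above encode the PTE permissions as a 3-bit field (may_read 0x4, may_write 0x2, may_execute 0x1) printed as "[%x]". A small standalone sketch for decoding that field when reading trace output (the helper name is illustrative, not part of the kernel):

#include <stdio.h>

static const char *decode_pte_flags(int flags, char buf[4])
{
	buf[0] = (flags & 0x4) ? 'r' : '-';	/* may_read */
	buf[1] = (flags & 0x2) ? 'w' : '-';	/* may_write */
	buf[2] = (flags & 0x1) ? 'x' : '-';	/* may_execute */
	buf[3] = '\0';
	return buf;
}

int main(void)
{
	char buf[4];

	printf("[%x] -> %s\n", 0x5, decode_pte_flags(0x5, buf));	/* r-x */
	return 0;
}
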
diff --git a/arch/powerpc/lib/Makefile b/arch/powerpc/lib/Makefile index 5bb89c828070..166a6a0ad544 100644 --- a/arch/powerpc/lib/Makefile +++ b/arch/powerpc/lib/Makefile | |||
@@ -4,9 +4,7 @@ | |||
4 | 4 | ||
5 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror | 5 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror |
6 | 6 | ||
7 | ifeq ($(CONFIG_PPC64),y) | 7 | ccflags-$(CONFIG_PPC64) := -mno-minimal-toc |
8 | EXTRA_CFLAGS += -mno-minimal-toc | ||
9 | endif | ||
10 | 8 | ||
11 | CFLAGS_REMOVE_code-patching.o = -pg | 9 | CFLAGS_REMOVE_code-patching.o = -pg |
12 | CFLAGS_REMOVE_feature-fixups.o = -pg | 10 | CFLAGS_REMOVE_feature-fixups.o = -pg |
@@ -17,7 +15,8 @@ obj-$(CONFIG_PPC32) += div64.o copy_32.o | |||
17 | obj-$(CONFIG_HAS_IOMEM) += devres.o | 15 | obj-$(CONFIG_HAS_IOMEM) += devres.o |
18 | 16 | ||
19 | obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \ | 17 | obj-$(CONFIG_PPC64) += copypage_64.o copyuser_64.o \ |
20 | memcpy_64.o usercopy_64.o mem_64.o string.o | 18 | memcpy_64.o usercopy_64.o mem_64.o string.o \ |
19 | checksum_wrappers_64.o hweight_64.o | ||
21 | obj-$(CONFIG_XMON) += sstep.o ldstfp.o | 20 | obj-$(CONFIG_XMON) += sstep.o ldstfp.o |
22 | obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o | 21 | obj-$(CONFIG_KPROBES) += sstep.o ldstfp.o |
23 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o | 22 | obj-$(CONFIG_HAVE_HW_BREAKPOINT) += sstep.o ldstfp.o |
diff --git a/arch/powerpc/lib/alloc.c b/arch/powerpc/lib/alloc.c index f53e09c7dac7..13b676c20d12 100644 --- a/arch/powerpc/lib/alloc.c +++ b/arch/powerpc/lib/alloc.c | |||
@@ -6,14 +6,6 @@ | |||
6 | 6 | ||
7 | #include <asm/system.h> | 7 | #include <asm/system.h> |
8 | 8 | ||
9 | void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask) | ||
10 | { | ||
11 | if (mem_init_done) | ||
12 | return kmalloc(size, mask); | ||
13 | else | ||
14 | return alloc_bootmem(size); | ||
15 | } | ||
16 | |||
17 | void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask) | 9 | void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask) |
18 | { | 10 | { |
19 | void *p; | 11 | void *p; |
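
Note: only the zeroing variant of the maybe-bootmem helper is kept; it follows the same pattern as the removed alloc_maybe_bootmem(), picking between the slab and bootmem allocators based on mem_init_done and zeroing the result. A sketch of that pattern (not necessarily the exact kernel body):

void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask)
{
	void *p;

	if (mem_init_done)
		p = kzalloc(size, mask);	/* slab is up: zeroing allocation */
	else {
		p = alloc_bootmem(size);	/* early boot: bootmem allocator */
		if (p)
			memset(p, 0, size);
	}

	return p;
}
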
diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S index ef96c6c58efc..18245af38aea 100644 --- a/arch/powerpc/lib/checksum_64.S +++ b/arch/powerpc/lib/checksum_64.S | |||
@@ -65,165 +65,393 @@ _GLOBAL(csum_tcpudp_magic) | |||
65 | srwi r3,r3,16 | 65 | srwi r3,r3,16 |
66 | blr | 66 | blr |
67 | 67 | ||
68 | #define STACKFRAMESIZE 256 | ||
69 | #define STK_REG(i) (112 + ((i)-14)*8) | ||
70 | |||
68 | /* | 71 | /* |
69 | * Computes the checksum of a memory block at buff, length len, | 72 | * Computes the checksum of a memory block at buff, length len, |
70 | * and adds in "sum" (32-bit). | 73 | * and adds in "sum" (32-bit). |
71 | * | 74 | * |
72 | * This code assumes at least halfword alignment, though the length | ||
73 | * can be any number of bytes. The sum is accumulated in r5. | ||
74 | * | ||
75 | * csum_partial(r3=buff, r4=len, r5=sum) | 75 | * csum_partial(r3=buff, r4=len, r5=sum) |
76 | */ | 76 | */ |
77 | _GLOBAL(csum_partial) | 77 | _GLOBAL(csum_partial) |
78 | subi r3,r3,8 /* we'll offset by 8 for the loads */ | 78 | addic r0,r5,0 /* clear carry */ |
79 | srdi. r6,r4,3 /* divide by 8 for doubleword count */ | 79 | |
80 | addic r5,r5,0 /* clear carry */ | 80 | srdi. r6,r4,3 /* less than 8 bytes? */ |
81 | beq 3f /* if we're doing < 8 bytes */ | 81 | beq .Lcsum_tail_word |
82 | andi. r0,r3,2 /* aligned on a word boundary already? */ | 82 | |
83 | beq+ 1f | 83 | /* |
84 | lhz r6,8(r3) /* do 2 bytes to get aligned */ | 84 | * If only halfword aligned, align to a double word. Since odd |
85 | addi r3,r3,2 | 85 | * aligned addresses should be rare and they would require more |
86 | subi r4,r4,2 | 86 | * work to calculate the correct checksum, we ignore that case |
87 | addc r5,r5,r6 | 87 | * and take the potential slowdown of unaligned loads. |
88 | srdi. r6,r4,3 /* recompute number of doublewords */ | 88 | */ |
89 | beq 3f /* any left? */ | 89 | rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */ |
90 | 1: mtctr r6 | 90 | beq .Lcsum_aligned |
91 | 2: ldu r6,8(r3) /* main sum loop */ | 91 | |
92 | adde r5,r5,r6 | 92 | li r7,4 |
93 | bdnz 2b | 93 | sub r6,r7,r6 |
94 | andi. r4,r4,7 /* compute bytes left to sum after doublewords */ | 94 | mtctr r6 |
95 | 3: cmpwi 0,r4,4 /* is at least a full word left? */ | 95 | |
96 | blt 4f | 96 | 1: |
97 | lwz r6,8(r3) /* sum this word */ | 97 | lhz r6,0(r3) /* align to doubleword */ |
98 | subi r4,r4,2 | ||
99 | addi r3,r3,2 | ||
100 | adde r0,r0,r6 | ||
101 | bdnz 1b | ||
102 | |||
103 | .Lcsum_aligned: | ||
104 | /* | ||
105 | * We unroll the loop such that each iteration is 64 bytes with an | ||
106 | * entry and exit limb of 64 bytes, meaning a minimum size of | ||
107 | * 128 bytes. | ||
108 | */ | ||
109 | srdi. r6,r4,7 | ||
110 | beq .Lcsum_tail_doublewords /* len < 128 */ | ||
111 | |||
112 | srdi r6,r4,6 | ||
113 | subi r6,r6,1 | ||
114 | mtctr r6 | ||
115 | |||
116 | stdu r1,-STACKFRAMESIZE(r1) | ||
117 | std r14,STK_REG(r14)(r1) | ||
118 | std r15,STK_REG(r15)(r1) | ||
119 | std r16,STK_REG(r16)(r1) | ||
120 | |||
121 | ld r6,0(r3) | ||
122 | ld r9,8(r3) | ||
123 | |||
124 | ld r10,16(r3) | ||
125 | ld r11,24(r3) | ||
126 | |||
127 | /* | ||
128 | * On POWER6 and POWER7 back to back addes take 2 cycles because of | ||
129 | * the XER dependency. This means the fastest this loop can go is | ||
130 | * 16 cycles per iteration. The scheduling of the loop below has | ||
131 | * been shown to hit this on both POWER6 and POWER7. | ||
132 | */ | ||
133 | .align 5 | ||
134 | 2: | ||
135 | adde r0,r0,r6 | ||
136 | ld r12,32(r3) | ||
137 | ld r14,40(r3) | ||
138 | |||
139 | adde r0,r0,r9 | ||
140 | ld r15,48(r3) | ||
141 | ld r16,56(r3) | ||
142 | addi r3,r3,64 | ||
143 | |||
144 | adde r0,r0,r10 | ||
145 | |||
146 | adde r0,r0,r11 | ||
147 | |||
148 | adde r0,r0,r12 | ||
149 | |||
150 | adde r0,r0,r14 | ||
151 | |||
152 | adde r0,r0,r15 | ||
153 | ld r6,0(r3) | ||
154 | ld r9,8(r3) | ||
155 | |||
156 | adde r0,r0,r16 | ||
157 | ld r10,16(r3) | ||
158 | ld r11,24(r3) | ||
159 | bdnz 2b | ||
160 | |||
161 | |||
162 | adde r0,r0,r6 | ||
163 | ld r12,32(r3) | ||
164 | ld r14,40(r3) | ||
165 | |||
166 | adde r0,r0,r9 | ||
167 | ld r15,48(r3) | ||
168 | ld r16,56(r3) | ||
169 | addi r3,r3,64 | ||
170 | |||
171 | adde r0,r0,r10 | ||
172 | adde r0,r0,r11 | ||
173 | adde r0,r0,r12 | ||
174 | adde r0,r0,r14 | ||
175 | adde r0,r0,r15 | ||
176 | adde r0,r0,r16 | ||
177 | |||
178 | ld r14,STK_REG(r14)(r1) | ||
179 | ld r15,STK_REG(r15)(r1) | ||
180 | ld r16,STK_REG(r16)(r1) | ||
181 | addi r1,r1,STACKFRAMESIZE | ||
182 | |||
183 | andi. r4,r4,63 | ||
184 | |||
185 | .Lcsum_tail_doublewords: /* Up to 127 bytes to go */ | ||
186 | srdi. r6,r4,3 | ||
187 | beq .Lcsum_tail_word | ||
188 | |||
189 | mtctr r6 | ||
190 | 3: | ||
191 | ld r6,0(r3) | ||
192 | addi r3,r3,8 | ||
193 | adde r0,r0,r6 | ||
194 | bdnz 3b | ||
195 | |||
196 | andi. r4,r4,7 | ||
197 | |||
198 | .Lcsum_tail_word: /* Up to 7 bytes to go */ | ||
199 | srdi. r6,r4,2 | ||
200 | beq .Lcsum_tail_halfword | ||
201 | |||
202 | lwz r6,0(r3) | ||
98 | addi r3,r3,4 | 203 | addi r3,r3,4 |
204 | adde r0,r0,r6 | ||
99 | subi r4,r4,4 | 205 | subi r4,r4,4 |
100 | adde r5,r5,r6 | 206 | |
101 | 4: cmpwi 0,r4,2 /* is at least a halfword left? */ | 207 | .Lcsum_tail_halfword: /* Up to 3 bytes to go */ |
102 | blt+ 5f | 208 | srdi. r6,r4,1 |
103 | lhz r6,8(r3) /* sum this halfword */ | 209 | beq .Lcsum_tail_byte |
104 | addi r3,r3,2 | 210 | |
105 | subi r4,r4,2 | 211 | lhz r6,0(r3) |
106 | adde r5,r5,r6 | 212 | addi r3,r3,2 |
107 | 5: cmpwi 0,r4,1 /* is at least a byte left? */ | 213 | adde r0,r0,r6 |
108 | bne+ 6f | 214 | subi r4,r4,2 |
109 | lbz r6,8(r3) /* sum this byte */ | 215 | |
110 | slwi r6,r6,8 /* this byte is assumed to be the upper byte of a halfword */ | 216 | .Lcsum_tail_byte: /* Up to 1 byte to go */ |
111 | adde r5,r5,r6 | 217 | andi. r6,r4,1 |
112 | 6: addze r5,r5 /* add in final carry */ | 218 | beq .Lcsum_finish |
113 | rldicl r4,r5,32,0 /* fold two 32-bit halves together */ | 219 | |
114 | add r3,r4,r5 | 220 | lbz r6,0(r3) |
115 | srdi r3,r3,32 | 221 | sldi r9,r6,8 /* Pad the byte out to 16 bits */ |
116 | blr | 222 | adde r0,r0,r9 |
223 | |||
224 | .Lcsum_finish: | ||
225 | addze r0,r0 /* add in final carry */ | ||
226 | rldicl r4,r0,32,0 /* fold two 32 bit halves together */ | ||
227 | add r3,r4,r0 | ||
228 | srdi r3,r3,32 | ||
229 | blr | ||
230 | |||
231 | |||
232 | .macro source | ||
233 | 100: | ||
234 | .section __ex_table,"a" | ||
235 | .align 3 | ||
236 | .llong 100b,.Lsrc_error | ||
237 | .previous | ||
238 | .endm | ||
239 | |||
240 | .macro dest | ||
241 | 200: | ||
242 | .section __ex_table,"a" | ||
243 | .align 3 | ||
244 | .llong 200b,.Ldest_error | ||
245 | .previous | ||
246 | .endm | ||
117 | 247 | ||
118 | /* | 248 | /* |
119 | * Computes the checksum of a memory block at src, length len, | 249 | * Computes the checksum of a memory block at src, length len, |
120 | * and adds in "sum" (32-bit), while copying the block to dst. | 250 | * and adds in "sum" (32-bit), while copying the block to dst. |
121 | * If an access exception occurs on src or dst, it stores -EFAULT | 251 | * If an access exception occurs on src or dst, it stores -EFAULT |
122 | * to *src_err or *dst_err respectively, and (for an error on | 252 | * to *src_err or *dst_err respectively. The caller must take any action |
123 | * src) zeroes the rest of dst. | 253 | * required in this case (zeroing memory, recalculating partial checksum etc). |
124 | * | ||
125 | * This code needs to be reworked to take advantage of 64 bit sum+copy. | ||
126 | * However, due to tokenring halfword alignment problems this will be very | ||
127 | * tricky. For now we'll leave it until we instrument it somehow. | ||
128 | * | 254 | * |
129 | * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err) | 255 | * csum_partial_copy_generic(r3=src, r4=dst, r5=len, r6=sum, r7=src_err, r8=dst_err) |
130 | */ | 256 | */ |
131 | _GLOBAL(csum_partial_copy_generic) | 257 | _GLOBAL(csum_partial_copy_generic) |
132 | addic r0,r6,0 | 258 | addic r0,r6,0 /* clear carry */ |
133 | subi r3,r3,4 | 259 | |
134 | subi r4,r4,4 | 260 | srdi. r6,r5,3 /* less than 8 bytes? */ |
135 | srwi. r6,r5,2 | 261 | beq .Lcopy_tail_word |
136 | beq 3f /* if we're doing < 4 bytes */ | 262 | |
137 | andi. r9,r4,2 /* Align dst to longword boundary */ | 263 | /* |
138 | beq+ 1f | 264 | * If only halfword aligned, align to a double word. Since odd |
139 | 81: lhz r6,4(r3) /* do 2 bytes to get aligned */ | 265 | * aligned addresses should be rare and they would require more |
140 | addi r3,r3,2 | 266 | * work to calculate the correct checksum, we ignore that case |
267 | * and take the potential slowdown of unaligned loads. | ||
268 | * | ||
269 | * If the source and destination are relatively unaligned we only | ||
270 | * align the source. This keeps things simple. | ||
271 | */ | ||
272 | rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */ | ||
273 | beq .Lcopy_aligned | ||
274 | |||
275 | li r7,4 | ||
276 | sub r6,r7,r6 | ||
277 | mtctr r6 | ||
278 | |||
279 | 1: | ||
280 | source; lhz r6,0(r3) /* align to doubleword */ | ||
141 | subi r5,r5,2 | 281 | subi r5,r5,2 |
142 | 91: sth r6,4(r4) | ||
143 | addi r4,r4,2 | ||
144 | addc r0,r0,r6 | ||
145 | srwi. r6,r5,2 /* # words to do */ | ||
146 | beq 3f | ||
147 | 1: mtctr r6 | ||
148 | 82: lwzu r6,4(r3) /* the bdnz has zero overhead, so it should */ | ||
149 | 92: stwu r6,4(r4) /* be unnecessary to unroll this loop */ | ||
150 | adde r0,r0,r6 | ||
151 | bdnz 82b | ||
152 | andi. r5,r5,3 | ||
153 | 3: cmpwi 0,r5,2 | ||
154 | blt+ 4f | ||
155 | 83: lhz r6,4(r3) | ||
156 | addi r3,r3,2 | 282 | addi r3,r3,2 |
157 | subi r5,r5,2 | 283 | adde r0,r0,r6 |
158 | 93: sth r6,4(r4) | 284 | dest; sth r6,0(r4) |
159 | addi r4,r4,2 | 285 | addi r4,r4,2 |
286 | bdnz 1b | ||
287 | |||
288 | .Lcopy_aligned: | ||
289 | /* | ||
290 | * We unroll the loop such that each iteration is 64 bytes with an | ||
291 | * entry and exit limb of 64 bytes, meaning a minimum size of | ||
292 | * 128 bytes. | ||
293 | */ | ||
294 | srdi. r6,r5,7 | ||
295 | beq .Lcopy_tail_doublewords /* len < 128 */ | ||
296 | |||
297 | srdi r6,r5,6 | ||
298 | subi r6,r6,1 | ||
299 | mtctr r6 | ||
300 | |||
301 | stdu r1,-STACKFRAMESIZE(r1) | ||
302 | std r14,STK_REG(r14)(r1) | ||
303 | std r15,STK_REG(r15)(r1) | ||
304 | std r16,STK_REG(r16)(r1) | ||
305 | |||
306 | source; ld r6,0(r3) | ||
307 | source; ld r9,8(r3) | ||
308 | |||
309 | source; ld r10,16(r3) | ||
310 | source; ld r11,24(r3) | ||
311 | |||
312 | /* | ||
313 | * On POWER6 and POWER7 back to back addes take 2 cycles because of | ||
314 | * the XER dependency. This means the fastest this loop can go is | ||
315 | * 16 cycles per iteration. The scheduling of the loop below has | ||
316 | * been shown to hit this on both POWER6 and POWER7. | ||
317 | */ | ||
318 | .align 5 | ||
319 | 2: | ||
160 | adde r0,r0,r6 | 320 | adde r0,r0,r6 |
161 | 4: cmpwi 0,r5,1 | 321 | source; ld r12,32(r3) |
162 | bne+ 5f | 322 | source; ld r14,40(r3) |
163 | 84: lbz r6,4(r3) | 323 | |
164 | 94: stb r6,4(r4) | 324 | adde r0,r0,r9 |
165 | slwi r6,r6,8 /* Upper byte of word */ | 325 | source; ld r15,48(r3) |
326 | source; ld r16,56(r3) | ||
327 | addi r3,r3,64 | ||
328 | |||
329 | adde r0,r0,r10 | ||
330 | dest; std r6,0(r4) | ||
331 | dest; std r9,8(r4) | ||
332 | |||
333 | adde r0,r0,r11 | ||
334 | dest; std r10,16(r4) | ||
335 | dest; std r11,24(r4) | ||
336 | |||
337 | adde r0,r0,r12 | ||
338 | dest; std r12,32(r4) | ||
339 | dest; std r14,40(r4) | ||
340 | |||
341 | adde r0,r0,r14 | ||
342 | dest; std r15,48(r4) | ||
343 | dest; std r16,56(r4) | ||
344 | addi r4,r4,64 | ||
345 | |||
346 | adde r0,r0,r15 | ||
347 | source; ld r6,0(r3) | ||
348 | source; ld r9,8(r3) | ||
349 | |||
350 | adde r0,r0,r16 | ||
351 | source; ld r10,16(r3) | ||
352 | source; ld r11,24(r3) | ||
353 | bdnz 2b | ||
354 | |||
355 | |||
166 | adde r0,r0,r6 | 356 | adde r0,r0,r6 |
167 | 5: addze r3,r0 /* add in final carry (unlikely with 64-bit regs) */ | 357 | source; ld r12,32(r3) |
168 | rldicl r4,r3,32,0 /* fold 64 bit value */ | 358 | source; ld r14,40(r3) |
169 | add r3,r4,r3 | ||
170 | srdi r3,r3,32 | ||
171 | blr | ||
172 | 359 | ||
173 | /* These shouldn't go in the fixup section, since that would | 360 | adde r0,r0,r9 |
174 | cause the ex_table addresses to get out of order. */ | 361 | source; ld r15,48(r3) |
362 | source; ld r16,56(r3) | ||
363 | addi r3,r3,64 | ||
364 | |||
365 | adde r0,r0,r10 | ||
366 | dest; std r6,0(r4) | ||
367 | dest; std r9,8(r4) | ||
368 | |||
369 | adde r0,r0,r11 | ||
370 | dest; std r10,16(r4) | ||
371 | dest; std r11,24(r4) | ||
372 | |||
373 | adde r0,r0,r12 | ||
374 | dest; std r12,32(r4) | ||
375 | dest; std r14,40(r4) | ||
376 | |||
377 | adde r0,r0,r14 | ||
378 | dest; std r15,48(r4) | ||
379 | dest; std r16,56(r4) | ||
380 | addi r4,r4,64 | ||
381 | |||
382 | adde r0,r0,r15 | ||
383 | adde r0,r0,r16 | ||
384 | |||
385 | ld r14,STK_REG(r14)(r1) | ||
386 | ld r15,STK_REG(r15)(r1) | ||
387 | ld r16,STK_REG(r16)(r1) | ||
388 | addi r1,r1,STACKFRAMESIZE | ||
389 | |||
390 | andi. r5,r5,63 | ||
391 | |||
392 | .Lcopy_tail_doublewords: /* Up to 127 bytes to go */ | ||
393 | srdi. r6,r5,3 | ||
394 | beq .Lcopy_tail_word | ||
175 | 395 | ||
176 | .globl src_error_1 | ||
177 | src_error_1: | ||
178 | li r6,0 | ||
179 | subi r5,r5,2 | ||
180 | 95: sth r6,4(r4) | ||
181 | addi r4,r4,2 | ||
182 | srwi. r6,r5,2 | ||
183 | beq 3f | ||
184 | mtctr r6 | 396 | mtctr r6 |
185 | .globl src_error_2 | 397 | 3: |
186 | src_error_2: | 398 | source; ld r6,0(r3) |
187 | li r6,0 | 399 | addi r3,r3,8 |
188 | 96: stwu r6,4(r4) | 400 | adde r0,r0,r6 |
189 | bdnz 96b | 401 | dest; std r6,0(r4) |
190 | 3: andi. r5,r5,3 | 402 | addi r4,r4,8 |
191 | beq src_error | 403 | bdnz 3b |
192 | .globl src_error_3 | 404 | |
193 | src_error_3: | 405 | andi. r5,r5,7 |
194 | li r6,0 | 406 | |
195 | mtctr r5 | 407 | .Lcopy_tail_word: /* Up to 7 bytes to go */ |
196 | addi r4,r4,3 | 408 | srdi. r6,r5,2 |
197 | 97: stbu r6,1(r4) | 409 | beq .Lcopy_tail_halfword |
198 | bdnz 97b | 410 | |
199 | .globl src_error | 411 | source; lwz r6,0(r3) |
200 | src_error: | 412 | addi r3,r3,4 |
413 | adde r0,r0,r6 | ||
414 | dest; stw r6,0(r4) | ||
415 | addi r4,r4,4 | ||
416 | subi r5,r5,4 | ||
417 | |||
418 | .Lcopy_tail_halfword: /* Up to 3 bytes to go */ | ||
419 | srdi. r6,r5,1 | ||
420 | beq .Lcopy_tail_byte | ||
421 | |||
422 | source; lhz r6,0(r3) | ||
423 | addi r3,r3,2 | ||
424 | adde r0,r0,r6 | ||
425 | dest; sth r6,0(r4) | ||
426 | addi r4,r4,2 | ||
427 | subi r5,r5,2 | ||
428 | |||
429 | .Lcopy_tail_byte: /* Up to 1 byte to go */ | ||
430 | andi. r6,r5,1 | ||
431 | beq .Lcopy_finish | ||
432 | |||
433 | source; lbz r6,0(r3) | ||
434 | sldi r9,r6,8 /* Pad the byte out to 16 bits */ | ||
435 | adde r0,r0,r9 | ||
436 | dest; stb r6,0(r4) | ||
437 | |||
438 | .Lcopy_finish: | ||
439 | addze r0,r0 /* add in final carry */ | ||
440 | rldicl r4,r0,32,0 /* fold two 32 bit halves together */ | ||
441 | add r3,r4,r0 | ||
442 | srdi r3,r3,32 | ||
443 | blr | ||
444 | |||
445 | .Lsrc_error: | ||
201 | cmpdi 0,r7,0 | 446 | cmpdi 0,r7,0 |
202 | beq 1f | 447 | beqlr |
203 | li r6,-EFAULT | 448 | li r6,-EFAULT |
204 | stw r6,0(r7) | 449 | stw r6,0(r7) |
205 | 1: addze r3,r0 | ||
206 | blr | 450 | blr |
207 | 451 | ||
208 | .globl dst_error | 452 | .Ldest_error: |
209 | dst_error: | ||
210 | cmpdi 0,r8,0 | 453 | cmpdi 0,r8,0 |
211 | beq 1f | 454 | beqlr |
212 | li r6,-EFAULT | 455 | li r6,-EFAULT |
213 | stw r6,0(r8) | 456 | stw r6,0(r8) |
214 | 1: addze r3,r0 | ||
215 | blr | 457 | blr |
216 | |||
217 | .section __ex_table,"a" | ||
218 | .align 3 | ||
219 | .llong 81b,src_error_1 | ||
220 | .llong 91b,dst_error | ||
221 | .llong 82b,src_error_2 | ||
222 | .llong 92b,dst_error | ||
223 | .llong 83b,src_error_3 | ||
224 | .llong 93b,dst_error | ||
225 | .llong 84b,src_error_3 | ||
226 | .llong 94b,dst_error | ||
227 | .llong 95b,dst_error | ||
228 | .llong 96b,dst_error | ||
229 | .llong 97b,dst_error | ||
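
Note: the rewritten csum_partial and csum_partial_copy_generic both finish with the same fold: rotate the 64-bit accumulator by 32 bits, add, and keep the upper half, so the carry out of the low word is folded back in. A userspace sketch of the .Lcsum_finish sequence (the example value is arbitrary):

#include <stdint.h>
#include <stdio.h>

static uint32_t fold_csum64(uint64_t sum)
{
	uint64_t rotated = (sum << 32) | (sum >> 32);	/* rldicl r4,r0,32,0 */
	return (uint32_t)((sum + rotated) >> 32);	/* add r3,r4,r0 ; srdi r3,r3,32 */
}

int main(void)
{
	uint64_t sum = 0x000123450000abcdULL;	/* hypothetical 64-bit accumulator */

	printf("folded csum: 0x%08x\n", fold_csum64(sum));
	return 0;
}
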
diff --git a/arch/powerpc/lib/checksum_wrappers_64.c b/arch/powerpc/lib/checksum_wrappers_64.c new file mode 100644 index 000000000000..769b817fbb32 --- /dev/null +++ b/arch/powerpc/lib/checksum_wrappers_64.c | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
15 | * | ||
16 | * Copyright (C) IBM Corporation, 2010 | ||
17 | * | ||
18 | * Author: Anton Blanchard <anton@au.ibm.com> | ||
19 | */ | ||
20 | #include <linux/module.h> | ||
21 | #include <linux/compiler.h> | ||
22 | #include <linux/types.h> | ||
23 | #include <asm/checksum.h> | ||
24 | #include <asm/uaccess.h> | ||
25 | |||
26 | __wsum csum_and_copy_from_user(const void __user *src, void *dst, | ||
27 | int len, __wsum sum, int *err_ptr) | ||
28 | { | ||
29 | unsigned int csum; | ||
30 | |||
31 | might_sleep(); | ||
32 | |||
33 | *err_ptr = 0; | ||
34 | |||
35 | if (!len) { | ||
36 | csum = 0; | ||
37 | goto out; | ||
38 | } | ||
39 | |||
40 | if (unlikely((len < 0) || !access_ok(VERIFY_READ, src, len))) { | ||
41 | *err_ptr = -EFAULT; | ||
42 | csum = (__force unsigned int)sum; | ||
43 | goto out; | ||
44 | } | ||
45 | |||
46 | csum = csum_partial_copy_generic((void __force *)src, dst, | ||
47 | len, sum, err_ptr, NULL); | ||
48 | |||
49 | if (unlikely(*err_ptr)) { | ||
50 | int missing = __copy_from_user(dst, src, len); | ||
51 | |||
52 | if (missing) { | ||
53 | memset(dst + len - missing, 0, missing); | ||
54 | *err_ptr = -EFAULT; | ||
55 | } else { | ||
56 | *err_ptr = 0; | ||
57 | } | ||
58 | |||
59 | csum = csum_partial(dst, len, sum); | ||
60 | } | ||
61 | |||
62 | out: | ||
63 | return (__force __wsum)csum; | ||
64 | } | ||
65 | EXPORT_SYMBOL(csum_and_copy_from_user); | ||
66 | |||
67 | __wsum csum_and_copy_to_user(const void *src, void __user *dst, int len, | ||
68 | __wsum sum, int *err_ptr) | ||
69 | { | ||
70 | unsigned int csum; | ||
71 | |||
72 | might_sleep(); | ||
73 | |||
74 | *err_ptr = 0; | ||
75 | |||
76 | if (!len) { | ||
77 | csum = 0; | ||
78 | goto out; | ||
79 | } | ||
80 | |||
81 | if (unlikely((len < 0) || !access_ok(VERIFY_WRITE, dst, len))) { | ||
82 | *err_ptr = -EFAULT; | ||
83 | csum = -1; /* invalid checksum */ | ||
84 | goto out; | ||
85 | } | ||
86 | |||
87 | csum = csum_partial_copy_generic(src, (void __force *)dst, | ||
88 | len, sum, NULL, err_ptr); | ||
89 | |||
90 | if (unlikely(*err_ptr)) { | ||
91 | csum = csum_partial(src, len, sum); | ||
92 | |||
93 | if (copy_to_user(dst, src, len)) { | ||
94 | *err_ptr = -EFAULT; | ||
95 | csum = -1; /* invalid checksum */ | ||
96 | } | ||
97 | } | ||
98 | |||
99 | out: | ||
100 | return (__force __wsum)csum; | ||
101 | } | ||
102 | EXPORT_SYMBOL(csum_and_copy_to_user); | ||
diff --git a/arch/powerpc/lib/copy_32.S b/arch/powerpc/lib/copy_32.S index 74a7f4130b4c..55f19f9fd708 100644 --- a/arch/powerpc/lib/copy_32.S +++ b/arch/powerpc/lib/copy_32.S | |||
@@ -62,7 +62,7 @@ | |||
62 | 62 | ||
63 | .text | 63 | .text |
64 | .stabs "arch/powerpc/lib/",N_SO,0,0,0f | 64 | .stabs "arch/powerpc/lib/",N_SO,0,0,0f |
65 | .stabs "copy32.S",N_SO,0,0,0f | 65 | .stabs "copy_32.S",N_SO,0,0,0f |
66 | 0: | 66 | 0: |
67 | 67 | ||
68 | CACHELINE_BYTES = L1_CACHE_BYTES | 68 | CACHELINE_BYTES = L1_CACHE_BYTES |
diff --git a/arch/powerpc/lib/copypage_64.S b/arch/powerpc/lib/copypage_64.S index 4d4eeb900486..53dcb6b1b708 100644 --- a/arch/powerpc/lib/copypage_64.S +++ b/arch/powerpc/lib/copypage_64.S | |||
@@ -6,6 +6,7 @@ | |||
6 | * as published by the Free Software Foundation; either version | 6 | * as published by the Free Software Foundation; either version |
7 | * 2 of the License, or (at your option) any later version. | 7 | * 2 of the License, or (at your option) any later version. |
8 | */ | 8 | */ |
9 | #include <asm/page.h> | ||
9 | #include <asm/processor.h> | 10 | #include <asm/processor.h> |
10 | #include <asm/ppc_asm.h> | 11 | #include <asm/ppc_asm.h> |
11 | #include <asm/asm-offsets.h> | 12 | #include <asm/asm-offsets.h> |
@@ -15,9 +16,9 @@ PPC64_CACHES: | |||
15 | .tc ppc64_caches[TC],ppc64_caches | 16 | .tc ppc64_caches[TC],ppc64_caches |
16 | .section ".text" | 17 | .section ".text" |
17 | 18 | ||
18 | 19 | _GLOBAL(copy_page) | |
19 | _GLOBAL(copy_4K_page) | 20 | lis r5,PAGE_SIZE@h |
20 | li r5,4096 /* 4K page size */ | 21 | ori r5,r5,PAGE_SIZE@l |
21 | BEGIN_FTR_SECTION | 22 | BEGIN_FTR_SECTION |
22 | ld r10,PPC64_CACHES@toc(r2) | 23 | ld r10,PPC64_CACHES@toc(r2) |
23 | lwz r11,DCACHEL1LOGLINESIZE(r10) /* log2 of cache line size */ | 24 | lwz r11,DCACHEL1LOGLINESIZE(r10) /* log2 of cache line size */ |
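
Note: copy_page now loads PAGE_SIZE with a lis/ori pair instead of hard-coding 4096, so it also copes with 64K pages. PowerPC has no single load-32-bit-immediate instruction, so PAGE_SIZE@h fills the upper halfword and PAGE_SIZE@l is OR-ed into the lower. A sketch of what the pair computes (64K used as the example page size):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t page_size = 0x10000;			/* example PAGE_SIZE (64K) */
	uint32_t hi = (page_size >> 16) & 0xffff;	/* PAGE_SIZE@h */
	uint32_t lo = page_size & 0xffff;		/* PAGE_SIZE@l */
	uint32_t r5 = (hi << 16) | lo;			/* lis r5,hi ; ori r5,r5,lo */

	printf("r5 = 0x%x\n", r5);
	return 0;
}
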
diff --git a/arch/powerpc/lib/devres.c b/arch/powerpc/lib/devres.c index deac4d30daf4..e91615abae66 100644 --- a/arch/powerpc/lib/devres.c +++ b/arch/powerpc/lib/devres.c | |||
@@ -9,11 +9,11 @@ | |||
9 | 9 | ||
10 | #include <linux/device.h> /* devres_*(), devm_ioremap_release() */ | 10 | #include <linux/device.h> /* devres_*(), devm_ioremap_release() */ |
11 | #include <linux/gfp.h> | 11 | #include <linux/gfp.h> |
12 | #include <linux/io.h> /* ioremap_flags() */ | 12 | #include <linux/io.h> /* ioremap_prot() */ |
13 | #include <linux/module.h> /* EXPORT_SYMBOL() */ | 13 | #include <linux/module.h> /* EXPORT_SYMBOL() */ |
14 | 14 | ||
15 | /** | 15 | /** |
16 | * devm_ioremap_prot - Managed ioremap_flags() | 16 | * devm_ioremap_prot - Managed ioremap_prot() |
17 | * @dev: Generic device to remap IO address for | 17 | * @dev: Generic device to remap IO address for |
18 | * @offset: BUS offset to map | 18 | * @offset: BUS offset to map |
19 | * @size: Size of map | 19 | * @size: Size of map |
@@ -31,7 +31,7 @@ void __iomem *devm_ioremap_prot(struct device *dev, resource_size_t offset, | |||
31 | if (!ptr) | 31 | if (!ptr) |
32 | return NULL; | 32 | return NULL; |
33 | 33 | ||
34 | addr = ioremap_flags(offset, size, flags); | 34 | addr = ioremap_prot(offset, size, flags); |
35 | if (addr) { | 35 | if (addr) { |
36 | *ptr = addr; | 36 | *ptr = addr; |
37 | devres_add(dev, ptr); | 37 | devres_add(dev, ptr); |
diff --git a/arch/powerpc/lib/feature-fixups-test.S b/arch/powerpc/lib/feature-fixups-test.S index cb737484c5aa..f4613118132e 100644 --- a/arch/powerpc/lib/feature-fixups-test.S +++ b/arch/powerpc/lib/feature-fixups-test.S | |||
@@ -172,6 +172,25 @@ globl(ftr_fixup_test6_expected) | |||
172 | 3: or 3,3,3 | 172 | 3: or 3,3,3 |
173 | 173 | ||
174 | 174 | ||
175 | #if 0 | ||
176 | /* Test that if we have a larger else case the assembler spots it and | ||
177 | * reports an error. #if 0'ed so as not to break the build normally. | ||
178 | */ | ||
179 | ftr_fixup_test7: | ||
180 | or 1,1,1 | ||
181 | BEGIN_FTR_SECTION | ||
182 | or 2,2,2 | ||
183 | or 2,2,2 | ||
184 | or 2,2,2 | ||
185 | FTR_SECTION_ELSE | ||
186 | or 3,3,3 | ||
187 | or 3,3,3 | ||
188 | or 3,3,3 | ||
189 | or 3,3,3 | ||
190 | ALT_FTR_SECTION_END(0, 1) | ||
191 | or 1,1,1 | ||
192 | #endif | ||
193 | |||
175 | #define MAKE_MACRO_TEST(TYPE) \ | 194 | #define MAKE_MACRO_TEST(TYPE) \ |
176 | globl(ftr_fixup_test_ ##TYPE##_macros) \ | 195 | globl(ftr_fixup_test_ ##TYPE##_macros) \ |
177 | or 1,1,1; \ | 196 | or 1,1,1; \ |
diff --git a/arch/powerpc/lib/hweight_64.S b/arch/powerpc/lib/hweight_64.S new file mode 100644 index 000000000000..fda27868cf8c --- /dev/null +++ b/arch/powerpc/lib/hweight_64.S | |||
@@ -0,0 +1,110 @@ | |||
1 | /* | ||
2 | * This program is free software; you can redistribute it and/or modify | ||
3 | * it under the terms of the GNU General Public License as published by | ||
4 | * the Free Software Foundation; either version 2 of the License, or | ||
5 | * (at your option) any later version. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | * | ||
12 | * You should have received a copy of the GNU General Public License | ||
13 | * along with this program; if not, write to the Free Software | ||
14 | * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
15 | * | ||
16 | * Copyright (C) IBM Corporation, 2010 | ||
17 | * | ||
18 | * Author: Anton Blanchard <anton@au.ibm.com> | ||
19 | */ | ||
20 | #include <asm/processor.h> | ||
21 | #include <asm/ppc_asm.h> | ||
22 | |||
23 | /* Note: This code relies on -mminimal-toc */ | ||
24 | |||
25 | _GLOBAL(__arch_hweight8) | ||
26 | BEGIN_FTR_SECTION | ||
27 | b .__sw_hweight8 | ||
28 | nop | ||
29 | nop | ||
30 | FTR_SECTION_ELSE | ||
31 | PPC_POPCNTB(r3,r3) | ||
32 | clrldi r3,r3,64-8 | ||
33 | blr | ||
34 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) | ||
35 | |||
36 | _GLOBAL(__arch_hweight16) | ||
37 | BEGIN_FTR_SECTION | ||
38 | b .__sw_hweight16 | ||
39 | nop | ||
40 | nop | ||
41 | nop | ||
42 | nop | ||
43 | FTR_SECTION_ELSE | ||
44 | BEGIN_FTR_SECTION_NESTED(50) | ||
45 | PPC_POPCNTB(r3,r3) | ||
46 | srdi r4,r3,8 | ||
47 | add r3,r4,r3 | ||
48 | clrldi r3,r3,64-8 | ||
49 | blr | ||
50 | FTR_SECTION_ELSE_NESTED(50) | ||
51 | clrlwi r3,r3,16 | ||
52 | PPC_POPCNTW(r3,r3) | ||
53 | clrldi r3,r3,64-8 | ||
54 | blr | ||
55 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 50) | ||
56 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) | ||
57 | |||
58 | _GLOBAL(__arch_hweight32) | ||
59 | BEGIN_FTR_SECTION | ||
60 | b .__sw_hweight32 | ||
61 | nop | ||
62 | nop | ||
63 | nop | ||
64 | nop | ||
65 | nop | ||
66 | nop | ||
67 | FTR_SECTION_ELSE | ||
68 | BEGIN_FTR_SECTION_NESTED(51) | ||
69 | PPC_POPCNTB(r3,r3) | ||
70 | srdi r4,r3,16 | ||
71 | add r3,r4,r3 | ||
72 | srdi r4,r3,8 | ||
73 | add r3,r4,r3 | ||
74 | clrldi r3,r3,64-8 | ||
75 | blr | ||
76 | FTR_SECTION_ELSE_NESTED(51) | ||
77 | PPC_POPCNTW(r3,r3) | ||
78 | clrldi r3,r3,64-8 | ||
79 | blr | ||
80 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 51) | ||
81 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) | ||
82 | |||
83 | _GLOBAL(__arch_hweight64) | ||
84 | BEGIN_FTR_SECTION | ||
85 | b .__sw_hweight64 | ||
86 | nop | ||
87 | nop | ||
88 | nop | ||
89 | nop | ||
90 | nop | ||
91 | nop | ||
92 | nop | ||
93 | nop | ||
94 | FTR_SECTION_ELSE | ||
95 | BEGIN_FTR_SECTION_NESTED(52) | ||
96 | PPC_POPCNTB(r3,r3) | ||
97 | srdi r4,r3,32 | ||
98 | add r3,r4,r3 | ||
99 | srdi r4,r3,16 | ||
100 | add r3,r4,r3 | ||
101 | srdi r4,r3,8 | ||
102 | add r3,r4,r3 | ||
103 | clrldi r3,r3,64-8 | ||
104 | blr | ||
105 | FTR_SECTION_ELSE_NESTED(52) | ||
106 | PPC_POPCNTD(r3,r3) | ||
107 | clrldi r3,r3,64-8 | ||
108 | blr | ||
109 | ALT_FTR_SECTION_END_NESTED_IFCLR(CPU_FTR_POPCNTD, 52) | ||
110 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_POPCNTB) | ||
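
Note: each __arch_hweight routine above either branches to the software fallback or, when CPU_FTR_POPCNTB is set, uses popcntb followed by a shift/add cascade that accumulates the per-byte counts into the low byte. A userspace sketch of the 64-bit POPCNTB path (popcntb() here only models the instruction, which counts bits within each byte independently):

#include <stdint.h>
#include <stdio.h>

static uint64_t popcntb(uint64_t x)
{
	uint64_t r = 0;
	int i;

	for (i = 0; i < 8; i++) {
		uint8_t b = (x >> (i * 8)) & 0xff;
		uint8_t c = 0;
		while (b) {
			c += b & 1;
			b >>= 1;
		}
		r |= (uint64_t)c << (i * 8);
	}
	return r;
}

static unsigned int hweight64_popcntb(uint64_t x)
{
	x = popcntb(x);		/* PPC_POPCNTB(r3,r3) */
	x += x >> 32;		/* srdi r4,r3,32 ; add r3,r4,r3 */
	x += x >> 16;		/* srdi r4,r3,16 ; add r3,r4,r3 */
	x += x >> 8;		/* srdi r4,r3,8  ; add r3,r4,r3 */
	return x & 0xff;	/* clrldi r3,r3,64-8 */
}

int main(void)
{
	printf("%u\n", hweight64_popcntb(0xf0f0f0f0f0f0f0f0ULL));	/* prints 32 */
	return 0;
}
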
diff --git a/arch/powerpc/lib/ldstfp.S b/arch/powerpc/lib/ldstfp.S index f6448636baf5..6a85380520b6 100644 --- a/arch/powerpc/lib/ldstfp.S +++ b/arch/powerpc/lib/ldstfp.S | |||
@@ -17,6 +17,8 @@ | |||
17 | #include <asm/asm-offsets.h> | 17 | #include <asm/asm-offsets.h> |
18 | #include <linux/errno.h> | 18 | #include <linux/errno.h> |
19 | 19 | ||
20 | #ifdef CONFIG_PPC_FPU | ||
21 | |||
20 | #define STKFRM (PPC_MIN_STKFRM + 16) | 22 | #define STKFRM (PPC_MIN_STKFRM + 16) |
21 | 23 | ||
22 | .macro extab instr,handler | 24 | .macro extab instr,handler |
@@ -81,7 +83,7 @@ _GLOBAL(do_lfs) | |||
81 | mfmsr r6 | 83 | mfmsr r6 |
82 | ori r7,r6,MSR_FP | 84 | ori r7,r6,MSR_FP |
83 | cmpwi cr7,r3,0 | 85 | cmpwi cr7,r3,0 |
84 | mtmsrd r7 | 86 | MTMSRD(r7) |
85 | isync | 87 | isync |
86 | beq cr7,1f | 88 | beq cr7,1f |
87 | stfd fr0,STKFRM-16(r1) | 89 | stfd fr0,STKFRM-16(r1) |
@@ -93,7 +95,7 @@ _GLOBAL(do_lfs) | |||
93 | lfd fr0,STKFRM-16(r1) | 95 | lfd fr0,STKFRM-16(r1) |
94 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | 96 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) |
95 | mtlr r0 | 97 | mtlr r0 |
96 | mtmsrd r6 | 98 | MTMSRD(r6) |
97 | isync | 99 | isync |
98 | mr r3,r9 | 100 | mr r3,r9 |
99 | addi r1,r1,STKFRM | 101 | addi r1,r1,STKFRM |
@@ -108,7 +110,7 @@ _GLOBAL(do_lfd) | |||
108 | mfmsr r6 | 110 | mfmsr r6 |
109 | ori r7,r6,MSR_FP | 111 | ori r7,r6,MSR_FP |
110 | cmpwi cr7,r3,0 | 112 | cmpwi cr7,r3,0 |
111 | mtmsrd r7 | 113 | MTMSRD(r7) |
112 | isync | 114 | isync |
113 | beq cr7,1f | 115 | beq cr7,1f |
114 | stfd fr0,STKFRM-16(r1) | 116 | stfd fr0,STKFRM-16(r1) |
@@ -120,7 +122,7 @@ _GLOBAL(do_lfd) | |||
120 | lfd fr0,STKFRM-16(r1) | 122 | lfd fr0,STKFRM-16(r1) |
121 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | 123 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) |
122 | mtlr r0 | 124 | mtlr r0 |
123 | mtmsrd r6 | 125 | MTMSRD(r6) |
124 | isync | 126 | isync |
125 | mr r3,r9 | 127 | mr r3,r9 |
126 | addi r1,r1,STKFRM | 128 | addi r1,r1,STKFRM |
@@ -135,7 +137,7 @@ _GLOBAL(do_stfs) | |||
135 | mfmsr r6 | 137 | mfmsr r6 |
136 | ori r7,r6,MSR_FP | 138 | ori r7,r6,MSR_FP |
137 | cmpwi cr7,r3,0 | 139 | cmpwi cr7,r3,0 |
138 | mtmsrd r7 | 140 | MTMSRD(r7) |
139 | isync | 141 | isync |
140 | beq cr7,1f | 142 | beq cr7,1f |
141 | stfd fr0,STKFRM-16(r1) | 143 | stfd fr0,STKFRM-16(r1) |
@@ -147,7 +149,7 @@ _GLOBAL(do_stfs) | |||
147 | lfd fr0,STKFRM-16(r1) | 149 | lfd fr0,STKFRM-16(r1) |
148 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | 150 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) |
149 | mtlr r0 | 151 | mtlr r0 |
150 | mtmsrd r6 | 152 | MTMSRD(r6) |
151 | isync | 153 | isync |
152 | mr r3,r9 | 154 | mr r3,r9 |
153 | addi r1,r1,STKFRM | 155 | addi r1,r1,STKFRM |
@@ -162,7 +164,7 @@ _GLOBAL(do_stfd) | |||
162 | mfmsr r6 | 164 | mfmsr r6 |
163 | ori r7,r6,MSR_FP | 165 | ori r7,r6,MSR_FP |
164 | cmpwi cr7,r3,0 | 166 | cmpwi cr7,r3,0 |
165 | mtmsrd r7 | 167 | MTMSRD(r7) |
166 | isync | 168 | isync |
167 | beq cr7,1f | 169 | beq cr7,1f |
168 | stfd fr0,STKFRM-16(r1) | 170 | stfd fr0,STKFRM-16(r1) |
@@ -174,7 +176,7 @@ _GLOBAL(do_stfd) | |||
174 | lfd fr0,STKFRM-16(r1) | 176 | lfd fr0,STKFRM-16(r1) |
175 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | 177 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) |
176 | mtlr r0 | 178 | mtlr r0 |
177 | mtmsrd r6 | 179 | MTMSRD(r6) |
178 | isync | 180 | isync |
179 | mr r3,r9 | 181 | mr r3,r9 |
180 | addi r1,r1,STKFRM | 182 | addi r1,r1,STKFRM |
@@ -229,7 +231,7 @@ _GLOBAL(do_lvx) | |||
229 | oris r7,r6,MSR_VEC@h | 231 | oris r7,r6,MSR_VEC@h |
230 | cmpwi cr7,r3,0 | 232 | cmpwi cr7,r3,0 |
231 | li r8,STKFRM-16 | 233 | li r8,STKFRM-16 |
232 | mtmsrd r7 | 234 | MTMSRD(r7) |
233 | isync | 235 | isync |
234 | beq cr7,1f | 236 | beq cr7,1f |
235 | stvx vr0,r1,r8 | 237 | stvx vr0,r1,r8 |
@@ -241,7 +243,7 @@ _GLOBAL(do_lvx) | |||
241 | lvx vr0,r1,r8 | 243 | lvx vr0,r1,r8 |
242 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | 244 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) |
243 | mtlr r0 | 245 | mtlr r0 |
244 | mtmsrd r6 | 246 | MTMSRD(r6) |
245 | isync | 247 | isync |
246 | mr r3,r9 | 248 | mr r3,r9 |
247 | addi r1,r1,STKFRM | 249 | addi r1,r1,STKFRM |
@@ -257,7 +259,7 @@ _GLOBAL(do_stvx) | |||
257 | oris r7,r6,MSR_VEC@h | 259 | oris r7,r6,MSR_VEC@h |
258 | cmpwi cr7,r3,0 | 260 | cmpwi cr7,r3,0 |
259 | li r8,STKFRM-16 | 261 | li r8,STKFRM-16 |
260 | mtmsrd r7 | 262 | MTMSRD(r7) |
261 | isync | 263 | isync |
262 | beq cr7,1f | 264 | beq cr7,1f |
263 | stvx vr0,r1,r8 | 265 | stvx vr0,r1,r8 |
@@ -269,7 +271,7 @@ _GLOBAL(do_stvx) | |||
269 | lvx vr0,r1,r8 | 271 | lvx vr0,r1,r8 |
270 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | 272 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) |
271 | mtlr r0 | 273 | mtlr r0 |
272 | mtmsrd r6 | 274 | MTMSRD(r6) |
273 | isync | 275 | isync |
274 | mr r3,r9 | 276 | mr r3,r9 |
275 | addi r1,r1,STKFRM | 277 | addi r1,r1,STKFRM |
@@ -325,7 +327,7 @@ _GLOBAL(do_lxvd2x) | |||
325 | oris r7,r6,MSR_VSX@h | 327 | oris r7,r6,MSR_VSX@h |
326 | cmpwi cr7,r3,0 | 328 | cmpwi cr7,r3,0 |
327 | li r8,STKFRM-16 | 329 | li r8,STKFRM-16 |
328 | mtmsrd r7 | 330 | MTMSRD(r7) |
329 | isync | 331 | isync |
330 | beq cr7,1f | 332 | beq cr7,1f |
331 | STXVD2X(0,r1,r8) | 333 | STXVD2X(0,r1,r8) |
@@ -337,7 +339,7 @@ _GLOBAL(do_lxvd2x) | |||
337 | LXVD2X(0,r1,r8) | 339 | LXVD2X(0,r1,r8) |
338 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | 340 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) |
339 | mtlr r0 | 341 | mtlr r0 |
340 | mtmsrd r6 | 342 | MTMSRD(r6) |
341 | isync | 343 | isync |
342 | mr r3,r9 | 344 | mr r3,r9 |
343 | addi r1,r1,STKFRM | 345 | addi r1,r1,STKFRM |
@@ -353,7 +355,7 @@ _GLOBAL(do_stxvd2x) | |||
353 | oris r7,r6,MSR_VSX@h | 355 | oris r7,r6,MSR_VSX@h |
354 | cmpwi cr7,r3,0 | 356 | cmpwi cr7,r3,0 |
355 | li r8,STKFRM-16 | 357 | li r8,STKFRM-16 |
356 | mtmsrd r7 | 358 | MTMSRD(r7) |
357 | isync | 359 | isync |
358 | beq cr7,1f | 360 | beq cr7,1f |
359 | STXVD2X(0,r1,r8) | 361 | STXVD2X(0,r1,r8) |
@@ -365,7 +367,7 @@ _GLOBAL(do_stxvd2x) | |||
365 | LXVD2X(0,r1,r8) | 367 | LXVD2X(0,r1,r8) |
366 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) | 368 | 4: PPC_LL r0,STKFRM+PPC_LR_STKOFF(r1) |
367 | mtlr r0 | 369 | mtlr r0 |
368 | mtmsrd r6 | 370 | MTMSRD(r6) |
369 | isync | 371 | isync |
370 | mr r3,r9 | 372 | mr r3,r9 |
371 | addi r1,r1,STKFRM | 373 | addi r1,r1,STKFRM |
@@ -373,3 +375,5 @@ _GLOBAL(do_stxvd2x) | |||
373 | extab 2b,3b | 375 | extab 2b,3b |
374 | 376 | ||
375 | #endif /* CONFIG_VSX */ | 377 | #endif /* CONFIG_VSX */ |
378 | |||
379 | #endif /* CONFIG_PPC_FPU */ | ||
diff --git a/arch/powerpc/lib/locks.c b/arch/powerpc/lib/locks.c index 58e14fba11b1..9b8182e82166 100644 --- a/arch/powerpc/lib/locks.c +++ b/arch/powerpc/lib/locks.c | |||
@@ -34,7 +34,7 @@ void __spin_yield(arch_spinlock_t *lock) | |||
34 | return; | 34 | return; |
35 | holder_cpu = lock_value & 0xffff; | 35 | holder_cpu = lock_value & 0xffff; |
36 | BUG_ON(holder_cpu >= NR_CPUS); | 36 | BUG_ON(holder_cpu >= NR_CPUS); |
37 | yield_count = lppaca[holder_cpu].yield_count; | 37 | yield_count = lppaca_of(holder_cpu).yield_count; |
38 | if ((yield_count & 1) == 0) | 38 | if ((yield_count & 1) == 0) |
39 | return; /* virtual cpu is currently running */ | 39 | return; /* virtual cpu is currently running */ |
40 | rmb(); | 40 | rmb(); |
@@ -65,7 +65,7 @@ void __rw_yield(arch_rwlock_t *rw) | |||
65 | return; /* no write lock at present */ | 65 | return; /* no write lock at present */ |
66 | holder_cpu = lock_value & 0xffff; | 66 | holder_cpu = lock_value & 0xffff; |
67 | BUG_ON(holder_cpu >= NR_CPUS); | 67 | BUG_ON(holder_cpu >= NR_CPUS); |
68 | yield_count = lppaca[holder_cpu].yield_count; | 68 | yield_count = lppaca_of(holder_cpu).yield_count; |
69 | if ((yield_count & 1) == 0) | 69 | if ((yield_count & 1) == 0) |
70 | return; /* virtual cpu is currently running */ | 70 | return; /* virtual cpu is currently running */ |
71 | rmb(); | 71 | rmb(); |
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c index e0a9858d537e..9a52349874ee 100644 --- a/arch/powerpc/lib/sstep.c +++ b/arch/powerpc/lib/sstep.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/kernel.h> | 11 | #include <linux/kernel.h> |
12 | #include <linux/kprobes.h> | 12 | #include <linux/kprobes.h> |
13 | #include <linux/ptrace.h> | 13 | #include <linux/ptrace.h> |
14 | #include <linux/prefetch.h> | ||
14 | #include <asm/sstep.h> | 15 | #include <asm/sstep.h> |
15 | #include <asm/processor.h> | 16 | #include <asm/processor.h> |
16 | #include <asm/uaccess.h> | 17 | #include <asm/uaccess.h> |
@@ -30,6 +31,7 @@ extern char system_call_common[]; | |||
30 | #define XER_OV 0x40000000U | 31 | #define XER_OV 0x40000000U |
31 | #define XER_CA 0x20000000U | 32 | #define XER_CA 0x20000000U |
32 | 33 | ||
34 | #ifdef CONFIG_PPC_FPU | ||
33 | /* | 35 | /* |
34 | * Functions in ldstfp.S | 36 | * Functions in ldstfp.S |
35 | */ | 37 | */ |
@@ -41,6 +43,19 @@ extern int do_lvx(int rn, unsigned long ea); | |||
41 | extern int do_stvx(int rn, unsigned long ea); | 43 | extern int do_stvx(int rn, unsigned long ea); |
42 | extern int do_lxvd2x(int rn, unsigned long ea); | 44 | extern int do_lxvd2x(int rn, unsigned long ea); |
43 | extern int do_stxvd2x(int rn, unsigned long ea); | 45 | extern int do_stxvd2x(int rn, unsigned long ea); |
46 | #endif | ||
47 | |||
48 | /* | ||
49 | * Emulate the truncation of 64 bit values in 32-bit mode. | ||
50 | */ | ||
51 | static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val) | ||
52 | { | ||
53 | #ifdef __powerpc64__ | ||
54 | if ((msr & MSR_64BIT) == 0) | ||
55 | val &= 0xffffffffUL; | ||
56 | #endif | ||
57 | return val; | ||
58 | } | ||
44 | 59 | ||
45 | /* | 60 | /* |
46 | * Determine whether a conditional branch instruction would branch. | 61 | * Determine whether a conditional branch instruction would branch. |
@@ -88,11 +103,8 @@ static unsigned long __kprobes dform_ea(unsigned int instr, struct pt_regs *regs | |||
88 | if (instr & 0x04000000) /* update forms */ | 103 | if (instr & 0x04000000) /* update forms */ |
89 | regs->gpr[ra] = ea; | 104 | regs->gpr[ra] = ea; |
90 | } | 105 | } |
91 | #ifdef __powerpc64__ | 106 | |
92 | if (!(regs->msr & MSR_SF)) | 107 | return truncate_if_32bit(regs->msr, ea); |
93 | ea &= 0xffffffffUL; | ||
94 | #endif | ||
95 | return ea; | ||
96 | } | 108 | } |
97 | 109 | ||
98 | #ifdef __powerpc64__ | 110 | #ifdef __powerpc64__ |
@@ -111,9 +123,8 @@ static unsigned long __kprobes dsform_ea(unsigned int instr, struct pt_regs *reg | |||
111 | if ((instr & 3) == 1) /* update forms */ | 123 | if ((instr & 3) == 1) /* update forms */ |
112 | regs->gpr[ra] = ea; | 124 | regs->gpr[ra] = ea; |
113 | } | 125 | } |
114 | if (!(regs->msr & MSR_SF)) | 126 | |
115 | ea &= 0xffffffffUL; | 127 | return truncate_if_32bit(regs->msr, ea); |
116 | return ea; | ||
117 | } | 128 | } |
118 | #endif /* __powerpc64 */ | 129 | #endif /* __powerpc64 */ |
119 | 130 | ||
@@ -134,11 +145,8 @@ static unsigned long __kprobes xform_ea(unsigned int instr, struct pt_regs *regs | |||
134 | if (do_update) /* update forms */ | 145 | if (do_update) /* update forms */ |
135 | regs->gpr[ra] = ea; | 146 | regs->gpr[ra] = ea; |
136 | } | 147 | } |
137 | #ifdef __powerpc64__ | 148 | |
138 | if (!(regs->msr & MSR_SF)) | 149 | return truncate_if_32bit(regs->msr, ea); |
139 | ea &= 0xffffffffUL; | ||
140 | #endif | ||
141 | return ea; | ||
142 | } | 150 | } |
143 | 151 | ||
144 | /* | 152 | /* |
@@ -290,6 +298,7 @@ static int __kprobes write_mem(unsigned long val, unsigned long ea, int nb, | |||
290 | return write_mem_unaligned(val, ea, nb, regs); | 298 | return write_mem_unaligned(val, ea, nb, regs); |
291 | } | 299 | } |
292 | 300 | ||
301 | #ifdef CONFIG_PPC_FPU | ||
293 | /* | 302 | /* |
294 | * Check the address and alignment, and call func to do the actual | 303 | * Check the address and alignment, and call func to do the actual |
295 | * load or store. | 304 | * load or store. |
@@ -351,6 +360,7 @@ static int __kprobes do_fp_store(int rn, int (*func)(int, unsigned long), | |||
351 | } | 360 | } |
352 | return err; | 361 | return err; |
353 | } | 362 | } |
363 | #endif | ||
354 | 364 | ||
355 | #ifdef CONFIG_ALTIVEC | 365 | #ifdef CONFIG_ALTIVEC |
356 | /* For Altivec/VMX, no need to worry about alignment */ | 366 | /* For Altivec/VMX, no need to worry about alignment */ |
@@ -462,7 +472,7 @@ static void __kprobes set_cr0(struct pt_regs *regs, int rd) | |||
462 | 472 | ||
463 | regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000); | 473 | regs->ccr = (regs->ccr & 0x0fffffff) | ((regs->xer >> 3) & 0x10000000); |
464 | #ifdef __powerpc64__ | 474 | #ifdef __powerpc64__ |
465 | if (!(regs->msr & MSR_SF)) | 475 | if (!(regs->msr & MSR_64BIT)) |
466 | val = (int) val; | 476 | val = (int) val; |
467 | #endif | 477 | #endif |
468 | if (val < 0) | 478 | if (val < 0) |
@@ -483,7 +493,7 @@ static void __kprobes add_with_carry(struct pt_regs *regs, int rd, | |||
483 | ++val; | 493 | ++val; |
484 | regs->gpr[rd] = val; | 494 | regs->gpr[rd] = val; |
485 | #ifdef __powerpc64__ | 495 | #ifdef __powerpc64__ |
486 | if (!(regs->msr & MSR_SF)) { | 496 | if (!(regs->msr & MSR_64BIT)) { |
487 | val = (unsigned int) val; | 497 | val = (unsigned int) val; |
488 | val1 = (unsigned int) val1; | 498 | val1 = (unsigned int) val1; |
489 | } | 499 | } |
@@ -566,8 +576,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
566 | if ((instr & 2) == 0) | 576 | if ((instr & 2) == 0) |
567 | imm += regs->nip; | 577 | imm += regs->nip; |
568 | regs->nip += 4; | 578 | regs->nip += 4; |
569 | if ((regs->msr & MSR_SF) == 0) | 579 | regs->nip = truncate_if_32bit(regs->msr, regs->nip); |
570 | regs->nip &= 0xffffffffUL; | ||
571 | if (instr & 1) | 580 | if (instr & 1) |
572 | regs->link = regs->nip; | 581 | regs->link = regs->nip; |
573 | if (branch_taken(instr, regs)) | 582 | if (branch_taken(instr, regs)) |
@@ -600,13 +609,9 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
600 | imm -= 0x04000000; | 609 | imm -= 0x04000000; |
601 | if ((instr & 2) == 0) | 610 | if ((instr & 2) == 0) |
602 | imm += regs->nip; | 611 | imm += regs->nip; |
603 | if (instr & 1) { | 612 | if (instr & 1) |
604 | regs->link = regs->nip + 4; | 613 | regs->link = truncate_if_32bit(regs->msr, regs->nip + 4); |
605 | if ((regs->msr & MSR_SF) == 0) | 614 | imm = truncate_if_32bit(regs->msr, imm); |
606 | regs->link &= 0xffffffffUL; | ||
607 | } | ||
608 | if ((regs->msr & MSR_SF) == 0) | ||
609 | imm &= 0xffffffffUL; | ||
610 | regs->nip = imm; | 615 | regs->nip = imm; |
611 | return 1; | 616 | return 1; |
612 | case 19: | 617 | case 19: |
@@ -614,11 +619,8 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
614 | case 16: /* bclr */ | 619 | case 16: /* bclr */ |
615 | case 528: /* bcctr */ | 620 | case 528: /* bcctr */ |
616 | imm = (instr & 0x400)? regs->ctr: regs->link; | 621 | imm = (instr & 0x400)? regs->ctr: regs->link; |
617 | regs->nip += 4; | 622 | regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4); |
618 | if ((regs->msr & MSR_SF) == 0) { | 623 | imm = truncate_if_32bit(regs->msr, imm); |
619 | regs->nip &= 0xffffffffUL; | ||
620 | imm &= 0xffffffffUL; | ||
621 | } | ||
622 | if (instr & 1) | 624 | if (instr & 1) |
623 | regs->link = regs->nip; | 625 | regs->link = regs->nip; |
624 | if (branch_taken(instr, regs)) | 626 | if (branch_taken(instr, regs)) |
@@ -1393,6 +1395,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
1393 | regs->gpr[rd] = byterev_4(val); | 1395 | regs->gpr[rd] = byterev_4(val); |
1394 | goto ldst_done; | 1396 | goto ldst_done; |
1395 | 1397 | ||
1398 | #ifdef CONFIG_PPC_FPU | ||
1396 | case 535: /* lfsx */ | 1399 | case 535: /* lfsx */ |
1397 | case 567: /* lfsux */ | 1400 | case 567: /* lfsux */ |
1398 | if (!(regs->msr & MSR_FP)) | 1401 | if (!(regs->msr & MSR_FP)) |
@@ -1424,6 +1427,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
1424 | ea = xform_ea(instr, regs, u); | 1427 | ea = xform_ea(instr, regs, u); |
1425 | err = do_fp_store(rd, do_stfd, ea, 8, regs); | 1428 | err = do_fp_store(rd, do_stfd, ea, 8, regs); |
1426 | goto ldst_done; | 1429 | goto ldst_done; |
1430 | #endif | ||
1427 | 1431 | ||
1428 | #ifdef __powerpc64__ | 1432 | #ifdef __powerpc64__ |
1429 | case 660: /* stdbrx */ | 1433 | case 660: /* stdbrx */ |
@@ -1534,6 +1538,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
1534 | } while (++rd < 32); | 1538 | } while (++rd < 32); |
1535 | goto instr_done; | 1539 | goto instr_done; |
1536 | 1540 | ||
1541 | #ifdef CONFIG_PPC_FPU | ||
1537 | case 48: /* lfs */ | 1542 | case 48: /* lfs */ |
1538 | case 49: /* lfsu */ | 1543 | case 49: /* lfsu */ |
1539 | if (!(regs->msr & MSR_FP)) | 1544 | if (!(regs->msr & MSR_FP)) |
@@ -1565,6 +1570,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
1565 | ea = dform_ea(instr, regs); | 1570 | ea = dform_ea(instr, regs); |
1566 | err = do_fp_store(rd, do_stfd, ea, 8, regs); | 1571 | err = do_fp_store(rd, do_stfd, ea, 8, regs); |
1567 | goto ldst_done; | 1572 | goto ldst_done; |
1573 | #endif | ||
1568 | 1574 | ||
1569 | #ifdef __powerpc64__ | 1575 | #ifdef __powerpc64__ |
1570 | case 58: /* ld[u], lwa */ | 1576 | case 58: /* ld[u], lwa */ |
@@ -1608,11 +1614,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr) | |||
1608 | return 0; /* invoke DSI if -EFAULT? */ | 1614 | return 0; /* invoke DSI if -EFAULT? */ |
1609 | } | 1615 | } |
1610 | instr_done: | 1616 | instr_done: |
1611 | regs->nip += 4; | 1617 | regs->nip = truncate_if_32bit(regs->msr, regs->nip + 4); |
1612 | #ifdef __powerpc64__ | ||
1613 | if ((regs->msr & MSR_SF) == 0) | ||
1614 | regs->nip &= 0xffffffffUL; | ||
1615 | #endif | ||
1616 | return 1; | 1618 | return 1; |
1617 | 1619 | ||
1618 | logical_done: | 1620 | logical_done: |
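
Note: most of the sstep.c churn replaces open-coded MSR_SF checks with the new truncate_if_32bit() helper, so effective addresses and the next-instruction pointer wrap at 4GB when the emulated MSR selects 32-bit mode. A standalone sketch of that behaviour (MSR_64BIT_EXAMPLE is a stand-in; the real MSR_64BIT definition is not shown in this hunk):

#include <stdio.h>

#define MSR_64BIT_EXAMPLE (1UL << 63)	/* stand-in for the kernel's MSR_64BIT */

static unsigned long truncate_if_32bit(unsigned long msr, unsigned long val)
{
	if ((msr & MSR_64BIT_EXAMPLE) == 0)
		val &= 0xffffffffUL;
	return val;
}

int main(void)
{
	unsigned long nip = 0x00000000fffffffcUL;

	/* 32-bit mode: nip + 4 wraps to 0 */
	printf("0x%lx\n", truncate_if_32bit(0, nip + 4));
	/* 64-bit mode: no truncation */
	printf("0x%lx\n", truncate_if_32bit(MSR_64BIT_EXAMPLE, nip + 4));
	return 0;
}
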
diff --git a/arch/powerpc/math-emu/Makefile b/arch/powerpc/math-emu/Makefile index 0c16ab947f1f..7d1dba0d57f9 100644 --- a/arch/powerpc/math-emu/Makefile +++ b/arch/powerpc/math-emu/Makefile | |||
@@ -15,4 +15,4 @@ obj-$(CONFIG_SPE) += math_efp.o | |||
15 | CFLAGS_fabs.o = -fno-builtin-fabs | 15 | CFLAGS_fabs.o = -fno-builtin-fabs |
16 | CFLAGS_math.o = -fno-builtin-fabs | 16 | CFLAGS_math.o = -fno-builtin-fabs |
17 | 17 | ||
18 | EXTRA_CFLAGS = -I. -Iinclude/math-emu -w | 18 | ccflags-y = -I. -Iinclude/math-emu -w |
diff --git a/arch/powerpc/math-emu/math_efp.c b/arch/powerpc/math-emu/math_efp.c index 41f4ef30e480..62279200d965 100644 --- a/arch/powerpc/math-emu/math_efp.c +++ b/arch/powerpc/math-emu/math_efp.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * arch/powerpc/math-emu/math_efp.c | 2 | * arch/powerpc/math-emu/math_efp.c |
3 | * | 3 | * |
4 | * Copyright (C) 2006-2008 Freescale Semiconductor, Inc. All rights reserved. | 4 | * Copyright (C) 2006-2008, 2010 Freescale Semiconductor, Inc. |
5 | * | 5 | * |
6 | * Author: Ebony Zhu, <ebony.zhu@freescale.com> | 6 | * Author: Ebony Zhu, <ebony.zhu@freescale.com> |
7 | * Yu Liu, <yu.liu@freescale.com> | 7 | * Yu Liu, <yu.liu@freescale.com> |
@@ -104,6 +104,8 @@ | |||
104 | #define FP_EX_MASK (FP_EX_INEXACT | FP_EX_INVALID | FP_EX_DIVZERO | \ | 104 | #define FP_EX_MASK (FP_EX_INEXACT | FP_EX_INVALID | FP_EX_DIVZERO | \ |
105 | FP_EX_UNDERFLOW | FP_EX_OVERFLOW) | 105 | FP_EX_UNDERFLOW | FP_EX_OVERFLOW) |
106 | 106 | ||
107 | static int have_e500_cpu_a005_erratum; | ||
108 | |||
107 | union dw_union { | 109 | union dw_union { |
108 | u64 dp[1]; | 110 | u64 dp[1]; |
109 | u32 wp[2]; | 111 | u32 wp[2]; |
@@ -320,7 +322,8 @@ int do_spe_mathemu(struct pt_regs *regs) | |||
320 | } else { | 322 | } else { |
321 | _FP_ROUND_ZERO(1, SB); | 323 | _FP_ROUND_ZERO(1, SB); |
322 | } | 324 | } |
323 | FP_TO_INT_S(vc.wp[1], SB, 32, ((func & 0x3) != 0)); | 325 | FP_TO_INT_S(vc.wp[1], SB, 32, |
326 | (((func & 0x3) != 0) || SB_s)); | ||
324 | goto update_regs; | 327 | goto update_regs; |
325 | 328 | ||
326 | default: | 329 | default: |
@@ -458,7 +461,8 @@ cmp_s: | |||
458 | } else { | 461 | } else { |
459 | _FP_ROUND_ZERO(2, DB); | 462 | _FP_ROUND_ZERO(2, DB); |
460 | } | 463 | } |
461 | FP_TO_INT_D(vc.wp[1], DB, 32, ((func & 0x3) != 0)); | 464 | FP_TO_INT_D(vc.wp[1], DB, 32, |
465 | (((func & 0x3) != 0) || DB_s)); | ||
462 | goto update_regs; | 466 | goto update_regs; |
463 | 467 | ||
464 | default: | 468 | default: |
@@ -589,8 +593,10 @@ cmp_d: | |||
589 | _FP_ROUND_ZERO(1, SB0); | 593 | _FP_ROUND_ZERO(1, SB0); |
590 | _FP_ROUND_ZERO(1, SB1); | 594 | _FP_ROUND_ZERO(1, SB1); |
591 | } | 595 | } |
592 | FP_TO_INT_S(vc.wp[0], SB0, 32, ((func & 0x3) != 0)); | 596 | FP_TO_INT_S(vc.wp[0], SB0, 32, |
593 | FP_TO_INT_S(vc.wp[1], SB1, 32, ((func & 0x3) != 0)); | 597 | (((func & 0x3) != 0) || SB0_s)); |
598 | FP_TO_INT_S(vc.wp[1], SB1, 32, | ||
599 | (((func & 0x3) != 0) || SB1_s)); | ||
594 | goto update_regs; | 600 | goto update_regs; |
595 | 601 | ||
596 | default: | 602 | default: |
@@ -652,6 +658,15 @@ update_regs: | |||
652 | return 0; | 658 | return 0; |
653 | 659 | ||
654 | illegal: | 660 | illegal: |
661 | if (have_e500_cpu_a005_erratum) { | ||
662 | /* according to e500 cpu a005 erratum, reissue efp inst */ | ||
663 | regs->nip -= 4; | ||
664 | #ifdef DEBUG | ||
665 | printk(KERN_DEBUG "re-issue efp inst: %08lx\n", speinsn); | ||
666 | #endif | ||
667 | return 0; | ||
668 | } | ||
669 | |||
655 | printk(KERN_ERR "\nOoops! IEEE-754 compliance handler encountered un-supported instruction.\ninst code: %08lx\n", speinsn); | 670 | printk(KERN_ERR "\nOoops! IEEE-754 compliance handler encountered un-supported instruction.\ninst code: %08lx\n", speinsn); |
656 | return -ENOSYS; | 671 | return -ENOSYS; |
657 | } | 672 | } |
@@ -718,3 +733,43 @@ int speround_handler(struct pt_regs *regs) | |||
718 | 733 | ||
719 | return 0; | 734 | return 0; |
720 | } | 735 | } |
736 | |||
737 | int __init spe_mathemu_init(void) | ||
738 | { | ||
739 | u32 pvr, maj, min; | ||
740 | |||
741 | pvr = mfspr(SPRN_PVR); | ||
742 | |||
743 | if ((PVR_VER(pvr) == PVR_VER_E500V1) || | ||
744 | (PVR_VER(pvr) == PVR_VER_E500V2)) { | ||
745 | maj = PVR_MAJ(pvr); | ||
746 | min = PVR_MIN(pvr); | ||
747 | |||
748 | /* | ||
749 | * E500 revision below 1.1, 2.3, 3.1, 4.1, 5.1 | ||
750 | * need cpu a005 errata workaround | ||
751 | */ | ||
752 | switch (maj) { | ||
753 | case 1: | ||
754 | if (min < 1) | ||
755 | have_e500_cpu_a005_erratum = 1; | ||
756 | break; | ||
757 | case 2: | ||
758 | if (min < 3) | ||
759 | have_e500_cpu_a005_erratum = 1; | ||
760 | break; | ||
761 | case 3: | ||
762 | case 4: | ||
763 | case 5: | ||
764 | if (min < 1) | ||
765 | have_e500_cpu_a005_erratum = 1; | ||
766 | break; | ||
767 | default: | ||
768 | break; | ||
769 | } | ||
770 | } | ||
771 | |||
772 | return 0; | ||
773 | } | ||
774 | |||
775 | module_init(spe_mathemu_init); | ||
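Note: the spe_mathemu_init() hunk above decides whether the e500 CPU A005 erratum workaround (re-issuing the EFP instruction from the "illegal" path) applies, based on the PVR major/minor revision. Below is a minimal user-space sketch of that revision check only; the maj/min values are passed in directly so nothing is assumed about the PVR bit layout, and the function name is invented for the example.

	/* Sketch: mirrors the revision thresholds in spe_mathemu_init()
	 * (revisions below 1.1, 2.3, 3.1, 4.1, 5.1 need the A005 workaround).
	 * PVR decoding is deliberately left out.
	 */
	#include <stdio.h>

	static int needs_e500_a005_workaround(unsigned int maj, unsigned int min)
	{
		switch (maj) {
		case 1:
			return min < 1;
		case 2:
			return min < 3;
		case 3:
		case 4:
		case 5:
			return min < 1;
		default:
			return 0;
		}
	}

	int main(void)
	{
		/* Example: rev 2.2 is affected, rev 2.3 is not. */
		printf("2.2 -> %d\n", needs_e500_a005_workaround(2, 2));
		printf("2.3 -> %d\n", needs_e500_a005_workaround(2, 3));
		return 0;
	}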
diff --git a/arch/powerpc/mm/40x_mmu.c b/arch/powerpc/mm/40x_mmu.c index 1dc2fa5ce1bd..5810967511d4 100644 --- a/arch/powerpc/mm/40x_mmu.c +++ b/arch/powerpc/mm/40x_mmu.c | |||
@@ -35,6 +35,7 @@ | |||
35 | #include <linux/init.h> | 35 | #include <linux/init.h> |
36 | #include <linux/delay.h> | 36 | #include <linux/delay.h> |
37 | #include <linux/highmem.h> | 37 | #include <linux/highmem.h> |
38 | #include <linux/memblock.h> | ||
38 | 39 | ||
39 | #include <asm/pgalloc.h> | 40 | #include <asm/pgalloc.h> |
40 | #include <asm/prom.h> | 41 | #include <asm/prom.h> |
@@ -47,6 +48,7 @@ | |||
47 | #include <asm/bootx.h> | 48 | #include <asm/bootx.h> |
48 | #include <asm/machdep.h> | 49 | #include <asm/machdep.h> |
49 | #include <asm/setup.h> | 50 | #include <asm/setup.h> |
51 | |||
50 | #include "mmu_decl.h" | 52 | #include "mmu_decl.h" |
51 | 53 | ||
52 | extern int __map_without_ltlbs; | 54 | extern int __map_without_ltlbs; |
@@ -139,8 +141,19 @@ unsigned long __init mmu_mapin_ram(unsigned long top) | |||
139 | * coverage with normal-sized pages (or other reasons) do not | 141 | * coverage with normal-sized pages (or other reasons) do not |
140 | * attempt to allocate outside the allowed range. | 142 | * attempt to allocate outside the allowed range. |
141 | */ | 143 | */ |
142 | 144 | memblock_set_current_limit(mapped); | |
143 | __initial_memory_limit_addr = memstart_addr + mapped; | ||
144 | 145 | ||
145 | return mapped; | 146 | return mapped; |
146 | } | 147 | } |
148 | |||
149 | void setup_initial_memory_limit(phys_addr_t first_memblock_base, | ||
150 | phys_addr_t first_memblock_size) | ||
151 | { | ||
152 | /* We don't currently support the first MEMBLOCK not mapping 0 | ||
153 | * physical on those processors | ||
154 | */ | ||
155 | BUG_ON(first_memblock_base != 0); | ||
156 | |||
157 | /* 40x can only access 16MB at the moment (see head_40x.S) */ | ||
158 | memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000)); | ||
159 | } | ||
diff --git a/arch/powerpc/mm/44x_mmu.c b/arch/powerpc/mm/44x_mmu.c index d8c6efb32bc6..024acab588fd 100644 --- a/arch/powerpc/mm/44x_mmu.c +++ b/arch/powerpc/mm/44x_mmu.c | |||
@@ -24,6 +24,8 @@ | |||
24 | */ | 24 | */ |
25 | 25 | ||
26 | #include <linux/init.h> | 26 | #include <linux/init.h> |
27 | #include <linux/memblock.h> | ||
28 | |||
27 | #include <asm/mmu.h> | 29 | #include <asm/mmu.h> |
28 | #include <asm/system.h> | 30 | #include <asm/system.h> |
29 | #include <asm/page.h> | 31 | #include <asm/page.h> |
@@ -213,6 +215,18 @@ unsigned long __init mmu_mapin_ram(unsigned long top) | |||
213 | return total_lowmem; | 215 | return total_lowmem; |
214 | } | 216 | } |
215 | 217 | ||
218 | void setup_initial_memory_limit(phys_addr_t first_memblock_base, | ||
219 | phys_addr_t first_memblock_size) | ||
220 | { | ||
221 | /* We don't currently support the first MEMBLOCK not mapping 0 | ||
222 | * physical on those processors | ||
223 | */ | ||
224 | BUG_ON(first_memblock_base != 0); | ||
225 | |||
226 | /* 44x has a 256M TLB entry pinned at boot */ | ||
227 | memblock_set_current_limit(min_t(u64, first_memblock_size, PPC_PIN_SIZE)); | ||
228 | } | ||
229 | |||
216 | #ifdef CONFIG_SMP | 230 | #ifdef CONFIG_SMP |
217 | void __cpuinit mmu_init_secondary(int cpu) | 231 | void __cpuinit mmu_init_secondary(int cpu) |
218 | { | 232 | { |
diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index ce68708bbad5..bdca46e08382 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile | |||
@@ -4,9 +4,7 @@ | |||
4 | 4 | ||
5 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror | 5 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror |
6 | 6 | ||
7 | ifeq ($(CONFIG_PPC64),y) | 7 | ccflags-$(CONFIG_PPC64) := -mno-minimal-toc |
8 | EXTRA_CFLAGS += -mno-minimal-toc | ||
9 | endif | ||
10 | 8 | ||
11 | obj-y := fault.o mem.o pgtable.o gup.o \ | 9 | obj-y := fault.o mem.o pgtable.o gup.o \ |
12 | init_$(CONFIG_WORD_SIZE).o \ | 10 | init_$(CONFIG_WORD_SIZE).o \ |
@@ -25,7 +23,7 @@ obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \ | |||
25 | mmu_context_hash$(CONFIG_WORD_SIZE).o | 23 | mmu_context_hash$(CONFIG_WORD_SIZE).o |
26 | obj-$(CONFIG_40x) += 40x_mmu.o | 24 | obj-$(CONFIG_40x) += 40x_mmu.o |
27 | obj-$(CONFIG_44x) += 44x_mmu.o | 25 | obj-$(CONFIG_44x) += 44x_mmu.o |
28 | obj-$(CONFIG_FSL_BOOKE) += fsl_booke_mmu.o | 26 | obj-$(CONFIG_PPC_FSL_BOOK3E) += fsl_booke_mmu.o |
29 | obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o | 27 | obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o |
30 | obj-$(CONFIG_PPC_MM_SLICES) += slice.o | 28 | obj-$(CONFIG_PPC_MM_SLICES) += slice.o |
31 | ifeq ($(CONFIG_HUGETLB_PAGE),y) | 29 | ifeq ($(CONFIG_HUGETLB_PAGE),y) |
diff --git a/arch/powerpc/mm/dma-noncoherent.c b/arch/powerpc/mm/dma-noncoherent.c index 757c0bed9a91..b42f76c4948d 100644 --- a/arch/powerpc/mm/dma-noncoherent.c +++ b/arch/powerpc/mm/dma-noncoherent.c | |||
@@ -399,3 +399,23 @@ void __dma_sync_page(struct page *page, unsigned long offset, | |||
399 | #endif | 399 | #endif |
400 | } | 400 | } |
401 | EXPORT_SYMBOL(__dma_sync_page); | 401 | EXPORT_SYMBOL(__dma_sync_page); |
402 | |||
403 | /* | ||
404 | * Return the PFN for a given cpu virtual address returned by | ||
405 | * __dma_alloc_coherent. This is used by dma_mmap_coherent() | ||
406 | */ | ||
407 | unsigned long __dma_get_coherent_pfn(unsigned long cpu_addr) | ||
408 | { | ||
409 | /* This should always be populated, so we don't test every | ||
410 | * level. If that fails, we'll have a nice crash which | ||
411 | * will be as good as a BUG_ON() | ||
412 | */ | ||
413 | pgd_t *pgd = pgd_offset_k(cpu_addr); | ||
414 | pud_t *pud = pud_offset(pgd, cpu_addr); | ||
415 | pmd_t *pmd = pmd_offset(pud, cpu_addr); | ||
416 | pte_t *ptep = pte_offset_kernel(pmd, cpu_addr); | ||
417 | |||
418 | if (pte_none(*ptep) || !pte_present(*ptep)) | ||
419 | return 0; | ||
420 | return pte_pfn(*ptep); | ||
421 | } | ||
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 1bd712c33ce2..ad35f66c69e8 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
@@ -30,6 +30,8 @@ | |||
30 | #include <linux/kprobes.h> | 30 | #include <linux/kprobes.h> |
31 | #include <linux/kdebug.h> | 31 | #include <linux/kdebug.h> |
32 | #include <linux/perf_event.h> | 32 | #include <linux/perf_event.h> |
33 | #include <linux/magic.h> | ||
34 | #include <linux/ratelimit.h> | ||
33 | 35 | ||
34 | #include <asm/firmware.h> | 36 | #include <asm/firmware.h> |
35 | #include <asm/page.h> | 37 | #include <asm/page.h> |
@@ -345,11 +347,10 @@ bad_area_nosemaphore: | |||
345 | return 0; | 347 | return 0; |
346 | } | 348 | } |
347 | 349 | ||
348 | if (is_exec && (error_code & DSISR_PROTFAULT) | 350 | if (is_exec && (error_code & DSISR_PROTFAULT)) |
349 | && printk_ratelimit()) | 351 | printk_ratelimited(KERN_CRIT "kernel tried to execute NX-protected" |
350 | printk(KERN_CRIT "kernel tried to execute NX-protected" | 352 | " page (%lx) - exploit attempt? (uid: %d)\n", |
351 | " page (%lx) - exploit attempt? (uid: %d)\n", | 353 | address, current_uid()); |
352 | address, current_uid()); | ||
353 | 354 | ||
354 | return SIGSEGV; | 355 | return SIGSEGV; |
355 | 356 | ||
@@ -385,6 +386,7 @@ do_sigbus: | |||
385 | void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) | 386 | void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) |
386 | { | 387 | { |
387 | const struct exception_table_entry *entry; | 388 | const struct exception_table_entry *entry; |
389 | unsigned long *stackend; | ||
388 | 390 | ||
389 | /* Are we prepared to handle this fault? */ | 391 | /* Are we prepared to handle this fault? */ |
390 | if ((entry = search_exception_tables(regs->nip)) != NULL) { | 392 | if ((entry = search_exception_tables(regs->nip)) != NULL) { |
@@ -413,5 +415,9 @@ void bad_page_fault(struct pt_regs *regs, unsigned long address, int sig) | |||
413 | printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n", | 415 | printk(KERN_ALERT "Faulting instruction address: 0x%08lx\n", |
414 | regs->nip); | 416 | regs->nip); |
415 | 417 | ||
418 | stackend = end_of_stack(current); | ||
419 | if (current != &init_task && *stackend != STACK_END_MAGIC) | ||
420 | printk(KERN_ALERT "Thread overran stack, or stack corrupted\n"); | ||
421 | |||
416 | die("Kernel access of bad area", regs, sig); | 422 | die("Kernel access of bad area", regs, sig); |
417 | } | 423 | } |
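Note: the bad_page_fault() hunk above now also checks the STACK_END_MAGIC canary at the end of the current thread's stack before calling die(). A rough user-space illustration of the same canary idea follows; the buffer, magic value and helper names are made up for the example and are not the kernel's implementation.

	#include <stdio.h>
	#include <string.h>

	#define FAKE_STACK_SIZE   4096
	#define FAKE_STACK_MAGIC  0x57AC6E9DUL	/* value chosen for the example */

	/* The canary lives at the lowest address of the (downward-growing) stack. */
	static unsigned long fake_stack[FAKE_STACK_SIZE / sizeof(unsigned long)];

	static void set_canary(void)
	{
		fake_stack[0] = FAKE_STACK_MAGIC;
	}

	static int stack_overran(void)
	{
		return fake_stack[0] != FAKE_STACK_MAGIC;
	}

	int main(void)
	{
		set_canary();

		/* Simulate a function writing past the usable stack area. */
		memset(fake_stack, 0, 64);

		if (stack_overran())
			printf("Thread overran stack, or stack corrupted\n");
		return 0;
	}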
diff --git a/arch/powerpc/mm/fsl_booke_mmu.c b/arch/powerpc/mm/fsl_booke_mmu.c index 4b66a1ece6d8..f7802c8bba0a 100644 --- a/arch/powerpc/mm/fsl_booke_mmu.c +++ b/arch/powerpc/mm/fsl_booke_mmu.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/init.h> | 40 | #include <linux/init.h> |
41 | #include <linux/delay.h> | 41 | #include <linux/delay.h> |
42 | #include <linux/highmem.h> | 42 | #include <linux/highmem.h> |
43 | #include <linux/memblock.h> | ||
43 | 44 | ||
44 | #include <asm/pgalloc.h> | 45 | #include <asm/pgalloc.h> |
45 | #include <asm/prom.h> | 46 | #include <asm/prom.h> |
@@ -56,11 +57,6 @@ | |||
56 | 57 | ||
57 | unsigned int tlbcam_index; | 58 | unsigned int tlbcam_index; |
58 | 59 | ||
59 | |||
60 | #if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS) | ||
61 | #error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS" | ||
62 | #endif | ||
63 | |||
64 | #define NUM_TLBCAMS (64) | 60 | #define NUM_TLBCAMS (64) |
65 | struct tlbcam TLBCAM[NUM_TLBCAMS]; | 61 | struct tlbcam TLBCAM[NUM_TLBCAMS]; |
66 | 62 | ||
@@ -137,7 +133,8 @@ static void settlbcam(int index, unsigned long virt, phys_addr_t phys, | |||
137 | if (mmu_has_feature(MMU_FTR_BIG_PHYS)) | 133 | if (mmu_has_feature(MMU_FTR_BIG_PHYS)) |
138 | TLBCAM[index].MAS7 = (u64)phys >> 32; | 134 | TLBCAM[index].MAS7 = (u64)phys >> 32; |
139 | 135 | ||
140 | if (flags & _PAGE_USER) { | 136 | /* Below is unlikely -- only for large user pages or similar */ |
137 | if (pte_user(flags)) { | ||
141 | TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR; | 138 | TLBCAM[index].MAS3 |= MAS3_UX | MAS3_UR; |
142 | TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0); | 139 | TLBCAM[index].MAS3 |= ((flags & _PAGE_RW) ? MAS3_UW : 0); |
143 | } | 140 | } |
@@ -184,6 +181,12 @@ unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx) | |||
184 | return amount_mapped; | 181 | return amount_mapped; |
185 | } | 182 | } |
186 | 183 | ||
184 | #ifdef CONFIG_PPC32 | ||
185 | |||
186 | #if defined(CONFIG_LOWMEM_CAM_NUM_BOOL) && (CONFIG_LOWMEM_CAM_NUM >= NUM_TLBCAMS) | ||
187 | #error "LOWMEM_CAM_NUM must be less than NUM_TLBCAMS" | ||
188 | #endif | ||
189 | |||
187 | unsigned long __init mmu_mapin_ram(unsigned long top) | 190 | unsigned long __init mmu_mapin_ram(unsigned long top) |
188 | { | 191 | { |
189 | return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1; | 192 | return tlbcam_addrs[tlbcam_index - 1].limit - PAGE_OFFSET + 1; |
@@ -213,5 +216,15 @@ void __init adjust_total_lowmem(void) | |||
213 | pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20, | 216 | pr_cont("%lu Mb, residual: %dMb\n", tlbcam_sz(tlbcam_index - 1) >> 20, |
214 | (unsigned int)((total_lowmem - __max_low_memory) >> 20)); | 217 | (unsigned int)((total_lowmem - __max_low_memory) >> 20)); |
215 | 218 | ||
216 | __initial_memory_limit_addr = memstart_addr + __max_low_memory; | 219 | memblock_set_current_limit(memstart_addr + __max_low_memory); |
217 | } | 220 | } |
221 | |||
222 | void setup_initial_memory_limit(phys_addr_t first_memblock_base, | ||
223 | phys_addr_t first_memblock_size) | ||
224 | { | ||
225 | phys_addr_t limit = first_memblock_base + first_memblock_size; | ||
226 | |||
227 | /* 64M mapped initially according to head_fsl_booke.S */ | ||
228 | memblock_set_current_limit(min_t(u64, limit, 0x04000000)); | ||
229 | } | ||
230 | #endif | ||
diff --git a/arch/powerpc/mm/gup.c b/arch/powerpc/mm/gup.c index d7efdbf640c7..fec13200868f 100644 --- a/arch/powerpc/mm/gup.c +++ b/arch/powerpc/mm/gup.c | |||
@@ -16,6 +16,16 @@ | |||
16 | 16 | ||
17 | #ifdef __HAVE_ARCH_PTE_SPECIAL | 17 | #ifdef __HAVE_ARCH_PTE_SPECIAL |
18 | 18 | ||
19 | static inline void get_huge_page_tail(struct page *page) | ||
20 | { | ||
21 | /* | ||
22 | * __split_huge_page_refcount() cannot run | ||
23 | * from under us. | ||
24 | */ | ||
25 | VM_BUG_ON(atomic_read(&page->_count) < 0); | ||
26 | atomic_inc(&page->_count); | ||
27 | } | ||
28 | |||
19 | /* | 29 | /* |
20 | * The performance critical leaf functions are made noinline otherwise gcc | 30 | * The performance critical leaf functions are made noinline otherwise gcc |
21 | * inlines everything into a single function which results in too much | 31 | * inlines everything into a single function which results in too much |
@@ -47,6 +57,8 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr, | |||
47 | put_page(page); | 57 | put_page(page); |
48 | return 0; | 58 | return 0; |
49 | } | 59 | } |
60 | if (PageTail(page)) | ||
61 | get_huge_page_tail(page); | ||
50 | pages[*nr] = page; | 62 | pages[*nr] = page; |
51 | (*nr)++; | 63 | (*nr)++; |
52 | 64 | ||
diff --git a/arch/powerpc/mm/hash_low_64.S b/arch/powerpc/mm/hash_low_64.S index 3079f6b44cf5..a242b5d7cbe4 100644 --- a/arch/powerpc/mm/hash_low_64.S +++ b/arch/powerpc/mm/hash_low_64.S | |||
@@ -118,7 +118,7 @@ _GLOBAL(__hash_page_4K) | |||
118 | BEGIN_FTR_SECTION | 118 | BEGIN_FTR_SECTION |
119 | cmpdi r9,0 /* check segment size */ | 119 | cmpdi r9,0 /* check segment size */ |
120 | bne 3f | 120 | bne 3f |
121 | END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) | 121 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) |
122 | /* Calc va and put it in r29 */ | 122 | /* Calc va and put it in r29 */ |
123 | rldicr r29,r5,28,63-28 | 123 | rldicr r29,r5,28,63-28 |
124 | rldicl r3,r3,0,36 | 124 | rldicl r3,r3,0,36 |
@@ -192,8 +192,8 @@ htab_insert_pte: | |||
192 | rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */ | 192 | rldicr r3,r0,3,63-3 /* r3 = (hash & mask) << 3 */ |
193 | 193 | ||
194 | /* Call ppc_md.hpte_insert */ | 194 | /* Call ppc_md.hpte_insert */ |
195 | ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ | 195 | ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ |
196 | mr r4,r29 /* Retreive va */ | 196 | mr r4,r29 /* Retrieve va */ |
197 | li r7,0 /* !bolted, !secondary */ | 197 | li r7,0 /* !bolted, !secondary */ |
198 | li r8,MMU_PAGE_4K /* page size */ | 198 | li r8,MMU_PAGE_4K /* page size */ |
199 | ld r9,STK_PARM(r9)(r1) /* segment size */ | 199 | ld r9,STK_PARM(r9)(r1) /* segment size */ |
@@ -215,8 +215,8 @@ _GLOBAL(htab_call_hpte_insert1) | |||
215 | rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ | 215 | rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ |
216 | 216 | ||
217 | /* Call ppc_md.hpte_insert */ | 217 | /* Call ppc_md.hpte_insert */ |
218 | ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ | 218 | ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ |
219 | mr r4,r29 /* Retreive va */ | 219 | mr r4,r29 /* Retrieve va */ |
220 | li r7,HPTE_V_SECONDARY /* !bolted, secondary */ | 220 | li r7,HPTE_V_SECONDARY /* !bolted, secondary */ |
221 | li r8,MMU_PAGE_4K /* page size */ | 221 | li r8,MMU_PAGE_4K /* page size */ |
222 | ld r9,STK_PARM(r9)(r1) /* segment size */ | 222 | ld r9,STK_PARM(r9)(r1) /* segment size */ |
@@ -401,7 +401,7 @@ _GLOBAL(__hash_page_4K) | |||
401 | BEGIN_FTR_SECTION | 401 | BEGIN_FTR_SECTION |
402 | cmpdi r9,0 /* check segment size */ | 402 | cmpdi r9,0 /* check segment size */ |
403 | bne 3f | 403 | bne 3f |
404 | END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) | 404 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) |
405 | /* Calc va and put it in r29 */ | 405 | /* Calc va and put it in r29 */ |
406 | rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */ | 406 | rldicr r29,r5,28,63-28 /* r29 = (vsid << 28) */ |
407 | rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */ | 407 | rldicl r3,r3,0,36 /* r3 = (ea & 0x0fffffff) */ |
@@ -495,8 +495,8 @@ htab_special_pfn: | |||
495 | rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ | 495 | rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ |
496 | 496 | ||
497 | /* Call ppc_md.hpte_insert */ | 497 | /* Call ppc_md.hpte_insert */ |
498 | ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ | 498 | ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ |
499 | mr r4,r29 /* Retreive va */ | 499 | mr r4,r29 /* Retrieve va */ |
500 | li r7,0 /* !bolted, !secondary */ | 500 | li r7,0 /* !bolted, !secondary */ |
501 | li r8,MMU_PAGE_4K /* page size */ | 501 | li r8,MMU_PAGE_4K /* page size */ |
502 | ld r9,STK_PARM(r9)(r1) /* segment size */ | 502 | ld r9,STK_PARM(r9)(r1) /* segment size */ |
@@ -522,8 +522,8 @@ _GLOBAL(htab_call_hpte_insert1) | |||
522 | rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ | 522 | rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ |
523 | 523 | ||
524 | /* Call ppc_md.hpte_insert */ | 524 | /* Call ppc_md.hpte_insert */ |
525 | ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ | 525 | ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ |
526 | mr r4,r29 /* Retreive va */ | 526 | mr r4,r29 /* Retrieve va */ |
527 | li r7,HPTE_V_SECONDARY /* !bolted, secondary */ | 527 | li r7,HPTE_V_SECONDARY /* !bolted, secondary */ |
528 | li r8,MMU_PAGE_4K /* page size */ | 528 | li r8,MMU_PAGE_4K /* page size */ |
529 | ld r9,STK_PARM(r9)(r1) /* segment size */ | 529 | ld r9,STK_PARM(r9)(r1) /* segment size */ |
@@ -715,7 +715,7 @@ BEGIN_FTR_SECTION | |||
715 | andi. r0,r31,_PAGE_NO_CACHE | 715 | andi. r0,r31,_PAGE_NO_CACHE |
716 | /* If so, bail out and refault as a 4k page */ | 716 | /* If so, bail out and refault as a 4k page */ |
717 | bne- ht64_bail_ok | 717 | bne- ht64_bail_ok |
718 | END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE) | 718 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_CI_LARGE_PAGE) |
719 | /* Prepare new PTE value (turn access RW into DIRTY, then | 719 | /* Prepare new PTE value (turn access RW into DIRTY, then |
720 | * add BUSY and ACCESSED) | 720 | * add BUSY and ACCESSED) |
721 | */ | 721 | */ |
@@ -736,7 +736,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_CI_LARGE_PAGE) | |||
736 | BEGIN_FTR_SECTION | 736 | BEGIN_FTR_SECTION |
737 | cmpdi r9,0 /* check segment size */ | 737 | cmpdi r9,0 /* check segment size */ |
738 | bne 3f | 738 | bne 3f |
739 | END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) | 739 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) |
740 | /* Calc va and put it in r29 */ | 740 | /* Calc va and put it in r29 */ |
741 | rldicr r29,r5,28,63-28 | 741 | rldicr r29,r5,28,63-28 |
742 | rldicl r3,r3,0,36 | 742 | rldicl r3,r3,0,36 |
@@ -813,8 +813,8 @@ ht64_insert_pte: | |||
813 | rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ | 813 | rldicr r3,r0,3,63-3 /* r0 = (hash & mask) << 3 */ |
814 | 814 | ||
815 | /* Call ppc_md.hpte_insert */ | 815 | /* Call ppc_md.hpte_insert */ |
816 | ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ | 816 | ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ |
817 | mr r4,r29 /* Retreive va */ | 817 | mr r4,r29 /* Retrieve va */ |
818 | li r7,0 /* !bolted, !secondary */ | 818 | li r7,0 /* !bolted, !secondary */ |
819 | li r8,MMU_PAGE_64K | 819 | li r8,MMU_PAGE_64K |
820 | ld r9,STK_PARM(r9)(r1) /* segment size */ | 820 | ld r9,STK_PARM(r9)(r1) /* segment size */ |
@@ -836,8 +836,8 @@ _GLOBAL(ht64_call_hpte_insert1) | |||
836 | rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ | 836 | rldicr r3,r0,3,63-3 /* r0 = (~hash & mask) << 3 */ |
837 | 837 | ||
838 | /* Call ppc_md.hpte_insert */ | 838 | /* Call ppc_md.hpte_insert */ |
839 | ld r6,STK_PARM(r4)(r1) /* Retreive new pp bits */ | 839 | ld r6,STK_PARM(r4)(r1) /* Retrieve new pp bits */ |
840 | mr r4,r29 /* Retreive va */ | 840 | mr r4,r29 /* Retrieve va */ |
841 | li r7,HPTE_V_SECONDARY /* !bolted, secondary */ | 841 | li r7,HPTE_V_SECONDARY /* !bolted, secondary */ |
842 | li r8,MMU_PAGE_64K | 842 | li r8,MMU_PAGE_64K |
843 | ld r9,STK_PARM(r9)(r1) /* segment size */ | 843 | ld r9,STK_PARM(r9)(r1) /* segment size */ |
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c index 784a400e0781..dfd764896db0 100644 --- a/arch/powerpc/mm/hash_native_64.c +++ b/arch/powerpc/mm/hash_native_64.c | |||
@@ -50,9 +50,8 @@ static inline void __tlbie(unsigned long va, int psize, int ssize) | |||
50 | case MMU_PAGE_4K: | 50 | case MMU_PAGE_4K: |
51 | va &= ~0xffful; | 51 | va &= ~0xffful; |
52 | va |= ssize << 8; | 52 | va |= ssize << 8; |
53 | asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), | 53 | asm volatile(ASM_FTR_IFCLR("tlbie %0,0", PPC_TLBIE(%1,%0), %2) |
54 | %2) | 54 | : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206) |
55 | : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206) | ||
56 | : "memory"); | 55 | : "memory"); |
57 | break; | 56 | break; |
58 | default: | 57 | default: |
@@ -61,9 +60,8 @@ static inline void __tlbie(unsigned long va, int psize, int ssize) | |||
61 | va |= penc << 12; | 60 | va |= penc << 12; |
62 | va |= ssize << 8; | 61 | va |= ssize << 8; |
63 | va |= 1; /* L */ | 62 | va |= 1; /* L */ |
64 | asm volatile(ASM_MMU_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), | 63 | asm volatile(ASM_FTR_IFCLR("tlbie %0,1", PPC_TLBIE(%1,%0), %2) |
65 | %2) | 64 | : : "r" (va), "r"(0), "i" (CPU_FTR_HVMODE_206) |
66 | : : "r" (va), "r"(0), "i" (MMU_FTR_TLBIE_206) | ||
67 | : "memory"); | 65 | : "memory"); |
68 | break; | 66 | break; |
69 | } | 67 | } |
@@ -98,8 +96,8 @@ static inline void __tlbiel(unsigned long va, int psize, int ssize) | |||
98 | 96 | ||
99 | static inline void tlbie(unsigned long va, int psize, int ssize, int local) | 97 | static inline void tlbie(unsigned long va, int psize, int ssize, int local) |
100 | { | 98 | { |
101 | unsigned int use_local = local && cpu_has_feature(CPU_FTR_TLBIEL); | 99 | unsigned int use_local = local && mmu_has_feature(MMU_FTR_TLBIEL); |
102 | int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); | 100 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
103 | 101 | ||
104 | if (use_local) | 102 | if (use_local) |
105 | use_local = mmu_psize_defs[psize].tlbiel; | 103 | use_local = mmu_psize_defs[psize].tlbiel; |
@@ -503,7 +501,7 @@ static void native_flush_hash_range(unsigned long number, int local) | |||
503 | } pte_iterate_hashed_end(); | 501 | } pte_iterate_hashed_end(); |
504 | } | 502 | } |
505 | 503 | ||
506 | if (cpu_has_feature(CPU_FTR_TLBIEL) && | 504 | if (mmu_has_feature(MMU_FTR_TLBIEL) && |
507 | mmu_psize_defs[psize].tlbiel && local) { | 505 | mmu_psize_defs[psize].tlbiel && local) { |
508 | asm volatile("ptesync":::"memory"); | 506 | asm volatile("ptesync":::"memory"); |
509 | for (i = 0; i < number; i++) { | 507 | for (i = 0; i < number; i++) { |
@@ -517,7 +515,7 @@ static void native_flush_hash_range(unsigned long number, int local) | |||
517 | } | 515 | } |
518 | asm volatile("ptesync":::"memory"); | 516 | asm volatile("ptesync":::"memory"); |
519 | } else { | 517 | } else { |
520 | int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); | 518 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
521 | 519 | ||
522 | if (lock_tlbie) | 520 | if (lock_tlbie) |
523 | raw_spin_lock(&native_tlbie_lock); | 521 | raw_spin_lock(&native_tlbie_lock); |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 09dffe6efa46..26b2872b3d00 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
@@ -53,6 +53,7 @@ | |||
53 | #include <asm/sections.h> | 53 | #include <asm/sections.h> |
54 | #include <asm/spu.h> | 54 | #include <asm/spu.h> |
55 | #include <asm/udbg.h> | 55 | #include <asm/udbg.h> |
56 | #include <asm/code-patching.h> | ||
56 | 57 | ||
57 | #ifdef DEBUG | 58 | #ifdef DEBUG |
58 | #define DBG(fmt...) udbg_printf(fmt) | 59 | #define DBG(fmt...) udbg_printf(fmt) |
@@ -258,11 +259,11 @@ static int __init htab_dt_scan_seg_sizes(unsigned long node, | |||
258 | for (; size >= 4; size -= 4, ++prop) { | 259 | for (; size >= 4; size -= 4, ++prop) { |
259 | if (prop[0] == 40) { | 260 | if (prop[0] == 40) { |
260 | DBG("1T segment support detected\n"); | 261 | DBG("1T segment support detected\n"); |
261 | cur_cpu_spec->cpu_features |= CPU_FTR_1T_SEGMENT; | 262 | cur_cpu_spec->mmu_features |= MMU_FTR_1T_SEGMENT; |
262 | return 1; | 263 | return 1; |
263 | } | 264 | } |
264 | } | 265 | } |
265 | cur_cpu_spec->cpu_features &= ~CPU_FTR_NO_SLBIE_B; | 266 | cur_cpu_spec->mmu_features &= ~MMU_FTR_NO_SLBIE_B; |
266 | return 0; | 267 | return 0; |
267 | } | 268 | } |
268 | 269 | ||
@@ -288,7 +289,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node, | |||
288 | if (prop != NULL) { | 289 | if (prop != NULL) { |
289 | DBG("Page sizes from device-tree:\n"); | 290 | DBG("Page sizes from device-tree:\n"); |
290 | size /= 4; | 291 | size /= 4; |
291 | cur_cpu_spec->cpu_features &= ~(CPU_FTR_16M_PAGE); | 292 | cur_cpu_spec->mmu_features &= ~(MMU_FTR_16M_PAGE); |
292 | while(size > 0) { | 293 | while(size > 0) { |
293 | unsigned int shift = prop[0]; | 294 | unsigned int shift = prop[0]; |
294 | unsigned int slbenc = prop[1]; | 295 | unsigned int slbenc = prop[1]; |
@@ -316,7 +317,7 @@ static int __init htab_dt_scan_page_sizes(unsigned long node, | |||
316 | break; | 317 | break; |
317 | case 0x18: | 318 | case 0x18: |
318 | idx = MMU_PAGE_16M; | 319 | idx = MMU_PAGE_16M; |
319 | cur_cpu_spec->cpu_features |= CPU_FTR_16M_PAGE; | 320 | cur_cpu_spec->mmu_features |= MMU_FTR_16M_PAGE; |
320 | break; | 321 | break; |
321 | case 0x22: | 322 | case 0x22: |
322 | idx = MMU_PAGE_16G; | 323 | idx = MMU_PAGE_16G; |
@@ -411,7 +412,7 @@ static void __init htab_init_page_sizes(void) | |||
411 | * Not in the device-tree, let's fallback on known size | 412 | * Not in the device-tree, let's fallback on known size |
412 | * list for 16M capable GP & GR | 413 | * list for 16M capable GP & GR |
413 | */ | 414 | */ |
414 | if (cpu_has_feature(CPU_FTR_16M_PAGE)) | 415 | if (mmu_has_feature(MMU_FTR_16M_PAGE)) |
415 | memcpy(mmu_psize_defs, mmu_psize_defaults_gp, | 416 | memcpy(mmu_psize_defs, mmu_psize_defaults_gp, |
416 | sizeof(mmu_psize_defaults_gp)); | 417 | sizeof(mmu_psize_defaults_gp)); |
417 | found: | 418 | found: |
@@ -441,7 +442,7 @@ static void __init htab_init_page_sizes(void) | |||
441 | mmu_vmalloc_psize = MMU_PAGE_64K; | 442 | mmu_vmalloc_psize = MMU_PAGE_64K; |
442 | if (mmu_linear_psize == MMU_PAGE_4K) | 443 | if (mmu_linear_psize == MMU_PAGE_4K) |
443 | mmu_linear_psize = MMU_PAGE_64K; | 444 | mmu_linear_psize = MMU_PAGE_64K; |
444 | if (cpu_has_feature(CPU_FTR_CI_LARGE_PAGE)) { | 445 | if (mmu_has_feature(MMU_FTR_CI_LARGE_PAGE)) { |
445 | /* | 446 | /* |
446 | * Don't use 64k pages for ioremap on pSeries, since | 447 | * Don't use 64k pages for ioremap on pSeries, since |
447 | * that would stop us accessing the HEA ethernet. | 448 | * that would stop us accessing the HEA ethernet. |
@@ -547,15 +548,7 @@ int remove_section_mapping(unsigned long start, unsigned long end) | |||
547 | } | 548 | } |
548 | #endif /* CONFIG_MEMORY_HOTPLUG */ | 549 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
549 | 550 | ||
550 | static inline void make_bl(unsigned int *insn_addr, void *func) | 551 | #define FUNCTION_TEXT(A) ((*(unsigned long *)(A))) |
551 | { | ||
552 | unsigned long funcp = *((unsigned long *)func); | ||
553 | int offset = funcp - (unsigned long)insn_addr; | ||
554 | |||
555 | *insn_addr = (unsigned int)(0x48000001 | (offset & 0x03fffffc)); | ||
556 | flush_icache_range((unsigned long)insn_addr, 4+ | ||
557 | (unsigned long)insn_addr); | ||
558 | } | ||
559 | 552 | ||
560 | static void __init htab_finish_init(void) | 553 | static void __init htab_finish_init(void) |
561 | { | 554 | { |
@@ -570,16 +563,33 @@ static void __init htab_finish_init(void) | |||
570 | extern unsigned int *ht64_call_hpte_remove; | 563 | extern unsigned int *ht64_call_hpte_remove; |
571 | extern unsigned int *ht64_call_hpte_updatepp; | 564 | extern unsigned int *ht64_call_hpte_updatepp; |
572 | 565 | ||
573 | make_bl(ht64_call_hpte_insert1, ppc_md.hpte_insert); | 566 | patch_branch(ht64_call_hpte_insert1, |
574 | make_bl(ht64_call_hpte_insert2, ppc_md.hpte_insert); | 567 | FUNCTION_TEXT(ppc_md.hpte_insert), |
575 | make_bl(ht64_call_hpte_remove, ppc_md.hpte_remove); | 568 | BRANCH_SET_LINK); |
576 | make_bl(ht64_call_hpte_updatepp, ppc_md.hpte_updatepp); | 569 | patch_branch(ht64_call_hpte_insert2, |
570 | FUNCTION_TEXT(ppc_md.hpte_insert), | ||
571 | BRANCH_SET_LINK); | ||
572 | patch_branch(ht64_call_hpte_remove, | ||
573 | FUNCTION_TEXT(ppc_md.hpte_remove), | ||
574 | BRANCH_SET_LINK); | ||
575 | patch_branch(ht64_call_hpte_updatepp, | ||
576 | FUNCTION_TEXT(ppc_md.hpte_updatepp), | ||
577 | BRANCH_SET_LINK); | ||
578 | |||
577 | #endif /* CONFIG_PPC_HAS_HASH_64K */ | 579 | #endif /* CONFIG_PPC_HAS_HASH_64K */ |
578 | 580 | ||
579 | make_bl(htab_call_hpte_insert1, ppc_md.hpte_insert); | 581 | patch_branch(htab_call_hpte_insert1, |
580 | make_bl(htab_call_hpte_insert2, ppc_md.hpte_insert); | 582 | FUNCTION_TEXT(ppc_md.hpte_insert), |
581 | make_bl(htab_call_hpte_remove, ppc_md.hpte_remove); | 583 | BRANCH_SET_LINK); |
582 | make_bl(htab_call_hpte_updatepp, ppc_md.hpte_updatepp); | 584 | patch_branch(htab_call_hpte_insert2, |
585 | FUNCTION_TEXT(ppc_md.hpte_insert), | ||
586 | BRANCH_SET_LINK); | ||
587 | patch_branch(htab_call_hpte_remove, | ||
588 | FUNCTION_TEXT(ppc_md.hpte_remove), | ||
589 | BRANCH_SET_LINK); | ||
590 | patch_branch(htab_call_hpte_updatepp, | ||
591 | FUNCTION_TEXT(ppc_md.hpte_updatepp), | ||
592 | BRANCH_SET_LINK); | ||
583 | } | 593 | } |
584 | 594 | ||
585 | static void __init htab_initialize(void) | 595 | static void __init htab_initialize(void) |
@@ -588,7 +598,7 @@ static void __init htab_initialize(void) | |||
588 | unsigned long pteg_count; | 598 | unsigned long pteg_count; |
589 | unsigned long prot; | 599 | unsigned long prot; |
590 | unsigned long base = 0, size = 0, limit; | 600 | unsigned long base = 0, size = 0, limit; |
591 | int i; | 601 | struct memblock_region *reg; |
592 | 602 | ||
593 | DBG(" -> htab_initialize()\n"); | 603 | DBG(" -> htab_initialize()\n"); |
594 | 604 | ||
@@ -598,7 +608,7 @@ static void __init htab_initialize(void) | |||
598 | /* Initialize page sizes */ | 608 | /* Initialize page sizes */ |
599 | htab_init_page_sizes(); | 609 | htab_init_page_sizes(); |
600 | 610 | ||
601 | if (cpu_has_feature(CPU_FTR_1T_SEGMENT)) { | 611 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) { |
602 | mmu_kernel_ssize = MMU_SEGSIZE_1T; | 612 | mmu_kernel_ssize = MMU_SEGSIZE_1T; |
603 | mmu_highuser_ssize = MMU_SEGSIZE_1T; | 613 | mmu_highuser_ssize = MMU_SEGSIZE_1T; |
604 | printk(KERN_INFO "Using 1TB segments\n"); | 614 | printk(KERN_INFO "Using 1TB segments\n"); |
@@ -625,7 +635,7 @@ static void __init htab_initialize(void) | |||
625 | if (machine_is(cell)) | 635 | if (machine_is(cell)) |
626 | limit = 0x80000000; | 636 | limit = 0x80000000; |
627 | else | 637 | else |
628 | limit = 0; | 638 | limit = MEMBLOCK_ALLOC_ANYWHERE; |
629 | 639 | ||
630 | table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit); | 640 | table = memblock_alloc_base(htab_size_bytes, htab_size_bytes, limit); |
631 | 641 | ||
@@ -649,7 +659,7 @@ static void __init htab_initialize(void) | |||
649 | #ifdef CONFIG_DEBUG_PAGEALLOC | 659 | #ifdef CONFIG_DEBUG_PAGEALLOC |
650 | linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; | 660 | linear_map_hash_count = memblock_end_of_DRAM() >> PAGE_SHIFT; |
651 | linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count, | 661 | linear_map_hash_slots = __va(memblock_alloc_base(linear_map_hash_count, |
652 | 1, memblock.rmo_size)); | 662 | 1, ppc64_rma_size)); |
653 | memset(linear_map_hash_slots, 0, linear_map_hash_count); | 663 | memset(linear_map_hash_slots, 0, linear_map_hash_count); |
654 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | 664 | #endif /* CONFIG_DEBUG_PAGEALLOC */ |
655 | 665 | ||
@@ -659,9 +669,9 @@ static void __init htab_initialize(void) | |||
659 | */ | 669 | */ |
660 | 670 | ||
661 | /* create bolted the linear mapping in the hash table */ | 671 | /* create bolted the linear mapping in the hash table */ |
662 | for (i=0; i < memblock.memory.cnt; i++) { | 672 | for_each_memblock(memory, reg) { |
663 | base = (unsigned long)__va(memblock.memory.region[i].base); | 673 | base = (unsigned long)__va(reg->base); |
664 | size = memblock.memory.region[i].size; | 674 | size = reg->size; |
665 | 675 | ||
666 | DBG("creating mapping for region: %lx..%lx (prot: %lx)\n", | 676 | DBG("creating mapping for region: %lx..%lx (prot: %lx)\n", |
667 | base, size, prot); | 677 | base, size, prot); |
@@ -696,7 +706,8 @@ static void __init htab_initialize(void) | |||
696 | #endif /* CONFIG_U3_DART */ | 706 | #endif /* CONFIG_U3_DART */ |
697 | BUG_ON(htab_bolt_mapping(base, base + size, __pa(base), | 707 | BUG_ON(htab_bolt_mapping(base, base + size, __pa(base), |
698 | prot, mmu_linear_psize, mmu_kernel_ssize)); | 708 | prot, mmu_linear_psize, mmu_kernel_ssize)); |
699 | } | 709 | } |
710 | memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE); | ||
700 | 711 | ||
701 | /* | 712 | /* |
702 | * If we have a memory_limit and we've allocated TCEs then we need to | 713 | * If we have a memory_limit and we've allocated TCEs then we need to |
@@ -738,7 +749,7 @@ void __init early_init_mmu(void) | |||
738 | 749 | ||
739 | /* Initialize stab / SLB management except on iSeries | 750 | /* Initialize stab / SLB management except on iSeries |
740 | */ | 751 | */ |
741 | if (cpu_has_feature(CPU_FTR_SLB)) | 752 | if (mmu_has_feature(MMU_FTR_SLB)) |
742 | slb_initialize(); | 753 | slb_initialize(); |
743 | else if (!firmware_has_feature(FW_FEATURE_ISERIES)) | 754 | else if (!firmware_has_feature(FW_FEATURE_ISERIES)) |
744 | stab_initialize(get_paca()->stab_real); | 755 | stab_initialize(get_paca()->stab_real); |
@@ -752,10 +763,10 @@ void __cpuinit early_init_mmu_secondary(void) | |||
752 | mtspr(SPRN_SDR1, _SDR1); | 763 | mtspr(SPRN_SDR1, _SDR1); |
753 | 764 | ||
754 | /* Initialize STAB/SLB. We use a virtual address as it works | 765 | /* Initialize STAB/SLB. We use a virtual address as it works |
755 | * in real mode on pSeries and we want a virutal address on | 766 | * in real mode on pSeries and we want a virtual address on |
756 | * iSeries anyway | 767 | * iSeries anyway |
757 | */ | 768 | */ |
758 | if (cpu_has_feature(CPU_FTR_SLB)) | 769 | if (mmu_has_feature(MMU_FTR_SLB)) |
759 | slb_initialize(); | 770 | slb_initialize(); |
760 | else | 771 | else |
761 | stab_initialize(get_paca()->stab_addr); | 772 | stab_initialize(get_paca()->stab_addr); |
@@ -1069,7 +1080,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, | |||
1069 | unsigned long access, unsigned long trap) | 1080 | unsigned long access, unsigned long trap) |
1070 | { | 1081 | { |
1071 | unsigned long vsid; | 1082 | unsigned long vsid; |
1072 | void *pgdir; | 1083 | pgd_t *pgdir; |
1073 | pte_t *ptep; | 1084 | pte_t *ptep; |
1074 | unsigned long flags; | 1085 | unsigned long flags; |
1075 | int rc, ssize, local = 0; | 1086 | int rc, ssize, local = 0; |
@@ -1122,7 +1133,7 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, | |||
1122 | else | 1133 | else |
1123 | #endif /* CONFIG_PPC_HAS_HASH_64K */ | 1134 | #endif /* CONFIG_PPC_HAS_HASH_64K */ |
1124 | rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, | 1135 | rc = __hash_page_4K(ea, access, vsid, ptep, trap, local, ssize, |
1125 | subpage_protection(pgdir, ea)); | 1136 | subpage_protection(mm, ea)); |
1126 | 1137 | ||
1127 | /* Dump some info in case of hash insertion failure, they should | 1138 | /* Dump some info in case of hash insertion failure, they should |
1128 | * never happen so it is really useful to know if/when they do | 1139 | * never happen so it is really useful to know if/when they do |
@@ -1247,3 +1258,23 @@ void kernel_map_pages(struct page *page, int numpages, int enable) | |||
1247 | local_irq_restore(flags); | 1258 | local_irq_restore(flags); |
1248 | } | 1259 | } |
1249 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | 1260 | #endif /* CONFIG_DEBUG_PAGEALLOC */ |
1261 | |||
1262 | void setup_initial_memory_limit(phys_addr_t first_memblock_base, | ||
1263 | phys_addr_t first_memblock_size) | ||
1264 | { | ||
1265 | /* We don't currently support the first MEMBLOCK not mapping 0 | ||
1266 | * physical on those processors | ||
1267 | */ | ||
1268 | BUG_ON(first_memblock_base != 0); | ||
1269 | |||
1270 | /* On LPAR systems, the first entry is our RMA region, | ||
1271 | * non-LPAR 64-bit hash MMU systems don't have a limitation | ||
1272 | * on real mode access, but using the first entry works well | ||
1273 | * enough. We also clamp it to 1G to avoid some funky things | ||
1274 | * such as RTAS bugs etc... | ||
1275 | */ | ||
1276 | ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); | ||
1277 | |||
1278 | /* Finally limit subsequent allocations */ | ||
1279 | memblock_set_current_limit(ppc64_rma_size); | ||
1280 | } | ||
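Note: setup_initial_memory_limit() above clamps early memblock allocations to the smaller of the first memory region and 1 GiB so they stay inside the RMA. The clamp itself is a plain unsigned minimum; a trivial sketch with illustrative values (the helper name is invented):

	#include <stdint.h>
	#include <stdio.h>

	/* Sketch of the RMA clamp: never allow early allocations past the first
	 * region, and never assume more than 1 GiB is safely accessible.
	 */
	static uint64_t initial_limit(uint64_t first_region_size)
	{
		const uint64_t rma_cap = 0x40000000ULL;	/* 1 GiB */

		return first_region_size < rma_cap ? first_region_size : rma_cap;
	}

	int main(void)
	{
		printf("256 MiB region -> %#llx\n",
		       (unsigned long long)initial_limit(0x10000000ULL));
		printf("4 GiB region   -> %#llx\n",
		       (unsigned long long)initial_limit(0x100000000ULL));
		return 0;
	}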
diff --git a/arch/powerpc/mm/highmem.c b/arch/powerpc/mm/highmem.c index 857d4173f9c6..e7450bdbe83a 100644 --- a/arch/powerpc/mm/highmem.c +++ b/arch/powerpc/mm/highmem.c | |||
@@ -29,17 +29,17 @@ | |||
29 | * be used in IRQ contexts, so in some (very limited) cases we need | 29 | * be used in IRQ contexts, so in some (very limited) cases we need |
30 | * it. | 30 | * it. |
31 | */ | 31 | */ |
32 | void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) | 32 | void *kmap_atomic_prot(struct page *page, pgprot_t prot) |
33 | { | 33 | { |
34 | unsigned int idx; | ||
35 | unsigned long vaddr; | 34 | unsigned long vaddr; |
35 | int idx, type; | ||
36 | 36 | ||
37 | /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ | 37 | /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */ |
38 | pagefault_disable(); | 38 | pagefault_disable(); |
39 | if (!PageHighMem(page)) | 39 | if (!PageHighMem(page)) |
40 | return page_address(page); | 40 | return page_address(page); |
41 | 41 | ||
42 | debug_kmap_atomic(type); | 42 | type = kmap_atomic_idx_push(); |
43 | idx = type + KM_TYPE_NR*smp_processor_id(); | 43 | idx = type + KM_TYPE_NR*smp_processor_id(); |
44 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); | 44 | vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); |
45 | #ifdef CONFIG_DEBUG_HIGHMEM | 45 | #ifdef CONFIG_DEBUG_HIGHMEM |
@@ -52,26 +52,35 @@ void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot) | |||
52 | } | 52 | } |
53 | EXPORT_SYMBOL(kmap_atomic_prot); | 53 | EXPORT_SYMBOL(kmap_atomic_prot); |
54 | 54 | ||
55 | void kunmap_atomic_notypecheck(void *kvaddr, enum km_type type) | 55 | void __kunmap_atomic(void *kvaddr) |
56 | { | 56 | { |
57 | #ifdef CONFIG_DEBUG_HIGHMEM | ||
58 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; | 57 | unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; |
59 | enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id(); | 58 | int type; |
60 | 59 | ||
61 | if (vaddr < __fix_to_virt(FIX_KMAP_END)) { | 60 | if (vaddr < __fix_to_virt(FIX_KMAP_END)) { |
62 | pagefault_enable(); | 61 | pagefault_enable(); |
63 | return; | 62 | return; |
64 | } | 63 | } |
65 | 64 | ||
66 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); | 65 | type = kmap_atomic_idx(); |
67 | 66 | ||
68 | /* | 67 | #ifdef CONFIG_DEBUG_HIGHMEM |
69 | * force other mappings to Oops if they'll try to access | 68 | { |
70 | * this pte without first remap it | 69 | unsigned int idx; |
71 | */ | 70 | |
72 | pte_clear(&init_mm, vaddr, kmap_pte-idx); | 71 | idx = type + KM_TYPE_NR * smp_processor_id(); |
73 | local_flush_tlb_page(NULL, vaddr); | 72 | BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); |
73 | |||
74 | /* | ||
75 | * force other mappings to Oops if they'll try to access | ||
76 | * this pte without first remap it | ||
77 | */ | ||
78 | pte_clear(&init_mm, vaddr, kmap_pte-idx); | ||
79 | local_flush_tlb_page(NULL, vaddr); | ||
80 | } | ||
74 | #endif | 81 | #endif |
82 | |||
83 | kmap_atomic_idx_pop(); | ||
75 | pagefault_enable(); | 84 | pagefault_enable(); |
76 | } | 85 | } |
77 | EXPORT_SYMBOL(kunmap_atomic_notypecheck); | 86 | EXPORT_SYMBOL(__kunmap_atomic); |
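Note: the highmem.c hunks above replace the caller-supplied km_type with kmap_atomic_idx_push()/kmap_atomic_idx_pop(), i.e. a small per-CPU stack of atomic-kmap slots. The single-threaded toy below only illustrates that push/pop bookkeeping; the kernel keeps one counter per CPU with pagefaults/preemption disabled, and the names here are simplified stand-ins.

	#include <stdio.h>

	/* Atomic kmaps strictly nest, so a depth counter is enough to hand out
	 * fixmap slots and to release them in reverse order.
	 */
	static int kmap_idx;	/* current nesting depth for this "CPU" */

	static int idx_push(void)
	{
		return kmap_idx++;	/* slot for the new mapping */
	}

	static void idx_pop(void)
	{
		--kmap_idx;
	}

	int main(void)
	{
		int a = idx_push();	/* outer mapping gets slot 0 */
		int b = idx_push();	/* nested mapping gets slot 1 */

		printf("slots: %d then %d\n", a, b);
		idx_pop();		/* unmap in reverse order */
		idx_pop();
		return 0;
	}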
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 9bb249c3046e..0b9a5c1901b9 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
@@ -529,7 +529,7 @@ static int __init hugetlbpage_init(void) | |||
529 | { | 529 | { |
530 | int psize; | 530 | int psize; |
531 | 531 | ||
532 | if (!cpu_has_feature(CPU_FTR_16M_PAGE)) | 532 | if (!mmu_has_feature(MMU_FTR_16M_PAGE)) |
533 | return -ENODEV; | 533 | return -ENODEV; |
534 | 534 | ||
535 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { | 535 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { |
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 6a6975dc2654..5de0f254dbb5 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c | |||
@@ -92,12 +92,6 @@ int __allow_ioremap_reserved; | |||
92 | unsigned long __max_low_memory = MAX_LOW_MEM; | 92 | unsigned long __max_low_memory = MAX_LOW_MEM; |
93 | 93 | ||
94 | /* | 94 | /* |
95 | * address of the limit of what is accessible with initial MMU setup - | ||
96 | * 256MB usually, but only 16MB on 601. | ||
97 | */ | ||
98 | phys_addr_t __initial_memory_limit_addr = (phys_addr_t)0x10000000; | ||
99 | |||
100 | /* | ||
101 | * Check for command-line options that affect what MMU_init will do. | 95 | * Check for command-line options that affect what MMU_init will do. |
102 | */ | 96 | */ |
103 | void MMU_setup(void) | 97 | void MMU_setup(void) |
@@ -126,13 +120,6 @@ void __init MMU_init(void) | |||
126 | if (ppc_md.progress) | 120 | if (ppc_md.progress) |
127 | ppc_md.progress("MMU:enter", 0x111); | 121 | ppc_md.progress("MMU:enter", 0x111); |
128 | 122 | ||
129 | /* 601 can only access 16MB at the moment */ | ||
130 | if (PVR_VER(mfspr(SPRN_PVR)) == 1) | ||
131 | __initial_memory_limit_addr = 0x01000000; | ||
132 | /* 8xx can only access 8MB at the moment */ | ||
133 | if (PVR_VER(mfspr(SPRN_PVR)) == 0x50) | ||
134 | __initial_memory_limit_addr = 0x00800000; | ||
135 | |||
136 | /* parse args from command line */ | 123 | /* parse args from command line */ |
137 | MMU_setup(); | 124 | MMU_setup(); |
138 | 125 | ||
@@ -161,7 +148,7 @@ void __init MMU_init(void) | |||
161 | lowmem_end_addr = memstart_addr + total_lowmem; | 148 | lowmem_end_addr = memstart_addr + total_lowmem; |
162 | #ifndef CONFIG_HIGHMEM | 149 | #ifndef CONFIG_HIGHMEM |
163 | total_memory = total_lowmem; | 150 | total_memory = total_lowmem; |
164 | memblock_enforce_memory_limit(lowmem_end_addr); | 151 | memblock_enforce_memory_limit(total_lowmem); |
165 | memblock_analyze(); | 152 | memblock_analyze(); |
166 | #endif /* CONFIG_HIGHMEM */ | 153 | #endif /* CONFIG_HIGHMEM */ |
167 | } | 154 | } |
@@ -190,20 +177,18 @@ void __init MMU_init(void) | |||
190 | #ifdef CONFIG_BOOTX_TEXT | 177 | #ifdef CONFIG_BOOTX_TEXT |
191 | btext_unmap(); | 178 | btext_unmap(); |
192 | #endif | 179 | #endif |
180 | |||
181 | /* Shortly after that, the entire linear mapping will be available */ | ||
182 | memblock_set_current_limit(lowmem_end_addr); | ||
193 | } | 183 | } |
194 | 184 | ||
195 | /* This is only called until mem_init is done. */ | 185 | /* This is only called until mem_init is done. */ |
196 | void __init *early_get_page(void) | 186 | void __init *early_get_page(void) |
197 | { | 187 | { |
198 | void *p; | 188 | if (init_bootmem_done) |
199 | 189 | return alloc_bootmem_pages(PAGE_SIZE); | |
200 | if (init_bootmem_done) { | 190 | else |
201 | p = alloc_bootmem_pages(PAGE_SIZE); | 191 | return __va(memblock_alloc(PAGE_SIZE, PAGE_SIZE)); |
202 | } else { | ||
203 | p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, | ||
204 | __initial_memory_limit_addr)); | ||
205 | } | ||
206 | return p; | ||
207 | } | 192 | } |
208 | 193 | ||
209 | /* Free up now-unused memory */ | 194 | /* Free up now-unused memory */ |
@@ -238,17 +223,16 @@ void free_initmem(void) | |||
238 | #undef FREESEC | 223 | #undef FREESEC |
239 | } | 224 | } |
240 | 225 | ||
241 | #ifdef CONFIG_BLK_DEV_INITRD | 226 | #ifdef CONFIG_8xx /* No 8xx specific .c file to put that in ... */ |
242 | void free_initrd_mem(unsigned long start, unsigned long end) | 227 | void setup_initial_memory_limit(phys_addr_t first_memblock_base, |
228 | phys_addr_t first_memblock_size) | ||
243 | { | 229 | { |
244 | if (start < end) | 230 | /* We don't currently support the first MEMBLOCK not mapping 0 |
245 | printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | 231 | * physical on those processors |
246 | for (; start < end; start += PAGE_SIZE) { | 232 | */ |
247 | ClearPageReserved(virt_to_page(start)); | 233 | BUG_ON(first_memblock_base != 0); |
248 | init_page_count(virt_to_page(start)); | ||
249 | free_page(start); | ||
250 | totalram_pages++; | ||
251 | } | ||
252 | } | ||
253 | #endif | ||
254 | 234 | ||
235 | /* 8xx can only access 8MB at the moment */ | ||
236 | memblock_set_current_limit(min_t(u64, first_memblock_size, 0x00800000)); | ||
237 | } | ||
238 | #endif /* CONFIG_8xx */ | ||
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index ace85fa74b29..f6dbb4c20e64 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c | |||
@@ -99,20 +99,6 @@ void free_initmem(void) | |||
99 | ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10); | 99 | ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10); |
100 | } | 100 | } |
101 | 101 | ||
102 | #ifdef CONFIG_BLK_DEV_INITRD | ||
103 | void free_initrd_mem(unsigned long start, unsigned long end) | ||
104 | { | ||
105 | if (start < end) | ||
106 | printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | ||
107 | for (; start < end; start += PAGE_SIZE) { | ||
108 | ClearPageReserved(virt_to_page(start)); | ||
109 | init_page_count(virt_to_page(start)); | ||
110 | free_page(start); | ||
111 | totalram_pages++; | ||
112 | } | ||
113 | } | ||
114 | #endif | ||
115 | |||
116 | static void pgd_ctor(void *addr) | 102 | static void pgd_ctor(void *addr) |
117 | { | 103 | { |
118 | memset(addr, 0, PGD_TABLE_SIZE); | 104 | memset(addr, 0, PGD_TABLE_SIZE); |
@@ -330,3 +316,4 @@ int __meminit vmemmap_populate(struct page *start_page, | |||
330 | return 0; | 316 | return 0; |
331 | } | 317 | } |
332 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ | 318 | #endif /* CONFIG_SPARSEMEM_VMEMMAP */ |
319 | |||
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index 1a84a8d00005..29d4dde65c45 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c | |||
@@ -82,18 +82,11 @@ int page_is_ram(unsigned long pfn) | |||
82 | return pfn < max_pfn; | 82 | return pfn < max_pfn; |
83 | #else | 83 | #else |
84 | unsigned long paddr = (pfn << PAGE_SHIFT); | 84 | unsigned long paddr = (pfn << PAGE_SHIFT); |
85 | int i; | 85 | struct memblock_region *reg; |
86 | for (i=0; i < memblock.memory.cnt; i++) { | ||
87 | unsigned long base; | ||
88 | 86 | ||
89 | base = memblock.memory.region[i].base; | 87 | for_each_memblock(memory, reg) |
90 | 88 | if (paddr >= reg->base && paddr < (reg->base + reg->size)) | |
91 | if ((paddr >= base) && | ||
92 | (paddr < (base + memblock.memory.region[i].size))) { | ||
93 | return 1; | 89 | return 1; |
94 | } | ||
95 | } | ||
96 | |||
97 | return 0; | 90 | return 0; |
98 | #endif | 91 | #endif |
99 | } | 92 | } |
@@ -149,23 +142,19 @@ int | |||
149 | walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, | 142 | walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, |
150 | void *arg, int (*func)(unsigned long, unsigned long, void *)) | 143 | void *arg, int (*func)(unsigned long, unsigned long, void *)) |
151 | { | 144 | { |
152 | struct memblock_property res; | 145 | struct memblock_region *reg; |
153 | unsigned long pfn, len; | 146 | unsigned long end_pfn = start_pfn + nr_pages; |
154 | u64 end; | 147 | unsigned long tstart, tend; |
155 | int ret = -1; | 148 | int ret = -1; |
156 | 149 | ||
157 | res.base = (u64) start_pfn << PAGE_SHIFT; | 150 | for_each_memblock(memory, reg) { |
158 | res.size = (u64) nr_pages << PAGE_SHIFT; | 151 | tstart = max(start_pfn, memblock_region_memory_base_pfn(reg)); |
159 | 152 | tend = min(end_pfn, memblock_region_memory_end_pfn(reg)); | |
160 | end = res.base + res.size - 1; | 153 | if (tstart >= tend) |
161 | while ((res.base < end) && (memblock_find(&res) >= 0)) { | 154 | continue; |
162 | pfn = (unsigned long)(res.base >> PAGE_SHIFT); | 155 | ret = (*func)(tstart, tend - tstart, arg); |
163 | len = (unsigned long)(res.size >> PAGE_SHIFT); | ||
164 | ret = (*func)(pfn, len, arg); | ||
165 | if (ret) | 156 | if (ret) |
166 | break; | 157 | break; |
167 | res.base += (res.size + 1); | ||
168 | res.size = (end - res.base + 1); | ||
169 | } | 158 | } |
170 | return ret; | 159 | return ret; |
171 | } | 160 | } |
@@ -179,9 +168,9 @@ EXPORT_SYMBOL_GPL(walk_system_ram_range); | |||
179 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 168 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
180 | void __init do_init_bootmem(void) | 169 | void __init do_init_bootmem(void) |
181 | { | 170 | { |
182 | unsigned long i; | ||
183 | unsigned long start, bootmap_pages; | 171 | unsigned long start, bootmap_pages; |
184 | unsigned long total_pages; | 172 | unsigned long total_pages; |
173 | struct memblock_region *reg; | ||
185 | int boot_mapsize; | 174 | int boot_mapsize; |
186 | 175 | ||
187 | max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; | 176 | max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT; |
@@ -204,10 +193,10 @@ void __init do_init_bootmem(void) | |||
204 | boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn); | 193 | boot_mapsize = init_bootmem_node(NODE_DATA(0), start >> PAGE_SHIFT, min_low_pfn, max_low_pfn); |
205 | 194 | ||
206 | /* Add active regions with valid PFNs */ | 195 | /* Add active regions with valid PFNs */ |
207 | for (i = 0; i < memblock.memory.cnt; i++) { | 196 | for_each_memblock(memory, reg) { |
208 | unsigned long start_pfn, end_pfn; | 197 | unsigned long start_pfn, end_pfn; |
209 | start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT; | 198 | start_pfn = memblock_region_memory_base_pfn(reg); |
210 | end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i); | 199 | end_pfn = memblock_region_memory_end_pfn(reg); |
211 | add_active_range(0, start_pfn, end_pfn); | 200 | add_active_range(0, start_pfn, end_pfn); |
212 | } | 201 | } |
213 | 202 | ||
@@ -218,29 +207,21 @@ void __init do_init_bootmem(void) | |||
218 | free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT); | 207 | free_bootmem_with_active_regions(0, lowmem_end_addr >> PAGE_SHIFT); |
219 | 208 | ||
220 | /* reserve the sections we're already using */ | 209 | /* reserve the sections we're already using */ |
221 | for (i = 0; i < memblock.reserved.cnt; i++) { | 210 | for_each_memblock(reserved, reg) { |
222 | unsigned long addr = memblock.reserved.region[i].base + | 211 | unsigned long top = reg->base + reg->size - 1; |
223 | memblock_size_bytes(&memblock.reserved, i) - 1; | 212 | if (top < lowmem_end_addr) |
224 | if (addr < lowmem_end_addr) | 213 | reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); |
225 | reserve_bootmem(memblock.reserved.region[i].base, | 214 | else if (reg->base < lowmem_end_addr) { |
226 | memblock_size_bytes(&memblock.reserved, i), | 215 | unsigned long trunc_size = lowmem_end_addr - reg->base; |
227 | BOOTMEM_DEFAULT); | 216 | reserve_bootmem(reg->base, trunc_size, BOOTMEM_DEFAULT); |
228 | else if (memblock.reserved.region[i].base < lowmem_end_addr) { | ||
229 | unsigned long adjusted_size = lowmem_end_addr - | ||
230 | memblock.reserved.region[i].base; | ||
231 | reserve_bootmem(memblock.reserved.region[i].base, | ||
232 | adjusted_size, BOOTMEM_DEFAULT); | ||
233 | } | 217 | } |
234 | } | 218 | } |
235 | #else | 219 | #else |
236 | free_bootmem_with_active_regions(0, max_pfn); | 220 | free_bootmem_with_active_regions(0, max_pfn); |
237 | 221 | ||
238 | /* reserve the sections we're already using */ | 222 | /* reserve the sections we're already using */ |
239 | for (i = 0; i < memblock.reserved.cnt; i++) | 223 | for_each_memblock(reserved, reg) |
240 | reserve_bootmem(memblock.reserved.region[i].base, | 224 | reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); |
241 | memblock_size_bytes(&memblock.reserved, i), | ||
242 | BOOTMEM_DEFAULT); | ||
243 | |||
244 | #endif | 225 | #endif |
245 | /* XXX need to clip this if using highmem? */ | 226 | /* XXX need to clip this if using highmem? */ |
246 | sparse_memory_present_with_active_regions(0); | 227 | sparse_memory_present_with_active_regions(0); |
@@ -251,22 +232,15 @@ void __init do_init_bootmem(void) | |||
251 | /* mark pages that don't exist as nosave */ | 232 | /* mark pages that don't exist as nosave */ |
252 | static int __init mark_nonram_nosave(void) | 233 | static int __init mark_nonram_nosave(void) |
253 | { | 234 | { |
254 | unsigned long memblock_next_region_start_pfn, | 235 | struct memblock_region *reg, *prev = NULL; |
255 | memblock_region_max_pfn; | 236 | |
256 | int i; | 237 | for_each_memblock(memory, reg) { |
257 | 238 | if (prev && | |
258 | for (i = 0; i < memblock.memory.cnt - 1; i++) { | 239 | memblock_region_memory_end_pfn(prev) < memblock_region_memory_base_pfn(reg)) |
259 | memblock_region_max_pfn = | 240 | register_nosave_region(memblock_region_memory_end_pfn(prev), |
260 | (memblock.memory.region[i].base >> PAGE_SHIFT) + | 241 | memblock_region_memory_base_pfn(reg)); |
261 | (memblock.memory.region[i].size >> PAGE_SHIFT); | 242 | prev = reg; |
262 | memblock_next_region_start_pfn = | ||
263 | memblock.memory.region[i+1].base >> PAGE_SHIFT; | ||
264 | |||
265 | if (memblock_region_max_pfn < memblock_next_region_start_pfn) | ||
266 | register_nosave_region(memblock_region_max_pfn, | ||
267 | memblock_next_region_start_pfn); | ||
268 | } | 243 | } |
269 | |||
270 | return 0; | 244 | return 0; |
271 | } | 245 | } |
272 | 246 | ||
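The do_init_bootmem() and mark_nonram_nosave() hunks above replace open-coded indexing of memblock.memory and memblock.reserved with the generic iterator and PFN accessors. A minimal sketch of the new pattern, assuming a caller that only needs the PFN span of each memory region (the pr_debug line is illustrative):

	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		unsigned long start_pfn = memblock_region_memory_base_pfn(reg);
		unsigned long end_pfn = memblock_region_memory_end_pfn(reg);

		/* reg->base and reg->size stay available for byte-granular users */
		pr_debug("memory region: pfn %lx..%lx\n", start_pfn, end_pfn);
	}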
@@ -327,7 +301,7 @@ void __init mem_init(void) | |||
327 | swiotlb_init(1); | 301 | swiotlb_init(1); |
328 | #endif | 302 | #endif |
329 | 303 | ||
330 | num_physpages = memblock.memory.size >> PAGE_SHIFT; | 304 | num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT; |
331 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); | 305 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); |
332 | 306 | ||
333 | #ifdef CONFIG_NEED_MULTIPLE_NODES | 307 | #ifdef CONFIG_NEED_MULTIPLE_NODES |
@@ -408,6 +382,25 @@ void __init mem_init(void) | |||
408 | mem_init_done = 1; | 382 | mem_init_done = 1; |
409 | } | 383 | } |
410 | 384 | ||
385 | #ifdef CONFIG_BLK_DEV_INITRD | ||
386 | void __init free_initrd_mem(unsigned long start, unsigned long end) | ||
387 | { | ||
388 | if (start >= end) | ||
389 | return; | ||
390 | |||
391 | start = _ALIGN_DOWN(start, PAGE_SIZE); | ||
392 | end = _ALIGN_UP(end, PAGE_SIZE); | ||
393 | pr_info("Freeing initrd memory: %ldk freed\n", (end - start) >> 10); | ||
394 | |||
395 | for (; start < end; start += PAGE_SIZE) { | ||
396 | ClearPageReserved(virt_to_page(start)); | ||
397 | init_page_count(virt_to_page(start)); | ||
398 | free_page(start); | ||
399 | totalram_pages++; | ||
400 | } | ||
401 | } | ||
402 | #endif | ||
403 | |||
411 | /* | 404 | /* |
412 | * This is called when a page has been modified by the kernel. | 405 | * This is called when a page has been modified by the kernel. |
413 | * It just marks the page as not i-cache clean. We do the i-cache | 406 | * It just marks the page as not i-cache clean. We do the i-cache |
@@ -450,7 +443,7 @@ void clear_user_page(void *page, unsigned long vaddr, struct page *pg) | |||
450 | clear_page(page); | 443 | clear_page(page); |
451 | 444 | ||
452 | /* | 445 | /* |
453 | * We shouldnt have to do this, but some versions of glibc | 446 | * We shouldn't have to do this, but some versions of glibc |
454 | * require it (ld.so assumes zero filled pages are icache clean) | 447 | * require it (ld.so assumes zero filled pages are icache clean) |
455 | * - Anton | 448 | * - Anton |
456 | */ | 449 | */ |
diff --git a/arch/powerpc/mm/mmu_context_hash64.c b/arch/powerpc/mm/mmu_context_hash64.c index 2535828aa84b..3bafc3deca6d 100644 --- a/arch/powerpc/mm/mmu_context_hash64.c +++ b/arch/powerpc/mm/mmu_context_hash64.c | |||
@@ -20,9 +20,205 @@ | |||
20 | #include <linux/idr.h> | 20 | #include <linux/idr.h> |
21 | #include <linux/module.h> | 21 | #include <linux/module.h> |
22 | #include <linux/gfp.h> | 22 | #include <linux/gfp.h> |
23 | #include <linux/slab.h> | ||
23 | 24 | ||
24 | #include <asm/mmu_context.h> | 25 | #include <asm/mmu_context.h> |
25 | 26 | ||
27 | #ifdef CONFIG_PPC_ICSWX | ||
28 | /* | ||
29 | * The processor and its L2 cache cause the icswx instruction to | ||
30 | * generate a COP_REQ transaction on PowerBus. The transaction has | ||
31 | * no address, and the processor does not perform an MMU access | ||
32 | * to authenticate the transaction. The command portion of the | ||
33 | * PowerBus COP_REQ transaction includes the LPAR_ID (LPID) and | ||
34 | * the coprocessor Process ID (PID), which the coprocessor compares | ||
35 | * to the authorized LPID and PID held in the coprocessor, to determine | ||
36 | * if the process is authorized to generate the transaction. | ||
37 | * The data of the COP_REQ transaction is 128 bytes or less and is | ||
38 | * placed in cacheable memory on a 128-byte cache line boundary. | ||
39 | * | ||
40 | * A task that wants to use a coprocessor should call use_cop() to | ||
41 | * allocate a coprocessor PID before executing the icswx instruction. | ||
42 | * use_cop() also enables coprocessor context switching. drop_cop() is | ||
43 | * used to free the coprocessor PID. | ||
44 | * | ||
45 | * Example: | ||
46 | * Host Fabric Interface (HFI) is a PowerPC network coprocessor. | ||
47 | * Each HFI has multiple windows. Each HFI window serves as a | ||
48 | * network device sending to and receiving from the HFI network. | ||
49 | * The HFI immediate send function uses the icswx instruction. The immediate | ||
50 | * send function allows small (single cache-line) packets to be sent | ||
51 | * without using the regular HFI send FIFO and doorbell, which are | ||
52 | * much slower than immediate send. | ||
53 | * | ||
54 | * For each task intending to use HFI immediate send, the HFI driver | ||
55 | * calls use_cop() to obtain a coprocessor PID for the task. | ||
56 | * The HFI driver then allocates a free HFI window and saves the | ||
57 | * coprocessor PID to the HFI window to allow the task to use the | ||
58 | * HFI window. | ||
59 | * | ||
60 | * The HFI driver repeatedly creates immediate send packets and | ||
61 | * issues the icswx instruction to send data through the HFI window. | ||
62 | * The HFI compares the coprocessor PID in the CPU PID register | ||
63 | * to the PID held in the HFI window to determine if the transaction | ||
64 | * is allowed. | ||
65 | * | ||
66 | * When the task is done with the HFI window, the HFI driver calls | ||
67 | * drop_cop() to release the coprocessor PID. | ||
68 | */ | ||
69 | |||
70 | #define COP_PID_NONE 0 | ||
71 | #define COP_PID_MIN (COP_PID_NONE + 1) | ||
72 | #define COP_PID_MAX (0xFFFF) | ||
73 | |||
74 | static DEFINE_SPINLOCK(mmu_context_acop_lock); | ||
75 | static DEFINE_IDA(cop_ida); | ||
76 | |||
77 | void switch_cop(struct mm_struct *next) | ||
78 | { | ||
79 | mtspr(SPRN_PID, next->context.cop_pid); | ||
80 | mtspr(SPRN_ACOP, next->context.acop); | ||
81 | } | ||
82 | |||
83 | static int new_cop_pid(struct ida *ida, int min_id, int max_id, | ||
84 | spinlock_t *lock) | ||
85 | { | ||
86 | int index; | ||
87 | int err; | ||
88 | |||
89 | again: | ||
90 | if (!ida_pre_get(ida, GFP_KERNEL)) | ||
91 | return -ENOMEM; | ||
92 | |||
93 | spin_lock(lock); | ||
94 | err = ida_get_new_above(ida, min_id, &index); | ||
95 | spin_unlock(lock); | ||
96 | |||
97 | if (err == -EAGAIN) | ||
98 | goto again; | ||
99 | else if (err) | ||
100 | return err; | ||
101 | |||
102 | if (index > max_id) { | ||
103 | spin_lock(lock); | ||
104 | ida_remove(ida, index); | ||
105 | spin_unlock(lock); | ||
106 | return -ENOMEM; | ||
107 | } | ||
108 | |||
109 | return index; | ||
110 | } | ||
111 | |||
112 | static void sync_cop(void *arg) | ||
113 | { | ||
114 | struct mm_struct *mm = arg; | ||
115 | |||
116 | if (mm == current->active_mm) | ||
117 | switch_cop(current->active_mm); | ||
118 | } | ||
119 | |||
120 | /** | ||
121 | * Start using a coprocessor. | ||
122 | * @acop: mask of coprocessor to be used. | ||
123 | * @mm: The mm to associate the coprocessor with. Most likely the current mm. | ||
124 | * | ||
125 | * Return a positive PID if successful. Negative errno otherwise. | ||
126 | * The returned PID will be fed to the coprocessor to determine if an | ||
127 | * icswx transaction is authenticated. | ||
128 | */ | ||
129 | int use_cop(unsigned long acop, struct mm_struct *mm) | ||
130 | { | ||
131 | int ret; | ||
132 | |||
133 | if (!cpu_has_feature(CPU_FTR_ICSWX)) | ||
134 | return -ENODEV; | ||
135 | |||
136 | if (!mm || !acop) | ||
137 | return -EINVAL; | ||
138 | |||
139 | /* We need to make sure mm_users doesn't change */ | ||
140 | down_read(&mm->mmap_sem); | ||
141 | spin_lock(mm->context.cop_lockp); | ||
142 | |||
143 | if (mm->context.cop_pid == COP_PID_NONE) { | ||
144 | ret = new_cop_pid(&cop_ida, COP_PID_MIN, COP_PID_MAX, | ||
145 | &mmu_context_acop_lock); | ||
146 | if (ret < 0) | ||
147 | goto out; | ||
148 | |||
149 | mm->context.cop_pid = ret; | ||
150 | } | ||
151 | mm->context.acop |= acop; | ||
152 | |||
153 | sync_cop(mm); | ||
154 | |||
155 | /* | ||
156 | * If this is a threaded process then there might be other threads | ||
157 | * running. We need to send an IPI to force them to pick up any | ||
158 | * change in PID and ACOP. | ||
159 | */ | ||
160 | if (atomic_read(&mm->mm_users) > 1) | ||
161 | smp_call_function(sync_cop, mm, 1); | ||
162 | |||
163 | ret = mm->context.cop_pid; | ||
164 | |||
165 | out: | ||
166 | spin_unlock(mm->context.cop_lockp); | ||
167 | up_read(&mm->mmap_sem); | ||
168 | |||
169 | return ret; | ||
170 | } | ||
171 | EXPORT_SYMBOL_GPL(use_cop); | ||
172 | |||
173 | /** | ||
174 | * Stop using a coprocessor. | ||
175 | * @acop: mask of coprocessor to be stopped. | ||
176 | * @mm: The mm the coprocessor is associated with. | ||
177 | */ | ||
178 | void drop_cop(unsigned long acop, struct mm_struct *mm) | ||
179 | { | ||
180 | int free_pid = COP_PID_NONE; | ||
181 | |||
182 | if (!cpu_has_feature(CPU_FTR_ICSWX)) | ||
183 | return; | ||
184 | |||
185 | if (WARN_ON_ONCE(!mm)) | ||
186 | return; | ||
187 | |||
188 | /* We need to make sure mm_users doesn't change */ | ||
189 | down_read(&mm->mmap_sem); | ||
190 | spin_lock(mm->context.cop_lockp); | ||
191 | |||
192 | mm->context.acop &= ~acop; | ||
193 | |||
194 | if ((!mm->context.acop) && (mm->context.cop_pid != COP_PID_NONE)) { | ||
195 | free_pid = mm->context.cop_pid; | ||
196 | mm->context.cop_pid = COP_PID_NONE; | ||
197 | } | ||
198 | |||
199 | sync_cop(mm); | ||
200 | |||
201 | /* | ||
202 | * If this is a threaded process then there might be other threads | ||
203 | * running. We need to send an IPI to force them to pick up any | ||
204 | * change in PID and ACOP. | ||
205 | */ | ||
206 | if (atomic_read(&mm->mm_users) > 1) | ||
207 | smp_call_function(sync_cop, mm, 1); | ||
208 | |||
209 | if (free_pid != COP_PID_NONE) { | ||
210 | spin_lock(&mmu_context_acop_lock); | ||
211 | ida_remove(&cop_ida, free_pid); | ||
212 | spin_unlock(&mmu_context_acop_lock); | ||
213 | } | ||
214 | |||
215 | spin_unlock(mm->context.cop_lockp); | ||
216 | up_read(&mm->mmap_sem); | ||
217 | } | ||
218 | EXPORT_SYMBOL_GPL(drop_cop); | ||
219 | |||
220 | #endif /* CONFIG_PPC_ICSWX */ | ||
221 | |||
26 | static DEFINE_SPINLOCK(mmu_context_lock); | 222 | static DEFINE_SPINLOCK(mmu_context_lock); |
27 | static DEFINE_IDA(mmu_context_ida); | 223 | static DEFINE_IDA(mmu_context_ida); |
28 | 224 | ||
@@ -31,7 +227,6 @@ static DEFINE_IDA(mmu_context_ida); | |||
31 | * Each segment contains 2^28 bytes. Each context maps 2^44 bytes, | 227 | * Each segment contains 2^28 bytes. Each context maps 2^44 bytes, |
32 | * so we can support 2^19-1 contexts (19 == 35 + 28 - 44). | 228 | * so we can support 2^19-1 contexts (19 == 35 + 28 - 44). |
33 | */ | 229 | */ |
34 | #define NO_CONTEXT 0 | ||
35 | #define MAX_CONTEXT ((1UL << 19) - 1) | 230 | #define MAX_CONTEXT ((1UL << 19) - 1) |
36 | 231 | ||
37 | int __init_new_context(void) | 232 | int __init_new_context(void) |
@@ -79,6 +274,16 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm) | |||
79 | slice_set_user_psize(mm, mmu_virtual_psize); | 274 | slice_set_user_psize(mm, mmu_virtual_psize); |
80 | subpage_prot_init_new_context(mm); | 275 | subpage_prot_init_new_context(mm); |
81 | mm->context.id = index; | 276 | mm->context.id = index; |
277 | #ifdef CONFIG_PPC_ICSWX | ||
278 | mm->context.cop_lockp = kmalloc(sizeof(spinlock_t), GFP_KERNEL); | ||
279 | if (!mm->context.cop_lockp) { | ||
280 | __destroy_context(index); | ||
281 | subpage_prot_free(mm); | ||
282 | mm->context.id = MMU_NO_CONTEXT; | ||
283 | return -ENOMEM; | ||
284 | } | ||
285 | spin_lock_init(mm->context.cop_lockp); | ||
286 | #endif /* CONFIG_PPC_ICSWX */ | ||
82 | 287 | ||
83 | return 0; | 288 | return 0; |
84 | } | 289 | } |
@@ -93,7 +298,12 @@ EXPORT_SYMBOL_GPL(__destroy_context); | |||
93 | 298 | ||
94 | void destroy_context(struct mm_struct *mm) | 299 | void destroy_context(struct mm_struct *mm) |
95 | { | 300 | { |
301 | #ifdef CONFIG_PPC_ICSWX | ||
302 | drop_cop(mm->context.acop, mm); | ||
303 | kfree(mm->context.cop_lockp); | ||
304 | mm->context.cop_lockp = NULL; | ||
305 | #endif /* CONFIG_PPC_ICSWX */ | ||
96 | __destroy_context(mm->context.id); | 306 | __destroy_context(mm->context.id); |
97 | subpage_prot_free(mm); | 307 | subpage_prot_free(mm); |
98 | mm->context.id = NO_CONTEXT; | 308 | mm->context.id = MMU_NO_CONTEXT; |
99 | } | 309 | } |
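A hedged sketch of the calling pattern the icswx comment block above describes, as a coprocessor driver might use it; struct hfi_window and HFI_ACOP_MASK are hypothetical placeholders, while use_cop()/drop_cop() and their signatures come from this patch:

	/* Bind the calling task's mm to the coprocessor before it issues icswx */
	static int hfi_window_open(struct mm_struct *mm, struct hfi_window *win)
	{
		int pid = use_cop(HFI_ACOP_MASK, mm);	/* returns the coprocessor PID */

		if (pid < 0)
			return pid;
		win->cop_pid = pid;	/* the coprocessor checks this PID on each COP_REQ */
		return 0;
	}

	static void hfi_window_close(struct mm_struct *mm, struct hfi_window *win)
	{
		drop_cop(HFI_ACOP_MASK, mm);	/* PID is freed once no ACOP bits remain */
		win->cop_pid = 0;
	}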
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c index ddfd7ad4e1d6..336807de550e 100644 --- a/arch/powerpc/mm/mmu_context_nohash.c +++ b/arch/powerpc/mm/mmu_context_nohash.c | |||
@@ -111,8 +111,8 @@ static unsigned int steal_context_smp(unsigned int id) | |||
111 | * a core map instead but this will do for now. | 111 | * a core map instead but this will do for now. |
112 | */ | 112 | */ |
113 | for_each_cpu(cpu, mm_cpumask(mm)) { | 113 | for_each_cpu(cpu, mm_cpumask(mm)) { |
114 | for (i = cpu_first_thread_in_core(cpu); | 114 | for (i = cpu_first_thread_sibling(cpu); |
115 | i <= cpu_last_thread_in_core(cpu); i++) | 115 | i <= cpu_last_thread_sibling(cpu); i++) |
116 | __set_bit(id, stale_map[i]); | 116 | __set_bit(id, stale_map[i]); |
117 | cpu = i - 1; | 117 | cpu = i - 1; |
118 | } | 118 | } |
@@ -264,14 +264,14 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next) | |||
264 | */ | 264 | */ |
265 | if (test_bit(id, stale_map[cpu])) { | 265 | if (test_bit(id, stale_map[cpu])) { |
266 | pr_hardcont(" | stale flush %d [%d..%d]", | 266 | pr_hardcont(" | stale flush %d [%d..%d]", |
267 | id, cpu_first_thread_in_core(cpu), | 267 | id, cpu_first_thread_sibling(cpu), |
268 | cpu_last_thread_in_core(cpu)); | 268 | cpu_last_thread_sibling(cpu)); |
269 | 269 | ||
270 | local_flush_tlb_mm(next); | 270 | local_flush_tlb_mm(next); |
271 | 271 | ||
272 | /* XXX This clear should ultimately be part of local_flush_tlb_mm */ | 272 | /* XXX This clear should ultimately be part of local_flush_tlb_mm */ |
273 | for (i = cpu_first_thread_in_core(cpu); | 273 | for (i = cpu_first_thread_sibling(cpu); |
274 | i <= cpu_last_thread_in_core(cpu); i++) { | 274 | i <= cpu_last_thread_sibling(cpu); i++) { |
275 | __clear_bit(id, stale_map[i]); | 275 | __clear_bit(id, stale_map[i]); |
276 | } | 276 | } |
277 | } | 277 | } |
@@ -334,16 +334,18 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self, | |||
334 | /* We don't touch CPU 0 map, it's allocated at boot and kept | 334 | /* We don't touch CPU 0 map, it's allocated at boot and kept |
335 | * around forever | 335 | * around forever |
336 | */ | 336 | */ |
337 | if (cpu == 0) | 337 | if (cpu == boot_cpuid) |
338 | return NOTIFY_OK; | 338 | return NOTIFY_OK; |
339 | 339 | ||
340 | switch (action) { | 340 | switch (action) { |
341 | case CPU_ONLINE: | 341 | case CPU_UP_PREPARE: |
342 | case CPU_ONLINE_FROZEN: | 342 | case CPU_UP_PREPARE_FROZEN: |
343 | pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu); | 343 | pr_devel("MMU: Allocating stale context map for CPU %d\n", cpu); |
344 | stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL); | 344 | stale_map[cpu] = kzalloc(CTX_MAP_SIZE, GFP_KERNEL); |
345 | break; | 345 | break; |
346 | #ifdef CONFIG_HOTPLUG_CPU | 346 | #ifdef CONFIG_HOTPLUG_CPU |
347 | case CPU_UP_CANCELED: | ||
348 | case CPU_UP_CANCELED_FROZEN: | ||
347 | case CPU_DEAD: | 349 | case CPU_DEAD: |
348 | case CPU_DEAD_FROZEN: | 350 | case CPU_DEAD_FROZEN: |
349 | pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu); | 351 | pr_devel("MMU: Freeing stale context map for CPU %d\n", cpu); |
@@ -407,7 +409,17 @@ void __init mmu_context_init(void) | |||
407 | } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) { | 409 | } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) { |
408 | first_context = 1; | 410 | first_context = 1; |
409 | last_context = 65535; | 411 | last_context = 65535; |
410 | } else { | 412 | } else |
413 | #ifdef CONFIG_PPC_BOOK3E_MMU | ||
414 | if (mmu_has_feature(MMU_FTR_TYPE_3E)) { | ||
415 | u32 mmucfg = mfspr(SPRN_MMUCFG); | ||
416 | u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK) | ||
417 | >> MMUCFG_PIDSIZE_SHIFT; | ||
418 | first_context = 1; | ||
419 | last_context = (1UL << (pid_bits + 1)) - 1; | ||
420 | } else | ||
421 | #endif | ||
422 | { | ||
411 | first_context = 1; | 423 | first_context = 1; |
412 | last_context = 255; | 424 | last_context = 255; |
413 | } | 425 | } |
@@ -420,9 +432,11 @@ void __init mmu_context_init(void) | |||
420 | */ | 432 | */ |
421 | context_map = alloc_bootmem(CTX_MAP_SIZE); | 433 | context_map = alloc_bootmem(CTX_MAP_SIZE); |
422 | context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1)); | 434 | context_mm = alloc_bootmem(sizeof(void *) * (last_context + 1)); |
435 | #ifndef CONFIG_SMP | ||
423 | stale_map[0] = alloc_bootmem(CTX_MAP_SIZE); | 436 | stale_map[0] = alloc_bootmem(CTX_MAP_SIZE); |
437 | #else | ||
438 | stale_map[boot_cpuid] = alloc_bootmem(CTX_MAP_SIZE); | ||
424 | 439 | ||
425 | #ifdef CONFIG_SMP | ||
426 | register_cpu_notifier(&mmu_context_cpu_nb); | 440 | register_cpu_notifier(&mmu_context_cpu_nb); |
427 | #endif | 441 | #endif |
428 | 442 | ||
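A hedged reading of the Book3E hunk above: last_context now scales with the PID width reported in MMUCFG instead of the fixed 255, and the (1UL << (pid_bits + 1)) - 1 expression implies PIDSIZE encodes the PID width minus one. For example:

	/* pid_bits ==  7 (8-bit PID)  -> last_context = (1 << 8)  - 1 = 255, the old default */
	/* pid_bits == 13 (14-bit PID) -> last_context = (1 << 14) - 1 = 16383 contexts       */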
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h index 63b84a0d3b10..dd0a2589591d 100644 --- a/arch/powerpc/mm/mmu_decl.h +++ b/arch/powerpc/mm/mmu_decl.h | |||
@@ -140,10 +140,13 @@ extern void wii_memory_fixups(void); | |||
140 | extern void MMU_init_hw(void); | 140 | extern void MMU_init_hw(void); |
141 | extern unsigned long mmu_mapin_ram(unsigned long top); | 141 | extern unsigned long mmu_mapin_ram(unsigned long top); |
142 | 142 | ||
143 | #elif defined(CONFIG_FSL_BOOKE) | 143 | #elif defined(CONFIG_PPC_FSL_BOOK3E) |
144 | extern unsigned long map_mem_in_cams(unsigned long ram, int max_cam_idx); | ||
145 | #ifdef CONFIG_PPC32 | ||
144 | extern void MMU_init_hw(void); | 146 | extern void MMU_init_hw(void); |
145 | extern unsigned long mmu_mapin_ram(unsigned long top); | 147 | extern unsigned long mmu_mapin_ram(unsigned long top); |
146 | extern void adjust_total_lowmem(void); | 148 | extern void adjust_total_lowmem(void); |
149 | #endif | ||
147 | extern void loadcam_entry(unsigned int index); | 150 | extern void loadcam_entry(unsigned int index); |
148 | 151 | ||
149 | struct tlbcam { | 152 | struct tlbcam { |
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 002878ccf90b..2164006fe170 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
@@ -20,10 +20,15 @@ | |||
20 | #include <linux/memblock.h> | 20 | #include <linux/memblock.h> |
21 | #include <linux/of.h> | 21 | #include <linux/of.h> |
22 | #include <linux/pfn.h> | 22 | #include <linux/pfn.h> |
23 | #include <linux/cpuset.h> | ||
24 | #include <linux/node.h> | ||
23 | #include <asm/sparsemem.h> | 25 | #include <asm/sparsemem.h> |
24 | #include <asm/prom.h> | 26 | #include <asm/prom.h> |
25 | #include <asm/system.h> | 27 | #include <asm/system.h> |
26 | #include <asm/smp.h> | 28 | #include <asm/smp.h> |
29 | #include <asm/firmware.h> | ||
30 | #include <asm/paca.h> | ||
31 | #include <asm/hvcall.h> | ||
27 | 32 | ||
28 | static int numa_enabled = 1; | 33 | static int numa_enabled = 1; |
29 | 34 | ||
@@ -163,7 +168,7 @@ static void __init get_node_active_region(unsigned long start_pfn, | |||
163 | work_with_active_regions(nid, get_active_region_work_fn, node_ar); | 168 | work_with_active_regions(nid, get_active_region_work_fn, node_ar); |
164 | } | 169 | } |
165 | 170 | ||
166 | static void __cpuinit map_cpu_to_node(int cpu, int node) | 171 | static void map_cpu_to_node(int cpu, int node) |
167 | { | 172 | { |
168 | numa_cpu_lookup_table[cpu] = node; | 173 | numa_cpu_lookup_table[cpu] = node; |
169 | 174 | ||
@@ -173,7 +178,7 @@ static void __cpuinit map_cpu_to_node(int cpu, int node) | |||
173 | cpumask_set_cpu(cpu, node_to_cpumask_map[node]); | 178 | cpumask_set_cpu(cpu, node_to_cpumask_map[node]); |
174 | } | 179 | } |
175 | 180 | ||
176 | #ifdef CONFIG_HOTPLUG_CPU | 181 | #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_PPC_SPLPAR) |
177 | static void unmap_cpu_from_node(unsigned long cpu) | 182 | static void unmap_cpu_from_node(unsigned long cpu) |
178 | { | 183 | { |
179 | int node = numa_cpu_lookup_table[cpu]; | 184 | int node = numa_cpu_lookup_table[cpu]; |
@@ -181,13 +186,13 @@ static void unmap_cpu_from_node(unsigned long cpu) | |||
181 | dbg("removing cpu %lu from node %d\n", cpu, node); | 186 | dbg("removing cpu %lu from node %d\n", cpu, node); |
182 | 187 | ||
183 | if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { | 188 | if (cpumask_test_cpu(cpu, node_to_cpumask_map[node])) { |
184 | cpumask_set_cpu(cpu, node_to_cpumask_map[node]); | 189 | cpumask_clear_cpu(cpu, node_to_cpumask_map[node]); |
185 | } else { | 190 | } else { |
186 | printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n", | 191 | printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n", |
187 | cpu, node); | 192 | cpu, node); |
188 | } | 193 | } |
189 | } | 194 | } |
190 | #endif /* CONFIG_HOTPLUG_CPU */ | 195 | #endif /* CONFIG_HOTPLUG_CPU || CONFIG_PPC_SPLPAR */ |
191 | 196 | ||
192 | /* must hold reference to node during call */ | 197 | /* must hold reference to node during call */ |
193 | static const int *of_get_associativity(struct device_node *dev) | 198 | static const int *of_get_associativity(struct device_node *dev) |
@@ -246,32 +251,41 @@ static void initialize_distance_lookup_table(int nid, | |||
246 | /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa | 251 | /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa |
247 | * info is found. | 252 | * info is found. |
248 | */ | 253 | */ |
249 | static int of_node_to_nid_single(struct device_node *device) | 254 | static int associativity_to_nid(const unsigned int *associativity) |
250 | { | 255 | { |
251 | int nid = -1; | 256 | int nid = -1; |
252 | const unsigned int *tmp; | ||
253 | 257 | ||
254 | if (min_common_depth == -1) | 258 | if (min_common_depth == -1) |
255 | goto out; | 259 | goto out; |
256 | 260 | ||
257 | tmp = of_get_associativity(device); | 261 | if (associativity[0] >= min_common_depth) |
258 | if (!tmp) | 262 | nid = associativity[min_common_depth]; |
259 | goto out; | ||
260 | |||
261 | if (tmp[0] >= min_common_depth) | ||
262 | nid = tmp[min_common_depth]; | ||
263 | 263 | ||
264 | /* POWER4 LPAR uses 0xffff as invalid node */ | 264 | /* POWER4 LPAR uses 0xffff as invalid node */ |
265 | if (nid == 0xffff || nid >= MAX_NUMNODES) | 265 | if (nid == 0xffff || nid >= MAX_NUMNODES) |
266 | nid = -1; | 266 | nid = -1; |
267 | 267 | ||
268 | if (nid > 0 && tmp[0] >= distance_ref_points_depth) | 268 | if (nid > 0 && associativity[0] >= distance_ref_points_depth) |
269 | initialize_distance_lookup_table(nid, tmp); | 269 | initialize_distance_lookup_table(nid, associativity); |
270 | 270 | ||
271 | out: | 271 | out: |
272 | return nid; | 272 | return nid; |
273 | } | 273 | } |
274 | 274 | ||
275 | /* Returns the nid associated with the given device tree node, | ||
276 | * or -1 if not found. | ||
277 | */ | ||
278 | static int of_node_to_nid_single(struct device_node *device) | ||
279 | { | ||
280 | int nid = -1; | ||
281 | const unsigned int *tmp; | ||
282 | |||
283 | tmp = of_get_associativity(device); | ||
284 | if (tmp) | ||
285 | nid = associativity_to_nid(tmp); | ||
286 | return nid; | ||
287 | } | ||
288 | |||
275 | /* Walk the device tree upwards, looking for an associativity id */ | 289 | /* Walk the device tree upwards, looking for an associativity id */ |
276 | int of_node_to_nid(struct device_node *device) | 290 | int of_node_to_nid(struct device_node *device) |
277 | { | 291 | { |
@@ -297,14 +311,13 @@ EXPORT_SYMBOL_GPL(of_node_to_nid); | |||
297 | static int __init find_min_common_depth(void) | 311 | static int __init find_min_common_depth(void) |
298 | { | 312 | { |
299 | int depth; | 313 | int depth; |
300 | struct device_node *rtas_root; | ||
301 | struct device_node *chosen; | 314 | struct device_node *chosen; |
315 | struct device_node *root; | ||
302 | const char *vec5; | 316 | const char *vec5; |
303 | 317 | ||
304 | rtas_root = of_find_node_by_path("/rtas"); | 318 | root = of_find_node_by_path("/rtas"); |
305 | 319 | if (!root) | |
306 | if (!rtas_root) | 320 | root = of_find_node_by_path("/"); |
307 | return -1; | ||
308 | 321 | ||
309 | /* | 322 | /* |
310 | * This property is a set of 32-bit integers, each representing | 323 | * This property is a set of 32-bit integers, each representing |
@@ -318,7 +331,7 @@ static int __init find_min_common_depth(void) | |||
318 | * NUMA boundary and the following are progressively less significant | 331 | * NUMA boundary and the following are progressively less significant |
319 | * boundaries. There can be more than one level of NUMA. | 332 | * boundaries. There can be more than one level of NUMA. |
320 | */ | 333 | */ |
321 | distance_ref_points = of_get_property(rtas_root, | 334 | distance_ref_points = of_get_property(root, |
322 | "ibm,associativity-reference-points", | 335 | "ibm,associativity-reference-points", |
323 | &distance_ref_points_depth); | 336 | &distance_ref_points_depth); |
324 | 337 | ||
@@ -362,11 +375,11 @@ static int __init find_min_common_depth(void) | |||
362 | distance_ref_points_depth = MAX_DISTANCE_REF_POINTS; | 375 | distance_ref_points_depth = MAX_DISTANCE_REF_POINTS; |
363 | } | 376 | } |
364 | 377 | ||
365 | of_node_put(rtas_root); | 378 | of_node_put(root); |
366 | return depth; | 379 | return depth; |
367 | 380 | ||
368 | err: | 381 | err: |
369 | of_node_put(rtas_root); | 382 | of_node_put(root); |
370 | return -1; | 383 | return -1; |
371 | } | 384 | } |
372 | 385 | ||
@@ -426,11 +439,11 @@ static void read_drconf_cell(struct of_drconf_cell *drmem, const u32 **cellp) | |||
426 | } | 439 | } |
427 | 440 | ||
428 | /* | 441 | /* |
429 | * Retreive and validate the ibm,dynamic-memory property of the device tree. | 442 | * Retrieve and validate the ibm,dynamic-memory property of the device tree. |
430 | * | 443 | * |
431 | * The layout of the ibm,dynamic-memory property is a number N of memblock | 444 | * The layout of the ibm,dynamic-memory property is a number N of memblock |
432 | * list entries followed by N memblock list entries. Each memblock list entry | 445 | * list entries followed by N memblock list entries. Each memblock list entry |
433 | * contains information as layed out in the of_drconf_cell struct above. | 446 | * contains information as laid out in the of_drconf_cell struct above. |
434 | */ | 447 | */ |
435 | static int of_get_drconf_memory(struct device_node *memory, const u32 **dm) | 448 | static int of_get_drconf_memory(struct device_node *memory, const u32 **dm) |
436 | { | 449 | { |
@@ -454,7 +467,7 @@ static int of_get_drconf_memory(struct device_node *memory, const u32 **dm) | |||
454 | } | 467 | } |
455 | 468 | ||
456 | /* | 469 | /* |
457 | * Retreive and validate the ibm,lmb-size property for drconf memory | 470 | * Retrieve and validate the ibm,lmb-size property for drconf memory |
458 | * from the device tree. | 471 | * from the device tree. |
459 | */ | 472 | */ |
460 | static u64 of_get_lmb_size(struct device_node *memory) | 473 | static u64 of_get_lmb_size(struct device_node *memory) |
@@ -476,7 +489,7 @@ struct assoc_arrays { | |||
476 | }; | 489 | }; |
477 | 490 | ||
478 | /* | 491 | /* |
479 | * Retreive and validate the list of associativity arrays for drconf | 492 | * Retrieve and validate the list of associativity arrays for drconf |
480 | * memory from the ibm,associativity-lookup-arrays property of the | 493 | * memory from the ibm,associativity-lookup-arrays property of the |
481 | * device tree.. | 494 | * device tree.. |
482 | * | 495 | * |
@@ -590,7 +603,7 @@ static int __cpuinit cpu_numa_callback(struct notifier_block *nfb, | |||
590 | * Returns the size the region should have to enforce the memory limit. | 603 | * Returns the size the region should have to enforce the memory limit. |
591 | * This will either be the original value of size, a truncated value, | 604 | * This will either be the original value of size, a truncated value, |
592 | * or zero. If the returned value of size is 0 the region should be | 605 | * or zero. If the returned value of size is 0 the region should be |
593 | * discarded as it lies wholy above the memory limit. | 606 | * discarded as it lies wholly above the memory limit. |
594 | */ | 607 | */ |
595 | static unsigned long __init numa_enforce_memory_limit(unsigned long start, | 608 | static unsigned long __init numa_enforce_memory_limit(unsigned long start, |
596 | unsigned long size) | 609 | unsigned long size) |
@@ -802,16 +815,17 @@ static void __init setup_nonnuma(void) | |||
802 | unsigned long top_of_ram = memblock_end_of_DRAM(); | 815 | unsigned long top_of_ram = memblock_end_of_DRAM(); |
803 | unsigned long total_ram = memblock_phys_mem_size(); | 816 | unsigned long total_ram = memblock_phys_mem_size(); |
804 | unsigned long start_pfn, end_pfn; | 817 | unsigned long start_pfn, end_pfn; |
805 | unsigned int i, nid = 0; | 818 | unsigned int nid = 0; |
819 | struct memblock_region *reg; | ||
806 | 820 | ||
807 | printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", | 821 | printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", |
808 | top_of_ram, total_ram); | 822 | top_of_ram, total_ram); |
809 | printk(KERN_DEBUG "Memory hole size: %ldMB\n", | 823 | printk(KERN_DEBUG "Memory hole size: %ldMB\n", |
810 | (top_of_ram - total_ram) >> 20); | 824 | (top_of_ram - total_ram) >> 20); |
811 | 825 | ||
812 | for (i = 0; i < memblock.memory.cnt; ++i) { | 826 | for_each_memblock(memory, reg) { |
813 | start_pfn = memblock.memory.region[i].base >> PAGE_SHIFT; | 827 | start_pfn = memblock_region_memory_base_pfn(reg); |
814 | end_pfn = start_pfn + memblock_size_pages(&memblock.memory, i); | 828 | end_pfn = memblock_region_memory_end_pfn(reg); |
815 | 829 | ||
816 | fake_numa_create_new_node(end_pfn, &nid); | 830 | fake_numa_create_new_node(end_pfn, &nid); |
817 | add_active_range(nid, start_pfn, end_pfn); | 831 | add_active_range(nid, start_pfn, end_pfn); |
@@ -947,11 +961,11 @@ static struct notifier_block __cpuinitdata ppc64_numa_nb = { | |||
947 | static void mark_reserved_regions_for_nid(int nid) | 961 | static void mark_reserved_regions_for_nid(int nid) |
948 | { | 962 | { |
949 | struct pglist_data *node = NODE_DATA(nid); | 963 | struct pglist_data *node = NODE_DATA(nid); |
950 | int i; | 964 | struct memblock_region *reg; |
951 | 965 | ||
952 | for (i = 0; i < memblock.reserved.cnt; i++) { | 966 | for_each_memblock(reserved, reg) { |
953 | unsigned long physbase = memblock.reserved.region[i].base; | 967 | unsigned long physbase = reg->base; |
954 | unsigned long size = memblock.reserved.region[i].size; | 968 | unsigned long size = reg->size; |
955 | unsigned long start_pfn = physbase >> PAGE_SHIFT; | 969 | unsigned long start_pfn = physbase >> PAGE_SHIFT; |
956 | unsigned long end_pfn = PFN_UP(physbase + size); | 970 | unsigned long end_pfn = PFN_UP(physbase + size); |
957 | struct node_active_region node_ar; | 971 | struct node_active_region node_ar; |
@@ -1246,4 +1260,281 @@ int hot_add_scn_to_nid(unsigned long scn_addr) | |||
1246 | return nid; | 1260 | return nid; |
1247 | } | 1261 | } |
1248 | 1262 | ||
1263 | static u64 hot_add_drconf_memory_max(void) | ||
1264 | { | ||
1265 | struct device_node *memory = NULL; | ||
1266 | unsigned int drconf_cell_cnt = 0; | ||
1267 | u64 lmb_size = 0; | ||
1268 | const u32 *dm = 0; | ||
1269 | |||
1270 | memory = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); | ||
1271 | if (memory) { | ||
1272 | drconf_cell_cnt = of_get_drconf_memory(memory, &dm); | ||
1273 | lmb_size = of_get_lmb_size(memory); | ||
1274 | of_node_put(memory); | ||
1275 | } | ||
1276 | return lmb_size * drconf_cell_cnt; | ||
1277 | } | ||
1278 | |||
1279 | /* | ||
1280 | * memory_hotplug_max - return max address of memory that may be added | ||
1281 | * | ||
1282 | * This is currently only used on systems that support drconfig memory | ||
1283 | * hotplug. | ||
1284 | */ | ||
1285 | u64 memory_hotplug_max(void) | ||
1286 | { | ||
1287 | return max(hot_add_drconf_memory_max(), memblock_end_of_DRAM()); | ||
1288 | } | ||
1249 | #endif /* CONFIG_MEMORY_HOTPLUG */ | 1289 | #endif /* CONFIG_MEMORY_HOTPLUG */ |
1290 | |||
1291 | /* Virtual Processor Home Node (VPHN) support */ | ||
1292 | #ifdef CONFIG_PPC_SPLPAR | ||
1293 | static u8 vphn_cpu_change_counts[NR_CPUS][MAX_DISTANCE_REF_POINTS]; | ||
1294 | static cpumask_t cpu_associativity_changes_mask; | ||
1295 | static int vphn_enabled; | ||
1296 | static void set_topology_timer(void); | ||
1297 | |||
1298 | /* | ||
1299 | * Store the current values of the associativity change counters in the | ||
1300 | * hypervisor. | ||
1301 | */ | ||
1302 | static void setup_cpu_associativity_change_counters(void) | ||
1303 | { | ||
1304 | int cpu; | ||
1305 | |||
1306 | /* The VPHN feature supports a maximum of 8 reference points */ | ||
1307 | BUILD_BUG_ON(MAX_DISTANCE_REF_POINTS > 8); | ||
1308 | |||
1309 | for_each_possible_cpu(cpu) { | ||
1310 | int i; | ||
1311 | u8 *counts = vphn_cpu_change_counts[cpu]; | ||
1312 | volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; | ||
1313 | |||
1314 | for (i = 0; i < distance_ref_points_depth; i++) | ||
1315 | counts[i] = hypervisor_counts[i]; | ||
1316 | } | ||
1317 | } | ||
1318 | |||
1319 | /* | ||
1320 | * The hypervisor maintains a set of 8 associativity change counters in | ||
1321 | * the VPA of each cpu that correspond to the associativity levels in the | ||
1322 | * ibm,associativity-reference-points property. When an associativity | ||
1323 | * level changes, the corresponding counter is incremented. | ||
1324 | * | ||
1325 | * Set a bit in cpu_associativity_changes_mask for each cpu whose home | ||
1326 | * node associativity levels have changed. | ||
1327 | * | ||
1328 | * Returns the number of cpus with unhandled associativity changes. | ||
1329 | */ | ||
1330 | static int update_cpu_associativity_changes_mask(void) | ||
1331 | { | ||
1332 | int cpu, nr_cpus = 0; | ||
1333 | cpumask_t *changes = &cpu_associativity_changes_mask; | ||
1334 | |||
1335 | cpumask_clear(changes); | ||
1336 | |||
1337 | for_each_possible_cpu(cpu) { | ||
1338 | int i, changed = 0; | ||
1339 | u8 *counts = vphn_cpu_change_counts[cpu]; | ||
1340 | volatile u8 *hypervisor_counts = lppaca[cpu].vphn_assoc_counts; | ||
1341 | |||
1342 | for (i = 0; i < distance_ref_points_depth; i++) { | ||
1343 | if (hypervisor_counts[i] != counts[i]) { | ||
1344 | counts[i] = hypervisor_counts[i]; | ||
1345 | changed = 1; | ||
1346 | } | ||
1347 | } | ||
1348 | if (changed) { | ||
1349 | cpumask_set_cpu(cpu, changes); | ||
1350 | nr_cpus++; | ||
1351 | } | ||
1352 | } | ||
1353 | |||
1354 | return nr_cpus; | ||
1355 | } | ||
1356 | |||
1357 | /* | ||
1358 | * 6 64-bit registers unpacked into 12 32-bit associativity values. To form | ||
1359 | * the complete property we have to add the length in the first cell. | ||
1360 | */ | ||
1361 | #define VPHN_ASSOC_BUFSIZE (6*sizeof(u64)/sizeof(u32) + 1) | ||
1362 | |||
1363 | /* | ||
1364 | * Convert the associativity domain numbers returned from the hypervisor | ||
1365 | * to the sequence they would appear in the ibm,associativity property. | ||
1366 | */ | ||
1367 | static int vphn_unpack_associativity(const long *packed, unsigned int *unpacked) | ||
1368 | { | ||
1369 | int i, nr_assoc_doms = 0; | ||
1370 | const u16 *field = (const u16*) packed; | ||
1371 | |||
1372 | #define VPHN_FIELD_UNUSED (0xffff) | ||
1373 | #define VPHN_FIELD_MSB (0x8000) | ||
1374 | #define VPHN_FIELD_MASK (~VPHN_FIELD_MSB) | ||
1375 | |||
1376 | for (i = 1; i < VPHN_ASSOC_BUFSIZE; i++) { | ||
1377 | if (*field == VPHN_FIELD_UNUSED) { | ||
1378 | /* All significant fields processed, and remaining | ||
1379 | * fields contain the reserved value of all 1's. | ||
1380 | * Just store them. | ||
1381 | */ | ||
1382 | unpacked[i] = *((u32*)field); | ||
1383 | field += 2; | ||
1384 | } else if (*field & VPHN_FIELD_MSB) { | ||
1385 | /* Data is in the lower 15 bits of this field */ | ||
1386 | unpacked[i] = *field & VPHN_FIELD_MASK; | ||
1387 | field++; | ||
1388 | nr_assoc_doms++; | ||
1389 | } else { | ||
1390 | /* Data is in the lower 15 bits of this field | ||
1391 | * concatenated with the next 16 bit field | ||
1392 | */ | ||
1393 | unpacked[i] = *((u32*)field); | ||
1394 | field += 2; | ||
1395 | nr_assoc_doms++; | ||
1396 | } | ||
1397 | } | ||
1398 | |||
1399 | /* The first cell contains the length of the property */ | ||
1400 | unpacked[0] = nr_assoc_doms; | ||
1401 | |||
1402 | return nr_assoc_doms; | ||
1403 | } | ||
1404 | |||
1405 | /* | ||
1406 | * Retrieve the new associativity information for a virtual processor's | ||
1407 | * home node. | ||
1408 | */ | ||
1409 | static long hcall_vphn(unsigned long cpu, unsigned int *associativity) | ||
1410 | { | ||
1411 | long rc; | ||
1412 | long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; | ||
1413 | u64 flags = 1; | ||
1414 | int hwcpu = get_hard_smp_processor_id(cpu); | ||
1415 | |||
1416 | rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu); | ||
1417 | vphn_unpack_associativity(retbuf, associativity); | ||
1418 | |||
1419 | return rc; | ||
1420 | } | ||
1421 | |||
1422 | static long vphn_get_associativity(unsigned long cpu, | ||
1423 | unsigned int *associativity) | ||
1424 | { | ||
1425 | long rc; | ||
1426 | |||
1427 | rc = hcall_vphn(cpu, associativity); | ||
1428 | |||
1429 | switch (rc) { | ||
1430 | case H_FUNCTION: | ||
1431 | printk(KERN_INFO | ||
1432 | "VPHN is not supported. Disabling polling...\n"); | ||
1433 | stop_topology_update(); | ||
1434 | break; | ||
1435 | case H_HARDWARE: | ||
1436 | printk(KERN_ERR | ||
1437 | "hcall_vphn() experienced a hardware fault " | ||
1438 | "preventing VPHN. Disabling polling...\n"); | ||
1439 | stop_topology_update(); | ||
1440 | } | ||
1441 | |||
1442 | return rc; | ||
1443 | } | ||
1444 | |||
1445 | /* | ||
1446 | * Update the node maps and sysfs entries for each cpu whose home node | ||
1447 | * has changed. | ||
1448 | */ | ||
1449 | int arch_update_cpu_topology(void) | ||
1450 | { | ||
1451 | int cpu, nid, old_nid; | ||
1452 | unsigned int associativity[VPHN_ASSOC_BUFSIZE] = {0}; | ||
1453 | struct sys_device *sysdev; | ||
1454 | |||
1455 | for_each_cpu(cpu,&cpu_associativity_changes_mask) { | ||
1456 | vphn_get_associativity(cpu, associativity); | ||
1457 | nid = associativity_to_nid(associativity); | ||
1458 | |||
1459 | if (nid < 0 || !node_online(nid)) | ||
1460 | nid = first_online_node; | ||
1461 | |||
1462 | old_nid = numa_cpu_lookup_table[cpu]; | ||
1463 | |||
1464 | /* Disable hotplug while we update the cpu | ||
1465 | * masks and sysfs. | ||
1466 | */ | ||
1467 | get_online_cpus(); | ||
1468 | unregister_cpu_under_node(cpu, old_nid); | ||
1469 | unmap_cpu_from_node(cpu); | ||
1470 | map_cpu_to_node(cpu, nid); | ||
1471 | register_cpu_under_node(cpu, nid); | ||
1472 | put_online_cpus(); | ||
1473 | |||
1474 | sysdev = get_cpu_sysdev(cpu); | ||
1475 | if (sysdev) | ||
1476 | kobject_uevent(&sysdev->kobj, KOBJ_CHANGE); | ||
1477 | } | ||
1478 | |||
1479 | return 1; | ||
1480 | } | ||
1481 | |||
1482 | static void topology_work_fn(struct work_struct *work) | ||
1483 | { | ||
1484 | rebuild_sched_domains(); | ||
1485 | } | ||
1486 | static DECLARE_WORK(topology_work, topology_work_fn); | ||
1487 | |||
1488 | void topology_schedule_update(void) | ||
1489 | { | ||
1490 | schedule_work(&topology_work); | ||
1491 | } | ||
1492 | |||
1493 | static void topology_timer_fn(unsigned long ignored) | ||
1494 | { | ||
1495 | if (!vphn_enabled) | ||
1496 | return; | ||
1497 | if (update_cpu_associativity_changes_mask() > 0) | ||
1498 | topology_schedule_update(); | ||
1499 | set_topology_timer(); | ||
1500 | } | ||
1501 | static struct timer_list topology_timer = | ||
1502 | TIMER_INITIALIZER(topology_timer_fn, 0, 0); | ||
1503 | |||
1504 | static void set_topology_timer(void) | ||
1505 | { | ||
1506 | topology_timer.data = 0; | ||
1507 | topology_timer.expires = jiffies + 60 * HZ; | ||
1508 | add_timer(&topology_timer); | ||
1509 | } | ||
1510 | |||
1511 | /* | ||
1512 | * Start polling for VPHN associativity changes. | ||
1513 | */ | ||
1514 | int start_topology_update(void) | ||
1515 | { | ||
1516 | int rc = 0; | ||
1517 | |||
1518 | /* Disabled until races with load balancing are fixed */ | ||
1519 | if (0 && firmware_has_feature(FW_FEATURE_VPHN) && | ||
1520 | get_lppaca()->shared_proc) { | ||
1521 | vphn_enabled = 1; | ||
1522 | setup_cpu_associativity_change_counters(); | ||
1523 | init_timer_deferrable(&topology_timer); | ||
1524 | set_topology_timer(); | ||
1525 | rc = 1; | ||
1526 | } | ||
1527 | |||
1528 | return rc; | ||
1529 | } | ||
1530 | __initcall(start_topology_update); | ||
1531 | |||
1532 | /* | ||
1533 | * Disable polling for VPHN associativity changes. | ||
1534 | */ | ||
1535 | int stop_topology_update(void) | ||
1536 | { | ||
1537 | vphn_enabled = 0; | ||
1538 | return del_timer_sync(&topology_timer); | ||
1539 | } | ||
1540 | #endif /* CONFIG_PPC_SPLPAR */ | ||
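A worked, hypothetical example of the unpacking rules in vphn_unpack_associativity() above, reading the H_HOME_NODE_ASSOCIATIVITY return buffer as big-endian 16-bit fields:

	/* packed fields: 0x8002  0x8007  0x0000 0x0031  0xffff ...                       */
	/* i=1: MSB set              -> unpacked[1] = 0x0002 (15-bit value, one field)    */
	/* i=2: MSB set              -> unpacked[2] = 0x0007                              */
	/* i=3: MSB clear, != 0xffff -> unpacked[3] = 0x00000031 (two fields consumed)    */
	/* i=4: 0xffff               -> reserved filler, stored as-is, not counted        */
	/* unpacked[0] = 3, the number of significant associativity domains               */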
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c index 2c7e801ab20b..af40c8768a78 100644 --- a/arch/powerpc/mm/pgtable.c +++ b/arch/powerpc/mm/pgtable.c | |||
@@ -33,110 +33,6 @@ | |||
33 | 33 | ||
34 | #include "mmu_decl.h" | 34 | #include "mmu_decl.h" |
35 | 35 | ||
36 | DEFINE_PER_CPU(struct mmu_gather, mmu_gathers); | ||
37 | |||
38 | #ifdef CONFIG_SMP | ||
39 | |||
40 | /* | ||
41 | * Handle batching of page table freeing on SMP. Page tables are | ||
42 | * queued up and send to be freed later by RCU in order to avoid | ||
43 | * freeing a page table page that is being walked without locks | ||
44 | */ | ||
45 | |||
46 | static DEFINE_PER_CPU(struct pte_freelist_batch *, pte_freelist_cur); | ||
47 | static unsigned long pte_freelist_forced_free; | ||
48 | |||
49 | struct pte_freelist_batch | ||
50 | { | ||
51 | struct rcu_head rcu; | ||
52 | unsigned int index; | ||
53 | unsigned long tables[0]; | ||
54 | }; | ||
55 | |||
56 | #define PTE_FREELIST_SIZE \ | ||
57 | ((PAGE_SIZE - sizeof(struct pte_freelist_batch)) \ | ||
58 | / sizeof(unsigned long)) | ||
59 | |||
60 | static void pte_free_smp_sync(void *arg) | ||
61 | { | ||
62 | /* Do nothing, just ensure we sync with all CPUs */ | ||
63 | } | ||
64 | |||
65 | /* This is only called when we are critically out of memory | ||
66 | * (and fail to get a page in pte_free_tlb). | ||
67 | */ | ||
68 | static void pgtable_free_now(void *table, unsigned shift) | ||
69 | { | ||
70 | pte_freelist_forced_free++; | ||
71 | |||
72 | smp_call_function(pte_free_smp_sync, NULL, 1); | ||
73 | |||
74 | pgtable_free(table, shift); | ||
75 | } | ||
76 | |||
77 | static void pte_free_rcu_callback(struct rcu_head *head) | ||
78 | { | ||
79 | struct pte_freelist_batch *batch = | ||
80 | container_of(head, struct pte_freelist_batch, rcu); | ||
81 | unsigned int i; | ||
82 | |||
83 | for (i = 0; i < batch->index; i++) { | ||
84 | void *table = (void *)(batch->tables[i] & ~MAX_PGTABLE_INDEX_SIZE); | ||
85 | unsigned shift = batch->tables[i] & MAX_PGTABLE_INDEX_SIZE; | ||
86 | |||
87 | pgtable_free(table, shift); | ||
88 | } | ||
89 | |||
90 | free_page((unsigned long)batch); | ||
91 | } | ||
92 | |||
93 | static void pte_free_submit(struct pte_freelist_batch *batch) | ||
94 | { | ||
95 | call_rcu(&batch->rcu, pte_free_rcu_callback); | ||
96 | } | ||
97 | |||
98 | void pgtable_free_tlb(struct mmu_gather *tlb, void *table, unsigned shift) | ||
99 | { | ||
100 | /* This is safe since tlb_gather_mmu has disabled preemption */ | ||
101 | struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur); | ||
102 | unsigned long pgf; | ||
103 | |||
104 | if (atomic_read(&tlb->mm->mm_users) < 2 || | ||
105 | cpumask_equal(mm_cpumask(tlb->mm), cpumask_of(smp_processor_id()))){ | ||
106 | pgtable_free(table, shift); | ||
107 | return; | ||
108 | } | ||
109 | |||
110 | if (*batchp == NULL) { | ||
111 | *batchp = (struct pte_freelist_batch *)__get_free_page(GFP_ATOMIC); | ||
112 | if (*batchp == NULL) { | ||
113 | pgtable_free_now(table, shift); | ||
114 | return; | ||
115 | } | ||
116 | (*batchp)->index = 0; | ||
117 | } | ||
118 | BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); | ||
119 | pgf = (unsigned long)table | shift; | ||
120 | (*batchp)->tables[(*batchp)->index++] = pgf; | ||
121 | if ((*batchp)->index == PTE_FREELIST_SIZE) { | ||
122 | pte_free_submit(*batchp); | ||
123 | *batchp = NULL; | ||
124 | } | ||
125 | } | ||
126 | |||
127 | void pte_free_finish(void) | ||
128 | { | ||
129 | /* This is safe since tlb_gather_mmu has disabled preemption */ | ||
130 | struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur); | ||
131 | |||
132 | if (*batchp == NULL) | ||
133 | return; | ||
134 | pte_free_submit(*batchp); | ||
135 | *batchp = NULL; | ||
136 | } | ||
137 | |||
138 | #endif /* CONFIG_SMP */ | ||
139 | |||
140 | static inline int is_exec_fault(void) | 36 | static inline int is_exec_fault(void) |
141 | { | 37 | { |
142 | return current->thread.regs && TRAP(current->thread.regs) == 0x400; | 38 | return current->thread.regs && TRAP(current->thread.regs) == 0x400; |
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c index a87ead0138b4..51f87956f8f8 100644 --- a/arch/powerpc/mm/pgtable_32.c +++ b/arch/powerpc/mm/pgtable_32.c | |||
@@ -78,7 +78,7 @@ pgd_t *pgd_alloc(struct mm_struct *mm) | |||
78 | 78 | ||
79 | /* pgdir take page or two with 4K pages and a page fraction otherwise */ | 79 | /* pgdir take page or two with 4K pages and a page fraction otherwise */ |
80 | #ifndef CONFIG_PPC_4K_PAGES | 80 | #ifndef CONFIG_PPC_4K_PAGES |
81 | ret = (pgd_t *)kzalloc(1 << PGDIR_ORDER, GFP_KERNEL); | 81 | ret = kzalloc(1 << PGDIR_ORDER, GFP_KERNEL); |
82 | #else | 82 | #else |
83 | ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, | 83 | ret = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_ZERO, |
84 | PGDIR_ORDER - PAGE_SHIFT); | 84 | PGDIR_ORDER - PAGE_SHIFT); |
@@ -133,7 +133,15 @@ ioremap(phys_addr_t addr, unsigned long size) | |||
133 | EXPORT_SYMBOL(ioremap); | 133 | EXPORT_SYMBOL(ioremap); |
134 | 134 | ||
135 | void __iomem * | 135 | void __iomem * |
136 | ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags) | 136 | ioremap_wc(phys_addr_t addr, unsigned long size) |
137 | { | ||
138 | return __ioremap_caller(addr, size, _PAGE_NO_CACHE, | ||
139 | __builtin_return_address(0)); | ||
140 | } | ||
141 | EXPORT_SYMBOL(ioremap_wc); | ||
142 | |||
143 | void __iomem * | ||
144 | ioremap_prot(phys_addr_t addr, unsigned long size, unsigned long flags) | ||
137 | { | 145 | { |
138 | /* writeable implies dirty for kernel addresses */ | 146 | /* writeable implies dirty for kernel addresses */ |
139 | if (flags & _PAGE_RW) | 147 | if (flags & _PAGE_RW) |
@@ -152,7 +160,7 @@ ioremap_flags(phys_addr_t addr, unsigned long size, unsigned long flags) | |||
152 | 160 | ||
153 | return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); | 161 | return __ioremap_caller(addr, size, flags, __builtin_return_address(0)); |
154 | } | 162 | } |
155 | EXPORT_SYMBOL(ioremap_flags); | 163 | EXPORT_SYMBOL(ioremap_prot); |
156 | 164 | ||
157 | void __iomem * | 165 | void __iomem * |
158 | __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags) | 166 | __ioremap(phys_addr_t addr, unsigned long size, unsigned long flags) |
@@ -230,6 +238,7 @@ __ioremap_caller(phys_addr_t addr, unsigned long size, unsigned long flags, | |||
230 | area = get_vm_area_caller(size, VM_IOREMAP, caller); | 238 | area = get_vm_area_caller(size, VM_IOREMAP, caller); |
231 | if (area == 0) | 239 | if (area == 0) |
232 | return NULL; | 240 | return NULL; |
241 | area->phys_addr = p; | ||
233 | v = (unsigned long) area->addr; | 242 | v = (unsigned long) area->addr; |
234 | } else { | 243 | } else { |
235 | v = (ioremap_bot -= size); | 244 | v = (ioremap_bot -= size); |
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c index 21d6dfab7942..6e595f6496d4 100644 --- a/arch/powerpc/mm/pgtable_64.c +++ b/arch/powerpc/mm/pgtable_64.c | |||
@@ -223,6 +223,8 @@ void __iomem * __ioremap_caller(phys_addr_t addr, unsigned long size, | |||
223 | caller); | 223 | caller); |
224 | if (area == NULL) | 224 | if (area == NULL) |
225 | return NULL; | 225 | return NULL; |
226 | |||
227 | area->phys_addr = paligned; | ||
226 | ret = __ioremap_at(paligned, area->addr, size, flags); | 228 | ret = __ioremap_at(paligned, area->addr, size, flags); |
227 | if (!ret) | 229 | if (!ret) |
228 | vunmap(area->addr); | 230 | vunmap(area->addr); |
@@ -253,7 +255,17 @@ void __iomem * ioremap(phys_addr_t addr, unsigned long size) | |||
253 | return __ioremap_caller(addr, size, flags, caller); | 255 | return __ioremap_caller(addr, size, flags, caller); |
254 | } | 256 | } |
255 | 257 | ||
256 | void __iomem * ioremap_flags(phys_addr_t addr, unsigned long size, | 258 | void __iomem * ioremap_wc(phys_addr_t addr, unsigned long size) |
259 | { | ||
260 | unsigned long flags = _PAGE_NO_CACHE; | ||
261 | void *caller = __builtin_return_address(0); | ||
262 | |||
263 | if (ppc_md.ioremap) | ||
264 | return ppc_md.ioremap(addr, size, flags, caller); | ||
265 | return __ioremap_caller(addr, size, flags, caller); | ||
266 | } | ||
267 | |||
268 | void __iomem * ioremap_prot(phys_addr_t addr, unsigned long size, | ||
257 | unsigned long flags) | 269 | unsigned long flags) |
258 | { | 270 | { |
259 | void *caller = __builtin_return_address(0); | 271 | void *caller = __builtin_return_address(0); |
@@ -309,7 +321,8 @@ void iounmap(volatile void __iomem *token) | |||
309 | } | 321 | } |
310 | 322 | ||
311 | EXPORT_SYMBOL(ioremap); | 323 | EXPORT_SYMBOL(ioremap); |
312 | EXPORT_SYMBOL(ioremap_flags); | 324 | EXPORT_SYMBOL(ioremap_wc); |
325 | EXPORT_SYMBOL(ioremap_prot); | ||
313 | EXPORT_SYMBOL(__ioremap); | 326 | EXPORT_SYMBOL(__ioremap); |
314 | EXPORT_SYMBOL(__ioremap_at); | 327 | EXPORT_SYMBOL(__ioremap_at); |
315 | EXPORT_SYMBOL(iounmap); | 328 | EXPORT_SYMBOL(iounmap); |
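A hedged usage sketch of the renamed and new mappers exported above; the physical addresses and sizes are placeholder values, and the flag choice mirrors the usual uncached/guarded convention for device registers:

	phys_addr_t regs_phys = 0xf0000000;	/* placeholder device addresses */
	phys_addr_t fb_phys = 0xe0000000;
	unsigned long fb_size = 0x1000000;
	void __iomem *regs, *fb;

	/* register window: uncached and guarded */
	regs = ioremap_prot(regs_phys, 0x1000, _PAGE_NO_CACHE | _PAGE_GUARDED);

	/* framebuffer-style mapping through the new entry point */
	fb = ioremap_wc(fb_phys, fb_size);

	/* ... use the mappings ... */

	iounmap(fb);
	iounmap(regs);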
diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c index f8a01829d64f..11571e118831 100644 --- a/arch/powerpc/mm/ppc_mmu_32.c +++ b/arch/powerpc/mm/ppc_mmu_32.c | |||
@@ -223,8 +223,7 @@ void __init MMU_init_hw(void) | |||
223 | * Find some memory for the hash table. | 223 | * Find some memory for the hash table. |
224 | */ | 224 | */ |
225 | if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322); | 225 | if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322); |
226 | Hash = __va(memblock_alloc_base(Hash_size, Hash_size, | 226 | Hash = __va(memblock_alloc(Hash_size, Hash_size)); |
227 | __initial_memory_limit_addr)); | ||
228 | cacheable_memzero(Hash, Hash_size); | 227 | cacheable_memzero(Hash, Hash_size); |
229 | _SDR1 = __pa(Hash) | SDR1_LOW_BITS; | 228 | _SDR1 = __pa(Hash) | SDR1_LOW_BITS; |
230 | 229 | ||
@@ -272,3 +271,18 @@ void __init MMU_init_hw(void) | |||
272 | 271 | ||
273 | if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205); | 272 | if ( ppc_md.progress ) ppc_md.progress("hash:done", 0x205); |
274 | } | 273 | } |
274 | |||
275 | void setup_initial_memory_limit(phys_addr_t first_memblock_base, | ||
276 | phys_addr_t first_memblock_size) | ||
277 | { | ||
278 | /* We don't currently support the first MEMBLOCK not mapping 0 | ||
279 | * physical on those processors | ||
280 | */ | ||
281 | BUG_ON(first_memblock_base != 0); | ||
282 | |||
283 | /* 601 can only access 16MB at the moment */ | ||
284 | if (PVR_VER(mfspr(SPRN_PVR)) == 1) | ||
285 | memblock_set_current_limit(min_t(u64, first_memblock_size, 0x01000000)); | ||
286 | else /* Anything else has 256M mapped */ | ||
287 | memblock_set_current_limit(min_t(u64, first_memblock_size, 0x10000000)); | ||
288 | } | ||
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 1d98ecc8eecd..e22276cb67a4 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <asm/firmware.h> | 24 | #include <asm/firmware.h> |
25 | #include <linux/compiler.h> | 25 | #include <linux/compiler.h> |
26 | #include <asm/udbg.h> | 26 | #include <asm/udbg.h> |
27 | #include <asm/code-patching.h> | ||
27 | 28 | ||
28 | 29 | ||
29 | extern void slb_allocate_realmode(unsigned long ea); | 30 | extern void slb_allocate_realmode(unsigned long ea); |
@@ -166,7 +167,7 @@ static inline int esids_match(unsigned long addr1, unsigned long addr2) | |||
166 | int esid_1t_count; | 167 | int esid_1t_count; |
167 | 168 | ||
168 | /* System is not 1T segment size capable. */ | 169 | /* System is not 1T segment size capable. */ |
169 | if (!cpu_has_feature(CPU_FTR_1T_SEGMENT)) | 170 | if (!mmu_has_feature(MMU_FTR_1T_SEGMENT)) |
170 | return (GET_ESID(addr1) == GET_ESID(addr2)); | 171 | return (GET_ESID(addr1) == GET_ESID(addr2)); |
171 | 172 | ||
172 | esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) + | 173 | esid_1t_count = (((addr1 >> SID_SHIFT_1T) != 0) + |
@@ -201,7 +202,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) | |||
201 | */ | 202 | */ |
202 | hard_irq_disable(); | 203 | hard_irq_disable(); |
203 | offset = get_paca()->slb_cache_ptr; | 204 | offset = get_paca()->slb_cache_ptr; |
204 | if (!cpu_has_feature(CPU_FTR_NO_SLBIE_B) && | 205 | if (!mmu_has_feature(MMU_FTR_NO_SLBIE_B) && |
205 | offset <= SLB_CACHE_ENTRIES) { | 206 | offset <= SLB_CACHE_ENTRIES) { |
206 | int i; | 207 | int i; |
207 | asm volatile("isync" : : : "memory"); | 208 | asm volatile("isync" : : : "memory"); |
@@ -249,9 +250,8 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) | |||
249 | static inline void patch_slb_encoding(unsigned int *insn_addr, | 250 | static inline void patch_slb_encoding(unsigned int *insn_addr, |
250 | unsigned int immed) | 251 | unsigned int immed) |
251 | { | 252 | { |
252 | *insn_addr = (*insn_addr & 0xffff0000) | immed; | 253 | int insn = (*insn_addr & 0xffff0000) | immed; |
253 | flush_icache_range((unsigned long)insn_addr, 4+ | 254 | patch_instruction(insn_addr, insn); |
254 | (unsigned long)insn_addr); | ||
255 | } | 255 | } |
256 | 256 | ||
257 | void slb_set_size(u16 size) | 257 | void slb_set_size(u16 size) |
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index 95ce35581696..ef653dc95b65 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S | |||
@@ -58,7 +58,7 @@ _GLOBAL(slb_miss_kernel_load_linear) | |||
58 | li r11,0 | 58 | li r11,0 |
59 | BEGIN_FTR_SECTION | 59 | BEGIN_FTR_SECTION |
60 | b slb_finish_load | 60 | b slb_finish_load |
61 | END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT) | 61 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) |
62 | b slb_finish_load_1T | 62 | b slb_finish_load_1T |
63 | 63 | ||
64 | 1: | 64 | 1: |
@@ -87,7 +87,7 @@ _GLOBAL(slb_miss_kernel_load_vmemmap) | |||
87 | 6: | 87 | 6: |
88 | BEGIN_FTR_SECTION | 88 | BEGIN_FTR_SECTION |
89 | b slb_finish_load | 89 | b slb_finish_load |
90 | END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT) | 90 | END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) |
91 | b slb_finish_load_1T | 91 | b slb_finish_load_1T |
92 | 92 | ||
93 | 0: /* user address: proto-VSID = context << 15 | ESID. First check | 93 | 0: /* user address: proto-VSID = context << 15 | ESID. First check |
@@ -138,11 +138,11 @@ END_FTR_SECTION_IFCLR(CPU_FTR_1T_SEGMENT) | |||
138 | ld r9,PACACONTEXTID(r13) | 138 | ld r9,PACACONTEXTID(r13) |
139 | BEGIN_FTR_SECTION | 139 | BEGIN_FTR_SECTION |
140 | cmpldi r10,0x1000 | 140 | cmpldi r10,0x1000 |
141 | END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) | 141 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) |
142 | rldimi r10,r9,USER_ESID_BITS,0 | 142 | rldimi r10,r9,USER_ESID_BITS,0 |
143 | BEGIN_FTR_SECTION | 143 | BEGIN_FTR_SECTION |
144 | bge slb_finish_load_1T | 144 | bge slb_finish_load_1T |
145 | END_FTR_SECTION_IFSET(CPU_FTR_1T_SEGMENT) | 145 | END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) |
146 | b slb_finish_load | 146 | b slb_finish_load |
147 | 147 | ||
148 | 8: /* invalid EA */ | 148 | 8: /* invalid EA */ |
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c index 446a01842a73..41e31642a86a 100644 --- a/arch/powerpc/mm/stab.c +++ b/arch/powerpc/mm/stab.c | |||
@@ -243,7 +243,7 @@ void __init stabs_alloc(void) | |||
243 | { | 243 | { |
244 | int cpu; | 244 | int cpu; |
245 | 245 | ||
246 | if (cpu_has_feature(CPU_FTR_SLB)) | 246 | if (mmu_has_feature(MMU_FTR_SLB)) |
247 | return; | 247 | return; |
248 | 248 | ||
249 | for_each_possible_cpu(cpu) { | 249 | for_each_possible_cpu(cpu) { |
diff --git a/arch/powerpc/mm/tlb_hash32.c b/arch/powerpc/mm/tlb_hash32.c index 690566b66e8e..27b863c14941 100644 --- a/arch/powerpc/mm/tlb_hash32.c +++ b/arch/powerpc/mm/tlb_hash32.c | |||
@@ -71,9 +71,6 @@ void tlb_flush(struct mmu_gather *tlb) | |||
71 | */ | 71 | */ |
72 | _tlbia(); | 72 | _tlbia(); |
73 | } | 73 | } |
74 | |||
75 | /* Push out batch of freed page tables */ | ||
76 | pte_free_finish(); | ||
77 | } | 74 | } |
78 | 75 | ||
79 | /* | 76 | /* |
diff --git a/arch/powerpc/mm/tlb_hash64.c b/arch/powerpc/mm/tlb_hash64.c index 1ec06576f619..31f18207970b 100644 --- a/arch/powerpc/mm/tlb_hash64.c +++ b/arch/powerpc/mm/tlb_hash64.c | |||
@@ -38,13 +38,11 @@ DEFINE_PER_CPU(struct ppc64_tlb_batch, ppc64_tlb_batch); | |||
38 | * needs to be flushed. This function will either perform the flush | 38 | * needs to be flushed. This function will either perform the flush |
39 | * immediately or will batch it up if the current CPU has an active | 39 | * immediately or will batch it up if the current CPU has an active |
40 | * batch on it. | 40 | * batch on it. |
41 | * | ||
42 | * Must be called from within some kind of spinlock/non-preempt region... | ||
43 | */ | 41 | */ |
44 | void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | 42 | void hpte_need_flush(struct mm_struct *mm, unsigned long addr, |
45 | pte_t *ptep, unsigned long pte, int huge) | 43 | pte_t *ptep, unsigned long pte, int huge) |
46 | { | 44 | { |
47 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); | 45 | struct ppc64_tlb_batch *batch = &get_cpu_var(ppc64_tlb_batch); |
48 | unsigned long vsid, vaddr; | 46 | unsigned long vsid, vaddr; |
49 | unsigned int psize; | 47 | unsigned int psize; |
50 | int ssize; | 48 | int ssize; |
@@ -99,6 +97,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | |||
99 | */ | 97 | */ |
100 | if (!batch->active) { | 98 | if (!batch->active) { |
101 | flush_hash_page(vaddr, rpte, psize, ssize, 0); | 99 | flush_hash_page(vaddr, rpte, psize, ssize, 0); |
100 | put_cpu_var(ppc64_tlb_batch); | ||
102 | return; | 101 | return; |
103 | } | 102 | } |
104 | 103 | ||
@@ -127,6 +126,7 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, | |||
127 | batch->index = ++i; | 126 | batch->index = ++i; |
128 | if (i >= PPC64_TLB_BATCH_NR) | 127 | if (i >= PPC64_TLB_BATCH_NR) |
129 | __flush_tlb_pending(batch); | 128 | __flush_tlb_pending(batch); |
129 | put_cpu_var(ppc64_tlb_batch); | ||
130 | } | 130 | } |
131 | 131 | ||
132 | /* | 132 | /* |
@@ -155,7 +155,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch) | |||
155 | 155 | ||
156 | void tlb_flush(struct mmu_gather *tlb) | 156 | void tlb_flush(struct mmu_gather *tlb) |
157 | { | 157 | { |
158 | struct ppc64_tlb_batch *tlbbatch = &__get_cpu_var(ppc64_tlb_batch); | 158 | struct ppc64_tlb_batch *tlbbatch = &get_cpu_var(ppc64_tlb_batch); |
159 | 159 | ||
160 | /* If there's a TLB batch pending, then we must flush it because the | 160 | /* If there's a TLB batch pending, then we must flush it because the |
161 | * pages are going to be freed and we really don't want to have a CPU | 161 | * pages are going to be freed and we really don't want to have a CPU |
@@ -164,8 +164,7 @@ void tlb_flush(struct mmu_gather *tlb) | |||
164 | if (tlbbatch->index) | 164 | if (tlbbatch->index) |
165 | __flush_tlb_pending(tlbbatch); | 165 | __flush_tlb_pending(tlbbatch); |
166 | 166 | ||
167 | /* Push out batch of freed page tables */ | 167 | put_cpu_var(ppc64_tlb_batch); |
168 | pte_free_finish(); | ||
169 | } | 168 | } |
170 | 169 | ||
171 | /** | 170 | /** |
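Dropping the "must be called from within some kind of spinlock" rule works because get_cpu_var() disables preemption for as long as the per-CPU batch is referenced; the trade-off is that every exit path, including the early non-batching return, now needs a matching put_cpu_var(), which is what the added calls provide. A kernel-style sketch of that pattern; my_batch, flush_one() and flush_batch() are illustrative, not the ppc64 code:

    #include <linux/percpu.h>

    #define MY_BATCH_NR 192

    struct my_batch {
        int active;
        int index;
        unsigned long entries[MY_BATCH_NR];
    };

    static DEFINE_PER_CPU(struct my_batch, my_batch);

    static void flush_one(unsigned long entry) { /* ... */ }
    static void flush_batch(struct my_batch *b) { b->index = 0; /* ... */ }

    static void queue_or_flush(unsigned long entry)
    {
        /* get_cpu_var() disables preemption so the per-CPU batch is stable */
        struct my_batch *batch = &get_cpu_var(my_batch);

        if (!batch->active) {
            flush_one(entry);
            put_cpu_var(my_batch);    /* every exit path must drop it */
            return;
        }

        batch->entries[batch->index++] = entry;
        if (batch->index >= MY_BATCH_NR)
            flush_batch(batch);
        put_cpu_var(my_batch);        /* re-enables preemption */
    }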
diff --git a/arch/powerpc/mm/tlb_low_64e.S b/arch/powerpc/mm/tlb_low_64e.S index 8b04c54e596f..af0892209417 100644 --- a/arch/powerpc/mm/tlb_low_64e.S +++ b/arch/powerpc/mm/tlb_low_64e.S | |||
@@ -138,8 +138,11 @@ | |||
138 | cmpldi cr0,r15,0 /* Check for user region */ | 138 | cmpldi cr0,r15,0 /* Check for user region */ |
139 | std r14,EX_TLB_ESR(r12) /* write crazy -1 to frame */ | 139 | std r14,EX_TLB_ESR(r12) /* write crazy -1 to frame */ |
140 | beq normal_tlb_miss | 140 | beq normal_tlb_miss |
141 | |||
142 | li r11,_PAGE_PRESENT|_PAGE_BAP_SX /* Base perm */ | ||
143 | oris r11,r11,_PAGE_ACCESSED@h | ||
141 | /* XXX replace the RMW cycles with immediate loads + writes */ | 144 | /* XXX replace the RMW cycles with immediate loads + writes */ |
142 | 1: mfspr r10,SPRN_MAS1 | 145 | mfspr r10,SPRN_MAS1 |
143 | cmpldi cr0,r15,8 /* Check for vmalloc region */ | 146 | cmpldi cr0,r15,8 /* Check for vmalloc region */ |
144 | rlwinm r10,r10,0,16,1 /* Clear TID */ | 147 | rlwinm r10,r10,0,16,1 /* Clear TID */ |
145 | mtspr SPRN_MAS1,r10 | 148 | mtspr SPRN_MAS1,r10 |
@@ -189,7 +192,7 @@ normal_tlb_miss: | |||
189 | or r10,r15,r14 | 192 | or r10,r15,r14 |
190 | 193 | ||
191 | BEGIN_MMU_FTR_SECTION | 194 | BEGIN_MMU_FTR_SECTION |
192 | /* Set the TLB reservation and seach for existing entry. Then load | 195 | /* Set the TLB reservation and search for existing entry. Then load |
193 | * the entry. | 196 | * the entry. |
194 | */ | 197 | */ |
195 | PPC_TLBSRX_DOT(0,r16) | 198 | PPC_TLBSRX_DOT(0,r16) |
@@ -422,13 +425,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_TLBRSRV) | |||
422 | 425 | ||
423 | virt_page_table_tlb_miss_fault: | 426 | virt_page_table_tlb_miss_fault: |
424 | /* If we fault here, things are a little bit tricky. We need to call | 427 | /* If we fault here, things are a little bit tricky. We need to call |
425 | * either data or instruction store fault, and we need to retreive | 428 | * either data or instruction store fault, and we need to retrieve |
426 | * the original fault address and ESR (for data). | 429 | * the original fault address and ESR (for data). |
427 | * | 430 | * |
428 | * The thing is, we know that in normal circumstances, this is | 431 | * The thing is, we know that in normal circumstances, this is |
429 | * always called as a second level tlb miss for SW load or as a first | 432 | * always called as a second level tlb miss for SW load or as a first |
430 | * level TLB miss for HW load, so we should be able to peek at the | 433 | * level TLB miss for HW load, so we should be able to peek at the |
431 | * relevant informations in the first exception frame in the PACA. | 434 | * relevant information in the first exception frame in the PACA. |
432 | * | 435 | * |
433 | * However, we do need to double check that, because we may just hit | 436 | * However, we do need to double check that, because we may just hit |
434 | * a stray kernel pointer or a userland attack trying to hit those | 437 | * a stray kernel pointer or a userland attack trying to hit those |
diff --git a/arch/powerpc/mm/tlb_nohash.c b/arch/powerpc/mm/tlb_nohash.c index fe391e942521..0bdad3aecc67 100644 --- a/arch/powerpc/mm/tlb_nohash.c +++ b/arch/powerpc/mm/tlb_nohash.c | |||
@@ -299,9 +299,6 @@ EXPORT_SYMBOL(flush_tlb_range); | |||
299 | void tlb_flush(struct mmu_gather *tlb) | 299 | void tlb_flush(struct mmu_gather *tlb) |
300 | { | 300 | { |
301 | flush_tlb_mm(tlb->mm); | 301 | flush_tlb_mm(tlb->mm); |
302 | |||
303 | /* Push out batch of freed page tables */ | ||
304 | pte_free_finish(); | ||
305 | } | 302 | } |
306 | 303 | ||
307 | /* | 304 | /* |
@@ -349,11 +346,47 @@ void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address) | |||
349 | 346 | ||
350 | static void setup_page_sizes(void) | 347 | static void setup_page_sizes(void) |
351 | { | 348 | { |
352 | unsigned int tlb0cfg = mfspr(SPRN_TLB0CFG); | 349 | unsigned int tlb0cfg; |
353 | unsigned int tlb0ps = mfspr(SPRN_TLB0PS); | 350 | unsigned int tlb0ps; |
354 | unsigned int eptcfg = mfspr(SPRN_EPTCFG); | 351 | unsigned int eptcfg; |
355 | int i, psize; | 352 | int i, psize; |
356 | 353 | ||
354 | #ifdef CONFIG_PPC_FSL_BOOK3E | ||
355 | unsigned int mmucfg = mfspr(SPRN_MMUCFG); | ||
356 | |||
357 | if (((mmucfg & MMUCFG_MAVN) == MMUCFG_MAVN_V1) && | ||
358 | (mmu_has_feature(MMU_FTR_TYPE_FSL_E))) { | ||
359 | unsigned int tlb1cfg = mfspr(SPRN_TLB1CFG); | ||
360 | unsigned int min_pg, max_pg; | ||
361 | |||
362 | min_pg = (tlb1cfg & TLBnCFG_MINSIZE) >> TLBnCFG_MINSIZE_SHIFT; | ||
363 | max_pg = (tlb1cfg & TLBnCFG_MAXSIZE) >> TLBnCFG_MAXSIZE_SHIFT; | ||
364 | |||
365 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { | ||
366 | struct mmu_psize_def *def; | ||
367 | unsigned int shift; | ||
368 | |||
369 | def = &mmu_psize_defs[psize]; | ||
370 | shift = def->shift; | ||
371 | |||
372 | if (shift == 0) | ||
373 | continue; | ||
374 | |||
375 | /* adjust to be in terms of 4^shift Kb */ | ||
376 | shift = (shift - 10) >> 1; | ||
377 | |||
378 | if ((shift >= min_pg) && (shift <= max_pg)) | ||
379 | def->flags |= MMU_PAGE_SIZE_DIRECT; | ||
380 | } | ||
381 | |||
382 | goto no_indirect; | ||
383 | } | ||
384 | #endif | ||
385 | |||
386 | tlb0cfg = mfspr(SPRN_TLB0CFG); | ||
387 | tlb0ps = mfspr(SPRN_TLB0PS); | ||
388 | eptcfg = mfspr(SPRN_EPTCFG); | ||
389 | |||
357 | /* Look for supported direct sizes */ | 390 | /* Look for supported direct sizes */ |
358 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { | 391 | for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) { |
359 | struct mmu_psize_def *def = &mmu_psize_defs[psize]; | 392 | struct mmu_psize_def *def = &mmu_psize_defs[psize]; |
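In the FSL Book3E path added above, the MINSIZE/MAXSIZE fields read from TLB1CFG bound the directly supported page sizes, and those sizes are encoded as powers of 4 KB, which is what the (shift - 10) >> 1 conversion produces from a log2-bytes page shift. A standalone worked example of just that conversion (the shifts are illustrative):

    #include <stdio.h>

    /* Convert a page-size shift (log2 bytes) into the "power of 4 KB"
     * encoding used by TLBnCFG: 4 KB -> 1, 16 KB -> 2, 64 KB -> 3, ...
     */
    static unsigned int to_pow4k(unsigned int shift)
    {
        return (shift - 10) >> 1;
    }

    int main(void)
    {
        unsigned int shifts[] = { 12, 14, 16, 24 };   /* 4K, 16K, 64K, 16M */
        unsigned int i;

        for (i = 0; i < sizeof(shifts) / sizeof(shifts[0]); i++)
            printf("shift %2u -> 4^%u KB\n", shifts[i], to_pow4k(shifts[i]));
        return 0;
    }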
@@ -505,10 +538,26 @@ static void __early_init_mmu(int boot_cpu) | |||
505 | */ | 538 | */ |
506 | linear_map_top = memblock_end_of_DRAM(); | 539 | linear_map_top = memblock_end_of_DRAM(); |
507 | 540 | ||
541 | #ifdef CONFIG_PPC_FSL_BOOK3E | ||
542 | if (mmu_has_feature(MMU_FTR_TYPE_FSL_E)) { | ||
543 | unsigned int num_cams; | ||
544 | |||
545 | /* use a quarter of the TLBCAM for bolted linear map */ | ||
546 | num_cams = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) / 4; | ||
547 | linear_map_top = map_mem_in_cams(linear_map_top, num_cams); | ||
548 | |||
549 | /* limit memory so we don't have linear faults */ | ||
550 | memblock_enforce_memory_limit(linear_map_top); | ||
551 | memblock_analyze(); | ||
552 | } | ||
553 | #endif | ||
554 | |||
508 | /* A sync won't hurt us after mucking around with | 555 | /* A sync won't hurt us after mucking around with |
509 | * the MMU configuration | 556 | * the MMU configuration |
510 | */ | 557 | */ |
511 | mb(); | 558 | mb(); |
559 | |||
560 | memblock_set_current_limit(linear_map_top); | ||
512 | } | 561 | } |
513 | 562 | ||
514 | void __init early_init_mmu(void) | 563 | void __init early_init_mmu(void) |
@@ -521,4 +570,18 @@ void __cpuinit early_init_mmu_secondary(void) | |||
521 | __early_init_mmu(0); | 570 | __early_init_mmu(0); |
522 | } | 571 | } |
523 | 572 | ||
573 | void setup_initial_memory_limit(phys_addr_t first_memblock_base, | ||
574 | phys_addr_t first_memblock_size) | ||
575 | { | ||
576 | /* On Embedded 64-bit, we adjust the RMA size to match | ||
577 | * the bolted TLB entry. We know for now that only 1G | ||
578 | * entries are supported though that may eventually | ||
579 | * change. We crop it to the size of the first MEMBLOCK to | ||
580 | * avoid going over total available memory just in case... | ||
581 | */ | ||
582 | ppc64_rma_size = min_t(u64, first_memblock_size, 0x40000000); | ||
583 | |||
584 | /* Finally limit subsequent allocations */ | ||
585 | memblock_set_current_limit(first_memblock_base + ppc64_rma_size); | ||
586 | } | ||
524 | #endif /* CONFIG_PPC64 */ | 587 | #endif /* CONFIG_PPC64 */ |
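The new setup_initial_memory_limit() caps ppc64_rma_size at the smaller of the first memblock and 1 GB (the bolted TLB entry size supported for now), then clamps memblock so early allocations stay reachable through that bolted mapping. A standalone sketch of the clamp with illustrative numbers:

    #include <stdint.h>
    #include <stdio.h>

    #define MIN(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        /* Illustrative values: a 2 GB first memblock, 1 GB bolted entry. */
        uint64_t first_memblock_base = 0;
        uint64_t first_memblock_size = 0x80000000ULL;                 /* 2 GB   */
        uint64_t rma_size = MIN(first_memblock_size, 0x40000000ULL);  /* cap 1 GB */

        /* Early allocations must stay below base + RMA size. */
        printf("early allocation limit: 0x%llx\n",
               (unsigned long long)(first_memblock_base + rma_size));
        return 0;
    }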
diff --git a/arch/powerpc/mm/tlb_nohash_low.S b/arch/powerpc/mm/tlb_nohash_low.S index b9d9fed8f36e..7c63c0ed4f1b 100644 --- a/arch/powerpc/mm/tlb_nohash_low.S +++ b/arch/powerpc/mm/tlb_nohash_low.S | |||
@@ -189,6 +189,13 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x) | |||
189 | blr | 189 | blr |
190 | 190 | ||
191 | #ifdef CONFIG_PPC_47x | 191 | #ifdef CONFIG_PPC_47x |
192 | |||
193 | /* | ||
194 | * 47x variant of icbt | ||
195 | */ | ||
196 | # define ICBT(CT,RA,RB) \ | ||
197 | .long 0x7c00002c | ((CT) << 21) | ((RA) << 16) | ((RB) << 11) | ||
198 | |||
192 | /* | 199 | /* |
193 | * _tlbivax_bcast is only on 47x. We don't bother doing a runtime | 200 | * _tlbivax_bcast is only on 47x. We don't bother doing a runtime |
194 | * check though, it will blow up soon enough if we mistakenly try | 201 | * check though, it will blow up soon enough if we mistakenly try |
@@ -206,7 +213,35 @@ _GLOBAL(_tlbivax_bcast) | |||
206 | isync | 213 | isync |
207 | eieio | 214 | eieio |
208 | tlbsync | 215 | tlbsync |
216 | BEGIN_FTR_SECTION | ||
217 | b 1f | ||
218 | END_FTR_SECTION_IFSET(CPU_FTR_476_DD2) | ||
219 | sync | ||
220 | wrtee r10 | ||
221 | blr | ||
222 | /* | ||
223 | * DD2 HW could hang if an instruction fetch happens before msync completes. | ||
224 | * Touch enough instruction cache lines to ensure cache hits | ||
225 | */ | ||
226 | 1: mflr r9 | ||
227 | bl 2f | ||
228 | 2: mflr r6 | ||
229 | li r7,32 | ||
230 | ICBT(0,r6,r7) /* touch next cache line */ | ||
231 | add r6,r6,r7 | ||
232 | ICBT(0,r6,r7) /* touch next cache line */ | ||
233 | add r6,r6,r7 | ||
234 | ICBT(0,r6,r7) /* touch next cache line */ | ||
209 | sync | 235 | sync |
236 | nop | ||
237 | nop | ||
238 | nop | ||
239 | nop | ||
240 | nop | ||
241 | nop | ||
242 | nop | ||
243 | nop | ||
244 | mtlr r9 | ||
210 | wrtee r10 | 245 | wrtee r10 |
211 | blr | 246 | blr |
212 | #endif /* CONFIG_PPC_47x */ | 247 | #endif /* CONFIG_PPC_47x */ |
@@ -367,7 +402,7 @@ _GLOBAL(set_context) | |||
367 | #error Unsupported processor type ! | 402 | #error Unsupported processor type ! |
368 | #endif | 403 | #endif |
369 | 404 | ||
370 | #if defined(CONFIG_FSL_BOOKE) | 405 | #if defined(CONFIG_PPC_FSL_BOOK3E) |
371 | /* | 406 | /* |
372 | * extern void loadcam_entry(unsigned int index) | 407 | * extern void loadcam_entry(unsigned int index) |
373 | * | 408 | * |
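The ICBT(CT,RA,RB) macro added in this file hand-assembles the 476 icbt instruction (base opcode 0x7c00002c) because the assembler in use may not accept the mnemonic; CT, RA and RB are shifted into bits 21, 16 and 11. A standalone check of the field packing, using the same register numbers as the DD2 workaround:

    #include <stdint.h>
    #include <stdio.h>

    /* Same packing as the ICBT() macro in tlb_nohash_low.S. */
    static uint32_t icbt(unsigned int ct, unsigned int ra, unsigned int rb)
    {
        return 0x7c00002cu | (ct << 21) | (ra << 16) | (rb << 11);
    }

    int main(void)
    {
        /* ICBT(0, r6, r7), i.e. touch the cache line at r6 + r7 */
        printf("0x%08x\n", icbt(0, 6, 7));    /* 0x7c06382c */
        return 0;
    }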
diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile index e219ca43962d..73456c4cec28 100644 --- a/arch/powerpc/oprofile/Makefile +++ b/arch/powerpc/oprofile/Makefile | |||
@@ -1,8 +1,6 @@ | |||
1 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror | 1 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror |
2 | 2 | ||
3 | ifeq ($(CONFIG_PPC64),y) | 3 | ccflags-$(CONFIG_PPC64) := -mno-minimal-toc |
4 | EXTRA_CFLAGS += -mno-minimal-toc | ||
5 | endif | ||
6 | 4 | ||
7 | obj-$(CONFIG_OPROFILE) += oprofile.o | 5 | obj-$(CONFIG_OPROFILE) += oprofile.o |
8 | 6 | ||
diff --git a/arch/powerpc/oprofile/backtrace.c b/arch/powerpc/oprofile/backtrace.c index b4278cfd1f80..f75301f2c85f 100644 --- a/arch/powerpc/oprofile/backtrace.c +++ b/arch/powerpc/oprofile/backtrace.c | |||
@@ -105,7 +105,7 @@ void op_powerpc_backtrace(struct pt_regs * const regs, unsigned int depth) | |||
105 | } | 105 | } |
106 | } else { | 106 | } else { |
107 | #ifdef CONFIG_PPC64 | 107 | #ifdef CONFIG_PPC64 |
108 | if (!test_thread_flag(TIF_32BIT)) { | 108 | if (!is_32bit_task()) { |
109 | while (depth--) { | 109 | while (depth--) { |
110 | sp = user_getsp64(sp, first_frame); | 110 | sp = user_getsp64(sp, first_frame); |
111 | if (!sp) | 111 | if (!sp) |
diff --git a/arch/powerpc/oprofile/op_model_cell.c b/arch/powerpc/oprofile/op_model_cell.c index 7fd90d02d8c6..cb515cff745c 100644 --- a/arch/powerpc/oprofile/op_model_cell.c +++ b/arch/powerpc/oprofile/op_model_cell.c | |||
@@ -67,7 +67,7 @@ | |||
67 | 67 | ||
68 | #define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */ | 68 | #define MAX_SPU_COUNT 0xFFFFFF /* maximum 24 bit LFSR value */ |
69 | 69 | ||
70 | /* Minumum HW interval timer setting to send value to trace buffer is 10 cycle. | 70 | /* Minimum HW interval timer setting to send value to trace buffer is 10 cycle. |
71 | * To configure counter to send value every N cycles set counter to | 71 | * To configure counter to send value every N cycles set counter to |
72 | * 2^32 - 1 - N. | 72 | * 2^32 - 1 - N. |
73 | */ | 73 | */ |
@@ -1469,8 +1469,8 @@ static int cell_global_start(struct op_counter_config *ctr) | |||
1469 | * The pm_interval register is setup to write the SPU PC value into the | 1469 | * The pm_interval register is setup to write the SPU PC value into the |
1470 | * trace buffer at the maximum rate possible. The trace buffer is configured | 1470 | * trace buffer at the maximum rate possible. The trace buffer is configured |
1471 | * to store the PCs, wrapping when it is full. The performance counter is | 1471 | * to store the PCs, wrapping when it is full. The performance counter is |
1472 | * intialized to the max hardware count minus the number of events, N, between | 1472 | * initialized to the max hardware count minus the number of events, N, between |
1473 | * samples. Once the N events have occured, a HW counter overflow occurs | 1473 | * samples. Once the N events have occurred, a HW counter overflow occurs |
1474 | * causing the generation of a HW counter interrupt which also stops the | 1474 | * causing the generation of a HW counter interrupt which also stops the |
1475 | * writing of the SPU PC values to the trace buffer. Hence the last PC | 1475 | * writing of the SPU PC values to the trace buffer. Hence the last PC |
1476 | * written to the trace buffer is the SPU PC that we want. Unfortunately, | 1476 | * written to the trace buffer is the SPU PC that we want. Unfortunately, |
@@ -1656,7 +1656,7 @@ static void cell_handle_interrupt_ppu(struct pt_regs *regs, | |||
1656 | * The counters were frozen by the interrupt. | 1656 | * The counters were frozen by the interrupt. |
1657 | * Reenable the interrupt and restart the counters. | 1657 | * Reenable the interrupt and restart the counters. |
1658 | * If there was a race between the interrupt handler and | 1658 | * If there was a race between the interrupt handler and |
1659 | * the virtual counter routine. The virutal counter | 1659 | * the virtual counter routine. The virtual counter |
1660 | * routine may have cleared the interrupts. Hence must | 1660 | * routine may have cleared the interrupts. Hence must |
1661 | * use the virt_cntr_inter_mask to re-enable the interrupts. | 1661 | * use the virt_cntr_inter_mask to re-enable the interrupts. |
1662 | */ | 1662 | */ |
diff --git a/arch/powerpc/oprofile/op_model_fsl_emb.c b/arch/powerpc/oprofile/op_model_fsl_emb.c index 62312abffa28..d4e6507277b5 100644 --- a/arch/powerpc/oprofile/op_model_fsl_emb.c +++ b/arch/powerpc/oprofile/op_model_fsl_emb.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Freescale Embedded oprofile support, based on ppc64 oprofile support | 2 | * Freescale Embedded oprofile support, based on ppc64 oprofile support |
3 | * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM | 3 | * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM |
4 | * | 4 | * |
5 | * Copyright (c) 2004 Freescale Semiconductor, Inc | 5 | * Copyright (c) 2004, 2010 Freescale Semiconductor, Inc |
6 | * | 6 | * |
7 | * Author: Andy Fleming | 7 | * Author: Andy Fleming |
8 | * Maintainer: Kumar Gala <galak@kernel.crashing.org> | 8 | * Maintainer: Kumar Gala <galak@kernel.crashing.org> |
@@ -321,9 +321,6 @@ static void fsl_emb_handle_interrupt(struct pt_regs *regs, | |||
321 | int val; | 321 | int val; |
322 | int i; | 322 | int i; |
323 | 323 | ||
324 | /* set the PMM bit (see comment below) */ | ||
325 | mtmsr(mfmsr() | MSR_PMM); | ||
326 | |||
327 | pc = regs->nip; | 324 | pc = regs->nip; |
328 | is_kernel = is_kernel_addr(pc); | 325 | is_kernel = is_kernel_addr(pc); |
329 | 326 | ||
@@ -340,9 +337,13 @@ static void fsl_emb_handle_interrupt(struct pt_regs *regs, | |||
340 | } | 337 | } |
341 | 338 | ||
342 | /* The freeze bit was set by the interrupt. */ | 339 | /* The freeze bit was set by the interrupt. */ |
343 | /* Clear the freeze bit, and reenable the interrupt. | 340 | /* Clear the freeze bit, and reenable the interrupt. The |
344 | * The counters won't actually start until the rfi clears | 341 | * counters won't actually start until the rfi clears the PMM |
345 | * the PMM bit */ | 342 | * bit. The PMM bit should not be set until after the interrupt |
343 | * is cleared to avoid it getting lost in some hypervisor | ||
344 | * environments. | ||
345 | */ | ||
346 | mtmsr(mfmsr() | MSR_PMM); | ||
346 | pmc_start_ctrs(1); | 347 | pmc_start_ctrs(1); |
347 | } | 348 | } |
348 | 349 | ||
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c index 80774092db77..e6bec74be131 100644 --- a/arch/powerpc/oprofile/op_model_power4.c +++ b/arch/powerpc/oprofile/op_model_power4.c | |||
@@ -207,7 +207,7 @@ static unsigned long get_pc(struct pt_regs *regs) | |||
207 | unsigned long mmcra; | 207 | unsigned long mmcra; |
208 | unsigned long slot; | 208 | unsigned long slot; |
209 | 209 | ||
210 | /* Cant do much about it */ | 210 | /* Can't do much about it */ |
211 | if (!cur_cpu_spec->oprofile_mmcra_sihv) | 211 | if (!cur_cpu_spec->oprofile_mmcra_sihv) |
212 | return pc; | 212 | return pc; |
213 | 213 | ||
@@ -261,6 +261,28 @@ static int get_kernel(unsigned long pc, unsigned long mmcra) | |||
261 | return is_kernel; | 261 | return is_kernel; |
262 | } | 262 | } |
263 | 263 | ||
264 | static bool pmc_overflow(unsigned long val) | ||
265 | { | ||
266 | if ((int)val < 0) | ||
267 | return true; | ||
268 | |||
269 | /* | ||
270 | * Events on POWER7 can roll back if a speculative event doesn't | ||
271 | * eventually complete. Unfortunately in some rare cases they will | ||
272 | * raise a performance monitor exception. We need to catch this to | ||
273 | * ensure we reset the PMC. In all cases the PMC will be 256 or less | ||
274 | * cycles from overflow. | ||
275 | * | ||
276 | * We only do this if the first pass fails to find any overflowing | ||
277 | * PMCs because a user might set a period of less than 256 and we | ||
278 | * don't want to mistakenly reset them. | ||
279 | */ | ||
280 | if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) | ||
281 | return true; | ||
282 | |||
283 | return false; | ||
284 | } | ||
285 | |||
264 | static void power4_handle_interrupt(struct pt_regs *regs, | 286 | static void power4_handle_interrupt(struct pt_regs *regs, |
265 | struct op_counter_config *ctr) | 287 | struct op_counter_config *ctr) |
266 | { | 288 | { |
@@ -281,7 +303,7 @@ static void power4_handle_interrupt(struct pt_regs *regs, | |||
281 | 303 | ||
282 | for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) { | 304 | for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) { |
283 | val = classic_ctr_read(i); | 305 | val = classic_ctr_read(i); |
284 | if (val < 0) { | 306 | if (pmc_overflow(val)) { |
285 | if (oprofile_running && ctr[i].enabled) { | 307 | if (oprofile_running && ctr[i].enabled) { |
286 | oprofile_add_ext_sample(pc, regs, i, is_kernel); | 308 | oprofile_add_ext_sample(pc, regs, i, is_kernel); |
287 | classic_ctr_write(i, reset_value[i]); | 309 | classic_ctr_write(i, reset_value[i]); |
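pmc_overflow() keeps the old "sign bit set" overflow test and additionally, on POWER7, treats a counter within 256 cycles of 0x80000000 as overflowed, catching speculative events that rolled back after the performance monitor exception was raised. A standalone model of the test, where is_power7 stands in for the kernel's __is_processor(PV_POWER7) check:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool pmc_overflow(uint32_t val, bool is_power7)
    {
        if ((int32_t)val < 0)        /* sign bit set: the counter wrapped */
            return true;

        /* POWER7 rollback window: within 256 cycles of 0x80000000 */
        if (is_power7 && (0x80000000u - val) <= 256)
            return true;

        return false;
    }

    int main(void)
    {
        printf("%d %d %d\n",
               pmc_overflow(0x80000001u, false),   /* 1: wrapped            */
               pmc_overflow(0x7fffff80u, true),    /* 1: 128 from wrapping  */
               pmc_overflow(0x7fffff80u, false));  /* 0: not POWER7         */
        return 0;
    }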
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index b72176434ebe..d733d7ca939c 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig | |||
@@ -57,6 +57,8 @@ config KILAUEA | |||
57 | select 405EX | 57 | select 405EX |
58 | select PPC40x_SIMPLE | 58 | select PPC40x_SIMPLE |
59 | select PPC4xx_PCI_EXPRESS | 59 | select PPC4xx_PCI_EXPRESS |
60 | select PCI_MSI | ||
61 | select PPC4xx_MSI | ||
60 | help | 62 | help |
61 | This option enables support for the AMCC PPC405EX evaluation board. | 63 | This option enables support for the AMCC PPC405EX evaluation board. |
62 | 64 | ||
diff --git a/arch/powerpc/platforms/40x/ppc40x_simple.c b/arch/powerpc/platforms/40x/ppc40x_simple.c index 546bbc229d19..2521d93ef136 100644 --- a/arch/powerpc/platforms/40x/ppc40x_simple.c +++ b/arch/powerpc/platforms/40x/ppc40x_simple.c | |||
@@ -50,7 +50,7 @@ machine_device_initcall(ppc40x_simple, ppc40x_device_probe); | |||
50 | * Again, if your board needs to do things differently then create a | 50 | * Again, if your board needs to do things differently then create a |
51 | * board.c file for it rather than adding it to this list. | 51 | * board.c file for it rather than adding it to this list. |
52 | */ | 52 | */ |
53 | static char *board[] __initdata = { | 53 | static const char *board[] __initdata = { |
54 | "amcc,acadia", | 54 | "amcc,acadia", |
55 | "amcc,haleakala", | 55 | "amcc,haleakala", |
56 | "amcc,kilauea", | 56 | "amcc,kilauea", |
@@ -60,14 +60,9 @@ static char *board[] __initdata = { | |||
60 | 60 | ||
61 | static int __init ppc40x_probe(void) | 61 | static int __init ppc40x_probe(void) |
62 | { | 62 | { |
63 | unsigned long root = of_get_flat_dt_root(); | 63 | if (of_flat_dt_match(of_get_flat_dt_root(), board)) { |
64 | int i = 0; | 64 | ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC); |
65 | 65 | return 1; | |
66 | for (i = 0; i < ARRAY_SIZE(board); i++) { | ||
67 | if (of_flat_dt_is_compatible(root, board[i])) { | ||
68 | ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC); | ||
69 | return 1; | ||
70 | } | ||
71 | } | 66 | } |
72 | 67 | ||
73 | return 0; | 68 | return 0; |
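The probe now passes the NULL-terminated board[] list to of_flat_dt_match() rather than looping over of_flat_dt_is_compatible() itself; the same simplification shows up again in the 512x and 52xx probes below. A simplified user-space model of the matching, where match_board() is invented and compares a single compatible string instead of the node's full compatible list:

    #include <stdio.h>
    #include <string.h>

    /* Return 1 if node_compat appears in the NULL-terminated board list. */
    static int match_board(const char *node_compat, const char *const *board)
    {
        int i;

        for (i = 0; board[i]; i++)
            if (!strcmp(node_compat, board[i]))
                return 1;
        return 0;
    }

    int main(void)
    {
        static const char *const board[] = {
            "amcc,acadia", "amcc,haleakala", "amcc,kilauea", NULL,
        };

        printf("%d\n", match_board("amcc,kilauea", board));      /* 1 */
        printf("%d\n", match_board("amcc,canyonlands", board));  /* 0 */
        return 0;
    }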
diff --git a/arch/powerpc/platforms/44x/44x.h b/arch/powerpc/platforms/44x/44x.h index dbc4d2b4301a..63f703ecd23c 100644 --- a/arch/powerpc/platforms/44x/44x.h +++ b/arch/powerpc/platforms/44x/44x.h | |||
@@ -4,4 +4,8 @@ | |||
4 | extern u8 as1_readb(volatile u8 __iomem *addr); | 4 | extern u8 as1_readb(volatile u8 __iomem *addr); |
5 | extern void as1_writeb(u8 data, volatile u8 __iomem *addr); | 5 | extern void as1_writeb(u8 data, volatile u8 __iomem *addr); |
6 | 6 | ||
7 | #define GPIO0_OSRH 0xC | ||
8 | #define GPIO0_TSRH 0x14 | ||
9 | #define GPIO0_ISR1H 0x34 | ||
10 | |||
7 | #endif /* __POWERPC_PLATFORMS_44X_44X_H */ | 11 | #endif /* __POWERPC_PLATFORMS_44X_44X_H */ |
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index 69d668c072ae..e958b6f48ec2 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig | |||
@@ -17,6 +17,16 @@ config BAMBOO | |||
17 | help | 17 | help |
18 | This option enables support for the IBM PPC440EP evaluation board. | 18 | This option enables support for the IBM PPC440EP evaluation board. |
19 | 19 | ||
20 | config BLUESTONE | ||
21 | bool "Bluestone" | ||
22 | depends on 44x | ||
23 | default n | ||
24 | select PPC44x_SIMPLE | ||
25 | select APM821xx | ||
26 | select IBM_NEW_EMAC_RGMII | ||
27 | help | ||
28 | This option enables support for the APM APM821xx Evaluation board. | ||
29 | |||
20 | config EBONY | 30 | config EBONY |
21 | bool "Ebony" | 31 | bool "Ebony" |
22 | depends on 44x | 32 | depends on 44x |
@@ -64,6 +74,8 @@ config KATMAI | |||
64 | select 440SPe | 74 | select 440SPe |
65 | select PCI | 75 | select PCI |
66 | select PPC4xx_PCI_EXPRESS | 76 | select PPC4xx_PCI_EXPRESS |
77 | select PCI_MSI | ||
78 | select PPC4xx_MSI | ||
67 | help | 79 | help |
68 | This option enables support for the AMCC PPC440SPe evaluation board. | 80 | This option enables support for the AMCC PPC440SPe evaluation board. |
69 | 81 | ||
@@ -105,10 +117,11 @@ config CANYONLANDS | |||
105 | bool "Canyonlands" | 117 | bool "Canyonlands" |
106 | depends on 44x | 118 | depends on 44x |
107 | default n | 119 | default n |
108 | select PPC44x_SIMPLE | ||
109 | select 460EX | 120 | select 460EX |
110 | select PCI | 121 | select PCI |
111 | select PPC4xx_PCI_EXPRESS | 122 | select PPC4xx_PCI_EXPRESS |
123 | select PCI_MSI | ||
124 | select PPC4xx_MSI | ||
112 | select IBM_NEW_EMAC_RGMII | 125 | select IBM_NEW_EMAC_RGMII |
113 | select IBM_NEW_EMAC_ZMII | 126 | select IBM_NEW_EMAC_ZMII |
114 | help | 127 | help |
@@ -135,6 +148,8 @@ config REDWOOD | |||
135 | select 460SX | 148 | select 460SX |
136 | select PCI | 149 | select PCI |
137 | select PPC4xx_PCI_EXPRESS | 150 | select PPC4xx_PCI_EXPRESS |
151 | select PCI_MSI | ||
152 | select PPC4xx_MSI | ||
138 | help | 153 | help |
139 | This option enables support for the AMCC PPC460SX Redwood board. | 154 | This option enables support for the AMCC PPC460SX Redwood board. |
140 | 155 | ||
@@ -293,6 +308,12 @@ config 460SX | |||
293 | select IBM_NEW_EMAC_ZMII | 308 | select IBM_NEW_EMAC_ZMII |
294 | select IBM_NEW_EMAC_TAH | 309 | select IBM_NEW_EMAC_TAH |
295 | 310 | ||
311 | config APM821xx | ||
312 | bool | ||
313 | select PPC_FPU | ||
314 | select IBM_NEW_EMAC_EMAC4 | ||
315 | select IBM_NEW_EMAC_TAH | ||
316 | |||
296 | # 44x errata/workaround config symbols, selected by the CPU models above | 317 | # 44x errata/workaround config symbols, selected by the CPU models above |
297 | config IBM440EP_ERR42 | 318 | config IBM440EP_ERR42 |
298 | bool | 319 | bool |
diff --git a/arch/powerpc/platforms/44x/Makefile b/arch/powerpc/platforms/44x/Makefile index 82ff326e0795..553db6007217 100644 --- a/arch/powerpc/platforms/44x/Makefile +++ b/arch/powerpc/platforms/44x/Makefile | |||
@@ -1,4 +1,7 @@ | |||
1 | obj-$(CONFIG_44x) := misc_44x.o idle.o | 1 | obj-$(CONFIG_44x) += misc_44x.o |
2 | ifneq ($(CONFIG_PPC4xx_CPM),y) | ||
3 | obj-$(CONFIG_44x) += idle.o | ||
4 | endif | ||
2 | obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o | 5 | obj-$(CONFIG_PPC44x_SIMPLE) += ppc44x_simple.o |
3 | obj-$(CONFIG_EBONY) += ebony.o | 6 | obj-$(CONFIG_EBONY) += ebony.o |
4 | obj-$(CONFIG_SAM440EP) += sam440ep.o | 7 | obj-$(CONFIG_SAM440EP) += sam440ep.o |
@@ -6,3 +9,4 @@ obj-$(CONFIG_WARP) += warp.o | |||
6 | obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o | 9 | obj-$(CONFIG_XILINX_VIRTEX_5_FXT) += virtex.o |
7 | obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o | 10 | obj-$(CONFIG_XILINX_ML510) += virtex_ml510.o |
8 | obj-$(CONFIG_ISS4xx) += iss4xx.o | 11 | obj-$(CONFIG_ISS4xx) += iss4xx.o |
12 | obj-$(CONFIG_CANYONLANDS)+= canyonlands.o | ||
diff --git a/arch/powerpc/platforms/44x/canyonlands.c b/arch/powerpc/platforms/44x/canyonlands.c new file mode 100644 index 000000000000..afc5e8ea3775 --- /dev/null +++ b/arch/powerpc/platforms/44x/canyonlands.c | |||
@@ -0,0 +1,134 @@ | |||
1 | /* | ||
2 | * This contains platform specific code for APM PPC460EX based Canyonlands | ||
3 | * board. | ||
4 | * | ||
5 | * Copyright (c) 2010, Applied Micro Circuits Corporation | ||
6 | * Author: Rupjyoti Sarmah <rsarmah@apm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License as | ||
10 | * published by the Free Software Foundation; either version 2 of | ||
11 | * the License, or (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, | ||
21 | * MA 02111-1307 USA | ||
22 | * | ||
23 | */ | ||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <asm/pci-bridge.h> | ||
27 | #include <asm/ppc4xx.h> | ||
28 | #include <asm/udbg.h> | ||
29 | #include <asm/uic.h> | ||
30 | #include <linux/of_platform.h> | ||
31 | #include <linux/delay.h> | ||
32 | #include "44x.h" | ||
33 | |||
34 | #define BCSR_USB_EN 0x11 | ||
35 | |||
36 | static __initdata struct of_device_id ppc460ex_of_bus[] = { | ||
37 | { .compatible = "ibm,plb4", }, | ||
38 | { .compatible = "ibm,opb", }, | ||
39 | { .compatible = "ibm,ebc", }, | ||
40 | { .compatible = "simple-bus", }, | ||
41 | {}, | ||
42 | }; | ||
43 | |||
44 | static int __init ppc460ex_device_probe(void) | ||
45 | { | ||
46 | of_platform_bus_probe(NULL, ppc460ex_of_bus, NULL); | ||
47 | |||
48 | return 0; | ||
49 | } | ||
50 | machine_device_initcall(canyonlands, ppc460ex_device_probe); | ||
51 | |||
52 | /* Using this code only for the Canyonlands board. */ | ||
53 | |||
54 | static int __init ppc460ex_probe(void) | ||
55 | { | ||
56 | unsigned long root = of_get_flat_dt_root(); | ||
57 | if (of_flat_dt_is_compatible(root, "amcc,canyonlands")) { | ||
58 | ppc_pci_set_flags(PPC_PCI_REASSIGN_ALL_RSRC); | ||
59 | return 1; | ||
60 | } | ||
61 | return 0; | ||
62 | } | ||
63 | |||
64 | /* USB PHY fixup code on Canyonlands kit. */ | ||
65 | |||
66 | static int __init ppc460ex_canyonlands_fixup(void) | ||
67 | { | ||
68 | u8 __iomem *bcsr ; | ||
69 | void __iomem *vaddr; | ||
70 | struct device_node *np; | ||
71 | int ret = 0; | ||
72 | |||
73 | np = of_find_compatible_node(NULL, NULL, "amcc,ppc460ex-bcsr"); | ||
74 | if (!np) { | ||
75 | printk(KERN_ERR "failed did not find amcc, ppc460ex bcsr node\n"); | ||
76 | return -ENODEV; | ||
77 | } | ||
78 | |||
79 | bcsr = of_iomap(np, 0); | ||
80 | of_node_put(np); | ||
81 | |||
82 | if (!bcsr) { | ||
83 | printk(KERN_CRIT "Could not remap bcsr\n"); | ||
84 | ret = -ENODEV; | ||
85 | goto err_bcsr; | ||
86 | } | ||
87 | |||
88 | np = of_find_compatible_node(NULL, NULL, "ibm,ppc4xx-gpio"); | ||
89 | if (!np) { | ||
90 | printk(KERN_ERR "failed did not find ibm,ppc4xx-gpio node\n"); | ||
91 | return -ENODEV; | ||
92 | } | ||
93 | |||
94 | vaddr = of_iomap(np, 0); | ||
95 | of_node_put(np); | ||
96 | |||
97 | if (!vaddr) { | ||
98 | printk(KERN_CRIT "Could not get gpio node address\n"); | ||
99 | ret = -ENODEV; | ||
100 | goto err_gpio; | ||
101 | } | ||
102 | /* Disable USB, through the BCSR7 bits */ | ||
103 | setbits8(&bcsr[7], BCSR_USB_EN); | ||
104 | |||
105 | /* Wait for a while after reset */ | ||
106 | msleep(100); | ||
107 | |||
108 | /* Enable USB here */ | ||
109 | clrbits8(&bcsr[7], BCSR_USB_EN); | ||
110 | |||
111 | /* | ||
112 | * Configure multiplexed gpio16 and gpio19 as alternate1 output | ||
113 | * source after USB reset. In this configuration gpio16 will be | ||
114 | * USB2HStop and gpio19 will be USB2DStop. For more details refer to | ||
115 | * table 34-7 of PPC460EX user manual. | ||
116 | */ | ||
117 | setbits32((vaddr + GPIO0_OSRH), 0x42000000); | ||
118 | setbits32((vaddr + GPIO0_TSRH), 0x42000000); | ||
119 | err_gpio: | ||
120 | iounmap(vaddr); | ||
121 | err_bcsr: | ||
122 | iounmap(bcsr); | ||
123 | return ret; | ||
124 | } | ||
125 | machine_device_initcall(canyonlands, ppc460ex_canyonlands_fixup); | ||
126 | define_machine(canyonlands) { | ||
127 | .name = "Canyonlands", | ||
128 | .probe = ppc460ex_probe, | ||
129 | .progress = udbg_progress, | ||
130 | .init_IRQ = uic_init_tree, | ||
131 | .get_irq = uic_get_irq, | ||
132 | .restart = ppc4xx_reset_system, | ||
133 | .calibrate_decr = generic_calibrate_decr, | ||
134 | }; | ||
diff --git a/arch/powerpc/platforms/44x/iss4xx.c b/arch/powerpc/platforms/44x/iss4xx.c index aa46e9d1e771..19395f18b1db 100644 --- a/arch/powerpc/platforms/44x/iss4xx.c +++ b/arch/powerpc/platforms/44x/iss4xx.c | |||
@@ -87,7 +87,7 @@ static void __cpuinit smp_iss4xx_setup_cpu(int cpu) | |||
87 | mpic_setup_this_cpu(); | 87 | mpic_setup_this_cpu(); |
88 | } | 88 | } |
89 | 89 | ||
90 | static void __cpuinit smp_iss4xx_kick_cpu(int cpu) | 90 | static int __cpuinit smp_iss4xx_kick_cpu(int cpu) |
91 | { | 91 | { |
92 | struct device_node *cpunode = of_get_cpu_node(cpu, NULL); | 92 | struct device_node *cpunode = of_get_cpu_node(cpu, NULL); |
93 | const u64 *spin_table_addr_prop; | 93 | const u64 *spin_table_addr_prop; |
@@ -104,7 +104,7 @@ static void __cpuinit smp_iss4xx_kick_cpu(int cpu) | |||
104 | NULL); | 104 | NULL); |
105 | if (spin_table_addr_prop == NULL) { | 105 | if (spin_table_addr_prop == NULL) { |
106 | pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu); | 106 | pr_err("CPU%d: Can't start, missing cpu-release-addr !\n", cpu); |
107 | return; | 107 | return -ENOENT; |
108 | } | 108 | } |
109 | 109 | ||
110 | /* Assume it's mapped as part of the linear mapping. This is a bit | 110 | /* Assume it's mapped as part of the linear mapping. This is a bit |
@@ -117,6 +117,8 @@ static void __cpuinit smp_iss4xx_kick_cpu(int cpu) | |||
117 | smp_wmb(); | 117 | smp_wmb(); |
118 | spin_table[1] = __pa(start_secondary_47x); | 118 | spin_table[1] = __pa(start_secondary_47x); |
119 | mb(); | 119 | mb(); |
120 | |||
121 | return 0; | ||
120 | } | 122 | } |
121 | 123 | ||
122 | static struct smp_ops_t iss_smp_ops = { | 124 | static struct smp_ops_t iss_smp_ops = { |
diff --git a/arch/powerpc/platforms/44x/ppc44x_simple.c b/arch/powerpc/platforms/44x/ppc44x_simple.c index 5f7a29d7f590..c81c19c0b3d4 100644 --- a/arch/powerpc/platforms/44x/ppc44x_simple.c +++ b/arch/powerpc/platforms/44x/ppc44x_simple.c | |||
@@ -52,7 +52,7 @@ machine_device_initcall(ppc44x_simple, ppc44x_device_probe); | |||
52 | static char *board[] __initdata = { | 52 | static char *board[] __initdata = { |
53 | "amcc,arches", | 53 | "amcc,arches", |
54 | "amcc,bamboo", | 54 | "amcc,bamboo", |
55 | "amcc,canyonlands", | 55 | "amcc,bluestone", |
56 | "amcc,glacier", | 56 | "amcc,glacier", |
57 | "ibm,ebony", | 57 | "ibm,ebony", |
58 | "amcc,eiger", | 58 | "amcc,eiger", |
diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c index 4ecf4cf9a51b..9f09319352c0 100644 --- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c +++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | |||
@@ -59,9 +59,9 @@ irq_to_pic_bit(unsigned int irq) | |||
59 | } | 59 | } |
60 | 60 | ||
61 | static void | 61 | static void |
62 | cpld_mask_irq(unsigned int irq) | 62 | cpld_mask_irq(struct irq_data *d) |
63 | { | 63 | { |
64 | unsigned int cpld_irq = (unsigned int)irq_map[irq].hwirq; | 64 | unsigned int cpld_irq = (unsigned int)irqd_to_hwirq(d); |
65 | void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); | 65 | void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); |
66 | 66 | ||
67 | out_8(pic_mask, | 67 | out_8(pic_mask, |
@@ -69,9 +69,9 @@ cpld_mask_irq(unsigned int irq) | |||
69 | } | 69 | } |
70 | 70 | ||
71 | static void | 71 | static void |
72 | cpld_unmask_irq(unsigned int irq) | 72 | cpld_unmask_irq(struct irq_data *d) |
73 | { | 73 | { |
74 | unsigned int cpld_irq = (unsigned int)irq_map[irq].hwirq; | 74 | unsigned int cpld_irq = (unsigned int)irqd_to_hwirq(d); |
75 | void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); | 75 | void __iomem *pic_mask = irq_to_pic_mask(cpld_irq); |
76 | 76 | ||
77 | out_8(pic_mask, | 77 | out_8(pic_mask, |
@@ -80,9 +80,9 @@ cpld_unmask_irq(unsigned int irq) | |||
80 | 80 | ||
81 | static struct irq_chip cpld_pic = { | 81 | static struct irq_chip cpld_pic = { |
82 | .name = "CPLD PIC", | 82 | .name = "CPLD PIC", |
83 | .mask = cpld_mask_irq, | 83 | .irq_mask = cpld_mask_irq, |
84 | .ack = cpld_mask_irq, | 84 | .irq_ack = cpld_mask_irq, |
85 | .unmask = cpld_unmask_irq, | 85 | .irq_unmask = cpld_unmask_irq, |
86 | }; | 86 | }; |
87 | 87 | ||
88 | static int | 88 | static int |
@@ -97,7 +97,7 @@ cpld_pic_get_irq(int offset, u8 ignore, u8 __iomem *statusp, | |||
97 | status |= (ignore | mask); | 97 | status |= (ignore | mask); |
98 | 98 | ||
99 | if (status == 0xff) | 99 | if (status == 0xff) |
100 | return NO_IRQ_IGNORE; | 100 | return NO_IRQ; |
101 | 101 | ||
102 | cpld_irq = ffz(status) + offset; | 102 | cpld_irq = ffz(status) + offset; |
103 | 103 | ||
@@ -109,14 +109,14 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc) | |||
109 | { | 109 | { |
110 | irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status, | 110 | irq = cpld_pic_get_irq(0, PCI_IGNORE, &cpld_regs->pci_status, |
111 | &cpld_regs->pci_mask); | 111 | &cpld_regs->pci_mask); |
112 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) { | 112 | if (irq != NO_IRQ) { |
113 | generic_handle_irq(irq); | 113 | generic_handle_irq(irq); |
114 | return; | 114 | return; |
115 | } | 115 | } |
116 | 116 | ||
117 | irq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status, | 117 | irq = cpld_pic_get_irq(8, MISC_IGNORE, &cpld_regs->misc_status, |
118 | &cpld_regs->misc_mask); | 118 | &cpld_regs->misc_mask); |
119 | if (irq != NO_IRQ && irq != NO_IRQ_IGNORE) { | 119 | if (irq != NO_IRQ) { |
120 | generic_handle_irq(irq); | 120 | generic_handle_irq(irq); |
121 | return; | 121 | return; |
122 | } | 122 | } |
@@ -132,8 +132,8 @@ static int | |||
132 | cpld_pic_host_map(struct irq_host *h, unsigned int virq, | 132 | cpld_pic_host_map(struct irq_host *h, unsigned int virq, |
133 | irq_hw_number_t hw) | 133 | irq_hw_number_t hw) |
134 | { | 134 | { |
135 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 135 | irq_set_status_flags(virq, IRQ_LEVEL); |
136 | set_irq_chip_and_handler(virq, &cpld_pic, handle_level_irq); | 136 | irq_set_chip_and_handler(virq, &cpld_pic, handle_level_irq); |
137 | return 0; | 137 | return 0; |
138 | } | 138 | } |
139 | 139 | ||
@@ -198,7 +198,7 @@ mpc5121_ads_cpld_pic_init(void) | |||
198 | goto end; | 198 | goto end; |
199 | } | 199 | } |
200 | 200 | ||
201 | set_irq_chained_handler(cascade_irq, cpld_pic_cascade); | 201 | irq_set_chained_handler(cascade_irq, cpld_pic_cascade); |
202 | end: | 202 | end: |
203 | of_node_put(np); | 203 | of_node_put(np); |
204 | } | 204 | } |
diff --git a/arch/powerpc/platforms/512x/mpc5121_generic.c b/arch/powerpc/platforms/512x/mpc5121_generic.c index e487eb06ec6b..926731f1ff01 100644 --- a/arch/powerpc/platforms/512x/mpc5121_generic.c +++ b/arch/powerpc/platforms/512x/mpc5121_generic.c | |||
@@ -26,7 +26,7 @@ | |||
26 | /* | 26 | /* |
27 | * list of supported boards | 27 | * list of supported boards |
28 | */ | 28 | */ |
29 | static char *board[] __initdata = { | 29 | static const char *board[] __initdata = { |
30 | "prt,prtlvt", | 30 | "prt,prtlvt", |
31 | NULL | 31 | NULL |
32 | }; | 32 | }; |
@@ -36,16 +36,7 @@ static char *board[] __initdata = { | |||
36 | */ | 36 | */ |
37 | static int __init mpc5121_generic_probe(void) | 37 | static int __init mpc5121_generic_probe(void) |
38 | { | 38 | { |
39 | unsigned long node = of_get_flat_dt_root(); | 39 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
40 | int i = 0; | ||
41 | |||
42 | while (board[i]) { | ||
43 | if (of_flat_dt_is_compatible(node, board[i])) | ||
44 | break; | ||
45 | i++; | ||
46 | } | ||
47 | |||
48 | return board[i] != NULL; | ||
49 | } | 40 | } |
50 | 41 | ||
51 | define_machine(mpc5121_generic) { | 42 | define_machine(mpc5121_generic) { |
diff --git a/arch/powerpc/platforms/52xx/lite5200.c b/arch/powerpc/platforms/52xx/lite5200.c index de55bc0584b5..01ffa64d2aa7 100644 --- a/arch/powerpc/platforms/52xx/lite5200.c +++ b/arch/powerpc/platforms/52xx/lite5200.c | |||
@@ -172,20 +172,18 @@ static void __init lite5200_setup_arch(void) | |||
172 | mpc52xx_setup_pci(); | 172 | mpc52xx_setup_pci(); |
173 | } | 173 | } |
174 | 174 | ||
175 | static const char *board[] __initdata = { | ||
176 | "fsl,lite5200", | ||
177 | "fsl,lite5200b", | ||
178 | NULL, | ||
179 | }; | ||
180 | |||
175 | /* | 181 | /* |
176 | * Called very early, MMU is off, device-tree isn't unflattened | 182 | * Called very early, MMU is off, device-tree isn't unflattened |
177 | */ | 183 | */ |
178 | static int __init lite5200_probe(void) | 184 | static int __init lite5200_probe(void) |
179 | { | 185 | { |
180 | unsigned long node = of_get_flat_dt_root(); | 186 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
181 | const char *model = of_get_flat_dt_prop(node, "model", NULL); | ||
182 | |||
183 | if (!of_flat_dt_is_compatible(node, "fsl,lite5200") && | ||
184 | !of_flat_dt_is_compatible(node, "fsl,lite5200b")) | ||
185 | return 0; | ||
186 | pr_debug("%s board found\n", model ? model : "unknown"); | ||
187 | |||
188 | return 1; | ||
189 | } | 187 | } |
190 | 188 | ||
191 | define_machine(lite5200) { | 189 | define_machine(lite5200) { |
diff --git a/arch/powerpc/platforms/52xx/lite5200_pm.c b/arch/powerpc/platforms/52xx/lite5200_pm.c index 80234e5921f5..eda0fc2a3914 100644 --- a/arch/powerpc/platforms/52xx/lite5200_pm.c +++ b/arch/powerpc/platforms/52xx/lite5200_pm.c | |||
@@ -232,7 +232,7 @@ static void lite5200_pm_end(void) | |||
232 | lite5200_pm_target_state = PM_SUSPEND_ON; | 232 | lite5200_pm_target_state = PM_SUSPEND_ON; |
233 | } | 233 | } |
234 | 234 | ||
235 | static struct platform_suspend_ops lite5200_pm_ops = { | 235 | static const struct platform_suspend_ops lite5200_pm_ops = { |
236 | .valid = lite5200_pm_valid, | 236 | .valid = lite5200_pm_valid, |
237 | .begin = lite5200_pm_begin, | 237 | .begin = lite5200_pm_begin, |
238 | .prepare = lite5200_pm_prepare, | 238 | .prepare = lite5200_pm_prepare, |
diff --git a/arch/powerpc/platforms/52xx/media5200.c b/arch/powerpc/platforms/52xx/media5200.c index 0bac3a3dbecf..96f85e5e0cd3 100644 --- a/arch/powerpc/platforms/52xx/media5200.c +++ b/arch/powerpc/platforms/52xx/media5200.c | |||
@@ -49,45 +49,46 @@ struct media5200_irq { | |||
49 | }; | 49 | }; |
50 | struct media5200_irq media5200_irq; | 50 | struct media5200_irq media5200_irq; |
51 | 51 | ||
52 | static void media5200_irq_unmask(unsigned int virq) | 52 | static void media5200_irq_unmask(struct irq_data *d) |
53 | { | 53 | { |
54 | unsigned long flags; | 54 | unsigned long flags; |
55 | u32 val; | 55 | u32 val; |
56 | 56 | ||
57 | spin_lock_irqsave(&media5200_irq.lock, flags); | 57 | spin_lock_irqsave(&media5200_irq.lock, flags); |
58 | val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE); | 58 | val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE); |
59 | val |= 1 << (MEDIA5200_IRQ_SHIFT + irq_map[virq].hwirq); | 59 | val |= 1 << (MEDIA5200_IRQ_SHIFT + irqd_to_hwirq(d)); |
60 | out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val); | 60 | out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val); |
61 | spin_unlock_irqrestore(&media5200_irq.lock, flags); | 61 | spin_unlock_irqrestore(&media5200_irq.lock, flags); |
62 | } | 62 | } |
63 | 63 | ||
64 | static void media5200_irq_mask(unsigned int virq) | 64 | static void media5200_irq_mask(struct irq_data *d) |
65 | { | 65 | { |
66 | unsigned long flags; | 66 | unsigned long flags; |
67 | u32 val; | 67 | u32 val; |
68 | 68 | ||
69 | spin_lock_irqsave(&media5200_irq.lock, flags); | 69 | spin_lock_irqsave(&media5200_irq.lock, flags); |
70 | val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE); | 70 | val = in_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE); |
71 | val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irq_map[virq].hwirq)); | 71 | val &= ~(1 << (MEDIA5200_IRQ_SHIFT + irqd_to_hwirq(d))); |
72 | out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val); | 72 | out_be32(media5200_irq.regs + MEDIA5200_IRQ_ENABLE, val); |
73 | spin_unlock_irqrestore(&media5200_irq.lock, flags); | 73 | spin_unlock_irqrestore(&media5200_irq.lock, flags); |
74 | } | 74 | } |
75 | 75 | ||
76 | static struct irq_chip media5200_irq_chip = { | 76 | static struct irq_chip media5200_irq_chip = { |
77 | .name = "Media5200 FPGA", | 77 | .name = "Media5200 FPGA", |
78 | .unmask = media5200_irq_unmask, | 78 | .irq_unmask = media5200_irq_unmask, |
79 | .mask = media5200_irq_mask, | 79 | .irq_mask = media5200_irq_mask, |
80 | .mask_ack = media5200_irq_mask, | 80 | .irq_mask_ack = media5200_irq_mask, |
81 | }; | 81 | }; |
82 | 82 | ||
83 | void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) | 83 | void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) |
84 | { | 84 | { |
85 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
85 | int sub_virq, val; | 86 | int sub_virq, val; |
86 | u32 status, enable; | 87 | u32 status, enable; |
87 | 88 | ||
88 | /* Mask off the cascaded IRQ */ | 89 | /* Mask off the cascaded IRQ */ |
89 | raw_spin_lock(&desc->lock); | 90 | raw_spin_lock(&desc->lock); |
90 | desc->chip->mask(virq); | 91 | chip->irq_mask(&desc->irq_data); |
91 | raw_spin_unlock(&desc->lock); | 92 | raw_spin_unlock(&desc->lock); |
92 | 93 | ||
93 | /* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs | 94 | /* Ask the FPGA for IRQ status. If 'val' is 0, then no irqs |
@@ -105,24 +106,19 @@ void media5200_irq_cascade(unsigned int virq, struct irq_desc *desc) | |||
105 | 106 | ||
106 | /* Processing done; can reenable the cascade now */ | 107 | /* Processing done; can reenable the cascade now */ |
107 | raw_spin_lock(&desc->lock); | 108 | raw_spin_lock(&desc->lock); |
108 | desc->chip->ack(virq); | 109 | chip->irq_ack(&desc->irq_data); |
109 | if (!(desc->status & IRQ_DISABLED)) | 110 | if (!irqd_irq_disabled(&desc->irq_data)) |
110 | desc->chip->unmask(virq); | 111 | chip->irq_unmask(&desc->irq_data); |
111 | raw_spin_unlock(&desc->lock); | 112 | raw_spin_unlock(&desc->lock); |
112 | } | 113 | } |
113 | 114 | ||
114 | static int media5200_irq_map(struct irq_host *h, unsigned int virq, | 115 | static int media5200_irq_map(struct irq_host *h, unsigned int virq, |
115 | irq_hw_number_t hw) | 116 | irq_hw_number_t hw) |
116 | { | 117 | { |
117 | struct irq_desc *desc = irq_to_desc(virq); | ||
118 | |||
119 | pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw); | 118 | pr_debug("%s: h=%p, virq=%i, hwirq=%i\n", __func__, h, virq, (int)hw); |
120 | set_irq_chip_data(virq, &media5200_irq); | 119 | irq_set_chip_data(virq, &media5200_irq); |
121 | set_irq_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq); | 120 | irq_set_chip_and_handler(virq, &media5200_irq_chip, handle_level_irq); |
122 | set_irq_type(virq, IRQ_TYPE_LEVEL_LOW); | 121 | irq_set_status_flags(virq, IRQ_LEVEL); |
123 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | ||
124 | desc->status |= IRQ_TYPE_LEVEL_LOW | IRQ_LEVEL; | ||
125 | |||
126 | return 0; | 122 | return 0; |
127 | } | 123 | } |
128 | 124 | ||
@@ -186,8 +182,8 @@ static void __init media5200_init_irq(void) | |||
186 | 182 | ||
187 | media5200_irq.irqhost->host_data = &media5200_irq; | 183 | media5200_irq.irqhost->host_data = &media5200_irq; |
188 | 184 | ||
189 | set_irq_data(cascade_virq, &media5200_irq); | 185 | irq_set_handler_data(cascade_virq, &media5200_irq); |
190 | set_irq_chained_handler(cascade_virq, media5200_irq_cascade); | 186 | irq_set_chained_handler(cascade_virq, media5200_irq_cascade); |
191 | 187 | ||
192 | return; | 188 | return; |
193 | 189 | ||
@@ -239,7 +235,7 @@ static void __init media5200_setup_arch(void) | |||
239 | } | 235 | } |
240 | 236 | ||
241 | /* list of the supported boards */ | 237 | /* list of the supported boards */ |
242 | static char *board[] __initdata = { | 238 | static const char *board[] __initdata = { |
243 | "fsl,media5200", | 239 | "fsl,media5200", |
244 | NULL | 240 | NULL |
245 | }; | 241 | }; |
@@ -249,16 +245,7 @@ static char *board[] __initdata = { | |||
249 | */ | 245 | */ |
250 | static int __init media5200_probe(void) | 246 | static int __init media5200_probe(void) |
251 | { | 247 | { |
252 | unsigned long node = of_get_flat_dt_root(); | 248 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
253 | int i = 0; | ||
254 | |||
255 | while (board[i]) { | ||
256 | if (of_flat_dt_is_compatible(node, board[i])) | ||
257 | break; | ||
258 | i++; | ||
259 | } | ||
260 | |||
261 | return (board[i] != NULL); | ||
262 | } | 249 | } |
263 | 250 | ||
264 | define_machine(media5200_platform) { | 251 | define_machine(media5200_platform) { |
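The conversions in this file and in mpc5121_ads_cpld.c above move the irq_chip callbacks to the struct irq_data interface: handlers take an irq_data pointer, the hardware number comes from irqd_to_hwirq() instead of irq_map[], and a cascade handler looks up the parent chip with irq_desc_get_chip(). A stripped-down chip in the new style; fpga_regs and FPGA_IRQ_ENABLE are invented and do not reflect the Media5200 register layout:

    #include <linux/irq.h>
    #include <linux/io.h>

    static void __iomem *fpga_regs;          /* mapped elsewhere (illustrative) */
    #define FPGA_IRQ_ENABLE 0x00

    static void demo_irq_mask(struct irq_data *d)
    {
        u32 val = in_be32(fpga_regs + FPGA_IRQ_ENABLE);

        /* the bit index comes from irq_data, not from irq_map[] */
        val &= ~(1 << irqd_to_hwirq(d));
        out_be32(fpga_regs + FPGA_IRQ_ENABLE, val);
    }

    static void demo_irq_unmask(struct irq_data *d)
    {
        u32 val = in_be32(fpga_regs + FPGA_IRQ_ENABLE);

        val |= 1 << irqd_to_hwirq(d);
        out_be32(fpga_regs + FPGA_IRQ_ENABLE, val);
    }

    static struct irq_chip demo_irq_chip = {
        .name         = "demo FPGA",
        .irq_mask     = demo_irq_mask,
        .irq_unmask   = demo_irq_unmask,
        .irq_mask_ack = demo_irq_mask,
    };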
diff --git a/arch/powerpc/platforms/52xx/mpc5200_simple.c b/arch/powerpc/platforms/52xx/mpc5200_simple.c index d45be5b5ad49..e36d6e232ae6 100644 --- a/arch/powerpc/platforms/52xx/mpc5200_simple.c +++ b/arch/powerpc/platforms/52xx/mpc5200_simple.c | |||
@@ -49,7 +49,7 @@ static void __init mpc5200_simple_setup_arch(void) | |||
49 | } | 49 | } |
50 | 50 | ||
51 | /* list of the supported boards */ | 51 | /* list of the supported boards */ |
52 | static char *board[] __initdata = { | 52 | static const char *board[] __initdata = { |
53 | "intercontrol,digsy-mtc", | 53 | "intercontrol,digsy-mtc", |
54 | "manroland,mucmc52", | 54 | "manroland,mucmc52", |
55 | "manroland,uc101", | 55 | "manroland,uc101", |
@@ -66,16 +66,7 @@ static char *board[] __initdata = { | |||
66 | */ | 66 | */ |
67 | static int __init mpc5200_simple_probe(void) | 67 | static int __init mpc5200_simple_probe(void) |
68 | { | 68 | { |
69 | unsigned long node = of_get_flat_dt_root(); | 69 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
70 | int i = 0; | ||
71 | |||
72 | while (board[i]) { | ||
73 | if (of_flat_dt_is_compatible(node, board[i])) | ||
74 | break; | ||
75 | i++; | ||
76 | } | ||
77 | |||
78 | return (board[i] != NULL); | ||
79 | } | 70 | } |
80 | 71 | ||
81 | define_machine(mpc5200_simple_platform) { | 72 | define_machine(mpc5200_simple_platform) { |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c index 0dad9a935eb5..1757d1db4b51 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpio.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpio.c | |||
@@ -147,8 +147,7 @@ mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) | |||
147 | return 0; | 147 | return 0; |
148 | } | 148 | } |
149 | 149 | ||
150 | static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev, | 150 | static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev) |
151 | const struct of_device_id *match) | ||
152 | { | 151 | { |
153 | struct mpc52xx_gpiochip *chip; | 152 | struct mpc52xx_gpiochip *chip; |
154 | struct mpc52xx_gpio_wkup __iomem *regs; | 153 | struct mpc52xx_gpio_wkup __iomem *regs; |
@@ -191,7 +190,7 @@ static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = { | |||
191 | {} | 190 | {} |
192 | }; | 191 | }; |
193 | 192 | ||
194 | static struct of_platform_driver mpc52xx_wkup_gpiochip_driver = { | 193 | static struct platform_driver mpc52xx_wkup_gpiochip_driver = { |
195 | .driver = { | 194 | .driver = { |
196 | .name = "gpio_wkup", | 195 | .name = "gpio_wkup", |
197 | .owner = THIS_MODULE, | 196 | .owner = THIS_MODULE, |
@@ -310,8 +309,7 @@ mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) | |||
310 | return 0; | 309 | return 0; |
311 | } | 310 | } |
312 | 311 | ||
313 | static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev, | 312 | static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev) |
314 | const struct of_device_id *match) | ||
315 | { | 313 | { |
316 | struct mpc52xx_gpiochip *chip; | 314 | struct mpc52xx_gpiochip *chip; |
317 | struct gpio_chip *gc; | 315 | struct gpio_chip *gc; |
@@ -349,7 +347,7 @@ static const struct of_device_id mpc52xx_simple_gpiochip_match[] = { | |||
349 | {} | 347 | {} |
350 | }; | 348 | }; |
351 | 349 | ||
352 | static struct of_platform_driver mpc52xx_simple_gpiochip_driver = { | 350 | static struct platform_driver mpc52xx_simple_gpiochip_driver = { |
353 | .driver = { | 351 | .driver = { |
354 | .name = "gpio", | 352 | .name = "gpio", |
355 | .owner = THIS_MODULE, | 353 | .owner = THIS_MODULE, |
@@ -361,10 +359,10 @@ static struct of_platform_driver mpc52xx_simple_gpiochip_driver = { | |||
361 | 359 | ||
362 | static int __init mpc52xx_gpio_init(void) | 360 | static int __init mpc52xx_gpio_init(void) |
363 | { | 361 | { |
364 | if (of_register_platform_driver(&mpc52xx_wkup_gpiochip_driver)) | 362 | if (platform_driver_register(&mpc52xx_wkup_gpiochip_driver)) |
365 | printk(KERN_ERR "Unable to register wakeup GPIO driver\n"); | 363 | printk(KERN_ERR "Unable to register wakeup GPIO driver\n"); |
366 | 364 | ||
367 | if (of_register_platform_driver(&mpc52xx_simple_gpiochip_driver)) | 365 | if (platform_driver_register(&mpc52xx_simple_gpiochip_driver)) |
368 | printk(KERN_ERR "Unable to register simple GPIO driver\n"); | 366 | printk(KERN_ERR "Unable to register simple GPIO driver\n"); |
369 | 367 | ||
370 | return 0; | 368 | return 0; |
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c index fea833e18ad5..6c39b9cc2fa3 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_gpt.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_gpt.c | |||
@@ -63,6 +63,7 @@ | |||
63 | #include <linux/of_gpio.h> | 63 | #include <linux/of_gpio.h> |
64 | #include <linux/kernel.h> | 64 | #include <linux/kernel.h> |
65 | #include <linux/slab.h> | 65 | #include <linux/slab.h> |
66 | #include <linux/fs.h> | ||
66 | #include <linux/watchdog.h> | 67 | #include <linux/watchdog.h> |
67 | #include <linux/miscdevice.h> | 68 | #include <linux/miscdevice.h> |
68 | #include <linux/uaccess.h> | 69 | #include <linux/uaccess.h> |
@@ -134,9 +135,9 @@ DEFINE_MUTEX(mpc52xx_gpt_list_mutex); | |||
134 | * Cascaded interrupt controller hooks | 135 | * Cascaded interrupt controller hooks |
135 | */ | 136 | */ |
136 | 137 | ||
137 | static void mpc52xx_gpt_irq_unmask(unsigned int virq) | 138 | static void mpc52xx_gpt_irq_unmask(struct irq_data *d) |
138 | { | 139 | { |
139 | struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq); | 140 | struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d); |
140 | unsigned long flags; | 141 | unsigned long flags; |
141 | 142 | ||
142 | spin_lock_irqsave(&gpt->lock, flags); | 143 | spin_lock_irqsave(&gpt->lock, flags); |
@@ -144,9 +145,9 @@ static void mpc52xx_gpt_irq_unmask(unsigned int virq) | |||
144 | spin_unlock_irqrestore(&gpt->lock, flags); | 145 | spin_unlock_irqrestore(&gpt->lock, flags); |
145 | } | 146 | } |
146 | 147 | ||
147 | static void mpc52xx_gpt_irq_mask(unsigned int virq) | 148 | static void mpc52xx_gpt_irq_mask(struct irq_data *d) |
148 | { | 149 | { |
149 | struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq); | 150 | struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d); |
150 | unsigned long flags; | 151 | unsigned long flags; |
151 | 152 | ||
152 | spin_lock_irqsave(&gpt->lock, flags); | 153 | spin_lock_irqsave(&gpt->lock, flags); |
@@ -154,20 +155,20 @@ static void mpc52xx_gpt_irq_mask(unsigned int virq) | |||
154 | spin_unlock_irqrestore(&gpt->lock, flags); | 155 | spin_unlock_irqrestore(&gpt->lock, flags); |
155 | } | 156 | } |
156 | 157 | ||
157 | static void mpc52xx_gpt_irq_ack(unsigned int virq) | 158 | static void mpc52xx_gpt_irq_ack(struct irq_data *d) |
158 | { | 159 | { |
159 | struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq); | 160 | struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d); |
160 | 161 | ||
161 | out_be32(&gpt->regs->status, MPC52xx_GPT_STATUS_IRQMASK); | 162 | out_be32(&gpt->regs->status, MPC52xx_GPT_STATUS_IRQMASK); |
162 | } | 163 | } |
163 | 164 | ||
164 | static int mpc52xx_gpt_irq_set_type(unsigned int virq, unsigned int flow_type) | 165 | static int mpc52xx_gpt_irq_set_type(struct irq_data *d, unsigned int flow_type) |
165 | { | 166 | { |
166 | struct mpc52xx_gpt_priv *gpt = get_irq_chip_data(virq); | 167 | struct mpc52xx_gpt_priv *gpt = irq_data_get_irq_chip_data(d); |
167 | unsigned long flags; | 168 | unsigned long flags; |
168 | u32 reg; | 169 | u32 reg; |
169 | 170 | ||
170 | dev_dbg(gpt->dev, "%s: virq=%i type=%x\n", __func__, virq, flow_type); | 171 | dev_dbg(gpt->dev, "%s: virq=%i type=%x\n", __func__, d->irq, flow_type); |
171 | 172 | ||
172 | spin_lock_irqsave(&gpt->lock, flags); | 173 | spin_lock_irqsave(&gpt->lock, flags); |
173 | reg = in_be32(&gpt->regs->mode) & ~MPC52xx_GPT_MODE_ICT_MASK; | 174 | reg = in_be32(&gpt->regs->mode) & ~MPC52xx_GPT_MODE_ICT_MASK; |
@@ -183,15 +184,15 @@ static int mpc52xx_gpt_irq_set_type(unsigned int virq, unsigned int flow_type) | |||
183 | 184 | ||
184 | static struct irq_chip mpc52xx_gpt_irq_chip = { | 185 | static struct irq_chip mpc52xx_gpt_irq_chip = { |
185 | .name = "MPC52xx GPT", | 186 | .name = "MPC52xx GPT", |
186 | .unmask = mpc52xx_gpt_irq_unmask, | 187 | .irq_unmask = mpc52xx_gpt_irq_unmask, |
187 | .mask = mpc52xx_gpt_irq_mask, | 188 | .irq_mask = mpc52xx_gpt_irq_mask, |
188 | .ack = mpc52xx_gpt_irq_ack, | 189 | .irq_ack = mpc52xx_gpt_irq_ack, |
189 | .set_type = mpc52xx_gpt_irq_set_type, | 190 | .irq_set_type = mpc52xx_gpt_irq_set_type, |
190 | }; | 191 | }; |
191 | 192 | ||
192 | void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc) | 193 | void mpc52xx_gpt_irq_cascade(unsigned int virq, struct irq_desc *desc) |
193 | { | 194 | { |
194 | struct mpc52xx_gpt_priv *gpt = get_irq_data(virq); | 195 | struct mpc52xx_gpt_priv *gpt = irq_get_handler_data(virq); |
195 | int sub_virq; | 196 | int sub_virq; |
196 | u32 status; | 197 | u32 status; |
197 | 198 | ||
@@ -208,8 +209,8 @@ static int mpc52xx_gpt_irq_map(struct irq_host *h, unsigned int virq, | |||
208 | struct mpc52xx_gpt_priv *gpt = h->host_data; | 209 | struct mpc52xx_gpt_priv *gpt = h->host_data; |
209 | 210 | ||
210 | dev_dbg(gpt->dev, "%s: h=%p, virq=%i\n", __func__, h, virq); | 211 | dev_dbg(gpt->dev, "%s: h=%p, virq=%i\n", __func__, h, virq); |
211 | set_irq_chip_data(virq, gpt); | 212 | irq_set_chip_data(virq, gpt); |
212 | set_irq_chip_and_handler(virq, &mpc52xx_gpt_irq_chip, handle_edge_irq); | 213 | irq_set_chip_and_handler(virq, &mpc52xx_gpt_irq_chip, handle_edge_irq); |
213 | 214 | ||
214 | return 0; | 215 | return 0; |
215 | } | 216 | } |
@@ -258,8 +259,8 @@ mpc52xx_gpt_irq_setup(struct mpc52xx_gpt_priv *gpt, struct device_node *node) | |||
258 | } | 259 | } |
259 | 260 | ||
260 | gpt->irqhost->host_data = gpt; | 261 | gpt->irqhost->host_data = gpt; |
261 | set_irq_data(cascade_virq, gpt); | 262 | irq_set_handler_data(cascade_virq, gpt); |
262 | set_irq_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); | 263 | irq_set_chained_handler(cascade_virq, mpc52xx_gpt_irq_cascade); |
263 | 264 | ||
264 | /* If the GPT is currently disabled, then change it to be in Input | 265 | /* If the GPT is currently disabled, then change it to be in Input |
265 | * Capture mode. If the mode is non-zero, then the pin could be | 266 | * Capture mode. If the mode is non-zero, then the pin could be |
@@ -720,8 +721,7 @@ static inline int mpc52xx_gpt_wdt_setup(struct mpc52xx_gpt_priv *gpt, | |||
720 | /* --------------------------------------------------------------------- | 721 | /* --------------------------------------------------------------------- |
721 | * of_platform bus binding code | 722 | * of_platform bus binding code |
722 | */ | 723 | */ |
723 | static int __devinit mpc52xx_gpt_probe(struct platform_device *ofdev, | 724 | static int __devinit mpc52xx_gpt_probe(struct platform_device *ofdev) |
724 | const struct of_device_id *match) | ||
725 | { | 725 | { |
726 | struct mpc52xx_gpt_priv *gpt; | 726 | struct mpc52xx_gpt_priv *gpt; |
727 | 727 | ||
@@ -780,7 +780,7 @@ static const struct of_device_id mpc52xx_gpt_match[] = { | |||
780 | {} | 780 | {} |
781 | }; | 781 | }; |
782 | 782 | ||
783 | static struct of_platform_driver mpc52xx_gpt_driver = { | 783 | static struct platform_driver mpc52xx_gpt_driver = { |
784 | .driver = { | 784 | .driver = { |
785 | .name = "mpc52xx-gpt", | 785 | .name = "mpc52xx-gpt", |
786 | .owner = THIS_MODULE, | 786 | .owner = THIS_MODULE, |
@@ -792,10 +792,7 @@ static struct of_platform_driver mpc52xx_gpt_driver = { | |||
792 | 792 | ||
793 | static int __init mpc52xx_gpt_init(void) | 793 | static int __init mpc52xx_gpt_init(void) |
794 | { | 794 | { |
795 | if (of_register_platform_driver(&mpc52xx_gpt_driver)) | 795 | return platform_driver_register(&mpc52xx_gpt_driver); |
796 | pr_err("error registering MPC52xx GPT driver\n"); | ||
797 | |||
798 | return 0; | ||
799 | } | 796 | } |
800 | 797 | ||
801 | /* Make sure GPIOs and IRQs get set up before anyone tries to use them */ | 798 | /* Make sure GPIOs and IRQs get set up before anyone tries to use them */ |
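[Editor's note] The GPT hunks illustrate the genirq conversion in which irq_chip callbacks receive a struct irq_data pointer and the methods gain an irq_ prefix. A hedged sketch of the shape, with a hypothetical controller and registers:

#include <linux/irq.h>
#include <asm/io.h>

struct example_pic {
	u32 __iomem *enable_reg;	/* hypothetical enable register */
	u32 __iomem *status_reg;	/* hypothetical status register */
};

static void example_irq_unmask(struct irq_data *d)
{
	struct example_pic *pic = irq_data_get_irq_chip_data(d);

	out_be32(pic->enable_reg, 1);
}

static void example_irq_mask(struct irq_data *d)
{
	struct example_pic *pic = irq_data_get_irq_chip_data(d);

	out_be32(pic->enable_reg, 0);
}

static void example_irq_ack(struct irq_data *d)
{
	struct example_pic *pic = irq_data_get_irq_chip_data(d);

	out_be32(pic->status_reg, 1);	/* write-one-to-clear, hypothetical */
}

static struct irq_chip example_irq_chip = {
	.name		= "example",
	.irq_unmask	= example_irq_unmask,
	.irq_mask	= example_irq_mask,
	.irq_ack	= example_irq_ack,
};

Mapping code attaches the chip data and handler with irq_set_chip_data() and irq_set_chip_and_handler(), which is what the GPT hunks do for each mapped virq.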
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c index f4ac213c89c0..9940ce8a2d4e 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c | |||
@@ -57,7 +57,7 @@ struct mpc52xx_lpbfifo { | |||
57 | static struct mpc52xx_lpbfifo lpbfifo; | 57 | static struct mpc52xx_lpbfifo lpbfifo; |
58 | 58 | ||
59 | /** | 59 | /** |
60 | * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transfered | 60 | * mpc52xx_lpbfifo_kick - Trigger the next block of data to be transferred |
61 | */ | 61 | */ |
62 | static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) | 62 | static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) |
63 | { | 63 | { |
@@ -179,7 +179,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) | |||
179 | * | 179 | * |
180 | * On transmit, the dma completion irq triggers before the fifo completion | 180 | * On transmit, the dma completion irq triggers before the fifo completion |
181 | * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm | 181 | * triggers. Handle the dma completion here instead of the LPB FIFO Bestcomm |
182 | * task completion irq becuase everyting is not really done until the LPB FIFO | 182 | * task completion irq because everything is not really done until the LPB FIFO |
183 | * completion irq triggers. | 183 | * completion irq triggers. |
184 | * | 184 | * |
185 | * In other words: | 185 | * In other words: |
@@ -195,7 +195,7 @@ static void mpc52xx_lpbfifo_kick(struct mpc52xx_lpbfifo_request *req) | |||
195 | * Exit conditions: | 195 | * Exit conditions: |
196 | * 1) Transfer aborted | 196 | * 1) Transfer aborted |
197 | * 2) FIFO complete without DMA; more data to do | 197 | * 2) FIFO complete without DMA; more data to do |
198 | * 3) FIFO complete without DMA; all data transfered | 198 | * 3) FIFO complete without DMA; all data transferred |
199 | * 4) FIFO complete using DMA | 199 | * 4) FIFO complete using DMA |
200 | * | 200 | * |
201 | * Condition 1 can occur regardless of whether or not DMA is used. | 201 | * Condition 1 can occur regardless of whether or not DMA is used. |
@@ -436,8 +436,7 @@ void mpc52xx_lpbfifo_abort(struct mpc52xx_lpbfifo_request *req) | |||
436 | } | 436 | } |
437 | EXPORT_SYMBOL(mpc52xx_lpbfifo_abort); | 437 | EXPORT_SYMBOL(mpc52xx_lpbfifo_abort); |
438 | 438 | ||
439 | static int __devinit mpc52xx_lpbfifo_probe(struct platform_device *op, | 439 | static int __devinit mpc52xx_lpbfifo_probe(struct platform_device *op) |
440 | const struct of_device_id *match) | ||
441 | { | 440 | { |
442 | struct resource res; | 441 | struct resource res; |
443 | int rc = -ENOMEM; | 442 | int rc = -ENOMEM; |
@@ -536,7 +535,7 @@ static struct of_device_id mpc52xx_lpbfifo_match[] __devinitconst = { | |||
536 | {}, | 535 | {}, |
537 | }; | 536 | }; |
538 | 537 | ||
539 | static struct of_platform_driver mpc52xx_lpbfifo_driver = { | 538 | static struct platform_driver mpc52xx_lpbfifo_driver = { |
540 | .driver = { | 539 | .driver = { |
541 | .name = "mpc52xx-lpbfifo", | 540 | .name = "mpc52xx-lpbfifo", |
542 | .owner = THIS_MODULE, | 541 | .owner = THIS_MODULE, |
@@ -551,14 +550,12 @@ static struct of_platform_driver mpc52xx_lpbfifo_driver = { | |||
551 | */ | 550 | */ |
552 | static int __init mpc52xx_lpbfifo_init(void) | 551 | static int __init mpc52xx_lpbfifo_init(void) |
553 | { | 552 | { |
554 | pr_debug("Registering LocalPlus bus FIFO driver\n"); | 553 | return platform_driver_register(&mpc52xx_lpbfifo_driver); |
555 | return of_register_platform_driver(&mpc52xx_lpbfifo_driver); | ||
556 | } | 554 | } |
557 | module_init(mpc52xx_lpbfifo_init); | 555 | module_init(mpc52xx_lpbfifo_init); |
558 | 556 | ||
559 | static void __exit mpc52xx_lpbfifo_exit(void) | 557 | static void __exit mpc52xx_lpbfifo_exit(void) |
560 | { | 558 | { |
561 | pr_debug("Unregistering LocalPlus bus FIFO driver\n"); | 559 | platform_driver_unregister(&mpc52xx_lpbfifo_driver); |
562 | of_unregister_platform_driver(&mpc52xx_lpbfifo_driver); | ||
563 | } | 560 | } |
564 | module_exit(mpc52xx_lpbfifo_exit); | 561 | module_exit(mpc52xx_lpbfifo_exit); |
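[Editor's note] As in the LPB FIFO hunks, module init code now returns the platform_driver_register() result instead of printing a message and returning 0, so registration failures propagate to the module loader. A small self-contained sketch of the pattern (driver fields trimmed, names hypothetical):

#include <linux/module.h>
#include <linux/platform_device.h>

static struct platform_driver example_fifo_driver = {
	.driver = {
		.name	= "example-fifo",
		.owner	= THIS_MODULE,
	},
};

static int __init example_fifo_init(void)
{
	/* let a registration failure reach the module loader */
	return platform_driver_register(&example_fifo_driver);
}
module_init(example_fifo_init);

static void __exit example_fifo_exit(void)
{
	platform_driver_unregister(&example_fifo_driver);
}
module_exit(example_fifo_exit);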
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pic.c b/arch/powerpc/platforms/52xx/mpc52xx_pic.c index 4bf4bf7b063e..1a9a49570579 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pic.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pic.c | |||
@@ -155,50 +155,32 @@ static inline void io_be_clrbit(u32 __iomem *addr, int bitno) | |||
155 | /* | 155 | /* |
156 | * IRQ[0-3] interrupt irq_chip | 156 | * IRQ[0-3] interrupt irq_chip |
157 | */ | 157 | */ |
158 | static void mpc52xx_extirq_mask(unsigned int virq) | 158 | static void mpc52xx_extirq_mask(struct irq_data *d) |
159 | { | 159 | { |
160 | int irq; | 160 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
161 | int l2irq; | ||
162 | |||
163 | irq = irq_map[virq].hwirq; | ||
164 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
165 | |||
166 | io_be_clrbit(&intr->ctrl, 11 - l2irq); | 161 | io_be_clrbit(&intr->ctrl, 11 - l2irq); |
167 | } | 162 | } |
168 | 163 | ||
169 | static void mpc52xx_extirq_unmask(unsigned int virq) | 164 | static void mpc52xx_extirq_unmask(struct irq_data *d) |
170 | { | 165 | { |
171 | int irq; | 166 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
172 | int l2irq; | ||
173 | |||
174 | irq = irq_map[virq].hwirq; | ||
175 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
176 | |||
177 | io_be_setbit(&intr->ctrl, 11 - l2irq); | 167 | io_be_setbit(&intr->ctrl, 11 - l2irq); |
178 | } | 168 | } |
179 | 169 | ||
180 | static void mpc52xx_extirq_ack(unsigned int virq) | 170 | static void mpc52xx_extirq_ack(struct irq_data *d) |
181 | { | 171 | { |
182 | int irq; | 172 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
183 | int l2irq; | ||
184 | |||
185 | irq = irq_map[virq].hwirq; | ||
186 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
187 | |||
188 | io_be_setbit(&intr->ctrl, 27-l2irq); | 173 | io_be_setbit(&intr->ctrl, 27-l2irq); |
189 | } | 174 | } |
190 | 175 | ||
191 | static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type) | 176 | static int mpc52xx_extirq_set_type(struct irq_data *d, unsigned int flow_type) |
192 | { | 177 | { |
193 | u32 ctrl_reg, type; | 178 | u32 ctrl_reg, type; |
194 | int irq; | 179 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
195 | int l2irq; | ||
196 | void *handler = handle_level_irq; | 180 | void *handler = handle_level_irq; |
197 | 181 | ||
198 | irq = irq_map[virq].hwirq; | 182 | pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, |
199 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | 183 | (int) irqd_to_hwirq(d), l2irq, flow_type); |
200 | |||
201 | pr_debug("%s: irq=%x. l2=%d flow_type=%d\n", __func__, irq, l2irq, flow_type); | ||
202 | 184 | ||
203 | switch (flow_type) { | 185 | switch (flow_type) { |
204 | case IRQF_TRIGGER_HIGH: type = 0; break; | 186 | case IRQF_TRIGGER_HIGH: type = 0; break; |
@@ -214,132 +196,97 @@ static int mpc52xx_extirq_set_type(unsigned int virq, unsigned int flow_type) | |||
214 | ctrl_reg |= (type << (22 - (l2irq * 2))); | 196 | ctrl_reg |= (type << (22 - (l2irq * 2))); |
215 | out_be32(&intr->ctrl, ctrl_reg); | 197 | out_be32(&intr->ctrl, ctrl_reg); |
216 | 198 | ||
217 | __set_irq_handler_unlocked(virq, handler); | 199 | __irq_set_handler_locked(d->irq, handler); |
218 | 200 | ||
219 | return 0; | 201 | return 0; |
220 | } | 202 | } |
221 | 203 | ||
222 | static struct irq_chip mpc52xx_extirq_irqchip = { | 204 | static struct irq_chip mpc52xx_extirq_irqchip = { |
223 | .name = "MPC52xx External", | 205 | .name = "MPC52xx External", |
224 | .mask = mpc52xx_extirq_mask, | 206 | .irq_mask = mpc52xx_extirq_mask, |
225 | .unmask = mpc52xx_extirq_unmask, | 207 | .irq_unmask = mpc52xx_extirq_unmask, |
226 | .ack = mpc52xx_extirq_ack, | 208 | .irq_ack = mpc52xx_extirq_ack, |
227 | .set_type = mpc52xx_extirq_set_type, | 209 | .irq_set_type = mpc52xx_extirq_set_type, |
228 | }; | 210 | }; |
229 | 211 | ||
230 | /* | 212 | /* |
231 | * Main interrupt irq_chip | 213 | * Main interrupt irq_chip |
232 | */ | 214 | */ |
233 | static int mpc52xx_null_set_type(unsigned int virq, unsigned int flow_type) | 215 | static int mpc52xx_null_set_type(struct irq_data *d, unsigned int flow_type) |
234 | { | 216 | { |
235 | return 0; /* Do nothing so that the sense mask will get updated */ | 217 | return 0; /* Do nothing so that the sense mask will get updated */ |
236 | } | 218 | } |
237 | 219 | ||
238 | static void mpc52xx_main_mask(unsigned int virq) | 220 | static void mpc52xx_main_mask(struct irq_data *d) |
239 | { | 221 | { |
240 | int irq; | 222 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
241 | int l2irq; | ||
242 | |||
243 | irq = irq_map[virq].hwirq; | ||
244 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
245 | |||
246 | io_be_setbit(&intr->main_mask, 16 - l2irq); | 223 | io_be_setbit(&intr->main_mask, 16 - l2irq); |
247 | } | 224 | } |
248 | 225 | ||
249 | static void mpc52xx_main_unmask(unsigned int virq) | 226 | static void mpc52xx_main_unmask(struct irq_data *d) |
250 | { | 227 | { |
251 | int irq; | 228 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
252 | int l2irq; | ||
253 | |||
254 | irq = irq_map[virq].hwirq; | ||
255 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
256 | |||
257 | io_be_clrbit(&intr->main_mask, 16 - l2irq); | 229 | io_be_clrbit(&intr->main_mask, 16 - l2irq); |
258 | } | 230 | } |
259 | 231 | ||
260 | static struct irq_chip mpc52xx_main_irqchip = { | 232 | static struct irq_chip mpc52xx_main_irqchip = { |
261 | .name = "MPC52xx Main", | 233 | .name = "MPC52xx Main", |
262 | .mask = mpc52xx_main_mask, | 234 | .irq_mask = mpc52xx_main_mask, |
263 | .mask_ack = mpc52xx_main_mask, | 235 | .irq_mask_ack = mpc52xx_main_mask, |
264 | .unmask = mpc52xx_main_unmask, | 236 | .irq_unmask = mpc52xx_main_unmask, |
265 | .set_type = mpc52xx_null_set_type, | 237 | .irq_set_type = mpc52xx_null_set_type, |
266 | }; | 238 | }; |
267 | 239 | ||
268 | /* | 240 | /* |
269 | * Peripherals interrupt irq_chip | 241 | * Peripherals interrupt irq_chip |
270 | */ | 242 | */ |
271 | static void mpc52xx_periph_mask(unsigned int virq) | 243 | static void mpc52xx_periph_mask(struct irq_data *d) |
272 | { | 244 | { |
273 | int irq; | 245 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
274 | int l2irq; | ||
275 | |||
276 | irq = irq_map[virq].hwirq; | ||
277 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
278 | |||
279 | io_be_setbit(&intr->per_mask, 31 - l2irq); | 246 | io_be_setbit(&intr->per_mask, 31 - l2irq); |
280 | } | 247 | } |
281 | 248 | ||
282 | static void mpc52xx_periph_unmask(unsigned int virq) | 249 | static void mpc52xx_periph_unmask(struct irq_data *d) |
283 | { | 250 | { |
284 | int irq; | 251 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
285 | int l2irq; | ||
286 | |||
287 | irq = irq_map[virq].hwirq; | ||
288 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
289 | |||
290 | io_be_clrbit(&intr->per_mask, 31 - l2irq); | 252 | io_be_clrbit(&intr->per_mask, 31 - l2irq); |
291 | } | 253 | } |
292 | 254 | ||
293 | static struct irq_chip mpc52xx_periph_irqchip = { | 255 | static struct irq_chip mpc52xx_periph_irqchip = { |
294 | .name = "MPC52xx Peripherals", | 256 | .name = "MPC52xx Peripherals", |
295 | .mask = mpc52xx_periph_mask, | 257 | .irq_mask = mpc52xx_periph_mask, |
296 | .mask_ack = mpc52xx_periph_mask, | 258 | .irq_mask_ack = mpc52xx_periph_mask, |
297 | .unmask = mpc52xx_periph_unmask, | 259 | .irq_unmask = mpc52xx_periph_unmask, |
298 | .set_type = mpc52xx_null_set_type, | 260 | .irq_set_type = mpc52xx_null_set_type, |
299 | }; | 261 | }; |
300 | 262 | ||
301 | /* | 263 | /* |
302 | * SDMA interrupt irq_chip | 264 | * SDMA interrupt irq_chip |
303 | */ | 265 | */ |
304 | static void mpc52xx_sdma_mask(unsigned int virq) | 266 | static void mpc52xx_sdma_mask(struct irq_data *d) |
305 | { | 267 | { |
306 | int irq; | 268 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
307 | int l2irq; | ||
308 | |||
309 | irq = irq_map[virq].hwirq; | ||
310 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
311 | |||
312 | io_be_setbit(&sdma->IntMask, l2irq); | 269 | io_be_setbit(&sdma->IntMask, l2irq); |
313 | } | 270 | } |
314 | 271 | ||
315 | static void mpc52xx_sdma_unmask(unsigned int virq) | 272 | static void mpc52xx_sdma_unmask(struct irq_data *d) |
316 | { | 273 | { |
317 | int irq; | 274 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
318 | int l2irq; | ||
319 | |||
320 | irq = irq_map[virq].hwirq; | ||
321 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
322 | |||
323 | io_be_clrbit(&sdma->IntMask, l2irq); | 275 | io_be_clrbit(&sdma->IntMask, l2irq); |
324 | } | 276 | } |
325 | 277 | ||
326 | static void mpc52xx_sdma_ack(unsigned int virq) | 278 | static void mpc52xx_sdma_ack(struct irq_data *d) |
327 | { | 279 | { |
328 | int irq; | 280 | int l2irq = irqd_to_hwirq(d) & MPC52xx_IRQ_L2_MASK; |
329 | int l2irq; | ||
330 | |||
331 | irq = irq_map[virq].hwirq; | ||
332 | l2irq = irq & MPC52xx_IRQ_L2_MASK; | ||
333 | |||
334 | out_be32(&sdma->IntPend, 1 << l2irq); | 281 | out_be32(&sdma->IntPend, 1 << l2irq); |
335 | } | 282 | } |
336 | 283 | ||
337 | static struct irq_chip mpc52xx_sdma_irqchip = { | 284 | static struct irq_chip mpc52xx_sdma_irqchip = { |
338 | .name = "MPC52xx SDMA", | 285 | .name = "MPC52xx SDMA", |
339 | .mask = mpc52xx_sdma_mask, | 286 | .irq_mask = mpc52xx_sdma_mask, |
340 | .unmask = mpc52xx_sdma_unmask, | 287 | .irq_unmask = mpc52xx_sdma_unmask, |
341 | .ack = mpc52xx_sdma_ack, | 288 | .irq_ack = mpc52xx_sdma_ack, |
342 | .set_type = mpc52xx_null_set_type, | 289 | .irq_set_type = mpc52xx_null_set_type, |
343 | }; | 290 | }; |
344 | 291 | ||
345 | /** | 292 | /** |
@@ -414,7 +361,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, | |||
414 | else | 361 | else |
415 | hndlr = handle_level_irq; | 362 | hndlr = handle_level_irq; |
416 | 363 | ||
417 | set_irq_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr); | 364 | irq_set_chip_and_handler(virq, &mpc52xx_extirq_irqchip, hndlr); |
418 | pr_debug("%s: External IRQ%i virq=%x, hw=%x. type=%x\n", | 365 | pr_debug("%s: External IRQ%i virq=%x, hw=%x. type=%x\n", |
419 | __func__, l2irq, virq, (int)irq, type); | 366 | __func__, l2irq, virq, (int)irq, type); |
420 | return 0; | 367 | return 0; |
@@ -431,7 +378,7 @@ static int mpc52xx_irqhost_map(struct irq_host *h, unsigned int virq, | |||
431 | return -EINVAL; | 378 | return -EINVAL; |
432 | } | 379 | } |
433 | 380 | ||
434 | set_irq_chip_and_handler(virq, irqchip, handle_level_irq); | 381 | irq_set_chip_and_handler(virq, irqchip, handle_level_irq); |
435 | pr_debug("%s: virq=%x, l1=%i, l2=%i\n", __func__, virq, l1irq, l2irq); | 382 | pr_debug("%s: virq=%x, l1=%i, l2=%i\n", __func__, virq, l1irq, l2irq); |
436 | 383 | ||
437 | return 0; | 384 | return 0; |
@@ -512,7 +459,7 @@ void __init mpc52xx_init_irq(void) | |||
512 | /** | 459 | /** |
513 | * mpc52xx_get_irq - Get pending interrupt number hook function | 460 | * mpc52xx_get_irq - Get pending interrupt number hook function |
514 | * | 461 | * |
515 | * Called by the interupt handler to determine what IRQ handler needs to be | 462 | * Called by the interrupt handler to determine what IRQ handler needs to be |
516 | * executed. | 463 | * executed. |
517 | * | 464 | * |
518 | * Status of pending interrupts is determined by reading the encoded status | 465 | * Status of pending interrupts is determined by reading the encoded status |
@@ -539,7 +486,7 @@ void __init mpc52xx_init_irq(void) | |||
539 | unsigned int mpc52xx_get_irq(void) | 486 | unsigned int mpc52xx_get_irq(void) |
540 | { | 487 | { |
541 | u32 status; | 488 | u32 status; |
542 | int irq = NO_IRQ_IGNORE; | 489 | int irq; |
543 | 490 | ||
544 | status = in_be32(&intr->enc_status); | 491 | status = in_be32(&intr->enc_status); |
545 | if (status & 0x00000400) { /* critical */ | 492 | if (status & 0x00000400) { /* critical */ |
@@ -562,6 +509,8 @@ unsigned int mpc52xx_get_irq(void) | |||
562 | } else { | 509 | } else { |
563 | irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET); | 510 | irq |= (MPC52xx_IRQ_L1_PERP << MPC52xx_IRQ_L1_OFFSET); |
564 | } | 511 | } |
512 | } else { | ||
513 | return NO_IRQ; | ||
565 | } | 514 | } |
566 | 515 | ||
567 | return irq_linear_revmap(mpc52xx_irqhost, irq); | 516 | return irq_linear_revmap(mpc52xx_irqhost, irq); |
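[Editor's note] In the mpc52xx PIC hunks, the open-coded irq_map[virq].hwirq lookups are replaced by irqd_to_hwirq() applied to the irq_data handed to each callback. A hedged one-controller sketch; the mask register, bit layout, and level-2 mask value are invented:

#include <linux/irq.h>
#include <asm/io.h>

#define EXAMPLE_L2_MASK	0xf	/* hypothetical level-2 number mask */

static u32 __iomem *example_per_mask;	/* hypothetical, mapped at init time */

static void example_periph_mask(struct irq_data *d)
{
	/* hardware interrupt number comes straight from the irq_data now */
	int l2irq = irqd_to_hwirq(d) & EXAMPLE_L2_MASK;

	setbits32(example_per_mask, 1 << (31 - l2irq));
}

static void example_periph_unmask(struct irq_data *d)
{
	int l2irq = irqd_to_hwirq(d) & EXAMPLE_L2_MASK;

	clrbits32(example_per_mask, 1 << (31 - l2irq));
}

Dropping the irq_map[] access removes the per-callback table lookup and keeps the handlers independent of the reverse-map implementation.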
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_pm.c b/arch/powerpc/platforms/52xx/mpc52xx_pm.c index 568cef636275..8310e8b5b57f 100644 --- a/arch/powerpc/platforms/52xx/mpc52xx_pm.c +++ b/arch/powerpc/platforms/52xx/mpc52xx_pm.c | |||
@@ -186,7 +186,7 @@ void mpc52xx_pm_finish(void) | |||
186 | iounmap(mbar); | 186 | iounmap(mbar); |
187 | } | 187 | } |
188 | 188 | ||
189 | static struct platform_suspend_ops mpc52xx_pm_ops = { | 189 | static const struct platform_suspend_ops mpc52xx_pm_ops = { |
190 | .valid = mpc52xx_pm_valid, | 190 | .valid = mpc52xx_pm_valid, |
191 | .prepare = mpc52xx_pm_prepare, | 191 | .prepare = mpc52xx_pm_prepare, |
192 | .enter = mpc52xx_pm_enter, | 192 | .enter = mpc52xx_pm_enter, |
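[Editor's note] The mpc52xx_pm hunk constifies the suspend ops, matching the const-qualified parameter of suspend_set_ops(). A minimal hedged sketch with stub callbacks:

#include <linux/init.h>
#include <linux/suspend.h>

static int example_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_STANDBY;
}

static int example_pm_enter(suspend_state_t state)
{
	/* platform-specific low-power entry would go here */
	return 0;
}

static const struct platform_suspend_ops example_pm_ops = {
	.valid = example_pm_valid,
	.enter = example_pm_enter,
};

static int __init example_pm_init(void)
{
	suspend_set_ops(&example_pm_ops);
	return 0;
}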
diff --git a/arch/powerpc/platforms/82xx/Makefile b/arch/powerpc/platforms/82xx/Makefile index d982793f4dbd..455fe21e37c4 100644 --- a/arch/powerpc/platforms/82xx/Makefile +++ b/arch/powerpc/platforms/82xx/Makefile | |||
@@ -6,4 +6,4 @@ obj-$(CONFIG_CPM2) += pq2.o | |||
6 | obj-$(CONFIG_PQ2_ADS_PCI_PIC) += pq2ads-pci-pic.o | 6 | obj-$(CONFIG_PQ2_ADS_PCI_PIC) += pq2ads-pci-pic.o |
7 | obj-$(CONFIG_PQ2FADS) += pq2fads.o | 7 | obj-$(CONFIG_PQ2FADS) += pq2fads.o |
8 | obj-$(CONFIG_EP8248E) += ep8248e.o | 8 | obj-$(CONFIG_EP8248E) += ep8248e.o |
9 | obj-$(CONFIG_MGCOGE) += mgcoge.o | 9 | obj-$(CONFIG_MGCOGE) += km82xx.o |
diff --git a/arch/powerpc/platforms/82xx/ep8248e.c b/arch/powerpc/platforms/82xx/ep8248e.c index 1565e0446dc8..10ff526cd046 100644 --- a/arch/powerpc/platforms/82xx/ep8248e.c +++ b/arch/powerpc/platforms/82xx/ep8248e.c | |||
@@ -111,8 +111,7 @@ static struct mdiobb_ctrl ep8248e_mdio_ctrl = { | |||
111 | .ops = &ep8248e_mdio_ops, | 111 | .ops = &ep8248e_mdio_ops, |
112 | }; | 112 | }; |
113 | 113 | ||
114 | static int __devinit ep8248e_mdio_probe(struct platform_device *ofdev, | 114 | static int __devinit ep8248e_mdio_probe(struct platform_device *ofdev) |
115 | const struct of_device_id *match) | ||
116 | { | 115 | { |
117 | struct mii_bus *bus; | 116 | struct mii_bus *bus; |
118 | struct resource res; | 117 | struct resource res; |
@@ -167,7 +166,7 @@ static const struct of_device_id ep8248e_mdio_match[] = { | |||
167 | {}, | 166 | {}, |
168 | }; | 167 | }; |
169 | 168 | ||
170 | static struct of_platform_driver ep8248e_mdio_driver = { | 169 | static struct platform_driver ep8248e_mdio_driver = { |
171 | .driver = { | 170 | .driver = { |
172 | .name = "ep8248e-mdio-bitbang", | 171 | .name = "ep8248e-mdio-bitbang", |
173 | .owner = THIS_MODULE, | 172 | .owner = THIS_MODULE, |
@@ -308,7 +307,7 @@ static __initdata struct of_device_id of_bus_ids[] = { | |||
308 | static int __init declare_of_platform_devices(void) | 307 | static int __init declare_of_platform_devices(void) |
309 | { | 308 | { |
310 | of_platform_bus_probe(NULL, of_bus_ids, NULL); | 309 | of_platform_bus_probe(NULL, of_bus_ids, NULL); |
311 | of_register_platform_driver(&ep8248e_mdio_driver); | 310 | platform_driver_register(&ep8248e_mdio_driver); |
312 | 311 | ||
313 | return 0; | 312 | return 0; |
314 | } | 313 | } |
diff --git a/arch/powerpc/platforms/82xx/mgcoge.c b/arch/powerpc/platforms/82xx/km82xx.c index 7a5de9eb3c73..428c5e0a0e75 100644 --- a/arch/powerpc/platforms/82xx/mgcoge.c +++ b/arch/powerpc/platforms/82xx/km82xx.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * Keymile mgcoge support | 2 | * Keymile km82xx support |
3 | * Copyright 2008 DENX Software Engineering GmbH | 3 | * Copyright 2008-2011 DENX Software Engineering GmbH |
4 | * Author: Heiko Schocher <hs@denx.de> | 4 | * Author: Heiko Schocher <hs@denx.de> |
5 | * | 5 | * |
6 | * based on code from: | 6 | * based on code from: |
@@ -31,9 +31,10 @@ | |||
31 | 31 | ||
32 | #include "pq2.h" | 32 | #include "pq2.h" |
33 | 33 | ||
34 | static void __init mgcoge_pic_init(void) | 34 | static void __init km82xx_pic_init(void) |
35 | { | 35 | { |
36 | struct device_node *np = of_find_compatible_node(NULL, NULL, "fsl,pq2-pic"); | 36 | struct device_node *np = of_find_compatible_node(NULL, NULL, |
37 | "fsl,pq2-pic"); | ||
37 | if (!np) { | 38 | if (!np) { |
38 | printk(KERN_ERR "PIC init: can not find cpm-pic node\n"); | 39 | printk(KERN_ERR "PIC init: can not find cpm-pic node\n"); |
39 | return; | 40 | return; |
@@ -47,12 +48,18 @@ struct cpm_pin { | |||
47 | int port, pin, flags; | 48 | int port, pin, flags; |
48 | }; | 49 | }; |
49 | 50 | ||
50 | static __initdata struct cpm_pin mgcoge_pins[] = { | 51 | static __initdata struct cpm_pin km82xx_pins[] = { |
51 | 52 | ||
52 | /* SMC2 */ | 53 | /* SMC2 */ |
53 | {0, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, | 54 | {0, 8, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, |
54 | {0, 9, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, | 55 | {0, 9, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, |
55 | 56 | ||
57 | /* SCC1 */ | ||
58 | {2, 21, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, | ||
59 | {2, 15, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, | ||
60 | {3, 31, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, | ||
61 | {3, 30, CPM_PIN_OUTPUT | CPM_PIN_SECONDARY}, | ||
62 | |||
56 | /* SCC4 */ | 63 | /* SCC4 */ |
57 | {2, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, | 64 | {2, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, |
58 | {2, 24, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, | 65 | {2, 24, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, |
@@ -107,30 +114,49 @@ static __initdata struct cpm_pin mgcoge_pins[] = { | |||
107 | {3, 14, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN}, | 114 | {3, 14, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN}, |
108 | {3, 15, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN}, | 115 | {3, 15, CPM_PIN_INPUT | CPM_PIN_SECONDARY | CPM_PIN_OPENDRAIN}, |
109 | #endif | 116 | #endif |
117 | |||
118 | /* USB */ | ||
119 | {0, 10, CPM_PIN_OUTPUT | CPM_PIN_GPIO}, /* FULL_SPEED */ | ||
120 | {0, 11, CPM_PIN_OUTPUT | CPM_PIN_GPIO}, /*/SLAVE */ | ||
121 | {2, 10, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXN */ | ||
122 | {2, 11, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXP */ | ||
123 | {2, 20, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* /OE */ | ||
124 | {2, 27, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXCLK */ | ||
125 | {3, 23, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* TXP */ | ||
126 | {3, 24, CPM_PIN_OUTPUT | CPM_PIN_PRIMARY}, /* TXN */ | ||
127 | {3, 25, CPM_PIN_INPUT | CPM_PIN_PRIMARY}, /* RXD */ | ||
110 | }; | 128 | }; |
111 | 129 | ||
112 | static void __init init_ioports(void) | 130 | static void __init init_ioports(void) |
113 | { | 131 | { |
114 | int i; | 132 | int i; |
115 | 133 | ||
116 | for (i = 0; i < ARRAY_SIZE(mgcoge_pins); i++) { | 134 | for (i = 0; i < ARRAY_SIZE(km82xx_pins); i++) { |
117 | const struct cpm_pin *pin = &mgcoge_pins[i]; | 135 | const struct cpm_pin *pin = &km82xx_pins[i]; |
118 | cpm2_set_pin(pin->port, pin->pin, pin->flags); | 136 | cpm2_set_pin(pin->port, pin->pin, pin->flags); |
119 | } | 137 | } |
120 | 138 | ||
121 | cpm2_smc_clk_setup(CPM_CLK_SMC2, CPM_BRG8); | 139 | cpm2_smc_clk_setup(CPM_CLK_SMC2, CPM_BRG8); |
140 | cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_RX); | ||
141 | cpm2_clk_setup(CPM_CLK_SCC1, CPM_CLK11, CPM_CLK_TX); | ||
142 | cpm2_clk_setup(CPM_CLK_SCC3, CPM_CLK5, CPM_CLK_RTX); | ||
122 | cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK7, CPM_CLK_RX); | 143 | cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK7, CPM_CLK_RX); |
123 | cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK8, CPM_CLK_TX); | 144 | cpm2_clk_setup(CPM_CLK_SCC4, CPM_CLK8, CPM_CLK_TX); |
124 | cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK10, CPM_CLK_RX); | 145 | cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK10, CPM_CLK_RX); |
125 | cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK9, CPM_CLK_TX); | 146 | cpm2_clk_setup(CPM_CLK_FCC1, CPM_CLK9, CPM_CLK_TX); |
126 | cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX); | 147 | cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK13, CPM_CLK_RX); |
127 | cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX); | 148 | cpm2_clk_setup(CPM_CLK_FCC2, CPM_CLK14, CPM_CLK_TX); |
149 | |||
150 | /* Force USB FULL SPEED bit to '1' */ | ||
151 | setbits32(&cpm2_immr->im_ioport.iop_pdata, 1 << (31 - 10)); | ||
152 | /* clear USB_SLAVE */ | ||
153 | clrbits32(&cpm2_immr->im_ioport.iop_pdata, 1 << (31 - 11)); | ||
128 | } | 154 | } |
129 | 155 | ||
130 | static void __init mgcoge_setup_arch(void) | 156 | static void __init km82xx_setup_arch(void) |
131 | { | 157 | { |
132 | if (ppc_md.progress) | 158 | if (ppc_md.progress) |
133 | ppc_md.progress("mgcoge_setup_arch()", 0); | 159 | ppc_md.progress("km82xx_setup_arch()", 0); |
134 | 160 | ||
135 | cpm2_reset(); | 161 | cpm2_reset(); |
136 | 162 | ||
@@ -142,7 +168,7 @@ static void __init mgcoge_setup_arch(void) | |||
142 | init_ioports(); | 168 | init_ioports(); |
143 | 169 | ||
144 | if (ppc_md.progress) | 170 | if (ppc_md.progress) |
145 | ppc_md.progress("mgcoge_setup_arch(), finish", 0); | 171 | ppc_md.progress("km82xx_setup_arch(), finish", 0); |
146 | } | 172 | } |
147 | 173 | ||
148 | static __initdata struct of_device_id of_bus_ids[] = { | 174 | static __initdata struct of_device_id of_bus_ids[] = { |
@@ -156,23 +182,23 @@ static int __init declare_of_platform_devices(void) | |||
156 | 182 | ||
157 | return 0; | 183 | return 0; |
158 | } | 184 | } |
159 | machine_device_initcall(mgcoge, declare_of_platform_devices); | 185 | machine_device_initcall(km82xx, declare_of_platform_devices); |
160 | 186 | ||
161 | /* | 187 | /* |
162 | * Called very early, device-tree isn't unflattened | 188 | * Called very early, device-tree isn't unflattened |
163 | */ | 189 | */ |
164 | static int __init mgcoge_probe(void) | 190 | static int __init km82xx_probe(void) |
165 | { | 191 | { |
166 | unsigned long root = of_get_flat_dt_root(); | 192 | unsigned long root = of_get_flat_dt_root(); |
167 | return of_flat_dt_is_compatible(root, "keymile,mgcoge"); | 193 | return of_flat_dt_is_compatible(root, "keymile,km82xx"); |
168 | } | 194 | } |
169 | 195 | ||
170 | define_machine(mgcoge) | 196 | define_machine(km82xx) |
171 | { | 197 | { |
172 | .name = "Keymile MGCOGE", | 198 | .name = "Keymile km82xx", |
173 | .probe = mgcoge_probe, | 199 | .probe = km82xx_probe, |
174 | .setup_arch = mgcoge_setup_arch, | 200 | .setup_arch = km82xx_setup_arch, |
175 | .init_IRQ = mgcoge_pic_init, | 201 | .init_IRQ = km82xx_pic_init, |
176 | .get_irq = cpm2_get_irq, | 202 | .get_irq = cpm2_get_irq, |
177 | .calibrate_decr = generic_calibrate_decr, | 203 | .calibrate_decr = generic_calibrate_decr, |
178 | .restart = pq2_restart, | 204 | .restart = pq2_restart, |
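[Editor's note] The km82xx rename keeps the usual early-boot shape: a probe routine checks the flattened device tree root for a compatible string before the machine description is selected. A hedged sketch of that pattern for an imaginary board:

#include <linux/of_fdt.h>
#include <asm/machdep.h>
#include <asm/time.h>

static int __init example_board_probe(void)
{
	unsigned long root = of_get_flat_dt_root();

	/* called very early; the device tree is still flattened */
	return of_flat_dt_is_compatible(root, "vendor,example-board");
}

define_machine(example_board) {
	.name		= "Example board",
	.probe		= example_board_probe,
	.calibrate_decr	= generic_calibrate_decr,
};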
diff --git a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c index 5a55d87d6bd6..8ccf9ed62fe2 100644 --- a/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c +++ b/arch/powerpc/platforms/82xx/pq2ads-pci-pic.c | |||
@@ -39,10 +39,10 @@ struct pq2ads_pci_pic { | |||
39 | 39 | ||
40 | #define NUM_IRQS 32 | 40 | #define NUM_IRQS 32 |
41 | 41 | ||
42 | static void pq2ads_pci_mask_irq(unsigned int virq) | 42 | static void pq2ads_pci_mask_irq(struct irq_data *d) |
43 | { | 43 | { |
44 | struct pq2ads_pci_pic *priv = get_irq_chip_data(virq); | 44 | struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d); |
45 | int irq = NUM_IRQS - virq_to_hw(virq) - 1; | 45 | int irq = NUM_IRQS - irqd_to_hwirq(d) - 1; |
46 | 46 | ||
47 | if (irq != -1) { | 47 | if (irq != -1) { |
48 | unsigned long flags; | 48 | unsigned long flags; |
@@ -55,10 +55,10 @@ static void pq2ads_pci_mask_irq(unsigned int virq) | |||
55 | } | 55 | } |
56 | } | 56 | } |
57 | 57 | ||
58 | static void pq2ads_pci_unmask_irq(unsigned int virq) | 58 | static void pq2ads_pci_unmask_irq(struct irq_data *d) |
59 | { | 59 | { |
60 | struct pq2ads_pci_pic *priv = get_irq_chip_data(virq); | 60 | struct pq2ads_pci_pic *priv = irq_data_get_irq_chip_data(d); |
61 | int irq = NUM_IRQS - virq_to_hw(virq) - 1; | 61 | int irq = NUM_IRQS - irqd_to_hwirq(d) - 1; |
62 | 62 | ||
63 | if (irq != -1) { | 63 | if (irq != -1) { |
64 | unsigned long flags; | 64 | unsigned long flags; |
@@ -71,18 +71,17 @@ static void pq2ads_pci_unmask_irq(unsigned int virq) | |||
71 | 71 | ||
72 | static struct irq_chip pq2ads_pci_ic = { | 72 | static struct irq_chip pq2ads_pci_ic = { |
73 | .name = "PQ2 ADS PCI", | 73 | .name = "PQ2 ADS PCI", |
74 | .end = pq2ads_pci_unmask_irq, | 74 | .irq_mask = pq2ads_pci_mask_irq, |
75 | .mask = pq2ads_pci_mask_irq, | 75 | .irq_mask_ack = pq2ads_pci_mask_irq, |
76 | .mask_ack = pq2ads_pci_mask_irq, | 76 | .irq_ack = pq2ads_pci_mask_irq, |
77 | .ack = pq2ads_pci_mask_irq, | 77 | .irq_unmask = pq2ads_pci_unmask_irq, |
78 | .unmask = pq2ads_pci_unmask_irq, | 78 | .irq_enable = pq2ads_pci_unmask_irq, |
79 | .enable = pq2ads_pci_unmask_irq, | 79 | .irq_disable = pq2ads_pci_mask_irq |
80 | .disable = pq2ads_pci_mask_irq | ||
81 | }; | 80 | }; |
82 | 81 | ||
83 | static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) | 82 | static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) |
84 | { | 83 | { |
85 | struct pq2ads_pci_pic *priv = desc->handler_data; | 84 | struct pq2ads_pci_pic *priv = irq_desc_get_handler_data(desc); |
86 | u32 stat, mask, pend; | 85 | u32 stat, mask, pend; |
87 | int bit; | 86 | int bit; |
88 | 87 | ||
@@ -107,22 +106,14 @@ static void pq2ads_pci_irq_demux(unsigned int irq, struct irq_desc *desc) | |||
107 | static int pci_pic_host_map(struct irq_host *h, unsigned int virq, | 106 | static int pci_pic_host_map(struct irq_host *h, unsigned int virq, |
108 | irq_hw_number_t hw) | 107 | irq_hw_number_t hw) |
109 | { | 108 | { |
110 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 109 | irq_set_status_flags(virq, IRQ_LEVEL); |
111 | set_irq_chip_data(virq, h->host_data); | 110 | irq_set_chip_data(virq, h->host_data); |
112 | set_irq_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq); | 111 | irq_set_chip_and_handler(virq, &pq2ads_pci_ic, handle_level_irq); |
113 | return 0; | 112 | return 0; |
114 | } | 113 | } |
115 | 114 | ||
116 | static void pci_host_unmap(struct irq_host *h, unsigned int virq) | ||
117 | { | ||
118 | /* remove chip and handler */ | ||
119 | set_irq_chip_data(virq, NULL); | ||
120 | set_irq_chip(virq, NULL); | ||
121 | } | ||
122 | |||
123 | static struct irq_host_ops pci_pic_host_ops = { | 115 | static struct irq_host_ops pci_pic_host_ops = { |
124 | .map = pci_pic_host_map, | 116 | .map = pci_pic_host_map, |
125 | .unmap = pci_host_unmap, | ||
126 | }; | 117 | }; |
127 | 118 | ||
128 | int __init pq2ads_pci_init_irq(void) | 119 | int __init pq2ads_pci_init_irq(void) |
@@ -176,8 +167,8 @@ int __init pq2ads_pci_init_irq(void) | |||
176 | 167 | ||
177 | priv->host = host; | 168 | priv->host = host; |
178 | host->host_data = priv; | 169 | host->host_data = priv; |
179 | set_irq_data(irq, priv); | 170 | irq_set_handler_data(irq, priv); |
180 | set_irq_chained_handler(irq, pq2ads_pci_irq_demux); | 171 | irq_set_chained_handler(irq, pq2ads_pci_irq_demux); |
181 | 172 | ||
182 | of_node_put(np); | 173 | of_node_put(np); |
183 | return 0; | 174 | return 0; |
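[Editor's note] The PQ2 ADS PCI PIC hunks rewire the cascade through irq_set_handler_data()/irq_set_chained_handler(), and the demux handler retrieves its private data with irq_desc_get_handler_data(). A hedged sketch of that flow; the types and the secondary-interrupt lookup are placeholders:

#include <linux/irq.h>

struct example_cascade {
	struct irq_host *host;	/* secondary controller's irq host */
};

static void example_cascade_demux(unsigned int irq, struct irq_desc *desc)
{
	struct example_cascade *priv = irq_desc_get_handler_data(desc);
	unsigned int cascade_irq;

	/* look up and handle the pending secondary interrupt (hw number 0 as a stand-in) */
	cascade_irq = irq_linear_revmap(priv->host, 0);
	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq);
}

static void example_cascade_setup(unsigned int parent_virq,
				  struct example_cascade *priv)
{
	irq_set_handler_data(parent_virq, priv);
	irq_set_chained_handler(parent_virq, example_cascade_demux);
}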
diff --git a/arch/powerpc/platforms/83xx/Kconfig b/arch/powerpc/platforms/83xx/Kconfig index 021763a32c2f..73f4135f3a1a 100644 --- a/arch/powerpc/platforms/83xx/Kconfig +++ b/arch/powerpc/platforms/83xx/Kconfig | |||
@@ -10,12 +10,12 @@ menuconfig PPC_83xx | |||
10 | if PPC_83xx | 10 | if PPC_83xx |
11 | 11 | ||
12 | config MPC830x_RDB | 12 | config MPC830x_RDB |
13 | bool "Freescale MPC830x RDB" | 13 | bool "Freescale MPC830x RDB and derivatives" |
14 | select DEFAULT_UIMAGE | 14 | select DEFAULT_UIMAGE |
15 | select PPC_MPC831x | 15 | select PPC_MPC831x |
16 | select FSL_GTM | 16 | select FSL_GTM |
17 | help | 17 | help |
18 | This option enables support for the MPC8308 RDB board. | 18 | This option enables support for the MPC8308 RDB and MPC8308 P1M boards. |
19 | 19 | ||
20 | config MPC831x_RDB | 20 | config MPC831x_RDB |
21 | bool "Freescale MPC831x RDB" | 21 | bool "Freescale MPC831x RDB" |
diff --git a/arch/powerpc/platforms/83xx/Makefile b/arch/powerpc/platforms/83xx/Makefile index 6e8bbbbcfdf8..ed95bfcbcbff 100644 --- a/arch/powerpc/platforms/83xx/Makefile +++ b/arch/powerpc/platforms/83xx/Makefile | |||
@@ -16,4 +16,4 @@ obj-$(CONFIG_MPC837x_MDS) += mpc837x_mds.o | |||
16 | obj-$(CONFIG_SBC834x) += sbc834x.o | 16 | obj-$(CONFIG_SBC834x) += sbc834x.o |
17 | obj-$(CONFIG_MPC837x_RDB) += mpc837x_rdb.o | 17 | obj-$(CONFIG_MPC837x_RDB) += mpc837x_rdb.o |
18 | obj-$(CONFIG_ASP834x) += asp834x.o | 18 | obj-$(CONFIG_ASP834x) += asp834x.o |
19 | obj-$(CONFIG_KMETER1) += kmeter1.o | 19 | obj-$(CONFIG_KMETER1) += km83xx.o |
diff --git a/arch/powerpc/platforms/83xx/kmeter1.c b/arch/powerpc/platforms/83xx/km83xx.c index 903acfd851ac..a2b9b9ef1240 100644 --- a/arch/powerpc/platforms/83xx/kmeter1.c +++ b/arch/powerpc/platforms/83xx/km83xx.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright 2008 DENX Software Engineering GmbH | 2 | * Copyright 2008-2011 DENX Software Engineering GmbH |
3 | * Author: Heiko Schocher <hs@denx.de> | 3 | * Author: Heiko Schocher <hs@denx.de> |
4 | * | 4 | * |
5 | * Description: | 5 | * Description: |
@@ -49,12 +49,12 @@ | |||
49 | * Setup the architecture | 49 | * Setup the architecture |
50 | * | 50 | * |
51 | */ | 51 | */ |
52 | static void __init kmeter1_setup_arch(void) | 52 | static void __init mpc83xx_km_setup_arch(void) |
53 | { | 53 | { |
54 | struct device_node *np; | 54 | struct device_node *np; |
55 | 55 | ||
56 | if (ppc_md.progress) | 56 | if (ppc_md.progress) |
57 | ppc_md.progress("kmeter1_setup_arch()", 0); | 57 | ppc_md.progress("kmpbec83xx_setup_arch()", 0); |
58 | 58 | ||
59 | #ifdef CONFIG_PCI | 59 | #ifdef CONFIG_PCI |
60 | for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") | 60 | for_each_compatible_node(np, "pci", "fsl,mpc8349-pci") |
@@ -69,6 +69,9 @@ static void __init kmeter1_setup_arch(void) | |||
69 | par_io_init(np); | 69 | par_io_init(np); |
70 | of_node_put(np); | 70 | of_node_put(np); |
71 | 71 | ||
72 | for_each_node_by_name(np, "spi") | ||
73 | par_io_of_config(np); | ||
74 | |||
72 | for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;) | 75 | for (np = NULL; (np = of_find_node_by_name(np, "ucc")) != NULL;) |
73 | par_io_of_config(np); | 76 | par_io_of_config(np); |
74 | } | 77 | } |
@@ -119,7 +122,7 @@ static void __init kmeter1_setup_arch(void) | |||
119 | #endif /* CONFIG_QUICC_ENGINE */ | 122 | #endif /* CONFIG_QUICC_ENGINE */ |
120 | } | 123 | } |
121 | 124 | ||
122 | static struct of_device_id kmeter_ids[] = { | 125 | static struct of_device_id kmpbec83xx_ids[] = { |
123 | { .type = "soc", }, | 126 | { .type = "soc", }, |
124 | { .compatible = "soc", }, | 127 | { .compatible = "soc", }, |
125 | { .compatible = "simple-bus", }, | 128 | { .compatible = "simple-bus", }, |
@@ -131,13 +134,13 @@ static struct of_device_id kmeter_ids[] = { | |||
131 | static int __init kmeter_declare_of_platform_devices(void) | 134 | static int __init kmeter_declare_of_platform_devices(void) |
132 | { | 135 | { |
133 | /* Publish the QE devices */ | 136 | /* Publish the QE devices */ |
134 | of_platform_bus_probe(NULL, kmeter_ids, NULL); | 137 | of_platform_bus_probe(NULL, kmpbec83xx_ids, NULL); |
135 | 138 | ||
136 | return 0; | 139 | return 0; |
137 | } | 140 | } |
138 | machine_device_initcall(kmeter1, kmeter_declare_of_platform_devices); | 141 | machine_device_initcall(mpc83xx_km, kmeter_declare_of_platform_devices); |
139 | 142 | ||
140 | static void __init kmeter1_init_IRQ(void) | 143 | static void __init mpc83xx_km_init_IRQ(void) |
141 | { | 144 | { |
142 | struct device_node *np; | 145 | struct device_node *np; |
143 | 146 | ||
@@ -168,21 +171,34 @@ static void __init kmeter1_init_IRQ(void) | |||
168 | #endif /* CONFIG_QUICC_ENGINE */ | 171 | #endif /* CONFIG_QUICC_ENGINE */ |
169 | } | 172 | } |
170 | 173 | ||
174 | /* list of the supported boards */ | ||
175 | static char *board[] __initdata = { | ||
176 | "Keymile,KMETER1", | ||
177 | "Keymile,kmpbec8321", | ||
178 | NULL | ||
179 | }; | ||
180 | |||
171 | /* | 181 | /* |
172 | * Called very early, MMU is off, device-tree isn't unflattened | 182 | * Called very early, MMU is off, device-tree isn't unflattened |
173 | */ | 183 | */ |
174 | static int __init kmeter1_probe(void) | 184 | static int __init mpc83xx_km_probe(void) |
175 | { | 185 | { |
176 | unsigned long root = of_get_flat_dt_root(); | 186 | unsigned long node = of_get_flat_dt_root(); |
187 | int i = 0; | ||
177 | 188 | ||
178 | return of_flat_dt_is_compatible(root, "keymile,KMETER1"); | 189 | while (board[i]) { |
190 | if (of_flat_dt_is_compatible(node, board[i])) | ||
191 | break; | ||
192 | i++; | ||
193 | } | ||
194 | return (board[i] != NULL); | ||
179 | } | 195 | } |
180 | 196 | ||
181 | define_machine(kmeter1) { | 197 | define_machine(mpc83xx_km) { |
182 | .name = "KMETER1", | 198 | .name = "mpc83xx-km-platform", |
183 | .probe = kmeter1_probe, | 199 | .probe = mpc83xx_km_probe, |
184 | .setup_arch = kmeter1_setup_arch, | 200 | .setup_arch = mpc83xx_km_setup_arch, |
185 | .init_IRQ = kmeter1_init_IRQ, | 201 | .init_IRQ = mpc83xx_km_init_IRQ, |
186 | .get_irq = ipic_get_irq, | 202 | .get_irq = ipic_get_irq, |
187 | .restart = mpc83xx_restart, | 203 | .restart = mpc83xx_restart, |
188 | .time_init = mpc83xx_time_init, | 204 | .time_init = mpc83xx_time_init, |
diff --git a/arch/powerpc/platforms/83xx/mpc830x_rdb.c b/arch/powerpc/platforms/83xx/mpc830x_rdb.c index ac102ee9abe8..d0c4e15b7794 100644 --- a/arch/powerpc/platforms/83xx/mpc830x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc830x_rdb.c | |||
@@ -57,15 +57,19 @@ static void __init mpc830x_rdb_init_IRQ(void) | |||
57 | ipic_set_default_priority(); | 57 | ipic_set_default_priority(); |
58 | } | 58 | } |
59 | 59 | ||
60 | static const char *board[] __initdata = { | ||
61 | "MPC8308RDB", | ||
62 | "fsl,mpc8308rdb", | ||
63 | "denx,mpc8308_p1m", | ||
64 | NULL | ||
65 | }; | ||
66 | |||
60 | /* | 67 | /* |
61 | * Called very early, MMU is off, device-tree isn't unflattened | 68 | * Called very early, MMU is off, device-tree isn't unflattened |
62 | */ | 69 | */ |
63 | static int __init mpc830x_rdb_probe(void) | 70 | static int __init mpc830x_rdb_probe(void) |
64 | { | 71 | { |
65 | unsigned long root = of_get_flat_dt_root(); | 72 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
66 | |||
67 | return of_flat_dt_is_compatible(root, "MPC8308RDB") || | ||
68 | of_flat_dt_is_compatible(root, "fsl,mpc8308rdb"); | ||
69 | } | 73 | } |
70 | 74 | ||
71 | static struct of_device_id __initdata of_bus_ids[] = { | 75 | static struct of_device_id __initdata of_bus_ids[] = { |
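[Editor's note] The mpc830x_rdb probe now matches the flat-tree root against a NULL-terminated list via of_flat_dt_match() instead of chaining of_flat_dt_is_compatible() calls, which also makes adding a board a one-line change. A hedged sketch with made-up compatible strings:

#include <linux/of_fdt.h>

static const char *example_rdb_boards[] __initdata = {
	"vendor,board-a",
	"vendor,board-b",
	NULL
};

static int __init example_rdb_probe(void)
{
	/* non-zero if the root node matches any entry in the list */
	return of_flat_dt_match(of_get_flat_dt_root(), example_rdb_boards);
}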
diff --git a/arch/powerpc/platforms/83xx/mpc831x_rdb.c b/arch/powerpc/platforms/83xx/mpc831x_rdb.c index ae525e4745d2..f859ead49a8d 100644 --- a/arch/powerpc/platforms/83xx/mpc831x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc831x_rdb.c | |||
@@ -60,15 +60,18 @@ static void __init mpc831x_rdb_init_IRQ(void) | |||
60 | ipic_set_default_priority(); | 60 | ipic_set_default_priority(); |
61 | } | 61 | } |
62 | 62 | ||
63 | static const char *board[] __initdata = { | ||
64 | "MPC8313ERDB", | ||
65 | "fsl,mpc8315erdb", | ||
66 | NULL | ||
67 | }; | ||
68 | |||
63 | /* | 69 | /* |
64 | * Called very early, MMU is off, device-tree isn't unflattened | 70 | * Called very early, MMU is off, device-tree isn't unflattened |
65 | */ | 71 | */ |
66 | static int __init mpc831x_rdb_probe(void) | 72 | static int __init mpc831x_rdb_probe(void) |
67 | { | 73 | { |
68 | unsigned long root = of_get_flat_dt_root(); | 74 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
69 | |||
70 | return of_flat_dt_is_compatible(root, "MPC8313ERDB") || | ||
71 | of_flat_dt_is_compatible(root, "fsl,mpc8315erdb"); | ||
72 | } | 75 | } |
73 | 76 | ||
74 | static struct of_device_id __initdata of_bus_ids[] = { | 77 | static struct of_device_id __initdata of_bus_ids[] = { |
diff --git a/arch/powerpc/platforms/83xx/mpc837x_rdb.c b/arch/powerpc/platforms/83xx/mpc837x_rdb.c index 910caa6b5810..7bafbf2ec0f9 100644 --- a/arch/powerpc/platforms/83xx/mpc837x_rdb.c +++ b/arch/powerpc/platforms/83xx/mpc837x_rdb.c | |||
@@ -101,17 +101,20 @@ static void __init mpc837x_rdb_init_IRQ(void) | |||
101 | ipic_set_default_priority(); | 101 | ipic_set_default_priority(); |
102 | } | 102 | } |
103 | 103 | ||
104 | static const char *board[] __initdata = { | ||
105 | "fsl,mpc8377rdb", | ||
106 | "fsl,mpc8378rdb", | ||
107 | "fsl,mpc8379rdb", | ||
108 | "fsl,mpc8377wlan", | ||
109 | NULL | ||
110 | }; | ||
111 | |||
104 | /* | 112 | /* |
105 | * Called very early, MMU is off, device-tree isn't unflattened | 113 | * Called very early, MMU is off, device-tree isn't unflattened |
106 | */ | 114 | */ |
107 | static int __init mpc837x_rdb_probe(void) | 115 | static int __init mpc837x_rdb_probe(void) |
108 | { | 116 | { |
109 | unsigned long root = of_get_flat_dt_root(); | 117 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
110 | |||
111 | return of_flat_dt_is_compatible(root, "fsl,mpc8377rdb") || | ||
112 | of_flat_dt_is_compatible(root, "fsl,mpc8378rdb") || | ||
113 | of_flat_dt_is_compatible(root, "fsl,mpc8379rdb") || | ||
114 | of_flat_dt_is_compatible(root, "fsl,mpc8377wlan"); | ||
115 | } | 118 | } |
116 | 119 | ||
117 | define_machine(mpc837x_rdb) { | 120 | define_machine(mpc837x_rdb) { |
diff --git a/arch/powerpc/platforms/83xx/mpc83xx.h b/arch/powerpc/platforms/83xx/mpc83xx.h index 0fea8811d45b..82a434510d83 100644 --- a/arch/powerpc/platforms/83xx/mpc83xx.h +++ b/arch/powerpc/platforms/83xx/mpc83xx.h | |||
@@ -35,6 +35,8 @@ | |||
35 | 35 | ||
36 | /* system i/o configuration register high */ | 36 | /* system i/o configuration register high */ |
37 | #define MPC83XX_SICRH_OFFS 0x118 | 37 | #define MPC83XX_SICRH_OFFS 0x118 |
38 | #define MPC8308_SICRH_USB_MASK 0x000c0000 | ||
39 | #define MPC8308_SICRH_USB_ULPI 0x00040000 | ||
38 | #define MPC834X_SICRH_USB_UTMI 0x00020000 | 40 | #define MPC834X_SICRH_USB_UTMI 0x00020000 |
39 | #define MPC831X_SICRH_USB_MASK 0x000000e0 | 41 | #define MPC831X_SICRH_USB_MASK 0x000000e0 |
40 | #define MPC831X_SICRH_USB_ULPI 0x000000a0 | 42 | #define MPC831X_SICRH_USB_ULPI 0x000000a0 |
diff --git a/arch/powerpc/platforms/83xx/suspend-asm.S b/arch/powerpc/platforms/83xx/suspend-asm.S index 1930543c98d3..3d1ecd211776 100644 --- a/arch/powerpc/platforms/83xx/suspend-asm.S +++ b/arch/powerpc/platforms/83xx/suspend-asm.S | |||
@@ -231,7 +231,7 @@ _GLOBAL(mpc83xx_enter_deep_sleep) | |||
231 | ori r4, r4, 0x002a | 231 | ori r4, r4, 0x002a |
232 | mtspr SPRN_DBAT0L, r4 | 232 | mtspr SPRN_DBAT0L, r4 |
233 | lis r8, TMP_VIRT_IMMR@h | 233 | lis r8, TMP_VIRT_IMMR@h |
234 | ori r4, r8, 0x001e /* 1 MByte accessable from Kernel Space only */ | 234 | ori r4, r8, 0x001e /* 1 MByte accessible from Kernel Space only */ |
235 | mtspr SPRN_DBAT0U, r4 | 235 | mtspr SPRN_DBAT0U, r4 |
236 | isync | 236 | isync |
237 | 237 | ||
@@ -241,7 +241,7 @@ _GLOBAL(mpc83xx_enter_deep_sleep) | |||
241 | ori r4, r4, 0x002a | 241 | ori r4, r4, 0x002a |
242 | mtspr SPRN_DBAT1L, r4 | 242 | mtspr SPRN_DBAT1L, r4 |
243 | lis r9, (TMP_VIRT_IMMR + 0x01000000)@h | 243 | lis r9, (TMP_VIRT_IMMR + 0x01000000)@h |
244 | ori r4, r9, 0x001e /* 1 MByte accessable from Kernel Space only */ | 244 | ori r4, r9, 0x001e /* 1 MByte accessible from Kernel Space only */ |
245 | mtspr SPRN_DBAT1U, r4 | 245 | mtspr SPRN_DBAT1U, r4 |
246 | isync | 246 | isync |
247 | 247 | ||
@@ -253,7 +253,7 @@ _GLOBAL(mpc83xx_enter_deep_sleep) | |||
253 | li r4, 0x0002 | 253 | li r4, 0x0002 |
254 | mtspr SPRN_DBAT2L, r4 | 254 | mtspr SPRN_DBAT2L, r4 |
255 | lis r4, KERNELBASE@h | 255 | lis r4, KERNELBASE@h |
256 | ori r4, r4, 0x001e /* 1 MByte accessable from Kernel Space only */ | 256 | ori r4, r4, 0x001e /* 1 MByte accessible from Kernel Space only */ |
257 | mtspr SPRN_DBAT2U, r4 | 257 | mtspr SPRN_DBAT2U, r4 |
258 | isync | 258 | isync |
259 | 259 | ||
diff --git a/arch/powerpc/platforms/83xx/suspend.c b/arch/powerpc/platforms/83xx/suspend.c index 75ae77f1af6a..104faa8aa23c 100644 --- a/arch/powerpc/platforms/83xx/suspend.c +++ b/arch/powerpc/platforms/83xx/suspend.c | |||
@@ -311,21 +311,28 @@ static int mpc83xx_is_pci_agent(void) | |||
311 | return ret; | 311 | return ret; |
312 | } | 312 | } |
313 | 313 | ||
314 | static struct platform_suspend_ops mpc83xx_suspend_ops = { | 314 | static const struct platform_suspend_ops mpc83xx_suspend_ops = { |
315 | .valid = mpc83xx_suspend_valid, | 315 | .valid = mpc83xx_suspend_valid, |
316 | .begin = mpc83xx_suspend_begin, | 316 | .begin = mpc83xx_suspend_begin, |
317 | .enter = mpc83xx_suspend_enter, | 317 | .enter = mpc83xx_suspend_enter, |
318 | .end = mpc83xx_suspend_end, | 318 | .end = mpc83xx_suspend_end, |
319 | }; | 319 | }; |
320 | 320 | ||
321 | static int pmc_probe(struct platform_device *ofdev, | 321 | static struct of_device_id pmc_match[]; |
322 | const struct of_device_id *match) | 322 | static int pmc_probe(struct platform_device *ofdev) |
323 | { | 323 | { |
324 | const struct of_device_id *match; | ||
324 | struct device_node *np = ofdev->dev.of_node; | 325 | struct device_node *np = ofdev->dev.of_node; |
325 | struct resource res; | 326 | struct resource res; |
326 | struct pmc_type *type = match->data; | 327 | struct pmc_type *type; |
327 | int ret = 0; | 328 | int ret = 0; |
328 | 329 | ||
330 | match = of_match_device(pmc_match, &ofdev->dev); | ||
331 | if (!match) | ||
332 | return -EINVAL; | ||
333 | |||
334 | type = match->data; | ||
335 | |||
329 | if (!of_device_is_available(np)) | 336 | if (!of_device_is_available(np)) |
330 | return -ENODEV; | 337 | return -ENODEV; |
331 | 338 | ||
@@ -422,7 +429,7 @@ static struct of_device_id pmc_match[] = { | |||
422 | {} | 429 | {} |
423 | }; | 430 | }; |
424 | 431 | ||
425 | static struct of_platform_driver pmc_driver = { | 432 | static struct platform_driver pmc_driver = { |
426 | .driver = { | 433 | .driver = { |
427 | .name = "mpc83xx-pmc", | 434 | .name = "mpc83xx-pmc", |
428 | .owner = THIS_MODULE, | 435 | .owner = THIS_MODULE, |
@@ -434,7 +441,7 @@ static struct of_platform_driver pmc_driver = { | |||
434 | 441 | ||
435 | static int pmc_init(void) | 442 | static int pmc_init(void) |
436 | { | 443 | { |
437 | return of_register_platform_driver(&pmc_driver); | 444 | return platform_driver_register(&pmc_driver); |
438 | } | 445 | } |
439 | 446 | ||
440 | module_init(pmc_init); | 447 | module_init(pmc_init); |
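[Editor's note] With the of_device_id argument gone from probe(), the 83xx PMC driver re-derives its per-SoC data by calling of_match_device() against its own match table. A hedged sketch of that idiom with hypothetical entries:

#include <linux/errno.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct example_type {
	int has_deep_sleep;	/* hypothetical per-SoC flag */
};

static struct example_type example_type_a = { .has_deep_sleep = 1 };

static struct of_device_id example_pmc_match[] = {
	{ .compatible = "vendor,pmc-a", .data = &example_type_a, },
	{}
};

static int example_pmc_probe(struct platform_device *ofdev)
{
	const struct of_device_id *match;
	const struct example_type *type;

	/* recover the match entry that bound this device to the driver */
	match = of_match_device(example_pmc_match, &ofdev->dev);
	if (!match)
		return -EINVAL;

	type = match->data;
	return type->has_deep_sleep ? 0 : -ENODEV;
}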
diff --git a/arch/powerpc/platforms/83xx/usb.c b/arch/powerpc/platforms/83xx/usb.c index 3ba4bb7d41bb..2c64164722d0 100644 --- a/arch/powerpc/platforms/83xx/usb.c +++ b/arch/powerpc/platforms/83xx/usb.c | |||
@@ -127,7 +127,8 @@ int mpc831x_usb_cfg(void) | |||
127 | 127 | ||
128 | /* Configure clock */ | 128 | /* Configure clock */ |
129 | immr_node = of_get_parent(np); | 129 | immr_node = of_get_parent(np); |
130 | if (immr_node && of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) | 130 | if (immr_node && (of_device_is_compatible(immr_node, "fsl,mpc8315-immr") || |
131 | of_device_is_compatible(immr_node, "fsl,mpc8308-immr"))) | ||
131 | clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, | 132 | clrsetbits_be32(immap + MPC83XX_SCCR_OFFS, |
132 | MPC8315_SCCR_USB_MASK, | 133 | MPC8315_SCCR_USB_MASK, |
133 | MPC8315_SCCR_USB_DRCM_01); | 134 | MPC8315_SCCR_USB_DRCM_01); |
@@ -138,7 +139,11 @@ int mpc831x_usb_cfg(void) | |||
138 | 139 | ||
139 | /* Configure pin mux for ULPI. There is no pin mux for UTMI */ | 140 | /* Configure pin mux for ULPI. There is no pin mux for UTMI */ |
140 | if (prop && !strcmp(prop, "ulpi")) { | 141 | if (prop && !strcmp(prop, "ulpi")) { |
141 | if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) { | 142 | if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) { |
143 | clrsetbits_be32(immap + MPC83XX_SICRH_OFFS, | ||
144 | MPC8308_SICRH_USB_MASK, | ||
145 | MPC8308_SICRH_USB_ULPI); | ||
146 | } else if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) { | ||
142 | clrsetbits_be32(immap + MPC83XX_SICRL_OFFS, | 147 | clrsetbits_be32(immap + MPC83XX_SICRL_OFFS, |
143 | MPC8315_SICRL_USB_MASK, | 148 | MPC8315_SICRL_USB_MASK, |
144 | MPC8315_SICRL_USB_ULPI); | 149 | MPC8315_SICRL_USB_ULPI); |
@@ -173,6 +178,9 @@ int mpc831x_usb_cfg(void) | |||
173 | !strcmp(prop, "utmi"))) { | 178 | !strcmp(prop, "utmi"))) { |
174 | u32 refsel; | 179 | u32 refsel; |
175 | 180 | ||
181 | if (of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) | ||
182 | goto out; | ||
183 | |||
176 | if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) | 184 | if (of_device_is_compatible(immr_node, "fsl,mpc8315-immr")) |
177 | refsel = CONTROL_REFSEL_24MHZ; | 185 | refsel = CONTROL_REFSEL_24MHZ; |
178 | else | 186 | else |
@@ -186,9 +194,11 @@ int mpc831x_usb_cfg(void) | |||
186 | temp = CONTROL_PHY_CLK_SEL_ULPI; | 194 | temp = CONTROL_PHY_CLK_SEL_ULPI; |
187 | #ifdef CONFIG_USB_OTG | 195 | #ifdef CONFIG_USB_OTG |
188 | /* Set OTG_PORT */ | 196 | /* Set OTG_PORT */ |
189 | dr_mode = of_get_property(np, "dr_mode", NULL); | 197 | if (!of_device_is_compatible(immr_node, "fsl,mpc8308-immr")) { |
190 | if (dr_mode && !strcmp(dr_mode, "otg")) | 198 | dr_mode = of_get_property(np, "dr_mode", NULL); |
191 | temp |= CONTROL_OTG_PORT; | 199 | if (dr_mode && !strcmp(dr_mode, "otg")) |
200 | temp |= CONTROL_OTG_PORT; | ||
201 | } | ||
192 | #endif /* CONFIG_USB_OTG */ | 202 | #endif /* CONFIG_USB_OTG */ |
193 | out_be32(usb_regs + FSL_USB2_CONTROL_OFFS, temp); | 203 | out_be32(usb_regs + FSL_USB2_CONTROL_OFFS, temp); |
194 | } else { | 204 | } else { |
@@ -196,6 +206,7 @@ int mpc831x_usb_cfg(void) | |||
196 | ret = -EINVAL; | 206 | ret = -EINVAL; |
197 | } | 207 | } |
198 | 208 | ||
209 | out: | ||
199 | iounmap(usb_regs); | 210 | iounmap(usb_regs); |
200 | of_node_put(np); | 211 | of_node_put(np); |
201 | return ret; | 212 | return ret; |
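[Editor's note] The usb.c hunks gate the MPC8308-specific SICRH ULPI setting behind an extra compatible check and do the read-modify-write with clrsetbits_be32(). A hedged sketch of that idiom; the register offset, masks, and compatible string are invented:

#include <linux/of.h>
#include <asm/io.h>

#define EXAMPLE_SICRH_OFFS	0x118
#define EXAMPLE_USB_MASK	0x000c0000
#define EXAMPLE_USB_ULPI	0x00040000

static void example_usb_pinmux(struct device_node *immr, void __iomem *immap)
{
	if (of_device_is_compatible(immr, "vendor,example-immr"))
		/* clear the masked field, then set the ULPI value, in one store */
		clrsetbits_be32(immap + EXAMPLE_SICRH_OFFS,
				EXAMPLE_USB_MASK, EXAMPLE_USB_ULPI);
}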
diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig index bea1f5905ad4..b6976e1726e4 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig | |||
@@ -11,6 +11,8 @@ menuconfig FSL_SOC_BOOKE | |||
11 | 11 | ||
12 | if FSL_SOC_BOOKE | 12 | if FSL_SOC_BOOKE |
13 | 13 | ||
14 | if PPC32 | ||
15 | |||
14 | config MPC8540_ADS | 16 | config MPC8540_ADS |
15 | bool "Freescale MPC8540 ADS" | 17 | bool "Freescale MPC8540 ADS" |
16 | select DEFAULT_UIMAGE | 18 | select DEFAULT_UIMAGE |
@@ -153,10 +155,20 @@ config SBC8560 | |||
153 | help | 155 | help |
154 | This option enables support for the Wind River SBC8560 board | 156 | This option enables support for the Wind River SBC8560 board |
155 | 157 | ||
158 | config P3041_DS | ||
159 | bool "Freescale P3041 DS" | ||
160 | select DEFAULT_UIMAGE | ||
161 | select PPC_E500MC | ||
162 | select PHYS_64BIT | ||
163 | select SWIOTLB | ||
164 | select MPC8xxx_GPIO | ||
165 | select HAS_RAPIDIO | ||
166 | help | ||
167 | This option enables support for the P3041 DS board | ||
168 | |||
156 | config P4080_DS | 169 | config P4080_DS |
157 | bool "Freescale P4080 DS" | 170 | bool "Freescale P4080 DS" |
158 | select DEFAULT_UIMAGE | 171 | select DEFAULT_UIMAGE |
159 | select PPC_FSL_BOOK3E | ||
160 | select PPC_E500MC | 172 | select PPC_E500MC |
161 | select PHYS_64BIT | 173 | select PHYS_64BIT |
162 | select SWIOTLB | 174 | select SWIOTLB |
@@ -165,6 +177,20 @@ config P4080_DS | |||
165 | help | 177 | help |
166 | This option enables support for the P4080 DS board | 178 | This option enables support for the P4080 DS board |
167 | 179 | ||
180 | endif # PPC32 | ||
181 | |||
182 | config P5020_DS | ||
183 | bool "Freescale P5020 DS" | ||
184 | select DEFAULT_UIMAGE | ||
185 | select E500 | ||
186 | select PPC_E500MC | ||
187 | select PHYS_64BIT | ||
188 | select SWIOTLB | ||
189 | select MPC8xxx_GPIO | ||
190 | select HAS_RAPIDIO | ||
191 | help | ||
192 | This option enables support for the P5020 DS board | ||
193 | |||
168 | endif # FSL_SOC_BOOKE | 194 | endif # FSL_SOC_BOOKE |
169 | 195 | ||
170 | config TQM85xx | 196 | config TQM85xx |
diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile index a2ec3f8f4d06..dd70db77d63e 100644 --- a/arch/powerpc/platforms/85xx/Makefile +++ b/arch/powerpc/platforms/85xx/Makefile | |||
@@ -11,7 +11,9 @@ obj-$(CONFIG_MPC85xx_DS) += mpc85xx_ds.o | |||
11 | obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o | 11 | obj-$(CONFIG_MPC85xx_MDS) += mpc85xx_mds.o |
12 | obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o | 12 | obj-$(CONFIG_MPC85xx_RDB) += mpc85xx_rdb.o |
13 | obj-$(CONFIG_P1022_DS) += p1022_ds.o | 13 | obj-$(CONFIG_P1022_DS) += p1022_ds.o |
14 | obj-$(CONFIG_P3041_DS) += p3041_ds.o corenet_ds.o | ||
14 | obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o | 15 | obj-$(CONFIG_P4080_DS) += p4080_ds.o corenet_ds.o |
16 | obj-$(CONFIG_P5020_DS) += p5020_ds.o corenet_ds.o | ||
15 | obj-$(CONFIG_STX_GP3) += stx_gp3.o | 17 | obj-$(CONFIG_STX_GP3) += stx_gp3.o |
16 | obj-$(CONFIG_TQM85xx) += tqm85xx.o | 18 | obj-$(CONFIG_TQM85xx) += tqm85xx.o |
17 | obj-$(CONFIG_SBC8560) += sbc8560.o | 19 | obj-$(CONFIG_SBC8560) += sbc8560.o |
diff --git a/arch/powerpc/platforms/85xx/ksi8560.c b/arch/powerpc/platforms/85xx/ksi8560.c index f4d36b5a2e00..c46f9359be15 100644 --- a/arch/powerpc/platforms/85xx/ksi8560.c +++ b/arch/powerpc/platforms/85xx/ksi8560.c | |||
@@ -56,12 +56,13 @@ static void machine_restart(char *cmd) | |||
56 | 56 | ||
57 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 57 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) |
58 | { | 58 | { |
59 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
59 | int cascade_irq; | 60 | int cascade_irq; |
60 | 61 | ||
61 | while ((cascade_irq = cpm2_get_irq()) >= 0) | 62 | while ((cascade_irq = cpm2_get_irq()) >= 0) |
62 | generic_handle_irq(cascade_irq); | 63 | generic_handle_irq(cascade_irq); |
63 | 64 | ||
64 | desc->chip->eoi(irq); | 65 | chip->irq_eoi(&desc->irq_data); |
65 | } | 66 | } |
66 | 67 | ||
67 | static void __init ksi8560_pic_init(void) | 68 | static void __init ksi8560_pic_init(void) |
@@ -105,7 +106,7 @@ static void __init ksi8560_pic_init(void) | |||
105 | 106 | ||
106 | cpm2_pic_init(np); | 107 | cpm2_pic_init(np); |
107 | of_node_put(np); | 108 | of_node_put(np); |
108 | set_irq_chained_handler(irq, cpm2_cascade); | 109 | irq_set_chained_handler(irq, cpm2_cascade); |
109 | #endif | 110 | #endif |
110 | } | 111 | } |
111 | 112 | ||
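Editor's note: the cpm2_cascade() change above is the same genirq conversion that repeats across the 85xx/86xx boards below: the parent chip is looked up with irq_desc_get_chip() instead of the removed desc->chip pointer, the EOI goes through chip->irq_eoi(&desc->irq_data), and the handler is installed with irq_set_chained_handler(). A condensed sketch of the post-conversion shape follows; kernel context is assumed, and example_get_child_irq() is a hypothetical stand-in for the board's sub-PIC query (cpm2_get_irq() in these hunks):

    #include <linux/init.h>
    #include <linux/irq.h>

    /* Hypothetical sub-controller query: returns the next pending child
     * virq, or a negative value when nothing is pending. */
    extern int example_get_child_irq(void);

    static void example_cascade(unsigned int irq, struct irq_desc *desc)
    {
            /* Parent chip comes from the descriptor now... */
            struct irq_chip *chip = irq_desc_get_chip(desc);
            int cascade_irq;

            /* ...demultiplex every pending child interrupt... */
            while ((cascade_irq = example_get_child_irq()) >= 0)
                    generic_handle_irq(cascade_irq);

            /* ...and EOI the parent through its irq_data. */
            chip->irq_eoi(&desc->irq_data);
    }

    static void __init example_pic_init(unsigned int cascade_virq)
    {
            irq_set_chained_handler(cascade_virq, example_cascade);
    }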
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ads.c b/arch/powerpc/platforms/85xx/mpc85xx_ads.c index 9438a892afc4..3b2c9bb66199 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ads.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ads.c | |||
@@ -50,12 +50,13 @@ static int mpc85xx_exclude_device(struct pci_controller *hose, | |||
50 | 50 | ||
51 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 51 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) |
52 | { | 52 | { |
53 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
53 | int cascade_irq; | 54 | int cascade_irq; |
54 | 55 | ||
55 | while ((cascade_irq = cpm2_get_irq()) >= 0) | 56 | while ((cascade_irq = cpm2_get_irq()) >= 0) |
56 | generic_handle_irq(cascade_irq); | 57 | generic_handle_irq(cascade_irq); |
57 | 58 | ||
58 | desc->chip->eoi(irq); | 59 | chip->irq_eoi(&desc->irq_data); |
59 | } | 60 | } |
60 | 61 | ||
61 | #endif /* CONFIG_CPM2 */ | 62 | #endif /* CONFIG_CPM2 */ |
@@ -100,7 +101,7 @@ static void __init mpc85xx_ads_pic_init(void) | |||
100 | 101 | ||
101 | cpm2_pic_init(np); | 102 | cpm2_pic_init(np); |
102 | of_node_put(np); | 103 | of_node_put(np); |
103 | set_irq_chained_handler(irq, cpm2_cascade); | 104 | irq_set_chained_handler(irq, cpm2_cascade); |
104 | #endif | 105 | #endif |
105 | } | 106 | } |
106 | 107 | ||
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_cds.c b/arch/powerpc/platforms/85xx/mpc85xx_cds.c index 458d91fba91d..6299a2a51ae8 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_cds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_cds.c | |||
@@ -255,7 +255,7 @@ static int mpc85xx_cds_8259_attach(void) | |||
255 | } | 255 | } |
256 | 256 | ||
257 | /* Success. Connect our low-level cascade handler. */ | 257 | /* Success. Connect our low-level cascade handler. */ |
258 | set_irq_handler(cascade_irq, mpc85xx_8259_cascade_handler); | 258 | irq_set_handler(cascade_irq, mpc85xx_8259_cascade_handler); |
259 | 259 | ||
260 | return 0; | 260 | return 0; |
261 | } | 261 | } |
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_ds.c b/arch/powerpc/platforms/85xx/mpc85xx_ds.c index 8190bc25bf27..c7b97f70312e 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_ds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_ds.c | |||
@@ -47,12 +47,13 @@ | |||
47 | #ifdef CONFIG_PPC_I8259 | 47 | #ifdef CONFIG_PPC_I8259 |
48 | static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc) | 48 | static void mpc85xx_8259_cascade(unsigned int irq, struct irq_desc *desc) |
49 | { | 49 | { |
50 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
50 | unsigned int cascade_irq = i8259_irq(); | 51 | unsigned int cascade_irq = i8259_irq(); |
51 | 52 | ||
52 | if (cascade_irq != NO_IRQ) { | 53 | if (cascade_irq != NO_IRQ) { |
53 | generic_handle_irq(cascade_irq); | 54 | generic_handle_irq(cascade_irq); |
54 | } | 55 | } |
55 | desc->chip->eoi(irq); | 56 | chip->irq_eoi(&desc->irq_data); |
56 | } | 57 | } |
57 | #endif /* CONFIG_PPC_I8259 */ | 58 | #endif /* CONFIG_PPC_I8259 */ |
58 | 59 | ||
@@ -121,7 +122,7 @@ void __init mpc85xx_ds_pic_init(void) | |||
121 | i8259_init(cascade_node, 0); | 122 | i8259_init(cascade_node, 0); |
122 | of_node_put(cascade_node); | 123 | of_node_put(cascade_node); |
123 | 124 | ||
124 | set_irq_chained_handler(cascade_irq, mpc85xx_8259_cascade); | 125 | irq_set_chained_handler(cascade_irq, mpc85xx_8259_cascade); |
125 | #endif /* CONFIG_PPC_I8259 */ | 126 | #endif /* CONFIG_PPC_I8259 */ |
126 | } | 127 | } |
127 | 128 | ||
diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c index aa34cac4eb5c..747d1ee661fd 100644 --- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c +++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c | |||
@@ -309,7 +309,7 @@ static void __init mpc85xx_mds_qe_init(void) | |||
309 | /* P1021 has pins muxed for QE and other functions. To | 309 | /* P1021 has pins muxed for QE and other functions. To |
310 | * enable QE UEC mode, we need to set bit QE0 for UCC1 | 310 | * enable QE UEC mode, we need to set bit QE0 for UCC1 |
311 | * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9 | 311 | * in Eth mode, QE0 and QE3 for UCC5 in Eth mode, QE9 |
312 | * and QE12 for QE MII management singals in PMUXCR | 312 | * and QE12 for QE MII management signals in PMUXCR |
313 | * register. | 313 | * register. |
314 | */ | 314 | */ |
315 | setbits32(pmuxcr, MPC85xx_PMUXCR_QE0 | | 315 | setbits32(pmuxcr, MPC85xx_PMUXCR_QE0 | |
diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c index 34e00902ce86..7eb5c40c069f 100644 --- a/arch/powerpc/platforms/85xx/p1022_ds.c +++ b/arch/powerpc/platforms/85xx/p1022_ds.c | |||
@@ -8,7 +8,6 @@ | |||
8 | * Copyright 2010 Freescale Semiconductor, Inc. | 8 | * Copyright 2010 Freescale Semiconductor, Inc. |
9 | * | 9 | * |
10 | * This file is taken from the Freescale P1022DS BSP, with modifications: | 10 | * This file is taken from the Freescale P1022DS BSP, with modifications: |
11 | * 1) No DIU support (pending rewrite of DIU code) | ||
12 | * 2) No AMP support | 11 | * 2) No AMP support |
13 | * 3) No PCI endpoint support | 12 | * 3) No PCI endpoint support |
14 | * | 13 | * |
@@ -20,12 +19,211 @@ | |||
20 | #include <linux/pci.h> | 19 | #include <linux/pci.h> |
21 | #include <linux/of_platform.h> | 20 | #include <linux/of_platform.h> |
22 | #include <linux/memblock.h> | 21 | #include <linux/memblock.h> |
23 | 22 | #include <asm/div64.h> | |
24 | #include <asm/mpic.h> | 23 | #include <asm/mpic.h> |
25 | #include <asm/swiotlb.h> | 24 | #include <asm/swiotlb.h> |
26 | 25 | ||
27 | #include <sysdev/fsl_soc.h> | 26 | #include <sysdev/fsl_soc.h> |
28 | #include <sysdev/fsl_pci.h> | 27 | #include <sysdev/fsl_pci.h> |
28 | #include <asm/fsl_guts.h> | ||
29 | |||
30 | #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) | ||
31 | |||
32 | /* | ||
33 | * Board-specific initialization of the DIU. This code should probably be | ||
34 | * executed when the DIU is opened, rather than in arch code, but the DIU | ||
35 | * driver does not have a mechanism for this (yet). | ||
36 | * | ||
37 | * This is especially problematic on the P1022DS because the local bus (eLBC) | ||
38 | * and the DIU video signals share the same pins, which means that enabling the | ||
39 | * DIU will disable access to NOR flash. | ||
40 | */ | ||
41 | |||
42 | /* DIU Pixel Clock bits of the CLKDVDR Global Utilities register */ | ||
43 | #define CLKDVDR_PXCKEN 0x80000000 | ||
44 | #define CLKDVDR_PXCKINV 0x10000000 | ||
45 | #define CLKDVDR_PXCKDLY 0x06000000 | ||
46 | #define CLKDVDR_PXCLK_MASK 0x00FF0000 | ||
47 | |||
48 | /* Some ngPIXIS register definitions */ | ||
49 | #define PX_BRDCFG1_DVIEN 0x80 | ||
50 | #define PX_BRDCFG1_DFPEN 0x40 | ||
51 | #define PX_BRDCFG1_BACKLIGHT 0x20 | ||
52 | #define PX_BRDCFG1_DDCEN 0x10 | ||
53 | |||
54 | /* | ||
55 | * DIU Area Descriptor | ||
56 | * | ||
57 | * Note that we need to byte-swap the value before it's written to the AD | ||
58 | * register. So even though the registers don't look like they're in the same | ||
59 | * bit positions as they are on the MPC8610, the same value is written to the | ||
60 | * AD register on the MPC8610 and on the P1022. | ||
61 | */ | ||
62 | #define AD_BYTE_F 0x10000000 | ||
63 | #define AD_ALPHA_C_MASK 0x0E000000 | ||
64 | #define AD_ALPHA_C_SHIFT 25 | ||
65 | #define AD_BLUE_C_MASK 0x01800000 | ||
66 | #define AD_BLUE_C_SHIFT 23 | ||
67 | #define AD_GREEN_C_MASK 0x00600000 | ||
68 | #define AD_GREEN_C_SHIFT 21 | ||
69 | #define AD_RED_C_MASK 0x00180000 | ||
70 | #define AD_RED_C_SHIFT 19 | ||
71 | #define AD_PALETTE 0x00040000 | ||
72 | #define AD_PIXEL_S_MASK 0x00030000 | ||
73 | #define AD_PIXEL_S_SHIFT 16 | ||
74 | #define AD_COMP_3_MASK 0x0000F000 | ||
75 | #define AD_COMP_3_SHIFT 12 | ||
76 | #define AD_COMP_2_MASK 0x00000F00 | ||
77 | #define AD_COMP_2_SHIFT 8 | ||
78 | #define AD_COMP_1_MASK 0x000000F0 | ||
79 | #define AD_COMP_1_SHIFT 4 | ||
80 | #define AD_COMP_0_MASK 0x0000000F | ||
81 | #define AD_COMP_0_SHIFT 0 | ||
82 | |||
83 | #define MAKE_AD(alpha, red, blue, green, size, c0, c1, c2, c3) \ | ||
84 | cpu_to_le32(AD_BYTE_F | (alpha << AD_ALPHA_C_SHIFT) | \ | ||
85 | (blue << AD_BLUE_C_SHIFT) | (green << AD_GREEN_C_SHIFT) | \ | ||
86 | (red << AD_RED_C_SHIFT) | (c3 << AD_COMP_3_SHIFT) | \ | ||
87 | (c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) | \ | ||
88 | (c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT)) | ||
89 | |||
90 | /** | ||
91 | * p1022ds_get_pixel_format: return the Area Descriptor for a given pixel depth | ||
92 | * | ||
93 | * The Area Descriptor is a 32-bit value that determines which bits in each | ||
94 | * pixel are to be used for each color. | ||
95 | */ | ||
96 | static unsigned int p1022ds_get_pixel_format(unsigned int bits_per_pixel, | ||
97 | int monitor_port) | ||
98 | { | ||
99 | switch (bits_per_pixel) { | ||
100 | case 32: | ||
101 | /* 0x88883316 */ | ||
102 | return MAKE_AD(3, 2, 0, 1, 3, 8, 8, 8, 8); | ||
103 | case 24: | ||
104 | /* 0x88082219 */ | ||
105 | return MAKE_AD(4, 0, 1, 2, 2, 0, 8, 8, 8); | ||
106 | case 16: | ||
107 | /* 0x65053118 */ | ||
108 | return MAKE_AD(4, 2, 1, 0, 1, 5, 6, 5, 0); | ||
109 | default: | ||
110 | pr_err("fsl-diu: unsupported pixel depth %u\n", bits_per_pixel); | ||
111 | return 0; | ||
112 | } | ||
113 | } | ||
114 | |||
115 | /** | ||
116 | * p1022ds_set_gamma_table: update the gamma table, if necessary | ||
117 | * | ||
118 | * On some boards, the gamma table for some ports may need to be modified. | ||
119 | * This is not the case on the P1022DS, so we do nothing. | ||
120 | */ | ||
121 | static void p1022ds_set_gamma_table(int monitor_port, char *gamma_table_base) | ||
122 | { | ||
123 | } | ||
124 | |||
125 | /** | ||
126 | * p1022ds_set_monitor_port: switch the output to a different monitor port | ||
127 | * | ||
128 | */ | ||
129 | static void p1022ds_set_monitor_port(int monitor_port) | ||
130 | { | ||
131 | struct device_node *pixis_node; | ||
132 | u8 __iomem *brdcfg1; | ||
133 | |||
134 | pixis_node = of_find_compatible_node(NULL, NULL, "fsl,p1022ds-pixis"); | ||
135 | if (!pixis_node) { | ||
136 | pr_err("p1022ds: missing ngPIXIS node\n"); | ||
137 | return; | ||
138 | } | ||
139 | |||
140 | brdcfg1 = of_iomap(pixis_node, 0); | ||
141 | if (!brdcfg1) { | ||
142 | pr_err("p1022ds: could not map ngPIXIS registers\n"); | ||
143 | return; | ||
144 | } | ||
145 | brdcfg1 += 9; /* BRDCFG1 is at offset 9 in the ngPIXIS */ | ||
146 | |||
147 | switch (monitor_port) { | ||
148 | case 0: /* DVI */ | ||
149 | /* Enable the DVI port, disable the DFP and the backlight */ | ||
150 | clrsetbits_8(brdcfg1, PX_BRDCFG1_DFPEN | PX_BRDCFG1_BACKLIGHT, | ||
151 | PX_BRDCFG1_DVIEN); | ||
152 | break; | ||
153 | case 1: /* Single link LVDS */ | ||
154 | /* Enable the DFP port, disable the DVI and the backlight */ | ||
155 | clrsetbits_8(brdcfg1, PX_BRDCFG1_DVIEN | PX_BRDCFG1_BACKLIGHT, | ||
156 | PX_BRDCFG1_DFPEN); | ||
157 | break; | ||
158 | default: | ||
159 | pr_err("p1022ds: unsupported monitor port %i\n", monitor_port); | ||
160 | } | ||
161 | } | ||
162 | |||
163 | /** | ||
164 | * p1022ds_set_pixel_clock: program the DIU's clock | ||
165 | * | ||
166 | * @pixclock: the wavelength, in picoseconds, of the clock | ||
167 | */ | ||
168 | void p1022ds_set_pixel_clock(unsigned int pixclock) | ||
169 | { | ||
170 | struct device_node *guts_np = NULL; | ||
171 | struct ccsr_guts_85xx __iomem *guts; | ||
172 | unsigned long freq; | ||
173 | u64 temp; | ||
174 | u32 pxclk; | ||
175 | |||
176 | /* Map the global utilities registers. */ | ||
177 | guts_np = of_find_compatible_node(NULL, NULL, "fsl,p1022-guts"); | ||
178 | if (!guts_np) { | ||
179 | pr_err("p1022ds: missing global utilties device node\n"); | ||
180 | return; | ||
181 | } | ||
182 | |||
183 | guts = of_iomap(guts_np, 0); | ||
184 | of_node_put(guts_np); | ||
185 | if (!guts) { | ||
186 | pr_err("p1022ds: could not map global utilties device\n"); | ||
187 | return; | ||
188 | } | ||
189 | |||
190 | /* Convert pixclock from a wavelength to a frequency */ | ||
191 | temp = 1000000000000ULL; | ||
192 | do_div(temp, pixclock); | ||
193 | freq = temp; | ||
194 | |||
195 | /* pixclk is the ratio of the platform clock to the pixel clock */ | ||
196 | pxclk = DIV_ROUND_CLOSEST(fsl_get_sys_freq(), freq); | ||
197 | |||
198 | /* Disable the pixel clock, and set it to non-inverted and no delay */ | ||
199 | clrbits32(&guts->clkdvdr, | ||
200 | CLKDVDR_PXCKEN | CLKDVDR_PXCKDLY | CLKDVDR_PXCLK_MASK); | ||
201 | |||
202 | /* Enable the clock and set the pxclk */ | ||
203 | setbits32(&guts->clkdvdr, CLKDVDR_PXCKEN | (pxclk << 16)); | ||
204 | } | ||
205 | |||
206 | /** | ||
207 | * p1022ds_show_monitor_port: show the current monitor | ||
208 | * | ||
209 | * This function returns a string indicating whether the current monitor is | ||
210 | * set to DVI or LVDS. | ||
211 | */ | ||
212 | ssize_t p1022ds_show_monitor_port(int monitor_port, char *buf) | ||
213 | { | ||
214 | return sprintf(buf, "%c0 - DVI\n%c1 - Single link LVDS\n", | ||
215 | monitor_port == 0 ? '*' : ' ', monitor_port == 1 ? '*' : ' '); | ||
216 | } | ||
217 | |||
218 | /** | ||
219 | * p1022ds_set_sysfs_monitor_port: set the monitor port for sysfs | ||
220 | */ | ||
221 | int p1022ds_set_sysfs_monitor_port(int val) | ||
222 | { | ||
223 | return val < 2 ? val : 0; | ||
224 | } | ||
225 | |||
226 | #endif | ||
29 | 227 | ||
30 | void __init p1022_ds_pic_init(void) | 228 | void __init p1022_ds_pic_init(void) |
31 | { | 229 | { |
@@ -92,6 +290,15 @@ static void __init p1022_ds_setup_arch(void) | |||
92 | } | 290 | } |
93 | #endif | 291 | #endif |
94 | 292 | ||
293 | #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) | ||
294 | diu_ops.get_pixel_format = p1022ds_get_pixel_format; | ||
295 | diu_ops.set_gamma_table = p1022ds_set_gamma_table; | ||
296 | diu_ops.set_monitor_port = p1022ds_set_monitor_port; | ||
297 | diu_ops.set_pixel_clock = p1022ds_set_pixel_clock; | ||
298 | diu_ops.show_monitor_port = p1022ds_show_monitor_port; | ||
299 | diu_ops.set_sysfs_monitor_port = p1022ds_set_sysfs_monitor_port; | ||
300 | #endif | ||
301 | |||
95 | #ifdef CONFIG_SMP | 302 | #ifdef CONFIG_SMP |
96 | mpc85xx_smp_init(); | 303 | mpc85xx_smp_init(); |
97 | #endif | 304 | #endif |
@@ -112,6 +319,8 @@ static struct of_device_id __initdata p1022_ds_ids[] = { | |||
112 | { .compatible = "soc", }, | 319 | { .compatible = "soc", }, |
113 | { .compatible = "simple-bus", }, | 320 | { .compatible = "simple-bus", }, |
114 | { .compatible = "gianfar", }, | 321 | { .compatible = "gianfar", }, |
322 | /* So that the DMA channel nodes can be probed individually: */ | ||
323 | { .compatible = "fsl,eloplus-dma", }, | ||
115 | {}, | 324 | {}, |
116 | }; | 325 | }; |
117 | 326 | ||
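Editor's note: the MAKE_AD() rows above carry the byte-swapped register value in a comment (e.g. "0x88883316" for 32bpp), and the 32bpp row can be checked by hand from the AD_* field shifts defined in the hunk. This is ordinary userspace C offered as a sketch; swap32() stands in for what cpu_to_le32() does on the big-endian P1022:

    #include <stdint.h>
    #include <stdio.h>

    /* Field positions copied from the AD_* definitions in the hunk. */
    #define AD_BYTE_F        0x10000000u
    #define AD_ALPHA_C_SHIFT 25
    #define AD_BLUE_C_SHIFT  23
    #define AD_GREEN_C_SHIFT 21
    #define AD_RED_C_SHIFT   19
    #define AD_PIXEL_S_SHIFT 16
    #define AD_COMP_3_SHIFT  12
    #define AD_COMP_2_SHIFT  8
    #define AD_COMP_1_SHIFT  4
    #define AD_COMP_0_SHIFT  0

    /* Byte swap: what cpu_to_le32() amounts to on a big-endian CPU. */
    static uint32_t swap32(uint32_t x)
    {
            return (x >> 24) | ((x >> 8) & 0x0000ff00) |
                   ((x << 8) & 0x00ff0000) | (x << 24);
    }

    int main(void)
    {
            /* 32bpp row: MAKE_AD(3, 2, 0, 1, 3, 8, 8, 8, 8) */
            uint32_t ad = AD_BYTE_F |
                    (3u << AD_ALPHA_C_SHIFT) | (0u << AD_BLUE_C_SHIFT)  |
                    (1u << AD_GREEN_C_SHIFT) | (2u << AD_RED_C_SHIFT)   |
                    (3u << AD_PIXEL_S_SHIFT) |
                    (8u << AD_COMP_3_SHIFT)  | (8u << AD_COMP_2_SHIFT)  |
                    (8u << AD_COMP_1_SHIFT)  | (8u << AD_COMP_0_SHIFT);

            /* Prints: native 0x16338888, in-register (LE) 0x88883316 */
            printf("native 0x%08x, in-register (LE) 0x%08x\n",
                   (unsigned)ad, (unsigned)swap32(ad));
            return 0;
    }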
diff --git a/arch/powerpc/platforms/85xx/p3041_ds.c b/arch/powerpc/platforms/85xx/p3041_ds.c new file mode 100644 index 000000000000..0ed52e18298c --- /dev/null +++ b/arch/powerpc/platforms/85xx/p3041_ds.c | |||
@@ -0,0 +1,64 @@ | |||
1 | /* | ||
2 | * P3041 DS Setup | ||
3 | * | ||
4 | * Maintained by Kumar Gala (see MAINTAINERS for contact information) | ||
5 | * | ||
6 | * Copyright 2009-2010 Freescale Semiconductor Inc. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/kdev_t.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/phy.h> | ||
20 | |||
21 | #include <asm/system.h> | ||
22 | #include <asm/time.h> | ||
23 | #include <asm/machdep.h> | ||
24 | #include <asm/pci-bridge.h> | ||
25 | #include <mm/mmu_decl.h> | ||
26 | #include <asm/prom.h> | ||
27 | #include <asm/udbg.h> | ||
28 | #include <asm/mpic.h> | ||
29 | |||
30 | #include <linux/of_platform.h> | ||
31 | #include <sysdev/fsl_soc.h> | ||
32 | #include <sysdev/fsl_pci.h> | ||
33 | |||
34 | #include "corenet_ds.h" | ||
35 | |||
36 | /* | ||
37 | * Called very early, device-tree isn't unflattened | ||
38 | */ | ||
39 | static int __init p3041_ds_probe(void) | ||
40 | { | ||
41 | unsigned long root = of_get_flat_dt_root(); | ||
42 | |||
43 | return of_flat_dt_is_compatible(root, "fsl,P3041DS"); | ||
44 | } | ||
45 | |||
46 | define_machine(p3041_ds) { | ||
47 | .name = "P3041 DS", | ||
48 | .probe = p3041_ds_probe, | ||
49 | .setup_arch = corenet_ds_setup_arch, | ||
50 | .init_IRQ = corenet_ds_pic_init, | ||
51 | #ifdef CONFIG_PCI | ||
52 | .pcibios_fixup_bus = fsl_pcibios_fixup_bus, | ||
53 | #endif | ||
54 | .get_irq = mpic_get_coreint_irq, | ||
55 | .restart = fsl_rstcr_restart, | ||
56 | .calibrate_decr = generic_calibrate_decr, | ||
57 | .progress = udbg_progress, | ||
58 | }; | ||
59 | |||
60 | machine_device_initcall(p3041_ds, corenet_ds_publish_devices); | ||
61 | |||
62 | #ifdef CONFIG_SWIOTLB | ||
63 | machine_arch_initcall(p3041_ds, swiotlb_setup_bus_notifier); | ||
64 | #endif | ||
diff --git a/arch/powerpc/platforms/85xx/p5020_ds.c b/arch/powerpc/platforms/85xx/p5020_ds.c new file mode 100644 index 000000000000..7467b712ee00 --- /dev/null +++ b/arch/powerpc/platforms/85xx/p5020_ds.c | |||
@@ -0,0 +1,69 @@ | |||
1 | /* | ||
2 | * P5020 DS Setup | ||
3 | * | ||
4 | * Maintained by Kumar Gala (see MAINTAINERS for contact information) | ||
5 | * | ||
6 | * Copyright 2009-2010 Freescale Semiconductor Inc. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/pci.h> | ||
16 | #include <linux/kdev_t.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/phy.h> | ||
20 | |||
21 | #include <asm/system.h> | ||
22 | #include <asm/time.h> | ||
23 | #include <asm/machdep.h> | ||
24 | #include <asm/pci-bridge.h> | ||
25 | #include <mm/mmu_decl.h> | ||
26 | #include <asm/prom.h> | ||
27 | #include <asm/udbg.h> | ||
28 | #include <asm/mpic.h> | ||
29 | |||
30 | #include <linux/of_platform.h> | ||
31 | #include <sysdev/fsl_soc.h> | ||
32 | #include <sysdev/fsl_pci.h> | ||
33 | |||
34 | #include "corenet_ds.h" | ||
35 | |||
36 | /* | ||
37 | * Called very early, device-tree isn't unflattened | ||
38 | */ | ||
39 | static int __init p5020_ds_probe(void) | ||
40 | { | ||
41 | unsigned long root = of_get_flat_dt_root(); | ||
42 | |||
43 | return of_flat_dt_is_compatible(root, "fsl,P5020DS"); | ||
44 | } | ||
45 | |||
46 | define_machine(p5020_ds) { | ||
47 | .name = "P5020 DS", | ||
48 | .probe = p5020_ds_probe, | ||
49 | .setup_arch = corenet_ds_setup_arch, | ||
50 | .init_IRQ = corenet_ds_pic_init, | ||
51 | #ifdef CONFIG_PCI | ||
52 | .pcibios_fixup_bus = fsl_pcibios_fixup_bus, | ||
53 | #endif | ||
54 | /* coreint doesn't play nice with lazy EE, use legacy mpic for now */ | ||
55 | #ifdef CONFIG_PPC64 | ||
56 | .get_irq = mpic_get_irq, | ||
57 | #else | ||
58 | .get_irq = mpic_get_coreint_irq, | ||
59 | #endif | ||
60 | .restart = fsl_rstcr_restart, | ||
61 | .calibrate_decr = generic_calibrate_decr, | ||
62 | .progress = udbg_progress, | ||
63 | }; | ||
64 | |||
65 | machine_device_initcall(p5020_ds, corenet_ds_publish_devices); | ||
66 | |||
67 | #ifdef CONFIG_SWIOTLB | ||
68 | machine_arch_initcall(p5020_ds, swiotlb_setup_bus_notifier); | ||
69 | #endif | ||
diff --git a/arch/powerpc/platforms/85xx/sbc8560.c b/arch/powerpc/platforms/85xx/sbc8560.c index a5ad1c7794bf..d2dfd465fbf6 100644 --- a/arch/powerpc/platforms/85xx/sbc8560.c +++ b/arch/powerpc/platforms/85xx/sbc8560.c | |||
@@ -41,12 +41,13 @@ | |||
41 | 41 | ||
42 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 42 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) |
43 | { | 43 | { |
44 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
44 | int cascade_irq; | 45 | int cascade_irq; |
45 | 46 | ||
46 | while ((cascade_irq = cpm2_get_irq()) >= 0) | 47 | while ((cascade_irq = cpm2_get_irq()) >= 0) |
47 | generic_handle_irq(cascade_irq); | 48 | generic_handle_irq(cascade_irq); |
48 | 49 | ||
49 | desc->chip->eoi(irq); | 50 | chip->irq_eoi(&desc->irq_data); |
50 | } | 51 | } |
51 | 52 | ||
52 | #endif /* CONFIG_CPM2 */ | 53 | #endif /* CONFIG_CPM2 */ |
@@ -91,7 +92,7 @@ static void __init sbc8560_pic_init(void) | |||
91 | 92 | ||
92 | cpm2_pic_init(np); | 93 | cpm2_pic_init(np); |
93 | of_node_put(np); | 94 | of_node_put(np); |
94 | set_irq_chained_handler(irq, cpm2_cascade); | 95 | irq_set_chained_handler(irq, cpm2_cascade); |
95 | #endif | 96 | #endif |
96 | } | 97 | } |
97 | 98 | ||
diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c index a6b106557be4..d6a93a10c0f5 100644 --- a/arch/powerpc/platforms/85xx/smp.c +++ b/arch/powerpc/platforms/85xx/smp.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/of.h> | 17 | #include <linux/of.h> |
18 | #include <linux/kexec.h> | 18 | #include <linux/kexec.h> |
19 | #include <linux/highmem.h> | ||
19 | 20 | ||
20 | #include <asm/machdep.h> | 21 | #include <asm/machdep.h> |
21 | #include <asm/pgtable.h> | 22 | #include <asm/pgtable.h> |
@@ -40,7 +41,7 @@ extern void __early_start(void); | |||
40 | #define NUM_BOOT_ENTRY 8 | 41 | #define NUM_BOOT_ENTRY 8 |
41 | #define SIZE_BOOT_ENTRY (NUM_BOOT_ENTRY * sizeof(u32)) | 42 | #define SIZE_BOOT_ENTRY (NUM_BOOT_ENTRY * sizeof(u32)) |
42 | 43 | ||
43 | static void __init | 44 | static int __init |
44 | smp_85xx_kick_cpu(int nr) | 45 | smp_85xx_kick_cpu(int nr) |
45 | { | 46 | { |
46 | unsigned long flags; | 47 | unsigned long flags; |
@@ -59,7 +60,7 @@ smp_85xx_kick_cpu(int nr) | |||
59 | 60 | ||
60 | if (cpu_rel_addr == NULL) { | 61 | if (cpu_rel_addr == NULL) { |
61 | printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr); | 62 | printk(KERN_ERR "No cpu-release-addr for cpu %d\n", nr); |
62 | return; | 63 | return -ENOENT; |
63 | } | 64 | } |
64 | 65 | ||
65 | /* | 66 | /* |
@@ -79,6 +80,7 @@ smp_85xx_kick_cpu(int nr) | |||
79 | local_irq_save(flags); | 80 | local_irq_save(flags); |
80 | 81 | ||
81 | out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr); | 82 | out_be32(bptr_vaddr + BOOT_ENTRY_PIR, nr); |
83 | #ifdef CONFIG_PPC32 | ||
82 | out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start)); | 84 | out_be32(bptr_vaddr + BOOT_ENTRY_ADDR_LOWER, __pa(__early_start)); |
83 | 85 | ||
84 | if (!ioremappable) | 86 | if (!ioremappable) |
@@ -88,6 +90,16 @@ smp_85xx_kick_cpu(int nr) | |||
88 | /* Wait a bit for the CPU to ack. */ | 90 | /* Wait a bit for the CPU to ack. */ |
89 | while ((__secondary_hold_acknowledge != nr) && (++n < 1000)) | 91 | while ((__secondary_hold_acknowledge != nr) && (++n < 1000)) |
90 | mdelay(1); | 92 | mdelay(1); |
93 | #else | ||
94 | smp_generic_kick_cpu(nr); | ||
95 | |||
96 | out_be64((u64 *)(bptr_vaddr + BOOT_ENTRY_ADDR_UPPER), | ||
97 | __pa((u64)*((unsigned long long *) generic_secondary_smp_init))); | ||
98 | |||
99 | if (!ioremappable) | ||
100 | flush_dcache_range((ulong)bptr_vaddr, | ||
101 | (ulong)(bptr_vaddr + SIZE_BOOT_ENTRY)); | ||
102 | #endif | ||
91 | 103 | ||
92 | local_irq_restore(flags); | 104 | local_irq_restore(flags); |
93 | 105 | ||
@@ -95,6 +107,8 @@ smp_85xx_kick_cpu(int nr) | |||
95 | iounmap(bptr_vaddr); | 107 | iounmap(bptr_vaddr); |
96 | 108 | ||
97 | pr_debug("waited %d msecs for CPU #%d.\n", n, nr); | 109 | pr_debug("waited %d msecs for CPU #%d.\n", n, nr); |
110 | |||
111 | return 0; | ||
98 | } | 112 | } |
99 | 113 | ||
100 | static void __init | 114 | static void __init |
@@ -114,19 +128,15 @@ struct smp_ops_t smp_85xx_ops = { | |||
114 | }; | 128 | }; |
115 | 129 | ||
116 | #ifdef CONFIG_KEXEC | 130 | #ifdef CONFIG_KEXEC |
117 | static int kexec_down_cpus = 0; | 131 | atomic_t kexec_down_cpus = ATOMIC_INIT(0); |
118 | 132 | ||
119 | void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary) | 133 | void mpc85xx_smp_kexec_cpu_down(int crash_shutdown, int secondary) |
120 | { | 134 | { |
121 | mpic_teardown_this_cpu(1); | 135 | local_irq_disable(); |
122 | |||
123 | /* When crashing, this gets called on all CPU's we only | ||
124 | * take down the non-boot cpus */ | ||
125 | if (smp_processor_id() != boot_cpuid) | ||
126 | { | ||
127 | local_irq_disable(); | ||
128 | kexec_down_cpus++; | ||
129 | 136 | ||
137 | if (secondary) { | ||
138 | atomic_inc(&kexec_down_cpus); | ||
139 | /* loop forever */ | ||
130 | while (1); | 140 | while (1); |
131 | } | 141 | } |
132 | } | 142 | } |
@@ -137,16 +147,65 @@ static void mpc85xx_smp_kexec_down(void *arg) | |||
137 | ppc_md.kexec_cpu_down(0,1); | 147 | ppc_md.kexec_cpu_down(0,1); |
138 | } | 148 | } |
139 | 149 | ||
140 | static void mpc85xx_smp_machine_kexec(struct kimage *image) | 150 | static void map_and_flush(unsigned long paddr) |
141 | { | 151 | { |
142 | int timeout = 2000; | 152 | struct page *page = pfn_to_page(paddr >> PAGE_SHIFT); |
153 | unsigned long kaddr = (unsigned long)kmap(page); | ||
154 | |||
155 | flush_dcache_range(kaddr, kaddr + PAGE_SIZE); | ||
156 | kunmap(page); | ||
157 | } | ||
158 | |||
159 | /** | ||
160 | * Before we reset the other cores, we need to flush relevant cache | ||
161 | * out to memory so we don't get anything corrupted. Some of these flushes | ||
162 | * are performed out of an overabundance of caution as interrupts are not | ||
163 | * disabled yet and we can switch cores | ||
164 | */ | ||
165 | static void mpc85xx_smp_flush_dcache_kexec(struct kimage *image) | ||
166 | { | ||
167 | kimage_entry_t *ptr, entry; | ||
168 | unsigned long paddr; | ||
143 | int i; | 169 | int i; |
144 | 170 | ||
145 | set_cpus_allowed(current, cpumask_of_cpu(boot_cpuid)); | 171 | if (image->type == KEXEC_TYPE_DEFAULT) { |
172 | /* normal kexec images are stored in temporary pages */ | ||
173 | for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); | ||
174 | ptr = (entry & IND_INDIRECTION) ? | ||
175 | phys_to_virt(entry & PAGE_MASK) : ptr + 1) { | ||
176 | if (!(entry & IND_DESTINATION)) { | ||
177 | map_and_flush(entry); | ||
178 | } | ||
179 | } | ||
180 | /* flush out last IND_DONE page */ | ||
181 | map_and_flush(entry); | ||
182 | } else { | ||
183 | /* crash type kexec images are copied to the crash region */ | ||
184 | for (i = 0; i < image->nr_segments; i++) { | ||
185 | struct kexec_segment *seg = &image->segment[i]; | ||
186 | for (paddr = seg->mem; paddr < seg->mem + seg->memsz; | ||
187 | paddr += PAGE_SIZE) { | ||
188 | map_and_flush(paddr); | ||
189 | } | ||
190 | } | ||
191 | } | ||
192 | |||
193 | /* also flush the kimage struct to be passed in as well */ | ||
194 | flush_dcache_range((unsigned long)image, | ||
195 | (unsigned long)image + sizeof(*image)); | ||
196 | } | ||
197 | |||
198 | static void mpc85xx_smp_machine_kexec(struct kimage *image) | ||
199 | { | ||
200 | int timeout = INT_MAX; | ||
201 | int i, num_cpus = num_present_cpus(); | ||
202 | |||
203 | mpc85xx_smp_flush_dcache_kexec(image); | ||
146 | 204 | ||
147 | smp_call_function(mpc85xx_smp_kexec_down, NULL, 0); | 205 | if (image->type == KEXEC_TYPE_DEFAULT) |
206 | smp_call_function(mpc85xx_smp_kexec_down, NULL, 0); | ||
148 | 207 | ||
149 | while ( (kexec_down_cpus != (num_online_cpus() - 1)) && | 208 | while ( (atomic_read(&kexec_down_cpus) != (num_cpus - 1)) && |
150 | ( timeout > 0 ) ) | 209 | ( timeout > 0 ) ) |
151 | { | 210 | { |
152 | timeout--; | 211 | timeout--; |
@@ -155,7 +214,7 @@ static void mpc85xx_smp_machine_kexec(struct kimage *image) | |||
155 | if ( !timeout ) | 214 | if ( !timeout ) |
156 | printk(KERN_ERR "Unable to bring down secondary cpu(s)"); | 215 | printk(KERN_ERR "Unable to bring down secondary cpu(s)"); |
157 | 216 | ||
158 | for (i = 0; i < num_present_cpus(); i++) | 217 | for (i = 0; i < num_cpus; i++) |
159 | { | 218 | { |
160 | if ( i == smp_processor_id() ) continue; | 219 | if ( i == smp_processor_id() ) continue; |
161 | mpic_reset_core(i); | 220 | mpic_reset_core(i); |
@@ -176,8 +235,10 @@ void __init mpc85xx_smp_init(void) | |||
176 | smp_85xx_ops.message_pass = smp_mpic_message_pass; | 235 | smp_85xx_ops.message_pass = smp_mpic_message_pass; |
177 | } | 236 | } |
178 | 237 | ||
179 | if (cpu_has_feature(CPU_FTR_DBELL)) | 238 | if (cpu_has_feature(CPU_FTR_DBELL)) { |
180 | smp_85xx_ops.message_pass = doorbell_message_pass; | 239 | smp_85xx_ops.message_pass = smp_muxed_ipi_message_pass; |
240 | smp_85xx_ops.cause_ipi = doorbell_cause_ipi; | ||
241 | } | ||
181 | 242 | ||
182 | BUG_ON(!smp_85xx_ops.message_pass); | 243 | BUG_ON(!smp_85xx_ops.message_pass); |
183 | 244 | ||
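Editor's note: mpc85xx_smp_flush_dcache_kexec() above walks the kimage entry list before the secondary cores are reset. Each entry is a physical address carrying a type tag in its low bits; IND_INDIRECTION entries chain to the next page of entries and IND_DONE terminates the list. For reference, the tag values the walk relies on (taken from include/linux/kexec.h of this era, not introduced by this patch; worth re-checking against the tree in use):

    /* kimage entry tags (include/linux/kexec.h) */
    #define IND_DESTINATION 0x1  /* address the following source pages are copied to */
    #define IND_INDIRECTION 0x2  /* entry points at the next page of entries */
    #define IND_DONE        0x4  /* end of the entry list */
    #define IND_SOURCE      0x8  /* a page of image data to be copied */

Crash-type images bypass this list because their segments are loaded straight into the reserved crash region at load time, which is why the hunk iterates image->segment[] in that case instead.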
diff --git a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c index d48527ffc425..12cb9bb2cc68 100644 --- a/arch/powerpc/platforms/85xx/socrates_fpga_pic.c +++ b/arch/powerpc/platforms/85xx/socrates_fpga_pic.c | |||
@@ -48,8 +48,6 @@ static struct socrates_fpga_irq_info fpga_irqs[SOCRATES_FPGA_NUM_IRQS] = { | |||
48 | [8] = {0, IRQ_TYPE_LEVEL_HIGH}, | 48 | [8] = {0, IRQ_TYPE_LEVEL_HIGH}, |
49 | }; | 49 | }; |
50 | 50 | ||
51 | #define socrates_fpga_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) | ||
52 | |||
53 | static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock); | 51 | static DEFINE_RAW_SPINLOCK(socrates_fpga_pic_lock); |
54 | 52 | ||
55 | static void __iomem *socrates_fpga_pic_iobase; | 53 | static void __iomem *socrates_fpga_pic_iobase; |
@@ -93,6 +91,7 @@ static inline unsigned int socrates_fpga_pic_get_irq(unsigned int irq) | |||
93 | 91 | ||
94 | void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc) | 92 | void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc) |
95 | { | 93 | { |
94 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
96 | unsigned int cascade_irq; | 95 | unsigned int cascade_irq; |
97 | 96 | ||
98 | /* | 97 | /* |
@@ -103,18 +102,15 @@ void socrates_fpga_pic_cascade(unsigned int irq, struct irq_desc *desc) | |||
103 | 102 | ||
104 | if (cascade_irq != NO_IRQ) | 103 | if (cascade_irq != NO_IRQ) |
105 | generic_handle_irq(cascade_irq); | 104 | generic_handle_irq(cascade_irq); |
106 | desc->chip->eoi(irq); | 105 | chip->irq_eoi(&desc->irq_data); |
107 | |||
108 | } | 106 | } |
109 | 107 | ||
110 | static void socrates_fpga_pic_ack(unsigned int virq) | 108 | static void socrates_fpga_pic_ack(struct irq_data *d) |
111 | { | 109 | { |
112 | unsigned long flags; | 110 | unsigned long flags; |
113 | unsigned int hwirq, irq_line; | 111 | unsigned int irq_line, hwirq = irqd_to_hwirq(d); |
114 | uint32_t mask; | 112 | uint32_t mask; |
115 | 113 | ||
116 | hwirq = socrates_fpga_irq_to_hw(virq); | ||
117 | |||
118 | irq_line = fpga_irqs[hwirq].irq_line; | 114 | irq_line = fpga_irqs[hwirq].irq_line; |
119 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 115 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
120 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) | 116 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) |
@@ -124,15 +120,13 @@ static void socrates_fpga_pic_ack(unsigned int virq) | |||
124 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 120 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
125 | } | 121 | } |
126 | 122 | ||
127 | static void socrates_fpga_pic_mask(unsigned int virq) | 123 | static void socrates_fpga_pic_mask(struct irq_data *d) |
128 | { | 124 | { |
129 | unsigned long flags; | 125 | unsigned long flags; |
130 | unsigned int hwirq; | 126 | unsigned int hwirq = irqd_to_hwirq(d); |
131 | int irq_line; | 127 | int irq_line; |
132 | u32 mask; | 128 | u32 mask; |
133 | 129 | ||
134 | hwirq = socrates_fpga_irq_to_hw(virq); | ||
135 | |||
136 | irq_line = fpga_irqs[hwirq].irq_line; | 130 | irq_line = fpga_irqs[hwirq].irq_line; |
137 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 131 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
138 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) | 132 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) |
@@ -142,15 +136,13 @@ static void socrates_fpga_pic_mask(unsigned int virq) | |||
142 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 136 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
143 | } | 137 | } |
144 | 138 | ||
145 | static void socrates_fpga_pic_mask_ack(unsigned int virq) | 139 | static void socrates_fpga_pic_mask_ack(struct irq_data *d) |
146 | { | 140 | { |
147 | unsigned long flags; | 141 | unsigned long flags; |
148 | unsigned int hwirq; | 142 | unsigned int hwirq = irqd_to_hwirq(d); |
149 | int irq_line; | 143 | int irq_line; |
150 | u32 mask; | 144 | u32 mask; |
151 | 145 | ||
152 | hwirq = socrates_fpga_irq_to_hw(virq); | ||
153 | |||
154 | irq_line = fpga_irqs[hwirq].irq_line; | 146 | irq_line = fpga_irqs[hwirq].irq_line; |
155 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 147 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
156 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) | 148 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) |
@@ -161,15 +153,13 @@ static void socrates_fpga_pic_mask_ack(unsigned int virq) | |||
161 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 153 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
162 | } | 154 | } |
163 | 155 | ||
164 | static void socrates_fpga_pic_unmask(unsigned int virq) | 156 | static void socrates_fpga_pic_unmask(struct irq_data *d) |
165 | { | 157 | { |
166 | unsigned long flags; | 158 | unsigned long flags; |
167 | unsigned int hwirq; | 159 | unsigned int hwirq = irqd_to_hwirq(d); |
168 | int irq_line; | 160 | int irq_line; |
169 | u32 mask; | 161 | u32 mask; |
170 | 162 | ||
171 | hwirq = socrates_fpga_irq_to_hw(virq); | ||
172 | |||
173 | irq_line = fpga_irqs[hwirq].irq_line; | 163 | irq_line = fpga_irqs[hwirq].irq_line; |
174 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 164 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
175 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) | 165 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) |
@@ -179,15 +169,13 @@ static void socrates_fpga_pic_unmask(unsigned int virq) | |||
179 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 169 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
180 | } | 170 | } |
181 | 171 | ||
182 | static void socrates_fpga_pic_eoi(unsigned int virq) | 172 | static void socrates_fpga_pic_eoi(struct irq_data *d) |
183 | { | 173 | { |
184 | unsigned long flags; | 174 | unsigned long flags; |
185 | unsigned int hwirq; | 175 | unsigned int hwirq = irqd_to_hwirq(d); |
186 | int irq_line; | 176 | int irq_line; |
187 | u32 mask; | 177 | u32 mask; |
188 | 178 | ||
189 | hwirq = socrates_fpga_irq_to_hw(virq); | ||
190 | |||
191 | irq_line = fpga_irqs[hwirq].irq_line; | 179 | irq_line = fpga_irqs[hwirq].irq_line; |
192 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); | 180 | raw_spin_lock_irqsave(&socrates_fpga_pic_lock, flags); |
193 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) | 181 | mask = socrates_fpga_pic_read(FPGA_PIC_IRQMASK(irq_line)) |
@@ -197,16 +185,14 @@ static void socrates_fpga_pic_eoi(unsigned int virq) | |||
197 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); | 185 | raw_spin_unlock_irqrestore(&socrates_fpga_pic_lock, flags); |
198 | } | 186 | } |
199 | 187 | ||
200 | static int socrates_fpga_pic_set_type(unsigned int virq, | 188 | static int socrates_fpga_pic_set_type(struct irq_data *d, |
201 | unsigned int flow_type) | 189 | unsigned int flow_type) |
202 | { | 190 | { |
203 | unsigned long flags; | 191 | unsigned long flags; |
204 | unsigned int hwirq; | 192 | unsigned int hwirq = irqd_to_hwirq(d); |
205 | int polarity; | 193 | int polarity; |
206 | u32 mask; | 194 | u32 mask; |
207 | 195 | ||
208 | hwirq = socrates_fpga_irq_to_hw(virq); | ||
209 | |||
210 | if (fpga_irqs[hwirq].type != IRQ_TYPE_NONE) | 196 | if (fpga_irqs[hwirq].type != IRQ_TYPE_NONE) |
211 | return -EINVAL; | 197 | return -EINVAL; |
212 | 198 | ||
@@ -233,21 +219,21 @@ static int socrates_fpga_pic_set_type(unsigned int virq, | |||
233 | 219 | ||
234 | static struct irq_chip socrates_fpga_pic_chip = { | 220 | static struct irq_chip socrates_fpga_pic_chip = { |
235 | .name = "FPGA-PIC", | 221 | .name = "FPGA-PIC", |
236 | .ack = socrates_fpga_pic_ack, | 222 | .irq_ack = socrates_fpga_pic_ack, |
237 | .mask = socrates_fpga_pic_mask, | 223 | .irq_mask = socrates_fpga_pic_mask, |
238 | .mask_ack = socrates_fpga_pic_mask_ack, | 224 | .irq_mask_ack = socrates_fpga_pic_mask_ack, |
239 | .unmask = socrates_fpga_pic_unmask, | 225 | .irq_unmask = socrates_fpga_pic_unmask, |
240 | .eoi = socrates_fpga_pic_eoi, | 226 | .irq_eoi = socrates_fpga_pic_eoi, |
241 | .set_type = socrates_fpga_pic_set_type, | 227 | .irq_set_type = socrates_fpga_pic_set_type, |
242 | }; | 228 | }; |
243 | 229 | ||
244 | static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq, | 230 | static int socrates_fpga_pic_host_map(struct irq_host *h, unsigned int virq, |
245 | irq_hw_number_t hwirq) | 231 | irq_hw_number_t hwirq) |
246 | { | 232 | { |
247 | /* All interrupts are LEVEL sensitive */ | 233 | /* All interrupts are LEVEL sensitive */ |
248 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 234 | irq_set_status_flags(virq, IRQ_LEVEL); |
249 | set_irq_chip_and_handler(virq, &socrates_fpga_pic_chip, | 235 | irq_set_chip_and_handler(virq, &socrates_fpga_pic_chip, |
250 | handle_fasteoi_irq); | 236 | handle_fasteoi_irq); |
251 | 237 | ||
252 | return 0; | 238 | return 0; |
253 | } | 239 | } |
@@ -308,8 +294,8 @@ void socrates_fpga_pic_init(struct device_node *pic) | |||
308 | pr_warning("FPGA PIC: can't get irq%d.\n", i); | 294 | pr_warning("FPGA PIC: can't get irq%d.\n", i); |
309 | continue; | 295 | continue; |
310 | } | 296 | } |
311 | set_irq_chained_handler(socrates_fpga_irqs[i], | 297 | irq_set_chained_handler(socrates_fpga_irqs[i], |
312 | socrates_fpga_pic_cascade); | 298 | socrates_fpga_pic_cascade); |
313 | } | 299 | } |
314 | 300 | ||
315 | socrates_fpga_pic_iobase = of_iomap(pic, 0); | 301 | socrates_fpga_pic_iobase = of_iomap(pic, 0); |
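Editor's note: socrates_fpga_pic moves from the old virq-based irq_chip hooks to the irq_data-based ones, so the hardware interrupt number now comes from irqd_to_hwirq() instead of the removed irq_map[] lookup macro. A stripped-down sketch of the shape such a chip takes after the conversion (kernel context assumed; the actual register access is elided, marked by comments):

    #include <linux/irq.h>

    static void example_pic_mask(struct irq_data *d)
    {
            unsigned int hwirq = irqd_to_hwirq(d);

            /* ...clear bit 'hwirq' in the controller's mask register... */
    }

    static void example_pic_unmask(struct irq_data *d)
    {
            unsigned int hwirq = irqd_to_hwirq(d);

            /* ...set bit 'hwirq' in the controller's mask register... */
    }

    static struct irq_chip example_pic_chip = {
            .name           = "EXAMPLE-PIC",
            .irq_mask       = example_pic_mask,
            .irq_unmask     = example_pic_unmask,
    };

The host map callback pairs with this via irq_set_status_flags() and irq_set_chip_and_handler(), as in the hunk; gef_pic below gets the same treatment, including dropping its private *_irq_to_hw() macro.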
diff --git a/arch/powerpc/platforms/85xx/stx_gp3.c b/arch/powerpc/platforms/85xx/stx_gp3.c index bc33d1859ae7..5387e9f06bdb 100644 --- a/arch/powerpc/platforms/85xx/stx_gp3.c +++ b/arch/powerpc/platforms/85xx/stx_gp3.c | |||
@@ -46,12 +46,13 @@ | |||
46 | 46 | ||
47 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 47 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) |
48 | { | 48 | { |
49 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
49 | int cascade_irq; | 50 | int cascade_irq; |
50 | 51 | ||
51 | while ((cascade_irq = cpm2_get_irq()) >= 0) | 52 | while ((cascade_irq = cpm2_get_irq()) >= 0) |
52 | generic_handle_irq(cascade_irq); | 53 | generic_handle_irq(cascade_irq); |
53 | 54 | ||
54 | desc->chip->eoi(irq); | 55 | chip->irq_eoi(&desc->irq_data); |
55 | } | 56 | } |
56 | #endif /* CONFIG_CPM2 */ | 57 | #endif /* CONFIG_CPM2 */ |
57 | 58 | ||
@@ -101,7 +102,7 @@ static void __init stx_gp3_pic_init(void) | |||
101 | 102 | ||
102 | cpm2_pic_init(np); | 103 | cpm2_pic_init(np); |
103 | of_node_put(np); | 104 | of_node_put(np); |
104 | set_irq_chained_handler(irq, cpm2_cascade); | 105 | irq_set_chained_handler(irq, cpm2_cascade); |
105 | #endif | 106 | #endif |
106 | } | 107 | } |
107 | 108 | ||
diff --git a/arch/powerpc/platforms/85xx/tqm85xx.c b/arch/powerpc/platforms/85xx/tqm85xx.c index 8f29bbce5360..325de772725a 100644 --- a/arch/powerpc/platforms/85xx/tqm85xx.c +++ b/arch/powerpc/platforms/85xx/tqm85xx.c | |||
@@ -44,12 +44,13 @@ | |||
44 | 44 | ||
45 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) | 45 | static void cpm2_cascade(unsigned int irq, struct irq_desc *desc) |
46 | { | 46 | { |
47 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
47 | int cascade_irq; | 48 | int cascade_irq; |
48 | 49 | ||
49 | while ((cascade_irq = cpm2_get_irq()) >= 0) | 50 | while ((cascade_irq = cpm2_get_irq()) >= 0) |
50 | generic_handle_irq(cascade_irq); | 51 | generic_handle_irq(cascade_irq); |
51 | 52 | ||
52 | desc->chip->eoi(irq); | 53 | chip->irq_eoi(&desc->irq_data); |
53 | } | 54 | } |
54 | #endif /* CONFIG_CPM2 */ | 55 | #endif /* CONFIG_CPM2 */ |
55 | 56 | ||
@@ -99,7 +100,7 @@ static void __init tqm85xx_pic_init(void) | |||
99 | 100 | ||
100 | cpm2_pic_init(np); | 101 | cpm2_pic_init(np); |
101 | of_node_put(np); | 102 | of_node_put(np); |
102 | set_irq_chained_handler(irq, cpm2_cascade); | 103 | irq_set_chained_handler(irq, cpm2_cascade); |
103 | #endif | 104 | #endif |
104 | } | 105 | } |
105 | 106 | ||
@@ -186,21 +187,21 @@ static int __init declare_of_platform_devices(void) | |||
186 | } | 187 | } |
187 | machine_device_initcall(tqm85xx, declare_of_platform_devices); | 188 | machine_device_initcall(tqm85xx, declare_of_platform_devices); |
188 | 189 | ||
190 | static const char *board[] __initdata = { | ||
191 | "tqc,tqm8540", | ||
192 | "tqc,tqm8541", | ||
193 | "tqc,tqm8548", | ||
194 | "tqc,tqm8555", | ||
195 | "tqc,tqm8560", | ||
196 | NULL | ||
197 | }; | ||
198 | |||
189 | /* | 199 | /* |
190 | * Called very early, device-tree isn't unflattened | 200 | * Called very early, device-tree isn't unflattened |
191 | */ | 201 | */ |
192 | static int __init tqm85xx_probe(void) | 202 | static int __init tqm85xx_probe(void) |
193 | { | 203 | { |
194 | unsigned long root = of_get_flat_dt_root(); | 204 | return of_flat_dt_match(of_get_flat_dt_root(), board); |
195 | |||
196 | if ((of_flat_dt_is_compatible(root, "tqc,tqm8540")) || | ||
197 | (of_flat_dt_is_compatible(root, "tqc,tqm8541")) || | ||
198 | (of_flat_dt_is_compatible(root, "tqc,tqm8548")) || | ||
199 | (of_flat_dt_is_compatible(root, "tqc,tqm8555")) || | ||
200 | (of_flat_dt_is_compatible(root, "tqc,tqm8560"))) | ||
201 | return 1; | ||
202 | |||
203 | return 0; | ||
204 | } | 205 | } |
205 | 206 | ||
206 | define_machine(tqm85xx) { | 207 | define_machine(tqm85xx) { |
diff --git a/arch/powerpc/platforms/86xx/gef_pic.c b/arch/powerpc/platforms/86xx/gef_pic.c index 6df9e2561c06..94594e58594c 100644 --- a/arch/powerpc/platforms/86xx/gef_pic.c +++ b/arch/powerpc/platforms/86xx/gef_pic.c | |||
@@ -46,8 +46,6 @@ | |||
46 | #define GEF_PIC_CPU0_MCP_MASK GEF_PIC_MCP_MASK(0) | 46 | #define GEF_PIC_CPU0_MCP_MASK GEF_PIC_MCP_MASK(0) |
47 | #define GEF_PIC_CPU1_MCP_MASK GEF_PIC_MCP_MASK(1) | 47 | #define GEF_PIC_CPU1_MCP_MASK GEF_PIC_MCP_MASK(1) |
48 | 48 | ||
49 | #define gef_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) | ||
50 | |||
51 | 49 | ||
52 | static DEFINE_RAW_SPINLOCK(gef_pic_lock); | 50 | static DEFINE_RAW_SPINLOCK(gef_pic_lock); |
53 | 51 | ||
@@ -95,6 +93,7 @@ static int gef_pic_cascade_irq; | |||
95 | 93 | ||
96 | void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) | 94 | void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) |
97 | { | 95 | { |
96 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
98 | unsigned int cascade_irq; | 97 | unsigned int cascade_irq; |
99 | 98 | ||
100 | /* | 99 | /* |
@@ -106,18 +105,15 @@ void gef_pic_cascade(unsigned int irq, struct irq_desc *desc) | |||
106 | if (cascade_irq != NO_IRQ) | 105 | if (cascade_irq != NO_IRQ) |
107 | generic_handle_irq(cascade_irq); | 106 | generic_handle_irq(cascade_irq); |
108 | 107 | ||
109 | desc->chip->eoi(irq); | 108 | chip->irq_eoi(&desc->irq_data); |
110 | |||
111 | } | 109 | } |
112 | 110 | ||
113 | static void gef_pic_mask(unsigned int virq) | 111 | static void gef_pic_mask(struct irq_data *d) |
114 | { | 112 | { |
115 | unsigned long flags; | 113 | unsigned long flags; |
116 | unsigned int hwirq; | 114 | unsigned int hwirq = irqd_to_hwirq(d); |
117 | u32 mask; | 115 | u32 mask; |
118 | 116 | ||
119 | hwirq = gef_irq_to_hw(virq); | ||
120 | |||
121 | raw_spin_lock_irqsave(&gef_pic_lock, flags); | 117 | raw_spin_lock_irqsave(&gef_pic_lock, flags); |
122 | mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); | 118 | mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); |
123 | mask &= ~(1 << hwirq); | 119 | mask &= ~(1 << hwirq); |
@@ -125,22 +121,20 @@ static void gef_pic_mask(unsigned int virq) | |||
125 | raw_spin_unlock_irqrestore(&gef_pic_lock, flags); | 121 | raw_spin_unlock_irqrestore(&gef_pic_lock, flags); |
126 | } | 122 | } |
127 | 123 | ||
128 | static void gef_pic_mask_ack(unsigned int virq) | 124 | static void gef_pic_mask_ack(struct irq_data *d) |
129 | { | 125 | { |
130 | /* Don't think we actually have to do anything to ack an interrupt, | 126 | /* Don't think we actually have to do anything to ack an interrupt, |
131 | * we just need to clear down the devices interrupt and it will go away | 127 | * we just need to clear down the devices interrupt and it will go away |
132 | */ | 128 | */ |
133 | gef_pic_mask(virq); | 129 | gef_pic_mask(d); |
134 | } | 130 | } |
135 | 131 | ||
136 | static void gef_pic_unmask(unsigned int virq) | 132 | static void gef_pic_unmask(struct irq_data *d) |
137 | { | 133 | { |
138 | unsigned long flags; | 134 | unsigned long flags; |
139 | unsigned int hwirq; | 135 | unsigned int hwirq = irqd_to_hwirq(d); |
140 | u32 mask; | 136 | u32 mask; |
141 | 137 | ||
142 | hwirq = gef_irq_to_hw(virq); | ||
143 | |||
144 | raw_spin_lock_irqsave(&gef_pic_lock, flags); | 138 | raw_spin_lock_irqsave(&gef_pic_lock, flags); |
145 | mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); | 139 | mask = in_be32(gef_pic_irq_reg_base + GEF_PIC_INTR_MASK(0)); |
146 | mask |= (1 << hwirq); | 140 | mask |= (1 << hwirq); |
@@ -150,9 +144,9 @@ static void gef_pic_unmask(unsigned int virq) | |||
150 | 144 | ||
151 | static struct irq_chip gef_pic_chip = { | 145 | static struct irq_chip gef_pic_chip = { |
152 | .name = "gefp", | 146 | .name = "gefp", |
153 | .mask = gef_pic_mask, | 147 | .irq_mask = gef_pic_mask, |
154 | .mask_ack = gef_pic_mask_ack, | 148 | .irq_mask_ack = gef_pic_mask_ack, |
155 | .unmask = gef_pic_unmask, | 149 | .irq_unmask = gef_pic_unmask, |
156 | }; | 150 | }; |
157 | 151 | ||
158 | 152 | ||
@@ -163,8 +157,8 @@ static int gef_pic_host_map(struct irq_host *h, unsigned int virq, | |||
163 | irq_hw_number_t hwirq) | 157 | irq_hw_number_t hwirq) |
164 | { | 158 | { |
165 | /* All interrupts are LEVEL sensitive */ | 159 | /* All interrupts are LEVEL sensitive */ |
166 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 160 | irq_set_status_flags(virq, IRQ_LEVEL); |
167 | set_irq_chip_and_handler(virq, &gef_pic_chip, handle_level_irq); | 161 | irq_set_chip_and_handler(virq, &gef_pic_chip, handle_level_irq); |
168 | 162 | ||
169 | return 0; | 163 | return 0; |
170 | } | 164 | } |
@@ -225,7 +219,7 @@ void __init gef_pic_init(struct device_node *np) | |||
225 | return; | 219 | return; |
226 | 220 | ||
227 | /* Chain with parent controller */ | 221 | /* Chain with parent controller */ |
228 | set_irq_chained_handler(gef_pic_cascade_irq, gef_pic_cascade); | 222 | irq_set_chained_handler(gef_pic_cascade_irq, gef_pic_cascade); |
229 | } | 223 | } |
230 | 224 | ||
231 | /* | 225 | /* |
diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c index 018cc67be426..a896511690c2 100644 --- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c +++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c | |||
@@ -66,7 +66,7 @@ static void __init mpc8610_suspend_init(void) | |||
66 | return; | 66 | return; |
67 | } | 67 | } |
68 | 68 | ||
69 | ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9/wakeup", NULL); | 69 | ret = request_irq(irq, mpc8610_sw9_irq, 0, "sw9:wakeup", NULL); |
70 | if (ret) { | 70 | if (ret) { |
71 | pr_err("%s: can't request pixis event IRQ: %d\n", | 71 | pr_err("%s: can't request pixis event IRQ: %d\n", |
72 | __func__, ret); | 72 | __func__, ret); |
@@ -105,45 +105,77 @@ machine_device_initcall(mpc86xx_hpcd, mpc8610_declare_of_platform_devices); | |||
105 | 105 | ||
106 | #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) | 106 | #if defined(CONFIG_FB_FSL_DIU) || defined(CONFIG_FB_FSL_DIU_MODULE) |
107 | 107 | ||
108 | static u32 get_busfreq(void) | 108 | /* |
109 | { | 109 | * DIU Area Descriptor |
110 | struct device_node *node; | 110 | * |
111 | 111 | * The MPC8610 reference manual shows the bits of the AD register in | |
112 | u32 fs_busfreq = 0; | 112 | * little-endian order, which causes the BLUE_C field to be split into two |
113 | node = of_find_node_by_type(NULL, "cpu"); | 113 | * parts. To simplify the definition of the MAKE_AD() macro, we define the |
114 | if (node) { | 114 | * fields in big-endian order and byte-swap the result. |
115 | unsigned int size; | 115 | * |
116 | const unsigned int *prop = | 116 | * So even though the registers don't look like they're in the |
117 | of_get_property(node, "bus-frequency", &size); | 117 | * same bit positions as they are on the P1022, the same value is written to |
118 | if (prop) | 118 | * the AD register on the MPC8610 and on the P1022. |
119 | fs_busfreq = *prop; | 119 | */ |
120 | of_node_put(node); | 120 | #define AD_BYTE_F 0x10000000 |
121 | }; | 121 | #define AD_ALPHA_C_MASK 0x0E000000 |
122 | return fs_busfreq; | 122 | #define AD_ALPHA_C_SHIFT 25 |
123 | } | 123 | #define AD_BLUE_C_MASK 0x01800000 |
124 | #define AD_BLUE_C_SHIFT 23 | ||
125 | #define AD_GREEN_C_MASK 0x00600000 | ||
126 | #define AD_GREEN_C_SHIFT 21 | ||
127 | #define AD_RED_C_MASK 0x00180000 | ||
128 | #define AD_RED_C_SHIFT 19 | ||
129 | #define AD_PALETTE 0x00040000 | ||
130 | #define AD_PIXEL_S_MASK 0x00030000 | ||
131 | #define AD_PIXEL_S_SHIFT 16 | ||
132 | #define AD_COMP_3_MASK 0x0000F000 | ||
133 | #define AD_COMP_3_SHIFT 12 | ||
134 | #define AD_COMP_2_MASK 0x00000F00 | ||
135 | #define AD_COMP_2_SHIFT 8 | ||
136 | #define AD_COMP_1_MASK 0x000000F0 | ||
137 | #define AD_COMP_1_SHIFT 4 | ||
138 | #define AD_COMP_0_MASK 0x0000000F | ||
139 | #define AD_COMP_0_SHIFT 0 | ||
140 | |||
141 | #define MAKE_AD(alpha, red, blue, green, size, c0, c1, c2, c3) \ | ||
142 | cpu_to_le32(AD_BYTE_F | (alpha << AD_ALPHA_C_SHIFT) | \ | ||
143 | (blue << AD_BLUE_C_SHIFT) | (green << AD_GREEN_C_SHIFT) | \ | ||
144 | (red << AD_RED_C_SHIFT) | (c3 << AD_COMP_3_SHIFT) | \ | ||
145 | (c2 << AD_COMP_2_SHIFT) | (c1 << AD_COMP_1_SHIFT) | \ | ||
146 | (c0 << AD_COMP_0_SHIFT) | (size << AD_PIXEL_S_SHIFT)) | ||
124 | 147 | ||
125 | unsigned int mpc8610hpcd_get_pixel_format(unsigned int bits_per_pixel, | 148 | unsigned int mpc8610hpcd_get_pixel_format(unsigned int bits_per_pixel, |
126 | int monitor_port) | 149 | int monitor_port) |
127 | { | 150 | { |
128 | static const unsigned long pixelformat[][3] = { | 151 | static const unsigned long pixelformat[][3] = { |
129 | {0x88882317, 0x88083218, 0x65052119}, | 152 | { |
130 | {0x88883316, 0x88082219, 0x65053118}, | 153 | MAKE_AD(3, 0, 2, 1, 3, 8, 8, 8, 8), |
154 | MAKE_AD(4, 2, 0, 1, 2, 8, 8, 8, 0), | ||
155 | MAKE_AD(4, 0, 2, 1, 1, 5, 6, 5, 0) | ||
156 | }, | ||
157 | { | ||
158 | MAKE_AD(3, 2, 0, 1, 3, 8, 8, 8, 8), | ||
159 | MAKE_AD(4, 0, 2, 1, 2, 8, 8, 8, 0), | ||
160 | MAKE_AD(4, 2, 0, 1, 1, 5, 6, 5, 0) | ||
161 | }, | ||
131 | }; | 162 | }; |
132 | unsigned int pix_fmt, arch_monitor; | 163 | unsigned int arch_monitor; |
133 | 164 | ||
165 | /* The DVI port is mis-wired on revision 1 of this board. */ | ||
134 | arch_monitor = ((*pixis_arch == 0x01) && (monitor_port == 0))? 0 : 1; | 166 | arch_monitor = ((*pixis_arch == 0x01) && (monitor_port == 0))? 0 : 1; |
135 | /* DVI port for board version 0x01 */ | 167 | |
136 | 168 | switch (bits_per_pixel) { | |
137 | if (bits_per_pixel == 32) | 169 | case 32: |
138 | pix_fmt = pixelformat[arch_monitor][0]; | 170 | return pixelformat[arch_monitor][0]; |
139 | else if (bits_per_pixel == 24) | 171 | case 24: |
140 | pix_fmt = pixelformat[arch_monitor][1]; | 172 | return pixelformat[arch_monitor][1]; |
141 | else if (bits_per_pixel == 16) | 173 | case 16: |
142 | pix_fmt = pixelformat[arch_monitor][2]; | 174 | return pixelformat[arch_monitor][2]; |
143 | else | 175 | default: |
144 | pix_fmt = pixelformat[1][0]; | 176 | pr_err("fsl-diu: unsupported pixel depth %u\n", bits_per_pixel); |
145 | 177 | return 0; | |
146 | return pix_fmt; | 178 | } |
147 | } | 179 | } |
148 | 180 | ||
149 | void mpc8610hpcd_set_gamma_table(int monitor_port, char *gamma_table_base) | 181 | void mpc8610hpcd_set_gamma_table(int monitor_port, char *gamma_table_base) |
@@ -190,8 +222,7 @@ void mpc8610hpcd_set_pixel_clock(unsigned int pixclock) | |||
190 | } | 222 | } |
191 | 223 | ||
192 | /* Pixel Clock configuration */ | 224 | /* Pixel Clock configuration */ |
193 | pr_debug("DIU: Bus Frequency = %d\n", get_busfreq()); | 225 | speed_ccb = fsl_get_sys_freq(); |
194 | speed_ccb = get_busfreq(); | ||
195 | 226 | ||
196 | /* Calculate the pixel clock with the smallest error */ | 227 | /* Calculate the pixel clock with the smallest error */ |
197 | /* calculate the following in steps to avoid overflow */ | 228 | /* calculate the following in steps to avoid overflow */ |
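For reference, the first 32bpp entry of the new DVI table expands as follows; this worked expansion is derived from the shift definitions above and is included only as an illustration, not as part of the patch.

/*
 * MAKE_AD(3, 0, 2, 1, 3, 8, 8, 8, 8) packs:
 *
 *   AD_BYTE_F                       0x10000000
 *   alpha 3 << AD_ALPHA_C_SHIFT     0x06000000
 *   blue  2 << AD_BLUE_C_SHIFT      0x01000000
 *   green 1 << AD_GREEN_C_SHIFT     0x00200000
 *   red   0 << AD_RED_C_SHIFT       0x00000000
 *   size  3 << AD_PIXEL_S_SHIFT     0x00030000
 *   c3    8 << AD_COMP_3_SHIFT      0x00008000
 *   c2    8 << AD_COMP_2_SHIFT      0x00000800
 *   c1    8 << AD_COMP_1_SHIFT      0x00000080
 *   c0    8 << AD_COMP_0_SHIFT      0x00000008
 *                                   ----------
 *                                   0x17238888
 *
 * cpu_to_le32(0x17238888), stored by the big-endian CPU, lays out the same
 * bytes in memory as the old literal 0x88882317, so the value the DIU reads
 * is unchanged.
 */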
diff --git a/arch/powerpc/platforms/86xx/mpc86xx_smp.c b/arch/powerpc/platforms/86xx/mpc86xx_smp.c index eacea0e3fcc8..af09baee22cb 100644 --- a/arch/powerpc/platforms/86xx/mpc86xx_smp.c +++ b/arch/powerpc/platforms/86xx/mpc86xx_smp.c | |||
@@ -56,7 +56,7 @@ smp_86xx_release_core(int nr) | |||
56 | } | 56 | } |
57 | 57 | ||
58 | 58 | ||
59 | static void __init | 59 | static int __init |
60 | smp_86xx_kick_cpu(int nr) | 60 | smp_86xx_kick_cpu(int nr) |
61 | { | 61 | { |
62 | unsigned int save_vector; | 62 | unsigned int save_vector; |
@@ -65,7 +65,7 @@ smp_86xx_kick_cpu(int nr) | |||
65 | unsigned int *vector = (unsigned int *)(KERNELBASE + 0x100); | 65 | unsigned int *vector = (unsigned int *)(KERNELBASE + 0x100); |
66 | 66 | ||
67 | if (nr < 0 || nr >= NR_CPUS) | 67 | if (nr < 0 || nr >= NR_CPUS) |
68 | return; | 68 | return -ENOENT; |
69 | 69 | ||
70 | pr_debug("smp_86xx_kick_cpu: kick CPU #%d\n", nr); | 70 | pr_debug("smp_86xx_kick_cpu: kick CPU #%d\n", nr); |
71 | 71 | ||
@@ -92,6 +92,8 @@ smp_86xx_kick_cpu(int nr) | |||
92 | local_irq_restore(flags); | 92 | local_irq_restore(flags); |
93 | 93 | ||
94 | pr_debug("wait CPU #%d for %d msecs.\n", nr, n); | 94 | pr_debug("wait CPU #%d for %d msecs.\n", nr, n); |
95 | |||
96 | return 0; | ||
95 | } | 97 | } |
96 | 98 | ||
97 | 99 | ||
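The change above follows the new smp_ops convention in which kick_cpu() reports success or failure instead of returning void, so generic CPU bringup can bail out cleanly. A minimal sketch of the convention, with illustrative names and the platform-specific release sequence elided:

static int example_kick_cpu(int nr)
{
	if (nr < 0 || nr >= NR_CPUS)
		return -ENOENT;		/* no such CPU */

	/* ... release the secondary core, as the platform requires ... */

	return 0;			/* CPU was kicked */
}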
diff --git a/arch/powerpc/platforms/86xx/pic.c b/arch/powerpc/platforms/86xx/pic.c index 668275d9e668..8ef8960abda6 100644 --- a/arch/powerpc/platforms/86xx/pic.c +++ b/arch/powerpc/platforms/86xx/pic.c | |||
@@ -19,10 +19,13 @@ | |||
19 | #ifdef CONFIG_PPC_I8259 | 19 | #ifdef CONFIG_PPC_I8259 |
20 | static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc) | 20 | static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc) |
21 | { | 21 | { |
22 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
22 | unsigned int cascade_irq = i8259_irq(); | 23 | unsigned int cascade_irq = i8259_irq(); |
24 | |||
23 | if (cascade_irq != NO_IRQ) | 25 | if (cascade_irq != NO_IRQ) |
24 | generic_handle_irq(cascade_irq); | 26 | generic_handle_irq(cascade_irq); |
25 | desc->chip->eoi(irq); | 27 | |
28 | chip->irq_eoi(&desc->irq_data); | ||
26 | } | 29 | } |
27 | #endif /* CONFIG_PPC_I8259 */ | 30 | #endif /* CONFIG_PPC_I8259 */ |
28 | 31 | ||
@@ -74,6 +77,6 @@ void __init mpc86xx_init_irq(void) | |||
74 | i8259_init(cascade_node, 0); | 77 | i8259_init(cascade_node, 0); |
75 | of_node_put(cascade_node); | 78 | of_node_put(cascade_node); |
76 | 79 | ||
77 | set_irq_chained_handler(cascade_irq, mpc86xx_8259_cascade); | 80 | irq_set_chained_handler(cascade_irq, mpc86xx_8259_cascade); |
78 | #endif | 81 | #endif |
79 | } | 82 | } |
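The cascade rework above is the pattern repeated throughout this series: the parent chip is fetched with irq_desc_get_chip() and the end-of-interrupt is issued through the irq_data based callback, since the old desc->chip pointer is going away. A hedged sketch with illustrative names; example_pending_irq() stands in for the controller-specific query:

static void example_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq = example_pending_irq();

	if (cascade_irq != NO_IRQ)
		generic_handle_irq(cascade_irq);

	chip->irq_eoi(&desc->irq_data);	/* EOI the cascade interrupt */
}

/* registered with: irq_set_chained_handler(parent_virq, example_cascade); */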
diff --git a/arch/powerpc/platforms/8xx/Kconfig b/arch/powerpc/platforms/8xx/Kconfig index dd35ce081cff..ee56a9ea6a79 100644 --- a/arch/powerpc/platforms/8xx/Kconfig +++ b/arch/powerpc/platforms/8xx/Kconfig | |||
@@ -49,12 +49,6 @@ config PPC_ADDER875 | |||
49 | This enables support for the Analogue & Micro Adder 875 | 49 | This enables support for the Analogue & Micro Adder 875 |
50 | board. | 50 | board. |
51 | 51 | ||
52 | config PPC_MGSUVD | ||
53 | bool "MGSUVD" | ||
54 | select CPM1 | ||
55 | help | ||
56 | This enables support for the Keymile MGSUVD board. | ||
57 | |||
58 | config TQM8XX | 52 | config TQM8XX |
59 | bool "TQM8XX" | 53 | bool "TQM8XX" |
60 | select CPM1 | 54 | select CPM1 |
diff --git a/arch/powerpc/platforms/8xx/Makefile b/arch/powerpc/platforms/8xx/Makefile index a491fe6b94fc..76a81c3350a8 100644 --- a/arch/powerpc/platforms/8xx/Makefile +++ b/arch/powerpc/platforms/8xx/Makefile | |||
@@ -6,5 +6,4 @@ obj-$(CONFIG_MPC885ADS) += mpc885ads_setup.o | |||
6 | obj-$(CONFIG_MPC86XADS) += mpc86xads_setup.o | 6 | obj-$(CONFIG_MPC86XADS) += mpc86xads_setup.o |
7 | obj-$(CONFIG_PPC_EP88XC) += ep88xc.o | 7 | obj-$(CONFIG_PPC_EP88XC) += ep88xc.o |
8 | obj-$(CONFIG_PPC_ADDER875) += adder875.o | 8 | obj-$(CONFIG_PPC_ADDER875) += adder875.o |
9 | obj-$(CONFIG_PPC_MGSUVD) += mgsuvd.o | ||
10 | obj-$(CONFIG_TQM8XX) += tqm8xx_setup.o | 9 | obj-$(CONFIG_TQM8XX) += tqm8xx_setup.o |
diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index 60168c1f98fe..1e121088826f 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c | |||
@@ -150,7 +150,7 @@ void __init mpc8xx_calibrate_decr(void) | |||
150 | */ | 150 | */ |
151 | cpu = of_find_node_by_type(NULL, "cpu"); | 151 | cpu = of_find_node_by_type(NULL, "cpu"); |
152 | virq= irq_of_parse_and_map(cpu, 0); | 152 | virq= irq_of_parse_and_map(cpu, 0); |
153 | irq = irq_map[virq].hwirq; | 153 | irq = virq_to_hw(virq); |
154 | 154 | ||
155 | sys_tmr2 = immr_map(im_sit); | 155 | sys_tmr2 = immr_map(im_sit); |
156 | out_be16(&sys_tmr2->sit_tbscr, ((1 << (7 - (irq/2))) << 8) | | 156 | out_be16(&sys_tmr2->sit_tbscr, ((1 << (7 - (irq/2))) << 8) | |
@@ -218,15 +218,20 @@ void mpc8xx_restart(char *cmd) | |||
218 | 218 | ||
219 | static void cpm_cascade(unsigned int irq, struct irq_desc *desc) | 219 | static void cpm_cascade(unsigned int irq, struct irq_desc *desc) |
220 | { | 220 | { |
221 | struct irq_chip *chip; | ||
221 | int cascade_irq; | 222 | int cascade_irq; |
222 | 223 | ||
223 | if ((cascade_irq = cpm_get_irq()) >= 0) { | 224 | if ((cascade_irq = cpm_get_irq()) >= 0) { |
224 | struct irq_desc *cdesc = irq_to_desc(cascade_irq); | 225 | struct irq_desc *cdesc = irq_to_desc(cascade_irq); |
225 | 226 | ||
226 | generic_handle_irq(cascade_irq); | 227 | generic_handle_irq(cascade_irq); |
227 | cdesc->chip->eoi(cascade_irq); | 228 | |
229 | chip = irq_desc_get_chip(cdesc); | ||
230 | chip->irq_eoi(&cdesc->irq_data); | ||
228 | } | 231 | } |
229 | desc->chip->eoi(irq); | 232 | |
233 | chip = irq_desc_get_chip(desc); | ||
234 | chip->irq_eoi(&desc->irq_data); | ||
230 | } | 235 | } |
231 | 236 | ||
232 | /* Initialize the internal interrupt controllers. The number of | 237 | /* Initialize the internal interrupt controllers. The number of |
@@ -246,5 +251,5 @@ void __init mpc8xx_pics_init(void) | |||
246 | 251 | ||
247 | irq = cpm_pic_init(); | 252 | irq = cpm_pic_init(); |
248 | if (irq != NO_IRQ) | 253 | if (irq != NO_IRQ) |
249 | set_irq_chained_handler(irq, cpm_cascade); | 254 | irq_set_chained_handler(irq, cpm_cascade); |
250 | } | 255 | } |
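virq_to_hw() is the accessor used above in place of peeking at irq_map[]; it returns the hardware interrupt number behind a virtual IRQ. Illustrative use only, with the device node assumed to carry an interrupt specifier:

static irq_hw_number_t example_hwirq(struct device_node *np)
{
	unsigned int virq = irq_of_parse_and_map(np, 0);

	return virq_to_hw(virq);	/* hardware line behind the virq */
}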
diff --git a/arch/powerpc/platforms/8xx/mgsuvd.c b/arch/powerpc/platforms/8xx/mgsuvd.c deleted file mode 100644 index ca3cb071772c..000000000000 --- a/arch/powerpc/platforms/8xx/mgsuvd.c +++ /dev/null | |||
@@ -1,92 +0,0 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Platform setup for the Keymile mgsuvd board | ||
4 | * | ||
5 | * Heiko Schocher <hs@denx.de> | ||
6 | * | ||
7 | * Copyright 2008 DENX Software Engineering GmbH | ||
8 | * | ||
9 | * This file is licensed under the terms of the GNU General Public License | ||
10 | * version 2. This program is licensed "as is" without any warranty of any | ||
11 | * kind, whether express or implied. | ||
12 | */ | ||
13 | |||
14 | #include <linux/ioport.h> | ||
15 | #include <linux/of_platform.h> | ||
16 | |||
17 | #include <asm/io.h> | ||
18 | #include <asm/machdep.h> | ||
19 | #include <asm/processor.h> | ||
20 | #include <asm/cpm1.h> | ||
21 | #include <asm/prom.h> | ||
22 | #include <asm/fs_pd.h> | ||
23 | |||
24 | #include "mpc8xx.h" | ||
25 | |||
26 | struct cpm_pin { | ||
27 | int port, pin, flags; | ||
28 | }; | ||
29 | |||
30 | static __initdata struct cpm_pin mgsuvd_pins[] = { | ||
31 | /* SMC1 */ | ||
32 | {CPM_PORTB, 24, CPM_PIN_INPUT}, /* RX */ | ||
33 | {CPM_PORTB, 25, CPM_PIN_INPUT | CPM_PIN_SECONDARY}, /* TX */ | ||
34 | |||
35 | /* SCC3 */ | ||
36 | {CPM_PORTA, 10, CPM_PIN_INPUT}, | ||
37 | {CPM_PORTA, 11, CPM_PIN_INPUT}, | ||
38 | {CPM_PORTA, 3, CPM_PIN_INPUT}, | ||
39 | {CPM_PORTA, 2, CPM_PIN_INPUT}, | ||
40 | {CPM_PORTC, 13, CPM_PIN_INPUT}, | ||
41 | }; | ||
42 | |||
43 | static void __init init_ioports(void) | ||
44 | { | ||
45 | int i; | ||
46 | |||
47 | for (i = 0; i < ARRAY_SIZE(mgsuvd_pins); i++) { | ||
48 | struct cpm_pin *pin = &mgsuvd_pins[i]; | ||
49 | cpm1_set_pin(pin->port, pin->pin, pin->flags); | ||
50 | } | ||
51 | |||
52 | setbits16(&mpc8xx_immr->im_ioport.iop_pcso, 0x300); | ||
53 | cpm1_clk_setup(CPM_CLK_SCC3, CPM_CLK5, CPM_CLK_RX); | ||
54 | cpm1_clk_setup(CPM_CLK_SCC3, CPM_CLK6, CPM_CLK_TX); | ||
55 | cpm1_clk_setup(CPM_CLK_SMC1, CPM_BRG1, CPM_CLK_RTX); | ||
56 | } | ||
57 | |||
58 | static void __init mgsuvd_setup_arch(void) | ||
59 | { | ||
60 | cpm_reset(); | ||
61 | init_ioports(); | ||
62 | } | ||
63 | |||
64 | static __initdata struct of_device_id of_bus_ids[] = { | ||
65 | { .compatible = "simple-bus" }, | ||
66 | {}, | ||
67 | }; | ||
68 | |||
69 | static int __init declare_of_platform_devices(void) | ||
70 | { | ||
71 | of_platform_bus_probe(NULL, of_bus_ids, NULL); | ||
72 | return 0; | ||
73 | } | ||
74 | machine_device_initcall(mgsuvd, declare_of_platform_devices); | ||
75 | |||
76 | static int __init mgsuvd_probe(void) | ||
77 | { | ||
78 | unsigned long root = of_get_flat_dt_root(); | ||
79 | return of_flat_dt_is_compatible(root, "keymile,mgsuvd"); | ||
80 | } | ||
81 | |||
82 | define_machine(mgsuvd) { | ||
83 | .name = "MGSUVD", | ||
84 | .probe = mgsuvd_probe, | ||
85 | .setup_arch = mgsuvd_setup_arch, | ||
86 | .init_IRQ = mpc8xx_pics_init, | ||
87 | .get_irq = mpc8xx_get_irq, | ||
88 | .restart = mpc8xx_restart, | ||
89 | .calibrate_decr = mpc8xx_calibrate_decr, | ||
90 | .set_rtc_time = mpc8xx_set_rtc_time, | ||
91 | .get_rtc_time = mpc8xx_get_rtc_time, | ||
92 | }; | ||
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig index 81c9208025fa..f970ca2b180c 100644 --- a/arch/powerpc/platforms/Kconfig +++ b/arch/powerpc/platforms/Kconfig | |||
@@ -20,6 +20,17 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig" | |||
20 | source "arch/powerpc/platforms/44x/Kconfig" | 20 | source "arch/powerpc/platforms/44x/Kconfig" |
21 | source "arch/powerpc/platforms/40x/Kconfig" | 21 | source "arch/powerpc/platforms/40x/Kconfig" |
22 | source "arch/powerpc/platforms/amigaone/Kconfig" | 22 | source "arch/powerpc/platforms/amigaone/Kconfig" |
23 | source "arch/powerpc/platforms/wsp/Kconfig" | ||
24 | |||
25 | config KVM_GUEST | ||
26 | bool "KVM Guest support" | ||
27 | default y | ||
28 | ---help--- | ||
29 | This option enables various optimizations for running under the KVM | ||
30 | hypervisor. Overhead for the kernel when not running inside KVM should | ||
31 | be minimal. | ||
32 | |||
33 | In case of doubt, say Y | ||
23 | 34 | ||
24 | config PPC_NATIVE | 35 | config PPC_NATIVE |
25 | bool | 36 | bool |
@@ -36,7 +47,7 @@ config PPC_OF_BOOT_TRAMPOLINE | |||
36 | help | 47 | help |
37 | Support for booting from Open Firmware or yaboot using an | 48 | Support for booting from Open Firmware or yaboot using an |
38 | Open Firmware client interface. This enables the kernel to | 49 | Open Firmware client interface. This enables the kernel to |
39 | communicate with open firmware to retrieve system informations | 50 | communicate with open firmware to retrieve system information |
40 | such as the device tree. | 51 | such as the device tree. |
41 | 52 | ||
42 | In case of doubt, say Y | 53 | In case of doubt, say Y |
@@ -46,16 +57,19 @@ config UDBG_RTAS_CONSOLE | |||
46 | depends on PPC_RTAS | 57 | depends on PPC_RTAS |
47 | default n | 58 | default n |
48 | 59 | ||
60 | config PPC_SMP_MUXED_IPI | ||
61 | bool | ||
62 | help | ||
63 | Select this option if your platform supports SMP and your | ||
64 | interrupt controller provides fewer than 4 interrupts to each | ||
65 | CPU. This will enable the generic code to multiplex the 4 | ||
66 | messages onto one IPI. | ||
67 | |||
49 | config PPC_UDBG_BEAT | 68 | config PPC_UDBG_BEAT |
50 | bool "BEAT based debug console" | 69 | bool "BEAT based debug console" |
51 | depends on PPC_CELLEB | 70 | depends on PPC_CELLEB |
52 | default n | 71 | default n |
53 | 72 | ||
54 | config XICS | ||
55 | depends on PPC_PSERIES | ||
56 | bool | ||
57 | default y | ||
58 | |||
59 | config IPIC | 73 | config IPIC |
60 | bool | 74 | bool |
61 | default n | 75 | default n |
@@ -137,14 +151,27 @@ config PPC_970_NAP | |||
137 | bool | 151 | bool |
138 | default n | 152 | default n |
139 | 153 | ||
154 | config PPC_P7_NAP | ||
155 | bool | ||
156 | default n | ||
157 | |||
140 | config PPC_INDIRECT_IO | 158 | config PPC_INDIRECT_IO |
141 | bool | 159 | bool |
142 | select GENERIC_IOMAP | 160 | select GENERIC_IOMAP |
143 | default n | 161 | |
162 | config PPC_INDIRECT_PIO | ||
163 | bool | ||
164 | select PPC_INDIRECT_IO | ||
165 | |||
166 | config PPC_INDIRECT_MMIO | ||
167 | bool | ||
168 | select PPC_INDIRECT_IO | ||
169 | |||
170 | config PPC_IO_WORKAROUNDS | ||
171 | bool | ||
144 | 172 | ||
145 | config GENERIC_IOMAP | 173 | config GENERIC_IOMAP |
146 | bool | 174 | bool |
147 | default n | ||
148 | 175 | ||
149 | source "drivers/cpufreq/Kconfig" | 176 | source "drivers/cpufreq/Kconfig" |
150 | 177 | ||
@@ -303,13 +330,14 @@ config OF_RTC | |||
303 | source "arch/powerpc/sysdev/bestcomm/Kconfig" | 330 | source "arch/powerpc/sysdev/bestcomm/Kconfig" |
304 | 331 | ||
305 | config MPC8xxx_GPIO | 332 | config MPC8xxx_GPIO |
306 | bool "MPC8xxx GPIO support" | 333 | bool "MPC512x/MPC8xxx GPIO support" |
307 | depends on PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || FSL_SOC_BOOKE || PPC_86xx | 334 | depends on PPC_MPC512x || PPC_MPC831x || PPC_MPC834x || PPC_MPC837x || \ |
335 | FSL_SOC_BOOKE || PPC_86xx | ||
308 | select GENERIC_GPIO | 336 | select GENERIC_GPIO |
309 | select ARCH_REQUIRE_GPIOLIB | 337 | select ARCH_REQUIRE_GPIOLIB |
310 | help | 338 | help |
311 | Say Y here if you're going to use hardware that connects to the | 339 | Say Y here if you're going to use hardware that connects to the |
312 | MPC831x/834x/837x/8572/8610 GPIOs. | 340 | MPC512x/831x/834x/837x/8572/8610 GPIOs. |
313 | 341 | ||
314 | config SIMPLE_GPIO | 342 | config SIMPLE_GPIO |
315 | bool "Support for simple, memory-mapped GPIO controllers" | 343 | bool "Support for simple, memory-mapped GPIO controllers" |
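The PPC_SMP_MUXED_IPI option added above covers platforms whose interrupt controller cannot provide four distinct IPIs. Conceptually, the generic code keeps a per-CPU word of pending message bits, raises a single hardware IPI, and the receiver drains the word. The sketch below illustrates the idea only; cause_hw_ipi() and handle_message() stand in for the real platform hook and dispatcher.

static DEFINE_PER_CPU(unsigned long, ipi_message);

static void muxed_ipi_send(int cpu, int msg)
{
	set_bit(msg, &per_cpu(ipi_message, cpu));
	smp_mb();			/* message visible before the IPI */
	cause_hw_ipi(cpu);		/* assumed platform hook */
}

static irqreturn_t muxed_ipi_receive(int irq, void *dev_id)
{
	unsigned long *pending = &__get_cpu_var(ipi_message);

	while (*pending) {
		int msg = find_first_bit(pending, BITS_PER_LONG);

		clear_bit(msg, pending);
		handle_message(msg);	/* assumed dispatcher */
	}
	return IRQ_HANDLED;
}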
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype index d361f8119b1e..2165b65876f9 100644 --- a/arch/powerpc/platforms/Kconfig.cputype +++ b/arch/powerpc/platforms/Kconfig.cputype | |||
@@ -73,6 +73,7 @@ config PPC_BOOK3S_64 | |||
73 | config PPC_BOOK3E_64 | 73 | config PPC_BOOK3E_64 |
74 | bool "Embedded processors" | 74 | bool "Embedded processors" |
75 | select PPC_FPU # Make it a choice ? | 75 | select PPC_FPU # Make it a choice ? |
76 | select PPC_SMP_MUXED_IPI | ||
76 | 77 | ||
77 | endchoice | 78 | endchoice |
78 | 79 | ||
@@ -107,6 +108,10 @@ config POWER4 | |||
107 | depends on PPC64 && PPC_BOOK3S | 108 | depends on PPC64 && PPC_BOOK3S |
108 | def_bool y | 109 | def_bool y |
109 | 110 | ||
111 | config PPC_A2 | ||
112 | bool | ||
113 | depends on PPC_BOOK3E_64 | ||
114 | |||
110 | config TUNE_CELL | 115 | config TUNE_CELL |
111 | bool "Optimize for Cell Broadband Engine" | 116 | bool "Optimize for Cell Broadband Engine" |
112 | depends on PPC64 && PPC_BOOK3S | 117 | depends on PPC64 && PPC_BOOK3S |
@@ -125,6 +130,7 @@ config 8xx | |||
125 | 130 | ||
126 | config E500 | 131 | config E500 |
127 | select FSL_EMB_PERFMON | 132 | select FSL_EMB_PERFMON |
133 | select PPC_FSL_BOOK3E | ||
128 | bool | 134 | bool |
129 | 135 | ||
130 | config PPC_E500MC | 136 | config PPC_E500MC |
@@ -166,9 +172,15 @@ config BOOKE | |||
166 | 172 | ||
167 | config FSL_BOOKE | 173 | config FSL_BOOKE |
168 | bool | 174 | bool |
169 | depends on E200 || E500 | 175 | depends on (E200 || E500) && PPC32 |
170 | default y | 176 | default y |
171 | 177 | ||
178 | # this is for common code between PPC32 & PPC64 FSL BOOKE | ||
179 | config PPC_FSL_BOOK3E | ||
180 | bool | ||
181 | select FSL_EMB_PERFMON | ||
182 | select PPC_SMP_MUXED_IPI | ||
183 | default y if FSL_BOOKE | ||
172 | 184 | ||
173 | config PTE_64BIT | 185 | config PTE_64BIT |
174 | bool | 186 | bool |
@@ -220,6 +232,24 @@ config VSX | |||
220 | 232 | ||
221 | If in doubt, say Y here. | 233 | If in doubt, say Y here. |
222 | 234 | ||
235 | config PPC_ICSWX | ||
236 | bool "Support for PowerPC icswx coprocessor instruction" | ||
237 | depends on POWER4 | ||
238 | default n | ||
239 | ---help--- | ||
240 | |||
241 | This option enables kernel support for the PowerPC Initiate | ||
242 | Coprocessor Store Word (icswx) coprocessor instruction on POWER7 | ||
243 | or newer processors. | ||
244 | |||
245 | This option is only useful if you have a processor that supports | ||
246 | the icswx coprocessor instruction. It does not have any effect | ||
247 | on processors without the icswx coprocessor instruction. | ||
248 | |||
249 | This option slightly increases kernel memory usage. | ||
250 | |||
251 | If in doubt, say N here. | ||
252 | |||
223 | config SPE | 253 | config SPE |
224 | bool "SPE Support" | 254 | bool "SPE Support" |
225 | depends on E200 || (E500 && !PPC_E500MC) | 255 | depends on E200 || (E500 && !PPC_E500MC) |
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile index fdb9f0b0d7a8..73e2116cfeed 100644 --- a/arch/powerpc/platforms/Makefile +++ b/arch/powerpc/platforms/Makefile | |||
@@ -22,3 +22,4 @@ obj-$(CONFIG_PPC_CELL) += cell/ | |||
22 | obj-$(CONFIG_PPC_PS3) += ps3/ | 22 | obj-$(CONFIG_PPC_PS3) += ps3/ |
23 | obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/ | 23 | obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/ |
24 | obj-$(CONFIG_AMIGAONE) += amigaone/ | 24 | obj-$(CONFIG_AMIGAONE) += amigaone/ |
25 | obj-$(CONFIG_PPC_WSP) += wsp/ | ||
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig index 48cd7d2e1b75..67d5009b4e86 100644 --- a/arch/powerpc/platforms/cell/Kconfig +++ b/arch/powerpc/platforms/cell/Kconfig | |||
@@ -6,14 +6,17 @@ config PPC_CELL_COMMON | |||
6 | bool | 6 | bool |
7 | select PPC_CELL | 7 | select PPC_CELL |
8 | select PPC_DCR_MMIO | 8 | select PPC_DCR_MMIO |
9 | select PPC_INDIRECT_IO | 9 | select PPC_INDIRECT_PIO |
10 | select PPC_INDIRECT_MMIO | ||
10 | select PPC_NATIVE | 11 | select PPC_NATIVE |
11 | select PPC_RTAS | 12 | select PPC_RTAS |
13 | select IRQ_EDGE_EOI_HANDLER | ||
12 | 14 | ||
13 | config PPC_CELL_NATIVE | 15 | config PPC_CELL_NATIVE |
14 | bool | 16 | bool |
15 | select PPC_CELL_COMMON | 17 | select PPC_CELL_COMMON |
16 | select MPIC | 18 | select MPIC |
19 | select PPC_IO_WORKAROUNDS | ||
17 | select IBM_NEW_EMAC_EMAC4 | 20 | select IBM_NEW_EMAC_EMAC4 |
18 | select IBM_NEW_EMAC_RGMII | 21 | select IBM_NEW_EMAC_RGMII |
19 | select IBM_NEW_EMAC_ZMII #test only | 22 | select IBM_NEW_EMAC_ZMII #test only |
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile index 83fafe922641..a4a89350bcfc 100644 --- a/arch/powerpc/platforms/cell/Makefile +++ b/arch/powerpc/platforms/cell/Makefile | |||
@@ -1,7 +1,7 @@ | |||
1 | obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o | 1 | obj-$(CONFIG_PPC_CELL_COMMON) += cbe_regs.o interrupt.o pervasive.o |
2 | 2 | ||
3 | obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \ | 3 | obj-$(CONFIG_PPC_CELL_NATIVE) += iommu.o setup.o spider-pic.o \ |
4 | pmu.o io-workarounds.o spider-pci.o | 4 | pmu.o spider-pci.o |
5 | obj-$(CONFIG_CBE_RAS) += ras.o | 5 | obj-$(CONFIG_CBE_RAS) += ras.o |
6 | 6 | ||
7 | obj-$(CONFIG_CBE_THERM) += cbe_thermal.o | 7 | obj-$(CONFIG_CBE_THERM) += cbe_thermal.o |
@@ -39,11 +39,10 @@ obj-y += celleb_setup.o \ | |||
39 | celleb_pci.o celleb_scc_epci.o \ | 39 | celleb_pci.o celleb_scc_epci.o \ |
40 | celleb_scc_pciex.o \ | 40 | celleb_scc_pciex.o \ |
41 | celleb_scc_uhc.o \ | 41 | celleb_scc_uhc.o \ |
42 | io-workarounds.o spider-pci.o \ | 42 | spider-pci.o beat.o beat_htab.o \ |
43 | beat.o beat_htab.o beat_hvCall.o \ | 43 | beat_hvCall.o beat_interrupt.o \ |
44 | beat_interrupt.o beat_iommu.o | 44 | beat_iommu.o |
45 | 45 | ||
46 | obj-$(CONFIG_SMP) += beat_smp.o | ||
47 | obj-$(CONFIG_PPC_UDBG_BEAT) += beat_udbg.o | 46 | obj-$(CONFIG_PPC_UDBG_BEAT) += beat_udbg.o |
48 | obj-$(CONFIG_SERIAL_TXX9) += celleb_scc_sio.o | 47 | obj-$(CONFIG_SERIAL_TXX9) += celleb_scc_sio.o |
49 | obj-$(CONFIG_SPU_BASE) += beat_spu_priv1.o | 48 | obj-$(CONFIG_SPU_BASE) += beat_spu_priv1.o |
diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c index 97085530aa63..ac06903e136a 100644 --- a/arch/powerpc/platforms/cell/axon_msi.c +++ b/arch/powerpc/platforms/cell/axon_msi.c | |||
@@ -93,7 +93,8 @@ static void msic_dcr_write(struct axon_msic *msic, unsigned int dcr_n, u32 val) | |||
93 | 93 | ||
94 | static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) | 94 | static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) |
95 | { | 95 | { |
96 | struct axon_msic *msic = get_irq_data(irq); | 96 | struct irq_chip *chip = irq_desc_get_chip(desc); |
97 | struct axon_msic *msic = irq_get_handler_data(irq); | ||
97 | u32 write_offset, msi; | 98 | u32 write_offset, msi; |
98 | int idx; | 99 | int idx; |
99 | int retry = 0; | 100 | int retry = 0; |
@@ -112,7 +113,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) | |||
112 | pr_devel("axon_msi: woff %x roff %x msi %x\n", | 113 | pr_devel("axon_msi: woff %x roff %x msi %x\n", |
113 | write_offset, msic->read_offset, msi); | 114 | write_offset, msic->read_offset, msi); |
114 | 115 | ||
115 | if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) { | 116 | if (msi < NR_IRQS && irq_get_chip_data(msi) == msic) { |
116 | generic_handle_irq(msi); | 117 | generic_handle_irq(msi); |
117 | msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); | 118 | msic->fifo_virt[idx] = cpu_to_le32(0xffffffff); |
118 | } else { | 119 | } else { |
@@ -145,7 +146,7 @@ static void axon_msi_cascade(unsigned int irq, struct irq_desc *desc) | |||
145 | msic->read_offset &= MSIC_FIFO_SIZE_MASK; | 146 | msic->read_offset &= MSIC_FIFO_SIZE_MASK; |
146 | } | 147 | } |
147 | 148 | ||
148 | desc->chip->eoi(irq); | 149 | chip->irq_eoi(&desc->irq_data); |
149 | } | 150 | } |
150 | 151 | ||
151 | static struct axon_msic *find_msi_translator(struct pci_dev *dev) | 152 | static struct axon_msic *find_msi_translator(struct pci_dev *dev) |
@@ -286,7 +287,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | |||
286 | } | 287 | } |
287 | dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq); | 288 | dev_dbg(&dev->dev, "axon_msi: allocated virq 0x%x\n", virq); |
288 | 289 | ||
289 | set_irq_msi(virq, entry); | 290 | irq_set_msi_desc(virq, entry); |
290 | msg.data = virq; | 291 | msg.data = virq; |
291 | write_msi_msg(virq, &msg); | 292 | write_msi_msg(virq, &msg); |
292 | } | 293 | } |
@@ -304,22 +305,23 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) | |||
304 | if (entry->irq == NO_IRQ) | 305 | if (entry->irq == NO_IRQ) |
305 | continue; | 306 | continue; |
306 | 307 | ||
307 | set_irq_msi(entry->irq, NULL); | 308 | irq_set_msi_desc(entry->irq, NULL); |
308 | irq_dispose_mapping(entry->irq); | 309 | irq_dispose_mapping(entry->irq); |
309 | } | 310 | } |
310 | } | 311 | } |
311 | 312 | ||
312 | static struct irq_chip msic_irq_chip = { | 313 | static struct irq_chip msic_irq_chip = { |
313 | .mask = mask_msi_irq, | 314 | .irq_mask = mask_msi_irq, |
314 | .unmask = unmask_msi_irq, | 315 | .irq_unmask = unmask_msi_irq, |
315 | .shutdown = unmask_msi_irq, | 316 | .irq_shutdown = mask_msi_irq, |
316 | .name = "AXON-MSI", | 317 | .name = "AXON-MSI", |
317 | }; | 318 | }; |
318 | 319 | ||
319 | static int msic_host_map(struct irq_host *h, unsigned int virq, | 320 | static int msic_host_map(struct irq_host *h, unsigned int virq, |
320 | irq_hw_number_t hw) | 321 | irq_hw_number_t hw) |
321 | { | 322 | { |
322 | set_irq_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); | 323 | irq_set_chip_data(virq, h->host_data); |
324 | irq_set_chip_and_handler(virq, &msic_irq_chip, handle_simple_irq); | ||
323 | 325 | ||
324 | return 0; | 326 | return 0; |
325 | } | 327 | } |
@@ -328,7 +330,7 @@ static struct irq_host_ops msic_host_ops = { | |||
328 | .map = msic_host_map, | 330 | .map = msic_host_map, |
329 | }; | 331 | }; |
330 | 332 | ||
331 | static int axon_msi_shutdown(struct platform_device *device) | 333 | static void axon_msi_shutdown(struct platform_device *device) |
332 | { | 334 | { |
333 | struct axon_msic *msic = dev_get_drvdata(&device->dev); | 335 | struct axon_msic *msic = dev_get_drvdata(&device->dev); |
334 | u32 tmp; | 336 | u32 tmp; |
@@ -338,12 +340,9 @@ static int axon_msi_shutdown(struct platform_device *device) | |||
338 | tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); | 340 | tmp = dcr_read(msic->dcr_host, MSIC_CTRL_REG); |
339 | tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; | 341 | tmp &= ~MSIC_CTRL_ENABLE & ~MSIC_CTRL_IRQ_ENABLE; |
340 | msic_dcr_write(msic, MSIC_CTRL_REG, tmp); | 342 | msic_dcr_write(msic, MSIC_CTRL_REG, tmp); |
341 | |||
342 | return 0; | ||
343 | } | 343 | } |
344 | 344 | ||
345 | static int axon_msi_probe(struct platform_device *device, | 345 | static int axon_msi_probe(struct platform_device *device) |
346 | const struct of_device_id *device_id) | ||
347 | { | 346 | { |
348 | struct device_node *dn = device->dev.of_node; | 347 | struct device_node *dn = device->dev.of_node; |
349 | struct axon_msic *msic; | 348 | struct axon_msic *msic; |
@@ -402,8 +401,8 @@ static int axon_msi_probe(struct platform_device *device, | |||
402 | 401 | ||
403 | msic->irq_host->host_data = msic; | 402 | msic->irq_host->host_data = msic; |
404 | 403 | ||
405 | set_irq_data(virq, msic); | 404 | irq_set_handler_data(virq, msic); |
406 | set_irq_chained_handler(virq, axon_msi_cascade); | 405 | irq_set_chained_handler(virq, axon_msi_cascade); |
407 | pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); | 406 | pr_devel("axon_msi: irq 0x%x setup for axon_msi\n", virq); |
408 | 407 | ||
409 | /* Enable the MSIC hardware */ | 408 | /* Enable the MSIC hardware */ |
@@ -446,7 +445,7 @@ static const struct of_device_id axon_msi_device_id[] = { | |||
446 | {} | 445 | {} |
447 | }; | 446 | }; |
448 | 447 | ||
449 | static struct of_platform_driver axon_msi_driver = { | 448 | static struct platform_driver axon_msi_driver = { |
450 | .probe = axon_msi_probe, | 449 | .probe = axon_msi_probe, |
451 | .shutdown = axon_msi_shutdown, | 450 | .shutdown = axon_msi_shutdown, |
452 | .driver = { | 451 | .driver = { |
@@ -458,7 +457,7 @@ static struct of_platform_driver axon_msi_driver = { | |||
458 | 457 | ||
459 | static int __init axon_msi_init(void) | 458 | static int __init axon_msi_init(void) |
460 | { | 459 | { |
461 | return of_register_platform_driver(&axon_msi_driver); | 460 | return platform_driver_register(&axon_msi_driver); |
462 | } | 461 | } |
463 | subsys_initcall(axon_msi_init); | 462 | subsys_initcall(axon_msi_init); |
464 | 463 | ||
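axon_msi is also converted from an of_platform_driver to a plain platform_driver: the probe callback loses the of_device_id argument and device-tree matching moves to of_match_table. A minimal sketch of that shape, with illustrative names:

static const struct of_device_id example_match[] = {
	{ .compatible = "example,device" },	/* illustrative compatible */
	{}
};

static int example_probe(struct platform_device *pdev)
{
	struct device_node *dn = pdev->dev.of_node;

	if (!dn)
		return -ENODEV;
	/* ... device setup ... */
	return 0;
}

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.driver	= {
		.name		= "example",
		.owner		= THIS_MODULE,
		.of_match_table	= example_match,
	},
};

static int __init example_init(void)
{
	return platform_driver_register(&example_driver);
}
subsys_initcall(example_init);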
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.c b/arch/powerpc/platforms/cell/beat_interrupt.c index 682af97321a8..55015e1f6939 100644 --- a/arch/powerpc/platforms/cell/beat_interrupt.c +++ b/arch/powerpc/platforms/cell/beat_interrupt.c | |||
@@ -61,59 +61,59 @@ static inline void beatic_update_irq_mask(unsigned int irq_plug) | |||
61 | panic("Failed to set mask IRQ!"); | 61 | panic("Failed to set mask IRQ!"); |
62 | } | 62 | } |
63 | 63 | ||
64 | static void beatic_mask_irq(unsigned int irq_plug) | 64 | static void beatic_mask_irq(struct irq_data *d) |
65 | { | 65 | { |
66 | unsigned long flags; | 66 | unsigned long flags; |
67 | 67 | ||
68 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); | 68 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); |
69 | beatic_irq_mask_enable[irq_plug/64] &= ~(1UL << (63 - (irq_plug%64))); | 69 | beatic_irq_mask_enable[d->irq/64] &= ~(1UL << (63 - (d->irq%64))); |
70 | beatic_update_irq_mask(irq_plug); | 70 | beatic_update_irq_mask(d->irq); |
71 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); | 71 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); |
72 | } | 72 | } |
73 | 73 | ||
74 | static void beatic_unmask_irq(unsigned int irq_plug) | 74 | static void beatic_unmask_irq(struct irq_data *d) |
75 | { | 75 | { |
76 | unsigned long flags; | 76 | unsigned long flags; |
77 | 77 | ||
78 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); | 78 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); |
79 | beatic_irq_mask_enable[irq_plug/64] |= 1UL << (63 - (irq_plug%64)); | 79 | beatic_irq_mask_enable[d->irq/64] |= 1UL << (63 - (d->irq%64)); |
80 | beatic_update_irq_mask(irq_plug); | 80 | beatic_update_irq_mask(d->irq); |
81 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); | 81 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); |
82 | } | 82 | } |
83 | 83 | ||
84 | static void beatic_ack_irq(unsigned int irq_plug) | 84 | static void beatic_ack_irq(struct irq_data *d) |
85 | { | 85 | { |
86 | unsigned long flags; | 86 | unsigned long flags; |
87 | 87 | ||
88 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); | 88 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); |
89 | beatic_irq_mask_ack[irq_plug/64] &= ~(1UL << (63 - (irq_plug%64))); | 89 | beatic_irq_mask_ack[d->irq/64] &= ~(1UL << (63 - (d->irq%64))); |
90 | beatic_update_irq_mask(irq_plug); | 90 | beatic_update_irq_mask(d->irq); |
91 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); | 91 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); |
92 | } | 92 | } |
93 | 93 | ||
94 | static void beatic_end_irq(unsigned int irq_plug) | 94 | static void beatic_end_irq(struct irq_data *d) |
95 | { | 95 | { |
96 | s64 err; | 96 | s64 err; |
97 | unsigned long flags; | 97 | unsigned long flags; |
98 | 98 | ||
99 | err = beat_downcount_of_interrupt(irq_plug); | 99 | err = beat_downcount_of_interrupt(d->irq); |
100 | if (err != 0) { | 100 | if (err != 0) { |
101 | if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */ | 101 | if ((err & 0xFFFFFFFF) != 0xFFFFFFF5) /* -11: wrong state */ |
102 | panic("Failed to downcount IRQ! Error = %16llx", err); | 102 | panic("Failed to downcount IRQ! Error = %16llx", err); |
103 | 103 | ||
104 | printk(KERN_ERR "IRQ over-downcounted, plug %d\n", irq_plug); | 104 | printk(KERN_ERR "IRQ over-downcounted, plug %d\n", d->irq); |
105 | } | 105 | } |
106 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); | 106 | raw_spin_lock_irqsave(&beatic_irq_mask_lock, flags); |
107 | beatic_irq_mask_ack[irq_plug/64] |= 1UL << (63 - (irq_plug%64)); | 107 | beatic_irq_mask_ack[d->irq/64] |= 1UL << (63 - (d->irq%64)); |
108 | beatic_update_irq_mask(irq_plug); | 108 | beatic_update_irq_mask(d->irq); |
109 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); | 109 | raw_spin_unlock_irqrestore(&beatic_irq_mask_lock, flags); |
110 | } | 110 | } |
111 | 111 | ||
112 | static struct irq_chip beatic_pic = { | 112 | static struct irq_chip beatic_pic = { |
113 | .name = "CELL-BEAT", | 113 | .name = "CELL-BEAT", |
114 | .unmask = beatic_unmask_irq, | 114 | .irq_unmask = beatic_unmask_irq, |
115 | .mask = beatic_mask_irq, | 115 | .irq_mask = beatic_mask_irq, |
116 | .eoi = beatic_end_irq, | 116 | .irq_eoi = beatic_end_irq, |
117 | }; | 117 | }; |
118 | 118 | ||
119 | /* | 119 | /* |
@@ -136,29 +136,18 @@ static void beatic_pic_host_unmap(struct irq_host *h, unsigned int virq) | |||
136 | static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, | 136 | static int beatic_pic_host_map(struct irq_host *h, unsigned int virq, |
137 | irq_hw_number_t hw) | 137 | irq_hw_number_t hw) |
138 | { | 138 | { |
139 | struct irq_desc *desc = irq_to_desc(virq); | ||
140 | int64_t err; | 139 | int64_t err; |
141 | 140 | ||
142 | err = beat_construct_and_connect_irq_plug(virq, hw); | 141 | err = beat_construct_and_connect_irq_plug(virq, hw); |
143 | if (err < 0) | 142 | if (err < 0) |
144 | return -EIO; | 143 | return -EIO; |
145 | 144 | ||
146 | desc->status |= IRQ_LEVEL; | 145 | irq_set_status_flags(virq, IRQ_LEVEL); |
147 | set_irq_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq); | 146 | irq_set_chip_and_handler(virq, &beatic_pic, handle_fasteoi_irq); |
148 | return 0; | 147 | return 0; |
149 | } | 148 | } |
150 | 149 | ||
151 | /* | 150 | /* |
152 | * Update binding hardware IRQ number (hw) and Virtuql | ||
153 | * IRQ number (virq). This is called only once for a given mapping. | ||
154 | */ | ||
155 | static void beatic_pic_host_remap(struct irq_host *h, unsigned int virq, | ||
156 | irq_hw_number_t hw) | ||
157 | { | ||
158 | beat_construct_and_connect_irq_plug(virq, hw); | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * Translate device-tree interrupt spec to irq_hw_number_t style (ulong), | 151 | * Translate device-tree interrupt spec to irq_hw_number_t style (ulong), |
163 | * to pass away to irq_create_mapping(). | 152 | * to pass away to irq_create_mapping(). |
164 | * | 153 | * |
@@ -185,7 +174,6 @@ static int beatic_pic_host_match(struct irq_host *h, struct device_node *np) | |||
185 | 174 | ||
186 | static struct irq_host_ops beatic_pic_host_ops = { | 175 | static struct irq_host_ops beatic_pic_host_ops = { |
187 | .map = beatic_pic_host_map, | 176 | .map = beatic_pic_host_map, |
188 | .remap = beatic_pic_host_remap, | ||
189 | .unmap = beatic_pic_host_unmap, | 177 | .unmap = beatic_pic_host_unmap, |
190 | .xlate = beatic_pic_host_xlate, | 178 | .xlate = beatic_pic_host_xlate, |
191 | .match = beatic_pic_host_match, | 179 | .match = beatic_pic_host_match, |
@@ -232,7 +220,7 @@ unsigned int beatic_get_irq(void) | |||
232 | 220 | ||
233 | ret = beatic_get_irq_plug(); | 221 | ret = beatic_get_irq_plug(); |
234 | if (ret != NO_IRQ) | 222 | if (ret != NO_IRQ) |
235 | beatic_ack_irq(ret); | 223 | beatic_ack_irq(irq_get_irq_data(ret)); |
236 | return ret; | 224 | return ret; |
237 | } | 225 | } |
238 | 226 | ||
@@ -258,22 +246,6 @@ void __init beatic_init_IRQ(void) | |||
258 | irq_set_default_host(beatic_host); | 246 | irq_set_default_host(beatic_host); |
259 | } | 247 | } |
260 | 248 | ||
261 | #ifdef CONFIG_SMP | ||
262 | |||
263 | /* Nullified to compile with SMP mode */ | ||
264 | void beatic_setup_cpu(int cpu) | ||
265 | { | ||
266 | } | ||
267 | |||
268 | void beatic_cause_IPI(int cpu, int mesg) | ||
269 | { | ||
270 | } | ||
271 | |||
272 | void beatic_request_IPIs(void) | ||
273 | { | ||
274 | } | ||
275 | #endif /* CONFIG_SMP */ | ||
276 | |||
277 | void beatic_deinit_IRQ(void) | 249 | void beatic_deinit_IRQ(void) |
278 | { | 250 | { |
279 | int i; | 251 | int i; |
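The beat interrupt controller conversion shows the generic irq_chip migration: the callbacks gain an irq_ prefix and take a struct irq_data, from which the virtual IRQ number is read. A stripped-down sketch of the shape, with register accesses elided and names illustrative:

static void example_mask(struct irq_data *d)
{
	/* d->irq is the virtual IRQ that used to be the argument */
	/* ... mask its source in hardware ... */
}

static void example_unmask(struct irq_data *d)
{
	/* ... unmask d->irq ... */
}

static void example_eoi(struct irq_data *d)
{
	/* ... signal end of interrupt for d->irq ... */
}

static struct irq_chip example_chip = {
	.name		= "EXAMPLE",
	.irq_mask	= example_mask,
	.irq_unmask	= example_unmask,
	.irq_eoi	= example_eoi,
};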
diff --git a/arch/powerpc/platforms/cell/beat_interrupt.h b/arch/powerpc/platforms/cell/beat_interrupt.h index b470fd0051f1..a7e52f91a078 100644 --- a/arch/powerpc/platforms/cell/beat_interrupt.h +++ b/arch/powerpc/platforms/cell/beat_interrupt.h | |||
@@ -24,9 +24,6 @@ | |||
24 | 24 | ||
25 | extern void beatic_init_IRQ(void); | 25 | extern void beatic_init_IRQ(void); |
26 | extern unsigned int beatic_get_irq(void); | 26 | extern unsigned int beatic_get_irq(void); |
27 | extern void beatic_cause_IPI(int cpu, int mesg); | ||
28 | extern void beatic_request_IPIs(void); | ||
29 | extern void beatic_setup_cpu(int); | ||
30 | extern void beatic_deinit_IRQ(void); | 27 | extern void beatic_deinit_IRQ(void); |
31 | 28 | ||
32 | #endif | 29 | #endif |
diff --git a/arch/powerpc/platforms/cell/beat_iommu.c b/arch/powerpc/platforms/cell/beat_iommu.c index beec405eb6f8..3ce685568935 100644 --- a/arch/powerpc/platforms/cell/beat_iommu.c +++ b/arch/powerpc/platforms/cell/beat_iommu.c | |||
@@ -76,7 +76,7 @@ static void __init celleb_init_direct_mapping(void) | |||
76 | 76 | ||
77 | static void celleb_dma_dev_setup(struct device *dev) | 77 | static void celleb_dma_dev_setup(struct device *dev) |
78 | { | 78 | { |
79 | dev->archdata.dma_ops = get_pci_dma_ops(); | 79 | set_dma_ops(dev, &dma_direct_ops); |
80 | set_dma_offset(dev, celleb_dma_direct_offset); | 80 | set_dma_offset(dev, celleb_dma_direct_offset); |
81 | } | 81 | } |
82 | 82 | ||
@@ -106,7 +106,6 @@ static struct notifier_block celleb_of_bus_notifier = { | |||
106 | static int __init celleb_init_iommu(void) | 106 | static int __init celleb_init_iommu(void) |
107 | { | 107 | { |
108 | celleb_init_direct_mapping(); | 108 | celleb_init_direct_mapping(); |
109 | set_pci_dma_ops(&dma_direct_ops); | ||
110 | ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup; | 109 | ppc_md.pci_dma_dev_setup = celleb_pci_dma_dev_setup; |
111 | bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier); | 110 | bus_register_notifier(&platform_bus_type, &celleb_of_bus_notifier); |
112 | 111 | ||
diff --git a/arch/powerpc/platforms/cell/beat_smp.c b/arch/powerpc/platforms/cell/beat_smp.c deleted file mode 100644 index 26efc204c47f..000000000000 --- a/arch/powerpc/platforms/cell/beat_smp.c +++ /dev/null | |||
@@ -1,124 +0,0 @@ | |||
1 | /* | ||
2 | * SMP support for Celleb platform. (Incomplete) | ||
3 | * | ||
4 | * (C) Copyright 2006 TOSHIBA CORPORATION | ||
5 | * | ||
6 | * This code is based on arch/powerpc/platforms/cell/smp.c: | ||
7 | * Dave Engebretsen, Peter Bergner, and | ||
8 | * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com | ||
9 | * Plus various changes from other IBM teams... | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License along | ||
22 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
23 | * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
24 | */ | ||
25 | |||
26 | #undef DEBUG | ||
27 | |||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/smp.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/init.h> | ||
32 | #include <linux/threads.h> | ||
33 | #include <linux/cpu.h> | ||
34 | |||
35 | #include <asm/irq.h> | ||
36 | #include <asm/smp.h> | ||
37 | #include <asm/machdep.h> | ||
38 | #include <asm/udbg.h> | ||
39 | |||
40 | #include "beat_interrupt.h" | ||
41 | |||
42 | #ifdef DEBUG | ||
43 | #define DBG(fmt...) udbg_printf(fmt) | ||
44 | #else | ||
45 | #define DBG(fmt...) | ||
46 | #endif | ||
47 | |||
48 | /* | ||
49 | * The primary thread of each non-boot processor is recorded here before | ||
50 | * smp init. | ||
51 | */ | ||
52 | /* static cpumask_t of_spin_map; */ | ||
53 | |||
54 | /** | ||
55 | * smp_startup_cpu() - start the given cpu | ||
56 | * | ||
57 | * At boot time, there is nothing to do for primary threads which were | ||
58 | * started from Open Firmware. For anything else, call RTAS with the | ||
59 | * appropriate start location. | ||
60 | * | ||
61 | * Returns: | ||
62 | * 0 - failure | ||
63 | * 1 - success | ||
64 | */ | ||
65 | static inline int __devinit smp_startup_cpu(unsigned int lcpu) | ||
66 | { | ||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | static void smp_beatic_message_pass(int target, int msg) | ||
71 | { | ||
72 | unsigned int i; | ||
73 | |||
74 | if (target < NR_CPUS) { | ||
75 | beatic_cause_IPI(target, msg); | ||
76 | } else { | ||
77 | for_each_online_cpu(i) { | ||
78 | if (target == MSG_ALL_BUT_SELF | ||
79 | && i == smp_processor_id()) | ||
80 | continue; | ||
81 | beatic_cause_IPI(i, msg); | ||
82 | } | ||
83 | } | ||
84 | } | ||
85 | |||
86 | static int __init smp_beatic_probe(void) | ||
87 | { | ||
88 | return cpus_weight(cpu_possible_map); | ||
89 | } | ||
90 | |||
91 | static void __devinit smp_beatic_setup_cpu(int cpu) | ||
92 | { | ||
93 | beatic_setup_cpu(cpu); | ||
94 | } | ||
95 | |||
96 | static void __devinit smp_celleb_kick_cpu(int nr) | ||
97 | { | ||
98 | BUG_ON(nr < 0 || nr >= NR_CPUS); | ||
99 | |||
100 | if (!smp_startup_cpu(nr)) | ||
101 | return; | ||
102 | } | ||
103 | |||
104 | static int smp_celleb_cpu_bootable(unsigned int nr) | ||
105 | { | ||
106 | return 1; | ||
107 | } | ||
108 | static struct smp_ops_t bpa_beatic_smp_ops = { | ||
109 | .message_pass = smp_beatic_message_pass, | ||
110 | .probe = smp_beatic_probe, | ||
111 | .kick_cpu = smp_celleb_kick_cpu, | ||
112 | .setup_cpu = smp_beatic_setup_cpu, | ||
113 | .cpu_bootable = smp_celleb_cpu_bootable, | ||
114 | }; | ||
115 | |||
116 | /* This is called very early */ | ||
117 | void __init smp_init_celleb(void) | ||
118 | { | ||
119 | DBG(" -> smp_init_celleb()\n"); | ||
120 | |||
121 | smp_ops = &bpa_beatic_smp_ops; | ||
122 | |||
123 | DBG(" <- smp_init_celleb()\n"); | ||
124 | } | ||
diff --git a/arch/powerpc/platforms/cell/cbe_regs.c b/arch/powerpc/platforms/cell/cbe_regs.c index dbc338f187a2..f3917e7a5b44 100644 --- a/arch/powerpc/platforms/cell/cbe_regs.c +++ b/arch/powerpc/platforms/cell/cbe_regs.c | |||
@@ -45,8 +45,8 @@ static struct cbe_thread_map | |||
45 | unsigned int cbe_id; | 45 | unsigned int cbe_id; |
46 | } cbe_thread_map[NR_CPUS]; | 46 | } cbe_thread_map[NR_CPUS]; |
47 | 47 | ||
48 | static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = CPU_MASK_NONE }; | 48 | static cpumask_t cbe_local_mask[MAX_CBE] = { [0 ... MAX_CBE-1] = {CPU_BITS_NONE} }; |
49 | static cpumask_t cbe_first_online_cpu = CPU_MASK_NONE; | 49 | static cpumask_t cbe_first_online_cpu = { CPU_BITS_NONE }; |
50 | 50 | ||
51 | static struct cbe_regs_map *cbe_find_map(struct device_node *np) | 51 | static struct cbe_regs_map *cbe_find_map(struct device_node *np) |
52 | { | 52 | { |
@@ -159,7 +159,8 @@ EXPORT_SYMBOL_GPL(cbe_cpu_to_node); | |||
159 | 159 | ||
160 | u32 cbe_node_to_cpu(int node) | 160 | u32 cbe_node_to_cpu(int node) |
161 | { | 161 | { |
162 | return find_first_bit( (unsigned long *) &cbe_local_mask[node], sizeof(cpumask_t)); | 162 | return cpumask_first(&cbe_local_mask[node]); |
163 | |||
163 | } | 164 | } |
164 | EXPORT_SYMBOL_GPL(cbe_node_to_cpu); | 165 | EXPORT_SYMBOL_GPL(cbe_node_to_cpu); |
165 | 166 | ||
@@ -268,9 +269,9 @@ void __init cbe_regs_init(void) | |||
268 | thread->regs = map; | 269 | thread->regs = map; |
269 | thread->cbe_id = cbe_id; | 270 | thread->cbe_id = cbe_id; |
270 | map->be_node = thread->be_node; | 271 | map->be_node = thread->be_node; |
271 | cpu_set(i, cbe_local_mask[cbe_id]); | 272 | cpumask_set_cpu(i, &cbe_local_mask[cbe_id]); |
272 | if(thread->thread_id == 0) | 273 | if(thread->thread_id == 0) |
273 | cpu_set(i, cbe_first_online_cpu); | 274 | cpumask_set_cpu(i, &cbe_first_online_cpu); |
274 | } | 275 | } |
275 | } | 276 | } |
276 | 277 | ||
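cbe_regs.c also picks up the current cpumask API: masks are initialised with CPU_BITS_NONE and manipulated through accessors rather than by open-coded bit operations. A small sketch of old versus new, illustrative only:

static cpumask_t example_mask = { CPU_BITS_NONE };

static void example_mark_cpu(int cpu)
{
	cpumask_set_cpu(cpu, &example_mask);	/* was: cpu_set(cpu, example_mask) */
}

static int example_first_cpu(void)
{
	return cpumask_first(&example_mask);	/* was: find_first_bit(...) */
}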
diff --git a/arch/powerpc/platforms/cell/celleb_pci.c b/arch/powerpc/platforms/cell/celleb_pci.c index 404d1fc04d59..5822141aa63f 100644 --- a/arch/powerpc/platforms/cell/celleb_pci.c +++ b/arch/powerpc/platforms/cell/celleb_pci.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #include <asm/pci-bridge.h> | 41 | #include <asm/pci-bridge.h> |
42 | #include <asm/ppc-pci.h> | 42 | #include <asm/ppc-pci.h> |
43 | 43 | ||
44 | #include "io-workarounds.h" | ||
45 | #include "celleb_pci.h" | 44 | #include "celleb_pci.h" |
46 | 45 | ||
47 | #define MAX_PCI_DEVICES 32 | 46 | #define MAX_PCI_DEVICES 32 |
@@ -320,7 +319,7 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node, | |||
320 | 319 | ||
321 | size = 256; | 320 | size = 256; |
322 | config = &private->fake_config[devno][fn]; | 321 | config = &private->fake_config[devno][fn]; |
323 | *config = alloc_maybe_bootmem(size, GFP_KERNEL); | 322 | *config = zalloc_maybe_bootmem(size, GFP_KERNEL); |
324 | if (*config == NULL) { | 323 | if (*config == NULL) { |
325 | printk(KERN_ERR "PCI: " | 324 | printk(KERN_ERR "PCI: " |
326 | "not enough memory for fake configuration space\n"); | 325 | "not enough memory for fake configuration space\n"); |
@@ -331,7 +330,7 @@ static int __init celleb_setup_fake_pci_device(struct device_node *node, | |||
331 | 330 | ||
332 | size = sizeof(struct celleb_pci_resource); | 331 | size = sizeof(struct celleb_pci_resource); |
333 | res = &private->res[devno][fn]; | 332 | res = &private->res[devno][fn]; |
334 | *res = alloc_maybe_bootmem(size, GFP_KERNEL); | 333 | *res = zalloc_maybe_bootmem(size, GFP_KERNEL); |
335 | if (*res == NULL) { | 334 | if (*res == NULL) { |
336 | printk(KERN_ERR | 335 | printk(KERN_ERR |
337 | "PCI: not enough memory for resource data space\n"); | 336 | "PCI: not enough memory for resource data space\n"); |
@@ -432,7 +431,7 @@ static int __init phb_set_bus_ranges(struct device_node *dev, | |||
432 | static void __init celleb_alloc_private_mem(struct pci_controller *hose) | 431 | static void __init celleb_alloc_private_mem(struct pci_controller *hose) |
433 | { | 432 | { |
434 | hose->private_data = | 433 | hose->private_data = |
435 | alloc_maybe_bootmem(sizeof(struct celleb_pci_private), | 434 | zalloc_maybe_bootmem(sizeof(struct celleb_pci_private), |
436 | GFP_KERNEL); | 435 | GFP_KERNEL); |
437 | } | 436 | } |
438 | 437 | ||
@@ -469,18 +468,6 @@ static struct of_device_id celleb_phb_match[] __initdata = { | |||
469 | }, | 468 | }, |
470 | }; | 469 | }; |
471 | 470 | ||
472 | static int __init celleb_io_workaround_init(struct pci_controller *phb, | ||
473 | struct celleb_phb_spec *phb_spec) | ||
474 | { | ||
475 | if (phb_spec->ops) { | ||
476 | iowa_register_bus(phb, phb_spec->ops, phb_spec->iowa_init, | ||
477 | phb_spec->iowa_data); | ||
478 | io_workaround_init(); | ||
479 | } | ||
480 | |||
481 | return 0; | ||
482 | } | ||
483 | |||
484 | int __init celleb_setup_phb(struct pci_controller *phb) | 471 | int __init celleb_setup_phb(struct pci_controller *phb) |
485 | { | 472 | { |
486 | struct device_node *dev = phb->dn; | 473 | struct device_node *dev = phb->dn; |
@@ -500,7 +487,11 @@ int __init celleb_setup_phb(struct pci_controller *phb) | |||
500 | if (rc) | 487 | if (rc) |
501 | return 1; | 488 | return 1; |
502 | 489 | ||
503 | return celleb_io_workaround_init(phb, phb_spec); | 490 | if (phb_spec->ops) |
491 | iowa_register_bus(phb, phb_spec->ops, | ||
492 | phb_spec->iowa_init, | ||
493 | phb_spec->iowa_data); | ||
494 | return 0; | ||
504 | } | 495 | } |
505 | 496 | ||
506 | int celleb_pci_probe_mode(struct pci_bus *bus) | 497 | int celleb_pci_probe_mode(struct pci_bus *bus) |
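The allocation change above swaps alloc_maybe_bootmem() for zalloc_maybe_bootmem(), which allocates from bootmem before the slab allocator is available and from the slab afterwards, returning zeroed memory in both cases. Illustrative use:

static void *example_alloc_config_space(void)
{
	return zalloc_maybe_bootmem(256, GFP_KERNEL);	/* 256 zeroed bytes */
}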
diff --git a/arch/powerpc/platforms/cell/celleb_pci.h b/arch/powerpc/platforms/cell/celleb_pci.h index 4cba1523ec50..a801fcc5f389 100644 --- a/arch/powerpc/platforms/cell/celleb_pci.h +++ b/arch/powerpc/platforms/cell/celleb_pci.h | |||
@@ -26,8 +26,9 @@ | |||
26 | #include <asm/pci-bridge.h> | 26 | #include <asm/pci-bridge.h> |
27 | #include <asm/prom.h> | 27 | #include <asm/prom.h> |
28 | #include <asm/ppc-pci.h> | 28 | #include <asm/ppc-pci.h> |
29 | #include <asm/io-workarounds.h> | ||
29 | 30 | ||
30 | #include "io-workarounds.h" | 31 | struct iowa_bus; |
31 | 32 | ||
32 | struct celleb_phb_spec { | 33 | struct celleb_phb_spec { |
33 | int (*setup)(struct device_node *, struct pci_controller *); | 34 | int (*setup)(struct device_node *, struct pci_controller *); |
diff --git a/arch/powerpc/platforms/cell/celleb_setup.c b/arch/powerpc/platforms/cell/celleb_setup.c index e53845579770..d58d9bae4b9b 100644 --- a/arch/powerpc/platforms/cell/celleb_setup.c +++ b/arch/powerpc/platforms/cell/celleb_setup.c | |||
@@ -128,10 +128,6 @@ static void __init celleb_setup_arch_beat(void) | |||
128 | spu_management_ops = &spu_management_of_ops; | 128 | spu_management_ops = &spu_management_of_ops; |
129 | #endif | 129 | #endif |
130 | 130 | ||
131 | #ifdef CONFIG_SMP | ||
132 | smp_init_celleb(); | ||
133 | #endif | ||
134 | |||
135 | celleb_setup_arch_common(); | 131 | celleb_setup_arch_common(); |
136 | } | 132 | } |
137 | 133 | ||
diff --git a/arch/powerpc/platforms/cell/cpufreq_spudemand.c b/arch/powerpc/platforms/cell/cpufreq_spudemand.c index 968c1c0b4d5b..d809836bcf5f 100644 --- a/arch/powerpc/platforms/cell/cpufreq_spudemand.c +++ b/arch/powerpc/platforms/cell/cpufreq_spudemand.c | |||
@@ -39,8 +39,6 @@ struct spu_gov_info_struct { | |||
39 | }; | 39 | }; |
40 | static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info); | 40 | static DEFINE_PER_CPU(struct spu_gov_info_struct, spu_gov_info); |
41 | 41 | ||
42 | static struct workqueue_struct *kspugov_wq; | ||
43 | |||
44 | static int calc_freq(struct spu_gov_info_struct *info) | 42 | static int calc_freq(struct spu_gov_info_struct *info) |
45 | { | 43 | { |
46 | int cpu; | 44 | int cpu; |
@@ -71,14 +69,14 @@ static void spu_gov_work(struct work_struct *work) | |||
71 | __cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H); | 69 | __cpufreq_driver_target(info->policy, target_freq, CPUFREQ_RELATION_H); |
72 | 70 | ||
73 | delay = usecs_to_jiffies(info->poll_int); | 71 | delay = usecs_to_jiffies(info->poll_int); |
74 | queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay); | 72 | schedule_delayed_work_on(info->policy->cpu, &info->work, delay); |
75 | } | 73 | } |
76 | 74 | ||
77 | static void spu_gov_init_work(struct spu_gov_info_struct *info) | 75 | static void spu_gov_init_work(struct spu_gov_info_struct *info) |
78 | { | 76 | { |
79 | int delay = usecs_to_jiffies(info->poll_int); | 77 | int delay = usecs_to_jiffies(info->poll_int); |
80 | INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work); | 78 | INIT_DELAYED_WORK_DEFERRABLE(&info->work, spu_gov_work); |
81 | queue_delayed_work_on(info->policy->cpu, kspugov_wq, &info->work, delay); | 79 | schedule_delayed_work_on(info->policy->cpu, &info->work, delay); |
82 | } | 80 | } |
83 | 81 | ||
84 | static void spu_gov_cancel_work(struct spu_gov_info_struct *info) | 82 | static void spu_gov_cancel_work(struct spu_gov_info_struct *info) |
@@ -152,27 +150,15 @@ static int __init spu_gov_init(void) | |||
152 | { | 150 | { |
153 | int ret; | 151 | int ret; |
154 | 152 | ||
155 | kspugov_wq = create_workqueue("kspugov"); | ||
156 | if (!kspugov_wq) { | ||
157 | printk(KERN_ERR "creation of kspugov failed\n"); | ||
158 | ret = -EFAULT; | ||
159 | goto out; | ||
160 | } | ||
161 | |||
162 | ret = cpufreq_register_governor(&spu_governor); | 153 | ret = cpufreq_register_governor(&spu_governor); |
163 | if (ret) { | 154 | if (ret) |
164 | printk(KERN_ERR "registration of governor failed\n"); | 155 | printk(KERN_ERR "registration of governor failed\n"); |
165 | destroy_workqueue(kspugov_wq); | ||
166 | goto out; | ||
167 | } | ||
168 | out: | ||
169 | return ret; | 156 | return ret; |
170 | } | 157 | } |
171 | 158 | ||
172 | static void __exit spu_gov_exit(void) | 159 | static void __exit spu_gov_exit(void) |
173 | { | 160 | { |
174 | cpufreq_unregister_governor(&spu_governor); | 161 | cpufreq_unregister_governor(&spu_governor); |
175 | destroy_workqueue(kspugov_wq); | ||
176 | } | 162 | } |
177 | 163 | ||
178 | 164 | ||
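The spudemand governor no longer maintains its own workqueue; the deferrable work is queued on the shared system workqueue, still pinned to the policy's CPU. A small sketch of the call, with illustrative names:

static void example_start_polling(struct delayed_work *work, int cpu,
				  unsigned long poll_int_usecs)
{
	unsigned long delay = usecs_to_jiffies(poll_int_usecs);

	schedule_delayed_work_on(cpu, work, delay);
}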
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 10eb1a443626..3e4eba603e6b 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c | |||
@@ -72,15 +72,15 @@ static irq_hw_number_t iic_pending_to_hwnum(struct cbe_iic_pending_bits bits) | |||
72 | return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit; | 72 | return (node << IIC_IRQ_NODE_SHIFT) | (class << 4) | unit; |
73 | } | 73 | } |
74 | 74 | ||
75 | static void iic_mask(unsigned int irq) | 75 | static void iic_mask(struct irq_data *d) |
76 | { | 76 | { |
77 | } | 77 | } |
78 | 78 | ||
79 | static void iic_unmask(unsigned int irq) | 79 | static void iic_unmask(struct irq_data *d) |
80 | { | 80 | { |
81 | } | 81 | } |
82 | 82 | ||
83 | static void iic_eoi(unsigned int irq) | 83 | static void iic_eoi(struct irq_data *d) |
84 | { | 84 | { |
85 | struct iic *iic = &__get_cpu_var(cpu_iic); | 85 | struct iic *iic = &__get_cpu_var(cpu_iic); |
86 | out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); | 86 | out_be64(&iic->regs->prio, iic->eoi_stack[--iic->eoi_ptr]); |
@@ -89,19 +89,21 @@ static void iic_eoi(unsigned int irq) | |||
89 | 89 | ||
90 | static struct irq_chip iic_chip = { | 90 | static struct irq_chip iic_chip = { |
91 | .name = "CELL-IIC", | 91 | .name = "CELL-IIC", |
92 | .mask = iic_mask, | 92 | .irq_mask = iic_mask, |
93 | .unmask = iic_unmask, | 93 | .irq_unmask = iic_unmask, |
94 | .eoi = iic_eoi, | 94 | .irq_eoi = iic_eoi, |
95 | }; | 95 | }; |
96 | 96 | ||
97 | 97 | ||
98 | static void iic_ioexc_eoi(unsigned int irq) | 98 | static void iic_ioexc_eoi(struct irq_data *d) |
99 | { | 99 | { |
100 | } | 100 | } |
101 | 101 | ||
102 | static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) | 102 | static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) |
103 | { | 103 | { |
104 | struct cbe_iic_regs __iomem *node_iic = (void __iomem *)desc->handler_data; | 104 | struct irq_chip *chip = irq_desc_get_chip(desc); |
105 | struct cbe_iic_regs __iomem *node_iic = | ||
106 | (void __iomem *)irq_desc_get_handler_data(desc); | ||
105 | unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; | 107 | unsigned int base = (irq & 0xffffff00) | IIC_IRQ_TYPE_IOEXC; |
106 | unsigned long bits, ack; | 108 | unsigned long bits, ack; |
107 | int cascade; | 109 | int cascade; |
@@ -128,15 +130,15 @@ static void iic_ioexc_cascade(unsigned int irq, struct irq_desc *desc) | |||
128 | if (ack) | 130 | if (ack) |
129 | out_be64(&node_iic->iic_is, ack); | 131 | out_be64(&node_iic->iic_is, ack); |
130 | } | 132 | } |
131 | desc->chip->eoi(irq); | 133 | chip->irq_eoi(&desc->irq_data); |
132 | } | 134 | } |
133 | 135 | ||
134 | 136 | ||
135 | static struct irq_chip iic_ioexc_chip = { | 137 | static struct irq_chip iic_ioexc_chip = { |
136 | .name = "CELL-IOEX", | 138 | .name = "CELL-IOEX", |
137 | .mask = iic_mask, | 139 | .irq_mask = iic_mask, |
138 | .unmask = iic_unmask, | 140 | .irq_unmask = iic_unmask, |
139 | .eoi = iic_ioexc_eoi, | 141 | .irq_eoi = iic_ioexc_eoi, |
140 | }; | 142 | }; |
141 | 143 | ||
142 | /* Get an IRQ number from the pending state register of the IIC */ | 144 | /* Get an IRQ number from the pending state register of the IIC */ |
@@ -174,14 +176,14 @@ EXPORT_SYMBOL_GPL(iic_get_target_id); | |||
174 | #ifdef CONFIG_SMP | 176 | #ifdef CONFIG_SMP |
175 | 177 | ||
176 | /* Use the highest interrupt priorities for IPI */ | 178 | /* Use the highest interrupt priorities for IPI */ |
177 | static inline int iic_ipi_to_irq(int ipi) | 179 | static inline int iic_msg_to_irq(int msg) |
178 | { | 180 | { |
179 | return IIC_IRQ_TYPE_IPI + 0xf - ipi; | 181 | return IIC_IRQ_TYPE_IPI + 0xf - msg; |
180 | } | 182 | } |
181 | 183 | ||
182 | void iic_cause_IPI(int cpu, int mesg) | 184 | void iic_message_pass(int cpu, int msg) |
183 | { | 185 | { |
184 | out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - mesg) << 4); | 186 | out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); |
185 | } | 187 | } |
186 | 188 | ||
187 | struct irq_host *iic_get_irq_host(int node) | 189 | struct irq_host *iic_get_irq_host(int node) |
@@ -190,38 +192,31 @@ struct irq_host *iic_get_irq_host(int node) | |||
190 | } | 192 | } |
191 | EXPORT_SYMBOL_GPL(iic_get_irq_host); | 193 | EXPORT_SYMBOL_GPL(iic_get_irq_host); |
192 | 194 | ||
193 | static irqreturn_t iic_ipi_action(int irq, void *dev_id) | 195 | static void iic_request_ipi(int msg) |
194 | { | ||
195 | int ipi = (int)(long)dev_id; | ||
196 | |||
197 | smp_message_recv(ipi); | ||
198 | |||
199 | return IRQ_HANDLED; | ||
200 | } | ||
201 | static void iic_request_ipi(int ipi, const char *name) | ||
202 | { | 196 | { |
203 | int virq; | 197 | int virq; |
204 | 198 | ||
205 | virq = irq_create_mapping(iic_host, iic_ipi_to_irq(ipi)); | 199 | virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg)); |
206 | if (virq == NO_IRQ) { | 200 | if (virq == NO_IRQ) { |
207 | printk(KERN_ERR | 201 | printk(KERN_ERR |
208 | "iic: failed to map IPI %s\n", name); | 202 | "iic: failed to map IPI %s\n", smp_ipi_name[msg]); |
209 | return; | 203 | return; |
210 | } | 204 | } |
211 | if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, name, | 205 | |
212 | (void *)(long)ipi)) | 206 | /* |
213 | printk(KERN_ERR | 207 | * If smp_request_message_ipi encounters an error it will notify |
214 | "iic: failed to request IPI %s\n", name); | 208 | * the error. If a message is not needed it will return non-zero. |
209 | */ | ||
210 | if (smp_request_message_ipi(virq, msg)) | ||
211 | irq_dispose_mapping(virq); | ||
215 | } | 212 | } |
216 | 213 | ||
217 | void iic_request_IPIs(void) | 214 | void iic_request_IPIs(void) |
218 | { | 215 | { |
219 | iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call"); | 216 | iic_request_ipi(PPC_MSG_CALL_FUNCTION); |
220 | iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched"); | 217 | iic_request_ipi(PPC_MSG_RESCHEDULE); |
221 | iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single"); | 218 | iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE); |
222 | #ifdef CONFIG_DEBUGGER | 219 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK); |
223 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); | ||
224 | #endif /* CONFIG_DEBUGGER */ | ||
225 | } | 220 | } |
226 | 221 | ||
227 | #endif /* CONFIG_SMP */ | 222 | #endif /* CONFIG_SMP */ |
@@ -233,65 +228,19 @@ static int iic_host_match(struct irq_host *h, struct device_node *node) | |||
233 | "IBM,CBEA-Internal-Interrupt-Controller"); | 228 | "IBM,CBEA-Internal-Interrupt-Controller"); |
234 | } | 229 | } |
235 | 230 | ||
236 | extern int noirqdebug; | ||
237 | |||
238 | static void handle_iic_irq(unsigned int irq, struct irq_desc *desc) | ||
239 | { | ||
240 | raw_spin_lock(&desc->lock); | ||
241 | |||
242 | desc->status &= ~(IRQ_REPLAY | IRQ_WAITING); | ||
243 | |||
244 | /* | ||
245 | * If we're currently running this IRQ, or its disabled, | ||
246 | * we shouldn't process the IRQ. Mark it pending, handle | ||
247 | * the necessary masking and go out | ||
248 | */ | ||
249 | if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) || | ||
250 | !desc->action)) { | ||
251 | desc->status |= IRQ_PENDING; | ||
252 | goto out_eoi; | ||
253 | } | ||
254 | |||
255 | kstat_incr_irqs_this_cpu(irq, desc); | ||
256 | |||
257 | /* Mark the IRQ currently in progress.*/ | ||
258 | desc->status |= IRQ_INPROGRESS; | ||
259 | |||
260 | do { | ||
261 | struct irqaction *action = desc->action; | ||
262 | irqreturn_t action_ret; | ||
263 | |||
264 | if (unlikely(!action)) | ||
265 | goto out_eoi; | ||
266 | |||
267 | desc->status &= ~IRQ_PENDING; | ||
268 | raw_spin_unlock(&desc->lock); | ||
269 | action_ret = handle_IRQ_event(irq, action); | ||
270 | if (!noirqdebug) | ||
271 | note_interrupt(irq, desc, action_ret); | ||
272 | raw_spin_lock(&desc->lock); | ||
273 | |||
274 | } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING); | ||
275 | |||
276 | desc->status &= ~IRQ_INPROGRESS; | ||
277 | out_eoi: | ||
278 | desc->chip->eoi(irq); | ||
279 | raw_spin_unlock(&desc->lock); | ||
280 | } | ||
281 | |||
282 | static int iic_host_map(struct irq_host *h, unsigned int virq, | 231 | static int iic_host_map(struct irq_host *h, unsigned int virq, |
283 | irq_hw_number_t hw) | 232 | irq_hw_number_t hw) |
284 | { | 233 | { |
285 | switch (hw & IIC_IRQ_TYPE_MASK) { | 234 | switch (hw & IIC_IRQ_TYPE_MASK) { |
286 | case IIC_IRQ_TYPE_IPI: | 235 | case IIC_IRQ_TYPE_IPI: |
287 | set_irq_chip_and_handler(virq, &iic_chip, handle_percpu_irq); | 236 | irq_set_chip_and_handler(virq, &iic_chip, handle_percpu_irq); |
288 | break; | 237 | break; |
289 | case IIC_IRQ_TYPE_IOEXC: | 238 | case IIC_IRQ_TYPE_IOEXC: |
290 | set_irq_chip_and_handler(virq, &iic_ioexc_chip, | 239 | irq_set_chip_and_handler(virq, &iic_ioexc_chip, |
291 | handle_iic_irq); | 240 | handle_edge_eoi_irq); |
292 | break; | 241 | break; |
293 | default: | 242 | default: |
294 | set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq); | 243 | irq_set_chip_and_handler(virq, &iic_chip, handle_edge_eoi_irq); |
295 | } | 244 | } |
296 | return 0; | 245 | return 0; |
297 | } | 246 | } |
@@ -408,8 +357,8 @@ static int __init setup_iic(void) | |||
408 | * irq_data is a generic pointer that gets passed back | 357 | * irq_data is a generic pointer that gets passed back |
409 | * to us later, so the forced cast is fine. | 358 | * to us later, so the forced cast is fine. |
410 | */ | 359 | */ |
411 | set_irq_data(cascade, (void __force *)node_iic); | 360 | irq_set_handler_data(cascade, (void __force *)node_iic); |
412 | set_irq_chained_handler(cascade , iic_ioexc_cascade); | 361 | irq_set_chained_handler(cascade, iic_ioexc_cascade); |
413 | out_be64(&node_iic->iic_ir, | 362 | out_be64(&node_iic->iic_ir, |
414 | (1 << 12) /* priority */ | | 363 | (1 << 12) /* priority */ | |
415 | (node << 4) /* dest node */ | | 364 | (node << 4) /* dest node */ | |
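The interrupt.c hunks above are part of the tree-wide switch to irq_data-based irq_chip methods: .irq_mask/.irq_unmask/.irq_eoi now receive a struct irq_data instead of a virq number, the driver-private handle_iic_irq copy is dropped in favour of the generic handle_edge_eoi_irq flow, and IPIs are requested through smp_request_message_ipi() rather than a local action handler. A minimal, hypothetical sketch of the converted callback shape (the chip name and pr_debug bodies are placeholders, not taken from this driver):

#include <linux/irq.h>

static void example_mask(struct irq_data *d)
{
	/* the hw number and any chip data now hang off irq_data */
	pr_debug("mask hw irq %lu\n", irqd_to_hwirq(d));
}

static void example_eoi(struct irq_data *d)
{
	pr_debug("eoi hw irq %lu\n", irqd_to_hwirq(d));
}

static struct irq_chip example_chip = {
	.name		= "EXAMPLE",
	.irq_mask	= example_mask,
	.irq_unmask	= example_mask,	/* stub reused, sketch only */
	.irq_eoi	= example_eoi,
};

Because the irq_data carries the hardware number, callbacks no longer need to index irq_map[] by virq.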
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h index 942dc39d6045..4f60ae6ca358 100644 --- a/arch/powerpc/platforms/cell/interrupt.h +++ b/arch/powerpc/platforms/cell/interrupt.h | |||
@@ -75,7 +75,7 @@ enum { | |||
75 | }; | 75 | }; |
76 | 76 | ||
77 | extern void iic_init_IRQ(void); | 77 | extern void iic_init_IRQ(void); |
78 | extern void iic_cause_IPI(int cpu, int mesg); | 78 | extern void iic_message_pass(int cpu, int msg); |
79 | extern void iic_request_IPIs(void); | 79 | extern void iic_request_IPIs(void); |
80 | extern void iic_setup_cpu(void); | 80 | extern void iic_setup_cpu(void); |
81 | 81 | ||
diff --git a/arch/powerpc/platforms/cell/qpace_setup.c b/arch/powerpc/platforms/cell/qpace_setup.c index 1b5749042756..51e290126bc1 100644 --- a/arch/powerpc/platforms/cell/qpace_setup.c +++ b/arch/powerpc/platforms/cell/qpace_setup.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include "interrupt.h" | 42 | #include "interrupt.h" |
43 | #include "pervasive.h" | 43 | #include "pervasive.h" |
44 | #include "ras.h" | 44 | #include "ras.h" |
45 | #include "io-workarounds.h" | ||
46 | 45 | ||
47 | static void qpace_show_cpuinfo(struct seq_file *m) | 46 | static void qpace_show_cpuinfo(struct seq_file *m) |
48 | { | 47 | { |
@@ -145,9 +144,4 @@ define_machine(qpace) { | |||
145 | .calibrate_decr = generic_calibrate_decr, | 144 | .calibrate_decr = generic_calibrate_decr, |
146 | .progress = qpace_progress, | 145 | .progress = qpace_progress, |
147 | .init_IRQ = iic_init_IRQ, | 146 | .init_IRQ = iic_init_IRQ, |
148 | #ifdef CONFIG_KEXEC | ||
149 | .machine_kexec = default_machine_kexec, | ||
150 | .machine_kexec_prepare = default_machine_kexec_prepare, | ||
151 | .machine_crash_shutdown = default_machine_crash_shutdown, | ||
152 | #endif | ||
153 | }; | 147 | }; |
diff --git a/arch/powerpc/platforms/cell/ras.c b/arch/powerpc/platforms/cell/ras.c index 1d3c4effea10..5ec1e47a0d77 100644 --- a/arch/powerpc/platforms/cell/ras.c +++ b/arch/powerpc/platforms/cell/ras.c | |||
@@ -173,8 +173,10 @@ static int __init cbe_ptcal_enable(void) | |||
173 | return -ENODEV; | 173 | return -ENODEV; |
174 | 174 | ||
175 | size = of_get_property(np, "ibm,cbe-ptcal-size", NULL); | 175 | size = of_get_property(np, "ibm,cbe-ptcal-size", NULL); |
176 | if (!size) | 176 | if (!size) { |
177 | of_node_put(np); | ||
177 | return -ENODEV; | 178 | return -ENODEV; |
179 | } | ||
178 | 180 | ||
179 | pr_debug("%s: enabling PTCAL, size = 0x%x\n", __func__, *size); | 181 | pr_debug("%s: enabling PTCAL, size = 0x%x\n", __func__, *size); |
180 | order = get_order(*size); | 182 | order = get_order(*size); |
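The added of_node_put() balances the reference taken when the ptcal node was looked up earlier in cbe_ptcal_enable(), so the early -ENODEV return no longer leaks it. A generic sketch of the lookup/put pattern, with placeholder node and property names:

#include <linux/of.h>

static int example_read_prop(void)
{
	struct device_node *np;
	const u32 *prop;

	np = of_find_node_by_path("/example");		/* takes a reference */
	if (!np)
		return -ENODEV;

	prop = of_get_property(np, "example-prop", NULL);
	if (!prop) {
		of_node_put(np);	/* drop the reference on every exit path */
		return -ENODEV;
	}

	pr_debug("example-prop = 0x%x\n", *prop);
	of_node_put(np);
	return 0;
}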
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index 691995761b3d..c73cf4c43fc2 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c | |||
@@ -51,11 +51,11 @@ | |||
51 | #include <asm/udbg.h> | 51 | #include <asm/udbg.h> |
52 | #include <asm/mpic.h> | 52 | #include <asm/mpic.h> |
53 | #include <asm/cell-regs.h> | 53 | #include <asm/cell-regs.h> |
54 | #include <asm/io-workarounds.h> | ||
54 | 55 | ||
55 | #include "interrupt.h" | 56 | #include "interrupt.h" |
56 | #include "pervasive.h" | 57 | #include "pervasive.h" |
57 | #include "ras.h" | 58 | #include "ras.h" |
58 | #include "io-workarounds.h" | ||
59 | 59 | ||
60 | #ifdef DEBUG | 60 | #ifdef DEBUG |
61 | #define DBG(fmt...) udbg_printf(fmt) | 61 | #define DBG(fmt...) udbg_printf(fmt) |
@@ -136,8 +136,6 @@ static int __devinit cell_setup_phb(struct pci_controller *phb) | |||
136 | 136 | ||
137 | iowa_register_bus(phb, &spiderpci_ops, &spiderpci_iowa_init, | 137 | iowa_register_bus(phb, &spiderpci_ops, &spiderpci_iowa_init, |
138 | (void *)SPIDER_PCI_REG_BASE); | 138 | (void *)SPIDER_PCI_REG_BASE); |
139 | io_workaround_init(); | ||
140 | |||
141 | return 0; | 139 | return 0; |
142 | } | 140 | } |
143 | 141 | ||
@@ -187,13 +185,15 @@ machine_subsys_initcall(cell, cell_publish_devices); | |||
187 | 185 | ||
188 | static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc) | 186 | static void cell_mpic_cascade(unsigned int irq, struct irq_desc *desc) |
189 | { | 187 | { |
190 | struct mpic *mpic = desc->handler_data; | 188 | struct irq_chip *chip = irq_desc_get_chip(desc); |
189 | struct mpic *mpic = irq_desc_get_handler_data(desc); | ||
191 | unsigned int virq; | 190 | unsigned int virq; |
192 | 191 | ||
193 | virq = mpic_get_one_irq(mpic); | 192 | virq = mpic_get_one_irq(mpic); |
194 | if (virq != NO_IRQ) | 193 | if (virq != NO_IRQ) |
195 | generic_handle_irq(virq); | 194 | generic_handle_irq(virq); |
196 | desc->chip->eoi(irq); | 195 | |
196 | chip->irq_eoi(&desc->irq_data); | ||
197 | } | 197 | } |
198 | 198 | ||
199 | static void __init mpic_init_IRQ(void) | 199 | static void __init mpic_init_IRQ(void) |
@@ -221,8 +221,8 @@ static void __init mpic_init_IRQ(void) | |||
221 | 221 | ||
222 | printk(KERN_INFO "%s : hooking up to IRQ %d\n", | 222 | printk(KERN_INFO "%s : hooking up to IRQ %d\n", |
223 | dn->full_name, virq); | 223 | dn->full_name, virq); |
224 | set_irq_data(virq, mpic); | 224 | irq_set_handler_data(virq, mpic); |
225 | set_irq_chained_handler(virq, cell_mpic_cascade); | 225 | irq_set_chained_handler(virq, cell_mpic_cascade); |
226 | } | 226 | } |
227 | } | 227 | } |
228 | 228 | ||
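Since the chip and handler_data fields of irq_desc are no longer poked directly, the MPIC cascade fetches them through accessors and issues the EOI on the descriptor's irq_data. A hypothetical sketch of that chained-handler shape (example_pic_get_irq() is an invented pending-source lookup, not a real API):

extern unsigned int example_pic_get_irq(void *pic);	/* hypothetical helper */

static void example_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void *pic = irq_desc_get_handler_data(desc);
	unsigned int virq = example_pic_get_irq(pic);

	if (virq != NO_IRQ)
		generic_handle_irq(virq);

	chip->irq_eoi(&desc->irq_data);		/* EOI the parent controller */
}

The setup side stays symmetric: irq_set_handler_data() and irq_set_chained_handler() replace the old set_irq_data()/set_irq_chained_handler() pair, as in mpic_init_IRQ() above.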
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c index f774530075b7..dbb641ea90dd 100644 --- a/arch/powerpc/platforms/cell/smp.c +++ b/arch/powerpc/platforms/cell/smp.c | |||
@@ -77,7 +77,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) | |||
77 | unsigned int pcpu; | 77 | unsigned int pcpu; |
78 | int start_cpu; | 78 | int start_cpu; |
79 | 79 | ||
80 | if (cpu_isset(lcpu, of_spin_map)) | 80 | if (cpumask_test_cpu(lcpu, &of_spin_map)) |
81 | /* Already started by OF and sitting in spin loop */ | 81 | /* Already started by OF and sitting in spin loop */ |
82 | return 1; | 82 | return 1; |
83 | 83 | ||
@@ -103,27 +103,11 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) | |||
103 | return 1; | 103 | return 1; |
104 | } | 104 | } |
105 | 105 | ||
106 | static void smp_iic_message_pass(int target, int msg) | ||
107 | { | ||
108 | unsigned int i; | ||
109 | |||
110 | if (target < NR_CPUS) { | ||
111 | iic_cause_IPI(target, msg); | ||
112 | } else { | ||
113 | for_each_online_cpu(i) { | ||
114 | if (target == MSG_ALL_BUT_SELF | ||
115 | && i == smp_processor_id()) | ||
116 | continue; | ||
117 | iic_cause_IPI(i, msg); | ||
118 | } | ||
119 | } | ||
120 | } | ||
121 | |||
122 | static int __init smp_iic_probe(void) | 106 | static int __init smp_iic_probe(void) |
123 | { | 107 | { |
124 | iic_request_IPIs(); | 108 | iic_request_IPIs(); |
125 | 109 | ||
126 | return cpus_weight(cpu_possible_map); | 110 | return cpumask_weight(cpu_possible_mask); |
127 | } | 111 | } |
128 | 112 | ||
129 | static void __devinit smp_cell_setup_cpu(int cpu) | 113 | static void __devinit smp_cell_setup_cpu(int cpu) |
@@ -137,12 +121,12 @@ static void __devinit smp_cell_setup_cpu(int cpu) | |||
137 | mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); | 121 | mtspr(SPRN_DABRX, DABRX_KERNEL | DABRX_USER); |
138 | } | 122 | } |
139 | 123 | ||
140 | static void __devinit smp_cell_kick_cpu(int nr) | 124 | static int __devinit smp_cell_kick_cpu(int nr) |
141 | { | 125 | { |
142 | BUG_ON(nr < 0 || nr >= NR_CPUS); | 126 | BUG_ON(nr < 0 || nr >= NR_CPUS); |
143 | 127 | ||
144 | if (!smp_startup_cpu(nr)) | 128 | if (!smp_startup_cpu(nr)) |
145 | return; | 129 | return -ENOENT; |
146 | 130 | ||
147 | /* | 131 | /* |
148 | * The processor is currently spinning, waiting for the | 132 | * The processor is currently spinning, waiting for the |
@@ -150,6 +134,8 @@ static void __devinit smp_cell_kick_cpu(int nr) | |||
150 | * the processor will continue on to secondary_start | 134 | * the processor will continue on to secondary_start |
151 | */ | 135 | */ |
152 | paca[nr].cpu_start = 1; | 136 | paca[nr].cpu_start = 1; |
137 | |||
138 | return 0; | ||
153 | } | 139 | } |
154 | 140 | ||
155 | static int smp_cell_cpu_bootable(unsigned int nr) | 141 | static int smp_cell_cpu_bootable(unsigned int nr) |
@@ -166,7 +152,7 @@ static int smp_cell_cpu_bootable(unsigned int nr) | |||
166 | return 1; | 152 | return 1; |
167 | } | 153 | } |
168 | static struct smp_ops_t bpa_iic_smp_ops = { | 154 | static struct smp_ops_t bpa_iic_smp_ops = { |
169 | .message_pass = smp_iic_message_pass, | 155 | .message_pass = iic_message_pass, |
170 | .probe = smp_iic_probe, | 156 | .probe = smp_iic_probe, |
171 | .kick_cpu = smp_cell_kick_cpu, | 157 | .kick_cpu = smp_cell_kick_cpu, |
172 | .setup_cpu = smp_cell_setup_cpu, | 158 | .setup_cpu = smp_cell_setup_cpu, |
@@ -186,13 +172,12 @@ void __init smp_init_cell(void) | |||
186 | if (cpu_has_feature(CPU_FTR_SMT)) { | 172 | if (cpu_has_feature(CPU_FTR_SMT)) { |
187 | for_each_present_cpu(i) { | 173 | for_each_present_cpu(i) { |
188 | if (cpu_thread_in_core(i) == 0) | 174 | if (cpu_thread_in_core(i) == 0) |
189 | cpu_set(i, of_spin_map); | 175 | cpumask_set_cpu(i, &of_spin_map); |
190 | } | 176 | } |
191 | } else { | 177 | } else |
192 | of_spin_map = cpu_present_map; | 178 | cpumask_copy(&of_spin_map, cpu_present_mask); |
193 | } | ||
194 | 179 | ||
195 | cpu_clear(boot_cpuid, of_spin_map); | 180 | cpumask_clear_cpu(boot_cpuid, &of_spin_map); |
196 | 181 | ||
197 | /* Non-lpar has additional take/give timebase */ | 182 | /* Non-lpar has additional take/give timebase */ |
198 | if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { | 183 | if (rtas_token("freeze-time-base") != RTAS_UNKNOWN_SERVICE) { |
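Two conversions meet in smp.c: the open-coded cpumask_t operations (cpu_isset, cpu_set, cpus_weight, struct assignment) move to the cpumask_*() accessors that take struct cpumask pointers, and kick_cpu now returns an errno (-ENOENT when the CPU never reached its spin loop) so the generic bring-up can fail cleanly. A small sketch of the accessor equivalents over a hypothetical local mask:

static cpumask_t example_map;

static void example_cpumask_usage(int cpu)
{
	if (!cpumask_test_cpu(cpu, &example_map))	/* was cpu_isset() */
		cpumask_set_cpu(cpu, &example_map);	/* was cpu_set() */

	cpumask_copy(&example_map, cpu_present_mask);	/* was a struct assignment */
	pr_info("%u possible cpus\n", cpumask_weight(cpu_possible_mask));
}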
diff --git a/arch/powerpc/platforms/cell/spider-pci.c b/arch/powerpc/platforms/cell/spider-pci.c index ca7731c0b595..f1f7878893f3 100644 --- a/arch/powerpc/platforms/cell/spider-pci.c +++ b/arch/powerpc/platforms/cell/spider-pci.c | |||
@@ -27,8 +27,7 @@ | |||
27 | 27 | ||
28 | #include <asm/ppc-pci.h> | 28 | #include <asm/ppc-pci.h> |
29 | #include <asm/pci-bridge.h> | 29 | #include <asm/pci-bridge.h> |
30 | 30 | #include <asm/io-workarounds.h> | |
31 | #include "io-workarounds.h" | ||
32 | 31 | ||
33 | #define SPIDER_PCI_DISABLE_PREFETCH | 32 | #define SPIDER_PCI_DISABLE_PREFETCH |
34 | 33 | ||
diff --git a/arch/powerpc/platforms/cell/spider-pic.c b/arch/powerpc/platforms/cell/spider-pic.c index 5876e888e412..442c28c00f88 100644 --- a/arch/powerpc/platforms/cell/spider-pic.c +++ b/arch/powerpc/platforms/cell/spider-pic.c | |||
@@ -68,9 +68,9 @@ struct spider_pic { | |||
68 | }; | 68 | }; |
69 | static struct spider_pic spider_pics[SPIDER_CHIP_COUNT]; | 69 | static struct spider_pic spider_pics[SPIDER_CHIP_COUNT]; |
70 | 70 | ||
71 | static struct spider_pic *spider_virq_to_pic(unsigned int virq) | 71 | static struct spider_pic *spider_irq_data_to_pic(struct irq_data *d) |
72 | { | 72 | { |
73 | return irq_map[virq].host->host_data; | 73 | return irq_data_get_irq_chip_data(d); |
74 | } | 74 | } |
75 | 75 | ||
76 | static void __iomem *spider_get_irq_config(struct spider_pic *pic, | 76 | static void __iomem *spider_get_irq_config(struct spider_pic *pic, |
@@ -79,30 +79,30 @@ static void __iomem *spider_get_irq_config(struct spider_pic *pic, | |||
79 | return pic->regs + TIR_CFGA + 8 * src; | 79 | return pic->regs + TIR_CFGA + 8 * src; |
80 | } | 80 | } |
81 | 81 | ||
82 | static void spider_unmask_irq(unsigned int virq) | 82 | static void spider_unmask_irq(struct irq_data *d) |
83 | { | 83 | { |
84 | struct spider_pic *pic = spider_virq_to_pic(virq); | 84 | struct spider_pic *pic = spider_irq_data_to_pic(d); |
85 | void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq); | 85 | void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d)); |
86 | 86 | ||
87 | out_be32(cfg, in_be32(cfg) | 0x30000000u); | 87 | out_be32(cfg, in_be32(cfg) | 0x30000000u); |
88 | } | 88 | } |
89 | 89 | ||
90 | static void spider_mask_irq(unsigned int virq) | 90 | static void spider_mask_irq(struct irq_data *d) |
91 | { | 91 | { |
92 | struct spider_pic *pic = spider_virq_to_pic(virq); | 92 | struct spider_pic *pic = spider_irq_data_to_pic(d); |
93 | void __iomem *cfg = spider_get_irq_config(pic, irq_map[virq].hwirq); | 93 | void __iomem *cfg = spider_get_irq_config(pic, irqd_to_hwirq(d)); |
94 | 94 | ||
95 | out_be32(cfg, in_be32(cfg) & ~0x30000000u); | 95 | out_be32(cfg, in_be32(cfg) & ~0x30000000u); |
96 | } | 96 | } |
97 | 97 | ||
98 | static void spider_ack_irq(unsigned int virq) | 98 | static void spider_ack_irq(struct irq_data *d) |
99 | { | 99 | { |
100 | struct spider_pic *pic = spider_virq_to_pic(virq); | 100 | struct spider_pic *pic = spider_irq_data_to_pic(d); |
101 | unsigned int src = irq_map[virq].hwirq; | 101 | unsigned int src = irqd_to_hwirq(d); |
102 | 102 | ||
103 | /* Reset edge detection logic if necessary | 103 | /* Reset edge detection logic if necessary |
104 | */ | 104 | */ |
105 | if (irq_to_desc(virq)->status & IRQ_LEVEL) | 105 | if (irqd_is_level_type(d)) |
106 | return; | 106 | return; |
107 | 107 | ||
108 | /* Only interrupts 47 to 50 can be set to edge */ | 108 | /* Only interrupts 47 to 50 can be set to edge */ |
@@ -113,13 +113,12 @@ static void spider_ack_irq(unsigned int virq) | |||
113 | out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf)); | 113 | out_be32(pic->regs + TIR_EDC, 0x100 | (src & 0xf)); |
114 | } | 114 | } |
115 | 115 | ||
116 | static int spider_set_irq_type(unsigned int virq, unsigned int type) | 116 | static int spider_set_irq_type(struct irq_data *d, unsigned int type) |
117 | { | 117 | { |
118 | unsigned int sense = type & IRQ_TYPE_SENSE_MASK; | 118 | unsigned int sense = type & IRQ_TYPE_SENSE_MASK; |
119 | struct spider_pic *pic = spider_virq_to_pic(virq); | 119 | struct spider_pic *pic = spider_irq_data_to_pic(d); |
120 | unsigned int hw = irq_map[virq].hwirq; | 120 | unsigned int hw = irqd_to_hwirq(d); |
121 | void __iomem *cfg = spider_get_irq_config(pic, hw); | 121 | void __iomem *cfg = spider_get_irq_config(pic, hw); |
122 | struct irq_desc *desc = irq_to_desc(virq); | ||
123 | u32 old_mask; | 122 | u32 old_mask; |
124 | u32 ic; | 123 | u32 ic; |
125 | 124 | ||
@@ -147,12 +146,6 @@ static int spider_set_irq_type(unsigned int virq, unsigned int type) | |||
147 | return -EINVAL; | 146 | return -EINVAL; |
148 | } | 147 | } |
149 | 148 | ||
150 | /* Update irq_desc */ | ||
151 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | ||
152 | desc->status |= type & IRQ_TYPE_SENSE_MASK; | ||
153 | if (type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) | ||
154 | desc->status |= IRQ_LEVEL; | ||
155 | |||
156 | /* Configure the source. One gross hack that was there before and | 149 | /* Configure the source. One gross hack that was there before and |
157 | * that I've kept around is the priority to the BE which I set to | 150 | * that I've kept around is the priority to the BE which I set to |
158 | * be the same as the interrupt source number. I don't know whether | 151 | * be the same as the interrupt source number. I don't know whether |
@@ -169,19 +162,20 @@ static int spider_set_irq_type(unsigned int virq, unsigned int type) | |||
169 | 162 | ||
170 | static struct irq_chip spider_pic = { | 163 | static struct irq_chip spider_pic = { |
171 | .name = "SPIDER", | 164 | .name = "SPIDER", |
172 | .unmask = spider_unmask_irq, | 165 | .irq_unmask = spider_unmask_irq, |
173 | .mask = spider_mask_irq, | 166 | .irq_mask = spider_mask_irq, |
174 | .ack = spider_ack_irq, | 167 | .irq_ack = spider_ack_irq, |
175 | .set_type = spider_set_irq_type, | 168 | .irq_set_type = spider_set_irq_type, |
176 | }; | 169 | }; |
177 | 170 | ||
178 | static int spider_host_map(struct irq_host *h, unsigned int virq, | 171 | static int spider_host_map(struct irq_host *h, unsigned int virq, |
179 | irq_hw_number_t hw) | 172 | irq_hw_number_t hw) |
180 | { | 173 | { |
181 | set_irq_chip_and_handler(virq, &spider_pic, handle_level_irq); | 174 | irq_set_chip_data(virq, h->host_data); |
175 | irq_set_chip_and_handler(virq, &spider_pic, handle_level_irq); | ||
182 | 176 | ||
183 | /* Set default irq type */ | 177 | /* Set default irq type */ |
184 | set_irq_type(virq, IRQ_TYPE_NONE); | 178 | irq_set_irq_type(virq, IRQ_TYPE_NONE); |
185 | 179 | ||
186 | return 0; | 180 | return 0; |
187 | } | 181 | } |
@@ -207,7 +201,8 @@ static struct irq_host_ops spider_host_ops = { | |||
207 | 201 | ||
208 | static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) | 202 | static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) |
209 | { | 203 | { |
210 | struct spider_pic *pic = desc->handler_data; | 204 | struct irq_chip *chip = irq_desc_get_chip(desc); |
205 | struct spider_pic *pic = irq_desc_get_handler_data(desc); | ||
211 | unsigned int cs, virq; | 206 | unsigned int cs, virq; |
212 | 207 | ||
213 | cs = in_be32(pic->regs + TIR_CS) >> 24; | 208 | cs = in_be32(pic->regs + TIR_CS) >> 24; |
@@ -215,9 +210,11 @@ static void spider_irq_cascade(unsigned int irq, struct irq_desc *desc) | |||
215 | virq = NO_IRQ; | 210 | virq = NO_IRQ; |
216 | else | 211 | else |
217 | virq = irq_linear_revmap(pic->host, cs); | 212 | virq = irq_linear_revmap(pic->host, cs); |
213 | |||
218 | if (virq != NO_IRQ) | 214 | if (virq != NO_IRQ) |
219 | generic_handle_irq(virq); | 215 | generic_handle_irq(virq); |
220 | desc->chip->eoi(irq); | 216 | |
217 | chip->irq_eoi(&desc->irq_data); | ||
221 | } | 218 | } |
222 | 219 | ||
223 | /* For hooking up the cascade we have a problem. Our device-tree is | 220 |
@@ -258,8 +255,10 @@ static unsigned int __init spider_find_cascade_and_node(struct spider_pic *pic) | |||
258 | return NO_IRQ; | 255 | return NO_IRQ; |
259 | imap += intsize + 1; | 256 | imap += intsize + 1; |
260 | tmp = of_get_property(iic, "#interrupt-cells", NULL); | 257 | tmp = of_get_property(iic, "#interrupt-cells", NULL); |
261 | if (tmp == NULL) | 258 | if (tmp == NULL) { |
259 | of_node_put(iic); | ||
262 | return NO_IRQ; | 260 | return NO_IRQ; |
261 | } | ||
263 | intsize = *tmp; | 262 | intsize = *tmp; |
264 | /* Assume unit is last entry of interrupt specifier */ | 263 | /* Assume unit is last entry of interrupt specifier */ |
265 | unit = imap[intsize - 1]; | 264 | unit = imap[intsize - 1]; |
@@ -323,8 +322,8 @@ static void __init spider_init_one(struct device_node *of_node, int chip, | |||
323 | virq = spider_find_cascade_and_node(pic); | 322 | virq = spider_find_cascade_and_node(pic); |
324 | if (virq == NO_IRQ) | 323 | if (virq == NO_IRQ) |
325 | return; | 324 | return; |
326 | set_irq_data(virq, pic); | 325 | irq_set_handler_data(virq, pic); |
327 | set_irq_chained_handler(virq, spider_irq_cascade); | 326 | irq_set_chained_handler(virq, spider_irq_cascade); |
328 | 327 | ||
329 | printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n", | 328 | printk(KERN_INFO "spider_pic: node %d, addr: 0x%lx %s\n", |
330 | pic->node_id, addr, of_node->full_name); | 329 | pic->node_id, addr, of_node->full_name); |
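Because the irq_data callbacks can no longer reach irq_map[virq].host->host_data, spider stores the controller in the per-interrupt chip data at map time and the callbacks recover it with irq_data_get_irq_chip_data() plus irqd_to_hwirq(). A reduced, hypothetical sketch of that pairing (the struct layout and the register write are placeholders):

struct example_pic {
	void __iomem *regs;
};

static void example_unmask(struct irq_data *d)
{
	struct example_pic *pic = irq_data_get_irq_chip_data(d);
	unsigned int src = irqd_to_hwirq(d);

	out_be32(pic->regs + 8 * src, 0x30000000u);	/* placeholder enable write */
}

static struct irq_chip example_pic_chip = {
	.name		= "EXAMPLE-PIC",
	.irq_unmask	= example_unmask,
};

static int example_host_map(struct irq_host *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	irq_set_chip_data(virq, h->host_data);	/* make the pic reachable from irq_data */
	irq_set_chip_and_handler(virq, &example_pic_chip, handle_level_irq);
	return 0;
}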
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c index 8547e86bfb42..3675da73623f 100644 --- a/arch/powerpc/platforms/cell/spu_base.c +++ b/arch/powerpc/platforms/cell/spu_base.c | |||
@@ -32,11 +32,13 @@ | |||
32 | #include <linux/io.h> | 32 | #include <linux/io.h> |
33 | #include <linux/mutex.h> | 33 | #include <linux/mutex.h> |
34 | #include <linux/linux_logo.h> | 34 | #include <linux/linux_logo.h> |
35 | #include <linux/syscore_ops.h> | ||
35 | #include <asm/spu.h> | 36 | #include <asm/spu.h> |
36 | #include <asm/spu_priv1.h> | 37 | #include <asm/spu_priv1.h> |
37 | #include <asm/spu_csa.h> | 38 | #include <asm/spu_csa.h> |
38 | #include <asm/xmon.h> | 39 | #include <asm/xmon.h> |
39 | #include <asm/prom.h> | 40 | #include <asm/prom.h> |
41 | #include <asm/kexec.h> | ||
40 | 42 | ||
41 | const struct spu_management_ops *spu_management_ops; | 43 | const struct spu_management_ops *spu_management_ops; |
42 | EXPORT_SYMBOL_GPL(spu_management_ops); | 44 | EXPORT_SYMBOL_GPL(spu_management_ops); |
@@ -520,18 +522,8 @@ void spu_init_channels(struct spu *spu) | |||
520 | } | 522 | } |
521 | EXPORT_SYMBOL_GPL(spu_init_channels); | 523 | EXPORT_SYMBOL_GPL(spu_init_channels); |
522 | 524 | ||
523 | static int spu_shutdown(struct sys_device *sysdev) | ||
524 | { | ||
525 | struct spu *spu = container_of(sysdev, struct spu, sysdev); | ||
526 | |||
527 | spu_free_irqs(spu); | ||
528 | spu_destroy_spu(spu); | ||
529 | return 0; | ||
530 | } | ||
531 | |||
532 | static struct sysdev_class spu_sysdev_class = { | 525 | static struct sysdev_class spu_sysdev_class = { |
533 | .name = "spu", | 526 | .name = "spu", |
534 | .shutdown = spu_shutdown, | ||
535 | }; | 527 | }; |
536 | 528 | ||
537 | int spu_add_sysdev_attr(struct sysdev_attribute *attr) | 529 | int spu_add_sysdev_attr(struct sysdev_attribute *attr) |
@@ -727,6 +719,91 @@ static ssize_t spu_stat_show(struct sys_device *sysdev, | |||
727 | 719 | ||
728 | static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL); | 720 | static SYSDEV_ATTR(stat, 0644, spu_stat_show, NULL); |
729 | 721 | ||
722 | #ifdef CONFIG_KEXEC | ||
723 | |||
724 | struct crash_spu_info { | ||
725 | struct spu *spu; | ||
726 | u32 saved_spu_runcntl_RW; | ||
727 | u32 saved_spu_status_R; | ||
728 | u32 saved_spu_npc_RW; | ||
729 | u64 saved_mfc_sr1_RW; | ||
730 | u64 saved_mfc_dar; | ||
731 | u64 saved_mfc_dsisr; | ||
732 | }; | ||
733 | |||
734 | #define CRASH_NUM_SPUS 16 /* Enough for current hardware */ | ||
735 | static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS]; | ||
736 | |||
737 | static void crash_kexec_stop_spus(void) | ||
738 | { | ||
739 | struct spu *spu; | ||
740 | int i; | ||
741 | u64 tmp; | ||
742 | |||
743 | for (i = 0; i < CRASH_NUM_SPUS; i++) { | ||
744 | if (!crash_spu_info[i].spu) | ||
745 | continue; | ||
746 | |||
747 | spu = crash_spu_info[i].spu; | ||
748 | |||
749 | crash_spu_info[i].saved_spu_runcntl_RW = | ||
750 | in_be32(&spu->problem->spu_runcntl_RW); | ||
751 | crash_spu_info[i].saved_spu_status_R = | ||
752 | in_be32(&spu->problem->spu_status_R); | ||
753 | crash_spu_info[i].saved_spu_npc_RW = | ||
754 | in_be32(&spu->problem->spu_npc_RW); | ||
755 | |||
756 | crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu); | ||
757 | crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu); | ||
758 | tmp = spu_mfc_sr1_get(spu); | ||
759 | crash_spu_info[i].saved_mfc_sr1_RW = tmp; | ||
760 | |||
761 | tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK; | ||
762 | spu_mfc_sr1_set(spu, tmp); | ||
763 | |||
764 | __delay(200); | ||
765 | } | ||
766 | } | ||
767 | |||
768 | static void crash_register_spus(struct list_head *list) | ||
769 | { | ||
770 | struct spu *spu; | ||
771 | int ret; | ||
772 | |||
773 | list_for_each_entry(spu, list, full_list) { | ||
774 | if (WARN_ON(spu->number >= CRASH_NUM_SPUS)) | ||
775 | continue; | ||
776 | |||
777 | crash_spu_info[spu->number].spu = spu; | ||
778 | } | ||
779 | |||
780 | ret = crash_shutdown_register(&crash_kexec_stop_spus); | ||
781 | if (ret) | ||
782 | printk(KERN_ERR "Could not register SPU crash handler"); | ||
783 | } | ||
784 | |||
785 | #else | ||
786 | static inline void crash_register_spus(struct list_head *list) | ||
787 | { | ||
788 | } | ||
789 | #endif | ||
790 | |||
791 | static void spu_shutdown(void) | ||
792 | { | ||
793 | struct spu *spu; | ||
794 | |||
795 | mutex_lock(&spu_full_list_mutex); | ||
796 | list_for_each_entry(spu, &spu_full_list, full_list) { | ||
797 | spu_free_irqs(spu); | ||
798 | spu_destroy_spu(spu); | ||
799 | } | ||
800 | mutex_unlock(&spu_full_list_mutex); | ||
801 | } | ||
802 | |||
803 | static struct syscore_ops spu_syscore_ops = { | ||
804 | .shutdown = spu_shutdown, | ||
805 | }; | ||
806 | |||
730 | static int __init init_spu_base(void) | 807 | static int __init init_spu_base(void) |
731 | { | 808 | { |
732 | int i, ret = 0; | 809 | int i, ret = 0; |
@@ -760,6 +837,7 @@ static int __init init_spu_base(void) | |||
760 | crash_register_spus(&spu_full_list); | 837 | crash_register_spus(&spu_full_list); |
761 | mutex_unlock(&spu_full_list_mutex); | 838 | mutex_unlock(&spu_full_list_mutex); |
762 | spu_add_sysdev_attr(&attr_stat); | 839 | spu_add_sysdev_attr(&attr_stat); |
840 | register_syscore_ops(&spu_syscore_ops); | ||
763 | 841 | ||
764 | spu_init_affinity(); | 842 | spu_init_affinity(); |
765 | 843 | ||
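The per-sysdev shutdown hook is replaced by a single syscore_ops shutdown callback that walks the SPU list, and the SPU crash-dump save logic now lives here under CONFIG_KEXEC, registered with crash_shutdown_register(). A bare sketch of the syscore registration pattern (the callback body is a placeholder):

#include <linux/syscore_ops.h>

static void example_shutdown(void)
{
	/* called once, late in the shutdown path, for the whole class */
	pr_info("example: quiescing devices\n");
}

static struct syscore_ops example_syscore_ops = {
	.shutdown = example_shutdown,
};

static int __init example_init(void)
{
	register_syscore_ops(&example_syscore_ops);
	return 0;
}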
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c index 1a40da92154c..3c7c3f82d842 100644 --- a/arch/powerpc/platforms/cell/spufs/file.c +++ b/arch/powerpc/platforms/cell/spufs/file.c | |||
@@ -154,6 +154,7 @@ static const struct file_operations __fops = { \ | |||
154 | .release = spufs_attr_release, \ | 154 | .release = spufs_attr_release, \ |
155 | .read = spufs_attr_read, \ | 155 | .read = spufs_attr_read, \ |
156 | .write = spufs_attr_write, \ | 156 | .write = spufs_attr_write, \ |
157 | .llseek = generic_file_llseek, \ | ||
157 | }; | 158 | }; |
158 | 159 | ||
159 | 160 | ||
@@ -218,24 +219,17 @@ spufs_mem_write(struct file *file, const char __user *buffer, | |||
218 | loff_t pos = *ppos; | 219 | loff_t pos = *ppos; |
219 | int ret; | 220 | int ret; |
220 | 221 | ||
221 | if (pos < 0) | ||
222 | return -EINVAL; | ||
223 | if (pos > LS_SIZE) | 222 | if (pos > LS_SIZE) |
224 | return -EFBIG; | 223 | return -EFBIG; |
225 | if (size > LS_SIZE - pos) | ||
226 | size = LS_SIZE - pos; | ||
227 | 224 | ||
228 | ret = spu_acquire(ctx); | 225 | ret = spu_acquire(ctx); |
229 | if (ret) | 226 | if (ret) |
230 | return ret; | 227 | return ret; |
231 | 228 | ||
232 | local_store = ctx->ops->get_ls(ctx); | 229 | local_store = ctx->ops->get_ls(ctx); |
233 | ret = copy_from_user(local_store + pos, buffer, size); | 230 | size = simple_write_to_buffer(local_store, LS_SIZE, ppos, buffer, size); |
234 | spu_release(ctx); | 231 | spu_release(ctx); |
235 | 232 | ||
236 | if (ret) | ||
237 | return -EFAULT; | ||
238 | *ppos = pos + size; | ||
239 | return size; | 233 | return size; |
240 | } | 234 | } |
241 | 235 | ||
@@ -521,6 +515,7 @@ static const struct file_operations spufs_cntl_fops = { | |||
521 | .release = spufs_cntl_release, | 515 | .release = spufs_cntl_release, |
522 | .read = simple_attr_read, | 516 | .read = simple_attr_read, |
523 | .write = simple_attr_write, | 517 | .write = simple_attr_write, |
518 | .llseek = generic_file_llseek, | ||
524 | .mmap = spufs_cntl_mmap, | 519 | .mmap = spufs_cntl_mmap, |
525 | }; | 520 | }; |
526 | 521 | ||
@@ -572,18 +567,15 @@ spufs_regs_write(struct file *file, const char __user *buffer, | |||
572 | if (*pos >= sizeof(lscsa->gprs)) | 567 | if (*pos >= sizeof(lscsa->gprs)) |
573 | return -EFBIG; | 568 | return -EFBIG; |
574 | 569 | ||
575 | size = min_t(ssize_t, sizeof(lscsa->gprs) - *pos, size); | ||
576 | *pos += size; | ||
577 | |||
578 | ret = spu_acquire_saved(ctx); | 570 | ret = spu_acquire_saved(ctx); |
579 | if (ret) | 571 | if (ret) |
580 | return ret; | 572 | return ret; |
581 | 573 | ||
582 | ret = copy_from_user((char *)lscsa->gprs + *pos - size, | 574 | size = simple_write_to_buffer(lscsa->gprs, sizeof(lscsa->gprs), pos, |
583 | buffer, size) ? -EFAULT : size; | 575 | buffer, size); |
584 | 576 | ||
585 | spu_release_saved(ctx); | 577 | spu_release_saved(ctx); |
586 | return ret; | 578 | return size; |
587 | } | 579 | } |
588 | 580 | ||
589 | static const struct file_operations spufs_regs_fops = { | 581 | static const struct file_operations spufs_regs_fops = { |
@@ -628,18 +620,15 @@ spufs_fpcr_write(struct file *file, const char __user * buffer, | |||
628 | if (*pos >= sizeof(lscsa->fpcr)) | 620 | if (*pos >= sizeof(lscsa->fpcr)) |
629 | return -EFBIG; | 621 | return -EFBIG; |
630 | 622 | ||
631 | size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size); | ||
632 | |||
633 | ret = spu_acquire_saved(ctx); | 623 | ret = spu_acquire_saved(ctx); |
634 | if (ret) | 624 | if (ret) |
635 | return ret; | 625 | return ret; |
636 | 626 | ||
637 | *pos += size; | 627 | size = simple_write_to_buffer(&lscsa->fpcr, sizeof(lscsa->fpcr), pos, |
638 | ret = copy_from_user((char *)&lscsa->fpcr + *pos - size, | 628 | buffer, size); |
639 | buffer, size) ? -EFAULT : size; | ||
640 | 629 | ||
641 | spu_release_saved(ctx); | 630 | spu_release_saved(ctx); |
642 | return ret; | 631 | return size; |
643 | } | 632 | } |
644 | 633 | ||
645 | static const struct file_operations spufs_fpcr_fops = { | 634 | static const struct file_operations spufs_fpcr_fops = { |
@@ -714,6 +703,7 @@ static ssize_t spufs_mbox_read(struct file *file, char __user *buf, | |||
714 | static const struct file_operations spufs_mbox_fops = { | 703 | static const struct file_operations spufs_mbox_fops = { |
715 | .open = spufs_pipe_open, | 704 | .open = spufs_pipe_open, |
716 | .read = spufs_mbox_read, | 705 | .read = spufs_mbox_read, |
706 | .llseek = no_llseek, | ||
717 | }; | 707 | }; |
718 | 708 | ||
719 | static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, | 709 | static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, |
@@ -743,6 +733,7 @@ static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, | |||
743 | static const struct file_operations spufs_mbox_stat_fops = { | 733 | static const struct file_operations spufs_mbox_stat_fops = { |
744 | .open = spufs_pipe_open, | 734 | .open = spufs_pipe_open, |
745 | .read = spufs_mbox_stat_read, | 735 | .read = spufs_mbox_stat_read, |
736 | .llseek = no_llseek, | ||
746 | }; | 737 | }; |
747 | 738 | ||
748 | /* low-level ibox access function */ | 739 | /* low-level ibox access function */ |
@@ -863,6 +854,7 @@ static const struct file_operations spufs_ibox_fops = { | |||
863 | .read = spufs_ibox_read, | 854 | .read = spufs_ibox_read, |
864 | .poll = spufs_ibox_poll, | 855 | .poll = spufs_ibox_poll, |
865 | .fasync = spufs_ibox_fasync, | 856 | .fasync = spufs_ibox_fasync, |
857 | .llseek = no_llseek, | ||
866 | }; | 858 | }; |
867 | 859 | ||
868 | static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, | 860 | static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, |
@@ -890,6 +882,7 @@ static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, | |||
890 | static const struct file_operations spufs_ibox_stat_fops = { | 882 | static const struct file_operations spufs_ibox_stat_fops = { |
891 | .open = spufs_pipe_open, | 883 | .open = spufs_pipe_open, |
892 | .read = spufs_ibox_stat_read, | 884 | .read = spufs_ibox_stat_read, |
885 | .llseek = no_llseek, | ||
893 | }; | 886 | }; |
894 | 887 | ||
895 | /* low-level mailbox write */ | 888 | /* low-level mailbox write */ |
@@ -1011,6 +1004,7 @@ static const struct file_operations spufs_wbox_fops = { | |||
1011 | .write = spufs_wbox_write, | 1004 | .write = spufs_wbox_write, |
1012 | .poll = spufs_wbox_poll, | 1005 | .poll = spufs_wbox_poll, |
1013 | .fasync = spufs_wbox_fasync, | 1006 | .fasync = spufs_wbox_fasync, |
1007 | .llseek = no_llseek, | ||
1014 | }; | 1008 | }; |
1015 | 1009 | ||
1016 | static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, | 1010 | static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, |
@@ -1038,6 +1032,7 @@ static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, | |||
1038 | static const struct file_operations spufs_wbox_stat_fops = { | 1032 | static const struct file_operations spufs_wbox_stat_fops = { |
1039 | .open = spufs_pipe_open, | 1033 | .open = spufs_pipe_open, |
1040 | .read = spufs_wbox_stat_read, | 1034 | .read = spufs_wbox_stat_read, |
1035 | .llseek = no_llseek, | ||
1041 | }; | 1036 | }; |
1042 | 1037 | ||
1043 | static int spufs_signal1_open(struct inode *inode, struct file *file) | 1038 | static int spufs_signal1_open(struct inode *inode, struct file *file) |
@@ -1166,6 +1161,7 @@ static const struct file_operations spufs_signal1_fops = { | |||
1166 | .read = spufs_signal1_read, | 1161 | .read = spufs_signal1_read, |
1167 | .write = spufs_signal1_write, | 1162 | .write = spufs_signal1_write, |
1168 | .mmap = spufs_signal1_mmap, | 1163 | .mmap = spufs_signal1_mmap, |
1164 | .llseek = no_llseek, | ||
1169 | }; | 1165 | }; |
1170 | 1166 | ||
1171 | static const struct file_operations spufs_signal1_nosched_fops = { | 1167 | static const struct file_operations spufs_signal1_nosched_fops = { |
@@ -1173,6 +1169,7 @@ static const struct file_operations spufs_signal1_nosched_fops = { | |||
1173 | .release = spufs_signal1_release, | 1169 | .release = spufs_signal1_release, |
1174 | .write = spufs_signal1_write, | 1170 | .write = spufs_signal1_write, |
1175 | .mmap = spufs_signal1_mmap, | 1171 | .mmap = spufs_signal1_mmap, |
1172 | .llseek = no_llseek, | ||
1176 | }; | 1173 | }; |
1177 | 1174 | ||
1178 | static int spufs_signal2_open(struct inode *inode, struct file *file) | 1175 | static int spufs_signal2_open(struct inode *inode, struct file *file) |
@@ -1305,6 +1302,7 @@ static const struct file_operations spufs_signal2_fops = { | |||
1305 | .read = spufs_signal2_read, | 1302 | .read = spufs_signal2_read, |
1306 | .write = spufs_signal2_write, | 1303 | .write = spufs_signal2_write, |
1307 | .mmap = spufs_signal2_mmap, | 1304 | .mmap = spufs_signal2_mmap, |
1305 | .llseek = no_llseek, | ||
1308 | }; | 1306 | }; |
1309 | 1307 | ||
1310 | static const struct file_operations spufs_signal2_nosched_fops = { | 1308 | static const struct file_operations spufs_signal2_nosched_fops = { |
@@ -1312,6 +1310,7 @@ static const struct file_operations spufs_signal2_nosched_fops = { | |||
1312 | .release = spufs_signal2_release, | 1310 | .release = spufs_signal2_release, |
1313 | .write = spufs_signal2_write, | 1311 | .write = spufs_signal2_write, |
1314 | .mmap = spufs_signal2_mmap, | 1312 | .mmap = spufs_signal2_mmap, |
1313 | .llseek = no_llseek, | ||
1315 | }; | 1314 | }; |
1316 | 1315 | ||
1317 | /* | 1316 | /* |
@@ -1451,6 +1450,7 @@ static const struct file_operations spufs_mss_fops = { | |||
1451 | .open = spufs_mss_open, | 1450 | .open = spufs_mss_open, |
1452 | .release = spufs_mss_release, | 1451 | .release = spufs_mss_release, |
1453 | .mmap = spufs_mss_mmap, | 1452 | .mmap = spufs_mss_mmap, |
1453 | .llseek = no_llseek, | ||
1454 | }; | 1454 | }; |
1455 | 1455 | ||
1456 | static int | 1456 | static int |
@@ -1508,6 +1508,7 @@ static const struct file_operations spufs_psmap_fops = { | |||
1508 | .open = spufs_psmap_open, | 1508 | .open = spufs_psmap_open, |
1509 | .release = spufs_psmap_release, | 1509 | .release = spufs_psmap_release, |
1510 | .mmap = spufs_psmap_mmap, | 1510 | .mmap = spufs_psmap_mmap, |
1511 | .llseek = no_llseek, | ||
1511 | }; | 1512 | }; |
1512 | 1513 | ||
1513 | 1514 | ||
@@ -1871,6 +1872,7 @@ static const struct file_operations spufs_mfc_fops = { | |||
1871 | .fsync = spufs_mfc_fsync, | 1872 | .fsync = spufs_mfc_fsync, |
1872 | .fasync = spufs_mfc_fasync, | 1873 | .fasync = spufs_mfc_fasync, |
1873 | .mmap = spufs_mfc_mmap, | 1874 | .mmap = spufs_mfc_mmap, |
1875 | .llseek = no_llseek, | ||
1874 | }; | 1876 | }; |
1875 | 1877 | ||
1876 | static int spufs_npc_set(void *data, u64 val) | 1878 | static int spufs_npc_set(void *data, u64 val) |
@@ -2246,6 +2248,7 @@ static ssize_t spufs_dma_info_read(struct file *file, char __user *buf, | |||
2246 | static const struct file_operations spufs_dma_info_fops = { | 2248 | static const struct file_operations spufs_dma_info_fops = { |
2247 | .open = spufs_info_open, | 2249 | .open = spufs_info_open, |
2248 | .read = spufs_dma_info_read, | 2250 | .read = spufs_dma_info_read, |
2251 | .llseek = no_llseek, | ||
2249 | }; | 2252 | }; |
2250 | 2253 | ||
2251 | static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, | 2254 | static ssize_t __spufs_proxydma_info_read(struct spu_context *ctx, |
@@ -2299,6 +2302,7 @@ static ssize_t spufs_proxydma_info_read(struct file *file, char __user *buf, | |||
2299 | static const struct file_operations spufs_proxydma_info_fops = { | 2302 | static const struct file_operations spufs_proxydma_info_fops = { |
2300 | .open = spufs_info_open, | 2303 | .open = spufs_info_open, |
2301 | .read = spufs_proxydma_info_read, | 2304 | .read = spufs_proxydma_info_read, |
2305 | .llseek = no_llseek, | ||
2302 | }; | 2306 | }; |
2303 | 2307 | ||
2304 | static int spufs_show_tid(struct seq_file *s, void *private) | 2308 | static int spufs_show_tid(struct seq_file *s, void *private) |
@@ -2585,6 +2589,7 @@ static const struct file_operations spufs_switch_log_fops = { | |||
2585 | .read = spufs_switch_log_read, | 2589 | .read = spufs_switch_log_read, |
2586 | .poll = spufs_switch_log_poll, | 2590 | .poll = spufs_switch_log_poll, |
2587 | .release = spufs_switch_log_release, | 2591 | .release = spufs_switch_log_release, |
2592 | .llseek = no_llseek, | ||
2588 | }; | 2593 | }; |
2589 | 2594 | ||
2590 | /** | 2595 | /** |
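The spufs write handlers stop open-coding the bounds check, copy_from_user() and *ppos update and use simple_write_to_buffer() instead, and every file_operations now spells out its .llseek behaviour (no_llseek or generic_file_llseek) rather than relying on the implicit default. A sketch of what the helper replaces, for a hypothetical fixed-size buffer:

#include <linux/fs.h>

static char example_buf[256];

static ssize_t example_write(struct file *file, const char __user *ubuf,
			     size_t count, loff_t *ppos)
{
	/* clamps to the space left after *ppos, copies from user space,
	 * advances *ppos and returns the byte count (or -EFAULT) */
	return simple_write_to_buffer(example_buf, sizeof(example_buf),
				      ppos, ubuf, count);
}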
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 5dec408d6703..856e9c398068 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c | |||
@@ -71,12 +71,18 @@ spufs_alloc_inode(struct super_block *sb) | |||
71 | return &ei->vfs_inode; | 71 | return &ei->vfs_inode; |
72 | } | 72 | } |
73 | 73 | ||
74 | static void | 74 | static void spufs_i_callback(struct rcu_head *head) |
75 | spufs_destroy_inode(struct inode *inode) | ||
76 | { | 75 | { |
76 | struct inode *inode = container_of(head, struct inode, i_rcu); | ||
77 | INIT_LIST_HEAD(&inode->i_dentry); | ||
77 | kmem_cache_free(spufs_inode_cache, SPUFS_I(inode)); | 78 | kmem_cache_free(spufs_inode_cache, SPUFS_I(inode)); |
78 | } | 79 | } |
79 | 80 | ||
81 | static void spufs_destroy_inode(struct inode *inode) | ||
82 | { | ||
83 | call_rcu(&inode->i_rcu, spufs_i_callback); | ||
84 | } | ||
85 | |||
80 | static void | 86 | static void |
81 | spufs_init_once(void *p) | 87 | spufs_init_once(void *p) |
82 | { | 88 | { |
@@ -159,18 +165,18 @@ static void spufs_prune_dir(struct dentry *dir) | |||
159 | 165 | ||
160 | mutex_lock(&dir->d_inode->i_mutex); | 166 | mutex_lock(&dir->d_inode->i_mutex); |
161 | list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) { | 167 | list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_u.d_child) { |
162 | spin_lock(&dcache_lock); | ||
163 | spin_lock(&dentry->d_lock); | 168 | spin_lock(&dentry->d_lock); |
164 | if (!(d_unhashed(dentry)) && dentry->d_inode) { | 169 | if (!(d_unhashed(dentry)) && dentry->d_inode) { |
165 | dget_locked(dentry); | 170 | dget_dlock(dentry); |
166 | __d_drop(dentry); | 171 | __d_drop(dentry); |
167 | spin_unlock(&dentry->d_lock); | 172 | spin_unlock(&dentry->d_lock); |
168 | simple_unlink(dir->d_inode, dentry); | 173 | simple_unlink(dir->d_inode, dentry); |
169 | spin_unlock(&dcache_lock); | 174 | /* XXX: what was dcache_lock protecting here? Other |
175 | * filesystems (IB, configfs) release dcache_lock | ||
176 | * before unlink */ | ||
170 | dput(dentry); | 177 | dput(dentry); |
171 | } else { | 178 | } else { |
172 | spin_unlock(&dentry->d_lock); | 179 | spin_unlock(&dentry->d_lock); |
173 | spin_unlock(&dcache_lock); | ||
174 | } | 180 | } |
175 | } | 181 | } |
176 | shrink_dcache_parent(dir); | 182 | shrink_dcache_parent(dir); |
@@ -798,17 +804,17 @@ spufs_fill_super(struct super_block *sb, void *data, int silent) | |||
798 | return spufs_create_root(sb, data); | 804 | return spufs_create_root(sb, data); |
799 | } | 805 | } |
800 | 806 | ||
801 | static int | 807 | static struct dentry * |
802 | spufs_get_sb(struct file_system_type *fstype, int flags, | 808 | spufs_mount(struct file_system_type *fstype, int flags, |
803 | const char *name, void *data, struct vfsmount *mnt) | 809 | const char *name, void *data) |
804 | { | 810 | { |
805 | return get_sb_single(fstype, flags, data, spufs_fill_super, mnt); | 811 | return mount_single(fstype, flags, data, spufs_fill_super); |
806 | } | 812 | } |
807 | 813 | ||
808 | static struct file_system_type spufs_type = { | 814 | static struct file_system_type spufs_type = { |
809 | .owner = THIS_MODULE, | 815 | .owner = THIS_MODULE, |
810 | .name = "spufs", | 816 | .name = "spufs", |
811 | .get_sb = spufs_get_sb, | 817 | .mount = spufs_mount, |
812 | .kill_sb = kill_litter_super, | 818 | .kill_sb = kill_litter_super, |
813 | }; | 819 | }; |
814 | 820 | ||
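Two VFS-wide conversions land in inode.c: inode freeing is deferred through an RCU callback so RCU path walk can still reference the inode (dcache_lock is gone, hence dget_dlock() and the XXX comment), and filesystems register a .mount method returning a dentry instead of .get_sb. A hypothetical sketch of the .mount shape for a single-instance filesystem; the fill_super body is a stub and a real one must set sb->s_root:

static int example_fill_super(struct super_block *sb, void *data, int silent)
{
	return 0;	/* stub: a real fill_super allocates the root inode and dentry */
}

static struct dentry *example_mount(struct file_system_type *fstype, int flags,
				    const char *name, void *data)
{
	return mount_single(fstype, flags, data, example_fill_super);
}

static struct file_system_type example_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "examplefs",
	.mount		= example_mount,
	.kill_sb	= kill_litter_super,
};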
diff --git a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c index a101abf17504..147069938cfe 100644 --- a/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c +++ b/arch/powerpc/platforms/cell/spufs/lscsa_alloc.c | |||
@@ -36,10 +36,9 @@ static int spu_alloc_lscsa_std(struct spu_state *csa) | |||
36 | struct spu_lscsa *lscsa; | 36 | struct spu_lscsa *lscsa; |
37 | unsigned char *p; | 37 | unsigned char *p; |
38 | 38 | ||
39 | lscsa = vmalloc(sizeof(struct spu_lscsa)); | 39 | lscsa = vzalloc(sizeof(struct spu_lscsa)); |
40 | if (!lscsa) | 40 | if (!lscsa) |
41 | return -ENOMEM; | 41 | return -ENOMEM; |
42 | memset(lscsa, 0, sizeof(struct spu_lscsa)); | ||
43 | csa->lscsa = lscsa; | 42 | csa->lscsa = lscsa; |
44 | 43 | ||
45 | /* Set LS pages reserved to allow for user-space mapping. */ | 44 | /* Set LS pages reserved to allow for user-space mapping. */ |
@@ -91,7 +90,7 @@ int spu_alloc_lscsa(struct spu_state *csa) | |||
91 | */ | 90 | */ |
92 | for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) { | 91 | for (i = 0; i < SPU_LSCSA_NUM_BIG_PAGES; i++) { |
93 | /* XXX This is likely to fail, we should use a special pool | 92 | /* XXX This is likely to fail, we should use a special pool |
94 | * similiar to what hugetlbfs does. | 93 | * similar to what hugetlbfs does. |
95 | */ | 94 | */ |
96 | csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL, | 95 | csa->lscsa_pages[i] = alloc_pages(GFP_KERNEL, |
97 | SPU_64K_PAGE_ORDER); | 96 | SPU_64K_PAGE_ORDER); |
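vzalloc() is simply the zeroing form of vmalloc(), so the follow-up memset() disappears; the memory is still released with vfree(). Equivalent sketch:

#include <linux/vmalloc.h>

static void *example_alloc(size_t size)
{
	/* allocates and zeroes in one call; freed with vfree() as before */
	return vzalloc(size);
}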
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c index 0b0466284932..32cb4e66d2cd 100644 --- a/arch/powerpc/platforms/cell/spufs/sched.c +++ b/arch/powerpc/platforms/cell/spufs/sched.c | |||
@@ -141,7 +141,7 @@ void __spu_update_sched_info(struct spu_context *ctx) | |||
141 | * runqueue. The context will be rescheduled on the proper node | 141 | * runqueue. The context will be rescheduled on the proper node |
142 | * if it is timesliced or preempted. | 142 | * if it is timesliced or preempted. |
143 | */ | 143 | */ |
144 | ctx->cpus_allowed = current->cpus_allowed; | 144 | cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current)); |
145 | 145 | ||
146 | /* Save the current cpu id for spu interrupt routing. */ | 146 | /* Save the current cpu id for spu interrupt routing. */ |
147 | ctx->last_ran = raw_smp_processor_id(); | 147 | ctx->last_ran = raw_smp_processor_id(); |
@@ -846,7 +846,7 @@ static struct spu_context *grab_runnable_context(int prio, int node) | |||
846 | struct list_head *rq = &spu_prio->runq[best]; | 846 | struct list_head *rq = &spu_prio->runq[best]; |
847 | 847 | ||
848 | list_for_each_entry(ctx, rq, rq) { | 848 | list_for_each_entry(ctx, rq, rq) { |
849 | /* XXX(hch): check for affinity here aswell */ | 849 | /* XXX(hch): check for affinity here as well */ |
850 | if (__node_allowed(ctx, node)) { | 850 | if (__node_allowed(ctx, node)) { |
851 | __spu_del_from_rq(ctx); | 851 | __spu_del_from_rq(ctx); |
852 | goto found; | 852 | goto found; |
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c index 21a9c952d88b..72c905f1ee7a 100644 --- a/arch/powerpc/platforms/cell/spufs/spu_restore.c +++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c | |||
@@ -284,7 +284,7 @@ static inline void restore_complete(void) | |||
284 | exit_instrs[3] = BR_INSTR; | 284 | exit_instrs[3] = BR_INSTR; |
285 | break; | 285 | break; |
286 | default: | 286 | default: |
287 | /* SPU_Status[R]=1. No additonal instructions. */ | 287 | /* SPU_Status[R]=1. No additional instructions. */ |
288 | break; | 288 | break; |
289 | } | 289 | } |
290 | spu_sync(); | 290 | spu_sync(); |
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c index 187a7d32f86a..a3d2ce54ea2e 100644 --- a/arch/powerpc/platforms/cell/spufs/syscalls.c +++ b/arch/powerpc/platforms/cell/spufs/syscalls.c | |||
@@ -70,7 +70,7 @@ static long do_spu_create(const char __user *pathname, unsigned int flags, | |||
70 | if (!IS_ERR(tmp)) { | 70 | if (!IS_ERR(tmp)) { |
71 | struct nameidata nd; | 71 | struct nameidata nd; |
72 | 72 | ||
73 | ret = path_lookup(tmp, LOOKUP_PARENT, &nd); | 73 | ret = kern_path_parent(tmp, &nd); |
74 | if (!ret) { | 74 | if (!ret) { |
75 | nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE; | 75 | nd.flags |= LOOKUP_OPEN | LOOKUP_CREATE; |
76 | ret = spufs_create(&nd, flags, mode, neighbor); | 76 | ret = spufs_create(&nd, flags, mode, neighbor); |
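kern_path_parent() wraps the old path_lookup(name, LOOKUP_PARENT, &nd) idiom: it resolves every component except the last and leaves the parent in nd. A hypothetical sketch of the call shape (the creation step is elided):

#include <linux/namei.h>

static int example_create_in_parent(const char *pathname)
{
	struct nameidata nd;
	int ret;

	/* resolve everything but the final component, leaving the parent in nd */
	ret = kern_path_parent(pathname, &nd);
	if (ret)
		return ret;

	/* ... create the last component under nd.path here ... */
	path_put(&nd.path);
	return 0;
}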
diff --git a/arch/powerpc/platforms/chrp/nvram.c b/arch/powerpc/platforms/chrp/nvram.c index ba3588f2d8e0..d3ceff04ffc7 100644 --- a/arch/powerpc/platforms/chrp/nvram.c +++ b/arch/powerpc/platforms/chrp/nvram.c | |||
@@ -74,8 +74,10 @@ void __init chrp_nvram_init(void) | |||
74 | return; | 74 | return; |
75 | 75 | ||
76 | nbytes_p = of_get_property(nvram, "#bytes", &proplen); | 76 | nbytes_p = of_get_property(nvram, "#bytes", &proplen); |
77 | if (nbytes_p == NULL || proplen != sizeof(unsigned int)) | 77 | if (nbytes_p == NULL || proplen != sizeof(unsigned int)) { |
78 | of_node_put(nvram); | ||
78 | return; | 79 | return; |
80 | } | ||
79 | 81 | ||
80 | nvram_size = *nbytes_p; | 82 | nvram_size = *nbytes_p; |
81 | 83 | ||
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index 8553cc49e0d6..122786498419 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c | |||
@@ -365,10 +365,13 @@ void __init chrp_setup_arch(void) | |||
365 | 365 | ||
366 | static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc) | 366 | static void chrp_8259_cascade(unsigned int irq, struct irq_desc *desc) |
367 | { | 367 | { |
368 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
368 | unsigned int cascade_irq = i8259_irq(); | 369 | unsigned int cascade_irq = i8259_irq(); |
370 | |||
369 | if (cascade_irq != NO_IRQ) | 371 | if (cascade_irq != NO_IRQ) |
370 | generic_handle_irq(cascade_irq); | 372 | generic_handle_irq(cascade_irq); |
371 | desc->chip->eoi(irq); | 373 | |
374 | chip->irq_eoi(&desc->irq_data); | ||
372 | } | 375 | } |
373 | 376 | ||
374 | /* | 377 | /* |
@@ -514,7 +517,7 @@ static void __init chrp_find_8259(void) | |||
514 | if (cascade_irq == NO_IRQ) | 517 | if (cascade_irq == NO_IRQ) |
515 | printk(KERN_ERR "i8259: failed to map cascade irq\n"); | 518 | printk(KERN_ERR "i8259: failed to map cascade irq\n"); |
516 | else | 519 | else |
517 | set_irq_chained_handler(cascade_irq, | 520 | irq_set_chained_handler(cascade_irq, |
518 | chrp_8259_cascade); | 521 | chrp_8259_cascade); |
519 | } | 522 | } |
520 | } | 523 | } |
diff --git a/arch/powerpc/platforms/chrp/smp.c b/arch/powerpc/platforms/chrp/smp.c index 02cafecc90e3..a800122e4dda 100644 --- a/arch/powerpc/platforms/chrp/smp.c +++ b/arch/powerpc/platforms/chrp/smp.c | |||
@@ -30,10 +30,12 @@ | |||
30 | #include <asm/mpic.h> | 30 | #include <asm/mpic.h> |
31 | #include <asm/rtas.h> | 31 | #include <asm/rtas.h> |
32 | 32 | ||
33 | static void __devinit smp_chrp_kick_cpu(int nr) | 33 | static int __devinit smp_chrp_kick_cpu(int nr) |
34 | { | 34 | { |
35 | *(unsigned long *)KERNELBASE = nr; | 35 | *(unsigned long *)KERNELBASE = nr; |
36 | asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory"); | 36 | asm volatile("dcbf 0,%0"::"r"(KERNELBASE):"memory"); |
37 | |||
38 | return 0; | ||
37 | } | 39 | } |
38 | 40 | ||
39 | static void __devinit smp_chrp_setup_cpu(int cpu_nr) | 41 | static void __devinit smp_chrp_setup_cpu(int cpu_nr) |
diff --git a/arch/powerpc/platforms/chrp/time.c b/arch/powerpc/platforms/chrp/time.c index 054dfe5b8e77..f803f4b8ab6f 100644 --- a/arch/powerpc/platforms/chrp/time.c +++ b/arch/powerpc/platforms/chrp/time.c | |||
@@ -29,6 +29,10 @@ | |||
29 | 29 | ||
30 | extern spinlock_t rtc_lock; | 30 | extern spinlock_t rtc_lock; |
31 | 31 | ||
32 | #define NVRAM_AS0 0x74 | ||
33 | #define NVRAM_AS1 0x75 | ||
34 | #define NVRAM_DATA 0x77 | ||
35 | |||
32 | static int nvram_as1 = NVRAM_AS1; | 36 | static int nvram_as1 = NVRAM_AS1; |
33 | static int nvram_as0 = NVRAM_AS0; | 37 | static int nvram_as0 = NVRAM_AS0; |
34 | static int nvram_data = NVRAM_DATA; | 38 | static int nvram_data = NVRAM_DATA; |
diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c index c278bd3a8fec..f61a2dd96b99 100644 --- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c +++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c | |||
@@ -46,10 +46,10 @@ | |||
46 | * | 46 | * |
47 | */ | 47 | */ |
48 | 48 | ||
49 | static void flipper_pic_mask_and_ack(unsigned int virq) | 49 | static void flipper_pic_mask_and_ack(struct irq_data *d) |
50 | { | 50 | { |
51 | int irq = virq_to_hw(virq); | 51 | int irq = irqd_to_hwirq(d); |
52 | void __iomem *io_base = get_irq_chip_data(virq); | 52 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
53 | u32 mask = 1 << irq; | 53 | u32 mask = 1 << irq; |
54 | 54 | ||
55 | clrbits32(io_base + FLIPPER_IMR, mask); | 55 | clrbits32(io_base + FLIPPER_IMR, mask); |
@@ -57,27 +57,27 @@ static void flipper_pic_mask_and_ack(unsigned int virq) | |||
57 | out_be32(io_base + FLIPPER_ICR, mask); | 57 | out_be32(io_base + FLIPPER_ICR, mask); |
58 | } | 58 | } |
59 | 59 | ||
60 | static void flipper_pic_ack(unsigned int virq) | 60 | static void flipper_pic_ack(struct irq_data *d) |
61 | { | 61 | { |
62 | int irq = virq_to_hw(virq); | 62 | int irq = irqd_to_hwirq(d); |
63 | void __iomem *io_base = get_irq_chip_data(virq); | 63 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
64 | 64 | ||
65 | /* this is at least needed for RSW */ | 65 | /* this is at least needed for RSW */ |
66 | out_be32(io_base + FLIPPER_ICR, 1 << irq); | 66 | out_be32(io_base + FLIPPER_ICR, 1 << irq); |
67 | } | 67 | } |
68 | 68 | ||
69 | static void flipper_pic_mask(unsigned int virq) | 69 | static void flipper_pic_mask(struct irq_data *d) |
70 | { | 70 | { |
71 | int irq = virq_to_hw(virq); | 71 | int irq = irqd_to_hwirq(d); |
72 | void __iomem *io_base = get_irq_chip_data(virq); | 72 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
73 | 73 | ||
74 | clrbits32(io_base + FLIPPER_IMR, 1 << irq); | 74 | clrbits32(io_base + FLIPPER_IMR, 1 << irq); |
75 | } | 75 | } |
76 | 76 | ||
77 | static void flipper_pic_unmask(unsigned int virq) | 77 | static void flipper_pic_unmask(struct irq_data *d) |
78 | { | 78 | { |
79 | int irq = virq_to_hw(virq); | 79 | int irq = irqd_to_hwirq(d); |
80 | void __iomem *io_base = get_irq_chip_data(virq); | 80 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
81 | 81 | ||
82 | setbits32(io_base + FLIPPER_IMR, 1 << irq); | 82 | setbits32(io_base + FLIPPER_IMR, 1 << irq); |
83 | } | 83 | } |
@@ -85,10 +85,10 @@ static void flipper_pic_unmask(unsigned int virq) | |||
85 | 85 | ||
86 | static struct irq_chip flipper_pic = { | 86 | static struct irq_chip flipper_pic = { |
87 | .name = "flipper-pic", | 87 | .name = "flipper-pic", |
88 | .ack = flipper_pic_ack, | 88 | .irq_ack = flipper_pic_ack, |
89 | .mask_ack = flipper_pic_mask_and_ack, | 89 | .irq_mask_ack = flipper_pic_mask_and_ack, |
90 | .mask = flipper_pic_mask, | 90 | .irq_mask = flipper_pic_mask, |
91 | .unmask = flipper_pic_unmask, | 91 | .irq_unmask = flipper_pic_unmask, |
92 | }; | 92 | }; |
93 | 93 | ||
94 | /* | 94 | /* |
@@ -101,18 +101,12 @@ static struct irq_host *flipper_irq_host; | |||
101 | static int flipper_pic_map(struct irq_host *h, unsigned int virq, | 101 | static int flipper_pic_map(struct irq_host *h, unsigned int virq, |
102 | irq_hw_number_t hwirq) | 102 | irq_hw_number_t hwirq) |
103 | { | 103 | { |
104 | set_irq_chip_data(virq, h->host_data); | 104 | irq_set_chip_data(virq, h->host_data); |
105 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 105 | irq_set_status_flags(virq, IRQ_LEVEL); |
106 | set_irq_chip_and_handler(virq, &flipper_pic, handle_level_irq); | 106 | irq_set_chip_and_handler(virq, &flipper_pic, handle_level_irq); |
107 | return 0; | 107 | return 0; |
108 | } | 108 | } |
109 | 109 | ||
110 | static void flipper_pic_unmap(struct irq_host *h, unsigned int irq) | ||
111 | { | ||
112 | set_irq_chip_data(irq, NULL); | ||
113 | set_irq_chip(irq, NULL); | ||
114 | } | ||
115 | |||
116 | static int flipper_pic_match(struct irq_host *h, struct device_node *np) | 110 | static int flipper_pic_match(struct irq_host *h, struct device_node *np) |
117 | { | 111 | { |
118 | return 1; | 112 | return 1; |
@@ -121,7 +115,6 @@ static int flipper_pic_match(struct irq_host *h, struct device_node *np) | |||
121 | 115 | ||
122 | static struct irq_host_ops flipper_irq_host_ops = { | 116 | static struct irq_host_ops flipper_irq_host_ops = { |
123 | .map = flipper_pic_map, | 117 | .map = flipper_pic_map, |
124 | .unmap = flipper_pic_unmap, | ||
125 | .match = flipper_pic_match, | 118 | .match = flipper_pic_match, |
126 | }; | 119 | }; |
127 | 120 | ||
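
The flipper-pic conversion above is representative of the genirq migration running through this series: irq_chip callbacks now take a struct irq_data pointer instead of a bare virq number, the hardware interrupt number comes from irqd_to_hwirq(), and per-interrupt chip data is fetched with irq_data_get_irq_chip_data(). Below is a minimal sketch of a chip written directly against the new API; the register offsets (EXAMPLE_IMR/EXAMPLE_ICR) and the io_base layout are illustrative assumptions, not the real flipper hardware.

	#include <linux/irq.h>
	#include <asm/io.h>

	/* illustrative register offsets -- not the real flipper registers */
	#define EXAMPLE_IMR	0x04	/* interrupt mask register */
	#define EXAMPLE_ICR	0x08	/* interrupt cause register */

	static void example_pic_mask(struct irq_data *d)
	{
		int irq = irqd_to_hwirq(d);			/* hw number, not virq */
		void __iomem *io_base = irq_data_get_irq_chip_data(d);

		clrbits32(io_base + EXAMPLE_IMR, 1 << irq);	/* disable the source */
	}

	static void example_pic_unmask(struct irq_data *d)
	{
		int irq = irqd_to_hwirq(d);
		void __iomem *io_base = irq_data_get_irq_chip_data(d);

		setbits32(io_base + EXAMPLE_IMR, 1 << irq);	/* enable the source */
	}

	static void example_pic_ack(struct irq_data *d)
	{
		int irq = irqd_to_hwirq(d);
		void __iomem *io_base = irq_data_get_irq_chip_data(d);

		out_be32(io_base + EXAMPLE_ICR, 1 << irq);	/* clear the latched cause */
	}

	static struct irq_chip example_pic = {
		.name		= "example-pic",
		.irq_ack	= example_pic_ack,
		.irq_mask	= example_pic_mask,
		.irq_unmask	= example_pic_unmask,
	};

The matching host .map callback pairs irq_set_chip_data() with irq_set_chip_and_handler(..., handle_level_irq); with these helpers there is nothing left to undo at unmap time, which is why the empty .unmap implementations are dropped from both flipper and hlwd below.
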
diff --git a/arch/powerpc/platforms/embedded6xx/gamecube.c b/arch/powerpc/platforms/embedded6xx/gamecube.c index 1106fd99627f..a138e14bad2e 100644 --- a/arch/powerpc/platforms/embedded6xx/gamecube.c +++ b/arch/powerpc/platforms/embedded6xx/gamecube.c | |||
@@ -75,14 +75,6 @@ static void gamecube_shutdown(void) | |||
75 | flipper_quiesce(); | 75 | flipper_quiesce(); |
76 | } | 76 | } |
77 | 77 | ||
78 | #ifdef CONFIG_KEXEC | ||
79 | static int gamecube_kexec_prepare(struct kimage *image) | ||
80 | { | ||
81 | return 0; | ||
82 | } | ||
83 | #endif /* CONFIG_KEXEC */ | ||
84 | |||
85 | |||
86 | define_machine(gamecube) { | 78 | define_machine(gamecube) { |
87 | .name = "gamecube", | 79 | .name = "gamecube", |
88 | .probe = gamecube_probe, | 80 | .probe = gamecube_probe, |
@@ -95,9 +87,6 @@ define_machine(gamecube) { | |||
95 | .calibrate_decr = generic_calibrate_decr, | 87 | .calibrate_decr = generic_calibrate_decr, |
96 | .progress = udbg_progress, | 88 | .progress = udbg_progress, |
97 | .machine_shutdown = gamecube_shutdown, | 89 | .machine_shutdown = gamecube_shutdown, |
98 | #ifdef CONFIG_KEXEC | ||
99 | .machine_kexec_prepare = gamecube_kexec_prepare, | ||
100 | #endif | ||
101 | }; | 90 | }; |
102 | 91 | ||
103 | 92 | ||
diff --git a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c index a771f91e215b..e4919170c6bc 100644 --- a/arch/powerpc/platforms/embedded6xx/hlwd-pic.c +++ b/arch/powerpc/platforms/embedded6xx/hlwd-pic.c | |||
@@ -41,36 +41,36 @@ | |||
41 | * | 41 | * |
42 | */ | 42 | */ |
43 | 43 | ||
44 | static void hlwd_pic_mask_and_ack(unsigned int virq) | 44 | static void hlwd_pic_mask_and_ack(struct irq_data *d) |
45 | { | 45 | { |
46 | int irq = virq_to_hw(virq); | 46 | int irq = irqd_to_hwirq(d); |
47 | void __iomem *io_base = get_irq_chip_data(virq); | 47 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
48 | u32 mask = 1 << irq; | 48 | u32 mask = 1 << irq; |
49 | 49 | ||
50 | clrbits32(io_base + HW_BROADWAY_IMR, mask); | 50 | clrbits32(io_base + HW_BROADWAY_IMR, mask); |
51 | out_be32(io_base + HW_BROADWAY_ICR, mask); | 51 | out_be32(io_base + HW_BROADWAY_ICR, mask); |
52 | } | 52 | } |
53 | 53 | ||
54 | static void hlwd_pic_ack(unsigned int virq) | 54 | static void hlwd_pic_ack(struct irq_data *d) |
55 | { | 55 | { |
56 | int irq = virq_to_hw(virq); | 56 | int irq = irqd_to_hwirq(d); |
57 | void __iomem *io_base = get_irq_chip_data(virq); | 57 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
58 | 58 | ||
59 | out_be32(io_base + HW_BROADWAY_ICR, 1 << irq); | 59 | out_be32(io_base + HW_BROADWAY_ICR, 1 << irq); |
60 | } | 60 | } |
61 | 61 | ||
62 | static void hlwd_pic_mask(unsigned int virq) | 62 | static void hlwd_pic_mask(struct irq_data *d) |
63 | { | 63 | { |
64 | int irq = virq_to_hw(virq); | 64 | int irq = irqd_to_hwirq(d); |
65 | void __iomem *io_base = get_irq_chip_data(virq); | 65 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
66 | 66 | ||
67 | clrbits32(io_base + HW_BROADWAY_IMR, 1 << irq); | 67 | clrbits32(io_base + HW_BROADWAY_IMR, 1 << irq); |
68 | } | 68 | } |
69 | 69 | ||
70 | static void hlwd_pic_unmask(unsigned int virq) | 70 | static void hlwd_pic_unmask(struct irq_data *d) |
71 | { | 71 | { |
72 | int irq = virq_to_hw(virq); | 72 | int irq = irqd_to_hwirq(d); |
73 | void __iomem *io_base = get_irq_chip_data(virq); | 73 | void __iomem *io_base = irq_data_get_irq_chip_data(d); |
74 | 74 | ||
75 | setbits32(io_base + HW_BROADWAY_IMR, 1 << irq); | 75 | setbits32(io_base + HW_BROADWAY_IMR, 1 << irq); |
76 | } | 76 | } |
@@ -78,10 +78,10 @@ static void hlwd_pic_unmask(unsigned int virq) | |||
78 | 78 | ||
79 | static struct irq_chip hlwd_pic = { | 79 | static struct irq_chip hlwd_pic = { |
80 | .name = "hlwd-pic", | 80 | .name = "hlwd-pic", |
81 | .ack = hlwd_pic_ack, | 81 | .irq_ack = hlwd_pic_ack, |
82 | .mask_ack = hlwd_pic_mask_and_ack, | 82 | .irq_mask_ack = hlwd_pic_mask_and_ack, |
83 | .mask = hlwd_pic_mask, | 83 | .irq_mask = hlwd_pic_mask, |
84 | .unmask = hlwd_pic_unmask, | 84 | .irq_unmask = hlwd_pic_unmask, |
85 | }; | 85 | }; |
86 | 86 | ||
87 | /* | 87 | /* |
@@ -94,21 +94,14 @@ static struct irq_host *hlwd_irq_host; | |||
94 | static int hlwd_pic_map(struct irq_host *h, unsigned int virq, | 94 | static int hlwd_pic_map(struct irq_host *h, unsigned int virq, |
95 | irq_hw_number_t hwirq) | 95 | irq_hw_number_t hwirq) |
96 | { | 96 | { |
97 | set_irq_chip_data(virq, h->host_data); | 97 | irq_set_chip_data(virq, h->host_data); |
98 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 98 | irq_set_status_flags(virq, IRQ_LEVEL); |
99 | set_irq_chip_and_handler(virq, &hlwd_pic, handle_level_irq); | 99 | irq_set_chip_and_handler(virq, &hlwd_pic, handle_level_irq); |
100 | return 0; | 100 | return 0; |
101 | } | 101 | } |
102 | 102 | ||
103 | static void hlwd_pic_unmap(struct irq_host *h, unsigned int irq) | ||
104 | { | ||
105 | set_irq_chip_data(irq, NULL); | ||
106 | set_irq_chip(irq, NULL); | ||
107 | } | ||
108 | |||
109 | static struct irq_host_ops hlwd_irq_host_ops = { | 103 | static struct irq_host_ops hlwd_irq_host_ops = { |
110 | .map = hlwd_pic_map, | 104 | .map = hlwd_pic_map, |
111 | .unmap = hlwd_pic_unmap, | ||
112 | }; | 105 | }; |
113 | 106 | ||
114 | static unsigned int __hlwd_pic_get_irq(struct irq_host *h) | 107 | static unsigned int __hlwd_pic_get_irq(struct irq_host *h) |
@@ -129,11 +122,12 @@ static unsigned int __hlwd_pic_get_irq(struct irq_host *h) | |||
129 | static void hlwd_pic_irq_cascade(unsigned int cascade_virq, | 122 | static void hlwd_pic_irq_cascade(unsigned int cascade_virq, |
130 | struct irq_desc *desc) | 123 | struct irq_desc *desc) |
131 | { | 124 | { |
132 | struct irq_host *irq_host = get_irq_data(cascade_virq); | 125 | struct irq_chip *chip = irq_desc_get_chip(desc); |
126 | struct irq_host *irq_host = irq_get_handler_data(cascade_virq); | ||
133 | unsigned int virq; | 127 | unsigned int virq; |
134 | 128 | ||
135 | raw_spin_lock(&desc->lock); | 129 | raw_spin_lock(&desc->lock); |
136 | desc->chip->mask(cascade_virq); /* IRQ_LEVEL */ | 130 | chip->irq_mask(&desc->irq_data); /* IRQ_LEVEL */ |
137 | raw_spin_unlock(&desc->lock); | 131 | raw_spin_unlock(&desc->lock); |
138 | 132 | ||
139 | virq = __hlwd_pic_get_irq(irq_host); | 133 | virq = __hlwd_pic_get_irq(irq_host); |
@@ -143,9 +137,9 @@ static void hlwd_pic_irq_cascade(unsigned int cascade_virq, | |||
143 | pr_err("spurious interrupt!\n"); | 137 | pr_err("spurious interrupt!\n"); |
144 | 138 | ||
145 | raw_spin_lock(&desc->lock); | 139 | raw_spin_lock(&desc->lock); |
146 | desc->chip->ack(cascade_virq); /* IRQ_LEVEL */ | 140 | chip->irq_ack(&desc->irq_data); /* IRQ_LEVEL */ |
147 | if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) | 141 | if (!irqd_irq_disabled(&desc->irq_data) && chip->irq_unmask) |
148 | desc->chip->unmask(cascade_virq); | 142 | chip->irq_unmask(&desc->irq_data); |
149 | raw_spin_unlock(&desc->lock); | 143 | raw_spin_unlock(&desc->lock); |
150 | } | 144 | } |
151 | 145 | ||
@@ -217,8 +211,8 @@ void hlwd_pic_probe(void) | |||
217 | host = hlwd_pic_init(np); | 211 | host = hlwd_pic_init(np); |
218 | BUG_ON(!host); | 212 | BUG_ON(!host); |
219 | cascade_virq = irq_of_parse_and_map(np, 0); | 213 | cascade_virq = irq_of_parse_and_map(np, 0); |
220 | set_irq_data(cascade_virq, host); | 214 | irq_set_handler_data(cascade_virq, host); |
221 | set_irq_chained_handler(cascade_virq, | 215 | irq_set_chained_handler(cascade_virq, |
222 | hlwd_pic_irq_cascade); | 216 | hlwd_pic_irq_cascade); |
223 | hlwd_irq_host = host; | 217 | hlwd_irq_host = host; |
224 | break; | 218 | break; |
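
The hlwd cascade rework follows the same pattern as the holly and mpc7448_hpc2 changes further down: the parent interrupt carries the child irq_host as handler data (irq_set_handler_data()), the flow handler retrieves the parent chip through irq_desc_get_chip(), and all mask/ack/unmask operations go through the chip's irq_* methods on &desc->irq_data. A hedged sketch of that shape, with a hypothetical demux helper example_get_child_irq():

	#include <linux/irq.h>

	/* hypothetical demux helper: returns the pending child virq, or NO_IRQ */
	static unsigned int example_get_child_irq(struct irq_host *host)
	{
		/* a real version would read the controller's cause register and
		 * translate the hw number with irq_linear_revmap()/irq_find_mapping() */
		return NO_IRQ;
	}

	static void example_irq_cascade(unsigned int cascade_virq,
					struct irq_desc *desc)
	{
		struct irq_chip *chip = irq_desc_get_chip(desc);
		struct irq_host *host = irq_get_handler_data(cascade_virq);
		unsigned int virq;

		raw_spin_lock(&desc->lock);
		chip->irq_mask(&desc->irq_data);	/* mask the level parent */
		raw_spin_unlock(&desc->lock);

		virq = example_get_child_irq(host);
		if (virq != NO_IRQ)
			generic_handle_irq(virq);

		raw_spin_lock(&desc->lock);
		chip->irq_ack(&desc->irq_data);		/* ack, then conditionally unmask */
		if (!irqd_irq_disabled(&desc->irq_data) && chip->irq_unmask)
			chip->irq_unmask(&desc->irq_data);
		raw_spin_unlock(&desc->lock);
	}

	/* wiring it up at probe time, as hlwd_pic_probe() does */
	static void example_cascade_setup(struct irq_host *host,
					  unsigned int cascade_virq)
	{
		irq_set_handler_data(cascade_virq, host);
		irq_set_chained_handler(cascade_virq, example_irq_cascade);
	}
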
diff --git a/arch/powerpc/platforms/embedded6xx/holly.c b/arch/powerpc/platforms/embedded6xx/holly.c index b21fde589ca7..487bda0d18d8 100644 --- a/arch/powerpc/platforms/embedded6xx/holly.c +++ b/arch/powerpc/platforms/embedded6xx/holly.c | |||
@@ -198,8 +198,8 @@ static void __init holly_init_IRQ(void) | |||
198 | cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0); | 198 | cascade_pci_irq = irq_of_parse_and_map(tsi_pci, 0); |
199 | pr_debug("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, (u32) cascade_pci_irq); | 199 | pr_debug("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, (u32) cascade_pci_irq); |
200 | tsi108_pci_int_init(cascade_node); | 200 | tsi108_pci_int_init(cascade_node); |
201 | set_irq_data(cascade_pci_irq, mpic); | 201 | irq_set_handler_data(cascade_pci_irq, mpic); |
202 | set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade); | 202 | irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade); |
203 | #endif | 203 | #endif |
204 | /* Configure MPIC outputs to CPU0 */ | 204 | /* Configure MPIC outputs to CPU0 */ |
205 | tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); | 205 | tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); |
diff --git a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c index 7a2ba39d7811..1cb907c94359 100644 --- a/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c +++ b/arch/powerpc/platforms/embedded6xx/mpc7448_hpc2.c | |||
@@ -153,8 +153,8 @@ static void __init mpc7448_hpc2_init_IRQ(void) | |||
153 | DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, | 153 | DBG("%s: tsi108 cascade_pci_irq = 0x%x\n", __func__, |
154 | (u32) cascade_pci_irq); | 154 | (u32) cascade_pci_irq); |
155 | tsi108_pci_int_init(cascade_node); | 155 | tsi108_pci_int_init(cascade_node); |
156 | set_irq_data(cascade_pci_irq, mpic); | 156 | irq_set_handler_data(cascade_pci_irq, mpic); |
157 | set_irq_chained_handler(cascade_pci_irq, tsi108_irq_cascade); | 157 | irq_set_chained_handler(cascade_pci_irq, tsi108_irq_cascade); |
158 | #endif | 158 | #endif |
159 | /* Configure MPIC outputs to CPU0 */ | 159 | /* Configure MPIC outputs to CPU0 */ |
160 | tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); | 160 | tsi108_write_reg(TSI108_MPIC_OFFSET + 0x30c, 0); |
diff --git a/arch/powerpc/platforms/embedded6xx/wii.c b/arch/powerpc/platforms/embedded6xx/wii.c index 5cdcc7c8d973..1b5dc1a2e145 100644 --- a/arch/powerpc/platforms/embedded6xx/wii.c +++ b/arch/powerpc/platforms/embedded6xx/wii.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/init.h> | 18 | #include <linux/init.h> |
19 | #include <linux/irq.h> | 19 | #include <linux/irq.h> |
20 | #include <linux/seq_file.h> | 20 | #include <linux/seq_file.h> |
21 | #include <linux/kexec.h> | ||
22 | #include <linux/of_platform.h> | 21 | #include <linux/of_platform.h> |
23 | #include <linux/memblock.h> | 22 | #include <linux/memblock.h> |
24 | #include <mm/mmu_decl.h> | 23 | #include <mm/mmu_decl.h> |
@@ -65,7 +64,7 @@ static int __init page_aligned(unsigned long x) | |||
65 | 64 | ||
66 | void __init wii_memory_fixups(void) | 65 | void __init wii_memory_fixups(void) |
67 | { | 66 | { |
68 | struct memblock_property *p = memblock.memory.region; | 67 | struct memblock_region *p = memblock.memory.regions; |
69 | 68 | ||
70 | /* | 69 | /* |
71 | * This is part of a workaround to allow the use of two | 70 | * This is part of a workaround to allow the use of two |
@@ -226,13 +225,6 @@ static void wii_shutdown(void) | |||
226 | flipper_quiesce(); | 225 | flipper_quiesce(); |
227 | } | 226 | } |
228 | 227 | ||
229 | #ifdef CONFIG_KEXEC | ||
230 | static int wii_machine_kexec_prepare(struct kimage *image) | ||
231 | { | ||
232 | return 0; | ||
233 | } | ||
234 | #endif /* CONFIG_KEXEC */ | ||
235 | |||
236 | define_machine(wii) { | 228 | define_machine(wii) { |
237 | .name = "wii", | 229 | .name = "wii", |
238 | .probe = wii_probe, | 230 | .probe = wii_probe, |
@@ -246,9 +238,6 @@ define_machine(wii) { | |||
246 | .calibrate_decr = generic_calibrate_decr, | 238 | .calibrate_decr = generic_calibrate_decr, |
247 | .progress = udbg_progress, | 239 | .progress = udbg_progress, |
248 | .machine_shutdown = wii_shutdown, | 240 | .machine_shutdown = wii_shutdown, |
249 | #ifdef CONFIG_KEXEC | ||
250 | .machine_kexec_prepare = wii_machine_kexec_prepare, | ||
251 | #endif | ||
252 | }; | 241 | }; |
253 | 242 | ||
254 | static struct of_device_id wii_of_bus[] = { | 243 | static struct of_device_id wii_of_bus[] = { |
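
The wii_memory_fixups() change tracks the memblock rename in this kernel: the old struct memblock_property array memblock.memory.region became struct memblock_region memblock.memory.regions, each entry carrying .base and .size. A small hedged sketch of walking the memory regions under the new names, assuming direct access to the global memblock and its cnt field as the Wii code does:

	#include <linux/kernel.h>
	#include <linux/memblock.h>

	static void __init example_dump_memory_regions(void)
	{
		struct memblock_region *p = memblock.memory.regions;
		unsigned long i;

		for (i = 0; i < memblock.memory.cnt; i++)
			pr_info("memory region %lu: base %llx size %llx\n", i,
				(unsigned long long)p[i].base,
				(unsigned long long)p[i].size);
	}
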
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig index 47a20cfb4486..b57cda3a0817 100644 --- a/arch/powerpc/platforms/iseries/Kconfig +++ b/arch/powerpc/platforms/iseries/Kconfig | |||
@@ -1,8 +1,10 @@ | |||
1 | config PPC_ISERIES | 1 | config PPC_ISERIES |
2 | bool "IBM Legacy iSeries" | 2 | bool "IBM Legacy iSeries" |
3 | depends on PPC64 && PPC_BOOK3S | 3 | depends on PPC64 && PPC_BOOK3S |
4 | select PPC_INDIRECT_IO | 4 | select PPC_SMP_MUXED_IPI |
5 | select PPC_PCI_CHOICE if EMBEDDED | 5 | select PPC_INDIRECT_PIO |
6 | select PPC_INDIRECT_MMIO | ||
7 | select PPC_PCI_CHOICE if EXPERT | ||
6 | 8 | ||
7 | menu "iSeries device drivers" | 9 | menu "iSeries device drivers" |
8 | depends on PPC_ISERIES | 10 | depends on PPC_ISERIES |
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile index ce014928d460..a7602b11ed9d 100644 --- a/arch/powerpc/platforms/iseries/Makefile +++ b/arch/powerpc/platforms/iseries/Makefile | |||
@@ -1,4 +1,4 @@ | |||
1 | EXTRA_CFLAGS += -mno-minimal-toc | 1 | ccflags-y := -mno-minimal-toc |
2 | 2 | ||
3 | obj-y += exception.o | 3 | obj-y += exception.o |
4 | obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt.o mf.o lpevents.o \ | 4 | obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt.o mf.o lpevents.o \ |
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c index 7f45a51fe793..f0491cc28900 100644 --- a/arch/powerpc/platforms/iseries/dt.c +++ b/arch/powerpc/platforms/iseries/dt.c | |||
@@ -242,7 +242,7 @@ static void __init dt_cpus(struct iseries_flat_dt *dt) | |||
242 | pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ | 242 | pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */ |
243 | pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); | 243 | pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE); |
244 | 244 | ||
245 | for (i = 0; i < NR_CPUS; i++) { | 245 | for (i = 0; i < NR_LPPACAS; i++) { |
246 | if (lppaca[i].dyn_proc_status >= 2) | 246 | if (lppaca[i].dyn_proc_status >= 2) |
247 | continue; | 247 | continue; |
248 | 248 | ||
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S index 32a56c6dfa72..29c02f36b32f 100644 --- a/arch/powerpc/platforms/iseries/exception.S +++ b/arch/powerpc/platforms/iseries/exception.S | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <asm/thread_info.h> | 31 | #include <asm/thread_info.h> |
32 | #include <asm/ptrace.h> | 32 | #include <asm/ptrace.h> |
33 | #include <asm/cputable.h> | 33 | #include <asm/cputable.h> |
34 | #include <asm/mmu.h> | ||
34 | 35 | ||
35 | #include "exception.h" | 36 | #include "exception.h" |
36 | 37 | ||
@@ -60,29 +61,31 @@ system_reset_iSeries: | |||
60 | /* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */ | 61 | /* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */ |
61 | /* In the UP case we'll yield() later, and we will not access the paca anyway */ | 62 | /* In the UP case we'll yield() later, and we will not access the paca anyway */ |
62 | #ifdef CONFIG_SMP | 63 | #ifdef CONFIG_SMP |
63 | 1: | 64 | iSeries_secondary_wait_paca: |
64 | HMT_LOW | 65 | HMT_LOW |
65 | LOAD_REG_ADDR(r23, __secondary_hold_spinloop) | 66 | LOAD_REG_ADDR(r23, __secondary_hold_spinloop) |
66 | ld r23,0(r23) | 67 | ld r23,0(r23) |
67 | sync | ||
68 | LOAD_REG_ADDR(r3,current_set) | ||
69 | sldi r28,r24,3 /* get current_set[cpu#] */ | ||
70 | ldx r3,r3,r28 | ||
71 | addi r1,r3,THREAD_SIZE | ||
72 | subi r1,r1,STACK_FRAME_OVERHEAD | ||
73 | 68 | ||
74 | cmpwi 0,r23,0 /* Keep poking the Hypervisor until */ | 69 | cmpdi 0,r23,0 |
75 | bne 2f /* we're released */ | 70 | bne 2f /* go on when the master is ready */ |
76 | /* Let the Hypervisor know we are alive */ | 71 | |
72 | /* Keep poking the Hypervisor until we're released */ | ||
77 | /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ | 73 | /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ |
78 | lis r3,0x8002 | 74 | lis r3,0x8002 |
79 | rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */ | 75 | rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */ |
80 | li r0,-1 /* r0=-1 indicates a Hypervisor call */ | 76 | li r0,-1 /* r0=-1 indicates a Hypervisor call */ |
81 | sc /* Invoke the hypervisor via a system call */ | 77 | sc /* Invoke the hypervisor via a system call */ |
82 | b 1b | 78 | b iSeries_secondary_wait_paca |
83 | #endif | ||
84 | 79 | ||
85 | 2: | 80 | 2: |
81 | HMT_MEDIUM | ||
82 | sync | ||
83 | |||
84 | LOAD_REG_ADDR(r3, nr_cpu_ids) /* get number of pacas allocated */ | ||
85 | lwz r3,0(r3) /* nr_cpus= or NR_CPUS can limit */ | ||
86 | cmpld 0,r24,r3 /* is our cpu number allocated? */ | ||
87 | bge iSeries_secondary_yield /* no, yield forever */ | ||
88 | |||
86 | /* Load our paca now that it's been allocated */ | 89 | /* Load our paca now that it's been allocated */ |
87 | LOAD_REG_ADDR(r13, paca) | 90 | LOAD_REG_ADDR(r13, paca) |
88 | ld r13,0(r13) | 91 | ld r13,0(r13) |
@@ -93,10 +96,24 @@ system_reset_iSeries: | |||
93 | ori r23,r23,MSR_RI | 96 | ori r23,r23,MSR_RI |
94 | mtmsrd r23 /* RI on */ | 97 | mtmsrd r23 /* RI on */ |
95 | 98 | ||
96 | HMT_LOW | 99 | iSeries_secondary_smp_loop: |
97 | #ifdef CONFIG_SMP | ||
98 | lbz r23,PACAPROCSTART(r13) /* Test if this processor | 100 | lbz r23,PACAPROCSTART(r13) /* Test if this processor |
99 | * should start */ | 101 | * should start */ |
102 | cmpwi 0,r23,0 | ||
103 | bne 3f /* go on when we are told */ | ||
104 | |||
105 | HMT_LOW | ||
106 | /* Let the Hypervisor know we are alive */ | ||
107 | /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ | ||
108 | lis r3,0x8002 | ||
109 | rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */ | ||
110 | li r0,-1 /* r0=-1 indicates a Hypervisor call */ | ||
111 | sc /* Invoke the hypervisor via a system call */ | ||
112 | mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */ | ||
113 | b iSeries_secondary_smp_loop /* wait for signal to start */ | ||
114 | |||
115 | 3: | ||
116 | HMT_MEDIUM | ||
100 | sync | 117 | sync |
101 | LOAD_REG_ADDR(r3,current_set) | 118 | LOAD_REG_ADDR(r3,current_set) |
102 | sldi r28,r24,3 /* get current_set[cpu#] */ | 119 | sldi r28,r24,3 /* get current_set[cpu#] */ |
@@ -104,27 +121,22 @@ system_reset_iSeries: | |||
104 | addi r1,r3,THREAD_SIZE | 121 | addi r1,r3,THREAD_SIZE |
105 | subi r1,r1,STACK_FRAME_OVERHEAD | 122 | subi r1,r1,STACK_FRAME_OVERHEAD |
106 | 123 | ||
107 | cmpwi 0,r23,0 | ||
108 | beq iSeries_secondary_smp_loop /* Loop until told to go */ | ||
109 | b __secondary_start /* Loop until told to go */ | 124 | b __secondary_start /* Loop until told to go */ |
110 | iSeries_secondary_smp_loop: | 125 | #endif /* CONFIG_SMP */ |
111 | /* Let the Hypervisor know we are alive */ | 126 | |
112 | /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */ | 127 | iSeries_secondary_yield: |
113 | lis r3,0x8002 | ||
114 | rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */ | ||
115 | #else /* CONFIG_SMP */ | ||
116 | /* Yield the processor. This is required for non-SMP kernels | 128 | /* Yield the processor. This is required for non-SMP kernels |
117 | which are running on multi-threaded machines. */ | 129 | which are running on multi-threaded machines. */ |
130 | HMT_LOW | ||
118 | lis r3,0x8000 | 131 | lis r3,0x8000 |
119 | rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */ | 132 | rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */ |
120 | addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */ | 133 | addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */ |
121 | li r4,0 /* "yield timed" */ | 134 | li r4,0 /* "yield timed" */ |
122 | li r5,-1 /* "yield forever" */ | 135 | li r5,-1 /* "yield forever" */ |
123 | #endif /* CONFIG_SMP */ | ||
124 | li r0,-1 /* r0=-1 indicates a Hypervisor call */ | 136 | li r0,-1 /* r0=-1 indicates a Hypervisor call */ |
125 | sc /* Invoke the hypervisor via a system call */ | 137 | sc /* Invoke the hypervisor via a system call */ |
126 | mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */ | 138 | mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */ |
127 | b 2b /* If SMP not configured, secondaries | 139 | b iSeries_secondary_yield /* If SMP not configured, secondaries |
128 | * loop forever */ | 140 | * loop forever */ |
129 | 141 | ||
130 | /*** ISeries-LPAR interrupt handlers ***/ | 142 | /*** ISeries-LPAR interrupt handlers ***/ |
@@ -157,7 +169,7 @@ BEGIN_FTR_SECTION | |||
157 | FTR_SECTION_ELSE | 169 | FTR_SECTION_ELSE |
158 | EXCEPTION_PROLOG_1(PACA_EXGEN) | 170 | EXCEPTION_PROLOG_1(PACA_EXGEN) |
159 | EXCEPTION_PROLOG_ISERIES_1 | 171 | EXCEPTION_PROLOG_ISERIES_1 |
160 | ALT_FTR_SECTION_END_IFCLR(CPU_FTR_SLB) | 172 | ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB) |
161 | b data_access_common | 173 | b data_access_common |
162 | 174 | ||
163 | .do_stab_bolted_iSeries: | 175 | .do_stab_bolted_iSeries: |
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index ba446bf355a9..b2103453eb01 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include "irq.h" | 42 | #include "irq.h" |
43 | #include "pci.h" | 43 | #include "pci.h" |
44 | #include "call_pci.h" | 44 | #include "call_pci.h" |
45 | #include "smp.h" | ||
46 | 45 | ||
47 | #ifdef CONFIG_PCI | 46 | #ifdef CONFIG_PCI |
48 | 47 | ||
@@ -167,11 +166,11 @@ static void pci_event_handler(struct HvLpEvent *event) | |||
167 | * This will be called by device drivers (via enable_IRQ) | 166 | * This will be called by device drivers (via enable_IRQ) |
168 | * to enable INTA in the bridge interrupt status register. | 167 | * to enable INTA in the bridge interrupt status register. |
169 | */ | 168 | */ |
170 | static void iseries_enable_IRQ(unsigned int irq) | 169 | static void iseries_enable_IRQ(struct irq_data *d) |
171 | { | 170 | { |
172 | u32 bus, dev_id, function, mask; | 171 | u32 bus, dev_id, function, mask; |
173 | const u32 sub_bus = 0; | 172 | const u32 sub_bus = 0; |
174 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; | 173 | unsigned int rirq = (unsigned int)irqd_to_hwirq(d); |
175 | 174 | ||
176 | /* The IRQ has already been locked by the caller */ | 175 | /* The IRQ has already been locked by the caller */ |
177 | bus = REAL_IRQ_TO_BUS(rirq); | 176 | bus = REAL_IRQ_TO_BUS(rirq); |
@@ -184,23 +183,23 @@ static void iseries_enable_IRQ(unsigned int irq) | |||
184 | } | 183 | } |
185 | 184 | ||
186 | /* This is called by iseries_activate_IRQs */ | 185 | /* This is called by iseries_activate_IRQs */ |
187 | static unsigned int iseries_startup_IRQ(unsigned int irq) | 186 | static unsigned int iseries_startup_IRQ(struct irq_data *d) |
188 | { | 187 | { |
189 | u32 bus, dev_id, function, mask; | 188 | u32 bus, dev_id, function, mask; |
190 | const u32 sub_bus = 0; | 189 | const u32 sub_bus = 0; |
191 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; | 190 | unsigned int rirq = (unsigned int)irqd_to_hwirq(d); |
192 | 191 | ||
193 | bus = REAL_IRQ_TO_BUS(rirq); | 192 | bus = REAL_IRQ_TO_BUS(rirq); |
194 | function = REAL_IRQ_TO_FUNC(rirq); | 193 | function = REAL_IRQ_TO_FUNC(rirq); |
195 | dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; | 194 | dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; |
196 | 195 | ||
197 | /* Link the IRQ number to the bridge */ | 196 | /* Link the IRQ number to the bridge */ |
198 | HvCallXm_connectBusUnit(bus, sub_bus, dev_id, irq); | 197 | HvCallXm_connectBusUnit(bus, sub_bus, dev_id, d->irq); |
199 | 198 | ||
200 | /* Unmask bridge interrupts in the FISR */ | 199 | /* Unmask bridge interrupts in the FISR */ |
201 | mask = 0x01010000 << function; | 200 | mask = 0x01010000 << function; |
202 | HvCallPci_unmaskFisr(bus, sub_bus, dev_id, mask); | 201 | HvCallPci_unmaskFisr(bus, sub_bus, dev_id, mask); |
203 | iseries_enable_IRQ(irq); | 202 | iseries_enable_IRQ(d); |
204 | return 0; | 203 | return 0; |
205 | } | 204 | } |
206 | 205 | ||
@@ -215,21 +214,26 @@ void __init iSeries_activate_IRQs() | |||
215 | 214 | ||
216 | for_each_irq (irq) { | 215 | for_each_irq (irq) { |
217 | struct irq_desc *desc = irq_to_desc(irq); | 216 | struct irq_desc *desc = irq_to_desc(irq); |
217 | struct irq_chip *chip; | ||
218 | 218 | ||
219 | if (desc && desc->chip && desc->chip->startup) { | 219 | if (!desc) |
220 | continue; | ||
221 | |||
222 | chip = irq_desc_get_chip(desc); | ||
223 | if (chip && chip->irq_startup) { | ||
220 | raw_spin_lock_irqsave(&desc->lock, flags); | 224 | raw_spin_lock_irqsave(&desc->lock, flags); |
221 | desc->chip->startup(irq); | 225 | chip->irq_startup(&desc->irq_data); |
222 | raw_spin_unlock_irqrestore(&desc->lock, flags); | 226 | raw_spin_unlock_irqrestore(&desc->lock, flags); |
223 | } | 227 | } |
224 | } | 228 | } |
225 | } | 229 | } |
226 | 230 | ||
227 | /* this is not called anywhere currently */ | 231 | /* this is not called anywhere currently */ |
228 | static void iseries_shutdown_IRQ(unsigned int irq) | 232 | static void iseries_shutdown_IRQ(struct irq_data *d) |
229 | { | 233 | { |
230 | u32 bus, dev_id, function, mask; | 234 | u32 bus, dev_id, function, mask; |
231 | const u32 sub_bus = 0; | 235 | const u32 sub_bus = 0; |
232 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; | 236 | unsigned int rirq = (unsigned int)irqd_to_hwirq(d); |
233 | 237 | ||
234 | /* irq should be locked by the caller */ | 238 | /* irq should be locked by the caller */ |
235 | bus = REAL_IRQ_TO_BUS(rirq); | 239 | bus = REAL_IRQ_TO_BUS(rirq); |
@@ -248,11 +252,11 @@ static void iseries_shutdown_IRQ(unsigned int irq) | |||
248 | * This will be called by device drivers (via disable_IRQ) | 252 | * This will be called by device drivers (via disable_IRQ) |
249 | * to disable INTA in the bridge interrupt status register. | 253 | * to disable INTA in the bridge interrupt status register. |
250 | */ | 254 | */ |
251 | static void iseries_disable_IRQ(unsigned int irq) | 255 | static void iseries_disable_IRQ(struct irq_data *d) |
252 | { | 256 | { |
253 | u32 bus, dev_id, function, mask; | 257 | u32 bus, dev_id, function, mask; |
254 | const u32 sub_bus = 0; | 258 | const u32 sub_bus = 0; |
255 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; | 259 | unsigned int rirq = (unsigned int)irqd_to_hwirq(d); |
256 | 260 | ||
257 | /* The IRQ has already been locked by the caller */ | 261 | /* The IRQ has already been locked by the caller */ |
258 | bus = REAL_IRQ_TO_BUS(rirq); | 262 | bus = REAL_IRQ_TO_BUS(rirq); |
@@ -264,9 +268,9 @@ static void iseries_disable_IRQ(unsigned int irq) | |||
264 | HvCallPci_maskInterrupts(bus, sub_bus, dev_id, mask); | 268 | HvCallPci_maskInterrupts(bus, sub_bus, dev_id, mask); |
265 | } | 269 | } |
266 | 270 | ||
267 | static void iseries_end_IRQ(unsigned int irq) | 271 | static void iseries_end_IRQ(struct irq_data *d) |
268 | { | 272 | { |
269 | unsigned int rirq = (unsigned int)irq_map[irq].hwirq; | 273 | unsigned int rirq = (unsigned int)irqd_to_hwirq(d); |
270 | 274 | ||
271 | HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), | 275 | HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), |
272 | (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); | 276 | (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); |
@@ -274,11 +278,11 @@ static void iseries_end_IRQ(unsigned int irq) | |||
274 | 278 | ||
275 | static struct irq_chip iseries_pic = { | 279 | static struct irq_chip iseries_pic = { |
276 | .name = "iSeries", | 280 | .name = "iSeries", |
277 | .startup = iseries_startup_IRQ, | 281 | .irq_startup = iseries_startup_IRQ, |
278 | .shutdown = iseries_shutdown_IRQ, | 282 | .irq_shutdown = iseries_shutdown_IRQ, |
279 | .unmask = iseries_enable_IRQ, | 283 | .irq_unmask = iseries_enable_IRQ, |
280 | .mask = iseries_disable_IRQ, | 284 | .irq_mask = iseries_disable_IRQ, |
281 | .eoi = iseries_end_IRQ | 285 | .irq_eoi = iseries_end_IRQ |
282 | }; | 286 | }; |
283 | 287 | ||
284 | /* | 288 | /* |
@@ -311,7 +315,7 @@ unsigned int iSeries_get_irq(void) | |||
311 | #ifdef CONFIG_SMP | 315 | #ifdef CONFIG_SMP |
312 | if (get_lppaca()->int_dword.fields.ipi_cnt) { | 316 | if (get_lppaca()->int_dword.fields.ipi_cnt) { |
313 | get_lppaca()->int_dword.fields.ipi_cnt = 0; | 317 | get_lppaca()->int_dword.fields.ipi_cnt = 0; |
314 | iSeries_smp_message_recv(); | 318 | smp_ipi_demux(); |
315 | } | 319 | } |
316 | #endif /* CONFIG_SMP */ | 320 | #endif /* CONFIG_SMP */ |
317 | if (hvlpevent_is_pending()) | 321 | if (hvlpevent_is_pending()) |
@@ -341,7 +345,7 @@ unsigned int iSeries_get_irq(void) | |||
341 | static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, | 345 | static int iseries_irq_host_map(struct irq_host *h, unsigned int virq, |
342 | irq_hw_number_t hw) | 346 | irq_hw_number_t hw) |
343 | { | 347 | { |
344 | set_irq_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); | 348 | irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq); |
345 | 349 | ||
346 | return 0; | 350 | return 0; |
347 | } | 351 | } |
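
iSeries_activate_IRQs() shows the consumer side of the same migration: instead of poking desc->chip->startup(irq) directly, the descriptor's chip is obtained with irq_desc_get_chip() and its irq_startup() method is invoked on &desc->irq_data under the descriptor lock, skipping descriptors that do not exist under sparse irq. A reduced sketch of that loop (for_each_irq() is the powerpc helper used in this file; everything else is generic kernel API):

	#include <linux/irq.h>

	static void __init example_activate_irqs(void)
	{
		unsigned long flags;
		unsigned int irq;

		for_each_irq(irq) {
			struct irq_desc *desc = irq_to_desc(irq);
			struct irq_chip *chip;

			if (!desc)	/* sparse irq: descriptor may not be allocated */
				continue;

			chip = irq_desc_get_chip(desc);
			if (chip && chip->irq_startup) {
				raw_spin_lock_irqsave(&desc->lock, flags);
				chip->irq_startup(&desc->irq_data);
				raw_spin_unlock_irqrestore(&desc->lock, flags);
			}
		}
	}
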
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c index 33e5fc7334fc..62dabe3c2bfa 100644 --- a/arch/powerpc/platforms/iseries/mf.c +++ b/arch/powerpc/platforms/iseries/mf.c | |||
@@ -51,7 +51,7 @@ | |||
51 | static int mf_initialized; | 51 | static int mf_initialized; |
52 | 52 | ||
53 | /* | 53 | /* |
54 | * This is the structure layout for the Machine Facilites LPAR event | 54 | * This is the structure layout for the Machine Facilities LPAR event |
55 | * flows. | 55 | * flows. |
56 | */ | 56 | */ |
57 | struct vsp_cmd_data { | 57 | struct vsp_cmd_data { |
@@ -1045,71 +1045,9 @@ static const struct file_operations mf_side_proc_fops = { | |||
1045 | .write = mf_side_proc_write, | 1045 | .write = mf_side_proc_write, |
1046 | }; | 1046 | }; |
1047 | 1047 | ||
1048 | #if 0 | ||
1049 | static void mf_getSrcHistory(char *buffer, int size) | ||
1050 | { | ||
1051 | struct IplTypeReturnStuff return_stuff; | ||
1052 | struct pending_event *ev = new_pending_event(); | ||
1053 | int rc = 0; | ||
1054 | char *pages[4]; | ||
1055 | |||
1056 | pages[0] = kmalloc(4096, GFP_ATOMIC); | ||
1057 | pages[1] = kmalloc(4096, GFP_ATOMIC); | ||
1058 | pages[2] = kmalloc(4096, GFP_ATOMIC); | ||
1059 | pages[3] = kmalloc(4096, GFP_ATOMIC); | ||
1060 | if ((ev == NULL) || (pages[0] == NULL) || (pages[1] == NULL) | ||
1061 | || (pages[2] == NULL) || (pages[3] == NULL)) | ||
1062 | return -ENOMEM; | ||
1063 | |||
1064 | return_stuff.xType = 0; | ||
1065 | return_stuff.xRc = 0; | ||
1066 | return_stuff.xDone = 0; | ||
1067 | ev->event.hp_lp_event.xSubtype = 6; | ||
1068 | ev->event.hp_lp_event.x.xSubtypeData = | ||
1069 | subtype_data('M', 'F', 'V', 'I'); | ||
1070 | ev->event.data.vsp_cmd.xEvent = &return_stuff; | ||
1071 | ev->event.data.vsp_cmd.cmd = 4; | ||
1072 | ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex(); | ||
1073 | ev->event.data.vsp_cmd.result_code = 0xFF; | ||
1074 | ev->event.data.vsp_cmd.reserved = 0; | ||
1075 | ev->event.data.vsp_cmd.sub_data.page[0] = iseries_hv_addr(pages[0]); | ||
1076 | ev->event.data.vsp_cmd.sub_data.page[1] = iseries_hv_addr(pages[1]); | ||
1077 | ev->event.data.vsp_cmd.sub_data.page[2] = iseries_hv_addr(pages[2]); | ||
1078 | ev->event.data.vsp_cmd.sub_data.page[3] = iseries_hv_addr(pages[3]); | ||
1079 | mb(); | ||
1080 | if (signal_event(ev) != 0) | ||
1081 | return; | ||
1082 | |||
1083 | while (return_stuff.xDone != 1) | ||
1084 | udelay(10); | ||
1085 | if (return_stuff.xRc == 0) | ||
1086 | memcpy(buffer, pages[0], size); | ||
1087 | kfree(pages[0]); | ||
1088 | kfree(pages[1]); | ||
1089 | kfree(pages[2]); | ||
1090 | kfree(pages[3]); | ||
1091 | } | ||
1092 | #endif | ||
1093 | |||
1094 | static int mf_src_proc_show(struct seq_file *m, void *v) | 1048 | static int mf_src_proc_show(struct seq_file *m, void *v) |
1095 | { | 1049 | { |
1096 | #if 0 | ||
1097 | int len; | ||
1098 | |||
1099 | mf_getSrcHistory(page, count); | ||
1100 | len = count; | ||
1101 | len -= off; | ||
1102 | if (len < count) { | ||
1103 | *eof = 1; | ||
1104 | if (len <= 0) | ||
1105 | return 0; | ||
1106 | } else | ||
1107 | len = count; | ||
1108 | *start = page + off; | ||
1109 | return len; | ||
1110 | #else | ||
1111 | return 0; | 1050 | return 0; |
1112 | #endif | ||
1113 | } | 1051 | } |
1114 | 1052 | ||
1115 | static int mf_src_proc_open(struct inode *inode, struct file *file) | 1053 | static int mf_src_proc_open(struct inode *inode, struct file *file) |
@@ -1249,6 +1187,7 @@ out: | |||
1249 | 1187 | ||
1250 | static const struct file_operations proc_vmlinux_operations = { | 1188 | static const struct file_operations proc_vmlinux_operations = { |
1251 | .write = proc_mf_change_vmlinux, | 1189 | .write = proc_mf_change_vmlinux, |
1190 | .llseek = default_llseek, | ||
1252 | }; | 1191 | }; |
1253 | 1192 | ||
1254 | static int __init mf_proc_init(void) | 1193 | static int __init mf_proc_init(void) |
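
The .llseek = default_llseek addition to proc_vmlinux_operations is small but deliberate: with the big-kernel-lock era defaults gone, file_operations instances are expected to name their seek behaviour explicitly. A minimal hedged sketch of a write-only proc interface in that style (the write handler body is a placeholder):

	#include <linux/fs.h>

	static ssize_t example_proc_write(struct file *file, const char __user *buf,
					  size_t count, loff_t *ppos)
	{
		/* placeholder: a real handler would parse and act on the input */
		return count;
	}

	static const struct file_operations example_proc_fops = {
		.write	= example_proc_write,
		.llseek	= default_llseek,	/* explicit, no implicit default */
	};
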
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index b0863410517f..c25a0815c26b 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c | |||
@@ -249,7 +249,7 @@ static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array, | |||
249 | unsigned long i; | 249 | unsigned long i; |
250 | unsigned long mem_blocks = 0; | 250 | unsigned long mem_blocks = 0; |
251 | 251 | ||
252 | if (cpu_has_feature(CPU_FTR_SLB)) | 252 | if (mmu_has_feature(MMU_FTR_SLB)) |
253 | mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array, | 253 | mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array, |
254 | max_entries); | 254 | max_entries); |
255 | else | 255 | else |
@@ -634,7 +634,7 @@ static int __init iseries_probe(void) | |||
634 | 634 | ||
635 | hpte_init_iSeries(); | 635 | hpte_init_iSeries(); |
636 | /* iSeries does not support 16M pages */ | 636 | /* iSeries does not support 16M pages */ |
637 | cur_cpu_spec->cpu_features &= ~CPU_FTR_16M_PAGE; | 637 | cur_cpu_spec->mmu_features &= ~MMU_FTR_16M_PAGE; |
638 | 638 | ||
639 | return 1; | 639 | return 1; |
640 | } | 640 | } |
@@ -680,10 +680,16 @@ void * __init iSeries_early_setup(void) | |||
680 | * on but calling this function multiple times is fine. | 680 | * on but calling this function multiple times is fine. |
681 | */ | 681 | */ |
682 | identify_cpu(0, mfspr(SPRN_PVR)); | 682 | identify_cpu(0, mfspr(SPRN_PVR)); |
683 | initialise_paca(&boot_paca, 0); | ||
683 | 684 | ||
684 | powerpc_firmware_features |= FW_FEATURE_ISERIES; | 685 | powerpc_firmware_features |= FW_FEATURE_ISERIES; |
685 | powerpc_firmware_features |= FW_FEATURE_LPAR; | 686 | powerpc_firmware_features |= FW_FEATURE_LPAR; |
686 | 687 | ||
688 | #ifdef CONFIG_SMP | ||
689 | /* On iSeries we know we can never have more than 64 cpus */ | ||
690 | nr_cpu_ids = max(nr_cpu_ids, 64); | ||
691 | #endif | ||
692 | |||
687 | iSeries_fixup_klimit(); | 693 | iSeries_fixup_klimit(); |
688 | 694 | ||
689 | /* | 695 | /* |
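
The setup.c hunks reflect the split of the old CPU feature word into separate CPU and MMU feature masks: SLB and 16M-page support are now MMU features, tested with mmu_has_feature() and cleared from cur_cpu_spec->mmu_features rather than ->cpu_features. A short sketch of the intended usage, using only the feature names visible in this diff:

	#include <linux/kernel.h>
	#include <asm/mmu.h>
	#include <asm/cputable.h>

	static void __init example_apply_mmu_quirks(void)
	{
		/* pick the SLB-based path only when the MMU actually has an SLB */
		if (mmu_has_feature(MMU_FTR_SLB))
			pr_info("using SLB-aware setup\n");

		/* platform cannot do 16M pages: mask the MMU feature, not a CPU one */
		cur_cpu_spec->mmu_features &= ~MMU_FTR_16M_PAGE;
	}
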
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c index 6590850045af..e3265adde5d3 100644 --- a/arch/powerpc/platforms/iseries/smp.c +++ b/arch/powerpc/platforms/iseries/smp.c | |||
@@ -42,57 +42,23 @@ | |||
42 | #include <asm/cputable.h> | 42 | #include <asm/cputable.h> |
43 | #include <asm/system.h> | 43 | #include <asm/system.h> |
44 | 44 | ||
45 | #include "smp.h" | 45 | static void smp_iSeries_cause_ipi(int cpu, unsigned long data) |
46 | |||
47 | static unsigned long iSeries_smp_message[NR_CPUS]; | ||
48 | |||
49 | void iSeries_smp_message_recv(void) | ||
50 | { | ||
51 | int cpu = smp_processor_id(); | ||
52 | int msg; | ||
53 | |||
54 | if (num_online_cpus() < 2) | ||
55 | return; | ||
56 | |||
57 | for (msg = 0; msg < 4; msg++) | ||
58 | if (test_and_clear_bit(msg, &iSeries_smp_message[cpu])) | ||
59 | smp_message_recv(msg); | ||
60 | } | ||
61 | |||
62 | static inline void smp_iSeries_do_message(int cpu, int msg) | ||
63 | { | 46 | { |
64 | set_bit(msg, &iSeries_smp_message[cpu]); | ||
65 | HvCall_sendIPI(&(paca[cpu])); | 47 | HvCall_sendIPI(&(paca[cpu])); |
66 | } | 48 | } |
67 | 49 | ||
68 | static void smp_iSeries_message_pass(int target, int msg) | ||
69 | { | ||
70 | int i; | ||
71 | |||
72 | if (target < NR_CPUS) | ||
73 | smp_iSeries_do_message(target, msg); | ||
74 | else { | ||
75 | for_each_online_cpu(i) { | ||
76 | if ((target == MSG_ALL_BUT_SELF) && | ||
77 | (i == smp_processor_id())) | ||
78 | continue; | ||
79 | smp_iSeries_do_message(i, msg); | ||
80 | } | ||
81 | } | ||
82 | } | ||
83 | |||
84 | static int smp_iSeries_probe(void) | 50 | static int smp_iSeries_probe(void) |
85 | { | 51 | { |
86 | return cpumask_weight(cpu_possible_mask); | 52 | return cpumask_weight(cpu_possible_mask); |
87 | } | 53 | } |
88 | 54 | ||
89 | static void smp_iSeries_kick_cpu(int nr) | 55 | static int smp_iSeries_kick_cpu(int nr) |
90 | { | 56 | { |
91 | BUG_ON((nr < 0) || (nr >= NR_CPUS)); | 57 | BUG_ON((nr < 0) || (nr >= NR_CPUS)); |
92 | 58 | ||
93 | /* Verify that our partition has a processor nr */ | 59 | /* Verify that our partition has a processor nr */ |
94 | if (lppaca[nr].dyn_proc_status >= 2) | 60 | if (lppaca_of(nr).dyn_proc_status >= 2) |
95 | return; | 61 | return -ENOENT; |
96 | 62 | ||
97 | /* The processor is currently spinning, waiting | 63 | /* The processor is currently spinning, waiting |
98 | * for the cpu_start field to become non-zero | 64 | * for the cpu_start field to become non-zero |
@@ -100,6 +66,8 @@ static void smp_iSeries_kick_cpu(int nr) | |||
100 | * continue on to secondary_start in iSeries_head.S | 66 | * continue on to secondary_start in iSeries_head.S |
101 | */ | 67 | */ |
102 | paca[nr].cpu_start = 1; | 68 | paca[nr].cpu_start = 1; |
69 | |||
70 | return 0; | ||
103 | } | 71 | } |
104 | 72 | ||
105 | static void __devinit smp_iSeries_setup_cpu(int nr) | 73 | static void __devinit smp_iSeries_setup_cpu(int nr) |
@@ -107,7 +75,8 @@ static void __devinit smp_iSeries_setup_cpu(int nr) | |||
107 | } | 75 | } |
108 | 76 | ||
109 | static struct smp_ops_t iSeries_smp_ops = { | 77 | static struct smp_ops_t iSeries_smp_ops = { |
110 | .message_pass = smp_iSeries_message_pass, | 78 | .message_pass = smp_muxed_ipi_message_pass, |
79 | .cause_ipi = smp_iSeries_cause_ipi, | ||
111 | .probe = smp_iSeries_probe, | 80 | .probe = smp_iSeries_probe, |
112 | .kick_cpu = smp_iSeries_kick_cpu, | 81 | .kick_cpu = smp_iSeries_kick_cpu, |
113 | .setup_cpu = smp_iSeries_setup_cpu, | 82 | .setup_cpu = smp_iSeries_setup_cpu, |
diff --git a/arch/powerpc/platforms/iseries/smp.h b/arch/powerpc/platforms/iseries/smp.h deleted file mode 100644 index d501f7de01e7..000000000000 --- a/arch/powerpc/platforms/iseries/smp.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef _PLATFORMS_ISERIES_SMP_H | ||
2 | #define _PLATFORMS_ISERIES_SMP_H | ||
3 | |||
4 | extern void iSeries_smp_message_recv(void); | ||
5 | |||
6 | #endif /* _PLATFORMS_ISERIES_SMP_H */ | ||
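
With PPC_SMP_MUXED_IPI selected (see the iSeries Kconfig hunk above), a platform no longer multiplexes IPI messages by hand: it points .message_pass at the generic smp_muxed_ipi_message_pass(), supplies a .cause_ipi hook that merely raises the one hardware IPI, and calls smp_ipi_demux() from its interrupt path, as iSeries_get_irq() now does. A hedged sketch of the platform side, where example_raise_hw_ipi() stands in for whatever register or hypervisor poke actually triggers the interrupt:

	#include <linux/smp.h>
	#include <asm/smp.h>

	static void example_raise_hw_ipi(int cpu)
	{
		/* hypothetical doorbell; on iSeries this is HvCall_sendIPI(&paca[cpu]) */
	}

	static void example_cause_ipi(int cpu, unsigned long data)
	{
		example_raise_hw_ipi(cpu);	/* message bits are handled generically */
	}

	static struct smp_ops_t example_smp_ops = {
		.message_pass	= smp_muxed_ipi_message_pass,	/* generic mux */
		.cause_ipi	= example_cause_ipi,
		/* .probe, .kick_cpu, .setup_cpu as usual for the platform */
	};

	/* called from the platform interrupt path when the IPI source fires */
	static void example_handle_ipi(void)
	{
		smp_ipi_demux();	/* dispatches all pending muxed messages */
	}

This is why smp.h and the per-cpu iSeries_smp_message bitmask can be deleted outright: the bookkeeping lives in generic code now.
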
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c index b5f05d943a90..2376069cdc14 100644 --- a/arch/powerpc/platforms/iseries/viopath.c +++ b/arch/powerpc/platforms/iseries/viopath.c | |||
@@ -396,7 +396,7 @@ static void vio_handleEvent(struct HvLpEvent *event) | |||
396 | viopathStatus[remoteLp].mTargetInst)) { | 396 | viopathStatus[remoteLp].mTargetInst)) { |
397 | printk(VIOPATH_KERN_WARN | 397 | printk(VIOPATH_KERN_WARN |
398 | "message from invalid partition. " | 398 | "message from invalid partition. " |
399 | "int msg rcvd, source inst (%d) doesnt match (%d)\n", | 399 | "int msg rcvd, source inst (%d) doesn't match (%d)\n", |
400 | viopathStatus[remoteLp].mTargetInst, | 400 | viopathStatus[remoteLp].mTargetInst, |
401 | event->xSourceInstanceId); | 401 | event->xSourceInstanceId); |
402 | return; | 402 | return; |
@@ -407,7 +407,7 @@ static void vio_handleEvent(struct HvLpEvent *event) | |||
407 | viopathStatus[remoteLp].mSourceInst)) { | 407 | viopathStatus[remoteLp].mSourceInst)) { |
408 | printk(VIOPATH_KERN_WARN | 408 | printk(VIOPATH_KERN_WARN |
409 | "message from invalid partition. " | 409 | "message from invalid partition. " |
410 | "int msg rcvd, target inst (%d) doesnt match (%d)\n", | 410 | "int msg rcvd, target inst (%d) doesn't match (%d)\n", |
411 | viopathStatus[remoteLp].mSourceInst, | 411 | viopathStatus[remoteLp].mSourceInst, |
412 | event->xTargetInstanceId); | 412 | event->xTargetInstanceId); |
413 | return; | 413 | return; |
@@ -418,7 +418,7 @@ static void vio_handleEvent(struct HvLpEvent *event) | |||
418 | viopathStatus[remoteLp].mSourceInst) { | 418 | viopathStatus[remoteLp].mSourceInst) { |
419 | printk(VIOPATH_KERN_WARN | 419 | printk(VIOPATH_KERN_WARN |
420 | "message from invalid partition. " | 420 | "message from invalid partition. " |
421 | "ack msg rcvd, source inst (%d) doesnt match (%d)\n", | 421 | "ack msg rcvd, source inst (%d) doesn't match (%d)\n", |
422 | viopathStatus[remoteLp].mSourceInst, | 422 | viopathStatus[remoteLp].mSourceInst, |
423 | event->xSourceInstanceId); | 423 | event->xSourceInstanceId); |
424 | return; | 424 | return; |
@@ -428,7 +428,7 @@ static void vio_handleEvent(struct HvLpEvent *event) | |||
428 | viopathStatus[remoteLp].mTargetInst) { | 428 | viopathStatus[remoteLp].mTargetInst) { |
429 | printk(VIOPATH_KERN_WARN | 429 | printk(VIOPATH_KERN_WARN |
430 | "message from invalid partition. " | 430 | "message from invalid partition. " |
431 | "viopath: ack msg rcvd, target inst (%d) doesnt match (%d)\n", | 431 | "viopath: ack msg rcvd, target inst (%d) doesn't match (%d)\n", |
432 | viopathStatus[remoteLp].mTargetInst, | 432 | viopathStatus[remoteLp].mTargetInst, |
433 | event->xTargetInstanceId); | 433 | event->xTargetInstanceId); |
434 | return; | 434 | return; |
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c index 04296ffff8bf..dd2e48b28508 100644 --- a/arch/powerpc/platforms/maple/pci.c +++ b/arch/powerpc/platforms/maple/pci.c | |||
@@ -498,7 +498,7 @@ void __devinit maple_pci_irq_fixup(struct pci_dev *dev) | |||
498 | printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n"); | 498 | printk(KERN_DEBUG "Fixup U4 PCIe IRQ\n"); |
499 | dev->irq = irq_create_mapping(NULL, 1); | 499 | dev->irq = irq_create_mapping(NULL, 1); |
500 | if (dev->irq != NO_IRQ) | 500 | if (dev->irq != NO_IRQ) |
501 | set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); | 501 | irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); |
502 | } | 502 | } |
503 | 503 | ||
504 | /* Hide AMD8111 IDE interrupt when in legacy mode so | 504 | /* Hide AMD8111 IDE interrupt when in legacy mode so |
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index 3fff8d979b41..fe34c3d9bb74 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c | |||
@@ -358,6 +358,7 @@ static int __init maple_cpc925_edac_setup(void) | |||
358 | model = (const unsigned char *)of_get_property(np, "model", NULL); | 358 | model = (const unsigned char *)of_get_property(np, "model", NULL); |
359 | if (!model) { | 359 | if (!model) { |
360 | printk(KERN_ERR "%s: Unabel to get model info\n", __func__); | 360 | printk(KERN_ERR "%s: Unabel to get model info\n", __func__); |
361 | of_node_put(np); | ||
361 | return -ENODEV; | 362 | return -ENODEV; |
362 | } | 363 | } |
363 | 364 | ||
diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c index 09695ae50f91..321a9b3a2d00 100644 --- a/arch/powerpc/platforms/pasemi/dma_lib.c +++ b/arch/powerpc/platforms/pasemi/dma_lib.c | |||
@@ -379,9 +379,9 @@ void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size, | |||
379 | } | 379 | } |
380 | EXPORT_SYMBOL(pasemi_dma_free_buf); | 380 | EXPORT_SYMBOL(pasemi_dma_free_buf); |
381 | 381 | ||
382 | /* pasemi_dma_alloc_flag - Allocate a flag (event) for channel syncronization | 382 | /* pasemi_dma_alloc_flag - Allocate a flag (event) for channel synchronization |
383 | * | 383 | * |
384 | * Allocates a flag for use with channel syncronization (event descriptors). | 384 | * Allocates a flag for use with channel synchronization (event descriptors). |
385 | * Returns allocated flag (0-63), < 0 on error. | 385 | * Returns allocated flag (0-63), < 0 on error. |
386 | */ | 386 | */ |
387 | int pasemi_dma_alloc_flag(void) | 387 | int pasemi_dma_alloc_flag(void) |
diff --git a/arch/powerpc/platforms/pasemi/gpio_mdio.c b/arch/powerpc/platforms/pasemi/gpio_mdio.c index a5d907b5a4c2..9886296e08da 100644 --- a/arch/powerpc/platforms/pasemi/gpio_mdio.c +++ b/arch/powerpc/platforms/pasemi/gpio_mdio.c | |||
@@ -216,8 +216,7 @@ static int gpio_mdio_reset(struct mii_bus *bus) | |||
216 | } | 216 | } |
217 | 217 | ||
218 | 218 | ||
219 | static int __devinit gpio_mdio_probe(struct platform_device *ofdev, | 219 | static int __devinit gpio_mdio_probe(struct platform_device *ofdev) |
220 | const struct of_device_id *match) | ||
221 | { | 220 | { |
222 | struct device *dev = &ofdev->dev; | 221 | struct device *dev = &ofdev->dev; |
223 | struct device_node *np = ofdev->dev.of_node; | 222 | struct device_node *np = ofdev->dev.of_node; |
@@ -299,7 +298,7 @@ static struct of_device_id gpio_mdio_match[] = | |||
299 | }; | 298 | }; |
300 | MODULE_DEVICE_TABLE(of, gpio_mdio_match); | 299 | MODULE_DEVICE_TABLE(of, gpio_mdio_match); |
301 | 300 | ||
302 | static struct of_platform_driver gpio_mdio_driver = | 301 | static struct platform_driver gpio_mdio_driver = |
303 | { | 302 | { |
304 | .probe = gpio_mdio_probe, | 303 | .probe = gpio_mdio_probe, |
305 | .remove = gpio_mdio_remove, | 304 | .remove = gpio_mdio_remove, |
@@ -326,13 +325,13 @@ int gpio_mdio_init(void) | |||
326 | if (!gpio_regs) | 325 | if (!gpio_regs) |
327 | return -ENODEV; | 326 | return -ENODEV; |
328 | 327 | ||
329 | return of_register_platform_driver(&gpio_mdio_driver); | 328 | return platform_driver_register(&gpio_mdio_driver); |
330 | } | 329 | } |
331 | module_init(gpio_mdio_init); | 330 | module_init(gpio_mdio_init); |
332 | 331 | ||
333 | void gpio_mdio_exit(void) | 332 | void gpio_mdio_exit(void) |
334 | { | 333 | { |
335 | of_unregister_platform_driver(&gpio_mdio_driver); | 334 | platform_driver_unregister(&gpio_mdio_driver); |
336 | if (gpio_regs) | 335 | if (gpio_regs) |
337 | iounmap(gpio_regs); | 336 | iounmap(gpio_regs); |
338 | } | 337 | } |
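
gpio_mdio is one of many drivers in this series converted from the old of_platform_driver to a plain platform_driver: the probe callback loses its const struct of_device_id *match argument, and registration goes through platform_driver_register()/platform_driver_unregister(). A stripped-down hedged sketch of the converted shape (compatible string and driver name are placeholders):

	#include <linux/errno.h>
	#include <linux/module.h>
	#include <linux/of.h>
	#include <linux/platform_device.h>

	static int __devinit example_probe(struct platform_device *ofdev)
	{
		struct device_node *np = ofdev->dev.of_node;	/* OF node still available */

		return np ? 0 : -ENODEV;
	}

	static int __devexit example_remove(struct platform_device *ofdev)
	{
		return 0;
	}

	static struct of_device_id example_match[] = {
		{ .compatible = "example,device", },	/* placeholder compatible */
		{},
	};
	MODULE_DEVICE_TABLE(of, example_match);

	static struct platform_driver example_driver = {
		.probe	= example_probe,
		.remove	= example_remove,
		.driver	= {
			.name		= "example-driver",
			.owner		= THIS_MODULE,
			.of_match_table	= example_match,
		},
	};

	static int __init example_init(void)
	{
		return platform_driver_register(&example_driver);
	}
	module_init(example_init);

	static void __exit example_exit(void)
	{
		platform_driver_unregister(&example_driver);
	}
	module_exit(example_exit);
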
diff --git a/arch/powerpc/platforms/pasemi/iommu.c b/arch/powerpc/platforms/pasemi/iommu.c index 1f9fb2c57761..14943ef01918 100644 --- a/arch/powerpc/platforms/pasemi/iommu.c +++ b/arch/powerpc/platforms/pasemi/iommu.c | |||
@@ -156,20 +156,12 @@ static void iommu_table_iobmap_setup(void) | |||
156 | 156 | ||
157 | static void pci_dma_bus_setup_pasemi(struct pci_bus *bus) | 157 | static void pci_dma_bus_setup_pasemi(struct pci_bus *bus) |
158 | { | 158 | { |
159 | struct device_node *dn; | ||
160 | |||
161 | pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self); | 159 | pr_debug("pci_dma_bus_setup, bus %p, bus->self %p\n", bus, bus->self); |
162 | 160 | ||
163 | if (!iommu_table_iobmap_inited) { | 161 | if (!iommu_table_iobmap_inited) { |
164 | iommu_table_iobmap_inited = 1; | 162 | iommu_table_iobmap_inited = 1; |
165 | iommu_table_iobmap_setup(); | 163 | iommu_table_iobmap_setup(); |
166 | } | 164 | } |
167 | |||
168 | dn = pci_bus_to_OF_node(bus); | ||
169 | |||
170 | if (dn) | ||
171 | PCI_DN(dn)->iommu_table = &iommu_table_iobmap; | ||
172 | |||
173 | } | 165 | } |
174 | 166 | ||
175 | 167 | ||
@@ -192,9 +184,6 @@ static void pci_dma_dev_setup_pasemi(struct pci_dev *dev) | |||
192 | set_iommu_table_base(&dev->dev, &iommu_table_iobmap); | 184 | set_iommu_table_base(&dev->dev, &iommu_table_iobmap); |
193 | } | 185 | } |
194 | 186 | ||
195 | static void pci_dma_bus_setup_null(struct pci_bus *b) { } | ||
196 | static void pci_dma_dev_setup_null(struct pci_dev *d) { } | ||
197 | |||
198 | int __init iob_init(struct device_node *dn) | 187 | int __init iob_init(struct device_node *dn) |
199 | { | 188 | { |
200 | unsigned long tmp; | 189 | unsigned long tmp; |
@@ -251,14 +240,8 @@ void __init iommu_init_early_pasemi(void) | |||
251 | iommu_off = of_chosen && | 240 | iommu_off = of_chosen && |
252 | of_get_property(of_chosen, "linux,iommu-off", NULL); | 241 | of_get_property(of_chosen, "linux,iommu-off", NULL); |
253 | #endif | 242 | #endif |
254 | if (iommu_off) { | 243 | if (iommu_off) |
255 | /* Direct I/O, IOMMU off */ | ||
256 | ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_null; | ||
257 | ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_null; | ||
258 | set_pci_dma_ops(&dma_direct_ops); | ||
259 | |||
260 | return; | 244 | return; |
261 | } | ||
262 | 245 | ||
263 | iob_init(NULL); | 246 | iob_init(NULL); |
264 | 247 | ||
diff --git a/arch/powerpc/platforms/pasemi/setup.c b/arch/powerpc/platforms/pasemi/setup.c index f372ec1691a3..7c858e6f843c 100644 --- a/arch/powerpc/platforms/pasemi/setup.c +++ b/arch/powerpc/platforms/pasemi/setup.c | |||
@@ -239,8 +239,8 @@ static __init void pas_init_IRQ(void) | |||
239 | if (nmiprop) { | 239 | if (nmiprop) { |
240 | nmi_virq = irq_create_mapping(NULL, *nmiprop); | 240 | nmi_virq = irq_create_mapping(NULL, *nmiprop); |
241 | mpic_irq_set_priority(nmi_virq, 15); | 241 | mpic_irq_set_priority(nmi_virq, 15); |
242 | set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING); | 242 | irq_set_irq_type(nmi_virq, IRQ_TYPE_EDGE_RISING); |
243 | mpic_unmask_irq(nmi_virq); | 243 | mpic_unmask_irq(irq_get_irq_data(nmi_virq)); |
244 | } | 244 | } |
245 | 245 | ||
246 | of_node_put(mpic_node); | 246 | of_node_put(mpic_node); |
@@ -266,7 +266,7 @@ static int pas_machine_check_handler(struct pt_regs *regs) | |||
266 | if (nmi_virq != NO_IRQ && mpic_get_mcirq() == nmi_virq) { | 266 | if (nmi_virq != NO_IRQ && mpic_get_mcirq() == nmi_virq) { |
267 | printk(KERN_ERR "NMI delivered\n"); | 267 | printk(KERN_ERR "NMI delivered\n"); |
268 | debugger(regs); | 268 | debugger(regs); |
269 | mpic_end_irq(nmi_virq); | 269 | mpic_end_irq(irq_get_irq_data(nmi_virq)); |
270 | goto out; | 270 | goto out; |
271 | } | 271 | } |
272 | 272 | ||
diff --git a/arch/powerpc/platforms/powermac/Kconfig b/arch/powerpc/platforms/powermac/Kconfig index 1e1a0873e1dd..1afd10f67858 100644 --- a/arch/powerpc/platforms/powermac/Kconfig +++ b/arch/powerpc/platforms/powermac/Kconfig | |||
@@ -18,4 +18,13 @@ config PPC_PMAC64 | |||
18 | select PPC_970_NAP | 18 | select PPC_970_NAP |
19 | default y | 19 | default y |
20 | 20 | ||
21 | 21 | config PPC_PMAC32_PSURGE | |
22 | bool "Support for powersurge upgrade cards" if EXPERT | ||
23 | depends on SMP && PPC32 && PPC_PMAC | ||
24 | select PPC_SMP_MUXED_IPI | ||
25 | default y | ||
26 | help | ||
27 | The powersurge cpu boards can be used in the generation | ||
28 | of powermacs that have a socket for an upgradeable cpu card, | ||
29 | including the 7500, 8500, 9500, 9600. Support exists for | ||
30 | both dual and quad socket upgrade cards. | ||
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile index 50f169392551..ea47df66fee5 100644 --- a/arch/powerpc/platforms/powermac/Makefile +++ b/arch/powerpc/platforms/powermac/Makefile | |||
@@ -11,7 +11,7 @@ obj-y += pic.o setup.o time.o feature.o pci.o \ | |||
11 | obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o | 11 | obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o |
12 | obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o | 12 | obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o |
13 | obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o | 13 | obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o |
14 | # CONFIG_NVRAM is an arch. independant tristate symbol, for pmac32 we really | 14 | # CONFIG_NVRAM is an arch. independent tristate symbol, for pmac32 we really |
15 | # need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really | 15 | # need this to be a bool. Cheat here and pretend CONFIG_NVRAM=m is really |
16 | # CONFIG_NVRAM=y | 16 | # CONFIG_NVRAM=y |
17 | obj-$(CONFIG_NVRAM:m=y) += nvram.o | 17 | obj-$(CONFIG_NVRAM:m=y) += nvram.o |
diff --git a/arch/powerpc/platforms/powermac/cpufreq_32.c b/arch/powerpc/platforms/powermac/cpufreq_32.c index 415ca6d6b273..04af5f48b4eb 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_32.c +++ b/arch/powerpc/platforms/powermac/cpufreq_32.c | |||
@@ -429,7 +429,7 @@ static u32 read_gpio(struct device_node *np) | |||
429 | return offset; | 429 | return offset; |
430 | } | 430 | } |
431 | 431 | ||
432 | static int pmac_cpufreq_suspend(struct cpufreq_policy *policy, pm_message_t pmsg) | 432 | static int pmac_cpufreq_suspend(struct cpufreq_policy *policy) |
433 | { | 433 | { |
434 | /* Ok, this could be made a bit smarter, but let's be robust for now. We | 434 | /* Ok, this could be made a bit smarter, but let's be robust for now. We |
435 | * always force a speed change to high speed before sleep, to make sure | 435 | * always force a speed change to high speed before sleep, to make sure |
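
The cpufreq_32.c change tracks a core cpufreq interface change: the driver ->suspend callback no longer receives a pm_message_t, only the policy. Sketched against the new prototype (the body is a placeholder; the real powermac handler forces the high speed before sleep):

	#include <linux/cpufreq.h>

	static int example_cpufreq_suspend(struct cpufreq_policy *policy)
	{
		/* placeholder: switch to a safe frequency before the machine sleeps */
		return 0;
	}

	static struct cpufreq_driver example_cpufreq_driver = {
		.name		= "example-cpufreq",
		.suspend	= example_cpufreq_suspend,
		/* .verify, .target, .init etc. omitted in this sketch */
	};
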
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index 480567e5fa9a..e9c8a607268e 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c | |||
@@ -904,7 +904,7 @@ static void __init smu_i2c_probe(void) | |||
904 | printk(KERN_INFO "SMU i2c %s\n", controller->full_name); | 904 | printk(KERN_INFO "SMU i2c %s\n", controller->full_name); |
905 | 905 | ||
906 | /* Look for childs, note that they might not be of the right | 906 | /* Look for childs, note that they might not be of the right |
907 | * type as older device trees mix i2c busses and other thigns | 907 | * type as older device trees mix i2c busses and other things |
908 | * at the same level | 908 | * at the same level |
909 | */ | 909 | */ |
910 | for (busnode = NULL; | 910 | for (busnode = NULL; |
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index 3bc075c788ef..f33e08d573ce 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c | |||
@@ -299,7 +299,7 @@ static void __init setup_chaos(struct pci_controller *hose, | |||
299 | * This function deals with some "special cases" devices. | 299 | * This function deals with some "special cases" devices. |
300 | * | 300 | * |
301 | * 0 -> No special case | 301 | * 0 -> No special case |
302 | * 1 -> Skip the device but act as if the access was successfull | 302 | * 1 -> Skip the device but act as if the access was successful |
303 | * (return 0xff's on reads, eventually, cache config space | 303 | * (return 0xff's on reads, eventually, cache config space |
304 | * accesses in a later version) | 304 | * accesses in a later version) |
305 | * -1 -> Hide the device (unsuccessful access) | 305 | * -1 -> Hide the device (unsuccessful access) |
@@ -988,7 +988,7 @@ void __devinit pmac_pci_irq_fixup(struct pci_dev *dev) | |||
988 | dev->vendor == PCI_VENDOR_ID_DEC && | 988 | dev->vendor == PCI_VENDOR_ID_DEC && |
989 | dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) { | 989 | dev->device == PCI_DEVICE_ID_DEC_TULIP_PLUS) { |
990 | dev->irq = irq_create_mapping(NULL, 60); | 990 | dev->irq = irq_create_mapping(NULL, 60); |
991 | set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); | 991 | irq_set_irq_type(dev->irq, IRQ_TYPE_LEVEL_LOW); |
992 | } | 992 | } |
993 | #endif /* CONFIG_PPC32 */ | 993 | #endif /* CONFIG_PPC32 */ |
994 | } | 994 | } |
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c index cec635942657..b0c3777528a1 100644 --- a/arch/powerpc/platforms/powermac/pfunc_core.c +++ b/arch/powerpc/platforms/powermac/pfunc_core.c | |||
@@ -837,8 +837,10 @@ struct pmf_function *__pmf_find_function(struct device_node *target, | |||
837 | return NULL; | 837 | return NULL; |
838 | find_it: | 838 | find_it: |
839 | dev = pmf_find_device(actor); | 839 | dev = pmf_find_device(actor); |
840 | if (dev == NULL) | 840 | if (dev == NULL) { |
841 | return NULL; | 841 | result = NULL; |
842 | goto out; | ||
843 | } | ||
842 | 844 | ||
843 | list_for_each_entry(func, &dev->functions, link) { | 845 | list_for_each_entry(func, &dev->functions, link) { |
844 | if (name && strcmp(name, func->name)) | 846 | if (name && strcmp(name, func->name)) |
@@ -850,8 +852,9 @@ struct pmf_function *__pmf_find_function(struct device_node *target, | |||
850 | result = func; | 852 | result = func; |
851 | break; | 853 | break; |
852 | } | 854 | } |
853 | of_node_put(actor); | ||
854 | pmf_put_device(dev); | 855 | pmf_put_device(dev); |
856 | out: | ||
857 | of_node_put(actor); | ||
855 | return result; | 858 | return result; |
856 | } | 859 | } |
857 | 860 | ||
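The pfunc_core.c hunks reroute the dev == NULL early exit through a common out: label so the of_node reference taken on "actor" is dropped on every path; the previous bare return appears to have leaked it. A condensed sketch of the pattern, with the lookup details elided and the helper names borrowed from the file:

#include <linux/of.h>

/* Sketch only: the shape of the fixed lookup, not the full function. */
static struct pmf_function *find_function_sketch(struct device_node *actor,
						 const char *name)
{
	struct pmf_function *result = NULL;
	struct pmf_device *dev;

	dev = pmf_find_device(actor);	/* takes a reference on the device */
	if (dev == NULL)
		goto out;	/* a bare "return NULL" would skip of_node_put() */

	/* ... walk dev->functions and set 'result' on a match ... */

	pmf_put_device(dev);
out:
	of_node_put(actor);	/* now balanced on every exit path */
	return result;
}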
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 890d5f72b198..7667db448aa7 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c | |||
@@ -21,7 +21,7 @@ | |||
21 | #include <linux/signal.h> | 21 | #include <linux/signal.h> |
22 | #include <linux/pci.h> | 22 | #include <linux/pci.h> |
23 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
24 | #include <linux/sysdev.h> | 24 | #include <linux/syscore_ops.h> |
25 | #include <linux/adb.h> | 25 | #include <linux/adb.h> |
26 | #include <linux/pmu.h> | 26 | #include <linux/pmu.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
@@ -82,9 +82,9 @@ static void __pmac_retrigger(unsigned int irq_nr) | |||
82 | } | 82 | } |
83 | } | 83 | } |
84 | 84 | ||
85 | static void pmac_mask_and_ack_irq(unsigned int virq) | 85 | static void pmac_mask_and_ack_irq(struct irq_data *d) |
86 | { | 86 | { |
87 | unsigned int src = irq_map[virq].hwirq; | 87 | unsigned int src = irqd_to_hwirq(d); |
88 | unsigned long bit = 1UL << (src & 0x1f); | 88 | unsigned long bit = 1UL << (src & 0x1f); |
89 | int i = src >> 5; | 89 | int i = src >> 5; |
90 | unsigned long flags; | 90 | unsigned long flags; |
@@ -104,9 +104,9 @@ static void pmac_mask_and_ack_irq(unsigned int virq) | |||
104 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); | 104 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); |
105 | } | 105 | } |
106 | 106 | ||
107 | static void pmac_ack_irq(unsigned int virq) | 107 | static void pmac_ack_irq(struct irq_data *d) |
108 | { | 108 | { |
109 | unsigned int src = irq_map[virq].hwirq; | 109 | unsigned int src = irqd_to_hwirq(d); |
110 | unsigned long bit = 1UL << (src & 0x1f); | 110 | unsigned long bit = 1UL << (src & 0x1f); |
111 | int i = src >> 5; | 111 | int i = src >> 5; |
112 | unsigned long flags; | 112 | unsigned long flags; |
@@ -149,15 +149,15 @@ static void __pmac_set_irq_mask(unsigned int irq_nr, int nokicklost) | |||
149 | /* When an irq gets requested for the first client, if it's an | 149 | /* When an irq gets requested for the first client, if it's an |
150 | * edge interrupt, we clear any previous one on the controller | 150 | * edge interrupt, we clear any previous one on the controller |
151 | */ | 151 | */ |
152 | static unsigned int pmac_startup_irq(unsigned int virq) | 152 | static unsigned int pmac_startup_irq(struct irq_data *d) |
153 | { | 153 | { |
154 | unsigned long flags; | 154 | unsigned long flags; |
155 | unsigned int src = irq_map[virq].hwirq; | 155 | unsigned int src = irqd_to_hwirq(d); |
156 | unsigned long bit = 1UL << (src & 0x1f); | 156 | unsigned long bit = 1UL << (src & 0x1f); |
157 | int i = src >> 5; | 157 | int i = src >> 5; |
158 | 158 | ||
159 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); | 159 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); |
160 | if ((irq_to_desc(virq)->status & IRQ_LEVEL) == 0) | 160 | if (!irqd_is_level_type(d)) |
161 | out_le32(&pmac_irq_hw[i]->ack, bit); | 161 | out_le32(&pmac_irq_hw[i]->ack, bit); |
162 | __set_bit(src, ppc_cached_irq_mask); | 162 | __set_bit(src, ppc_cached_irq_mask); |
163 | __pmac_set_irq_mask(src, 0); | 163 | __pmac_set_irq_mask(src, 0); |
@@ -166,10 +166,10 @@ static unsigned int pmac_startup_irq(unsigned int virq) | |||
166 | return 0; | 166 | return 0; |
167 | } | 167 | } |
168 | 168 | ||
169 | static void pmac_mask_irq(unsigned int virq) | 169 | static void pmac_mask_irq(struct irq_data *d) |
170 | { | 170 | { |
171 | unsigned long flags; | 171 | unsigned long flags; |
172 | unsigned int src = irq_map[virq].hwirq; | 172 | unsigned int src = irqd_to_hwirq(d); |
173 | 173 | ||
174 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); | 174 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); |
175 | __clear_bit(src, ppc_cached_irq_mask); | 175 | __clear_bit(src, ppc_cached_irq_mask); |
@@ -177,10 +177,10 @@ static void pmac_mask_irq(unsigned int virq) | |||
177 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); | 177 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); |
178 | } | 178 | } |
179 | 179 | ||
180 | static void pmac_unmask_irq(unsigned int virq) | 180 | static void pmac_unmask_irq(struct irq_data *d) |
181 | { | 181 | { |
182 | unsigned long flags; | 182 | unsigned long flags; |
183 | unsigned int src = irq_map[virq].hwirq; | 183 | unsigned int src = irqd_to_hwirq(d); |
184 | 184 | ||
185 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); | 185 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); |
186 | __set_bit(src, ppc_cached_irq_mask); | 186 | __set_bit(src, ppc_cached_irq_mask); |
@@ -188,24 +188,24 @@ static void pmac_unmask_irq(unsigned int virq) | |||
188 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); | 188 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); |
189 | } | 189 | } |
190 | 190 | ||
191 | static int pmac_retrigger(unsigned int virq) | 191 | static int pmac_retrigger(struct irq_data *d) |
192 | { | 192 | { |
193 | unsigned long flags; | 193 | unsigned long flags; |
194 | 194 | ||
195 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); | 195 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); |
196 | __pmac_retrigger(irq_map[virq].hwirq); | 196 | __pmac_retrigger(irqd_to_hwirq(d)); |
197 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); | 197 | raw_spin_unlock_irqrestore(&pmac_pic_lock, flags); |
198 | return 1; | 198 | return 1; |
199 | } | 199 | } |
200 | 200 | ||
201 | static struct irq_chip pmac_pic = { | 201 | static struct irq_chip pmac_pic = { |
202 | .name = "PMAC-PIC", | 202 | .name = "PMAC-PIC", |
203 | .startup = pmac_startup_irq, | 203 | .irq_startup = pmac_startup_irq, |
204 | .mask = pmac_mask_irq, | 204 | .irq_mask = pmac_mask_irq, |
205 | .ack = pmac_ack_irq, | 205 | .irq_ack = pmac_ack_irq, |
206 | .mask_ack = pmac_mask_and_ack_irq, | 206 | .irq_mask_ack = pmac_mask_and_ack_irq, |
207 | .unmask = pmac_unmask_irq, | 207 | .irq_unmask = pmac_unmask_irq, |
208 | .retrigger = pmac_retrigger, | 208 | .irq_retrigger = pmac_retrigger, |
209 | }; | 209 | }; |
210 | 210 | ||
211 | static irqreturn_t gatwick_action(int cpl, void *dev_id) | 211 | static irqreturn_t gatwick_action(int cpl, void *dev_id) |
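The pic.c hunks above convert the Apple PIC to the struct irq_data based irq_chip interface: callbacks receive the irq_data instead of a bare virq, the hardware number comes from irqd_to_hwirq(), and level/edge state is queried with irqd_is_level_type(). A minimal sketch of the converted callback shape for a hypothetical "foo" controller (illustrative only, not the pmac code):

#include <linux/irq.h>

static void foo_mask_irq(struct irq_data *d)
{
	unsigned int hw = irqd_to_hwirq(d);	/* hardware irq number, not the virq */

	/* mask bit 'hw' in the controller's mask register here */
}

static void foo_unmask_irq(struct irq_data *d)
{
	unsigned int hw = irqd_to_hwirq(d);

	/* unmask bit 'hw' here */
}

static struct irq_chip foo_pic = {
	.name		= "FOO-PIC",
	.irq_mask	= foo_mask_irq,		/* was .mask(unsigned int virq) */
	.irq_unmask	= foo_unmask_irq,	/* was .unmask(unsigned int virq) */
};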
@@ -239,15 +239,12 @@ static unsigned int pmac_pic_get_irq(void) | |||
239 | unsigned long bits = 0; | 239 | unsigned long bits = 0; |
240 | unsigned long flags; | 240 | unsigned long flags; |
241 | 241 | ||
242 | #ifdef CONFIG_SMP | 242 | #ifdef CONFIG_PPC_PMAC32_PSURGE |
243 | void psurge_smp_message_recv(void); | 243 | /* IPI's are a hack on the powersurge -- Cort */ |
244 | 244 | if (smp_processor_id() != 0) { | |
245 | /* IPI's are a hack on the powersurge -- Cort */ | 245 | return psurge_secondary_virq; |
246 | if ( smp_processor_id() != 0 ) { | ||
247 | psurge_smp_message_recv(); | ||
248 | return NO_IRQ_IGNORE; /* ignore, already handled */ | ||
249 | } | 246 | } |
250 | #endif /* CONFIG_SMP */ | 247 | #endif /* CONFIG_PPC_PMAC32_PSURGE */ |
251 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); | 248 | raw_spin_lock_irqsave(&pmac_pic_lock, flags); |
252 | for (irq = max_real_irqs; (irq -= 32) >= 0; ) { | 249 | for (irq = max_real_irqs; (irq -= 32) >= 0; ) { |
253 | int i = irq >> 5; | 250 | int i = irq >> 5; |
@@ -289,7 +286,6 @@ static int pmac_pic_host_match(struct irq_host *h, struct device_node *node) | |||
289 | static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, | 286 | static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, |
290 | irq_hw_number_t hw) | 287 | irq_hw_number_t hw) |
291 | { | 288 | { |
292 | struct irq_desc *desc = irq_to_desc(virq); | ||
293 | int level; | 289 | int level; |
294 | 290 | ||
295 | if (hw >= max_irqs) | 291 | if (hw >= max_irqs) |
@@ -300,9 +296,9 @@ static int pmac_pic_host_map(struct irq_host *h, unsigned int virq, | |||
300 | */ | 296 | */ |
301 | level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f))); | 297 | level = !!(level_mask[hw >> 5] & (1UL << (hw & 0x1f))); |
302 | if (level) | 298 | if (level) |
303 | desc->status |= IRQ_LEVEL; | 299 | irq_set_status_flags(virq, IRQ_LEVEL); |
304 | set_irq_chip_and_handler(virq, &pmac_pic, level ? | 300 | irq_set_chip_and_handler(virq, &pmac_pic, |
305 | handle_level_irq : handle_edge_irq); | 301 | level ? handle_level_irq : handle_edge_irq); |
306 | return 0; | 302 | return 0; |
307 | } | 303 | } |
308 | 304 | ||
@@ -472,12 +468,14 @@ int of_irq_map_oldworld(struct device_node *device, int index, | |||
472 | 468 | ||
473 | static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc) | 469 | static void pmac_u3_cascade(unsigned int irq, struct irq_desc *desc) |
474 | { | 470 | { |
475 | struct mpic *mpic = desc->handler_data; | 471 | struct irq_chip *chip = irq_desc_get_chip(desc); |
476 | 472 | struct mpic *mpic = irq_desc_get_handler_data(desc); | |
477 | unsigned int cascade_irq = mpic_get_one_irq(mpic); | 473 | unsigned int cascade_irq = mpic_get_one_irq(mpic); |
474 | |||
478 | if (cascade_irq != NO_IRQ) | 475 | if (cascade_irq != NO_IRQ) |
479 | generic_handle_irq(cascade_irq); | 476 | generic_handle_irq(cascade_irq); |
480 | desc->chip->eoi(irq); | 477 | |
478 | chip->irq_eoi(&desc->irq_data); | ||
481 | } | 479 | } |
482 | 480 | ||
483 | static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) | 481 | static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) |
@@ -589,8 +587,8 @@ static int __init pmac_pic_probe_mpic(void) | |||
589 | of_node_put(slave); | 587 | of_node_put(slave); |
590 | return 0; | 588 | return 0; |
591 | } | 589 | } |
592 | set_irq_data(cascade, mpic2); | 590 | irq_set_handler_data(cascade, mpic2); |
593 | set_irq_chained_handler(cascade, pmac_u3_cascade); | 591 | irq_set_chained_handler(cascade, pmac_u3_cascade); |
594 | 592 | ||
595 | of_node_put(slave); | 593 | of_node_put(slave); |
596 | return 0; | 594 | return 0; |
@@ -676,7 +674,7 @@ not_found: | |||
676 | return viaint; | 674 | return viaint; |
677 | } | 675 | } |
678 | 676 | ||
679 | static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state) | 677 | static int pmacpic_suspend(void) |
680 | { | 678 | { |
681 | int viaint = pmacpic_find_viaint(); | 679 | int viaint = pmacpic_find_viaint(); |
682 | 680 | ||
@@ -697,7 +695,7 @@ static int pmacpic_suspend(struct sys_device *sysdev, pm_message_t state) | |||
697 | return 0; | 695 | return 0; |
698 | } | 696 | } |
699 | 697 | ||
700 | static int pmacpic_resume(struct sys_device *sysdev) | 698 | static void pmacpic_resume(void) |
701 | { | 699 | { |
702 | int i; | 700 | int i; |
703 | 701 | ||
@@ -707,40 +705,21 @@ static int pmacpic_resume(struct sys_device *sysdev) | |||
707 | mb(); | 705 | mb(); |
708 | for (i = 0; i < max_real_irqs; ++i) | 706 | for (i = 0; i < max_real_irqs; ++i) |
709 | if (test_bit(i, sleep_save_mask)) | 707 | if (test_bit(i, sleep_save_mask)) |
710 | pmac_unmask_irq(i); | 708 | pmac_unmask_irq(irq_get_irq_data(i)); |
711 | |||
712 | return 0; | ||
713 | } | 709 | } |
714 | 710 | ||
715 | #endif /* CONFIG_PM && CONFIG_PPC32 */ | 711 | static struct syscore_ops pmacpic_syscore_ops = { |
716 | 712 | .suspend = pmacpic_suspend, | |
717 | static struct sysdev_class pmacpic_sysclass = { | 713 | .resume = pmacpic_resume, |
718 | .name = "pmac_pic", | ||
719 | }; | ||
720 | |||
721 | static struct sys_device device_pmacpic = { | ||
722 | .id = 0, | ||
723 | .cls = &pmacpic_sysclass, | ||
724 | }; | 714 | }; |
725 | 715 | ||
726 | static struct sysdev_driver driver_pmacpic = { | 716 | static int __init init_pmacpic_syscore(void) |
727 | #if defined(CONFIG_PM) && defined(CONFIG_PPC32) | ||
728 | .suspend = &pmacpic_suspend, | ||
729 | .resume = &pmacpic_resume, | ||
730 | #endif /* CONFIG_PM && CONFIG_PPC32 */ | ||
731 | }; | ||
732 | |||
733 | static int __init init_pmacpic_sysfs(void) | ||
734 | { | 717 | { |
735 | #ifdef CONFIG_PPC32 | 718 | if (pmac_irq_hw[0]) |
736 | if (max_irqs == 0) | 719 | register_syscore_ops(&pmacpic_syscore_ops); |
737 | return -ENODEV; | ||
738 | #endif | ||
739 | printk(KERN_DEBUG "Registering pmac pic with sysfs...\n"); | ||
740 | sysdev_class_register(&pmacpic_sysclass); | ||
741 | sysdev_register(&device_pmacpic); | ||
742 | sysdev_driver_register(&pmacpic_sysclass, &driver_pmacpic); | ||
743 | return 0; | 720 | return 0; |
744 | } | 721 | } |
745 | machine_subsys_initcall(powermac, init_pmacpic_sysfs); | ||
746 | 722 | ||
723 | machine_subsys_initcall(powermac, init_pmacpic_syscore); | ||
724 | |||
725 | #endif /* CONFIG_PM && CONFIG_PPC32 */ | ||
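The suspend/resume path in pic.c also moves from the old sysdev class/device/driver boilerplate to syscore_ops: a single global suspend/resume pair registered with register_syscore_ops() and called with interrupts disabled on the boot CPU. A hedged sketch of that pattern with made-up names:

#include <linux/syscore_ops.h>

static int foo_pic_suspend(void)
{
	/* save controller state; runs late, with interrupts disabled */
	return 0;
}

static void foo_pic_resume(void)
{
	/* restore controller state */
}

static struct syscore_ops foo_pic_syscore_ops = {
	.suspend = foo_pic_suspend,
	.resume  = foo_pic_resume,
};

static int __init foo_pic_init_syscore(void)
{
	register_syscore_ops(&foo_pic_syscore_ops);
	return 0;
}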
diff --git a/arch/powerpc/platforms/powermac/pic.h b/arch/powerpc/platforms/powermac/pic.h deleted file mode 100644 index d622a8345aaa..000000000000 --- a/arch/powerpc/platforms/powermac/pic.h +++ /dev/null | |||
@@ -1,11 +0,0 @@ | |||
1 | #ifndef __PPC_PLATFORMS_PMAC_PIC_H | ||
2 | #define __PPC_PLATFORMS_PMAC_PIC_H | ||
3 | |||
4 | #include <linux/irq.h> | ||
5 | |||
6 | extern struct irq_chip pmac_pic; | ||
7 | |||
8 | extern void pmac_pic_init(void); | ||
9 | extern int pmac_get_irq(void); | ||
10 | |||
11 | #endif /* __PPC_PLATFORMS_PMAC_PIC_H */ | ||
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h index f0bc08f6c1f0..8327cce2bdb0 100644 --- a/arch/powerpc/platforms/powermac/pmac.h +++ b/arch/powerpc/platforms/powermac/pmac.h | |||
@@ -33,7 +33,7 @@ extern void pmac_setup_pci_dma(void); | |||
33 | extern void pmac_check_ht_link(void); | 33 | extern void pmac_check_ht_link(void); |
34 | 34 | ||
35 | extern void pmac_setup_smp(void); | 35 | extern void pmac_setup_smp(void); |
36 | extern void pmac32_cpu_die(void); | 36 | extern int psurge_secondary_virq; |
37 | extern void low_cpu_die(void) __attribute__((noreturn)); | 37 | extern void low_cpu_die(void) __attribute__((noreturn)); |
38 | 38 | ||
39 | extern int pmac_nvram_init(void); | 39 | extern int pmac_nvram_init(void); |
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 9deb274841f1..aa45281bd296 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c | |||
@@ -506,6 +506,15 @@ static int __init pmac_declare_of_platform_devices(void) | |||
506 | of_platform_device_create(np, "smu", NULL); | 506 | of_platform_device_create(np, "smu", NULL); |
507 | of_node_put(np); | 507 | of_node_put(np); |
508 | } | 508 | } |
509 | np = of_find_node_by_type(NULL, "fcu"); | ||
510 | if (np == NULL) { | ||
511 | /* Some machines have strangely broken device-tree */ | ||
512 | np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e"); | ||
513 | } | ||
514 | if (np) { | ||
515 | of_platform_device_create(np, "temperature", NULL); | ||
516 | of_node_put(np); | ||
517 | } | ||
509 | 518 | ||
510 | return 0; | 519 | return 0; |
511 | } | 520 | } |
@@ -641,51 +650,6 @@ static int pmac_pci_probe_mode(struct pci_bus *bus) | |||
641 | return PCI_PROBE_NORMAL; | 650 | return PCI_PROBE_NORMAL; |
642 | return PCI_PROBE_DEVTREE; | 651 | return PCI_PROBE_DEVTREE; |
643 | } | 652 | } |
644 | |||
645 | #ifdef CONFIG_HOTPLUG_CPU | ||
646 | /* access per cpu vars from generic smp.c */ | ||
647 | DECLARE_PER_CPU(int, cpu_state); | ||
648 | |||
649 | static void pmac64_cpu_die(void) | ||
650 | { | ||
651 | /* | ||
652 | * turn off as much as possible, we'll be | ||
653 | * kicked out as this will only be invoked | ||
654 | * on core99 platforms for now ... | ||
655 | */ | ||
656 | |||
657 | printk(KERN_INFO "CPU#%d offline\n", smp_processor_id()); | ||
658 | __get_cpu_var(cpu_state) = CPU_DEAD; | ||
659 | smp_wmb(); | ||
660 | |||
661 | /* | ||
662 | * during the path that leads here preemption is disabled, | ||
663 | * reenable it now so that when coming up preempt count is | ||
664 | * zero correctly | ||
665 | */ | ||
666 | preempt_enable(); | ||
667 | |||
668 | /* | ||
669 | * hard-disable interrupts for the non-NAP case, the NAP code | ||
670 | * needs to re-enable interrupts (but soft-disables them) | ||
671 | */ | ||
672 | hard_irq_disable(); | ||
673 | |||
674 | while (1) { | ||
675 | /* let's not take timer interrupts too often ... */ | ||
676 | set_dec(0x7fffffff); | ||
677 | |||
678 | /* should always be true at this point */ | ||
679 | if (cpu_has_feature(CPU_FTR_CAN_NAP)) | ||
680 | power4_cpu_offline_powersave(); | ||
681 | else { | ||
682 | HMT_low(); | ||
683 | HMT_very_low(); | ||
684 | } | ||
685 | } | ||
686 | } | ||
687 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
688 | |||
689 | #endif /* CONFIG_PPC64 */ | 653 | #endif /* CONFIG_PPC64 */ |
690 | 654 | ||
691 | define_machine(powermac) { | 655 | define_machine(powermac) { |
@@ -717,15 +681,4 @@ define_machine(powermac) { | |||
717 | .pcibios_after_init = pmac_pcibios_after_init, | 681 | .pcibios_after_init = pmac_pcibios_after_init, |
718 | .phys_mem_access_prot = pci_phys_mem_access_prot, | 682 | .phys_mem_access_prot = pci_phys_mem_access_prot, |
719 | #endif | 683 | #endif |
720 | #ifdef CONFIG_HOTPLUG_CPU | ||
721 | #ifdef CONFIG_PPC64 | ||
722 | .cpu_die = pmac64_cpu_die, | ||
723 | #endif | ||
724 | #ifdef CONFIG_PPC32 | ||
725 | .cpu_die = pmac32_cpu_die, | ||
726 | #endif | ||
727 | #endif | ||
728 | #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) | ||
729 | .cpu_die = generic_mach_cpu_die, | ||
730 | #endif | ||
731 | }; | 684 | }; |
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index c95215f4f8b6..db092d7c4c5b 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c | |||
@@ -70,7 +70,7 @@ static void (*pmac_tb_freeze)(int freeze); | |||
70 | static u64 timebase; | 70 | static u64 timebase; |
71 | static int tb_req; | 71 | static int tb_req; |
72 | 72 | ||
73 | #ifdef CONFIG_PPC32 | 73 | #ifdef CONFIG_PPC_PMAC32_PSURGE |
74 | 74 | ||
75 | /* | 75 | /* |
76 | * Powersurge (old powermac SMP) support. | 76 | * Powersurge (old powermac SMP) support. |
@@ -124,6 +124,10 @@ static volatile u32 __iomem *psurge_start; | |||
124 | /* what sort of powersurge board we have */ | 124 | /* what sort of powersurge board we have */ |
125 | static int psurge_type = PSURGE_NONE; | 125 | static int psurge_type = PSURGE_NONE; |
126 | 126 | ||
127 | /* irq for secondary cpus to report */ | ||
128 | static struct irq_host *psurge_host; | ||
129 | int psurge_secondary_virq; | ||
130 | |||
127 | /* | 131 | /* |
128 | * Set and clear IPIs for powersurge. | 132 | * Set and clear IPIs for powersurge. |
129 | */ | 133 | */ |
@@ -156,51 +160,52 @@ static inline void psurge_clr_ipi(int cpu) | |||
156 | /* | 160 | /* |
157 | * On powersurge (old SMP powermac architecture) we don't have | 161 | * On powersurge (old SMP powermac architecture) we don't have |
158 | * separate IPIs for separate messages like openpic does. Instead | 162 | * separate IPIs for separate messages like openpic does. Instead |
159 | * we have a bitmap for each processor, where a 1 bit means that | 163 | * use the generic demux helpers |
160 | * the corresponding message is pending for that processor. | ||
161 | * Ideally each cpu's entry would be in a different cache line. | ||
162 | * -- paulus. | 164 | * -- paulus. |
163 | */ | 165 | */ |
164 | static unsigned long psurge_smp_message[NR_CPUS]; | 166 | static irqreturn_t psurge_ipi_intr(int irq, void *d) |
165 | |||
166 | void psurge_smp_message_recv(void) | ||
167 | { | 167 | { |
168 | int cpu = smp_processor_id(); | 168 | psurge_clr_ipi(smp_processor_id()); |
169 | int msg; | 169 | smp_ipi_demux(); |
170 | |||
171 | /* clear interrupt */ | ||
172 | psurge_clr_ipi(cpu); | ||
173 | 170 | ||
174 | if (num_online_cpus() < 2) | 171 | return IRQ_HANDLED; |
175 | return; | 172 | } |
176 | 173 | ||
177 | /* make sure there is a message there */ | 174 | static void smp_psurge_cause_ipi(int cpu, unsigned long data) |
178 | for (msg = 0; msg < 4; msg++) | 175 | { |
179 | if (test_and_clear_bit(msg, &psurge_smp_message[cpu])) | 176 | psurge_set_ipi(cpu); |
180 | smp_message_recv(msg); | ||
181 | } | 177 | } |
182 | 178 | ||
183 | irqreturn_t psurge_primary_intr(int irq, void *d) | 179 | static int psurge_host_map(struct irq_host *h, unsigned int virq, |
180 | irq_hw_number_t hw) | ||
184 | { | 181 | { |
185 | psurge_smp_message_recv(); | 182 | irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq); |
186 | return IRQ_HANDLED; | 183 | |
184 | return 0; | ||
187 | } | 185 | } |
188 | 186 | ||
189 | static void smp_psurge_message_pass(int target, int msg) | 187 | struct irq_host_ops psurge_host_ops = { |
188 | .map = psurge_host_map, | ||
189 | }; | ||
190 | |||
191 | static int psurge_secondary_ipi_init(void) | ||
190 | { | 192 | { |
191 | int i; | 193 | int rc = -ENOMEM; |
192 | 194 | ||
193 | if (num_online_cpus() < 2) | 195 | psurge_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0, |
194 | return; | 196 | &psurge_host_ops, 0); |
195 | 197 | ||
196 | for_each_online_cpu(i) { | 198 | if (psurge_host) |
197 | if (target == MSG_ALL | 199 | psurge_secondary_virq = irq_create_direct_mapping(psurge_host); |
198 | || (target == MSG_ALL_BUT_SELF && i != smp_processor_id()) | 200 | |
199 | || target == i) { | 201 | if (psurge_secondary_virq) |
200 | set_bit(msg, &psurge_smp_message[i]); | 202 | rc = request_irq(psurge_secondary_virq, psurge_ipi_intr, |
201 | psurge_set_ipi(i); | 203 | IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); |
202 | } | 204 | |
203 | } | 205 | if (rc) |
206 | pr_err("Failed to setup secondary cpu IPI\n"); | ||
207 | |||
208 | return rc; | ||
204 | } | 209 | } |
205 | 210 | ||
206 | /* | 211 | /* |
@@ -311,6 +316,9 @@ static int __init smp_psurge_probe(void) | |||
311 | ncpus = 2; | 316 | ncpus = 2; |
312 | } | 317 | } |
313 | 318 | ||
319 | if (psurge_secondary_ipi_init()) | ||
320 | return 1; | ||
321 | |||
314 | psurge_start = ioremap(PSURGE_START, 4); | 322 | psurge_start = ioremap(PSURGE_START, 4); |
315 | psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4); | 323 | psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4); |
316 | 324 | ||
@@ -329,7 +337,7 @@ static int __init smp_psurge_probe(void) | |||
329 | return ncpus; | 337 | return ncpus; |
330 | } | 338 | } |
331 | 339 | ||
332 | static void __init smp_psurge_kick_cpu(int nr) | 340 | static int __init smp_psurge_kick_cpu(int nr) |
333 | { | 341 | { |
334 | unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8; | 342 | unsigned long start = __pa(__secondary_start_pmac_0) + nr * 8; |
335 | unsigned long a, flags; | 343 | unsigned long a, flags; |
@@ -394,11 +402,13 @@ static void __init smp_psurge_kick_cpu(int nr) | |||
394 | psurge_set_ipi(1); | 402 | psurge_set_ipi(1); |
395 | 403 | ||
396 | if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354); | 404 | if (ppc_md.progress) ppc_md.progress("smp_psurge_kick_cpu - done", 0x354); |
405 | |||
406 | return 0; | ||
397 | } | 407 | } |
398 | 408 | ||
399 | static struct irqaction psurge_irqaction = { | 409 | static struct irqaction psurge_irqaction = { |
400 | .handler = psurge_primary_intr, | 410 | .handler = psurge_ipi_intr, |
401 | .flags = IRQF_DISABLED, | 411 | .flags = IRQF_DISABLED|IRQF_PERCPU, |
402 | .name = "primary IPI", | 412 | .name = "primary IPI", |
403 | }; | 413 | }; |
404 | 414 | ||
@@ -437,14 +447,15 @@ void __init smp_psurge_give_timebase(void) | |||
437 | 447 | ||
438 | /* PowerSurge-style Macs */ | 448 | /* PowerSurge-style Macs */ |
439 | struct smp_ops_t psurge_smp_ops = { | 449 | struct smp_ops_t psurge_smp_ops = { |
440 | .message_pass = smp_psurge_message_pass, | 450 | .message_pass = smp_muxed_ipi_message_pass, |
451 | .cause_ipi = smp_psurge_cause_ipi, | ||
441 | .probe = smp_psurge_probe, | 452 | .probe = smp_psurge_probe, |
442 | .kick_cpu = smp_psurge_kick_cpu, | 453 | .kick_cpu = smp_psurge_kick_cpu, |
443 | .setup_cpu = smp_psurge_setup_cpu, | 454 | .setup_cpu = smp_psurge_setup_cpu, |
444 | .give_timebase = smp_psurge_give_timebase, | 455 | .give_timebase = smp_psurge_give_timebase, |
445 | .take_timebase = smp_psurge_take_timebase, | 456 | .take_timebase = smp_psurge_take_timebase, |
446 | }; | 457 | }; |
447 | #endif /* CONFIG_PPC32 - actually powersurge support */ | 458 | #endif /* CONFIG_PPC_PMAC32_PSURGE */ |
448 | 459 | ||
449 | /* | 460 | /* |
450 | * Core 99 and later support | 461 | * Core 99 and later support |
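The PowerSurge hunks above drop the private per-cpu message bitmap in favour of the generic muxed-IPI scheme: smp_ops gains .message_pass = smp_muxed_ipi_message_pass plus a platform .cause_ipi hook, a NOMAP irq_host supplies a virq with no hardware number for the secondary CPU, and the per-cpu handler simply calls smp_ipi_demux(). A condensed sketch of the same wiring with hypothetical "foo" names; the doorbell hardware access and most error handling are elided:

#include <linux/irq.h>
#include <linux/interrupt.h>
#include <asm/smp.h>

static struct irq_host *foo_ipi_host;
static unsigned int foo_ipi_virq;

static irqreturn_t foo_ipi_intr(int irq, void *dev_id)
{
	/* clear the interrupt source in hardware here, then demultiplex */
	smp_ipi_demux();
	return IRQ_HANDLED;
}

static int foo_ipi_host_map(struct irq_host *h, unsigned int virq,
			    irq_hw_number_t hw)
{
	irq_set_chip_and_handler(virq, &dummy_irq_chip, handle_percpu_irq);
	return 0;
}

static struct irq_host_ops foo_ipi_host_ops = {
	.map = foo_ipi_host_map,
};

static void foo_cause_ipi(int cpu, unsigned long data)
{
	/* poke the target cpu's doorbell/mailbox here */
}

static int __init foo_ipi_init(void)
{
	foo_ipi_host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
				      &foo_ipi_host_ops, 0);
	if (!foo_ipi_host)
		return -ENOMEM;

	foo_ipi_virq = irq_create_direct_mapping(foo_ipi_host);
	if (!foo_ipi_virq)
		return -ENOMEM;

	return request_irq(foo_ipi_virq, foo_ipi_intr,
			   IRQF_DISABLED | IRQF_PERCPU, "foo IPI", NULL);
}

The corresponding smp_ops entries would then be .message_pass = smp_muxed_ipi_message_pass and .cause_ipi = foo_cause_ipi, as psurge_smp_ops does in the hunk above.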
@@ -791,14 +802,14 @@ static int __init smp_core99_probe(void) | |||
791 | return ncpus; | 802 | return ncpus; |
792 | } | 803 | } |
793 | 804 | ||
794 | static void __devinit smp_core99_kick_cpu(int nr) | 805 | static int __devinit smp_core99_kick_cpu(int nr) |
795 | { | 806 | { |
796 | unsigned int save_vector; | 807 | unsigned int save_vector; |
797 | unsigned long target, flags; | 808 | unsigned long target, flags; |
798 | unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100); | 809 | unsigned int *vector = (unsigned int *)(PAGE_OFFSET+0x100); |
799 | 810 | ||
800 | if (nr < 0 || nr > 3) | 811 | if (nr < 0 || nr > 3) |
801 | return; | 812 | return -ENOENT; |
802 | 813 | ||
803 | if (ppc_md.progress) | 814 | if (ppc_md.progress) |
804 | ppc_md.progress("smp_core99_kick_cpu", 0x346); | 815 | ppc_md.progress("smp_core99_kick_cpu", 0x346); |
@@ -830,6 +841,8 @@ static void __devinit smp_core99_kick_cpu(int nr) | |||
830 | 841 | ||
831 | local_irq_restore(flags); | 842 | local_irq_restore(flags); |
832 | if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); | 843 | if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu done", 0x347); |
844 | |||
845 | return 0; | ||
833 | } | 846 | } |
834 | 847 | ||
835 | static void __devinit smp_core99_setup_cpu(int cpu_nr) | 848 | static void __devinit smp_core99_setup_cpu(int cpu_nr) |
@@ -840,92 +853,151 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr) | |||
840 | 853 | ||
841 | /* Setup openpic */ | 854 | /* Setup openpic */ |
842 | mpic_setup_this_cpu(); | 855 | mpic_setup_this_cpu(); |
856 | } | ||
843 | 857 | ||
844 | if (cpu_nr == 0) { | ||
845 | #ifdef CONFIG_PPC64 | 858 | #ifdef CONFIG_PPC64 |
846 | extern void g5_phy_disable_cpu1(void); | 859 | #ifdef CONFIG_HOTPLUG_CPU |
860 | static int smp_core99_cpu_notify(struct notifier_block *self, | ||
861 | unsigned long action, void *hcpu) | ||
862 | { | ||
863 | int rc; | ||
847 | 864 | ||
848 | /* Close i2c bus if it was used for tb sync */ | 865 | switch(action) { |
866 | case CPU_UP_PREPARE: | ||
867 | case CPU_UP_PREPARE_FROZEN: | ||
868 | /* Open i2c bus if it was used for tb sync */ | ||
849 | if (pmac_tb_clock_chip_host) { | 869 | if (pmac_tb_clock_chip_host) { |
850 | pmac_i2c_close(pmac_tb_clock_chip_host); | 870 | rc = pmac_i2c_open(pmac_tb_clock_chip_host, 1); |
851 | pmac_tb_clock_chip_host = NULL; | 871 | if (rc) { |
872 | pr_err("Failed to open i2c bus for time sync\n"); | ||
873 | return notifier_from_errno(rc); | ||
874 | } | ||
852 | } | 875 | } |
876 | break; | ||
877 | case CPU_ONLINE: | ||
878 | case CPU_UP_CANCELED: | ||
879 | /* Close i2c bus if it was used for tb sync */ | ||
880 | if (pmac_tb_clock_chip_host) | ||
881 | pmac_i2c_close(pmac_tb_clock_chip_host); | ||
882 | break; | ||
883 | default: | ||
884 | break; | ||
885 | } | ||
886 | return NOTIFY_OK; | ||
887 | } | ||
853 | 888 | ||
854 | /* If we didn't start the second CPU, we must take | 889 | static struct notifier_block __cpuinitdata smp_core99_cpu_nb = { |
855 | * it off the bus | 890 | .notifier_call = smp_core99_cpu_notify, |
856 | */ | 891 | }; |
857 | if (of_machine_is_compatible("MacRISC4") && | 892 | #endif /* CONFIG_HOTPLUG_CPU */ |
858 | num_online_cpus() < 2) | 893 | |
859 | g5_phy_disable_cpu1(); | 894 | static void __init smp_core99_bringup_done(void) |
860 | #endif /* CONFIG_PPC64 */ | 895 | { |
896 | extern void g5_phy_disable_cpu1(void); | ||
861 | 897 | ||
862 | if (ppc_md.progress) | 898 | /* Close i2c bus if it was used for tb sync */ |
863 | ppc_md.progress("core99_setup_cpu 0 done", 0x349); | 899 | if (pmac_tb_clock_chip_host) |
900 | pmac_i2c_close(pmac_tb_clock_chip_host); | ||
901 | |||
902 | /* If we didn't start the second CPU, we must take | ||
903 | * it off the bus. | ||
904 | */ | ||
905 | if (of_machine_is_compatible("MacRISC4") && | ||
906 | num_online_cpus() < 2) { | ||
907 | set_cpu_present(1, false); | ||
908 | g5_phy_disable_cpu1(); | ||
864 | } | 909 | } |
865 | } | 910 | #ifdef CONFIG_HOTPLUG_CPU |
911 | register_cpu_notifier(&smp_core99_cpu_nb); | ||
912 | #endif | ||
866 | 913 | ||
914 | if (ppc_md.progress) | ||
915 | ppc_md.progress("smp_core99_bringup_done", 0x349); | ||
916 | } | ||
917 | #endif /* CONFIG_PPC64 */ | ||
867 | 918 | ||
868 | #if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32) | 919 | #ifdef CONFIG_HOTPLUG_CPU |
869 | 920 | ||
870 | int smp_core99_cpu_disable(void) | 921 | static int smp_core99_cpu_disable(void) |
871 | { | 922 | { |
872 | set_cpu_online(smp_processor_id(), false); | 923 | int rc = generic_cpu_disable(); |
924 | if (rc) | ||
925 | return rc; | ||
873 | 926 | ||
874 | /* XXX reset cpu affinity here */ | ||
875 | mpic_cpu_set_priority(0xf); | 927 | mpic_cpu_set_priority(0xf); |
876 | asm volatile("mtdec %0" : : "r" (0x7fffffff)); | 928 | |
877 | mb(); | ||
878 | udelay(20); | ||
879 | asm volatile("mtdec %0" : : "r" (0x7fffffff)); | ||
880 | return 0; | 929 | return 0; |
881 | } | 930 | } |
882 | 931 | ||
883 | static int cpu_dead[NR_CPUS]; | 932 | #ifdef CONFIG_PPC32 |
884 | 933 | ||
885 | void pmac32_cpu_die(void) | 934 | static void pmac_cpu_die(void) |
886 | { | 935 | { |
936 | int cpu = smp_processor_id(); | ||
937 | |||
887 | local_irq_disable(); | 938 | local_irq_disable(); |
888 | cpu_dead[smp_processor_id()] = 1; | 939 | idle_task_exit(); |
940 | pr_debug("CPU%d offline\n", cpu); | ||
941 | generic_set_cpu_dead(cpu); | ||
942 | smp_wmb(); | ||
889 | mb(); | 943 | mb(); |
890 | low_cpu_die(); | 944 | low_cpu_die(); |
891 | } | 945 | } |
892 | 946 | ||
893 | void smp_core99_cpu_die(unsigned int cpu) | 947 | #else /* CONFIG_PPC32 */ |
948 | |||
949 | static void pmac_cpu_die(void) | ||
894 | { | 950 | { |
895 | int timeout; | 951 | int cpu = smp_processor_id(); |
896 | 952 | ||
897 | timeout = 1000; | 953 | local_irq_disable(); |
898 | while (!cpu_dead[cpu]) { | 954 | idle_task_exit(); |
899 | if (--timeout == 0) { | 955 | |
900 | printk("CPU %u refused to die!\n", cpu); | 956 | /* |
901 | break; | 957 | * turn off as much as possible, we'll be |
902 | } | 958 | * kicked out as this will only be invoked |
903 | msleep(1); | 959 | * on core99 platforms for now ... |
960 | */ | ||
961 | |||
962 | printk(KERN_INFO "CPU#%d offline\n", cpu); | ||
963 | generic_set_cpu_dead(cpu); | ||
964 | smp_wmb(); | ||
965 | |||
966 | /* | ||
967 | * Re-enable interrupts. The NAP code needs to enable them | ||
968 | * anyways, do it now so we deal with the case where one already | ||
969 | * happened while soft-disabled. | ||
970 | * We shouldn't get any external interrupts, only decrementer, and the | ||
971 | * decrementer handler is safe for use on offline CPUs | ||
972 | */ | ||
973 | local_irq_enable(); | ||
974 | |||
975 | while (1) { | ||
976 | /* let's not take timer interrupts too often ... */ | ||
977 | set_dec(0x7fffffff); | ||
978 | |||
979 | /* Enter NAP mode */ | ||
980 | power4_idle(); | ||
904 | } | 981 | } |
905 | cpu_dead[cpu] = 0; | ||
906 | } | 982 | } |
907 | 983 | ||
908 | #endif /* CONFIG_HOTPLUG_CPU && CONFIG_PP32 */ | 984 | #endif /* else CONFIG_PPC32 */ |
985 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
909 | 986 | ||
910 | /* Core99 Macs (dual G4s and G5s) */ | 987 | /* Core99 Macs (dual G4s and G5s) */ |
911 | struct smp_ops_t core99_smp_ops = { | 988 | struct smp_ops_t core99_smp_ops = { |
912 | .message_pass = smp_mpic_message_pass, | 989 | .message_pass = smp_mpic_message_pass, |
913 | .probe = smp_core99_probe, | 990 | .probe = smp_core99_probe, |
991 | #ifdef CONFIG_PPC64 | ||
992 | .bringup_done = smp_core99_bringup_done, | ||
993 | #endif | ||
914 | .kick_cpu = smp_core99_kick_cpu, | 994 | .kick_cpu = smp_core99_kick_cpu, |
915 | .setup_cpu = smp_core99_setup_cpu, | 995 | .setup_cpu = smp_core99_setup_cpu, |
916 | .give_timebase = smp_core99_give_timebase, | 996 | .give_timebase = smp_core99_give_timebase, |
917 | .take_timebase = smp_core99_take_timebase, | 997 | .take_timebase = smp_core99_take_timebase, |
918 | #if defined(CONFIG_HOTPLUG_CPU) | 998 | #if defined(CONFIG_HOTPLUG_CPU) |
919 | # if defined(CONFIG_PPC32) | ||
920 | .cpu_disable = smp_core99_cpu_disable, | 999 | .cpu_disable = smp_core99_cpu_disable, |
921 | .cpu_die = smp_core99_cpu_die, | ||
922 | # endif | ||
923 | # if defined(CONFIG_PPC64) | ||
924 | .cpu_disable = generic_cpu_disable, | ||
925 | .cpu_die = generic_cpu_die, | 1000 | .cpu_die = generic_cpu_die, |
926 | /* intentionally do *NOT* assign cpu_enable, | ||
927 | * the generic code will use kick_cpu then! */ | ||
928 | # endif | ||
929 | #endif | 1001 | #endif |
930 | }; | 1002 | }; |
931 | 1003 | ||
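On the Core99/G5 side, the one-shot work that used to sit in setup_cpu(0) is split out: a bringup_done hook runs once boot-time bring-up has finished, and a CPU hotplug notifier reopens and closes the i2c bus used for timebase sync around each later plug. The sketch below shows only the notifier shape, with hypothetical names and the resource handling elided:

#include <linux/cpu.h>
#include <linux/notifier.h>

static int foo_cpu_notify(struct notifier_block *self,
			  unsigned long action, void *hcpu)
{
	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		/* open/claim the resource needed while a secondary comes up */
		break;
	case CPU_ONLINE:
	case CPU_UP_CANCELED:
		/* release it again once bring-up finished or failed */
		break;
	default:
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block foo_cpu_nb = {
	.notifier_call = foo_cpu_notify,
};

/* registered once secondaries may be (un)plugged, e.g.:
 *	register_cpu_notifier(&foo_cpu_nb);
 */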
@@ -943,7 +1015,7 @@ void __init pmac_setup_smp(void) | |||
943 | of_node_put(np); | 1015 | of_node_put(np); |
944 | smp_ops = &core99_smp_ops; | 1016 | smp_ops = &core99_smp_ops; |
945 | } | 1017 | } |
946 | #ifdef CONFIG_PPC32 | 1018 | #ifdef CONFIG_PPC_PMAC32_PSURGE |
947 | else { | 1019 | else { |
948 | /* We have to set bits in cpu_possible_mask here since the | 1020 | /* We have to set bits in cpu_possible_mask here since the |
949 | * secondary CPU(s) aren't in the device tree. Various | 1021 | * secondary CPU(s) aren't in the device tree. Various |
@@ -956,6 +1028,11 @@ void __init pmac_setup_smp(void) | |||
956 | set_cpu_possible(cpu, true); | 1028 | set_cpu_possible(cpu, true); |
957 | smp_ops = &psurge_smp_ops; | 1029 | smp_ops = &psurge_smp_ops; |
958 | } | 1030 | } |
959 | #endif /* CONFIG_PPC32 */ | 1031 | #endif /* CONFIG_PPC_PMAC32_PSURGE */ |
1032 | |||
1033 | #ifdef CONFIG_HOTPLUG_CPU | ||
1034 | ppc_md.cpu_die = pmac_cpu_die; | ||
1035 | #endif | ||
960 | } | 1036 | } |
961 | 1037 | ||
1038 | |||
diff --git a/arch/powerpc/platforms/ps3/device-init.c b/arch/powerpc/platforms/ps3/device-init.c index b341018326df..6c4b5837fc8a 100644 --- a/arch/powerpc/platforms/ps3/device-init.c +++ b/arch/powerpc/platforms/ps3/device-init.c | |||
@@ -566,10 +566,10 @@ static int ps3_setup_dynamic_device(const struct ps3_repository_device *repo) | |||
566 | case PS3_DEV_TYPE_STOR_DISK: | 566 | case PS3_DEV_TYPE_STOR_DISK: |
567 | result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_DISK); | 567 | result = ps3_setup_storage_dev(repo, PS3_MATCH_ID_STOR_DISK); |
568 | 568 | ||
569 | /* Some devices are not accessable from the Other OS lpar. */ | 569 | /* Some devices are not accessible from the Other OS lpar. */ |
570 | if (result == -ENODEV) { | 570 | if (result == -ENODEV) { |
571 | result = 0; | 571 | result = 0; |
572 | pr_debug("%s:%u: not accessable\n", __func__, | 572 | pr_debug("%s:%u: not accessible\n", __func__, |
573 | __LINE__); | 573 | __LINE__); |
574 | } | 574 | } |
575 | 575 | ||
diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c index 59d9712d7364..600ed2c0ed59 100644 --- a/arch/powerpc/platforms/ps3/interrupt.c +++ b/arch/powerpc/platforms/ps3/interrupt.c | |||
@@ -44,7 +44,7 @@ | |||
44 | * @lock: | 44 | * @lock: |
45 | * @ipi_debug_brk_mask: | 45 | * @ipi_debug_brk_mask: |
46 | * | 46 | * |
47 | * The HV mantains per SMT thread mappings of HV outlet to HV plug on | 47 | * The HV maintains per SMT thread mappings of HV outlet to HV plug on |
48 | * behalf of the guest. These mappings are implemented as 256 bit guest | 48 | * behalf of the guest. These mappings are implemented as 256 bit guest |
49 | * supplied bitmaps indexed by plug number. The addresses of the bitmaps | 49 | * supplied bitmaps indexed by plug number. The addresses of the bitmaps |
50 | * are registered with the HV through lv1_configure_irq_state_bitmap(). | 50 | * are registered with the HV through lv1_configure_irq_state_bitmap(). |
@@ -99,16 +99,16 @@ static DEFINE_PER_CPU(struct ps3_private, ps3_private); | |||
99 | * Sets ps3_bmp.mask and calls lv1_did_update_interrupt_mask(). | 99 | * Sets ps3_bmp.mask and calls lv1_did_update_interrupt_mask(). |
100 | */ | 100 | */ |
101 | 101 | ||
102 | static void ps3_chip_mask(unsigned int virq) | 102 | static void ps3_chip_mask(struct irq_data *d) |
103 | { | 103 | { |
104 | struct ps3_private *pd = get_irq_chip_data(virq); | 104 | struct ps3_private *pd = irq_data_get_irq_chip_data(d); |
105 | unsigned long flags; | 105 | unsigned long flags; |
106 | 106 | ||
107 | pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, | 107 | pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, |
108 | pd->thread_id, virq); | 108 | pd->thread_id, d->irq); |
109 | 109 | ||
110 | local_irq_save(flags); | 110 | local_irq_save(flags); |
111 | clear_bit(63 - virq, &pd->bmp.mask); | 111 | clear_bit(63 - d->irq, &pd->bmp.mask); |
112 | lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id); | 112 | lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id); |
113 | local_irq_restore(flags); | 113 | local_irq_restore(flags); |
114 | } | 114 | } |
@@ -120,16 +120,16 @@ static void ps3_chip_mask(unsigned int virq) | |||
120 | * Clears ps3_bmp.mask and calls lv1_did_update_interrupt_mask(). | 120 | * Clears ps3_bmp.mask and calls lv1_did_update_interrupt_mask(). |
121 | */ | 121 | */ |
122 | 122 | ||
123 | static void ps3_chip_unmask(unsigned int virq) | 123 | static void ps3_chip_unmask(struct irq_data *d) |
124 | { | 124 | { |
125 | struct ps3_private *pd = get_irq_chip_data(virq); | 125 | struct ps3_private *pd = irq_data_get_irq_chip_data(d); |
126 | unsigned long flags; | 126 | unsigned long flags; |
127 | 127 | ||
128 | pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, | 128 | pr_debug("%s:%d: thread_id %llu, virq %d\n", __func__, __LINE__, |
129 | pd->thread_id, virq); | 129 | pd->thread_id, d->irq); |
130 | 130 | ||
131 | local_irq_save(flags); | 131 | local_irq_save(flags); |
132 | set_bit(63 - virq, &pd->bmp.mask); | 132 | set_bit(63 - d->irq, &pd->bmp.mask); |
133 | lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id); | 133 | lv1_did_update_interrupt_mask(pd->ppe_id, pd->thread_id); |
134 | local_irq_restore(flags); | 134 | local_irq_restore(flags); |
135 | } | 135 | } |
@@ -141,10 +141,10 @@ static void ps3_chip_unmask(unsigned int virq) | |||
141 | * Calls lv1_end_of_interrupt_ext(). | 141 | * Calls lv1_end_of_interrupt_ext(). |
142 | */ | 142 | */ |
143 | 143 | ||
144 | static void ps3_chip_eoi(unsigned int virq) | 144 | static void ps3_chip_eoi(struct irq_data *d) |
145 | { | 145 | { |
146 | const struct ps3_private *pd = get_irq_chip_data(virq); | 146 | const struct ps3_private *pd = irq_data_get_irq_chip_data(d); |
147 | lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, virq); | 147 | lv1_end_of_interrupt_ext(pd->ppe_id, pd->thread_id, d->irq); |
148 | } | 148 | } |
149 | 149 | ||
150 | /** | 150 | /** |
@@ -153,9 +153,9 @@ static void ps3_chip_eoi(unsigned int virq) | |||
153 | 153 | ||
154 | static struct irq_chip ps3_irq_chip = { | 154 | static struct irq_chip ps3_irq_chip = { |
155 | .name = "ps3", | 155 | .name = "ps3", |
156 | .mask = ps3_chip_mask, | 156 | .irq_mask = ps3_chip_mask, |
157 | .unmask = ps3_chip_unmask, | 157 | .irq_unmask = ps3_chip_unmask, |
158 | .eoi = ps3_chip_eoi, | 158 | .irq_eoi = ps3_chip_eoi, |
159 | }; | 159 | }; |
160 | 160 | ||
161 | /** | 161 | /** |
@@ -194,15 +194,15 @@ static int ps3_virq_setup(enum ps3_cpu_binding cpu, unsigned long outlet, | |||
194 | pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__, | 194 | pr_debug("%s:%d: outlet %lu => cpu %u, virq %u\n", __func__, __LINE__, |
195 | outlet, cpu, *virq); | 195 | outlet, cpu, *virq); |
196 | 196 | ||
197 | result = set_irq_chip_data(*virq, pd); | 197 | result = irq_set_chip_data(*virq, pd); |
198 | 198 | ||
199 | if (result) { | 199 | if (result) { |
200 | pr_debug("%s:%d: set_irq_chip_data failed\n", | 200 | pr_debug("%s:%d: irq_set_chip_data failed\n", |
201 | __func__, __LINE__); | 201 | __func__, __LINE__); |
202 | goto fail_set; | 202 | goto fail_set; |
203 | } | 203 | } |
204 | 204 | ||
205 | ps3_chip_mask(*virq); | 205 | ps3_chip_mask(irq_get_irq_data(*virq)); |
206 | 206 | ||
207 | return result; | 207 | return result; |
208 | 208 | ||
@@ -221,12 +221,12 @@ fail_create: | |||
221 | 221 | ||
222 | static int ps3_virq_destroy(unsigned int virq) | 222 | static int ps3_virq_destroy(unsigned int virq) |
223 | { | 223 | { |
224 | const struct ps3_private *pd = get_irq_chip_data(virq); | 224 | const struct ps3_private *pd = irq_get_chip_data(virq); |
225 | 225 | ||
226 | pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, | 226 | pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, |
227 | __LINE__, pd->ppe_id, pd->thread_id, virq); | 227 | __LINE__, pd->ppe_id, pd->thread_id, virq); |
228 | 228 | ||
229 | set_irq_chip_data(virq, NULL); | 229 | irq_set_chip_data(virq, NULL); |
230 | irq_dispose_mapping(virq); | 230 | irq_dispose_mapping(virq); |
231 | 231 | ||
232 | pr_debug("%s:%d <-\n", __func__, __LINE__); | 232 | pr_debug("%s:%d <-\n", __func__, __LINE__); |
@@ -256,7 +256,7 @@ int ps3_irq_plug_setup(enum ps3_cpu_binding cpu, unsigned long outlet, | |||
256 | goto fail_setup; | 256 | goto fail_setup; |
257 | } | 257 | } |
258 | 258 | ||
259 | pd = get_irq_chip_data(*virq); | 259 | pd = irq_get_chip_data(*virq); |
260 | 260 | ||
261 | /* Binds outlet to cpu + virq. */ | 261 | /* Binds outlet to cpu + virq. */ |
262 | 262 | ||
@@ -291,12 +291,12 @@ EXPORT_SYMBOL_GPL(ps3_irq_plug_setup); | |||
291 | int ps3_irq_plug_destroy(unsigned int virq) | 291 | int ps3_irq_plug_destroy(unsigned int virq) |
292 | { | 292 | { |
293 | int result; | 293 | int result; |
294 | const struct ps3_private *pd = get_irq_chip_data(virq); | 294 | const struct ps3_private *pd = irq_get_chip_data(virq); |
295 | 295 | ||
296 | pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, | 296 | pr_debug("%s:%d: ppe_id %llu, thread_id %llu, virq %u\n", __func__, |
297 | __LINE__, pd->ppe_id, pd->thread_id, virq); | 297 | __LINE__, pd->ppe_id, pd->thread_id, virq); |
298 | 298 | ||
299 | ps3_chip_mask(virq); | 299 | ps3_chip_mask(irq_get_irq_data(virq)); |
300 | 300 | ||
301 | result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq); | 301 | result = lv1_disconnect_irq_plug_ext(pd->ppe_id, pd->thread_id, virq); |
302 | 302 | ||
@@ -357,7 +357,7 @@ int ps3_event_receive_port_destroy(unsigned int virq) | |||
357 | 357 | ||
358 | pr_debug(" -> %s:%d virq %u\n", __func__, __LINE__, virq); | 358 | pr_debug(" -> %s:%d virq %u\n", __func__, __LINE__, virq); |
359 | 359 | ||
360 | ps3_chip_mask(virq); | 360 | ps3_chip_mask(irq_get_irq_data(virq)); |
361 | 361 | ||
362 | result = lv1_destruct_event_receive_port(virq_to_hw(virq)); | 362 | result = lv1_destruct_event_receive_port(virq_to_hw(virq)); |
363 | 363 | ||
@@ -492,7 +492,7 @@ int ps3_io_irq_destroy(unsigned int virq) | |||
492 | int result; | 492 | int result; |
493 | unsigned long outlet = virq_to_hw(virq); | 493 | unsigned long outlet = virq_to_hw(virq); |
494 | 494 | ||
495 | ps3_chip_mask(virq); | 495 | ps3_chip_mask(irq_get_irq_data(virq)); |
496 | 496 | ||
497 | /* | 497 | /* |
498 | * lv1_destruct_io_irq_outlet() will destroy the IRQ plug, | 498 | * lv1_destruct_io_irq_outlet() will destroy the IRQ plug, |
@@ -553,7 +553,7 @@ int ps3_vuart_irq_destroy(unsigned int virq) | |||
553 | { | 553 | { |
554 | int result; | 554 | int result; |
555 | 555 | ||
556 | ps3_chip_mask(virq); | 556 | ps3_chip_mask(irq_get_irq_data(virq)); |
557 | result = lv1_deconfigure_virtual_uart_irq(); | 557 | result = lv1_deconfigure_virtual_uart_irq(); |
558 | 558 | ||
559 | if (result) { | 559 | if (result) { |
@@ -605,7 +605,7 @@ int ps3_spe_irq_destroy(unsigned int virq) | |||
605 | { | 605 | { |
606 | int result; | 606 | int result; |
607 | 607 | ||
608 | ps3_chip_mask(virq); | 608 | ps3_chip_mask(irq_get_irq_data(virq)); |
609 | 609 | ||
610 | result = ps3_irq_plug_destroy(virq); | 610 | result = ps3_irq_plug_destroy(virq); |
611 | BUG_ON(result); | 611 | BUG_ON(result); |
@@ -659,18 +659,13 @@ static void __maybe_unused _dump_mask(struct ps3_private *pd, | |||
659 | static void dump_bmp(struct ps3_private* pd) {}; | 659 | static void dump_bmp(struct ps3_private* pd) {}; |
660 | #endif /* defined(DEBUG) */ | 660 | #endif /* defined(DEBUG) */ |
661 | 661 | ||
662 | static void ps3_host_unmap(struct irq_host *h, unsigned int virq) | ||
663 | { | ||
664 | set_irq_chip_data(virq, NULL); | ||
665 | } | ||
666 | |||
667 | static int ps3_host_map(struct irq_host *h, unsigned int virq, | 662 | static int ps3_host_map(struct irq_host *h, unsigned int virq, |
668 | irq_hw_number_t hwirq) | 663 | irq_hw_number_t hwirq) |
669 | { | 664 | { |
670 | pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq, | 665 | pr_debug("%s:%d: hwirq %lu, virq %u\n", __func__, __LINE__, hwirq, |
671 | virq); | 666 | virq); |
672 | 667 | ||
673 | set_irq_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq); | 668 | irq_set_chip_and_handler(virq, &ps3_irq_chip, handle_fasteoi_irq); |
674 | 669 | ||
675 | return 0; | 670 | return 0; |
676 | } | 671 | } |
@@ -683,7 +678,6 @@ static int ps3_host_match(struct irq_host *h, struct device_node *np) | |||
683 | 678 | ||
684 | static struct irq_host_ops ps3_host_ops = { | 679 | static struct irq_host_ops ps3_host_ops = { |
685 | .map = ps3_host_map, | 680 | .map = ps3_host_map, |
686 | .unmap = ps3_host_unmap, | ||
687 | .match = ps3_host_match, | 681 | .match = ps3_host_match, |
688 | }; | 682 | }; |
689 | 683 | ||
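The PS3 interrupt code receives the same irq_data conversion as the powermac PIC above; in-file callers that previously passed a virq straight to the chip callback now look the irq_data up first, as the ps3_chip_mask(irq_get_irq_data(virq)) calls show. A tiny sketch of that call pattern, where foo_chip_mask stands in for an irq_data-based callback such as ps3_chip_mask:

#include <linux/irq.h>

static void foo_chip_mask(struct irq_data *d);	/* hypothetical irq_data-based chip op */

static void foo_mask_virq(unsigned int virq)
{
	/* translate the virq to its irq_data before calling the chip op */
	foo_chip_mask(irq_get_irq_data(virq));
}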
diff --git a/arch/powerpc/platforms/ps3/smp.c b/arch/powerpc/platforms/ps3/smp.c index 51ffde40af2b..4c44794faac0 100644 --- a/arch/powerpc/platforms/ps3/smp.c +++ b/arch/powerpc/platforms/ps3/smp.c | |||
@@ -39,7 +39,7 @@ | |||
39 | #define MSG_COUNT 4 | 39 | #define MSG_COUNT 4 |
40 | static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs); | 40 | static DEFINE_PER_CPU(unsigned int [MSG_COUNT], ps3_ipi_virqs); |
41 | 41 | ||
42 | static void do_message_pass(int target, int msg) | 42 | static void ps3_smp_message_pass(int cpu, int msg) |
43 | { | 43 | { |
44 | int result; | 44 | int result; |
45 | unsigned int virq; | 45 | unsigned int virq; |
@@ -49,28 +49,12 @@ static void do_message_pass(int target, int msg) | |||
49 | return; | 49 | return; |
50 | } | 50 | } |
51 | 51 | ||
52 | virq = per_cpu(ps3_ipi_virqs, target)[msg]; | 52 | virq = per_cpu(ps3_ipi_virqs, cpu)[msg]; |
53 | result = ps3_send_event_locally(virq); | 53 | result = ps3_send_event_locally(virq); |
54 | 54 | ||
55 | if (result) | 55 | if (result) |
56 | DBG("%s:%d: ps3_send_event_locally(%d, %d) failed" | 56 | DBG("%s:%d: ps3_send_event_locally(%d, %d) failed" |
57 | " (%d)\n", __func__, __LINE__, target, msg, result); | 57 | " (%d)\n", __func__, __LINE__, cpu, msg, result); |
58 | } | ||
59 | |||
60 | static void ps3_smp_message_pass(int target, int msg) | ||
61 | { | ||
62 | int cpu; | ||
63 | |||
64 | if (target < NR_CPUS) | ||
65 | do_message_pass(target, msg); | ||
66 | else if (target == MSG_ALL_BUT_SELF) { | ||
67 | for_each_online_cpu(cpu) | ||
68 | if (cpu != smp_processor_id()) | ||
69 | do_message_pass(cpu, msg); | ||
70 | } else { | ||
71 | for_each_online_cpu(cpu) | ||
72 | do_message_pass(cpu, msg); | ||
73 | } | ||
74 | } | 58 | } |
75 | 59 | ||
76 | static int ps3_smp_probe(void) | 60 | static int ps3_smp_probe(void) |
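The simplification above relies on the reworked generic IPI layer in this series: smp_ops->message_pass() is now invoked once per target CPU, so platform code no longer interprets MSG_ALL / MSG_ALL_BUT_SELF itself (an inference from these hunks rather than a statement about the generic code). The resulting hook shape is simply:

static void foo_message_pass(int cpu, int msg)
{
	/* deliver 'msg' to exactly one cpu; the generic code has already
	 * expanded "all" / "all but self" into individual calls */
}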
diff --git a/arch/powerpc/platforms/ps3/spu.c b/arch/powerpc/platforms/ps3/spu.c index 39a472e9e80f..375a9f92158d 100644 --- a/arch/powerpc/platforms/ps3/spu.c +++ b/arch/powerpc/platforms/ps3/spu.c | |||
@@ -197,7 +197,7 @@ static void spu_unmap(struct spu *spu) | |||
197 | * The current HV requires the spu shadow regs to be mapped with the | 197 | * The current HV requires the spu shadow regs to be mapped with the |
198 | * PTE page protection bits set as read-only (PP=3). This implementation | 198 | * PTE page protection bits set as read-only (PP=3). This implementation |
199 | * uses the low level __ioremap() to bypass the page protection settings | 199 | * uses the low level __ioremap() to bypass the page protection settings |
200 | * inforced by ioremap_flags() to get the needed PTE bits set for the | 200 | * inforced by ioremap_prot() to get the needed PTE bits set for the |
201 | * shadow regs. | 201 | * shadow regs. |
202 | */ | 202 | */ |
203 | 203 | ||
@@ -214,7 +214,7 @@ static int __init setup_areas(struct spu *spu) | |||
214 | goto fail_ioremap; | 214 | goto fail_ioremap; |
215 | } | 215 | } |
216 | 216 | ||
217 | spu->local_store = (__force void *)ioremap_flags(spu->local_store_phys, | 217 | spu->local_store = (__force void *)ioremap_prot(spu->local_store_phys, |
218 | LS_SIZE, _PAGE_NO_CACHE); | 218 | LS_SIZE, _PAGE_NO_CACHE); |
219 | 219 | ||
220 | if (!spu->local_store) { | 220 | if (!spu->local_store) { |
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig index c667f0f02c34..71af4c5d6c05 100644 --- a/arch/powerpc/platforms/pseries/Kconfig +++ b/arch/powerpc/platforms/pseries/Kconfig | |||
@@ -3,14 +3,17 @@ config PPC_PSERIES | |||
3 | bool "IBM pSeries & new (POWER5-based) iSeries" | 3 | bool "IBM pSeries & new (POWER5-based) iSeries" |
4 | select MPIC | 4 | select MPIC |
5 | select PCI_MSI | 5 | select PCI_MSI |
6 | select XICS | 6 | select PPC_XICS |
7 | select PPC_ICP_NATIVE | ||
8 | select PPC_ICP_HV | ||
9 | select PPC_ICS_RTAS | ||
7 | select PPC_I8259 | 10 | select PPC_I8259 |
8 | select PPC_RTAS | 11 | select PPC_RTAS |
9 | select PPC_RTAS_DAEMON | 12 | select PPC_RTAS_DAEMON |
10 | select RTAS_ERROR_LOGGING | 13 | select RTAS_ERROR_LOGGING |
11 | select PPC_UDBG_16550 | 14 | select PPC_UDBG_16550 |
12 | select PPC_NATIVE | 15 | select PPC_NATIVE |
13 | select PPC_PCI_CHOICE if EMBEDDED | 16 | select PPC_PCI_CHOICE if EXPERT |
14 | default y | 17 | default y |
15 | 18 | ||
16 | config PPC_SPLPAR | 19 | config PPC_SPLPAR |
@@ -24,19 +27,47 @@ config PPC_SPLPAR | |||
24 | two or more partitions. | 27 | two or more partitions. |
25 | 28 | ||
26 | config EEH | 29 | config EEH |
27 | bool "PCI Extended Error Handling (EEH)" if EMBEDDED | 30 | bool "PCI Extended Error Handling (EEH)" if EXPERT |
28 | depends on PPC_PSERIES && PCI | 31 | depends on PPC_PSERIES && PCI |
29 | default y if !EMBEDDED | 32 | default y if !EXPERT |
30 | 33 | ||
31 | config PSERIES_MSI | 34 | config PSERIES_MSI |
32 | bool | 35 | bool |
33 | depends on PCI_MSI && EEH | 36 | depends on PCI_MSI && EEH |
34 | default y | 37 | default y |
35 | 38 | ||
39 | config PSERIES_ENERGY | ||
40 | tristate "pSeries energy management capabilities driver" | ||
41 | depends on PPC_PSERIES | ||
42 | default y | ||
43 | help | ||
44 | Provides interface to platform energy management capabilities | ||
45 | on supported PSERIES platforms. | ||
46 | Provides: /sys/devices/system/cpu/pseries_(de)activation_hint_list | ||
47 | and /sys/devices/system/cpu/cpuN/pseries_(de)activation_hint | ||
48 | |||
36 | config SCANLOG | 49 | config SCANLOG |
37 | tristate "Scanlog dump interface" | 50 | tristate "Scanlog dump interface" |
38 | depends on RTAS_PROC && PPC_PSERIES | 51 | depends on RTAS_PROC && PPC_PSERIES |
39 | 52 | ||
53 | config IO_EVENT_IRQ | ||
54 | bool "IO Event Interrupt support" | ||
55 | depends on PPC_PSERIES | ||
56 | default y | ||
57 | help | ||
58 | Select this option, if you want to enable support for IO Event | ||
59 | interrupts. IO event interrupt is a mechanism provided by RTAS | ||
60 | to return information about hardware error and non-error events | ||
61 | which may need OS attention. RTAS returns events for multiple | ||
62 | event types and scopes. Device drivers can register their handlers | ||
63 | to receive events. | ||
64 | |||
65 | This option will only enable the IO event platform code. You | ||
66 | will still need to enable or compile the actual drivers | ||
67 | that use this infrastructure to handle IO event interrupts. |
68 | |||
69 | Say Y if you are unsure. | ||
70 | |||
40 | config LPARCFG | 71 | config LPARCFG |
41 | bool "LPAR Configuration Data" | 72 | bool "LPAR Configuration Data" |
42 | depends on PPC_PSERIES || PPC_ISERIES | 73 | depends on PPC_PSERIES || PPC_ISERIES |
@@ -47,6 +78,12 @@ config LPARCFG | |||
47 | config PPC_PSERIES_DEBUG | 78 | config PPC_PSERIES_DEBUG |
48 | depends on PPC_PSERIES && PPC_EARLY_DEBUG | 79 | depends on PPC_PSERIES && PPC_EARLY_DEBUG |
49 | bool "Enable extra debug logging in platforms/pseries" | 80 | bool "Enable extra debug logging in platforms/pseries" |
81 | help | ||
82 | Say Y here if you want the pseries core to produce a bunch of | ||
83 | debug messages to the system log. Select this if you are having a | ||
84 | problem with the pseries core and want to see more of what is | ||
85 | going on. This does not enable debugging in lpar.c, which must | ||
86 | be manually done due to its verbosity. | ||
50 | default y | 87 | default y |
51 | 88 | ||
52 | config PPC_SMLPAR | 89 | config PPC_SMLPAR |
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 046ace9c4381..3556e402cbf5 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile | |||
@@ -1,21 +1,16 @@ | |||
1 | ifeq ($(CONFIG_PPC64),y) | 1 | ccflags-$(CONFIG_PPC64) := -mno-minimal-toc |
2 | EXTRA_CFLAGS += -mno-minimal-toc | 2 | ccflags-$(CONFIG_PPC_PSERIES_DEBUG) += -DDEBUG |
3 | endif | ||
4 | |||
5 | ifeq ($(CONFIG_PPC_PSERIES_DEBUG),y) | ||
6 | EXTRA_CFLAGS += -DDEBUG | ||
7 | endif | ||
8 | 3 | ||
9 | obj-y := lpar.o hvCall.o nvram.o reconfig.o \ | 4 | obj-y := lpar.o hvCall.o nvram.o reconfig.o \ |
10 | setup.o iommu.o event_sources.o ras.o \ | 5 | setup.o iommu.o event_sources.o ras.o \ |
11 | firmware.o power.o dlpar.o | 6 | firmware.o power.o dlpar.o mobility.o |
12 | obj-$(CONFIG_SMP) += smp.o | 7 | obj-$(CONFIG_SMP) += smp.o |
13 | obj-$(CONFIG_XICS) += xics.o | ||
14 | obj-$(CONFIG_SCANLOG) += scanlog.o | 8 | obj-$(CONFIG_SCANLOG) += scanlog.o |
15 | obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o | 9 | obj-$(CONFIG_EEH) += eeh.o eeh_cache.o eeh_driver.o eeh_event.o eeh_sysfs.o |
16 | obj-$(CONFIG_KEXEC) += kexec.o | 10 | obj-$(CONFIG_KEXEC) += kexec.o |
17 | obj-$(CONFIG_PCI) += pci.o pci_dlpar.o | 11 | obj-$(CONFIG_PCI) += pci.o pci_dlpar.o |
18 | obj-$(CONFIG_PSERIES_MSI) += msi.o | 12 | obj-$(CONFIG_PSERIES_MSI) += msi.o |
13 | obj-$(CONFIG_PSERIES_ENERGY) += pseries_energy.o | ||
19 | 14 | ||
20 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o | 15 | obj-$(CONFIG_HOTPLUG_CPU) += hotplug-cpu.o |
21 | obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o | 16 | obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o |
@@ -23,9 +18,10 @@ obj-$(CONFIG_MEMORY_HOTPLUG) += hotplug-memory.o | |||
23 | obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o | 18 | obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o |
24 | obj-$(CONFIG_HVCS) += hvcserver.o | 19 | obj-$(CONFIG_HVCS) += hvcserver.o |
25 | obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o | 20 | obj-$(CONFIG_HCALL_STATS) += hvCall_inst.o |
26 | obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o | 21 | obj-$(CONFIG_PHYP_DUMP) += phyp_dump.o |
27 | obj-$(CONFIG_CMM) += cmm.o | 22 | obj-$(CONFIG_CMM) += cmm.o |
28 | obj-$(CONFIG_DTL) += dtl.o | 23 | obj-$(CONFIG_DTL) += dtl.o |
24 | obj-$(CONFIG_IO_EVENT_IRQ) += io_event_irq.o | ||
29 | 25 | ||
30 | ifeq ($(CONFIG_PPC_PSERIES),y) | 26 | ifeq ($(CONFIG_PPC_PSERIES),y) |
31 | obj-$(CONFIG_SUSPEND) += suspend.o | 27 | obj-$(CONFIG_SUSPEND) += suspend.o |
diff --git a/arch/powerpc/platforms/pseries/cmm.c b/arch/powerpc/platforms/pseries/cmm.c index f4803868642c..3cafc306b971 100644 --- a/arch/powerpc/platforms/pseries/cmm.c +++ b/arch/powerpc/platforms/pseries/cmm.c | |||
@@ -508,12 +508,7 @@ static int cmm_memory_isolate_cb(struct notifier_block *self, | |||
508 | if (action == MEM_ISOLATE_COUNT) | 508 | if (action == MEM_ISOLATE_COUNT) |
509 | ret = cmm_count_pages(arg); | 509 | ret = cmm_count_pages(arg); |
510 | 510 | ||
511 | if (ret) | 511 | return notifier_from_errno(ret); |
512 | ret = notifier_from_errno(ret); | ||
513 | else | ||
514 | ret = NOTIFY_OK; | ||
515 | |||
516 | return ret; | ||
517 | } | 512 | } |
518 | 513 | ||
519 | static struct notifier_block cmm_mem_isolate_nb = { | 514 | static struct notifier_block cmm_mem_isolate_nb = { |
@@ -635,12 +630,7 @@ static int cmm_memory_cb(struct notifier_block *self, | |||
635 | break; | 630 | break; |
636 | } | 631 | } |
637 | 632 | ||
638 | if (ret) | 633 | return notifier_from_errno(ret); |
639 | ret = notifier_from_errno(ret); | ||
640 | else | ||
641 | ret = NOTIFY_OK; | ||
642 | |||
643 | return ret; | ||
644 | } | 634 | } |
645 | 635 | ||
646 | static struct notifier_block cmm_mem_nb = { | 636 | static struct notifier_block cmm_mem_nb = { |
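The two cmm.c hunks above drop the explicit NOTIFY_OK branch because notifier_from_errno() of this era already maps an error code of 0 to NOTIFY_OK. A minimal sketch of that mapping, written only to illustrate the equivalence (it mirrors the include/linux/notifier.h helper, it is not new code in this patch):

#include <linux/notifier.h>

/* Illustrative only: same behaviour as notifier_from_errno(). */
static inline int notifier_from_errno_sketch(int err)
{
	if (err)			/* e.g. -ENOMEM from cmm_count_pages() */
		return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
	return NOTIFY_OK;		/* err == 0 already means NOTIFY_OK */
}

So `return notifier_from_errno(ret);` yields NOTIFY_OK for ret == 0 and a NOTIFY_STOP_MASK-tagged value otherwise, which is exactly what the removed if/else produced.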
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c index 72d8054fa739..57ceb92b2288 100644 --- a/arch/powerpc/platforms/pseries/dlpar.c +++ b/arch/powerpc/platforms/pseries/dlpar.c | |||
@@ -33,7 +33,7 @@ struct cc_workarea { | |||
33 | u32 prop_offset; | 33 | u32 prop_offset; |
34 | }; | 34 | }; |
35 | 35 | ||
36 | static void dlpar_free_cc_property(struct property *prop) | 36 | void dlpar_free_cc_property(struct property *prop) |
37 | { | 37 | { |
38 | kfree(prop->name); | 38 | kfree(prop->name); |
39 | kfree(prop->value); | 39 | kfree(prop->value); |
@@ -55,13 +55,12 @@ static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa) | |||
55 | 55 | ||
56 | prop->length = ccwa->prop_length; | 56 | prop->length = ccwa->prop_length; |
57 | value = (char *)ccwa + ccwa->prop_offset; | 57 | value = (char *)ccwa + ccwa->prop_offset; |
58 | prop->value = kzalloc(prop->length, GFP_KERNEL); | 58 | prop->value = kmemdup(value, prop->length, GFP_KERNEL); |
59 | if (!prop->value) { | 59 | if (!prop->value) { |
60 | dlpar_free_cc_property(prop); | 60 | dlpar_free_cc_property(prop); |
61 | return NULL; | 61 | return NULL; |
62 | } | 62 | } |
63 | 63 | ||
64 | memcpy(prop->value, value, prop->length); | ||
65 | return prop; | 64 | return prop; |
66 | } | 65 | } |
67 | 66 | ||
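For reference, the kmemdup() call that replaces the kzalloc()/memcpy() pair copies the source into a freshly allocated buffer. A rough sketch of its semantics (the real helper lives in mm/util.c; this is not its exact implementation):

#include <linux/slab.h>
#include <linux/string.h>

static void *kmemdup_sketch(const void *src, size_t len, gfp_t gfp)
{
	void *p = kmalloc(len, gfp);	/* no zeroing needed ... */

	if (p)
		memcpy(p, src, len);	/* ... memcpy overwrites the whole buffer */
	return p;
}

The zeroing done by the old kzalloc() was redundant because the subsequent memcpy() covered the entire allocation, so behaviour is unchanged.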
@@ -75,7 +74,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa) | |||
75 | return NULL; | 74 | return NULL; |
76 | 75 | ||
77 | /* The configure connector reported name does not contain a | 76 | /* The configure connector reported name does not contain a |
78 | * preceeding '/', so we allocate a buffer large enough to | 77 | * preceding '/', so we allocate a buffer large enough to |
79 | * prepend this to the full_name. | 78 | * prepend this to the full_name. |
80 | */ | 79 | */ |
81 | name = (char *)ccwa + ccwa->name_offset; | 80 | name = (char *)ccwa + ccwa->name_offset; |
@@ -102,7 +101,7 @@ static void dlpar_free_one_cc_node(struct device_node *dn) | |||
102 | kfree(dn); | 101 | kfree(dn); |
103 | } | 102 | } |
104 | 103 | ||
105 | static void dlpar_free_cc_nodes(struct device_node *dn) | 104 | void dlpar_free_cc_nodes(struct device_node *dn) |
106 | { | 105 | { |
107 | if (dn->child) | 106 | if (dn->child) |
108 | dlpar_free_cc_nodes(dn->child); | 107 | dlpar_free_cc_nodes(dn->child); |
diff --git a/arch/powerpc/platforms/pseries/dtl.c b/arch/powerpc/platforms/pseries/dtl.c index a00addb55945..e9190073bb97 100644 --- a/arch/powerpc/platforms/pseries/dtl.c +++ b/arch/powerpc/platforms/pseries/dtl.c | |||
@@ -23,37 +23,22 @@ | |||
23 | #include <linux/init.h> | 23 | #include <linux/init.h> |
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/debugfs.h> | 25 | #include <linux/debugfs.h> |
26 | #include <linux/spinlock.h> | ||
26 | #include <asm/smp.h> | 27 | #include <asm/smp.h> |
27 | #include <asm/system.h> | 28 | #include <asm/system.h> |
28 | #include <asm/uaccess.h> | 29 | #include <asm/uaccess.h> |
29 | #include <asm/firmware.h> | 30 | #include <asm/firmware.h> |
31 | #include <asm/lppaca.h> | ||
30 | 32 | ||
31 | #include "plpar_wrappers.h" | 33 | #include "plpar_wrappers.h" |
32 | 34 | ||
33 | /* | ||
34 | * Layout of entries in the hypervisor's DTL buffer. Although we don't | ||
35 | * actually access the internals of an entry (we only need to know the size), | ||
36 | * we might as well define it here for reference. | ||
37 | */ | ||
38 | struct dtl_entry { | ||
39 | u8 dispatch_reason; | ||
40 | u8 preempt_reason; | ||
41 | u16 processor_id; | ||
42 | u32 enqueue_to_dispatch_time; | ||
43 | u32 ready_to_enqueue_time; | ||
44 | u32 waiting_to_ready_time; | ||
45 | u64 timebase; | ||
46 | u64 fault_addr; | ||
47 | u64 srr0; | ||
48 | u64 srr1; | ||
49 | }; | ||
50 | |||
51 | struct dtl { | 35 | struct dtl { |
52 | struct dtl_entry *buf; | 36 | struct dtl_entry *buf; |
53 | struct dentry *file; | 37 | struct dentry *file; |
54 | int cpu; | 38 | int cpu; |
55 | int buf_entries; | 39 | int buf_entries; |
56 | u64 last_idx; | 40 | u64 last_idx; |
41 | spinlock_t lock; | ||
57 | }; | 42 | }; |
58 | static DEFINE_PER_CPU(struct dtl, cpu_dtl); | 43 | static DEFINE_PER_CPU(struct dtl, cpu_dtl); |
59 | 44 | ||
@@ -67,34 +52,106 @@ static u8 dtl_event_mask = 0x7; | |||
67 | 52 | ||
68 | 53 | ||
69 | /* | 54 | /* |
70 | * Size of per-cpu log buffers. Default is just under 16 pages worth. | 55 | * Size of per-cpu log buffers. Firmware requires that the buffer does |
56 | * not cross a 4k boundary. | ||
71 | */ | 57 | */ |
72 | static int dtl_buf_entries = (16 * 85); | 58 | static int dtl_buf_entries = N_DISPATCH_LOG; |
59 | |||
60 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
61 | struct dtl_ring { | ||
62 | u64 write_index; | ||
63 | struct dtl_entry *write_ptr; | ||
64 | struct dtl_entry *buf; | ||
65 | struct dtl_entry *buf_end; | ||
66 | u8 saved_dtl_mask; | ||
67 | }; | ||
73 | 68 | ||
69 | static DEFINE_PER_CPU(struct dtl_ring, dtl_rings); | ||
74 | 70 | ||
75 | static int dtl_enable(struct dtl *dtl) | 71 | static atomic_t dtl_count; |
72 | |||
73 | /* | ||
74 | * The cpu accounting code controls the DTL ring buffer, and we get | ||
75 | * given entries as they are processed. | ||
76 | */ | ||
77 | static void consume_dtle(struct dtl_entry *dtle, u64 index) | ||
76 | { | 78 | { |
77 | unsigned long addr; | 79 | struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings); |
78 | int ret, hwcpu; | 80 | struct dtl_entry *wp = dtlr->write_ptr; |
81 | struct lppaca *vpa = local_paca->lppaca_ptr; | ||
79 | 82 | ||
80 | /* only allow one reader */ | 83 | if (!wp) |
81 | if (dtl->buf) | 84 | return; |
82 | return -EBUSY; | ||
83 | 85 | ||
84 | /* we need to store the original allocation size for use during read */ | 86 | *wp = *dtle; |
85 | dtl->buf_entries = dtl_buf_entries; | 87 | barrier(); |
86 | 88 | ||
87 | dtl->buf = kmalloc_node(dtl->buf_entries * sizeof(struct dtl_entry), | 89 | /* check for hypervisor ring buffer overflow, ignore this entry if so */ |
88 | GFP_KERNEL, cpu_to_node(dtl->cpu)); | 90 | if (index + N_DISPATCH_LOG < vpa->dtl_idx) |
89 | if (!dtl->buf) { | 91 | return; |
90 | printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n", | 92 | |
91 | __func__, dtl->cpu); | 93 | ++wp; |
92 | return -ENOMEM; | 94 | if (wp == dtlr->buf_end) |
93 | } | 95 | wp = dtlr->buf; |
96 | dtlr->write_ptr = wp; | ||
97 | |||
98 | /* incrementing write_index makes the new entry visible */ | ||
99 | smp_wmb(); | ||
100 | ++dtlr->write_index; | ||
101 | } | ||
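consume_dtle() publishes each copied entry by advancing write_index only after an smp_wmb(), so any consumer has to sample the index first and order its reads behind it. A hypothetical reader-side helper, purely illustrative and not part of this patch:

/* Illustrative: entries below the returned index are safe to read. */
static u64 dtl_ring_readable_index(struct dtl_ring *dtlr)
{
	u64 idx = dtlr->write_index;	/* producer increments this last */

	smp_rmb();	/* pairs with the smp_wmb() in consume_dtle() */
	return idx;
}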
102 | |||
103 | static int dtl_start(struct dtl *dtl) | ||
104 | { | ||
105 | struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu); | ||
106 | |||
107 | dtlr->buf = dtl->buf; | ||
108 | dtlr->buf_end = dtl->buf + dtl->buf_entries; | ||
109 | dtlr->write_index = 0; | ||
110 | |||
111 | /* setting write_ptr enables logging into our buffer */ | ||
112 | smp_wmb(); | ||
113 | dtlr->write_ptr = dtl->buf; | ||
114 | |||
115 | /* enable event logging */ | ||
116 | dtlr->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask; | ||
117 | lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask; | ||
118 | |||
119 | dtl_consumer = consume_dtle; | ||
120 | atomic_inc(&dtl_count); | ||
121 | return 0; | ||
122 | } | ||
123 | |||
124 | static void dtl_stop(struct dtl *dtl) | ||
125 | { | ||
126 | struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu); | ||
127 | |||
128 | dtlr->write_ptr = NULL; | ||
129 | smp_wmb(); | ||
130 | |||
131 | dtlr->buf = NULL; | ||
132 | |||
133 | /* restore dtl_enable_mask */ | ||
134 | lppaca_of(dtl->cpu).dtl_enable_mask = dtlr->saved_dtl_mask; | ||
135 | |||
136 | if (atomic_dec_and_test(&dtl_count)) | ||
137 | dtl_consumer = NULL; | ||
138 | } | ||
139 | |||
140 | static u64 dtl_current_index(struct dtl *dtl) | ||
141 | { | ||
142 | return per_cpu(dtl_rings, dtl->cpu).write_index; | ||
143 | } | ||
144 | |||
145 | #else /* CONFIG_VIRT_CPU_ACCOUNTING */ | ||
146 | |||
147 | static int dtl_start(struct dtl *dtl) | ||
148 | { | ||
149 | unsigned long addr; | ||
150 | int ret, hwcpu; | ||
94 | 151 | ||
95 | /* Register our dtl buffer with the hypervisor. The HV expects the | 152 | /* Register our dtl buffer with the hypervisor. The HV expects the |
96 | * buffer size to be passed in the second word of the buffer */ | 153 | * buffer size to be passed in the second word of the buffer */ |
97 | ((u32 *)dtl->buf)[1] = dtl->buf_entries * sizeof(struct dtl_entry); | 154 | ((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES; |
98 | 155 | ||
99 | hwcpu = get_hard_smp_processor_id(dtl->cpu); | 156 | hwcpu = get_hard_smp_processor_id(dtl->cpu); |
100 | addr = __pa(dtl->buf); | 157 | addr = __pa(dtl->buf); |
@@ -102,34 +159,84 @@ static int dtl_enable(struct dtl *dtl) | |||
102 | if (ret) { | 159 | if (ret) { |
103 | printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) " | 160 | printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) " |
104 | "failed with %d\n", __func__, dtl->cpu, hwcpu, ret); | 161 | "failed with %d\n", __func__, dtl->cpu, hwcpu, ret); |
105 | kfree(dtl->buf); | ||
106 | return -EIO; | 162 | return -EIO; |
107 | } | 163 | } |
108 | 164 | ||
109 | /* set our initial buffer indices */ | 165 | /* set our initial buffer indices */ |
110 | dtl->last_idx = lppaca[dtl->cpu].dtl_idx = 0; | 166 | lppaca_of(dtl->cpu).dtl_idx = 0; |
111 | 167 | ||
112 | /* ensure that our updates to the lppaca fields have occurred before | 168 | /* ensure that our updates to the lppaca fields have occurred before |
113 | * we actually enable the logging */ | 169 | * we actually enable the logging */ |
114 | smp_wmb(); | 170 | smp_wmb(); |
115 | 171 | ||
116 | /* enable event logging */ | 172 | /* enable event logging */ |
117 | lppaca[dtl->cpu].dtl_enable_mask = dtl_event_mask; | 173 | lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask; |
118 | 174 | ||
119 | return 0; | 175 | return 0; |
120 | } | 176 | } |
121 | 177 | ||
122 | static void dtl_disable(struct dtl *dtl) | 178 | static void dtl_stop(struct dtl *dtl) |
123 | { | 179 | { |
124 | int hwcpu = get_hard_smp_processor_id(dtl->cpu); | 180 | int hwcpu = get_hard_smp_processor_id(dtl->cpu); |
125 | 181 | ||
126 | lppaca[dtl->cpu].dtl_enable_mask = 0x0; | 182 | lppaca_of(dtl->cpu).dtl_enable_mask = 0x0; |
127 | 183 | ||
128 | unregister_dtl(hwcpu, __pa(dtl->buf)); | 184 | unregister_dtl(hwcpu, __pa(dtl->buf)); |
185 | } | ||
186 | |||
187 | static u64 dtl_current_index(struct dtl *dtl) | ||
188 | { | ||
189 | return lppaca_of(dtl->cpu).dtl_idx; | ||
190 | } | ||
191 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | ||
192 | |||
193 | static int dtl_enable(struct dtl *dtl) | ||
194 | { | ||
195 | long int n_entries; | ||
196 | long int rc; | ||
197 | struct dtl_entry *buf = NULL; | ||
198 | |||
199 | if (!dtl_cache) | ||
200 | return -ENOMEM; | ||
201 | |||
202 | /* only allow one reader */ | ||
203 | if (dtl->buf) | ||
204 | return -EBUSY; | ||
205 | |||
206 | n_entries = dtl_buf_entries; | ||
207 | buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu)); | ||
208 | if (!buf) { | ||
209 | printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n", | ||
210 | __func__, dtl->cpu); | ||
211 | return -ENOMEM; | ||
212 | } | ||
129 | 213 | ||
130 | kfree(dtl->buf); | 214 | spin_lock(&dtl->lock); |
215 | rc = -EBUSY; | ||
216 | if (!dtl->buf) { | ||
217 | /* store the original allocation size for use during read */ | ||
218 | dtl->buf_entries = n_entries; | ||
219 | dtl->buf = buf; | ||
220 | dtl->last_idx = 0; | ||
221 | rc = dtl_start(dtl); | ||
222 | if (rc) | ||
223 | dtl->buf = NULL; | ||
224 | } | ||
225 | spin_unlock(&dtl->lock); | ||
226 | |||
227 | if (rc) | ||
228 | kmem_cache_free(dtl_cache, buf); | ||
229 | return rc; | ||
230 | } | ||
231 | |||
232 | static void dtl_disable(struct dtl *dtl) | ||
233 | { | ||
234 | spin_lock(&dtl->lock); | ||
235 | dtl_stop(dtl); | ||
236 | kmem_cache_free(dtl_cache, dtl->buf); | ||
131 | dtl->buf = NULL; | 237 | dtl->buf = NULL; |
132 | dtl->buf_entries = 0; | 238 | dtl->buf_entries = 0; |
239 | spin_unlock(&dtl->lock); | ||
133 | } | 240 | } |
134 | 241 | ||
135 | /* file interface */ | 242 | /* file interface */ |
@@ -157,8 +264,9 @@ static int dtl_file_release(struct inode *inode, struct file *filp) | |||
157 | static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len, | 264 | static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len, |
158 | loff_t *pos) | 265 | loff_t *pos) |
159 | { | 266 | { |
160 | int rc, cur_idx, last_idx, n_read, n_req, read_size; | 267 | long int rc, n_read, n_req, read_size; |
161 | struct dtl *dtl; | 268 | struct dtl *dtl; |
269 | u64 cur_idx, last_idx, i; | ||
162 | 270 | ||
163 | if ((len % sizeof(struct dtl_entry)) != 0) | 271 | if ((len % sizeof(struct dtl_entry)) != 0) |
164 | return -EINVAL; | 272 | return -EINVAL; |
@@ -171,41 +279,48 @@ static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len, | |||
171 | /* actual number of entries read */ | 279 | /* actual number of entries read */ |
172 | n_read = 0; | 280 | n_read = 0; |
173 | 281 | ||
174 | cur_idx = lppaca[dtl->cpu].dtl_idx; | 282 | spin_lock(&dtl->lock); |
283 | |||
284 | cur_idx = dtl_current_index(dtl); | ||
175 | last_idx = dtl->last_idx; | 285 | last_idx = dtl->last_idx; |
176 | 286 | ||
177 | if (cur_idx - last_idx > dtl->buf_entries) { | 287 | if (last_idx + dtl->buf_entries <= cur_idx) |
178 | pr_debug("%s: hv buffer overflow for cpu %d, samples lost\n", | 288 | last_idx = cur_idx - dtl->buf_entries + 1; |
179 | __func__, dtl->cpu); | 289 | |
180 | } | 290 | if (last_idx + n_req > cur_idx) |
291 | n_req = cur_idx - last_idx; | ||
292 | |||
293 | if (n_req > 0) | ||
294 | dtl->last_idx = last_idx + n_req; | ||
295 | |||
296 | spin_unlock(&dtl->lock); | ||
297 | |||
298 | if (n_req <= 0) | ||
299 | return 0; | ||
181 | 300 | ||
182 | cur_idx %= dtl->buf_entries; | 301 | i = last_idx % dtl->buf_entries; |
183 | last_idx %= dtl->buf_entries; | ||
184 | 302 | ||
185 | /* read the tail of the buffer if we've wrapped */ | 303 | /* read the tail of the buffer if we've wrapped */ |
186 | if (last_idx > cur_idx) { | 304 | if (i + n_req > dtl->buf_entries) { |
187 | read_size = min(n_req, dtl->buf_entries - last_idx); | 305 | read_size = dtl->buf_entries - i; |
188 | 306 | ||
189 | rc = copy_to_user(buf, &dtl->buf[last_idx], | 307 | rc = copy_to_user(buf, &dtl->buf[i], |
190 | read_size * sizeof(struct dtl_entry)); | 308 | read_size * sizeof(struct dtl_entry)); |
191 | if (rc) | 309 | if (rc) |
192 | return -EFAULT; | 310 | return -EFAULT; |
193 | 311 | ||
194 | last_idx = 0; | 312 | i = 0; |
195 | n_req -= read_size; | 313 | n_req -= read_size; |
196 | n_read += read_size; | 314 | n_read += read_size; |
197 | buf += read_size * sizeof(struct dtl_entry); | 315 | buf += read_size * sizeof(struct dtl_entry); |
198 | } | 316 | } |
199 | 317 | ||
200 | /* .. and now the head */ | 318 | /* .. and now the head */ |
201 | read_size = min(n_req, cur_idx - last_idx); | 319 | rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry)); |
202 | rc = copy_to_user(buf, &dtl->buf[last_idx], | ||
203 | read_size * sizeof(struct dtl_entry)); | ||
204 | if (rc) | 320 | if (rc) |
205 | return -EFAULT; | 321 | return -EFAULT; |
206 | 322 | ||
207 | n_read += read_size; | 323 | n_read += n_req; |
208 | dtl->last_idx += n_read; | ||
209 | 324 | ||
210 | return n_read * sizeof(struct dtl_entry); | 325 | return n_read * sizeof(struct dtl_entry); |
211 | } | 326 | } |
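The reworked read path first clamps last_idx so it never trails cur_idx by more than a buffer's worth, then splits the copy at the wrap point. A worked example with hypothetical numbers (buf_entries = 8):

/*
 * cur_idx = 21, dtl->last_idx = 10, n_req = 6, buf_entries = 8
 *
 *   last_idx + buf_entries (18) <= cur_idx (21) -> last_idx = 21 - 8 + 1 = 14
 *   last_idx + n_req (20) is not > cur_idx (21) -> n_req stays 6
 *   dtl->last_idx becomes 14 + 6 = 20
 *   i = 14 % 8 = 6; i + n_req (12) > 8          -> copy entries 6..7 first,
 *   then wrap to i = 0 and copy the remaining 4 entries (0..3).
 */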
@@ -252,7 +367,7 @@ static int dtl_init(void) | |||
252 | 367 | ||
253 | event_mask_file = debugfs_create_x8("dtl_event_mask", 0600, | 368 | event_mask_file = debugfs_create_x8("dtl_event_mask", 0600, |
254 | dtl_dir, &dtl_event_mask); | 369 | dtl_dir, &dtl_event_mask); |
255 | buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0600, | 370 | buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400, |
256 | dtl_dir, &dtl_buf_entries); | 371 | dtl_dir, &dtl_buf_entries); |
257 | 372 | ||
258 | if (!event_mask_file || !buf_entries_file) { | 373 | if (!event_mask_file || !buf_entries_file) { |
@@ -263,6 +378,7 @@ static int dtl_init(void) | |||
263 | /* set up the per-cpu log structures */ | 378 | /* set up the per-cpu log structures */ |
264 | for_each_possible_cpu(i) { | 379 | for_each_possible_cpu(i) { |
265 | struct dtl *dtl = &per_cpu(cpu_dtl, i); | 380 | struct dtl *dtl = &per_cpu(cpu_dtl, i); |
381 | spin_lock_init(&dtl->lock); | ||
266 | dtl->cpu = i; | 382 | dtl->cpu = i; |
267 | 383 | ||
268 | rc = dtl_setup_file(dtl); | 384 | rc = dtl_setup_file(dtl); |
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index 34b7dc12e731..46b55cf563e3 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c | |||
@@ -21,8 +21,6 @@ | |||
21 | * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com> | 21 | * Please address comments and feedback to Linas Vepstas <linas@austin.ibm.com> |
22 | */ | 22 | */ |
23 | 23 | ||
24 | #undef DEBUG | ||
25 | |||
26 | #include <linux/delay.h> | 24 | #include <linux/delay.h> |
27 | #include <linux/init.h> | 25 | #include <linux/init.h> |
28 | #include <linux/list.h> | 26 | #include <linux/list.h> |
@@ -67,7 +65,7 @@ | |||
67 | * with EEH. | 65 | * with EEH. |
68 | * | 66 | * |
69 | * Ideally, a PCI device driver, when suspecting that an isolation | 67 | * Ideally, a PCI device driver, when suspecting that an isolation |
70 | * event has occured (e.g. by reading 0xff's), will then ask EEH | 68 | * event has occurred (e.g. by reading 0xff's), will then ask EEH |
71 | * whether this is the case, and then take appropriate steps to | 69 | * whether this is the case, and then take appropriate steps to |
72 | * reset the PCI slot, the PCI device, and then resume operations. | 70 | * reset the PCI slot, the PCI device, and then resume operations. |
73 | * However, until that day, the checking is done here, with the | 71 | * However, until that day, the checking is done here, with the |
@@ -95,6 +93,7 @@ static int ibm_slot_error_detail; | |||
95 | static int ibm_get_config_addr_info; | 93 | static int ibm_get_config_addr_info; |
96 | static int ibm_get_config_addr_info2; | 94 | static int ibm_get_config_addr_info2; |
97 | static int ibm_configure_bridge; | 95 | static int ibm_configure_bridge; |
96 | static int ibm_configure_pe; | ||
98 | 97 | ||
99 | int eeh_subsystem_enabled; | 98 | int eeh_subsystem_enabled; |
100 | EXPORT_SYMBOL(eeh_subsystem_enabled); | 99 | EXPORT_SYMBOL(eeh_subsystem_enabled); |
@@ -263,6 +262,8 @@ void eeh_slot_error_detail(struct pci_dn *pdn, int severity) | |||
263 | pci_regs_buf[0] = 0; | 262 | pci_regs_buf[0] = 0; |
264 | 263 | ||
265 | rtas_pci_enable(pdn, EEH_THAW_MMIO); | 264 | rtas_pci_enable(pdn, EEH_THAW_MMIO); |
265 | rtas_configure_bridge(pdn); | ||
266 | eeh_restore_bars(pdn); | ||
266 | loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN); | 267 | loglen = gather_pci_data(pdn, pci_regs_buf, EEH_PCI_REGS_LOG_LEN); |
267 | 268 | ||
268 | rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen); | 269 | rtas_slot_error_detail(pdn, severity, pci_regs_buf, loglen); |
@@ -450,6 +451,39 @@ void eeh_clear_slot (struct device_node *dn, int mode_flag) | |||
450 | raw_spin_unlock_irqrestore(&confirm_error_lock, flags); | 451 | raw_spin_unlock_irqrestore(&confirm_error_lock, flags); |
451 | } | 452 | } |
452 | 453 | ||
454 | void __eeh_set_pe_freset(struct device_node *parent, unsigned int *freset) | ||
455 | { | ||
456 | struct device_node *dn; | ||
457 | |||
458 | for_each_child_of_node(parent, dn) { | ||
459 | if (PCI_DN(dn)) { | ||
460 | |||
461 | struct pci_dev *dev = PCI_DN(dn)->pcidev; | ||
462 | |||
463 | if (dev && dev->driver) | ||
464 | *freset |= dev->needs_freset; | ||
465 | |||
466 | __eeh_set_pe_freset(dn, freset); | ||
467 | } | ||
468 | } | ||
469 | } | ||
470 | |||
471 | void eeh_set_pe_freset(struct device_node *dn, unsigned int *freset) | ||
472 | { | ||
473 | struct pci_dev *dev; | ||
474 | dn = find_device_pe(dn); | ||
475 | |||
476 | /* Back up one, since config addrs might be shared */ | ||
477 | if (!pcibios_find_pci_bus(dn) && PCI_DN(dn->parent)) | ||
478 | dn = dn->parent; | ||
479 | |||
480 | dev = PCI_DN(dn)->pcidev; | ||
481 | if (dev) | ||
482 | *freset |= dev->needs_freset; | ||
483 | |||
484 | __eeh_set_pe_freset(dn, freset); | ||
485 | } | ||
486 | |||
453 | /** | 487 | /** |
454 | * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze | 488 | * eeh_dn_check_failure - check if all 1's data is due to EEH slot freeze |
455 | * @dn device node | 489 | * @dn device node |
@@ -694,15 +728,24 @@ rtas_pci_slot_reset(struct pci_dn *pdn, int state) | |||
694 | if (pdn->eeh_pe_config_addr) | 728 | if (pdn->eeh_pe_config_addr) |
695 | config_addr = pdn->eeh_pe_config_addr; | 729 | config_addr = pdn->eeh_pe_config_addr; |
696 | 730 | ||
697 | rc = rtas_call(ibm_set_slot_reset,4,1, NULL, | 731 | rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL, |
698 | config_addr, | 732 | config_addr, |
699 | BUID_HI(pdn->phb->buid), | 733 | BUID_HI(pdn->phb->buid), |
700 | BUID_LO(pdn->phb->buid), | 734 | BUID_LO(pdn->phb->buid), |
701 | state); | 735 | state); |
702 | if (rc) | 736 | |
703 | printk (KERN_WARNING "EEH: Unable to reset the failed slot," | 737 | /* Fundamental-reset not supported on this PE, try hot-reset */ |
704 | " (%d) #RST=%d dn=%s\n", | 738 | if (rc == -8 && state == 3) { |
705 | rc, state, pdn->node->full_name); | 739 | rc = rtas_call(ibm_set_slot_reset, 4, 1, NULL, |
740 | config_addr, | ||
741 | BUID_HI(pdn->phb->buid), | ||
742 | BUID_LO(pdn->phb->buid), 1); | ||
743 | if (rc) | ||
744 | printk(KERN_WARNING | ||
745 | "EEH: Unable to reset the failed slot," | ||
746 | " #RST=%d dn=%s\n", | ||
747 | rc, pdn->node->full_name); | ||
748 | } | ||
706 | } | 749 | } |
707 | 750 | ||
708 | /** | 751 | /** |
@@ -738,18 +781,21 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat | |||
738 | /** | 781 | /** |
739 | * rtas_set_slot_reset -- assert the pci #RST line for 1/4 second | 782 | * rtas_set_slot_reset -- assert the pci #RST line for 1/4 second |
740 | * @pdn: pci device node to be reset. | 783 | * @pdn: pci device node to be reset. |
741 | * | ||
742 | * Return 0 if success, else a non-zero value. | ||
743 | */ | 784 | */ |
744 | 785 | ||
745 | static void __rtas_set_slot_reset(struct pci_dn *pdn) | 786 | static void __rtas_set_slot_reset(struct pci_dn *pdn) |
746 | { | 787 | { |
747 | struct pci_dev *dev = pdn->pcidev; | 788 | unsigned int freset = 0; |
748 | 789 | ||
749 | /* Determine type of EEH reset required by device, | 790 | /* Determine type of EEH reset required for |
750 | * default hot reset or fundamental reset | 791 | * Partitionable Endpoint, a hot-reset (1) |
751 | */ | 792 | * or a fundamental reset (3). |
752 | if (dev && dev->needs_freset) | 793 | * A fundamental reset required by any device under |
794 | * Partitionable Endpoint trumps hot-reset. | ||
795 | */ | ||
796 | eeh_set_pe_freset(pdn->node, &freset); | ||
797 | |||
798 | if (freset) | ||
753 | rtas_pci_slot_reset(pdn, 3); | 799 | rtas_pci_slot_reset(pdn, 3); |
754 | else | 800 | else |
755 | rtas_pci_slot_reset(pdn, 1); | 801 | rtas_pci_slot_reset(pdn, 1); |
@@ -878,7 +924,7 @@ void eeh_restore_bars(struct pci_dn *pdn) | |||
878 | * | 924 | * |
879 | * Save the values of the device bars. Unlike the restore | 925 | * Save the values of the device bars. Unlike the restore |
880 | * routine, this routine is *not* recursive. This is because | 926 | * routine, this routine is *not* recursive. This is because |
881 | * PCI devices are added individuallly; but, for the restore, | 927 | * PCI devices are added individually; but, for the restore, |
882 | * an entire slot is reset at a time. | 928 | * an entire slot is reset at a time. |
883 | */ | 929 | */ |
884 | static void eeh_save_bars(struct pci_dn *pdn) | 930 | static void eeh_save_bars(struct pci_dn *pdn) |
@@ -897,13 +943,20 @@ rtas_configure_bridge(struct pci_dn *pdn) | |||
897 | { | 943 | { |
898 | int config_addr; | 944 | int config_addr; |
899 | int rc; | 945 | int rc; |
946 | int token; | ||
900 | 947 | ||
901 | /* Use PE configuration address, if present */ | 948 | /* Use PE configuration address, if present */ |
902 | config_addr = pdn->eeh_config_addr; | 949 | config_addr = pdn->eeh_config_addr; |
903 | if (pdn->eeh_pe_config_addr) | 950 | if (pdn->eeh_pe_config_addr) |
904 | config_addr = pdn->eeh_pe_config_addr; | 951 | config_addr = pdn->eeh_pe_config_addr; |
905 | 952 | ||
906 | rc = rtas_call(ibm_configure_bridge,3,1, NULL, | 953 | /* Use new configure-pe function, if supported */ |
954 | if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) | ||
955 | token = ibm_configure_pe; | ||
956 | else | ||
957 | token = ibm_configure_bridge; | ||
958 | |||
959 | rc = rtas_call(token, 3, 1, NULL, | ||
907 | config_addr, | 960 | config_addr, |
908 | BUID_HI(pdn->phb->buid), | 961 | BUID_HI(pdn->phb->buid), |
909 | BUID_LO(pdn->phb->buid)); | 962 | BUID_LO(pdn->phb->buid)); |
@@ -1079,6 +1132,7 @@ void __init eeh_init(void) | |||
1079 | ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); | 1132 | ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info"); |
1080 | ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); | 1133 | ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2"); |
1081 | ibm_configure_bridge = rtas_token ("ibm,configure-bridge"); | 1134 | ibm_configure_bridge = rtas_token ("ibm,configure-bridge"); |
1135 | ibm_configure_pe = rtas_token("ibm,configure-pe"); | ||
1082 | 1136 | ||
1083 | if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) | 1137 | if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) |
1084 | return; | 1138 | return; |
diff --git a/arch/powerpc/platforms/pseries/eeh_driver.c b/arch/powerpc/platforms/pseries/eeh_driver.c index b8d70f5d9aa9..1b6cb10589e0 100644 --- a/arch/powerpc/platforms/pseries/eeh_driver.c +++ b/arch/powerpc/platforms/pseries/eeh_driver.c | |||
@@ -328,7 +328,7 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event) | |||
328 | struct pci_bus *frozen_bus; | 328 | struct pci_bus *frozen_bus; |
329 | int rc = 0; | 329 | int rc = 0; |
330 | enum pci_ers_result result = PCI_ERS_RESULT_NONE; | 330 | enum pci_ers_result result = PCI_ERS_RESULT_NONE; |
331 | const char *location, *pci_str, *drv_str; | 331 | const char *location, *pci_str, *drv_str, *bus_pci_str, *bus_drv_str; |
332 | 332 | ||
333 | frozen_dn = find_device_pe(event->dn); | 333 | frozen_dn = find_device_pe(event->dn); |
334 | if (!frozen_dn) { | 334 | if (!frozen_dn) { |
@@ -364,13 +364,8 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event) | |||
364 | frozen_pdn = PCI_DN(frozen_dn); | 364 | frozen_pdn = PCI_DN(frozen_dn); |
365 | frozen_pdn->eeh_freeze_count++; | 365 | frozen_pdn->eeh_freeze_count++; |
366 | 366 | ||
367 | if (frozen_pdn->pcidev) { | 367 | pci_str = eeh_pci_name(event->dev); |
368 | pci_str = pci_name (frozen_pdn->pcidev); | 368 | drv_str = pcid_name(event->dev); |
369 | drv_str = pcid_name (frozen_pdn->pcidev); | ||
370 | } else { | ||
371 | pci_str = eeh_pci_name(event->dev); | ||
372 | drv_str = pcid_name (event->dev); | ||
373 | } | ||
374 | 369 | ||
375 | if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES) | 370 | if (frozen_pdn->eeh_freeze_count > EEH_MAX_ALLOWED_FREEZES) |
376 | goto excess_failures; | 371 | goto excess_failures; |
@@ -378,8 +373,17 @@ struct pci_dn * handle_eeh_events (struct eeh_event *event) | |||
378 | printk(KERN_WARNING | 373 | printk(KERN_WARNING |
379 | "EEH: This PCI device has failed %d times in the last hour:\n", | 374 | "EEH: This PCI device has failed %d times in the last hour:\n", |
380 | frozen_pdn->eeh_freeze_count); | 375 | frozen_pdn->eeh_freeze_count); |
376 | |||
377 | if (frozen_pdn->pcidev) { | ||
378 | bus_pci_str = pci_name(frozen_pdn->pcidev); | ||
379 | bus_drv_str = pcid_name(frozen_pdn->pcidev); | ||
380 | printk(KERN_WARNING | ||
381 | "EEH: Bus location=%s driver=%s pci addr=%s\n", | ||
382 | location, bus_drv_str, bus_pci_str); | ||
383 | } | ||
384 | |||
381 | printk(KERN_WARNING | 385 | printk(KERN_WARNING |
382 | "EEH: location=%s driver=%s pci addr=%s\n", | 386 | "EEH: Device location=%s driver=%s pci addr=%s\n", |
383 | location, drv_str, pci_str); | 387 | location, drv_str, pci_str); |
384 | 388 | ||
385 | /* Walk the various device drivers attached to this slot through | 389 | /* Walk the various device drivers attached to this slot through |
diff --git a/arch/powerpc/platforms/pseries/eeh_sysfs.c b/arch/powerpc/platforms/pseries/eeh_sysfs.c index 15e13b568904..23982c7892d2 100644 --- a/arch/powerpc/platforms/pseries/eeh_sysfs.c +++ b/arch/powerpc/platforms/pseries/eeh_sysfs.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/pci.h> | 25 | #include <linux/pci.h> |
26 | #include <asm/ppc-pci.h> | 26 | #include <asm/ppc-pci.h> |
27 | #include <asm/pci-bridge.h> | 27 | #include <asm/pci-bridge.h> |
28 | #include <linux/kobject.h> | ||
29 | 28 | ||
30 | /** | 29 | /** |
31 | * EEH_SHOW_ATTR -- create sysfs entry for eeh statistic | 30 | * EEH_SHOW_ATTR -- create sysfs entry for eeh statistic |
diff --git a/arch/powerpc/platforms/pseries/firmware.c b/arch/powerpc/platforms/pseries/firmware.c index 0a14d8cd314f..0b0eff0cce35 100644 --- a/arch/powerpc/platforms/pseries/firmware.c +++ b/arch/powerpc/platforms/pseries/firmware.c | |||
@@ -55,6 +55,7 @@ firmware_features_table[FIRMWARE_MAX_FEATURES] = { | |||
55 | {FW_FEATURE_XDABR, "hcall-xdabr"}, | 55 | {FW_FEATURE_XDABR, "hcall-xdabr"}, |
56 | {FW_FEATURE_MULTITCE, "hcall-multi-tce"}, | 56 | {FW_FEATURE_MULTITCE, "hcall-multi-tce"}, |
57 | {FW_FEATURE_SPLPAR, "hcall-splpar"}, | 57 | {FW_FEATURE_SPLPAR, "hcall-splpar"}, |
58 | {FW_FEATURE_VPHN, "hcall-vphn"}, | ||
58 | }; | 59 | }; |
59 | 60 | ||
60 | /* Build up the firmware features bitmask using the contents of | 61 | /* Build up the firmware features bitmask using the contents of |
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c index fd50ccd4bac1..46f13a3c5d09 100644 --- a/arch/powerpc/platforms/pseries/hotplug-cpu.c +++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c | |||
@@ -19,6 +19,7 @@ | |||
19 | */ | 19 | */ |
20 | 20 | ||
21 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
22 | #include <linux/interrupt.h> | ||
22 | #include <linux/delay.h> | 23 | #include <linux/delay.h> |
23 | #include <linux/cpu.h> | 24 | #include <linux/cpu.h> |
24 | #include <asm/system.h> | 25 | #include <asm/system.h> |
@@ -28,7 +29,7 @@ | |||
28 | #include <asm/machdep.h> | 29 | #include <asm/machdep.h> |
29 | #include <asm/vdso_datapage.h> | 30 | #include <asm/vdso_datapage.h> |
30 | #include <asm/pSeries_reconfig.h> | 31 | #include <asm/pSeries_reconfig.h> |
31 | #include "xics.h" | 32 | #include <asm/xics.h> |
32 | #include "plpar_wrappers.h" | 33 | #include "plpar_wrappers.h" |
33 | #include "offline_states.h" | 34 | #include "offline_states.h" |
34 | 35 | ||
@@ -216,7 +217,7 @@ static void pseries_cpu_die(unsigned int cpu) | |||
216 | cpu, pcpu, cpu_status); | 217 | cpu, pcpu, cpu_status); |
217 | } | 218 | } |
218 | 219 | ||
219 | /* Isolation and deallocation are definatly done by | 220 | /* Isolation and deallocation are definitely done by |
220 | * drslot_chrp_cpu. If they were not they would be | 221 | * drslot_chrp_cpu. If they were not they would be |
221 | * done here. Change isolate state to Isolate and | 222 | * done here. Change isolate state to Isolate and |
222 | * change allocation-state to Unusable. | 223 | * change allocation-state to Unusable. |
@@ -280,7 +281,7 @@ static int pseries_add_processor(struct device_node *np) | |||
280 | } | 281 | } |
281 | 282 | ||
282 | for_each_cpu(cpu, tmp) { | 283 | for_each_cpu(cpu, tmp) { |
283 | BUG_ON(cpumask_test_cpu(cpu, cpu_present_mask)); | 284 | BUG_ON(cpu_present(cpu)); |
284 | set_cpu_present(cpu, true); | 285 | set_cpu_present(cpu, true); |
285 | set_hard_smp_processor_id(cpu, *intserv++); | 286 | set_hard_smp_processor_id(cpu, *intserv++); |
286 | } | 287 | } |
diff --git a/arch/powerpc/platforms/pseries/hotplug-memory.c b/arch/powerpc/platforms/pseries/hotplug-memory.c index bc8803664140..9d6a8effeda2 100644 --- a/arch/powerpc/platforms/pseries/hotplug-memory.c +++ b/arch/powerpc/platforms/pseries/hotplug-memory.c | |||
@@ -12,11 +12,67 @@ | |||
12 | #include <linux/of.h> | 12 | #include <linux/of.h> |
13 | #include <linux/memblock.h> | 13 | #include <linux/memblock.h> |
14 | #include <linux/vmalloc.h> | 14 | #include <linux/vmalloc.h> |
15 | #include <linux/memory.h> | ||
16 | |||
15 | #include <asm/firmware.h> | 17 | #include <asm/firmware.h> |
16 | #include <asm/machdep.h> | 18 | #include <asm/machdep.h> |
17 | #include <asm/pSeries_reconfig.h> | 19 | #include <asm/pSeries_reconfig.h> |
18 | #include <asm/sparsemem.h> | 20 | #include <asm/sparsemem.h> |
19 | 21 | ||
22 | static unsigned long get_memblock_size(void) | ||
23 | { | ||
24 | struct device_node *np; | ||
25 | unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE; | ||
26 | struct resource r; | ||
27 | |||
28 | np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); | ||
29 | if (np) { | ||
30 | const __be64 *size; | ||
31 | |||
32 | size = of_get_property(np, "ibm,lmb-size", NULL); | ||
33 | if (size) | ||
34 | memblock_size = be64_to_cpup(size); | ||
35 | of_node_put(np); | ||
36 | } else if (machine_is(pseries)) { | ||
37 | /* This fallback really only applies to pseries */ | ||
38 | unsigned int memzero_size = 0; | ||
39 | |||
40 | np = of_find_node_by_path("/memory@0"); | ||
41 | if (np) { | ||
42 | if (!of_address_to_resource(np, 0, &r)) | ||
43 | memzero_size = resource_size(&r); | ||
44 | of_node_put(np); | ||
45 | } | ||
46 | |||
47 | if (memzero_size) { | ||
48 | /* We now know the size of memory@0, use this to find | ||
49 | * the first memoryblock and get its size. | ||
50 | */ | ||
51 | char buf[64]; | ||
52 | |||
53 | sprintf(buf, "/memory@%x", memzero_size); | ||
54 | np = of_find_node_by_path(buf); | ||
55 | if (np) { | ||
56 | if (!of_address_to_resource(np, 0, &r)) | ||
57 | memblock_size = resource_size(&r); | ||
58 | of_node_put(np); | ||
59 | } | ||
60 | } | ||
61 | } | ||
62 | return memblock_size; | ||
63 | } | ||
64 | |||
65 | /* WARNING: This is going to override the generic definition whenever | ||
66 | * pseries is built-in regardless of what platform is active at boot | ||
67 | * time. This is fine for now as this is the only "option" and it | ||
68 | * should work everywhere. If not, we'll have to turn this into a | ||
69 | * ppc_md. callback | ||
70 | */ | ||
71 | unsigned long memory_block_size_bytes(void) | ||
72 | { | ||
73 | return get_memblock_size(); | ||
74 | } | ||
75 | |||
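The fallback path above derives the memory block size from the device tree by using the size of /memory@0 as the address of the next memory node. A worked example with hypothetical sizes:

/*
 * If /memory@0 reports a 0x10000000-byte (256MB) resource, the code builds
 * the path "/memory@10000000" via sprintf(buf, "/memory@%x", memzero_size)
 * and, if that node exists, the size of its first resource becomes the
 * memblock size returned by memory_block_size_bytes().
 */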
20 | static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) | 76 | static int pseries_remove_memblock(unsigned long base, unsigned int memblock_size) |
21 | { | 77 | { |
22 | unsigned long start, start_pfn; | 78 | unsigned long start, start_pfn; |
@@ -127,30 +183,22 @@ static int pseries_add_memory(struct device_node *np) | |||
127 | 183 | ||
128 | static int pseries_drconf_memory(unsigned long *base, unsigned int action) | 184 | static int pseries_drconf_memory(unsigned long *base, unsigned int action) |
129 | { | 185 | { |
130 | struct device_node *np; | 186 | unsigned long memblock_size; |
131 | const unsigned long *lmb_size; | ||
132 | int rc; | 187 | int rc; |
133 | 188 | ||
134 | np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory"); | 189 | memblock_size = get_memblock_size(); |
135 | if (!np) | 190 | if (!memblock_size) |
136 | return -EINVAL; | 191 | return -EINVAL; |
137 | 192 | ||
138 | lmb_size = of_get_property(np, "ibm,lmb-size", NULL); | ||
139 | if (!lmb_size) { | ||
140 | of_node_put(np); | ||
141 | return -EINVAL; | ||
142 | } | ||
143 | |||
144 | if (action == PSERIES_DRCONF_MEM_ADD) { | 193 | if (action == PSERIES_DRCONF_MEM_ADD) { |
145 | rc = memblock_add(*base, *lmb_size); | 194 | rc = memblock_add(*base, memblock_size); |
146 | rc = (rc < 0) ? -EINVAL : 0; | 195 | rc = (rc < 0) ? -EINVAL : 0; |
147 | } else if (action == PSERIES_DRCONF_MEM_REMOVE) { | 196 | } else if (action == PSERIES_DRCONF_MEM_REMOVE) { |
148 | rc = pseries_remove_memblock(*base, *lmb_size); | 197 | rc = pseries_remove_memblock(*base, memblock_size); |
149 | } else { | 198 | } else { |
150 | rc = -EINVAL; | 199 | rc = -EINVAL; |
151 | } | 200 | } |
152 | 201 | ||
153 | of_node_put(np); | ||
154 | return rc; | 202 | return rc; |
155 | } | 203 | } |
156 | 204 | ||
diff --git a/arch/powerpc/platforms/pseries/hvCall.S b/arch/powerpc/platforms/pseries/hvCall.S index 48d20573e4de..fd05fdee576a 100644 --- a/arch/powerpc/platforms/pseries/hvCall.S +++ b/arch/powerpc/platforms/pseries/hvCall.S | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <asm/processor.h> | 11 | #include <asm/processor.h> |
12 | #include <asm/ppc_asm.h> | 12 | #include <asm/ppc_asm.h> |
13 | #include <asm/asm-offsets.h> | 13 | #include <asm/asm-offsets.h> |
14 | #include <asm/ptrace.h> | ||
14 | 15 | ||
15 | #define STK_PARM(i) (48 + ((i)-3)*8) | 16 | #define STK_PARM(i) (48 + ((i)-3)*8) |
16 | 17 | ||
diff --git a/arch/powerpc/platforms/pseries/hvCall_inst.c b/arch/powerpc/platforms/pseries/hvCall_inst.c index e19ff021e711..f106662f4381 100644 --- a/arch/powerpc/platforms/pseries/hvCall_inst.c +++ b/arch/powerpc/platforms/pseries/hvCall_inst.c | |||
@@ -55,7 +55,7 @@ static void hc_stop(struct seq_file *m, void *p) | |||
55 | static int hc_show(struct seq_file *m, void *p) | 55 | static int hc_show(struct seq_file *m, void *p) |
56 | { | 56 | { |
57 | unsigned long h_num = (unsigned long)p; | 57 | unsigned long h_num = (unsigned long)p; |
58 | struct hcall_stats *hs = (struct hcall_stats *)m->private; | 58 | struct hcall_stats *hs = m->private; |
59 | 59 | ||
60 | if (hs[h_num].num_calls) { | 60 | if (hs[h_num].num_calls) { |
61 | if (cpu_has_feature(CPU_FTR_PURR)) | 61 | if (cpu_has_feature(CPU_FTR_PURR)) |
diff --git a/arch/powerpc/platforms/pseries/io_event_irq.c b/arch/powerpc/platforms/pseries/io_event_irq.c new file mode 100644 index 000000000000..c829e6067d54 --- /dev/null +++ b/arch/powerpc/platforms/pseries/io_event_irq.c | |||
@@ -0,0 +1,231 @@ | |||
1 | /* | ||
2 | * Copyright 2010 2011 Mark Nelson and Tseng-Hui (Frank) Lin, IBM Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/errno.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/irq.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/of.h> | ||
16 | #include <linux/list.h> | ||
17 | #include <linux/notifier.h> | ||
18 | |||
19 | #include <asm/machdep.h> | ||
20 | #include <asm/rtas.h> | ||
21 | #include <asm/irq.h> | ||
22 | #include <asm/io_event_irq.h> | ||
23 | |||
24 | #include "pseries.h" | ||
25 | |||
26 | /* | ||
27 | * IO event interrupt is a mechanism provided by RTAS to return | ||
28 | * information about hardware error and non-error events. Device | ||
29 | * drivers can register their event handlers to receive events. | ||
30 | * Device drivers are expected to use atomic_notifier_chain_register() | ||
31 | * and atomic_notifier_chain_unregister() to register and unregister | ||
32 | * their event handlers. Since multiple IO event types and scopes | ||
33 | * share an IO event interrupt, the event handlers are called one | ||
34 | * by one until the IO event is claimed by one of the handlers. | ||
35 | * The event handlers are expected to return NOTIFY_OK if the | ||
36 | * event is handled by the event handler or NOTIFY_DONE if the | ||
37 | * event does not belong to the handler. | ||
38 | * | ||
39 | * Usage: | ||
40 | * | ||
41 | * Notifier function: | ||
42 | * #include <asm/io_event_irq.h> | ||
43 | * int event_handler(struct notifier_block *nb, unsigned long val, void *data) { | ||
44 | * p = (struct pseries_io_event *) data; | ||
45 | * if (! is_my_event(p->scope, p->event_type)) return NOTIFY_DONE; | ||
46 | * : | ||
47 | * : | ||
48 | * return NOTIFY_OK; | ||
49 | * } | ||
50 | * struct notifier_block event_nb = { | ||
51 | * .notifier_call = event_handler, | ||
52 | * } | ||
53 | * | ||
54 | * Registration: | ||
55 | * atomic_notifier_chain_register(&pseries_ioei_notifier_list, &event_nb); | ||
56 | * | ||
57 | * Unregistration: | ||
58 | * atomic_notifier_chain_unregister(&pseries_ioei_notifier_list, &event_nb); | ||
59 | */ | ||
60 | |||
61 | ATOMIC_NOTIFIER_HEAD(pseries_ioei_notifier_list); | ||
62 | EXPORT_SYMBOL_GPL(pseries_ioei_notifier_list); | ||
63 | |||
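The usage outline in the comment above can be expanded into a self-contained sketch. The handler name and the scope/type values are illustrative only; just the notifier-chain calls and the pseries_io_event fields referenced by the comment come from this file:

#include <linux/notifier.h>
#include <asm/io_event_irq.h>

/* Hypothetical client: claim only events with a made-up scope/type pair. */
static int example_io_event_handler(struct notifier_block *nb,
				    unsigned long val, void *data)
{
	struct pseries_io_event *p = data;

	if (p->scope != 0x3B || p->event_type != 0x01)	/* example values */
		return NOTIFY_DONE;	/* not ours; let the next handler look */

	/* ... handle the event ... */
	return NOTIFY_OK;		/* claimed */
}

static struct notifier_block example_io_event_nb = {
	.notifier_call = example_io_event_handler,
};

/* In the client's init/exit paths:
 *   atomic_notifier_chain_register(&pseries_ioei_notifier_list, &example_io_event_nb);
 *   atomic_notifier_chain_unregister(&pseries_ioei_notifier_list, &example_io_event_nb);
 */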
64 | static int ioei_check_exception_token; | ||
65 | |||
66 | /* pSeries event log format */ | ||
67 | |||
68 | /* Two bytes ASCII section IDs */ | ||
69 | #define PSERIES_ELOG_SECT_ID_PRIV_HDR (('P' << 8) | 'H') | ||
70 | #define PSERIES_ELOG_SECT_ID_USER_HDR (('U' << 8) | 'H') | ||
71 | #define PSERIES_ELOG_SECT_ID_PRIMARY_SRC (('P' << 8) | 'S') | ||
72 | #define PSERIES_ELOG_SECT_ID_EXTENDED_UH (('E' << 8) | 'H') | ||
73 | #define PSERIES_ELOG_SECT_ID_FAILING_MTMS (('M' << 8) | 'T') | ||
74 | #define PSERIES_ELOG_SECT_ID_SECONDARY_SRC (('S' << 8) | 'S') | ||
75 | #define PSERIES_ELOG_SECT_ID_DUMP_LOCATOR (('D' << 8) | 'H') | ||
76 | #define PSERIES_ELOG_SECT_ID_FW_ERROR (('S' << 8) | 'W') | ||
77 | #define PSERIES_ELOG_SECT_ID_IMPACT_PART_ID (('L' << 8) | 'P') | ||
78 | #define PSERIES_ELOG_SECT_ID_LOGIC_RESOURCE_ID (('L' << 8) | 'R') | ||
79 | #define PSERIES_ELOG_SECT_ID_HMC_ID (('H' << 8) | 'M') | ||
80 | #define PSERIES_ELOG_SECT_ID_EPOW (('E' << 8) | 'P') | ||
81 | #define PSERIES_ELOG_SECT_ID_IO_EVENT (('I' << 8) | 'E') | ||
82 | #define PSERIES_ELOG_SECT_ID_MANUFACT_INFO (('M' << 8) | 'I') | ||
83 | #define PSERIES_ELOG_SECT_ID_CALL_HOME (('C' << 8) | 'H') | ||
84 | #define PSERIES_ELOG_SECT_ID_USER_DEF (('U' << 8) | 'D') | ||
85 | |||
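Each section ID above packs two ASCII characters into a 16-bit constant; for example, the IO Event ID works out to:

/* ('I' << 8) | 'E'  ==  (0x49 << 8) | 0x45  ==  0x4945 */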
86 | /* Vendor specific Platform Event Log Format, Version 6, section header */ | ||
87 | struct pseries_elog_section { | ||
88 | uint16_t id; /* 0x00 2-byte ASCII section ID */ | ||
89 | uint16_t length; /* 0x02 Section length in bytes */ | ||
90 | uint8_t version; /* 0x04 Section version */ | ||
91 | uint8_t subtype; /* 0x05 Section subtype */ | ||
92 | uint16_t creator_component; /* 0x06 Creator component ID */ | ||
93 | uint8_t data[]; /* 0x08 Start of section data */ | ||
94 | }; | ||
95 | |||
96 | static char ioei_rtas_buf[RTAS_DATA_BUF_SIZE] __cacheline_aligned; | ||
97 | |||
98 | /** | ||
99 | * Find data portion of a specific section in RTAS extended event log. | ||
100 | * @elog: RTAS error/event log. | ||
101 | * @sect_id: section ID. | ||
102 | * | ||
103 | * Return: | ||
104 | * pointer to the section data of the specified section | ||
105 | * NULL if not found | ||
106 | */ | ||
107 | static struct pseries_elog_section *find_xelog_section(struct rtas_error_log *elog, | ||
108 | uint16_t sect_id) | ||
109 | { | ||
110 | struct rtas_ext_event_log_v6 *xelog = | ||
111 | (struct rtas_ext_event_log_v6 *) elog->buffer; | ||
112 | struct pseries_elog_section *sect; | ||
113 | unsigned char *p, *log_end; | ||
114 | |||
115 | /* Check that we understand the format */ | ||
116 | if (elog->extended_log_length < sizeof(struct rtas_ext_event_log_v6) || | ||
117 | xelog->log_format != RTAS_V6EXT_LOG_FORMAT_EVENT_LOG || | ||
118 | xelog->company_id != RTAS_V6EXT_COMPANY_ID_IBM) | ||
119 | return NULL; | ||
120 | |||
121 | log_end = elog->buffer + elog->extended_log_length; | ||
122 | p = xelog->vendor_log; | ||
123 | while (p < log_end) { | ||
124 | sect = (struct pseries_elog_section *)p; | ||
125 | if (sect->id == sect_id) | ||
126 | return sect; | ||
127 | p += sect->length; | ||
128 | } | ||
129 | return NULL; | ||
130 | } | ||
131 | |||
132 | /** | ||
133 | * Find the data portion of an IO Event section from event log. | ||
134 | * @elog: RTAS error/event log. | ||
135 | * | ||
136 | * Return: | ||
137 | * pointer to a valid IO event section data. NULL if not found. | ||
138 | */ | ||
139 | static struct pseries_io_event * ioei_find_event(struct rtas_error_log *elog) | ||
140 | { | ||
141 | struct pseries_elog_section *sect; | ||
142 | |||
143 | /* We should only ever get called for io-event interrupts, but if | ||
144 | * we do get called for another type then something went wrong so | ||
145 | * make some noise about it. | ||
146 | * RTAS_TYPE_IO only exists in extended event log version 6 or later. | ||
147 | * No need to check event log version. | ||
148 | */ | ||
149 | if (unlikely(elog->type != RTAS_TYPE_IO)) { | ||
150 | printk_once(KERN_WARNING "io_event_irq: Unexpected event type %d", | ||
151 | elog->type); | ||
152 | return NULL; | ||
153 | } | ||
154 | |||
155 | sect = find_xelog_section(elog, PSERIES_ELOG_SECT_ID_IO_EVENT); | ||
156 | if (unlikely(!sect)) { | ||
157 | printk_once(KERN_WARNING "io_event_irq: RTAS extended event " | ||
158 | "log does not contain an IO Event section. " | ||
159 | "Could be a bug in system firmware!\n"); | ||
160 | return NULL; | ||
161 | } | ||
162 | return (struct pseries_io_event *) &sect->data; | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * PAPR: | ||
167 | * - check-exception returns the first found error or event and clears that | ||
168 | * error or event so it is reported once. | ||
169 | * - Each interrupt returns one event. If a platform chooses to report | ||
170 | * multiple events through a single interrupt, it must ensure that the | ||
171 | * interrupt remains asserted until check-exception has been used to | ||
172 | * process all outstanding events for that interrupt. | ||
173 | * | ||
174 | * Implementation notes: | ||
175 | * - Events must be processed in the order they are returned. Hence, | ||
176 | * sequential in nature. | ||
177 | * - The owner of an event is determined by combinations of scope, | ||
178 | * event type, and sub-type. There is no easy way to pre-sort clients | ||
179 | * by scope or event type alone. For example, Torrent ISR route change | ||
180 | * event is reported with scope 0x00 (Not Applicable) rather than | ||
181 | * 0x3B (Torrent-hub). It is better to let the clients identify | ||
182 | * who owns the event. | ||
183 | */ | ||
184 | |||
185 | static irqreturn_t ioei_interrupt(int irq, void *dev_id) | ||
186 | { | ||
187 | struct pseries_io_event *event; | ||
188 | int rtas_rc; | ||
189 | |||
190 | for (;;) { | ||
191 | rtas_rc = rtas_call(ioei_check_exception_token, 6, 1, NULL, | ||
192 | RTAS_VECTOR_EXTERNAL_INTERRUPT, | ||
193 | virq_to_hw(irq), | ||
194 | RTAS_IO_EVENTS, 1 /* Time Critical */, | ||
195 | __pa(ioei_rtas_buf), | ||
196 | RTAS_DATA_BUF_SIZE); | ||
197 | if (rtas_rc != 0) | ||
198 | break; | ||
199 | |||
200 | event = ioei_find_event((struct rtas_error_log *)ioei_rtas_buf); | ||
201 | if (!event) | ||
202 | continue; | ||
203 | |||
204 | atomic_notifier_call_chain(&pseries_ioei_notifier_list, | ||
205 | 0, event); | ||
206 | } | ||
207 | return IRQ_HANDLED; | ||
208 | } | ||
209 | |||
210 | static int __init ioei_init(void) | ||
211 | { | ||
212 | struct device_node *np; | ||
213 | |||
214 | ioei_check_exception_token = rtas_token("check-exception"); | ||
215 | if (ioei_check_exception_token == RTAS_UNKNOWN_SERVICE) { | ||
216 | pr_warning("IO Event IRQ not supported on this system !\n"); | ||
217 | return -ENODEV; | ||
218 | } | ||
219 | np = of_find_node_by_path("/event-sources/ibm,io-events"); | ||
220 | if (np) { | ||
221 | request_event_sources_irqs(np, ioei_interrupt, "IO_EVENT"); | ||
222 | of_node_put(np); | ||
223 | } else { | ||
224 | pr_err("io_event_irq: No ibm,io-events on system! " | ||
225 | "IO Event interrupt disabled.\n"); | ||
226 | return -ENODEV; | ||
227 | } | ||
228 | return 0; | ||
229 | } | ||
230 | machine_subsys_initcall(pseries, ioei_init); | ||
231 | |||
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index a77bcaed80af..01faab9456ca 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/pci.h> | 33 | #include <linux/pci.h> |
34 | #include <linux/dma-mapping.h> | 34 | #include <linux/dma-mapping.h> |
35 | #include <linux/crash_dump.h> | 35 | #include <linux/crash_dump.h> |
36 | #include <linux/memory.h> | ||
36 | #include <asm/io.h> | 37 | #include <asm/io.h> |
37 | #include <asm/prom.h> | 38 | #include <asm/prom.h> |
38 | #include <asm/rtas.h> | 39 | #include <asm/rtas.h> |
@@ -45,6 +46,7 @@ | |||
45 | #include <asm/tce.h> | 46 | #include <asm/tce.h> |
46 | #include <asm/ppc-pci.h> | 47 | #include <asm/ppc-pci.h> |
47 | #include <asm/udbg.h> | 48 | #include <asm/udbg.h> |
49 | #include <asm/mmzone.h> | ||
48 | 50 | ||
49 | #include "plpar_wrappers.h" | 51 | #include "plpar_wrappers.h" |
50 | 52 | ||
@@ -140,7 +142,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum, | |||
140 | return ret; | 142 | return ret; |
141 | } | 143 | } |
142 | 144 | ||
143 | static DEFINE_PER_CPU(u64 *, tce_page) = NULL; | 145 | static DEFINE_PER_CPU(u64 *, tce_page); |
144 | 146 | ||
145 | static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, | 147 | static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum, |
146 | long npages, unsigned long uaddr, | 148 | long npages, unsigned long uaddr, |
@@ -270,6 +272,152 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum) | |||
270 | return tce_ret; | 272 | return tce_ret; |
271 | } | 273 | } |
272 | 274 | ||
275 | /* this is compatible with cells for the device tree property */ | ||
276 | struct dynamic_dma_window_prop { | ||
277 | __be32 liobn; /* tce table number */ | ||
278 | __be64 dma_base; /* address hi,lo */ | ||
279 | __be32 tce_shift; /* ilog2(tce_page_size) */ | ||
280 | __be32 window_shift; /* ilog2(tce_window_size) */ | ||
281 | }; | ||
282 | |||
283 | struct direct_window { | ||
284 | struct device_node *device; | ||
285 | const struct dynamic_dma_window_prop *prop; | ||
286 | struct list_head list; | ||
287 | }; | ||
288 | |||
289 | /* Dynamic DMA Window support */ | ||
290 | struct ddw_query_response { | ||
291 | u32 windows_available; | ||
292 | u32 largest_available_block; | ||
293 | u32 page_size; | ||
294 | u32 migration_capable; | ||
295 | }; | ||
296 | |||
297 | struct ddw_create_response { | ||
298 | u32 liobn; | ||
299 | u32 addr_hi; | ||
300 | u32 addr_lo; | ||
301 | }; | ||
302 | |||
303 | static LIST_HEAD(direct_window_list); | ||
304 | /* prevents races between memory on/offline and window creation */ | ||
305 | static DEFINE_SPINLOCK(direct_window_list_lock); | ||
306 | /* protects initializing window twice for same device */ | ||
307 | static DEFINE_MUTEX(direct_window_init_mutex); | ||
308 | #define DIRECT64_PROPNAME "linux,direct64-ddr-window-info" | ||
309 | |||
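Because struct dynamic_dma_window_prop mirrors the raw device-tree cells, each field has to be converted with the be32/be64 accessors before use, as remove_ddw() below does for liobn. A small hedged sketch of decoding the whole property (the helper name and the debug print are illustrative only, not part of this patch):

/* Illustrative decode of a DIRECT64_PROPNAME property value. */
static void ddw_prop_dump(const struct dynamic_dma_window_prop *dwp)
{
	pr_debug("ddw: liobn=%x dma_base=%llx tce_shift=%u window_shift=%u\n",
		 be32_to_cpu(dwp->liobn),
		 (unsigned long long)be64_to_cpu(dwp->dma_base),
		 be32_to_cpu(dwp->tce_shift),
		 be32_to_cpu(dwp->window_shift));
}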
310 | static int tce_clearrange_multi_pSeriesLP(unsigned long start_pfn, | ||
311 | unsigned long num_pfn, const void *arg) | ||
312 | { | ||
313 | const struct dynamic_dma_window_prop *maprange = arg; | ||
314 | int rc; | ||
315 | u64 tce_size, num_tce, dma_offset, next; | ||
316 | u32 tce_shift; | ||
317 | long limit; | ||
318 | |||
319 | tce_shift = be32_to_cpu(maprange->tce_shift); | ||
320 | tce_size = 1ULL << tce_shift; | ||
321 | next = start_pfn << PAGE_SHIFT; | ||
322 | num_tce = num_pfn << PAGE_SHIFT; | ||
323 | |||
324 | /* round back to the beginning of the tce page size */ | ||
325 | num_tce += next & (tce_size - 1); | ||
326 | next &= ~(tce_size - 1); | ||
327 | |||
328 | /* convert to number of tces */ | ||
329 | num_tce |= tce_size - 1; | ||
330 | num_tce >>= tce_shift; | ||
331 | |||
332 | do { | ||
333 | /* | ||
334 | * Set up the page with TCE data, looping through and setting | ||
335 | * the values. | ||
336 | */ | ||
337 | limit = min_t(long, num_tce, 512); | ||
338 | dma_offset = next + be64_to_cpu(maprange->dma_base); | ||
339 | |||
340 | rc = plpar_tce_stuff((u64)be32_to_cpu(maprange->liobn), | ||
341 | dma_offset, | ||
342 | 0, limit); | ||
343 | num_tce -= limit; | ||
344 | } while (num_tce > 0 && !rc); | ||
345 | |||
346 | return rc; | ||
347 | } | ||
348 | |||
349 | static int tce_setrange_multi_pSeriesLP(unsigned long start_pfn, | ||
350 | unsigned long num_pfn, const void *arg) | ||
351 | { | ||
352 | const struct dynamic_dma_window_prop *maprange = arg; | ||
353 | u64 *tcep, tce_size, num_tce, dma_offset, next, proto_tce, liobn; | ||
354 | u32 tce_shift; | ||
355 | u64 rc = 0; | ||
356 | long l, limit; | ||
357 | |||
358 | local_irq_disable(); /* to protect tcep and the page behind it */ | ||
359 | tcep = __get_cpu_var(tce_page); | ||
360 | |||
361 | if (!tcep) { | ||
362 | tcep = (u64 *)__get_free_page(GFP_ATOMIC); | ||
363 | if (!tcep) { | ||
364 | local_irq_enable(); | ||
365 | return -ENOMEM; | ||
366 | } | ||
367 | __get_cpu_var(tce_page) = tcep; | ||
368 | } | ||
369 | |||
370 | proto_tce = TCE_PCI_READ | TCE_PCI_WRITE; | ||
371 | |||
372 | liobn = (u64)be32_to_cpu(maprange->liobn); | ||
373 | tce_shift = be32_to_cpu(maprange->tce_shift); | ||
374 | tce_size = 1ULL << tce_shift; | ||
375 | next = start_pfn << PAGE_SHIFT; | ||
376 | num_tce = num_pfn << PAGE_SHIFT; | ||
377 | |||
378 | /* round back to the beginning of the tce page size */ | ||
379 | num_tce += next & (tce_size - 1); | ||
380 | next &= ~(tce_size - 1); | ||
381 | |||
382 | /* convert to number of tces */ | ||
383 | num_tce |= tce_size - 1; | ||
384 | num_tce >>= tce_shift; | ||
385 | |||
386 | /* We can map max one pageful of TCEs at a time */ | ||
387 | do { | ||
388 | /* | ||
389 | * Set up the page with TCE data, looping through and setting | ||
390 | * the values. | ||
391 | */ | ||
392 | limit = min_t(long, num_tce, 4096/TCE_ENTRY_SIZE); | ||
393 | dma_offset = next + be64_to_cpu(maprange->dma_base); | ||
394 | |||
395 | for (l = 0; l < limit; l++) { | ||
396 | tcep[l] = proto_tce | next; | ||
397 | next += tce_size; | ||
398 | } | ||
399 | |||
400 | rc = plpar_tce_put_indirect(liobn, | ||
401 | dma_offset, | ||
402 | (u64)virt_to_abs(tcep), | ||
403 | limit); | ||
404 | |||
405 | num_tce -= limit; | ||
406 | } while (num_tce > 0 && !rc); | ||
407 | |||
408 | /* error cleanup: caller will clear whole range */ | ||
409 | |||
410 | local_irq_enable(); | ||
411 | return rc; | ||
412 | } | ||
413 | |||
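The rounding at the top of both mapping helpers turns an arbitrary pfn range into whole TCE pages. A worked example with hypothetical shifts (64K kernel pages, 4K TCE pages):

/*
 * PAGE_SHIFT = 16, tce_shift = 12, tce_size = 0x1000
 * start_pfn = 3, num_pfn = 2:
 *   next    = 3 << 16 = 0x30000        num_tce = 2 << 16 = 0x20000 bytes
 *   next & (tce_size - 1) = 0          (already TCE-aligned, nothing to round)
 *   num_tce |= 0xfff -> 0x20fff;  >>= 12  ->  0x20 = 32 TCEs
 * i.e. 32 4K TCEs cover the two 64K pages exactly.
 */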
414 | static int tce_setrange_multi_pSeriesLP_walk(unsigned long start_pfn, | ||
415 | unsigned long num_pfn, void *arg) | ||
416 | { | ||
417 | return tce_setrange_multi_pSeriesLP(start_pfn, num_pfn, arg); | ||
418 | } | ||
419 | |||
420 | |||
273 | #ifdef CONFIG_PCI | 421 | #ifdef CONFIG_PCI |
274 | static void iommu_table_setparms(struct pci_controller *phb, | 422 | static void iommu_table_setparms(struct pci_controller *phb, |
275 | struct device_node *dn, | 423 | struct device_node *dn, |
@@ -323,14 +471,13 @@ static void iommu_table_setparms(struct pci_controller *phb, | |||
323 | static void iommu_table_setparms_lpar(struct pci_controller *phb, | 471 | static void iommu_table_setparms_lpar(struct pci_controller *phb, |
324 | struct device_node *dn, | 472 | struct device_node *dn, |
325 | struct iommu_table *tbl, | 473 | struct iommu_table *tbl, |
326 | const void *dma_window, | 474 | const void *dma_window) |
327 | int bussubno) | ||
328 | { | 475 | { |
329 | unsigned long offset, size; | 476 | unsigned long offset, size; |
330 | 477 | ||
331 | tbl->it_busno = bussubno; | ||
332 | of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size); | 478 | of_parse_dma_window(dn, dma_window, &tbl->it_index, &offset, &size); |
333 | 479 | ||
480 | tbl->it_busno = phb->bus->number; | ||
334 | tbl->it_base = 0; | 481 | tbl->it_base = 0; |
335 | tbl->it_blocksize = 16; | 482 | tbl->it_blocksize = 16; |
336 | tbl->it_type = TCE_PCI; | 483 | tbl->it_type = TCE_PCI; |
@@ -450,14 +597,10 @@ static void pci_dma_bus_setup_pSeriesLP(struct pci_bus *bus) | |||
450 | if (!ppci->iommu_table) { | 597 | if (!ppci->iommu_table) { |
451 | tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, | 598 | tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, |
452 | ppci->phb->node); | 599 | ppci->phb->node); |
453 | iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window, | 600 | iommu_table_setparms_lpar(ppci->phb, pdn, tbl, dma_window); |
454 | bus->number); | ||
455 | ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node); | 601 | ppci->iommu_table = iommu_init_table(tbl, ppci->phb->node); |
456 | pr_debug(" created table: %p\n", ppci->iommu_table); | 602 | pr_debug(" created table: %p\n", ppci->iommu_table); |
457 | } | 603 | } |
458 | |||
459 | if (pdn != dn) | ||
460 | PCI_DN(dn)->iommu_table = ppci->iommu_table; | ||
461 | } | 604 | } |
462 | 605 | ||
463 | 606 | ||
@@ -500,6 +643,334 @@ static void pci_dma_dev_setup_pSeries(struct pci_dev *dev) | |||
500 | pci_name(dev)); | 643 | pci_name(dev)); |
501 | } | 644 | } |
502 | 645 | ||
646 | static int __read_mostly disable_ddw; | ||
647 | |||
648 | static int __init disable_ddw_setup(char *str) | ||
649 | { | ||
650 | disable_ddw = 1; | ||
651 | printk(KERN_INFO "ppc iommu: disabling ddw.\n"); | ||
652 | |||
653 | return 0; | ||
654 | } | ||
655 | |||
656 | early_param("disable_ddw", disable_ddw_setup); | ||
657 | |||
658 | static void remove_ddw(struct device_node *np) | ||
659 | { | ||
660 | struct dynamic_dma_window_prop *dwp; | ||
661 | struct property *win64; | ||
662 | const u32 *ddw_avail; | ||
663 | u64 liobn; | ||
664 | int len, ret; | ||
665 | |||
666 | ddw_avail = of_get_property(np, "ibm,ddw-applicable", &len); | ||
667 | win64 = of_find_property(np, DIRECT64_PROPNAME, NULL); | ||
668 | if (!win64) | ||
669 | return; | ||
670 | |||
671 | if (!ddw_avail || len < 3 * sizeof(u32) || win64->length < sizeof(*dwp)) | ||
672 | goto delprop; | ||
673 | |||
674 | dwp = win64->value; | ||
675 | liobn = (u64)be32_to_cpu(dwp->liobn); | ||
676 | |||
677 | /* clear the whole window, note the arg is in kernel pages */ | ||
678 | ret = tce_clearrange_multi_pSeriesLP(0, | ||
679 | 1ULL << (be32_to_cpu(dwp->window_shift) - PAGE_SHIFT), dwp); | ||
680 | if (ret) | ||
681 | pr_warning("%s failed to clear tces in window.\n", | ||
682 | np->full_name); | ||
683 | else | ||
684 | pr_debug("%s successfully cleared tces in window.\n", | ||
685 | np->full_name); | ||
686 | |||
687 | ret = rtas_call(ddw_avail[2], 1, 1, NULL, liobn); | ||
688 | if (ret) | ||
689 | pr_warning("%s: failed to remove direct window: rtas returned " | ||
690 | "%d to ibm,remove-pe-dma-window(%x) %llx\n", | ||
691 | np->full_name, ret, ddw_avail[2], liobn); | ||
692 | else | ||
693 | pr_debug("%s: successfully removed direct window: rtas returned " | ||
694 | "%d to ibm,remove-pe-dma-window(%x) %llx\n", | ||
695 | np->full_name, ret, ddw_avail[2], liobn); | ||
696 | |||
697 | delprop: | ||
698 | ret = prom_remove_property(np, win64); | ||
699 | if (ret) | ||
700 | pr_warning("%s: failed to remove direct window property: %d\n", | ||
701 | np->full_name, ret); | ||
702 | } | ||
703 | |||
704 | static u64 find_existing_ddw(struct device_node *pdn) | ||
705 | { | ||
706 | struct direct_window *window; | ||
707 | const struct dynamic_dma_window_prop *direct64; | ||
708 | u64 dma_addr = 0; | ||
709 | |||
710 | spin_lock(&direct_window_list_lock); | ||
711 | /* check if we already created a window and dupe that config if so */ | ||
712 | list_for_each_entry(window, &direct_window_list, list) { | ||
713 | if (window->device == pdn) { | ||
714 | direct64 = window->prop; | ||
715 | dma_addr = direct64->dma_base; | ||
716 | break; | ||
717 | } | ||
718 | } | ||
719 | spin_unlock(&direct_window_list_lock); | ||
720 | |||
721 | return dma_addr; | ||
722 | } | ||
723 | |||
724 | static int find_existing_ddw_windows(void) | ||
725 | { | ||
726 | int len; | ||
727 | struct device_node *pdn; | ||
728 | struct direct_window *window; | ||
729 | const struct dynamic_dma_window_prop *direct64; | ||
730 | |||
731 | if (!firmware_has_feature(FW_FEATURE_LPAR)) | ||
732 | return 0; | ||
733 | |||
734 | for_each_node_with_property(pdn, DIRECT64_PROPNAME) { | ||
735 | direct64 = of_get_property(pdn, DIRECT64_PROPNAME, &len); | ||
736 | if (!direct64) | ||
737 | continue; | ||
738 | |||
739 | window = kzalloc(sizeof(*window), GFP_KERNEL); | ||
740 | if (!window || len < sizeof(struct dynamic_dma_window_prop)) { | ||
741 | kfree(window); | ||
742 | remove_ddw(pdn); | ||
743 | continue; | ||
744 | } | ||
745 | |||
746 | window->device = pdn; | ||
747 | window->prop = direct64; | ||
748 | spin_lock(&direct_window_list_lock); | ||
749 | list_add(&window->list, &direct_window_list); | ||
750 | spin_unlock(&direct_window_list_lock); | ||
751 | } | ||
752 | |||
753 | return 0; | ||
754 | } | ||
755 | machine_arch_initcall(pseries, find_existing_ddw_windows); | ||
756 | |||
757 | static int query_ddw(struct pci_dev *dev, const u32 *ddw_avail, | ||
758 | struct ddw_query_response *query) | ||
759 | { | ||
760 | struct device_node *dn; | ||
761 | struct pci_dn *pcidn; | ||
762 | u32 cfg_addr; | ||
763 | u64 buid; | ||
764 | int ret; | ||
765 | |||
766 | /* | ||
767 | * Get the config address and phb buid of the PE window. | ||
768 | * Rely on eeh to retrieve this for us. | ||
769 | * Retrieve them from the pci device, not the node with the | ||
770 | * dma-window property | ||
771 | */ | ||
772 | dn = pci_device_to_OF_node(dev); | ||
773 | pcidn = PCI_DN(dn); | ||
774 | cfg_addr = pcidn->eeh_config_addr; | ||
775 | if (pcidn->eeh_pe_config_addr) | ||
776 | cfg_addr = pcidn->eeh_pe_config_addr; | ||
777 | buid = pcidn->phb->buid; | ||
778 | ret = rtas_call(ddw_avail[0], 3, 5, (u32 *)query, | ||
779 | cfg_addr, BUID_HI(buid), BUID_LO(buid)); | ||
780 | dev_info(&dev->dev, "ibm,query-pe-dma-windows(%x) %x %x %x" | ||
781 | " returned %d\n", ddw_avail[0], cfg_addr, BUID_HI(buid), | ||
782 | BUID_LO(buid), ret); | ||
783 | return ret; | ||
784 | } | ||
785 | |||
786 | static int create_ddw(struct pci_dev *dev, const u32 *ddw_avail, | ||
787 | struct ddw_create_response *create, int page_shift, | ||
788 | int window_shift) | ||
789 | { | ||
790 | struct device_node *dn; | ||
791 | struct pci_dn *pcidn; | ||
792 | u32 cfg_addr; | ||
793 | u64 buid; | ||
794 | int ret; | ||
795 | |||
796 | /* | ||
797 | * Get the config address and phb buid of the PE window. | ||
798 | * Rely on eeh to retrieve this for us. | ||
799 | * Retrieve them from the pci device, not the node with the | ||
800 | * dma-window property | ||
801 | */ | ||
802 | dn = pci_device_to_OF_node(dev); | ||
803 | pcidn = PCI_DN(dn); | ||
804 | cfg_addr = pcidn->eeh_config_addr; | ||
805 | if (pcidn->eeh_pe_config_addr) | ||
806 | cfg_addr = pcidn->eeh_pe_config_addr; | ||
807 | buid = pcidn->phb->buid; | ||
808 | |||
809 | do { | ||
810 | /* extra outputs are LIOBN and dma-addr (hi, lo) */ | ||
811 | ret = rtas_call(ddw_avail[1], 5, 4, (u32 *)create, cfg_addr, | ||
812 | BUID_HI(buid), BUID_LO(buid), page_shift, window_shift); | ||
813 | } while (rtas_busy_delay(ret)); | ||
814 | dev_info(&dev->dev, | ||
815 | "ibm,create-pe-dma-window(%x) %x %x %x %x %x returned %d " | ||
816 | "(liobn = 0x%x starting addr = %x %x)\n", ddw_avail[1], | ||
817 | cfg_addr, BUID_HI(buid), BUID_LO(buid), page_shift, | ||
818 | window_shift, ret, create->liobn, create->addr_hi, create->addr_lo); | ||
819 | |||
820 | return ret; | ||
821 | } | ||
822 | |||
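The do/while wrapping the create call is the usual RTAS busy-retry idiom; stripped to its essentials it looks like this (generic sketch, not part of the patch — rtas_busy_delay() sleeps and returns non-zero while the status is a busy or extended-delay code):

/* Sketch: reissue an RTAS call until it returns a final status. */
static int rtas_call_until_done(int token, u32 arg)
{
	int rc;

	do {
		rc = rtas_call(token, 1, 1, NULL, arg);
	} while (rtas_busy_delay(rc));

	return rc;
}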
823 | /* | ||
824 | * If the PE supports dynamic dma windows, and there is space for a table | ||
825 | * that can map all pages in a linear offset, then set up such a table, | ||
826 | * and record the dma-offset in the struct device. | ||
827 | * | ||
828 | * dev: the pci device we are checking | ||
829 | * pdn: the parent pe node with the ibm,dma-window property | ||
830 | * Future: also check if we can remap the base window for our base page size | ||
831 | * | ||
832 | * returns the dma offset for use by dma_set_mask | ||
833 | */ | ||
834 | static u64 enable_ddw(struct pci_dev *dev, struct device_node *pdn) | ||
835 | { | ||
836 | int len, ret; | ||
837 | struct ddw_query_response query; | ||
838 | struct ddw_create_response create; | ||
839 | int page_shift; | ||
840 | u64 dma_addr, max_addr; | ||
841 | struct device_node *dn; | ||
842 | const u32 *uninitialized_var(ddw_avail); | ||
843 | struct direct_window *window; | ||
844 | struct property *win64; | ||
845 | struct dynamic_dma_window_prop *ddwprop; | ||
846 | |||
847 | mutex_lock(&direct_window_init_mutex); | ||
848 | |||
849 | dma_addr = find_existing_ddw(pdn); | ||
850 | if (dma_addr != 0) | ||
851 | goto out_unlock; | ||
852 | |||
853 | /* | ||
854 | * the ibm,ddw-applicable property holds the tokens for: | ||
855 | * ibm,query-pe-dma-window | ||
856 | * ibm,create-pe-dma-window | ||
857 | * ibm,remove-pe-dma-window | ||
858 | * for the given node in that order. | ||
859 | * the property is actually in the parent, not the PE | ||
860 | */ | ||
861 | ddw_avail = of_get_property(pdn, "ibm,ddw-applicable", &len); | ||
862 | if (!ddw_avail || len < 3 * sizeof(u32)) | ||
863 | goto out_unlock; | ||
864 | |||
865 | /* | ||
866 | * Query if there is a second window of size to map the | ||
867 | * whole partition. Query returns number of windows, largest | ||
868 | * block assigned to PE (partition endpoint), and two bitmasks | ||
869 | * of page sizes: supported and supported for migrate-dma. | ||
870 | */ | ||
871 | dn = pci_device_to_OF_node(dev); | ||
872 | ret = query_ddw(dev, ddw_avail, &query); | ||
873 | if (ret != 0) | ||
874 | goto out_unlock; | ||
875 | |||
876 | if (query.windows_available == 0) { | ||
877 | /* | ||
878 | * no additional windows are available for this device. | ||
879 | * We might be able to reallocate the existing window, | ||
880 | * trading in for a larger page size. | ||
881 | */ | ||
882 | dev_dbg(&dev->dev, "no free dynamic windows"); | ||
883 | goto out_unlock; | ||
884 | } | ||
885 | if (query.page_size & 4) { | ||
886 | page_shift = 24; /* 16MB */ | ||
887 | } else if (query.page_size & 2) { | ||
888 | page_shift = 16; /* 64kB */ | ||
889 | } else if (query.page_size & 1) { | ||
890 | page_shift = 12; /* 4kB */ | ||
891 | } else { | ||
892 | dev_dbg(&dev->dev, "no supported direct page size in mask %x", | ||
893 | query.page_size); | ||
894 | goto out_unlock; | ||
895 | } | ||
896 | /* verify that the window will map the whole partition, i.e. check that */ | ||
897 | /* the largest block * page size > max memory hotplug addr */ | ||
898 | max_addr = memory_hotplug_max(); | ||
899 | if (query.largest_available_block < (max_addr >> page_shift)) { | ||
900 | dev_dbg(&dev->dev, "can't map partition max 0x%llx with %u " | ||
901 | "%llu-sized pages\n", max_addr, query.largest_available_block, | ||
902 | 1ULL << page_shift); | ||
903 | goto out_unlock; | ||
904 | } | ||
905 | len = order_base_2(max_addr); | ||
906 | win64 = kzalloc(sizeof(struct property), GFP_KERNEL); | ||
907 | if (!win64) { | ||
908 | dev_info(&dev->dev, | ||
909 | "couldn't allocate property for 64bit dma window\n"); | ||
910 | goto out_unlock; | ||
911 | } | ||
912 | win64->name = kstrdup(DIRECT64_PROPNAME, GFP_KERNEL); | ||
913 | win64->value = ddwprop = kmalloc(sizeof(*ddwprop), GFP_KERNEL); | ||
914 | win64->length = sizeof(*ddwprop); | ||
915 | if (!win64->name || !win64->value) { | ||
916 | dev_info(&dev->dev, | ||
917 | "couldn't allocate property name and value\n"); | ||
918 | goto out_free_prop; | ||
919 | } | ||
920 | |||
921 | ret = create_ddw(dev, ddw_avail, &create, page_shift, len); | ||
922 | if (ret != 0) | ||
923 | goto out_free_prop; | ||
924 | |||
925 | ddwprop->liobn = cpu_to_be32(create.liobn); | ||
926 | ddwprop->dma_base = cpu_to_be64(of_read_number(&create.addr_hi, 2)); | ||
927 | ddwprop->tce_shift = cpu_to_be32(page_shift); | ||
928 | ddwprop->window_shift = cpu_to_be32(len); | ||
929 | |||
930 | dev_dbg(&dev->dev, "created tce table LIOBN 0x%x for %s\n", | ||
931 | create.liobn, dn->full_name); | ||
932 | |||
933 | window = kzalloc(sizeof(*window), GFP_KERNEL); | ||
934 | if (!window) | ||
935 | goto out_clear_window; | ||
936 | |||
937 | ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT, | ||
938 | win64->value, tce_setrange_multi_pSeriesLP_walk); | ||
939 | if (ret) { | ||
940 | dev_info(&dev->dev, "failed to map direct window for %s: %d\n", | ||
941 | dn->full_name, ret); | ||
942 | goto out_clear_window; | ||
943 | } | ||
944 | |||
945 | ret = prom_add_property(pdn, win64); | ||
946 | if (ret) { | ||
947 | dev_err(&dev->dev, "unable to add dma window property for %s: %d", | ||
948 | pdn->full_name, ret); | ||
949 | goto out_clear_window; | ||
950 | } | ||
951 | |||
952 | window->device = pdn; | ||
953 | window->prop = ddwprop; | ||
954 | spin_lock(&direct_window_list_lock); | ||
955 | list_add(&window->list, &direct_window_list); | ||
956 | spin_unlock(&direct_window_list_lock); | ||
957 | |||
958 | dma_addr = of_read_number(&create.addr_hi, 2); | ||
959 | goto out_unlock; | ||
960 | |||
961 | out_clear_window: | ||
962 | remove_ddw(pdn); | ||
963 | |||
964 | out_free_prop: | ||
965 | kfree(win64->name); | ||
966 | kfree(win64->value); | ||
967 | kfree(win64); | ||
968 | |||
969 | out_unlock: | ||
970 | mutex_unlock(&direct_window_init_mutex); | ||
971 | return dma_addr; | ||
972 | } | ||
973 | |||
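To make the sizing checks in enable_ddw() concrete, here is a worked example with hypothetical numbers (an editorial sketch, not part of the patch):

/* Pretend memory_hotplug_max() reports 64 GB and the DDW query said
 * 16 MB TCE pages are supported (the 0x4 bit of query.page_size). */
static void ddw_sizing_example(void)
{
	u64 max_addr     = 64ULL << 30;			/* 64 GB */
	int page_shift   = 24;				/* 16 MB pages */
	u64 tces_needed  = max_addr >> page_shift;	/* 4096: largest_available_block must cover this */
	int window_shift = order_base_2(max_addr);	/* 36: size passed to create_ddw() */

	pr_debug("need %llu TCEs, window shift %d\n",
		 (unsigned long long)tces_needed, window_shift);
}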
503 | static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) | 974 | static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) |
504 | { | 975 | { |
505 | struct device_node *pdn, *dn; | 976 | struct device_node *pdn, *dn; |
@@ -510,7 +981,7 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) | |||
510 | pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev)); | 981 | pr_debug("pci_dma_dev_setup_pSeriesLP: %s\n", pci_name(dev)); |
511 | 982 | ||
512 | /* dev setup for LPAR is a little tricky, since the device tree might | 983 | /* dev setup for LPAR is a little tricky, since the device tree might |
513 | * contain the dma-window properties per-device and not neccesarily | 984 | * contain the dma-window properties per-device and not necessarily |
514 | * for the bus. So we need to search upwards in the tree until we | 985 | * for the bus. So we need to search upwards in the tree until we |
515 | * either hit a dma-window property, OR find a parent with a table | 986 | * either hit a dma-window property, OR find a parent with a table |
516 | * already allocated. | 987 | * already allocated. |
@@ -533,21 +1004,11 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) | |||
533 | } | 1004 | } |
534 | pr_debug(" parent is %s\n", pdn->full_name); | 1005 | pr_debug(" parent is %s\n", pdn->full_name); |
535 | 1006 | ||
536 | /* Check for parent == NULL so we don't try to setup the empty EADS | ||
537 | * slots on POWER4 machines. | ||
538 | */ | ||
539 | if (dma_window == NULL || pdn->parent == NULL) { | ||
540 | pr_debug(" no dma window for device, linking to parent\n"); | ||
541 | set_iommu_table_base(&dev->dev, PCI_DN(pdn)->iommu_table); | ||
542 | return; | ||
543 | } | ||
544 | |||
545 | pci = PCI_DN(pdn); | 1007 | pci = PCI_DN(pdn); |
546 | if (!pci->iommu_table) { | 1008 | if (!pci->iommu_table) { |
547 | tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, | 1009 | tbl = kzalloc_node(sizeof(struct iommu_table), GFP_KERNEL, |
548 | pci->phb->node); | 1010 | pci->phb->node); |
549 | iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window, | 1011 | iommu_table_setparms_lpar(pci->phb, pdn, tbl, dma_window); |
550 | pci->phb->bus->number); | ||
551 | pci->iommu_table = iommu_init_table(tbl, pci->phb->node); | 1012 | pci->iommu_table = iommu_init_table(tbl, pci->phb->node); |
552 | pr_debug(" created table: %p\n", pci->iommu_table); | 1013 | pr_debug(" created table: %p\n", pci->iommu_table); |
553 | } else { | 1014 | } else { |
@@ -556,24 +1017,145 @@ static void pci_dma_dev_setup_pSeriesLP(struct pci_dev *dev) | |||
556 | 1017 | ||
557 | set_iommu_table_base(&dev->dev, pci->iommu_table); | 1018 | set_iommu_table_base(&dev->dev, pci->iommu_table); |
558 | } | 1019 | } |
1020 | |||
1021 | static int dma_set_mask_pSeriesLP(struct device *dev, u64 dma_mask) | ||
1022 | { | ||
1023 | bool ddw_enabled = false; | ||
1024 | struct device_node *pdn, *dn; | ||
1025 | struct pci_dev *pdev; | ||
1026 | const void *dma_window = NULL; | ||
1027 | u64 dma_offset; | ||
1028 | |||
1029 | if (!dev->dma_mask) | ||
1030 | return -EIO; | ||
1031 | |||
1032 | if (!dev_is_pci(dev)) | ||
1033 | goto check_mask; | ||
1034 | |||
1035 | pdev = to_pci_dev(dev); | ||
1036 | |||
1037 | /* only attempt to use a new window if 64-bit DMA is requested */ | ||
1038 | if (!disable_ddw && dma_mask == DMA_BIT_MASK(64)) { | ||
1039 | dn = pci_device_to_OF_node(pdev); | ||
1040 | dev_dbg(dev, "node is %s\n", dn->full_name); | ||
1041 | |||
1042 | /* | ||
1043 | * the device tree might contain the dma-window properties | ||
1044 | * per-device and not necessarily for the bus. So we need to | ||
1045 | * search upwards in the tree until we either hit a dma-window | ||
1046 | * property, OR find a parent with a table already allocated. | ||
1047 | */ | ||
1048 | for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table; | ||
1049 | pdn = pdn->parent) { | ||
1050 | dma_window = of_get_property(pdn, "ibm,dma-window", NULL); | ||
1051 | if (dma_window) | ||
1052 | break; | ||
1053 | } | ||
1054 | if (pdn && PCI_DN(pdn)) { | ||
1055 | dma_offset = enable_ddw(pdev, pdn); | ||
1056 | if (dma_offset != 0) { | ||
1057 | dev_info(dev, "Using 64-bit direct DMA at offset %llx\n", dma_offset); | ||
1058 | set_dma_offset(dev, dma_offset); | ||
1059 | set_dma_ops(dev, &dma_direct_ops); | ||
1060 | ddw_enabled = true; | ||
1061 | } | ||
1062 | } | ||
1063 | } | ||
1064 | |||
1065 | /* fall back on iommu ops, restore table pointer with ops */ | ||
1066 | if (!ddw_enabled && get_dma_ops(dev) != &dma_iommu_ops) { | ||
1067 | dev_info(dev, "Restoring 32-bit DMA via iommu\n"); | ||
1068 | set_dma_ops(dev, &dma_iommu_ops); | ||
1069 | pci_dma_dev_setup_pSeriesLP(pdev); | ||
1070 | } | ||
1071 | |||
1072 | check_mask: | ||
1073 | if (!dma_supported(dev, dma_mask)) | ||
1074 | return -EIO; | ||
1075 | |||
1076 | *dev->dma_mask = dma_mask; | ||
1077 | return 0; | ||
1078 | } | ||
1079 | |||
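From a driver's point of view nothing pseries-specific is required to reach this hook: requesting a full 64-bit mask through the ordinary DMA API is what gives dma_set_mask_pSeriesLP() the chance to switch the device over to a direct window (illustrative snippet, not part of the patch):

/* Sketch of a PCI driver's probe path exercising the new hook. */
static int example_enable_dma(struct pci_dev *pdev)
{
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0)
		return 0;	/* 64-bit ok; a direct window may now be in use */

	dev_warn(&pdev->dev, "64-bit DMA unavailable, using 32-bit\n");
	return dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
}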
559 | #else /* CONFIG_PCI */ | 1080 | #else /* CONFIG_PCI */ |
560 | #define pci_dma_bus_setup_pSeries NULL | 1081 | #define pci_dma_bus_setup_pSeries NULL |
561 | #define pci_dma_dev_setup_pSeries NULL | 1082 | #define pci_dma_dev_setup_pSeries NULL |
562 | #define pci_dma_bus_setup_pSeriesLP NULL | 1083 | #define pci_dma_bus_setup_pSeriesLP NULL |
563 | #define pci_dma_dev_setup_pSeriesLP NULL | 1084 | #define pci_dma_dev_setup_pSeriesLP NULL |
1085 | #define dma_set_mask_pSeriesLP NULL | ||
564 | #endif /* !CONFIG_PCI */ | 1086 | #endif /* !CONFIG_PCI */ |
565 | 1087 | ||
1088 | static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action, | ||
1089 | void *data) | ||
1090 | { | ||
1091 | struct direct_window *window; | ||
1092 | struct memory_notify *arg = data; | ||
1093 | int ret = 0; | ||
1094 | |||
1095 | switch (action) { | ||
1096 | case MEM_GOING_ONLINE: | ||
1097 | spin_lock(&direct_window_list_lock); | ||
1098 | list_for_each_entry(window, &direct_window_list, list) { | ||
1099 | ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn, | ||
1100 | arg->nr_pages, window->prop); | ||
1101 | /* XXX log error */ | ||
1102 | } | ||
1103 | spin_unlock(&direct_window_list_lock); | ||
1104 | break; | ||
1105 | case MEM_CANCEL_ONLINE: | ||
1106 | case MEM_OFFLINE: | ||
1107 | spin_lock(&direct_window_list_lock); | ||
1108 | list_for_each_entry(window, &direct_window_list, list) { | ||
1109 | ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn, | ||
1110 | arg->nr_pages, window->prop); | ||
1111 | /* XXX log error */ | ||
1112 | } | ||
1113 | spin_unlock(&direct_window_list_lock); | ||
1114 | break; | ||
1115 | default: | ||
1116 | break; | ||
1117 | } | ||
1118 | if (ret && action != MEM_CANCEL_ONLINE) | ||
1119 | return NOTIFY_BAD; | ||
1120 | |||
1121 | return NOTIFY_OK; | ||
1122 | } | ||
1123 | |||
1124 | static struct notifier_block iommu_mem_nb = { | ||
1125 | .notifier_call = iommu_mem_notifier, | ||
1126 | }; | ||
1127 | |||
566 | static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node) | 1128 | static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long action, void *node) |
567 | { | 1129 | { |
568 | int err = NOTIFY_OK; | 1130 | int err = NOTIFY_OK; |
569 | struct device_node *np = node; | 1131 | struct device_node *np = node; |
570 | struct pci_dn *pci = PCI_DN(np); | 1132 | struct pci_dn *pci = PCI_DN(np); |
1133 | struct direct_window *window; | ||
571 | 1134 | ||
572 | switch (action) { | 1135 | switch (action) { |
573 | case PSERIES_RECONFIG_REMOVE: | 1136 | case PSERIES_RECONFIG_REMOVE: |
574 | if (pci && pci->iommu_table && | 1137 | if (pci && pci->iommu_table) |
575 | of_get_property(np, "ibm,dma-window", NULL)) | ||
576 | iommu_free_table(pci->iommu_table, np->full_name); | 1138 | iommu_free_table(pci->iommu_table, np->full_name); |
1139 | |||
1140 | spin_lock(&direct_window_list_lock); | ||
1141 | list_for_each_entry(window, &direct_window_list, list) { | ||
1142 | if (window->device == np) { | ||
1143 | list_del(&window->list); | ||
1144 | kfree(window); | ||
1145 | break; | ||
1146 | } | ||
1147 | } | ||
1148 | spin_unlock(&direct_window_list_lock); | ||
1149 | |||
1150 | /* | ||
1151 | * Because the notifier runs after isolation of the | ||
1152 | * slot, we are guaranteed any DMA window has already | ||
1153 | * been revoked and the TCEs have been marked invalid, | ||
1154 | * so we don't need a call to remove_ddw(np). However, | ||
1155 | * if an additional notifier action is added before the | ||
1156 | * isolate call, we should update this code for | ||
1157 | * completeness with such a call. | ||
1158 | */ | ||
577 | break; | 1159 | break; |
578 | default: | 1160 | default: |
579 | err = NOTIFY_DONE; | 1161 | err = NOTIFY_DONE; |
@@ -589,13 +1171,8 @@ static struct notifier_block iommu_reconfig_nb = { | |||
589 | /* These are called very early. */ | 1171 | /* These are called very early. */ |
590 | void iommu_init_early_pSeries(void) | 1172 | void iommu_init_early_pSeries(void) |
591 | { | 1173 | { |
592 | if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL)) { | 1174 | if (of_chosen && of_get_property(of_chosen, "linux,iommu-off", NULL)) |
593 | /* Direct I/O, IOMMU off */ | ||
594 | ppc_md.pci_dma_dev_setup = NULL; | ||
595 | ppc_md.pci_dma_bus_setup = NULL; | ||
596 | set_pci_dma_ops(&dma_direct_ops); | ||
597 | return; | 1175 | return; |
598 | } | ||
599 | 1176 | ||
600 | if (firmware_has_feature(FW_FEATURE_LPAR)) { | 1177 | if (firmware_has_feature(FW_FEATURE_LPAR)) { |
601 | if (firmware_has_feature(FW_FEATURE_MULTITCE)) { | 1178 | if (firmware_has_feature(FW_FEATURE_MULTITCE)) { |
@@ -608,6 +1185,7 @@ void iommu_init_early_pSeries(void) | |||
608 | ppc_md.tce_get = tce_get_pSeriesLP; | 1185 | ppc_md.tce_get = tce_get_pSeriesLP; |
609 | ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP; | 1186 | ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_pSeriesLP; |
610 | ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP; | 1187 | ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_pSeriesLP; |
1188 | ppc_md.dma_set_mask = dma_set_mask_pSeriesLP; | ||
611 | } else { | 1189 | } else { |
612 | ppc_md.tce_build = tce_build_pSeries; | 1190 | ppc_md.tce_build = tce_build_pSeries; |
613 | ppc_md.tce_free = tce_free_pSeries; | 1191 | ppc_md.tce_free = tce_free_pSeries; |
@@ -618,7 +1196,22 @@ void iommu_init_early_pSeries(void) | |||
618 | 1196 | ||
619 | 1197 | ||
620 | pSeries_reconfig_notifier_register(&iommu_reconfig_nb); | 1198 | pSeries_reconfig_notifier_register(&iommu_reconfig_nb); |
1199 | register_memory_notifier(&iommu_mem_nb); | ||
621 | 1200 | ||
622 | set_pci_dma_ops(&dma_iommu_ops); | 1201 | set_pci_dma_ops(&dma_iommu_ops); |
623 | } | 1202 | } |
624 | 1203 | ||
1204 | static int __init disable_multitce(char *str) | ||
1205 | { | ||
1206 | if (strcmp(str, "off") == 0 && | ||
1207 | firmware_has_feature(FW_FEATURE_LPAR) && | ||
1208 | firmware_has_feature(FW_FEATURE_MULTITCE)) { | ||
1209 | printk(KERN_INFO "Disabling MULTITCE firmware feature\n"); | ||
1210 | ppc_md.tce_build = tce_build_pSeriesLP; | ||
1211 | ppc_md.tce_free = tce_free_pSeriesLP; | ||
1212 | powerpc_firmware_features &= ~FW_FEATURE_MULTITCE; | ||
1213 | } | ||
1214 | return 1; | ||
1215 | } | ||
1216 | |||
1217 | __setup("multitce=", disable_multitce); | ||
diff --git a/arch/powerpc/platforms/pseries/kexec.c b/arch/powerpc/platforms/pseries/kexec.c index 53cbd53d8740..54cf3a4aa16b 100644 --- a/arch/powerpc/platforms/pseries/kexec.c +++ b/arch/powerpc/platforms/pseries/kexec.c | |||
@@ -7,15 +7,18 @@ | |||
7 | * 2 of the License, or (at your option) any later version. | 7 | * 2 of the License, or (at your option) any later version. |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | |||
10 | #include <asm/machdep.h> | 13 | #include <asm/machdep.h> |
11 | #include <asm/page.h> | 14 | #include <asm/page.h> |
12 | #include <asm/firmware.h> | 15 | #include <asm/firmware.h> |
13 | #include <asm/kexec.h> | 16 | #include <asm/kexec.h> |
14 | #include <asm/mpic.h> | 17 | #include <asm/mpic.h> |
18 | #include <asm/xics.h> | ||
15 | #include <asm/smp.h> | 19 | #include <asm/smp.h> |
16 | 20 | ||
17 | #include "pseries.h" | 21 | #include "pseries.h" |
18 | #include "xics.h" | ||
19 | #include "plpar_wrappers.h" | 22 | #include "plpar_wrappers.h" |
20 | 23 | ||
21 | static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) | 24 | static void pseries_kexec_cpu_down(int crash_shutdown, int secondary) |
@@ -61,13 +64,3 @@ void __init setup_kexec_cpu_down_xics(void) | |||
61 | { | 64 | { |
62 | ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics; | 65 | ppc_md.kexec_cpu_down = pseries_kexec_cpu_down_xics; |
63 | } | 66 | } |
64 | |||
65 | static int __init pseries_kexec_setup(void) | ||
66 | { | ||
67 | ppc_md.machine_kexec = default_machine_kexec; | ||
68 | ppc_md.machine_kexec_prepare = default_machine_kexec_prepare; | ||
69 | ppc_md.machine_crash_shutdown = default_machine_crash_shutdown; | ||
70 | |||
71 | return 0; | ||
72 | } | ||
73 | machine_device_initcall(pseries, pseries_kexec_setup); | ||
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index cf79b46d8f88..39e6e0a7b2fa 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c | |||
@@ -248,11 +248,13 @@ void vpa_init(int cpu) | |||
248 | int hwcpu = get_hard_smp_processor_id(cpu); | 248 | int hwcpu = get_hard_smp_processor_id(cpu); |
249 | unsigned long addr; | 249 | unsigned long addr; |
250 | long ret; | 250 | long ret; |
251 | struct paca_struct *pp; | ||
252 | struct dtl_entry *dtl; | ||
251 | 253 | ||
252 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | 254 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) |
253 | lppaca[cpu].vmxregs_in_use = 1; | 255 | lppaca_of(cpu).vmxregs_in_use = 1; |
254 | 256 | ||
255 | addr = __pa(&lppaca[cpu]); | 257 | addr = __pa(&lppaca_of(cpu)); |
256 | ret = register_vpa(hwcpu, addr); | 258 | ret = register_vpa(hwcpu, addr); |
257 | 259 | ||
258 | if (ret) { | 260 | if (ret) { |
@@ -274,6 +276,25 @@ void vpa_init(int cpu) | |||
274 | "registration for cpu %d (hw %d) of area %lx " | 276 | "registration for cpu %d (hw %d) of area %lx " |
275 | "returns %ld\n", cpu, hwcpu, addr, ret); | 277 | "returns %ld\n", cpu, hwcpu, addr, ret); |
276 | } | 278 | } |
279 | |||
280 | /* | ||
281 | * Register dispatch trace log, if one has been allocated. | ||
282 | */ | ||
283 | pp = &paca[cpu]; | ||
284 | dtl = pp->dispatch_log; | ||
285 | if (dtl) { | ||
286 | pp->dtl_ridx = 0; | ||
287 | pp->dtl_curr = dtl; | ||
288 | lppaca_of(cpu).dtl_idx = 0; | ||
289 | |||
290 | /* hypervisor reads buffer length from this field */ | ||
291 | dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; | ||
292 | ret = register_dtl(hwcpu, __pa(dtl)); | ||
293 | if (ret) | ||
294 | pr_warn("DTL registration failed for cpu %d (%ld)\n", | ||
295 | cpu, ret); | ||
296 | lppaca_of(cpu).dtl_enable_mask = 2; | ||
297 | } | ||
277 | } | 298 | } |
278 | 299 | ||
279 | static long pSeries_lpar_hpte_insert(unsigned long hpte_group, | 300 | static long pSeries_lpar_hpte_insert(unsigned long hpte_group, |
@@ -308,6 +329,8 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group, | |||
308 | /* Make pHyp happy */ | 329 | /* Make pHyp happy */ |
309 | if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU)) | 330 | if ((rflags & _PAGE_NO_CACHE) & !(rflags & _PAGE_WRITETHRU)) |
310 | hpte_r &= ~_PAGE_COHERENT; | 331 | hpte_r &= ~_PAGE_COHERENT; |
332 | if (firmware_has_feature(FW_FEATURE_XCMO) && !(hpte_r & HPTE_R_N)) | ||
333 | flags |= H_COALESCE_CAND; | ||
311 | 334 | ||
312 | lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot); | 335 | lpar_rc = plpar_pte_enter(flags, hpte_group, hpte_v, hpte_r, &slot); |
313 | if (unlikely(lpar_rc == H_PTEG_FULL)) { | 336 | if (unlikely(lpar_rc == H_PTEG_FULL)) { |
@@ -552,7 +575,7 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) | |||
552 | unsigned long i, pix, rc; | 575 | unsigned long i, pix, rc; |
553 | unsigned long flags = 0; | 576 | unsigned long flags = 0; |
554 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); | 577 | struct ppc64_tlb_batch *batch = &__get_cpu_var(ppc64_tlb_batch); |
555 | int lock_tlbie = !cpu_has_feature(CPU_FTR_LOCKLESS_TLBIE); | 578 | int lock_tlbie = !mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE); |
556 | unsigned long param[9]; | 579 | unsigned long param[9]; |
557 | unsigned long va; | 580 | unsigned long va; |
558 | unsigned long hash, index, shift, hidx, slot; | 581 | unsigned long hash, index, shift, hidx, slot; |
@@ -606,6 +629,18 @@ static void pSeries_lpar_flush_hash_range(unsigned long number, int local) | |||
606 | spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); | 629 | spin_unlock_irqrestore(&pSeries_lpar_tlbie_lock, flags); |
607 | } | 630 | } |
608 | 631 | ||
632 | static int __init disable_bulk_remove(char *str) | ||
633 | { | ||
634 | if (strcmp(str, "off") == 0 && | ||
635 | firmware_has_feature(FW_FEATURE_BULK_REMOVE)) { | ||
636 | printk(KERN_INFO "Disabling BULK_REMOVE firmware feature"); | ||
637 | powerpc_firmware_features &= ~FW_FEATURE_BULK_REMOVE; | ||
638 | } | ||
639 | return 1; | ||
640 | } | ||
641 | |||
642 | __setup("bulk_remove=", disable_bulk_remove); | ||
643 | |||
609 | void __init hpte_init_lpar(void) | 644 | void __init hpte_init_lpar(void) |
610 | { | 645 | { |
611 | ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate; | 646 | ppc_md.hpte_invalidate = pSeries_lpar_hpte_invalidate; |
@@ -680,6 +715,13 @@ EXPORT_SYMBOL(arch_free_page); | |||
680 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ | 715 | /* NB: reg/unreg are called while guarded with the tracepoints_mutex */ |
681 | extern long hcall_tracepoint_refcount; | 716 | extern long hcall_tracepoint_refcount; |
682 | 717 | ||
718 | /* | ||
719 | * Since the tracing code might execute hcalls we need to guard against | ||
720 | * recursion. One example of this is spinlocks calling H_YIELD on | ||
721 | * shared processor partitions. | ||
722 | */ | ||
723 | static DEFINE_PER_CPU(unsigned int, hcall_trace_depth); | ||
724 | |||
683 | void hcall_tracepoint_regfunc(void) | 725 | void hcall_tracepoint_regfunc(void) |
684 | { | 726 | { |
685 | hcall_tracepoint_refcount++; | 727 | hcall_tracepoint_refcount++; |
@@ -692,12 +734,86 @@ void hcall_tracepoint_unregfunc(void) | |||
692 | 734 | ||
693 | void __trace_hcall_entry(unsigned long opcode, unsigned long *args) | 735 | void __trace_hcall_entry(unsigned long opcode, unsigned long *args) |
694 | { | 736 | { |
737 | unsigned long flags; | ||
738 | unsigned int *depth; | ||
739 | |||
740 | local_irq_save(flags); | ||
741 | |||
742 | depth = &__get_cpu_var(hcall_trace_depth); | ||
743 | |||
744 | if (*depth) | ||
745 | goto out; | ||
746 | |||
747 | (*depth)++; | ||
695 | trace_hcall_entry(opcode, args); | 748 | trace_hcall_entry(opcode, args); |
749 | (*depth)--; | ||
750 | |||
751 | out: | ||
752 | local_irq_restore(flags); | ||
696 | } | 753 | } |
697 | 754 | ||
698 | void __trace_hcall_exit(long opcode, unsigned long retval, | 755 | void __trace_hcall_exit(long opcode, unsigned long retval, |
699 | unsigned long *retbuf) | 756 | unsigned long *retbuf) |
700 | { | 757 | { |
758 | unsigned long flags; | ||
759 | unsigned int *depth; | ||
760 | |||
761 | local_irq_save(flags); | ||
762 | |||
763 | depth = &__get_cpu_var(hcall_trace_depth); | ||
764 | |||
765 | if (*depth) | ||
766 | goto out; | ||
767 | |||
768 | (*depth)++; | ||
701 | trace_hcall_exit(opcode, retval, retbuf); | 769 | trace_hcall_exit(opcode, retval, retbuf); |
770 | (*depth)--; | ||
771 | |||
772 | out: | ||
773 | local_irq_restore(flags); | ||
702 | } | 774 | } |
703 | #endif | 775 | #endif |
776 | |||
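The hcall_trace_depth counter added above is a per-cpu recursion guard; with the hcall specifics removed the pattern is (generic sketch, not part of the patch; like __trace_hcall_entry/exit it relies on interrupts being disabled around the check):

static DEFINE_PER_CPU(unsigned int, example_trace_depth);

static void example_traced_op(void)
{
	unsigned int *depth = &__get_cpu_var(example_trace_depth);

	if (*depth)
		return;		/* already tracing on this cpu, don't recurse */

	(*depth)++;
	/* ... emit the trace event; it may itself perform hcalls ... */
	(*depth)--;
}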
777 | /** | ||
778 | * h_get_mpp | ||
779 | * H_GET_MPP hcall returns info in 7 parms | ||
780 | */ | ||
781 | int h_get_mpp(struct hvcall_mpp_data *mpp_data) | ||
782 | { | ||
783 | int rc; | ||
784 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | ||
785 | |||
786 | rc = plpar_hcall9(H_GET_MPP, retbuf); | ||
787 | |||
788 | mpp_data->entitled_mem = retbuf[0]; | ||
789 | mpp_data->mapped_mem = retbuf[1]; | ||
790 | |||
791 | mpp_data->group_num = (retbuf[2] >> 2 * 8) & 0xffff; | ||
792 | mpp_data->pool_num = retbuf[2] & 0xffff; | ||
793 | |||
794 | mpp_data->mem_weight = (retbuf[3] >> 7 * 8) & 0xff; | ||
795 | mpp_data->unallocated_mem_weight = (retbuf[3] >> 6 * 8) & 0xff; | ||
796 | mpp_data->unallocated_entitlement = retbuf[3] & 0xffffffffffff; | ||
797 | |||
798 | mpp_data->pool_size = retbuf[4]; | ||
799 | mpp_data->loan_request = retbuf[5]; | ||
800 | mpp_data->backing_mem = retbuf[6]; | ||
801 | |||
802 | return rc; | ||
803 | } | ||
804 | EXPORT_SYMBOL(h_get_mpp); | ||
805 | |||
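A worked example of the unpacking above, using made-up register contents:

/* Hypothetical values, only to illustrate the shifts in h_get_mpp():
 * retbuf[2] == 0x0000000000120034 gives
 *   group_num = (retbuf[2] >> 16) & 0xffff = 0x0012
 *   pool_num  =  retbuf[2]        & 0xffff = 0x0034
 * retbuf[3] == 0x8040000000001000 gives
 *   mem_weight              = (retbuf[3] >> 56) & 0xff = 0x80
 *   unallocated_mem_weight  = (retbuf[3] >> 48) & 0xff = 0x40
 *   unallocated_entitlement =  retbuf[3] & 0xffffffffffff = 0x1000
 */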
806 | int h_get_mpp_x(struct hvcall_mpp_x_data *mpp_x_data) | ||
807 | { | ||
808 | int rc; | ||
809 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE] = { 0 }; | ||
810 | |||
811 | rc = plpar_hcall9(H_GET_MPP_X, retbuf); | ||
812 | |||
813 | mpp_x_data->coalesced_bytes = retbuf[0]; | ||
814 | mpp_x_data->pool_coalesced_bytes = retbuf[1]; | ||
815 | mpp_x_data->pool_purr_cycles = retbuf[2]; | ||
816 | mpp_x_data->pool_spurr_cycles = retbuf[3]; | ||
817 | |||
818 | return rc; | ||
819 | } | ||
diff --git a/arch/powerpc/platforms/pseries/mobility.c b/arch/powerpc/platforms/pseries/mobility.c new file mode 100644 index 000000000000..3e7f651e50ac --- /dev/null +++ b/arch/powerpc/platforms/pseries/mobility.c | |||
@@ -0,0 +1,362 @@ | |||
1 | /* | ||
2 | * Support for Partition Mobility/Migration | ||
3 | * | ||
4 | * Copyright (C) 2010 Nathan Fontenot | ||
5 | * Copyright (C) 2010 IBM Corporation | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License version | ||
9 | * 2 as published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/kobject.h> | ||
14 | #include <linux/smp.h> | ||
15 | #include <linux/completion.h> | ||
16 | #include <linux/device.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/slab.h> | ||
19 | |||
20 | #include <asm/rtas.h> | ||
21 | #include "pseries.h" | ||
22 | |||
23 | static struct kobject *mobility_kobj; | ||
24 | |||
25 | struct update_props_workarea { | ||
26 | u32 phandle; | ||
27 | u32 state; | ||
28 | u64 reserved; | ||
29 | u32 nprops; | ||
30 | }; | ||
31 | |||
32 | #define NODE_ACTION_MASK 0xff000000 | ||
33 | #define NODE_COUNT_MASK 0x00ffffff | ||
34 | |||
35 | #define DELETE_DT_NODE 0x01000000 | ||
36 | #define UPDATE_DT_NODE 0x02000000 | ||
37 | #define ADD_DT_NODE 0x03000000 | ||
38 | |||
39 | static int mobility_rtas_call(int token, char *buf) | ||
40 | { | ||
41 | int rc; | ||
42 | |||
43 | spin_lock(&rtas_data_buf_lock); | ||
44 | |||
45 | memcpy(rtas_data_buf, buf, RTAS_DATA_BUF_SIZE); | ||
46 | rc = rtas_call(token, 2, 1, NULL, rtas_data_buf, 1); | ||
47 | memcpy(buf, rtas_data_buf, RTAS_DATA_BUF_SIZE); | ||
48 | |||
49 | spin_unlock(&rtas_data_buf_lock); | ||
50 | return rc; | ||
51 | } | ||
52 | |||
53 | static int delete_dt_node(u32 phandle) | ||
54 | { | ||
55 | struct device_node *dn; | ||
56 | |||
57 | dn = of_find_node_by_phandle(phandle); | ||
58 | if (!dn) | ||
59 | return -ENOENT; | ||
60 | |||
61 | dlpar_detach_node(dn); | ||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static int update_dt_property(struct device_node *dn, struct property **prop, | ||
66 | const char *name, u32 vd, char *value) | ||
67 | { | ||
68 | struct property *new_prop = *prop; | ||
69 | struct property *old_prop; | ||
70 | int more = 0; | ||
71 | |||
72 | /* A negative 'vd' value indicates that only part of the new property | ||
73 | * value is contained in the buffer and we need to call | ||
74 | * ibm,update-properties again to get the rest of the value. | ||
75 | * | ||
76 | * A negative value is also the two's complement of the actual value. | ||
77 | */ | ||
78 | if (vd & 0x80000000) { | ||
79 | vd = ~vd + 1; | ||
80 | more = 1; | ||
81 | } | ||
82 | |||
83 | if (new_prop) { | ||
84 | /* partial property fixup */ | ||
85 | char *new_data = kzalloc(new_prop->length + vd, GFP_KERNEL); | ||
86 | if (!new_data) | ||
87 | return -ENOMEM; | ||
88 | |||
89 | memcpy(new_data, new_prop->value, new_prop->length); | ||
90 | memcpy(new_data + new_prop->length, value, vd); | ||
91 | |||
92 | kfree(new_prop->value); | ||
93 | new_prop->value = new_data; | ||
94 | new_prop->length += vd; | ||
95 | } else { | ||
96 | new_prop = kzalloc(sizeof(*new_prop), GFP_KERNEL); | ||
97 | if (!new_prop) | ||
98 | return -ENOMEM; | ||
99 | |||
100 | new_prop->name = kstrdup(name, GFP_KERNEL); | ||
101 | if (!new_prop->name) { | ||
102 | kfree(new_prop); | ||
103 | return -ENOMEM; | ||
104 | } | ||
105 | |||
106 | new_prop->length = vd; | ||
107 | new_prop->value = kzalloc(new_prop->length, GFP_KERNEL); | ||
108 | if (!new_prop->value) { | ||
109 | kfree(new_prop->name); | ||
110 | kfree(new_prop); | ||
111 | return -ENOMEM; | ||
112 | } | ||
113 | |||
114 | memcpy(new_prop->value, value, vd); | ||
115 | *prop = new_prop; | ||
116 | } | ||
117 | |||
118 | if (!more) { | ||
119 | old_prop = of_find_property(dn, new_prop->name, NULL); | ||
120 | if (old_prop) | ||
121 | prom_update_property(dn, new_prop, old_prop); | ||
122 | else | ||
123 | prom_add_property(dn, new_prop); | ||
124 | |||
125 | new_prop = NULL; | ||
126 | } | ||
127 | |||
128 | return 0; | ||
129 | } | ||
130 | |||
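A worked example of the partial-value handling above, with a hypothetical length word:

/* vd == 0xffffff00 has the top bit set, so this buffer holds only part of
 * the property value; ~vd + 1 == 0x100, i.e. 256 bytes arrived in this
 * chunk, 'more' is set, and ibm,update-properties must be called again
 * before the property is committed to the device tree. */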
131 | static int update_dt_node(u32 phandle) | ||
132 | { | ||
133 | struct update_props_workarea *upwa; | ||
134 | struct device_node *dn; | ||
135 | struct property *prop = NULL; | ||
136 | int i, rc; | ||
137 | char *prop_data; | ||
138 | char *rtas_buf; | ||
139 | int update_properties_token; | ||
140 | |||
141 | update_properties_token = rtas_token("ibm,update-properties"); | ||
142 | if (update_properties_token == RTAS_UNKNOWN_SERVICE) | ||
143 | return -EINVAL; | ||
144 | |||
145 | rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); | ||
146 | if (!rtas_buf) | ||
147 | return -ENOMEM; | ||
148 | |||
149 | dn = of_find_node_by_phandle(phandle); | ||
150 | if (!dn) { | ||
151 | kfree(rtas_buf); | ||
152 | return -ENOENT; | ||
153 | } | ||
154 | |||
155 | upwa = (struct update_props_workarea *)&rtas_buf[0]; | ||
156 | upwa->phandle = phandle; | ||
157 | |||
158 | do { | ||
159 | rc = mobility_rtas_call(update_properties_token, rtas_buf); | ||
160 | if (rc < 0) | ||
161 | break; | ||
162 | |||
163 | prop_data = rtas_buf + sizeof(*upwa); | ||
164 | |||
165 | for (i = 0; i < upwa->nprops; i++) { | ||
166 | char *prop_name; | ||
167 | u32 vd; | ||
168 | |||
169 | prop_name = prop_data + 1; | ||
170 | prop_data += strlen(prop_name) + 1; | ||
171 | vd = *prop_data++; | ||
172 | |||
173 | switch (vd) { | ||
174 | case 0x00000000: | ||
175 | /* name only property, nothing to do */ | ||
176 | break; | ||
177 | |||
178 | case 0x80000000: | ||
179 | prop = of_find_property(dn, prop_name, NULL); | ||
180 | prom_remove_property(dn, prop); | ||
181 | prop = NULL; | ||
182 | break; | ||
183 | |||
184 | default: | ||
185 | rc = update_dt_property(dn, &prop, prop_name, | ||
186 | vd, prop_data); | ||
187 | if (rc) { | ||
188 | printk(KERN_ERR "Could not update %s" | ||
189 | " property\n", prop_name); | ||
190 | } | ||
191 | |||
192 | prop_data += vd; | ||
193 | } | ||
194 | } | ||
195 | } while (rc == 1); | ||
196 | |||
197 | of_node_put(dn); | ||
198 | kfree(rtas_buf); | ||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | static int add_dt_node(u32 parent_phandle, u32 drc_index) | ||
203 | { | ||
204 | struct device_node *dn; | ||
205 | struct device_node *parent_dn; | ||
206 | int rc; | ||
207 | |||
208 | dn = dlpar_configure_connector(drc_index); | ||
209 | if (!dn) | ||
210 | return -ENOENT; | ||
211 | |||
212 | parent_dn = of_find_node_by_phandle(parent_phandle); | ||
213 | if (!parent_dn) { | ||
214 | dlpar_free_cc_nodes(dn); | ||
215 | return -ENOENT; | ||
216 | } | ||
217 | |||
218 | dn->parent = parent_dn; | ||
219 | rc = dlpar_attach_node(dn); | ||
220 | if (rc) | ||
221 | dlpar_free_cc_nodes(dn); | ||
222 | |||
223 | of_node_put(parent_dn); | ||
224 | return rc; | ||
225 | } | ||
226 | |||
227 | static int pseries_devicetree_update(void) | ||
228 | { | ||
229 | char *rtas_buf; | ||
230 | u32 *data; | ||
231 | int update_nodes_token; | ||
232 | int rc; | ||
233 | |||
234 | update_nodes_token = rtas_token("ibm,update-nodes"); | ||
235 | if (update_nodes_token == RTAS_UNKNOWN_SERVICE) | ||
236 | return -EINVAL; | ||
237 | |||
238 | rtas_buf = kzalloc(RTAS_DATA_BUF_SIZE, GFP_KERNEL); | ||
239 | if (!rtas_buf) | ||
240 | return -ENOMEM; | ||
241 | |||
242 | do { | ||
243 | rc = mobility_rtas_call(update_nodes_token, rtas_buf); | ||
244 | if (rc && rc != 1) | ||
245 | break; | ||
246 | |||
247 | data = (u32 *)rtas_buf + 4; | ||
248 | while (*data & NODE_ACTION_MASK) { | ||
249 | int i; | ||
250 | u32 action = *data & NODE_ACTION_MASK; | ||
251 | int node_count = *data & NODE_COUNT_MASK; | ||
252 | |||
253 | data++; | ||
254 | |||
255 | for (i = 0; i < node_count; i++) { | ||
256 | u32 phandle = *data++; | ||
257 | u32 drc_index; | ||
258 | |||
259 | switch (action) { | ||
260 | case DELETE_DT_NODE: | ||
261 | delete_dt_node(phandle); | ||
262 | break; | ||
263 | case UPDATE_DT_NODE: | ||
264 | update_dt_node(phandle); | ||
265 | break; | ||
266 | case ADD_DT_NODE: | ||
267 | drc_index = *data++; | ||
268 | add_dt_node(phandle, drc_index); | ||
269 | break; | ||
270 | } | ||
271 | } | ||
272 | } | ||
273 | } while (rc == 1); | ||
274 | |||
275 | kfree(rtas_buf); | ||
276 | return rc; | ||
277 | } | ||
278 | |||
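A worked example of the work-area parsing above, with a hypothetical action word:

/* *data == 0x02000003 decodes as
 *   action     = *data & NODE_ACTION_MASK = 0x02000000 (UPDATE_DT_NODE)
 *   node_count = *data & NODE_COUNT_MASK  = 3
 * so the next three u32s are phandles handed to update_dt_node(). */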
279 | void post_mobility_fixup(void) | ||
280 | { | ||
281 | int rc; | ||
282 | int activate_fw_token; | ||
283 | |||
284 | rc = pseries_devicetree_update(); | ||
285 | if (rc) { | ||
286 | printk(KERN_ERR "Initial post-mobility device tree update " | ||
287 | "failed: %d\n", rc); | ||
288 | return; | ||
289 | } | ||
290 | |||
291 | activate_fw_token = rtas_token("ibm,activate-firmware"); | ||
292 | if (activate_fw_token == RTAS_UNKNOWN_SERVICE) { | ||
293 | printk(KERN_ERR "Could not make post-mobility " | ||
294 | "activate-fw call.\n"); | ||
295 | return; | ||
296 | } | ||
297 | |||
298 | rc = rtas_call(activate_fw_token, 0, 1, NULL); | ||
299 | if (!rc) { | ||
300 | rc = pseries_devicetree_update(); | ||
301 | if (rc) | ||
302 | printk(KERN_ERR "Secondary post-mobility device tree " | ||
303 | "update failed: %d\n", rc); | ||
304 | } else { | ||
305 | printk(KERN_ERR "Post-mobility activate-fw failed: %d\n", rc); | ||
306 | return; | ||
307 | } | ||
308 | |||
309 | return; | ||
310 | } | ||
311 | |||
312 | static ssize_t migrate_store(struct class *class, struct class_attribute *attr, | ||
313 | const char *buf, size_t count) | ||
314 | { | ||
315 | struct rtas_args args; | ||
316 | u64 streamid; | ||
317 | int rc; | ||
318 | |||
319 | rc = strict_strtoull(buf, 0, &streamid); | ||
320 | if (rc) | ||
321 | return rc; | ||
322 | |||
323 | memset(&args, 0, sizeof(args)); | ||
324 | args.token = rtas_token("ibm,suspend-me"); | ||
325 | args.nargs = 2; | ||
326 | args.nret = 1; | ||
327 | |||
328 | args.args[0] = streamid >> 32; | ||
329 | args.args[1] = streamid & 0xffffffff; | ||
330 | args.rets = &args.args[args.nargs]; | ||
331 | |||
332 | do { | ||
333 | args.rets[0] = 0; | ||
334 | rc = rtas_ibm_suspend_me(&args); | ||
335 | if (!rc && args.rets[0] == RTAS_NOT_SUSPENDABLE) | ||
336 | ssleep(1); | ||
337 | } while (!rc && args.rets[0] == RTAS_NOT_SUSPENDABLE); | ||
338 | |||
339 | if (rc) | ||
340 | return rc; | ||
341 | else if (args.rets[0]) | ||
342 | return args.rets[0]; | ||
343 | |||
344 | post_mobility_fixup(); | ||
345 | return count; | ||
346 | } | ||
347 | |||
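A worked example of the stream-id split above (the value is made up):

/* streamid == 0x123456789abcdef0 becomes
 *   args[0] = streamid >> 32        = 0x12345678
 *   args[1] = streamid & 0xffffffff = 0x9abcdef0
 * which is how the 64-bit id written to sysfs reaches the two 32-bit
 * arguments of the ibm,suspend-me RTAS call. */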
348 | static CLASS_ATTR(migration, S_IWUSR, NULL, migrate_store); | ||
349 | |||
350 | static int __init mobility_sysfs_init(void) | ||
351 | { | ||
352 | int rc; | ||
353 | |||
354 | mobility_kobj = kobject_create_and_add("mobility", kernel_kobj); | ||
355 | if (!mobility_kobj) | ||
356 | return -ENOMEM; | ||
357 | |||
358 | rc = sysfs_create_file(mobility_kobj, &class_attr_migration.attr); | ||
359 | |||
360 | return rc; | ||
361 | } | ||
362 | device_initcall(mobility_sysfs_init); | ||
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c index 1164c3430f2c..38d24e7e7bb1 100644 --- a/arch/powerpc/platforms/pseries/msi.c +++ b/arch/powerpc/platforms/pseries/msi.c | |||
@@ -93,8 +93,18 @@ static void rtas_disable_msi(struct pci_dev *pdev) | |||
93 | if (!pdn) | 93 | if (!pdn) |
94 | return; | 94 | return; |
95 | 95 | ||
96 | if (rtas_change_msi(pdn, RTAS_CHANGE_FN, 0) != 0) | 96 | /* |
97 | pr_debug("rtas_msi: Setting MSIs to 0 failed!\n"); | 97 | * disabling MSI with the explicit interface also disables MSI-X |
98 | */ | ||
99 | if (rtas_change_msi(pdn, RTAS_CHANGE_MSI_FN, 0) != 0) { | ||
100 | /* | ||
101 | * may have failed because explicit interface is not | ||
102 | * present | ||
103 | */ | ||
104 | if (rtas_change_msi(pdn, RTAS_CHANGE_FN, 0) != 0) { | ||
105 | pr_debug("rtas_msi: Setting MSIs to 0 failed!\n"); | ||
106 | } | ||
107 | } | ||
98 | } | 108 | } |
99 | 109 | ||
100 | static int rtas_query_irq_number(struct pci_dn *pdn, int offset) | 110 | static int rtas_query_irq_number(struct pci_dn *pdn, int offset) |
@@ -127,7 +137,7 @@ static void rtas_teardown_msi_irqs(struct pci_dev *pdev) | |||
127 | if (entry->irq == NO_IRQ) | 137 | if (entry->irq == NO_IRQ) |
128 | continue; | 138 | continue; |
129 | 139 | ||
130 | set_irq_msi(entry->irq, NULL); | 140 | irq_set_msi_desc(entry->irq, NULL); |
131 | irq_dispose_mapping(entry->irq); | 141 | irq_dispose_mapping(entry->irq); |
132 | } | 142 | } |
133 | 143 | ||
@@ -427,7 +437,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
427 | } | 437 | } |
428 | 438 | ||
429 | dev_dbg(&pdev->dev, "rtas_msi: allocated virq %d\n", virq); | 439 | dev_dbg(&pdev->dev, "rtas_msi: allocated virq %d\n", virq); |
430 | set_irq_msi(virq, entry); | 440 | irq_set_msi_desc(virq, entry); |
431 | 441 | ||
432 | /* Read config space back so we can restore after reset */ | 442 | /* Read config space back so we can restore after reset */ |
433 | read_msi_msg(virq, &msg); | 443 | read_msi_msg(virq, &msg); |
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c index bc3c7f2abd79..00cc3a094885 100644 --- a/arch/powerpc/platforms/pseries/nvram.c +++ b/arch/powerpc/platforms/pseries/nvram.c | |||
@@ -16,17 +16,70 @@ | |||
16 | #include <linux/errno.h> | 16 | #include <linux/errno.h> |
17 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/spinlock.h> | 18 | #include <linux/spinlock.h> |
19 | #include <linux/slab.h> | ||
20 | #include <linux/kmsg_dump.h> | ||
19 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
20 | #include <asm/nvram.h> | 22 | #include <asm/nvram.h> |
21 | #include <asm/rtas.h> | 23 | #include <asm/rtas.h> |
22 | #include <asm/prom.h> | 24 | #include <asm/prom.h> |
23 | #include <asm/machdep.h> | 25 | #include <asm/machdep.h> |
24 | 26 | ||
27 | /* Max bytes to read/write in one go */ | ||
28 | #define NVRW_CNT 0x20 | ||
29 | |||
25 | static unsigned int nvram_size; | 30 | static unsigned int nvram_size; |
26 | static int nvram_fetch, nvram_store; | 31 | static int nvram_fetch, nvram_store; |
27 | static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ | 32 | static char nvram_buf[NVRW_CNT]; /* assume this is in the first 4GB */ |
28 | static DEFINE_SPINLOCK(nvram_lock); | 33 | static DEFINE_SPINLOCK(nvram_lock); |
29 | 34 | ||
35 | struct err_log_info { | ||
36 | int error_type; | ||
37 | unsigned int seq_num; | ||
38 | }; | ||
39 | |||
40 | struct nvram_os_partition { | ||
41 | const char *name; | ||
42 | int req_size; /* desired size, in bytes */ | ||
43 | int min_size; /* minimum acceptable size (0 means req_size) */ | ||
44 | long size; /* size of data portion (excluding err_log_info) */ | ||
45 | long index; /* offset of data portion of partition */ | ||
46 | }; | ||
47 | |||
48 | static struct nvram_os_partition rtas_log_partition = { | ||
49 | .name = "ibm,rtas-log", | ||
50 | .req_size = 2079, | ||
51 | .min_size = 1055, | ||
52 | .index = -1 | ||
53 | }; | ||
54 | |||
55 | static struct nvram_os_partition oops_log_partition = { | ||
56 | .name = "lnx,oops-log", | ||
57 | .req_size = 4000, | ||
58 | .min_size = 2000, | ||
59 | .index = -1 | ||
60 | }; | ||
61 | |||
62 | static const char *pseries_nvram_os_partitions[] = { | ||
63 | "ibm,rtas-log", | ||
64 | "lnx,oops-log", | ||
65 | NULL | ||
66 | }; | ||
67 | |||
68 | static void oops_to_nvram(struct kmsg_dumper *dumper, | ||
69 | enum kmsg_dump_reason reason, | ||
70 | const char *old_msgs, unsigned long old_len, | ||
71 | const char *new_msgs, unsigned long new_len); | ||
72 | |||
73 | static struct kmsg_dumper nvram_kmsg_dumper = { | ||
74 | .dump = oops_to_nvram | ||
75 | }; | ||
76 | |||
77 | /* See clobbering_unread_rtas_event() */ | ||
78 | #define NVRAM_RTAS_READ_TIMEOUT 5 /* seconds */ | ||
79 | static unsigned long last_unread_rtas_event; /* timestamp */ | ||
80 | |||
81 | /* We preallocate oops_buf during init to avoid kmalloc during oops/panic. */ | ||
82 | static char *oops_buf; | ||
30 | 83 | ||
31 | static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index) | 84 | static ssize_t pSeries_nvram_read(char *buf, size_t count, loff_t *index) |
32 | { | 85 | { |
@@ -119,6 +172,239 @@ static ssize_t pSeries_nvram_get_size(void) | |||
119 | return nvram_size ? nvram_size : -ENODEV; | 172 | return nvram_size ? nvram_size : -ENODEV; |
120 | } | 173 | } |
121 | 174 | ||
175 | |||
176 | /* nvram_write_os_partition, nvram_write_error_log | ||
177 | * | ||
178 | * We need to buffer the error logs into nvram to ensure that we have | ||
179 | * the failure information to decode. If we have a severe error there | ||
180 | * is no way to guarantee that the OS or the machine is in a state to | ||
181 | * get back to user land and write the error to disk. For example if | ||
182 | * the SCSI device driver causes a Machine Check by writing to a bad | ||
183 | * IO address, there is no way of guaranteeing that the device driver | ||
184 | * is in any state that is would also be able to write the error data | ||
185 | * captured to disk, thus we buffer it in NVRAM for analysis on the | ||
186 | * next boot. | ||
187 | * | ||
189 | * In NVRAM the partition containing the error log buffer will look like: | ||
189 | * Header (in bytes): | ||
190 | * +-----------+----------+--------+------------+------------------+ | ||
191 | * | signature | checksum | length | name | data | | ||
192 | * |0 |1 |2 3|4 15|16 length-1| | ||
193 | * +-----------+----------+--------+------------+------------------+ | ||
194 | * | ||
195 | * The 'data' section would look like (in bytes): | ||
196 | * +--------------+------------+-----------------------------------+ | ||
197 | * | event_logged | sequence # | error log | | ||
198 | * |0 3|4 7|8 error_log_size-1| | ||
199 | * +--------------+------------+-----------------------------------+ | ||
200 | * | ||
201 | * event_logged: 0 if event has not been logged to syslog, 1 if it has | ||
202 | * sequence #: The unique sequence # for each event. (until it wraps) | ||
203 | * error log: The error log from event_scan | ||
204 | */ | ||
205 | int nvram_write_os_partition(struct nvram_os_partition *part, char * buff, | ||
206 | int length, unsigned int err_type, unsigned int error_log_cnt) | ||
207 | { | ||
208 | int rc; | ||
209 | loff_t tmp_index; | ||
210 | struct err_log_info info; | ||
211 | |||
212 | if (part->index == -1) { | ||
213 | return -ESPIPE; | ||
214 | } | ||
215 | |||
216 | if (length > part->size) { | ||
217 | length = part->size; | ||
218 | } | ||
219 | |||
220 | info.error_type = err_type; | ||
221 | info.seq_num = error_log_cnt; | ||
222 | |||
223 | tmp_index = part->index; | ||
224 | |||
225 | rc = ppc_md.nvram_write((char *)&info, sizeof(struct err_log_info), &tmp_index); | ||
226 | if (rc <= 0) { | ||
227 | pr_err("%s: Failed nvram_write (%d)\n", __FUNCTION__, rc); | ||
228 | return rc; | ||
229 | } | ||
230 | |||
231 | rc = ppc_md.nvram_write(buff, length, &tmp_index); | ||
232 | if (rc <= 0) { | ||
233 | pr_err("%s: Failed nvram_write (%d)\n", __FUNCTION__, rc); | ||
234 | return rc; | ||
235 | } | ||
236 | |||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | int nvram_write_error_log(char * buff, int length, | ||
241 | unsigned int err_type, unsigned int error_log_cnt) | ||
242 | { | ||
243 | int rc = nvram_write_os_partition(&rtas_log_partition, buff, length, | ||
244 | err_type, error_log_cnt); | ||
245 | if (!rc) | ||
246 | last_unread_rtas_event = get_seconds(); | ||
247 | return rc; | ||
248 | } | ||
249 | |||
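A sketch of a caller of nvram_write_error_log() (not part of the patch; ERR_TYPE_RTAS_LOG is assumed to be the existing error-type constant, and the buffer and sequence number are hypothetical):

/* Stash the raw RTAS event in NVRAM before anything else can go wrong. */
static void example_stash_rtas_event(char *log, int len, unsigned int seq)
{
	if (nvram_write_error_log(log, len, ERR_TYPE_RTAS_LOG, seq))
		pr_err("could not buffer RTAS event in NVRAM\n");
}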
250 | /* nvram_read_error_log | ||
251 | * | ||
252 | * Reads nvram for error log for at most 'length' | ||
253 | */ | ||
254 | int nvram_read_error_log(char * buff, int length, | ||
255 | unsigned int * err_type, unsigned int * error_log_cnt) | ||
256 | { | ||
257 | int rc; | ||
258 | loff_t tmp_index; | ||
259 | struct err_log_info info; | ||
260 | |||
261 | if (rtas_log_partition.index == -1) | ||
262 | return -1; | ||
263 | |||
264 | if (length > rtas_log_partition.size) | ||
265 | length = rtas_log_partition.size; | ||
266 | |||
267 | tmp_index = rtas_log_partition.index; | ||
268 | |||
269 | rc = ppc_md.nvram_read((char *)&info, sizeof(struct err_log_info), &tmp_index); | ||
270 | if (rc <= 0) { | ||
271 | printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc); | ||
272 | return rc; | ||
273 | } | ||
274 | |||
275 | rc = ppc_md.nvram_read(buff, length, &tmp_index); | ||
276 | if (rc <= 0) { | ||
277 | printk(KERN_ERR "nvram_read_error_log: Failed nvram_read (%d)\n", rc); | ||
278 | return rc; | ||
279 | } | ||
280 | |||
281 | *error_log_cnt = info.seq_num; | ||
282 | *err_type = info.error_type; | ||
283 | |||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | /* This doesn't actually zero anything, but it sets the event_logged | ||
288 | * word to tell that this event is safely in syslog. | ||
289 | */ | ||
290 | int nvram_clear_error_log(void) | ||
291 | { | ||
292 | loff_t tmp_index; | ||
293 | int clear_word = ERR_FLAG_ALREADY_LOGGED; | ||
294 | int rc; | ||
295 | |||
296 | if (rtas_log_partition.index == -1) | ||
297 | return -1; | ||
298 | |||
299 | tmp_index = rtas_log_partition.index; | ||
300 | |||
301 | rc = ppc_md.nvram_write((char *)&clear_word, sizeof(int), &tmp_index); | ||
302 | if (rc <= 0) { | ||
303 | printk(KERN_ERR "nvram_clear_error_log: Failed nvram_write (%d)\n", rc); | ||
304 | return rc; | ||
305 | } | ||
306 | last_unread_rtas_event = 0; | ||
307 | |||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | /* pseries_nvram_init_os_partition | ||
312 | * | ||
313 | * This sets up a partition with an "OS" signature. | ||
314 | * | ||
315 | * The general strategy is the following: | ||
316 | * 1.) If a partition with the indicated name already exists... | ||
317 | * - If it's large enough, use it. | ||
318 | * - Otherwise, recycle it and keep going. | ||
319 | * 2.) Search for a free partition that is large enough. | ||
320 | * 3.) If there's not a free partition large enough, recycle any obsolete | ||
321 | * OS partitions and try again. | ||
322 | * 4.) When creating, first try to get a chunk that satisfies the requested size. | ||
323 | * 5.) If a chunk of the requested size cannot be allocated, then try to find | ||
324 | * a chunk that satisfies the minimum size needed. | ||
325 | * | ||
326 | * Returns 0 on success, else -1. | ||
327 | */ | ||
328 | static int __init pseries_nvram_init_os_partition(struct nvram_os_partition | ||
329 | *part) | ||
330 | { | ||
331 | loff_t p; | ||
332 | int size; | ||
333 | |||
334 | /* Scan nvram for partitions */ | ||
335 | nvram_scan_partitions(); | ||
336 | |||
337 | /* Look for ours */ | ||
338 | p = nvram_find_partition(part->name, NVRAM_SIG_OS, &size); | ||
339 | |||
340 | /* Found one but too small, remove it */ | ||
341 | if (p && size < part->min_size) { | ||
342 | pr_info("nvram: Found too small %s partition," | ||
343 | " removing it...\n", part->name); | ||
344 | nvram_remove_partition(part->name, NVRAM_SIG_OS, NULL); | ||
345 | p = 0; | ||
346 | } | ||
347 | |||
348 | /* Create one if we didn't find */ | ||
349 | if (!p) { | ||
350 | p = nvram_create_partition(part->name, NVRAM_SIG_OS, | ||
351 | part->req_size, part->min_size); | ||
352 | if (p == -ENOSPC) { | ||
353 | pr_info("nvram: No room to create %s partition, " | ||
354 | "deleting any obsolete OS partitions...\n", | ||
355 | part->name); | ||
356 | nvram_remove_partition(NULL, NVRAM_SIG_OS, | ||
357 | pseries_nvram_os_partitions); | ||
358 | p = nvram_create_partition(part->name, NVRAM_SIG_OS, | ||
359 | part->req_size, part->min_size); | ||
360 | } | ||
361 | } | ||
362 | |||
363 | if (p <= 0) { | ||
364 | pr_err("nvram: Failed to find or create %s" | ||
365 | " partition, err %d\n", part->name, (int)p); | ||
366 | return -1; | ||
367 | } | ||
368 | |||
369 | part->index = p; | ||
370 | part->size = nvram_get_partition_size(p) - sizeof(struct err_log_info); | ||
371 | |||
372 | return 0; | ||
373 | } | ||
374 | |||
375 | static void __init nvram_init_oops_partition(int rtas_partition_exists) | ||
376 | { | ||
377 | int rc; | ||
378 | |||
379 | rc = pseries_nvram_init_os_partition(&oops_log_partition); | ||
380 | if (rc != 0) { | ||
381 | if (!rtas_partition_exists) | ||
382 | return; | ||
383 | pr_notice("nvram: Using %s partition to log both" | ||
384 | " RTAS errors and oops/panic reports\n", | ||
385 | rtas_log_partition.name); | ||
386 | memcpy(&oops_log_partition, &rtas_log_partition, | ||
387 | sizeof(rtas_log_partition)); | ||
388 | } | ||
389 | oops_buf = kmalloc(oops_log_partition.size, GFP_KERNEL); | ||
390 | rc = kmsg_dump_register(&nvram_kmsg_dumper); | ||
391 | if (rc != 0) { | ||
392 | pr_err("nvram: kmsg_dump_register() failed; returned %d\n", rc); | ||
393 | kfree(oops_buf); | ||
394 | return; | ||
395 | } | ||
396 | } | ||
397 | |||
398 | static int __init pseries_nvram_init_log_partitions(void) | ||
399 | { | ||
400 | int rc; | ||
401 | |||
402 | rc = pseries_nvram_init_os_partition(&rtas_log_partition); | ||
403 | nvram_init_oops_partition(rc == 0); | ||
404 | return 0; | ||
405 | } | ||
406 | machine_arch_initcall(pseries, pseries_nvram_init_log_partitions); | ||
407 | |||
122 | int __init pSeries_nvram_init(void) | 408 | int __init pSeries_nvram_init(void) |
123 | { | 409 | { |
124 | struct device_node *nvram; | 410 | struct device_node *nvram; |
@@ -148,3 +434,83 @@ int __init pSeries_nvram_init(void) | |||
148 | 434 | ||
149 | return 0; | 435 | return 0; |
150 | } | 436 | } |
437 | |||
438 | /* | ||
439 | * Try to capture the last capture_len bytes of the printk buffer. Return | ||
440 | * the amount actually captured. | ||
441 | */ | ||
442 | static size_t capture_last_msgs(const char *old_msgs, size_t old_len, | ||
443 | const char *new_msgs, size_t new_len, | ||
444 | char *captured, size_t capture_len) | ||
445 | { | ||
446 | if (new_len >= capture_len) { | ||
447 | memcpy(captured, new_msgs + (new_len - capture_len), | ||
448 | capture_len); | ||
449 | return capture_len; | ||
450 | } else { | ||
451 | /* Grab the end of old_msgs. */ | ||
452 | size_t old_tail_len = min(old_len, capture_len - new_len); | ||
453 | memcpy(captured, old_msgs + (old_len - old_tail_len), | ||
454 | old_tail_len); | ||
455 | memcpy(captured + old_tail_len, new_msgs, new_len); | ||
456 | return old_tail_len + new_len; | ||
457 | } | ||
458 | } | ||
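
As a quick illustration of the splicing logic in capture_last_msgs() above, here is a minimal standalone C program (not kernel code) that exercises the same two branches: if the new messages alone fill the capture window, only their tail is kept; otherwise the tail of the old buffer is prepended.

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

static size_t capture_last_msgs(const char *old_msgs, size_t old_len,
				const char *new_msgs, size_t new_len,
				char *captured, size_t capture_len)
{
	if (new_len >= capture_len) {
		memcpy(captured, new_msgs + (new_len - capture_len), capture_len);
		return capture_len;
	} else {
		size_t old_tail_len = MIN(old_len, capture_len - new_len);

		memcpy(captured, old_msgs + (old_len - old_tail_len), old_tail_len);
		memcpy(captured + old_tail_len, new_msgs, new_len);
		return old_tail_len + new_len;
	}
}

int main(void)
{
	char out[8];
	size_t n = capture_last_msgs("AAAABBBB", 8, "CC", 2, out, 6);

	/* Expect "BBBBCC": four bytes of old tail plus both new bytes. */
	printf("%.*s (%zu bytes)\n", (int)n, out, n);
	return 0;
}
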
459 | |||
460 | /* | ||
461 | * Are we using the ibm,rtas-log for oops/panic reports? And if so, | ||
462 | * would logging this oops/panic overwrite an RTAS event that rtas_errd | ||
463 | * hasn't had a chance to read and process? Return 1 if so, else 0. | ||
464 | * | ||
465 | * We assume that if rtas_errd hasn't read the RTAS event in | ||
466 | * NVRAM_RTAS_READ_TIMEOUT seconds, it's probably not going to. | ||
467 | */ | ||
468 | static int clobbering_unread_rtas_event(void) | ||
469 | { | ||
470 | return (oops_log_partition.index == rtas_log_partition.index | ||
471 | && last_unread_rtas_event | ||
472 | && get_seconds() - last_unread_rtas_event <= | ||
473 | NVRAM_RTAS_READ_TIMEOUT); | ||
474 | } | ||
475 | |||
476 | /* our kmsg_dump callback */ | ||
477 | static void oops_to_nvram(struct kmsg_dumper *dumper, | ||
478 | enum kmsg_dump_reason reason, | ||
479 | const char *old_msgs, unsigned long old_len, | ||
480 | const char *new_msgs, unsigned long new_len) | ||
481 | { | ||
482 | static unsigned int oops_count = 0; | ||
483 | static bool panicking = false; | ||
484 | size_t text_len; | ||
485 | |||
486 | switch (reason) { | ||
487 | case KMSG_DUMP_RESTART: | ||
488 | case KMSG_DUMP_HALT: | ||
489 | case KMSG_DUMP_POWEROFF: | ||
490 | /* These are almost always orderly shutdowns. */ | ||
491 | return; | ||
492 | case KMSG_DUMP_OOPS: | ||
493 | case KMSG_DUMP_KEXEC: | ||
494 | break; | ||
495 | case KMSG_DUMP_PANIC: | ||
496 | panicking = true; | ||
497 | break; | ||
498 | case KMSG_DUMP_EMERG: | ||
499 | if (panicking) | ||
500 | /* Panic report already captured. */ | ||
501 | return; | ||
502 | break; | ||
503 | default: | ||
504 | pr_err("%s: ignoring unrecognized KMSG_DUMP_* reason %d\n", | ||
505 | __FUNCTION__, (int) reason); | ||
506 | return; | ||
507 | } | ||
508 | |||
509 | if (clobbering_unread_rtas_event()) | ||
510 | return; | ||
511 | |||
512 | text_len = capture_last_msgs(old_msgs, old_len, new_msgs, new_len, | ||
513 | oops_buf, oops_log_partition.size); | ||
514 | (void) nvram_write_os_partition(&oops_log_partition, oops_buf, | ||
515 | (int) text_len, ERR_TYPE_KERNEL_PANIC, ++oops_count); | ||
516 | } | ||
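
The reason handling in oops_to_nvram() is easiest to read as a small filter: orderly shutdowns are ignored, oops and kexec reports are captured, and a panic is captured once, with the follow-up KMSG_DUMP_EMERG pass suppressed by the panicking flag. The sketch below replays that decision order with a stand-in enum in ordinary C; it illustrates the control flow only and is not the kernel's kmsg_dump API.

#include <stdio.h>
#include <stdbool.h>

/* Stand-in for the kernel's kmsg_dump_reason values (illustrative only). */
enum reason { RESTART, HALT, POWEROFF, OOPS, KEXEC, PANIC, EMERG };

static bool panicking;

static bool should_capture(enum reason r)
{
	switch (r) {
	case RESTART:
	case HALT:
	case POWEROFF:
		return false;		/* orderly shutdown, nothing to save */
	case OOPS:
	case KEXEC:
		return true;
	case PANIC:
		panicking = true;	/* remember this report was captured */
		return true;
	case EMERG:
		return !panicking;	/* skip the duplicate pass after a panic */
	}
	return false;
}

int main(void)
{
	enum reason seq[] = { OOPS, PANIC, EMERG };

	for (unsigned i = 0; i < sizeof(seq) / sizeof(seq[0]); i++)
		printf("reason %d -> capture=%d\n", seq[i], should_capture(seq[i]));
	return 0;
}
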
diff --git a/arch/powerpc/platforms/pseries/offline_states.h b/arch/powerpc/platforms/pseries/offline_states.h index 75a6f480d931..08672d9136ab 100644 --- a/arch/powerpc/platforms/pseries/offline_states.h +++ b/arch/powerpc/platforms/pseries/offline_states.h | |||
@@ -34,6 +34,4 @@ static inline void set_default_offline_state(int cpu) | |||
34 | #endif | 34 | #endif |
35 | 35 | ||
36 | extern enum cpu_state_vals get_preferred_offline_state(int cpu); | 36 | extern enum cpu_state_vals get_preferred_offline_state(int cpu); |
37 | extern int start_secondary(void); | ||
38 | extern void start_secondary_resume(void); | ||
39 | #endif | 37 | #endif |
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c index 4b7a062dee15..3bf4488aaec6 100644 --- a/arch/powerpc/platforms/pseries/pci_dlpar.c +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c | |||
@@ -25,8 +25,6 @@ | |||
25 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 25 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #undef DEBUG | ||
29 | |||
30 | #include <linux/pci.h> | 28 | #include <linux/pci.h> |
31 | #include <asm/pci-bridge.h> | 29 | #include <asm/pci-bridge.h> |
32 | #include <asm/ppc-pci.h> | 30 | #include <asm/ppc-pci.h> |
@@ -151,7 +149,7 @@ struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn) | |||
151 | if (dn->child) | 149 | if (dn->child) |
152 | eeh_add_device_tree_early(dn); | 150 | eeh_add_device_tree_early(dn); |
153 | 151 | ||
154 | pcibios_scan_phb(phb, dn); | 152 | pcibios_scan_phb(phb); |
155 | pcibios_finish_adding_to_bus(phb->bus); | 153 | pcibios_finish_adding_to_bus(phb->bus); |
156 | 154 | ||
157 | return phb; | 155 | return phb; |
diff --git a/arch/powerpc/platforms/pseries/plpar_wrappers.h b/arch/powerpc/platforms/pseries/plpar_wrappers.h index d9801117124b..4bf21207d7d3 100644 --- a/arch/powerpc/platforms/pseries/plpar_wrappers.h +++ b/arch/powerpc/platforms/pseries/plpar_wrappers.h | |||
@@ -270,31 +270,4 @@ static inline long plpar_put_term_char(unsigned long termno, unsigned long len, | |||
270 | lbuf[1]); | 270 | lbuf[1]); |
271 | } | 271 | } |
272 | 272 | ||
273 | static inline long plpar_eoi(unsigned long xirr) | ||
274 | { | ||
275 | return plpar_hcall_norets(H_EOI, xirr); | ||
276 | } | ||
277 | |||
278 | static inline long plpar_cppr(unsigned long cppr) | ||
279 | { | ||
280 | return plpar_hcall_norets(H_CPPR, cppr); | ||
281 | } | ||
282 | |||
283 | static inline long plpar_ipi(unsigned long servernum, unsigned long mfrr) | ||
284 | { | ||
285 | return plpar_hcall_norets(H_IPI, servernum, mfrr); | ||
286 | } | ||
287 | |||
288 | static inline long plpar_xirr(unsigned long *xirr_ret, unsigned char cppr) | ||
289 | { | ||
290 | long rc; | ||
291 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | ||
292 | |||
293 | rc = plpar_hcall(H_XIRR, retbuf, cppr); | ||
294 | |||
295 | *xirr_ret = retbuf[0]; | ||
296 | |||
297 | return rc; | ||
298 | } | ||
299 | |||
300 | #endif /* _PSERIES_PLPAR_WRAPPERS_H */ | 273 | #endif /* _PSERIES_PLPAR_WRAPPERS_H */ |
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h index 40c93cad91d2..e9f6d2859c3c 100644 --- a/arch/powerpc/platforms/pseries/pseries.h +++ b/arch/powerpc/platforms/pseries/pseries.h | |||
@@ -17,6 +17,8 @@ struct device_node; | |||
17 | extern void request_event_sources_irqs(struct device_node *np, | 17 | extern void request_event_sources_irqs(struct device_node *np, |
18 | irq_handler_t handler, const char *name); | 18 | irq_handler_t handler, const char *name); |
19 | 19 | ||
20 | #include <linux/of.h> | ||
21 | |||
20 | extern void __init fw_feature_init(const char *hypertas, unsigned long len); | 22 | extern void __init fw_feature_init(const char *hypertas, unsigned long len); |
21 | 23 | ||
22 | struct pt_regs; | 24 | struct pt_regs; |
@@ -47,4 +49,11 @@ extern unsigned long rtas_poweron_auto; | |||
47 | 49 | ||
48 | extern void find_udbg_vterm(void); | 50 | extern void find_udbg_vterm(void); |
49 | 51 | ||
52 | /* Dynamic logical Partitioning/Mobility */ | ||
53 | extern void dlpar_free_cc_nodes(struct device_node *); | ||
54 | extern void dlpar_free_cc_property(struct property *); | ||
55 | extern struct device_node *dlpar_configure_connector(u32); | ||
56 | extern int dlpar_attach_node(struct device_node *); | ||
57 | extern int dlpar_detach_node(struct device_node *); | ||
58 | |||
50 | #endif /* _PSERIES_PSERIES_H */ | 59 | #endif /* _PSERIES_PSERIES_H */ |
diff --git a/arch/powerpc/platforms/pseries/pseries_energy.c b/arch/powerpc/platforms/pseries/pseries_energy.c new file mode 100644 index 000000000000..c8b3c69fe891 --- /dev/null +++ b/arch/powerpc/platforms/pseries/pseries_energy.c | |||
@@ -0,0 +1,326 @@ | |||
1 | /* | ||
2 | * POWER platform energy management driver | ||
3 | * Copyright (C) 2010 IBM Corporation | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public License | ||
7 | * version 2 as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This pseries platform device driver provides access to | ||
10 | * platform energy management capabilities. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/seq_file.h> | ||
18 | #include <linux/sysdev.h> | ||
19 | #include <linux/cpu.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <asm/cputhreads.h> | ||
22 | #include <asm/page.h> | ||
23 | #include <asm/hvcall.h> | ||
24 | |||
25 | |||
26 | #define MODULE_VERS "1.0" | ||
27 | #define MODULE_NAME "pseries_energy" | ||
28 | |||
29 | /* Driver flags */ | ||
30 | |||
31 | static int sysfs_entries; | ||
32 | |||
33 | /* Helper routines */ | ||
34 | |||
35 | /* | ||
36 | * Routine to detect firmware support for the hcall: | ||
37 | * returns 1 if H_BEST_ENERGY is supported, | ||
38 | * else returns 0. | ||
39 | */ | ||
40 | |||
41 | static int check_for_h_best_energy(void) | ||
42 | { | ||
43 | struct device_node *rtas = NULL; | ||
44 | const char *hypertas, *s; | ||
45 | int length; | ||
46 | int rc = 0; | ||
47 | |||
48 | rtas = of_find_node_by_path("/rtas"); | ||
49 | if (!rtas) | ||
50 | return 0; | ||
51 | |||
52 | hypertas = of_get_property(rtas, "ibm,hypertas-functions", &length); | ||
53 | if (!hypertas) { | ||
54 | of_node_put(rtas); | ||
55 | return 0; | ||
56 | } | ||
57 | |||
58 | /* hypertas holds a list of NUL-separated strings with hcall names */ | ||
59 | for (s = hypertas; s < hypertas + length; s += strlen(s) + 1) { | ||
60 | if (!strncmp("hcall-best-energy-1", s, 19)) { | ||
61 | rc = 1; /* Found the string */ | ||
62 | break; | ||
63 | } | ||
64 | } | ||
65 | of_node_put(rtas); | ||
66 | return rc; | ||
67 | } | ||
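
The loop above relies on ibm,hypertas-functions being a single property made of back-to-back NUL-terminated strings, which is why it advances with s += strlen(s) + 1. A minimal standalone walk of such a property blob looks like this (the blob contents here are invented for illustration):

#include <stdio.h>
#include <string.h>

int main(void)
{
	/* A device-tree string list: concatenated NUL-terminated names. */
	const char blob[] = "hcall-pft\0hcall-term\0hcall-best-energy-1\0";
	int length = sizeof(blob) - 1;	/* property length in bytes */
	const char *s;
	int found = 0;

	for (s = blob; s < blob + length; s += strlen(s) + 1) {
		if (!strncmp("hcall-best-energy-1", s, 19)) {
			found = 1;
			break;
		}
	}
	printf("H_BEST_ENERGY advertised: %d\n", found);
	return 0;
}
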
68 | |||
69 | /* Helper routines to convert between drc_index and cpu numbers */ | ||
70 | |||
71 | static u32 cpu_to_drc_index(int cpu) | ||
72 | { | ||
73 | struct device_node *dn = NULL; | ||
74 | const int *indexes; | ||
75 | int i; | ||
76 | int rc = 1; | ||
77 | u32 ret = 0; | ||
78 | |||
79 | dn = of_find_node_by_path("/cpus"); | ||
80 | if (dn == NULL) | ||
81 | goto err; | ||
82 | indexes = of_get_property(dn, "ibm,drc-indexes", NULL); | ||
83 | if (indexes == NULL) | ||
84 | goto err_of_node_put; | ||
85 | /* Convert logical cpu number to core number */ | ||
86 | i = cpu_core_index_of_thread(cpu); | ||
87 | /* | ||
88 | * The first element indexes[0] is the number of drc_indexes | ||
89 | * returned in the list. Hence i+1 will get the drc_index | ||
90 | * corresponding to core number i. | ||
91 | */ | ||
92 | WARN_ON(i > indexes[0]); | ||
93 | ret = indexes[i + 1]; | ||
94 | rc = 0; | ||
95 | |||
96 | err_of_node_put: | ||
97 | of_node_put(dn); | ||
98 | err: | ||
99 | if (rc) | ||
100 | printk(KERN_WARNING "cpu_to_drc_index(%d) failed", cpu); | ||
101 | return ret; | ||
102 | } | ||
103 | |||
104 | static int drc_index_to_cpu(u32 drc_index) | ||
105 | { | ||
106 | struct device_node *dn = NULL; | ||
107 | const int *indexes; | ||
108 | int i, cpu = 0; | ||
109 | int rc = 1; | ||
110 | |||
111 | dn = of_find_node_by_path("/cpus"); | ||
112 | if (dn == NULL) | ||
113 | goto err; | ||
114 | indexes = of_get_property(dn, "ibm,drc-indexes", NULL); | ||
115 | if (indexes == NULL) | ||
116 | goto err_of_node_put; | ||
117 | /* | ||
118 | * First element in the array is the number of drc_indexes | ||
119 | * returned. Search through the list to find the matching | ||
120 | * drc_index and get the core number. | ||
121 | */ | ||
122 | for (i = 0; i < indexes[0]; i++) { | ||
123 | if (indexes[i + 1] == drc_index) | ||
124 | break; | ||
125 | } | ||
126 | /* Convert core number to logical cpu number */ | ||
127 | cpu = cpu_first_thread_of_core(i); | ||
128 | rc = 0; | ||
129 | |||
130 | err_of_node_put: | ||
131 | of_node_put(dn); | ||
132 | err: | ||
133 | if (rc) | ||
134 | printk(KERN_WARNING "drc_index_to_cpu(%d) failed", drc_index); | ||
135 | return cpu; | ||
136 | } | ||
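
Both helpers above lean on the layout of the ibm,drc-indexes property: cell 0 is a count, and cells 1..count hold one drc index per core. A self-contained sketch of the two lookups, using an invented index table and working at the core level (the thread-to-core conversion done by cpu_core_index_of_thread() is left out), might look like:

#include <stdio.h>

/* Invented example: indexes[0] is the count, then one drc index per core. */
static const int indexes[] = { 4, 0x10000100, 0x10000101, 0x10000102, 0x10000103 };

static int core_to_drc_index(int core)
{
	return indexes[core + 1];	/* skip the leading count cell */
}

static int drc_index_to_core(int drc)
{
	int i;

	for (i = 0; i < indexes[0]; i++)
		if (indexes[i + 1] == drc)
			return i;
	return -1;
}

int main(void)
{
	printf("core 2 -> drc 0x%x\n", core_to_drc_index(2));
	printf("drc 0x10000103 -> core %d\n", drc_index_to_core(0x10000103));
	return 0;
}
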
137 | |||
138 | /* | ||
139 | * pseries hypervisor call H_BEST_ENERGY provides hints to the OS on | ||
140 | * preferred logical cpus to activate or deactivate for optimized | ||
141 | * energy consumption. | ||
142 | */ | ||
143 | |||
144 | #define FLAGS_MODE1 0x004E200000080E01 | ||
145 | #define FLAGS_MODE2 0x004E200000080401 | ||
146 | #define FLAGS_ACTIVATE 0x100 | ||
147 | |||
148 | static ssize_t get_best_energy_list(char *page, int activate) | ||
149 | { | ||
150 | int rc, cnt, i, cpu; | ||
151 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | ||
152 | unsigned long flags = 0; | ||
153 | u32 *buf_page; | ||
154 | char *s = page; | ||
155 | |||
156 | buf_page = (u32 *) get_zeroed_page(GFP_KERNEL); | ||
157 | if (!buf_page) | ||
158 | return -ENOMEM; | ||
159 | |||
160 | flags = FLAGS_MODE1; | ||
161 | if (activate) | ||
162 | flags |= FLAGS_ACTIVATE; | ||
163 | |||
164 | rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags, 0, __pa(buf_page), | ||
165 | 0, 0, 0, 0, 0, 0); | ||
166 | if (rc != H_SUCCESS) { | ||
167 | free_page((unsigned long) buf_page); | ||
168 | return -EINVAL; | ||
169 | } | ||
170 | |||
171 | cnt = retbuf[0]; | ||
172 | for (i = 0; i < cnt; i++) { | ||
173 | cpu = drc_index_to_cpu(buf_page[2*i+1]); | ||
174 | if ((cpu_online(cpu) && !activate) || | ||
175 | (!cpu_online(cpu) && activate)) | ||
176 | s += sprintf(s, "%d,", cpu); | ||
177 | } | ||
178 | if (s > page) { /* Something to show */ | ||
179 | s--; /* Suppress last comma */ | ||
180 | s += sprintf(s, "\n"); | ||
181 | } | ||
182 | |||
183 | free_page((unsigned long) buf_page); | ||
184 | return s-page; | ||
185 | } | ||
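
One small idiom worth noting in get_best_energy_list(): each cpu is appended as "%d,", and if anything was emitted the cursor steps back one character so the trailing comma is overwritten by the newline. Outside the kernel the same pattern can be shown in a few lines:

#include <stdio.h>

int main(void)
{
	char page[64];
	char *s = page;
	int cpus[] = { 0, 2, 5 };

	for (unsigned i = 0; i < sizeof(cpus) / sizeof(cpus[0]); i++)
		s += sprintf(s, "%d,", cpus[i]);

	if (s > page) {		/* something to show */
		s--;		/* suppress the last comma */
		s += sprintf(s, "\n");
	}
	printf("%s", page);	/* prints "0,2,5" and a newline */
	return 0;
}
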
186 | |||
187 | static ssize_t get_best_energy_data(struct sys_device *dev, | ||
188 | char *page, int activate) | ||
189 | { | ||
190 | int rc; | ||
191 | unsigned long retbuf[PLPAR_HCALL9_BUFSIZE]; | ||
192 | unsigned long flags = 0; | ||
193 | |||
194 | flags = FLAGS_MODE2; | ||
195 | if (activate) | ||
196 | flags |= FLAGS_ACTIVATE; | ||
197 | |||
198 | rc = plpar_hcall9(H_BEST_ENERGY, retbuf, flags, | ||
199 | cpu_to_drc_index(dev->id), | ||
200 | 0, 0, 0, 0, 0, 0, 0); | ||
201 | |||
202 | if (rc != H_SUCCESS) | ||
203 | return -EINVAL; | ||
204 | |||
205 | return sprintf(page, "%lu\n", retbuf[1] >> 32); | ||
206 | } | ||
207 | |||
208 | /* Wrapper functions */ | ||
209 | |||
210 | static ssize_t cpu_activate_hint_list_show(struct sysdev_class *class, | ||
211 | struct sysdev_class_attribute *attr, char *page) | ||
212 | { | ||
213 | return get_best_energy_list(page, 1); | ||
214 | } | ||
215 | |||
216 | static ssize_t cpu_deactivate_hint_list_show(struct sysdev_class *class, | ||
217 | struct sysdev_class_attribute *attr, char *page) | ||
218 | { | ||
219 | return get_best_energy_list(page, 0); | ||
220 | } | ||
221 | |||
222 | static ssize_t percpu_activate_hint_show(struct sys_device *dev, | ||
223 | struct sysdev_attribute *attr, char *page) | ||
224 | { | ||
225 | return get_best_energy_data(dev, page, 1); | ||
226 | } | ||
227 | |||
228 | static ssize_t percpu_deactivate_hint_show(struct sys_device *dev, | ||
229 | struct sysdev_attribute *attr, char *page) | ||
230 | { | ||
231 | return get_best_energy_data(dev, page, 0); | ||
232 | } | ||
233 | |||
234 | /* | ||
235 | * Create sysfs interface: | ||
236 | * /sys/devices/system/cpu/pseries_activate_hint_list | ||
237 | * /sys/devices/system/cpu/pseries_deactivate_hint_list | ||
238 | * Comma-separated list of cpus to activate or deactivate | ||
239 | * /sys/devices/system/cpu/cpuN/pseries_activate_hint | ||
240 | * /sys/devices/system/cpu/cpuN/pseries_deactivate_hint | ||
241 | * Per-cpu value of the hint | ||
242 | */ | ||
243 | |||
244 | struct sysdev_class_attribute attr_cpu_activate_hint_list = | ||
245 | _SYSDEV_CLASS_ATTR(pseries_activate_hint_list, 0444, | ||
246 | cpu_activate_hint_list_show, NULL); | ||
247 | |||
248 | struct sysdev_class_attribute attr_cpu_deactivate_hint_list = | ||
249 | _SYSDEV_CLASS_ATTR(pseries_deactivate_hint_list, 0444, | ||
250 | cpu_deactivate_hint_list_show, NULL); | ||
251 | |||
252 | struct sysdev_attribute attr_percpu_activate_hint = | ||
253 | _SYSDEV_ATTR(pseries_activate_hint, 0444, | ||
254 | percpu_activate_hint_show, NULL); | ||
255 | |||
256 | struct sysdev_attribute attr_percpu_deactivate_hint = | ||
257 | _SYSDEV_ATTR(pseries_deactivate_hint, 0444, | ||
258 | percpu_deactivate_hint_show, NULL); | ||
259 | |||
260 | static int __init pseries_energy_init(void) | ||
261 | { | ||
262 | int cpu, err; | ||
263 | struct sys_device *cpu_sys_dev; | ||
264 | |||
265 | if (!check_for_h_best_energy()) { | ||
266 | printk(KERN_INFO "Hypercall H_BEST_ENERGY not supported\n"); | ||
267 | return 0; | ||
268 | } | ||
269 | /* Create the sysfs files */ | ||
270 | err = sysfs_create_file(&cpu_sysdev_class.kset.kobj, | ||
271 | &attr_cpu_activate_hint_list.attr); | ||
272 | if (!err) | ||
273 | err = sysfs_create_file(&cpu_sysdev_class.kset.kobj, | ||
274 | &attr_cpu_deactivate_hint_list.attr); | ||
275 | |||
276 | if (err) | ||
277 | return err; | ||
278 | for_each_possible_cpu(cpu) { | ||
279 | cpu_sys_dev = get_cpu_sysdev(cpu); | ||
280 | err = sysfs_create_file(&cpu_sys_dev->kobj, | ||
281 | &attr_percpu_activate_hint.attr); | ||
282 | if (err) | ||
283 | break; | ||
284 | err = sysfs_create_file(&cpu_sys_dev->kobj, | ||
285 | &attr_percpu_deactivate_hint.attr); | ||
286 | if (err) | ||
287 | break; | ||
288 | } | ||
289 | |||
290 | if (err) | ||
291 | return err; | ||
292 | |||
293 | sysfs_entries = 1; /* Remove these entries on cleanup */ | ||
294 | return 0; | ||
295 | |||
296 | } | ||
297 | |||
298 | static void __exit pseries_energy_cleanup(void) | ||
299 | { | ||
300 | int cpu; | ||
301 | struct sys_device *cpu_sys_dev; | ||
302 | |||
303 | if (!sysfs_entries) | ||
304 | return; | ||
305 | |||
306 | /* Remove the sysfs files */ | ||
307 | sysfs_remove_file(&cpu_sysdev_class.kset.kobj, | ||
308 | &attr_cpu_activate_hint_list.attr); | ||
309 | |||
310 | sysfs_remove_file(&cpu_sysdev_class.kset.kobj, | ||
311 | &attr_cpu_deactivate_hint_list.attr); | ||
312 | |||
313 | for_each_possible_cpu(cpu) { | ||
314 | cpu_sys_dev = get_cpu_sysdev(cpu); | ||
315 | sysfs_remove_file(&cpu_sys_dev->kobj, | ||
316 | &attr_percpu_activate_hint.attr); | ||
317 | sysfs_remove_file(&cpu_sys_dev->kobj, | ||
318 | &attr_percpu_deactivate_hint.attr); | ||
319 | } | ||
320 | } | ||
321 | |||
322 | module_init(pseries_energy_init); | ||
323 | module_exit(pseries_energy_cleanup); | ||
324 | MODULE_DESCRIPTION("Driver for pSeries platform energy management"); | ||
325 | MODULE_AUTHOR("Vaidyanathan Srinivasan"); | ||
326 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index a4fc6da87c2e..086d2ae4e06a 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c | |||
@@ -54,7 +54,8 @@ | |||
54 | static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX]; | 54 | static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX]; |
55 | static DEFINE_SPINLOCK(ras_log_buf_lock); | 55 | static DEFINE_SPINLOCK(ras_log_buf_lock); |
56 | 56 | ||
57 | static char mce_data_buf[RTAS_ERROR_LOG_MAX]; | 57 | static char global_mce_data_buf[RTAS_ERROR_LOG_MAX]; |
58 | static DEFINE_PER_CPU(__u64, mce_data_buf); | ||
58 | 59 | ||
59 | static int ras_get_sensor_state_token; | 60 | static int ras_get_sensor_state_token; |
60 | static int ras_check_exception_token; | 61 | static int ras_check_exception_token; |
@@ -121,7 +122,7 @@ static irqreturn_t ras_epow_interrupt(int irq, void *dev_id) | |||
121 | 122 | ||
122 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, | 123 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, |
123 | RTAS_VECTOR_EXTERNAL_INTERRUPT, | 124 | RTAS_VECTOR_EXTERNAL_INTERRUPT, |
124 | irq_map[irq].hwirq, | 125 | virq_to_hw(irq), |
125 | RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, | 126 | RTAS_EPOW_WARNING | RTAS_POWERMGM_EVENTS, |
126 | critical, __pa(&ras_log_buf), | 127 | critical, __pa(&ras_log_buf), |
127 | rtas_get_error_log_max()); | 128 | rtas_get_error_log_max()); |
@@ -156,7 +157,7 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) | |||
156 | 157 | ||
157 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, | 158 | status = rtas_call(ras_check_exception_token, 6, 1, NULL, |
158 | RTAS_VECTOR_EXTERNAL_INTERRUPT, | 159 | RTAS_VECTOR_EXTERNAL_INTERRUPT, |
159 | irq_map[irq].hwirq, | 160 | virq_to_hw(irq), |
160 | RTAS_INTERNAL_ERROR, 1 /*Time Critical */, | 161 | RTAS_INTERNAL_ERROR, 1 /*Time Critical */, |
161 | __pa(&ras_log_buf), | 162 | __pa(&ras_log_buf), |
162 | rtas_get_error_log_max()); | 163 | rtas_get_error_log_max()); |
@@ -196,12 +197,24 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) | |||
196 | return IRQ_HANDLED; | 197 | return IRQ_HANDLED; |
197 | } | 198 | } |
198 | 199 | ||
199 | /* Get the error information for errors coming through the | 200 | /* |
201 | * Some versions of FWNMI place the buffer inside the 4kB page starting at | ||
202 | * 0x7000. Other versions place it inside the rtas buffer. We check both. | ||
203 | */ | ||
204 | #define VALID_FWNMI_BUFFER(A) \ | ||
205 | ((((A) >= 0x7000) && ((A) < 0x7ff0)) || \ | ||
206 | (((A) >= rtas.base) && ((A) < (rtas.base + rtas.size - 16)))) | ||
207 | |||
208 | /* | ||
209 | * Get the error information for errors coming through the | ||
200 | * FWNMI vectors. The pt_regs' r3 will be updated to reflect | 210 | * FWNMI vectors. The pt_regs' r3 will be updated to reflect |
201 | * the actual r3 if possible, and a ptr to the error log entry | 211 | * the actual r3 if possible, and a ptr to the error log entry |
202 | * will be returned if found. | 212 | * will be returned if found. |
203 | * | 213 | * |
204 | * The mce_data_buf does not have any locks or protection around it, | 214 | * If the RTAS error is not of the extended type, then we put it in a per |
215 | * cpu 64bit buffer. If it is the extended type we use global_mce_data_buf. | ||
216 | * | ||
217 | * The global_mce_data_buf does not have any locks or protection around it, | ||
205 | * if a second machine check comes in, or a system reset is done | 218 | * if a second machine check comes in, or a system reset is done |
206 | * before we have logged the error, then we will get corruption in the | 219 | * before we have logged the error, then we will get corruption in the |
207 | * error log. This is preferable over holding off on calling | 220 | * error log. This is preferable over holding off on calling |
@@ -210,20 +223,31 @@ static irqreturn_t ras_error_interrupt(int irq, void *dev_id) | |||
210 | */ | 223 | */ |
211 | static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs) | 224 | static struct rtas_error_log *fwnmi_get_errinfo(struct pt_regs *regs) |
212 | { | 225 | { |
213 | unsigned long errdata = regs->gpr[3]; | ||
214 | struct rtas_error_log *errhdr = NULL; | ||
215 | unsigned long *savep; | 226 | unsigned long *savep; |
227 | struct rtas_error_log *h, *errhdr = NULL; | ||
228 | |||
229 | if (!VALID_FWNMI_BUFFER(regs->gpr[3])) { | ||
230 | printk(KERN_ERR "FWNMI: corrupt r3 0x%016lx\n", regs->gpr[3]); | ||
231 | return NULL; | ||
232 | } | ||
216 | 233 | ||
217 | if ((errdata >= 0x7000 && errdata < 0x7fff0) || | 234 | savep = __va(regs->gpr[3]); |
218 | (errdata >= rtas.base && errdata < rtas.base + rtas.size - 16)) { | 235 | regs->gpr[3] = savep[0]; /* restore original r3 */ |
219 | savep = __va(errdata); | 236 | |
220 | regs->gpr[3] = savep[0]; /* restore original r3 */ | 237 | /* If it isn't an extended log we can use the per cpu 64bit buffer */ |
221 | memset(mce_data_buf, 0, RTAS_ERROR_LOG_MAX); | 238 | h = (struct rtas_error_log *)&savep[1]; |
222 | memcpy(mce_data_buf, (char *)(savep + 1), RTAS_ERROR_LOG_MAX); | 239 | if (!h->extended) { |
223 | errhdr = (struct rtas_error_log *)mce_data_buf; | 240 | memcpy(&__get_cpu_var(mce_data_buf), h, sizeof(__u64)); |
241 | errhdr = (struct rtas_error_log *)&__get_cpu_var(mce_data_buf); | ||
224 | } else { | 242 | } else { |
225 | printk("FWNMI: corrupt r3\n"); | 243 | int len; |
244 | |||
245 | len = max_t(int, 8+h->extended_log_length, RTAS_ERROR_LOG_MAX); | ||
246 | memset(global_mce_data_buf, 0, RTAS_ERROR_LOG_MAX); | ||
247 | memcpy(global_mce_data_buf, h, len); | ||
248 | errhdr = (struct rtas_error_log *)global_mce_data_buf; | ||
226 | } | 249 | } |
250 | |||
227 | return errhdr; | 251 | return errhdr; |
228 | } | 252 | } |
229 | 253 | ||
@@ -235,7 +259,7 @@ static void fwnmi_release_errinfo(void) | |||
235 | { | 259 | { |
236 | int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL); | 260 | int ret = rtas_call(rtas_token("ibm,nmi-interlock"), 0, 1, NULL); |
237 | if (ret != 0) | 261 | if (ret != 0) |
238 | printk("FWNMI: nmi-interlock failed: %d\n", ret); | 262 | printk(KERN_ERR "FWNMI: nmi-interlock failed: %d\n", ret); |
239 | } | 263 | } |
240 | 264 | ||
241 | int pSeries_system_reset_exception(struct pt_regs *regs) | 265 | int pSeries_system_reset_exception(struct pt_regs *regs) |
@@ -259,31 +283,43 @@ int pSeries_system_reset_exception(struct pt_regs *regs) | |||
259 | * Return 1 if corrected (or delivered a signal). | 283 | * Return 1 if corrected (or delivered a signal). |
260 | * Return 0 if there is nothing we can do. | 284 | * Return 0 if there is nothing we can do. |
261 | */ | 285 | */ |
262 | static int recover_mce(struct pt_regs *regs, struct rtas_error_log * err) | 286 | static int recover_mce(struct pt_regs *regs, struct rtas_error_log *err) |
263 | { | 287 | { |
264 | int nonfatal = 0; | 288 | int recovered = 0; |
265 | 289 | ||
266 | if (err->disposition == RTAS_DISP_FULLY_RECOVERED) { | 290 | if (!(regs->msr & MSR_RI)) { |
291 | /* If MSR_RI isn't set, we cannot recover */ | ||
292 | recovered = 0; | ||
293 | |||
294 | } else if (err->disposition == RTAS_DISP_FULLY_RECOVERED) { | ||
267 | /* Platform corrected itself */ | 295 | /* Platform corrected itself */ |
268 | nonfatal = 1; | 296 | recovered = 1; |
269 | } else if ((regs->msr & MSR_RI) && | 297 | |
270 | user_mode(regs) && | 298 | } else if (err->disposition == RTAS_DISP_LIMITED_RECOVERY) { |
271 | err->severity == RTAS_SEVERITY_ERROR_SYNC && | 299 | /* Platform corrected itself but could be degraded */ |
272 | err->disposition == RTAS_DISP_NOT_RECOVERED && | 300 | printk(KERN_ERR "MCE: limited recovery, system may " |
273 | err->target == RTAS_TARGET_MEMORY && | 301 | "be degraded\n"); |
274 | err->type == RTAS_TYPE_ECC_UNCORR && | 302 | recovered = 1; |
275 | !(current->pid == 0 || is_global_init(current))) { | 303 | |
276 | /* Kill off a user process with an ECC error */ | 304 | } else if (user_mode(regs) && !is_global_init(current) && |
277 | printk(KERN_ERR "MCE: uncorrectable ecc error for pid %d\n", | 305 | err->severity == RTAS_SEVERITY_ERROR_SYNC) { |
278 | current->pid); | 306 | |
279 | /* XXX something better for ECC error? */ | 307 | /* |
280 | _exception(SIGBUS, regs, BUS_ADRERR, regs->nip); | 308 | * If we received a synchronous error when in userspace |
281 | nonfatal = 1; | 309 | * kill the task. Firmware may report details of the fail |
310 | * asynchronously, so we can't rely on the target and type | ||
311 | * fields being valid here. | ||
312 | */ | ||
313 | printk(KERN_ERR "MCE: uncorrectable error, killing task " | ||
314 | "%s:%d\n", current->comm, current->pid); | ||
315 | |||
316 | _exception(SIGBUS, regs, BUS_MCEERR_AR, regs->nip); | ||
317 | recovered = 1; | ||
282 | } | 318 | } |
283 | 319 | ||
284 | log_error((char *)err, ERR_TYPE_RTAS_LOG, !nonfatal); | 320 | log_error((char *)err, ERR_TYPE_RTAS_LOG, 0); |
285 | 321 | ||
286 | return nonfatal; | 322 | return recovered; |
287 | } | 323 | } |
288 | 324 | ||
289 | /* | 325 | /* |
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c index 57ddbb43b33a..1de2cbb92303 100644 --- a/arch/powerpc/platforms/pseries/reconfig.c +++ b/arch/powerpc/platforms/pseries/reconfig.c | |||
@@ -539,7 +539,8 @@ out: | |||
539 | } | 539 | } |
540 | 540 | ||
541 | static const struct file_operations ofdt_fops = { | 541 | static const struct file_operations ofdt_fops = { |
542 | .write = ofdt_write | 542 | .write = ofdt_write, |
543 | .llseek = noop_llseek, | ||
543 | }; | 544 | }; |
544 | 545 | ||
545 | /* create /proc/powerpc/ofdt write-only by root */ | 546 | /* create /proc/powerpc/ofdt write-only by root */ |
diff --git a/arch/powerpc/platforms/pseries/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c index 80e9e7652a4d..554457294a2b 100644 --- a/arch/powerpc/platforms/pseries/scanlog.c +++ b/arch/powerpc/platforms/pseries/scanlog.c | |||
@@ -170,6 +170,7 @@ const struct file_operations scanlog_fops = { | |||
170 | .write = scanlog_write, | 170 | .write = scanlog_write, |
171 | .open = scanlog_open, | 171 | .open = scanlog_open, |
172 | .release = scanlog_release, | 172 | .release = scanlog_release, |
173 | .llseek = noop_llseek, | ||
173 | }; | 174 | }; |
174 | 175 | ||
175 | static int __init scanlog_init(void) | 176 | static int __init scanlog_init(void) |
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index a6d19e3a505e..593acceeff96 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -53,9 +53,9 @@ | |||
53 | #include <asm/irq.h> | 53 | #include <asm/irq.h> |
54 | #include <asm/time.h> | 54 | #include <asm/time.h> |
55 | #include <asm/nvram.h> | 55 | #include <asm/nvram.h> |
56 | #include "xics.h" | ||
57 | #include <asm/pmc.h> | 56 | #include <asm/pmc.h> |
58 | #include <asm/mpic.h> | 57 | #include <asm/mpic.h> |
58 | #include <asm/xics.h> | ||
59 | #include <asm/ppc-pci.h> | 59 | #include <asm/ppc-pci.h> |
60 | #include <asm/i8259.h> | 60 | #include <asm/i8259.h> |
61 | #include <asm/udbg.h> | 61 | #include <asm/udbg.h> |
@@ -114,10 +114,13 @@ static void __init fwnmi_init(void) | |||
114 | 114 | ||
115 | static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc) | 115 | static void pseries_8259_cascade(unsigned int irq, struct irq_desc *desc) |
116 | { | 116 | { |
117 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
117 | unsigned int cascade_irq = i8259_irq(); | 118 | unsigned int cascade_irq = i8259_irq(); |
119 | |||
118 | if (cascade_irq != NO_IRQ) | 120 | if (cascade_irq != NO_IRQ) |
119 | generic_handle_irq(cascade_irq); | 121 | generic_handle_irq(cascade_irq); |
120 | desc->chip->eoi(irq); | 122 | |
123 | chip->irq_eoi(&desc->irq_data); | ||
121 | } | 124 | } |
122 | 125 | ||
123 | static void __init pseries_setup_i8259_cascade(void) | 126 | static void __init pseries_setup_i8259_cascade(void) |
@@ -166,7 +169,7 @@ static void __init pseries_setup_i8259_cascade(void) | |||
166 | printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack); | 169 | printk(KERN_DEBUG "pic: PCI 8259 intack at 0x%016lx\n", intack); |
167 | i8259_init(found, intack); | 170 | i8259_init(found, intack); |
168 | of_node_put(found); | 171 | of_node_put(found); |
169 | set_irq_chained_handler(cascade, pseries_8259_cascade); | 172 | irq_set_chained_handler(cascade, pseries_8259_cascade); |
170 | } | 173 | } |
171 | 174 | ||
172 | static void __init pseries_mpic_init_IRQ(void) | 175 | static void __init pseries_mpic_init_IRQ(void) |
@@ -202,6 +205,9 @@ static void __init pseries_mpic_init_IRQ(void) | |||
202 | mpic_assign_isu(mpic, n, isuaddr); | 205 | mpic_assign_isu(mpic, n, isuaddr); |
203 | } | 206 | } |
204 | 207 | ||
208 | /* Setup top-level get_irq */ | ||
209 | ppc_md.get_irq = mpic_get_irq; | ||
210 | |||
205 | /* All ISUs are setup, complete initialization */ | 211 | /* All ISUs are setup, complete initialization */ |
206 | mpic_init(mpic); | 212 | mpic_init(mpic); |
207 | 213 | ||
@@ -211,7 +217,7 @@ static void __init pseries_mpic_init_IRQ(void) | |||
211 | 217 | ||
212 | static void __init pseries_xics_init_IRQ(void) | 218 | static void __init pseries_xics_init_IRQ(void) |
213 | { | 219 | { |
214 | xics_init_IRQ(); | 220 | xics_init(); |
215 | pseries_setup_i8259_cascade(); | 221 | pseries_setup_i8259_cascade(); |
216 | } | 222 | } |
217 | 223 | ||
@@ -235,7 +241,6 @@ static void __init pseries_discover_pic(void) | |||
235 | if (strstr(typep, "open-pic")) { | 241 | if (strstr(typep, "open-pic")) { |
236 | pSeries_mpic_node = of_node_get(np); | 242 | pSeries_mpic_node = of_node_get(np); |
237 | ppc_md.init_IRQ = pseries_mpic_init_IRQ; | 243 | ppc_md.init_IRQ = pseries_mpic_init_IRQ; |
238 | ppc_md.get_irq = mpic_get_irq; | ||
239 | setup_kexec_cpu_down_mpic(); | 244 | setup_kexec_cpu_down_mpic(); |
240 | smp_init_pseries_mpic(); | 245 | smp_init_pseries_mpic(); |
241 | return; | 246 | return; |
@@ -273,6 +278,79 @@ static struct notifier_block pci_dn_reconfig_nb = { | |||
273 | .notifier_call = pci_dn_reconfig_notifier, | 278 | .notifier_call = pci_dn_reconfig_notifier, |
274 | }; | 279 | }; |
275 | 280 | ||
281 | struct kmem_cache *dtl_cache; | ||
282 | |||
283 | #ifdef CONFIG_VIRT_CPU_ACCOUNTING | ||
284 | /* | ||
285 | * Allocate space for the dispatch trace log for all possible cpus | ||
286 | * and register the buffers with the hypervisor. This is used for | ||
287 | * computing time stolen by the hypervisor. | ||
288 | */ | ||
289 | static int alloc_dispatch_logs(void) | ||
290 | { | ||
291 | int cpu, ret; | ||
292 | struct paca_struct *pp; | ||
293 | struct dtl_entry *dtl; | ||
294 | |||
295 | if (!firmware_has_feature(FW_FEATURE_SPLPAR)) | ||
296 | return 0; | ||
297 | |||
298 | if (!dtl_cache) | ||
299 | return 0; | ||
300 | |||
301 | for_each_possible_cpu(cpu) { | ||
302 | pp = &paca[cpu]; | ||
303 | dtl = kmem_cache_alloc(dtl_cache, GFP_KERNEL); | ||
304 | if (!dtl) { | ||
305 | pr_warn("Failed to allocate dispatch trace log for cpu %d\n", | ||
306 | cpu); | ||
307 | pr_warn("Stolen time statistics will be unreliable\n"); | ||
308 | break; | ||
309 | } | ||
310 | |||
311 | pp->dtl_ridx = 0; | ||
312 | pp->dispatch_log = dtl; | ||
313 | pp->dispatch_log_end = dtl + N_DISPATCH_LOG; | ||
314 | pp->dtl_curr = dtl; | ||
315 | } | ||
316 | |||
317 | /* Register the DTL for the current (boot) cpu */ | ||
318 | dtl = get_paca()->dispatch_log; | ||
319 | get_paca()->dtl_ridx = 0; | ||
320 | get_paca()->dtl_curr = dtl; | ||
321 | get_paca()->lppaca_ptr->dtl_idx = 0; | ||
322 | |||
323 | /* hypervisor reads buffer length from this field */ | ||
324 | dtl->enqueue_to_dispatch_time = DISPATCH_LOG_BYTES; | ||
325 | ret = register_dtl(hard_smp_processor_id(), __pa(dtl)); | ||
326 | if (ret) | ||
327 | pr_warn("DTL registration failed for boot cpu %d (%d)\n", | ||
328 | smp_processor_id(), ret); | ||
329 | get_paca()->lppaca_ptr->dtl_enable_mask = 2; | ||
330 | |||
331 | return 0; | ||
332 | } | ||
333 | #else /* !CONFIG_VIRT_CPU_ACCOUNTING */ | ||
334 | static inline int alloc_dispatch_logs(void) | ||
335 | { | ||
336 | return 0; | ||
337 | } | ||
338 | #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ | ||
339 | |||
340 | static int alloc_dispatch_log_kmem_cache(void) | ||
341 | { | ||
342 | dtl_cache = kmem_cache_create("dtl", DISPATCH_LOG_BYTES, | ||
343 | DISPATCH_LOG_BYTES, 0, NULL); | ||
344 | if (!dtl_cache) { | ||
345 | pr_warn("Failed to create dispatch trace log buffer cache\n"); | ||
346 | pr_warn("Stolen time statistics will be unreliable\n"); | ||
347 | return 0; | ||
348 | } | ||
349 | |||
350 | return alloc_dispatch_logs(); | ||
351 | } | ||
352 | early_initcall(alloc_dispatch_log_kmem_cache); | ||
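
The dispatch trace log set up above is, in outline, a per-cpu ring of N_DISPATCH_LOG fixed-size entries whose physical address is handed to the hypervisor; the hypervisor appends entries and advances an index in the lppaca, while the OS keeps its own read cursor (dtl_ridx) that chases it. The toy consumer below is only meant to show that chasing-cursor shape, with made-up types and sizes; it is not the real DTL entry format or the kernel's stolen-time accounting code.

#include <stdio.h>

#define N_DISPATCH_LOG 8	/* made-up ring size for the sketch */

struct dtl_entry_sketch { unsigned long enqueue_to_dispatch_time; };

static struct dtl_entry_sketch ring[N_DISPATCH_LOG];
static unsigned long write_idx;	/* stands in for lppaca->dtl_idx */
static unsigned long read_idx;	/* stands in for paca->dtl_ridx */

static unsigned long consume_new_entries(void)
{
	unsigned long stolen = 0;

	while (read_idx < write_idx) {
		struct dtl_entry_sketch *e = &ring[read_idx % N_DISPATCH_LOG];

		stolen += e->enqueue_to_dispatch_time;
		read_idx++;
	}
	return stolen;
}

int main(void)
{
	/* Pretend the "hypervisor" logged three dispatches. */
	for (int i = 0; i < 3; i++)
		ring[write_idx++ % N_DISPATCH_LOG].enqueue_to_dispatch_time = 10 + i;

	printf("stolen time this pass: %lu\n", consume_new_entries());
	return 0;
}
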
353 | |||
276 | static void __init pSeries_setup_arch(void) | 354 | static void __init pSeries_setup_arch(void) |
277 | { | 355 | { |
278 | /* Discover PIC type and setup ppc_md accordingly */ | 356 | /* Discover PIC type and setup ppc_md accordingly */ |
@@ -323,7 +401,7 @@ static int __init pSeries_init_panel(void) | |||
323 | 401 | ||
324 | return 0; | 402 | return 0; |
325 | } | 403 | } |
326 | arch_initcall(pSeries_init_panel); | 404 | machine_arch_initcall(pseries, pSeries_init_panel); |
327 | 405 | ||
328 | static int pseries_set_dabr(unsigned long dabr) | 406 | static int pseries_set_dabr(unsigned long dabr) |
329 | { | 407 | { |
@@ -340,6 +418,16 @@ static int pseries_set_xdabr(unsigned long dabr) | |||
340 | #define CMO_CHARACTERISTICS_TOKEN 44 | 418 | #define CMO_CHARACTERISTICS_TOKEN 44 |
341 | #define CMO_MAXLENGTH 1026 | 419 | #define CMO_MAXLENGTH 1026 |
342 | 420 | ||
421 | void pSeries_coalesce_init(void) | ||
422 | { | ||
423 | struct hvcall_mpp_x_data mpp_x_data; | ||
424 | |||
425 | if (firmware_has_feature(FW_FEATURE_CMO) && !h_get_mpp_x(&mpp_x_data)) | ||
426 | powerpc_firmware_features |= FW_FEATURE_XCMO; | ||
427 | else | ||
428 | powerpc_firmware_features &= ~FW_FEATURE_XCMO; | ||
429 | } | ||
430 | |||
343 | /** | 431 | /** |
344 | * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions, | 432 | * fw_cmo_feature_init - FW_FEATURE_CMO is not stored in ibm,hypertas-functions, |
345 | * handle that here. (Stolen from parse_system_parameter_string) | 433 | * handle that here. (Stolen from parse_system_parameter_string) |
@@ -409,6 +497,7 @@ void pSeries_cmo_feature_init(void) | |||
409 | pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, | 497 | pr_debug("CMO enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, |
410 | CMO_SecPSP); | 498 | CMO_SecPSP); |
411 | powerpc_firmware_features |= FW_FEATURE_CMO; | 499 | powerpc_firmware_features |= FW_FEATURE_CMO; |
500 | pSeries_coalesce_init(); | ||
412 | } else | 501 | } else |
413 | pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, | 502 | pr_debug("CMO not enabled, PrPSP=%d, SecPSP=%d\n", CMO_PrPSP, |
414 | CMO_SecPSP); | 503 | CMO_SecPSP); |
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c index 0317cce877c6..fbffd7e47ab8 100644 --- a/arch/powerpc/platforms/pseries/smp.c +++ b/arch/powerpc/platforms/pseries/smp.c | |||
@@ -44,10 +44,11 @@ | |||
44 | #include <asm/mpic.h> | 44 | #include <asm/mpic.h> |
45 | #include <asm/vdso_datapage.h> | 45 | #include <asm/vdso_datapage.h> |
46 | #include <asm/cputhreads.h> | 46 | #include <asm/cputhreads.h> |
47 | #include <asm/mpic.h> | ||
48 | #include <asm/xics.h> | ||
47 | 49 | ||
48 | #include "plpar_wrappers.h" | 50 | #include "plpar_wrappers.h" |
49 | #include "pseries.h" | 51 | #include "pseries.h" |
50 | #include "xics.h" | ||
51 | #include "offline_states.h" | 52 | #include "offline_states.h" |
52 | 53 | ||
53 | 54 | ||
@@ -64,8 +65,8 @@ int smp_query_cpu_stopped(unsigned int pcpu) | |||
64 | int qcss_tok = rtas_token("query-cpu-stopped-state"); | 65 | int qcss_tok = rtas_token("query-cpu-stopped-state"); |
65 | 66 | ||
66 | if (qcss_tok == RTAS_UNKNOWN_SERVICE) { | 67 | if (qcss_tok == RTAS_UNKNOWN_SERVICE) { |
67 | printk(KERN_INFO "Firmware doesn't support " | 68 | printk_once(KERN_INFO |
68 | "query-cpu-stopped-state\n"); | 69 | "Firmware doesn't support query-cpu-stopped-state\n"); |
69 | return QCSS_HARDWARE_ERROR; | 70 | return QCSS_HARDWARE_ERROR; |
70 | } | 71 | } |
71 | 72 | ||
@@ -112,10 +113,10 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) | |||
112 | 113 | ||
113 | /* Fixup atomic count: it exited inside IRQ handler. */ | 114 | /* Fixup atomic count: it exited inside IRQ handler. */ |
114 | task_thread_info(paca[lcpu].__current)->preempt_count = 0; | 115 | task_thread_info(paca[lcpu].__current)->preempt_count = 0; |
115 | 116 | #ifdef CONFIG_HOTPLUG_CPU | |
116 | if (get_cpu_current_state(lcpu) == CPU_STATE_INACTIVE) | 117 | if (get_cpu_current_state(lcpu) == CPU_STATE_INACTIVE) |
117 | goto out; | 118 | goto out; |
118 | 119 | #endif | |
119 | /* | 120 | /* |
120 | * If the RTAS start-cpu token does not exist then presume the | 121 | * If the RTAS start-cpu token does not exist then presume the |
121 | * cpu is already spinning. | 122 | * cpu is already spinning. |
@@ -130,11 +131,12 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) | |||
130 | return 0; | 131 | return 0; |
131 | } | 132 | } |
132 | 133 | ||
134 | #ifdef CONFIG_HOTPLUG_CPU | ||
133 | out: | 135 | out: |
136 | #endif | ||
134 | return 1; | 137 | return 1; |
135 | } | 138 | } |
136 | 139 | ||
137 | #ifdef CONFIG_XICS | ||
138 | static void __devinit smp_xics_setup_cpu(int cpu) | 140 | static void __devinit smp_xics_setup_cpu(int cpu) |
139 | { | 141 | { |
140 | if (cpu != boot_cpuid) | 142 | if (cpu != boot_cpuid) |
@@ -144,20 +146,18 @@ static void __devinit smp_xics_setup_cpu(int cpu) | |||
144 | vpa_init(cpu); | 146 | vpa_init(cpu); |
145 | 147 | ||
146 | cpumask_clear_cpu(cpu, of_spin_mask); | 148 | cpumask_clear_cpu(cpu, of_spin_mask); |
149 | #ifdef CONFIG_HOTPLUG_CPU | ||
147 | set_cpu_current_state(cpu, CPU_STATE_ONLINE); | 150 | set_cpu_current_state(cpu, CPU_STATE_ONLINE); |
148 | set_default_offline_state(cpu); | 151 | set_default_offline_state(cpu); |
149 | 152 | #endif | |
150 | } | 153 | } |
151 | #endif /* CONFIG_XICS */ | ||
152 | 154 | ||
153 | static void __devinit smp_pSeries_kick_cpu(int nr) | 155 | static int __devinit smp_pSeries_kick_cpu(int nr) |
154 | { | 156 | { |
155 | long rc; | ||
156 | unsigned long hcpuid; | ||
157 | BUG_ON(nr < 0 || nr >= NR_CPUS); | 157 | BUG_ON(nr < 0 || nr >= NR_CPUS); |
158 | 158 | ||
159 | if (!smp_startup_cpu(nr)) | 159 | if (!smp_startup_cpu(nr)) |
160 | return; | 160 | return -ENOENT; |
161 | 161 | ||
162 | /* | 162 | /* |
163 | * The processor is currently spinning, waiting for the | 163 | * The processor is currently spinning, waiting for the |
@@ -165,16 +165,22 @@ static void __devinit smp_pSeries_kick_cpu(int nr) | |||
165 | * the processor will continue on to secondary_start | 165 | * the processor will continue on to secondary_start |
166 | */ | 166 | */ |
167 | paca[nr].cpu_start = 1; | 167 | paca[nr].cpu_start = 1; |
168 | 168 | #ifdef CONFIG_HOTPLUG_CPU | |
169 | set_preferred_offline_state(nr, CPU_STATE_ONLINE); | 169 | set_preferred_offline_state(nr, CPU_STATE_ONLINE); |
170 | 170 | ||
171 | if (get_cpu_current_state(nr) == CPU_STATE_INACTIVE) { | 171 | if (get_cpu_current_state(nr) == CPU_STATE_INACTIVE) { |
172 | long rc; | ||
173 | unsigned long hcpuid; | ||
174 | |||
172 | hcpuid = get_hard_smp_processor_id(nr); | 175 | hcpuid = get_hard_smp_processor_id(nr); |
173 | rc = plpar_hcall_norets(H_PROD, hcpuid); | 176 | rc = plpar_hcall_norets(H_PROD, hcpuid); |
174 | if (rc != H_SUCCESS) | 177 | if (rc != H_SUCCESS) |
175 | printk(KERN_ERR "Error: Prod to wake up processor %d " | 178 | printk(KERN_ERR "Error: Prod to wake up processor %d " |
176 | "Ret= %ld\n", nr, rc); | 179 | "Ret= %ld\n", nr, rc); |
177 | } | 180 | } |
181 | #endif | ||
182 | |||
183 | return 0; | ||
178 | } | 184 | } |
179 | 185 | ||
180 | static int smp_pSeries_cpu_bootable(unsigned int nr) | 186 | static int smp_pSeries_cpu_bootable(unsigned int nr) |
@@ -192,23 +198,22 @@ static int smp_pSeries_cpu_bootable(unsigned int nr) | |||
192 | 198 | ||
193 | return 1; | 199 | return 1; |
194 | } | 200 | } |
195 | #ifdef CONFIG_MPIC | 201 | |
196 | static struct smp_ops_t pSeries_mpic_smp_ops = { | 202 | static struct smp_ops_t pSeries_mpic_smp_ops = { |
197 | .message_pass = smp_mpic_message_pass, | 203 | .message_pass = smp_mpic_message_pass, |
198 | .probe = smp_mpic_probe, | 204 | .probe = smp_mpic_probe, |
199 | .kick_cpu = smp_pSeries_kick_cpu, | 205 | .kick_cpu = smp_pSeries_kick_cpu, |
200 | .setup_cpu = smp_mpic_setup_cpu, | 206 | .setup_cpu = smp_mpic_setup_cpu, |
201 | }; | 207 | }; |
202 | #endif | 208 | |
203 | #ifdef CONFIG_XICS | ||
204 | static struct smp_ops_t pSeries_xics_smp_ops = { | 209 | static struct smp_ops_t pSeries_xics_smp_ops = { |
205 | .message_pass = smp_xics_message_pass, | 210 | .message_pass = smp_muxed_ipi_message_pass, |
206 | .probe = smp_xics_probe, | 211 | .cause_ipi = NULL, /* Filled at runtime by xics_smp_probe() */ |
212 | .probe = xics_smp_probe, | ||
207 | .kick_cpu = smp_pSeries_kick_cpu, | 213 | .kick_cpu = smp_pSeries_kick_cpu, |
208 | .setup_cpu = smp_xics_setup_cpu, | 214 | .setup_cpu = smp_xics_setup_cpu, |
209 | .cpu_bootable = smp_pSeries_cpu_bootable, | 215 | .cpu_bootable = smp_pSeries_cpu_bootable, |
210 | }; | 216 | }; |
211 | #endif | ||
212 | 217 | ||
213 | /* This is called very early */ | 218 | /* This is called very early */ |
214 | static void __init smp_init_pseries(void) | 219 | static void __init smp_init_pseries(void) |
@@ -240,14 +245,12 @@ static void __init smp_init_pseries(void) | |||
240 | pr_debug(" <- smp_init_pSeries()\n"); | 245 | pr_debug(" <- smp_init_pSeries()\n"); |
241 | } | 246 | } |
242 | 247 | ||
243 | #ifdef CONFIG_MPIC | ||
244 | void __init smp_init_pseries_mpic(void) | 248 | void __init smp_init_pseries_mpic(void) |
245 | { | 249 | { |
246 | smp_ops = &pSeries_mpic_smp_ops; | 250 | smp_ops = &pSeries_mpic_smp_ops; |
247 | 251 | ||
248 | smp_init_pseries(); | 252 | smp_init_pseries(); |
249 | } | 253 | } |
250 | #endif | ||
251 | 254 | ||
252 | void __init smp_init_pseries_xics(void) | 255 | void __init smp_init_pseries_xics(void) |
253 | { | 256 | { |
diff --git a/arch/powerpc/platforms/pseries/suspend.c b/arch/powerpc/platforms/pseries/suspend.c index ed72098bb4e3..a8ca289ff267 100644 --- a/arch/powerpc/platforms/pseries/suspend.c +++ b/arch/powerpc/platforms/pseries/suspend.c | |||
@@ -153,7 +153,7 @@ static struct sysdev_class suspend_sysdev_class = { | |||
153 | .name = "power", | 153 | .name = "power", |
154 | }; | 154 | }; |
155 | 155 | ||
156 | static struct platform_suspend_ops pseries_suspend_ops = { | 156 | static const struct platform_suspend_ops pseries_suspend_ops = { |
157 | .valid = suspend_valid_only_mem, | 157 | .valid = suspend_valid_only_mem, |
158 | .begin = pseries_suspend_begin, | 158 | .begin = pseries_suspend_begin, |
159 | .prepare_late = pseries_prepare_late, | 159 | .prepare_late = pseries_prepare_late, |
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c deleted file mode 100644 index 93834b0d8272..000000000000 --- a/arch/powerpc/platforms/pseries/xics.c +++ /dev/null | |||
@@ -1,943 +0,0 @@ | |||
1 | /* | ||
2 | * arch/powerpc/platforms/pseries/xics.c | ||
3 | * | ||
4 | * Copyright 2000 IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/types.h> | ||
13 | #include <linux/threads.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/irq.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/radix-tree.h> | ||
20 | #include <linux/cpu.h> | ||
21 | #include <linux/msi.h> | ||
22 | #include <linux/of.h> | ||
23 | #include <linux/percpu.h> | ||
24 | |||
25 | #include <asm/firmware.h> | ||
26 | #include <asm/io.h> | ||
27 | #include <asm/pgtable.h> | ||
28 | #include <asm/smp.h> | ||
29 | #include <asm/rtas.h> | ||
30 | #include <asm/hvcall.h> | ||
31 | #include <asm/machdep.h> | ||
32 | |||
33 | #include "xics.h" | ||
34 | #include "plpar_wrappers.h" | ||
35 | |||
36 | static struct irq_host *xics_host; | ||
37 | |||
38 | #define XICS_IPI 2 | ||
39 | #define XICS_IRQ_SPURIOUS 0 | ||
40 | |||
41 | /* Want a priority other than 0. Various HW issues require this. */ | ||
42 | #define DEFAULT_PRIORITY 5 | ||
43 | |||
44 | /* | ||
45 | * Mark IPIs as higher priority so we can take them inside interrupts that | ||
46 | * aren't marked IRQF_DISABLED | ||
47 | */ | ||
48 | #define IPI_PRIORITY 4 | ||
49 | |||
50 | /* The least favored priority */ | ||
51 | #define LOWEST_PRIORITY 0xFF | ||
52 | |||
53 | /* The number of priorities defined above */ | ||
54 | #define MAX_NUM_PRIORITIES 3 | ||
55 | |||
56 | static unsigned int default_server = 0xFF; | ||
57 | static unsigned int default_distrib_server = 0; | ||
58 | static unsigned int interrupt_server_size = 8; | ||
59 | |||
60 | /* RTAS service tokens */ | ||
61 | static int ibm_get_xive; | ||
62 | static int ibm_set_xive; | ||
63 | static int ibm_int_on; | ||
64 | static int ibm_int_off; | ||
65 | |||
66 | struct xics_cppr { | ||
67 | unsigned char stack[MAX_NUM_PRIORITIES]; | ||
68 | int index; | ||
69 | }; | ||
70 | |||
71 | static DEFINE_PER_CPU(struct xics_cppr, xics_cppr); | ||
72 | |||
73 | /* Direct hardware low level accessors */ | ||
74 | |||
75 | /* The part of the interrupt presentation layer that we care about */ | ||
76 | struct xics_ipl { | ||
77 | union { | ||
78 | u32 word; | ||
79 | u8 bytes[4]; | ||
80 | } xirr_poll; | ||
81 | union { | ||
82 | u32 word; | ||
83 | u8 bytes[4]; | ||
84 | } xirr; | ||
85 | u32 dummy; | ||
86 | union { | ||
87 | u32 word; | ||
88 | u8 bytes[4]; | ||
89 | } qirr; | ||
90 | }; | ||
91 | |||
92 | static struct xics_ipl __iomem *xics_per_cpu[NR_CPUS]; | ||
93 | |||
94 | static inline unsigned int direct_xirr_info_get(void) | ||
95 | { | ||
96 | int cpu = smp_processor_id(); | ||
97 | |||
98 | return in_be32(&xics_per_cpu[cpu]->xirr.word); | ||
99 | } | ||
100 | |||
101 | static inline void direct_xirr_info_set(unsigned int value) | ||
102 | { | ||
103 | int cpu = smp_processor_id(); | ||
104 | |||
105 | out_be32(&xics_per_cpu[cpu]->xirr.word, value); | ||
106 | } | ||
107 | |||
108 | static inline void direct_cppr_info(u8 value) | ||
109 | { | ||
110 | int cpu = smp_processor_id(); | ||
111 | |||
112 | out_8(&xics_per_cpu[cpu]->xirr.bytes[0], value); | ||
113 | } | ||
114 | |||
115 | static inline void direct_qirr_info(int n_cpu, u8 value) | ||
116 | { | ||
117 | out_8(&xics_per_cpu[n_cpu]->qirr.bytes[0], value); | ||
118 | } | ||
119 | |||
120 | |||
121 | /* LPAR low level accessors */ | ||
122 | |||
123 | static inline unsigned int lpar_xirr_info_get(unsigned char cppr) | ||
124 | { | ||
125 | unsigned long lpar_rc; | ||
126 | unsigned long return_value; | ||
127 | |||
128 | lpar_rc = plpar_xirr(&return_value, cppr); | ||
129 | if (lpar_rc != H_SUCCESS) | ||
130 | panic(" bad return code xirr - rc = %lx\n", lpar_rc); | ||
131 | return (unsigned int)return_value; | ||
132 | } | ||
133 | |||
134 | static inline void lpar_xirr_info_set(unsigned int value) | ||
135 | { | ||
136 | unsigned long lpar_rc; | ||
137 | |||
138 | lpar_rc = plpar_eoi(value); | ||
139 | if (lpar_rc != H_SUCCESS) | ||
140 | panic("bad return code EOI - rc = %ld, value=%x\n", lpar_rc, | ||
141 | value); | ||
142 | } | ||
143 | |||
144 | static inline void lpar_cppr_info(u8 value) | ||
145 | { | ||
146 | unsigned long lpar_rc; | ||
147 | |||
148 | lpar_rc = plpar_cppr(value); | ||
149 | if (lpar_rc != H_SUCCESS) | ||
150 | panic("bad return code cppr - rc = %lx\n", lpar_rc); | ||
151 | } | ||
152 | |||
153 | static inline void lpar_qirr_info(int n_cpu , u8 value) | ||
154 | { | ||
155 | unsigned long lpar_rc; | ||
156 | |||
157 | lpar_rc = plpar_ipi(get_hard_smp_processor_id(n_cpu), value); | ||
158 | if (lpar_rc != H_SUCCESS) | ||
159 | panic("bad return code qirr - rc = %lx\n", lpar_rc); | ||
160 | } | ||
161 | |||
162 | |||
163 | /* Interface to generic irq subsystem */ | ||
164 | |||
165 | #ifdef CONFIG_SMP | ||
166 | /* | ||
167 | * For the moment we only implement delivery to all cpus or one cpu. | ||
168 | * | ||
169 | * If the requested affinity is cpu_all_mask, we set global affinity. | ||
170 | * If not, we set it to the first cpu in the mask, even if multiple cpus | ||
171 | * are set. This is so things like irqbalance (which set core and package | ||
172 | * wide affinities) do the right thing. | ||
173 | */ | ||
174 | static int get_irq_server(unsigned int virq, const struct cpumask *cpumask, | ||
175 | unsigned int strict_check) | ||
176 | { | ||
177 | |||
178 | if (!distribute_irqs) | ||
179 | return default_server; | ||
180 | |||
181 | if (!cpumask_equal(cpumask, cpu_all_mask)) { | ||
182 | int server = cpumask_first_and(cpu_online_mask, cpumask); | ||
183 | |||
184 | if (server < nr_cpu_ids) | ||
185 | return get_hard_smp_processor_id(server); | ||
186 | |||
187 | if (strict_check) | ||
188 | return -1; | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * Workaround issue with some versions of JS20 firmware that | ||
193 | * deliver interrupts to cpus which haven't been started. This | ||
194 | * happens when using the maxcpus= boot option. | ||
195 | */ | ||
196 | if (cpumask_equal(cpu_online_mask, cpu_present_mask)) | ||
197 | return default_distrib_server; | ||
198 | |||
199 | return default_server; | ||
200 | } | ||
201 | #else | ||
202 | #define get_irq_server(virq, cpumask, strict_check) (default_server) | ||
203 | #endif | ||
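
The selection policy spelled out in the comment above (global delivery for cpu_all_mask, otherwise the first usable cpu in the requested mask, with a fallback server when nothing matches) reduces to a few lines over a plain bitmask. The sketch below uses a 64-bit word as a stand-in cpumask and a GCC-style builtin to find the lowest set bit, purely to show the decision order; the real code works on struct cpumask, hard SMP processor ids, and the distribute_irqs and strict_check cases omitted here.

#include <stdio.h>
#include <stdint.h>

#define ALL_MASK	UINT64_MAX
#define GLOBAL_SERVER	(-1)		/* stand-in for default_distrib_server */

/* Pick a delivery target: global if "all cpus", else first online cpu set. */
static int pick_server(uint64_t requested, uint64_t online)
{
	if (requested == ALL_MASK)
		return GLOBAL_SERVER;

	uint64_t both = requested & online;
	if (both)
		return __builtin_ctzll(both);	/* lowest set bit = first cpu */

	return GLOBAL_SERVER;			/* nothing usable, fall back */
}

int main(void)
{
	printf("%d\n", pick_server(ALL_MASK, 0xf));	/* -1: global delivery */
	printf("%d\n", pick_server(0x6, 0xf));		/*  1: first cpu in mask */
	return 0;
}
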
204 | |||
205 | static void xics_unmask_irq(unsigned int virq) | ||
206 | { | ||
207 | unsigned int irq; | ||
208 | int call_status; | ||
209 | int server; | ||
210 | |||
211 | pr_devel("xics: unmask virq %d\n", virq); | ||
212 | |||
213 | irq = (unsigned int)irq_map[virq].hwirq; | ||
214 | pr_devel(" -> map to hwirq 0x%x\n", irq); | ||
215 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | ||
216 | return; | ||
217 | |||
218 | server = get_irq_server(virq, irq_to_desc(virq)->affinity, 0); | ||
219 | |||
220 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, server, | ||
221 | DEFAULT_PRIORITY); | ||
222 | if (call_status != 0) { | ||
223 | printk(KERN_ERR | ||
224 | "%s: ibm_set_xive irq %u server %x returned %d\n", | ||
225 | __func__, irq, server, call_status); | ||
226 | return; | ||
227 | } | ||
228 | |||
229 | /* Now unmask the interrupt (often a no-op) */ | ||
230 | call_status = rtas_call(ibm_int_on, 1, 1, NULL, irq); | ||
231 | if (call_status != 0) { | ||
232 | printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", | ||
233 | __func__, irq, call_status); | ||
234 | return; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | static unsigned int xics_startup(unsigned int virq) | ||
239 | { | ||
240 | /* | ||
241 | * The generic MSI code returns with the interrupt disabled on the | ||
242 | * card, using the MSI mask bits. Firmware doesn't appear to unmask | ||
243 | * at that level, so we do it here by hand. | ||
244 | */ | ||
245 | if (irq_to_desc(virq)->msi_desc) | ||
246 | unmask_msi_irq(virq); | ||
247 | |||
248 | /* unmask it */ | ||
249 | xics_unmask_irq(virq); | ||
250 | return 0; | ||
251 | } | ||
252 | |||
253 | static void xics_mask_real_irq(unsigned int irq) | ||
254 | { | ||
255 | int call_status; | ||
256 | |||
257 | if (irq == XICS_IPI) | ||
258 | return; | ||
259 | |||
260 | call_status = rtas_call(ibm_int_off, 1, 1, NULL, irq); | ||
261 | if (call_status != 0) { | ||
262 | printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", | ||
263 | __func__, irq, call_status); | ||
264 | return; | ||
265 | } | ||
266 | |||
267 | /* Have to set XIVE to 0xff to be able to remove a slot */ | ||
268 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, irq, | ||
269 | default_server, 0xff); | ||
270 | if (call_status != 0) { | ||
271 | printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", | ||
272 | __func__, irq, call_status); | ||
273 | return; | ||
274 | } | ||
275 | } | ||
276 | |||
277 | static void xics_mask_irq(unsigned int virq) | ||
278 | { | ||
279 | unsigned int irq; | ||
280 | |||
281 | pr_devel("xics: mask virq %d\n", virq); | ||
282 | |||
283 | irq = (unsigned int)irq_map[virq].hwirq; | ||
284 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | ||
285 | return; | ||
286 | xics_mask_real_irq(irq); | ||
287 | } | ||
288 | |||
289 | static void xics_mask_unknown_vec(unsigned int vec) | ||
290 | { | ||
291 | printk(KERN_ERR "Interrupt %u (real) is invalid, disabling it.\n", vec); | ||
292 | xics_mask_real_irq(vec); | ||
293 | } | ||
294 | |||
295 | static inline unsigned int xics_xirr_vector(unsigned int xirr) | ||
296 | { | ||
297 | /* | ||
298 | * The top byte is the old cppr, to be restored on EOI. | ||
299 | * The remaining 24 bits are the vector. | ||
300 | */ | ||
301 | return xirr & 0x00ffffff; | ||
302 | } | ||
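As an illustrative aside (not part of this patch), the XIRR layout described in the comment above splits into the old CPPR in the top byte and a 24-bit vector in the rest; the EOI paths further down rebuild the same word from a popped priority and the hw irq number. The helper names below are hypothetical, a minimal sketch only:

    /* Illustration only: how a 32-bit XIRR value splits into CPPR and vector,
     * and how an EOI value is rebuilt, mirroring xics_eoi_direct/lpar(). */
    static inline unsigned char example_xirr_cppr(unsigned int xirr)
    {
        return xirr >> 24;              /* old CPPR, restored on EOI */
    }

    static inline unsigned int example_xirr_vector(unsigned int xirr)
    {
        return xirr & 0x00ffffff;       /* 24-bit interrupt vector */
    }

    static inline unsigned int example_eoi_value(unsigned char cppr, unsigned int hwirq)
    {
        return ((unsigned int)cppr << 24) | hwirq;
    }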
303 | |||
304 | static void push_cppr(unsigned int vec) | ||
305 | { | ||
306 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
307 | |||
308 | if (WARN_ON(os_cppr->index >= MAX_NUM_PRIORITIES - 1)) | ||
309 | return; | ||
310 | |||
311 | if (vec == XICS_IPI) | ||
312 | os_cppr->stack[++os_cppr->index] = IPI_PRIORITY; | ||
313 | else | ||
314 | os_cppr->stack[++os_cppr->index] = DEFAULT_PRIORITY; | ||
315 | } | ||
316 | |||
317 | static unsigned int xics_get_irq_direct(void) | ||
318 | { | ||
319 | unsigned int xirr = direct_xirr_info_get(); | ||
320 | unsigned int vec = xics_xirr_vector(xirr); | ||
321 | unsigned int irq; | ||
322 | |||
323 | if (vec == XICS_IRQ_SPURIOUS) | ||
324 | return NO_IRQ; | ||
325 | |||
326 | irq = irq_radix_revmap_lookup(xics_host, vec); | ||
327 | if (likely(irq != NO_IRQ)) { | ||
328 | push_cppr(vec); | ||
329 | return irq; | ||
330 | } | ||
331 | |||
332 | /* We don't have a linux mapping, so have rtas mask it. */ | ||
333 | xics_mask_unknown_vec(vec); | ||
334 | |||
335 | /* We might learn about it later, so EOI it */ | ||
336 | direct_xirr_info_set(xirr); | ||
337 | return NO_IRQ; | ||
338 | } | ||
339 | |||
340 | static unsigned int xics_get_irq_lpar(void) | ||
341 | { | ||
342 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
343 | unsigned int xirr = lpar_xirr_info_get(os_cppr->stack[os_cppr->index]); | ||
344 | unsigned int vec = xics_xirr_vector(xirr); | ||
345 | unsigned int irq; | ||
346 | |||
347 | if (vec == XICS_IRQ_SPURIOUS) | ||
348 | return NO_IRQ; | ||
349 | |||
350 | irq = irq_radix_revmap_lookup(xics_host, vec); | ||
351 | if (likely(irq != NO_IRQ)) { | ||
352 | push_cppr(vec); | ||
353 | return irq; | ||
354 | } | ||
355 | |||
356 | /* We don't have a linux mapping, so have RTAS mask it. */ | ||
357 | xics_mask_unknown_vec(vec); | ||
358 | |||
359 | /* We might learn about it later, so EOI it */ | ||
360 | lpar_xirr_info_set(xirr); | ||
361 | return NO_IRQ; | ||
362 | } | ||
363 | |||
364 | static unsigned char pop_cppr(void) | ||
365 | { | ||
366 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
367 | |||
368 | if (WARN_ON(os_cppr->index < 1)) | ||
369 | return LOWEST_PRIORITY; | ||
370 | |||
371 | return os_cppr->stack[--os_cppr->index]; | ||
372 | } | ||
373 | |||
374 | static void xics_eoi_direct(unsigned int virq) | ||
375 | { | ||
376 | unsigned int irq = (unsigned int)irq_map[virq].hwirq; | ||
377 | |||
378 | iosync(); | ||
379 | direct_xirr_info_set((pop_cppr() << 24) | irq); | ||
380 | } | ||
381 | |||
382 | static void xics_eoi_lpar(unsigned int virq) | ||
383 | { | ||
384 | unsigned int irq = (unsigned int)irq_map[virq].hwirq; | ||
385 | |||
386 | iosync(); | ||
387 | lpar_xirr_info_set((pop_cppr() << 24) | irq); | ||
388 | } | ||
389 | |||
390 | static int xics_set_affinity(unsigned int virq, const struct cpumask *cpumask) | ||
391 | { | ||
392 | unsigned int irq; | ||
393 | int status; | ||
394 | int xics_status[2]; | ||
395 | int irq_server; | ||
396 | |||
397 | irq = (unsigned int)irq_map[virq].hwirq; | ||
398 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | ||
399 | return -1; | ||
400 | |||
401 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); | ||
402 | |||
403 | if (status) { | ||
404 | printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", | ||
405 | __func__, irq, status); | ||
406 | return -1; | ||
407 | } | ||
408 | |||
409 | irq_server = get_irq_server(virq, cpumask, 1); | ||
410 | if (irq_server == -1) { | ||
411 | char cpulist[128]; | ||
412 | cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); | ||
413 | printk(KERN_WARNING | ||
414 | "%s: No online cpus in the mask %s for irq %d\n", | ||
415 | __func__, cpulist, virq); | ||
416 | return -1; | ||
417 | } | ||
418 | |||
419 | status = rtas_call(ibm_set_xive, 3, 1, NULL, | ||
420 | irq, irq_server, xics_status[1]); | ||
421 | |||
422 | if (status) { | ||
423 | printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", | ||
424 | __func__, irq, status); | ||
425 | return -1; | ||
426 | } | ||
427 | |||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | static struct irq_chip xics_pic_direct = { | ||
432 | .name = "XICS", | ||
433 | .startup = xics_startup, | ||
434 | .mask = xics_mask_irq, | ||
435 | .unmask = xics_unmask_irq, | ||
436 | .eoi = xics_eoi_direct, | ||
437 | .set_affinity = xics_set_affinity | ||
438 | }; | ||
439 | |||
440 | static struct irq_chip xics_pic_lpar = { | ||
441 | .name = "XICS", | ||
442 | .startup = xics_startup, | ||
443 | .mask = xics_mask_irq, | ||
444 | .unmask = xics_unmask_irq, | ||
445 | .eoi = xics_eoi_lpar, | ||
446 | .set_affinity = xics_set_affinity | ||
447 | }; | ||
448 | |||
449 | |||
450 | /* Interface to arch irq controller subsystem layer */ | ||
451 | |||
452 | /* Points to the irq_chip we're actually using */ | ||
453 | static struct irq_chip *xics_irq_chip; | ||
454 | |||
455 | static int xics_host_match(struct irq_host *h, struct device_node *node) | ||
456 | { | ||
457 | /* IBM machines have interrupt parents of various funky types for things | ||
458 | * like vdevices, events, etc... The trick we use here is to match | ||
459 | * everything except the legacy 8259, which is compatible with "chrp,iic" | ||
460 | */ | ||
461 | return !of_device_is_compatible(node, "chrp,iic"); | ||
462 | } | ||
463 | |||
464 | static int xics_host_map(struct irq_host *h, unsigned int virq, | ||
465 | irq_hw_number_t hw) | ||
466 | { | ||
467 | pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); | ||
468 | |||
469 | /* Insert the interrupt mapping into the radix tree for fast lookup */ | ||
470 | irq_radix_revmap_insert(xics_host, virq, hw); | ||
471 | |||
472 | irq_to_desc(virq)->status |= IRQ_LEVEL; | ||
473 | set_irq_chip_and_handler(virq, xics_irq_chip, handle_fasteoi_irq); | ||
474 | return 0; | ||
475 | } | ||
476 | |||
477 | static int xics_host_xlate(struct irq_host *h, struct device_node *ct, | ||
478 | const u32 *intspec, unsigned int intsize, | ||
479 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
480 | |||
481 | { | ||
482 | /* The current xics implementation translates everything | ||
483 | * to level. It is not technically right for MSIs, but this | ||
484 | * is irrelevant at this point. We might get smarter in the future. | ||
485 | */ | ||
486 | *out_hwirq = intspec[0]; | ||
487 | *out_flags = IRQ_TYPE_LEVEL_LOW; | ||
488 | |||
489 | return 0; | ||
490 | } | ||
491 | |||
492 | static struct irq_host_ops xics_host_ops = { | ||
493 | .match = xics_host_match, | ||
494 | .map = xics_host_map, | ||
495 | .xlate = xics_host_xlate, | ||
496 | }; | ||
497 | |||
498 | static void __init xics_init_host(void) | ||
499 | { | ||
500 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
501 | xics_irq_chip = &xics_pic_lpar; | ||
502 | else | ||
503 | xics_irq_chip = &xics_pic_direct; | ||
504 | |||
505 | xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops, | ||
506 | XICS_IRQ_SPURIOUS); | ||
507 | BUG_ON(xics_host == NULL); | ||
508 | irq_set_default_host(xics_host); | ||
509 | } | ||
510 | |||
511 | |||
512 | /* Inter-processor interrupt support */ | ||
513 | |||
514 | #ifdef CONFIG_SMP | ||
515 | /* | ||
516 | * XICS only has a single IPI, so encode the messages per CPU | ||
517 | */ | ||
518 | static DEFINE_PER_CPU_SHARED_ALIGNED(unsigned long, xics_ipi_message); | ||
519 | |||
520 | static inline void smp_xics_do_message(int cpu, int msg) | ||
521 | { | ||
522 | unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); | ||
523 | |||
524 | set_bit(msg, tgt); | ||
525 | mb(); | ||
526 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
527 | lpar_qirr_info(cpu, IPI_PRIORITY); | ||
528 | else | ||
529 | direct_qirr_info(cpu, IPI_PRIORITY); | ||
530 | } | ||
531 | |||
532 | void smp_xics_message_pass(int target, int msg) | ||
533 | { | ||
534 | unsigned int i; | ||
535 | |||
536 | if (target < NR_CPUS) { | ||
537 | smp_xics_do_message(target, msg); | ||
538 | } else { | ||
539 | for_each_online_cpu(i) { | ||
540 | if (target == MSG_ALL_BUT_SELF | ||
541 | && i == smp_processor_id()) | ||
542 | continue; | ||
543 | smp_xics_do_message(i, msg); | ||
544 | } | ||
545 | } | ||
546 | } | ||
547 | |||
548 | static irqreturn_t xics_ipi_dispatch(int cpu) | ||
549 | { | ||
550 | unsigned long *tgt = &per_cpu(xics_ipi_message, cpu); | ||
551 | |||
552 | mb(); /* order mmio clearing qirr */ | ||
553 | while (*tgt) { | ||
554 | if (test_and_clear_bit(PPC_MSG_CALL_FUNCTION, tgt)) { | ||
555 | smp_message_recv(PPC_MSG_CALL_FUNCTION); | ||
556 | } | ||
557 | if (test_and_clear_bit(PPC_MSG_RESCHEDULE, tgt)) { | ||
558 | smp_message_recv(PPC_MSG_RESCHEDULE); | ||
559 | } | ||
560 | if (test_and_clear_bit(PPC_MSG_CALL_FUNC_SINGLE, tgt)) { | ||
561 | smp_message_recv(PPC_MSG_CALL_FUNC_SINGLE); | ||
562 | } | ||
563 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | ||
564 | if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, tgt)) { | ||
565 | smp_message_recv(PPC_MSG_DEBUGGER_BREAK); | ||
566 | } | ||
567 | #endif | ||
568 | } | ||
569 | return IRQ_HANDLED; | ||
570 | } | ||
571 | |||
572 | static irqreturn_t xics_ipi_action_direct(int irq, void *dev_id) | ||
573 | { | ||
574 | int cpu = smp_processor_id(); | ||
575 | |||
576 | direct_qirr_info(cpu, 0xff); | ||
577 | |||
578 | return xics_ipi_dispatch(cpu); | ||
579 | } | ||
580 | |||
581 | static irqreturn_t xics_ipi_action_lpar(int irq, void *dev_id) | ||
582 | { | ||
583 | int cpu = smp_processor_id(); | ||
584 | |||
585 | lpar_qirr_info(cpu, 0xff); | ||
586 | |||
587 | return xics_ipi_dispatch(cpu); | ||
588 | } | ||
589 | |||
590 | static void xics_request_ipi(void) | ||
591 | { | ||
592 | unsigned int ipi; | ||
593 | int rc; | ||
594 | |||
595 | ipi = irq_create_mapping(xics_host, XICS_IPI); | ||
596 | BUG_ON(ipi == NO_IRQ); | ||
597 | |||
598 | /* | ||
599 | * IPIs are marked IRQF_DISABLED as they must run with irqs | ||
600 | * disabled | ||
601 | */ | ||
602 | set_irq_handler(ipi, handle_percpu_irq); | ||
603 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
604 | rc = request_irq(ipi, xics_ipi_action_lpar, | ||
605 | IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); | ||
606 | else | ||
607 | rc = request_irq(ipi, xics_ipi_action_direct, | ||
608 | IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL); | ||
609 | BUG_ON(rc); | ||
610 | } | ||
611 | |||
612 | int __init smp_xics_probe(void) | ||
613 | { | ||
614 | xics_request_ipi(); | ||
615 | |||
616 | return cpumask_weight(cpu_possible_mask); | ||
617 | } | ||
618 | |||
619 | #endif /* CONFIG_SMP */ | ||
620 | |||
621 | |||
622 | /* Initialization */ | ||
623 | |||
624 | static void xics_update_irq_servers(void) | ||
625 | { | ||
626 | int i, j; | ||
627 | struct device_node *np; | ||
628 | u32 ilen; | ||
629 | const u32 *ireg; | ||
630 | u32 hcpuid; | ||
631 | |||
632 | /* Find the server numbers for the boot cpu. */ | ||
633 | np = of_get_cpu_node(boot_cpuid, NULL); | ||
634 | BUG_ON(!np); | ||
635 | |||
636 | ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen); | ||
637 | if (!ireg) { | ||
638 | of_node_put(np); | ||
639 | return; | ||
640 | } | ||
641 | |||
642 | i = ilen / sizeof(int); | ||
643 | hcpuid = get_hard_smp_processor_id(boot_cpuid); | ||
644 | |||
645 | /* The global interrupt distribution server is specified in the last | ||
646 | * entry of the "ibm,ppc-interrupt-gserver#s" property. Get the last | ||
647 | * entry from this property for the current boot cpu id and use it as | ||
648 | * the default distribution server. | ||
649 | */ | ||
650 | for (j = 0; j < i; j += 2) { | ||
651 | if (ireg[j] == hcpuid) { | ||
652 | default_server = hcpuid; | ||
653 | default_distrib_server = ireg[j+1]; | ||
654 | } | ||
655 | } | ||
656 | |||
657 | of_node_put(np); | ||
658 | } | ||
659 | |||
660 | static void __init xics_map_one_cpu(int hw_id, unsigned long addr, | ||
661 | unsigned long size) | ||
662 | { | ||
663 | int i; | ||
664 | |||
665 | /* This may look gross, but it's good enough for now; we don't quite | ||
666 | * have a hard -> linux processor id mapping. | ||
667 | */ | ||
668 | for_each_possible_cpu(i) { | ||
669 | if (!cpu_present(i)) | ||
670 | continue; | ||
671 | if (hw_id == get_hard_smp_processor_id(i)) { | ||
672 | xics_per_cpu[i] = ioremap(addr, size); | ||
673 | return; | ||
674 | } | ||
675 | } | ||
676 | } | ||
677 | |||
678 | static void __init xics_init_one_node(struct device_node *np, | ||
679 | unsigned int *indx) | ||
680 | { | ||
681 | unsigned int ilen; | ||
682 | const u32 *ireg; | ||
683 | |||
684 | /* This code makes the theoretically broken assumption that the interrupt | ||
685 | * server numbers are the same as the hard CPU numbers. | ||
686 | * This happens to be the case so far, but we are playing with fire... | ||
687 | * It should be fixed one of these days. -BenH. | ||
688 | */ | ||
689 | ireg = of_get_property(np, "ibm,interrupt-server-ranges", NULL); | ||
690 | |||
691 | /* Does that ever happen? We'll know soon enough... but even good old | ||
692 | * f80 does have that property. | ||
693 | */ | ||
694 | WARN_ON(ireg == NULL); | ||
695 | if (ireg) { | ||
696 | /* | ||
697 | * Set the starting index for this node. | ||
698 | */ | ||
699 | *indx = *ireg; | ||
700 | } | ||
701 | ireg = of_get_property(np, "reg", &ilen); | ||
702 | if (!ireg) | ||
703 | panic("xics_init_IRQ: can't find interrupt reg property"); | ||
704 | |||
705 | while (ilen >= (4 * sizeof(u32))) { | ||
706 | unsigned long addr, size; | ||
707 | |||
708 | /* XXX Use proper OF parsing code here !!! */ | ||
709 | addr = (unsigned long)*ireg++ << 32; | ||
710 | ilen -= sizeof(u32); | ||
711 | addr |= *ireg++; | ||
712 | ilen -= sizeof(u32); | ||
713 | size = (unsigned long)*ireg++ << 32; | ||
714 | ilen -= sizeof(u32); | ||
715 | size |= *ireg++; | ||
716 | ilen -= sizeof(u32); | ||
717 | xics_map_one_cpu(*indx, addr, size); | ||
718 | (*indx)++; | ||
719 | } | ||
720 | } | ||
721 | |||
722 | void __init xics_init_IRQ(void) | ||
723 | { | ||
724 | struct device_node *np; | ||
725 | u32 indx = 0; | ||
726 | int found = 0; | ||
727 | const u32 *isize; | ||
728 | |||
729 | ppc64_boot_msg(0x20, "XICS Init"); | ||
730 | |||
731 | ibm_get_xive = rtas_token("ibm,get-xive"); | ||
732 | ibm_set_xive = rtas_token("ibm,set-xive"); | ||
733 | ibm_int_on = rtas_token("ibm,int-on"); | ||
734 | ibm_int_off = rtas_token("ibm,int-off"); | ||
735 | |||
736 | for_each_node_by_type(np, "PowerPC-External-Interrupt-Presentation") { | ||
737 | found = 1; | ||
738 | if (firmware_has_feature(FW_FEATURE_LPAR)) { | ||
739 | of_node_put(np); | ||
740 | break; | ||
741 | } | ||
742 | xics_init_one_node(np, &indx); | ||
743 | } | ||
744 | if (found == 0) | ||
745 | return; | ||
746 | |||
747 | /* get the bit size of server numbers */ | ||
748 | found = 0; | ||
749 | |||
750 | for_each_compatible_node(np, NULL, "ibm,ppc-xics") { | ||
751 | isize = of_get_property(np, "ibm,interrupt-server#-size", NULL); | ||
752 | |||
753 | if (!isize) | ||
754 | continue; | ||
755 | |||
756 | if (!found) { | ||
757 | interrupt_server_size = *isize; | ||
758 | found = 1; | ||
759 | } else if (*isize != interrupt_server_size) { | ||
760 | printk(KERN_WARNING "XICS: " | ||
761 | "mismatched ibm,interrupt-server#-size\n"); | ||
762 | interrupt_server_size = max(*isize, | ||
763 | interrupt_server_size); | ||
764 | } | ||
765 | } | ||
766 | |||
767 | xics_update_irq_servers(); | ||
768 | xics_init_host(); | ||
769 | |||
770 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
771 | ppc_md.get_irq = xics_get_irq_lpar; | ||
772 | else | ||
773 | ppc_md.get_irq = xics_get_irq_direct; | ||
774 | |||
775 | xics_setup_cpu(); | ||
776 | |||
777 | ppc64_boot_msg(0x21, "XICS Done"); | ||
778 | } | ||
779 | |||
780 | /* Cpu startup, shutdown, and hotplug */ | ||
781 | |||
782 | static void xics_set_cpu_priority(unsigned char cppr) | ||
783 | { | ||
784 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
785 | |||
786 | /* | ||
787 | * we only really want to set the priority when there's | ||
788 | * just one cppr value on the stack | ||
789 | */ | ||
790 | WARN_ON(os_cppr->index != 0); | ||
791 | |||
792 | os_cppr->stack[0] = cppr; | ||
793 | |||
794 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
795 | lpar_cppr_info(cppr); | ||
796 | else | ||
797 | direct_cppr_info(cppr); | ||
798 | iosync(); | ||
799 | } | ||
800 | |||
801 | /* Have the calling processor join or leave the specified global queue */ | ||
802 | static void xics_set_cpu_giq(unsigned int gserver, unsigned int join) | ||
803 | { | ||
804 | int index; | ||
805 | int status; | ||
806 | |||
807 | if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL)) | ||
808 | return; | ||
809 | |||
810 | index = (1UL << interrupt_server_size) - 1 - gserver; | ||
811 | |||
812 | status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join); | ||
813 | |||
814 | WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n", | ||
815 | GLOBAL_INTERRUPT_QUEUE, index, join, status); | ||
816 | } | ||
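The index computed above is the server number reflected into the top of the interrupt-server number space, i.e. (2^interrupt_server_size - 1) - gserver. A minimal sketch, with a hypothetical helper name and made-up values, just to make the arithmetic concrete:

    /* Illustration only: the GIQ indicator index as computed in
     * xics_set_cpu_giq(); join = 1 joins the queue, join = 0 leaves it. */
    static int example_giq_index(unsigned int server_size, unsigned int gserver)
    {
        return (int)((1UL << server_size) - 1 - gserver);
    }
    /* e.g. example_giq_index(8, 1) == 254 */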
817 | |||
818 | void xics_setup_cpu(void) | ||
819 | { | ||
820 | xics_set_cpu_priority(LOWEST_PRIORITY); | ||
821 | |||
822 | xics_set_cpu_giq(default_distrib_server, 1); | ||
823 | } | ||
824 | |||
825 | void xics_teardown_cpu(void) | ||
826 | { | ||
827 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
828 | int cpu = smp_processor_id(); | ||
829 | |||
830 | /* | ||
831 | * we have to reset the cppr index to 0 because we're | ||
832 | * not going to return from the IPI | ||
833 | */ | ||
834 | os_cppr->index = 0; | ||
835 | xics_set_cpu_priority(0); | ||
836 | |||
837 | /* Clear any pending IPI request */ | ||
838 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
839 | lpar_qirr_info(cpu, 0xff); | ||
840 | else | ||
841 | direct_qirr_info(cpu, 0xff); | ||
842 | } | ||
843 | |||
844 | void xics_kexec_teardown_cpu(int secondary) | ||
845 | { | ||
846 | xics_teardown_cpu(); | ||
847 | |||
848 | /* | ||
849 | * We take the IPI irq and never return, so we | ||
850 | * need to EOI the IPI, but want to leave our priority 0. | ||
851 | * | ||
852 | * should we check all the other interrupts too? | ||
853 | * should we be flagging idle loop instead? | ||
854 | * or creating some task to be scheduled? | ||
855 | */ | ||
856 | |||
857 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
858 | lpar_xirr_info_set((0x00 << 24) | XICS_IPI); | ||
859 | else | ||
860 | direct_xirr_info_set((0x00 << 24) | XICS_IPI); | ||
861 | |||
862 | /* | ||
863 | * Some machines need to have at least one cpu in the GIQ, | ||
864 | * so leave the master cpu in the group. | ||
865 | */ | ||
866 | if (secondary) | ||
867 | xics_set_cpu_giq(default_distrib_server, 0); | ||
868 | } | ||
869 | |||
870 | #ifdef CONFIG_HOTPLUG_CPU | ||
871 | |||
872 | /* Interrupts are disabled. */ | ||
873 | void xics_migrate_irqs_away(void) | ||
874 | { | ||
875 | int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); | ||
876 | unsigned int irq, virq; | ||
877 | |||
878 | /* If we used to be the default server, move to the new "boot_cpuid" */ | ||
879 | if (hw_cpu == default_server) | ||
880 | xics_update_irq_servers(); | ||
881 | |||
882 | /* Reject any interrupt that was queued to us... */ | ||
883 | xics_set_cpu_priority(0); | ||
884 | |||
885 | /* Remove ourselves from the global interrupt queue */ | ||
886 | xics_set_cpu_giq(default_distrib_server, 0); | ||
887 | |||
888 | /* Allow IPIs again... */ | ||
889 | xics_set_cpu_priority(DEFAULT_PRIORITY); | ||
890 | |||
891 | for_each_irq(virq) { | ||
892 | struct irq_desc *desc; | ||
893 | int xics_status[2]; | ||
894 | int status; | ||
895 | unsigned long flags; | ||
896 | |||
897 | /* We can't set affinity on ISA interrupts */ | ||
898 | if (virq < NUM_ISA_INTERRUPTS) | ||
899 | continue; | ||
900 | if (irq_map[virq].host != xics_host) | ||
901 | continue; | ||
902 | irq = (unsigned int)irq_map[virq].hwirq; | ||
903 | /* We need to get IPIs still. */ | ||
904 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | ||
905 | continue; | ||
906 | desc = irq_to_desc(virq); | ||
907 | |||
908 | /* We only need to migrate enabled IRQs */ | ||
909 | if (desc == NULL || desc->chip == NULL | ||
910 | || desc->action == NULL | ||
911 | || desc->chip->set_affinity == NULL) | ||
912 | continue; | ||
913 | |||
914 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
915 | |||
916 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, irq); | ||
917 | if (status) { | ||
918 | printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", | ||
919 | __func__, irq, status); | ||
920 | goto unlock; | ||
921 | } | ||
922 | |||
923 | /* | ||
924 | * We only support delivery to all cpus or to one cpu. | ||
925 | * The irq has to be migrated only in the single cpu | ||
926 | * case. | ||
927 | */ | ||
928 | if (xics_status[0] != hw_cpu) | ||
929 | goto unlock; | ||
930 | |||
931 | /* This is expected during cpu offline. */ | ||
932 | if (cpu_online(cpu)) | ||
933 | printk(KERN_WARNING "IRQ %u affinity broken off cpu %u\n", | ||
934 | virq, cpu); | ||
935 | |||
936 | /* Reset affinity to all cpus */ | ||
937 | cpumask_setall(irq_to_desc(virq)->affinity); | ||
938 | desc->chip->set_affinity(virq, cpu_all_mask); | ||
939 | unlock: | ||
940 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
941 | } | ||
942 | } | ||
943 | #endif | ||
diff --git a/arch/powerpc/platforms/pseries/xics.h b/arch/powerpc/platforms/pseries/xics.h deleted file mode 100644 index d1d5a83039ae..000000000000 --- a/arch/powerpc/platforms/pseries/xics.h +++ /dev/null | |||
@@ -1,23 +0,0 @@ | |||
1 | /* | ||
2 | * arch/powerpc/platforms/pseries/xics.h | ||
3 | * | ||
4 | * Copyright 2000 IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _POWERPC_KERNEL_XICS_H | ||
13 | #define _POWERPC_KERNEL_XICS_H | ||
14 | |||
15 | extern void xics_init_IRQ(void); | ||
16 | extern void xics_setup_cpu(void); | ||
17 | extern void xics_teardown_cpu(void); | ||
18 | extern void xics_kexec_teardown_cpu(int secondary); | ||
19 | extern void xics_migrate_irqs_away(void); | ||
20 | extern int smp_xics_probe(void); | ||
21 | extern void smp_xics_message_pass(int target, int msg); | ||
22 | |||
23 | #endif /* _POWERPC_KERNEL_XICS_H */ | ||
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig new file mode 100644 index 000000000000..c3c48eb62cc1 --- /dev/null +++ b/arch/powerpc/platforms/wsp/Kconfig | |||
@@ -0,0 +1,28 @@ | |||
1 | config PPC_WSP | ||
2 | bool | ||
3 | default n | ||
4 | |||
5 | menu "WSP platform selection" | ||
6 | depends on PPC_BOOK3E_64 | ||
7 | |||
8 | config PPC_PSR2 | ||
9 | bool "PSR-2 platform" | ||
10 | select PPC_A2 | ||
11 | select GENERIC_TBSYNC | ||
12 | select PPC_SCOM | ||
13 | select EPAPR_BOOT | ||
14 | select PPC_WSP | ||
15 | select PPC_XICS | ||
16 | select PPC_ICP_NATIVE | ||
17 | default y | ||
18 | |||
19 | endmenu | ||
20 | |||
21 | config PPC_A2_DD2 | ||
22 | bool "Support for DD2 based A2/WSP systems" | ||
23 | depends on PPC_A2 | ||
24 | |||
25 | config WORKAROUND_ERRATUM_463 | ||
26 | depends on PPC_A2_DD2 | ||
27 | bool "Workaround erratum 463" | ||
28 | default y | ||
diff --git a/arch/powerpc/platforms/wsp/Makefile b/arch/powerpc/platforms/wsp/Makefile new file mode 100644 index 000000000000..095be73d6cd4 --- /dev/null +++ b/arch/powerpc/platforms/wsp/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | ccflags-y += -mno-minimal-toc | ||
2 | |||
3 | obj-y += setup.o ics.o | ||
4 | obj-$(CONFIG_PPC_PSR2) += psr2.o opb_pic.o | ||
5 | obj-$(CONFIG_PPC_WSP) += scom_wsp.o | ||
6 | obj-$(CONFIG_SMP) += smp.o scom_smp.o | ||
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c new file mode 100644 index 000000000000..e53bd9e7b125 --- /dev/null +++ b/arch/powerpc/platforms/wsp/ics.c | |||
@@ -0,0 +1,712 @@ | |||
1 | /* | ||
2 | * Copyright 2008-2011 IBM Corporation. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/cpu.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/irq.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/msi.h> | ||
16 | #include <linux/of.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/smp.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/types.h> | ||
21 | |||
22 | #include <asm/io.h> | ||
23 | #include <asm/irq.h> | ||
24 | #include <asm/xics.h> | ||
25 | |||
26 | #include "wsp.h" | ||
27 | #include "ics.h" | ||
28 | |||
29 | |||
30 | /* WSP ICS */ | ||
31 | |||
32 | struct wsp_ics { | ||
33 | struct ics ics; | ||
34 | struct device_node *dn; | ||
35 | void __iomem *regs; | ||
36 | spinlock_t lock; | ||
37 | unsigned long *bitmap; | ||
38 | u32 chip_id; | ||
39 | u32 lsi_base; | ||
40 | u32 lsi_count; | ||
41 | u64 hwirq_start; | ||
42 | u64 count; | ||
43 | #ifdef CONFIG_SMP | ||
44 | int *hwirq_cpu_map; | ||
45 | #endif | ||
46 | }; | ||
47 | |||
48 | #define to_wsp_ics(ics) container_of(ics, struct wsp_ics, ics) | ||
49 | |||
50 | #define INT_SRC_LAYER_BUID_REG(base) ((base) + 0x00) | ||
51 | #define IODA_TBL_ADDR_REG(base) ((base) + 0x18) | ||
52 | #define IODA_TBL_DATA_REG(base) ((base) + 0x20) | ||
53 | #define XIVE_UPDATE_REG(base) ((base) + 0x28) | ||
54 | #define ICS_INT_CAPS_REG(base) ((base) + 0x30) | ||
55 | |||
56 | #define TBL_AUTO_INCREMENT ((1UL << 63) | (1UL << 15)) | ||
57 | #define TBL_SELECT_XIST (1UL << 48) | ||
58 | #define TBL_SELECT_XIVT (1UL << 49) | ||
59 | |||
60 | #define IODA_IRQ(irq) ((irq) & (0x7FFULL)) /* HRM 5.1.3.4 */ | ||
61 | |||
62 | #define XIST_REQUIRED 0x8 | ||
63 | #define XIST_REJECTED 0x4 | ||
64 | #define XIST_PRESENTED 0x2 | ||
65 | #define XIST_PENDING 0x1 | ||
66 | |||
67 | #define XIVE_SERVER_SHIFT 42 | ||
68 | #define XIVE_SERVER_MASK 0xFFFFULL | ||
69 | #define XIVE_PRIORITY_MASK 0xFFULL | ||
70 | #define XIVE_PRIORITY_SHIFT 32 | ||
71 | #define XIVE_WRITE_ENABLE (1ULL << 63) | ||
72 | |||
73 | /* | ||
74 | * The docs refer to a 6 bit field called ChipID, which consists of a | ||
75 | * 3 bit NodeID and a 3 bit ChipID. On WSP the ChipID is always zero | ||
76 | * so we ignore it, and everywhere we use "chip id" in this code we | ||
77 | * mean the NodeID. | ||
78 | */ | ||
79 | #define WSP_ICS_CHIP_SHIFT 17 | ||
80 | |||
81 | |||
82 | static struct wsp_ics *ics_list; | ||
83 | static int num_ics; | ||
84 | |||
85 | /* ICS Source controller accessors */ | ||
86 | |||
87 | static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq) | ||
88 | { | ||
89 | unsigned long flags; | ||
90 | u64 xive; | ||
91 | |||
92 | spin_lock_irqsave(&ics->lock, flags); | ||
93 | out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq)); | ||
94 | xive = in_be64(IODA_TBL_DATA_REG(ics->regs)); | ||
95 | spin_unlock_irqrestore(&ics->lock, flags); | ||
96 | |||
97 | return xive; | ||
98 | } | ||
99 | |||
100 | static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive) | ||
101 | { | ||
102 | xive &= ~XIVE_ADDR_MASK; | ||
103 | xive |= (irq & XIVE_ADDR_MASK); | ||
104 | xive |= XIVE_WRITE_ENABLE; | ||
105 | |||
106 | out_be64(XIVE_UPDATE_REG(ics->regs), xive); | ||
107 | } | ||
108 | |||
109 | static u64 xive_set_server(u64 xive, unsigned int server) | ||
110 | { | ||
111 | u64 mask = ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT); | ||
112 | |||
113 | xive &= mask; | ||
114 | xive |= (server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT; | ||
115 | |||
116 | return xive; | ||
117 | } | ||
118 | |||
119 | static u64 xive_set_priority(u64 xive, unsigned int priority) | ||
120 | { | ||
121 | u64 mask = ~(XIVE_PRIORITY_MASK << XIVE_PRIORITY_SHIFT); | ||
122 | |||
123 | xive &= mask; | ||
124 | xive |= (priority & XIVE_PRIORITY_MASK) << XIVE_PRIORITY_SHIFT; | ||
125 | |||
126 | return xive; | ||
127 | } | ||
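The two helpers above only pack the server and priority fields; wsp_ics_set_xive() adds the interrupt address and the write-enable bit before writing the update register. A hedged sketch (hypothetical function name, same masks and shifts as defined above and XIVE_ADDR_MASK from ics.h), purely to show how a complete update word is composed:

    /* Illustration only: build a full XIVE update word for a hw irq. */
    static u64 example_build_xive(unsigned int hw_irq, unsigned int server,
                                  unsigned int priority)
    {
        u64 xive = 0;

        xive = xive_set_server(xive, server);       /* bits 42..57 */
        xive = xive_set_priority(xive, priority);   /* bits 32..39 */
        xive |= hw_irq & XIVE_ADDR_MASK;            /* low 11 bits: irq address */
        xive |= XIVE_WRITE_ENABLE;                  /* bit 63: commit on write */
        return xive;
    }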
128 | |||
129 | |||
130 | #ifdef CONFIG_SMP | ||
131 | /* Find logical CPUs within mask on a given chip and store result in ret */ | ||
132 | void cpus_on_chip(int chip_id, cpumask_t *mask, cpumask_t *ret) | ||
133 | { | ||
134 | int cpu, chip; | ||
135 | struct device_node *cpu_dn, *dn; | ||
136 | const u32 *prop; | ||
137 | |||
138 | cpumask_clear(ret); | ||
139 | for_each_cpu(cpu, mask) { | ||
140 | cpu_dn = of_get_cpu_node(cpu, NULL); | ||
141 | if (!cpu_dn) | ||
142 | continue; | ||
143 | |||
144 | prop = of_get_property(cpu_dn, "at-node", NULL); | ||
145 | if (!prop) { | ||
146 | of_node_put(cpu_dn); | ||
147 | continue; | ||
148 | } | ||
149 | |||
150 | dn = of_find_node_by_phandle(*prop); | ||
151 | of_node_put(cpu_dn); | ||
152 | |||
153 | chip = wsp_get_chip_id(dn); | ||
154 | if (chip == chip_id) | ||
155 | cpumask_set_cpu(cpu, ret); | ||
156 | |||
157 | of_node_put(dn); | ||
158 | } | ||
159 | } | ||
160 | |||
161 | /* Store a suitable CPU to handle a hwirq in the ics->hwirq_cpu_map cache */ | ||
162 | static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq, | ||
163 | const cpumask_t *affinity) | ||
164 | { | ||
165 | cpumask_var_t avail, newmask; | ||
166 | int ret = -ENOMEM, cpu, cpu_rover = 0, target; | ||
167 | int index = hwirq - ics->hwirq_start; | ||
168 | unsigned int nodeid; | ||
169 | |||
170 | BUG_ON(index < 0 || index >= ics->count); | ||
171 | |||
172 | if (!ics->hwirq_cpu_map) | ||
173 | return -ENOMEM; | ||
174 | |||
175 | if (!distribute_irqs) { | ||
176 | ics->hwirq_cpu_map[hwirq - ics->hwirq_start] = xics_default_server; | ||
177 | return 0; | ||
178 | } | ||
179 | |||
180 | /* Allocate needed CPU masks */ | ||
181 | if (!alloc_cpumask_var(&avail, GFP_KERNEL)) | ||
182 | goto ret; | ||
183 | if (!alloc_cpumask_var(&newmask, GFP_KERNEL)) | ||
184 | goto freeavail; | ||
185 | |||
186 | /* Find PBus attached to the source of this IRQ */ | ||
187 | nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x3; /* 12:14 */ | ||
188 | |||
189 | /* Find CPUs that could handle this IRQ */ | ||
190 | if (affinity) | ||
191 | cpumask_and(avail, cpu_online_mask, affinity); | ||
192 | else | ||
193 | cpumask_copy(avail, cpu_online_mask); | ||
194 | |||
195 | /* Narrow selection down to logical CPUs on the same chip */ | ||
196 | cpus_on_chip(nodeid, avail, newmask); | ||
197 | |||
198 | /* Ensure we haven't narrowed it down to 0 */ | ||
199 | if (unlikely(cpumask_empty(newmask))) { | ||
200 | if (unlikely(cpumask_empty(avail))) { | ||
201 | ret = -1; | ||
202 | goto out; | ||
203 | } | ||
204 | cpumask_copy(newmask, avail); | ||
205 | } | ||
206 | |||
207 | /* Choose a CPU out of those we narrowed it down to in round robin */ | ||
208 | target = hwirq % cpumask_weight(newmask); | ||
209 | for_each_cpu(cpu, newmask) { | ||
210 | if (cpu_rover++ >= target) { | ||
211 | ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu); | ||
212 | ret = 0; | ||
213 | goto out; | ||
214 | } | ||
215 | } | ||
216 | |||
217 | /* Shouldn't happen */ | ||
218 | WARN_ON(1); | ||
219 | |||
220 | out: | ||
221 | free_cpumask_var(newmask); | ||
222 | freeavail: | ||
223 | free_cpumask_var(avail); | ||
224 | ret: | ||
225 | if (ret < 0) { | ||
226 | ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask); | ||
227 | pr_warning("Error, falling hwirq 0x%x routing back to CPU %i\n", | ||
228 | hwirq, ics->hwirq_cpu_map[index]); | ||
229 | } | ||
230 | return ret; | ||
231 | } | ||
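The round-robin step above spreads hwirqs over the CPUs that survived the chip/affinity narrowing: the hwirq number modulo the mask weight picks which CPU gets used. A minimal sketch of just that selection, assuming a non-empty mask (the helper name is hypothetical):

    /* Illustration only: pick the (hwirq % weight)-th CPU from a mask,
     * the same round-robin rule cache_hwirq_map() applies. */
    static int example_round_robin_cpu(unsigned int hwirq, const cpumask_t *mask)
    {
        int cpu, rover = 0;
        int target = hwirq % cpumask_weight(mask);

        for_each_cpu(cpu, mask)
            if (rover++ >= target)
                return cpu;

        return cpumask_first(cpu_online_mask);  /* fallback, should not happen */
    }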
232 | |||
233 | static void alloc_irq_map(struct wsp_ics *ics) | ||
234 | { | ||
235 | int i; | ||
236 | |||
237 | ics->hwirq_cpu_map = kmalloc(sizeof(int) * ics->count, GFP_KERNEL); | ||
238 | if (!ics->hwirq_cpu_map) { | ||
239 | pr_warning("Allocate hwirq_cpu_map failed, " | ||
240 | "IRQ balancing disabled\n"); | ||
241 | return; | ||
242 | } | ||
243 | |||
244 | for (i = 0; i < ics->count; i++) | ||
245 | ics->hwirq_cpu_map[i] = xics_default_server; | ||
246 | } | ||
247 | |||
248 | static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq) | ||
249 | { | ||
250 | int index = hwirq - ics->hwirq_start; | ||
251 | |||
252 | BUG_ON(index < 0 || index >= ics->count); | ||
253 | |||
254 | if (!ics->hwirq_cpu_map) | ||
255 | return xics_default_server; | ||
256 | |||
257 | return ics->hwirq_cpu_map[index]; | ||
258 | } | ||
259 | #else /* !CONFIG_SMP */ | ||
260 | static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq, | ||
261 | const cpumask_t *affinity) | ||
262 | { | ||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq) | ||
267 | { | ||
268 | return xics_default_server; | ||
269 | } | ||
270 | |||
271 | static void alloc_irq_map(struct wsp_ics *ics) { } | ||
272 | #endif | ||
273 | |||
274 | static void wsp_chip_unmask_irq(struct irq_data *d) | ||
275 | { | ||
276 | unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); | ||
277 | struct wsp_ics *ics; | ||
278 | int server; | ||
279 | u64 xive; | ||
280 | |||
281 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) | ||
282 | return; | ||
283 | |||
284 | ics = d->chip_data; | ||
285 | if (WARN_ON(!ics)) | ||
286 | return; | ||
287 | |||
288 | server = get_irq_server(ics, hw_irq); | ||
289 | |||
290 | xive = wsp_ics_get_xive(ics, hw_irq); | ||
291 | xive = xive_set_server(xive, server); | ||
292 | xive = xive_set_priority(xive, DEFAULT_PRIORITY); | ||
293 | wsp_ics_set_xive(ics, hw_irq, xive); | ||
294 | } | ||
295 | |||
296 | static unsigned int wsp_chip_startup(struct irq_data *d) | ||
297 | { | ||
298 | /* unmask it */ | ||
299 | wsp_chip_unmask_irq(d); | ||
300 | return 0; | ||
301 | } | ||
302 | |||
303 | static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics) | ||
304 | { | ||
305 | u64 xive; | ||
306 | |||
307 | if (hw_irq == XICS_IPI) | ||
308 | return; | ||
309 | |||
310 | if (WARN_ON(!ics)) | ||
311 | return; | ||
312 | xive = wsp_ics_get_xive(ics, hw_irq); | ||
313 | xive = xive_set_server(xive, xics_default_server); | ||
314 | xive = xive_set_priority(xive, LOWEST_PRIORITY); | ||
315 | wsp_ics_set_xive(ics, hw_irq, xive); | ||
316 | } | ||
317 | |||
318 | static void wsp_chip_mask_irq(struct irq_data *d) | ||
319 | { | ||
320 | unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); | ||
321 | struct wsp_ics *ics = d->chip_data; | ||
322 | |||
323 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) | ||
324 | return; | ||
325 | |||
326 | wsp_mask_real_irq(hw_irq, ics); | ||
327 | } | ||
328 | |||
329 | static int wsp_chip_set_affinity(struct irq_data *d, | ||
330 | const struct cpumask *cpumask, bool force) | ||
331 | { | ||
332 | unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); | ||
333 | struct wsp_ics *ics; | ||
334 | int ret; | ||
335 | u64 xive; | ||
336 | |||
337 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) | ||
338 | return -1; | ||
339 | |||
340 | ics = d->chip_data; | ||
341 | if (WARN_ON(!ics)) | ||
342 | return -1; | ||
343 | xive = wsp_ics_get_xive(ics, hw_irq); | ||
344 | |||
345 | /* | ||
346 | * For the moment only implement delivery to all cpus or one cpu. | ||
347 | * Get current irq_server for the given irq | ||
348 | */ | ||
349 | ret = cache_hwirq_map(ics, d->irq, cpumask); | ||
350 | if (ret == -1) { | ||
351 | char cpulist[128]; | ||
352 | cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); | ||
353 | pr_warning("%s: No online cpus in the mask %s for irq %d\n", | ||
354 | __func__, cpulist, d->irq); | ||
355 | return -1; | ||
356 | } else if (ret == -ENOMEM) { | ||
357 | pr_warning("%s: Out of memory\n", __func__); | ||
358 | return -1; | ||
359 | } | ||
360 | |||
361 | xive = xive_set_server(xive, get_irq_server(ics, hw_irq)); | ||
362 | wsp_ics_set_xive(ics, hw_irq, xive); | ||
363 | |||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | static struct irq_chip wsp_irq_chip = { | ||
368 | .name = "WSP ICS", | ||
369 | .irq_startup = wsp_chip_startup, | ||
370 | .irq_mask = wsp_chip_mask_irq, | ||
371 | .irq_unmask = wsp_chip_unmask_irq, | ||
372 | .irq_set_affinity = wsp_chip_set_affinity | ||
373 | }; | ||
374 | |||
375 | static int wsp_ics_host_match(struct ics *ics, struct device_node *dn) | ||
376 | { | ||
377 | /* All ICSs in the system implement a global irq number space, | ||
378 | * so match against them all. */ | ||
379 | return of_device_is_compatible(dn, "ibm,ppc-xics"); | ||
380 | } | ||
381 | |||
382 | static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq) | ||
383 | { | ||
384 | if (hwirq >= wsp_ics->hwirq_start && | ||
385 | hwirq < wsp_ics->hwirq_start + wsp_ics->count) | ||
386 | return 1; | ||
387 | |||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | static int wsp_ics_map(struct ics *ics, unsigned int virq) | ||
392 | { | ||
393 | struct wsp_ics *wsp_ics = to_wsp_ics(ics); | ||
394 | unsigned int hw_irq = virq_to_hw(virq); | ||
395 | unsigned long flags; | ||
396 | |||
397 | if (!wsp_ics_match_hwirq(wsp_ics, hw_irq)) | ||
398 | return -ENOENT; | ||
399 | |||
400 | irq_set_chip_and_handler(virq, &wsp_irq_chip, handle_fasteoi_irq); | ||
401 | |||
402 | irq_set_chip_data(virq, wsp_ics); | ||
403 | |||
404 | spin_lock_irqsave(&wsp_ics->lock, flags); | ||
405 | bitmap_allocate_region(wsp_ics->bitmap, hw_irq - wsp_ics->hwirq_start, 0); | ||
406 | spin_unlock_irqrestore(&wsp_ics->lock, flags); | ||
407 | |||
408 | return 0; | ||
409 | } | ||
410 | |||
411 | static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq) | ||
412 | { | ||
413 | struct wsp_ics *wsp_ics = to_wsp_ics(ics); | ||
414 | |||
415 | if (!wsp_ics_match_hwirq(wsp_ics, hw_irq)) | ||
416 | return; | ||
417 | |||
418 | pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq); | ||
419 | wsp_mask_real_irq(hw_irq, wsp_ics); | ||
420 | } | ||
421 | |||
422 | static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq) | ||
423 | { | ||
424 | struct wsp_ics *wsp_ics = to_wsp_ics(ics); | ||
425 | |||
426 | if (!wsp_ics_match_hwirq(wsp_ics, hw_irq)) | ||
427 | return -ENOENT; | ||
428 | |||
429 | return get_irq_server(wsp_ics, hw_irq); | ||
430 | } | ||
431 | |||
432 | /* HW Number allocation API */ | ||
433 | |||
434 | static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn) | ||
435 | { | ||
436 | struct device_node *iparent; | ||
437 | int i; | ||
438 | |||
439 | iparent = of_irq_find_parent(dn); | ||
440 | if (!iparent) { | ||
441 | pr_err("wsp_ics: Failed to find interrupt parent!\n"); | ||
442 | return NULL; | ||
443 | } | ||
444 | |||
445 | for (i = 0; i < num_ics; i++) { | ||
446 | if (ics_list[i].dn == iparent) | ||
447 | break; | ||
448 | } | ||
449 | |||
450 | if (i >= num_ics) { | ||
451 | pr_err("wsp_ics: Unable to find parent bitmap!\n"); | ||
452 | return NULL; | ||
453 | } | ||
454 | |||
455 | return &ics_list[i]; | ||
456 | } | ||
457 | |||
458 | int wsp_ics_alloc_irq(struct device_node *dn, int num) | ||
459 | { | ||
460 | struct wsp_ics *ics; | ||
461 | int order, offset; | ||
462 | |||
463 | ics = wsp_ics_find_dn_ics(dn); | ||
464 | if (!ics) | ||
465 | return -ENODEV; | ||
466 | |||
467 | /* Fast, but overly strict if num isn't a power of two */ | ||
468 | order = get_count_order(num); | ||
469 | |||
470 | spin_lock_irq(&ics->lock); | ||
471 | offset = bitmap_find_free_region(ics->bitmap, ics->count, order); | ||
472 | spin_unlock_irq(&ics->lock); | ||
473 | |||
474 | if (offset < 0) | ||
475 | return offset; | ||
476 | |||
477 | return offset + ics->hwirq_start; | ||
478 | } | ||
479 | |||
480 | void wsp_ics_free_irq(struct device_node *dn, unsigned int irq) | ||
481 | { | ||
482 | struct wsp_ics *ics; | ||
483 | |||
484 | ics = wsp_ics_find_dn_ics(dn); | ||
485 | if (WARN_ON(!ics)) | ||
486 | return; | ||
487 | |||
488 | spin_lock_irq(&ics->lock); | ||
489 | bitmap_release_region(ics->bitmap, irq, 0); | ||
490 | spin_unlock_irq(&ics->lock); | ||
491 | } | ||
492 | |||
493 | /* Initialisation */ | ||
494 | |||
495 | static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics, | ||
496 | struct device_node *dn) | ||
497 | { | ||
498 | int len, i, j, size; | ||
499 | u32 start, count; | ||
500 | const u32 *p; | ||
501 | |||
502 | size = BITS_TO_LONGS(ics->count) * sizeof(long); | ||
503 | ics->bitmap = kzalloc(size, GFP_KERNEL); | ||
504 | if (!ics->bitmap) { | ||
505 | pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n"); | ||
506 | return -ENOMEM; | ||
507 | } | ||
508 | |||
509 | spin_lock_init(&ics->lock); | ||
510 | |||
511 | p = of_get_property(dn, "available-ranges", &len); | ||
512 | if (!p || !len) { | ||
513 | /* FIXME this should be a WARN() once mambo is updated */ | ||
514 | pr_err("wsp_ics: No available-ranges defined for %s\n", | ||
515 | dn->full_name); | ||
516 | return 0; | ||
517 | } | ||
518 | |||
519 | if (len % (2 * sizeof(u32)) != 0) { | ||
520 | /* FIXME this should be a WARN() once mambo is updated */ | ||
521 | pr_err("wsp_ics: Invalid available-ranges for %s\n", | ||
522 | dn->full_name); | ||
523 | return 0; | ||
524 | } | ||
525 | |||
526 | bitmap_fill(ics->bitmap, ics->count); | ||
527 | |||
528 | for (i = 0; i < len / sizeof(u32); i += 2) { | ||
529 | start = of_read_number(p + i, 1); | ||
530 | count = of_read_number(p + i + 1, 1); | ||
531 | |||
532 | pr_devel("%s: start: %d count: %d\n", __func__, start, count); | ||
533 | |||
534 | if ((start + count) > (ics->hwirq_start + ics->count) || | ||
535 | start < ics->hwirq_start) { | ||
536 | pr_err("wsp_ics: Invalid range! -> %d to %d\n", | ||
537 | start, start + count); | ||
538 | break; | ||
539 | } | ||
540 | |||
541 | for (j = 0; j < count; j++) | ||
542 | bitmap_release_region(ics->bitmap, | ||
543 | (start + j) - ics->hwirq_start, 0); | ||
544 | } | ||
545 | |||
546 | /* Ensure LSIs are not available for allocation */ | ||
547 | bitmap_allocate_region(ics->bitmap, ics->lsi_base, | ||
548 | get_count_order(ics->lsi_count)); | ||
549 | |||
550 | return 0; | ||
551 | } | ||
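wsp_ics_bitmap_setup() above treats "available-ranges" as (start, count) pairs of global hwirq numbers: the allocation bitmap starts completely full, each listed range is released bit by bit relative to hwirq_start, and the LSIs are then re-reserved. A worked illustration with hypothetical values:

    /* Illustration only, hypothetical values: with hwirq_start = 0x40000 and an
     * available-ranges of <0x40020 0x10>, bitmap bits 0x20..0x2f are released,
     * i.e. hwirqs 0x40020..0x4002f become allocatable by wsp_ics_alloc_irq(). */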
552 | |||
553 | static int __init wsp_ics_setup(struct wsp_ics *ics, struct device_node *dn) | ||
554 | { | ||
555 | u32 lsi_buid, msi_buid, msi_base, msi_count; | ||
556 | void __iomem *regs; | ||
557 | const u32 *p; | ||
558 | int rc, len, i; | ||
559 | u64 caps, buid; | ||
560 | |||
561 | p = of_get_property(dn, "interrupt-ranges", &len); | ||
562 | if (!p || len < (2 * sizeof(u32))) { | ||
563 | pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n", | ||
564 | dn->full_name); | ||
565 | return -ENOENT; | ||
566 | } | ||
567 | |||
568 | if (len > (2 * sizeof(u32))) { | ||
569 | pr_err("wsp_ics: Multiple ics ranges not supported.\n"); | ||
570 | return -EINVAL; | ||
571 | } | ||
572 | |||
573 | regs = of_iomap(dn, 0); | ||
574 | if (!regs) { | ||
575 | pr_err("wsp_ics: of_iomap(%s) failed\n", dn->full_name); | ||
576 | return -ENXIO; | ||
577 | } | ||
578 | |||
579 | ics->hwirq_start = of_read_number(p, 1); | ||
580 | ics->count = of_read_number(p + 1, 1); | ||
581 | ics->regs = regs; | ||
582 | |||
583 | ics->chip_id = wsp_get_chip_id(dn); | ||
584 | if (WARN_ON(ics->chip_id < 0)) | ||
585 | ics->chip_id = 0; | ||
586 | |||
587 | /* Get some information about the critter */ | ||
588 | caps = in_be64(ICS_INT_CAPS_REG(ics->regs)); | ||
589 | buid = in_be64(INT_SRC_LAYER_BUID_REG(ics->regs)); | ||
590 | ics->lsi_count = caps >> 56; | ||
591 | msi_count = (caps >> 44) & 0x7ff; | ||
592 | |||
593 | /* Note: LSI BUID is 9 bits, but really only 3 are BUID and the | ||
594 | * rest is mixed into the interrupt number. We store the whole | ||
595 | * thing, though. | ||
596 | */ | ||
597 | lsi_buid = (buid >> 48) & 0x1ff; | ||
598 | ics->lsi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | lsi_buid << 5; | ||
599 | msi_buid = (buid >> 37) & 0x7; | ||
600 | msi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | msi_buid << 11; | ||
601 | |||
602 | pr_info("wsp_ics: Found %s\n", dn->full_name); | ||
603 | pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n", | ||
604 | ics->hwirq_start, ics->hwirq_start + ics->count - 1); | ||
605 | pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n", | ||
606 | ics->lsi_count, ics->lsi_base, | ||
607 | ics->lsi_base + ics->lsi_count - 1); | ||
608 | pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n", | ||
609 | msi_count, msi_base, | ||
610 | msi_base + msi_count - 1); | ||
611 | |||
612 | /* Let's check the HW config is sane */ | ||
613 | if (ics->lsi_base < ics->hwirq_start || | ||
614 | (ics->lsi_base + ics->lsi_count) > (ics->hwirq_start + ics->count)) | ||
615 | pr_warning("wsp_ics: WARNING ! LSIs out of interrupt-ranges !\n"); | ||
616 | if (msi_base < ics->hwirq_start || | ||
617 | (msi_base + msi_count) > (ics->hwirq_start + ics->count)) | ||
618 | pr_warning("wsp_ics: WARNING ! MSIs out of interrupt-ranges !\n"); | ||
619 | |||
620 | /* We don't check for overlap between LSIs and MSIs, which will happen | ||
621 | * if we use the same BUID; I'm not sure yet how legitimate that is. | ||
622 | */ | ||
623 | |||
624 | rc = wsp_ics_bitmap_setup(ics, dn); | ||
625 | if (rc) { | ||
626 | iounmap(regs); | ||
627 | return rc; | ||
628 | } | ||
629 | |||
630 | ics->dn = of_node_get(dn); | ||
631 | alloc_irq_map(ics); | ||
632 | |||
633 | for (i = 0; i < ics->count; i++) | ||
634 | wsp_mask_real_irq(ics->hwirq_start + i, ics); | ||
635 | |||
636 | ics->ics.map = wsp_ics_map; | ||
637 | ics->ics.mask_unknown = wsp_ics_mask_unknown; | ||
638 | ics->ics.get_server = wsp_ics_get_server; | ||
639 | ics->ics.host_match = wsp_ics_host_match; | ||
640 | |||
641 | xics_register_ics(&ics->ics); | ||
642 | |||
643 | return 0; | ||
644 | } | ||
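wsp_ics_setup() above splices the node id and the BUID fields read from INT_SRC_LAYER_BUID_REG into the global irq number to derive the LSI and MSI bases. A small worked illustration of that arithmetic, using entirely made-up register values:

    /* Illustration only, hypothetical values: with chip_id (node) = 2,
     * lsi_buid = 0x040 and msi_buid = 0x3, the bases come out as:
     *   lsi_base = (2 << 17) | (0x040 << 5)  = 0x40000 | 0x800  = 0x40800
     *   msi_base = (2 << 17) | (0x3  << 11)  = 0x40000 | 0x1800 = 0x41800
     * exactly as computed in wsp_ics_setup(). */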
645 | |||
646 | static void __init wsp_ics_set_default_server(void) | ||
647 | { | ||
648 | struct device_node *np; | ||
649 | u32 hwid; | ||
650 | |||
651 | /* Find the server number for the boot cpu. */ | ||
652 | np = of_get_cpu_node(boot_cpuid, NULL); | ||
653 | BUG_ON(!np); | ||
654 | |||
655 | hwid = get_hard_smp_processor_id(boot_cpuid); | ||
656 | |||
657 | pr_info("wsp_ics: default server is %#x, CPU %s\n", hwid, np->full_name); | ||
658 | xics_default_server = hwid; | ||
659 | |||
660 | of_node_put(np); | ||
661 | } | ||
662 | |||
663 | static int __init wsp_ics_init(void) | ||
664 | { | ||
665 | struct device_node *dn; | ||
666 | struct wsp_ics *ics; | ||
667 | int rc, found; | ||
668 | |||
669 | wsp_ics_set_default_server(); | ||
670 | |||
671 | found = 0; | ||
672 | for_each_compatible_node(dn, NULL, "ibm,ppc-xics") | ||
673 | found++; | ||
674 | |||
675 | if (found == 0) { | ||
676 | pr_err("wsp_ics: No ICS's found!\n"); | ||
677 | return -ENODEV; | ||
678 | } | ||
679 | |||
680 | ics_list = kmalloc(sizeof(*ics) * found, GFP_KERNEL); | ||
681 | if (!ics_list) { | ||
682 | pr_err("wsp_ics: No memory for structs.\n"); | ||
683 | return -ENOMEM; | ||
684 | } | ||
685 | |||
686 | num_ics = 0; | ||
687 | ics = ics_list; | ||
688 | for_each_compatible_node(dn, NULL, "ibm,wsp-xics") { | ||
689 | rc = wsp_ics_setup(ics, dn); | ||
690 | if (rc == 0) { | ||
691 | ics++; | ||
692 | num_ics++; | ||
693 | } | ||
694 | } | ||
695 | |||
696 | if (found != num_ics) { | ||
697 | pr_err("wsp_ics: Failed setting up %d ICS's\n", | ||
698 | found - num_ics); | ||
699 | return -1; | ||
700 | } | ||
701 | |||
702 | return 0; | ||
703 | } | ||
704 | |||
705 | void __init wsp_init_irq(void) | ||
706 | { | ||
707 | wsp_ics_init(); | ||
708 | xics_init(); | ||
709 | |||
710 | /* We need to patch our irq chip's EOI to point to the right ICP */ | ||
711 | wsp_irq_chip.irq_eoi = icp_ops->eoi; | ||
712 | } | ||
diff --git a/arch/powerpc/platforms/wsp/ics.h b/arch/powerpc/platforms/wsp/ics.h new file mode 100644 index 000000000000..e34d53102640 --- /dev/null +++ b/arch/powerpc/platforms/wsp/ics.h | |||
@@ -0,0 +1,20 @@ | |||
1 | /* | ||
2 | * Copyright 2009 IBM Corporation. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #ifndef __ICS_H | ||
11 | #define __ICS_H | ||
12 | |||
13 | #define XIVE_ADDR_MASK 0x7FFULL | ||
14 | |||
15 | extern void wsp_init_irq(void); | ||
16 | |||
17 | extern int wsp_ics_alloc_irq(struct device_node *dn, int num); | ||
18 | extern void wsp_ics_free_irq(struct device_node *dn, unsigned int irq); | ||
19 | |||
20 | #endif /* __ICS_H */ | ||
diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c new file mode 100644 index 000000000000..be05631a3c1c --- /dev/null +++ b/arch/powerpc/platforms/wsp/opb_pic.c | |||
@@ -0,0 +1,332 @@ | |||
1 | /* | ||
2 | * IBM Onboard Peripheral Bus Interrupt Controller | ||
3 | * | ||
4 | * Copyright 2010 Jack Miller, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify it | ||
7 | * under the terms of the GNU General Public License as published by the | ||
8 | * Free Software Foundation; either version 2 of the License, or (at your | ||
9 | * option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/irq.h> | ||
15 | #include <linux/of.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/time.h> | ||
18 | |||
19 | #include <asm/reg_a2.h> | ||
20 | #include <asm/irq.h> | ||
21 | |||
22 | #define OPB_NR_IRQS 32 | ||
23 | |||
24 | #define OPB_MLSASIER 0x04 /* MLS Accumulated Status IER */ | ||
25 | #define OPB_MLSIR 0x50 /* MLS Interrupt Register */ | ||
26 | #define OPB_MLSIER 0x54 /* MLS Interrupt Enable Register */ | ||
27 | #define OPB_MLSIPR 0x58 /* MLS Interrupt Polarity Register */ | ||
28 | #define OPB_MLSIIR 0x5c /* MLS Interrupt Inputs Register */ | ||
29 | |||
30 | static int opb_index = 0; | ||
31 | |||
32 | struct opb_pic { | ||
33 | struct irq_host *host; | ||
34 | void *regs; | ||
35 | int index; | ||
36 | spinlock_t lock; | ||
37 | }; | ||
38 | |||
39 | static u32 opb_in(struct opb_pic *opb, int offset) | ||
40 | { | ||
41 | return in_be32(opb->regs + offset); | ||
42 | } | ||
43 | |||
44 | static void opb_out(struct opb_pic *opb, int offset, u32 val) | ||
45 | { | ||
46 | out_be32(opb->regs + offset, val); | ||
47 | } | ||
48 | |||
49 | static void opb_unmask_irq(struct irq_data *d) | ||
50 | { | ||
51 | struct opb_pic *opb; | ||
52 | unsigned long flags; | ||
53 | u32 ier, bitset; | ||
54 | |||
55 | opb = d->chip_data; | ||
56 | bitset = (1 << (31 - irqd_to_hwirq(d))); | ||
57 | |||
58 | spin_lock_irqsave(&opb->lock, flags); | ||
59 | |||
60 | ier = opb_in(opb, OPB_MLSIER); | ||
61 | opb_out(opb, OPB_MLSIER, ier | bitset); | ||
62 | ier = opb_in(opb, OPB_MLSIER); | ||
63 | |||
64 | spin_unlock_irqrestore(&opb->lock, flags); | ||
65 | } | ||
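The `1 << (31 - hwirq)` pattern used here and in the handlers below suggests MSB-0 bit numbering for the OPB MLS registers, i.e. hardware irq 0 corresponds to the most significant bit of the 32-bit register; that reading is inferred from the shifts in this file, not from separate documentation. A tiny sketch with a hypothetical helper name:

    /* Illustration only: OPB hwirq <-> register bit, MSB-0 numbering.
     * hwirq 0  -> mask 0x80000000
     * hwirq 31 -> mask 0x00000001 */
    static inline u32 example_opb_bit(unsigned int hwirq)
    {
        return 1u << (31 - hwirq);
    }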
66 | |||
67 | static void opb_mask_irq(struct irq_data *d) | ||
68 | { | ||
69 | struct opb_pic *opb; | ||
70 | unsigned long flags; | ||
71 | u32 ier, mask; | ||
72 | |||
73 | opb = d->chip_data; | ||
74 | mask = ~(1 << (31 - irqd_to_hwirq(d))); | ||
75 | |||
76 | spin_lock_irqsave(&opb->lock, flags); | ||
77 | |||
78 | ier = opb_in(opb, OPB_MLSIER); | ||
79 | opb_out(opb, OPB_MLSIER, ier & mask); | ||
80 | ier = opb_in(opb, OPB_MLSIER); // Flush posted writes | ||
81 | |||
82 | spin_unlock_irqrestore(&opb->lock, flags); | ||
83 | } | ||
84 | |||
85 | static void opb_ack_irq(struct irq_data *d) | ||
86 | { | ||
87 | struct opb_pic *opb; | ||
88 | unsigned long flags; | ||
89 | u32 bitset; | ||
90 | |||
91 | opb = d->chip_data; | ||
92 | bitset = (1 << (31 - irqd_to_hwirq(d))); | ||
93 | |||
94 | spin_lock_irqsave(&opb->lock, flags); | ||
95 | |||
96 | opb_out(opb, OPB_MLSIR, bitset); | ||
97 | opb_in(opb, OPB_MLSIR); // Flush posted writes | ||
98 | |||
99 | spin_unlock_irqrestore(&opb->lock, flags); | ||
100 | } | ||
101 | |||
102 | static void opb_mask_ack_irq(struct irq_data *d) | ||
103 | { | ||
104 | struct opb_pic *opb; | ||
105 | unsigned long flags; | ||
106 | u32 bitset; | ||
107 | u32 ier, ir; | ||
108 | |||
109 | opb = d->chip_data; | ||
110 | bitset = (1 << (31 - irqd_to_hwirq(d))); | ||
111 | |||
112 | spin_lock_irqsave(&opb->lock, flags); | ||
113 | |||
114 | ier = opb_in(opb, OPB_MLSIER); | ||
115 | opb_out(opb, OPB_MLSIER, ier & ~bitset); | ||
116 | ier = opb_in(opb, OPB_MLSIER); // Flush posted writes | ||
117 | |||
118 | opb_out(opb, OPB_MLSIR, bitset); | ||
119 | ir = opb_in(opb, OPB_MLSIR); // Flush posted writes | ||
120 | |||
121 | spin_unlock_irqrestore(&opb->lock, flags); | ||
122 | } | ||
123 | |||
124 | static int opb_set_irq_type(struct irq_data *d, unsigned int flow) | ||
125 | { | ||
126 | struct opb_pic *opb; | ||
127 | unsigned long flags; | ||
128 | int invert, ipr, mask, bit; | ||
129 | |||
130 | opb = d->chip_data; | ||
131 | |||
132 | /* The only information we're interested in from the type is whether it's | ||
133 | * a high or low trigger. For high-triggered interrupts, the polarity | ||
134 | * set for it in the MLS Interrupt Polarity Register is 0; for low | ||
135 | * interrupts it's 1, so that the proper input in the MLS Interrupt Input | ||
136 | * Register is interpreted as asserting the interrupt. */ | ||
137 | |||
138 | switch (flow) { | ||
139 | case IRQ_TYPE_NONE: | ||
140 | opb_mask_irq(d); | ||
141 | return 0; | ||
142 | |||
143 | case IRQ_TYPE_LEVEL_HIGH: | ||
144 | invert = 0; | ||
145 | break; | ||
146 | |||
147 | case IRQ_TYPE_LEVEL_LOW: | ||
148 | invert = 1; | ||
149 | break; | ||
150 | |||
151 | default: | ||
152 | return -EINVAL; | ||
153 | } | ||
154 | |||
155 | bit = (1 << (31 - irqd_to_hwirq(d))); | ||
156 | mask = ~bit; | ||
157 | |||
158 | spin_lock_irqsave(&opb->lock, flags); | ||
159 | |||
160 | ipr = opb_in(opb, OPB_MLSIPR); | ||
161 | ipr = (ipr & mask) | (invert ? bit : 0); | ||
162 | opb_out(opb, OPB_MLSIPR, ipr); | ||
163 | ipr = opb_in(opb, OPB_MLSIPR); // Flush posted writes | ||
164 | |||
165 | spin_unlock_irqrestore(&opb->lock, flags); | ||
166 | |||
167 | /* Record the type in the interrupt descriptor */ | ||
168 | irqd_set_trigger_type(d, flow); | ||
169 | |||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | static struct irq_chip opb_irq_chip = { | ||
174 | .name = "OPB", | ||
175 | .irq_mask = opb_mask_irq, | ||
176 | .irq_unmask = opb_unmask_irq, | ||
177 | .irq_mask_ack = opb_mask_ack_irq, | ||
178 | .irq_ack = opb_ack_irq, | ||
179 | .irq_set_type = opb_set_irq_type | ||
180 | }; | ||
181 | |||
182 | static int opb_host_map(struct irq_host *host, unsigned int virq, | ||
183 | irq_hw_number_t hwirq) | ||
184 | { | ||
185 | struct opb_pic *opb; | ||
186 | |||
187 | opb = host->host_data; | ||
188 | |||
189 | /* Most of the important stuff is handled by the generic host code, like | ||
190 | * the lookup, so just attach some info to the virtual irq */ | ||
191 | |||
192 | irq_set_chip_data(virq, opb); | ||
193 | irq_set_chip_and_handler(virq, &opb_irq_chip, handle_level_irq); | ||
194 | irq_set_irq_type(virq, IRQ_TYPE_NONE); | ||
195 | |||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | static int opb_host_xlate(struct irq_host *host, struct device_node *dn, | ||
200 | const u32 *intspec, unsigned int intsize, | ||
201 | irq_hw_number_t *out_hwirq, unsigned int *out_type) | ||
202 | { | ||
203 | /* Interrupt size must == 2 */ | ||
204 | BUG_ON(intsize != 2); | ||
205 | *out_hwirq = intspec[0]; | ||
206 | *out_type = intspec[1]; | ||
207 | return 0; | ||
208 | } | ||
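A minimal consumer-side sketch (not part of the patch) of how a two-cell specifier travels through the host ops above; the node, handler name and the property value <5 8> (hwirq 5, IRQ_TYPE_LEVEL_LOW) are hypothetical:

	#include <linux/interrupt.h>
	#include <linux/of.h>
	#include <linux/of_irq.h>

	static irqreturn_t example_isr(int irq, void *data)
	{
		return IRQ_HANDLED;
	}

	static int example_opb_consumer_setup(struct device_node *dn)
	{
		/* opb_host_xlate() pulls hwirq 5 and type 8 out of the
		 * specifier; opb_host_map() attaches opb_irq_chip and
		 * handle_level_irq to the resulting virq. */
		unsigned int virq = irq_of_parse_and_map(dn, 0);

		if (!virq)
			return -ENODEV;

		/* IRQ_TYPE_LEVEL_LOW eventually reaches opb_set_irq_type(),
		 * which sets the invert bit for hwirq 5 in OPB_MLSIPR. */
		return request_irq(virq, example_isr, 0, "opb-example", NULL);
	}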
209 | |||
210 | static struct irq_host_ops opb_host_ops = { | ||
211 | .map = opb_host_map, | ||
212 | .xlate = opb_host_xlate, | ||
213 | }; | ||
214 | |||
215 | irqreturn_t opb_irq_handler(int irq, void *private) | ||
216 | { | ||
217 | struct opb_pic *opb; | ||
218 | u32 ir, src, subvirq; | ||
219 | |||
220 | opb = (struct opb_pic *) private; | ||
221 | |||
222 | /* Read the OPB MLS Interrupt Register for | ||
223 | * asserted interrupts */ | ||
224 | ir = opb_in(opb, OPB_MLSIR); | ||
225 | if (!ir) | ||
226 | return IRQ_NONE; | ||
227 | |||
228 | do { | ||
229 | /* Convert ffs()'s 1 - 32 result to the source number (0 - 31), *NOT* a bit mask */ | ||
230 | src = 32 - ffs(ir); | ||
231 | |||
232 | /* Translate from the OPB's conception of interrupt number to | ||
233 | * Linux's virtual IRQ */ | ||
234 | |||
235 | subvirq = irq_linear_revmap(opb->host, src); | ||
236 | |||
237 | generic_handle_irq(subvirq); | ||
238 | } while ((ir = opb_in(opb, OPB_MLSIR))); | ||
239 | |||
240 | return IRQ_HANDLED; | ||
241 | } | ||
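The mask/unmask/ack paths above compute the register bit as 1 << (31 - hwirq), and the cascade handler undoes that with 32 - ffs(ir); the two agree because the MLS registers number source 0 as the most significant bit. A standalone sketch (illustrative only, these helpers are not in the patch):

	#include <linux/bitops.h>

	/* Source 0 is the MSB of the 32-bit MLS registers. */
	static inline u32 example_opb_src_to_bit(unsigned long hwirq)
	{
		return 1u << (31 - hwirq);	/* hwirq 3 -> 0x10000000 */
	}

	static inline unsigned long example_opb_bit_to_src(u32 ir)
	{
		/* For ir = 0x10000000, ffs(ir) = 29, so this returns 3:
		 * the same source the mask/unmask helpers would target. */
		return 32 - ffs(ir);
	}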
242 | |||
243 | struct opb_pic *opb_pic_init_one(struct device_node *dn) | ||
244 | { | ||
245 | struct opb_pic *opb; | ||
246 | struct resource res; | ||
247 | |||
248 | if (of_address_to_resource(dn, 0, &res)) { | ||
249 | printk(KERN_ERR "opb: Couldn't translate resource\n"); | ||
250 | return NULL; | ||
251 | } | ||
252 | |||
253 | opb = kzalloc(sizeof(struct opb_pic), GFP_KERNEL); | ||
254 | if (!opb) { | ||
255 | printk(KERN_ERR "opb: Failed to allocate opb struct!\n"); | ||
256 | return NULL; | ||
257 | } | ||
258 | |||
259 | /* Get access to the OPB MMIO registers */ | ||
260 | opb->regs = ioremap(res.start + 0x10000, 0x1000); | ||
261 | if (!opb->regs) { | ||
262 | printk(KERN_ERR "opb: Failed to allocate register space!\n"); | ||
263 | goto free_opb; | ||
264 | } | ||
265 | |||
266 | /* Allocate an irq host so that Linux knows that despite only | ||
267 | * having one interrupt to issue, we're the controller for multiple | ||
268 | * hardware IRQs, so later we can look up their virtual IRQs. */ | ||
269 | |||
270 | opb->host = irq_alloc_host(dn, IRQ_HOST_MAP_LINEAR, | ||
271 | OPB_NR_IRQS, &opb_host_ops, -1); | ||
272 | |||
273 | if (!opb->host) { | ||
274 | printk(KERN_ERR "opb: Failed to allocate IRQ host!\n"); | ||
275 | goto free_regs; | ||
276 | } | ||
277 | |||
278 | opb->index = opb_index++; | ||
279 | spin_lock_init(&opb->lock); | ||
280 | opb->host->host_data = opb; | ||
281 | |||
282 | /* Disable all interrupts by default */ | ||
283 | opb_out(opb, OPB_MLSASIER, 0); | ||
284 | opb_out(opb, OPB_MLSIER, 0); | ||
285 | |||
286 | /* ACK any interrupts left by FW */ | ||
287 | opb_out(opb, OPB_MLSIR, 0xFFFFFFFF); | ||
288 | |||
289 | return opb; | ||
290 | |||
291 | free_regs: | ||
292 | iounmap(opb->regs); | ||
293 | free_opb: | ||
294 | kfree(opb); | ||
295 | return NULL; | ||
296 | } | ||
297 | |||
298 | void __init opb_pic_init(void) | ||
299 | { | ||
300 | struct device_node *dn; | ||
301 | struct opb_pic *opb; | ||
302 | int virq; | ||
303 | int rc; | ||
304 | |||
305 | /* Call init_one for each OPB device */ | ||
306 | for_each_compatible_node(dn, NULL, "ibm,opb") { | ||
307 | |||
308 | /* Fill in an OPB struct */ | ||
309 | opb = opb_pic_init_one(dn); | ||
310 | if (!opb) { | ||
311 | printk(KERN_WARNING "opb: Failed to init node, skipped!\n"); | ||
312 | continue; | ||
313 | } | ||
314 | |||
315 | /* Map / get opb's hardware virtual irq */ | ||
316 | virq = irq_of_parse_and_map(dn, 0); | ||
317 | if (virq <= 0) { | ||
318 | printk("opb: irq_of_parse_and_map failed!\n"); | ||
319 | continue; | ||
320 | } | ||
321 | |||
322 | /* Attach opb interrupt handler to new virtual IRQ */ | ||
323 | rc = request_irq(virq, opb_irq_handler, 0, "OPB LS Cascade", opb); | ||
324 | if (rc) { | ||
325 | printk("opb: request_irq failed: %d\n", rc); | ||
326 | continue; | ||
327 | } | ||
328 | |||
329 | printk("OPB%d init with %d IRQs at %p\n", opb->index, | ||
330 | OPB_NR_IRQS, opb->regs); | ||
331 | } | ||
332 | } | ||
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c new file mode 100644 index 000000000000..40f28916ff6c --- /dev/null +++ b/arch/powerpc/platforms/wsp/psr2.c | |||
@@ -0,0 +1,95 @@ | |||
1 | /* | ||
2 | * Copyright 2008-2011, IBM Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/delay.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/irq.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/of.h> | ||
16 | #include <linux/smp.h> | ||
17 | |||
18 | #include <asm/machdep.h> | ||
19 | #include <asm/system.h> | ||
20 | #include <asm/time.h> | ||
21 | #include <asm/udbg.h> | ||
22 | |||
23 | #include "ics.h" | ||
24 | #include "wsp.h" | ||
25 | |||
26 | |||
27 | static void psr2_spin(void) | ||
28 | { | ||
29 | hard_irq_disable(); | ||
30 | for (;;) ; | ||
31 | } | ||
32 | |||
33 | static void psr2_restart(char *cmd) | ||
34 | { | ||
35 | psr2_spin(); | ||
36 | } | ||
37 | |||
38 | static int psr2_probe_devices(void) | ||
39 | { | ||
40 | struct device_node *np; | ||
41 | |||
42 | /* Our RTC is a ds1500. It seems to be programmatically compatible | ||
43 | * with the ds1511, for which we have a driver, so let's use that | ||
44 | */ | ||
45 | np = of_find_compatible_node(NULL, NULL, "dallas,ds1500"); | ||
46 | if (np != NULL) { | ||
47 | struct resource res; | ||
48 | if (of_address_to_resource(np, 0, &res) == 0) | ||
49 | platform_device_register_simple("ds1511", 0, &res, 1); | ||
50 | } | ||
51 | return 0; | ||
52 | } | ||
53 | machine_arch_initcall(psr2_md, psr2_probe_devices); | ||
54 | |||
55 | static void __init psr2_setup_arch(void) | ||
56 | { | ||
57 | /* init to some ~sane value until calibrate_delay() runs */ | ||
58 | loops_per_jiffy = 50000000; | ||
59 | |||
60 | scom_init_wsp(); | ||
61 | |||
62 | /* Setup SMP callback */ | ||
63 | #ifdef CONFIG_SMP | ||
64 | a2_setup_smp(); | ||
65 | #endif | ||
66 | } | ||
67 | |||
68 | static int __init psr2_probe(void) | ||
69 | { | ||
70 | unsigned long root = of_get_flat_dt_root(); | ||
71 | |||
72 | if (!of_flat_dt_is_compatible(root, "ibm,psr2")) | ||
73 | return 0; | ||
74 | |||
75 | return 1; | ||
76 | } | ||
77 | |||
78 | static void __init psr2_init_irq(void) | ||
79 | { | ||
80 | wsp_init_irq(); | ||
81 | opb_pic_init(); | ||
82 | } | ||
83 | |||
84 | define_machine(psr2_md) { | ||
85 | .name = "PSR2 A2", | ||
86 | .probe = psr2_probe, | ||
87 | .setup_arch = psr2_setup_arch, | ||
88 | .restart = psr2_restart, | ||
89 | .power_off = psr2_spin, | ||
90 | .halt = psr2_spin, | ||
91 | .calibrate_decr = generic_calibrate_decr, | ||
92 | .init_IRQ = psr2_init_irq, | ||
93 | .progress = udbg_progress, | ||
94 | .power_save = book3e_idle, | ||
95 | }; | ||
diff --git a/arch/powerpc/platforms/wsp/scom_smp.c b/arch/powerpc/platforms/wsp/scom_smp.c new file mode 100644 index 000000000000..141e78032097 --- /dev/null +++ b/arch/powerpc/platforms/wsp/scom_smp.c | |||
@@ -0,0 +1,427 @@ | |||
1 | /* | ||
2 | * SCOM support for A2 platforms | ||
3 | * | ||
4 | * Copyright 2007-2011 Benjamin Herrenschmidt, David Gibson, | ||
5 | * Michael Ellerman, IBM Corp. | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * as published by the Free Software Foundation; either version | ||
10 | * 2 of the License, or (at your option) any later version. | ||
11 | */ | ||
12 | |||
13 | #include <linux/cpumask.h> | ||
14 | #include <linux/io.h> | ||
15 | #include <linux/of.h> | ||
16 | #include <linux/spinlock.h> | ||
17 | #include <linux/types.h> | ||
18 | |||
19 | #include <asm/cputhreads.h> | ||
20 | #include <asm/reg_a2.h> | ||
21 | #include <asm/scom.h> | ||
22 | #include <asm/udbg.h> | ||
23 | |||
24 | #include "wsp.h" | ||
25 | |||
26 | #define SCOM_RAMC 0x2a /* Ram Command */ | ||
27 | #define SCOM_RAMC_TGT1_EXT 0x80000000 | ||
28 | #define SCOM_RAMC_SRC1_EXT 0x40000000 | ||
29 | #define SCOM_RAMC_SRC2_EXT 0x20000000 | ||
30 | #define SCOM_RAMC_SRC3_EXT 0x10000000 | ||
31 | #define SCOM_RAMC_ENABLE 0x00080000 | ||
32 | #define SCOM_RAMC_THREADSEL 0x00060000 | ||
33 | #define SCOM_RAMC_EXECUTE 0x00010000 | ||
34 | #define SCOM_RAMC_MSR_OVERRIDE 0x00008000 | ||
35 | #define SCOM_RAMC_MSR_PR 0x00004000 | ||
36 | #define SCOM_RAMC_MSR_GS 0x00002000 | ||
37 | #define SCOM_RAMC_FORCE 0x00001000 | ||
38 | #define SCOM_RAMC_FLUSH 0x00000800 | ||
39 | #define SCOM_RAMC_INTERRUPT 0x00000004 | ||
40 | #define SCOM_RAMC_ERROR 0x00000002 | ||
41 | #define SCOM_RAMC_DONE 0x00000001 | ||
42 | #define SCOM_RAMI 0x29 /* Ram Instruction */ | ||
43 | #define SCOM_RAMIC 0x28 /* Ram Instruction and Command */ | ||
44 | #define SCOM_RAMIC_INSN 0xffffffff00000000 | ||
45 | #define SCOM_RAMD 0x2d /* Ram Data */ | ||
46 | #define SCOM_RAMDH 0x2e /* Ram Data High */ | ||
47 | #define SCOM_RAMDL 0x2f /* Ram Data Low */ | ||
48 | #define SCOM_PCCR0 0x33 /* PC Configuration Register 0 */ | ||
49 | #define SCOM_PCCR0_ENABLE_DEBUG 0x80000000 | ||
50 | #define SCOM_PCCR0_ENABLE_RAM 0x40000000 | ||
51 | #define SCOM_THRCTL 0x30 /* Thread Control and Status */ | ||
52 | #define SCOM_THRCTL_T0_STOP 0x80000000 | ||
53 | #define SCOM_THRCTL_T1_STOP 0x40000000 | ||
54 | #define SCOM_THRCTL_T2_STOP 0x20000000 | ||
55 | #define SCOM_THRCTL_T3_STOP 0x10000000 | ||
56 | #define SCOM_THRCTL_T0_STEP 0x08000000 | ||
57 | #define SCOM_THRCTL_T1_STEP 0x04000000 | ||
58 | #define SCOM_THRCTL_T2_STEP 0x02000000 | ||
59 | #define SCOM_THRCTL_T3_STEP 0x01000000 | ||
60 | #define SCOM_THRCTL_T0_RUN 0x00800000 | ||
61 | #define SCOM_THRCTL_T1_RUN 0x00400000 | ||
62 | #define SCOM_THRCTL_T2_RUN 0x00200000 | ||
63 | #define SCOM_THRCTL_T3_RUN 0x00100000 | ||
64 | #define SCOM_THRCTL_T0_PM 0x00080000 | ||
65 | #define SCOM_THRCTL_T1_PM 0x00040000 | ||
66 | #define SCOM_THRCTL_T2_PM 0x00020000 | ||
67 | #define SCOM_THRCTL_T3_PM 0x00010000 | ||
68 | #define SCOM_THRCTL_T0_UDE 0x00008000 | ||
69 | #define SCOM_THRCTL_T1_UDE 0x00004000 | ||
70 | #define SCOM_THRCTL_T2_UDE 0x00002000 | ||
71 | #define SCOM_THRCTL_T3_UDE 0x00001000 | ||
72 | #define SCOM_THRCTL_ASYNC_DIS 0x00000800 | ||
73 | #define SCOM_THRCTL_TB_DIS 0x00000400 | ||
74 | #define SCOM_THRCTL_DEC_DIS 0x00000200 | ||
75 | #define SCOM_THRCTL_AND 0x31 /* Thread Control and Status */ | ||
76 | #define SCOM_THRCTL_OR 0x32 /* Thread Control and Status */ | ||
77 | |||
78 | |||
79 | static DEFINE_PER_CPU(scom_map_t, scom_ptrs); | ||
80 | |||
81 | static scom_map_t get_scom(int cpu, struct device_node *np, int *first_thread) | ||
82 | { | ||
83 | scom_map_t scom = per_cpu(scom_ptrs, cpu); | ||
84 | int tcpu; | ||
85 | |||
86 | if (scom_map_ok(scom)) { | ||
87 | *first_thread = 0; | ||
88 | return scom; | ||
89 | } | ||
90 | |||
91 | *first_thread = 1; | ||
92 | |||
93 | scom = scom_map_device(np, 0); | ||
94 | |||
95 | for (tcpu = cpu_first_thread_sibling(cpu); | ||
96 | tcpu <= cpu_last_thread_sibling(cpu); tcpu++) | ||
97 | per_cpu(scom_ptrs, tcpu) = scom; | ||
98 | |||
99 | /* Hack: for the boot core, this will actually get called on | ||
100 | * the second thread up, not the first, so our test above will | ||
101 | * set first_thread incorrectly. */ | ||
102 | if (cpu_first_thread_sibling(cpu) == 0) | ||
103 | *first_thread = 0; | ||
104 | |||
105 | return scom; | ||
106 | } | ||
107 | |||
108 | static int a2_scom_ram(scom_map_t scom, int thread, u32 insn, int extmask) | ||
109 | { | ||
110 | u64 cmd, mask, val; | ||
111 | int n = 0; | ||
112 | |||
113 | cmd = ((u64)insn << 32) | (((u64)extmask & 0xf) << 28) | ||
114 | | ((u64)thread << 17) | SCOM_RAMC_ENABLE | SCOM_RAMC_EXECUTE; | ||
115 | mask = SCOM_RAMC_DONE | SCOM_RAMC_INTERRUPT | SCOM_RAMC_ERROR; | ||
116 | |||
117 | scom_write(scom, SCOM_RAMIC, cmd); | ||
118 | |||
119 | while (!((val = scom_read(scom, SCOM_RAMC)) & mask)) { | ||
120 | pr_devel("Waiting on RAMC = 0x%llx\n", val); | ||
121 | if (++n == 3) { | ||
122 | pr_err("RAMC timeout on instruction 0x%08x, thread %d\n", | ||
123 | insn, thread); | ||
124 | return -1; | ||
125 | } | ||
126 | } | ||
127 | |||
128 | if (val & SCOM_RAMC_INTERRUPT) { | ||
129 | pr_err("RAMC interrupt on instruction 0x%08x, thread %d\n", | ||
130 | insn, thread); | ||
131 | return -SCOM_RAMC_INTERRUPT; | ||
132 | } | ||
133 | |||
134 | if (val & SCOM_RAMC_ERROR) { | ||
135 | pr_err("RAMC error on instruction 0x%08x, thread %d\n", | ||
136 | insn, thread); | ||
137 | return -SCOM_RAMC_ERROR; | ||
138 | } | ||
139 | |||
140 | return 0; | ||
141 | } | ||
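A worked example (illustrative, not from the patch; operands made up) of the doubleword a2_scom_ram() writes to SCOM_RAMIC, using the bit definitions above:

	/*
	 * RAMming "mfmsr r1" (0x7c2000a6) on thread 2 with extmask = 0xf:
	 *
	 *   (u64)insn << 32            0x7c2000a600000000
	 *   (extmask & 0xf) << 28      0x00000000f0000000
	 *   (u64)thread << 17          0x0000000000040000
	 *   SCOM_RAMC_ENABLE           0x0000000000080000
	 *   SCOM_RAMC_EXECUTE          0x0000000000010000
	 *   ---------------------------------------------
	 *   cmd                        0x7c2000a6f00d0000
	 */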
142 | |||
143 | static int a2_scom_getgpr(scom_map_t scom, int thread, int gpr, int alt, | ||
144 | u64 *out_gpr) | ||
145 | { | ||
146 | int rc; | ||
147 | |||
148 | /* or rN, rN, rN */ | ||
149 | u32 insn = 0x7c000378 | (gpr << 21) | (gpr << 16) | (gpr << 11); | ||
150 | rc = a2_scom_ram(scom, thread, insn, alt ? 0xf : 0x0); | ||
151 | if (rc) | ||
152 | return rc; | ||
153 | |||
154 | *out_gpr = scom_read(scom, SCOM_RAMD); | ||
155 | |||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | static int a2_scom_getspr(scom_map_t scom, int thread, int spr, u64 *out_spr) | ||
160 | { | ||
161 | int rc, sprhi, sprlo; | ||
162 | u32 insn; | ||
163 | |||
164 | sprhi = spr >> 5; | ||
165 | sprlo = spr & 0x1f; | ||
166 | insn = 0x7c2002a6 | (sprlo << 16) | (sprhi << 11); /* mfspr r1,spr */ | ||
167 | |||
168 | if (spr == 0x0ff0) | ||
169 | insn = 0x7c2000a6; /* mfmsr r1 */ | ||
170 | |||
171 | rc = a2_scom_ram(scom, thread, insn, 0xf); | ||
172 | if (rc) | ||
173 | return rc; | ||
174 | return a2_scom_getgpr(scom, thread, 1, 1, out_spr); | ||
175 | } | ||
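The mfspr image is built with the two 5-bit halves of the SPR number swapped; a worked example (illustrative) for SRR0:

	/*
	 * SPRN_SRR0 is SPR 26, so sprhi = 26 >> 5 = 0 and
	 * sprlo = 26 & 0x1f = 0x1a:
	 *
	 *   insn = 0x7c2002a6 | (0x1a << 16) | (0 << 11) = 0x7c3a02a6
	 *
	 * which is the encoding of "mfspr r1, srr0".
	 */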
176 | |||
177 | static int a2_scom_setgpr(scom_map_t scom, int thread, int gpr, | ||
178 | int alt, u64 val) | ||
179 | { | ||
180 | u32 lis = 0x3c000000 | (gpr << 21); | ||
181 | u32 li = 0x38000000 | (gpr << 21); | ||
182 | u32 oris = 0x64000000 | (gpr << 21) | (gpr << 16); | ||
183 | u32 ori = 0x60000000 | (gpr << 21) | (gpr << 16); | ||
184 | u32 rldicr32 = 0x780007c6 | (gpr << 21) | (gpr << 16); | ||
185 | u32 highest = val >> 48; | ||
186 | u32 higher = (val >> 32) & 0xffff; | ||
187 | u32 high = (val >> 16) & 0xffff; | ||
188 | u32 low = val & 0xffff; | ||
189 | int lext = alt ? 0x8 : 0x0; | ||
190 | int oext = alt ? 0xf : 0x0; | ||
191 | int rc = 0; | ||
192 | |||
193 | if (highest) | ||
194 | rc |= a2_scom_ram(scom, thread, lis | highest, lext); | ||
195 | |||
196 | if (higher) { | ||
197 | if (highest) | ||
198 | rc |= a2_scom_ram(scom, thread, oris | higher, oext); | ||
199 | else | ||
200 | rc |= a2_scom_ram(scom, thread, li | higher, lext); | ||
201 | } | ||
202 | |||
203 | if (highest || higher) | ||
204 | rc |= a2_scom_ram(scom, thread, rldicr32, oext); | ||
205 | |||
206 | if (high) { | ||
207 | if (highest || higher) | ||
208 | rc |= a2_scom_ram(scom, thread, oris | high, oext); | ||
209 | else | ||
210 | rc |= a2_scom_ram(scom, thread, lis | high, lext); | ||
211 | } | ||
212 | |||
213 | if (highest || higher || high) | ||
214 | rc |= a2_scom_ram(scom, thread, ori | low, oext); | ||
215 | else | ||
216 | rc |= a2_scom_ram(scom, thread, li | low, lext); | ||
217 | |||
218 | return rc; | ||
219 | } | ||
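A worked example (illustrative, value made up) of the instruction sequence a2_scom_setgpr() RAMs to materialize a 64-bit immediate, here val = 0x0000004000001000 into r1, the scratch GPR a2_scom_setspr() uses; "highest" and "high" are zero, so only three instructions are needed:

	/*
	 *   li     r1, 0x0040       ; "higher" halfword (bits 32-47)
	 *   rldicr r1, r1, 32, 31   ; shift it up into the top word
	 *   ori    r1, r1, 0x1000   ; "low" halfword
	 *
	 * leaving r1 = 0x0000004000001000.
	 */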
220 | |||
221 | static int a2_scom_setspr(scom_map_t scom, int thread, int spr, u64 val) | ||
222 | { | ||
223 | int sprhi = spr >> 5; | ||
224 | int sprlo = spr & 0x1f; | ||
225 | /* mtspr spr, r1 */ | ||
226 | u32 insn = 0x7c2003a6 | (sprlo << 16) | (sprhi << 11); | ||
227 | |||
228 | if (spr == 0x0ff0) | ||
229 | insn = 0x7c200124; /* mtmsr r1 */ | ||
230 | |||
231 | if (a2_scom_setgpr(scom, thread, 1, 1, val)) | ||
232 | return -1; | ||
233 | |||
234 | return a2_scom_ram(scom, thread, insn, 0xf); | ||
235 | } | ||
236 | |||
237 | static int a2_scom_initial_tlb(scom_map_t scom, int thread) | ||
238 | { | ||
239 | extern u32 a2_tlbinit_code_start[], a2_tlbinit_code_end[]; | ||
240 | extern u32 a2_tlbinit_after_iprot_flush[]; | ||
241 | extern u32 a2_tlbinit_after_linear_map[]; | ||
242 | u32 assoc, entries, i; | ||
243 | u64 epn, tlbcfg; | ||
244 | u32 *p; | ||
245 | int rc; | ||
246 | |||
247 | /* Invalidate all entries (including iprot) */ | ||
248 | |||
249 | rc = a2_scom_getspr(scom, thread, SPRN_TLB0CFG, &tlbcfg); | ||
250 | if (rc) | ||
251 | goto scom_fail; | ||
252 | entries = tlbcfg & TLBnCFG_N_ENTRY; | ||
253 | assoc = (tlbcfg & TLBnCFG_ASSOC) >> 24; | ||
254 | epn = 0; | ||
255 | |||
256 | /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */ | ||
257 | a2_scom_setspr(scom, thread, SPRN_MMUCR2, 0x000a7531); | ||
258 | /* Set MMUCR3 to write all thread ID bits to the TLB */ | ||
259 | a2_scom_setspr(scom, thread, SPRN_MMUCR3, 0x0000000f); | ||
260 | |||
261 | /* Set MAS1 for 1G page size, and MAS2 to our initial EPN */ | ||
262 | a2_scom_setspr(scom, thread, SPRN_MAS1, MAS1_TSIZE(BOOK3E_PAGESZ_1GB)); | ||
263 | a2_scom_setspr(scom, thread, SPRN_MAS2, epn); | ||
264 | for (i = 0; i < entries; i++) { | ||
265 | |||
266 | a2_scom_setspr(scom, thread, SPRN_MAS0, MAS0_ESEL(i % assoc)); | ||
267 | |||
268 | /* tlbwe */ | ||
269 | rc = a2_scom_ram(scom, thread, 0x7c0007a4, 0); | ||
270 | if (rc) | ||
271 | goto scom_fail; | ||
272 | |||
273 | /* Bump the EPN once all 'assoc' ways at this address are written */ | ||
274 | if ((i + 1) % assoc == 0) { | ||
275 | epn += (1 << 30); | ||
276 | a2_scom_setspr(scom, thread, SPRN_MAS2, epn); | ||
277 | } | ||
278 | } | ||
279 | |||
280 | /* Setup args for linear mapping */ | ||
281 | rc = a2_scom_setgpr(scom, thread, 3, 0, MAS0_TLBSEL(0)); | ||
282 | if (rc) | ||
283 | goto scom_fail; | ||
284 | |||
285 | /* Linear mapping */ | ||
286 | for (p = a2_tlbinit_code_start; p < a2_tlbinit_after_linear_map; p++) { | ||
287 | rc = a2_scom_ram(scom, thread, *p, 0); | ||
288 | if (rc) | ||
289 | goto scom_fail; | ||
290 | } | ||
291 | |||
292 | /* | ||
293 | * For the boot thread, between the linear mapping and the debug | ||
294 | * mappings there is a loop to flush iprot mappings. Ramming doesn't do | ||
295 | * branches, but the secondary threads don't need to be nearly as smart | ||
296 | * (i.e. we don't need to worry about invalidating the mapping we're | ||
297 | * standing on). | ||
298 | */ | ||
299 | |||
300 | /* Debug mappings. Expects r11 = MAS0 from linear map (set above) */ | ||
301 | for (p = a2_tlbinit_after_iprot_flush; p < a2_tlbinit_code_end; p++) { | ||
302 | rc = a2_scom_ram(scom, thread, *p, 0); | ||
303 | if (rc) | ||
304 | goto scom_fail; | ||
305 | } | ||
306 | |||
307 | scom_fail: | ||
308 | if (rc) | ||
309 | pr_err("Setting up initial TLB failed, err %d\n", rc); | ||
310 | |||
311 | if (rc == -SCOM_RAMC_INTERRUPT) { | ||
312 | /* Interrupt, dump some status */ | ||
313 | int rc[10]; | ||
314 | u64 iar, srr0, srr1, esr, mas0, mas1, mas2, mas7_3, mas8, ccr2; | ||
315 | rc[0] = a2_scom_getspr(scom, thread, SPRN_IAR, &iar); | ||
316 | rc[1] = a2_scom_getspr(scom, thread, SPRN_SRR0, &srr0); | ||
317 | rc[2] = a2_scom_getspr(scom, thread, SPRN_SRR1, &srr1); | ||
318 | rc[3] = a2_scom_getspr(scom, thread, SPRN_ESR, &esr); | ||
319 | rc[4] = a2_scom_getspr(scom, thread, SPRN_MAS0, &mas0); | ||
320 | rc[5] = a2_scom_getspr(scom, thread, SPRN_MAS1, &mas1); | ||
321 | rc[6] = a2_scom_getspr(scom, thread, SPRN_MAS2, &mas2); | ||
322 | rc[7] = a2_scom_getspr(scom, thread, SPRN_MAS7_MAS3, &mas7_3); | ||
323 | rc[8] = a2_scom_getspr(scom, thread, SPRN_MAS8, &mas8); | ||
324 | rc[9] = a2_scom_getspr(scom, thread, SPRN_A2_CCR2, &ccr2); | ||
325 | pr_err(" -> retrieved IAR =0x%llx (err %d)\n", iar, rc[0]); | ||
326 | pr_err(" retrieved SRR0=0x%llx (err %d)\n", srr0, rc[1]); | ||
327 | pr_err(" retrieved SRR1=0x%llx (err %d)\n", srr1, rc[2]); | ||
328 | pr_err(" retrieved ESR =0x%llx (err %d)\n", esr, rc[3]); | ||
329 | pr_err(" retrieved MAS0=0x%llx (err %d)\n", mas0, rc[4]); | ||
330 | pr_err(" retrieved MAS1=0x%llx (err %d)\n", mas1, rc[5]); | ||
331 | pr_err(" retrieved MAS2=0x%llx (err %d)\n", mas2, rc[6]); | ||
332 | pr_err(" retrieved MAS7_3=0x%llx (err %d)\n", mas7_3, rc[7]); | ||
333 | pr_err(" retrieved MAS8=0x%llx (err %d)\n", mas8, rc[8]); | ||
334 | pr_err(" retrieved CCR2=0x%llx (err %d)\n", ccr2, rc[9]); | ||
335 | } | ||
336 | |||
337 | return rc; | ||
338 | } | ||
339 | |||
340 | int __devinit a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, | ||
341 | struct device_node *np) | ||
342 | { | ||
343 | u64 init_iar, init_msr, init_ccr2; | ||
344 | unsigned long start_here; | ||
345 | int rc, core_setup; | ||
346 | scom_map_t scom; | ||
347 | u64 pccr0; | ||
348 | |||
349 | scom = get_scom(lcpu, np, &core_setup); | ||
350 | if (!scom) { | ||
351 | printk(KERN_ERR "Couldn't map SCOM for CPU%d\n", lcpu); | ||
352 | return -1; | ||
353 | } | ||
354 | |||
355 | pr_devel("Bringing up CPU%d using SCOM...\n", lcpu); | ||
356 | |||
357 | pccr0 = scom_read(scom, SCOM_PCCR0); | ||
358 | scom_write(scom, SCOM_PCCR0, pccr0 | SCOM_PCCR0_ENABLE_DEBUG | | ||
359 | SCOM_PCCR0_ENABLE_RAM); | ||
360 | |||
361 | /* Stop the thread with THRCTL. If we are setting up the TLB, we stop all | ||
362 | * threads. We also disable asynchronous interrupts while RAMing. | ||
363 | */ | ||
364 | if (core_setup) | ||
365 | scom_write(scom, SCOM_THRCTL_OR, | ||
366 | SCOM_THRCTL_T0_STOP | | ||
367 | SCOM_THRCTL_T1_STOP | | ||
368 | SCOM_THRCTL_T2_STOP | | ||
369 | SCOM_THRCTL_T3_STOP | | ||
370 | SCOM_THRCTL_ASYNC_DIS); | ||
371 | else | ||
372 | scom_write(scom, SCOM_THRCTL_OR, SCOM_THRCTL_T0_STOP >> thr_idx); | ||
373 | |||
374 | /* Flush its pipeline just in case */ | ||
375 | scom_write(scom, SCOM_RAMC, ((u64)thr_idx << 17) | | ||
376 | SCOM_RAMC_FLUSH | SCOM_RAMC_ENABLE); | ||
377 | |||
378 | a2_scom_getspr(scom, thr_idx, SPRN_IAR, &init_iar); | ||
379 | a2_scom_getspr(scom, thr_idx, 0x0ff0, &init_msr); | ||
380 | a2_scom_getspr(scom, thr_idx, SPRN_A2_CCR2, &init_ccr2); | ||
381 | |||
382 | /* Set MSR to MSR_CM (0x0ff0 is the magic SPR value meaning MSR) */ | ||
383 | rc = a2_scom_setspr(scom, thr_idx, 0x0ff0, MSR_CM); | ||
384 | if (rc) { | ||
385 | pr_err("Failed to set MSR! err %d\n", rc); | ||
386 | return rc; | ||
387 | } | ||
388 | |||
389 | /* RAM in a sync/isync for the sake of it */ | ||
390 | a2_scom_ram(scom, thr_idx, 0x7c0004ac, 0); | ||
391 | a2_scom_ram(scom, thr_idx, 0x4c00012c, 0); | ||
392 | |||
393 | if (core_setup) { | ||
394 | pr_devel("CPU%d is first thread in core, initializing TLB...\n", | ||
395 | lcpu); | ||
396 | rc = a2_scom_initial_tlb(scom, thr_idx); | ||
397 | if (rc) | ||
398 | goto fail; | ||
399 | } | ||
400 | |||
401 | start_here = *(unsigned long *)(core_setup ? generic_secondary_smp_init | ||
402 | : generic_secondary_thread_init); | ||
403 | pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here); | ||
404 | |||
405 | rc |= a2_scom_setspr(scom, thr_idx, SPRN_IAR, start_here); | ||
406 | rc |= a2_scom_setgpr(scom, thr_idx, 3, 0, | ||
407 | get_hard_smp_processor_id(lcpu)); | ||
408 | /* | ||
409 | * Tell book3e_secondary_core_init not to set up the TLB, we've | ||
410 | * already done that. | ||
411 | */ | ||
412 | rc |= a2_scom_setgpr(scom, thr_idx, 4, 0, 1); | ||
413 | |||
414 | rc |= a2_scom_setspr(scom, thr_idx, SPRN_TENS, 0x1 << thr_idx); | ||
415 | |||
416 | scom_write(scom, SCOM_RAMC, 0); | ||
417 | scom_write(scom, SCOM_THRCTL_AND, ~(SCOM_THRCTL_T0_STOP >> thr_idx)); | ||
418 | scom_write(scom, SCOM_PCCR0, pccr0); | ||
419 | fail: | ||
420 | pr_devel(" SCOM initialization %s\n", rc ? "failed" : "succeeded"); | ||
421 | if (rc) { | ||
422 | pr_err("Old IAR=0x%08llx MSR=0x%08llx CCR2=0x%08llx\n", | ||
423 | init_iar, init_msr, init_ccr2); | ||
424 | } | ||
425 | |||
426 | return rc; | ||
427 | } | ||
diff --git a/arch/powerpc/platforms/wsp/scom_wsp.c b/arch/powerpc/platforms/wsp/scom_wsp.c new file mode 100644 index 000000000000..4052e2259f30 --- /dev/null +++ b/arch/powerpc/platforms/wsp/scom_wsp.c | |||
@@ -0,0 +1,77 @@ | |||
1 | /* | ||
2 | * SCOM backend for WSP | ||
3 | * | ||
4 | * Copyright 2010 Benjamin Herrenschmidt, IBM Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/cpumask.h> | ||
13 | #include <linux/io.h> | ||
14 | #include <linux/of.h> | ||
15 | #include <linux/spinlock.h> | ||
16 | #include <linux/types.h> | ||
17 | |||
18 | #include <asm/cputhreads.h> | ||
19 | #include <asm/reg_a2.h> | ||
20 | #include <asm/scom.h> | ||
21 | #include <asm/udbg.h> | ||
22 | |||
23 | #include "wsp.h" | ||
24 | |||
25 | |||
26 | static scom_map_t wsp_scom_map(struct device_node *dev, u64 reg, u64 count) | ||
27 | { | ||
28 | struct resource r; | ||
29 | u64 xscom_addr; | ||
30 | |||
31 | if (!of_get_property(dev, "scom-controller", NULL)) { | ||
32 | pr_err("%s: device %s is not a SCOM controller\n", | ||
33 | __func__, dev->full_name); | ||
34 | return SCOM_MAP_INVALID; | ||
35 | } | ||
36 | |||
37 | if (of_address_to_resource(dev, 0, &r)) { | ||
38 | pr_debug("Failed to find SCOM controller address\n"); | ||
39 | return 0; | ||
40 | } | ||
41 | |||
42 | /* Transform the SCOM address into an XSCOM offset */ | ||
43 | xscom_addr = ((reg & 0x7f000000) >> 1) | ((reg & 0xfffff) << 3); | ||
44 | |||
45 | return (scom_map_t)ioremap(r.start + xscom_addr, count << 3); | ||
46 | } | ||
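A worked example (illustrative; the SCOM address is made up) of the address transform above:

	/*
	 * For reg = 0x02010000:
	 *
	 *   (reg & 0x7f000000) >> 1   = 0x01000000
	 *   (reg & 0x000fffff) << 3   = 0x00080000
	 *   xscom_addr                = 0x01080000
	 *
	 * The low 20 bits are scaled by 8, which matches wsp_scom_read()
	 * and wsp_scom_write() below indexing the mapping in u64 units.
	 */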
47 | |||
48 | static void wsp_scom_unmap(scom_map_t map) | ||
49 | { | ||
50 | iounmap((void *)map); | ||
51 | } | ||
52 | |||
53 | static u64 wsp_scom_read(scom_map_t map, u32 reg) | ||
54 | { | ||
55 | u64 __iomem *addr = (u64 __iomem *)map; | ||
56 | |||
57 | return in_be64(addr + reg); | ||
58 | } | ||
59 | |||
60 | static void wsp_scom_write(scom_map_t map, u32 reg, u64 value) | ||
61 | { | ||
62 | u64 __iomem *addr = (u64 __iomem *)map; | ||
63 | |||
64 | return out_be64(addr + reg, value); | ||
65 | } | ||
66 | |||
67 | static const struct scom_controller wsp_scom_controller = { | ||
68 | .map = wsp_scom_map, | ||
69 | .unmap = wsp_scom_unmap, | ||
70 | .read = wsp_scom_read, | ||
71 | .write = wsp_scom_write | ||
72 | }; | ||
73 | |||
74 | void scom_init_wsp(void) | ||
75 | { | ||
76 | scom_init(&wsp_scom_controller); | ||
77 | } | ||
diff --git a/arch/powerpc/platforms/wsp/setup.c b/arch/powerpc/platforms/wsp/setup.c new file mode 100644 index 000000000000..11ac2f05e01c --- /dev/null +++ b/arch/powerpc/platforms/wsp/setup.c | |||
@@ -0,0 +1,36 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Michael Ellerman, IBM Corporation | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/of_platform.h> | ||
12 | |||
13 | #include "wsp.h" | ||
14 | |||
15 | /* | ||
16 | * Find the chip-id by walking up the device tree looking for the ibm,wsp-chip-id property. | ||
17 | * Won't work for nodes that are not a descendant of a wsp node. | ||
18 | */ | ||
19 | int wsp_get_chip_id(struct device_node *dn) | ||
20 | { | ||
21 | const u32 *p; | ||
22 | int rc; | ||
23 | |||
24 | /* Start looking at the specified node, not its parent */ | ||
25 | dn = of_node_get(dn); | ||
26 | while (dn && !(p = of_get_property(dn, "ibm,wsp-chip-id", NULL))) | ||
27 | dn = of_get_next_parent(dn); | ||
28 | |||
29 | if (!dn) | ||
30 | return -1; | ||
31 | |||
32 | rc = *p; | ||
33 | of_node_put(dn); | ||
34 | |||
35 | return rc; | ||
36 | } | ||
diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c new file mode 100644 index 000000000000..9d20fa9d3710 --- /dev/null +++ b/arch/powerpc/platforms/wsp/smp.c | |||
@@ -0,0 +1,88 @@ | |||
1 | /* | ||
2 | * SMP Support for A2 platforms | ||
3 | * | ||
4 | * Copyright 2007 Benjamin Herrenschmidt, IBM Corp. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <linux/cpumask.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/of.h> | ||
17 | #include <linux/smp.h> | ||
18 | |||
19 | #include <asm/dbell.h> | ||
20 | #include <asm/machdep.h> | ||
21 | #include <asm/xics.h> | ||
22 | |||
23 | #include "ics.h" | ||
24 | #include "wsp.h" | ||
25 | |||
26 | static void __devinit smp_a2_setup_cpu(int cpu) | ||
27 | { | ||
28 | doorbell_setup_this_cpu(); | ||
29 | |||
30 | if (cpu != boot_cpuid) | ||
31 | xics_setup_cpu(); | ||
32 | } | ||
33 | |||
34 | int __devinit smp_a2_kick_cpu(int nr) | ||
35 | { | ||
36 | const char *enable_method; | ||
37 | struct device_node *np; | ||
38 | int thr_idx; | ||
39 | |||
40 | if (nr < 0 || nr >= NR_CPUS) | ||
41 | return -ENOENT; | ||
42 | |||
43 | np = of_get_cpu_node(nr, &thr_idx); | ||
44 | if (!np) | ||
45 | return -ENODEV; | ||
46 | |||
47 | enable_method = of_get_property(np, "enable-method", NULL); | ||
48 | pr_devel("CPU%d has enable-method: \"%s\"\n", nr, enable_method); | ||
49 | |||
50 | if (!enable_method) { | ||
51 | printk(KERN_ERR "CPU%d has no enable-method\n", nr); | ||
52 | return -ENOENT; | ||
53 | } else if (strcmp(enable_method, "ibm,a2-scom") == 0) { | ||
54 | if (a2_scom_startup_cpu(nr, thr_idx, np)) | ||
55 | return -1; | ||
56 | } else { | ||
57 | printk(KERN_ERR "CPU%d: Don't understand enable-method \"%s\"\n", | ||
58 | nr, enable_method); | ||
59 | return -EINVAL; | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * The processor is currently spinning, waiting for the | ||
64 | * cpu_start field to become non-zero. After we set cpu_start, | ||
65 | * the processor will continue on to secondary_start. | ||
66 | */ | ||
67 | paca[nr].cpu_start = 1; | ||
68 | |||
69 | return 0; | ||
70 | } | ||
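An illustrative device-tree fragment (hypothetical values) showing the enable-method this code expects on a CPU node:

	/*
	 *   cpu@0 {
	 *           device_type = "cpu";
	 *           reg = <0>;
	 *           enable-method = "ibm,a2-scom";
	 *   };
	 *
	 * A missing property yields -ENOENT, any other string -EINVAL.
	 */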
71 | |||
72 | static int __init smp_a2_probe(void) | ||
73 | { | ||
74 | return cpus_weight(cpu_possible_map); | ||
75 | } | ||
76 | |||
77 | static struct smp_ops_t a2_smp_ops = { | ||
78 | .message_pass = smp_muxed_ipi_message_pass, | ||
79 | .cause_ipi = doorbell_cause_ipi, | ||
80 | .probe = smp_a2_probe, | ||
81 | .kick_cpu = smp_a2_kick_cpu, | ||
82 | .setup_cpu = smp_a2_setup_cpu, | ||
83 | }; | ||
84 | |||
85 | void __init a2_setup_smp(void) | ||
86 | { | ||
87 | smp_ops = &a2_smp_ops; | ||
88 | } | ||
diff --git a/arch/powerpc/platforms/wsp/wsp.h b/arch/powerpc/platforms/wsp/wsp.h new file mode 100644 index 000000000000..7c3e087fd2f2 --- /dev/null +++ b/arch/powerpc/platforms/wsp/wsp.h | |||
@@ -0,0 +1,17 @@ | |||
1 | #ifndef __WSP_H | ||
2 | #define __WSP_H | ||
3 | |||
4 | #include <asm/wsp.h> | ||
5 | |||
6 | extern void wsp_setup_pci(void); | ||
7 | extern void scom_init_wsp(void); | ||
8 | |||
9 | extern void a2_setup_smp(void); | ||
10 | extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, | ||
11 | struct device_node *np); | ||
12 | int smp_a2_cpu_bootable(unsigned int nr); | ||
13 | int __devinit smp_a2_kick_cpu(int nr); | ||
14 | |||
15 | void opb_pic_init(void); | ||
16 | |||
17 | #endif /* __WSP_H */ | ||
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig index 396582835cb5..7b4df37ac381 100644 --- a/arch/powerpc/sysdev/Kconfig +++ b/arch/powerpc/sysdev/Kconfig | |||
@@ -7,8 +7,25 @@ config PPC4xx_PCI_EXPRESS | |||
7 | depends on PCI && 4xx | 7 | depends on PCI && 4xx |
8 | default n | 8 | default n |
9 | 9 | ||
10 | config PPC4xx_MSI | ||
11 | bool | ||
12 | depends on PCI_MSI | ||
13 | depends on PCI && 4xx | ||
14 | default n | ||
15 | |||
10 | config PPC_MSI_BITMAP | 16 | config PPC_MSI_BITMAP |
11 | bool | 17 | bool |
12 | depends on PCI_MSI | 18 | depends on PCI_MSI |
13 | default y if MPIC | 19 | default y if MPIC |
14 | default y if FSL_PCI | 20 | default y if FSL_PCI |
21 | default y if PPC4xx_MSI | ||
22 | |||
23 | source "arch/powerpc/sysdev/xics/Kconfig" | ||
24 | |||
25 | config PPC_SCOM | ||
26 | bool | ||
27 | |||
28 | config SCOM_DEBUGFS | ||
29 | bool "Expose SCOM controllers via debugfs" | ||
30 | depends on PPC_SCOM | ||
31 | default n | ||
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index 5642924fb9fb..0efa990e3344 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile | |||
@@ -1,8 +1,6 @@ | |||
1 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror | 1 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror |
2 | 2 | ||
3 | ifeq ($(CONFIG_PPC64),y) | 3 | ccflags-$(CONFIG_PPC64) := -mno-minimal-toc |
4 | EXTRA_CFLAGS += -mno-minimal-toc | ||
5 | endif | ||
6 | 4 | ||
7 | mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o mpic_pasemi_msi.o | 5 | mpic-msi-obj-$(CONFIG_PCI_MSI) += mpic_msi.o mpic_u3msi.o mpic_pasemi_msi.o |
8 | obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y) | 6 | obj-$(CONFIG_MPIC) += mpic.o $(mpic-msi-obj-y) |
@@ -20,8 +18,9 @@ obj-$(CONFIG_FSL_PMC) += fsl_pmc.o | |||
20 | obj-$(CONFIG_FSL_LBC) += fsl_lbc.o | 18 | obj-$(CONFIG_FSL_LBC) += fsl_lbc.o |
21 | obj-$(CONFIG_FSL_GTM) += fsl_gtm.o | 19 | obj-$(CONFIG_FSL_GTM) += fsl_gtm.o |
22 | obj-$(CONFIG_MPC8xxx_GPIO) += mpc8xxx_gpio.o | 20 | obj-$(CONFIG_MPC8xxx_GPIO) += mpc8xxx_gpio.o |
21 | obj-$(CONFIG_FSL_85XX_CACHE_SRAM) += fsl_85xx_l2ctlr.o fsl_85xx_cache_sram.o | ||
23 | obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o | 22 | obj-$(CONFIG_SIMPLE_GPIO) += simple_gpio.o |
24 | obj-$(CONFIG_RAPIDIO) += fsl_rio.o | 23 | obj-$(CONFIG_FSL_RIO) += fsl_rio.o |
25 | obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o | 24 | obj-$(CONFIG_TSI108_BRIDGE) += tsi108_pci.o tsi108_dev.o |
26 | obj-$(CONFIG_QUICC_ENGINE) += qe_lib/ | 25 | obj-$(CONFIG_QUICC_ENGINE) += qe_lib/ |
27 | obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ | 26 | obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ |
@@ -42,6 +41,8 @@ obj-$(CONFIG_OF_RTC) += of_rtc.o | |||
42 | ifeq ($(CONFIG_PCI),y) | 41 | ifeq ($(CONFIG_PCI),y) |
43 | obj-$(CONFIG_4xx) += ppc4xx_pci.o | 42 | obj-$(CONFIG_4xx) += ppc4xx_pci.o |
44 | endif | 43 | endif |
44 | obj-$(CONFIG_PPC4xx_MSI) += ppc4xx_msi.o | ||
45 | obj-$(CONFIG_PPC4xx_CPM) += ppc4xx_cpm.o | ||
45 | obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o | 46 | obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o |
46 | 47 | ||
47 | obj-$(CONFIG_CPM) += cpm_common.o | 48 | obj-$(CONFIG_CPM) += cpm_common.o |
@@ -57,3 +58,9 @@ obj-$(CONFIG_PPC_MPC52xx) += mpc5xxx_clocks.o | |||
57 | ifeq ($(CONFIG_SUSPEND),y) | 58 | ifeq ($(CONFIG_SUSPEND),y) |
58 | obj-$(CONFIG_6xx) += 6xx-suspend.o | 59 | obj-$(CONFIG_6xx) += 6xx-suspend.o |
59 | endif | 60 | endif |
61 | |||
62 | obj-$(CONFIG_PPC_SCOM) += scom.o | ||
63 | |||
64 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror | ||
65 | |||
66 | obj-$(CONFIG_PPC_XICS) += xics/ | ||
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c index 2659a60bd7b8..bd0d54060b94 100644 --- a/arch/powerpc/sysdev/axonram.c +++ b/arch/powerpc/sysdev/axonram.c | |||
@@ -95,7 +95,7 @@ axon_ram_irq_handler(int irq, void *dev) | |||
95 | 95 | ||
96 | BUG_ON(!bank); | 96 | BUG_ON(!bank); |
97 | 97 | ||
98 | dev_err(&device->dev, "Correctable memory error occured\n"); | 98 | dev_err(&device->dev, "Correctable memory error occurred\n"); |
99 | bank->ecc_counter++; | 99 | bank->ecc_counter++; |
100 | return IRQ_HANDLED; | 100 | return IRQ_HANDLED; |
101 | } | 101 | } |
@@ -172,10 +172,9 @@ static const struct block_device_operations axon_ram_devops = { | |||
172 | 172 | ||
173 | /** | 173 | /** |
174 | * axon_ram_probe - probe() method for platform driver | 174 | * axon_ram_probe - probe() method for platform driver |
175 | * @device, @device_id: see of_platform_driver method | 175 | * @device: see platform_driver method |
176 | */ | 176 | */ |
177 | static int axon_ram_probe(struct platform_device *device, | 177 | static int axon_ram_probe(struct platform_device *device) |
178 | const struct of_device_id *device_id) | ||
179 | { | 178 | { |
180 | static int axon_ram_bank_id = -1; | 179 | static int axon_ram_bank_id = -1; |
181 | struct axon_ram_bank *bank; | 180 | struct axon_ram_bank *bank; |
@@ -217,7 +216,7 @@ static int axon_ram_probe(struct platform_device *device, | |||
217 | AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20); | 216 | AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20); |
218 | 217 | ||
219 | bank->ph_addr = resource.start; | 218 | bank->ph_addr = resource.start; |
220 | bank->io_addr = (unsigned long) ioremap_flags( | 219 | bank->io_addr = (unsigned long) ioremap_prot( |
221 | bank->ph_addr, bank->size, _PAGE_NO_CACHE); | 220 | bank->ph_addr, bank->size, _PAGE_NO_CACHE); |
222 | if (bank->io_addr == 0) { | 221 | if (bank->io_addr == 0) { |
223 | dev_err(&device->dev, "ioremap() failed\n"); | 222 | dev_err(&device->dev, "ioremap() failed\n"); |
@@ -326,7 +325,7 @@ static struct of_device_id axon_ram_device_id[] = { | |||
326 | {} | 325 | {} |
327 | }; | 326 | }; |
328 | 327 | ||
329 | static struct of_platform_driver axon_ram_driver = { | 328 | static struct platform_driver axon_ram_driver = { |
330 | .probe = axon_ram_probe, | 329 | .probe = axon_ram_probe, |
331 | .remove = axon_ram_remove, | 330 | .remove = axon_ram_remove, |
332 | .driver = { | 331 | .driver = { |
@@ -350,7 +349,7 @@ axon_ram_init(void) | |||
350 | } | 349 | } |
351 | azfs_minor = 0; | 350 | azfs_minor = 0; |
352 | 351 | ||
353 | return of_register_platform_driver(&axon_ram_driver); | 352 | return platform_driver_register(&axon_ram_driver); |
354 | } | 353 | } |
355 | 354 | ||
356 | /** | 355 | /** |
@@ -359,7 +358,7 @@ axon_ram_init(void) | |||
359 | static void __exit | 358 | static void __exit |
360 | axon_ram_exit(void) | 359 | axon_ram_exit(void) |
361 | { | 360 | { |
362 | of_unregister_platform_driver(&axon_ram_driver); | 361 | platform_driver_unregister(&axon_ram_driver); |
363 | unregister_blkdev(azfs_major, AXON_RAM_DEVICE_NAME); | 362 | unregister_blkdev(azfs_major, AXON_RAM_DEVICE_NAME); |
364 | } | 363 | } |
365 | 364 | ||
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.c b/arch/powerpc/sysdev/bestcomm/bestcomm.c index 650256115064..b3fbb271be87 100644 --- a/arch/powerpc/sysdev/bestcomm/bestcomm.c +++ b/arch/powerpc/sysdev/bestcomm/bestcomm.c | |||
@@ -365,8 +365,7 @@ bcom_engine_cleanup(void) | |||
365 | /* OF platform driver */ | 365 | /* OF platform driver */ |
366 | /* ======================================================================== */ | 366 | /* ======================================================================== */ |
367 | 367 | ||
368 | static int __devinit mpc52xx_bcom_probe(struct platform_device *op, | 368 | static int __devinit mpc52xx_bcom_probe(struct platform_device *op) |
369 | const struct of_device_id *match) | ||
370 | { | 369 | { |
371 | struct device_node *ofn_sram; | 370 | struct device_node *ofn_sram; |
372 | struct resource res_bcom; | 371 | struct resource res_bcom; |
@@ -492,7 +491,7 @@ static struct of_device_id mpc52xx_bcom_of_match[] = { | |||
492 | MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match); | 491 | MODULE_DEVICE_TABLE(of, mpc52xx_bcom_of_match); |
493 | 492 | ||
494 | 493 | ||
495 | static struct of_platform_driver mpc52xx_bcom_of_platform_driver = { | 494 | static struct platform_driver mpc52xx_bcom_of_platform_driver = { |
496 | .probe = mpc52xx_bcom_probe, | 495 | .probe = mpc52xx_bcom_probe, |
497 | .remove = mpc52xx_bcom_remove, | 496 | .remove = mpc52xx_bcom_remove, |
498 | .driver = { | 497 | .driver = { |
@@ -510,13 +509,13 @@ static struct of_platform_driver mpc52xx_bcom_of_platform_driver = { | |||
510 | static int __init | 509 | static int __init |
511 | mpc52xx_bcom_init(void) | 510 | mpc52xx_bcom_init(void) |
512 | { | 511 | { |
513 | return of_register_platform_driver(&mpc52xx_bcom_of_platform_driver); | 512 | return platform_driver_register(&mpc52xx_bcom_of_platform_driver); |
514 | } | 513 | } |
515 | 514 | ||
516 | static void __exit | 515 | static void __exit |
517 | mpc52xx_bcom_exit(void) | 516 | mpc52xx_bcom_exit(void) |
518 | { | 517 | { |
519 | of_unregister_platform_driver(&mpc52xx_bcom_of_platform_driver); | 518 | platform_driver_unregister(&mpc52xx_bcom_of_platform_driver); |
520 | } | 519 | } |
521 | 520 | ||
522 | /* If we're not a module, we must make sure everything is setup before */ | 521 | /* If we're not a module, we must make sure everything is setup before */ |
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm.h b/arch/powerpc/sysdev/bestcomm/bestcomm.h index 23a95f80dfdb..a0e2e6b19b57 100644 --- a/arch/powerpc/sysdev/bestcomm/bestcomm.h +++ b/arch/powerpc/sysdev/bestcomm/bestcomm.h | |||
@@ -20,7 +20,7 @@ | |||
20 | * struct bcom_bd - Structure describing a generic BestComm buffer descriptor | 20 | * struct bcom_bd - Structure describing a generic BestComm buffer descriptor |
21 | * @status: The current status of this buffer. Exact meaning depends on the | 21 | * @status: The current status of this buffer. Exact meaning depends on the |
22 | * task type | 22 | * task type |
23 | * @data: An array of u32 extra data. Size of array is task dependant. | 23 | * @data: An array of u32 extra data. Size of array is task dependent. |
24 | * | 24 | * |
25 | * Note: Don't dereference a bcom_bd pointer as an array. The size of the | 25 | * Note: Don't dereference a bcom_bd pointer as an array. The size of the |
26 | * bcom_bd is variable. Use bcom_get_bd() instead. | 26 | * bcom_bd is variable. Use bcom_get_bd() instead. |
diff --git a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h index eb0d1c883c31..3b52f3ffbdf8 100644 --- a/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h +++ b/arch/powerpc/sysdev/bestcomm/bestcomm_priv.h | |||
@@ -97,7 +97,7 @@ struct bcom_task_header { | |||
97 | u8 reserved[8]; | 97 | u8 reserved[8]; |
98 | }; | 98 | }; |
99 | 99 | ||
100 | /* Descriptors stucture & co */ | 100 | /* Descriptors structure & co */ |
101 | #define BCOM_DESC_NOP 0x000001f8 | 101 | #define BCOM_DESC_NOP 0x000001f8 |
102 | #define BCOM_LCD_MASK 0x80000000 | 102 | #define BCOM_LCD_MASK 0x80000000 |
103 | #define BCOM_DRD_EXTENDED 0x40000000 | 103 | #define BCOM_DRD_EXTENDED 0x40000000 |
diff --git a/arch/powerpc/sysdev/cpm1.c b/arch/powerpc/sysdev/cpm1.c index 00852124ff4a..350787c83e22 100644 --- a/arch/powerpc/sysdev/cpm1.c +++ b/arch/powerpc/sysdev/cpm1.c | |||
@@ -56,32 +56,32 @@ static cpic8xx_t __iomem *cpic_reg; | |||
56 | 56 | ||
57 | static struct irq_host *cpm_pic_host; | 57 | static struct irq_host *cpm_pic_host; |
58 | 58 | ||
59 | static void cpm_mask_irq(unsigned int irq) | 59 | static void cpm_mask_irq(struct irq_data *d) |
60 | { | 60 | { |
61 | unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq; | 61 | unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d); |
62 | 62 | ||
63 | clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec)); | 63 | clrbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec)); |
64 | } | 64 | } |
65 | 65 | ||
66 | static void cpm_unmask_irq(unsigned int irq) | 66 | static void cpm_unmask_irq(struct irq_data *d) |
67 | { | 67 | { |
68 | unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq; | 68 | unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d); |
69 | 69 | ||
70 | setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec)); | 70 | setbits32(&cpic_reg->cpic_cimr, (1 << cpm_vec)); |
71 | } | 71 | } |
72 | 72 | ||
73 | static void cpm_end_irq(unsigned int irq) | 73 | static void cpm_end_irq(struct irq_data *d) |
74 | { | 74 | { |
75 | unsigned int cpm_vec = (unsigned int)irq_map[irq].hwirq; | 75 | unsigned int cpm_vec = (unsigned int)irqd_to_hwirq(d); |
76 | 76 | ||
77 | out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec)); | 77 | out_be32(&cpic_reg->cpic_cisr, (1 << cpm_vec)); |
78 | } | 78 | } |
79 | 79 | ||
80 | static struct irq_chip cpm_pic = { | 80 | static struct irq_chip cpm_pic = { |
81 | .name = "CPM PIC", | 81 | .name = "CPM PIC", |
82 | .mask = cpm_mask_irq, | 82 | .irq_mask = cpm_mask_irq, |
83 | .unmask = cpm_unmask_irq, | 83 | .irq_unmask = cpm_unmask_irq, |
84 | .eoi = cpm_end_irq, | 84 | .irq_eoi = cpm_end_irq, |
85 | }; | 85 | }; |
86 | 86 | ||
87 | int cpm_get_irq(void) | 87 | int cpm_get_irq(void) |
@@ -103,8 +103,8 @@ static int cpm_pic_host_map(struct irq_host *h, unsigned int virq, | |||
103 | { | 103 | { |
104 | pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw); | 104 | pr_debug("cpm_pic_host_map(%d, 0x%lx)\n", virq, hw); |
105 | 105 | ||
106 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 106 | irq_set_status_flags(virq, IRQ_LEVEL); |
107 | set_irq_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq); | 107 | irq_set_chip_and_handler(virq, &cpm_pic, handle_fasteoi_irq); |
108 | return 0; | 108 | return 0; |
109 | } | 109 | } |
110 | 110 | ||
@@ -157,7 +157,7 @@ unsigned int cpm_pic_init(void) | |||
157 | goto end; | 157 | goto end; |
158 | 158 | ||
159 | /* Initialize the CPM interrupt controller. */ | 159 | /* Initialize the CPM interrupt controller. */ |
160 | hwirq = (unsigned int)irq_map[sirq].hwirq; | 160 | hwirq = (unsigned int)virq_to_hw(sirq); |
161 | out_be32(&cpic_reg->cpic_cicr, | 161 | out_be32(&cpic_reg->cpic_cicr, |
162 | (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) | | 162 | (CICR_SCD_SCC4 | CICR_SCC_SCC3 | CICR_SCB_SCC2 | CICR_SCA_SCC1) | |
163 | ((hwirq/2) << 13) | CICR_HP_MASK); | 163 | ((hwirq/2) << 13) | CICR_HP_MASK); |
@@ -223,7 +223,7 @@ void __init cpm_reset(void) | |||
223 | 223 | ||
224 | /* Set SDMA Bus Request priority 5. | 224 | /* Set SDMA Bus Request priority 5. |
225 | * On 860T, this also enables FEC priority 6. I am not sure | 225 | * On 860T, this also enables FEC priority 6. I am not sure |
226 | * this is what we realy want for some applications, but the | 226 | * this is what we really want for some applications, but the |
227 | * manual recommends it. | 227 | * manual recommends it. |
228 | * Bit 25, FAM can also be set to use FEC aggressive mode (860T). | 228 | * Bit 25, FAM can also be set to use FEC aggressive mode (860T). |
229 | */ | 229 | */ |
diff --git a/arch/powerpc/sysdev/cpm2_pic.c b/arch/powerpc/sysdev/cpm2_pic.c index fcea4ff825dd..bcab50e2a9eb 100644 --- a/arch/powerpc/sysdev/cpm2_pic.c +++ b/arch/powerpc/sysdev/cpm2_pic.c | |||
@@ -78,10 +78,10 @@ static const u_char irq_to_siubit[] = { | |||
78 | 24, 25, 26, 27, 28, 29, 30, 31, | 78 | 24, 25, 26, 27, 28, 29, 30, 31, |
79 | }; | 79 | }; |
80 | 80 | ||
81 | static void cpm2_mask_irq(unsigned int virq) | 81 | static void cpm2_mask_irq(struct irq_data *d) |
82 | { | 82 | { |
83 | int bit, word; | 83 | int bit, word; |
84 | unsigned int irq_nr = virq_to_hw(virq); | 84 | unsigned int irq_nr = irqd_to_hwirq(d); |
85 | 85 | ||
86 | bit = irq_to_siubit[irq_nr]; | 86 | bit = irq_to_siubit[irq_nr]; |
87 | word = irq_to_siureg[irq_nr]; | 87 | word = irq_to_siureg[irq_nr]; |
@@ -90,10 +90,10 @@ static void cpm2_mask_irq(unsigned int virq) | |||
90 | out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); | 90 | out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); |
91 | } | 91 | } |
92 | 92 | ||
93 | static void cpm2_unmask_irq(unsigned int virq) | 93 | static void cpm2_unmask_irq(struct irq_data *d) |
94 | { | 94 | { |
95 | int bit, word; | 95 | int bit, word; |
96 | unsigned int irq_nr = virq_to_hw(virq); | 96 | unsigned int irq_nr = irqd_to_hwirq(d); |
97 | 97 | ||
98 | bit = irq_to_siubit[irq_nr]; | 98 | bit = irq_to_siubit[irq_nr]; |
99 | word = irq_to_siureg[irq_nr]; | 99 | word = irq_to_siureg[irq_nr]; |
@@ -102,10 +102,10 @@ static void cpm2_unmask_irq(unsigned int virq) | |||
102 | out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); | 102 | out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); |
103 | } | 103 | } |
104 | 104 | ||
105 | static void cpm2_ack(unsigned int virq) | 105 | static void cpm2_ack(struct irq_data *d) |
106 | { | 106 | { |
107 | int bit, word; | 107 | int bit, word; |
108 | unsigned int irq_nr = virq_to_hw(virq); | 108 | unsigned int irq_nr = irqd_to_hwirq(d); |
109 | 109 | ||
110 | bit = irq_to_siubit[irq_nr]; | 110 | bit = irq_to_siubit[irq_nr]; |
111 | word = irq_to_siureg[irq_nr]; | 111 | word = irq_to_siureg[irq_nr]; |
@@ -113,34 +113,27 @@ static void cpm2_ack(unsigned int virq) | |||
113 | out_be32(&cpm2_intctl->ic_sipnrh + word, 1 << bit); | 113 | out_be32(&cpm2_intctl->ic_sipnrh + word, 1 << bit); |
114 | } | 114 | } |
115 | 115 | ||
116 | static void cpm2_end_irq(unsigned int virq) | 116 | static void cpm2_end_irq(struct irq_data *d) |
117 | { | 117 | { |
118 | struct irq_desc *desc; | ||
119 | int bit, word; | 118 | int bit, word; |
120 | unsigned int irq_nr = virq_to_hw(virq); | 119 | unsigned int irq_nr = irqd_to_hwirq(d); |
121 | 120 | ||
122 | desc = irq_to_desc(irq_nr); | 121 | bit = irq_to_siubit[irq_nr]; |
123 | if (!(desc->status & (IRQ_DISABLED|IRQ_INPROGRESS)) | 122 | word = irq_to_siureg[irq_nr]; |
124 | && desc->action) { | ||
125 | |||
126 | bit = irq_to_siubit[irq_nr]; | ||
127 | word = irq_to_siureg[irq_nr]; | ||
128 | 123 | ||
129 | ppc_cached_irq_mask[word] |= 1 << bit; | 124 | ppc_cached_irq_mask[word] |= 1 << bit; |
130 | out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); | 125 | out_be32(&cpm2_intctl->ic_simrh + word, ppc_cached_irq_mask[word]); |
131 | 126 | ||
132 | /* | 127 | /* |
133 | * Work around large numbers of spurious IRQs on PowerPC 82xx | 128 | * Work around large numbers of spurious IRQs on PowerPC 82xx |
134 | * systems. | 129 | * systems. |
135 | */ | 130 | */ |
136 | mb(); | 131 | mb(); |
137 | } | ||
138 | } | 132 | } |
139 | 133 | ||
140 | static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type) | 134 | static int cpm2_set_irq_type(struct irq_data *d, unsigned int flow_type) |
141 | { | 135 | { |
142 | unsigned int src = virq_to_hw(virq); | 136 | unsigned int src = irqd_to_hwirq(d); |
143 | struct irq_desc *desc = irq_to_desc(virq); | ||
144 | unsigned int vold, vnew, edibit; | 137 | unsigned int vold, vnew, edibit; |
145 | 138 | ||
146 | /* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or | 139 | /* Port C interrupts are either IRQ_TYPE_EDGE_FALLING or |
@@ -162,13 +155,11 @@ static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type) | |||
162 | goto err_sense; | 155 | goto err_sense; |
163 | } | 156 | } |
164 | 157 | ||
165 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | 158 | irqd_set_trigger_type(d, flow_type); |
166 | desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; | 159 | if (flow_type & IRQ_TYPE_LEVEL_LOW) |
167 | if (flow_type & IRQ_TYPE_LEVEL_LOW) { | 160 | __irq_set_handler_locked(d->irq, handle_level_irq); |
168 | desc->status |= IRQ_LEVEL; | 161 | else |
169 | desc->handle_irq = handle_level_irq; | 162 | __irq_set_handler_locked(d->irq, handle_edge_irq); |
170 | } else | ||
171 | desc->handle_irq = handle_edge_irq; | ||
172 | 163 | ||
173 | /* internal IRQ senses are LEVEL_LOW | 164 | /* internal IRQ senses are LEVEL_LOW |
174 | * EXT IRQ and Port C IRQ senses are programmable | 165 | * EXT IRQ and Port C IRQ senses are programmable |
@@ -179,7 +170,8 @@ static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type) | |||
179 | if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0) | 170 | if (src >= CPM2_IRQ_PORTC15 && src <= CPM2_IRQ_PORTC0) |
180 | edibit = (31 - (CPM2_IRQ_PORTC0 - src)); | 171 | edibit = (31 - (CPM2_IRQ_PORTC0 - src)); |
181 | else | 172 | else |
182 | return (flow_type & IRQ_TYPE_LEVEL_LOW) ? 0 : -EINVAL; | 173 | return (flow_type & IRQ_TYPE_LEVEL_LOW) ? |
174 | IRQ_SET_MASK_OK_NOCOPY : -EINVAL; | ||
183 | 175 | ||
184 | vold = in_be32(&cpm2_intctl->ic_siexr); | 176 | vold = in_be32(&cpm2_intctl->ic_siexr); |
185 | 177 | ||
@@ -190,7 +182,7 @@ static int cpm2_set_irq_type(unsigned int virq, unsigned int flow_type) | |||
190 | 182 | ||
191 | if (vold != vnew) | 183 | if (vold != vnew) |
192 | out_be32(&cpm2_intctl->ic_siexr, vnew); | 184 | out_be32(&cpm2_intctl->ic_siexr, vnew); |
193 | return 0; | 185 | return IRQ_SET_MASK_OK_NOCOPY; |
194 | 186 | ||
195 | err_sense: | 187 | err_sense: |
196 | pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type); | 188 | pr_err("CPM2 PIC: sense type 0x%x not supported\n", flow_type); |
@@ -199,11 +191,12 @@ err_sense: | |||
199 | 191 | ||
200 | static struct irq_chip cpm2_pic = { | 192 | static struct irq_chip cpm2_pic = { |
201 | .name = "CPM2 SIU", | 193 | .name = "CPM2 SIU", |
202 | .mask = cpm2_mask_irq, | 194 | .irq_mask = cpm2_mask_irq, |
203 | .unmask = cpm2_unmask_irq, | 195 | .irq_unmask = cpm2_unmask_irq, |
204 | .ack = cpm2_ack, | 196 | .irq_ack = cpm2_ack, |
205 | .eoi = cpm2_end_irq, | 197 | .irq_eoi = cpm2_end_irq, |
206 | .set_type = cpm2_set_irq_type, | 198 | .irq_set_type = cpm2_set_irq_type, |
199 | .flags = IRQCHIP_EOI_IF_HANDLED, | ||
207 | }; | 200 | }; |
208 | 201 | ||
209 | unsigned int cpm2_get_irq(void) | 202 | unsigned int cpm2_get_irq(void) |
@@ -226,8 +219,8 @@ static int cpm2_pic_host_map(struct irq_host *h, unsigned int virq, | |||
226 | { | 219 | { |
227 | pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw); | 220 | pr_debug("cpm2_pic_host_map(%d, 0x%lx)\n", virq, hw); |
228 | 221 | ||
229 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 222 | irq_set_status_flags(virq, IRQ_LEVEL); |
230 | set_irq_chip_and_handler(virq, &cpm2_pic, handle_level_irq); | 223 | irq_set_chip_and_handler(virq, &cpm2_pic, handle_level_irq); |
231 | return 0; | 224 | return 0; |
232 | } | 225 | } |
233 | 226 | ||
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index 559db2b846a9..8e9e06a7ca59 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c | |||
@@ -70,6 +70,8 @@ static int iommu_table_dart_inited; | |||
70 | static int dart_dirty; | 70 | static int dart_dirty; |
71 | static int dart_is_u4; | 71 | static int dart_is_u4; |
72 | 72 | ||
73 | #define DART_U4_BYPASS_BASE 0x8000000000ull | ||
74 | |||
73 | #define DBG(...) | 75 | #define DBG(...) |
74 | 76 | ||
75 | static inline void dart_tlb_invalidate_all(void) | 77 | static inline void dart_tlb_invalidate_all(void) |
@@ -292,27 +294,67 @@ static void iommu_table_dart_setup(void) | |||
292 | set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map); | 294 | set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map); |
293 | } | 295 | } |
294 | 296 | ||
295 | static void pci_dma_dev_setup_dart(struct pci_dev *dev) | 297 | static void dma_dev_setup_dart(struct device *dev) |
296 | { | 298 | { |
297 | /* We only have one iommu table on the mac for now, which makes | 299 | /* We only have one iommu table on the mac for now, which makes |
298 | * things simple. Setup all PCI devices to point to this table | 300 | * things simple. Setup all PCI devices to point to this table |
299 | */ | 301 | */ |
300 | set_iommu_table_base(&dev->dev, &iommu_table_dart); | 302 | if (get_dma_ops(dev) == &dma_direct_ops) |
303 | set_dma_offset(dev, DART_U4_BYPASS_BASE); | ||
304 | else | ||
305 | set_iommu_table_base(dev, &iommu_table_dart); | ||
301 | } | 306 | } |
302 | 307 | ||
303 | static void pci_dma_bus_setup_dart(struct pci_bus *bus) | 308 | static void pci_dma_dev_setup_dart(struct pci_dev *dev) |
304 | { | 309 | { |
305 | struct device_node *dn; | 310 | dma_dev_setup_dart(&dev->dev); |
311 | } | ||
306 | 312 | ||
313 | static void pci_dma_bus_setup_dart(struct pci_bus *bus) | ||
314 | { | ||
307 | if (!iommu_table_dart_inited) { | 315 | if (!iommu_table_dart_inited) { |
308 | iommu_table_dart_inited = 1; | 316 | iommu_table_dart_inited = 1; |
309 | iommu_table_dart_setup(); | 317 | iommu_table_dart_setup(); |
310 | } | 318 | } |
319 | } | ||
311 | 320 | ||
312 | dn = pci_bus_to_OF_node(bus); | 321 | static bool dart_device_on_pcie(struct device *dev) |
322 | { | ||
323 | struct device_node *np = of_node_get(dev->of_node); | ||
324 | |||
325 | while(np) { | ||
326 | if (of_device_is_compatible(np, "U4-pcie") || | ||
327 | of_device_is_compatible(np, "u4-pcie")) { | ||
328 | of_node_put(np); | ||
329 | return true; | ||
330 | } | ||
331 | np = of_get_next_parent(np); | ||
332 | } | ||
333 | return false; | ||
334 | } | ||
313 | 335 | ||
314 | if (dn) | 336 | static int dart_dma_set_mask(struct device *dev, u64 dma_mask) |
315 | PCI_DN(dn)->iommu_table = &iommu_table_dart; | 337 | { |
338 | if (!dev->dma_mask || !dma_supported(dev, dma_mask)) | ||
339 | return -EIO; | ||
340 | |||
341 | /* U4 supports a DART bypass, we use it for 64-bit capable | ||
342 | * devices to improve performance. However, that only works | ||
343 | * for devices connected to U4's own PCIe interface, not bridged | ||
344 | * through hypertransport. We need the device to support at | ||
345 | * least 40 bits of addresses. | ||
346 | */ | ||
347 | if (dart_device_on_pcie(dev) && dma_mask >= DMA_BIT_MASK(40)) { | ||
348 | dev_info(dev, "Using 64-bit DMA iommu bypass\n"); | ||
349 | set_dma_ops(dev, &dma_direct_ops); | ||
350 | } else { | ||
351 | dev_info(dev, "Using 32-bit DMA via iommu\n"); | ||
352 | set_dma_ops(dev, &dma_iommu_ops); | ||
353 | } | ||
354 | dma_dev_setup_dart(dev); | ||
355 | |||
356 | *dev->dma_mask = dma_mask; | ||
357 | return 0; | ||
316 | } | 358 | } |
317 | 359 | ||
318 | void __init iommu_init_early_dart(void) | 360 | void __init iommu_init_early_dart(void) |
@@ -324,24 +366,29 @@ void __init iommu_init_early_dart(void) | |||
324 | if (dn == NULL) { | 366 | if (dn == NULL) { |
325 | dn = of_find_compatible_node(NULL, "dart", "u4-dart"); | 367 | dn = of_find_compatible_node(NULL, "dart", "u4-dart"); |
326 | if (dn == NULL) | 368 | if (dn == NULL) |
327 | goto bail; | 369 | return; /* use default dma_direct_ops */ |
328 | dart_is_u4 = 1; | 370 | dart_is_u4 = 1; |
329 | } | 371 | } |
330 | 372 | ||
373 | /* Initialize the DART HW */ | ||
374 | if (dart_init(dn) != 0) | ||
375 | goto bail; | ||
376 | |||
331 | /* Setup low level TCE operations for the core IOMMU code */ | 377 | /* Setup low level TCE operations for the core IOMMU code */ |
332 | ppc_md.tce_build = dart_build; | 378 | ppc_md.tce_build = dart_build; |
333 | ppc_md.tce_free = dart_free; | 379 | ppc_md.tce_free = dart_free; |
334 | ppc_md.tce_flush = dart_flush; | 380 | ppc_md.tce_flush = dart_flush; |
335 | 381 | ||
336 | /* Initialize the DART HW */ | 382 | /* Setup bypass if supported */ |
337 | if (dart_init(dn) == 0) { | 383 | if (dart_is_u4) |
338 | ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_dart; | 384 | ppc_md.dma_set_mask = dart_dma_set_mask; |
339 | ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_dart; | ||
340 | 385 | ||
341 | /* Setup pci_dma ops */ | 386 | ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_dart; |
342 | set_pci_dma_ops(&dma_iommu_ops); | 387 | ppc_md.pci_dma_bus_setup = pci_dma_bus_setup_dart; |
343 | return; | 388 | |
344 | } | 389 | /* Setup pci_dma ops */ |
390 | set_pci_dma_ops(&dma_iommu_ops); | ||
391 | return; | ||
345 | 392 | ||
346 | bail: | 393 | bail: |
347 | /* If init failed, use direct iommu and null setup functions */ | 394 | /* If init failed, use direct iommu and null setup functions */ |
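
With dart_dma_set_mask() wired into ppc_md.dma_set_mask, the bypass is picked purely from the mask a driver requests, since the generic powerpc dma_set_mask() routes through that hook. A rough driver-side sketch, assuming a hypothetical 64-bit capable PCI device sitting on the U4 PCIe port (the foo_* name is illustrative only):

    #include <linux/pci.h>
    #include <linux/dma-mapping.h>

    static int foo_setup_dma(struct pci_dev *pdev)
    {
    	/* >= 40-bit mask on a U4 PCIe device: dma_direct_ops, DART bypassed */
    	if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)))
    		return 0;

    	/* anything less capable falls back to 32-bit DMA through the DART */
    	return dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
    }
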
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h b/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h new file mode 100644 index 000000000000..60c9c0bd5ba2 --- /dev/null +++ b/arch/powerpc/sysdev/fsl_85xx_cache_ctlr.h | |||
@@ -0,0 +1,101 @@ | |||
1 | /* | ||
2 | * Copyright 2009-2010 Freescale Semiconductor, Inc | ||
3 | * | ||
4 | * QorIQ based Cache Controller Memory Mapped Registers | ||
5 | * | ||
6 | * Author: Vivek Mahajan <vivek.mahajan@freescale.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | ||
22 | |||
23 | #ifndef __FSL_85XX_CACHE_CTLR_H__ | ||
24 | #define __FSL_85XX_CACHE_CTLR_H__ | ||
25 | |||
26 | #define L2CR_L2FI 0x40000000 /* L2 flash invalidate */ | ||
27 | #define L2CR_L2IO 0x00200000 /* L2 instruction only */ | ||
28 | #define L2CR_SRAM_ZERO 0x00000000 /* L2SRAM zero size */ | ||
29 | #define L2CR_SRAM_FULL 0x00010000 /* L2SRAM full size */ | ||
30 | #define L2CR_SRAM_HALF 0x00020000 /* L2SRAM half size */ | ||
31 | #define L2CR_SRAM_TWO_HALFS 0x00030000 /* L2SRAM two half sizes */ | ||
32 | #define L2CR_SRAM_QUART 0x00040000 /* L2SRAM one quarter size */ | ||
33 | #define L2CR_SRAM_TWO_QUARTS 0x00050000 /* L2SRAM two quarter size */ | ||
34 | #define L2CR_SRAM_EIGHTH 0x00060000 /* L2SRAM one eighth size */ | ||
35 | #define L2CR_SRAM_TWO_EIGHTH 0x00070000 /* L2SRAM two eighth size */ | ||
36 | |||
37 | #define L2SRAM_OPTIMAL_SZ_SHIFT 0x00000003 /* Optimum size for L2SRAM */ | ||
38 | |||
39 | #define L2SRAM_BAR_MSK_LO18 0xFFFFC000 /* Lower 18 bits */ | ||
40 | #define L2SRAM_BARE_MSK_HI4 0x0000000F /* Upper 4 bits */ | ||
41 | |||
42 | enum cache_sram_lock_ways { | ||
43 | LOCK_WAYS_ZERO, | ||
44 | LOCK_WAYS_EIGHTH, | ||
45 | LOCK_WAYS_TWO_EIGHTH, | ||
46 | LOCK_WAYS_HALF = 4, | ||
47 | LOCK_WAYS_FULL = 8, | ||
48 | }; | ||
49 | |||
50 | struct mpc85xx_l2ctlr { | ||
51 | u32 ctl; /* 0x000 - L2 control */ | ||
52 | u8 res1[0xC]; | ||
53 | u32 ewar0; /* 0x010 - External write address 0 */ | ||
54 | u32 ewarea0; /* 0x014 - External write address extended 0 */ | ||
55 | u32 ewcr0; /* 0x018 - External write ctrl */ | ||
56 | u8 res2[4]; | ||
57 | u32 ewar1; /* 0x020 - External write address 1 */ | ||
58 | u32 ewarea1; /* 0x024 - External write address extended 1 */ | ||
59 | u32 ewcr1; /* 0x028 - External write ctrl 1 */ | ||
60 | u8 res3[4]; | ||
61 | u32 ewar2; /* 0x030 - External write address 2 */ | ||
62 | u32 ewarea2; /* 0x034 - External write address extended 2 */ | ||
63 | u32 ewcr2; /* 0x038 - External write ctrl 2 */ | ||
64 | u8 res4[4]; | ||
65 | u32 ewar3; /* 0x040 - External write address 3 */ | ||
66 | u32 ewarea3; /* 0x044 - External write address extended 3 */ | ||
67 | u32 ewcr3; /* 0x048 - External write ctrl 3 */ | ||
68 | u8 res5[0xB4]; | ||
69 | u32 srbar0; /* 0x100 - SRAM base address 0 */ | ||
70 | u32 srbarea0; /* 0x104 - SRAM base addr reg ext address 0 */ | ||
71 | u32 srbar1; /* 0x108 - SRAM base address 1 */ | ||
72 | u32 srbarea1; /* 0x10C - SRAM base addr reg ext address 1 */ | ||
73 | u8 res6[0xCF0]; | ||
74 | u32 errinjhi; /* 0xE00 - Error injection mask high */ | ||
75 | u32 errinjlo; /* 0xE04 - Error injection mask low */ | ||
76 | u32 errinjctl; /* 0xE08 - Error injection tag/ecc control */ | ||
77 | u8 res7[0x14]; | ||
78 | u32 captdatahi; /* 0xE20 - Error data high capture */ | ||
79 | u32 captdatalo; /* 0xE24 - Error data low capture */ | ||
80 | u32 captecc; /* 0xE28 - Error syndrome */ | ||
81 | u8 res8[0x14]; | ||
82 | u32 errdet; /* 0xE40 - Error detect */ | ||
83 | u32 errdis; /* 0xE44 - Error disable */ | ||
84 | u32 errinten; /* 0xE48 - Error interrupt enable */ | ||
85 | u32 errattr; /* 0xE4c - Error attribute capture */ | ||
86 | u32 erradrrl; /* 0xE50 - Error address capture low */ | ||
87 | u32 erradrrh; /* 0xE54 - Error address capture high */ | ||
88 | u32 errctl; /* 0xE58 - Error control */ | ||
89 | u8 res9[0x1A4]; | ||
90 | }; | ||
91 | |||
92 | struct sram_parameters { | ||
93 | unsigned int sram_size; | ||
94 | uint64_t sram_offset; | ||
95 | }; | ||
96 | |||
97 | extern int instantiate_cache_sram(struct platform_device *dev, | ||
98 | struct sram_parameters sram_params); | ||
99 | extern void remove_cache_sram(struct platform_device *dev); | ||
100 | |||
101 | #endif /* __FSL_85XX_CACHE_CTLR_H__ */ | ||
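
The two BAR masks above encode how a 36-bit SRAM base is split across the srbar0/srbarea0 pair of the register block. A small illustration of the arithmetic the L2 controller probe performs (the helper name and the address value are hypothetical, not part of this header):

    static inline void l2sram_bar_split(u64 offset, u32 *bar, u32 *barea)
    {
    	/* e.g. offset = 0xf80000000ULL (36-bit physical address) */
    	*bar   = (u32)offset & L2SRAM_BAR_MSK_LO18;	/* -> 0x80000000 */
    	*barea = (offset >> 32) & L2SRAM_BARE_MSK_HI4;	/* -> 0x0000000f */
    }
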
diff --git a/arch/powerpc/sysdev/fsl_85xx_cache_sram.c b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c new file mode 100644 index 000000000000..116415899176 --- /dev/null +++ b/arch/powerpc/sysdev/fsl_85xx_cache_sram.c | |||
@@ -0,0 +1,159 @@ | |||
1 | /* | ||
2 | * Copyright 2009-2010 Freescale Semiconductor, Inc. | ||
3 | * | ||
4 | * Simple memory allocator abstraction for QorIQ (P1/P2) based Cache-SRAM | ||
5 | * | ||
6 | * Author: Vivek Mahajan <vivek.mahajan@freescale.com> | ||
7 | * | ||
8 | * This file is derived from the original work done | ||
9 | * by Sylvain Munaut for the Bestcomm SRAM allocator. | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify it | ||
12 | * under the terms of the GNU General Public License as published by the | ||
13 | * Free Software Foundation; either version 2 of the License, or (at your | ||
14 | * option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | */ | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/err.h> | ||
29 | #include <linux/of_platform.h> | ||
30 | #include <asm/pgtable.h> | ||
31 | #include <asm/fsl_85xx_cache_sram.h> | ||
32 | |||
33 | #include "fsl_85xx_cache_ctlr.h" | ||
34 | |||
35 | struct mpc85xx_cache_sram *cache_sram; | ||
36 | |||
37 | void *mpc85xx_cache_sram_alloc(unsigned int size, | ||
38 | phys_addr_t *phys, unsigned int align) | ||
39 | { | ||
40 | unsigned long offset; | ||
41 | unsigned long flags; | ||
42 | |||
43 | if (unlikely(cache_sram == NULL)) | ||
44 | return NULL; | ||
45 | |||
46 | if (!size || (size > cache_sram->size) || (align > cache_sram->size)) { | ||
47 | pr_err("%s(): size(=%x) or align(=%x) zero or too big\n", | ||
48 | __func__, size, align); | ||
49 | return NULL; | ||
50 | } | ||
51 | |||
52 | if ((align & (align - 1)) || align <= 1) { | ||
53 | pr_err("%s(): align(=%x) must be power of two and >1\n", | ||
54 | __func__, align); | ||
55 | return NULL; | ||
56 | } | ||
57 | |||
58 | spin_lock_irqsave(&cache_sram->lock, flags); | ||
59 | offset = rh_alloc_align(cache_sram->rh, size, align, NULL); | ||
60 | spin_unlock_irqrestore(&cache_sram->lock, flags); | ||
61 | |||
62 | if (IS_ERR_VALUE(offset)) | ||
63 | return NULL; | ||
64 | |||
65 | *phys = cache_sram->base_phys + offset; | ||
66 | |||
67 | return (unsigned char *)cache_sram->base_virt + offset; | ||
68 | } | ||
69 | EXPORT_SYMBOL(mpc85xx_cache_sram_alloc); | ||
70 | |||
71 | void mpc85xx_cache_sram_free(void *ptr) | ||
72 | { | ||
73 | unsigned long flags; | ||
74 | BUG_ON(!ptr); | ||
75 | |||
76 | spin_lock_irqsave(&cache_sram->lock, flags); | ||
77 | rh_free(cache_sram->rh, ptr - cache_sram->base_virt); | ||
78 | spin_unlock_irqrestore(&cache_sram->lock, flags); | ||
79 | } | ||
80 | EXPORT_SYMBOL(mpc85xx_cache_sram_free); | ||
81 | |||
82 | int __init instantiate_cache_sram(struct platform_device *dev, | ||
83 | struct sram_parameters sram_params) | ||
84 | { | ||
85 | int ret = 0; | ||
86 | |||
87 | if (cache_sram) { | ||
88 | dev_err(&dev->dev, "Already initialized cache-sram\n"); | ||
89 | return -EBUSY; | ||
90 | } | ||
91 | |||
92 | cache_sram = kzalloc(sizeof(struct mpc85xx_cache_sram), GFP_KERNEL); | ||
93 | if (!cache_sram) { | ||
94 | dev_err(&dev->dev, "Out of memory for cache_sram structure\n"); | ||
95 | return -ENOMEM; | ||
96 | } | ||
97 | |||
98 | cache_sram->base_phys = sram_params.sram_offset; | ||
99 | cache_sram->size = sram_params.sram_size; | ||
100 | |||
101 | if (!request_mem_region(cache_sram->base_phys, cache_sram->size, | ||
102 | "fsl_85xx_cache_sram")) { | ||
103 | dev_err(&dev->dev, "%s: request memory failed\n", | ||
104 | dev->dev.of_node->full_name); | ||
105 | ret = -ENXIO; | ||
106 | goto out_free; | ||
107 | } | ||
108 | |||
109 | cache_sram->base_virt = ioremap_prot(cache_sram->base_phys, | ||
110 | cache_sram->size, _PAGE_COHERENT | PAGE_KERNEL); | ||
111 | if (!cache_sram->base_virt) { | ||
112 | dev_err(&dev->dev, "%s: ioremap_prot failed\n", | ||
113 | dev->dev.of_node->full_name); | ||
114 | ret = -ENOMEM; | ||
115 | goto out_release; | ||
116 | } | ||
117 | |||
118 | cache_sram->rh = rh_create(sizeof(unsigned int)); | ||
119 | if (IS_ERR(cache_sram->rh)) { | ||
120 | dev_err(&dev->dev, "%s: Unable to create remote heap\n", | ||
121 | dev->dev.of_node->full_name); | ||
122 | ret = PTR_ERR(cache_sram->rh); | ||
123 | goto out_unmap; | ||
124 | } | ||
125 | |||
126 | rh_attach_region(cache_sram->rh, 0, cache_sram->size); | ||
127 | spin_lock_init(&cache_sram->lock); | ||
128 | |||
129 | dev_info(&dev->dev, "[base:0x%llx, size:0x%x] configured and loaded\n", | ||
130 | (unsigned long long)cache_sram->base_phys, cache_sram->size); | ||
131 | |||
132 | return 0; | ||
133 | |||
134 | out_unmap: | ||
135 | iounmap(cache_sram->base_virt); | ||
136 | |||
137 | out_release: | ||
138 | release_mem_region(cache_sram->base_phys, cache_sram->size); | ||
139 | |||
140 | out_free: | ||
141 | kfree(cache_sram); | ||
142 | return ret; | ||
143 | } | ||
144 | |||
145 | void remove_cache_sram(struct platform_device *dev) | ||
146 | { | ||
147 | BUG_ON(!cache_sram); | ||
148 | |||
149 | rh_detach_region(cache_sram->rh, 0, cache_sram->size); | ||
150 | rh_destroy(cache_sram->rh); | ||
151 | |||
152 | iounmap(cache_sram->base_virt); | ||
153 | release_mem_region(cache_sram->base_phys, cache_sram->size); | ||
154 | |||
155 | kfree(cache_sram); | ||
156 | cache_sram = NULL; | ||
157 | |||
158 | dev_info(&dev->dev, "MPC85xx Cache-SRAM driver unloaded\n"); | ||
159 | } | ||
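
The pool is consumed through the two exported symbols above. A minimal usage sketch for a hypothetical in-kernel user (names and sizes are illustrative; the pool must already have been instantiated by the L2 controller driver):

    #include <linux/errno.h>
    #include <asm/fsl_85xx_cache_sram.h>

    static void *sram_buf;
    static phys_addr_t sram_buf_phys;

    static int foo_get_sram(void)
    {
    	/* 4 KiB, 256-byte aligned; NULL means no pool or no free space */
    	sram_buf = mpc85xx_cache_sram_alloc(4096, &sram_buf_phys, 256);
    	return sram_buf ? 0 : -ENOMEM;
    }

    static void foo_put_sram(void)
    {
    	if (sram_buf)
    		mpc85xx_cache_sram_free(sram_buf);
    }
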
diff --git a/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c new file mode 100644 index 000000000000..5f88797dce73 --- /dev/null +++ b/arch/powerpc/sysdev/fsl_85xx_l2ctlr.c | |||
@@ -0,0 +1,230 @@ | |||
1 | /* | ||
2 | * Copyright 2009-2010 Freescale Semiconductor, Inc. | ||
3 | * | ||
4 | * QorIQ (P1/P2) L2 controller init for Cache-SRAM instantiation | ||
5 | * | ||
6 | * Author: Vivek Mahajan <vivek.mahajan@freescale.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/kernel.h> | ||
24 | #include <linux/of_platform.h> | ||
25 | #include <asm/io.h> | ||
26 | |||
27 | #include "fsl_85xx_cache_ctlr.h" | ||
28 | |||
29 | static char *sram_size; | ||
30 | static char *sram_offset; | ||
31 | struct mpc85xx_l2ctlr __iomem *l2ctlr; | ||
32 | |||
33 | static long get_cache_sram_size(void) | ||
34 | { | ||
35 | unsigned long val; | ||
36 | |||
37 | if (!sram_size || (strict_strtoul(sram_size, 0, &val) < 0)) | ||
38 | return -EINVAL; | ||
39 | |||
40 | return val; | ||
41 | } | ||
42 | |||
43 | static long get_cache_sram_offset(void) | ||
44 | { | ||
45 | unsigned long val; | ||
46 | |||
47 | if (!sram_offset || (strict_strtoul(sram_offset, 0, &val) < 0)) | ||
48 | return -EINVAL; | ||
49 | |||
50 | return val; | ||
51 | } | ||
52 | |||
53 | static int __init get_size_from_cmdline(char *str) | ||
54 | { | ||
55 | if (!str) | ||
56 | return 0; | ||
57 | |||
58 | sram_size = str; | ||
59 | return 1; | ||
60 | } | ||
61 | |||
62 | static int __init get_offset_from_cmdline(char *str) | ||
63 | { | ||
64 | if (!str) | ||
65 | return 0; | ||
66 | |||
67 | sram_offset = str; | ||
68 | return 1; | ||
69 | } | ||
70 | |||
71 | __setup("cache-sram-size=", get_size_from_cmdline); | ||
72 | __setup("cache-sram-offset=", get_offset_from_cmdline); | ||
73 | |||
74 | static int __devinit mpc85xx_l2ctlr_of_probe(struct platform_device *dev) | ||
75 | { | ||
76 | long rval; | ||
77 | unsigned int rem; | ||
78 | unsigned char ways; | ||
79 | const unsigned int *prop; | ||
80 | unsigned int l2cache_size; | ||
81 | struct sram_parameters sram_params; | ||
82 | |||
83 | if (!dev->dev.of_node) { | ||
84 | dev_err(&dev->dev, "Device's OF-node is NULL\n"); | ||
85 | return -EINVAL; | ||
86 | } | ||
87 | |||
88 | prop = of_get_property(dev->dev.of_node, "cache-size", NULL); | ||
89 | if (!prop) { | ||
90 | dev_err(&dev->dev, "Missing L2 cache-size\n"); | ||
91 | return -EINVAL; | ||
92 | } | ||
93 | l2cache_size = *prop; | ||
94 | |||
95 | sram_params.sram_size = get_cache_sram_size(); | ||
96 | if ((int)sram_params.sram_size <= 0) { | ||
97 | dev_err(&dev->dev, | ||
98 | "Entire L2 as cache, Aborting Cache-SRAM stuff\n"); | ||
99 | return -EINVAL; | ||
100 | } | ||
101 | |||
102 | sram_params.sram_offset = get_cache_sram_offset(); | ||
103 | if ((int64_t)sram_params.sram_offset <= 0) { | ||
104 | dev_err(&dev->dev, | ||
105 | "Entire L2 as cache, provide a valid sram offset\n"); | ||
106 | return -EINVAL; | ||
107 | } | ||
108 | |||
109 | |||
110 | rem = l2cache_size % sram_params.sram_size; | ||
111 | ways = LOCK_WAYS_FULL * sram_params.sram_size / l2cache_size; | ||
112 | if (rem || (ways & (ways - 1))) { | ||
113 | dev_err(&dev->dev, "Illegal cache-sram-size in command line\n"); | ||
114 | return -EINVAL; | ||
115 | } | ||
116 | |||
117 | l2ctlr = of_iomap(dev->dev.of_node, 0); | ||
118 | if (!l2ctlr) { | ||
119 | dev_err(&dev->dev, "Can't map L2 controller\n"); | ||
120 | return -EINVAL; | ||
121 | } | ||
122 | |||
123 | /* | ||
124 | * Write bits[0-17] to srbar0 | ||
125 | */ | ||
126 | out_be32(&l2ctlr->srbar0, | ||
127 | sram_params.sram_offset & L2SRAM_BAR_MSK_LO18); | ||
128 | |||
129 | /* | ||
130 | * Write bits[18-21] to srbare0 | ||
131 | */ | ||
132 | #ifdef CONFIG_PHYS_64BIT | ||
133 | out_be32(&l2ctlr->srbarea0, | ||
134 | (sram_params.sram_offset >> 32) & L2SRAM_BARE_MSK_HI4); | ||
135 | #endif | ||
136 | |||
137 | clrsetbits_be32(&l2ctlr->ctl, L2CR_L2E, L2CR_L2FI); | ||
138 | |||
139 | switch (ways) { | ||
140 | case LOCK_WAYS_EIGHTH: | ||
141 | setbits32(&l2ctlr->ctl, | ||
142 | L2CR_L2E | L2CR_L2FI | L2CR_SRAM_EIGHTH); | ||
143 | break; | ||
144 | |||
145 | case LOCK_WAYS_TWO_EIGHTH: | ||
146 | setbits32(&l2ctlr->ctl, | ||
147 | L2CR_L2E | L2CR_L2FI | L2CR_SRAM_QUART); | ||
148 | break; | ||
149 | |||
150 | case LOCK_WAYS_HALF: | ||
151 | setbits32(&l2ctlr->ctl, | ||
152 | L2CR_L2E | L2CR_L2FI | L2CR_SRAM_HALF); | ||
153 | break; | ||
154 | |||
155 | case LOCK_WAYS_FULL: | ||
156 | default: | ||
157 | setbits32(&l2ctlr->ctl, | ||
158 | L2CR_L2E | L2CR_L2FI | L2CR_SRAM_FULL); | ||
159 | break; | ||
160 | } | ||
161 | eieio(); | ||
162 | |||
163 | rval = instantiate_cache_sram(dev, sram_params); | ||
164 | if (rval < 0) { | ||
165 | dev_err(&dev->dev, "Can't instantiate Cache-SRAM\n"); | ||
166 | iounmap(l2ctlr); | ||
167 | return -EINVAL; | ||
168 | } | ||
169 | |||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | static int __devexit mpc85xx_l2ctlr_of_remove(struct platform_device *dev) | ||
174 | { | ||
175 | BUG_ON(!l2ctlr); | ||
176 | |||
177 | iounmap(l2ctlr); | ||
178 | remove_cache_sram(dev); | ||
179 | dev_info(&dev->dev, "MPC85xx L2 controller unloaded\n"); | ||
180 | |||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | static struct of_device_id mpc85xx_l2ctlr_of_match[] = { | ||
185 | { | ||
186 | .compatible = "fsl,p2020-l2-cache-controller", | ||
187 | }, | ||
188 | { | ||
189 | .compatible = "fsl,p2010-l2-cache-controller", | ||
190 | }, | ||
191 | { | ||
192 | .compatible = "fsl,p1020-l2-cache-controller", | ||
193 | }, | ||
194 | { | ||
195 | .compatible = "fsl,p1011-l2-cache-controller", | ||
196 | }, | ||
197 | { | ||
198 | .compatible = "fsl,p1013-l2-cache-controller", | ||
199 | }, | ||
200 | { | ||
201 | .compatible = "fsl,p1022-l2-cache-controller", | ||
202 | }, | ||
203 | {}, | ||
204 | }; | ||
205 | |||
206 | static struct platform_driver mpc85xx_l2ctlr_of_platform_driver = { | ||
207 | .driver = { | ||
208 | .name = "fsl-l2ctlr", | ||
209 | .owner = THIS_MODULE, | ||
210 | .of_match_table = mpc85xx_l2ctlr_of_match, | ||
211 | }, | ||
212 | .probe = mpc85xx_l2ctlr_of_probe, | ||
213 | .remove = __devexit_p(mpc85xx_l2ctlr_of_remove), | ||
214 | }; | ||
215 | |||
216 | static __init int mpc85xx_l2ctlr_of_init(void) | ||
217 | { | ||
218 | return platform_driver_register(&mpc85xx_l2ctlr_of_platform_driver); | ||
219 | } | ||
220 | |||
221 | static void __exit mpc85xx_l2ctlr_of_exit(void) | ||
222 | { | ||
223 | platform_driver_unregister(&mpc85xx_l2ctlr_of_platform_driver); | ||
224 | } | ||
225 | |||
226 | subsys_initcall(mpc85xx_l2ctlr_of_init); | ||
227 | module_exit(mpc85xx_l2ctlr_of_exit); | ||
228 | |||
229 | MODULE_DESCRIPTION("Freescale MPC85xx L2 controller init"); | ||
230 | MODULE_LICENSE("GPL v2"); | ||
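
Note that the carve-out is configured entirely from the kernel command line: both cache-sram-size= and cache-sram-offset= must be supplied, and the size must split the L2 into a power-of-two number of locked ways. As a worked example with hypothetical values, on a part with a 512 KiB (0x80000) L2, booting with

    cache-sram-size=0x40000 cache-sram-offset=0xfffc0000

gives ways = LOCK_WAYS_FULL * 0x40000 / 0x80000 = 4 = LOCK_WAYS_HALF, so the probe programs L2CR_SRAM_HALF and hands the 256 KiB region at 0xfffc0000 to instantiate_cache_sram().
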
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c index dceb8d1a843d..d917573cf1a8 100644 --- a/arch/powerpc/sysdev/fsl_lbc.c +++ b/arch/powerpc/sysdev/fsl_lbc.c | |||
@@ -1,9 +1,12 @@ | |||
1 | /* | 1 | /* |
2 | * Freescale LBC and UPM routines. | 2 | * Freescale LBC and UPM routines. |
3 | * | 3 | * |
4 | * Copyright (c) 2007-2008 MontaVista Software, Inc. | 4 | * Copyright © 2007-2008 MontaVista Software, Inc. |
5 | * Copyright © 2010 Freescale Semiconductor | ||
5 | * | 6 | * |
6 | * Author: Anton Vorontsov <avorontsov@ru.mvista.com> | 7 | * Author: Anton Vorontsov <avorontsov@ru.mvista.com> |
8 | * Author: Jack Lan <Jack.Lan@freescale.com> | ||
9 | * Author: Roy Zang <tie-fei.zang@freescale.com> | ||
7 | * | 10 | * |
8 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
9 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -19,39 +22,37 @@ | |||
19 | #include <linux/types.h> | 22 | #include <linux/types.h> |
20 | #include <linux/io.h> | 23 | #include <linux/io.h> |
21 | #include <linux/of.h> | 24 | #include <linux/of.h> |
25 | #include <linux/slab.h> | ||
26 | #include <linux/platform_device.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/mod_devicetable.h> | ||
22 | #include <asm/prom.h> | 29 | #include <asm/prom.h> |
23 | #include <asm/fsl_lbc.h> | 30 | #include <asm/fsl_lbc.h> |
24 | 31 | ||
25 | static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock); | 32 | static spinlock_t fsl_lbc_lock = __SPIN_LOCK_UNLOCKED(fsl_lbc_lock); |
26 | static struct fsl_lbc_regs __iomem *fsl_lbc_regs; | 33 | struct fsl_lbc_ctrl *fsl_lbc_ctrl_dev; |
34 | EXPORT_SYMBOL(fsl_lbc_ctrl_dev); | ||
27 | 35 | ||
28 | static char __initdata *compat_lbc[] = { | 36 | /** |
29 | "fsl,pq2-localbus", | 37 | * fsl_lbc_addr - convert the base address |
30 | "fsl,pq2pro-localbus", | 38 | * @addr_base: base address of the memory bank |
31 | "fsl,pq3-localbus", | 39 | * |
32 | "fsl,elbc", | 40 | * This function converts a base address of lbc into the right format for the |
33 | }; | 41 | * BR register. If the SOC has eLBC then it returns 32bit physical address |
34 | 42 | * else it converts a 34bit local bus physical address to the correct format of |
35 | static int __init fsl_lbc_init(void) | 43 | * 32bit address for BR register (Example: MPC8641). |
44 | */ | ||
45 | u32 fsl_lbc_addr(phys_addr_t addr_base) | ||
36 | { | 46 | { |
37 | struct device_node *lbus; | 47 | struct device_node *np = fsl_lbc_ctrl_dev->dev->of_node; |
38 | int i; | 48 | u32 addr = addr_base & 0xffff8000; |
39 | 49 | ||
40 | for (i = 0; i < ARRAY_SIZE(compat_lbc); i++) { | 50 | if (of_device_is_compatible(np, "fsl,elbc")) |
41 | lbus = of_find_compatible_node(NULL, NULL, compat_lbc[i]); | 51 | return addr; |
42 | if (lbus) | ||
43 | goto found; | ||
44 | } | ||
45 | return -ENODEV; | ||
46 | 52 | ||
47 | found: | 53 | return addr | ((addr_base & 0x300000000ull) >> 19); |
48 | fsl_lbc_regs = of_iomap(lbus, 0); | ||
49 | of_node_put(lbus); | ||
50 | if (!fsl_lbc_regs) | ||
51 | return -ENOMEM; | ||
52 | return 0; | ||
53 | } | 54 | } |
54 | arch_initcall(fsl_lbc_init); | 55 | EXPORT_SYMBOL(fsl_lbc_addr); |
55 | 56 | ||
56 | /** | 57 | /** |
57 | * fsl_lbc_find - find Localbus bank | 58 | * fsl_lbc_find - find Localbus bank |
@@ -65,15 +66,17 @@ arch_initcall(fsl_lbc_init); | |||
65 | int fsl_lbc_find(phys_addr_t addr_base) | 66 | int fsl_lbc_find(phys_addr_t addr_base) |
66 | { | 67 | { |
67 | int i; | 68 | int i; |
69 | struct fsl_lbc_regs __iomem *lbc; | ||
68 | 70 | ||
69 | if (!fsl_lbc_regs) | 71 | if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) |
70 | return -ENODEV; | 72 | return -ENODEV; |
71 | 73 | ||
72 | for (i = 0; i < ARRAY_SIZE(fsl_lbc_regs->bank); i++) { | 74 | lbc = fsl_lbc_ctrl_dev->regs; |
73 | __be32 br = in_be32(&fsl_lbc_regs->bank[i].br); | 75 | for (i = 0; i < ARRAY_SIZE(lbc->bank); i++) { |
74 | __be32 or = in_be32(&fsl_lbc_regs->bank[i].or); | 76 | __be32 br = in_be32(&lbc->bank[i].br); |
77 | __be32 or = in_be32(&lbc->bank[i].or); | ||
75 | 78 | ||
76 | if (br & BR_V && (br & or & BR_BA) == addr_base) | 79 | if (br & BR_V && (br & or & BR_BA) == fsl_lbc_addr(addr_base)) |
77 | return i; | 80 | return i; |
78 | } | 81 | } |
79 | 82 | ||
@@ -94,22 +97,27 @@ int fsl_upm_find(phys_addr_t addr_base, struct fsl_upm *upm) | |||
94 | { | 97 | { |
95 | int bank; | 98 | int bank; |
96 | __be32 br; | 99 | __be32 br; |
100 | struct fsl_lbc_regs __iomem *lbc; | ||
97 | 101 | ||
98 | bank = fsl_lbc_find(addr_base); | 102 | bank = fsl_lbc_find(addr_base); |
99 | if (bank < 0) | 103 | if (bank < 0) |
100 | return bank; | 104 | return bank; |
101 | 105 | ||
102 | br = in_be32(&fsl_lbc_regs->bank[bank].br); | 106 | if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) |
107 | return -ENODEV; | ||
108 | |||
109 | lbc = fsl_lbc_ctrl_dev->regs; | ||
110 | br = in_be32(&lbc->bank[bank].br); | ||
103 | 111 | ||
104 | switch (br & BR_MSEL) { | 112 | switch (br & BR_MSEL) { |
105 | case BR_MS_UPMA: | 113 | case BR_MS_UPMA: |
106 | upm->mxmr = &fsl_lbc_regs->mamr; | 114 | upm->mxmr = &lbc->mamr; |
107 | break; | 115 | break; |
108 | case BR_MS_UPMB: | 116 | case BR_MS_UPMB: |
109 | upm->mxmr = &fsl_lbc_regs->mbmr; | 117 | upm->mxmr = &lbc->mbmr; |
110 | break; | 118 | break; |
111 | case BR_MS_UPMC: | 119 | case BR_MS_UPMC: |
112 | upm->mxmr = &fsl_lbc_regs->mcmr; | 120 | upm->mxmr = &lbc->mcmr; |
113 | break; | 121 | break; |
114 | default: | 122 | default: |
115 | return -EINVAL; | 123 | return -EINVAL; |
@@ -148,9 +156,12 @@ int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar) | |||
148 | int ret = 0; | 156 | int ret = 0; |
149 | unsigned long flags; | 157 | unsigned long flags; |
150 | 158 | ||
159 | if (!fsl_lbc_ctrl_dev || !fsl_lbc_ctrl_dev->regs) | ||
160 | return -ENODEV; | ||
161 | |||
151 | spin_lock_irqsave(&fsl_lbc_lock, flags); | 162 | spin_lock_irqsave(&fsl_lbc_lock, flags); |
152 | 163 | ||
153 | out_be32(&fsl_lbc_regs->mar, mar); | 164 | out_be32(&fsl_lbc_ctrl_dev->regs->mar, mar); |
154 | 165 | ||
155 | switch (upm->width) { | 166 | switch (upm->width) { |
156 | case 8: | 167 | case 8: |
@@ -172,3 +183,171 @@ int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar) | |||
172 | return ret; | 183 | return ret; |
173 | } | 184 | } |
174 | EXPORT_SYMBOL(fsl_upm_run_pattern); | 185 | EXPORT_SYMBOL(fsl_upm_run_pattern); |
186 | |||
187 | static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl, | ||
188 | struct device_node *node) | ||
189 | { | ||
190 | struct fsl_lbc_regs __iomem *lbc = ctrl->regs; | ||
191 | |||
192 | /* clear event registers */ | ||
193 | setbits32(&lbc->ltesr, LTESR_CLEAR); | ||
194 | out_be32(&lbc->lteatr, 0); | ||
195 | out_be32(&lbc->ltear, 0); | ||
196 | out_be32(&lbc->lteccr, LTECCR_CLEAR); | ||
197 | out_be32(&lbc->ltedr, LTEDR_ENABLE); | ||
198 | |||
199 | /* Set the monitor timeout value to the maximum for erratum A001 */ | ||
200 | if (of_device_is_compatible(node, "fsl,elbc")) | ||
201 | clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS); | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | /* | ||
207 | * NOTE: This interrupt is used to report localbus events of various kinds, | ||
208 | * such as transaction errors on the chipselects. | ||
209 | */ | ||
210 | |||
211 | static irqreturn_t fsl_lbc_ctrl_irq(int irqno, void *data) | ||
212 | { | ||
213 | struct fsl_lbc_ctrl *ctrl = data; | ||
214 | struct fsl_lbc_regs __iomem *lbc = ctrl->regs; | ||
215 | u32 status; | ||
216 | |||
217 | status = in_be32(&lbc->ltesr); | ||
218 | if (!status) | ||
219 | return IRQ_NONE; | ||
220 | |||
221 | out_be32(&lbc->ltesr, LTESR_CLEAR); | ||
222 | out_be32(&lbc->lteatr, 0); | ||
223 | out_be32(&lbc->ltear, 0); | ||
224 | ctrl->irq_status = status; | ||
225 | |||
226 | if (status & LTESR_BM) | ||
227 | dev_err(ctrl->dev, "Local bus monitor time-out: " | ||
228 | "LTESR 0x%08X\n", status); | ||
229 | if (status & LTESR_WP) | ||
230 | dev_err(ctrl->dev, "Write protect error: " | ||
231 | "LTESR 0x%08X\n", status); | ||
232 | if (status & LTESR_ATMW) | ||
233 | dev_err(ctrl->dev, "Atomic write error: " | ||
234 | "LTESR 0x%08X\n", status); | ||
235 | if (status & LTESR_ATMR) | ||
236 | dev_err(ctrl->dev, "Atomic read error: " | ||
237 | "LTESR 0x%08X\n", status); | ||
238 | if (status & LTESR_CS) | ||
239 | dev_err(ctrl->dev, "Chip select error: " | ||
240 | "LTESR 0x%08X\n", status); | ||
241 | if (status & LTESR_UPM) | ||
242 | ; | ||
243 | if (status & LTESR_FCT) { | ||
244 | dev_err(ctrl->dev, "FCM command time-out: " | ||
245 | "LTESR 0x%08X\n", status); | ||
246 | smp_wmb(); | ||
247 | wake_up(&ctrl->irq_wait); | ||
248 | } | ||
249 | if (status & LTESR_PAR) { | ||
250 | dev_err(ctrl->dev, "Parity or Uncorrectable ECC error: " | ||
251 | "LTESR 0x%08X\n", status); | ||
252 | smp_wmb(); | ||
253 | wake_up(&ctrl->irq_wait); | ||
254 | } | ||
255 | if (status & LTESR_CC) { | ||
256 | smp_wmb(); | ||
257 | wake_up(&ctrl->irq_wait); | ||
258 | } | ||
259 | if (status & ~LTESR_MASK) | ||
260 | dev_err(ctrl->dev, "Unknown error: " | ||
261 | "LTESR 0x%08X\n", status); | ||
262 | return IRQ_HANDLED; | ||
263 | } | ||
264 | |||
265 | /* | ||
266 | * fsl_lbc_ctrl_probe | ||
267 | * | ||
268 | * called by the device layer when it finds a device matching | ||
269 | * one that our driver can handle. This code allocates all of | ||
270 | * the resources needed for the controller only. The | ||
271 | * resources for the NAND banks themselves are allocated | ||
272 | * in the chip probe function. | ||
273 | */ | ||
274 | |||
275 | static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev) | ||
276 | { | ||
277 | int ret; | ||
278 | |||
279 | if (!dev->dev.of_node) { | ||
280 | dev_err(&dev->dev, "Device OF-Node is NULL"); | ||
281 | return -EFAULT; | ||
282 | } | ||
283 | |||
284 | fsl_lbc_ctrl_dev = kzalloc(sizeof(*fsl_lbc_ctrl_dev), GFP_KERNEL); | ||
285 | if (!fsl_lbc_ctrl_dev) | ||
286 | return -ENOMEM; | ||
287 | |||
288 | dev_set_drvdata(&dev->dev, fsl_lbc_ctrl_dev); | ||
289 | |||
290 | spin_lock_init(&fsl_lbc_ctrl_dev->lock); | ||
291 | init_waitqueue_head(&fsl_lbc_ctrl_dev->irq_wait); | ||
292 | |||
293 | fsl_lbc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); | ||
294 | if (!fsl_lbc_ctrl_dev->regs) { | ||
295 | dev_err(&dev->dev, "failed to get memory region\n"); | ||
296 | ret = -ENODEV; | ||
297 | goto err; | ||
298 | } | ||
299 | |||
300 | fsl_lbc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); | ||
301 | if (fsl_lbc_ctrl_dev->irq == NO_IRQ) { | ||
302 | dev_err(&dev->dev, "failed to get irq resource\n"); | ||
303 | ret = -ENODEV; | ||
304 | goto err; | ||
305 | } | ||
306 | |||
307 | fsl_lbc_ctrl_dev->dev = &dev->dev; | ||
308 | |||
309 | ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev, dev->dev.of_node); | ||
310 | if (ret < 0) | ||
311 | goto err; | ||
312 | |||
313 | ret = request_irq(fsl_lbc_ctrl_dev->irq, fsl_lbc_ctrl_irq, 0, | ||
314 | "fsl-lbc", fsl_lbc_ctrl_dev); | ||
315 | if (ret != 0) { | ||
316 | dev_err(&dev->dev, "failed to install irq (%d)\n", | ||
317 | fsl_lbc_ctrl_dev->irq); | ||
318 | ret = fsl_lbc_ctrl_dev->irq; | ||
319 | goto err; | ||
320 | } | ||
321 | |||
322 | /* Enable interrupts for any detected events */ | ||
323 | out_be32(&fsl_lbc_ctrl_dev->regs->lteir, LTEIR_ENABLE); | ||
324 | |||
325 | return 0; | ||
326 | |||
327 | err: | ||
328 | iounmap(fsl_lbc_ctrl_dev->regs); | ||
329 | kfree(fsl_lbc_ctrl_dev); | ||
330 | return ret; | ||
331 | } | ||
332 | |||
333 | static const struct of_device_id fsl_lbc_match[] = { | ||
334 | { .compatible = "fsl,elbc", }, | ||
335 | { .compatible = "fsl,pq3-localbus", }, | ||
336 | { .compatible = "fsl,pq2-localbus", }, | ||
337 | { .compatible = "fsl,pq2pro-localbus", }, | ||
338 | {}, | ||
339 | }; | ||
340 | |||
341 | static struct platform_driver fsl_lbc_ctrl_driver = { | ||
342 | .driver = { | ||
343 | .name = "fsl-lbc", | ||
344 | .of_match_table = fsl_lbc_match, | ||
345 | }, | ||
346 | .probe = fsl_lbc_ctrl_probe, | ||
347 | }; | ||
348 | |||
349 | static int __init fsl_lbc_init(void) | ||
350 | { | ||
351 | return platform_driver_register(&fsl_lbc_ctrl_driver); | ||
352 | } | ||
353 | module_init(fsl_lbc_init); | ||
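
As a quick sanity check of fsl_lbc_addr() (values hypothetical): on a non-eLBC controller such as the MPC8641 local bus, a 34-bit bank base of 0x3ff800000 becomes (0xff800000 & 0xffff8000) | (0x300000000 >> 19) = 0xff806000, i.e. the two high address bits are folded into the low-order BR bits, while on an eLBC SoC the base is already 32-bit and only the 0xffff8000 mask is applied. fsl_lbc_find() now compares against fsl_lbc_addr(addr_base), so bank matching behaves the same on both controller generations.
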
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c index 87991d3abbab..92e78333c47c 100644 --- a/arch/powerpc/sysdev/fsl_msi.c +++ b/arch/powerpc/sysdev/fsl_msi.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. | 2 | * Copyright (C) 2007-2011 Freescale Semiconductor, Inc. |
3 | * | 3 | * |
4 | * Author: Tony Li <tony.li@freescale.com> | 4 | * Author: Tony Li <tony.li@freescale.com> |
5 | * Jason Jin <Jason.jin@freescale.com> | 5 | * Jason Jin <Jason.jin@freescale.com> |
@@ -24,6 +24,7 @@ | |||
24 | #include <asm/ppc-pci.h> | 24 | #include <asm/ppc-pci.h> |
25 | #include <asm/mpic.h> | 25 | #include <asm/mpic.h> |
26 | #include "fsl_msi.h" | 26 | #include "fsl_msi.h" |
27 | #include "fsl_pci.h" | ||
27 | 28 | ||
28 | LIST_HEAD(msi_head); | 29 | LIST_HEAD(msi_head); |
29 | 30 | ||
@@ -46,14 +47,14 @@ static inline u32 fsl_msi_read(u32 __iomem *base, unsigned int reg) | |||
46 | * We do not need this actually. The MSIR register has been read once | 47 | * We do not need this actually. The MSIR register has been read once |
47 | * in the cascade interrupt. So, this MSI interrupt has been acked | 48 | * in the cascade interrupt. So, this MSI interrupt has been acked |
48 | */ | 49 | */ |
49 | static void fsl_msi_end_irq(unsigned int virq) | 50 | static void fsl_msi_end_irq(struct irq_data *d) |
50 | { | 51 | { |
51 | } | 52 | } |
52 | 53 | ||
53 | static struct irq_chip fsl_msi_chip = { | 54 | static struct irq_chip fsl_msi_chip = { |
54 | .mask = mask_msi_irq, | 55 | .irq_mask = mask_msi_irq, |
55 | .unmask = unmask_msi_irq, | 56 | .irq_unmask = unmask_msi_irq, |
56 | .ack = fsl_msi_end_irq, | 57 | .irq_ack = fsl_msi_end_irq, |
57 | .name = "FSL-MSI", | 58 | .name = "FSL-MSI", |
58 | }; | 59 | }; |
59 | 60 | ||
@@ -63,10 +64,10 @@ static int fsl_msi_host_map(struct irq_host *h, unsigned int virq, | |||
63 | struct fsl_msi *msi_data = h->host_data; | 64 | struct fsl_msi *msi_data = h->host_data; |
64 | struct irq_chip *chip = &fsl_msi_chip; | 65 | struct irq_chip *chip = &fsl_msi_chip; |
65 | 66 | ||
66 | irq_to_desc(virq)->status |= IRQ_TYPE_EDGE_FALLING; | 67 | irq_set_status_flags(virq, IRQ_TYPE_EDGE_FALLING); |
67 | 68 | ||
68 | set_irq_chip_data(virq, msi_data); | 69 | irq_set_chip_data(virq, msi_data); |
69 | set_irq_chip_and_handler(virq, chip, handle_edge_irq); | 70 | irq_set_chip_and_handler(virq, chip, handle_edge_irq); |
70 | 71 | ||
71 | return 0; | 72 | return 0; |
72 | } | 73 | } |
@@ -109,8 +110,8 @@ static void fsl_teardown_msi_irqs(struct pci_dev *pdev) | |||
109 | list_for_each_entry(entry, &pdev->msi_list, list) { | 110 | list_for_each_entry(entry, &pdev->msi_list, list) { |
110 | if (entry->irq == NO_IRQ) | 111 | if (entry->irq == NO_IRQ) |
111 | continue; | 112 | continue; |
112 | msi_data = get_irq_data(entry->irq); | 113 | msi_data = irq_get_chip_data(entry->irq); |
113 | set_irq_msi(entry->irq, NULL); | 114 | irq_set_msi_desc(entry->irq, NULL); |
114 | msi_bitmap_free_hwirqs(&msi_data->bitmap, | 115 | msi_bitmap_free_hwirqs(&msi_data->bitmap, |
115 | virq_to_hw(entry->irq), 1); | 116 | virq_to_hw(entry->irq), 1); |
116 | irq_dispose_mapping(entry->irq); | 117 | irq_dispose_mapping(entry->irq); |
@@ -125,13 +126,11 @@ static void fsl_compose_msi_msg(struct pci_dev *pdev, int hwirq, | |||
125 | { | 126 | { |
126 | struct fsl_msi *msi_data = fsl_msi_data; | 127 | struct fsl_msi *msi_data = fsl_msi_data; |
127 | struct pci_controller *hose = pci_bus_to_host(pdev->bus); | 128 | struct pci_controller *hose = pci_bus_to_host(pdev->bus); |
128 | u32 base = 0; | 129 | u64 base = fsl_pci_immrbar_base(hose); |
129 | 130 | ||
130 | pci_bus_read_config_dword(hose->bus, | 131 | msg->address_lo = msi_data->msi_addr_lo + lower_32_bits(base); |
131 | PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base); | 132 | msg->address_hi = msi_data->msi_addr_hi + upper_32_bits(base); |
132 | 133 | ||
133 | msg->address_lo = msi_data->msi_addr_lo + base; | ||
134 | msg->address_hi = msi_data->msi_addr_hi; | ||
135 | msg->data = hwirq; | 134 | msg->data = hwirq; |
136 | 135 | ||
137 | pr_debug("%s: allocated srs: %d, ibs: %d\n", | 136 | pr_debug("%s: allocated srs: %d, ibs: %d\n", |
@@ -169,8 +168,8 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
169 | rc = -ENOSPC; | 168 | rc = -ENOSPC; |
170 | goto out_free; | 169 | goto out_free; |
171 | } | 170 | } |
172 | set_irq_data(virq, msi_data); | 171 | /* chip_data is msi_data via host->hostdata in host->map() */ |
173 | set_irq_msi(virq, entry); | 172 | irq_set_msi_desc(virq, entry); |
174 | 173 | ||
175 | fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data); | 174 | fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data); |
176 | write_msi_msg(virq, &msg); | 175 | write_msi_msg(virq, &msg); |
@@ -184,6 +183,8 @@ out_free: | |||
184 | 183 | ||
185 | static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) | 184 | static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) |
186 | { | 185 | { |
186 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
187 | struct irq_data *idata = irq_desc_get_irq_data(desc); | ||
187 | unsigned int cascade_irq; | 188 | unsigned int cascade_irq; |
188 | struct fsl_msi *msi_data; | 189 | struct fsl_msi *msi_data; |
189 | int msir_index = -1; | 190 | int msir_index = -1; |
@@ -192,20 +193,20 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) | |||
192 | u32 have_shift = 0; | 193 | u32 have_shift = 0; |
193 | struct fsl_msi_cascade_data *cascade_data; | 194 | struct fsl_msi_cascade_data *cascade_data; |
194 | 195 | ||
195 | cascade_data = (struct fsl_msi_cascade_data *)get_irq_data(irq); | 196 | cascade_data = irq_get_handler_data(irq); |
196 | msi_data = cascade_data->msi_data; | 197 | msi_data = cascade_data->msi_data; |
197 | 198 | ||
198 | raw_spin_lock(&desc->lock); | 199 | raw_spin_lock(&desc->lock); |
199 | if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { | 200 | if ((msi_data->feature & FSL_PIC_IP_MASK) == FSL_PIC_IP_IPIC) { |
200 | if (desc->chip->mask_ack) | 201 | if (chip->irq_mask_ack) |
201 | desc->chip->mask_ack(irq); | 202 | chip->irq_mask_ack(idata); |
202 | else { | 203 | else { |
203 | desc->chip->mask(irq); | 204 | chip->irq_mask(idata); |
204 | desc->chip->ack(irq); | 205 | chip->irq_ack(idata); |
205 | } | 206 | } |
206 | } | 207 | } |
207 | 208 | ||
208 | if (unlikely(desc->status & IRQ_INPROGRESS)) | 209 | if (unlikely(irqd_irq_inprogress(idata))) |
209 | goto unlock; | 210 | goto unlock; |
210 | 211 | ||
211 | msir_index = cascade_data->index; | 212 | msir_index = cascade_data->index; |
@@ -213,7 +214,7 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) | |||
213 | if (msir_index >= NR_MSI_REG) | 214 | if (msir_index >= NR_MSI_REG) |
214 | cascade_irq = NO_IRQ; | 215 | cascade_irq = NO_IRQ; |
215 | 216 | ||
216 | desc->status |= IRQ_INPROGRESS; | 217 | irqd_set_chained_irq_inprogress(idata); |
217 | switch (msi_data->feature & FSL_PIC_IP_MASK) { | 218 | switch (msi_data->feature & FSL_PIC_IP_MASK) { |
218 | case FSL_PIC_IP_MPIC: | 219 | case FSL_PIC_IP_MPIC: |
219 | msir_value = fsl_msi_read(msi_data->msi_regs, | 220 | msir_value = fsl_msi_read(msi_data->msi_regs, |
@@ -235,15 +236,15 @@ static void fsl_msi_cascade(unsigned int irq, struct irq_desc *desc) | |||
235 | have_shift += intr_index + 1; | 236 | have_shift += intr_index + 1; |
236 | msir_value = msir_value >> (intr_index + 1); | 237 | msir_value = msir_value >> (intr_index + 1); |
237 | } | 238 | } |
238 | desc->status &= ~IRQ_INPROGRESS; | 239 | irqd_clr_chained_irq_inprogress(idata); |
239 | 240 | ||
240 | switch (msi_data->feature & FSL_PIC_IP_MASK) { | 241 | switch (msi_data->feature & FSL_PIC_IP_MASK) { |
241 | case FSL_PIC_IP_MPIC: | 242 | case FSL_PIC_IP_MPIC: |
242 | desc->chip->eoi(irq); | 243 | chip->irq_eoi(idata); |
243 | break; | 244 | break; |
244 | case FSL_PIC_IP_IPIC: | 245 | case FSL_PIC_IP_IPIC: |
245 | if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) | 246 | if (!irqd_irq_disabled(idata) && chip->irq_unmask) |
246 | desc->chip->unmask(irq); | 247 | chip->irq_unmask(idata); |
247 | break; | 248 | break; |
248 | } | 249 | } |
249 | unlock: | 250 | unlock: |
@@ -252,7 +253,7 @@ unlock: | |||
252 | 253 | ||
253 | static int fsl_of_msi_remove(struct platform_device *ofdev) | 254 | static int fsl_of_msi_remove(struct platform_device *ofdev) |
254 | { | 255 | { |
255 | struct fsl_msi *msi = ofdev->dev.platform_data; | 256 | struct fsl_msi *msi = platform_get_drvdata(ofdev); |
256 | int virq, i; | 257 | int virq, i; |
257 | struct fsl_msi_cascade_data *cascade_data; | 258 | struct fsl_msi_cascade_data *cascade_data; |
258 | 259 | ||
@@ -261,7 +262,7 @@ static int fsl_of_msi_remove(struct platform_device *ofdev) | |||
261 | for (i = 0; i < NR_MSI_REG; i++) { | 262 | for (i = 0; i < NR_MSI_REG; i++) { |
262 | virq = msi->msi_virqs[i]; | 263 | virq = msi->msi_virqs[i]; |
263 | if (virq != NO_IRQ) { | 264 | if (virq != NO_IRQ) { |
264 | cascade_data = get_irq_data(virq); | 265 | cascade_data = irq_get_handler_data(virq); |
265 | kfree(cascade_data); | 266 | kfree(cascade_data); |
266 | irq_dispose_mapping(virq); | 267 | irq_dispose_mapping(virq); |
267 | } | 268 | } |
@@ -274,19 +275,53 @@ static int fsl_of_msi_remove(struct platform_device *ofdev) | |||
274 | return 0; | 275 | return 0; |
275 | } | 276 | } |
276 | 277 | ||
277 | static int __devinit fsl_of_msi_probe(struct platform_device *dev, | 278 | static int __devinit fsl_msi_setup_hwirq(struct fsl_msi *msi, |
278 | const struct of_device_id *match) | 279 | struct platform_device *dev, |
280 | int offset, int irq_index) | ||
279 | { | 281 | { |
282 | struct fsl_msi_cascade_data *cascade_data = NULL; | ||
283 | int virt_msir; | ||
284 | |||
285 | virt_msir = irq_of_parse_and_map(dev->dev.of_node, irq_index); | ||
286 | if (virt_msir == NO_IRQ) { | ||
287 | dev_err(&dev->dev, "%s: Cannot translate IRQ index %d\n", | ||
288 | __func__, irq_index); | ||
289 | return 0; | ||
290 | } | ||
291 | |||
292 | cascade_data = kzalloc(sizeof(struct fsl_msi_cascade_data), GFP_KERNEL); | ||
293 | if (!cascade_data) { | ||
294 | dev_err(&dev->dev, "No memory for MSI cascade data\n"); | ||
295 | return -ENOMEM; | ||
296 | } | ||
297 | |||
298 | msi->msi_virqs[irq_index] = virt_msir; | ||
299 | cascade_data->index = offset + irq_index; | ||
300 | cascade_data->msi_data = msi; | ||
301 | irq_set_handler_data(virt_msir, cascade_data); | ||
302 | irq_set_chained_handler(virt_msir, fsl_msi_cascade); | ||
303 | |||
304 | return 0; | ||
305 | } | ||
306 | |||
307 | static const struct of_device_id fsl_of_msi_ids[]; | ||
308 | static int __devinit fsl_of_msi_probe(struct platform_device *dev) | ||
309 | { | ||
310 | const struct of_device_id *match; | ||
280 | struct fsl_msi *msi; | 311 | struct fsl_msi *msi; |
281 | struct resource res; | 312 | struct resource res; |
282 | int err, i, count; | 313 | int err, i, j, irq_index, count; |
283 | int rc; | 314 | int rc; |
284 | int virt_msir; | ||
285 | const u32 *p; | 315 | const u32 *p; |
286 | struct fsl_msi_feature *features = match->data; | 316 | struct fsl_msi_feature *features; |
287 | struct fsl_msi_cascade_data *cascade_data = NULL; | ||
288 | int len; | 317 | int len; |
289 | u32 offset; | 318 | u32 offset; |
319 | static const u32 all_avail[] = { 0, NR_MSI_IRQS }; | ||
320 | |||
321 | match = of_match_device(fsl_of_msi_ids, &dev->dev); | ||
322 | if (!match) | ||
323 | return -EINVAL; | ||
324 | features = match->data; | ||
290 | 325 | ||
291 | printk(KERN_DEBUG "Setting up Freescale MSI support\n"); | 326 | printk(KERN_DEBUG "Setting up Freescale MSI support\n"); |
292 | 327 | ||
@@ -295,7 +330,7 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev, | |||
295 | dev_err(&dev->dev, "No memory for MSI structure\n"); | 330 | dev_err(&dev->dev, "No memory for MSI structure\n"); |
296 | return -ENOMEM; | 331 | return -ENOMEM; |
297 | } | 332 | } |
298 | dev->dev.platform_data = msi; | 333 | platform_set_drvdata(dev, msi); |
299 | 334 | ||
300 | msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR, | 335 | msi->irqhost = irq_alloc_host(dev->dev.of_node, IRQ_HOST_MAP_LINEAR, |
301 | NR_MSI_IRQS, &fsl_msi_host_ops, 0); | 336 | NR_MSI_IRQS, &fsl_msi_host_ops, 0); |
@@ -333,42 +368,34 @@ static int __devinit fsl_of_msi_probe(struct platform_device *dev, | |||
333 | goto error_out; | 368 | goto error_out; |
334 | } | 369 | } |
335 | 370 | ||
336 | p = of_get_property(dev->dev.of_node, "interrupts", &count); | 371 | p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len); |
337 | if (!p) { | 372 | if (p && len % (2 * sizeof(u32)) != 0) { |
338 | dev_err(&dev->dev, "no interrupts property found on %s\n", | 373 | dev_err(&dev->dev, "%s: Malformed msi-available-ranges property\n", |
339 | dev->dev.of_node->full_name); | 374 | __func__); |
340 | err = -ENODEV; | ||
341 | goto error_out; | ||
342 | } | ||
343 | if (count % 8 != 0) { | ||
344 | dev_err(&dev->dev, "Malformed interrupts property on %s\n", | ||
345 | dev->dev.of_node->full_name); | ||
346 | err = -EINVAL; | 375 | err = -EINVAL; |
347 | goto error_out; | 376 | goto error_out; |
348 | } | 377 | } |
349 | offset = 0; | 378 | |
350 | p = of_get_property(dev->dev.of_node, "msi-available-ranges", &len); | 379 | if (!p) |
351 | if (p) | 380 | p = all_avail; |
352 | offset = *p / IRQS_PER_MSI_REG; | 381 | |
353 | 382 | for (irq_index = 0, i = 0; i < len / (2 * sizeof(u32)); i++) { | |
354 | count /= sizeof(u32); | 383 | if (p[i * 2] % IRQS_PER_MSI_REG || |
355 | for (i = 0; i < min(count / 2, NR_MSI_REG); i++) { | 384 | p[i * 2 + 1] % IRQS_PER_MSI_REG) { |
356 | virt_msir = irq_of_parse_and_map(dev->dev.of_node, i); | 385 | printk(KERN_WARNING "%s: %s: msi available range of %u at %u is not IRQ-aligned\n", |
357 | if (virt_msir != NO_IRQ) { | 386 | __func__, dev->dev.of_node->full_name, |
358 | cascade_data = kzalloc( | 387 | p[i * 2 + 1], p[i * 2]); |
359 | sizeof(struct fsl_msi_cascade_data), | 388 | err = -EINVAL; |
360 | GFP_KERNEL); | 389 | goto error_out; |
361 | if (!cascade_data) { | 390 | } |
362 | dev_err(&dev->dev, | 391 | |
363 | "No memory for MSI cascade data\n"); | 392 | offset = p[i * 2] / IRQS_PER_MSI_REG; |
364 | err = -ENOMEM; | 393 | count = p[i * 2 + 1] / IRQS_PER_MSI_REG; |
394 | |||
395 | for (j = 0; j < count; j++, irq_index++) { | ||
396 | err = fsl_msi_setup_hwirq(msi, dev, offset, irq_index); | ||
397 | if (err) | ||
365 | goto error_out; | 398 | goto error_out; |
366 | } | ||
367 | msi->msi_virqs[i] = virt_msir; | ||
368 | cascade_data->index = i + offset; | ||
369 | cascade_data->msi_data = msi; | ||
370 | set_irq_data(virt_msir, (void *)cascade_data); | ||
371 | set_irq_chained_handler(virt_msir, fsl_msi_cascade); | ||
372 | } | 399 | } |
373 | } | 400 | } |
374 | 401 | ||
@@ -412,7 +439,7 @@ static const struct of_device_id fsl_of_msi_ids[] = { | |||
412 | {} | 439 | {} |
413 | }; | 440 | }; |
414 | 441 | ||
415 | static struct of_platform_driver fsl_of_msi_driver = { | 442 | static struct platform_driver fsl_of_msi_driver = { |
416 | .driver = { | 443 | .driver = { |
417 | .name = "fsl-msi", | 444 | .name = "fsl-msi", |
418 | .owner = THIS_MODULE, | 445 | .owner = THIS_MODULE, |
@@ -424,7 +451,7 @@ static struct of_platform_driver fsl_of_msi_driver = { | |||
424 | 451 | ||
425 | static __init int fsl_of_msi_init(void) | 452 | static __init int fsl_of_msi_init(void) |
426 | { | 453 | { |
427 | return of_register_platform_driver(&fsl_of_msi_driver); | 454 | return platform_driver_register(&fsl_of_msi_driver); |
428 | } | 455 | } |
429 | 456 | ||
430 | subsys_initcall(fsl_of_msi_init); | 457 | subsys_initcall(fsl_of_msi_init); |
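
For reference, the reworked probe walks msi-available-ranges as <offset count> pairs instead of counting the interrupts property. With a hypothetical property of msi-available-ranges = <0 0x100>, and assuming IRQS_PER_MSI_REG is 32 and NR_MSI_REG is 8 as defined in fsl_msi.h, the loop above yields offset 0 and count 8, so every MSIR cascade interrupt gets wired up through fsl_msi_setup_hwirq(); when the property is absent, the all_avail table stands in for the full range.
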
diff --git a/arch/powerpc/sysdev/fsl_pci.c b/arch/powerpc/sysdev/fsl_pci.c index 4ae933225251..68ca9290df94 100644 --- a/arch/powerpc/sysdev/fsl_pci.c +++ b/arch/powerpc/sysdev/fsl_pci.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * MPC83xx/85xx/86xx PCI/PCIE support routing. | 2 | * MPC83xx/85xx/86xx PCI/PCIE support routing. |
3 | * | 3 | * |
4 | * Copyright 2007-2009 Freescale Semiconductor, Inc. | 4 | * Copyright 2007-2011 Freescale Semiconductor, Inc. |
5 | * Copyright 2008-2009 MontaVista Software, Inc. | 5 | * Copyright 2008-2009 MontaVista Software, Inc. |
6 | * | 6 | * |
7 | * Initial author: Xianghua Xiao <x.xiao@freescale.com> | 7 | * Initial author: Xianghua Xiao <x.xiao@freescale.com> |
@@ -34,7 +34,7 @@ | |||
34 | #include <sysdev/fsl_soc.h> | 34 | #include <sysdev/fsl_soc.h> |
35 | #include <sysdev/fsl_pci.h> | 35 | #include <sysdev/fsl_pci.h> |
36 | 36 | ||
37 | static int fsl_pcie_bus_fixup; | 37 | static int fsl_pcie_bus_fixup, is_mpc83xx_pci; |
38 | 38 | ||
39 | static void __init quirk_fsl_pcie_header(struct pci_dev *dev) | 39 | static void __init quirk_fsl_pcie_header(struct pci_dev *dev) |
40 | { | 40 | { |
@@ -99,7 +99,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose, | |||
99 | struct resource *rsrc) | 99 | struct resource *rsrc) |
100 | { | 100 | { |
101 | struct ccsr_pci __iomem *pci; | 101 | struct ccsr_pci __iomem *pci; |
102 | int i, j, n, mem_log, win_idx = 2; | 102 | int i, j, n, mem_log, win_idx = 3, start_idx = 1, end_idx = 4; |
103 | u64 mem, sz, paddr_hi = 0; | 103 | u64 mem, sz, paddr_hi = 0; |
104 | u64 paddr_lo = ULLONG_MAX; | 104 | u64 paddr_lo = ULLONG_MAX; |
105 | u32 pcicsrbar = 0, pcicsrbar_sz; | 105 | u32 pcicsrbar = 0, pcicsrbar_sz; |
@@ -109,6 +109,13 @@ static void __init setup_pci_atmu(struct pci_controller *hose, | |||
109 | 109 | ||
110 | pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n", | 110 | pr_debug("PCI memory map start 0x%016llx, size 0x%016llx\n", |
111 | (u64)rsrc->start, (u64)rsrc->end - (u64)rsrc->start + 1); | 111 | (u64)rsrc->start, (u64)rsrc->end - (u64)rsrc->start + 1); |
112 | |||
113 | if (of_device_is_compatible(hose->dn, "fsl,qoriq-pcie-v2.2")) { | ||
114 | win_idx = 2; | ||
115 | start_idx = 0; | ||
116 | end_idx = 3; | ||
117 | } | ||
118 | |||
112 | pci = ioremap(rsrc->start, rsrc->end - rsrc->start + 1); | 119 | pci = ioremap(rsrc->start, rsrc->end - rsrc->start + 1); |
113 | if (!pci) { | 120 | if (!pci) { |
114 | dev_err(hose->parent, "Unable to map ATMU registers\n"); | 121 | dev_err(hose->parent, "Unable to map ATMU registers\n"); |
@@ -118,7 +125,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose, | |||
118 | /* Disable all windows (except powar0 since it's ignored) */ | 125 | /* Disable all windows (except powar0 since it's ignored) */ |
119 | for(i = 1; i < 5; i++) | 126 | for(i = 1; i < 5; i++) |
120 | out_be32(&pci->pow[i].powar, 0); | 127 | out_be32(&pci->pow[i].powar, 0); |
121 | for(i = 0; i < 3; i++) | 128 | for (i = start_idx; i < end_idx; i++) |
122 | out_be32(&pci->piw[i].piwar, 0); | 129 | out_be32(&pci->piw[i].piwar, 0); |
123 | 130 | ||
124 | /* Setup outbound MEM window */ | 131 | /* Setup outbound MEM window */ |
@@ -204,7 +211,7 @@ static void __init setup_pci_atmu(struct pci_controller *hose, | |||
204 | mem_log++; | 211 | mem_log++; |
205 | } | 212 | } |
206 | 213 | ||
207 | piwar |= (mem_log - 1); | 214 | piwar |= ((mem_log - 1) & PIWAR_SZ_MASK); |
208 | 215 | ||
209 | /* Setup inbound memory window */ | 216 | /* Setup inbound memory window */ |
210 | out_be32(&pci->piw[win_idx].pitar, 0x00000000); | 217 | out_be32(&pci->piw[win_idx].pitar, 0x00000000); |
@@ -317,6 +324,11 @@ int __init fsl_add_bridge(struct device_node *dev, int is_primary) | |||
317 | struct resource rsrc; | 324 | struct resource rsrc; |
318 | const int *bus_range; | 325 | const int *bus_range; |
319 | 326 | ||
327 | if (!of_device_is_available(dev)) { | ||
328 | pr_warning("%s: disabled\n", dev->full_name); | ||
329 | return -ENODEV; | ||
330 | } | ||
331 | |||
320 | pr_debug("Adding PCI host bridge %s\n", dev->full_name); | 332 | pr_debug("Adding PCI host bridge %s\n", dev->full_name); |
321 | 333 | ||
322 | /* Fetch host bridge registers address */ | 334 | /* Fetch host bridge registers address */ |
@@ -407,10 +419,18 @@ DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010E, quirk_fsl_pcie_header); | |||
407 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010, quirk_fsl_pcie_header); | 419 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2010, quirk_fsl_pcie_header); |
408 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header); | 420 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020E, quirk_fsl_pcie_header); |
409 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header); | 421 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2020, quirk_fsl_pcie_header); |
422 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2040E, quirk_fsl_pcie_header); | ||
423 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P2040, quirk_fsl_pcie_header); | ||
424 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P3041E, quirk_fsl_pcie_header); | ||
425 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P3041, quirk_fsl_pcie_header); | ||
410 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040E, quirk_fsl_pcie_header); | 426 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040E, quirk_fsl_pcie_header); |
411 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040, quirk_fsl_pcie_header); | 427 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4040, quirk_fsl_pcie_header); |
412 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080E, quirk_fsl_pcie_header); | 428 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080E, quirk_fsl_pcie_header); |
413 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080, quirk_fsl_pcie_header); | 429 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P4080, quirk_fsl_pcie_header); |
430 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5010E, quirk_fsl_pcie_header); | ||
431 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5010, quirk_fsl_pcie_header); | ||
432 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5020E, quirk_fsl_pcie_header); | ||
433 | DECLARE_PCI_FIXUP_HEADER(0x1957, PCI_DEVICE_ID_P5020, quirk_fsl_pcie_header); | ||
414 | #endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */ | 434 | #endif /* CONFIG_FSL_SOC_BOOKE || CONFIG_PPC_86xx */ |
415 | 435 | ||
416 | #if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x) | 436 | #if defined(CONFIG_PPC_83xx) || defined(CONFIG_PPC_MPC512x) |
@@ -430,6 +450,13 @@ struct mpc83xx_pcie_priv { | |||
430 | u32 dev_base; | 450 | u32 dev_base; |
431 | }; | 451 | }; |
432 | 452 | ||
453 | struct pex_inbound_window { | ||
454 | u32 ar; | ||
455 | u32 tar; | ||
456 | u32 barl; | ||
457 | u32 barh; | ||
458 | }; | ||
459 | |||
433 | /* | 460 | /* |
434 | * With the convention of u-boot, the PCIE outbound window 0 serves | 461 | * With the convention of u-boot, the PCIE outbound window 0 serves |
435 | * as configuration transactions outbound. | 462 | * as configuration transactions outbound. |
@@ -437,6 +464,8 @@ struct mpc83xx_pcie_priv { | |||
437 | #define PEX_OUTWIN0_BAR 0xCA4 | 464 | #define PEX_OUTWIN0_BAR 0xCA4 |
438 | #define PEX_OUTWIN0_TAL 0xCA8 | 465 | #define PEX_OUTWIN0_TAL 0xCA8 |
439 | #define PEX_OUTWIN0_TAH 0xCAC | 466 | #define PEX_OUTWIN0_TAH 0xCAC |
467 | #define PEX_RC_INWIN_BASE 0xE60 | ||
468 | #define PEX_RCIWARn_EN 0x1 | ||
440 | 469 | ||
441 | static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn) | 470 | static int mpc83xx_pcie_exclude_device(struct pci_bus *bus, unsigned int devfn) |
442 | { | 471 | { |
@@ -604,6 +633,8 @@ int __init mpc83xx_add_bridge(struct device_node *dev) | |||
604 | const int *bus_range; | 633 | const int *bus_range; |
605 | int primary; | 634 | int primary; |
606 | 635 | ||
636 | is_mpc83xx_pci = 1; | ||
637 | |||
607 | if (!of_device_is_available(dev)) { | 638 | if (!of_device_is_available(dev)) { |
608 | pr_warning("%s: disabled by the firmware.\n", | 639 | pr_warning("%s: disabled by the firmware.\n", |
609 | dev->full_name); | 640 | dev->full_name); |
@@ -683,3 +714,40 @@ err0: | |||
683 | return ret; | 714 | return ret; |
684 | } | 715 | } |
685 | #endif /* CONFIG_PPC_83xx */ | 716 | #endif /* CONFIG_PPC_83xx */ |
717 | |||
718 | u64 fsl_pci_immrbar_base(struct pci_controller *hose) | ||
719 | { | ||
720 | #ifdef CONFIG_PPC_83xx | ||
721 | if (is_mpc83xx_pci) { | ||
722 | struct mpc83xx_pcie_priv *pcie = hose->dn->data; | ||
723 | struct pex_inbound_window *in; | ||
724 | int i; | ||
725 | |||
726 | /* Walk the Root Complex Inbound windows to match IMMR base */ | ||
727 | in = pcie->cfg_type0 + PEX_RC_INWIN_BASE; | ||
728 | for (i = 0; i < 4; i++) { | ||
729 | /* not enabled, skip */ | ||
730 | if (!(in_le32(&in[i].ar) & PEX_RCIWARn_EN)) | ||
731 | continue; | ||
732 | |||
733 | if (get_immrbase() == in_le32(&in[i].tar)) | ||
734 | return (u64)in_le32(&in[i].barh) << 32 | | ||
735 | in_le32(&in[i].barl); | ||
736 | } | ||
737 | |||
738 | printk(KERN_WARNING "could not find PCI BAR matching IMMR\n"); | ||
739 | } | ||
740 | #endif | ||
741 | |||
742 | #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) | ||
743 | if (!is_mpc83xx_pci) { | ||
744 | u32 base; | ||
745 | |||
746 | pci_bus_read_config_dword(hose->bus, | ||
747 | PCI_DEVFN(0, 0), PCI_BASE_ADDRESS_0, &base); | ||
748 | return base; | ||
749 | } | ||
750 | #endif | ||
751 | |||
752 | return 0; | ||
753 | } | ||
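
Note on the new fsl_pci_immrbar_base(): it walks the root-complex inbound windows, skips any window whose AR enable bit is clear, and returns the 64-bit BAR whose translation address matches get_immrbase(). A minimal userspace sketch of that matching logic follows; the window values are made up and stand in for the in_le32() register reads (mind the parentheses around the enable-bit test):

#include <stdio.h>
#include <stdint.h>

#define PEX_RCIWARn_EN	0x1

struct pex_inbound_window {
	uint32_t ar;
	uint32_t tar;
	uint32_t barl;
	uint32_t barh;
};

/* Return the 64-bit BAR of the enabled window translating 'immrbase',
 * or 0 if none matches -- the same walk the kernel code performs over
 * the memory-mapped window registers. */
static uint64_t match_immr_window(const struct pex_inbound_window *in,
				  int nr, uint32_t immrbase)
{
	int i;

	for (i = 0; i < nr; i++) {
		if (!(in[i].ar & PEX_RCIWARn_EN))
			continue;
		if (in[i].tar == immrbase)
			return (uint64_t)in[i].barh << 32 | in[i].barl;
	}
	return 0;
}

int main(void)
{
	/* Hypothetical window set: only window 1 is enabled and maps IMMR. */
	struct pex_inbound_window win[4] = {
		{ .ar = 0 },
		{ .ar = PEX_RCIWARn_EN, .tar = 0xffe00000,
		  .barh = 0x0, .barl = 0xe0000000 },
	};

	printf("IMMR seen from PCI at 0x%llx\n",
	       (unsigned long long)match_immr_window(win, 4, 0xffe00000));
	return 0;
}
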
diff --git a/arch/powerpc/sysdev/fsl_pci.h b/arch/powerpc/sysdev/fsl_pci.h index a9d8bbebed80..a39ed5cc2c5a 100644 --- a/arch/powerpc/sysdev/fsl_pci.h +++ b/arch/powerpc/sysdev/fsl_pci.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * MPC85xx/86xx PCI Express structure define | 2 | * MPC85xx/86xx PCI Express structure define |
3 | * | 3 | * |
4 | * Copyright 2007 Freescale Semiconductor, Inc | 4 | * Copyright 2007,2011 Freescale Semiconductor, Inc |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or modify it | 6 | * This program is free software; you can redistribute it and/or modify it |
7 | * under the terms of the GNU General Public License as published by the | 7 | * under the terms of the GNU General Public License as published by the |
@@ -21,6 +21,7 @@ | |||
21 | #define PIWAR_TGI_LOCAL 0x00f00000 /* target - local memory */ | 21 | #define PIWAR_TGI_LOCAL 0x00f00000 /* target - local memory */ |
22 | #define PIWAR_READ_SNOOP 0x00050000 | 22 | #define PIWAR_READ_SNOOP 0x00050000 |
23 | #define PIWAR_WRITE_SNOOP 0x00005000 | 23 | #define PIWAR_WRITE_SNOOP 0x00005000 |
24 | #define PIWAR_SZ_MASK 0x0000003f | ||
24 | 25 | ||
25 | /* PCI/PCI Express outbound window reg */ | 26 | /* PCI/PCI Express outbound window reg */ |
26 | struct pci_outbound_window_regs { | 27 | struct pci_outbound_window_regs { |
@@ -49,7 +50,9 @@ struct ccsr_pci { | |||
49 | __be32 int_ack; /* 0x.008 - PCI Interrupt Acknowledge Register */ | 50 | __be32 int_ack; /* 0x.008 - PCI Interrupt Acknowledge Register */ |
50 | __be32 pex_otb_cpl_tor; /* 0x.00c - PCIE Outbound completion timeout register */ | 51 | __be32 pex_otb_cpl_tor; /* 0x.00c - PCIE Outbound completion timeout register */ |
51 | __be32 pex_conf_tor; /* 0x.010 - PCIE configuration timeout register */ | 52 | __be32 pex_conf_tor; /* 0x.010 - PCIE configuration timeout register */ |
52 | u8 res2[12]; | 53 | __be32 pex_config; /* 0x.014 - PCIE CONFIG Register */ |
54 | __be32 pex_int_status; /* 0x.018 - PCIE interrupt status */ | ||
55 | u8 res2[4]; | ||
53 | __be32 pex_pme_mes_dr; /* 0x.020 - PCIE PME and message detect register */ | 56 | __be32 pex_pme_mes_dr; /* 0x.020 - PCIE PME and message detect register */ |
54 | __be32 pex_pme_mes_disr; /* 0x.024 - PCIE PME and message disable register */ | 57 | __be32 pex_pme_mes_disr; /* 0x.024 - PCIE PME and message disable register */ |
55 | __be32 pex_pme_mes_ier; /* 0x.028 - PCIE PME and message interrupt enable register */ | 58 | __be32 pex_pme_mes_ier; /* 0x.028 - PCIE PME and message interrupt enable register */ |
@@ -62,14 +65,14 @@ struct ccsr_pci { | |||
62 | * in all of the other outbound windows. | 65 | * in all of the other outbound windows. |
63 | */ | 66 | */ |
64 | struct pci_outbound_window_regs pow[5]; | 67 | struct pci_outbound_window_regs pow[5]; |
65 | 68 | u8 res14[96]; | |
66 | u8 res14[256]; | 69 | struct pci_inbound_window_regs pmit; /* 0xd00 - 0xd9c Inbound MSI */ |
67 | 70 | u8 res6[96]; | |
68 | /* PCI/PCI Express inbound window 3-1 | 71 | /* PCI/PCI Express inbound window 3-0 |
69 | * inbound window 1 supports only a 32-bit base address and does not | 72 | * inbound window 1 supports only a 32-bit base address and does not |
70 | * define an inbound window base extended address register. | 73 | * define an inbound window base extended address register. |
71 | */ | 74 | */ |
72 | struct pci_inbound_window_regs piw[3]; | 75 | struct pci_inbound_window_regs piw[4]; |
73 | 76 | ||
74 | __be32 pex_err_dr; /* 0x.e00 - PCI/PCIE error detect register */ | 77 | __be32 pex_err_dr; /* 0x.e00 - PCI/PCIE error detect register */ |
75 | u8 res21[4]; | 78 | u8 res21[4]; |
@@ -88,6 +91,7 @@ struct ccsr_pci { | |||
88 | extern int fsl_add_bridge(struct device_node *dev, int is_primary); | 91 | extern int fsl_add_bridge(struct device_node *dev, int is_primary); |
89 | extern void fsl_pcibios_fixup_bus(struct pci_bus *bus); | 92 | extern void fsl_pcibios_fixup_bus(struct pci_bus *bus); |
90 | extern int mpc83xx_add_bridge(struct device_node *dev); | 93 | extern int mpc83xx_add_bridge(struct device_node *dev); |
94 | u64 fsl_pci_immrbar_base(struct pci_controller *hose); | ||
91 | 95 | ||
92 | #endif /* __POWERPC_FSL_PCI_H */ | 96 | #endif /* __POWERPC_FSL_PCI_H */ |
93 | #endif /* __KERNEL__ */ | 97 | #endif /* __KERNEL__ */ |
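
Note on PIWAR_SZ_MASK: it covers the low six bits of the inbound window attribute register, where setup_pci_atmu() now stores ((mem_log - 1) & PIWAR_SZ_MASK), i.e. log2 of the window size minus one. A small standalone sketch of that encoding; the field semantics are inferred from the setup code above, so check the SoC reference manual for the authoritative definition:

#include <stdio.h>
#include <stdint.h>

#define PIWAR_SZ_MASK	0x0000003f

/* Encode a power-of-two window size (in bytes) into the PIWAR size field.
 * A window of 2^n bytes is encoded as (n - 1) in the low six bits, which
 * is what "piwar |= ((mem_log - 1) & PIWAR_SZ_MASK)" relies on. */
static uint32_t piwar_size_field(uint64_t window_bytes)
{
	unsigned int n = 0;

	while ((1ULL << n) < window_bytes)
		n++;			/* round up to the next power of two */

	return (n - 1) & PIWAR_SZ_MASK;
}

int main(void)
{
	/* A 1 GiB inbound window (2^30 bytes) encodes as 29 (0x1d). */
	printf("1 GiB -> size field 0x%02x\n",
	       piwar_size_field(1ULL << 30));
	/* A 4 KiB window (2^12 bytes) encodes as 11 (0x0b). */
	printf("4 KiB -> size field 0x%02x\n",
	       piwar_size_field(1ULL << 12));
	return 0;
}
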
diff --git a/arch/powerpc/sysdev/fsl_pmc.c b/arch/powerpc/sysdev/fsl_pmc.c index 44de8559c975..f122e8961d32 100644 --- a/arch/powerpc/sysdev/fsl_pmc.c +++ b/arch/powerpc/sysdev/fsl_pmc.c | |||
@@ -53,13 +53,12 @@ static int pmc_suspend_valid(suspend_state_t state) | |||
53 | return 1; | 53 | return 1; |
54 | } | 54 | } |
55 | 55 | ||
56 | static struct platform_suspend_ops pmc_suspend_ops = { | 56 | static const struct platform_suspend_ops pmc_suspend_ops = { |
57 | .valid = pmc_suspend_valid, | 57 | .valid = pmc_suspend_valid, |
58 | .enter = pmc_suspend_enter, | 58 | .enter = pmc_suspend_enter, |
59 | }; | 59 | }; |
60 | 60 | ||
61 | static int pmc_probe(struct platform_device *ofdev, | 61 | static int pmc_probe(struct platform_device *ofdev) |
62 | const struct of_device_id *id) | ||
63 | { | 62 | { |
64 | pmc_regs = of_iomap(ofdev->dev.of_node, 0); | 63 | pmc_regs = of_iomap(ofdev->dev.of_node, 0); |
65 | if (!pmc_regs) | 64 | if (!pmc_regs) |
@@ -76,7 +75,7 @@ static const struct of_device_id pmc_ids[] = { | |||
76 | { }, | 75 | { }, |
77 | }; | 76 | }; |
78 | 77 | ||
79 | static struct of_platform_driver pmc_driver = { | 78 | static struct platform_driver pmc_driver = { |
80 | .driver = { | 79 | .driver = { |
81 | .name = "fsl-pmc", | 80 | .name = "fsl-pmc", |
82 | .owner = THIS_MODULE, | 81 | .owner = THIS_MODULE, |
@@ -87,6 +86,6 @@ static struct of_platform_driver pmc_driver = { | |||
87 | 86 | ||
88 | static int __init pmc_init(void) | 87 | static int __init pmc_init(void) |
89 | { | 88 | { |
90 | return of_register_platform_driver(&pmc_driver); | 89 | return platform_driver_register(&pmc_driver); |
91 | } | 90 | } |
92 | device_initcall(pmc_init); | 91 | device_initcall(pmc_init); |
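
Note on the fsl_pmc.c change: it follows the tree-wide switch from of_platform_driver to platform_driver, so probe() loses its of_device_id argument and registration goes through platform_driver_register(). A hedged sketch of the same conversion for a hypothetical driver; "vendor,foo" and all foo_* identifiers below are invented:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

static void __iomem *foo_regs;

static const struct of_device_id foo_ids[] = {
	{ .compatible = "vendor,foo", },
	{ },
};

/* probe() no longer receives the matched of_device_id; drivers that
 * still need the match data look it up with of_match_device(). */
static int foo_probe(struct platform_device *ofdev)
{
	if (!of_match_device(foo_ids, &ofdev->dev))
		return -ENODEV;

	foo_regs = of_iomap(ofdev->dev.of_node, 0);
	if (!foo_regs)
		return -ENOMEM;

	return 0;
}

static struct platform_driver foo_driver = {
	.driver = {
		.name = "foo",
		.owner = THIS_MODULE,
		.of_match_table = foo_ids,
	},
	.probe = foo_probe,
};

static int __init foo_init(void)
{
	return platform_driver_register(&foo_driver);
}
device_initcall(foo_init);
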
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 3017532319c8..b3fd081d56f5 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c | |||
@@ -10,7 +10,7 @@ | |||
10 | * - Added Port-Write message handling | 10 | * - Added Port-Write message handling |
11 | * - Added Machine Check exception handling | 11 | * - Added Machine Check exception handling |
12 | * | 12 | * |
13 | * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc. | 13 | * Copyright (C) 2007, 2008, 2010 Freescale Semiconductor, Inc. |
14 | * Zhang Wei <wei.zhang@freescale.com> | 14 | * Zhang Wei <wei.zhang@freescale.com> |
15 | * | 15 | * |
16 | * Copyright 2005 MontaVista Software, Inc. | 16 | * Copyright 2005 MontaVista Software, Inc. |
@@ -47,14 +47,33 @@ | |||
47 | #define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) | 47 | #define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) |
48 | #define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq) | 48 | #define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq) |
49 | 49 | ||
50 | #define IPWSR_CLEAR 0x98 | ||
51 | #define OMSR_CLEAR 0x1cb3 | ||
52 | #define IMSR_CLEAR 0x491 | ||
53 | #define IDSR_CLEAR 0x91 | ||
54 | #define ODSR_CLEAR 0x1c00 | ||
55 | #define LTLEECSR_ENABLE_ALL 0xFFC000FC | ||
56 | #define ESCSR_CLEAR 0x07120204 | ||
57 | |||
58 | #define RIO_PORT1_EDCSR 0x0640 | ||
59 | #define RIO_PORT2_EDCSR 0x0680 | ||
60 | #define RIO_PORT1_IECSR 0x10130 | ||
61 | #define RIO_PORT2_IECSR 0x101B0 | ||
62 | #define RIO_IM0SR 0x13064 | ||
63 | #define RIO_IM1SR 0x13164 | ||
64 | #define RIO_OM0SR 0x13004 | ||
65 | #define RIO_OM1SR 0x13104 | ||
66 | |||
50 | #define RIO_ATMU_REGS_OFFSET 0x10c00 | 67 | #define RIO_ATMU_REGS_OFFSET 0x10c00 |
51 | #define RIO_P_MSG_REGS_OFFSET 0x11000 | 68 | #define RIO_P_MSG_REGS_OFFSET 0x11000 |
52 | #define RIO_S_MSG_REGS_OFFSET 0x13000 | 69 | #define RIO_S_MSG_REGS_OFFSET 0x13000 |
70 | #define RIO_GCCSR 0x13c | ||
53 | #define RIO_ESCSR 0x158 | 71 | #define RIO_ESCSR 0x158 |
72 | #define RIO_PORT2_ESCSR 0x178 | ||
54 | #define RIO_CCSR 0x15c | 73 | #define RIO_CCSR 0x15c |
55 | #define RIO_LTLEDCSR 0x0608 | 74 | #define RIO_LTLEDCSR 0x0608 |
56 | #define RIO_LTLEDCSR_IER 0x80000000 | 75 | #define RIO_LTLEDCSR_IER 0x80000000 |
57 | #define RIO_LTLEDCSR_PRT 0x01000000 | 76 | #define RIO_LTLEDCSR_PRT 0x01000000 |
58 | #define RIO_LTLEECSR 0x060c | 77 | #define RIO_LTLEECSR 0x060c |
59 | #define RIO_EPWISR 0x10010 | 78 | #define RIO_EPWISR 0x10010 |
60 | #define RIO_ISR_AACR 0x10120 | 79 | #define RIO_ISR_AACR 0x10120 |
@@ -87,6 +106,12 @@ | |||
87 | #define RIO_IPWSR_PWD 0x00000008 | 106 | #define RIO_IPWSR_PWD 0x00000008 |
88 | #define RIO_IPWSR_PWB 0x00000004 | 107 | #define RIO_IPWSR_PWB 0x00000004 |
89 | 108 | ||
109 | /* EPWISR Error match value */ | ||
110 | #define RIO_EPWISR_PINT1 0x80000000 | ||
111 | #define RIO_EPWISR_PINT2 0x40000000 | ||
112 | #define RIO_EPWISR_MU 0x00000002 | ||
113 | #define RIO_EPWISR_PW 0x00000001 | ||
114 | |||
90 | #define RIO_MSG_DESC_SIZE 32 | 115 | #define RIO_MSG_DESC_SIZE 32 |
91 | #define RIO_MSG_BUFFER_SIZE 4096 | 116 | #define RIO_MSG_BUFFER_SIZE 4096 |
92 | #define RIO_MIN_TX_RING_SIZE 2 | 117 | #define RIO_MIN_TX_RING_SIZE 2 |
@@ -117,44 +142,59 @@ struct rio_atmu_regs { | |||
117 | }; | 142 | }; |
118 | 143 | ||
119 | struct rio_msg_regs { | 144 | struct rio_msg_regs { |
120 | u32 omr; | 145 | u32 omr; /* 0xD_3000 - Outbound message 0 mode register */ |
121 | u32 osr; | 146 | u32 osr; /* 0xD_3004 - Outbound message 0 status register */ |
122 | u32 pad1; | 147 | u32 pad1; |
123 | u32 odqdpar; | 148 | u32 odqdpar; /* 0xD_300C - Outbound message 0 descriptor queue |
149 | dequeue pointer address register */ | ||
124 | u32 pad2; | 150 | u32 pad2; |
125 | u32 osar; | 151 | u32 osar; /* 0xD_3014 - Outbound message 0 source address |
126 | u32 odpr; | 152 | register */ |
127 | u32 odatr; | 153 | u32 odpr; /* 0xD_3018 - Outbound message 0 destination port |
128 | u32 odcr; | 154 | register */ |
155 | u32 odatr; /* 0xD_301C - Outbound message 0 destination attributes | ||
156 | Register*/ | ||
157 | u32 odcr; /* 0xD_3020 - Outbound message 0 double-word count | ||
158 | register */ | ||
129 | u32 pad3; | 159 | u32 pad3; |
130 | u32 odqepar; | 160 | u32 odqepar; /* 0xD_3028 - Outbound message 0 descriptor queue |
161 | enqueue pointer address register */ | ||
131 | u32 pad4[13]; | 162 | u32 pad4[13]; |
132 | u32 imr; | 163 | u32 imr; /* 0xD_3060 - Inbound message 0 mode register */ |
133 | u32 isr; | 164 | u32 isr; /* 0xD_3064 - Inbound message 0 status register */ |
134 | u32 pad5; | 165 | u32 pad5; |
135 | u32 ifqdpar; | 166 | u32 ifqdpar; /* 0xD_306C - Inbound message 0 frame queue dequeue |
167 | pointer address register*/ | ||
136 | u32 pad6; | 168 | u32 pad6; |
137 | u32 ifqepar; | 169 | u32 ifqepar; /* 0xD_3074 - Inbound message 0 frame queue enqueue |
170 | pointer address register */ | ||
138 | u32 pad7[226]; | 171 | u32 pad7[226]; |
139 | u32 odmr; | 172 | u32 odmr; /* 0xD_3400 - Outbound doorbell mode register */ |
140 | u32 odsr; | 173 | u32 odsr; /* 0xD_3404 - Outbound doorbell status register */ |
141 | u32 res0[4]; | 174 | u32 res0[4]; |
142 | u32 oddpr; | 175 | u32 oddpr; /* 0xD_3418 - Outbound doorbell destination port |
143 | u32 oddatr; | 176 | register */ |
177 | u32 oddatr; /* 0xD_341c - Outbound doorbell destination attributes | ||
178 | register */ | ||
144 | u32 res1[3]; | 179 | u32 res1[3]; |
145 | u32 odretcr; | 180 | u32 odretcr; /* 0xD_342C - Outbound doorbell retry error threshold |
181 | configuration register */ | ||
146 | u32 res2[12]; | 182 | u32 res2[12]; |
147 | u32 dmr; | 183 | u32 dmr; /* 0xD_3460 - Inbound doorbell mode register */ |
148 | u32 dsr; | 184 | u32 dsr; /* 0xD_3464 - Inbound doorbell status register */ |
149 | u32 pad8; | 185 | u32 pad8; |
150 | u32 dqdpar; | 186 | u32 dqdpar; /* 0xD_346C - Inbound doorbell queue dequeue Pointer |
187 | address register */ | ||
151 | u32 pad9; | 188 | u32 pad9; |
152 | u32 dqepar; | 189 | u32 dqepar; /* 0xD_3474 - Inbound doorbell Queue enqueue pointer |
190 | address register */ | ||
153 | u32 pad10[26]; | 191 | u32 pad10[26]; |
154 | u32 pwmr; | 192 | u32 pwmr; /* 0xD_34E0 - Inbound port-write mode register */ |
155 | u32 pwsr; | 193 | u32 pwsr; /* 0xD_34E4 - Inbound port-write status register */ |
156 | u32 epwqbar; | 194 | u32 epwqbar; /* 0xD_34E8 - Extended Port-Write Queue Base Address |
157 | u32 pwqbar; | 195 | register */ |
196 | u32 pwqbar; /* 0xD_34EC - Inbound port-write queue base address | ||
197 | register */ | ||
158 | }; | 198 | }; |
159 | 199 | ||
160 | struct rio_tx_desc { | 200 | struct rio_tx_desc { |
@@ -241,35 +281,32 @@ struct rio_priv { | |||
241 | static void __iomem *rio_regs_win; | 281 | static void __iomem *rio_regs_win; |
242 | 282 | ||
243 | #ifdef CONFIG_E500 | 283 | #ifdef CONFIG_E500 |
244 | static int (*saved_mcheck_exception)(struct pt_regs *regs); | 284 | int fsl_rio_mcheck_exception(struct pt_regs *regs) |
245 | |||
246 | static int fsl_rio_mcheck_exception(struct pt_regs *regs) | ||
247 | { | 285 | { |
248 | const struct exception_table_entry *entry = NULL; | 286 | const struct exception_table_entry *entry; |
249 | unsigned long reason = mfspr(SPRN_MCSR); | 287 | unsigned long reason; |
250 | 288 | ||
251 | if (reason & MCSR_BUS_RBERR) { | 289 | if (!rio_regs_win) |
252 | reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); | 290 | return 0; |
253 | if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) { | 291 | |
254 | /* Check if we are prepared to handle this fault */ | 292 | reason = in_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR)); |
255 | entry = search_exception_tables(regs->nip); | 293 | if (reason & (RIO_LTLEDCSR_IER | RIO_LTLEDCSR_PRT)) { |
256 | if (entry) { | 294 | /* Check if we are prepared to handle this fault */ |
257 | pr_debug("RIO: %s - MC Exception handled\n", | 295 | entry = search_exception_tables(regs->nip); |
258 | __func__); | 296 | if (entry) { |
259 | out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), | 297 | pr_debug("RIO: %s - MC Exception handled\n", |
260 | 0); | 298 | __func__); |
261 | regs->msr |= MSR_RI; | 299 | out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), |
262 | regs->nip = entry->fixup; | 300 | 0); |
263 | return 1; | 301 | regs->msr |= MSR_RI; |
264 | } | 302 | regs->nip = entry->fixup; |
303 | return 1; | ||
265 | } | 304 | } |
266 | } | 305 | } |
267 | 306 | ||
268 | if (saved_mcheck_exception) | 307 | return 0; |
269 | return saved_mcheck_exception(regs); | ||
270 | else | ||
271 | return cur_cpu_spec->machine_check(regs); | ||
272 | } | 308 | } |
309 | EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception); | ||
273 | #endif | 310 | #endif |
274 | 311 | ||
275 | /** | 312 | /** |
@@ -463,7 +500,7 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, | |||
463 | } | 500 | } |
464 | 501 | ||
465 | /** | 502 | /** |
466 | * rio_hw_add_outb_message - Add message to the MPC85xx outbound message queue | 503 | * fsl_add_outb_message - Add message to the MPC85xx outbound message queue |
467 | * @mport: Master port with outbound message queue | 504 | * @mport: Master port with outbound message queue |
468 | * @rdev: Target of outbound message | 505 | * @rdev: Target of outbound message |
469 | * @mbox: Outbound mailbox | 506 | * @mbox: Outbound mailbox |
@@ -473,8 +510,8 @@ fsl_rio_config_write(struct rio_mport *mport, int index, u16 destid, | |||
473 | * Adds the @buffer message to the MPC85xx outbound message queue. Returns | 510 | * Adds the @buffer message to the MPC85xx outbound message queue. Returns |
474 | * %0 on success or %-EINVAL on failure. | 511 | * %0 on success or %-EINVAL on failure. |
475 | */ | 512 | */ |
476 | int | 513 | static int |
477 | rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, | 514 | fsl_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, |
478 | void *buffer, size_t len) | 515 | void *buffer, size_t len) |
479 | { | 516 | { |
480 | struct rio_priv *priv = mport->priv; | 517 | struct rio_priv *priv = mport->priv; |
@@ -483,9 +520,8 @@ rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, | |||
483 | + priv->msg_tx_ring.tx_slot; | 520 | + priv->msg_tx_ring.tx_slot; |
484 | int ret = 0; | 521 | int ret = 0; |
485 | 522 | ||
486 | pr_debug | 523 | pr_debug("RIO: fsl_add_outb_message(): destid %4.4x mbox %d buffer " \ |
487 | ("RIO: rio_hw_add_outb_message(): destid %4.4x mbox %d buffer %8.8x len %8.8x\n", | 524 | "%8.8x len %8.8x\n", rdev->destid, mbox, (int)buffer, len); |
488 | rdev->destid, mbox, (int)buffer, len); | ||
489 | 525 | ||
490 | if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) { | 526 | if ((len < 8) || (len > RIO_MAX_MSG_SIZE)) { |
491 | ret = -EINVAL; | 527 | ret = -EINVAL; |
@@ -535,8 +571,6 @@ rio_hw_add_outb_message(struct rio_mport *mport, struct rio_dev *rdev, int mbox, | |||
535 | return ret; | 571 | return ret; |
536 | } | 572 | } |
537 | 573 | ||
538 | EXPORT_SYMBOL_GPL(rio_hw_add_outb_message); | ||
539 | |||
540 | /** | 574 | /** |
541 | * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler | 575 | * fsl_rio_tx_handler - MPC85xx outbound message interrupt handler |
542 | * @irq: Linux interrupt number | 576 | * @irq: Linux interrupt number |
@@ -581,7 +615,7 @@ fsl_rio_tx_handler(int irq, void *dev_instance) | |||
581 | } | 615 | } |
582 | 616 | ||
583 | /** | 617 | /** |
584 | * rio_open_outb_mbox - Initialize MPC85xx outbound mailbox | 618 | * fsl_open_outb_mbox - Initialize MPC85xx outbound mailbox |
585 | * @mport: Master port implementing the outbound message unit | 619 | * @mport: Master port implementing the outbound message unit |
586 | * @dev_id: Device specific pointer to pass on event | 620 | * @dev_id: Device specific pointer to pass on event |
587 | * @mbox: Mailbox to open | 621 | * @mbox: Mailbox to open |
@@ -591,7 +625,8 @@ fsl_rio_tx_handler(int irq, void *dev_instance) | |||
591 | * and enables the outbound message unit. Returns %0 on success and | 625 | * and enables the outbound message unit. Returns %0 on success and |
592 | * %-EINVAL or %-ENOMEM on failure. | 626 | * %-EINVAL or %-ENOMEM on failure. |
593 | */ | 627 | */ |
594 | int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) | 628 | static int |
629 | fsl_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) | ||
595 | { | 630 | { |
596 | int i, j, rc = 0; | 631 | int i, j, rc = 0; |
597 | struct rio_priv *priv = mport->priv; | 632 | struct rio_priv *priv = mport->priv; |
@@ -687,14 +722,14 @@ int rio_open_outb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entr | |||
687 | } | 722 | } |
688 | 723 | ||
689 | /** | 724 | /** |
690 | * rio_close_outb_mbox - Shut down MPC85xx outbound mailbox | 725 | * fsl_close_outb_mbox - Shut down MPC85xx outbound mailbox |
691 | * @mport: Master port implementing the outbound message unit | 726 | * @mport: Master port implementing the outbound message unit |
692 | * @mbox: Mailbox to close | 727 | * @mbox: Mailbox to close |
693 | * | 728 | * |
694 | * Disables the outbound message unit, free all buffers, and | 729 | * Disables the outbound message unit, free all buffers, and |
695 | * frees the outbound message interrupt. | 730 | * frees the outbound message interrupt. |
696 | */ | 731 | */ |
697 | void rio_close_outb_mbox(struct rio_mport *mport, int mbox) | 732 | static void fsl_close_outb_mbox(struct rio_mport *mport, int mbox) |
698 | { | 733 | { |
699 | struct rio_priv *priv = mport->priv; | 734 | struct rio_priv *priv = mport->priv; |
700 | /* Disable inbound message unit */ | 735 | /* Disable inbound message unit */ |
@@ -751,7 +786,7 @@ fsl_rio_rx_handler(int irq, void *dev_instance) | |||
751 | } | 786 | } |
752 | 787 | ||
753 | /** | 788 | /** |
754 | * rio_open_inb_mbox - Initialize MPC85xx inbound mailbox | 789 | * fsl_open_inb_mbox - Initialize MPC85xx inbound mailbox |
755 | * @mport: Master port implementing the inbound message unit | 790 | * @mport: Master port implementing the inbound message unit |
756 | * @dev_id: Device specific pointer to pass on event | 791 | * @dev_id: Device specific pointer to pass on event |
757 | * @mbox: Mailbox to open | 792 | * @mbox: Mailbox to open |
@@ -761,7 +796,8 @@ fsl_rio_rx_handler(int irq, void *dev_instance) | |||
761 | * and enables the inbound message unit. Returns %0 on success | 796 | * and enables the inbound message unit. Returns %0 on success |
762 | * and %-EINVAL or %-ENOMEM on failure. | 797 | * and %-EINVAL or %-ENOMEM on failure. |
763 | */ | 798 | */ |
764 | int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) | 799 | static int |
800 | fsl_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entries) | ||
765 | { | 801 | { |
766 | int i, rc = 0; | 802 | int i, rc = 0; |
767 | struct rio_priv *priv = mport->priv; | 803 | struct rio_priv *priv = mport->priv; |
@@ -825,14 +861,14 @@ int rio_open_inb_mbox(struct rio_mport *mport, void *dev_id, int mbox, int entri | |||
825 | } | 861 | } |
826 | 862 | ||
827 | /** | 863 | /** |
828 | * rio_close_inb_mbox - Shut down MPC85xx inbound mailbox | 864 | * fsl_close_inb_mbox - Shut down MPC85xx inbound mailbox |
829 | * @mport: Master port implementing the inbound message unit | 865 | * @mport: Master port implementing the inbound message unit |
830 | * @mbox: Mailbox to close | 866 | * @mbox: Mailbox to close |
831 | * | 867 | * |
832 | * Disables the inbound message unit, free all buffers, and | 868 | * Disables the inbound message unit, free all buffers, and |
833 | * frees the inbound message interrupt. | 869 | * frees the inbound message interrupt. |
834 | */ | 870 | */ |
835 | void rio_close_inb_mbox(struct rio_mport *mport, int mbox) | 871 | static void fsl_close_inb_mbox(struct rio_mport *mport, int mbox) |
836 | { | 872 | { |
837 | struct rio_priv *priv = mport->priv; | 873 | struct rio_priv *priv = mport->priv; |
838 | /* Disable inbound message unit */ | 874 | /* Disable inbound message unit */ |
@@ -847,7 +883,7 @@ void rio_close_inb_mbox(struct rio_mport *mport, int mbox) | |||
847 | } | 883 | } |
848 | 884 | ||
849 | /** | 885 | /** |
850 | * rio_hw_add_inb_buffer - Add buffer to the MPC85xx inbound message queue | 886 | * fsl_add_inb_buffer - Add buffer to the MPC85xx inbound message queue |
851 | * @mport: Master port implementing the inbound message unit | 887 | * @mport: Master port implementing the inbound message unit |
852 | * @mbox: Inbound mailbox number | 888 | * @mbox: Inbound mailbox number |
853 | * @buf: Buffer to add to inbound queue | 889 | * @buf: Buffer to add to inbound queue |
@@ -855,12 +891,12 @@ void rio_close_inb_mbox(struct rio_mport *mport, int mbox) | |||
855 | * Adds the @buf buffer to the MPC85xx inbound message queue. Returns | 891 | * Adds the @buf buffer to the MPC85xx inbound message queue. Returns |
856 | * %0 on success or %-EINVAL on failure. | 892 | * %0 on success or %-EINVAL on failure. |
857 | */ | 893 | */ |
858 | int rio_hw_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) | 894 | static int fsl_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) |
859 | { | 895 | { |
860 | int rc = 0; | 896 | int rc = 0; |
861 | struct rio_priv *priv = mport->priv; | 897 | struct rio_priv *priv = mport->priv; |
862 | 898 | ||
863 | pr_debug("RIO: rio_hw_add_inb_buffer(), msg_rx_ring.rx_slot %d\n", | 899 | pr_debug("RIO: fsl_add_inb_buffer(), msg_rx_ring.rx_slot %d\n", |
864 | priv->msg_rx_ring.rx_slot); | 900 | priv->msg_rx_ring.rx_slot); |
865 | 901 | ||
866 | if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) { | 902 | if (priv->msg_rx_ring.virt_buffer[priv->msg_rx_ring.rx_slot]) { |
@@ -879,17 +915,15 @@ int rio_hw_add_inb_buffer(struct rio_mport *mport, int mbox, void *buf) | |||
879 | return rc; | 915 | return rc; |
880 | } | 916 | } |
881 | 917 | ||
882 | EXPORT_SYMBOL_GPL(rio_hw_add_inb_buffer); | ||
883 | |||
884 | /** | 918 | /** |
885 | * rio_hw_get_inb_message - Fetch inbound message from the MPC85xx message unit | 919 | * fsl_get_inb_message - Fetch inbound message from the MPC85xx message unit |
886 | * @mport: Master port implementing the inbound message unit | 920 | * @mport: Master port implementing the inbound message unit |
887 | * @mbox: Inbound mailbox number | 921 | * @mbox: Inbound mailbox number |
888 | * | 922 | * |
889 | * Gets the next available inbound message from the inbound message queue. | 923 | * Gets the next available inbound message from the inbound message queue. |
890 | * A pointer to the message is returned on success or NULL on failure. | 924 | * A pointer to the message is returned on success or NULL on failure. |
891 | */ | 925 | */ |
892 | void *rio_hw_get_inb_message(struct rio_mport *mport, int mbox) | 926 | static void *fsl_get_inb_message(struct rio_mport *mport, int mbox) |
893 | { | 927 | { |
894 | struct rio_priv *priv = mport->priv; | 928 | struct rio_priv *priv = mport->priv; |
895 | u32 phys_buf, virt_buf; | 929 | u32 phys_buf, virt_buf; |
@@ -926,8 +960,6 @@ void *rio_hw_get_inb_message(struct rio_mport *mport, int mbox) | |||
926 | return buf; | 960 | return buf; |
927 | } | 961 | } |
928 | 962 | ||
929 | EXPORT_SYMBOL_GPL(rio_hw_get_inb_message); | ||
930 | |||
931 | /** | 963 | /** |
932 | * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler | 964 | * fsl_rio_dbell_handler - MPC85xx doorbell interrupt handler |
933 | * @irq: Linux interrupt number | 965 | * @irq: Linux interrupt number |
@@ -954,7 +986,6 @@ fsl_rio_dbell_handler(int irq, void *dev_instance) | |||
954 | if (dsr & DOORBELL_DSR_QFI) { | 986 | if (dsr & DOORBELL_DSR_QFI) { |
955 | pr_info("RIO: doorbell queue full\n"); | 987 | pr_info("RIO: doorbell queue full\n"); |
956 | out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI); | 988 | out_be32(&priv->msg_regs->dsr, DOORBELL_DSR_QFI); |
957 | goto out; | ||
958 | } | 989 | } |
959 | 990 | ||
960 | /* XXX Need to check/dispatch until queue empty */ | 991 | /* XXX Need to check/dispatch until queue empty */ |
@@ -1051,6 +1082,40 @@ static int fsl_rio_doorbell_init(struct rio_mport *mport) | |||
1051 | return rc; | 1082 | return rc; |
1052 | } | 1083 | } |
1053 | 1084 | ||
1085 | static void port_error_handler(struct rio_mport *port, int offset) | ||
1086 | { | ||
1087 | /*XXX: Error recovery is not implemented, we just clear errors */ | ||
1088 | out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); | ||
1089 | |||
1090 | if (offset == 0) { | ||
1091 | out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0); | ||
1092 | out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), 0); | ||
1093 | out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR); | ||
1094 | } else { | ||
1095 | out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0); | ||
1096 | out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), 0); | ||
1097 | out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR); | ||
1098 | } | ||
1099 | } | ||
1100 | |||
1101 | static void msg_unit_error_handler(struct rio_mport *port) | ||
1102 | { | ||
1103 | struct rio_priv *priv = port->priv; | ||
1104 | |||
1105 | /*XXX: Error recovery is not implemented, we just clear errors */ | ||
1106 | out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); | ||
1107 | |||
1108 | out_be32((u32 *)(rio_regs_win + RIO_IM0SR), IMSR_CLEAR); | ||
1109 | out_be32((u32 *)(rio_regs_win + RIO_IM1SR), IMSR_CLEAR); | ||
1110 | out_be32((u32 *)(rio_regs_win + RIO_OM0SR), OMSR_CLEAR); | ||
1111 | out_be32((u32 *)(rio_regs_win + RIO_OM1SR), OMSR_CLEAR); | ||
1112 | |||
1113 | out_be32(&priv->msg_regs->odsr, ODSR_CLEAR); | ||
1114 | out_be32(&priv->msg_regs->dsr, IDSR_CLEAR); | ||
1115 | |||
1116 | out_be32(&priv->msg_regs->pwsr, IPWSR_CLEAR); | ||
1117 | } | ||
1118 | |||
1054 | /** | 1119 | /** |
1055 | * fsl_rio_port_write_handler - MPC85xx port write interrupt handler | 1120 | * fsl_rio_port_write_handler - MPC85xx port write interrupt handler |
1056 | * @irq: Linux interrupt number | 1121 | * @irq: Linux interrupt number |
@@ -1067,18 +1132,12 @@ fsl_rio_port_write_handler(int irq, void *dev_instance) | |||
1067 | struct rio_priv *priv = port->priv; | 1132 | struct rio_priv *priv = port->priv; |
1068 | u32 epwisr, tmp; | 1133 | u32 epwisr, tmp; |
1069 | 1134 | ||
1070 | ipwmr = in_be32(&priv->msg_regs->pwmr); | ||
1071 | ipwsr = in_be32(&priv->msg_regs->pwsr); | ||
1072 | |||
1073 | epwisr = in_be32(priv->regs_win + RIO_EPWISR); | 1135 | epwisr = in_be32(priv->regs_win + RIO_EPWISR); |
1074 | if (epwisr & 0x80000000) { | 1136 | if (!(epwisr & RIO_EPWISR_PW)) |
1075 | tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); | 1137 | goto pw_done; |
1076 | pr_info("RIO_LTLEDCSR = 0x%x\n", tmp); | ||
1077 | out_be32(priv->regs_win + RIO_LTLEDCSR, 0); | ||
1078 | } | ||
1079 | 1138 | ||
1080 | if (!(epwisr & 0x00000001)) | 1139 | ipwmr = in_be32(&priv->msg_regs->pwmr); |
1081 | return IRQ_HANDLED; | 1140 | ipwsr = in_be32(&priv->msg_regs->pwsr); |
1082 | 1141 | ||
1083 | #ifdef DEBUG_PW | 1142 | #ifdef DEBUG_PW |
1084 | pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr); | 1143 | pr_debug("PW Int->IPWMR: 0x%08x IPWSR: 0x%08x (", ipwmr, ipwsr); |
@@ -1094,20 +1153,6 @@ fsl_rio_port_write_handler(int irq, void *dev_instance) | |||
1094 | pr_debug(" PWB"); | 1153 | pr_debug(" PWB"); |
1095 | pr_debug(" )\n"); | 1154 | pr_debug(" )\n"); |
1096 | #endif | 1155 | #endif |
1097 | out_be32(&priv->msg_regs->pwsr, | ||
1098 | ipwsr & (RIO_IPWSR_TE | RIO_IPWSR_QFI | RIO_IPWSR_PWD)); | ||
1099 | |||
1100 | if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) { | ||
1101 | priv->port_write_msg.err_count++; | ||
1102 | pr_info("RIO: Port-Write Transaction Err (%d)\n", | ||
1103 | priv->port_write_msg.err_count); | ||
1104 | } | ||
1105 | if (ipwsr & RIO_IPWSR_PWD) { | ||
1106 | priv->port_write_msg.discard_count++; | ||
1107 | pr_info("RIO: Port Discarded Port-Write Msg(s) (%d)\n", | ||
1108 | priv->port_write_msg.discard_count); | ||
1109 | } | ||
1110 | |||
1111 | /* Schedule deferred processing if PW was received */ | 1156 | /* Schedule deferred processing if PW was received */ |
1112 | if (ipwsr & RIO_IPWSR_QFI) { | 1157 | if (ipwsr & RIO_IPWSR_QFI) { |
1113 | /* Save PW message (if there is room in FIFO), | 1158 | /* Save PW message (if there is room in FIFO), |
@@ -1119,16 +1164,55 @@ fsl_rio_port_write_handler(int irq, void *dev_instance) | |||
1119 | RIO_PW_MSG_SIZE); | 1164 | RIO_PW_MSG_SIZE); |
1120 | } else { | 1165 | } else { |
1121 | priv->port_write_msg.discard_count++; | 1166 | priv->port_write_msg.discard_count++; |
1122 | pr_info("RIO: ISR Discarded Port-Write Msg(s) (%d)\n", | 1167 | pr_debug("RIO: ISR Discarded Port-Write Msg(s) (%d)\n", |
1123 | priv->port_write_msg.discard_count); | 1168 | priv->port_write_msg.discard_count); |
1124 | } | 1169 | } |
1170 | /* Clear interrupt and issue Clear Queue command. This allows | ||
1171 | * another port-write to be received. | ||
1172 | */ | ||
1173 | out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_QFI); | ||
1174 | out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ); | ||
1175 | |||
1125 | schedule_work(&priv->pw_work); | 1176 | schedule_work(&priv->pw_work); |
1126 | } | 1177 | } |
1127 | 1178 | ||
1128 | /* Issue Clear Queue command. This allows another | 1179 | if ((ipwmr & RIO_IPWMR_EIE) && (ipwsr & RIO_IPWSR_TE)) { |
1129 | * port-write to be received. | 1180 | priv->port_write_msg.err_count++; |
1130 | */ | 1181 | pr_debug("RIO: Port-Write Transaction Err (%d)\n", |
1131 | out_be32(&priv->msg_regs->pwmr, ipwmr | RIO_IPWMR_CQ); | 1182 | priv->port_write_msg.err_count); |
1183 | /* Clear Transaction Error: port-write controller should be | ||
1184 | * disabled when clearing this error | ||
1185 | */ | ||
1186 | out_be32(&priv->msg_regs->pwmr, ipwmr & ~RIO_IPWMR_PWE); | ||
1187 | out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_TE); | ||
1188 | out_be32(&priv->msg_regs->pwmr, ipwmr); | ||
1189 | } | ||
1190 | |||
1191 | if (ipwsr & RIO_IPWSR_PWD) { | ||
1192 | priv->port_write_msg.discard_count++; | ||
1193 | pr_debug("RIO: Port Discarded Port-Write Msg(s) (%d)\n", | ||
1194 | priv->port_write_msg.discard_count); | ||
1195 | out_be32(&priv->msg_regs->pwsr, RIO_IPWSR_PWD); | ||
1196 | } | ||
1197 | |||
1198 | pw_done: | ||
1199 | if (epwisr & RIO_EPWISR_PINT1) { | ||
1200 | tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); | ||
1201 | pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); | ||
1202 | port_error_handler(port, 0); | ||
1203 | } | ||
1204 | |||
1205 | if (epwisr & RIO_EPWISR_PINT2) { | ||
1206 | tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); | ||
1207 | pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); | ||
1208 | port_error_handler(port, 1); | ||
1209 | } | ||
1210 | |||
1211 | if (epwisr & RIO_EPWISR_MU) { | ||
1212 | tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); | ||
1213 | pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); | ||
1214 | msg_unit_error_handler(port); | ||
1215 | } | ||
1132 | 1216 | ||
1133 | return IRQ_HANDLED; | 1217 | return IRQ_HANDLED; |
1134 | } | 1218 | } |
@@ -1238,12 +1322,14 @@ static int fsl_rio_port_write_init(struct rio_mport *mport) | |||
1238 | 1322 | ||
1239 | 1323 | ||
1240 | /* Hook up port-write handler */ | 1324 | /* Hook up port-write handler */ |
1241 | rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0, | 1325 | rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, |
1242 | "port-write", (void *)mport); | 1326 | IRQF_SHARED, "port-write", (void *)mport); |
1243 | if (rc < 0) { | 1327 | if (rc < 0) { |
1244 | pr_err("MPC85xx RIO: unable to request inbound doorbell irq"); | 1328 | pr_err("MPC85xx RIO: unable to request inbound doorbell irq"); |
1245 | goto err_out; | 1329 | goto err_out; |
1246 | } | 1330 | } |
1331 | /* Enable Error Interrupt */ | ||
1332 | out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL); | ||
1247 | 1333 | ||
1248 | INIT_WORK(&priv->pw_work, fsl_pw_dpc); | 1334 | INIT_WORK(&priv->pw_work, fsl_pw_dpc); |
1249 | spin_lock_init(&priv->pw_fifo_lock); | 1335 | spin_lock_init(&priv->pw_fifo_lock); |
@@ -1268,28 +1354,6 @@ err_out: | |||
1268 | return rc; | 1354 | return rc; |
1269 | } | 1355 | } |
1270 | 1356 | ||
1271 | static char *cmdline = NULL; | ||
1272 | |||
1273 | static int fsl_rio_get_hdid(int index) | ||
1274 | { | ||
1275 | /* XXX Need to parse multiple entries in some format */ | ||
1276 | if (!cmdline) | ||
1277 | return -1; | ||
1278 | |||
1279 | return simple_strtol(cmdline, NULL, 0); | ||
1280 | } | ||
1281 | |||
1282 | static int fsl_rio_get_cmdline(char *s) | ||
1283 | { | ||
1284 | if (!s) | ||
1285 | return 0; | ||
1286 | |||
1287 | cmdline = s; | ||
1288 | return 1; | ||
1289 | } | ||
1290 | |||
1291 | __setup("riohdid=", fsl_rio_get_cmdline); | ||
1292 | |||
1293 | static inline void fsl_rio_info(struct device *dev, u32 ccsr) | 1357 | static inline void fsl_rio_info(struct device *dev, u32 ccsr) |
1294 | { | 1358 | { |
1295 | const char *str; | 1359 | const char *str; |
@@ -1406,13 +1470,19 @@ int fsl_rio_setup(struct platform_device *dev) | |||
1406 | ops->cwrite = fsl_rio_config_write; | 1470 | ops->cwrite = fsl_rio_config_write; |
1407 | ops->dsend = fsl_rio_doorbell_send; | 1471 | ops->dsend = fsl_rio_doorbell_send; |
1408 | ops->pwenable = fsl_rio_pw_enable; | 1472 | ops->pwenable = fsl_rio_pw_enable; |
1473 | ops->open_outb_mbox = fsl_open_outb_mbox; | ||
1474 | ops->open_inb_mbox = fsl_open_inb_mbox; | ||
1475 | ops->close_outb_mbox = fsl_close_outb_mbox; | ||
1476 | ops->close_inb_mbox = fsl_close_inb_mbox; | ||
1477 | ops->add_outb_message = fsl_add_outb_message; | ||
1478 | ops->add_inb_buffer = fsl_add_inb_buffer; | ||
1479 | ops->get_inb_message = fsl_get_inb_message; | ||
1409 | 1480 | ||
1410 | port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); | 1481 | port = kzalloc(sizeof(struct rio_mport), GFP_KERNEL); |
1411 | if (!port) { | 1482 | if (!port) { |
1412 | rc = -ENOMEM; | 1483 | rc = -ENOMEM; |
1413 | goto err_port; | 1484 | goto err_port; |
1414 | } | 1485 | } |
1415 | port->id = 0; | ||
1416 | port->index = 0; | 1486 | port->index = 0; |
1417 | 1487 | ||
1418 | priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL); | 1488 | priv = kzalloc(sizeof(struct rio_priv), GFP_KERNEL); |
@@ -1428,6 +1498,14 @@ int fsl_rio_setup(struct platform_device *dev) | |||
1428 | port->iores.flags = IORESOURCE_MEM; | 1498 | port->iores.flags = IORESOURCE_MEM; |
1429 | port->iores.name = "rio_io_win"; | 1499 | port->iores.name = "rio_io_win"; |
1430 | 1500 | ||
1501 | if (request_resource(&iomem_resource, &port->iores) < 0) { | ||
1502 | dev_err(&dev->dev, "RIO: Error requesting master port region" | ||
1503 | " 0x%016llx-0x%016llx\n", | ||
1504 | (u64)port->iores.start, (u64)port->iores.end); | ||
1505 | rc = -ENOMEM; | ||
1506 | goto err_res; | ||
1507 | } | ||
1508 | |||
1431 | priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0); | 1509 | priv->pwirq = irq_of_parse_and_map(dev->dev.of_node, 0); |
1432 | priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); | 1510 | priv->bellirq = irq_of_parse_and_map(dev->dev.of_node, 2); |
1433 | priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); | 1511 | priv->txirq = irq_of_parse_and_map(dev->dev.of_node, 3); |
@@ -1443,10 +1521,8 @@ int fsl_rio_setup(struct platform_device *dev) | |||
1443 | priv->dev = &dev->dev; | 1521 | priv->dev = &dev->dev; |
1444 | 1522 | ||
1445 | port->ops = ops; | 1523 | port->ops = ops; |
1446 | port->host_deviceid = fsl_rio_get_hdid(port->id); | ||
1447 | |||
1448 | port->priv = priv; | 1524 | port->priv = priv; |
1449 | rio_register_mport(port); | 1525 | port->phys_efptr = 0x100; |
1450 | 1526 | ||
1451 | priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); | 1527 | priv->regs_win = ioremap(regs.start, regs.end - regs.start + 1); |
1452 | rio_regs_win = priv->regs_win; | 1528 | rio_regs_win = priv->regs_win; |
@@ -1493,6 +1569,15 @@ int fsl_rio_setup(struct platform_device *dev) | |||
1493 | dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", | 1569 | dev_info(&dev->dev, "RapidIO Common Transport System size: %d\n", |
1494 | port->sys_size ? 65536 : 256); | 1570 | port->sys_size ? 65536 : 256); |
1495 | 1571 | ||
1572 | if (rio_register_mport(port)) | ||
1573 | goto err; | ||
1574 | |||
1575 | if (port->host_deviceid >= 0) | ||
1576 | out_be32(priv->regs_win + RIO_GCCSR, RIO_PORT_GEN_HOST | | ||
1577 | RIO_PORT_GEN_MASTER | RIO_PORT_GEN_DISCOVERED); | ||
1578 | else | ||
1579 | out_be32(priv->regs_win + RIO_GCCSR, 0x00000000); | ||
1580 | |||
1496 | priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win | 1581 | priv->atmu_regs = (struct rio_atmu_regs *)(priv->regs_win |
1497 | + RIO_ATMU_REGS_OFFSET); | 1582 | + RIO_ATMU_REGS_OFFSET); |
1498 | priv->maint_atmu_regs = priv->atmu_regs + 1; | 1583 | priv->maint_atmu_regs = priv->atmu_regs + 1; |
@@ -1519,16 +1604,10 @@ int fsl_rio_setup(struct platform_device *dev) | |||
1519 | fsl_rio_doorbell_init(port); | 1604 | fsl_rio_doorbell_init(port); |
1520 | fsl_rio_port_write_init(port); | 1605 | fsl_rio_port_write_init(port); |
1521 | 1606 | ||
1522 | #ifdef CONFIG_E500 | ||
1523 | saved_mcheck_exception = ppc_md.machine_check_exception; | ||
1524 | ppc_md.machine_check_exception = fsl_rio_mcheck_exception; | ||
1525 | #endif | ||
1526 | /* Ensure that RFXE is set */ | ||
1527 | mtspr(SPRN_HID1, (mfspr(SPRN_HID1) | 0x20000)); | ||
1528 | |||
1529 | return 0; | 1607 | return 0; |
1530 | err: | 1608 | err: |
1531 | iounmap(priv->regs_win); | 1609 | iounmap(priv->regs_win); |
1610 | err_res: | ||
1532 | kfree(priv); | 1611 | kfree(priv); |
1533 | err_priv: | 1612 | err_priv: |
1534 | kfree(port); | 1613 | kfree(port); |
@@ -1540,21 +1619,12 @@ err_ops: | |||
1540 | 1619 | ||
1541 | /* The probe function for RapidIO peer-to-peer network. | 1620 | /* The probe function for RapidIO peer-to-peer network. |
1542 | */ | 1621 | */ |
1543 | static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev, | 1622 | static int __devinit fsl_of_rio_rpn_probe(struct platform_device *dev) |
1544 | const struct of_device_id *match) | ||
1545 | { | 1623 | { |
1546 | int rc; | ||
1547 | printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", | 1624 | printk(KERN_INFO "Setting up RapidIO peer-to-peer network %s\n", |
1548 | dev->dev.of_node->full_name); | 1625 | dev->dev.of_node->full_name); |
1549 | 1626 | ||
1550 | rc = fsl_rio_setup(dev); | 1627 | return fsl_rio_setup(dev); |
1551 | if (rc) | ||
1552 | goto out; | ||
1553 | |||
1554 | /* Enumerate all registered ports */ | ||
1555 | rc = rio_init_mports(); | ||
1556 | out: | ||
1557 | return rc; | ||
1558 | }; | 1628 | }; |
1559 | 1629 | ||
1560 | static const struct of_device_id fsl_of_rio_rpn_ids[] = { | 1630 | static const struct of_device_id fsl_of_rio_rpn_ids[] = { |
@@ -1564,7 +1634,7 @@ static const struct of_device_id fsl_of_rio_rpn_ids[] = { | |||
1564 | {}, | 1634 | {}, |
1565 | }; | 1635 | }; |
1566 | 1636 | ||
1567 | static struct of_platform_driver fsl_of_rio_rpn_driver = { | 1637 | static struct platform_driver fsl_of_rio_rpn_driver = { |
1568 | .driver = { | 1638 | .driver = { |
1569 | .name = "fsl-of-rio", | 1639 | .name = "fsl-of-rio", |
1570 | .owner = THIS_MODULE, | 1640 | .owner = THIS_MODULE, |
@@ -1575,7 +1645,7 @@ static struct of_platform_driver fsl_of_rio_rpn_driver = { | |||
1575 | 1645 | ||
1576 | static __init int fsl_of_rio_rpn_init(void) | 1646 | static __init int fsl_of_rio_rpn_init(void) |
1577 | { | 1647 | { |
1578 | return of_register_platform_driver(&fsl_of_rio_rpn_driver); | 1648 | return platform_driver_register(&fsl_of_rio_rpn_driver); |
1579 | } | 1649 | } |
1580 | 1650 | ||
1581 | subsys_initcall(fsl_of_rio_rpn_init); | 1651 | subsys_initcall(fsl_of_rio_rpn_init); |
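
Note on the machine-check change in fsl_rio.c: the driver no longer saves and chains ppc_md.machine_check_exception; fsl_rio_mcheck_exception() is exported and simply reports whether it recovered the faulting access through the exception tables. A sketch of how a machine-check path could consult it; the real call site lives outside this file, and the foo_* function name below is a stand-in:

#include <asm/ptrace.h>
#include <asm/reg.h>

extern int fsl_rio_mcheck_exception(struct pt_regs *regs);

/* Hypothetical caller: on a read data bus error, let the RapidIO code
 * try an exception-table fixup before treating the check as fatal. */
static int foo_e500_machine_check(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

	if (reason & MCSR_BUS_RBERR)
		return fsl_rio_mcheck_exception(regs);

	return 0;	/* not recovered */
}
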
diff --git a/arch/powerpc/sysdev/fsl_soc.c b/arch/powerpc/sysdev/fsl_soc.c index b91f7acdda6f..19e5015e039b 100644 --- a/arch/powerpc/sysdev/fsl_soc.c +++ b/arch/powerpc/sysdev/fsl_soc.c | |||
@@ -209,186 +209,29 @@ static int __init of_add_fixed_phys(void) | |||
209 | arch_initcall(of_add_fixed_phys); | 209 | arch_initcall(of_add_fixed_phys); |
210 | #endif /* CONFIG_FIXED_PHY */ | 210 | #endif /* CONFIG_FIXED_PHY */ |
211 | 211 | ||
212 | static enum fsl_usb2_phy_modes determine_usb_phy(const char *phy_type) | 212 | #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) |
213 | { | 213 | static __be32 __iomem *rstcr; |
214 | if (!phy_type) | ||
215 | return FSL_USB2_PHY_NONE; | ||
216 | if (!strcasecmp(phy_type, "ulpi")) | ||
217 | return FSL_USB2_PHY_ULPI; | ||
218 | if (!strcasecmp(phy_type, "utmi")) | ||
219 | return FSL_USB2_PHY_UTMI; | ||
220 | if (!strcasecmp(phy_type, "utmi_wide")) | ||
221 | return FSL_USB2_PHY_UTMI_WIDE; | ||
222 | if (!strcasecmp(phy_type, "serial")) | ||
223 | return FSL_USB2_PHY_SERIAL; | ||
224 | |||
225 | return FSL_USB2_PHY_NONE; | ||
226 | } | ||
227 | 214 | ||
228 | static int __init fsl_usb_of_init(void) | 215 | static int __init setup_rstcr(void) |
229 | { | 216 | { |
230 | struct device_node *np; | 217 | struct device_node *np; |
231 | unsigned int i = 0; | ||
232 | struct platform_device *usb_dev_mph = NULL, *usb_dev_dr_host = NULL, | ||
233 | *usb_dev_dr_client = NULL; | ||
234 | int ret; | ||
235 | |||
236 | for_each_compatible_node(np, NULL, "fsl-usb2-mph") { | ||
237 | struct resource r[2]; | ||
238 | struct fsl_usb2_platform_data usb_data; | ||
239 | const unsigned char *prop = NULL; | ||
240 | |||
241 | memset(&r, 0, sizeof(r)); | ||
242 | memset(&usb_data, 0, sizeof(usb_data)); | ||
243 | |||
244 | ret = of_address_to_resource(np, 0, &r[0]); | ||
245 | if (ret) | ||
246 | goto err; | ||
247 | |||
248 | of_irq_to_resource(np, 0, &r[1]); | ||
249 | |||
250 | usb_dev_mph = | ||
251 | platform_device_register_simple("fsl-ehci", i, r, 2); | ||
252 | if (IS_ERR(usb_dev_mph)) { | ||
253 | ret = PTR_ERR(usb_dev_mph); | ||
254 | goto err; | ||
255 | } | ||
256 | |||
257 | usb_dev_mph->dev.coherent_dma_mask = 0xffffffffUL; | ||
258 | usb_dev_mph->dev.dma_mask = &usb_dev_mph->dev.coherent_dma_mask; | ||
259 | |||
260 | usb_data.operating_mode = FSL_USB2_MPH_HOST; | ||
261 | |||
262 | prop = of_get_property(np, "port0", NULL); | ||
263 | if (prop) | ||
264 | usb_data.port_enables |= FSL_USB2_PORT0_ENABLED; | ||
265 | |||
266 | prop = of_get_property(np, "port1", NULL); | ||
267 | if (prop) | ||
268 | usb_data.port_enables |= FSL_USB2_PORT1_ENABLED; | ||
269 | |||
270 | prop = of_get_property(np, "phy_type", NULL); | ||
271 | usb_data.phy_mode = determine_usb_phy(prop); | ||
272 | |||
273 | ret = | ||
274 | platform_device_add_data(usb_dev_mph, &usb_data, | ||
275 | sizeof(struct | ||
276 | fsl_usb2_platform_data)); | ||
277 | if (ret) | ||
278 | goto unreg_mph; | ||
279 | i++; | ||
280 | } | ||
281 | |||
282 | for_each_compatible_node(np, NULL, "fsl-usb2-dr") { | ||
283 | struct resource r[2]; | ||
284 | struct fsl_usb2_platform_data usb_data; | ||
285 | const unsigned char *prop = NULL; | ||
286 | 218 | ||
287 | if (!of_device_is_available(np)) | 219 | for_each_node_by_name(np, "global-utilities") { |
288 | continue; | 220 | if ((of_get_property(np, "fsl,has-rstcr", NULL))) { |
289 | 221 | rstcr = of_iomap(np, 0) + 0xb0; | |
290 | memset(&r, 0, sizeof(r)); | 222 | if (!rstcr) |
291 | memset(&usb_data, 0, sizeof(usb_data)); | 223 | printk (KERN_ERR "Error: reset control " |
292 | 224 | "register not mapped!\n"); | |
293 | ret = of_address_to_resource(np, 0, &r[0]); | 225 | break; |
294 | if (ret) | ||
295 | goto unreg_mph; | ||
296 | |||
297 | of_irq_to_resource(np, 0, &r[1]); | ||
298 | |||
299 | prop = of_get_property(np, "dr_mode", NULL); | ||
300 | |||
301 | if (!prop || !strcmp(prop, "host")) { | ||
302 | usb_data.operating_mode = FSL_USB2_DR_HOST; | ||
303 | usb_dev_dr_host = platform_device_register_simple( | ||
304 | "fsl-ehci", i, r, 2); | ||
305 | if (IS_ERR(usb_dev_dr_host)) { | ||
306 | ret = PTR_ERR(usb_dev_dr_host); | ||
307 | goto err; | ||
308 | } | ||
309 | } else if (prop && !strcmp(prop, "peripheral")) { | ||
310 | usb_data.operating_mode = FSL_USB2_DR_DEVICE; | ||
311 | usb_dev_dr_client = platform_device_register_simple( | ||
312 | "fsl-usb2-udc", i, r, 2); | ||
313 | if (IS_ERR(usb_dev_dr_client)) { | ||
314 | ret = PTR_ERR(usb_dev_dr_client); | ||
315 | goto err; | ||
316 | } | ||
317 | } else if (prop && !strcmp(prop, "otg")) { | ||
318 | usb_data.operating_mode = FSL_USB2_DR_OTG; | ||
319 | usb_dev_dr_host = platform_device_register_simple( | ||
320 | "fsl-ehci", i, r, 2); | ||
321 | if (IS_ERR(usb_dev_dr_host)) { | ||
322 | ret = PTR_ERR(usb_dev_dr_host); | ||
323 | goto err; | ||
324 | } | ||
325 | usb_dev_dr_client = platform_device_register_simple( | ||
326 | "fsl-usb2-udc", i, r, 2); | ||
327 | if (IS_ERR(usb_dev_dr_client)) { | ||
328 | ret = PTR_ERR(usb_dev_dr_client); | ||
329 | goto err; | ||
330 | } | ||
331 | } else { | ||
332 | ret = -EINVAL; | ||
333 | goto err; | ||
334 | } | 226 | } |
335 | |||
336 | prop = of_get_property(np, "phy_type", NULL); | ||
337 | usb_data.phy_mode = determine_usb_phy(prop); | ||
338 | |||
339 | if (usb_dev_dr_host) { | ||
340 | usb_dev_dr_host->dev.coherent_dma_mask = 0xffffffffUL; | ||
341 | usb_dev_dr_host->dev.dma_mask = &usb_dev_dr_host-> | ||
342 | dev.coherent_dma_mask; | ||
343 | if ((ret = platform_device_add_data(usb_dev_dr_host, | ||
344 | &usb_data, sizeof(struct | ||
345 | fsl_usb2_platform_data)))) | ||
346 | goto unreg_dr; | ||
347 | } | ||
348 | if (usb_dev_dr_client) { | ||
349 | usb_dev_dr_client->dev.coherent_dma_mask = 0xffffffffUL; | ||
350 | usb_dev_dr_client->dev.dma_mask = &usb_dev_dr_client-> | ||
351 | dev.coherent_dma_mask; | ||
352 | if ((ret = platform_device_add_data(usb_dev_dr_client, | ||
353 | &usb_data, sizeof(struct | ||
354 | fsl_usb2_platform_data)))) | ||
355 | goto unreg_dr; | ||
356 | } | ||
357 | i++; | ||
358 | } | 227 | } |
359 | return 0; | ||
360 | |||
361 | unreg_dr: | ||
362 | if (usb_dev_dr_host) | ||
363 | platform_device_unregister(usb_dev_dr_host); | ||
364 | if (usb_dev_dr_client) | ||
365 | platform_device_unregister(usb_dev_dr_client); | ||
366 | unreg_mph: | ||
367 | if (usb_dev_mph) | ||
368 | platform_device_unregister(usb_dev_mph); | ||
369 | err: | ||
370 | return ret; | ||
371 | } | ||
372 | |||
373 | arch_initcall(fsl_usb_of_init); | ||
374 | |||
375 | #if defined(CONFIG_FSL_SOC_BOOKE) || defined(CONFIG_PPC_86xx) | ||
376 | static __be32 __iomem *rstcr; | ||
377 | 228 | ||
378 | static int __init setup_rstcr(void) | 229 | if (!rstcr && ppc_md.restart == fsl_rstcr_restart) |
379 | { | ||
380 | struct device_node *np; | ||
381 | np = of_find_node_by_name(NULL, "global-utilities"); | ||
382 | if ((np && of_get_property(np, "fsl,has-rstcr", NULL))) { | ||
383 | rstcr = of_iomap(np, 0) + 0xb0; | ||
384 | if (!rstcr) | ||
385 | printk (KERN_EMERG "Error: reset control register " | ||
386 | "not mapped!\n"); | ||
387 | } else if (ppc_md.restart == fsl_rstcr_restart) | ||
388 | printk(KERN_ERR "No RSTCR register, warm reboot won't work\n"); | 230 | printk(KERN_ERR "No RSTCR register, warm reboot won't work\n"); |
389 | 231 | ||
390 | if (np) | 232 | if (np) |
391 | of_node_put(np); | 233 | of_node_put(np); |
234 | |||
392 | return 0; | 235 | return 0; |
393 | } | 236 | } |
394 | 237 | ||
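
Note on setup_rstcr(): it now iterates every "global-utilities" node and keeps only the one flagged fsl,has-rstcr, mapping the reset control register at offset 0xb0. The existing restart hook in this file consumes that mapping roughly as sketched below; this is a recollection rather than part of the hunk, and the 0x2 HRESET_REQ value is an assumption to verify against the manual:

void fsl_rstcr_restart(char *cmd)
{
	local_irq_disable();
	if (rstcr)
		out_be32(rstcr, 0x2);	/* assert HRESET_REQ */

	while (1)
		;
}
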
diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c index 6323e70e6bf4..d18bb27e4df9 100644 --- a/arch/powerpc/sysdev/i8259.c +++ b/arch/powerpc/sysdev/i8259.c | |||
@@ -78,19 +78,19 @@ unsigned int i8259_irq(void) | |||
78 | return irq; | 78 | return irq; |
79 | } | 79 | } |
80 | 80 | ||
81 | static void i8259_mask_and_ack_irq(unsigned int irq_nr) | 81 | static void i8259_mask_and_ack_irq(struct irq_data *d) |
82 | { | 82 | { |
83 | unsigned long flags; | 83 | unsigned long flags; |
84 | 84 | ||
85 | raw_spin_lock_irqsave(&i8259_lock, flags); | 85 | raw_spin_lock_irqsave(&i8259_lock, flags); |
86 | if (irq_nr > 7) { | 86 | if (d->irq > 7) { |
87 | cached_A1 |= 1 << (irq_nr-8); | 87 | cached_A1 |= 1 << (d->irq-8); |
88 | inb(0xA1); /* DUMMY */ | 88 | inb(0xA1); /* DUMMY */ |
89 | outb(cached_A1, 0xA1); | 89 | outb(cached_A1, 0xA1); |
90 | outb(0x20, 0xA0); /* Non-specific EOI */ | 90 | outb(0x20, 0xA0); /* Non-specific EOI */ |
91 | outb(0x20, 0x20); /* Non-specific EOI to cascade */ | 91 | outb(0x20, 0x20); /* Non-specific EOI to cascade */ |
92 | } else { | 92 | } else { |
93 | cached_21 |= 1 << irq_nr; | 93 | cached_21 |= 1 << d->irq; |
94 | inb(0x21); /* DUMMY */ | 94 | inb(0x21); /* DUMMY */ |
95 | outb(cached_21, 0x21); | 95 | outb(cached_21, 0x21); |
96 | outb(0x20, 0x20); /* Non-specific EOI */ | 96 | outb(0x20, 0x20); /* Non-specific EOI */ |
@@ -104,42 +104,42 @@ static void i8259_set_irq_mask(int irq_nr) | |||
104 | outb(cached_21,0x21); | 104 | outb(cached_21,0x21); |
105 | } | 105 | } |
106 | 106 | ||
107 | static void i8259_mask_irq(unsigned int irq_nr) | 107 | static void i8259_mask_irq(struct irq_data *d) |
108 | { | 108 | { |
109 | unsigned long flags; | 109 | unsigned long flags; |
110 | 110 | ||
111 | pr_debug("i8259_mask_irq(%d)\n", irq_nr); | 111 | pr_debug("i8259_mask_irq(%d)\n", d->irq); |
112 | 112 | ||
113 | raw_spin_lock_irqsave(&i8259_lock, flags); | 113 | raw_spin_lock_irqsave(&i8259_lock, flags); |
114 | if (irq_nr < 8) | 114 | if (d->irq < 8) |
115 | cached_21 |= 1 << irq_nr; | 115 | cached_21 |= 1 << d->irq; |
116 | else | 116 | else |
117 | cached_A1 |= 1 << (irq_nr-8); | 117 | cached_A1 |= 1 << (d->irq-8); |
118 | i8259_set_irq_mask(irq_nr); | 118 | i8259_set_irq_mask(d->irq); |
119 | raw_spin_unlock_irqrestore(&i8259_lock, flags); | 119 | raw_spin_unlock_irqrestore(&i8259_lock, flags); |
120 | } | 120 | } |
121 | 121 | ||
122 | static void i8259_unmask_irq(unsigned int irq_nr) | 122 | static void i8259_unmask_irq(struct irq_data *d) |
123 | { | 123 | { |
124 | unsigned long flags; | 124 | unsigned long flags; |
125 | 125 | ||
126 | pr_debug("i8259_unmask_irq(%d)\n", irq_nr); | 126 | pr_debug("i8259_unmask_irq(%d)\n", d->irq); |
127 | 127 | ||
128 | raw_spin_lock_irqsave(&i8259_lock, flags); | 128 | raw_spin_lock_irqsave(&i8259_lock, flags); |
129 | if (irq_nr < 8) | 129 | if (d->irq < 8) |
130 | cached_21 &= ~(1 << irq_nr); | 130 | cached_21 &= ~(1 << d->irq); |
131 | else | 131 | else |
132 | cached_A1 &= ~(1 << (irq_nr-8)); | 132 | cached_A1 &= ~(1 << (d->irq-8)); |
133 | i8259_set_irq_mask(irq_nr); | 133 | i8259_set_irq_mask(d->irq); |
134 | raw_spin_unlock_irqrestore(&i8259_lock, flags); | 134 | raw_spin_unlock_irqrestore(&i8259_lock, flags); |
135 | } | 135 | } |
136 | 136 | ||
137 | static struct irq_chip i8259_pic = { | 137 | static struct irq_chip i8259_pic = { |
138 | .name = "i8259", | 138 | .name = "i8259", |
139 | .mask = i8259_mask_irq, | 139 | .irq_mask = i8259_mask_irq, |
140 | .disable = i8259_mask_irq, | 140 | .irq_disable = i8259_mask_irq, |
141 | .unmask = i8259_unmask_irq, | 141 | .irq_unmask = i8259_unmask_irq, |
142 | .mask_ack = i8259_mask_and_ack_irq, | 142 | .irq_mask_ack = i8259_mask_and_ack_irq, |
143 | }; | 143 | }; |
144 | 144 | ||
145 | static struct resource pic1_iores = { | 145 | static struct resource pic1_iores = { |
@@ -175,28 +175,16 @@ static int i8259_host_map(struct irq_host *h, unsigned int virq, | |||
175 | 175 | ||
176 | /* We block the internal cascade */ | 176 | /* We block the internal cascade */ |
177 | if (hw == 2) | 177 | if (hw == 2) |
178 | irq_to_desc(virq)->status |= IRQ_NOREQUEST; | 178 | irq_set_status_flags(virq, IRQ_NOREQUEST); |
179 | 179 | ||
180 | /* We use the level handler only for now, we might want to | 180 | /* We use the level handler only for now, we might want to |
181 | * be more cautious here but that works for now | 181 | * be more cautious here but that works for now |
182 | */ | 182 | */ |
183 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 183 | irq_set_status_flags(virq, IRQ_LEVEL); |
184 | set_irq_chip_and_handler(virq, &i8259_pic, handle_level_irq); | 184 | irq_set_chip_and_handler(virq, &i8259_pic, handle_level_irq); |
185 | return 0; | 185 | return 0; |
186 | } | 186 | } |
187 | 187 | ||
188 | static void i8259_host_unmap(struct irq_host *h, unsigned int virq) | ||
189 | { | ||
190 | /* Make sure irq is masked in hardware */ | ||
191 | i8259_mask_irq(virq); | ||
192 | |||
193 | /* remove chip and handler */ | ||
194 | set_irq_chip_and_handler(virq, NULL, NULL); | ||
195 | |||
196 | /* Make sure it's completed */ | ||
197 | synchronize_irq(virq); | ||
198 | } | ||
199 | |||
200 | static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, | 188 | static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, |
201 | const u32 *intspec, unsigned int intsize, | 189 | const u32 *intspec, unsigned int intsize, |
202 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | 190 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) |
@@ -220,7 +208,6 @@ static int i8259_host_xlate(struct irq_host *h, struct device_node *ct, | |||
220 | static struct irq_host_ops i8259_host_ops = { | 208 | static struct irq_host_ops i8259_host_ops = { |
221 | .match = i8259_host_match, | 209 | .match = i8259_host_match, |
222 | .map = i8259_host_map, | 210 | .map = i8259_host_map, |
223 | .unmap = i8259_host_unmap, | ||
224 | .xlate = i8259_host_xlate, | 211 | .xlate = i8259_host_xlate, |
225 | }; | 212 | }; |
226 | 213 | ||
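[Editor's note] The i8259 hunks above are one instance of the pattern this whole series applies: irq_chip callbacks lose their bare "unsigned int irq" argument and instead take a struct irq_data, from which the Linux irq, the hardware irq number and the chip's private data are recovered. A minimal, hedged sketch of the new-style callbacks — all names here are illustrative, nothing is taken from the patch itself:

#include <linux/kernel.h>
#include <linux/irq.h>

static void example_irq_mask(struct irq_data *d)
{
	unsigned int src = irqd_to_hwirq(d);	/* hardware irq number */

	pr_debug("masking hwirq %u (Linux irq %u)\n", src, d->irq);
	/* ... set the mask bit for 'src' in the controller ... */
}

static void example_irq_unmask(struct irq_data *d)
{
	unsigned int src = irqd_to_hwirq(d);

	pr_debug("unmasking hwirq %u (Linux irq %u)\n", src, d->irq);
	/* ... clear the mask bit for 'src' in the controller ... */
}

static struct irq_chip example_pic = {
	.name		= "example",
	.irq_mask	= example_irq_mask,	/* was .mask   */
	.irq_unmask	= example_irq_unmask,	/* was .unmask */
};

Note that the i8259 hunks keep reading d->irq directly, relying on the legacy linear mapping, while the other controllers converted in this series recover the hardware number through irqd_to_hwirq() as sketched above.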
diff --git a/arch/powerpc/sysdev/indirect_pci.c b/arch/powerpc/sysdev/indirect_pci.c index 7ed809676642..82fdad885d20 100644 --- a/arch/powerpc/sysdev/indirect_pci.c +++ b/arch/powerpc/sysdev/indirect_pci.c | |||
@@ -117,7 +117,7 @@ indirect_write_config(struct pci_bus *bus, unsigned int devfn, int offset, | |||
117 | out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | | 117 | out_le32(hose->cfg_addr, (0x80000000 | (bus_no << 16) | |
118 | (devfn << 8) | reg | cfg_type)); | 118 | (devfn << 8) | reg | cfg_type)); |
119 | 119 | ||
120 | /* surpress setting of PCI_PRIMARY_BUS */ | 120 | /* suppress setting of PCI_PRIMARY_BUS */ |
121 | if (hose->indirect_type & PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS) | 121 | if (hose->indirect_type & PPC_INDIRECT_TYPE_SURPRESS_PRIMARY_BUS) |
122 | if ((offset == PCI_PRIMARY_BUS) && | 122 | if ((offset == PCI_PRIMARY_BUS) && |
123 | (bus->number == hose->first_busno)) | 123 | (bus->number == hose->first_busno)) |
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c index d7b9b9c69287..7367d17364cb 100644 --- a/arch/powerpc/sysdev/ipic.c +++ b/arch/powerpc/sysdev/ipic.c | |||
@@ -18,7 +18,7 @@ | |||
18 | #include <linux/stddef.h> | 18 | #include <linux/stddef.h> |
19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
20 | #include <linux/signal.h> | 20 | #include <linux/signal.h> |
21 | #include <linux/sysdev.h> | 21 | #include <linux/syscore_ops.h> |
22 | #include <linux/device.h> | 22 | #include <linux/device.h> |
23 | #include <linux/bootmem.h> | 23 | #include <linux/bootmem.h> |
24 | #include <linux/spinlock.h> | 24 | #include <linux/spinlock.h> |
@@ -521,12 +521,10 @@ static inline struct ipic * ipic_from_irq(unsigned int virq) | |||
521 | return primary_ipic; | 521 | return primary_ipic; |
522 | } | 522 | } |
523 | 523 | ||
524 | #define ipic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) | 524 | static void ipic_unmask_irq(struct irq_data *d) |
525 | |||
526 | static void ipic_unmask_irq(unsigned int virq) | ||
527 | { | 525 | { |
528 | struct ipic *ipic = ipic_from_irq(virq); | 526 | struct ipic *ipic = ipic_from_irq(d->irq); |
529 | unsigned int src = ipic_irq_to_hw(virq); | 527 | unsigned int src = irqd_to_hwirq(d); |
530 | unsigned long flags; | 528 | unsigned long flags; |
531 | u32 temp; | 529 | u32 temp; |
532 | 530 | ||
@@ -539,10 +537,10 @@ static void ipic_unmask_irq(unsigned int virq) | |||
539 | raw_spin_unlock_irqrestore(&ipic_lock, flags); | 537 | raw_spin_unlock_irqrestore(&ipic_lock, flags); |
540 | } | 538 | } |
541 | 539 | ||
542 | static void ipic_mask_irq(unsigned int virq) | 540 | static void ipic_mask_irq(struct irq_data *d) |
543 | { | 541 | { |
544 | struct ipic *ipic = ipic_from_irq(virq); | 542 | struct ipic *ipic = ipic_from_irq(d->irq); |
545 | unsigned int src = ipic_irq_to_hw(virq); | 543 | unsigned int src = irqd_to_hwirq(d); |
546 | unsigned long flags; | 544 | unsigned long flags; |
547 | u32 temp; | 545 | u32 temp; |
548 | 546 | ||
@@ -559,10 +557,10 @@ static void ipic_mask_irq(unsigned int virq) | |||
559 | raw_spin_unlock_irqrestore(&ipic_lock, flags); | 557 | raw_spin_unlock_irqrestore(&ipic_lock, flags); |
560 | } | 558 | } |
561 | 559 | ||
562 | static void ipic_ack_irq(unsigned int virq) | 560 | static void ipic_ack_irq(struct irq_data *d) |
563 | { | 561 | { |
564 | struct ipic *ipic = ipic_from_irq(virq); | 562 | struct ipic *ipic = ipic_from_irq(d->irq); |
565 | unsigned int src = ipic_irq_to_hw(virq); | 563 | unsigned int src = irqd_to_hwirq(d); |
566 | unsigned long flags; | 564 | unsigned long flags; |
567 | u32 temp; | 565 | u32 temp; |
568 | 566 | ||
@@ -578,10 +576,10 @@ static void ipic_ack_irq(unsigned int virq) | |||
578 | raw_spin_unlock_irqrestore(&ipic_lock, flags); | 576 | raw_spin_unlock_irqrestore(&ipic_lock, flags); |
579 | } | 577 | } |
580 | 578 | ||
581 | static void ipic_mask_irq_and_ack(unsigned int virq) | 579 | static void ipic_mask_irq_and_ack(struct irq_data *d) |
582 | { | 580 | { |
583 | struct ipic *ipic = ipic_from_irq(virq); | 581 | struct ipic *ipic = ipic_from_irq(d->irq); |
584 | unsigned int src = ipic_irq_to_hw(virq); | 582 | unsigned int src = irqd_to_hwirq(d); |
585 | unsigned long flags; | 583 | unsigned long flags; |
586 | u32 temp; | 584 | u32 temp; |
587 | 585 | ||
@@ -601,11 +599,10 @@ static void ipic_mask_irq_and_ack(unsigned int virq) | |||
601 | raw_spin_unlock_irqrestore(&ipic_lock, flags); | 599 | raw_spin_unlock_irqrestore(&ipic_lock, flags); |
602 | } | 600 | } |
603 | 601 | ||
604 | static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type) | 602 | static int ipic_set_irq_type(struct irq_data *d, unsigned int flow_type) |
605 | { | 603 | { |
606 | struct ipic *ipic = ipic_from_irq(virq); | 604 | struct ipic *ipic = ipic_from_irq(d->irq); |
607 | unsigned int src = ipic_irq_to_hw(virq); | 605 | unsigned int src = irqd_to_hwirq(d); |
608 | struct irq_desc *desc = irq_to_desc(virq); | ||
609 | unsigned int vold, vnew, edibit; | 606 | unsigned int vold, vnew, edibit; |
610 | 607 | ||
611 | if (flow_type == IRQ_TYPE_NONE) | 608 | if (flow_type == IRQ_TYPE_NONE) |
@@ -623,17 +620,16 @@ static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type) | |||
623 | printk(KERN_ERR "ipic: edge sense not supported on internal " | 620 | printk(KERN_ERR "ipic: edge sense not supported on internal " |
624 | "interrupts\n"); | 621 | "interrupts\n"); |
625 | return -EINVAL; | 622 | return -EINVAL; |
623 | |||
626 | } | 624 | } |
627 | 625 | ||
628 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | 626 | irqd_set_trigger_type(d, flow_type); |
629 | desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; | ||
630 | if (flow_type & IRQ_TYPE_LEVEL_LOW) { | 627 | if (flow_type & IRQ_TYPE_LEVEL_LOW) { |
631 | desc->status |= IRQ_LEVEL; | 628 | __irq_set_handler_locked(d->irq, handle_level_irq); |
632 | desc->handle_irq = handle_level_irq; | 629 | d->chip = &ipic_level_irq_chip; |
633 | desc->chip = &ipic_level_irq_chip; | ||
634 | } else { | 630 | } else { |
635 | desc->handle_irq = handle_edge_irq; | 631 | __irq_set_handler_locked(d->irq, handle_edge_irq); |
636 | desc->chip = &ipic_edge_irq_chip; | 632 | d->chip = &ipic_edge_irq_chip; |
637 | } | 633 | } |
638 | 634 | ||
639 | /* only EXT IRQ senses are programmable on ipic | 635 | /* only EXT IRQ senses are programmable on ipic |
@@ -655,25 +651,25 @@ static int ipic_set_irq_type(unsigned int virq, unsigned int flow_type) | |||
655 | } | 651 | } |
656 | if (vold != vnew) | 652 | if (vold != vnew) |
657 | ipic_write(ipic->regs, IPIC_SECNR, vnew); | 653 | ipic_write(ipic->regs, IPIC_SECNR, vnew); |
658 | return 0; | 654 | return IRQ_SET_MASK_OK_NOCOPY; |
659 | } | 655 | } |
660 | 656 | ||
661 | /* level interrupts and edge interrupts have different ack operations */ | 657 | /* level interrupts and edge interrupts have different ack operations */ |
662 | static struct irq_chip ipic_level_irq_chip = { | 658 | static struct irq_chip ipic_level_irq_chip = { |
663 | .name = "IPIC", | 659 | .name = "IPIC", |
664 | .unmask = ipic_unmask_irq, | 660 | .irq_unmask = ipic_unmask_irq, |
665 | .mask = ipic_mask_irq, | 661 | .irq_mask = ipic_mask_irq, |
666 | .mask_ack = ipic_mask_irq, | 662 | .irq_mask_ack = ipic_mask_irq, |
667 | .set_type = ipic_set_irq_type, | 663 | .irq_set_type = ipic_set_irq_type, |
668 | }; | 664 | }; |
669 | 665 | ||
670 | static struct irq_chip ipic_edge_irq_chip = { | 666 | static struct irq_chip ipic_edge_irq_chip = { |
671 | .name = "IPIC", | 667 | .name = "IPIC", |
672 | .unmask = ipic_unmask_irq, | 668 | .irq_unmask = ipic_unmask_irq, |
673 | .mask = ipic_mask_irq, | 669 | .irq_mask = ipic_mask_irq, |
674 | .mask_ack = ipic_mask_irq_and_ack, | 670 | .irq_mask_ack = ipic_mask_irq_and_ack, |
675 | .ack = ipic_ack_irq, | 671 | .irq_ack = ipic_ack_irq, |
676 | .set_type = ipic_set_irq_type, | 672 | .irq_set_type = ipic_set_irq_type, |
677 | }; | 673 | }; |
678 | 674 | ||
679 | static int ipic_host_match(struct irq_host *h, struct device_node *node) | 675 | static int ipic_host_match(struct irq_host *h, struct device_node *node) |
@@ -687,11 +683,11 @@ static int ipic_host_map(struct irq_host *h, unsigned int virq, | |||
687 | { | 683 | { |
688 | struct ipic *ipic = h->host_data; | 684 | struct ipic *ipic = h->host_data; |
689 | 685 | ||
690 | set_irq_chip_data(virq, ipic); | 686 | irq_set_chip_data(virq, ipic); |
691 | set_irq_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq); | 687 | irq_set_chip_and_handler(virq, &ipic_level_irq_chip, handle_level_irq); |
692 | 688 | ||
693 | /* Set default irq type */ | 689 | /* Set default irq type */ |
694 | set_irq_type(virq, IRQ_TYPE_NONE); | 690 | irq_set_irq_type(virq, IRQ_TYPE_NONE); |
695 | 691 | ||
696 | return 0; | 692 | return 0; |
697 | } | 693 | } |
@@ -795,7 +791,7 @@ struct ipic * __init ipic_init(struct device_node *node, unsigned int flags) | |||
795 | int ipic_set_priority(unsigned int virq, unsigned int priority) | 791 | int ipic_set_priority(unsigned int virq, unsigned int priority) |
796 | { | 792 | { |
797 | struct ipic *ipic = ipic_from_irq(virq); | 793 | struct ipic *ipic = ipic_from_irq(virq); |
798 | unsigned int src = ipic_irq_to_hw(virq); | 794 | unsigned int src = virq_to_hw(virq); |
799 | u32 temp; | 795 | u32 temp; |
800 | 796 | ||
801 | if (priority > 7) | 797 | if (priority > 7) |
@@ -823,7 +819,7 @@ int ipic_set_priority(unsigned int virq, unsigned int priority) | |||
823 | void ipic_set_highest_priority(unsigned int virq) | 819 | void ipic_set_highest_priority(unsigned int virq) |
824 | { | 820 | { |
825 | struct ipic *ipic = ipic_from_irq(virq); | 821 | struct ipic *ipic = ipic_from_irq(virq); |
826 | unsigned int src = ipic_irq_to_hw(virq); | 822 | unsigned int src = virq_to_hw(virq); |
827 | u32 temp; | 823 | u32 temp; |
828 | 824 | ||
829 | temp = ipic_read(ipic->regs, IPIC_SICFR); | 825 | temp = ipic_read(ipic->regs, IPIC_SICFR); |
@@ -904,7 +900,7 @@ static struct { | |||
904 | u32 sercr; | 900 | u32 sercr; |
905 | } ipic_saved_state; | 901 | } ipic_saved_state; |
906 | 902 | ||
907 | static int ipic_suspend(struct sys_device *sdev, pm_message_t state) | 903 | static int ipic_suspend(void) |
908 | { | 904 | { |
909 | struct ipic *ipic = primary_ipic; | 905 | struct ipic *ipic = primary_ipic; |
910 | 906 | ||
@@ -935,7 +931,7 @@ static int ipic_suspend(struct sys_device *sdev, pm_message_t state) | |||
935 | return 0; | 931 | return 0; |
936 | } | 932 | } |
937 | 933 | ||
938 | static int ipic_resume(struct sys_device *sdev) | 934 | static void ipic_resume(void) |
939 | { | 935 | { |
940 | struct ipic *ipic = primary_ipic; | 936 | struct ipic *ipic = primary_ipic; |
941 | 937 | ||
@@ -951,44 +947,26 @@ static int ipic_resume(struct sys_device *sdev) | |||
951 | ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr); | 947 | ipic_write(ipic->regs, IPIC_SECNR, ipic_saved_state.secnr); |
952 | ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr); | 948 | ipic_write(ipic->regs, IPIC_SERMR, ipic_saved_state.sermr); |
953 | ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr); | 949 | ipic_write(ipic->regs, IPIC_SERCR, ipic_saved_state.sercr); |
954 | |||
955 | return 0; | ||
956 | } | 950 | } |
957 | #else | 951 | #else |
958 | #define ipic_suspend NULL | 952 | #define ipic_suspend NULL |
959 | #define ipic_resume NULL | 953 | #define ipic_resume NULL |
960 | #endif | 954 | #endif |
961 | 955 | ||
962 | static struct sysdev_class ipic_sysclass = { | 956 | static struct syscore_ops ipic_syscore_ops = { |
963 | .name = "ipic", | ||
964 | .suspend = ipic_suspend, | 957 | .suspend = ipic_suspend, |
965 | .resume = ipic_resume, | 958 | .resume = ipic_resume, |
966 | }; | 959 | }; |
967 | 960 | ||
968 | static struct sys_device device_ipic = { | 961 | static int __init init_ipic_syscore(void) |
969 | .id = 0, | ||
970 | .cls = &ipic_sysclass, | ||
971 | }; | ||
972 | |||
973 | static int __init init_ipic_sysfs(void) | ||
974 | { | 962 | { |
975 | int rc; | ||
976 | |||
977 | if (!primary_ipic || !primary_ipic->regs) | 963 | if (!primary_ipic || !primary_ipic->regs) |
978 | return -ENODEV; | 964 | return -ENODEV; |
979 | printk(KERN_DEBUG "Registering ipic with sysfs...\n"); | ||
980 | 965 | ||
981 | rc = sysdev_class_register(&ipic_sysclass); | 966 | printk(KERN_DEBUG "Registering ipic system core operations\n"); |
982 | if (rc) { | 967 | register_syscore_ops(&ipic_syscore_ops); |
983 | printk(KERN_ERR "Failed registering ipic sys class\n"); | 968 | |
984 | return -ENODEV; | ||
985 | } | ||
986 | rc = sysdev_register(&device_ipic); | ||
987 | if (rc) { | ||
988 | printk(KERN_ERR "Failed registering ipic sys device\n"); | ||
989 | return -ENODEV; | ||
990 | } | ||
991 | return 0; | 969 | return 0; |
992 | } | 970 | } |
993 | 971 | ||
994 | subsys_initcall(init_ipic_sysfs); | 972 | subsys_initcall(init_ipic_syscore); |
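[Editor's note] Besides the irq_chip conversion, the ipic hunks above replace the old sysdev-based suspend/resume hooks with syscore_ops. A hedged, self-contained sketch of that registration pattern (illustrative names only):

#include <linux/init.h>
#include <linux/syscore_ops.h>

static int example_suspend(void)
{
	/* runs late, with interrupts off and only the boot CPU online;
	 * save the controller registers here, never sleep */
	return 0;
}

static void example_resume(void)
{
	/* restore the registers saved in example_suspend() */
}

static struct syscore_ops example_syscore_ops = {
	.suspend	= example_suspend,
	.resume		= example_resume,
};

static int __init example_init_syscore(void)
{
	register_syscore_ops(&example_syscore_ops);
	return 0;
}
subsys_initcall(example_init_syscore);

Unlike sysdev, syscore_ops carries no device-model state, which is why the sysdev_class/sys_device boilerplate and its error handling disappear from init_ipic_syscore() above.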
diff --git a/arch/powerpc/sysdev/mmio_nvram.c b/arch/powerpc/sysdev/mmio_nvram.c index 207324209065..ddc877a3a23a 100644 --- a/arch/powerpc/sysdev/mmio_nvram.c +++ b/arch/powerpc/sysdev/mmio_nvram.c | |||
@@ -115,6 +115,8 @@ int __init mmio_nvram_init(void) | |||
115 | int ret; | 115 | int ret; |
116 | 116 | ||
117 | nvram_node = of_find_node_by_type(NULL, "nvram"); | 117 | nvram_node = of_find_node_by_type(NULL, "nvram"); |
118 | if (!nvram_node) | ||
119 | nvram_node = of_find_compatible_node(NULL, NULL, "nvram"); | ||
118 | if (!nvram_node) { | 120 | if (!nvram_node) { |
119 | printk(KERN_WARNING "nvram: no node found in device-tree\n"); | 121 | printk(KERN_WARNING "nvram: no node found in device-tree\n"); |
120 | return -ENODEV; | 122 | return -ENODEV; |
diff --git a/arch/powerpc/sysdev/mpc8xx_pic.c b/arch/powerpc/sysdev/mpc8xx_pic.c index 8c27d261aba8..20924f2246f0 100644 --- a/arch/powerpc/sysdev/mpc8xx_pic.c +++ b/arch/powerpc/sysdev/mpc8xx_pic.c | |||
@@ -25,10 +25,10 @@ static sysconf8xx_t __iomem *siu_reg; | |||
25 | 25 | ||
26 | int cpm_get_irq(struct pt_regs *regs); | 26 | int cpm_get_irq(struct pt_regs *regs); |
27 | 27 | ||
28 | static void mpc8xx_unmask_irq(unsigned int virq) | 28 | static void mpc8xx_unmask_irq(struct irq_data *d) |
29 | { | 29 | { |
30 | int bit, word; | 30 | int bit, word; |
31 | unsigned int irq_nr = (unsigned int)irq_map[virq].hwirq; | 31 | unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); |
32 | 32 | ||
33 | bit = irq_nr & 0x1f; | 33 | bit = irq_nr & 0x1f; |
34 | word = irq_nr >> 5; | 34 | word = irq_nr >> 5; |
@@ -37,10 +37,10 @@ static void mpc8xx_unmask_irq(unsigned int virq) | |||
37 | out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); | 37 | out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); |
38 | } | 38 | } |
39 | 39 | ||
40 | static void mpc8xx_mask_irq(unsigned int virq) | 40 | static void mpc8xx_mask_irq(struct irq_data *d) |
41 | { | 41 | { |
42 | int bit, word; | 42 | int bit, word; |
43 | unsigned int irq_nr = (unsigned int)irq_map[virq].hwirq; | 43 | unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); |
44 | 44 | ||
45 | bit = irq_nr & 0x1f; | 45 | bit = irq_nr & 0x1f; |
46 | word = irq_nr >> 5; | 46 | word = irq_nr >> 5; |
@@ -49,19 +49,19 @@ static void mpc8xx_mask_irq(unsigned int virq) | |||
49 | out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); | 49 | out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); |
50 | } | 50 | } |
51 | 51 | ||
52 | static void mpc8xx_ack(unsigned int virq) | 52 | static void mpc8xx_ack(struct irq_data *d) |
53 | { | 53 | { |
54 | int bit; | 54 | int bit; |
55 | unsigned int irq_nr = (unsigned int)irq_map[virq].hwirq; | 55 | unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); |
56 | 56 | ||
57 | bit = irq_nr & 0x1f; | 57 | bit = irq_nr & 0x1f; |
58 | out_be32(&siu_reg->sc_sipend, 1 << (31-bit)); | 58 | out_be32(&siu_reg->sc_sipend, 1 << (31-bit)); |
59 | } | 59 | } |
60 | 60 | ||
61 | static void mpc8xx_end_irq(unsigned int virq) | 61 | static void mpc8xx_end_irq(struct irq_data *d) |
62 | { | 62 | { |
63 | int bit, word; | 63 | int bit, word; |
64 | unsigned int irq_nr = (unsigned int)irq_map[virq].hwirq; | 64 | unsigned int irq_nr = (unsigned int)irqd_to_hwirq(d); |
65 | 65 | ||
66 | bit = irq_nr & 0x1f; | 66 | bit = irq_nr & 0x1f; |
67 | word = irq_nr >> 5; | 67 | word = irq_nr >> 5; |
@@ -70,24 +70,17 @@ static void mpc8xx_end_irq(unsigned int virq) | |||
70 | out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); | 70 | out_be32(&siu_reg->sc_simask, ppc_cached_irq_mask[word]); |
71 | } | 71 | } |
72 | 72 | ||
73 | static int mpc8xx_set_irq_type(unsigned int virq, unsigned int flow_type) | 73 | static int mpc8xx_set_irq_type(struct irq_data *d, unsigned int flow_type) |
74 | { | 74 | { |
75 | struct irq_desc *desc = irq_to_desc(virq); | ||
76 | |||
77 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | ||
78 | desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; | ||
79 | if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) | ||
80 | desc->status |= IRQ_LEVEL; | ||
81 | |||
82 | if (flow_type & IRQ_TYPE_EDGE_FALLING) { | 75 | if (flow_type & IRQ_TYPE_EDGE_FALLING) { |
83 | irq_hw_number_t hw = (unsigned int)irq_map[virq].hwirq; | 76 | irq_hw_number_t hw = (unsigned int)irqd_to_hwirq(d); |
84 | unsigned int siel = in_be32(&siu_reg->sc_siel); | 77 | unsigned int siel = in_be32(&siu_reg->sc_siel); |
85 | 78 | ||
86 | /* only external IRQ senses are programmable */ | 79 | /* only external IRQ senses are programmable */ |
87 | if ((hw & 1) == 0) { | 80 | if ((hw & 1) == 0) { |
88 | siel |= (0x80000000 >> hw); | 81 | siel |= (0x80000000 >> hw); |
89 | out_be32(&siu_reg->sc_siel, siel); | 82 | out_be32(&siu_reg->sc_siel, siel); |
90 | desc->handle_irq = handle_edge_irq; | 83 | __irq_set_handler_locked(d->irq, handle_edge_irq); |
91 | } | 84 | } |
92 | } | 85 | } |
93 | return 0; | 86 | return 0; |
@@ -95,11 +88,11 @@ static int mpc8xx_set_irq_type(unsigned int virq, unsigned int flow_type) | |||
95 | 88 | ||
96 | static struct irq_chip mpc8xx_pic = { | 89 | static struct irq_chip mpc8xx_pic = { |
97 | .name = "MPC8XX SIU", | 90 | .name = "MPC8XX SIU", |
98 | .unmask = mpc8xx_unmask_irq, | 91 | .irq_unmask = mpc8xx_unmask_irq, |
99 | .mask = mpc8xx_mask_irq, | 92 | .irq_mask = mpc8xx_mask_irq, |
100 | .ack = mpc8xx_ack, | 93 | .irq_ack = mpc8xx_ack, |
101 | .eoi = mpc8xx_end_irq, | 94 | .irq_eoi = mpc8xx_end_irq, |
102 | .set_type = mpc8xx_set_irq_type, | 95 | .irq_set_type = mpc8xx_set_irq_type, |
103 | }; | 96 | }; |
104 | 97 | ||
105 | unsigned int mpc8xx_get_irq(void) | 98 | unsigned int mpc8xx_get_irq(void) |
@@ -124,7 +117,7 @@ static int mpc8xx_pic_host_map(struct irq_host *h, unsigned int virq, | |||
124 | pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw); | 117 | pr_debug("mpc8xx_pic_host_map(%d, 0x%lx)\n", virq, hw); |
125 | 118 | ||
126 | /* Set default irq handle */ | 119 | /* Set default irq handle */ |
127 | set_irq_chip_and_handler(virq, &mpc8xx_pic, handle_level_irq); | 120 | irq_set_chip_and_handler(virq, &mpc8xx_pic, handle_level_irq); |
128 | return 0; | 121 | return 0; |
129 | } | 122 | } |
130 | 123 | ||
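[Editor's note] The mpc8xx (and ipic) set_type hunks above drop the manual desc->status bookkeeping: the genirq core now records the trigger type, and the driver only reprograms its sense register and, where needed, swaps the flow handler. A rough sketch of a new-style handler, with assumed names:

#include <linux/irq.h>

static int example_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	/* ... program the sense register for irqd_to_hwirq(d) ... */

	/* swap the flow handler; the descriptor lock is already held here */
	if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))
		__irq_set_handler_locked(d->irq, handle_edge_irq);
	else
		__irq_set_handler_locked(d->irq, handle_level_irq);

	return IRQ_SET_MASK_OK;
}

Returning IRQ_SET_MASK_OK lets the core copy flow_type into the irq_data; ipic and mpic instead normalize IRQ_TYPE_NONE to a default, call irqd_set_trigger_type() themselves and return IRQ_SET_MASK_OK_NOCOPY.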
diff --git a/arch/powerpc/sysdev/mpc8xxx_gpio.c b/arch/powerpc/sysdev/mpc8xxx_gpio.c index 2b69084d0f0c..fb4963abdf55 100644 --- a/arch/powerpc/sysdev/mpc8xxx_gpio.c +++ b/arch/powerpc/sysdev/mpc8xxx_gpio.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * GPIOs on MPC8349/8572/8610 and compatible | 2 | * GPIOs on MPC512x/8349/8572/8610 and compatible |
3 | * | 3 | * |
4 | * Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk> | 4 | * Copyright (C) 2008 Peter Korsgaard <jacmet@sunsite.dk> |
5 | * | 5 | * |
@@ -26,6 +26,7 @@ | |||
26 | #define GPIO_IER 0x0c | 26 | #define GPIO_IER 0x0c |
27 | #define GPIO_IMR 0x10 | 27 | #define GPIO_IMR 0x10 |
28 | #define GPIO_ICR 0x14 | 28 | #define GPIO_ICR 0x14 |
29 | #define GPIO_ICR2 0x18 | ||
29 | 30 | ||
30 | struct mpc8xxx_gpio_chip { | 31 | struct mpc8xxx_gpio_chip { |
31 | struct of_mm_gpio_chip mm_gc; | 32 | struct of_mm_gpio_chip mm_gc; |
@@ -37,6 +38,7 @@ struct mpc8xxx_gpio_chip { | |||
37 | */ | 38 | */ |
38 | u32 data; | 39 | u32 data; |
39 | struct irq_host *irq; | 40 | struct irq_host *irq; |
41 | void *of_dev_id_data; | ||
40 | }; | 42 | }; |
41 | 43 | ||
42 | static inline u32 mpc8xxx_gpio2mask(unsigned int gpio) | 44 | static inline u32 mpc8xxx_gpio2mask(unsigned int gpio) |
@@ -143,7 +145,7 @@ static int mpc8xxx_gpio_to_irq(struct gpio_chip *gc, unsigned offset) | |||
143 | 145 | ||
144 | static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) | 146 | static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) |
145 | { | 147 | { |
146 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_desc_data(desc); | 148 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_desc_get_handler_data(desc); |
147 | struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; | 149 | struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; |
148 | unsigned int mask; | 150 | unsigned int mask; |
149 | 151 | ||
@@ -153,43 +155,43 @@ static void mpc8xxx_gpio_irq_cascade(unsigned int irq, struct irq_desc *desc) | |||
153 | 32 - ffs(mask))); | 155 | 32 - ffs(mask))); |
154 | } | 156 | } |
155 | 157 | ||
156 | static void mpc8xxx_irq_unmask(unsigned int virq) | 158 | static void mpc8xxx_irq_unmask(struct irq_data *d) |
157 | { | 159 | { |
158 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq); | 160 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); |
159 | struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; | 161 | struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; |
160 | unsigned long flags; | 162 | unsigned long flags; |
161 | 163 | ||
162 | spin_lock_irqsave(&mpc8xxx_gc->lock, flags); | 164 | spin_lock_irqsave(&mpc8xxx_gc->lock, flags); |
163 | 165 | ||
164 | setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(virq))); | 166 | setbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); |
165 | 167 | ||
166 | spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); | 168 | spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); |
167 | } | 169 | } |
168 | 170 | ||
169 | static void mpc8xxx_irq_mask(unsigned int virq) | 171 | static void mpc8xxx_irq_mask(struct irq_data *d) |
170 | { | 172 | { |
171 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq); | 173 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); |
172 | struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; | 174 | struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; |
173 | unsigned long flags; | 175 | unsigned long flags; |
174 | 176 | ||
175 | spin_lock_irqsave(&mpc8xxx_gc->lock, flags); | 177 | spin_lock_irqsave(&mpc8xxx_gc->lock, flags); |
176 | 178 | ||
177 | clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(virq_to_hw(virq))); | 179 | clrbits32(mm->regs + GPIO_IMR, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); |
178 | 180 | ||
179 | spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); | 181 | spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); |
180 | } | 182 | } |
181 | 183 | ||
182 | static void mpc8xxx_irq_ack(unsigned int virq) | 184 | static void mpc8xxx_irq_ack(struct irq_data *d) |
183 | { | 185 | { |
184 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq); | 186 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); |
185 | struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; | 187 | struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; |
186 | 188 | ||
187 | out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(virq_to_hw(virq))); | 189 | out_be32(mm->regs + GPIO_IER, mpc8xxx_gpio2mask(irqd_to_hwirq(d))); |
188 | } | 190 | } |
189 | 191 | ||
190 | static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type) | 192 | static int mpc8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) |
191 | { | 193 | { |
192 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = get_irq_chip_data(virq); | 194 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); |
193 | struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; | 195 | struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; |
194 | unsigned long flags; | 196 | unsigned long flags; |
195 | 197 | ||
@@ -197,14 +199,59 @@ static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type) | |||
197 | case IRQ_TYPE_EDGE_FALLING: | 199 | case IRQ_TYPE_EDGE_FALLING: |
198 | spin_lock_irqsave(&mpc8xxx_gc->lock, flags); | 200 | spin_lock_irqsave(&mpc8xxx_gc->lock, flags); |
199 | setbits32(mm->regs + GPIO_ICR, | 201 | setbits32(mm->regs + GPIO_ICR, |
200 | mpc8xxx_gpio2mask(virq_to_hw(virq))); | 202 | mpc8xxx_gpio2mask(irqd_to_hwirq(d))); |
201 | spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); | 203 | spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); |
202 | break; | 204 | break; |
203 | 205 | ||
204 | case IRQ_TYPE_EDGE_BOTH: | 206 | case IRQ_TYPE_EDGE_BOTH: |
205 | spin_lock_irqsave(&mpc8xxx_gc->lock, flags); | 207 | spin_lock_irqsave(&mpc8xxx_gc->lock, flags); |
206 | clrbits32(mm->regs + GPIO_ICR, | 208 | clrbits32(mm->regs + GPIO_ICR, |
207 | mpc8xxx_gpio2mask(virq_to_hw(virq))); | 209 | mpc8xxx_gpio2mask(irqd_to_hwirq(d))); |
210 | spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); | ||
211 | break; | ||
212 | |||
213 | default: | ||
214 | return -EINVAL; | ||
215 | } | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | |||
220 | static int mpc512x_irq_set_type(struct irq_data *d, unsigned int flow_type) | ||
221 | { | ||
222 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = irq_data_get_irq_chip_data(d); | ||
223 | struct of_mm_gpio_chip *mm = &mpc8xxx_gc->mm_gc; | ||
224 | unsigned long gpio = irqd_to_hwirq(d); | ||
225 | void __iomem *reg; | ||
226 | unsigned int shift; | ||
227 | unsigned long flags; | ||
228 | |||
229 | if (gpio < 16) { | ||
230 | reg = mm->regs + GPIO_ICR; | ||
231 | shift = (15 - gpio) * 2; | ||
232 | } else { | ||
233 | reg = mm->regs + GPIO_ICR2; | ||
234 | shift = (15 - (gpio % 16)) * 2; | ||
235 | } | ||
236 | |||
237 | switch (flow_type) { | ||
238 | case IRQ_TYPE_EDGE_FALLING: | ||
239 | case IRQ_TYPE_LEVEL_LOW: | ||
240 | spin_lock_irqsave(&mpc8xxx_gc->lock, flags); | ||
241 | clrsetbits_be32(reg, 3 << shift, 2 << shift); | ||
242 | spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); | ||
243 | break; | ||
244 | |||
245 | case IRQ_TYPE_EDGE_RISING: | ||
246 | case IRQ_TYPE_LEVEL_HIGH: | ||
247 | spin_lock_irqsave(&mpc8xxx_gc->lock, flags); | ||
248 | clrsetbits_be32(reg, 3 << shift, 1 << shift); | ||
249 | spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); | ||
250 | break; | ||
251 | |||
252 | case IRQ_TYPE_EDGE_BOTH: | ||
253 | spin_lock_irqsave(&mpc8xxx_gc->lock, flags); | ||
254 | clrbits32(reg, 3 << shift); | ||
208 | spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); | 255 | spin_unlock_irqrestore(&mpc8xxx_gc->lock, flags); |
209 | break; | 256 | break; |
210 | 257 | ||
@@ -217,18 +264,23 @@ static int mpc8xxx_irq_set_type(unsigned int virq, unsigned int flow_type) | |||
217 | 264 | ||
218 | static struct irq_chip mpc8xxx_irq_chip = { | 265 | static struct irq_chip mpc8xxx_irq_chip = { |
219 | .name = "mpc8xxx-gpio", | 266 | .name = "mpc8xxx-gpio", |
220 | .unmask = mpc8xxx_irq_unmask, | 267 | .irq_unmask = mpc8xxx_irq_unmask, |
221 | .mask = mpc8xxx_irq_mask, | 268 | .irq_mask = mpc8xxx_irq_mask, |
222 | .ack = mpc8xxx_irq_ack, | 269 | .irq_ack = mpc8xxx_irq_ack, |
223 | .set_type = mpc8xxx_irq_set_type, | 270 | .irq_set_type = mpc8xxx_irq_set_type, |
224 | }; | 271 | }; |
225 | 272 | ||
226 | static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq, | 273 | static int mpc8xxx_gpio_irq_map(struct irq_host *h, unsigned int virq, |
227 | irq_hw_number_t hw) | 274 | irq_hw_number_t hw) |
228 | { | 275 | { |
229 | set_irq_chip_data(virq, h->host_data); | 276 | struct mpc8xxx_gpio_chip *mpc8xxx_gc = h->host_data; |
230 | set_irq_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq); | 277 | |
231 | set_irq_type(virq, IRQ_TYPE_NONE); | 278 | if (mpc8xxx_gc->of_dev_id_data) |
279 | mpc8xxx_irq_chip.irq_set_type = mpc8xxx_gc->of_dev_id_data; | ||
280 | |||
281 | irq_set_chip_data(virq, h->host_data); | ||
282 | irq_set_chip_and_handler(virq, &mpc8xxx_irq_chip, handle_level_irq); | ||
283 | irq_set_irq_type(virq, IRQ_TYPE_NONE); | ||
232 | 284 | ||
233 | return 0; | 285 | return 0; |
234 | } | 286 | } |
@@ -253,11 +305,21 @@ static struct irq_host_ops mpc8xxx_gpio_irq_ops = { | |||
253 | .xlate = mpc8xxx_gpio_irq_xlate, | 305 | .xlate = mpc8xxx_gpio_irq_xlate, |
254 | }; | 306 | }; |
255 | 307 | ||
308 | static struct of_device_id mpc8xxx_gpio_ids[] __initdata = { | ||
309 | { .compatible = "fsl,mpc8349-gpio", }, | ||
310 | { .compatible = "fsl,mpc8572-gpio", }, | ||
311 | { .compatible = "fsl,mpc8610-gpio", }, | ||
312 | { .compatible = "fsl,mpc5121-gpio", .data = mpc512x_irq_set_type, }, | ||
313 | { .compatible = "fsl,qoriq-gpio", }, | ||
314 | {} | ||
315 | }; | ||
316 | |||
256 | static void __init mpc8xxx_add_controller(struct device_node *np) | 317 | static void __init mpc8xxx_add_controller(struct device_node *np) |
257 | { | 318 | { |
258 | struct mpc8xxx_gpio_chip *mpc8xxx_gc; | 319 | struct mpc8xxx_gpio_chip *mpc8xxx_gc; |
259 | struct of_mm_gpio_chip *mm_gc; | 320 | struct of_mm_gpio_chip *mm_gc; |
260 | struct gpio_chip *gc; | 321 | struct gpio_chip *gc; |
322 | const struct of_device_id *id; | ||
261 | unsigned hwirq; | 323 | unsigned hwirq; |
262 | int ret; | 324 | int ret; |
263 | 325 | ||
@@ -297,14 +359,18 @@ static void __init mpc8xxx_add_controller(struct device_node *np) | |||
297 | if (!mpc8xxx_gc->irq) | 359 | if (!mpc8xxx_gc->irq) |
298 | goto skip_irq; | 360 | goto skip_irq; |
299 | 361 | ||
362 | id = of_match_node(mpc8xxx_gpio_ids, np); | ||
363 | if (id) | ||
364 | mpc8xxx_gc->of_dev_id_data = id->data; | ||
365 | |||
300 | mpc8xxx_gc->irq->host_data = mpc8xxx_gc; | 366 | mpc8xxx_gc->irq->host_data = mpc8xxx_gc; |
301 | 367 | ||
302 | /* ack and mask all irqs */ | 368 | /* ack and mask all irqs */ |
303 | out_be32(mm_gc->regs + GPIO_IER, 0xffffffff); | 369 | out_be32(mm_gc->regs + GPIO_IER, 0xffffffff); |
304 | out_be32(mm_gc->regs + GPIO_IMR, 0); | 370 | out_be32(mm_gc->regs + GPIO_IMR, 0); |
305 | 371 | ||
306 | set_irq_data(hwirq, mpc8xxx_gc); | 372 | irq_set_handler_data(hwirq, mpc8xxx_gc); |
307 | set_irq_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade); | 373 | irq_set_chained_handler(hwirq, mpc8xxx_gpio_irq_cascade); |
308 | 374 | ||
309 | skip_irq: | 375 | skip_irq: |
310 | return; | 376 | return; |
@@ -321,13 +387,7 @@ static int __init mpc8xxx_add_gpiochips(void) | |||
321 | { | 387 | { |
322 | struct device_node *np; | 388 | struct device_node *np; |
323 | 389 | ||
324 | for_each_compatible_node(np, NULL, "fsl,mpc8349-gpio") | 390 | for_each_matching_node(np, mpc8xxx_gpio_ids) |
325 | mpc8xxx_add_controller(np); | ||
326 | |||
327 | for_each_compatible_node(np, NULL, "fsl,mpc8572-gpio") | ||
328 | mpc8xxx_add_controller(np); | ||
329 | |||
330 | for_each_compatible_node(np, NULL, "fsl,mpc8610-gpio") | ||
331 | mpc8xxx_add_controller(np); | 391 | mpc8xxx_add_controller(np); |
332 | 392 | ||
333 | return 0; | 393 | return 0; |
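[Editor's note] The mpc8xxx_gpio hunks above fold the three compatible-string loops into a single of_device_id table and use the table's .data pointer to select an MPC512x-specific irq_set_type. A minimal sketch of that match-data dispatch (all names are illustrative, not from the patch):

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/of.h>

/* hypothetical SoC-specific variant selected through the match data */
static int example_512x_set_type(struct irq_data *d, unsigned int flow_type)
{
	return IRQ_SET_MASK_OK_NOCOPY;
}

static const struct of_device_id example_gpio_ids[] __initconst = {
	{ .compatible = "fsl,mpc8349-gpio", },
	{ .compatible = "fsl,mpc5121-gpio", .data = example_512x_set_type, },
	{ }
};

static void __init example_scan_gpios(void)
{
	struct device_node *np;

	for_each_matching_node(np, example_gpio_ids) {
		const struct of_device_id *id = of_match_node(example_gpio_ids, np);

		if (id && id->data)
			pr_info("%s: using SoC-specific irq_set_type\n",
				np->full_name);
	}
}

In the patch itself the matched handler is stashed in mpc8xxx_gc->of_dev_id_data and installed into mpc8xxx_irq_chip.irq_set_type when the interrupt is first mapped.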
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 7c1342618a30..58d7a534f877 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -6,6 +6,7 @@ | |||
6 | * with various broken implementations of this HW. | 6 | * with various broken implementations of this HW. |
7 | * | 7 | * |
8 | * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. | 8 | * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp. |
9 | * Copyright 2010-2011 Freescale Semiconductor, Inc. | ||
9 | * | 10 | * |
10 | * This file is subject to the terms and conditions of the GNU General Public | 11 | * This file is subject to the terms and conditions of the GNU General Public |
11 | * License. See the file COPYING in the main directory of this archive | 12 | * License. See the file COPYING in the main directory of this archive |
@@ -27,6 +28,8 @@ | |||
27 | #include <linux/spinlock.h> | 28 | #include <linux/spinlock.h> |
28 | #include <linux/pci.h> | 29 | #include <linux/pci.h> |
29 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | #include <linux/syscore_ops.h> | ||
32 | #include <linux/ratelimit.h> | ||
30 | 33 | ||
31 | #include <asm/ptrace.h> | 34 | #include <asm/ptrace.h> |
32 | #include <asm/signal.h> | 35 | #include <asm/signal.h> |
@@ -147,6 +150,16 @@ static u32 mpic_infos[][MPIC_IDX_END] = { | |||
147 | 150 | ||
148 | #endif /* CONFIG_MPIC_WEIRD */ | 151 | #endif /* CONFIG_MPIC_WEIRD */ |
149 | 152 | ||
153 | static inline unsigned int mpic_processor_id(struct mpic *mpic) | ||
154 | { | ||
155 | unsigned int cpu = 0; | ||
156 | |||
157 | if (mpic->flags & MPIC_PRIMARY) | ||
158 | cpu = hard_smp_processor_id(); | ||
159 | |||
160 | return cpu; | ||
161 | } | ||
162 | |||
150 | /* | 163 | /* |
151 | * Register accessor functions | 164 | * Register accessor functions |
152 | */ | 165 | */ |
@@ -208,21 +221,38 @@ static inline void _mpic_ipi_write(struct mpic *mpic, unsigned int ipi, u32 valu | |||
208 | _mpic_write(mpic->reg_type, &mpic->gregs, offset, value); | 221 | _mpic_write(mpic->reg_type, &mpic->gregs, offset, value); |
209 | } | 222 | } |
210 | 223 | ||
224 | static inline u32 _mpic_tm_read(struct mpic *mpic, unsigned int tm) | ||
225 | { | ||
226 | unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) + | ||
227 | ((tm & 3) * MPIC_INFO(TIMER_STRIDE)); | ||
228 | |||
229 | if (tm >= 4) | ||
230 | offset += 0x1000 / 4; | ||
231 | |||
232 | return _mpic_read(mpic->reg_type, &mpic->tmregs, offset); | ||
233 | } | ||
234 | |||
235 | static inline void _mpic_tm_write(struct mpic *mpic, unsigned int tm, u32 value) | ||
236 | { | ||
237 | unsigned int offset = MPIC_INFO(TIMER_VECTOR_PRI) + | ||
238 | ((tm & 3) * MPIC_INFO(TIMER_STRIDE)); | ||
239 | |||
240 | if (tm >= 4) | ||
241 | offset += 0x1000 / 4; | ||
242 | |||
243 | _mpic_write(mpic->reg_type, &mpic->tmregs, offset, value); | ||
244 | } | ||
245 | |||
211 | static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg) | 246 | static inline u32 _mpic_cpu_read(struct mpic *mpic, unsigned int reg) |
212 | { | 247 | { |
213 | unsigned int cpu = 0; | 248 | unsigned int cpu = mpic_processor_id(mpic); |
214 | 249 | ||
215 | if (mpic->flags & MPIC_PRIMARY) | ||
216 | cpu = hard_smp_processor_id(); | ||
217 | return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg); | 250 | return _mpic_read(mpic->reg_type, &mpic->cpuregs[cpu], reg); |
218 | } | 251 | } |
219 | 252 | ||
220 | static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value) | 253 | static inline void _mpic_cpu_write(struct mpic *mpic, unsigned int reg, u32 value) |
221 | { | 254 | { |
222 | unsigned int cpu = 0; | 255 | unsigned int cpu = mpic_processor_id(mpic); |
223 | |||
224 | if (mpic->flags & MPIC_PRIMARY) | ||
225 | cpu = hard_smp_processor_id(); | ||
226 | 256 | ||
227 | _mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value); | 257 | _mpic_write(mpic->reg_type, &mpic->cpuregs[cpu], reg, value); |
228 | } | 258 | } |
@@ -263,6 +293,8 @@ static inline void _mpic_irq_write(struct mpic *mpic, unsigned int src_no, | |||
263 | #define mpic_write(b,r,v) _mpic_write(mpic->reg_type,&(b),(r),(v)) | 293 | #define mpic_write(b,r,v) _mpic_write(mpic->reg_type,&(b),(r),(v)) |
264 | #define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i)) | 294 | #define mpic_ipi_read(i) _mpic_ipi_read(mpic,(i)) |
265 | #define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v)) | 295 | #define mpic_ipi_write(i,v) _mpic_ipi_write(mpic,(i),(v)) |
296 | #define mpic_tm_read(i) _mpic_tm_read(mpic,(i)) | ||
297 | #define mpic_tm_write(i,v) _mpic_tm_write(mpic,(i),(v)) | ||
266 | #define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i)) | 298 | #define mpic_cpu_read(i) _mpic_cpu_read(mpic,(i)) |
267 | #define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v)) | 299 | #define mpic_cpu_write(i,v) _mpic_cpu_write(mpic,(i),(v)) |
268 | #define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r)) | 300 | #define mpic_irq_read(s,r) _mpic_irq_read(mpic,(s),(r)) |
@@ -356,7 +388,7 @@ static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source) | |||
356 | } | 388 | } |
357 | 389 | ||
358 | static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, | 390 | static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, |
359 | unsigned int irqflags) | 391 | bool level) |
360 | { | 392 | { |
361 | struct mpic_irq_fixup *fixup = &mpic->fixups[source]; | 393 | struct mpic_irq_fixup *fixup = &mpic->fixups[source]; |
362 | unsigned long flags; | 394 | unsigned long flags; |
@@ -365,14 +397,14 @@ static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, | |||
365 | if (fixup->base == NULL) | 397 | if (fixup->base == NULL) |
366 | return; | 398 | return; |
367 | 399 | ||
368 | DBG("startup_ht_interrupt(0x%x, 0x%x) index: %d\n", | 400 | DBG("startup_ht_interrupt(0x%x) index: %d\n", |
369 | source, irqflags, fixup->index); | 401 | source, fixup->index); |
370 | raw_spin_lock_irqsave(&mpic->fixup_lock, flags); | 402 | raw_spin_lock_irqsave(&mpic->fixup_lock, flags); |
371 | /* Enable and configure */ | 403 | /* Enable and configure */ |
372 | writeb(0x10 + 2 * fixup->index, fixup->base + 2); | 404 | writeb(0x10 + 2 * fixup->index, fixup->base + 2); |
373 | tmp = readl(fixup->base + 4); | 405 | tmp = readl(fixup->base + 4); |
374 | tmp &= ~(0x23U); | 406 | tmp &= ~(0x23U); |
375 | if (irqflags & IRQ_LEVEL) | 407 | if (level) |
376 | tmp |= 0x22; | 408 | tmp |= 0x22; |
377 | writel(tmp, fixup->base + 4); | 409 | writel(tmp, fixup->base + 4); |
378 | raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags); | 410 | raw_spin_unlock_irqrestore(&mpic->fixup_lock, flags); |
@@ -384,8 +416,7 @@ static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, | |||
384 | #endif | 416 | #endif |
385 | } | 417 | } |
386 | 418 | ||
387 | static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source, | 419 | static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source) |
388 | unsigned int irqflags) | ||
389 | { | 420 | { |
390 | struct mpic_irq_fixup *fixup = &mpic->fixups[source]; | 421 | struct mpic_irq_fixup *fixup = &mpic->fixups[source]; |
391 | unsigned long flags; | 422 | unsigned long flags; |
@@ -394,7 +425,7 @@ static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source, | |||
394 | if (fixup->base == NULL) | 425 | if (fixup->base == NULL) |
395 | return; | 426 | return; |
396 | 427 | ||
397 | DBG("shutdown_ht_interrupt(0x%x, 0x%x)\n", source, irqflags); | 428 | DBG("shutdown_ht_interrupt(0x%x)\n", source); |
398 | 429 | ||
399 | /* Disable */ | 430 | /* Disable */ |
400 | raw_spin_lock_irqsave(&mpic->fixup_lock, flags); | 431 | raw_spin_lock_irqsave(&mpic->fixup_lock, flags); |
@@ -603,25 +634,30 @@ static int irq_choose_cpu(const struct cpumask *mask) | |||
603 | } | 634 | } |
604 | #endif | 635 | #endif |
605 | 636 | ||
606 | #define mpic_irq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) | ||
607 | |||
608 | /* Find an mpic associated with a given linux interrupt */ | 637 | /* Find an mpic associated with a given linux interrupt */ |
609 | static struct mpic *mpic_find(unsigned int irq) | 638 | static struct mpic *mpic_find(unsigned int irq) |
610 | { | 639 | { |
611 | if (irq < NUM_ISA_INTERRUPTS) | 640 | if (irq < NUM_ISA_INTERRUPTS) |
612 | return NULL; | 641 | return NULL; |
613 | 642 | ||
614 | return irq_to_desc(irq)->chip_data; | 643 | return irq_get_chip_data(irq); |
615 | } | 644 | } |
616 | 645 | ||
617 | /* Determine if the linux irq is an IPI */ | 646 | /* Determine if the linux irq is an IPI */ |
618 | static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq) | 647 | static unsigned int mpic_is_ipi(struct mpic *mpic, unsigned int irq) |
619 | { | 648 | { |
620 | unsigned int src = mpic_irq_to_hw(irq); | 649 | unsigned int src = virq_to_hw(irq); |
621 | 650 | ||
622 | return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]); | 651 | return (src >= mpic->ipi_vecs[0] && src <= mpic->ipi_vecs[3]); |
623 | } | 652 | } |
624 | 653 | ||
654 | /* Determine if the linux irq is a timer */ | ||
655 | static unsigned int mpic_is_tm(struct mpic *mpic, unsigned int irq) | ||
656 | { | ||
657 | unsigned int src = virq_to_hw(irq); | ||
658 | |||
659 | return (src >= mpic->timer_vecs[0] && src <= mpic->timer_vecs[7]); | ||
660 | } | ||
625 | 661 | ||
626 | /* Convert a cpu mask from logical to physical cpu numbers. */ | 662 | /* Convert a cpu mask from logical to physical cpu numbers. */ |
627 | static inline u32 mpic_physmask(u32 cpumask) | 663 | static inline u32 mpic_physmask(u32 cpumask) |
@@ -629,23 +665,29 @@ static inline u32 mpic_physmask(u32 cpumask) | |||
629 | int i; | 665 | int i; |
630 | u32 mask = 0; | 666 | u32 mask = 0; |
631 | 667 | ||
632 | for (i = 0; i < NR_CPUS; ++i, cpumask >>= 1) | 668 | for (i = 0; i < min(32, NR_CPUS); ++i, cpumask >>= 1) |
633 | mask |= (cpumask & 1) << get_hard_smp_processor_id(i); | 669 | mask |= (cpumask & 1) << get_hard_smp_processor_id(i); |
634 | return mask; | 670 | return mask; |
635 | } | 671 | } |
636 | 672 | ||
637 | #ifdef CONFIG_SMP | 673 | #ifdef CONFIG_SMP |
638 | /* Get the mpic structure from the IPI number */ | 674 | /* Get the mpic structure from the IPI number */ |
639 | static inline struct mpic * mpic_from_ipi(unsigned int ipi) | 675 | static inline struct mpic * mpic_from_ipi(struct irq_data *d) |
640 | { | 676 | { |
641 | return irq_to_desc(ipi)->chip_data; | 677 | return irq_data_get_irq_chip_data(d); |
642 | } | 678 | } |
643 | #endif | 679 | #endif |
644 | 680 | ||
645 | /* Get the mpic structure from the irq number */ | 681 | /* Get the mpic structure from the irq number */ |
646 | static inline struct mpic * mpic_from_irq(unsigned int irq) | 682 | static inline struct mpic * mpic_from_irq(unsigned int irq) |
647 | { | 683 | { |
648 | return irq_to_desc(irq)->chip_data; | 684 | return irq_get_chip_data(irq); |
685 | } | ||
686 | |||
687 | /* Get the mpic structure from the irq data */ | ||
688 | static inline struct mpic * mpic_from_irq_data(struct irq_data *d) | ||
689 | { | ||
690 | return irq_data_get_irq_chip_data(d); | ||
649 | } | 691 | } |
650 | 692 | ||
651 | /* Send an EOI */ | 693 | /* Send an EOI */ |
@@ -660,13 +702,13 @@ static inline void mpic_eoi(struct mpic *mpic) | |||
660 | */ | 702 | */ |
661 | 703 | ||
662 | 704 | ||
663 | void mpic_unmask_irq(unsigned int irq) | 705 | void mpic_unmask_irq(struct irq_data *d) |
664 | { | 706 | { |
665 | unsigned int loops = 100000; | 707 | unsigned int loops = 100000; |
666 | struct mpic *mpic = mpic_from_irq(irq); | 708 | struct mpic *mpic = mpic_from_irq_data(d); |
667 | unsigned int src = mpic_irq_to_hw(irq); | 709 | unsigned int src = irqd_to_hwirq(d); |
668 | 710 | ||
669 | DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, irq, src); | 711 | DBG("%p: %s: enable_irq: %d (src %d)\n", mpic, mpic->name, d->irq, src); |
670 | 712 | ||
671 | mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), | 713 | mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), |
672 | mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & | 714 | mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & |
@@ -674,19 +716,20 @@ void mpic_unmask_irq(unsigned int irq) | |||
674 | /* make sure mask gets to controller before we return to user */ | 716 | /* make sure mask gets to controller before we return to user */ |
675 | do { | 717 | do { |
676 | if (!loops--) { | 718 | if (!loops--) { |
677 | printk(KERN_ERR "mpic_enable_irq timeout\n"); | 719 | printk(KERN_ERR "%s: timeout on hwirq %u\n", |
720 | __func__, src); | ||
678 | break; | 721 | break; |
679 | } | 722 | } |
680 | } while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK); | 723 | } while(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK); |
681 | } | 724 | } |
682 | 725 | ||
683 | void mpic_mask_irq(unsigned int irq) | 726 | void mpic_mask_irq(struct irq_data *d) |
684 | { | 727 | { |
685 | unsigned int loops = 100000; | 728 | unsigned int loops = 100000; |
686 | struct mpic *mpic = mpic_from_irq(irq); | 729 | struct mpic *mpic = mpic_from_irq_data(d); |
687 | unsigned int src = mpic_irq_to_hw(irq); | 730 | unsigned int src = irqd_to_hwirq(d); |
688 | 731 | ||
689 | DBG("%s: disable_irq: %d (src %d)\n", mpic->name, irq, src); | 732 | DBG("%s: disable_irq: %d (src %d)\n", mpic->name, d->irq, src); |
690 | 733 | ||
691 | mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), | 734 | mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), |
692 | mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) | | 735 | mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) | |
@@ -695,18 +738,19 @@ void mpic_mask_irq(unsigned int irq) | |||
695 | /* make sure mask gets to controller before we return to user */ | 738 | /* make sure mask gets to controller before we return to user */ |
696 | do { | 739 | do { |
697 | if (!loops--) { | 740 | if (!loops--) { |
698 | printk(KERN_ERR "mpic_enable_irq timeout\n"); | 741 | printk(KERN_ERR "%s: timeout on hwirq %u\n", |
742 | __func__, src); | ||
699 | break; | 743 | break; |
700 | } | 744 | } |
701 | } while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK)); | 745 | } while(!(mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) & MPIC_VECPRI_MASK)); |
702 | } | 746 | } |
703 | 747 | ||
704 | void mpic_end_irq(unsigned int irq) | 748 | void mpic_end_irq(struct irq_data *d) |
705 | { | 749 | { |
706 | struct mpic *mpic = mpic_from_irq(irq); | 750 | struct mpic *mpic = mpic_from_irq_data(d); |
707 | 751 | ||
708 | #ifdef DEBUG_IRQ | 752 | #ifdef DEBUG_IRQ |
709 | DBG("%s: end_irq: %d\n", mpic->name, irq); | 753 | DBG("%s: end_irq: %d\n", mpic->name, d->irq); |
710 | #endif | 754 | #endif |
711 | /* We always EOI on end_irq() even for edge interrupts since that | 755 | /* We always EOI on end_irq() even for edge interrupts since that |
712 | * should only lower the priority, the MPIC should have properly | 756 | * should only lower the priority, the MPIC should have properly |
@@ -718,51 +762,51 @@ void mpic_end_irq(unsigned int irq) | |||
718 | 762 | ||
719 | #ifdef CONFIG_MPIC_U3_HT_IRQS | 763 | #ifdef CONFIG_MPIC_U3_HT_IRQS |
720 | 764 | ||
721 | static void mpic_unmask_ht_irq(unsigned int irq) | 765 | static void mpic_unmask_ht_irq(struct irq_data *d) |
722 | { | 766 | { |
723 | struct mpic *mpic = mpic_from_irq(irq); | 767 | struct mpic *mpic = mpic_from_irq_data(d); |
724 | unsigned int src = mpic_irq_to_hw(irq); | 768 | unsigned int src = irqd_to_hwirq(d); |
725 | 769 | ||
726 | mpic_unmask_irq(irq); | 770 | mpic_unmask_irq(d); |
727 | 771 | ||
728 | if (irq_to_desc(irq)->status & IRQ_LEVEL) | 772 | if (irqd_is_level_type(d)) |
729 | mpic_ht_end_irq(mpic, src); | 773 | mpic_ht_end_irq(mpic, src); |
730 | } | 774 | } |
731 | 775 | ||
732 | static unsigned int mpic_startup_ht_irq(unsigned int irq) | 776 | static unsigned int mpic_startup_ht_irq(struct irq_data *d) |
733 | { | 777 | { |
734 | struct mpic *mpic = mpic_from_irq(irq); | 778 | struct mpic *mpic = mpic_from_irq_data(d); |
735 | unsigned int src = mpic_irq_to_hw(irq); | 779 | unsigned int src = irqd_to_hwirq(d); |
736 | 780 | ||
737 | mpic_unmask_irq(irq); | 781 | mpic_unmask_irq(d); |
738 | mpic_startup_ht_interrupt(mpic, src, irq_to_desc(irq)->status); | 782 | mpic_startup_ht_interrupt(mpic, src, irqd_is_level_type(d)); |
739 | 783 | ||
740 | return 0; | 784 | return 0; |
741 | } | 785 | } |
742 | 786 | ||
743 | static void mpic_shutdown_ht_irq(unsigned int irq) | 787 | static void mpic_shutdown_ht_irq(struct irq_data *d) |
744 | { | 788 | { |
745 | struct mpic *mpic = mpic_from_irq(irq); | 789 | struct mpic *mpic = mpic_from_irq_data(d); |
746 | unsigned int src = mpic_irq_to_hw(irq); | 790 | unsigned int src = irqd_to_hwirq(d); |
747 | 791 | ||
748 | mpic_shutdown_ht_interrupt(mpic, src, irq_to_desc(irq)->status); | 792 | mpic_shutdown_ht_interrupt(mpic, src); |
749 | mpic_mask_irq(irq); | 793 | mpic_mask_irq(d); |
750 | } | 794 | } |
751 | 795 | ||
752 | static void mpic_end_ht_irq(unsigned int irq) | 796 | static void mpic_end_ht_irq(struct irq_data *d) |
753 | { | 797 | { |
754 | struct mpic *mpic = mpic_from_irq(irq); | 798 | struct mpic *mpic = mpic_from_irq_data(d); |
755 | unsigned int src = mpic_irq_to_hw(irq); | 799 | unsigned int src = irqd_to_hwirq(d); |
756 | 800 | ||
757 | #ifdef DEBUG_IRQ | 801 | #ifdef DEBUG_IRQ |
758 | DBG("%s: end_irq: %d\n", mpic->name, irq); | 802 | DBG("%s: end_irq: %d\n", mpic->name, d->irq); |
759 | #endif | 803 | #endif |
760 | /* We always EOI on end_irq() even for edge interrupts since that | 804 | /* We always EOI on end_irq() even for edge interrupts since that |
761 | * should only lower the priority, the MPIC should have properly | 805 | * should only lower the priority, the MPIC should have properly |
762 | * latched another edge interrupt coming in anyway | 806 | * latched another edge interrupt coming in anyway |
763 | */ | 807 | */ |
764 | 808 | ||
765 | if (irq_to_desc(irq)->status & IRQ_LEVEL) | 809 | if (irqd_is_level_type(d)) |
766 | mpic_ht_end_irq(mpic, src); | 810 | mpic_ht_end_irq(mpic, src); |
767 | mpic_eoi(mpic); | 811 | mpic_eoi(mpic); |
768 | } | 812 | } |
@@ -770,23 +814,23 @@ static void mpic_end_ht_irq(unsigned int irq) | |||
770 | 814 | ||
771 | #ifdef CONFIG_SMP | 815 | #ifdef CONFIG_SMP |
772 | 816 | ||
773 | static void mpic_unmask_ipi(unsigned int irq) | 817 | static void mpic_unmask_ipi(struct irq_data *d) |
774 | { | 818 | { |
775 | struct mpic *mpic = mpic_from_ipi(irq); | 819 | struct mpic *mpic = mpic_from_ipi(d); |
776 | unsigned int src = mpic_irq_to_hw(irq) - mpic->ipi_vecs[0]; | 820 | unsigned int src = virq_to_hw(d->irq) - mpic->ipi_vecs[0]; |
777 | 821 | ||
778 | DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, irq, src); | 822 | DBG("%s: enable_ipi: %d (ipi %d)\n", mpic->name, d->irq, src); |
779 | mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); | 823 | mpic_ipi_write(src, mpic_ipi_read(src) & ~MPIC_VECPRI_MASK); |
780 | } | 824 | } |
781 | 825 | ||
782 | static void mpic_mask_ipi(unsigned int irq) | 826 | static void mpic_mask_ipi(struct irq_data *d) |
783 | { | 827 | { |
784 | /* NEVER disable an IPI... that's just plain wrong! */ | 828 | /* NEVER disable an IPI... that's just plain wrong! */ |
785 | } | 829 | } |
786 | 830 | ||
787 | static void mpic_end_ipi(unsigned int irq) | 831 | static void mpic_end_ipi(struct irq_data *d) |
788 | { | 832 | { |
789 | struct mpic *mpic = mpic_from_ipi(irq); | 833 | struct mpic *mpic = mpic_from_ipi(d); |
790 | 834 | ||
791 | /* | 835 | /* |
792 | * IPIs are marked IRQ_PER_CPU. This has the side effect of | 836 | * IPIs are marked IRQ_PER_CPU. This has the side effect of |
@@ -800,26 +844,42 @@ static void mpic_end_ipi(unsigned int irq) | |||
800 | 844 | ||
801 | #endif /* CONFIG_SMP */ | 845 | #endif /* CONFIG_SMP */ |
802 | 846 | ||
803 | int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask) | 847 | static void mpic_unmask_tm(struct irq_data *d) |
804 | { | 848 | { |
805 | struct mpic *mpic = mpic_from_irq(irq); | 849 | struct mpic *mpic = mpic_from_irq_data(d); |
806 | unsigned int src = mpic_irq_to_hw(irq); | 850 | unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0]; |
851 | |||
852 | DBG("%s: enable_tm: %d (tm %d)\n", mpic->name, d->irq, src); | ||
853 | mpic_tm_write(src, mpic_tm_read(src) & ~MPIC_VECPRI_MASK); | ||
854 | mpic_tm_read(src); | ||
855 | } | ||
856 | |||
857 | static void mpic_mask_tm(struct irq_data *d) | ||
858 | { | ||
859 | struct mpic *mpic = mpic_from_irq_data(d); | ||
860 | unsigned int src = virq_to_hw(d->irq) - mpic->timer_vecs[0]; | ||
861 | |||
862 | mpic_tm_write(src, mpic_tm_read(src) | MPIC_VECPRI_MASK); | ||
863 | mpic_tm_read(src); | ||
864 | } | ||
865 | |||
866 | int mpic_set_affinity(struct irq_data *d, const struct cpumask *cpumask, | ||
867 | bool force) | ||
868 | { | ||
869 | struct mpic *mpic = mpic_from_irq_data(d); | ||
870 | unsigned int src = irqd_to_hwirq(d); | ||
807 | 871 | ||
808 | if (mpic->flags & MPIC_SINGLE_DEST_CPU) { | 872 | if (mpic->flags & MPIC_SINGLE_DEST_CPU) { |
809 | int cpuid = irq_choose_cpu(cpumask); | 873 | int cpuid = irq_choose_cpu(cpumask); |
810 | 874 | ||
811 | mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); | 875 | mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); |
812 | } else { | 876 | } else { |
813 | cpumask_var_t tmp; | 877 | u32 mask = cpumask_bits(cpumask)[0]; |
814 | |||
815 | alloc_cpumask_var(&tmp, GFP_KERNEL); | ||
816 | 878 | ||
817 | cpumask_and(tmp, cpumask, cpu_online_mask); | 879 | mask &= cpumask_bits(cpu_online_mask)[0]; |
818 | 880 | ||
819 | mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), | 881 | mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), |
820 | mpic_physmask(cpumask_bits(tmp)[0])); | 882 | mpic_physmask(mask)); |
821 | |||
822 | free_cpumask_var(tmp); | ||
823 | } | 883 | } |
824 | 884 | ||
825 | return 0; | 885 | return 0; |
@@ -846,15 +906,14 @@ static unsigned int mpic_type_to_vecpri(struct mpic *mpic, unsigned int type) | |||
846 | } | 906 | } |
847 | } | 907 | } |
848 | 908 | ||
849 | int mpic_set_irq_type(unsigned int virq, unsigned int flow_type) | 909 | int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type) |
850 | { | 910 | { |
851 | struct mpic *mpic = mpic_from_irq(virq); | 911 | struct mpic *mpic = mpic_from_irq_data(d); |
852 | unsigned int src = mpic_irq_to_hw(virq); | 912 | unsigned int src = irqd_to_hwirq(d); |
853 | struct irq_desc *desc = irq_to_desc(virq); | ||
854 | unsigned int vecpri, vold, vnew; | 913 | unsigned int vecpri, vold, vnew; |
855 | 914 | ||
856 | DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n", | 915 | DBG("mpic: set_irq_type(mpic:@%p,virq:%d,src:0x%x,type:0x%x)\n", |
857 | mpic, virq, src, flow_type); | 916 | mpic, d->irq, src, flow_type); |
858 | 917 | ||
859 | if (src >= mpic->irq_count) | 918 | if (src >= mpic->irq_count) |
860 | return -EINVAL; | 919 | return -EINVAL; |
@@ -865,10 +924,7 @@ int mpic_set_irq_type(unsigned int virq, unsigned int flow_type) | |||
865 | if (flow_type == IRQ_TYPE_NONE) | 924 | if (flow_type == IRQ_TYPE_NONE) |
866 | flow_type = IRQ_TYPE_LEVEL_LOW; | 925 | flow_type = IRQ_TYPE_LEVEL_LOW; |
867 | 926 | ||
868 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | 927 | irqd_set_trigger_type(d, flow_type); |
869 | desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; | ||
870 | if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) | ||
871 | desc->status |= IRQ_LEVEL; | ||
872 | 928 | ||
873 | if (mpic_is_ht_interrupt(mpic, src)) | 929 | if (mpic_is_ht_interrupt(mpic, src)) |
874 | vecpri = MPIC_VECPRI_POLARITY_POSITIVE | | 930 | vecpri = MPIC_VECPRI_POLARITY_POSITIVE | |
@@ -883,13 +939,13 @@ int mpic_set_irq_type(unsigned int virq, unsigned int flow_type) | |||
883 | if (vold != vnew) | 939 | if (vold != vnew) |
884 | mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew); | 940 | mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vnew); |
885 | 941 | ||
886 | return 0; | 942 | return IRQ_SET_MASK_OK_NOCOPY; |
887 | } | 943 | } |
888 | 944 | ||
889 | void mpic_set_vector(unsigned int virq, unsigned int vector) | 945 | void mpic_set_vector(unsigned int virq, unsigned int vector) |
890 | { | 946 | { |
891 | struct mpic *mpic = mpic_from_irq(virq); | 947 | struct mpic *mpic = mpic_from_irq(virq); |
892 | unsigned int src = mpic_irq_to_hw(virq); | 948 | unsigned int src = virq_to_hw(virq); |
893 | unsigned int vecpri; | 949 | unsigned int vecpri; |
894 | 950 | ||
895 | DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n", | 951 | DBG("mpic: set_vector(mpic:@%p,virq:%d,src:%d,vector:0x%x)\n", |
@@ -904,29 +960,49 @@ void mpic_set_vector(unsigned int virq, unsigned int vector) | |||
904 | mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); | 960 | mpic_irq_write(src, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); |
905 | } | 961 | } |
906 | 962 | ||
963 | void mpic_set_destination(unsigned int virq, unsigned int cpuid) | ||
964 | { | ||
965 | struct mpic *mpic = mpic_from_irq(virq); | ||
966 | unsigned int src = virq_to_hw(virq); | ||
967 | |||
968 | DBG("mpic: set_destination(mpic:@%p,virq:%d,src:%d,cpuid:0x%x)\n", | ||
969 | mpic, virq, src, cpuid); | ||
970 | |||
971 | if (src >= mpic->irq_count) | ||
972 | return; | ||
973 | |||
974 | mpic_irq_write(src, MPIC_INFO(IRQ_DESTINATION), 1 << cpuid); | ||
975 | } | ||
976 | |||
907 | static struct irq_chip mpic_irq_chip = { | 977 | static struct irq_chip mpic_irq_chip = { |
908 | .mask = mpic_mask_irq, | 978 | .irq_mask = mpic_mask_irq, |
909 | .unmask = mpic_unmask_irq, | 979 | .irq_unmask = mpic_unmask_irq, |
910 | .eoi = mpic_end_irq, | 980 | .irq_eoi = mpic_end_irq, |
911 | .set_type = mpic_set_irq_type, | 981 | .irq_set_type = mpic_set_irq_type, |
912 | }; | 982 | }; |
913 | 983 | ||
914 | #ifdef CONFIG_SMP | 984 | #ifdef CONFIG_SMP |
915 | static struct irq_chip mpic_ipi_chip = { | 985 | static struct irq_chip mpic_ipi_chip = { |
916 | .mask = mpic_mask_ipi, | 986 | .irq_mask = mpic_mask_ipi, |
917 | .unmask = mpic_unmask_ipi, | 987 | .irq_unmask = mpic_unmask_ipi, |
918 | .eoi = mpic_end_ipi, | 988 | .irq_eoi = mpic_end_ipi, |
919 | }; | 989 | }; |
920 | #endif /* CONFIG_SMP */ | 990 | #endif /* CONFIG_SMP */ |
921 | 991 | ||
992 | static struct irq_chip mpic_tm_chip = { | ||
993 | .irq_mask = mpic_mask_tm, | ||
994 | .irq_unmask = mpic_unmask_tm, | ||
995 | .irq_eoi = mpic_end_irq, | ||
996 | }; | ||
997 | |||
922 | #ifdef CONFIG_MPIC_U3_HT_IRQS | 998 | #ifdef CONFIG_MPIC_U3_HT_IRQS |
923 | static struct irq_chip mpic_irq_ht_chip = { | 999 | static struct irq_chip mpic_irq_ht_chip = { |
924 | .startup = mpic_startup_ht_irq, | 1000 | .irq_startup = mpic_startup_ht_irq, |
925 | .shutdown = mpic_shutdown_ht_irq, | 1001 | .irq_shutdown = mpic_shutdown_ht_irq, |
926 | .mask = mpic_mask_irq, | 1002 | .irq_mask = mpic_mask_irq, |
927 | .unmask = mpic_unmask_ht_irq, | 1003 | .irq_unmask = mpic_unmask_ht_irq, |
928 | .eoi = mpic_end_ht_irq, | 1004 | .irq_eoi = mpic_end_ht_irq, |
929 | .set_type = mpic_set_irq_type, | 1005 | .irq_set_type = mpic_set_irq_type, |
930 | }; | 1006 | }; |
931 | #endif /* CONFIG_MPIC_U3_HT_IRQS */ | 1007 | #endif /* CONFIG_MPIC_U3_HT_IRQS */ |
932 | 1008 | ||
@@ -955,13 +1031,23 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq, | |||
955 | WARN_ON(!(mpic->flags & MPIC_PRIMARY)); | 1031 | WARN_ON(!(mpic->flags & MPIC_PRIMARY)); |
956 | 1032 | ||
957 | DBG("mpic: mapping as IPI\n"); | 1033 | DBG("mpic: mapping as IPI\n"); |
958 | set_irq_chip_data(virq, mpic); | 1034 | irq_set_chip_data(virq, mpic); |
959 | set_irq_chip_and_handler(virq, &mpic->hc_ipi, | 1035 | irq_set_chip_and_handler(virq, &mpic->hc_ipi, |
960 | handle_percpu_irq); | 1036 | handle_percpu_irq); |
961 | return 0; | 1037 | return 0; |
962 | } | 1038 | } |
963 | #endif /* CONFIG_SMP */ | 1039 | #endif /* CONFIG_SMP */ |
964 | 1040 | ||
1041 | if (hw >= mpic->timer_vecs[0] && hw <= mpic->timer_vecs[7]) { | ||
1042 | WARN_ON(!(mpic->flags & MPIC_PRIMARY)); | ||
1043 | |||
1044 | DBG("mpic: mapping as timer\n"); | ||
1045 | irq_set_chip_data(virq, mpic); | ||
1046 | irq_set_chip_and_handler(virq, &mpic->hc_tm, | ||
1047 | handle_fasteoi_irq); | ||
1048 | return 0; | ||
1049 | } | ||
1050 | |||
965 | if (hw >= mpic->irq_count) | 1051 | if (hw >= mpic->irq_count) |
966 | return -EINVAL; | 1052 | return -EINVAL; |
967 | 1053 | ||
@@ -978,11 +1064,21 @@ static int mpic_host_map(struct irq_host *h, unsigned int virq, | |||
978 | 1064 | ||
979 | DBG("mpic: mapping to irq chip @%p\n", chip); | 1065 | DBG("mpic: mapping to irq chip @%p\n", chip); |
980 | 1066 | ||
981 | set_irq_chip_data(virq, mpic); | 1067 | irq_set_chip_data(virq, mpic); |
982 | set_irq_chip_and_handler(virq, chip, handle_fasteoi_irq); | 1068 | irq_set_chip_and_handler(virq, chip, handle_fasteoi_irq); |
983 | 1069 | ||
984 | /* Set default irq type */ | 1070 | /* Set default irq type */ |
985 | set_irq_type(virq, IRQ_TYPE_NONE); | 1071 | irq_set_irq_type(virq, IRQ_TYPE_NONE); |
1072 | |||
1073 | /* If the MPIC was reset, then all vectors have already been | ||
1074 | * initialized. Otherwise, a per source lazy initialization | ||
1075 | * is done here. | ||
1076 | */ | ||
1077 | if (!mpic_is_ipi(mpic, hw) && (mpic->flags & MPIC_NO_RESET)) { | ||
1078 | mpic_set_vector(virq, hw); | ||
1079 | mpic_set_destination(virq, mpic_processor_id(mpic)); | ||
1080 | mpic_irq_set_priority(virq, 8); | ||
1081 | } | ||
986 | 1082 | ||
987 | return 0; | 1083 | return 0; |
988 | } | 1084 | } |
@@ -992,6 +1088,7 @@ static int mpic_host_xlate(struct irq_host *h, struct device_node *ct, | |||
992 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | 1088 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) |
993 | 1089 | ||
994 | { | 1090 | { |
1091 | struct mpic *mpic = h->host_data; | ||
995 | static unsigned char map_mpic_senses[4] = { | 1092 | static unsigned char map_mpic_senses[4] = { |
996 | IRQ_TYPE_EDGE_RISING, | 1093 | IRQ_TYPE_EDGE_RISING, |
997 | IRQ_TYPE_LEVEL_LOW, | 1094 | IRQ_TYPE_LEVEL_LOW, |
@@ -1000,7 +1097,38 @@ static int mpic_host_xlate(struct irq_host *h, struct device_node *ct, | |||
1000 | }; | 1097 | }; |
1001 | 1098 | ||
1002 | *out_hwirq = intspec[0]; | 1099 | *out_hwirq = intspec[0]; |
1003 | if (intsize > 1) { | 1100 | if (intsize >= 4 && (mpic->flags & MPIC_FSL)) { |
1101 | /* | ||
1102 | * Freescale MPIC with extended intspec: | ||
1103 | * First two cells are as usual. Third specifies | ||
1104 | * an "interrupt type". Fourth is type-specific data. | ||
1105 | * | ||
1106 | * See Documentation/devicetree/bindings/powerpc/fsl/mpic.txt | ||
1107 | */ | ||
1108 | switch (intspec[2]) { | ||
1109 | case 0: | ||
1110 | case 1: /* no EISR/EIMR support for now, treat as shared IRQ */ | ||
1111 | break; | ||
1112 | case 2: | ||
1113 | if (intspec[0] >= ARRAY_SIZE(mpic->ipi_vecs)) | ||
1114 | return -EINVAL; | ||
1115 | |||
1116 | *out_hwirq = mpic->ipi_vecs[intspec[0]]; | ||
1117 | break; | ||
1118 | case 3: | ||
1119 | if (intspec[0] >= ARRAY_SIZE(mpic->timer_vecs)) | ||
1120 | return -EINVAL; | ||
1121 | |||
1122 | *out_hwirq = mpic->timer_vecs[intspec[0]]; | ||
1123 | break; | ||
1124 | default: | ||
1125 | pr_debug("%s: unknown irq type %u\n", | ||
1126 | __func__, intspec[2]); | ||
1127 | return -EINVAL; | ||
1128 | } | ||
1129 | |||
1130 | *out_flags = map_mpic_senses[intspec[1] & 3]; | ||
1131 | } else if (intsize > 1) { | ||
1004 | u32 mask = 0x3; | 1132 | u32 mask = 0x3; |
1005 | 1133 | ||
1006 | /* Apple invented a new race of encoding on machines with | 1134 | /* Apple invented a new race of encoding on machines with |
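The new xlate branch above accepts a four-cell Freescale interrupt specifier whose third cell selects the interrupt class (external, IPI or MPIC timer). A self-contained sketch of that mapping, with hypothetical vector tables standing in for mpic->ipi_vecs and mpic->timer_vecs (in the driver these come from struct mpic; the numbers below match the intvec_top = 255 example further down):

	/*
	 * Sketch only: how a 4-cell FSL specifier <index sense type info>
	 * resolves to a hardware IRQ number.  Kernel context assumed
	 * (ARRAY_SIZE, -EINVAL); the vector arrays are placeholders.
	 */
	static int example_fsl_xlate(const u32 *intspec, unsigned long *out_hwirq)
	{
		static const unsigned long ipi_vecs[4]   = { 251, 252, 253, 254 };
		static const unsigned long timer_vecs[8] = { 243, 244, 245, 246,
							     247, 248, 249, 250 };

		switch (intspec[2]) {
		case 0:
		case 1:			/* ordinary (shared) external source */
			*out_hwirq = intspec[0];
			return 0;
		case 2:			/* IPI */
			if (intspec[0] >= ARRAY_SIZE(ipi_vecs))
				return -EINVAL;
			*out_hwirq = ipi_vecs[intspec[0]];
			return 0;
		case 3:			/* MPIC timer */
			if (intspec[0] >= ARRAY_SIZE(timer_vecs))
				return -EINVAL;
			*out_hwirq = timer_vecs[intspec[0]];
			return 0;
		default:
			return -EINVAL;
		}
	}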
@@ -1031,6 +1159,11 @@ static struct irq_host_ops mpic_host_ops = { | |||
1031 | .xlate = mpic_host_xlate, | 1159 | .xlate = mpic_host_xlate, |
1032 | }; | 1160 | }; |
1033 | 1161 | ||
1162 | static int mpic_reset_prohibited(struct device_node *node) | ||
1163 | { | ||
1164 | return node && of_get_property(node, "pic-no-reset", NULL); | ||
1165 | } | ||
1166 | |||
1034 | /* | 1167 | /* |
1035 | * Exported functions | 1168 | * Exported functions |
1036 | */ | 1169 | */ |
@@ -1058,12 +1191,12 @@ struct mpic * __init mpic_alloc(struct device_node *node, | |||
1058 | mpic->hc_irq = mpic_irq_chip; | 1191 | mpic->hc_irq = mpic_irq_chip; |
1059 | mpic->hc_irq.name = name; | 1192 | mpic->hc_irq.name = name; |
1060 | if (flags & MPIC_PRIMARY) | 1193 | if (flags & MPIC_PRIMARY) |
1061 | mpic->hc_irq.set_affinity = mpic_set_affinity; | 1194 | mpic->hc_irq.irq_set_affinity = mpic_set_affinity; |
1062 | #ifdef CONFIG_MPIC_U3_HT_IRQS | 1195 | #ifdef CONFIG_MPIC_U3_HT_IRQS |
1063 | mpic->hc_ht_irq = mpic_irq_ht_chip; | 1196 | mpic->hc_ht_irq = mpic_irq_ht_chip; |
1064 | mpic->hc_ht_irq.name = name; | 1197 | mpic->hc_ht_irq.name = name; |
1065 | if (flags & MPIC_PRIMARY) | 1198 | if (flags & MPIC_PRIMARY) |
1066 | mpic->hc_ht_irq.set_affinity = mpic_set_affinity; | 1199 | mpic->hc_ht_irq.irq_set_affinity = mpic_set_affinity; |
1067 | #endif /* CONFIG_MPIC_U3_HT_IRQS */ | 1200 | #endif /* CONFIG_MPIC_U3_HT_IRQS */ |
1068 | 1201 | ||
1069 | #ifdef CONFIG_SMP | 1202 | #ifdef CONFIG_SMP |
@@ -1071,6 +1204,9 @@ struct mpic * __init mpic_alloc(struct device_node *node, | |||
1071 | mpic->hc_ipi.name = name; | 1204 | mpic->hc_ipi.name = name; |
1072 | #endif /* CONFIG_SMP */ | 1205 | #endif /* CONFIG_SMP */ |
1073 | 1206 | ||
1207 | mpic->hc_tm = mpic_tm_chip; | ||
1208 | mpic->hc_tm.name = name; | ||
1209 | |||
1074 | mpic->flags = flags; | 1210 | mpic->flags = flags; |
1075 | mpic->isu_size = isu_size; | 1211 | mpic->isu_size = isu_size; |
1076 | mpic->irq_count = irq_count; | 1212 | mpic->irq_count = irq_count; |
@@ -1081,10 +1217,14 @@ struct mpic * __init mpic_alloc(struct device_node *node, | |||
1081 | else | 1217 | else |
1082 | intvec_top = 255; | 1218 | intvec_top = 255; |
1083 | 1219 | ||
1084 | mpic->timer_vecs[0] = intvec_top - 8; | 1220 | mpic->timer_vecs[0] = intvec_top - 12; |
1085 | mpic->timer_vecs[1] = intvec_top - 7; | 1221 | mpic->timer_vecs[1] = intvec_top - 11; |
1086 | mpic->timer_vecs[2] = intvec_top - 6; | 1222 | mpic->timer_vecs[2] = intvec_top - 10; |
1087 | mpic->timer_vecs[3] = intvec_top - 5; | 1223 | mpic->timer_vecs[3] = intvec_top - 9; |
1224 | mpic->timer_vecs[4] = intvec_top - 8; | ||
1225 | mpic->timer_vecs[5] = intvec_top - 7; | ||
1226 | mpic->timer_vecs[6] = intvec_top - 6; | ||
1227 | mpic->timer_vecs[7] = intvec_top - 5; | ||
1088 | mpic->ipi_vecs[0] = intvec_top - 4; | 1228 | mpic->ipi_vecs[0] = intvec_top - 4; |
1089 | mpic->ipi_vecs[1] = intvec_top - 3; | 1229 | mpic->ipi_vecs[1] = intvec_top - 3; |
1090 | mpic->ipi_vecs[2] = intvec_top - 2; | 1230 | mpic->ipi_vecs[2] = intvec_top - 2; |
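With the common intvec_top = 255 case from the context above, the new reservations work out as follows (worked example only; the driver computes these at runtime):

	timer_vecs[0..7] = intvec_top - 12 .. intvec_top - 5  =  243 .. 250
	ipi_vecs[0..3]   = intvec_top - 4  .. intvec_top - 1  =  251 .. 254

i.e. doubling the timer vectors from four to eight simply extends the reserved block at the top of the vector space downward by four entries.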
@@ -1094,6 +1234,8 @@ struct mpic * __init mpic_alloc(struct device_node *node, | |||
1094 | /* Check for "big-endian" in device-tree */ | 1234 | /* Check for "big-endian" in device-tree */ |
1095 | if (node && of_get_property(node, "big-endian", NULL) != NULL) | 1235 | if (node && of_get_property(node, "big-endian", NULL) != NULL) |
1096 | mpic->flags |= MPIC_BIG_ENDIAN; | 1236 | mpic->flags |= MPIC_BIG_ENDIAN; |
1237 | if (node && of_device_is_compatible(node, "fsl,mpic")) | ||
1238 | mpic->flags |= MPIC_FSL; | ||
1097 | 1239 | ||
1098 | /* Look for protected sources */ | 1240 | /* Look for protected sources */ |
1099 | if (node) { | 1241 | if (node) { |
@@ -1151,7 +1293,15 @@ struct mpic * __init mpic_alloc(struct device_node *node, | |||
1151 | mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000); | 1293 | mpic_map(mpic, node, paddr, &mpic->tmregs, MPIC_INFO(TIMER_BASE), 0x1000); |
1152 | 1294 | ||
1153 | /* Reset */ | 1295 | /* Reset */ |
1154 | if (flags & MPIC_WANTS_RESET) { | 1296 | |
1297 | /* When using a device-node, reset requests are only honored if the MPIC | ||
1298 | * is allowed to reset. | ||
1299 | */ | ||
1300 | if (mpic_reset_prohibited(node)) | ||
1301 | mpic->flags |= MPIC_NO_RESET; | ||
1302 | |||
1303 | if ((flags & MPIC_WANTS_RESET) && !(mpic->flags & MPIC_NO_RESET)) { | ||
1304 | printk(KERN_DEBUG "mpic: Resetting\n"); | ||
1155 | mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), | 1305 | mpic_write(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0), |
1156 | mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) | 1306 | mpic_read(mpic->gregs, MPIC_INFO(GREG_GLOBAL_CONF_0)) |
1157 | | MPIC_GREG_GCONF_RESET); | 1307 | | MPIC_GREG_GCONF_RESET); |
@@ -1277,15 +1427,17 @@ void __init mpic_init(struct mpic *mpic) | |||
1277 | /* Set current processor priority to max */ | 1427 | /* Set current processor priority to max */ |
1278 | mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf); | 1428 | mpic_cpu_write(MPIC_INFO(CPU_CURRENT_TASK_PRI), 0xf); |
1279 | 1429 | ||
1280 | /* Initialize timers: just disable them all */ | 1430 | /* Initialize timers to our reserved vectors and mask them for now */ |
1281 | for (i = 0; i < 4; i++) { | 1431 | for (i = 0; i < 4; i++) { |
1282 | mpic_write(mpic->tmregs, | 1432 | mpic_write(mpic->tmregs, |
1283 | i * MPIC_INFO(TIMER_STRIDE) + | 1433 | i * MPIC_INFO(TIMER_STRIDE) + |
1284 | MPIC_INFO(TIMER_DESTINATION), 0); | 1434 | MPIC_INFO(TIMER_DESTINATION), |
1435 | 1 << hard_smp_processor_id()); | ||
1285 | mpic_write(mpic->tmregs, | 1436 | mpic_write(mpic->tmregs, |
1286 | i * MPIC_INFO(TIMER_STRIDE) + | 1437 | i * MPIC_INFO(TIMER_STRIDE) + |
1287 | MPIC_INFO(TIMER_VECTOR_PRI), | 1438 | MPIC_INFO(TIMER_VECTOR_PRI), |
1288 | MPIC_VECPRI_MASK | | 1439 | MPIC_VECPRI_MASK | |
1440 | (9 << MPIC_VECPRI_PRIORITY_SHIFT) | | ||
1289 | (mpic->timer_vecs[0] + i)); | 1441 | (mpic->timer_vecs[0] + i)); |
1290 | } | 1442 | } |
1291 | 1443 | ||
@@ -1311,22 +1463,21 @@ void __init mpic_init(struct mpic *mpic) | |||
1311 | 1463 | ||
1312 | mpic_pasemi_msi_init(mpic); | 1464 | mpic_pasemi_msi_init(mpic); |
1313 | 1465 | ||
1314 | if (mpic->flags & MPIC_PRIMARY) | 1466 | cpu = mpic_processor_id(mpic); |
1315 | cpu = hard_smp_processor_id(); | ||
1316 | else | ||
1317 | cpu = 0; | ||
1318 | 1467 | ||
1319 | for (i = 0; i < mpic->num_sources; i++) { | 1468 | if (!(mpic->flags & MPIC_NO_RESET)) { |
1320 | /* start with vector = source number, and masked */ | 1469 | for (i = 0; i < mpic->num_sources; i++) { |
1321 | u32 vecpri = MPIC_VECPRI_MASK | i | | 1470 | /* start with vector = source number, and masked */ |
1322 | (8 << MPIC_VECPRI_PRIORITY_SHIFT); | 1471 | u32 vecpri = MPIC_VECPRI_MASK | i | |
1472 | (8 << MPIC_VECPRI_PRIORITY_SHIFT); | ||
1323 | 1473 | ||
1324 | /* check if protected */ | 1474 | /* check if protected */ |
1325 | if (mpic->protected && test_bit(i, mpic->protected)) | 1475 | if (mpic->protected && test_bit(i, mpic->protected)) |
1326 | continue; | 1476 | continue; |
1327 | /* init hw */ | 1477 | /* init hw */ |
1328 | mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); | 1478 | mpic_irq_write(i, MPIC_INFO(IRQ_VECTOR_PRI), vecpri); |
1329 | mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu); | 1479 | mpic_irq_write(i, MPIC_INFO(IRQ_DESTINATION), 1 << cpu); |
1480 | } | ||
1330 | } | 1481 | } |
1331 | 1482 | ||
1332 | /* Init spurious vector */ | 1483 | /* Init spurious vector */ |
@@ -1382,7 +1533,7 @@ void __init mpic_set_serial_int(struct mpic *mpic, int enable) | |||
1382 | void mpic_irq_set_priority(unsigned int irq, unsigned int pri) | 1533 | void mpic_irq_set_priority(unsigned int irq, unsigned int pri) |
1383 | { | 1534 | { |
1384 | struct mpic *mpic = mpic_find(irq); | 1535 | struct mpic *mpic = mpic_find(irq); |
1385 | unsigned int src = mpic_irq_to_hw(irq); | 1536 | unsigned int src = virq_to_hw(irq); |
1386 | unsigned long flags; | 1537 | unsigned long flags; |
1387 | u32 reg; | 1538 | u32 reg; |
1388 | 1539 | ||
@@ -1395,6 +1546,11 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri) | |||
1395 | ~MPIC_VECPRI_PRIORITY_MASK; | 1546 | ~MPIC_VECPRI_PRIORITY_MASK; |
1396 | mpic_ipi_write(src - mpic->ipi_vecs[0], | 1547 | mpic_ipi_write(src - mpic->ipi_vecs[0], |
1397 | reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); | 1548 | reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); |
1549 | } else if (mpic_is_tm(mpic, irq)) { | ||
1550 | reg = mpic_tm_read(src - mpic->timer_vecs[0]) & | ||
1551 | ~MPIC_VECPRI_PRIORITY_MASK; | ||
1552 | mpic_tm_write(src - mpic->timer_vecs[0], | ||
1553 | reg | (pri << MPIC_VECPRI_PRIORITY_SHIFT)); | ||
1398 | } else { | 1554 | } else { |
1399 | reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) | 1555 | reg = mpic_irq_read(src, MPIC_INFO(IRQ_VECTOR_PRI)) |
1400 | & ~MPIC_VECPRI_PRIORITY_MASK; | 1556 | & ~MPIC_VECPRI_PRIORITY_MASK; |
@@ -1493,9 +1649,8 @@ static unsigned int _mpic_get_one_irq(struct mpic *mpic, int reg) | |||
1493 | return NO_IRQ; | 1649 | return NO_IRQ; |
1494 | } | 1650 | } |
1495 | if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { | 1651 | if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { |
1496 | if (printk_ratelimit()) | 1652 | printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n", |
1497 | printk(KERN_WARNING "%s: Got protected source %d !\n", | 1653 | mpic->name, (int)src); |
1498 | mpic->name, (int)src); | ||
1499 | mpic_eoi(mpic); | 1654 | mpic_eoi(mpic); |
1500 | return NO_IRQ; | 1655 | return NO_IRQ; |
1501 | } | 1656 | } |
@@ -1533,9 +1688,8 @@ unsigned int mpic_get_coreint_irq(void) | |||
1533 | return NO_IRQ; | 1688 | return NO_IRQ; |
1534 | } | 1689 | } |
1535 | if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { | 1690 | if (unlikely(mpic->protected && test_bit(src, mpic->protected))) { |
1536 | if (printk_ratelimit()) | 1691 | printk_ratelimited(KERN_WARNING "%s: Got protected source %d !\n", |
1537 | printk(KERN_WARNING "%s: Got protected source %d !\n", | 1692 | mpic->name, (int)src); |
1538 | mpic->name, (int)src); | ||
1539 | return NO_IRQ; | 1693 | return NO_IRQ; |
1540 | } | 1694 | } |
1541 | 1695 | ||
@@ -1574,46 +1728,28 @@ void mpic_request_ipis(void) | |||
1574 | } | 1728 | } |
1575 | } | 1729 | } |
1576 | 1730 | ||
1577 | static void mpic_send_ipi(unsigned int ipi_no, const struct cpumask *cpu_mask) | 1731 | void smp_mpic_message_pass(int cpu, int msg) |
1578 | { | 1732 | { |
1579 | struct mpic *mpic = mpic_primary; | 1733 | struct mpic *mpic = mpic_primary; |
1734 | u32 physmask; | ||
1580 | 1735 | ||
1581 | BUG_ON(mpic == NULL); | 1736 | BUG_ON(mpic == NULL); |
1582 | 1737 | ||
1583 | #ifdef DEBUG_IPI | ||
1584 | DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no); | ||
1585 | #endif | ||
1586 | |||
1587 | mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) + | ||
1588 | ipi_no * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), | ||
1589 | mpic_physmask(cpumask_bits(cpu_mask)[0])); | ||
1590 | } | ||
1591 | |||
1592 | void smp_mpic_message_pass(int target, int msg) | ||
1593 | { | ||
1594 | cpumask_var_t tmp; | ||
1595 | |||
1596 | /* make sure we're sending something that translates to an IPI */ | 1738 | /* make sure we're sending something that translates to an IPI */ |
1597 | if ((unsigned int)msg > 3) { | 1739 | if ((unsigned int)msg > 3) { |
1598 | printk("SMP %d: smp_message_pass: unknown msg %d\n", | 1740 | printk("SMP %d: smp_message_pass: unknown msg %d\n", |
1599 | smp_processor_id(), msg); | 1741 | smp_processor_id(), msg); |
1600 | return; | 1742 | return; |
1601 | } | 1743 | } |
1602 | switch (target) { | 1744 | |
1603 | case MSG_ALL: | 1745 | #ifdef DEBUG_IPI |
1604 | mpic_send_ipi(msg, cpu_online_mask); | 1746 | DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, msg); |
1605 | break; | 1747 | #endif |
1606 | case MSG_ALL_BUT_SELF: | 1748 | |
1607 | alloc_cpumask_var(&tmp, GFP_NOWAIT); | 1749 | physmask = 1 << get_hard_smp_processor_id(cpu); |
1608 | cpumask_andnot(tmp, cpu_online_mask, | 1750 | |
1609 | cpumask_of(smp_processor_id())); | 1751 | mpic_cpu_write(MPIC_INFO(CPU_IPI_DISPATCH_0) + |
1610 | mpic_send_ipi(msg, tmp); | 1752 | msg * MPIC_INFO(CPU_IPI_DISPATCH_STRIDE), physmask); |
1611 | free_cpumask_var(tmp); | ||
1612 | break; | ||
1613 | default: | ||
1614 | mpic_send_ipi(msg, cpumask_of(target)); | ||
1615 | break; | ||
1616 | } | ||
1617 | } | 1753 | } |
1618 | 1754 | ||
1619 | int __init smp_mpic_probe(void) | 1755 | int __init smp_mpic_probe(void) |
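The rewritten smp_mpic_message_pass() above dispatches to exactly one CPU per call; fanning an IPI out to several CPUs is now the caller's job. A hypothetical caller-side sketch of the old "all but self" behaviour built on the per-CPU primitive (illustrative only, not code from the patch; assumes the caller runs with preemption disabled):

	/*
	 * Hypothetical fan-out: send IPI message 'msg' to every online
	 * CPU except the current one, one dispatch-register write each.
	 */
	static void example_ipi_all_but_self(int msg)
	{
		int cpu;

		for_each_online_cpu(cpu) {
			if (cpu == smp_processor_id())
				continue;
			smp_mpic_message_pass(cpu, msg);
		}
	}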
@@ -1657,9 +1793,8 @@ void mpic_reset_core(int cpu) | |||
1657 | #endif /* CONFIG_SMP */ | 1793 | #endif /* CONFIG_SMP */ |
1658 | 1794 | ||
1659 | #ifdef CONFIG_PM | 1795 | #ifdef CONFIG_PM |
1660 | static int mpic_suspend(struct sys_device *dev, pm_message_t state) | 1796 | static void mpic_suspend_one(struct mpic *mpic) |
1661 | { | 1797 | { |
1662 | struct mpic *mpic = container_of(dev, struct mpic, sysdev); | ||
1663 | int i; | 1798 | int i; |
1664 | 1799 | ||
1665 | for (i = 0; i < mpic->num_sources; i++) { | 1800 | for (i = 0; i < mpic->num_sources; i++) { |
@@ -1668,13 +1803,22 @@ static int mpic_suspend(struct sys_device *dev, pm_message_t state) | |||
1668 | mpic->save_data[i].dest = | 1803 | mpic->save_data[i].dest = |
1669 | mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)); | 1804 | mpic_irq_read(i, MPIC_INFO(IRQ_DESTINATION)); |
1670 | } | 1805 | } |
1806 | } | ||
1807 | |||
1808 | static int mpic_suspend(void) | ||
1809 | { | ||
1810 | struct mpic *mpic = mpics; | ||
1811 | |||
1812 | while (mpic) { | ||
1813 | mpic_suspend_one(mpic); | ||
1814 | mpic = mpic->next; | ||
1815 | } | ||
1671 | 1816 | ||
1672 | return 0; | 1817 | return 0; |
1673 | } | 1818 | } |
1674 | 1819 | ||
1675 | static int mpic_resume(struct sys_device *dev) | 1820 | static void mpic_resume_one(struct mpic *mpic) |
1676 | { | 1821 | { |
1677 | struct mpic *mpic = container_of(dev, struct mpic, sysdev); | ||
1678 | int i; | 1822 | int i; |
1679 | 1823 | ||
1680 | for (i = 0; i < mpic->num_sources; i++) { | 1824 | for (i = 0; i < mpic->num_sources; i++) { |
@@ -1701,33 +1845,28 @@ static int mpic_resume(struct sys_device *dev) | |||
1701 | } | 1845 | } |
1702 | #endif | 1846 | #endif |
1703 | } /* end for loop */ | 1847 | } /* end for loop */ |
1848 | } | ||
1704 | 1849 | ||
1705 | return 0; | 1850 | static void mpic_resume(void) |
1851 | { | ||
1852 | struct mpic *mpic = mpics; | ||
1853 | |||
1854 | while (mpic) { | ||
1855 | mpic_resume_one(mpic); | ||
1856 | mpic = mpic->next; | ||
1857 | } | ||
1706 | } | 1858 | } |
1707 | #endif | ||
1708 | 1859 | ||
1709 | static struct sysdev_class mpic_sysclass = { | 1860 | static struct syscore_ops mpic_syscore_ops = { |
1710 | #ifdef CONFIG_PM | ||
1711 | .resume = mpic_resume, | 1861 | .resume = mpic_resume, |
1712 | .suspend = mpic_suspend, | 1862 | .suspend = mpic_suspend, |
1713 | #endif | ||
1714 | .name = "mpic", | ||
1715 | }; | 1863 | }; |
1716 | 1864 | ||
1717 | static int mpic_init_sys(void) | 1865 | static int mpic_init_sys(void) |
1718 | { | 1866 | { |
1719 | struct mpic *mpic = mpics; | 1867 | register_syscore_ops(&mpic_syscore_ops); |
1720 | int error, id = 0; | 1868 | return 0; |
1721 | |||
1722 | error = sysdev_class_register(&mpic_sysclass); | ||
1723 | |||
1724 | while (mpic && !error) { | ||
1725 | mpic->sysdev.cls = &mpic_sysclass; | ||
1726 | mpic->sysdev.id = id++; | ||
1727 | error = sysdev_register(&mpic->sysdev); | ||
1728 | mpic = mpic->next; | ||
1729 | } | ||
1730 | return error; | ||
1731 | } | 1869 | } |
1732 | 1870 | ||
1733 | device_initcall(mpic_init_sys); | 1871 | device_initcall(mpic_init_sys); |
1872 | #endif | ||
diff --git a/arch/powerpc/sysdev/mpic.h b/arch/powerpc/sysdev/mpic.h index e4a6df77b8d7..13f3e8913a93 100644 --- a/arch/powerpc/sysdev/mpic.h +++ b/arch/powerpc/sysdev/mpic.h | |||
@@ -34,9 +34,10 @@ static inline int mpic_pasemi_msi_init(struct mpic *mpic) | |||
34 | } | 34 | } |
35 | #endif | 35 | #endif |
36 | 36 | ||
37 | extern int mpic_set_irq_type(unsigned int virq, unsigned int flow_type); | 37 | extern int mpic_set_irq_type(struct irq_data *d, unsigned int flow_type); |
38 | extern void mpic_set_vector(unsigned int virq, unsigned int vector); | 38 | extern void mpic_set_vector(unsigned int virq, unsigned int vector); |
39 | extern int mpic_set_affinity(unsigned int irq, const struct cpumask *cpumask); | 39 | extern int mpic_set_affinity(struct irq_data *d, |
40 | const struct cpumask *cpumask, bool force); | ||
40 | extern void mpic_reset_core(int cpu); | 41 | extern void mpic_reset_core(int cpu); |
41 | 42 | ||
42 | #endif /* _POWERPC_SYSDEV_MPIC_H */ | 43 | #endif /* _POWERPC_SYSDEV_MPIC_H */ |
diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c index 3b6a9a43718f..38e62382070c 100644 --- a/arch/powerpc/sysdev/mpic_pasemi_msi.c +++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c | |||
@@ -39,28 +39,28 @@ | |||
39 | static struct mpic *msi_mpic; | 39 | static struct mpic *msi_mpic; |
40 | 40 | ||
41 | 41 | ||
42 | static void mpic_pasemi_msi_mask_irq(unsigned int irq) | 42 | static void mpic_pasemi_msi_mask_irq(struct irq_data *data) |
43 | { | 43 | { |
44 | pr_debug("mpic_pasemi_msi_mask_irq %d\n", irq); | 44 | pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq); |
45 | mask_msi_irq(irq); | 45 | mask_msi_irq(data); |
46 | mpic_mask_irq(irq); | 46 | mpic_mask_irq(data); |
47 | } | 47 | } |
48 | 48 | ||
49 | static void mpic_pasemi_msi_unmask_irq(unsigned int irq) | 49 | static void mpic_pasemi_msi_unmask_irq(struct irq_data *data) |
50 | { | 50 | { |
51 | pr_debug("mpic_pasemi_msi_unmask_irq %d\n", irq); | 51 | pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq); |
52 | mpic_unmask_irq(irq); | 52 | mpic_unmask_irq(data); |
53 | unmask_msi_irq(irq); | 53 | unmask_msi_irq(data); |
54 | } | 54 | } |
55 | 55 | ||
56 | static struct irq_chip mpic_pasemi_msi_chip = { | 56 | static struct irq_chip mpic_pasemi_msi_chip = { |
57 | .shutdown = mpic_pasemi_msi_mask_irq, | 57 | .irq_shutdown = mpic_pasemi_msi_mask_irq, |
58 | .mask = mpic_pasemi_msi_mask_irq, | 58 | .irq_mask = mpic_pasemi_msi_mask_irq, |
59 | .unmask = mpic_pasemi_msi_unmask_irq, | 59 | .irq_unmask = mpic_pasemi_msi_unmask_irq, |
60 | .eoi = mpic_end_irq, | 60 | .irq_eoi = mpic_end_irq, |
61 | .set_type = mpic_set_irq_type, | 61 | .irq_set_type = mpic_set_irq_type, |
62 | .set_affinity = mpic_set_affinity, | 62 | .irq_set_affinity = mpic_set_affinity, |
63 | .name = "PASEMI-MSI", | 63 | .name = "PASEMI-MSI", |
64 | }; | 64 | }; |
65 | 65 | ||
66 | static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type) | 66 | static int pasemi_msi_check_device(struct pci_dev *pdev, int nvec, int type) |
@@ -81,7 +81,7 @@ static void pasemi_msi_teardown_msi_irqs(struct pci_dev *pdev) | |||
81 | if (entry->irq == NO_IRQ) | 81 | if (entry->irq == NO_IRQ) |
82 | continue; | 82 | continue; |
83 | 83 | ||
84 | set_irq_msi(entry->irq, NULL); | 84 | irq_set_msi_desc(entry->irq, NULL); |
85 | msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, | 85 | msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, |
86 | virq_to_hw(entry->irq), ALLOC_CHUNK); | 86 | virq_to_hw(entry->irq), ALLOC_CHUNK); |
87 | irq_dispose_mapping(entry->irq); | 87 | irq_dispose_mapping(entry->irq); |
@@ -131,9 +131,9 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
131 | */ | 131 | */ |
132 | mpic_set_vector(virq, 0); | 132 | mpic_set_vector(virq, 0); |
133 | 133 | ||
134 | set_irq_msi(virq, entry); | 134 | irq_set_msi_desc(virq, entry); |
135 | set_irq_chip(virq, &mpic_pasemi_msi_chip); | 135 | irq_set_chip(virq, &mpic_pasemi_msi_chip); |
136 | set_irq_type(virq, IRQ_TYPE_EDGE_RISING); | 136 | irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING); |
137 | 137 | ||
138 | pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%x) " \ | 138 | pr_debug("pasemi_msi: allocated virq 0x%x (hw 0x%x) " \ |
139 | "addr 0x%x\n", virq, hwirq, msg.address_lo); | 139 | "addr 0x%x\n", virq, hwirq, msg.address_lo); |
diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c index bcbfe79c704b..9a7aa0ed9c1c 100644 --- a/arch/powerpc/sysdev/mpic_u3msi.c +++ b/arch/powerpc/sysdev/mpic_u3msi.c | |||
@@ -23,26 +23,26 @@ | |||
23 | /* A bit ugly, can we get this from the pci_dev somehow? */ | 23 | /* A bit ugly, can we get this from the pci_dev somehow? */ |
24 | static struct mpic *msi_mpic; | 24 | static struct mpic *msi_mpic; |
25 | 25 | ||
26 | static void mpic_u3msi_mask_irq(unsigned int irq) | 26 | static void mpic_u3msi_mask_irq(struct irq_data *data) |
27 | { | 27 | { |
28 | mask_msi_irq(irq); | 28 | mask_msi_irq(data); |
29 | mpic_mask_irq(irq); | 29 | mpic_mask_irq(data); |
30 | } | 30 | } |
31 | 31 | ||
32 | static void mpic_u3msi_unmask_irq(unsigned int irq) | 32 | static void mpic_u3msi_unmask_irq(struct irq_data *data) |
33 | { | 33 | { |
34 | mpic_unmask_irq(irq); | 34 | mpic_unmask_irq(data); |
35 | unmask_msi_irq(irq); | 35 | unmask_msi_irq(data); |
36 | } | 36 | } |
37 | 37 | ||
38 | static struct irq_chip mpic_u3msi_chip = { | 38 | static struct irq_chip mpic_u3msi_chip = { |
39 | .shutdown = mpic_u3msi_mask_irq, | 39 | .irq_shutdown = mpic_u3msi_mask_irq, |
40 | .mask = mpic_u3msi_mask_irq, | 40 | .irq_mask = mpic_u3msi_mask_irq, |
41 | .unmask = mpic_u3msi_unmask_irq, | 41 | .irq_unmask = mpic_u3msi_unmask_irq, |
42 | .eoi = mpic_end_irq, | 42 | .irq_eoi = mpic_end_irq, |
43 | .set_type = mpic_set_irq_type, | 43 | .irq_set_type = mpic_set_irq_type, |
44 | .set_affinity = mpic_set_affinity, | 44 | .irq_set_affinity = mpic_set_affinity, |
45 | .name = "MPIC-U3MSI", | 45 | .name = "MPIC-U3MSI", |
46 | }; | 46 | }; |
47 | 47 | ||
48 | static u64 read_ht_magic_addr(struct pci_dev *pdev, unsigned int pos) | 48 | static u64 read_ht_magic_addr(struct pci_dev *pdev, unsigned int pos) |
@@ -129,7 +129,7 @@ static void u3msi_teardown_msi_irqs(struct pci_dev *pdev) | |||
129 | if (entry->irq == NO_IRQ) | 129 | if (entry->irq == NO_IRQ) |
130 | continue; | 130 | continue; |
131 | 131 | ||
132 | set_irq_msi(entry->irq, NULL); | 132 | irq_set_msi_desc(entry->irq, NULL); |
133 | msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, | 133 | msi_bitmap_free_hwirqs(&msi_mpic->msi_bitmap, |
134 | virq_to_hw(entry->irq), 1); | 134 | virq_to_hw(entry->irq), 1); |
135 | irq_dispose_mapping(entry->irq); | 135 | irq_dispose_mapping(entry->irq); |
@@ -166,9 +166,9 @@ static int u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) | |||
166 | return -ENOSPC; | 166 | return -ENOSPC; |
167 | } | 167 | } |
168 | 168 | ||
169 | set_irq_msi(virq, entry); | 169 | irq_set_msi_desc(virq, entry); |
170 | set_irq_chip(virq, &mpic_u3msi_chip); | 170 | irq_set_chip(virq, &mpic_u3msi_chip); |
171 | set_irq_type(virq, IRQ_TYPE_EDGE_RISING); | 171 | irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING); |
172 | 172 | ||
173 | pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", | 173 | pr_debug("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", |
174 | virq, hwirq, (unsigned long)addr); | 174 | virq, hwirq, (unsigned long)addr); |
diff --git a/arch/powerpc/sysdev/mv64x60_dev.c b/arch/powerpc/sysdev/mv64x60_dev.c index 1398bc454999..0f6af41ebb44 100644 --- a/arch/powerpc/sysdev/mv64x60_dev.c +++ b/arch/powerpc/sysdev/mv64x60_dev.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/mv643xx.h> | 16 | #include <linux/mv643xx.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/of_platform.h> | 18 | #include <linux/of_platform.h> |
19 | #include <linux/of_net.h> | ||
19 | #include <linux/dma-mapping.h> | 20 | #include <linux/dma-mapping.h> |
20 | 21 | ||
21 | #include <asm/prom.h> | 22 | #include <asm/prom.h> |
@@ -345,7 +346,7 @@ static int __init mv64x60_i2c_device_setup(struct device_node *np, int id) | |||
345 | if (prop) | 346 | if (prop) |
346 | pdata.freq_m = *prop; | 347 | pdata.freq_m = *prop; |
347 | 348 | ||
348 | pdata.freq_m = 3; /* default */ | 349 | pdata.freq_n = 3; /* default */ |
349 | prop = of_get_property(np, "freq_n", NULL); | 350 | prop = of_get_property(np, "freq_n", NULL); |
350 | if (prop) | 351 | if (prop) |
351 | pdata.freq_n = *prop; | 352 | pdata.freq_n = *prop; |
diff --git a/arch/powerpc/sysdev/mv64x60_pic.c b/arch/powerpc/sysdev/mv64x60_pic.c index 485b92477d7c..14d130268e7a 100644 --- a/arch/powerpc/sysdev/mv64x60_pic.c +++ b/arch/powerpc/sysdev/mv64x60_pic.c | |||
@@ -76,9 +76,9 @@ static struct irq_host *mv64x60_irq_host; | |||
76 | * mv64x60_chip_low functions | 76 | * mv64x60_chip_low functions |
77 | */ | 77 | */ |
78 | 78 | ||
79 | static void mv64x60_mask_low(unsigned int virq) | 79 | static void mv64x60_mask_low(struct irq_data *d) |
80 | { | 80 | { |
81 | int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK; | 81 | int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; |
82 | unsigned long flags; | 82 | unsigned long flags; |
83 | 83 | ||
84 | spin_lock_irqsave(&mv64x60_lock, flags); | 84 | spin_lock_irqsave(&mv64x60_lock, flags); |
@@ -89,9 +89,9 @@ static void mv64x60_mask_low(unsigned int virq) | |||
89 | (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO); | 89 | (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_LO); |
90 | } | 90 | } |
91 | 91 | ||
92 | static void mv64x60_unmask_low(unsigned int virq) | 92 | static void mv64x60_unmask_low(struct irq_data *d) |
93 | { | 93 | { |
94 | int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK; | 94 | int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; |
95 | unsigned long flags; | 95 | unsigned long flags; |
96 | 96 | ||
97 | spin_lock_irqsave(&mv64x60_lock, flags); | 97 | spin_lock_irqsave(&mv64x60_lock, flags); |
@@ -104,18 +104,18 @@ static void mv64x60_unmask_low(unsigned int virq) | |||
104 | 104 | ||
105 | static struct irq_chip mv64x60_chip_low = { | 105 | static struct irq_chip mv64x60_chip_low = { |
106 | .name = "mv64x60_low", | 106 | .name = "mv64x60_low", |
107 | .mask = mv64x60_mask_low, | 107 | .irq_mask = mv64x60_mask_low, |
108 | .mask_ack = mv64x60_mask_low, | 108 | .irq_mask_ack = mv64x60_mask_low, |
109 | .unmask = mv64x60_unmask_low, | 109 | .irq_unmask = mv64x60_unmask_low, |
110 | }; | 110 | }; |
111 | 111 | ||
112 | /* | 112 | /* |
113 | * mv64x60_chip_high functions | 113 | * mv64x60_chip_high functions |
114 | */ | 114 | */ |
115 | 115 | ||
116 | static void mv64x60_mask_high(unsigned int virq) | 116 | static void mv64x60_mask_high(struct irq_data *d) |
117 | { | 117 | { |
118 | int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK; | 118 | int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; |
119 | unsigned long flags; | 119 | unsigned long flags; |
120 | 120 | ||
121 | spin_lock_irqsave(&mv64x60_lock, flags); | 121 | spin_lock_irqsave(&mv64x60_lock, flags); |
@@ -126,9 +126,9 @@ static void mv64x60_mask_high(unsigned int virq) | |||
126 | (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI); | 126 | (void)in_le32(mv64x60_irq_reg_base + MV64X60_IC_CPU0_INTR_MASK_HI); |
127 | } | 127 | } |
128 | 128 | ||
129 | static void mv64x60_unmask_high(unsigned int virq) | 129 | static void mv64x60_unmask_high(struct irq_data *d) |
130 | { | 130 | { |
131 | int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK; | 131 | int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; |
132 | unsigned long flags; | 132 | unsigned long flags; |
133 | 133 | ||
134 | spin_lock_irqsave(&mv64x60_lock, flags); | 134 | spin_lock_irqsave(&mv64x60_lock, flags); |
@@ -141,18 +141,18 @@ static void mv64x60_unmask_high(unsigned int virq) | |||
141 | 141 | ||
142 | static struct irq_chip mv64x60_chip_high = { | 142 | static struct irq_chip mv64x60_chip_high = { |
143 | .name = "mv64x60_high", | 143 | .name = "mv64x60_high", |
144 | .mask = mv64x60_mask_high, | 144 | .irq_mask = mv64x60_mask_high, |
145 | .mask_ack = mv64x60_mask_high, | 145 | .irq_mask_ack = mv64x60_mask_high, |
146 | .unmask = mv64x60_unmask_high, | 146 | .irq_unmask = mv64x60_unmask_high, |
147 | }; | 147 | }; |
148 | 148 | ||
149 | /* | 149 | /* |
150 | * mv64x60_chip_gpp functions | 150 | * mv64x60_chip_gpp functions |
151 | */ | 151 | */ |
152 | 152 | ||
153 | static void mv64x60_mask_gpp(unsigned int virq) | 153 | static void mv64x60_mask_gpp(struct irq_data *d) |
154 | { | 154 | { |
155 | int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK; | 155 | int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; |
156 | unsigned long flags; | 156 | unsigned long flags; |
157 | 157 | ||
158 | spin_lock_irqsave(&mv64x60_lock, flags); | 158 | spin_lock_irqsave(&mv64x60_lock, flags); |
@@ -163,9 +163,9 @@ static void mv64x60_mask_gpp(unsigned int virq) | |||
163 | (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK); | 163 | (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_MASK); |
164 | } | 164 | } |
165 | 165 | ||
166 | static void mv64x60_mask_ack_gpp(unsigned int virq) | 166 | static void mv64x60_mask_ack_gpp(struct irq_data *d) |
167 | { | 167 | { |
168 | int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK; | 168 | int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; |
169 | unsigned long flags; | 169 | unsigned long flags; |
170 | 170 | ||
171 | spin_lock_irqsave(&mv64x60_lock, flags); | 171 | spin_lock_irqsave(&mv64x60_lock, flags); |
@@ -178,9 +178,9 @@ static void mv64x60_mask_ack_gpp(unsigned int virq) | |||
178 | (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE); | 178 | (void)in_le32(mv64x60_gpp_reg_base + MV64x60_GPP_INTR_CAUSE); |
179 | } | 179 | } |
180 | 180 | ||
181 | static void mv64x60_unmask_gpp(unsigned int virq) | 181 | static void mv64x60_unmask_gpp(struct irq_data *d) |
182 | { | 182 | { |
183 | int level2 = irq_map[virq].hwirq & MV64x60_LEVEL2_MASK; | 183 | int level2 = irqd_to_hwirq(d) & MV64x60_LEVEL2_MASK; |
184 | unsigned long flags; | 184 | unsigned long flags; |
185 | 185 | ||
186 | spin_lock_irqsave(&mv64x60_lock, flags); | 186 | spin_lock_irqsave(&mv64x60_lock, flags); |
@@ -193,9 +193,9 @@ static void mv64x60_unmask_gpp(unsigned int virq) | |||
193 | 193 | ||
194 | static struct irq_chip mv64x60_chip_gpp = { | 194 | static struct irq_chip mv64x60_chip_gpp = { |
195 | .name = "mv64x60_gpp", | 195 | .name = "mv64x60_gpp", |
196 | .mask = mv64x60_mask_gpp, | 196 | .irq_mask = mv64x60_mask_gpp, |
197 | .mask_ack = mv64x60_mask_ack_gpp, | 197 | .irq_mask_ack = mv64x60_mask_ack_gpp, |
198 | .unmask = mv64x60_unmask_gpp, | 198 | .irq_unmask = mv64x60_unmask_gpp, |
199 | }; | 199 | }; |
200 | 200 | ||
201 | /* | 201 | /* |
@@ -213,11 +213,12 @@ static int mv64x60_host_map(struct irq_host *h, unsigned int virq, | |||
213 | { | 213 | { |
214 | int level1; | 214 | int level1; |
215 | 215 | ||
216 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 216 | irq_set_status_flags(virq, IRQ_LEVEL); |
217 | 217 | ||
218 | level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET; | 218 | level1 = (hwirq & MV64x60_LEVEL1_MASK) >> MV64x60_LEVEL1_OFFSET; |
219 | BUG_ON(level1 > MV64x60_LEVEL1_GPP); | 219 | BUG_ON(level1 > MV64x60_LEVEL1_GPP); |
220 | set_irq_chip_and_handler(virq, mv64x60_chips[level1], handle_level_irq); | 220 | irq_set_chip_and_handler(virq, mv64x60_chips[level1], |
221 | handle_level_irq); | ||
221 | 222 | ||
222 | return 0; | 223 | return 0; |
223 | } | 224 | } |
diff --git a/arch/powerpc/sysdev/pmi.c b/arch/powerpc/sysdev/pmi.c index 24a0bb955b18..8ce4fc3d9828 100644 --- a/arch/powerpc/sysdev/pmi.c +++ b/arch/powerpc/sysdev/pmi.c | |||
@@ -114,15 +114,14 @@ static void pmi_notify_handlers(struct work_struct *work) | |||
114 | 114 | ||
115 | spin_lock(&data->handler_spinlock); | 115 | spin_lock(&data->handler_spinlock); |
116 | list_for_each_entry(handler, &data->handler, node) { | 116 | list_for_each_entry(handler, &data->handler, node) { |
117 | pr_debug(KERN_INFO "pmi: notifying handler %p\n", handler); | 117 | pr_debug("pmi: notifying handler %p\n", handler); |
118 | if (handler->type == data->msg.type) | 118 | if (handler->type == data->msg.type) |
119 | handler->handle_pmi_message(data->msg); | 119 | handler->handle_pmi_message(data->msg); |
120 | } | 120 | } |
121 | spin_unlock(&data->handler_spinlock); | 121 | spin_unlock(&data->handler_spinlock); |
122 | } | 122 | } |
123 | 123 | ||
124 | static int pmi_of_probe(struct platform_device *dev, | 124 | static int pmi_of_probe(struct platform_device *dev) |
125 | const struct of_device_id *match) | ||
126 | { | 125 | { |
127 | struct device_node *np = dev->dev.of_node; | 126 | struct device_node *np = dev->dev.of_node; |
128 | int rc; | 127 | int rc; |
@@ -205,7 +204,7 @@ static int pmi_of_remove(struct platform_device *dev) | |||
205 | return 0; | 204 | return 0; |
206 | } | 205 | } |
207 | 206 | ||
208 | static struct of_platform_driver pmi_of_platform_driver = { | 207 | static struct platform_driver pmi_of_platform_driver = { |
209 | .probe = pmi_of_probe, | 208 | .probe = pmi_of_probe, |
210 | .remove = pmi_of_remove, | 209 | .remove = pmi_of_remove, |
211 | .driver = { | 210 | .driver = { |
@@ -217,13 +216,13 @@ static struct of_platform_driver pmi_of_platform_driver = { | |||
217 | 216 | ||
218 | static int __init pmi_module_init(void) | 217 | static int __init pmi_module_init(void) |
219 | { | 218 | { |
220 | return of_register_platform_driver(&pmi_of_platform_driver); | 219 | return platform_driver_register(&pmi_of_platform_driver); |
221 | } | 220 | } |
222 | module_init(pmi_module_init); | 221 | module_init(pmi_module_init); |
223 | 222 | ||
224 | static void __exit pmi_module_exit(void) | 223 | static void __exit pmi_module_exit(void) |
225 | { | 224 | { |
226 | of_unregister_platform_driver(&pmi_of_platform_driver); | 225 | platform_driver_unregister(&pmi_of_platform_driver); |
227 | } | 226 | } |
228 | module_exit(pmi_module_exit); | 227 | module_exit(pmi_module_exit); |
229 | 228 | ||
diff --git a/arch/powerpc/sysdev/ppc4xx_cpm.c b/arch/powerpc/sysdev/ppc4xx_cpm.c new file mode 100644 index 000000000000..73b86cc5ea74 --- /dev/null +++ b/arch/powerpc/sysdev/ppc4xx_cpm.c | |||
@@ -0,0 +1,346 @@ | |||
1 | /* | ||
2 | * PowerPC 4xx Clock and Power Management | ||
3 | * | ||
4 | * Copyright (C) 2010, Applied Micro Circuits Corporation | ||
5 | * Victor Gallardo (vgallardo@apm.com) | ||
6 | * | ||
7 | * Based on arch/powerpc/platforms/44x/idle.c: | ||
8 | * Jerone Young <jyoung5@us.ibm.com> | ||
9 | * Copyright 2008 IBM Corp. | ||
10 | * | ||
11 | * Based on arch/powerpc/sysdev/fsl_pmc.c: | ||
12 | * Anton Vorontsov <avorontsov@ru.mvista.com> | ||
13 | * Copyright 2009 MontaVista Software, Inc. | ||
14 | * | ||
15 | * See file CREDITS for list of people who contributed to this | ||
16 | * project. | ||
17 | * | ||
18 | * This program is free software; you can redistribute it and/or | ||
19 | * modify it under the terms of the GNU General Public License as | ||
20 | * published by the Free Software Foundation; either version 2 of | ||
21 | * the License, or (at your option) any later version. | ||
22 | * | ||
23 | * This program is distributed in the hope that it will be useful, | ||
24 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
25 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
26 | * GNU General Public License for more details. | ||
27 | * | ||
28 | * You should have received a copy of the GNU General Public License | ||
29 | * along with this program; if not, write to the Free Software | ||
30 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, | ||
31 | * MA 02111-1307 USA | ||
32 | */ | ||
33 | |||
34 | #include <linux/kernel.h> | ||
35 | #include <linux/of_platform.h> | ||
36 | #include <linux/sysfs.h> | ||
37 | #include <linux/cpu.h> | ||
38 | #include <linux/suspend.h> | ||
39 | #include <asm/dcr.h> | ||
40 | #include <asm/dcr-native.h> | ||
41 | #include <asm/machdep.h> | ||
42 | |||
43 | #define CPM_ER 0 | ||
44 | #define CPM_FR 1 | ||
45 | #define CPM_SR 2 | ||
46 | |||
47 | #define CPM_IDLE_WAIT 0 | ||
48 | #define CPM_IDLE_DOZE 1 | ||
49 | |||
50 | struct cpm { | ||
51 | dcr_host_t dcr_host; | ||
52 | unsigned int dcr_offset[3]; | ||
53 | unsigned int powersave_off; | ||
54 | unsigned int unused; | ||
55 | unsigned int idle_doze; | ||
56 | unsigned int standby; | ||
57 | unsigned int suspend; | ||
58 | }; | ||
59 | |||
60 | static struct cpm cpm; | ||
61 | |||
62 | struct cpm_idle_mode { | ||
63 | unsigned int enabled; | ||
64 | const char *name; | ||
65 | }; | ||
66 | |||
67 | static struct cpm_idle_mode idle_mode[] = { | ||
68 | [CPM_IDLE_WAIT] = { 1, "wait" }, /* default */ | ||
69 | [CPM_IDLE_DOZE] = { 0, "doze" }, | ||
70 | }; | ||
71 | |||
72 | static unsigned int cpm_set(unsigned int cpm_reg, unsigned int mask) | ||
73 | { | ||
74 | unsigned int value; | ||
75 | |||
76 | /* CPM controller supports 3 different types of sleep interface | ||
77 | * known as class 1, 2 and 3. For class 1 units, they are | ||
78 | * unconditionally put to sleep when the corresponding CPM bit is | ||
79 | * set. For class 2 and 3 units this is not the case; if they can | ||
80 | * be put to sleep, they will. Here we do not verify, we just | ||
81 | * set them and expect them to eventually go off when they can. | ||
82 | */ | ||
83 | value = dcr_read(cpm.dcr_host, cpm.dcr_offset[cpm_reg]); | ||
84 | dcr_write(cpm.dcr_host, cpm.dcr_offset[cpm_reg], value | mask); | ||
85 | |||
86 | /* return old state, to restore later if needed */ | ||
87 | return value; | ||
88 | } | ||
89 | |||
90 | static void cpm_idle_wait(void) | ||
91 | { | ||
92 | unsigned long msr_save; | ||
93 | |||
94 | /* save off initial state */ | ||
95 | msr_save = mfmsr(); | ||
96 | /* sync required when CPM0_ER[CPU] is set */ | ||
97 | mb(); | ||
98 | /* set wait state MSR */ | ||
99 | mtmsr(msr_save|MSR_WE|MSR_EE|MSR_CE|MSR_DE); | ||
100 | isync(); | ||
101 | /* return to initial state */ | ||
102 | mtmsr(msr_save); | ||
103 | isync(); | ||
104 | } | ||
105 | |||
106 | static void cpm_idle_sleep(unsigned int mask) | ||
107 | { | ||
108 | unsigned int er_save; | ||
109 | |||
110 | /* update CPM_ER state */ | ||
111 | er_save = cpm_set(CPM_ER, mask); | ||
112 | |||
113 | /* go to wait state so that CPM0_ER[CPU] can take effect */ | ||
114 | cpm_idle_wait(); | ||
115 | |||
116 | /* restore CPM_ER state */ | ||
117 | dcr_write(cpm.dcr_host, cpm.dcr_offset[CPM_ER], er_save); | ||
118 | } | ||
119 | |||
120 | static void cpm_idle_doze(void) | ||
121 | { | ||
122 | cpm_idle_sleep(cpm.idle_doze); | ||
123 | } | ||
124 | |||
125 | static void cpm_idle_config(int mode) | ||
126 | { | ||
127 | int i; | ||
128 | |||
129 | if (idle_mode[mode].enabled) | ||
130 | return; | ||
131 | |||
132 | for (i = 0; i < ARRAY_SIZE(idle_mode); i++) | ||
133 | idle_mode[i].enabled = 0; | ||
134 | |||
135 | idle_mode[mode].enabled = 1; | ||
136 | } | ||
137 | |||
138 | static ssize_t cpm_idle_show(struct kobject *kobj, | ||
139 | struct kobj_attribute *attr, char *buf) | ||
140 | { | ||
141 | char *s = buf; | ||
142 | int i; | ||
143 | |||
144 | for (i = 0; i < ARRAY_SIZE(idle_mode); i++) { | ||
145 | if (idle_mode[i].enabled) | ||
146 | s += sprintf(s, "[%s] ", idle_mode[i].name); | ||
147 | else | ||
148 | s += sprintf(s, "%s ", idle_mode[i].name); | ||
149 | } | ||
150 | |||
151 | *(s-1) = '\n'; /* convert the last space to a newline */ | ||
152 | |||
153 | return s - buf; | ||
154 | } | ||
155 | |||
156 | static ssize_t cpm_idle_store(struct kobject *kobj, | ||
157 | struct kobj_attribute *attr, | ||
158 | const char *buf, size_t n) | ||
159 | { | ||
160 | int i; | ||
161 | char *p; | ||
162 | int len; | ||
163 | |||
164 | p = memchr(buf, '\n', n); | ||
165 | len = p ? p - buf : n; | ||
166 | |||
167 | for (i = 0; i < ARRAY_SIZE(idle_mode); i++) { | ||
168 | if (strncmp(buf, idle_mode[i].name, len) == 0) { | ||
169 | cpm_idle_config(i); | ||
170 | return n; | ||
171 | } | ||
172 | } | ||
173 | |||
174 | return -EINVAL; | ||
175 | } | ||
176 | |||
177 | static struct kobj_attribute cpm_idle_attr = | ||
178 | __ATTR(idle, 0644, cpm_idle_show, cpm_idle_store); | ||
179 | |||
180 | static void cpm_idle_config_sysfs(void) | ||
181 | { | ||
182 | struct sys_device *sys_dev; | ||
183 | unsigned long ret; | ||
184 | |||
185 | sys_dev = get_cpu_sysdev(0); | ||
186 | |||
187 | ret = sysfs_create_file(&sys_dev->kobj, | ||
188 | &cpm_idle_attr.attr); | ||
189 | if (ret) | ||
190 | printk(KERN_WARNING | ||
191 | "cpm: failed to create idle sysfs entry\n"); | ||
192 | } | ||
193 | |||
194 | static void cpm_idle(void) | ||
195 | { | ||
196 | if (idle_mode[CPM_IDLE_DOZE].enabled) | ||
197 | cpm_idle_doze(); | ||
198 | else | ||
199 | cpm_idle_wait(); | ||
200 | } | ||
201 | |||
202 | static int cpm_suspend_valid(suspend_state_t state) | ||
203 | { | ||
204 | switch (state) { | ||
205 | case PM_SUSPEND_STANDBY: | ||
206 | return !!cpm.standby; | ||
207 | case PM_SUSPEND_MEM: | ||
208 | return !!cpm.suspend; | ||
209 | default: | ||
210 | return 0; | ||
211 | } | ||
212 | } | ||
213 | |||
214 | static void cpm_suspend_standby(unsigned int mask) | ||
215 | { | ||
216 | unsigned long tcr_save; | ||
217 | |||
218 | /* disable decrement interrupt */ | ||
219 | tcr_save = mfspr(SPRN_TCR); | ||
220 | mtspr(SPRN_TCR, tcr_save & ~TCR_DIE); | ||
221 | |||
222 | /* go to sleep state */ | ||
223 | cpm_idle_sleep(mask); | ||
224 | |||
225 | /* restore decrement interrupt */ | ||
226 | mtspr(SPRN_TCR, tcr_save); | ||
227 | } | ||
228 | |||
229 | static int cpm_suspend_enter(suspend_state_t state) | ||
230 | { | ||
231 | switch (state) { | ||
232 | case PM_SUSPEND_STANDBY: | ||
233 | cpm_suspend_standby(cpm.standby); | ||
234 | break; | ||
235 | case PM_SUSPEND_MEM: | ||
236 | cpm_suspend_standby(cpm.suspend); | ||
237 | break; | ||
238 | } | ||
239 | |||
240 | return 0; | ||
241 | } | ||
242 | |||
243 | static struct platform_suspend_ops cpm_suspend_ops = { | ||
244 | .valid = cpm_suspend_valid, | ||
245 | .enter = cpm_suspend_enter, | ||
246 | }; | ||
247 | |||
248 | static int cpm_get_uint_property(struct device_node *np, | ||
249 | const char *name) | ||
250 | { | ||
251 | int len; | ||
252 | const unsigned int *prop = of_get_property(np, name, &len); | ||
253 | |||
254 | if (prop == NULL || len < sizeof(u32)) | ||
255 | return 0; | ||
256 | |||
257 | return *prop; | ||
258 | } | ||
259 | |||
260 | static int __init cpm_init(void) | ||
261 | { | ||
262 | struct device_node *np; | ||
263 | int dcr_base, dcr_len; | ||
264 | int ret = 0; | ||
265 | |||
266 | if (!cpm.powersave_off) { | ||
267 | cpm_idle_config(CPM_IDLE_WAIT); | ||
268 | ppc_md.power_save = &cpm_idle; | ||
269 | } | ||
270 | |||
271 | np = of_find_compatible_node(NULL, NULL, "ibm,cpm"); | ||
272 | if (!np) { | ||
273 | ret = -EINVAL; | ||
274 | goto out; | ||
275 | } | ||
276 | |||
277 | dcr_base = dcr_resource_start(np, 0); | ||
278 | dcr_len = dcr_resource_len(np, 0); | ||
279 | |||
280 | if (dcr_base == 0 || dcr_len == 0) { | ||
281 | printk(KERN_ERR "cpm: could not parse dcr property for %s\n", | ||
282 | np->full_name); | ||
283 | ret = -EINVAL; | ||
284 | goto out; | ||
285 | } | ||
286 | |||
287 | cpm.dcr_host = dcr_map(np, dcr_base, dcr_len); | ||
288 | |||
289 | if (!DCR_MAP_OK(cpm.dcr_host)) { | ||
290 | printk(KERN_ERR "cpm: failed to map dcr property for %s\n", | ||
291 | np->full_name); | ||
292 | ret = -EINVAL; | ||
293 | goto out; | ||
294 | } | ||
295 | |||
296 | /* All 4xx SoCs with a CPM controller have one of two | ||
297 | * different order for the CPM registers. Some have the | ||
298 | * CPM registers in the following order (ER,FR,SR). The | ||
299 | * others have them in the following order (SR,ER,FR). | ||
300 | */ | ||
301 | |||
302 | if (cpm_get_uint_property(np, "er-offset") == 0) { | ||
303 | cpm.dcr_offset[CPM_ER] = 0; | ||
304 | cpm.dcr_offset[CPM_FR] = 1; | ||
305 | cpm.dcr_offset[CPM_SR] = 2; | ||
306 | } else { | ||
307 | cpm.dcr_offset[CPM_ER] = 1; | ||
308 | cpm.dcr_offset[CPM_FR] = 2; | ||
309 | cpm.dcr_offset[CPM_SR] = 0; | ||
310 | } | ||
311 | |||
312 | /* Now let's see what IPs to turn off for the following modes */ | ||
313 | |||
314 | cpm.unused = cpm_get_uint_property(np, "unused-units"); | ||
315 | cpm.idle_doze = cpm_get_uint_property(np, "idle-doze"); | ||
316 | cpm.standby = cpm_get_uint_property(np, "standby"); | ||
317 | cpm.suspend = cpm_get_uint_property(np, "suspend"); | ||
318 | |||
319 | /* If some IPs are unused let's turn them off now */ | ||
320 | |||
321 | if (cpm.unused) { | ||
322 | cpm_set(CPM_ER, cpm.unused); | ||
323 | cpm_set(CPM_FR, cpm.unused); | ||
324 | } | ||
325 | |||
326 | /* Now let's export interfaces */ | ||
327 | |||
328 | if (!cpm.powersave_off && cpm.idle_doze) | ||
329 | cpm_idle_config_sysfs(); | ||
330 | |||
331 | if (cpm.standby || cpm.suspend) | ||
332 | suspend_set_ops(&cpm_suspend_ops); | ||
333 | out: | ||
334 | if (np) | ||
335 | of_node_put(np); | ||
336 | return ret; | ||
337 | } | ||
338 | |||
339 | late_initcall(cpm_init); | ||
340 | |||
341 | static int __init cpm_powersave_off(char *arg) | ||
342 | { | ||
343 | cpm.powersave_off = 1; | ||
344 | return 0; | ||
345 | } | ||
346 | __setup("powersave=off", cpm_powersave_off); | ||
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c new file mode 100644 index 000000000000..367af0241851 --- /dev/null +++ b/arch/powerpc/sysdev/ppc4xx_msi.c | |||
@@ -0,0 +1,276 @@ | |||
1 | /* | ||
2 | * Adding PCI-E MSI support for PPC4XX SoCs. | ||
3 | * | ||
4 | * Copyright (c) 2010, Applied Micro Circuits Corporation | ||
5 | * Authors: Tirumala R Marri <tmarri@apm.com> | ||
6 | * Feng Kan <fkan@apm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License as | ||
10 | * published by the Free Software Foundation; either version 2 of | ||
11 | * the License, or (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, | ||
21 | * MA 02111-1307 USA | ||
22 | */ | ||
23 | |||
24 | #include <linux/irq.h> | ||
25 | #include <linux/bootmem.h> | ||
26 | #include <linux/pci.h> | ||
27 | #include <linux/msi.h> | ||
28 | #include <linux/of_platform.h> | ||
29 | #include <linux/interrupt.h> | ||
30 | #include <asm/prom.h> | ||
31 | #include <asm/hw_irq.h> | ||
32 | #include <asm/ppc-pci.h> | ||
33 | #include <boot/dcr.h> | ||
34 | #include <asm/dcr-regs.h> | ||
35 | #include <asm/msi_bitmap.h> | ||
36 | |||
37 | #define PEIH_TERMADH 0x00 | ||
38 | #define PEIH_TERMADL 0x08 | ||
39 | #define PEIH_MSIED 0x10 | ||
40 | #define PEIH_MSIMK 0x18 | ||
41 | #define PEIH_MSIASS 0x20 | ||
42 | #define PEIH_FLUSH0 0x30 | ||
43 | #define PEIH_FLUSH1 0x38 | ||
44 | #define PEIH_CNTRST 0x48 | ||
45 | #define NR_MSI_IRQS 4 | ||
46 | |||
47 | struct ppc4xx_msi { | ||
48 | u32 msi_addr_lo; | ||
49 | u32 msi_addr_hi; | ||
50 | void __iomem *msi_regs; | ||
51 | int msi_virqs[NR_MSI_IRQS]; | ||
52 | struct msi_bitmap bitmap; | ||
53 | struct device_node *msi_dev; | ||
54 | }; | ||
55 | |||
56 | static struct ppc4xx_msi ppc4xx_msi; | ||
57 | |||
58 | static int ppc4xx_msi_init_allocator(struct platform_device *dev, | ||
59 | struct ppc4xx_msi *msi_data) | ||
60 | { | ||
61 | int err; | ||
62 | |||
63 | err = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS, | ||
64 | dev->dev.of_node); | ||
65 | if (err) | ||
66 | return err; | ||
67 | |||
68 | err = msi_bitmap_reserve_dt_hwirqs(&msi_data->bitmap); | ||
69 | if (err < 0) { | ||
70 | msi_bitmap_free(&msi_data->bitmap); | ||
71 | return err; | ||
72 | } | ||
73 | |||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | ||
78 | { | ||
79 | int int_no = -ENOMEM; | ||
80 | unsigned int virq; | ||
81 | struct msi_msg msg; | ||
82 | struct msi_desc *entry; | ||
83 | struct ppc4xx_msi *msi_data = &ppc4xx_msi; | ||
84 | |||
85 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
86 | int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1); | ||
87 | if (int_no < 0) { | ||
88 | pr_debug("%s: failed to allocate msi interrupt\n", | ||
89 | __func__); | ||
90 | break; | ||
91 | } | ||
92 | | ||
93 | virq = irq_of_parse_and_map(msi_data->msi_dev, int_no); | ||
94 | if (virq == NO_IRQ) { | ||
95 | dev_err(&dev->dev, "%s: fail mapping irq\n", __func__); | ||
96 | msi_bitmap_free_hwirqs(&msi_data->bitmap, int_no, 1); | ||
97 | return -ENOSPC; | ||
98 | } | ||
99 | dev_dbg(&dev->dev, "%s: virq = %d\n", __func__, virq); | ||
100 | |||
101 | /* Setup msi address space */ | ||
102 | msg.address_hi = msi_data->msi_addr_hi; | ||
103 | msg.address_lo = msi_data->msi_addr_lo; | ||
104 | |||
105 | irq_set_msi_desc(virq, entry); | ||
106 | msg.data = int_no; | ||
107 | write_msi_msg(virq, &msg); | ||
108 | } | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | void ppc4xx_teardown_msi_irqs(struct pci_dev *dev) | ||
113 | { | ||
114 | struct msi_desc *entry; | ||
115 | struct ppc4xx_msi *msi_data = &ppc4xx_msi; | ||
116 | |||
117 | dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n"); | ||
118 | |||
119 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
120 | if (entry->irq == NO_IRQ) | ||
121 | continue; | ||
122 | irq_set_msi_desc(entry->irq, NULL); | ||
123 | msi_bitmap_free_hwirqs(&msi_data->bitmap, | ||
124 | virq_to_hw(entry->irq), 1); | ||
125 | irq_dispose_mapping(entry->irq); | ||
126 | } | ||
127 | } | ||
128 | |||
129 | static int ppc4xx_msi_check_device(struct pci_dev *pdev, int nvec, int type) | ||
130 | { | ||
131 | dev_dbg(&pdev->dev, "PCIE-MSI:%s called. vec %x type %d\n", | ||
132 | __func__, nvec, type); | ||
133 | if (type == PCI_CAP_ID_MSIX) | ||
134 | pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n"); | ||
135 | |||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static int ppc4xx_setup_pcieh_hw(struct platform_device *dev, | ||
140 | struct resource res, struct ppc4xx_msi *msi) | ||
141 | { | ||
142 | const u32 *msi_data; | ||
143 | const u32 *msi_mask; | ||
144 | const u32 *sdr_addr; | ||
145 | dma_addr_t msi_phys; | ||
146 | void *msi_virt; | ||
147 | |||
148 | sdr_addr = of_get_property(dev->dev.of_node, "sdr-base", NULL); | ||
149 | if (!sdr_addr) | ||
150 | return -1; | ||
151 | |||
152 | SDR0_WRITE(*sdr_addr, (u64)res.start >> 32); /* HIGH addr */ | ||
153 | SDR0_WRITE(*sdr_addr + 1, res.start & 0xFFFFFFFF); /* Low addr */ | ||
154 | |||
155 | |||
156 | msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi"); | ||
157 | if (!msi->msi_dev) | ||
158 | return -ENODEV; | ||
159 | |||
160 | msi->msi_regs = of_iomap(msi->msi_dev, 0); | ||
161 | if (!msi->msi_regs) { | ||
162 | dev_err(&dev->dev, "of_iomap failed\n"); | ||
163 | return -ENOMEM; | ||
164 | } | ||
165 | dev_dbg(&dev->dev, "PCIE-MSI: msi register mapped 0x%x 0x%x\n", | ||
166 | (u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs)); | ||
167 | |||
168 | msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL); | ||
169 | msi->msi_addr_hi = 0x0; | ||
170 | msi->msi_addr_lo = (u32) msi_phys; | ||
171 | dev_dbg(&dev->dev, "PCIE-MSI: msi address 0x%x\n", msi->msi_addr_lo); | ||
172 | |||
173 | /* Program the Interrupt handler Termination addr registers */ | ||
174 | out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi); | ||
175 | out_be32(msi->msi_regs + PEIH_TERMADL, msi->msi_addr_lo); | ||
176 | |||
177 | msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL); | ||
178 | if (!msi_data) | ||
179 | return -1; | ||
180 | msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL); | ||
181 | if (!msi_mask) | ||
182 | return -1; | ||
183 | /* Program MSI Expected data and Mask bits */ | ||
184 | out_be32(msi->msi_regs + PEIH_MSIED, *msi_data); | ||
185 | out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask); | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | static int ppc4xx_of_msi_remove(struct platform_device *dev) | ||
191 | { | ||
192 | struct ppc4xx_msi *msi = dev->dev.platform_data; | ||
193 | int i; | ||
194 | int virq; | ||
195 | |||
196 | for (i = 0; i < NR_MSI_IRQS; i++) { | ||
197 | virq = msi->msi_virqs[i]; | ||
198 | if (virq != NO_IRQ) | ||
199 | irq_dispose_mapping(virq); | ||
200 | } | ||
201 | |||
202 | if (msi->bitmap.bitmap) | ||
203 | msi_bitmap_free(&msi->bitmap); | ||
204 | iounmap(msi->msi_regs); | ||
205 | of_node_put(msi->msi_dev); | ||
206 | kfree(msi); | ||
207 | |||
208 | return 0; | ||
209 | } | ||
210 | |||
211 | static int __devinit ppc4xx_msi_probe(struct platform_device *dev) | ||
212 | { | ||
213 | struct ppc4xx_msi *msi; | ||
214 | struct resource res; | ||
215 | int err = 0; | ||
216 | |||
217 | msi = &ppc4xx_msi; /* keep the msi data for further use */ | ||
218 | |||
219 | dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n"); | ||
220 | |||
221 | msi = kzalloc(sizeof(struct ppc4xx_msi), GFP_KERNEL); | ||
222 | if (!msi) { | ||
223 | dev_err(&dev->dev, "No memory for MSI structure\n"); | ||
224 | return -ENOMEM; | ||
225 | } | ||
226 | dev->dev.platform_data = msi; | ||
227 | |||
228 | /* Get MSI ranges */ | ||
229 | err = of_address_to_resource(dev->dev.of_node, 0, &res); | ||
230 | if (err) { | ||
231 | dev_err(&dev->dev, "%s resource error!\n", | ||
232 | dev->dev.of_node->full_name); | ||
233 | goto error_out; | ||
234 | } | ||
235 | |||
236 | if (ppc4xx_setup_pcieh_hw(dev, res, msi)) | ||
237 | goto error_out; | ||
238 | |||
239 | err = ppc4xx_msi_init_allocator(dev, msi); | ||
240 | if (err) { | ||
241 | dev_err(&dev->dev, "Error allocating MSI bitmap\n"); | ||
242 | goto error_out; | ||
243 | } | ||
244 | |||
245 | ppc_md.setup_msi_irqs = ppc4xx_setup_msi_irqs; | ||
246 | ppc_md.teardown_msi_irqs = ppc4xx_teardown_msi_irqs; | ||
247 | ppc_md.msi_check_device = ppc4xx_msi_check_device; | ||
248 | return err; | ||
249 | |||
250 | error_out: | ||
251 | ppc4xx_of_msi_remove(dev); | ||
252 | return err; | ||
253 | } | ||
254 | static const struct of_device_id ppc4xx_msi_ids[] = { | ||
255 | { | ||
256 | .compatible = "amcc,ppc4xx-msi", | ||
257 | }, | ||
258 | {} | ||
259 | }; | ||
260 | static struct platform_driver ppc4xx_msi_driver = { | ||
261 | .probe = ppc4xx_msi_probe, | ||
262 | .remove = ppc4xx_of_msi_remove, | ||
263 | .driver = { | ||
264 | .name = "ppc4xx-msi", | ||
265 | .owner = THIS_MODULE, | ||
266 | .of_match_table = ppc4xx_msi_ids, | ||
267 | }, | ||
268 | |||
269 | }; | ||
270 | |||
271 | static __init int ppc4xx_msi_init(void) | ||
272 | { | ||
273 | return platform_driver_register(&ppc4xx_msi_driver); | ||
274 | } | ||
275 | |||
276 | subsys_initcall(ppc4xx_msi_init); | ||
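
For context, a minimal sketch of how this backend gets exercised (illustrative only, not part of the patch; the example_* names are hypothetical): a PCIe device driver calls pci_enable_msi(), which lands in ppc_md.setup_msi_irqs(), i.e. ppc4xx_setup_msi_irqs() above, and pci_disable_msi() tears down through ppc4xx_teardown_msi_irqs().

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t example_msi_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int err;

	/* Allocates a hwirq from the bitmap, maps a virq and writes the
	 * MSI address/data message into the device via the hooks above. */
	err = pci_enable_msi(pdev);
	if (err)
		return err;	/* could fall back to legacy INTx instead */

	err = request_irq(pdev->irq, example_msi_handler, 0, "example", pdev);
	if (err)
		pci_disable_msi(pdev);	/* ppc4xx_teardown_msi_irqs() path */
	return err;
}
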
diff --git a/arch/powerpc/sysdev/ppc4xx_pci.h b/arch/powerpc/sysdev/ppc4xx_pci.h index 56d9e5deccbf..c39a134e8684 100644 --- a/arch/powerpc/sysdev/ppc4xx_pci.h +++ b/arch/powerpc/sysdev/ppc4xx_pci.h | |||
@@ -324,7 +324,7 @@ | |||
324 | #define PESDR0_460EX_IHS2 0x036D | 324 | #define PESDR0_460EX_IHS2 0x036D |
325 | 325 | ||
326 | /* | 326 | /* |
327 | * 460SX addtional DCRs | 327 | * 460SX additional DCRs |
328 | */ | 328 | */ |
329 | #define PESDRn_460SX_RCEI 0x02 | 329 | #define PESDRn_460SX_RCEI 0x02 |
330 | 330 | ||
diff --git a/arch/powerpc/sysdev/qe_lib/qe.c b/arch/powerpc/sysdev/qe_lib/qe.c index 90020de4dcf2..904c6cbaf45b 100644 --- a/arch/powerpc/sysdev/qe_lib/qe.c +++ b/arch/powerpc/sysdev/qe_lib/qe.c | |||
@@ -659,8 +659,7 @@ static int qe_resume(struct platform_device *ofdev) | |||
659 | return 0; | 659 | return 0; |
660 | } | 660 | } |
661 | 661 | ||
662 | static int qe_probe(struct platform_device *ofdev, | 662 | static int qe_probe(struct platform_device *ofdev) |
663 | const struct of_device_id *id) | ||
664 | { | 663 | { |
665 | return 0; | 664 | return 0; |
666 | } | 665 | } |
@@ -670,7 +669,7 @@ static const struct of_device_id qe_ids[] = { | |||
670 | { }, | 669 | { }, |
671 | }; | 670 | }; |
672 | 671 | ||
673 | static struct of_platform_driver qe_driver = { | 672 | static struct platform_driver qe_driver = { |
674 | .driver = { | 673 | .driver = { |
675 | .name = "fsl-qe", | 674 | .name = "fsl-qe", |
676 | .owner = THIS_MODULE, | 675 | .owner = THIS_MODULE, |
@@ -682,7 +681,7 @@ static struct of_platform_driver qe_driver = { | |||
682 | 681 | ||
683 | static int __init qe_drv_init(void) | 682 | static int __init qe_drv_init(void) |
684 | { | 683 | { |
685 | return of_register_platform_driver(&qe_driver); | 684 | return platform_driver_register(&qe_driver); |
686 | } | 685 | } |
687 | device_initcall(qe_drv_init); | 686 | device_initcall(qe_drv_init); |
688 | #endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */ | 687 | #endif /* defined(CONFIG_SUSPEND) && defined(CONFIG_PPC_85xx) */ |
diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c index 541ba9863647..b2acda07220d 100644 --- a/arch/powerpc/sysdev/qe_lib/qe_ic.c +++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c | |||
@@ -189,15 +189,18 @@ static inline void qe_ic_write(volatile __be32 __iomem * base, unsigned int reg | |||
189 | 189 | ||
190 | static inline struct qe_ic *qe_ic_from_irq(unsigned int virq) | 190 | static inline struct qe_ic *qe_ic_from_irq(unsigned int virq) |
191 | { | 191 | { |
192 | return irq_to_desc(virq)->chip_data; | 192 | return irq_get_chip_data(virq); |
193 | } | 193 | } |
194 | 194 | ||
195 | #define virq_to_hw(virq) ((unsigned int)irq_map[virq].hwirq) | 195 | static inline struct qe_ic *qe_ic_from_irq_data(struct irq_data *d) |
196 | { | ||
197 | return irq_data_get_irq_chip_data(d); | ||
198 | } | ||
196 | 199 | ||
197 | static void qe_ic_unmask_irq(unsigned int virq) | 200 | static void qe_ic_unmask_irq(struct irq_data *d) |
198 | { | 201 | { |
199 | struct qe_ic *qe_ic = qe_ic_from_irq(virq); | 202 | struct qe_ic *qe_ic = qe_ic_from_irq_data(d); |
200 | unsigned int src = virq_to_hw(virq); | 203 | unsigned int src = irqd_to_hwirq(d); |
201 | unsigned long flags; | 204 | unsigned long flags; |
202 | u32 temp; | 205 | u32 temp; |
203 | 206 | ||
@@ -210,10 +213,10 @@ static void qe_ic_unmask_irq(unsigned int virq) | |||
210 | raw_spin_unlock_irqrestore(&qe_ic_lock, flags); | 213 | raw_spin_unlock_irqrestore(&qe_ic_lock, flags); |
211 | } | 214 | } |
212 | 215 | ||
213 | static void qe_ic_mask_irq(unsigned int virq) | 216 | static void qe_ic_mask_irq(struct irq_data *d) |
214 | { | 217 | { |
215 | struct qe_ic *qe_ic = qe_ic_from_irq(virq); | 218 | struct qe_ic *qe_ic = qe_ic_from_irq_data(d); |
216 | unsigned int src = virq_to_hw(virq); | 219 | unsigned int src = irqd_to_hwirq(d); |
217 | unsigned long flags; | 220 | unsigned long flags; |
218 | u32 temp; | 221 | u32 temp; |
219 | 222 | ||
@@ -238,9 +241,9 @@ static void qe_ic_mask_irq(unsigned int virq) | |||
238 | 241 | ||
239 | static struct irq_chip qe_ic_irq_chip = { | 242 | static struct irq_chip qe_ic_irq_chip = { |
240 | .name = "QEIC", | 243 | .name = "QEIC", |
241 | .unmask = qe_ic_unmask_irq, | 244 | .irq_unmask = qe_ic_unmask_irq, |
242 | .mask = qe_ic_mask_irq, | 245 | .irq_mask = qe_ic_mask_irq, |
243 | .mask_ack = qe_ic_mask_irq, | 246 | .irq_mask_ack = qe_ic_mask_irq, |
244 | }; | 247 | }; |
245 | 248 | ||
246 | static int qe_ic_host_match(struct irq_host *h, struct device_node *node) | 249 | static int qe_ic_host_match(struct irq_host *h, struct device_node *node) |
@@ -262,10 +265,10 @@ static int qe_ic_host_map(struct irq_host *h, unsigned int virq, | |||
262 | /* Default chip */ | 265 | /* Default chip */ |
263 | chip = &qe_ic->hc_irq; | 266 | chip = &qe_ic->hc_irq; |
264 | 267 | ||
265 | set_irq_chip_data(virq, qe_ic); | 268 | irq_set_chip_data(virq, qe_ic); |
266 | irq_to_desc(virq)->status |= IRQ_LEVEL; | 269 | irq_set_status_flags(virq, IRQ_LEVEL); |
267 | 270 | ||
268 | set_irq_chip_and_handler(virq, chip, handle_level_irq); | 271 | irq_set_chip_and_handler(virq, chip, handle_level_irq); |
269 | 272 | ||
270 | return 0; | 273 | return 0; |
271 | } | 274 | } |
@@ -381,13 +384,13 @@ void __init qe_ic_init(struct device_node *node, unsigned int flags, | |||
381 | 384 | ||
382 | qe_ic_write(qe_ic->regs, QEIC_CICR, temp); | 385 | qe_ic_write(qe_ic->regs, QEIC_CICR, temp); |
383 | 386 | ||
384 | set_irq_data(qe_ic->virq_low, qe_ic); | 387 | irq_set_handler_data(qe_ic->virq_low, qe_ic); |
385 | set_irq_chained_handler(qe_ic->virq_low, low_handler); | 388 | irq_set_chained_handler(qe_ic->virq_low, low_handler); |
386 | 389 | ||
387 | if (qe_ic->virq_high != NO_IRQ && | 390 | if (qe_ic->virq_high != NO_IRQ && |
388 | qe_ic->virq_high != qe_ic->virq_low) { | 391 | qe_ic->virq_high != qe_ic->virq_low) { |
389 | set_irq_data(qe_ic->virq_high, qe_ic); | 392 | irq_set_handler_data(qe_ic->virq_high, qe_ic); |
390 | set_irq_chained_handler(qe_ic->virq_high, high_handler); | 393 | irq_set_chained_handler(qe_ic->virq_high, high_handler); |
391 | } | 394 | } |
392 | } | 395 | } |
393 | 396 | ||
diff --git a/arch/powerpc/sysdev/scom.c b/arch/powerpc/sysdev/scom.c new file mode 100644 index 000000000000..b2593ce30c9b --- /dev/null +++ b/arch/powerpc/sysdev/scom.c | |||
@@ -0,0 +1,192 @@ | |||
1 | /* | ||
2 | * Copyright 2010 Benjamin Herrenschmidt, IBM Corp | ||
3 | * <benh@kernel.crashing.org> | ||
4 | * and David Gibson, IBM Corporation. | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See | ||
14 | * the GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | ||
20 | |||
21 | #include <linux/kernel.h> | ||
22 | #include <linux/debugfs.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <asm/prom.h> | ||
25 | #include <asm/scom.h> | ||
26 | |||
27 | const struct scom_controller *scom_controller; | ||
28 | EXPORT_SYMBOL_GPL(scom_controller); | ||
29 | |||
30 | struct device_node *scom_find_parent(struct device_node *node) | ||
31 | { | ||
32 | struct device_node *par, *tmp; | ||
33 | const u32 *p; | ||
34 | |||
35 | for (par = of_node_get(node); par;) { | ||
36 | if (of_get_property(par, "scom-controller", NULL)) | ||
37 | break; | ||
38 | p = of_get_property(par, "scom-parent", NULL); | ||
39 | tmp = par; | ||
40 | if (p == NULL) | ||
41 | par = of_get_parent(par); | ||
42 | else | ||
43 | par = of_find_node_by_phandle(*p); | ||
44 | of_node_put(tmp); | ||
45 | } | ||
46 | return par; | ||
47 | } | ||
48 | EXPORT_SYMBOL_GPL(scom_find_parent); | ||
49 | |||
50 | scom_map_t scom_map_device(struct device_node *dev, int index) | ||
51 | { | ||
52 | struct device_node *parent; | ||
53 | unsigned int cells, size; | ||
54 | const u32 *prop; | ||
55 | u64 reg, cnt; | ||
56 | scom_map_t ret; | ||
57 | |||
58 | parent = scom_find_parent(dev); | ||
59 | |||
60 | if (parent == NULL) | ||
61 | return 0; | ||
62 | |||
63 | prop = of_get_property(parent, "#scom-cells", NULL); | ||
64 | cells = prop ? *prop : 1; | ||
65 | |||
66 | prop = of_get_property(dev, "scom-reg", &size); | ||
67 | if (!prop) | ||
68 | return 0; | ||
69 | size >>= 2; | ||
70 | |||
71 | if (index >= (size / (2*cells))) | ||
72 | return 0; | ||
73 | |||
74 | reg = of_read_number(&prop[index * cells * 2], cells); | ||
75 | cnt = of_read_number(&prop[index * cells * 2 + cells], cells); | ||
76 | |||
77 | ret = scom_map(parent, reg, cnt); | ||
78 | of_node_put(parent); | ||
79 | |||
80 | return ret; | ||
81 | } | ||
82 | EXPORT_SYMBOL_GPL(scom_map_device); | ||
83 | |||
84 | #ifdef CONFIG_SCOM_DEBUGFS | ||
85 | struct scom_debug_entry { | ||
86 | struct device_node *dn; | ||
87 | unsigned long addr; | ||
88 | scom_map_t map; | ||
89 | spinlock_t lock; | ||
90 | char name[8]; | ||
91 | struct debugfs_blob_wrapper blob; | ||
92 | }; | ||
93 | |||
94 | static int scom_addr_set(void *data, u64 val) | ||
95 | { | ||
96 | struct scom_debug_entry *ent = data; | ||
97 | |||
98 | ent->addr = 0; | ||
99 | scom_unmap(ent->map); | ||
100 | |||
101 | ent->map = scom_map(ent->dn, val, 1); | ||
102 | if (scom_map_ok(ent->map)) | ||
103 | ent->addr = val; | ||
104 | else | ||
105 | return -EFAULT; | ||
106 | |||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static int scom_addr_get(void *data, u64 *val) | ||
111 | { | ||
112 | struct scom_debug_entry *ent = data; | ||
113 | *val = ent->addr; | ||
114 | return 0; | ||
115 | } | ||
116 | DEFINE_SIMPLE_ATTRIBUTE(scom_addr_fops, scom_addr_get, scom_addr_set, | ||
117 | "0x%llx\n"); | ||
118 | |||
119 | static int scom_val_set(void *data, u64 val) | ||
120 | { | ||
121 | struct scom_debug_entry *ent = data; | ||
122 | |||
123 | if (!scom_map_ok(ent->map)) | ||
124 | return -EFAULT; | ||
125 | |||
126 | scom_write(ent->map, 0, val); | ||
127 | |||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | static int scom_val_get(void *data, u64 *val) | ||
132 | { | ||
133 | struct scom_debug_entry *ent = data; | ||
134 | |||
135 | if (!scom_map_ok(ent->map)) | ||
136 | return -EFAULT; | ||
137 | |||
138 | *val = scom_read(ent->map, 0); | ||
139 | return 0; | ||
140 | } | ||
141 | DEFINE_SIMPLE_ATTRIBUTE(scom_val_fops, scom_val_get, scom_val_set, | ||
142 | "0x%llx\n"); | ||
143 | |||
144 | static int scom_debug_init_one(struct dentry *root, struct device_node *dn, | ||
145 | int i) | ||
146 | { | ||
147 | struct scom_debug_entry *ent; | ||
148 | struct dentry *dir; | ||
149 | |||
150 | ent = kzalloc(sizeof(*ent), GFP_KERNEL); | ||
151 | if (!ent) | ||
152 | return -ENOMEM; | ||
153 | |||
154 | ent->dn = of_node_get(dn); | ||
155 | ent->map = SCOM_MAP_INVALID; | ||
156 | spin_lock_init(&ent->lock); | ||
157 | snprintf(ent->name, 8, "scom%d", i); | ||
158 | ent->blob.data = dn->full_name; | ||
159 | ent->blob.size = strlen(dn->full_name); | ||
160 | |||
161 | dir = debugfs_create_dir(ent->name, root); | ||
162 | if (!dir) { | ||
163 | of_node_put(dn); | ||
164 | kfree(ent); | ||
165 | return -1; | ||
166 | } | ||
167 | |||
168 | debugfs_create_file("addr", 0600, dir, ent, &scom_addr_fops); | ||
169 | debugfs_create_file("value", 0600, dir, ent, &scom_val_fops); | ||
170 | debugfs_create_blob("path", 0400, dir, &ent->blob); | ||
171 | |||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | static int scom_debug_init(void) | ||
176 | { | ||
177 | struct device_node *dn; | ||
178 | struct dentry *root; | ||
179 | int i, rc; | ||
180 | |||
181 | root = debugfs_create_dir("scom", powerpc_debugfs_root); | ||
182 | if (!root) | ||
183 | return -1; | ||
184 | |||
185 | i = rc = 0; | ||
186 | for_each_node_with_property(dn, "scom-controller") | ||
187 | rc |= scom_debug_init_one(root, dn, i++); | ||
188 | |||
189 | return rc; | ||
190 | } | ||
191 | device_initcall(scom_debug_init); | ||
192 | #endif /* CONFIG_SCOM_DEBUGFS */ | ||
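
Usage sketch for the new SCOM accessors (illustrative only; the helper below is hypothetical): a driver that owns a device node carrying a "scom-reg" property maps and reads it through the asm/scom.h API that scom_map_device() above builds on.

#include <linux/of.h>
#include <asm/scom.h>

/* Read the first register of the first SCOM window of 'dn'. */
static int example_read_first_scom(struct device_node *dn, u64 *val)
{
	scom_map_t map = scom_map_device(dn, 0);

	if (!scom_map_ok(map))
		return -ENODEV;
	*val = scom_read(map, 0);
	scom_unmap(map);
	return 0;
}
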
diff --git a/arch/powerpc/sysdev/tsi108_dev.c b/arch/powerpc/sysdev/tsi108_dev.c index d4d15aaf18fa..ee056807b52c 100644 --- a/arch/powerpc/sysdev/tsi108_dev.c +++ b/arch/powerpc/sysdev/tsi108_dev.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/module.h> | 19 | #include <linux/module.h> |
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <linux/of_net.h> | ||
22 | #include <asm/tsi108.h> | 23 | #include <asm/tsi108.h> |
23 | 24 | ||
24 | #include <asm/system.h> | 25 | #include <asm/system.h> |
@@ -83,8 +84,8 @@ static int __init tsi108_eth_of_init(void) | |||
83 | memset(&tsi_eth_data, 0, sizeof(tsi_eth_data)); | 84 | memset(&tsi_eth_data, 0, sizeof(tsi_eth_data)); |
84 | 85 | ||
85 | ret = of_address_to_resource(np, 0, &r[0]); | 86 | ret = of_address_to_resource(np, 0, &r[0]); |
86 | DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n", | 87 | DBG("%s: name:start->end = %s:%pR\n", |
87 | __func__,r[0].name, r[0].start, r[0].end); | 88 | __func__, r[0].name, &r[0]); |
88 | if (ret) | 89 | if (ret) |
89 | goto err; | 90 | goto err; |
90 | 91 | ||
@@ -92,8 +93,8 @@ static int __init tsi108_eth_of_init(void) | |||
92 | r[1].start = irq_of_parse_and_map(np, 0); | 93 | r[1].start = irq_of_parse_and_map(np, 0); |
93 | r[1].end = irq_of_parse_and_map(np, 0); | 94 | r[1].end = irq_of_parse_and_map(np, 0); |
94 | r[1].flags = IORESOURCE_IRQ; | 95 | r[1].flags = IORESOURCE_IRQ; |
95 | DBG("%s: name:start->end = %s:0x%lx-> 0x%lx\n", | 96 | DBG("%s: name:start->end = %s:%pR\n", |
96 | __func__,r[1].name, r[1].start, r[1].end); | 97 | __func__, r[1].name, &r[1]); |
97 | 98 | ||
98 | tsi_eth_dev = | 99 | tsi_eth_dev = |
99 | platform_device_register_simple("tsi-ethernet", i++, &r[0], | 100 | platform_device_register_simple("tsi-ethernet", i++, &r[0], |
diff --git a/arch/powerpc/sysdev/tsi108_pci.c b/arch/powerpc/sysdev/tsi108_pci.c index 0ab9281e49ae..4d18658116e5 100644 --- a/arch/powerpc/sysdev/tsi108_pci.c +++ b/arch/powerpc/sysdev/tsi108_pci.c | |||
@@ -343,24 +343,9 @@ static inline unsigned int get_pci_source(void) | |||
343 | * Linux descriptor level callbacks | 343 | * Linux descriptor level callbacks |
344 | */ | 344 | */ |
345 | 345 | ||
346 | static void tsi108_pci_irq_enable(u_int irq) | 346 | static void tsi108_pci_irq_unmask(struct irq_data *d) |
347 | { | 347 | { |
348 | tsi108_pci_int_unmask(irq); | 348 | tsi108_pci_int_unmask(d->irq); |
349 | } | ||
350 | |||
351 | static void tsi108_pci_irq_disable(u_int irq) | ||
352 | { | ||
353 | tsi108_pci_int_mask(irq); | ||
354 | } | ||
355 | |||
356 | static void tsi108_pci_irq_ack(u_int irq) | ||
357 | { | ||
358 | tsi108_pci_int_mask(irq); | ||
359 | } | ||
360 | |||
361 | static void tsi108_pci_irq_end(u_int irq) | ||
362 | { | ||
363 | tsi108_pci_int_unmask(irq); | ||
364 | 349 | ||
365 | /* Enable interrupts from PCI block */ | 350 | /* Enable interrupts from PCI block */ |
366 | tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE, | 351 | tsi108_write_reg(TSI108_PCI_OFFSET + TSI108_PCI_IRP_ENABLE, |
@@ -370,16 +355,25 @@ static void tsi108_pci_irq_end(u_int irq) | |||
370 | mb(); | 355 | mb(); |
371 | } | 356 | } |
372 | 357 | ||
358 | static void tsi108_pci_irq_mask(struct irq_data *d) | ||
359 | { | ||
360 | tsi108_pci_int_mask(d->irq); | ||
361 | } | ||
362 | |||
363 | static void tsi108_pci_irq_ack(struct irq_data *d) | ||
364 | { | ||
365 | tsi108_pci_int_mask(d->irq); | ||
366 | } | ||
367 | |||
373 | /* | 368 | /* |
374 | * Interrupt controller descriptor for cascaded PCI interrupt controller. | 369 | * Interrupt controller descriptor for cascaded PCI interrupt controller. |
375 | */ | 370 | */ |
376 | 371 | ||
377 | static struct irq_chip tsi108_pci_irq = { | 372 | static struct irq_chip tsi108_pci_irq = { |
378 | .name = "tsi108_PCI_int", | 373 | .name = "tsi108_PCI_int", |
379 | .mask = tsi108_pci_irq_disable, | 374 | .irq_mask = tsi108_pci_irq_mask, |
380 | .ack = tsi108_pci_irq_ack, | 375 | .irq_ack = tsi108_pci_irq_ack, |
381 | .end = tsi108_pci_irq_end, | 376 | .irq_unmask = tsi108_pci_irq_unmask, |
382 | .unmask = tsi108_pci_irq_enable, | ||
383 | }; | 377 | }; |
384 | 378 | ||
385 | static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct, | 379 | static int pci_irq_host_xlate(struct irq_host *h, struct device_node *ct, |
@@ -397,8 +391,8 @@ static int pci_irq_host_map(struct irq_host *h, unsigned int virq, | |||
397 | DBG("%s(%d, 0x%lx)\n", __func__, virq, hw); | 391 | DBG("%s(%d, 0x%lx)\n", __func__, virq, hw); |
398 | if ((virq >= 1) && (virq <= 4)){ | 392 | if ((virq >= 1) && (virq <= 4)){ |
399 | irq = virq + IRQ_PCI_INTAD_BASE - 1; | 393 | irq = virq + IRQ_PCI_INTAD_BASE - 1; |
400 | irq_to_desc(irq)->status |= IRQ_LEVEL; | 394 | irq_set_status_flags(irq, IRQ_LEVEL); |
401 | set_irq_chip(irq, &tsi108_pci_irq); | 395 | irq_set_chip(irq, &tsi108_pci_irq); |
402 | } | 396 | } |
403 | return 0; | 397 | return 0; |
404 | } | 398 | } |
@@ -437,8 +431,11 @@ void __init tsi108_pci_int_init(struct device_node *node) | |||
437 | 431 | ||
438 | void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc) | 432 | void tsi108_irq_cascade(unsigned int irq, struct irq_desc *desc) |
439 | { | 433 | { |
434 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
440 | unsigned int cascade_irq = get_pci_source(); | 435 | unsigned int cascade_irq = get_pci_source(); |
436 | |||
441 | if (cascade_irq != NO_IRQ) | 437 | if (cascade_irq != NO_IRQ) |
442 | generic_handle_irq(cascade_irq); | 438 | generic_handle_irq(cascade_irq); |
443 | desc->chip->eoi(irq); | 439 | |
440 | chip->irq_eoi(&desc->irq_data); | ||
444 | } | 441 | } |
diff --git a/arch/powerpc/sysdev/uic.c b/arch/powerpc/sysdev/uic.c index 0038fb78f094..984cd2029158 100644 --- a/arch/powerpc/sysdev/uic.c +++ b/arch/powerpc/sysdev/uic.c | |||
@@ -41,8 +41,6 @@ | |||
41 | #define UIC_VR 0x7 | 41 | #define UIC_VR 0x7 |
42 | #define UIC_VCR 0x8 | 42 | #define UIC_VCR 0x8 |
43 | 43 | ||
44 | #define uic_irq_to_hw(virq) (irq_map[virq].hwirq) | ||
45 | |||
46 | struct uic *primary_uic; | 44 | struct uic *primary_uic; |
47 | 45 | ||
48 | struct uic { | 46 | struct uic { |
@@ -55,18 +53,17 @@ struct uic { | |||
55 | struct irq_host *irqhost; | 53 | struct irq_host *irqhost; |
56 | }; | 54 | }; |
57 | 55 | ||
58 | static void uic_unmask_irq(unsigned int virq) | 56 | static void uic_unmask_irq(struct irq_data *d) |
59 | { | 57 | { |
60 | struct irq_desc *desc = irq_to_desc(virq); | 58 | struct uic *uic = irq_data_get_irq_chip_data(d); |
61 | struct uic *uic = get_irq_chip_data(virq); | 59 | unsigned int src = irqd_to_hwirq(d); |
62 | unsigned int src = uic_irq_to_hw(virq); | ||
63 | unsigned long flags; | 60 | unsigned long flags; |
64 | u32 er, sr; | 61 | u32 er, sr; |
65 | 62 | ||
66 | sr = 1 << (31-src); | 63 | sr = 1 << (31-src); |
67 | spin_lock_irqsave(&uic->lock, flags); | 64 | spin_lock_irqsave(&uic->lock, flags); |
68 | /* ack level-triggered interrupts here */ | 65 | /* ack level-triggered interrupts here */ |
69 | if (desc->status & IRQ_LEVEL) | 66 | if (irqd_is_level_type(d)) |
70 | mtdcr(uic->dcrbase + UIC_SR, sr); | 67 | mtdcr(uic->dcrbase + UIC_SR, sr); |
71 | er = mfdcr(uic->dcrbase + UIC_ER); | 68 | er = mfdcr(uic->dcrbase + UIC_ER); |
72 | er |= sr; | 69 | er |= sr; |
@@ -74,10 +71,10 @@ static void uic_unmask_irq(unsigned int virq) | |||
74 | spin_unlock_irqrestore(&uic->lock, flags); | 71 | spin_unlock_irqrestore(&uic->lock, flags); |
75 | } | 72 | } |
76 | 73 | ||
77 | static void uic_mask_irq(unsigned int virq) | 74 | static void uic_mask_irq(struct irq_data *d) |
78 | { | 75 | { |
79 | struct uic *uic = get_irq_chip_data(virq); | 76 | struct uic *uic = irq_data_get_irq_chip_data(d); |
80 | unsigned int src = uic_irq_to_hw(virq); | 77 | unsigned int src = irqd_to_hwirq(d); |
81 | unsigned long flags; | 78 | unsigned long flags; |
82 | u32 er; | 79 | u32 er; |
83 | 80 | ||
@@ -88,10 +85,10 @@ static void uic_mask_irq(unsigned int virq) | |||
88 | spin_unlock_irqrestore(&uic->lock, flags); | 85 | spin_unlock_irqrestore(&uic->lock, flags); |
89 | } | 86 | } |
90 | 87 | ||
91 | static void uic_ack_irq(unsigned int virq) | 88 | static void uic_ack_irq(struct irq_data *d) |
92 | { | 89 | { |
93 | struct uic *uic = get_irq_chip_data(virq); | 90 | struct uic *uic = irq_data_get_irq_chip_data(d); |
94 | unsigned int src = uic_irq_to_hw(virq); | 91 | unsigned int src = irqd_to_hwirq(d); |
95 | unsigned long flags; | 92 | unsigned long flags; |
96 | 93 | ||
97 | spin_lock_irqsave(&uic->lock, flags); | 94 | spin_lock_irqsave(&uic->lock, flags); |
@@ -99,11 +96,10 @@ static void uic_ack_irq(unsigned int virq) | |||
99 | spin_unlock_irqrestore(&uic->lock, flags); | 96 | spin_unlock_irqrestore(&uic->lock, flags); |
100 | } | 97 | } |
101 | 98 | ||
102 | static void uic_mask_ack_irq(unsigned int virq) | 99 | static void uic_mask_ack_irq(struct irq_data *d) |
103 | { | 100 | { |
104 | struct irq_desc *desc = irq_to_desc(virq); | 101 | struct uic *uic = irq_data_get_irq_chip_data(d); |
105 | struct uic *uic = get_irq_chip_data(virq); | 102 | unsigned int src = irqd_to_hwirq(d); |
106 | unsigned int src = uic_irq_to_hw(virq); | ||
107 | unsigned long flags; | 103 | unsigned long flags; |
108 | u32 er, sr; | 104 | u32 er, sr; |
109 | 105 | ||
@@ -120,23 +116,22 @@ static void uic_mask_ack_irq(unsigned int virq) | |||
120 | * level interrupts are ack'ed after the actual | 116 | * level interrupts are ack'ed after the actual |
121 | * isr call in the uic_unmask_irq() | 117 | * isr call in the uic_unmask_irq() |
122 | */ | 118 | */ |
123 | if (!(desc->status & IRQ_LEVEL)) | 119 | if (!irqd_is_level_type(d)) |
124 | mtdcr(uic->dcrbase + UIC_SR, sr); | 120 | mtdcr(uic->dcrbase + UIC_SR, sr); |
125 | spin_unlock_irqrestore(&uic->lock, flags); | 121 | spin_unlock_irqrestore(&uic->lock, flags); |
126 | } | 122 | } |
127 | 123 | ||
128 | static int uic_set_irq_type(unsigned int virq, unsigned int flow_type) | 124 | static int uic_set_irq_type(struct irq_data *d, unsigned int flow_type) |
129 | { | 125 | { |
130 | struct uic *uic = get_irq_chip_data(virq); | 126 | struct uic *uic = irq_data_get_irq_chip_data(d); |
131 | unsigned int src = uic_irq_to_hw(virq); | 127 | unsigned int src = irqd_to_hwirq(d); |
132 | struct irq_desc *desc = irq_to_desc(virq); | ||
133 | unsigned long flags; | 128 | unsigned long flags; |
134 | int trigger, polarity; | 129 | int trigger, polarity; |
135 | u32 tr, pr, mask; | 130 | u32 tr, pr, mask; |
136 | 131 | ||
137 | switch (flow_type & IRQ_TYPE_SENSE_MASK) { | 132 | switch (flow_type & IRQ_TYPE_SENSE_MASK) { |
138 | case IRQ_TYPE_NONE: | 133 | case IRQ_TYPE_NONE: |
139 | uic_mask_irq(virq); | 134 | uic_mask_irq(d); |
140 | return 0; | 135 | return 0; |
141 | 136 | ||
142 | case IRQ_TYPE_EDGE_RISING: | 137 | case IRQ_TYPE_EDGE_RISING: |
@@ -166,11 +161,6 @@ static int uic_set_irq_type(unsigned int virq, unsigned int flow_type) | |||
166 | mtdcr(uic->dcrbase + UIC_PR, pr); | 161 | mtdcr(uic->dcrbase + UIC_PR, pr); |
167 | mtdcr(uic->dcrbase + UIC_TR, tr); | 162 | mtdcr(uic->dcrbase + UIC_TR, tr); |
168 | 163 | ||
169 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | ||
170 | desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; | ||
171 | if (!trigger) | ||
172 | desc->status |= IRQ_LEVEL; | ||
173 | |||
174 | spin_unlock_irqrestore(&uic->lock, flags); | 164 | spin_unlock_irqrestore(&uic->lock, flags); |
175 | 165 | ||
176 | return 0; | 166 | return 0; |
@@ -178,11 +168,11 @@ static int uic_set_irq_type(unsigned int virq, unsigned int flow_type) | |||
178 | 168 | ||
179 | static struct irq_chip uic_irq_chip = { | 169 | static struct irq_chip uic_irq_chip = { |
180 | .name = "UIC", | 170 | .name = "UIC", |
181 | .unmask = uic_unmask_irq, | 171 | .irq_unmask = uic_unmask_irq, |
182 | .mask = uic_mask_irq, | 172 | .irq_mask = uic_mask_irq, |
183 | .mask_ack = uic_mask_ack_irq, | 173 | .irq_mask_ack = uic_mask_ack_irq, |
184 | .ack = uic_ack_irq, | 174 | .irq_ack = uic_ack_irq, |
185 | .set_type = uic_set_irq_type, | 175 | .irq_set_type = uic_set_irq_type, |
186 | }; | 176 | }; |
187 | 177 | ||
188 | static int uic_host_map(struct irq_host *h, unsigned int virq, | 178 | static int uic_host_map(struct irq_host *h, unsigned int virq, |
@@ -190,13 +180,13 @@ static int uic_host_map(struct irq_host *h, unsigned int virq, | |||
190 | { | 180 | { |
191 | struct uic *uic = h->host_data; | 181 | struct uic *uic = h->host_data; |
192 | 182 | ||
193 | set_irq_chip_data(virq, uic); | 183 | irq_set_chip_data(virq, uic); |
194 | /* Despite the name, handle_level_irq() works for both level | 184 | /* Despite the name, handle_level_irq() works for both level |
195 | * and edge irqs on UIC. FIXME: check this is correct */ | 185 | * and edge irqs on UIC. FIXME: check this is correct */ |
196 | set_irq_chip_and_handler(virq, &uic_irq_chip, handle_level_irq); | 186 | irq_set_chip_and_handler(virq, &uic_irq_chip, handle_level_irq); |
197 | 187 | ||
198 | /* Set default irq type */ | 188 | /* Set default irq type */ |
199 | set_irq_type(virq, IRQ_TYPE_NONE); | 189 | irq_set_irq_type(virq, IRQ_TYPE_NONE); |
200 | 190 | ||
201 | return 0; | 191 | return 0; |
202 | } | 192 | } |
@@ -220,16 +210,18 @@ static struct irq_host_ops uic_host_ops = { | |||
220 | 210 | ||
221 | void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) | 211 | void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) |
222 | { | 212 | { |
223 | struct uic *uic = get_irq_data(virq); | 213 | struct irq_chip *chip = irq_desc_get_chip(desc); |
214 | struct irq_data *idata = irq_desc_get_irq_data(desc); | ||
215 | struct uic *uic = irq_get_handler_data(virq); | ||
224 | u32 msr; | 216 | u32 msr; |
225 | int src; | 217 | int src; |
226 | int subvirq; | 218 | int subvirq; |
227 | 219 | ||
228 | raw_spin_lock(&desc->lock); | 220 | raw_spin_lock(&desc->lock); |
229 | if (desc->status & IRQ_LEVEL) | 221 | if (irqd_is_level_type(idata)) |
230 | desc->chip->mask(virq); | 222 | chip->irq_mask(idata); |
231 | else | 223 | else |
232 | desc->chip->mask_ack(virq); | 224 | chip->irq_mask_ack(idata); |
233 | raw_spin_unlock(&desc->lock); | 225 | raw_spin_unlock(&desc->lock); |
234 | 226 | ||
235 | msr = mfdcr(uic->dcrbase + UIC_MSR); | 227 | msr = mfdcr(uic->dcrbase + UIC_MSR); |
@@ -243,10 +235,10 @@ void uic_irq_cascade(unsigned int virq, struct irq_desc *desc) | |||
243 | 235 | ||
244 | uic_irq_ret: | 236 | uic_irq_ret: |
245 | raw_spin_lock(&desc->lock); | 237 | raw_spin_lock(&desc->lock); |
246 | if (desc->status & IRQ_LEVEL) | 238 | if (irqd_is_level_type(idata)) |
247 | desc->chip->ack(virq); | 239 | chip->irq_ack(idata); |
248 | if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask) | 240 | if (!irqd_irq_disabled(idata) && chip->irq_unmask) |
249 | desc->chip->unmask(virq); | 241 | chip->irq_unmask(idata); |
250 | raw_spin_unlock(&desc->lock); | 242 | raw_spin_unlock(&desc->lock); |
251 | } | 243 | } |
252 | 244 | ||
@@ -335,8 +327,8 @@ void __init uic_init_tree(void) | |||
335 | 327 | ||
336 | cascade_virq = irq_of_parse_and_map(np, 0); | 328 | cascade_virq = irq_of_parse_and_map(np, 0); |
337 | 329 | ||
338 | set_irq_data(cascade_virq, uic); | 330 | irq_set_handler_data(cascade_virq, uic); |
339 | set_irq_chained_handler(cascade_virq, uic_irq_cascade); | 331 | irq_set_chained_handler(cascade_virq, uic_irq_cascade); |
340 | 332 | ||
341 | /* FIXME: setup critical cascade?? */ | 333 | /* FIXME: setup critical cascade?? */ |
342 | } | 334 | } |
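
The qe_ic, tsi108 and uic conversions above all follow the same pattern: irq_chip callbacks now take a struct irq_data and recover their state through irq_data_get_irq_chip_data()/irqd_to_hwirq() instead of irq_to_desc()/get_irq_chip_data(). A minimal sketch of a new-style chip (illustrative only; example_* names are hypothetical, no locking shown, hwirq assumed < 32):

#include <linux/irq.h>
#include <asm/io.h>

struct example_pic {
	void __iomem *regs;	/* enable register */
	u32 er;			/* shadow of the enable register */
};

static void example_mask_irq(struct irq_data *d)
{
	struct example_pic *pic = irq_data_get_irq_chip_data(d);

	pic->er &= ~(1u << irqd_to_hwirq(d));
	out_be32(pic->regs, pic->er);
}

static void example_unmask_irq(struct irq_data *d)
{
	struct example_pic *pic = irq_data_get_irq_chip_data(d);

	pic->er |= 1u << irqd_to_hwirq(d);
	out_be32(pic->regs, pic->er);
}

static struct irq_chip example_irq_chip = {
	.name		= "EXAMPLE",
	.irq_mask	= example_mask_irq,
	.irq_unmask	= example_unmask_irq,
};
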
diff --git a/arch/powerpc/sysdev/xics/Kconfig b/arch/powerpc/sysdev/xics/Kconfig new file mode 100644 index 000000000000..0031eda320c3 --- /dev/null +++ b/arch/powerpc/sysdev/xics/Kconfig | |||
@@ -0,0 +1,13 @@ | |||
1 | config PPC_XICS | ||
2 | def_bool n | ||
3 | select PPC_SMP_MUXED_IPI | ||
4 | |||
5 | config PPC_ICP_NATIVE | ||
6 | def_bool n | ||
7 | |||
8 | config PPC_ICP_HV | ||
9 | def_bool n | ||
10 | |||
11 | config PPC_ICS_RTAS | ||
12 | def_bool n | ||
13 | |||
diff --git a/arch/powerpc/sysdev/xics/Makefile b/arch/powerpc/sysdev/xics/Makefile new file mode 100644 index 000000000000..b75a6059337f --- /dev/null +++ b/arch/powerpc/sysdev/xics/Makefile | |||
@@ -0,0 +1,6 @@ | |||
1 | subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror | ||
2 | |||
3 | obj-y += xics-common.o | ||
4 | obj-$(CONFIG_PPC_ICP_NATIVE) += icp-native.o | ||
5 | obj-$(CONFIG_PPC_ICP_HV) += icp-hv.o | ||
6 | obj-$(CONFIG_PPC_ICS_RTAS) += ics-rtas.o | ||
diff --git a/arch/powerpc/sysdev/xics/icp-hv.c b/arch/powerpc/sysdev/xics/icp-hv.c new file mode 100644 index 000000000000..9518d367a64f --- /dev/null +++ b/arch/powerpc/sysdev/xics/icp-hv.c | |||
@@ -0,0 +1,164 @@ | |||
1 | /* | ||
2 | * Copyright 2011 IBM Corporation. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | */ | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/irq.h> | ||
13 | #include <linux/smp.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/cpu.h> | ||
17 | #include <linux/of.h> | ||
18 | |||
19 | #include <asm/smp.h> | ||
20 | #include <asm/irq.h> | ||
21 | #include <asm/errno.h> | ||
22 | #include <asm/xics.h> | ||
23 | #include <asm/io.h> | ||
24 | #include <asm/hvcall.h> | ||
25 | |||
26 | static inline unsigned int icp_hv_get_xirr(unsigned char cppr) | ||
27 | { | ||
28 | unsigned long retbuf[PLPAR_HCALL_BUFSIZE]; | ||
29 | long rc; | ||
30 | |||
31 | rc = plpar_hcall(H_XIRR, retbuf, cppr); | ||
32 | if (rc != H_SUCCESS) | ||
33 | panic("bad return code xirr - rc = %lx\n", rc); | ||
34 | return (unsigned int)retbuf[0]; | ||
35 | } | ||
36 | |||
37 | static inline void icp_hv_set_xirr(unsigned int value) | ||
38 | { | ||
39 | long rc = plpar_hcall_norets(H_EOI, value); | ||
40 | if (rc != H_SUCCESS) | ||
41 | panic("bad return code EOI - rc = %ld, value=%x\n", rc, value); | ||
42 | } | ||
43 | |||
44 | static inline void icp_hv_set_cppr(u8 value) | ||
45 | { | ||
46 | long rc = plpar_hcall_norets(H_CPPR, value); | ||
47 | if (rc != H_SUCCESS) | ||
48 | panic("bad return code cppr - rc = %lx\n", rc); | ||
49 | } | ||
50 | |||
51 | static inline void icp_hv_set_qirr(int n_cpu , u8 value) | ||
52 | { | ||
53 | long rc = plpar_hcall_norets(H_IPI, get_hard_smp_processor_id(n_cpu), | ||
54 | value); | ||
55 | if (rc != H_SUCCESS) | ||
56 | panic("bad return code qirr - rc = %lx\n", rc); | ||
57 | } | ||
58 | |||
59 | static void icp_hv_eoi(struct irq_data *d) | ||
60 | { | ||
61 | unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); | ||
62 | |||
63 | iosync(); | ||
64 | icp_hv_set_xirr((xics_pop_cppr() << 24) | hw_irq); | ||
65 | } | ||
66 | |||
67 | static void icp_hv_teardown_cpu(void) | ||
68 | { | ||
69 | int cpu = smp_processor_id(); | ||
70 | |||
71 | /* Clear any pending IPI */ | ||
72 | icp_hv_set_qirr(cpu, 0xff); | ||
73 | } | ||
74 | |||
75 | static void icp_hv_flush_ipi(void) | ||
76 | { | ||
77 | /* We take the ipi irq but never return, so we | ||
78 | * need to EOI the IPI, but want to leave our priority 0 | ||
79 | * | ||
80 | * should we check all the other interrupts too? | ||
81 | * should we be flagging idle loop instead? | ||
82 | * or creating some task to be scheduled? | ||
83 | */ | ||
84 | |||
85 | icp_hv_set_xirr((0x00 << 24) | XICS_IPI); | ||
86 | } | ||
87 | |||
88 | static unsigned int icp_hv_get_irq(void) | ||
89 | { | ||
90 | unsigned int xirr = icp_hv_get_xirr(xics_cppr_top()); | ||
91 | unsigned int vec = xirr & 0x00ffffff; | ||
92 | unsigned int irq; | ||
93 | |||
94 | if (vec == XICS_IRQ_SPURIOUS) | ||
95 | return NO_IRQ; | ||
96 | |||
97 | irq = irq_radix_revmap_lookup(xics_host, vec); | ||
98 | if (likely(irq != NO_IRQ)) { | ||
99 | xics_push_cppr(vec); | ||
100 | return irq; | ||
101 | } | ||
102 | |||
103 | /* We don't have a linux mapping, so have rtas mask it. */ | ||
104 | xics_mask_unknown_vec(vec); | ||
105 | |||
106 | /* We might learn about it later, so EOI it */ | ||
107 | icp_hv_set_xirr(xirr); | ||
108 | |||
109 | return NO_IRQ; | ||
110 | } | ||
111 | |||
112 | static void icp_hv_set_cpu_priority(unsigned char cppr) | ||
113 | { | ||
114 | xics_set_base_cppr(cppr); | ||
115 | icp_hv_set_cppr(cppr); | ||
116 | iosync(); | ||
117 | } | ||
118 | |||
119 | #ifdef CONFIG_SMP | ||
120 | |||
121 | static void icp_hv_cause_ipi(int cpu, unsigned long data) | ||
122 | { | ||
123 | icp_hv_set_qirr(cpu, IPI_PRIORITY); | ||
124 | } | ||
125 | |||
126 | static irqreturn_t icp_hv_ipi_action(int irq, void *dev_id) | ||
127 | { | ||
128 | int cpu = smp_processor_id(); | ||
129 | |||
130 | icp_hv_set_qirr(cpu, 0xff); | ||
131 | |||
132 | return smp_ipi_demux(); | ||
133 | } | ||
134 | |||
135 | #endif /* CONFIG_SMP */ | ||
136 | |||
137 | static const struct icp_ops icp_hv_ops = { | ||
138 | .get_irq = icp_hv_get_irq, | ||
139 | .eoi = icp_hv_eoi, | ||
140 | .set_priority = icp_hv_set_cpu_priority, | ||
141 | .teardown_cpu = icp_hv_teardown_cpu, | ||
142 | .flush_ipi = icp_hv_flush_ipi, | ||
143 | #ifdef CONFIG_SMP | ||
144 | .ipi_action = icp_hv_ipi_action, | ||
145 | .cause_ipi = icp_hv_cause_ipi, | ||
146 | #endif | ||
147 | }; | ||
148 | |||
149 | int icp_hv_init(void) | ||
150 | { | ||
151 | struct device_node *np; | ||
152 | |||
153 | np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xicp"); | ||
154 | if (!np) | ||
155 | np = of_find_node_by_type(NULL, | ||
156 | "PowerPC-External-Interrupt-Presentation"); | ||
157 | if (!np) | ||
158 | return -ENODEV; | ||
159 | |||
160 | icp_ops = &icp_hv_ops; | ||
161 | |||
162 | return 0; | ||
163 | } | ||
164 | |||
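
Both ICP backends pack the XIRR the same way; as a worked illustration (not part of the patch, helper names hypothetical): the top byte carries the CPPR and the low 24 bits the interrupt source, which is why the code masks with 0x00ffffff on fetch and ORs (cppr << 24) back in on EOI.

/* Illustrative helpers only, mirroring the packing used above. */
static inline unsigned int example_xirr_pack(unsigned char cppr,
					     unsigned int vec)
{
	return ((unsigned int)cppr << 24) | (vec & 0x00ffffff);
}

static inline unsigned int example_xirr_vec(unsigned int xirr)
{
	return xirr & 0x00ffffff;	/* interrupt source number */
}

static inline unsigned char example_xirr_cppr(unsigned int xirr)
{
	return xirr >> 24;		/* priority at interrupt time */
}
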
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c new file mode 100644 index 000000000000..1f15ad436140 --- /dev/null +++ b/arch/powerpc/sysdev/xics/icp-native.c | |||
@@ -0,0 +1,293 @@ | |||
1 | /* | ||
2 | * Copyright 2011 IBM Corporation. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | */ | ||
10 | |||
11 | #include <linux/types.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/irq.h> | ||
14 | #include <linux/smp.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/cpu.h> | ||
18 | #include <linux/of.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | |||
21 | #include <asm/prom.h> | ||
22 | #include <asm/io.h> | ||
23 | #include <asm/smp.h> | ||
24 | #include <asm/irq.h> | ||
25 | #include <asm/errno.h> | ||
26 | #include <asm/xics.h> | ||
27 | |||
28 | struct icp_ipl { | ||
29 | union { | ||
30 | u32 word; | ||
31 | u8 bytes[4]; | ||
32 | } xirr_poll; | ||
33 | union { | ||
34 | u32 word; | ||
35 | u8 bytes[4]; | ||
36 | } xirr; | ||
37 | u32 dummy; | ||
38 | union { | ||
39 | u32 word; | ||
40 | u8 bytes[4]; | ||
41 | } qirr; | ||
42 | u32 link_a; | ||
43 | u32 link_b; | ||
44 | u32 link_c; | ||
45 | }; | ||
46 | |||
47 | static struct icp_ipl __iomem *icp_native_regs[NR_CPUS]; | ||
48 | |||
49 | static inline unsigned int icp_native_get_xirr(void) | ||
50 | { | ||
51 | int cpu = smp_processor_id(); | ||
52 | |||
53 | return in_be32(&icp_native_regs[cpu]->xirr.word); | ||
54 | } | ||
55 | |||
56 | static inline void icp_native_set_xirr(unsigned int value) | ||
57 | { | ||
58 | int cpu = smp_processor_id(); | ||
59 | |||
60 | out_be32(&icp_native_regs[cpu]->xirr.word, value); | ||
61 | } | ||
62 | |||
63 | static inline void icp_native_set_cppr(u8 value) | ||
64 | { | ||
65 | int cpu = smp_processor_id(); | ||
66 | |||
67 | out_8(&icp_native_regs[cpu]->xirr.bytes[0], value); | ||
68 | } | ||
69 | |||
70 | static inline void icp_native_set_qirr(int n_cpu, u8 value) | ||
71 | { | ||
72 | out_8(&icp_native_regs[n_cpu]->qirr.bytes[0], value); | ||
73 | } | ||
74 | |||
75 | static void icp_native_set_cpu_priority(unsigned char cppr) | ||
76 | { | ||
77 | xics_set_base_cppr(cppr); | ||
78 | icp_native_set_cppr(cppr); | ||
79 | iosync(); | ||
80 | } | ||
81 | |||
82 | static void icp_native_eoi(struct irq_data *d) | ||
83 | { | ||
84 | unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); | ||
85 | |||
86 | iosync(); | ||
87 | icp_native_set_xirr((xics_pop_cppr() << 24) | hw_irq); | ||
88 | } | ||
89 | |||
90 | static void icp_native_teardown_cpu(void) | ||
91 | { | ||
92 | int cpu = smp_processor_id(); | ||
93 | |||
94 | /* Clear any pending IPI */ | ||
95 | icp_native_set_qirr(cpu, 0xff); | ||
96 | } | ||
97 | |||
98 | static void icp_native_flush_ipi(void) | ||
99 | { | ||
100 | /* We take the ipi irq but never return, so we | ||
101 | * need to EOI the IPI, but want to leave our priority 0 | ||
102 | * | ||
103 | * should we check all the other interrupts too? | ||
104 | * should we be flagging idle loop instead? | ||
105 | * or creating some task to be scheduled? | ||
106 | */ | ||
107 | |||
108 | icp_native_set_xirr((0x00 << 24) | XICS_IPI); | ||
109 | } | ||
110 | |||
111 | static unsigned int icp_native_get_irq(void) | ||
112 | { | ||
113 | unsigned int xirr = icp_native_get_xirr(); | ||
114 | unsigned int vec = xirr & 0x00ffffff; | ||
115 | unsigned int irq; | ||
116 | |||
117 | if (vec == XICS_IRQ_SPURIOUS) | ||
118 | return NO_IRQ; | ||
119 | |||
120 | irq = irq_radix_revmap_lookup(xics_host, vec); | ||
121 | if (likely(irq != NO_IRQ)) { | ||
122 | xics_push_cppr(vec); | ||
123 | return irq; | ||
124 | } | ||
125 | |||
126 | /* We don't have a linux mapping, so have rtas mask it. */ | ||
127 | xics_mask_unknown_vec(vec); | ||
128 | |||
129 | /* We might learn about it later, so EOI it */ | ||
130 | icp_native_set_xirr(xirr); | ||
131 | |||
132 | return NO_IRQ; | ||
133 | } | ||
134 | |||
135 | #ifdef CONFIG_SMP | ||
136 | |||
137 | static void icp_native_cause_ipi(int cpu, unsigned long data) | ||
138 | { | ||
139 | icp_native_set_qirr(cpu, IPI_PRIORITY); | ||
140 | } | ||
141 | |||
142 | static irqreturn_t icp_native_ipi_action(int irq, void *dev_id) | ||
143 | { | ||
144 | int cpu = smp_processor_id(); | ||
145 | |||
146 | icp_native_set_qirr(cpu, 0xff); | ||
147 | |||
148 | return smp_ipi_demux(); | ||
149 | } | ||
150 | |||
151 | #endif /* CONFIG_SMP */ | ||
152 | |||
153 | static int __init icp_native_map_one_cpu(int hw_id, unsigned long addr, | ||
154 | unsigned long size) | ||
155 | { | ||
156 | char *rname; | ||
157 | int i, cpu = -1; | ||
158 | |||
159 | /* This may look gross but it's good enough for now; we don't quite | ||
160 | * have a hard -> linux processor id mapping. | ||
161 | */ | ||
162 | for_each_possible_cpu(i) { | ||
163 | if (!cpu_present(i)) | ||
164 | continue; | ||
165 | if (hw_id == get_hard_smp_processor_id(i)) { | ||
166 | cpu = i; | ||
167 | break; | ||
168 | } | ||
169 | } | ||
170 | |||
171 | /* Fail, skip that CPU. Don't print, it's normal, some XICS come up | ||
172 | * with way more entries in there than you have CPUs | ||
173 | */ | ||
174 | if (cpu == -1) | ||
175 | return 0; | ||
176 | |||
177 | rname = kasprintf(GFP_KERNEL, "CPU %d [0x%x] Interrupt Presentation", | ||
178 | cpu, hw_id); | ||
179 | |||
180 | if (!request_mem_region(addr, size, rname)) { | ||
181 | pr_warning("icp_native: Could not reserve ICP MMIO" | ||
182 | " for CPU %d, interrupt server #0x%x\n", | ||
183 | cpu, hw_id); | ||
184 | return -EBUSY; | ||
185 | } | ||
186 | |||
187 | icp_native_regs[cpu] = ioremap(addr, size); | ||
188 | if (!icp_native_regs[cpu]) { | ||
189 | pr_warning("icp_native: Failed ioremap for CPU %d, " | ||
190 | "interrupt server #0x%x, addr %#lx\n", | ||
191 | cpu, hw_id, addr); | ||
192 | release_mem_region(addr, size); | ||
193 | return -ENOMEM; | ||
194 | } | ||
195 | return 0; | ||
196 | } | ||
197 | |||
198 | static int __init icp_native_init_one_node(struct device_node *np, | ||
199 | unsigned int *indx) | ||
200 | { | ||
201 | unsigned int ilen; | ||
202 | const u32 *ireg; | ||
203 | int i; | ||
204 | int reg_tuple_size; | ||
205 | int num_servers = 0; | ||
206 | |||
207 | /* This code makes the theoretically broken assumption that the interrupt | ||
208 | * server numbers are the same as the hard CPU numbers. | ||
209 | * This happens to be the case so far but we are playing with fire... | ||
210 | * should be fixed one of these days. -BenH. | ||
211 | */ | ||
212 | ireg = of_get_property(np, "ibm,interrupt-server-ranges", &ilen); | ||
213 | |||
214 | /* Does that ever happen? We'll know soon enough... but even good old | ||
215 | * f80 does have that property. | ||
216 | */ | ||
217 | WARN_ON((ireg == NULL) || (ilen != 2*sizeof(u32))); | ||
218 | |||
219 | if (ireg) { | ||
220 | *indx = of_read_number(ireg, 1); | ||
221 | if (ilen >= 2*sizeof(u32)) | ||
222 | num_servers = of_read_number(ireg + 1, 1); | ||
223 | } | ||
224 | |||
225 | ireg = of_get_property(np, "reg", &ilen); | ||
226 | if (!ireg) { | ||
227 | pr_err("icp_native: Can't find interrupt reg property"); | ||
228 | return -1; | ||
229 | } | ||
230 | |||
231 | reg_tuple_size = (of_n_addr_cells(np) + of_n_size_cells(np)) * 4; | ||
232 | if (((ilen % reg_tuple_size) != 0) | ||
233 | || (num_servers && (num_servers != (ilen / reg_tuple_size)))) { | ||
234 | pr_err("icp_native: ICP reg len (%d) != num servers (%d)", | ||
235 | ilen / reg_tuple_size, num_servers); | ||
236 | return -1; | ||
237 | } | ||
238 | |||
239 | for (i = 0; i < (ilen / reg_tuple_size); i++) { | ||
240 | struct resource r; | ||
241 | int err; | ||
242 | |||
243 | err = of_address_to_resource(np, i, &r); | ||
244 | if (err) { | ||
245 | pr_err("icp_native: Could not translate ICP MMIO" | ||
246 | " for interrupt server 0x%x (%d)\n", *indx, err); | ||
247 | return -1; | ||
248 | } | ||
249 | |||
250 | if (icp_native_map_one_cpu(*indx, r.start, resource_size(&r))) | ||
251 | return -1; | ||
252 | |||
253 | (*indx)++; | ||
254 | } | ||
255 | return 0; | ||
256 | } | ||
257 | |||
258 | static const struct icp_ops icp_native_ops = { | ||
259 | .get_irq = icp_native_get_irq, | ||
260 | .eoi = icp_native_eoi, | ||
261 | .set_priority = icp_native_set_cpu_priority, | ||
262 | .teardown_cpu = icp_native_teardown_cpu, | ||
263 | .flush_ipi = icp_native_flush_ipi, | ||
264 | #ifdef CONFIG_SMP | ||
265 | .ipi_action = icp_native_ipi_action, | ||
266 | .cause_ipi = icp_native_cause_ipi, | ||
267 | #endif | ||
268 | }; | ||
269 | |||
270 | int icp_native_init(void) | ||
271 | { | ||
272 | struct device_node *np; | ||
273 | u32 indx = 0; | ||
274 | int found = 0; | ||
275 | |||
276 | for_each_compatible_node(np, NULL, "ibm,ppc-xicp") | ||
277 | if (icp_native_init_one_node(np, &indx) == 0) | ||
278 | found = 1; | ||
279 | if (!found) { | ||
280 | for_each_node_by_type(np, | ||
281 | "PowerPC-External-Interrupt-Presentation") { | ||
282 | if (icp_native_init_one_node(np, &indx) == 0) | ||
283 | found = 1; | ||
284 | } | ||
285 | } | ||
286 | |||
287 | if (found == 0) | ||
288 | return -ENODEV; | ||
289 | |||
290 | icp_ops = &icp_native_ops; | ||
291 | |||
292 | return 0; | ||
293 | } | ||
diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c new file mode 100644 index 000000000000..c782f85cf7e4 --- /dev/null +++ b/arch/powerpc/sysdev/xics/ics-rtas.c | |||
@@ -0,0 +1,240 @@ | |||
1 | #include <linux/types.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/irq.h> | ||
4 | #include <linux/smp.h> | ||
5 | #include <linux/interrupt.h> | ||
6 | #include <linux/init.h> | ||
7 | #include <linux/cpu.h> | ||
8 | #include <linux/of.h> | ||
9 | #include <linux/spinlock.h> | ||
10 | #include <linux/msi.h> | ||
11 | |||
12 | #include <asm/prom.h> | ||
13 | #include <asm/smp.h> | ||
14 | #include <asm/machdep.h> | ||
15 | #include <asm/irq.h> | ||
16 | #include <asm/errno.h> | ||
17 | #include <asm/xics.h> | ||
18 | #include <asm/rtas.h> | ||
19 | |||
20 | /* RTAS service tokens */ | ||
21 | static int ibm_get_xive; | ||
22 | static int ibm_set_xive; | ||
23 | static int ibm_int_on; | ||
24 | static int ibm_int_off; | ||
25 | |||
26 | static int ics_rtas_map(struct ics *ics, unsigned int virq); | ||
27 | static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec); | ||
28 | static long ics_rtas_get_server(struct ics *ics, unsigned long vec); | ||
29 | static int ics_rtas_host_match(struct ics *ics, struct device_node *node); | ||
30 | |||
31 | /* Only one global & state struct ics */ | ||
32 | static struct ics ics_rtas = { | ||
33 | .map = ics_rtas_map, | ||
34 | .mask_unknown = ics_rtas_mask_unknown, | ||
35 | .get_server = ics_rtas_get_server, | ||
36 | .host_match = ics_rtas_host_match, | ||
37 | }; | ||
38 | |||
39 | static void ics_rtas_unmask_irq(struct irq_data *d) | ||
40 | { | ||
41 | unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); | ||
42 | int call_status; | ||
43 | int server; | ||
44 | |||
45 | pr_devel("xics: unmask virq %d [hw 0x%x]\n", d->irq, hw_irq); | ||
46 | |||
47 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) | ||
48 | return; | ||
49 | |||
50 | server = xics_get_irq_server(d->irq, d->affinity, 0); | ||
51 | |||
52 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, server, | ||
53 | DEFAULT_PRIORITY); | ||
54 | if (call_status != 0) { | ||
55 | printk(KERN_ERR | ||
56 | "%s: ibm_set_xive irq %u server %x returned %d\n", | ||
57 | __func__, hw_irq, server, call_status); | ||
58 | return; | ||
59 | } | ||
60 | |||
61 | /* Now unmask the interrupt (often a no-op) */ | ||
62 | call_status = rtas_call(ibm_int_on, 1, 1, NULL, hw_irq); | ||
63 | if (call_status != 0) { | ||
64 | printk(KERN_ERR "%s: ibm_int_on irq=%u returned %d\n", | ||
65 | __func__, hw_irq, call_status); | ||
66 | return; | ||
67 | } | ||
68 | } | ||
69 | |||
70 | static unsigned int ics_rtas_startup(struct irq_data *d) | ||
71 | { | ||
72 | #ifdef CONFIG_PCI_MSI | ||
73 | /* | ||
74 | * The generic MSI code returns with the interrupt disabled on the | ||
75 | * card, using the MSI mask bits. Firmware doesn't appear to unmask | ||
76 | * at that level, so we do it here by hand. | ||
77 | */ | ||
78 | if (d->msi_desc) | ||
79 | unmask_msi_irq(d); | ||
80 | #endif | ||
81 | /* unmask it */ | ||
82 | ics_rtas_unmask_irq(d); | ||
83 | return 0; | ||
84 | } | ||
85 | |||
86 | static void ics_rtas_mask_real_irq(unsigned int hw_irq) | ||
87 | { | ||
88 | int call_status; | ||
89 | |||
90 | if (hw_irq == XICS_IPI) | ||
91 | return; | ||
92 | |||
93 | call_status = rtas_call(ibm_int_off, 1, 1, NULL, hw_irq); | ||
94 | if (call_status != 0) { | ||
95 | printk(KERN_ERR "%s: ibm_int_off irq=%u returned %d\n", | ||
96 | __func__, hw_irq, call_status); | ||
97 | return; | ||
98 | } | ||
99 | |||
100 | /* Have to set XIVE to 0xff to be able to remove a slot */ | ||
101 | call_status = rtas_call(ibm_set_xive, 3, 1, NULL, hw_irq, | ||
102 | xics_default_server, 0xff); | ||
103 | if (call_status != 0) { | ||
104 | printk(KERN_ERR "%s: ibm_set_xive(0xff) irq=%u returned %d\n", | ||
105 | __func__, hw_irq, call_status); | ||
106 | return; | ||
107 | } | ||
108 | } | ||
109 | |||
110 | static void ics_rtas_mask_irq(struct irq_data *d) | ||
111 | { | ||
112 | unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); | ||
113 | |||
114 | pr_devel("xics: mask virq %d [hw 0x%x]\n", d->irq, hw_irq); | ||
115 | |||
116 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) | ||
117 | return; | ||
118 | ics_rtas_mask_real_irq(hw_irq); | ||
119 | } | ||
120 | |||
121 | static int ics_rtas_set_affinity(struct irq_data *d, | ||
122 | const struct cpumask *cpumask, | ||
123 | bool force) | ||
124 | { | ||
125 | unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); | ||
126 | int status; | ||
127 | int xics_status[2]; | ||
128 | int irq_server; | ||
129 | |||
130 | if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS) | ||
131 | return -1; | ||
132 | |||
133 | status = rtas_call(ibm_get_xive, 1, 3, xics_status, hw_irq); | ||
134 | |||
135 | if (status) { | ||
136 | printk(KERN_ERR "%s: ibm,get-xive irq=%u returns %d\n", | ||
137 | __func__, hw_irq, status); | ||
138 | return -1; | ||
139 | } | ||
140 | |||
141 | irq_server = xics_get_irq_server(d->irq, cpumask, 1); | ||
142 | if (irq_server == -1) { | ||
143 | char cpulist[128]; | ||
144 | cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask); | ||
145 | printk(KERN_WARNING | ||
146 | "%s: No online cpus in the mask %s for irq %d\n", | ||
147 | __func__, cpulist, d->irq); | ||
148 | return -1; | ||
149 | } | ||
150 | |||
151 | status = rtas_call(ibm_set_xive, 3, 1, NULL, | ||
152 | hw_irq, irq_server, xics_status[1]); | ||
153 | |||
154 | if (status) { | ||
155 | printk(KERN_ERR "%s: ibm,set-xive irq=%u returns %d\n", | ||
156 | __func__, hw_irq, status); | ||
157 | return -1; | ||
158 | } | ||
159 | |||
160 | return IRQ_SET_MASK_OK; | ||
161 | } | ||
162 | |||
163 | static struct irq_chip ics_rtas_irq_chip = { | ||
164 | .name = "XICS", | ||
165 | .irq_startup = ics_rtas_startup, | ||
166 | .irq_mask = ics_rtas_mask_irq, | ||
167 | .irq_unmask = ics_rtas_unmask_irq, | ||
168 | .irq_eoi = NULL, /* Patched at init time */ | ||
169 | .irq_set_affinity = ics_rtas_set_affinity | ||
170 | }; | ||
171 | |||
172 | static int ics_rtas_map(struct ics *ics, unsigned int virq) | ||
173 | { | ||
174 | unsigned int hw_irq = (unsigned int)virq_to_hw(virq); | ||
175 | int status[2]; | ||
176 | int rc; | ||
177 | |||
178 | if (WARN_ON(hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)) | ||
179 | return -EINVAL; | ||
180 | |||
181 | /* Check if RTAS knows about this interrupt */ | ||
182 | rc = rtas_call(ibm_get_xive, 1, 3, status, hw_irq); | ||
183 | if (rc) | ||
184 | return -ENXIO; | ||
185 | |||
186 | irq_set_chip_and_handler(virq, &ics_rtas_irq_chip, handle_fasteoi_irq); | ||
187 | irq_set_chip_data(virq, &ics_rtas); | ||
188 | |||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | static void ics_rtas_mask_unknown(struct ics *ics, unsigned long vec) | ||
193 | { | ||
194 | ics_rtas_mask_real_irq(vec); | ||
195 | } | ||
196 | |||
197 | static long ics_rtas_get_server(struct ics *ics, unsigned long vec) | ||
198 | { | ||
199 | int rc, status[2]; | ||
200 | |||
201 | rc = rtas_call(ibm_get_xive, 1, 3, status, vec); | ||
202 | if (rc) | ||
203 | return -1; | ||
204 | return status[0]; | ||
205 | } | ||
206 | |||
207 | static int ics_rtas_host_match(struct ics *ics, struct device_node *node) | ||
208 | { | ||
209 | /* IBM machines have interrupt parents of various funky types for things | ||
210 | * like vdevices, events, etc... The trick we use here is to match | ||
211 | * everything except the legacy 8259, which is compatible with "chrp,iic" | ||
212 | */ | ||
213 | return !of_device_is_compatible(node, "chrp,iic"); | ||
214 | } | ||
215 | |||
216 | int ics_rtas_init(void) | ||
217 | { | ||
218 | ibm_get_xive = rtas_token("ibm,get-xive"); | ||
219 | ibm_set_xive = rtas_token("ibm,set-xive"); | ||
220 | ibm_int_on = rtas_token("ibm,int-on"); | ||
221 | ibm_int_off = rtas_token("ibm,int-off"); | ||
222 | |||
223 | /* We enable the RTAS "ICS" if RTAS is present with the | ||
224 | * appropriate tokens | ||
225 | */ | ||
226 | if (ibm_get_xive == RTAS_UNKNOWN_SERVICE || | ||
227 | ibm_set_xive == RTAS_UNKNOWN_SERVICE) | ||
228 | return -ENODEV; | ||
229 | |||
230 | /* We need to patch our irq chip's EOI to point to the | ||
231 | * right ICP | ||
232 | */ | ||
233 | ics_rtas_irq_chip.irq_eoi = icp_ops->eoi; | ||
234 | |||
235 | /* Register ourselves */ | ||
236 | xics_register_ics(&ics_rtas); | ||
237 | |||
238 | return 0; | ||
239 | } | ||
240 | |||
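Editorial note, for orientation: the RTAS code above is one backend behind the new common XICS layer that follows in xics-common.c. A backend plugs in through a struct ics of callbacks; the member names below are the ones the common code is seen using (map, mask_unknown, get_server, host_match plus the list link), everything else is a stubbed, hypothetical sketch rather than the RTAS implementation.

/*
 * Minimal sketch of an ICS backend, assuming struct ics carries only the
 * members exercised by the common code below.  Bodies are stubs.
 */
#include <linux/init.h>
#include <linux/of.h>
#include <asm/xics.h>

static int demo_ics_map(struct ics *ics, unsigned int virq)
{
	/* Wire up the chip/handler for virq; return 0 only if we own it. */
	return -EINVAL;
}

static void demo_ics_mask_unknown(struct ics *ics, unsigned long vec)
{
	/* Mask a stray hardware vector at the source. */
}

static long demo_ics_get_server(struct ics *ics, unsigned long vec)
{
	return -1;	/* server unknown */
}

static int demo_ics_host_match(struct ics *ics, struct device_node *node)
{
	return 1;	/* claim every interrupt parent */
}

static struct ics demo_ics = {
	.map		= demo_ics_map,
	.mask_unknown	= demo_ics_mask_unknown,
	.get_server	= demo_ics_get_server,
	.host_match	= demo_ics_host_match,
};

static int __init demo_ics_init(void)
{
	xics_register_ics(&demo_ics);
	return 0;
}

ics_rtas_init() above is this shape with rtas_call()-backed implementations filled in and the chip's irq_eoi patched from icp_ops at init time.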
diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c new file mode 100644 index 000000000000..445c5a01b766 --- /dev/null +++ b/arch/powerpc/sysdev/xics/xics-common.c | |||
@@ -0,0 +1,443 @@ | |||
1 | /* | ||
2 | * Copyright 2011 IBM Corporation. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | */ | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/threads.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/irq.h> | ||
14 | #include <linux/debugfs.h> | ||
15 | #include <linux/smp.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/seq_file.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/cpu.h> | ||
20 | #include <linux/of.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/spinlock.h> | ||
23 | |||
24 | #include <asm/prom.h> | ||
25 | #include <asm/io.h> | ||
26 | #include <asm/smp.h> | ||
27 | #include <asm/machdep.h> | ||
28 | #include <asm/irq.h> | ||
29 | #include <asm/errno.h> | ||
30 | #include <asm/rtas.h> | ||
31 | #include <asm/xics.h> | ||
32 | #include <asm/firmware.h> | ||
33 | |||
34 | /* Globals common to all ICP/ICS implementations */ | ||
35 | const struct icp_ops *icp_ops; | ||
36 | |||
37 | unsigned int xics_default_server = 0xff; | ||
38 | unsigned int xics_default_distrib_server = 0; | ||
39 | unsigned int xics_interrupt_server_size = 8; | ||
40 | |||
41 | DEFINE_PER_CPU(struct xics_cppr, xics_cppr); | ||
42 | |||
43 | struct irq_host *xics_host; | ||
44 | |||
45 | static LIST_HEAD(ics_list); | ||
46 | |||
47 | void xics_update_irq_servers(void) | ||
48 | { | ||
49 | int i, j; | ||
50 | struct device_node *np; | ||
51 | u32 ilen; | ||
52 | const u32 *ireg; | ||
53 | u32 hcpuid; | ||
54 | |||
55 | /* Find the server numbers for the boot cpu. */ | ||
56 | np = of_get_cpu_node(boot_cpuid, NULL); | ||
57 | BUG_ON(!np); | ||
58 | |||
59 | hcpuid = get_hard_smp_processor_id(boot_cpuid); | ||
60 | xics_default_server = xics_default_distrib_server = hcpuid; | ||
61 | |||
62 | pr_devel("xics: xics_default_server = 0x%x\n", xics_default_server); | ||
63 | |||
64 | ireg = of_get_property(np, "ibm,ppc-interrupt-gserver#s", &ilen); | ||
65 | if (!ireg) { | ||
66 | of_node_put(np); | ||
67 | return; | ||
68 | } | ||
69 | |||
70 | i = ilen / sizeof(int); | ||
71 | |||
72 | /* Global interrupt distribution server is specified in the last | ||
73 | * entry of "ibm,ppc-interrupt-gserver#s" property. Get the last | ||
74 | * entry from this property for the current boot cpu id and use it as | ||
75 | * the default distribution server | ||
76 | */ | ||
77 | for (j = 0; j < i; j += 2) { | ||
78 | if (ireg[j] == hcpuid) { | ||
79 | xics_default_distrib_server = ireg[j+1]; | ||
80 | break; | ||
81 | } | ||
82 | } | ||
83 | pr_devel("xics: xics_default_distrib_server = 0x%x\n", | ||
84 | xics_default_distrib_server); | ||
85 | of_node_put(np); | ||
86 | } | ||
87 | |||
88 | /* GIQ stuff, currently only supported on RTAS setups, will have | ||
89 | * to be sorted properly for bare metal | ||
90 | */ | ||
91 | void xics_set_cpu_giq(unsigned int gserver, unsigned int join) | ||
92 | { | ||
93 | #ifdef CONFIG_PPC_RTAS | ||
94 | int index; | ||
95 | int status; | ||
96 | |||
97 | if (!rtas_indicator_present(GLOBAL_INTERRUPT_QUEUE, NULL)) | ||
98 | return; | ||
99 | |||
100 | index = (1UL << xics_interrupt_server_size) - 1 - gserver; | ||
101 | |||
102 | status = rtas_set_indicator_fast(GLOBAL_INTERRUPT_QUEUE, index, join); | ||
103 | |||
104 | WARN(status < 0, "set-indicator(%d, %d, %u) returned %d\n", | ||
105 | GLOBAL_INTERRUPT_QUEUE, index, join, status); | ||
106 | #endif | ||
107 | } | ||
108 | |||
109 | void xics_setup_cpu(void) | ||
110 | { | ||
111 | icp_ops->set_priority(LOWEST_PRIORITY); | ||
112 | |||
113 | xics_set_cpu_giq(xics_default_distrib_server, 1); | ||
114 | } | ||
115 | |||
116 | void xics_mask_unknown_vec(unsigned int vec) | ||
117 | { | ||
118 | struct ics *ics; | ||
119 | |||
120 | pr_err("Interrupt 0x%x (real) is invalid, disabling it.\n", vec); | ||
121 | |||
122 | list_for_each_entry(ics, &ics_list, link) | ||
123 | ics->mask_unknown(ics, vec); | ||
124 | } | ||
125 | |||
126 | |||
127 | #ifdef CONFIG_SMP | ||
128 | |||
129 | static void xics_request_ipi(void) | ||
130 | { | ||
131 | unsigned int ipi; | ||
132 | |||
133 | ipi = irq_create_mapping(xics_host, XICS_IPI); | ||
134 | BUG_ON(ipi == NO_IRQ); | ||
135 | |||
136 | /* | ||
137 | * IPIs are marked IRQF_DISABLED as they must run with irqs | ||
138 | * disabled, and PERCPU. The handler was set in map. | ||
139 | */ | ||
140 | BUG_ON(request_irq(ipi, icp_ops->ipi_action, | ||
141 | IRQF_DISABLED|IRQF_PERCPU, "IPI", NULL)); | ||
142 | } | ||
143 | |||
144 | int __init xics_smp_probe(void) | ||
145 | { | ||
146 | /* Setup cause_ipi callback based on which ICP is used */ | ||
147 | smp_ops->cause_ipi = icp_ops->cause_ipi; | ||
148 | |||
149 | /* Register all the IPIs */ | ||
150 | xics_request_ipi(); | ||
151 | |||
152 | return cpumask_weight(cpu_possible_mask); | ||
153 | } | ||
154 | |||
155 | #endif /* CONFIG_SMP */ | ||
156 | |||
157 | void xics_teardown_cpu(void) | ||
158 | { | ||
159 | struct xics_cppr *os_cppr = &__get_cpu_var(xics_cppr); | ||
160 | |||
161 | /* | ||
162 | * we have to reset the cppr index to 0 because we're | ||
163 | * not going to return from the IPI | ||
164 | */ | ||
165 | os_cppr->index = 0; | ||
166 | icp_ops->set_priority(0); | ||
167 | icp_ops->teardown_cpu(); | ||
168 | } | ||
169 | |||
170 | void xics_kexec_teardown_cpu(int secondary) | ||
171 | { | ||
172 | xics_teardown_cpu(); | ||
173 | |||
174 | icp_ops->flush_ipi(); | ||
175 | |||
176 | /* | ||
177 | * Some machines need to have at least one cpu in the GIQ, | ||
178 | * so leave the master cpu in the group. | ||
179 | */ | ||
180 | if (secondary) | ||
181 | xics_set_cpu_giq(xics_default_distrib_server, 0); | ||
182 | } | ||
183 | |||
184 | |||
185 | #ifdef CONFIG_HOTPLUG_CPU | ||
186 | |||
187 | /* Interrupts are disabled. */ | ||
188 | void xics_migrate_irqs_away(void) | ||
189 | { | ||
190 | int cpu = smp_processor_id(), hw_cpu = hard_smp_processor_id(); | ||
191 | unsigned int irq, virq; | ||
192 | |||
193 | /* If we used to be the default server, move to the new "boot_cpuid" */ | ||
194 | if (hw_cpu == xics_default_server) | ||
195 | xics_update_irq_servers(); | ||
196 | |||
197 | /* Reject any interrupt that was queued to us... */ | ||
198 | icp_ops->set_priority(0); | ||
199 | |||
200 | /* Remove ourselves from the global interrupt queue */ | ||
201 | xics_set_cpu_giq(xics_default_distrib_server, 0); | ||
202 | |||
203 | /* Allow IPIs again... */ | ||
204 | icp_ops->set_priority(DEFAULT_PRIORITY); | ||
205 | |||
206 | for_each_irq(virq) { | ||
207 | struct irq_desc *desc; | ||
208 | struct irq_chip *chip; | ||
209 | long server; | ||
210 | unsigned long flags; | ||
211 | struct ics *ics; | ||
212 | |||
213 | /* We can't set affinity on ISA interrupts */ | ||
214 | if (virq < NUM_ISA_INTERRUPTS) | ||
215 | continue; | ||
216 | if (!virq_is_host(virq, xics_host)) | ||
217 | continue; | ||
218 | irq = (unsigned int)virq_to_hw(virq); | ||
219 | /* We need to get IPIs still. */ | ||
220 | if (irq == XICS_IPI || irq == XICS_IRQ_SPURIOUS) | ||
221 | continue; | ||
222 | desc = irq_to_desc(virq); | ||
223 | /* We only need to migrate enabled IRQs */ | ||
224 | if (!desc || !desc->action) | ||
225 | continue; | ||
226 | chip = irq_desc_get_chip(desc); | ||
227 | if (!chip || !chip->irq_set_affinity) | ||
228 | continue; | ||
229 | |||
230 | raw_spin_lock_irqsave(&desc->lock, flags); | ||
231 | |||
232 | /* Locate interrupt server */ | ||
233 | server = -1; | ||
234 | ics = irq_get_chip_data(virq); | ||
235 | if (ics) | ||
236 | server = ics->get_server(ics, irq); | ||
237 | if (server < 0) { | ||
238 | printk(KERN_ERR "%s: Can't find server for irq %d\n", | ||
239 | __func__, irq); | ||
240 | goto unlock; | ||
241 | } | ||
242 | |||
243 | /* We only support delivery to all cpus or to one cpu. | ||
244 | * The irq has to be migrated only in the single cpu | ||
245 | * case. | ||
246 | */ | ||
247 | if (server != hw_cpu) | ||
248 | goto unlock; | ||
249 | |||
250 | /* This is expected during cpu offline. */ | ||
251 | if (cpu_online(cpu)) | ||
252 | pr_warning("IRQ %u affinity broken off cpu %u\n", | ||
253 | virq, cpu); | ||
254 | |||
255 | /* Reset affinity to all cpus */ | ||
256 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
257 | irq_set_affinity(virq, cpu_all_mask); | ||
258 | continue; | ||
259 | unlock: | ||
260 | raw_spin_unlock_irqrestore(&desc->lock, flags); | ||
261 | } | ||
262 | } | ||
263 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
264 | |||
265 | #ifdef CONFIG_SMP | ||
266 | /* | ||
267 | * For the moment we only implement delivery to all cpus or one cpu. | ||
268 | * | ||
269 | * If the requested affinity is cpu_all_mask, we set global affinity. | ||
270 | * If not we set it to the first cpu in the mask, even if multiple cpus | ||
271 | * are set. This is so things like irqbalance (which set core and package | ||
272 | * wide affinities) do the right thing. | ||
273 | * | ||
274 | * We need to fix this to implement support for the links | ||
275 | */ | ||
276 | int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, | ||
277 | unsigned int strict_check) | ||
278 | { | ||
279 | |||
280 | if (!distribute_irqs) | ||
281 | return xics_default_server; | ||
282 | |||
283 | if (!cpumask_subset(cpu_possible_mask, cpumask)) { | ||
284 | int server = cpumask_first_and(cpu_online_mask, cpumask); | ||
285 | |||
286 | if (server < nr_cpu_ids) | ||
287 | return get_hard_smp_processor_id(server); | ||
288 | |||
289 | if (strict_check) | ||
290 | return -1; | ||
291 | } | ||
292 | |||
293 | /* | ||
294 | * Workaround issue with some versions of JS20 firmware that | ||
295 | * deliver interrupts to cpus which haven't been started. This | ||
296 | * happens when using the maxcpus= boot option. | ||
297 | */ | ||
298 | if (cpumask_equal(cpu_online_mask, cpu_present_mask)) | ||
299 | return xics_default_distrib_server; | ||
300 | |||
301 | return xics_default_server; | ||
302 | } | ||
303 | #endif /* CONFIG_SMP */ | ||
304 | |||
305 | static int xics_host_match(struct irq_host *h, struct device_node *node) | ||
306 | { | ||
307 | struct ics *ics; | ||
308 | |||
309 | list_for_each_entry(ics, &ics_list, link) | ||
310 | if (ics->host_match(ics, node)) | ||
311 | return 1; | ||
312 | |||
313 | return 0; | ||
314 | } | ||
315 | |||
316 | /* Dummies */ | ||
317 | static void xics_ipi_unmask(struct irq_data *d) { } | ||
318 | static void xics_ipi_mask(struct irq_data *d) { } | ||
319 | |||
320 | static struct irq_chip xics_ipi_chip = { | ||
321 | .name = "XICS", | ||
322 | .irq_eoi = NULL, /* Patched at init time */ | ||
323 | .irq_mask = xics_ipi_mask, | ||
324 | .irq_unmask = xics_ipi_unmask, | ||
325 | }; | ||
326 | |||
327 | static int xics_host_map(struct irq_host *h, unsigned int virq, | ||
328 | irq_hw_number_t hw) | ||
329 | { | ||
330 | struct ics *ics; | ||
331 | |||
332 | pr_devel("xics: map virq %d, hwirq 0x%lx\n", virq, hw); | ||
333 | |||
334 | /* Insert the interrupt mapping into the radix tree for fast lookup */ | ||
335 | irq_radix_revmap_insert(xics_host, virq, hw); | ||
336 | |||
337 | /* They aren't all level sensitive but we just don't really know */ | ||
338 | irq_set_status_flags(virq, IRQ_LEVEL); | ||
339 | |||
340 | /* Don't call into ICS for IPIs */ | ||
341 | if (hw == XICS_IPI) { | ||
342 | irq_set_chip_and_handler(virq, &xics_ipi_chip, | ||
343 | handle_percpu_irq); | ||
344 | return 0; | ||
345 | } | ||
346 | |||
347 | /* Let the ICS setup the chip data */ | ||
348 | list_for_each_entry(ics, &ics_list, link) | ||
349 | if (ics->map(ics, virq) == 0) | ||
350 | return 0; | ||
351 | |||
352 | return -EINVAL; | ||
353 | } | ||
354 | |||
355 | static int xics_host_xlate(struct irq_host *h, struct device_node *ct, | ||
356 | const u32 *intspec, unsigned int intsize, | ||
357 | irq_hw_number_t *out_hwirq, unsigned int *out_flags) | ||
358 | |||
359 | { | ||
360 | /* Current xics implementation translates everything | ||
361 | * to level. It is not technically right for MSIs but this | ||
362 | * is irrelevant at this point. We might get smarter in the future | ||
363 | */ | ||
364 | *out_hwirq = intspec[0]; | ||
365 | *out_flags = IRQ_TYPE_LEVEL_LOW; | ||
366 | |||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | static struct irq_host_ops xics_host_ops = { | ||
371 | .match = xics_host_match, | ||
372 | .map = xics_host_map, | ||
373 | .xlate = xics_host_xlate, | ||
374 | }; | ||
375 | |||
376 | static void __init xics_init_host(void) | ||
377 | { | ||
378 | xics_host = irq_alloc_host(NULL, IRQ_HOST_MAP_TREE, 0, &xics_host_ops, | ||
379 | XICS_IRQ_SPURIOUS); | ||
380 | BUG_ON(xics_host == NULL); | ||
381 | irq_set_default_host(xics_host); | ||
382 | } | ||
383 | |||
384 | void __init xics_register_ics(struct ics *ics) | ||
385 | { | ||
386 | list_add(&ics->link, &ics_list); | ||
387 | } | ||
388 | |||
389 | static void __init xics_get_server_size(void) | ||
390 | { | ||
391 | struct device_node *np; | ||
392 | const u32 *isize; | ||
393 | |||
394 | /* We fetch the interrupt server size from the first ICS node | ||
395 | * we find, if any | ||
396 | */ | ||
397 | np = of_find_compatible_node(NULL, NULL, "ibm,ppc-xics"); | ||
398 | if (!np) | ||
399 | return; | ||
400 | isize = of_get_property(np, "ibm,interrupt-server#-size", NULL); | ||
401 | if (!isize) | ||
402 | return; | ||
403 | xics_interrupt_server_size = *isize; | ||
404 | of_node_put(np); | ||
405 | } | ||
406 | |||
407 | void __init xics_init(void) | ||
408 | { | ||
409 | int rc = -1; | ||
410 | |||
411 | /* First locate ICP */ | ||
412 | #ifdef CONFIG_PPC_ICP_HV | ||
413 | if (firmware_has_feature(FW_FEATURE_LPAR)) | ||
414 | rc = icp_hv_init(); | ||
415 | #endif | ||
416 | #ifdef CONFIG_PPC_ICP_NATIVE | ||
417 | if (rc < 0) | ||
418 | rc = icp_native_init(); | ||
419 | #endif | ||
420 | if (rc < 0) { | ||
421 | pr_warning("XICS: Cannot find a Presentation Controller !\n"); | ||
422 | return; | ||
423 | } | ||
424 | |||
425 | /* Copy get_irq callback over to ppc_md */ | ||
426 | ppc_md.get_irq = icp_ops->get_irq; | ||
427 | |||
428 | /* Patch up IPI chip EOI */ | ||
429 | xics_ipi_chip.irq_eoi = icp_ops->eoi; | ||
430 | |||
431 | /* Now locate ICS */ | ||
432 | #ifdef CONFIG_PPC_ICS_RTAS | ||
433 | rc = ics_rtas_init(); | ||
434 | #endif | ||
435 | if (rc < 0) | ||
436 | pr_warning("XICS: Cannot find a Source Controller !\n"); | ||
437 | |||
438 | /* Initialize common bits */ | ||
439 | xics_get_server_size(); | ||
440 | xics_update_irq_servers(); | ||
441 | xics_init_host(); | ||
442 | xics_setup_cpu(); | ||
443 | } | ||
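Editorial note: nothing outside this file calls the irq_host ops directly; drivers reach .xlate and .map through the normal device-tree interrupt path. A rough, hypothetical illustration follows; the device node, handler and names are made up, not from the patch.

/*
 * Hypothetical consumer: irq_of_parse_and_map() resolves the node's
 * "interrupts" property through the host's .xlate, then .map attaches
 * the appropriate ICS chip before the virq is requested.
 */
#include <linux/of_irq.h>
#include <linux/interrupt.h>
#include <linux/errno.h>
#include <asm/irq.h>

static irqreturn_t demo_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int demo_request(struct device_node *np)
{
	unsigned int virq = irq_of_parse_and_map(np, 0);

	if (virq == NO_IRQ)
		return -ENXIO;

	return request_irq(virq, demo_handler, 0, "demo", NULL);
}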
diff --git a/arch/powerpc/sysdev/xilinx_intc.c b/arch/powerpc/sysdev/xilinx_intc.c index 1e0ccfaf403e..6183799754af 100644 --- a/arch/powerpc/sysdev/xilinx_intc.c +++ b/arch/powerpc/sysdev/xilinx_intc.c | |||
@@ -69,32 +69,26 @@ static unsigned char xilinx_intc_map_senses[] = { | |||
69 | * | 69 | * |
70 | * IRQ Chip common (across level and edge) operations | 70 | * IRQ Chip common (across level and edge) operations |
71 | */ | 71 | */ |
72 | static void xilinx_intc_mask(unsigned int virq) | 72 | static void xilinx_intc_mask(struct irq_data *d) |
73 | { | 73 | { |
74 | int irq = virq_to_hw(virq); | 74 | int irq = irqd_to_hwirq(d); |
75 | void * regs = get_irq_chip_data(virq); | 75 | void * regs = irq_data_get_irq_chip_data(d); |
76 | pr_debug("mask: %d\n", irq); | 76 | pr_debug("mask: %d\n", irq); |
77 | out_be32(regs + XINTC_CIE, 1 << irq); | 77 | out_be32(regs + XINTC_CIE, 1 << irq); |
78 | } | 78 | } |
79 | 79 | ||
80 | static int xilinx_intc_set_type(unsigned int virq, unsigned int flow_type) | 80 | static int xilinx_intc_set_type(struct irq_data *d, unsigned int flow_type) |
81 | { | 81 | { |
82 | struct irq_desc *desc = irq_to_desc(virq); | ||
83 | |||
84 | desc->status &= ~(IRQ_TYPE_SENSE_MASK | IRQ_LEVEL); | ||
85 | desc->status |= flow_type & IRQ_TYPE_SENSE_MASK; | ||
86 | if (flow_type & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW)) | ||
87 | desc->status |= IRQ_LEVEL; | ||
88 | return 0; | 82 | return 0; |
89 | } | 83 | } |
90 | 84 | ||
91 | /* | 85 | /* |
92 | * IRQ Chip level operations | 86 | * IRQ Chip level operations |
93 | */ | 87 | */ |
94 | static void xilinx_intc_level_unmask(unsigned int virq) | 88 | static void xilinx_intc_level_unmask(struct irq_data *d) |
95 | { | 89 | { |
96 | int irq = virq_to_hw(virq); | 90 | int irq = irqd_to_hwirq(d); |
97 | void * regs = get_irq_chip_data(virq); | 91 | void * regs = irq_data_get_irq_chip_data(d); |
98 | pr_debug("unmask: %d\n", irq); | 92 | pr_debug("unmask: %d\n", irq); |
99 | out_be32(regs + XINTC_SIE, 1 << irq); | 93 | out_be32(regs + XINTC_SIE, 1 << irq); |
100 | 94 | ||
@@ -107,37 +101,37 @@ static void xilinx_intc_level_unmask(unsigned int virq) | |||
107 | 101 | ||
108 | static struct irq_chip xilinx_intc_level_irqchip = { | 102 | static struct irq_chip xilinx_intc_level_irqchip = { |
109 | .name = "Xilinx Level INTC", | 103 | .name = "Xilinx Level INTC", |
110 | .mask = xilinx_intc_mask, | 104 | .irq_mask = xilinx_intc_mask, |
111 | .mask_ack = xilinx_intc_mask, | 105 | .irq_mask_ack = xilinx_intc_mask, |
112 | .unmask = xilinx_intc_level_unmask, | 106 | .irq_unmask = xilinx_intc_level_unmask, |
113 | .set_type = xilinx_intc_set_type, | 107 | .irq_set_type = xilinx_intc_set_type, |
114 | }; | 108 | }; |
115 | 109 | ||
116 | /* | 110 | /* |
117 | * IRQ Chip edge operations | 111 | * IRQ Chip edge operations |
118 | */ | 112 | */ |
119 | static void xilinx_intc_edge_unmask(unsigned int virq) | 113 | static void xilinx_intc_edge_unmask(struct irq_data *d) |
120 | { | 114 | { |
121 | int irq = virq_to_hw(virq); | 115 | int irq = irqd_to_hwirq(d); |
122 | void *regs = get_irq_chip_data(virq); | 116 | void *regs = irq_data_get_irq_chip_data(d); |
123 | pr_debug("unmask: %d\n", irq); | 117 | pr_debug("unmask: %d\n", irq); |
124 | out_be32(regs + XINTC_SIE, 1 << irq); | 118 | out_be32(regs + XINTC_SIE, 1 << irq); |
125 | } | 119 | } |
126 | 120 | ||
127 | static void xilinx_intc_edge_ack(unsigned int virq) | 121 | static void xilinx_intc_edge_ack(struct irq_data *d) |
128 | { | 122 | { |
129 | int irq = virq_to_hw(virq); | 123 | int irq = irqd_to_hwirq(d); |
130 | void * regs = get_irq_chip_data(virq); | 124 | void * regs = irq_data_get_irq_chip_data(d); |
131 | pr_debug("ack: %d\n", irq); | 125 | pr_debug("ack: %d\n", irq); |
132 | out_be32(regs + XINTC_IAR, 1 << irq); | 126 | out_be32(regs + XINTC_IAR, 1 << irq); |
133 | } | 127 | } |
134 | 128 | ||
135 | static struct irq_chip xilinx_intc_edge_irqchip = { | 129 | static struct irq_chip xilinx_intc_edge_irqchip = { |
136 | .name = "Xilinx Edge INTC", | 130 | .name = "Xilinx Edge INTC", |
137 | .mask = xilinx_intc_mask, | 131 | .irq_mask = xilinx_intc_mask, |
138 | .unmask = xilinx_intc_edge_unmask, | 132 | .irq_unmask = xilinx_intc_edge_unmask, |
139 | .ack = xilinx_intc_edge_ack, | 133 | .irq_ack = xilinx_intc_edge_ack, |
140 | .set_type = xilinx_intc_set_type, | 134 | .irq_set_type = xilinx_intc_set_type, |
141 | }; | 135 | }; |
142 | 136 | ||
143 | /* | 137 | /* |
@@ -170,15 +164,15 @@ static int xilinx_intc_xlate(struct irq_host *h, struct device_node *ct, | |||
170 | static int xilinx_intc_map(struct irq_host *h, unsigned int virq, | 164 | static int xilinx_intc_map(struct irq_host *h, unsigned int virq, |
171 | irq_hw_number_t irq) | 165 | irq_hw_number_t irq) |
172 | { | 166 | { |
173 | set_irq_chip_data(virq, h->host_data); | 167 | irq_set_chip_data(virq, h->host_data); |
174 | 168 | ||
175 | if (xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_HIGH || | 169 | if (xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_HIGH || |
176 | xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_LOW) { | 170 | xilinx_intc_typetable[irq] == IRQ_TYPE_LEVEL_LOW) { |
177 | set_irq_chip_and_handler(virq, &xilinx_intc_level_irqchip, | 171 | irq_set_chip_and_handler(virq, &xilinx_intc_level_irqchip, |
178 | handle_level_irq); | 172 | handle_level_irq); |
179 | } else { | 173 | } else { |
180 | set_irq_chip_and_handler(virq, &xilinx_intc_edge_irqchip, | 174 | irq_set_chip_and_handler(virq, &xilinx_intc_edge_irqchip, |
181 | handle_edge_irq); | 175 | handle_edge_irq); |
182 | } | 176 | } |
183 | return 0; | 177 | return 0; |
184 | } | 178 | } |
@@ -229,12 +223,14 @@ int xilinx_intc_get_irq(void) | |||
229 | */ | 223 | */ |
230 | static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc) | 224 | static void xilinx_i8259_cascade(unsigned int irq, struct irq_desc *desc) |
231 | { | 225 | { |
226 | struct irq_chip *chip = irq_desc_get_chip(desc); | ||
232 | unsigned int cascade_irq = i8259_irq(); | 227 | unsigned int cascade_irq = i8259_irq(); |
228 | |||
233 | if (cascade_irq) | 229 | if (cascade_irq) |
234 | generic_handle_irq(cascade_irq); | 230 | generic_handle_irq(cascade_irq); |
235 | 231 | ||
236 | /* Let xilinx_intc end the interrupt */ | 232 | /* Let xilinx_intc end the interrupt */ |
237 | desc->chip->unmask(irq); | 233 | chip->irq_unmask(&desc->irq_data); |
238 | } | 234 | } |
239 | 235 | ||
240 | static void __init xilinx_i8259_setup_cascade(void) | 236 | static void __init xilinx_i8259_setup_cascade(void) |
@@ -254,7 +250,7 @@ static void __init xilinx_i8259_setup_cascade(void) | |||
254 | } | 250 | } |
255 | 251 | ||
256 | i8259_init(cascade_node, 0); | 252 | i8259_init(cascade_node, 0); |
257 | set_irq_chained_handler(cascade_irq, xilinx_i8259_cascade); | 253 | irq_set_chained_handler(cascade_irq, xilinx_i8259_cascade); |
258 | 254 | ||
259 | /* Program irq 7 (usb/audio), 14/15 (ide) to level sensitive */ | 255 | /* Program irq 7 (usb/audio), 14/15 (ide) to level sensitive */ |
260 | /* This looks like a dirty hack to me --gcl */ | 256 | /* This looks like a dirty hack to me --gcl */ |
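Editorial note: the cascade change in the hunk above is the one non-mechanical part of this conversion. The flow handler can no longer poke desc->chip->unmask(irq); it looks the chip up from the descriptor and calls its irq_* method on the embedded irq_data. A stripped-down sketch of that pattern; only the generic helpers and i8259_irq() are real, the rest is illustrative.

#include <linux/irq.h>
#include <asm/i8259.h>

static void demo_cascade(unsigned int irq, struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq = i8259_irq();

	if (cascade_irq)
		generic_handle_irq(cascade_irq);

	/* EOI/unmask of the parent goes through its irq_data now. */
	chip->irq_unmask(&desc->irq_data);
}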
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index faa81b6a6612..c168c54e3c40 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile | |||
@@ -4,9 +4,7 @@ subdir-ccflags-$(CONFIG_PPC_WERROR) := -Werror | |||
4 | 4 | ||
5 | GCOV_PROFILE := n | 5 | GCOV_PROFILE := n |
6 | 6 | ||
7 | ifdef CONFIG_PPC64 | 7 | ccflags-$(CONFIG_PPC64) := -mno-minimal-toc |
8 | EXTRA_CFLAGS += -mno-minimal-toc | ||
9 | endif | ||
10 | 8 | ||
11 | obj-y += xmon.o start.o nonstdio.o | 9 | obj-y += xmon.o start.o nonstdio.o |
12 | 10 | ||
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index d17d04cfb2cd..42541bbcc7fa 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c | |||
@@ -334,7 +334,7 @@ static void release_output_lock(void) | |||
334 | 334 | ||
335 | int cpus_are_in_xmon(void) | 335 | int cpus_are_in_xmon(void) |
336 | { | 336 | { |
337 | return !cpus_empty(cpus_in_xmon); | 337 | return !cpumask_empty(&cpus_in_xmon); |
338 | } | 338 | } |
339 | #endif | 339 | #endif |
340 | 340 | ||
@@ -373,7 +373,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) | |||
373 | 373 | ||
374 | #ifdef CONFIG_SMP | 374 | #ifdef CONFIG_SMP |
375 | cpu = smp_processor_id(); | 375 | cpu = smp_processor_id(); |
376 | if (cpu_isset(cpu, cpus_in_xmon)) { | 376 | if (cpumask_test_cpu(cpu, &cpus_in_xmon)) { |
377 | get_output_lock(); | 377 | get_output_lock(); |
378 | excprint(regs); | 378 | excprint(regs); |
379 | printf("cpu 0x%x: Exception %lx %s in xmon, " | 379 | printf("cpu 0x%x: Exception %lx %s in xmon, " |
@@ -396,10 +396,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi) | |||
396 | } | 396 | } |
397 | 397 | ||
398 | xmon_fault_jmp[cpu] = recurse_jmp; | 398 | xmon_fault_jmp[cpu] = recurse_jmp; |
399 | cpu_set(cpu, cpus_in_xmon); | 399 | cpumask_set_cpu(cpu, &cpus_in_xmon); |
400 | 400 | ||
401 | bp = NULL; | 401 | bp = NULL; |
402 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF)) | 402 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) |
403 | bp = at_breakpoint(regs->nip); | 403 | bp = at_breakpoint(regs->nip); |
404 | if (bp || unrecoverable_excp(regs)) | 404 | if (bp || unrecoverable_excp(regs)) |
405 | fromipi = 0; | 405 | fromipi = 0; |
@@ -437,10 +437,10 @@ static int xmon_core(struct pt_regs *regs, int fromipi) | |||
437 | xmon_owner = cpu; | 437 | xmon_owner = cpu; |
438 | mb(); | 438 | mb(); |
439 | if (ncpus > 1) { | 439 | if (ncpus > 1) { |
440 | smp_send_debugger_break(MSG_ALL_BUT_SELF); | 440 | smp_send_debugger_break(); |
441 | /* wait for other cpus to come in */ | 441 | /* wait for other cpus to come in */ |
442 | for (timeout = 100000000; timeout != 0; --timeout) { | 442 | for (timeout = 100000000; timeout != 0; --timeout) { |
443 | if (cpus_weight(cpus_in_xmon) >= ncpus) | 443 | if (cpumask_weight(&cpus_in_xmon) >= ncpus) |
444 | break; | 444 | break; |
445 | barrier(); | 445 | barrier(); |
446 | } | 446 | } |
@@ -484,7 +484,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) | |||
484 | } | 484 | } |
485 | } | 485 | } |
486 | leave: | 486 | leave: |
487 | cpu_clear(cpu, cpus_in_xmon); | 487 | cpumask_clear_cpu(cpu, &cpus_in_xmon); |
488 | xmon_fault_jmp[cpu] = NULL; | 488 | xmon_fault_jmp[cpu] = NULL; |
489 | #else | 489 | #else |
490 | /* UP is simple... */ | 490 | /* UP is simple... */ |
@@ -529,7 +529,7 @@ static int xmon_core(struct pt_regs *regs, int fromipi) | |||
529 | } | 529 | } |
530 | } | 530 | } |
531 | #else | 531 | #else |
532 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF)) { | 532 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) { |
533 | bp = at_breakpoint(regs->nip); | 533 | bp = at_breakpoint(regs->nip); |
534 | if (bp != NULL) { | 534 | if (bp != NULL) { |
535 | int stepped = emulate_step(regs, bp->instr[0]); | 535 | int stepped = emulate_step(regs, bp->instr[0]); |
@@ -578,7 +578,7 @@ static int xmon_bpt(struct pt_regs *regs) | |||
578 | struct bpt *bp; | 578 | struct bpt *bp; |
579 | unsigned long offset; | 579 | unsigned long offset; |
580 | 580 | ||
581 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF)) | 581 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) |
582 | return 0; | 582 | return 0; |
583 | 583 | ||
584 | /* Are we at the trap at bp->instr[1] for some bp? */ | 584 | /* Are we at the trap at bp->instr[1] for some bp? */ |
@@ -609,7 +609,7 @@ static int xmon_sstep(struct pt_regs *regs) | |||
609 | 609 | ||
610 | static int xmon_dabr_match(struct pt_regs *regs) | 610 | static int xmon_dabr_match(struct pt_regs *regs) |
611 | { | 611 | { |
612 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF)) | 612 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) |
613 | return 0; | 613 | return 0; |
614 | if (dabr.enabled == 0) | 614 | if (dabr.enabled == 0) |
615 | return 0; | 615 | return 0; |
@@ -619,7 +619,7 @@ static int xmon_dabr_match(struct pt_regs *regs) | |||
619 | 619 | ||
620 | static int xmon_iabr_match(struct pt_regs *regs) | 620 | static int xmon_iabr_match(struct pt_regs *regs) |
621 | { | 621 | { |
622 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) != (MSR_IR|MSR_SF)) | 622 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) != (MSR_IR|MSR_64BIT)) |
623 | return 0; | 623 | return 0; |
624 | if (iabr == NULL) | 624 | if (iabr == NULL) |
625 | return 0; | 625 | return 0; |
@@ -630,7 +630,7 @@ static int xmon_iabr_match(struct pt_regs *regs) | |||
630 | static int xmon_ipi(struct pt_regs *regs) | 630 | static int xmon_ipi(struct pt_regs *regs) |
631 | { | 631 | { |
632 | #ifdef CONFIG_SMP | 632 | #ifdef CONFIG_SMP |
633 | if (in_xmon && !cpu_isset(smp_processor_id(), cpus_in_xmon)) | 633 | if (in_xmon && !cpumask_test_cpu(smp_processor_id(), &cpus_in_xmon)) |
634 | xmon_core(regs, 1); | 634 | xmon_core(regs, 1); |
635 | #endif | 635 | #endif |
636 | return 0; | 636 | return 0; |
@@ -644,7 +644,7 @@ static int xmon_fault_handler(struct pt_regs *regs) | |||
644 | if (in_xmon && catch_memory_errors) | 644 | if (in_xmon && catch_memory_errors) |
645 | handle_fault(regs); /* doesn't return */ | 645 | handle_fault(regs); /* doesn't return */ |
646 | 646 | ||
647 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_SF)) == (MSR_IR|MSR_SF)) { | 647 | if ((regs->msr & (MSR_IR|MSR_PR|MSR_64BIT)) == (MSR_IR|MSR_64BIT)) { |
648 | bp = in_breakpoint_table(regs->nip, &offset); | 648 | bp = in_breakpoint_table(regs->nip, &offset); |
649 | if (bp != NULL) { | 649 | if (bp != NULL) { |
650 | regs->nip = bp->address + offset; | 650 | regs->nip = bp->address + offset; |
@@ -821,7 +821,7 @@ cmds(struct pt_regs *excp) | |||
821 | memzcan(); | 821 | memzcan(); |
822 | break; | 822 | break; |
823 | case 'i': | 823 | case 'i': |
824 | show_mem(); | 824 | show_mem(0); |
825 | break; | 825 | break; |
826 | default: | 826 | default: |
827 | termch = cmd; | 827 | termch = cmd; |
@@ -929,7 +929,7 @@ static int do_step(struct pt_regs *regs) | |||
929 | int stepped; | 929 | int stepped; |
930 | 930 | ||
931 | /* check we are in 64-bit kernel mode, translation enabled */ | 931 | /* check we are in 64-bit kernel mode, translation enabled */ |
932 | if ((regs->msr & (MSR_SF|MSR_PR|MSR_IR)) == (MSR_SF|MSR_IR)) { | 932 | if ((regs->msr & (MSR_64BIT|MSR_PR|MSR_IR)) == (MSR_64BIT|MSR_IR)) { |
933 | if (mread(regs->nip, &instr, 4) == 4) { | 933 | if (mread(regs->nip, &instr, 4) == 4) { |
934 | stepped = emulate_step(regs, instr); | 934 | stepped = emulate_step(regs, instr); |
935 | if (stepped < 0) { | 935 | if (stepped < 0) { |
@@ -976,7 +976,7 @@ static int cpu_cmd(void) | |||
976 | printf("cpus stopped:"); | 976 | printf("cpus stopped:"); |
977 | count = 0; | 977 | count = 0; |
978 | for (cpu = 0; cpu < NR_CPUS; ++cpu) { | 978 | for (cpu = 0; cpu < NR_CPUS; ++cpu) { |
979 | if (cpu_isset(cpu, cpus_in_xmon)) { | 979 | if (cpumask_test_cpu(cpu, &cpus_in_xmon)) { |
980 | if (count == 0) | 980 | if (count == 0) |
981 | printf(" %x", cpu); | 981 | printf(" %x", cpu); |
982 | ++count; | 982 | ++count; |
@@ -992,7 +992,7 @@ static int cpu_cmd(void) | |||
992 | return 0; | 992 | return 0; |
993 | } | 993 | } |
994 | /* try to switch to cpu specified */ | 994 | /* try to switch to cpu specified */ |
995 | if (!cpu_isset(cpu, cpus_in_xmon)) { | 995 | if (!cpumask_test_cpu(cpu, &cpus_in_xmon)) { |
996 | printf("cpu 0x%x isn't in xmon\n", cpu); | 996 | printf("cpu 0x%x isn't in xmon\n", cpu); |
997 | return 0; | 997 | return 0; |
998 | } | 998 | } |
@@ -1497,6 +1497,10 @@ static void prregs(struct pt_regs *fp) | |||
1497 | #endif | 1497 | #endif |
1498 | printf("pc = "); | 1498 | printf("pc = "); |
1499 | xmon_print_symbol(fp->nip, " ", "\n"); | 1499 | xmon_print_symbol(fp->nip, " ", "\n"); |
1500 | if (TRAP(fp) != 0xc00 && cpu_has_feature(CPU_FTR_CFAR)) { | ||
1501 | printf("cfar= "); | ||
1502 | xmon_print_symbol(fp->orig_gpr3, " ", "\n"); | ||
1503 | } | ||
1500 | printf("lr = "); | 1504 | printf("lr = "); |
1501 | xmon_print_symbol(fp->link, " ", "\n"); | 1505 | xmon_print_symbol(fp->link, " ", "\n"); |
1502 | printf("msr = "REG" cr = %.8lx\n", fp->msr, fp->ccr); | 1506 | printf("msr = "REG" cr = %.8lx\n", fp->msr, fp->ccr); |
@@ -2663,7 +2667,7 @@ static void dump_stab(void) | |||
2663 | 2667 | ||
2664 | void dump_segments(void) | 2668 | void dump_segments(void) |
2665 | { | 2669 | { |
2666 | if (cpu_has_feature(CPU_FTR_SLB)) | 2670 | if (mmu_has_feature(MMU_FTR_SLB)) |
2667 | dump_slb(); | 2671 | dump_slb(); |
2668 | else | 2672 | else |
2669 | dump_stab(); | 2673 | dump_stab(); |
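Editorial note: the xmon hunks are largely a mechanical move from the old cpus_*/cpu_* operators on cpumask_t values to the cpumask_* accessors taking a struct cpumask pointer, plus switching the 64-bit-mode MSR tests from MSR_SF to MSR_64BIT. A small sketch of the accessor style the file now uses; demo_mask and demo_track are stand-ins, not xmon's own symbols.

#include <linux/cpumask.h>
#include <linux/kernel.h>

static struct cpumask demo_mask;

static void demo_track(int cpu)
{
	cpumask_set_cpu(cpu, &demo_mask);		/* was cpu_set() */

	if (cpumask_test_cpu(cpu, &demo_mask))		/* was cpu_isset() */
		pr_info("%u cpus tracked\n", cpumask_weight(&demo_mask));

	cpumask_clear_cpu(cpu, &demo_mask);		/* was cpu_clear() */

	if (cpumask_empty(&demo_mask))			/* was cpus_empty() */
		pr_info("mask now empty\n");
}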