Diffstat (limited to 'arch/ppc64')
87 files changed, 3831 insertions, 2215 deletions
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index 5cb343883e4d..f804f25232ac 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -77,6 +77,10 @@ config PPC_PSERIES | |||
77 | bool " IBM pSeries & new iSeries" | 77 | bool " IBM pSeries & new iSeries" |
78 | default y | 78 | default y |
79 | 79 | ||
80 | config PPC_BPA | ||
81 | bool " Broadband Processor Architecture" | ||
82 | depends on PPC_MULTIPLATFORM | ||
83 | |||
80 | config PPC_PMAC | 84 | config PPC_PMAC |
81 | depends on PPC_MULTIPLATFORM | 85 | depends on PPC_MULTIPLATFORM |
82 | bool " Apple G5 based machines" | 86 | bool " Apple G5 based machines" |
@@ -106,6 +110,21 @@ config PPC_OF | |||
106 | bool | 110 | bool |
107 | default y | 111 | default y |
108 | 112 | ||
113 | config XICS | ||
114 | depends on PPC_PSERIES | ||
115 | bool | ||
116 | default y | ||
117 | |||
118 | config MPIC | ||
119 | depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE | ||
120 | bool | ||
121 | default y | ||
122 | |||
123 | config BPA_IIC | ||
124 | depends on PPC_BPA | ||
125 | bool | ||
126 | default y | ||
127 | |||
109 | # VMX is pSeries only for now until somebody writes the iSeries | 128 | # VMX is pSeries only for now until somebody writes the iSeries |
110 | # exception vectors for it | 129 | # exception vectors for it |
111 | config ALTIVEC | 130 | config ALTIVEC |
@@ -123,6 +142,23 @@ config PPC_SPLPAR | |||
123 | processors, that is, which share physical processors between | 142 | processors, that is, which share physical processors between |
124 | two or more partitions. | 143 | two or more partitions. |
125 | 144 | ||
145 | config KEXEC | ||
146 | bool "kexec system call (EXPERIMENTAL)" | ||
147 | depends on PPC_MULTIPLATFORM && EXPERIMENTAL | ||
148 | help | ||
149 | kexec is a system call that implements the ability to shut down your | ||
150 | current kernel, and to start another kernel. It is like a reboot | ||
151 | but it is independent of the system firmware. And like a reboot | ||
152 | you can start any kernel with it, not just Linux. | ||
153 | |||
154 | The name comes from the similarity to the exec system call. | ||
155 | |||
156 | It is an ongoing process to be certain the hardware in a machine | ||
157 | is properly shut down, so do not be surprised if this code does not | ||
158 | initially work for you. It may help to enable device hotplugging | ||
159 | support. As of this writing the exact hardware interface is | ||
160 | strongly in flux, so no good recommendation can be made. | ||
161 | |||
126 | config IBMVIO | 162 | config IBMVIO |
127 | depends on PPC_PSERIES || PPC_ISERIES | 163 | depends on PPC_PSERIES || PPC_ISERIES |
128 | bool | 164 | bool |
@@ -198,13 +234,49 @@ config HMT | |||
198 | This option enables hardware multithreading on RS64 cpus. | 234 | This option enables hardware multithreading on RS64 cpus. |
199 | pSeries systems p620 and p660 have such a cpu type. | 235 | pSeries systems p620 and p660 have such a cpu type. |
200 | 236 | ||
201 | config DISCONTIGMEM | 237 | config ARCH_SELECT_MEMORY_MODEL |
202 | bool "Discontiguous Memory Support" | 238 | def_bool y |
239 | |||
240 | config ARCH_FLATMEM_ENABLE | ||
241 | def_bool y | ||
242 | depends on !NUMA | ||
243 | |||
244 | config ARCH_DISCONTIGMEM_ENABLE | ||
245 | def_bool y | ||
203 | depends on SMP && PPC_PSERIES | 246 | depends on SMP && PPC_PSERIES |
204 | 247 | ||
248 | config ARCH_DISCONTIGMEM_DEFAULT | ||
249 | def_bool y | ||
250 | depends on ARCH_DISCONTIGMEM_ENABLE | ||
251 | |||
252 | config ARCH_FLATMEM_ENABLE | ||
253 | def_bool y | ||
254 | |||
255 | config ARCH_SPARSEMEM_ENABLE | ||
256 | def_bool y | ||
257 | depends on ARCH_DISCONTIGMEM_ENABLE | ||
258 | |||
259 | source "mm/Kconfig" | ||
260 | |||
261 | config HAVE_ARCH_EARLY_PFN_TO_NID | ||
262 | def_bool y | ||
263 | depends on NEED_MULTIPLE_NODES | ||
264 | |||
265 | # Some NUMA nodes have memory ranges that span | ||
266 | # other nodes. Even though a pfn is valid and | ||
267 | # between a node's start and end pfns, it may not | ||
268 | # reside on that node. | ||
269 | # | ||
270 | # This is a relatively temporary hack that should | ||
271 | # be able to go away when sparsemem is fully in | ||
272 | # place | ||
273 | config NODES_SPAN_OTHER_NODES | ||
274 | def_bool y | ||
275 | depends on NEED_MULTIPLE_NODES | ||
276 | |||
205 | config NUMA | 277 | config NUMA |
206 | bool "NUMA support" | 278 | bool "NUMA support" |
207 | depends on DISCONTIGMEM | 279 | default y if DISCONTIGMEM || SPARSEMEM |
208 | 280 | ||
209 | config SCHED_SMT | 281 | config SCHED_SMT |
210 | bool "SMT (Hyperthreading) scheduler support" | 282 | bool "SMT (Hyperthreading) scheduler support" |
@@ -215,26 +287,7 @@ config SCHED_SMT | |||
215 | when dealing with POWER5 cpus at a cost of slightly increased | 287 | when dealing with POWER5 cpus at a cost of slightly increased |
216 | overhead in some places. If unsure say N here. | 288 | overhead in some places. If unsure say N here. |
217 | 289 | ||
218 | config PREEMPT | 290 | source "kernel/Kconfig.preempt" |
219 | bool "Preemptible Kernel" | ||
220 | help | ||
221 | This option reduces the latency of the kernel when reacting to | ||
222 | real-time or interactive events by allowing a low priority process to | ||
223 | be preempted even if it is in kernel mode executing a system call. | ||
224 | |||
225 | Say Y here if you are building a kernel for a desktop, embedded | ||
226 | or real-time system. Say N if you are unsure. | ||
227 | |||
228 | config PREEMPT_BKL | ||
229 | bool "Preempt The Big Kernel Lock" | ||
230 | depends on PREEMPT | ||
231 | default y | ||
232 | help | ||
233 | This option reduces the latency of the kernel by making the | ||
234 | big kernel lock preemptible. | ||
235 | |||
236 | Say Y here if you are building a kernel for a desktop system. | ||
237 | Say N if you are unsure. | ||
238 | 291 | ||
239 | config EEH | 292 | config EEH |
240 | bool "PCI Extended Error Handling (EEH)" if EMBEDDED | 293 | bool "PCI Extended Error Handling (EEH)" if EMBEDDED |
@@ -256,7 +309,7 @@ config MSCHUNKS | |||
256 | 309 | ||
257 | config PPC_RTAS | 310 | config PPC_RTAS |
258 | bool | 311 | bool |
259 | depends on PPC_PSERIES | 312 | depends on PPC_PSERIES || PPC_BPA |
260 | default y | 313 | default y |
261 | 314 | ||
262 | config RTAS_PROC | 315 | config RTAS_PROC |
@@ -323,7 +376,7 @@ config EISA | |||
323 | bool | 376 | bool |
324 | 377 | ||
325 | config PCI | 378 | config PCI |
326 | bool | 379 | bool "support for PCI devices" if (EMBEDDED && PPC_ISERIES) |
327 | default y | 380 | default y |
328 | help | 381 | help |
329 | Find out whether your system includes a PCI bus. PCI is the name of | 382 | Find out whether your system includes a PCI bus. PCI is the name of |
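The NODES_SPAN_OTHER_NODES comment in the hunk above boils down to this: node memory ranges can interleave, so a pfn that lies between a node's first and last pfn does not necessarily belong to that node. A small standalone C sketch of the situation (the ranges and the helper name are made up for illustration; this is not kernel code):

#include <stdio.h>

/* Illustration of interleaved node ranges: node 1 owns a chunk that sits
 * inside the span covered by node 0's first and last pfn. */
struct node_range {
	unsigned long start_pfn;
	unsigned long end_pfn;		/* exclusive */
	int nid;
};

static const struct node_range ranges[] = {
	{ 0x0000, 0x1000, 0 },
	{ 0x1000, 0x2000, 1 },		/* hole in node 0, owned by node 1 */
	{ 0x2000, 0x4000, 0 },
};

/* Walk the ranges to find the owning node, the way an early_pfn_to_nid()
 * style lookup has to, instead of trusting start/end bounds alone. */
static int owning_node(unsigned long pfn)
{
	unsigned int i;

	for (i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++)
		if (pfn >= ranges[i].start_pfn && pfn < ranges[i].end_pfn)
			return ranges[i].nid;
	return -1;
}

int main(void)
{
	unsigned long pfn = 0x1800;

	/* 0x1800 is inside node 0's overall span (0x0000..0x4000) but
	 * actually resides on node 1. */
	printf("pfn 0x%lx -> node %d\n", pfn, owning_node(pfn));
	return 0;
}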
diff --git a/arch/ppc64/Makefile b/arch/ppc64/Makefile
index 691f3008e698..731b84758331 100644
--- a/arch/ppc64/Makefile
+++ b/arch/ppc64/Makefile
@@ -35,9 +35,9 @@ CROSS32AS := $(AS) -a32 | |||
35 | CROSS32LD := $(LD) -m elf32ppc | 35 | CROSS32LD := $(LD) -m elf32ppc |
36 | CROSS32OBJCOPY := $(OBJCOPY) | 36 | CROSS32OBJCOPY := $(OBJCOPY) |
37 | endif | 37 | endif |
38 | AS := $(AS) -a64 | 38 | override AS += -a64 |
39 | LD := $(LD) -m elf64ppc | 39 | override LD += -m elf64ppc |
40 | CC := $(CC) -m64 | 40 | override CC += -m64 |
41 | endif | 41 | endif |
42 | 42 | ||
43 | export CROSS32CC CROSS32AS CROSS32LD CROSS32OBJCOPY | 43 | export CROSS32CC CROSS32AS CROSS32LD CROSS32OBJCOPY |
@@ -90,12 +90,14 @@ boot := arch/ppc64/boot | |||
90 | boottarget-$(CONFIG_PPC_PSERIES) := zImage zImage.initrd | 90 | boottarget-$(CONFIG_PPC_PSERIES) := zImage zImage.initrd |
91 | boottarget-$(CONFIG_PPC_MAPLE) := zImage zImage.initrd | 91 | boottarget-$(CONFIG_PPC_MAPLE) := zImage zImage.initrd |
92 | boottarget-$(CONFIG_PPC_ISERIES) := vmlinux.sminitrd vmlinux.initrd vmlinux.sm | 92 | boottarget-$(CONFIG_PPC_ISERIES) := vmlinux.sminitrd vmlinux.initrd vmlinux.sm |
93 | boottarget-$(CONFIG_PPC_BPA) := zImage zImage.initrd | ||
93 | $(boottarget-y): vmlinux | 94 | $(boottarget-y): vmlinux |
94 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ | 95 | $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ |
95 | 96 | ||
96 | bootimage-$(CONFIG_PPC_PSERIES) := $(boot)/zImage | 97 | bootimage-$(CONFIG_PPC_PSERIES) := $(boot)/zImage |
97 | bootimage-$(CONFIG_PPC_PMAC) := vmlinux | 98 | bootimage-$(CONFIG_PPC_PMAC) := vmlinux |
98 | bootimage-$(CONFIG_PPC_MAPLE) := $(boot)/zImage | 99 | bootimage-$(CONFIG_PPC_MAPLE) := $(boot)/zImage |
100 | bootimage-$(CONFIG_PPC_BPA) := zImage | ||
99 | bootimage-$(CONFIG_PPC_ISERIES) := vmlinux | 101 | bootimage-$(CONFIG_PPC_ISERIES) := vmlinux |
100 | BOOTIMAGE := $(bootimage-y) | 102 | BOOTIMAGE := $(bootimage-y) |
101 | install: vmlinux | 103 | install: vmlinux |
diff --git a/arch/ppc64/boot/Makefile b/arch/ppc64/boot/Makefile
index d3e1d6af9203..683b2d43c15f 100644
--- a/arch/ppc64/boot/Makefile
+++ b/arch/ppc64/boot/Makefile
@@ -52,7 +52,7 @@ obj-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.o, $(section))) | |||
52 | src-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.c, $(section))) | 52 | src-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.c, $(section))) |
53 | gz-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.gz, $(section))) | 53 | gz-sec = $(foreach section, $(1), $(patsubst %,$(obj)/kernel-%.gz, $(section))) |
54 | 54 | ||
55 | hostprogs-y := piggy addnote addRamDisk | 55 | hostprogs-y := addnote addRamDisk |
56 | targets += zImage zImage.initrd imagesize.c \ | 56 | targets += zImage zImage.initrd imagesize.c \ |
57 | $(patsubst $(obj)/%,%, $(call obj-sec, $(required) $(initrd))) \ | 57 | $(patsubst $(obj)/%,%, $(call obj-sec, $(required) $(initrd))) \ |
58 | $(patsubst $(obj)/%,%, $(call src-sec, $(required) $(initrd))) \ | 58 | $(patsubst $(obj)/%,%, $(call src-sec, $(required) $(initrd))) \ |
@@ -78,9 +78,6 @@ addsection = $(CROSS32OBJCOPY) $(1) \ | |||
78 | quiet_cmd_addnote = ADDNOTE $@ | 78 | quiet_cmd_addnote = ADDNOTE $@ |
79 | cmd_addnote = $(CROSS32LD) $(BOOTLFLAGS) -o $@ $(obj-boot) && $(obj)/addnote $@ | 79 | cmd_addnote = $(CROSS32LD) $(BOOTLFLAGS) -o $@ $(obj-boot) && $(obj)/addnote $@ |
80 | 80 | ||
81 | quiet_cmd_piggy = PIGGY $@ | ||
82 | cmd_piggy = $(obj)/piggyback $(@:.o=) < $< | $(CROSS32AS) -o $@ | ||
83 | |||
84 | $(call gz-sec, $(required)): $(obj)/kernel-%.gz: % FORCE | 81 | $(call gz-sec, $(required)): $(obj)/kernel-%.gz: % FORCE |
85 | $(call if_changed,gzip) | 82 | $(call if_changed,gzip) |
86 | 83 | ||
diff --git a/arch/ppc64/boot/install.sh b/arch/ppc64/boot/install.sh
index 955c5681db6c..cb2d6626b555 100644
--- a/arch/ppc64/boot/install.sh
+++ b/arch/ppc64/boot/install.sh
@@ -22,8 +22,8 @@ | |||
22 | 22 | ||
23 | # User may have a custom install script | 23 | # User may have a custom install script |
24 | 24 | ||
25 | if [ -x ~/bin/installkernel ]; then exec ~/bin/installkernel "$@"; fi | 25 | if [ -x ~/bin/${CROSS_COMPILE}installkernel ]; then exec ~/bin/${CROSS_COMPILE}installkernel "$@"; fi |
26 | if [ -x /sbin/installkernel ]; then exec /sbin/installkernel "$@"; fi | 26 | if [ -x /sbin/${CROSS_COMPILE}installkernel ]; then exec /sbin/${CROSS_COMPILE}installkernel "$@"; fi |
27 | 27 | ||
28 | # Default install | 28 | # Default install |
29 | 29 | ||
diff --git a/arch/ppc64/boot/main.c b/arch/ppc64/boot/main.c
index da12ea2ca464..199d9804f61c 100644
--- a/arch/ppc64/boot/main.c
+++ b/arch/ppc64/boot/main.c
@@ -17,7 +17,6 @@ | |||
17 | 17 | ||
18 | extern void *finddevice(const char *); | 18 | extern void *finddevice(const char *); |
19 | extern int getprop(void *, const char *, void *, int); | 19 | extern int getprop(void *, const char *, void *, int); |
20 | extern void printk(char *fmt, ...); | ||
21 | extern void printf(const char *fmt, ...); | 20 | extern void printf(const char *fmt, ...); |
22 | extern int sprintf(char *buf, const char *fmt, ...); | 21 | extern int sprintf(char *buf, const char *fmt, ...); |
23 | void gunzip(void *, int, unsigned char *, int *); | 22 | void gunzip(void *, int, unsigned char *, int *); |
@@ -147,10 +146,10 @@ void start(unsigned long a1, unsigned long a2, void *promptr) | |||
147 | } | 146 | } |
148 | a1 = initrd.addr; | 147 | a1 = initrd.addr; |
149 | a2 = initrd.size; | 148 | a2 = initrd.size; |
150 | printf("initial ramdisk moving 0x%lx <- 0x%lx (%lx bytes)\n\r", | 149 | printf("initial ramdisk moving 0x%lx <- 0x%lx (0x%lx bytes)\n\r", |
151 | initrd.addr, (unsigned long)_initrd_start, initrd.size); | 150 | initrd.addr, (unsigned long)_initrd_start, initrd.size); |
152 | memmove((void *)initrd.addr, (void *)_initrd_start, initrd.size); | 151 | memmove((void *)initrd.addr, (void *)_initrd_start, initrd.size); |
153 | printf("initrd head: 0x%lx\n\r", *((u32 *)initrd.addr)); | 152 | printf("initrd head: 0x%lx\n\r", *((unsigned long *)initrd.addr)); |
154 | } | 153 | } |
155 | 154 | ||
156 | /* Eventually gunzip the kernel */ | 155 | /* Eventually gunzip the kernel */ |
@@ -201,9 +200,6 @@ void start(unsigned long a1, unsigned long a2, void *promptr) | |||
201 | 200 | ||
202 | flush_cache((void *)vmlinux.addr, vmlinux.size); | 201 | flush_cache((void *)vmlinux.addr, vmlinux.size); |
203 | 202 | ||
204 | if (a1) | ||
205 | printf("initrd head: 0x%lx\n\r", *((u32 *)initrd.addr)); | ||
206 | |||
207 | kernel_entry = (kernel_entry_t)vmlinux.addr; | 203 | kernel_entry = (kernel_entry_t)vmlinux.addr; |
208 | #ifdef DEBUG | 204 | #ifdef DEBUG |
209 | printf( "kernel:\n\r" | 205 | printf( "kernel:\n\r" |
diff --git a/arch/ppc64/boot/mknote.c b/arch/ppc64/boot/mknote.c
deleted file mode 100644
index 120cc1d89739..000000000000
--- a/arch/ppc64/boot/mknote.c
+++ /dev/null
@@ -1,43 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) Cort Dougan 1999. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | * | ||
9 | * Generate a note section as per the CHRP specification. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #include <stdio.h> | ||
14 | |||
15 | #define PL(x) printf("%c%c%c%c", ((x)>>24)&0xff, ((x)>>16)&0xff, ((x)>>8)&0xff, (x)&0xff ); | ||
16 | |||
17 | int main(void) | ||
18 | { | ||
19 | /* header */ | ||
20 | /* namesz */ | ||
21 | PL(strlen("PowerPC")+1); | ||
22 | /* descrsz */ | ||
23 | PL(6*4); | ||
24 | /* type */ | ||
25 | PL(0x1275); | ||
26 | /* name */ | ||
27 | printf("PowerPC"); printf("%c", 0); | ||
28 | |||
29 | /* descriptor */ | ||
30 | /* real-mode */ | ||
31 | PL(0xffffffff); | ||
32 | /* real-base */ | ||
33 | PL(0x00c00000); | ||
34 | /* real-size */ | ||
35 | PL(0xffffffff); | ||
36 | /* virt-base */ | ||
37 | PL(0xffffffff); | ||
38 | /* virt-size */ | ||
39 | PL(0xffffffff); | ||
40 | /* load-base */ | ||
41 | PL(0x4000); | ||
42 | return 0; | ||
43 | } | ||
diff --git a/arch/ppc64/boot/piggyback.c b/arch/ppc64/boot/piggyback.c
deleted file mode 100644
index 235c7a87269c..000000000000
--- a/arch/ppc64/boot/piggyback.c
+++ /dev/null
@@ -1,83 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright 2001 IBM Corp | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public License | ||
6 | * as published by the Free Software Foundation; either version | ||
7 | * 2 of the License, or (at your option) any later version. | ||
8 | */ | ||
9 | #include <stdio.h> | ||
10 | #include <unistd.h> | ||
11 | #include <string.h> | ||
12 | |||
13 | extern long ce_exec_config[]; | ||
14 | |||
15 | int main(int argc, char *argv[]) | ||
16 | { | ||
17 | int i, cnt, pos, len; | ||
18 | unsigned int cksum, val; | ||
19 | unsigned char *lp; | ||
20 | unsigned char buf[8192]; | ||
21 | char *varname; | ||
22 | if (argc != 2) | ||
23 | { | ||
24 | fprintf(stderr, "usage: %s name <in-file >out-file\n", | ||
25 | argv[0]); | ||
26 | exit(1); | ||
27 | } | ||
28 | |||
29 | varname = strrchr(argv[1], '/'); | ||
30 | if (varname) | ||
31 | varname++; | ||
32 | else | ||
33 | varname = argv[1]; | ||
34 | |||
35 | fprintf(stdout, "#\n"); | ||
36 | fprintf(stdout, "# Miscellaneous data structures:\n"); | ||
37 | fprintf(stdout, "# WARNING - this file is automatically generated!\n"); | ||
38 | fprintf(stdout, "#\n"); | ||
39 | fprintf(stdout, "\n"); | ||
40 | fprintf(stdout, "\t.data\n"); | ||
41 | fprintf(stdout, "\t.globl %s_data\n", varname); | ||
42 | fprintf(stdout, "%s_data:\n", varname); | ||
43 | pos = 0; | ||
44 | cksum = 0; | ||
45 | while ((len = read(0, buf, sizeof(buf))) > 0) | ||
46 | { | ||
47 | cnt = 0; | ||
48 | lp = (unsigned char *)buf; | ||
49 | len = (len + 3) & ~3; /* Round up to longwords */ | ||
50 | for (i = 0; i < len; i += 4) | ||
51 | { | ||
52 | if (cnt == 0) | ||
53 | { | ||
54 | fprintf(stdout, "\t.long\t"); | ||
55 | } | ||
56 | fprintf(stdout, "0x%02X%02X%02X%02X", lp[0], lp[1], lp[2], lp[3]); | ||
57 | val = *(unsigned long *)lp; | ||
58 | cksum ^= val; | ||
59 | lp += 4; | ||
60 | if (++cnt == 4) | ||
61 | { | ||
62 | cnt = 0; | ||
63 | fprintf(stdout, " # %x \n", pos+i-12); | ||
64 | fflush(stdout); | ||
65 | } else | ||
66 | { | ||
67 | fprintf(stdout, ","); | ||
68 | } | ||
69 | } | ||
70 | if (cnt) | ||
71 | { | ||
72 | fprintf(stdout, "0\n"); | ||
73 | } | ||
74 | pos += len; | ||
75 | } | ||
76 | fprintf(stdout, "\t.globl %s_len\n", varname); | ||
77 | fprintf(stdout, "%s_len:\t.long\t0x%x\n", varname, pos); | ||
78 | fflush(stdout); | ||
79 | fclose(stdout); | ||
80 | fprintf(stderr, "cksum = %x\n", cksum); | ||
81 | exit(0); | ||
82 | } | ||
83 | |||
diff --git a/arch/ppc64/boot/prom.c b/arch/ppc64/boot/prom.c
index d5218b15824e..5e48b80ff5a0 100644
--- a/arch/ppc64/boot/prom.c
+++ b/arch/ppc64/boot/prom.c
@@ -40,7 +40,7 @@ void *finddevice(const char *name); | |||
40 | int getprop(void *phandle, const char *name, void *buf, int buflen); | 40 | int getprop(void *phandle, const char *name, void *buf, int buflen); |
41 | void chrpboot(int a1, int a2, void *prom); /* in main.c */ | 41 | void chrpboot(int a1, int a2, void *prom); /* in main.c */ |
42 | 42 | ||
43 | void printk(char *fmt, ...); | 43 | int printf(char *fmt, ...); |
44 | 44 | ||
45 | /* there is no convenient header to get this from... -- paulus */ | 45 | /* there is no convenient header to get this from... -- paulus */ |
46 | extern unsigned long strlen(const char *); | 46 | extern unsigned long strlen(const char *); |
@@ -220,7 +220,7 @@ readchar(void) | |||
220 | case 1: | 220 | case 1: |
221 | return ch; | 221 | return ch; |
222 | case -1: | 222 | case -1: |
223 | printk("read(stdin) returned -1\r\n"); | 223 | printf("read(stdin) returned -1\r\n"); |
224 | return -1; | 224 | return -1; |
225 | } | 225 | } |
226 | } | 226 | } |
@@ -627,18 +627,6 @@ int sprintf(char * buf, const char *fmt, ...) | |||
627 | 627 | ||
628 | static char sprint_buf[1024]; | 628 | static char sprint_buf[1024]; |
629 | 629 | ||
630 | void | ||
631 | printk(char *fmt, ...) | ||
632 | { | ||
633 | va_list args; | ||
634 | int n; | ||
635 | |||
636 | va_start(args, fmt); | ||
637 | n = vsprintf(sprint_buf, fmt, args); | ||
638 | va_end(args); | ||
639 | write(stdout, sprint_buf, n); | ||
640 | } | ||
641 | |||
642 | int | 630 | int |
643 | printf(char *fmt, ...) | 631 | printf(char *fmt, ...) |
644 | { | 632 | { |
diff --git a/arch/ppc64/configs/pSeries_defconfig b/arch/ppc64/configs/pSeries_defconfig
index 3eb5ef25d3a3..d0db8b5966c0 100644
--- a/arch/ppc64/configs/pSeries_defconfig
+++ b/arch/ppc64/configs/pSeries_defconfig
@@ -88,7 +88,7 @@ CONFIG_IBMVIO=y | |||
88 | CONFIG_IOMMU_VMERGE=y | 88 | CONFIG_IOMMU_VMERGE=y |
89 | CONFIG_SMP=y | 89 | CONFIG_SMP=y |
90 | CONFIG_NR_CPUS=128 | 90 | CONFIG_NR_CPUS=128 |
91 | CONFIG_DISCONTIGMEM=y | 91 | CONFIG_ARCH_DISCONTIGMEM_ENABLE=y |
92 | CONFIG_NUMA=y | 92 | CONFIG_NUMA=y |
93 | CONFIG_SCHED_SMT=y | 93 | CONFIG_SCHED_SMT=y |
94 | # CONFIG_PREEMPT is not set | 94 | # CONFIG_PREEMPT is not set |
diff --git a/arch/ppc64/defconfig b/arch/ppc64/defconfig
index 2f31bf3046f9..b8e2066dde77 100644
--- a/arch/ppc64/defconfig
+++ b/arch/ppc64/defconfig
@@ -89,7 +89,7 @@ CONFIG_BOOTX_TEXT=y | |||
89 | CONFIG_IOMMU_VMERGE=y | 89 | CONFIG_IOMMU_VMERGE=y |
90 | CONFIG_SMP=y | 90 | CONFIG_SMP=y |
91 | CONFIG_NR_CPUS=32 | 91 | CONFIG_NR_CPUS=32 |
92 | CONFIG_DISCONTIGMEM=y | 92 | CONFIG_ARCH_DISCONTIGMEM_ENABLE=y |
93 | # CONFIG_NUMA is not set | 93 | # CONFIG_NUMA is not set |
94 | # CONFIG_SCHED_SMT is not set | 94 | # CONFIG_SCHED_SMT is not set |
95 | # CONFIG_PREEMPT is not set | 95 | # CONFIG_PREEMPT is not set |
diff --git a/arch/ppc64/kernel/HvLpEvent.c b/arch/ppc64/kernel/HvLpEvent.c
index f8f19637f73f..90032b138902 100644
--- a/arch/ppc64/kernel/HvLpEvent.c
+++ b/arch/ppc64/kernel/HvLpEvent.c
@@ -12,7 +12,7 @@ | |||
12 | #include <asm/system.h> | 12 | #include <asm/system.h> |
13 | #include <asm/iSeries/HvLpEvent.h> | 13 | #include <asm/iSeries/HvLpEvent.h> |
14 | #include <asm/iSeries/HvCallEvent.h> | 14 | #include <asm/iSeries/HvCallEvent.h> |
15 | #include <asm/iSeries/LparData.h> | 15 | #include <asm/iSeries/ItLpNaca.h> |
16 | 16 | ||
17 | /* Array of LpEvent handler functions */ | 17 | /* Array of LpEvent handler functions */ |
18 | LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes]; | 18 | LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes]; |
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/ppc64/kernel/ItLpQueue.c
index c923a815760e..4231861288a3 100644
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ b/arch/ppc64/kernel/ItLpQueue.c
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * ItLpQueue.c | 2 | * ItLpQueue.c |
3 | * Copyright (C) 2001 Mike Corrigan IBM Corporation | 3 | * Copyright (C) 2001 Mike Corrigan IBM Corporation |
4 | * | 4 | * |
5 | * This program is free software; you can redistribute it and/or modify | 5 | * This program is free software; you can redistribute it and/or modify |
6 | * it under the terms of the GNU General Public License as published by | 6 | * it under the terms of the GNU General Public License as published by |
7 | * the Free Software Foundation; either version 2 of the License, or | 7 | * the Free Software Foundation; either version 2 of the License, or |
@@ -11,157 +11,252 @@ | |||
11 | #include <linux/stddef.h> | 11 | #include <linux/stddef.h> |
12 | #include <linux/kernel.h> | 12 | #include <linux/kernel.h> |
13 | #include <linux/sched.h> | 13 | #include <linux/sched.h> |
14 | #include <linux/bootmem.h> | ||
15 | #include <linux/seq_file.h> | ||
16 | #include <linux/proc_fs.h> | ||
14 | #include <asm/system.h> | 17 | #include <asm/system.h> |
15 | #include <asm/paca.h> | 18 | #include <asm/paca.h> |
16 | #include <asm/iSeries/ItLpQueue.h> | 19 | #include <asm/iSeries/ItLpQueue.h> |
17 | #include <asm/iSeries/HvLpEvent.h> | 20 | #include <asm/iSeries/HvLpEvent.h> |
18 | #include <asm/iSeries/HvCallEvent.h> | 21 | #include <asm/iSeries/HvCallEvent.h> |
19 | #include <asm/iSeries/LparData.h> | ||
20 | 22 | ||
21 | static __inline__ int set_inUse( struct ItLpQueue * lpQueue ) | 23 | /* |
22 | { | 24 | * The LpQueue is used to pass event data from the hypervisor to |
23 | int t; | 25 | * the partition. This is where I/O interrupt events are communicated. |
24 | u32 * inUseP = &(lpQueue->xInUseWord); | 26 | * |
25 | 27 | * It is written to by the hypervisor so cannot end up in the BSS. | |
26 | __asm__ __volatile__("\n\ | 28 | */ |
27 | 1: lwarx %0,0,%2 \n\ | 29 | struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data"))); |
28 | cmpwi 0,%0,0 \n\ | ||
29 | li %0,0 \n\ | ||
30 | bne- 2f \n\ | ||
31 | addi %0,%0,1 \n\ | ||
32 | stwcx. %0,0,%2 \n\ | ||
33 | bne- 1b \n\ | ||
34 | 2: eieio" | ||
35 | : "=&r" (t), "=m" (lpQueue->xInUseWord) | ||
36 | : "r" (inUseP), "m" (lpQueue->xInUseWord) | ||
37 | : "cc"); | ||
38 | |||
39 | return t; | ||
40 | } | ||
41 | 30 | ||
42 | static __inline__ void clear_inUse( struct ItLpQueue * lpQueue ) | 31 | DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts); |
43 | { | 32 | |
44 | lpQueue->xInUseWord = 0; | 33 | static char *event_types[HvLpEvent_Type_NumTypes] = { |
45 | } | 34 | "Hypervisor", |
35 | "Machine Facilities", | ||
36 | "Session Manager", | ||
37 | "SPD I/O", | ||
38 | "Virtual Bus", | ||
39 | "PCI I/O", | ||
40 | "RIO I/O", | ||
41 | "Virtual Lan", | ||
42 | "Virtual I/O" | ||
43 | }; | ||
46 | 44 | ||
47 | /* Array of LpEvent handler functions */ | 45 | /* Array of LpEvent handler functions */ |
48 | extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes]; | 46 | extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes]; |
49 | unsigned long ItLpQueueInProcess = 0; | ||
50 | 47 | ||
51 | struct HvLpEvent * ItLpQueue_getNextLpEvent( struct ItLpQueue * lpQueue ) | 48 | static struct HvLpEvent * get_next_hvlpevent(void) |
52 | { | 49 | { |
53 | struct HvLpEvent * nextLpEvent = | 50 | struct HvLpEvent * event; |
54 | (struct HvLpEvent *)lpQueue->xSlicCurEventPtr; | 51 | event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr; |
55 | if ( nextLpEvent->xFlags.xValid ) { | 52 | |
53 | if (event->xFlags.xValid) { | ||
56 | /* rmb() needed only for weakly consistent machines (regatta) */ | 54 | /* rmb() needed only for weakly consistent machines (regatta) */ |
57 | rmb(); | 55 | rmb(); |
58 | /* Set pointer to next potential event */ | 56 | /* Set pointer to next potential event */ |
59 | lpQueue->xSlicCurEventPtr += ((nextLpEvent->xSizeMinus1 + | 57 | hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 + |
60 | LpEventAlign ) / | 58 | LpEventAlign) / LpEventAlign) * LpEventAlign; |
61 | LpEventAlign ) * | 59 | |
62 | LpEventAlign; | ||
63 | /* Wrap to beginning if no room at end */ | 60 | /* Wrap to beginning if no room at end */ |
64 | if (lpQueue->xSlicCurEventPtr > lpQueue->xSlicLastValidEventPtr) | 61 | if (hvlpevent_queue.xSlicCurEventPtr > |
65 | lpQueue->xSlicCurEventPtr = lpQueue->xSlicEventStackPtr; | 62 | hvlpevent_queue.xSlicLastValidEventPtr) { |
63 | hvlpevent_queue.xSlicCurEventPtr = | ||
64 | hvlpevent_queue.xSlicEventStackPtr; | ||
65 | } | ||
66 | } else { | ||
67 | event = NULL; | ||
66 | } | 68 | } |
67 | else | ||
68 | nextLpEvent = NULL; | ||
69 | 69 | ||
70 | return nextLpEvent; | 70 | return event; |
71 | } | 71 | } |
72 | 72 | ||
73 | int ItLpQueue_isLpIntPending( struct ItLpQueue * lpQueue ) | 73 | static unsigned long spread_lpevents = NR_CPUS; |
74 | |||
75 | int hvlpevent_is_pending(void) | ||
74 | { | 76 | { |
75 | int retval = 0; | 77 | struct HvLpEvent *next_event; |
76 | struct HvLpEvent * nextLpEvent; | 78 | |
77 | if ( lpQueue ) { | 79 | if (smp_processor_id() >= spread_lpevents) |
78 | nextLpEvent = (struct HvLpEvent *)lpQueue->xSlicCurEventPtr; | 80 | return 0; |
79 | retval = nextLpEvent->xFlags.xValid | lpQueue->xPlicOverflowIntPending; | 81 | |
80 | } | 82 | next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr; |
81 | return retval; | 83 | |
84 | return next_event->xFlags.xValid | | ||
85 | hvlpevent_queue.xPlicOverflowIntPending; | ||
82 | } | 86 | } |
83 | 87 | ||
84 | void ItLpQueue_clearValid( struct HvLpEvent * event ) | 88 | static void hvlpevent_clear_valid(struct HvLpEvent * event) |
85 | { | 89 | { |
86 | /* Clear the valid bit of the event | 90 | /* Tell the Hypervisor that we're done with this event. |
87 | * Also clear bits within this event that might | 91 | * Also clear bits within this event that might look like valid bits. |
88 | * look like valid bits (on 64-byte boundaries) | 92 | * ie. on 64-byte boundaries. |
89 | */ | 93 | */ |
90 | unsigned extra = (( event->xSizeMinus1 + LpEventAlign ) / | 94 | struct HvLpEvent *tmp; |
91 | LpEventAlign ) - 1; | 95 | unsigned extra = ((event->xSizeMinus1 + LpEventAlign) / |
92 | switch ( extra ) { | 96 | LpEventAlign) - 1; |
93 | case 3: | 97 | |
94 | ((struct HvLpEvent*)((char*)event+3*LpEventAlign))->xFlags.xValid=0; | 98 | switch (extra) { |
95 | case 2: | 99 | case 3: |
96 | ((struct HvLpEvent*)((char*)event+2*LpEventAlign))->xFlags.xValid=0; | 100 | tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign); |
97 | case 1: | 101 | tmp->xFlags.xValid = 0; |
98 | ((struct HvLpEvent*)((char*)event+1*LpEventAlign))->xFlags.xValid=0; | 102 | case 2: |
99 | case 0: | 103 | tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign); |
100 | ; | 104 | tmp->xFlags.xValid = 0; |
105 | case 1: | ||
106 | tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign); | ||
107 | tmp->xFlags.xValid = 0; | ||
101 | } | 108 | } |
109 | |||
102 | mb(); | 110 | mb(); |
111 | |||
103 | event->xFlags.xValid = 0; | 112 | event->xFlags.xValid = 0; |
104 | } | 113 | } |
105 | 114 | ||
106 | unsigned ItLpQueue_process( struct ItLpQueue * lpQueue, struct pt_regs *regs ) | 115 | void process_hvlpevents(struct pt_regs *regs) |
107 | { | 116 | { |
108 | unsigned numIntsProcessed = 0; | 117 | struct HvLpEvent * event; |
109 | struct HvLpEvent * nextLpEvent; | ||
110 | 118 | ||
111 | /* If we have recursed, just return */ | 119 | /* If we have recursed, just return */ |
112 | if ( !set_inUse( lpQueue ) ) | 120 | if (!spin_trylock(&hvlpevent_queue.lock)) |
113 | return 0; | 121 | return; |
114 | |||
115 | if (ItLpQueueInProcess == 0) | ||
116 | ItLpQueueInProcess = 1; | ||
117 | else | ||
118 | BUG(); | ||
119 | 122 | ||
120 | for (;;) { | 123 | for (;;) { |
121 | nextLpEvent = ItLpQueue_getNextLpEvent( lpQueue ); | 124 | event = get_next_hvlpevent(); |
122 | if ( nextLpEvent ) { | 125 | if (event) { |
123 | /* Count events to return to caller | 126 | /* Call appropriate handler here, passing |
124 | * and count processed events in lpQueue | ||
125 | */ | ||
126 | ++numIntsProcessed; | ||
127 | lpQueue->xLpIntCount++; | ||
128 | /* Call appropriate handler here, passing | ||
129 | * a pointer to the LpEvent. The handler | 127 | * a pointer to the LpEvent. The handler |
130 | * must make a copy of the LpEvent if it | 128 | * must make a copy of the LpEvent if it |
131 | * needs it in a bottom half. (perhaps for | 129 | * needs it in a bottom half. (perhaps for |
132 | * an ACK) | 130 | * an ACK) |
133 | * | 131 | * |
134 | * Handlers are responsible for ACK processing | 132 | * Handlers are responsible for ACK processing |
135 | * | 133 | * |
136 | * The Hypervisor guarantees that LpEvents will | 134 | * The Hypervisor guarantees that LpEvents will |
137 | * only be delivered with types that we have | 135 | * only be delivered with types that we have |
138 | * registered for, so no type check is necessary | 136 | * registered for, so no type check is necessary |
139 | * here! | 137 | * here! |
140 | */ | 138 | */ |
141 | if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes ) | 139 | if (event->xType < HvLpEvent_Type_NumTypes) |
142 | lpQueue->xLpIntCountByType[nextLpEvent->xType]++; | 140 | __get_cpu_var(hvlpevent_counts)[event->xType]++; |
143 | if ( nextLpEvent->xType < HvLpEvent_Type_NumTypes && | 141 | if (event->xType < HvLpEvent_Type_NumTypes && |
144 | lpEventHandler[nextLpEvent->xType] ) | 142 | lpEventHandler[event->xType]) |
145 | lpEventHandler[nextLpEvent->xType](nextLpEvent, regs); | 143 | lpEventHandler[event->xType](event, regs); |
146 | else | 144 | else |
147 | printk(KERN_INFO "Unexpected Lp Event type=%d\n", nextLpEvent->xType ); | 145 | printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType ); |
148 | 146 | ||
149 | ItLpQueue_clearValid( nextLpEvent ); | 147 | hvlpevent_clear_valid(event); |
150 | } else if ( lpQueue->xPlicOverflowIntPending ) | 148 | } else if (hvlpevent_queue.xPlicOverflowIntPending) |
151 | /* | 149 | /* |
152 | * No more valid events. If overflow events are | 150 | * No more valid events. If overflow events are |
153 | * pending process them | 151 | * pending process them |
154 | */ | 152 | */ |
155 | HvCallEvent_getOverflowLpEvents( lpQueue->xIndex); | 153 | HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex); |
156 | else | 154 | else |
157 | break; | 155 | break; |
158 | } | 156 | } |
159 | 157 | ||
160 | ItLpQueueInProcess = 0; | 158 | spin_unlock(&hvlpevent_queue.lock); |
161 | mb(); | 159 | } |
162 | clear_inUse( lpQueue ); | 160 | |
161 | static int set_spread_lpevents(char *str) | ||
162 | { | ||
163 | unsigned long val = simple_strtoul(str, NULL, 0); | ||
164 | |||
165 | /* | ||
166 | * The parameter is the number of processors to share in processing | ||
167 | * lp events. | ||
168 | */ | ||
169 | if (( val > 0) && (val <= NR_CPUS)) { | ||
170 | spread_lpevents = val; | ||
171 | printk("lpevent processing spread over %ld processors\n", val); | ||
172 | } else { | ||
173 | printk("invalid spread_lpevents %ld\n", val); | ||
174 | } | ||
163 | 175 | ||
164 | get_paca()->lpevent_count += numIntsProcessed; | 176 | return 1; |
177 | } | ||
178 | __setup("spread_lpevents=", set_spread_lpevents); | ||
179 | |||
180 | void setup_hvlpevent_queue(void) | ||
181 | { | ||
182 | void *eventStack; | ||
183 | |||
184 | /* | ||
185 | * Allocate a page for the Event Stack. The Hypervisor needs the | ||
186 | * absolute real address, so we subtract out the KERNELBASE and add | ||
187 | * in the absolute real address of the kernel load area. | ||
188 | */ | ||
189 | eventStack = alloc_bootmem_pages(LpEventStackSize); | ||
190 | memset(eventStack, 0, LpEventStackSize); | ||
191 | |||
192 | /* Invoke the hypervisor to initialize the event stack */ | ||
193 | HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize); | ||
194 | |||
195 | hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack; | ||
196 | hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack; | ||
197 | hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack + | ||
198 | (LpEventStackSize - LpEventMaxSize); | ||
199 | hvlpevent_queue.xIndex = 0; | ||
200 | } | ||
201 | |||
202 | static int proc_lpevents_show(struct seq_file *m, void *v) | ||
203 | { | ||
204 | int cpu, i; | ||
205 | unsigned long sum; | ||
206 | static unsigned long cpu_totals[NR_CPUS]; | ||
207 | |||
208 | /* FIXME: do we care that there's no locking here? */ | ||
209 | sum = 0; | ||
210 | for_each_online_cpu(cpu) { | ||
211 | cpu_totals[cpu] = 0; | ||
212 | for (i = 0; i < HvLpEvent_Type_NumTypes; i++) { | ||
213 | cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i]; | ||
214 | } | ||
215 | sum += cpu_totals[cpu]; | ||
216 | } | ||
217 | |||
218 | seq_printf(m, "LpEventQueue 0\n"); | ||
219 | seq_printf(m, " events processed:\t%lu\n", sum); | ||
220 | |||
221 | for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) { | ||
222 | sum = 0; | ||
223 | for_each_online_cpu(cpu) { | ||
224 | sum += per_cpu(hvlpevent_counts, cpu)[i]; | ||
225 | } | ||
226 | |||
227 | seq_printf(m, " %-20s %10lu\n", event_types[i], sum); | ||
228 | } | ||
229 | |||
230 | seq_printf(m, "\n events processed by processor:\n"); | ||
231 | |||
232 | for_each_online_cpu(cpu) { | ||
233 | seq_printf(m, " CPU%02d %10lu\n", cpu, cpu_totals[cpu]); | ||
234 | } | ||
235 | |||
236 | return 0; | ||
237 | } | ||
238 | |||
239 | static int proc_lpevents_open(struct inode *inode, struct file *file) | ||
240 | { | ||
241 | return single_open(file, proc_lpevents_show, NULL); | ||
242 | } | ||
165 | 243 | ||
166 | return numIntsProcessed; | 244 | static struct file_operations proc_lpevents_operations = { |
245 | .open = proc_lpevents_open, | ||
246 | .read = seq_read, | ||
247 | .llseek = seq_lseek, | ||
248 | .release = single_release, | ||
249 | }; | ||
250 | |||
251 | static int __init proc_lpevents_init(void) | ||
252 | { | ||
253 | struct proc_dir_entry *e; | ||
254 | |||
255 | e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL); | ||
256 | if (e) | ||
257 | e->proc_fops = &proc_lpevents_operations; | ||
258 | |||
259 | return 0; | ||
167 | } | 260 | } |
261 | __initcall(proc_lpevents_init); | ||
262 | |||
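One note on the locking change in the rewrite above: the deleted lwarx/stwcx. sequence in set_inUse() was simply an atomic test-and-set on xInUseWord, used to keep event processing from recursing, and the new code gets the same guarantee from spin_trylock() on hvlpevent_queue.lock. A rough standalone C equivalent of the old guard, written with GCC builtins purely as an illustration (not kernel code):

#include <stdio.h>

static unsigned int in_use_word;	/* plays the role of xInUseWord */

/* Returns 1 if we took ownership (the word was 0), 0 if already in use. */
static int set_in_use(void)
{
	return __sync_bool_compare_and_swap(&in_use_word, 0, 1);
}

static void clear_in_use(void)
{
	__sync_synchronize();		/* like the mb() before clear_inUse() */
	in_use_word = 0;
}

int main(void)
{
	printf("first take:  %d\n", set_in_use());	/* 1: acquired */
	printf("second take: %d\n", set_in_use());	/* 0: re-entry blocked */
	clear_in_use();
	printf("after clear: %d\n", set_in_use());	/* 1: acquired again */
	return 0;
}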
diff --git a/arch/ppc64/kernel/LparData.c b/arch/ppc64/kernel/LparData.c
index badc5a443614..6ffcf67dd507 100644
--- a/arch/ppc64/kernel/LparData.c
+++ b/arch/ppc64/kernel/LparData.c
@@ -28,13 +28,6 @@ | |||
28 | #include <asm/iSeries/IoHriProcessorVpd.h> | 28 | #include <asm/iSeries/IoHriProcessorVpd.h> |
29 | #include <asm/iSeries/ItSpCommArea.h> | 29 | #include <asm/iSeries/ItSpCommArea.h> |
30 | 30 | ||
31 | /* The LpQueue is used to pass event data from the hypervisor to | ||
32 | * the partition. This is where I/O interrupt events are communicated. | ||
33 | */ | ||
34 | |||
35 | /* May be filled in by the hypervisor so cannot end up in the BSS */ | ||
36 | struct ItLpQueue xItLpQueue __attribute__((__section__(".data"))); | ||
37 | |||
38 | 31 | ||
39 | /* The HvReleaseData is the root of the information shared between | 32 | /* The HvReleaseData is the root of the information shared between |
40 | * the hypervisor and Linux. | 33 | * the hypervisor and Linux. |
@@ -200,7 +193,7 @@ struct ItVpdAreas itVpdAreas = { | |||
200 | 0,0,0, /* 13 - 15 */ | 193 | 0,0,0, /* 13 - 15 */ |
201 | sizeof(struct IoHriProcessorVpd),/* 16 length of Proc Vpd */ | 194 | sizeof(struct IoHriProcessorVpd),/* 16 length of Proc Vpd */ |
202 | 0,0,0,0,0,0, /* 17 - 22 */ | 195 | 0,0,0,0,0,0, /* 17 - 22 */ |
203 | sizeof(struct ItLpQueue),/* 23 length of Lp Queue */ | 196 | sizeof(struct hvlpevent_queue), /* 23 length of Lp Queue */ |
204 | 0,0 /* 24 - 25 */ | 197 | 0,0 /* 24 - 25 */ |
205 | }, | 198 | }, |
206 | .xSlicVpdAdrs = { /* VPD addresses */ | 199 | .xSlicVpdAdrs = { /* VPD addresses */ |
@@ -218,7 +211,7 @@ struct ItVpdAreas itVpdAreas = { | |||
218 | 0,0,0, /* 13 - 15 */ | 211 | 0,0,0, /* 13 - 15 */ |
219 | &xIoHriProcessorVpd, /* 16 Proc Vpd */ | 212 | &xIoHriProcessorVpd, /* 16 Proc Vpd */ |
220 | 0,0,0,0,0,0, /* 17 - 22 */ | 213 | 0,0,0,0,0,0, /* 17 - 22 */ |
221 | &xItLpQueue, /* 23 Lp Queue */ | 214 | &hvlpevent_queue, /* 23 Lp Queue */ |
222 | 0,0 | 215 | 0,0 |
223 | } | 216 | } |
224 | }; | 217 | }; |
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index 96d90b0c5119..d9b2660ef221 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -16,29 +16,33 @@ obj-y += vdso32/ vdso64/ | |||
16 | 16 | ||
17 | obj-$(CONFIG_PPC_OF) += of_device.o | 17 | obj-$(CONFIG_PPC_OF) += of_device.o |
18 | 18 | ||
19 | pci-obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_pci_reset.o | 19 | pci-obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_irq.o \ |
20 | iSeries_VpdInfo.o | ||
20 | pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o | 21 | pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o |
21 | 22 | ||
22 | obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y) | 23 | obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y) |
23 | 24 | ||
24 | obj-$(CONFIG_PPC_ISERIES) += iSeries_irq.o \ | 25 | obj-$(CONFIG_PPC_ISERIES) += HvCall.o HvLpConfig.o LparData.o \ |
25 | iSeries_VpdInfo.o XmPciLpEvent.o \ | ||
26 | HvCall.o HvLpConfig.o LparData.o \ | ||
27 | iSeries_setup.o ItLpQueue.o hvCall.o \ | 26 | iSeries_setup.o ItLpQueue.o hvCall.o \ |
28 | mf.o HvLpEvent.o iSeries_proc.o iSeries_htab.o \ | 27 | mf.o HvLpEvent.o iSeries_proc.o iSeries_htab.o \ |
29 | iSeries_iommu.o | 28 | iSeries_iommu.o |
30 | 29 | ||
31 | obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o prom.o mpic.o | 30 | obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o prom.o |
32 | 31 | ||
33 | obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \ | 32 | obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \ |
34 | pSeries_nvram.o rtasd.o ras.o pSeries_reconfig.o \ | 33 | pSeries_nvram.o rtasd.o ras.o pSeries_reconfig.o \ |
35 | xics.o rtas.o pSeries_setup.o pSeries_iommu.o | 34 | pSeries_setup.o pSeries_iommu.o |
36 | 35 | ||
36 | obj-$(CONFIG_PPC_BPA) += bpa_setup.o bpa_iommu.o bpa_nvram.o \ | ||
37 | bpa_iic.o spider-pic.o | ||
38 | |||
39 | obj-$(CONFIG_KEXEC) += machine_kexec.o | ||
37 | obj-$(CONFIG_EEH) += eeh.o | 40 | obj-$(CONFIG_EEH) += eeh.o |
38 | obj-$(CONFIG_PROC_FS) += proc_ppc64.o | 41 | obj-$(CONFIG_PROC_FS) += proc_ppc64.o |
39 | obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o | 42 | obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o |
40 | obj-$(CONFIG_SMP) += smp.o | 43 | obj-$(CONFIG_SMP) += smp.o |
41 | obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o | 44 | obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o |
45 | obj-$(CONFIG_PPC_RTAS) += rtas.o rtas_pci.o | ||
42 | obj-$(CONFIG_RTAS_PROC) += rtas-proc.o | 46 | obj-$(CONFIG_RTAS_PROC) += rtas-proc.o |
43 | obj-$(CONFIG_SCANLOG) += scanlog.o | 47 | obj-$(CONFIG_SCANLOG) += scanlog.o |
44 | obj-$(CONFIG_VIOPATH) += viopath.o | 48 | obj-$(CONFIG_VIOPATH) += viopath.o |
@@ -47,6 +51,8 @@ obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o | |||
47 | obj-$(CONFIG_BOOTX_TEXT) += btext.o | 51 | obj-$(CONFIG_BOOTX_TEXT) += btext.o |
48 | obj-$(CONFIG_HVCS) += hvcserver.o | 52 | obj-$(CONFIG_HVCS) += hvcserver.o |
49 | obj-$(CONFIG_IBMVIO) += vio.o | 53 | obj-$(CONFIG_IBMVIO) += vio.o |
54 | obj-$(CONFIG_XICS) += xics.o | ||
55 | obj-$(CONFIG_MPIC) += mpic.o | ||
50 | 56 | ||
51 | obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \ | 57 | obj-$(CONFIG_PPC_PMAC) += pmac_setup.o pmac_feature.o pmac_pci.o \ |
52 | pmac_time.o pmac_nvram.o pmac_low_i2c.o | 58 | pmac_time.o pmac_nvram.o pmac_low_i2c.o |
@@ -59,6 +65,7 @@ ifdef CONFIG_SMP | |||
59 | obj-$(CONFIG_PPC_PMAC) += pmac_smp.o smp-tbsync.o | 65 | obj-$(CONFIG_PPC_PMAC) += pmac_smp.o smp-tbsync.o |
60 | obj-$(CONFIG_PPC_ISERIES) += iSeries_smp.o | 66 | obj-$(CONFIG_PPC_ISERIES) += iSeries_smp.o |
61 | obj-$(CONFIG_PPC_PSERIES) += pSeries_smp.o | 67 | obj-$(CONFIG_PPC_PSERIES) += pSeries_smp.o |
68 | obj-$(CONFIG_PPC_BPA) += pSeries_smp.o | ||
62 | obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o | 69 | obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o |
63 | endif | 70 | endif |
64 | 71 | ||
diff --git a/arch/ppc64/kernel/XmPciLpEvent.c b/arch/ppc64/kernel/XmPciLpEvent.c
deleted file mode 100644
index 809c9bc6678b..000000000000
--- a/arch/ppc64/kernel/XmPciLpEvent.c
+++ /dev/null
@@ -1,190 +0,0 @@ | |||
1 | /* | ||
2 | * File XmPciLpEvent.h created by Wayne Holm on Mon Jan 15 2001. | ||
3 | * | ||
4 | * This module handles PCI interrupt events sent by the iSeries Hypervisor. | ||
5 | */ | ||
6 | |||
7 | #include <linux/config.h> | ||
8 | #include <linux/pci.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/threads.h> | ||
11 | #include <linux/smp.h> | ||
12 | #include <linux/param.h> | ||
13 | #include <linux/string.h> | ||
14 | #include <linux/bootmem.h> | ||
15 | #include <linux/ide.h> | ||
16 | |||
17 | #include <asm/iSeries/HvTypes.h> | ||
18 | #include <asm/iSeries/HvLpEvent.h> | ||
19 | #include <asm/iSeries/HvCallPci.h> | ||
20 | #include <asm/iSeries/XmPciLpEvent.h> | ||
21 | #include <asm/ppcdebug.h> | ||
22 | |||
23 | static long Pci_Interrupt_Count; | ||
24 | static long Pci_Event_Count; | ||
25 | |||
26 | enum XmPciLpEvent_Subtype { | ||
27 | XmPciLpEvent_BusCreated = 0, // PHB has been created | ||
28 | XmPciLpEvent_BusError = 1, // PHB has failed | ||
29 | XmPciLpEvent_BusFailed = 2, // Msg to Secondary, Primary failed bus | ||
30 | XmPciLpEvent_NodeFailed = 4, // Multi-adapter bridge has failed | ||
31 | XmPciLpEvent_NodeRecovered = 5, // Multi-adapter bridge has recovered | ||
32 | XmPciLpEvent_BusRecovered = 12, // PHB has been recovered | ||
33 | XmPciLpEvent_UnQuiesceBus = 18, // Secondary bus unqiescing | ||
34 | XmPciLpEvent_BridgeError = 21, // Bridge Error | ||
35 | XmPciLpEvent_SlotInterrupt = 22 // Slot interrupt | ||
36 | }; | ||
37 | |||
38 | struct XmPciLpEvent_BusInterrupt { | ||
39 | HvBusNumber busNumber; | ||
40 | HvSubBusNumber subBusNumber; | ||
41 | }; | ||
42 | |||
43 | struct XmPciLpEvent_NodeInterrupt { | ||
44 | HvBusNumber busNumber; | ||
45 | HvSubBusNumber subBusNumber; | ||
46 | HvAgentId deviceId; | ||
47 | }; | ||
48 | |||
49 | struct XmPciLpEvent { | ||
50 | struct HvLpEvent hvLpEvent; | ||
51 | |||
52 | union { | ||
53 | u64 alignData; // Align on an 8-byte boundary | ||
54 | |||
55 | struct { | ||
56 | u32 fisr; | ||
57 | HvBusNumber busNumber; | ||
58 | HvSubBusNumber subBusNumber; | ||
59 | HvAgentId deviceId; | ||
60 | } slotInterrupt; | ||
61 | |||
62 | struct XmPciLpEvent_BusInterrupt busFailed; | ||
63 | struct XmPciLpEvent_BusInterrupt busRecovered; | ||
64 | struct XmPciLpEvent_BusInterrupt busCreated; | ||
65 | |||
66 | struct XmPciLpEvent_NodeInterrupt nodeFailed; | ||
67 | struct XmPciLpEvent_NodeInterrupt nodeRecovered; | ||
68 | |||
69 | } eventData; | ||
70 | |||
71 | }; | ||
72 | |||
73 | static void intReceived(struct XmPciLpEvent *eventParm, | ||
74 | struct pt_regs *regsParm); | ||
75 | |||
76 | static void XmPciLpEvent_handler(struct HvLpEvent *eventParm, | ||
77 | struct pt_regs *regsParm) | ||
78 | { | ||
79 | #ifdef CONFIG_PCI | ||
80 | #if 0 | ||
81 | PPCDBG(PPCDBG_BUSWALK, "XmPciLpEvent_handler, type 0x%x\n", | ||
82 | eventParm->xType); | ||
83 | #endif | ||
84 | ++Pci_Event_Count; | ||
85 | |||
86 | if (eventParm && (eventParm->xType == HvLpEvent_Type_PciIo)) { | ||
87 | switch (eventParm->xFlags.xFunction) { | ||
88 | case HvLpEvent_Function_Int: | ||
89 | intReceived((struct XmPciLpEvent *)eventParm, regsParm); | ||
90 | break; | ||
91 | case HvLpEvent_Function_Ack: | ||
92 | printk(KERN_ERR | ||
93 | "XmPciLpEvent.c: unexpected ack received\n"); | ||
94 | break; | ||
95 | default: | ||
96 | printk(KERN_ERR | ||
97 | "XmPciLpEvent.c: unexpected event function %d\n", | ||
98 | (int)eventParm->xFlags.xFunction); | ||
99 | break; | ||
100 | } | ||
101 | } else if (eventParm) | ||
102 | printk(KERN_ERR | ||
103 | "XmPciLpEvent.c: Unrecognized PCI event type 0x%x\n", | ||
104 | (int)eventParm->xType); | ||
105 | else | ||
106 | printk(KERN_ERR "XmPciLpEvent.c: NULL event received\n"); | ||
107 | #endif | ||
108 | } | ||
109 | |||
110 | static void intReceived(struct XmPciLpEvent *eventParm, | ||
111 | struct pt_regs *regsParm) | ||
112 | { | ||
113 | int irq; | ||
114 | |||
115 | ++Pci_Interrupt_Count; | ||
116 | #if 0 | ||
117 | PPCDBG(PPCDBG_BUSWALK, "PCI: XmPciLpEvent.c: intReceived\n"); | ||
118 | #endif | ||
119 | |||
120 | switch (eventParm->hvLpEvent.xSubtype) { | ||
121 | case XmPciLpEvent_SlotInterrupt: | ||
122 | irq = eventParm->hvLpEvent.xCorrelationToken; | ||
123 | /* Dispatch the interrupt handlers for this irq */ | ||
124 | ppc_irq_dispatch_handler(regsParm, irq); | ||
125 | HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber, | ||
126 | eventParm->eventData.slotInterrupt.subBusNumber, | ||
127 | eventParm->eventData.slotInterrupt.deviceId); | ||
128 | break; | ||
129 | /* Ignore error recovery events for now */ | ||
130 | case XmPciLpEvent_BusCreated: | ||
131 | printk(KERN_INFO "XmPciLpEvent.c: system bus %d created\n", | ||
132 | eventParm->eventData.busCreated.busNumber); | ||
133 | break; | ||
134 | case XmPciLpEvent_BusError: | ||
135 | case XmPciLpEvent_BusFailed: | ||
136 | printk(KERN_INFO "XmPciLpEvent.c: system bus %d failed\n", | ||
137 | eventParm->eventData.busFailed.busNumber); | ||
138 | break; | ||
139 | case XmPciLpEvent_BusRecovered: | ||
140 | case XmPciLpEvent_UnQuiesceBus: | ||
141 | printk(KERN_INFO "XmPciLpEvent.c: system bus %d recovered\n", | ||
142 | eventParm->eventData.busRecovered.busNumber); | ||
143 | break; | ||
144 | case XmPciLpEvent_NodeFailed: | ||
145 | case XmPciLpEvent_BridgeError: | ||
146 | printk(KERN_INFO | ||
147 | "XmPciLpEvent.c: multi-adapter bridge %d/%d/%d failed\n", | ||
148 | eventParm->eventData.nodeFailed.busNumber, | ||
149 | eventParm->eventData.nodeFailed.subBusNumber, | ||
150 | eventParm->eventData.nodeFailed.deviceId); | ||
151 | break; | ||
152 | case XmPciLpEvent_NodeRecovered: | ||
153 | printk(KERN_INFO | ||
154 | "XmPciLpEvent.c: multi-adapter bridge %d/%d/%d recovered\n", | ||
155 | eventParm->eventData.nodeRecovered.busNumber, | ||
156 | eventParm->eventData.nodeRecovered.subBusNumber, | ||
157 | eventParm->eventData.nodeRecovered.deviceId); | ||
158 | break; | ||
159 | default: | ||
160 | printk(KERN_ERR | ||
161 | "XmPciLpEvent.c: unrecognized event subtype 0x%x\n", | ||
162 | eventParm->hvLpEvent.xSubtype); | ||
163 | break; | ||
164 | } | ||
165 | } | ||
166 | |||
167 | |||
168 | /* This should be called sometime prior to buswalk (init_IRQ would be good) */ | ||
169 | int XmPciLpEvent_init() | ||
170 | { | ||
171 | int xRc; | ||
172 | |||
173 | PPCDBG(PPCDBG_BUSWALK, | ||
174 | "XmPciLpEvent_init, Register Event type 0x%04X\n", | ||
175 | HvLpEvent_Type_PciIo); | ||
176 | |||
177 | xRc = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, | ||
178 | &XmPciLpEvent_handler); | ||
179 | if (xRc == 0) { | ||
180 | xRc = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); | ||
181 | if (xRc != 0) | ||
182 | printk(KERN_ERR | ||
183 | "XmPciLpEvent.c: open event path failed with rc 0x%x\n", | ||
184 | xRc); | ||
185 | } else | ||
186 | printk(KERN_ERR | ||
187 | "XmPciLpEvent.c: register handler failed with rc 0x%x\n", | ||
188 | xRc); | ||
189 | return xRc; | ||
190 | } | ||
diff --git a/arch/ppc64/kernel/asm-offsets.c b/arch/ppc64/kernel/asm-offsets.c
index 0094ac79a18c..abb9e5b5da03 100644
--- a/arch/ppc64/kernel/asm-offsets.c
+++ b/arch/ppc64/kernel/asm-offsets.c
@@ -31,7 +31,6 @@ | |||
31 | 31 | ||
32 | #include <asm/paca.h> | 32 | #include <asm/paca.h> |
33 | #include <asm/lppaca.h> | 33 | #include <asm/lppaca.h> |
34 | #include <asm/iSeries/ItLpQueue.h> | ||
35 | #include <asm/iSeries/HvLpEvent.h> | 34 | #include <asm/iSeries/HvLpEvent.h> |
36 | #include <asm/rtas.h> | 35 | #include <asm/rtas.h> |
37 | #include <asm/cputable.h> | 36 | #include <asm/cputable.h> |
diff --git a/arch/ppc64/kernel/bpa_iic.c b/arch/ppc64/kernel/bpa_iic.c
new file mode 100644
index 000000000000..c8f3dc3fad70
--- /dev/null
+++ b/arch/ppc64/kernel/bpa_iic.c
@@ -0,0 +1,270 @@ | |||
1 | /* | ||
2 | * BPA Internal Interrupt Controller | ||
3 | * | ||
4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 | ||
5 | * | ||
6 | * Author: Arnd Bergmann <arndb@de.ibm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/config.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/irq.h> | ||
26 | #include <linux/percpu.h> | ||
27 | #include <linux/types.h> | ||
28 | |||
29 | #include <asm/io.h> | ||
30 | #include <asm/pgtable.h> | ||
31 | #include <asm/prom.h> | ||
32 | #include <asm/ptrace.h> | ||
33 | |||
34 | #include "bpa_iic.h" | ||
35 | |||
36 | struct iic_pending_bits { | ||
37 | u32 data; | ||
38 | u8 flags; | ||
39 | u8 class; | ||
40 | u8 source; | ||
41 | u8 prio; | ||
42 | }; | ||
43 | |||
44 | enum iic_pending_flags { | ||
45 | IIC_VALID = 0x80, | ||
46 | IIC_IPI = 0x40, | ||
47 | }; | ||
48 | |||
49 | struct iic_regs { | ||
50 | struct iic_pending_bits pending; | ||
51 | struct iic_pending_bits pending_destr; | ||
52 | u64 generate; | ||
53 | u64 prio; | ||
54 | }; | ||
55 | |||
56 | struct iic { | ||
57 | struct iic_regs __iomem *regs; | ||
58 | }; | ||
59 | |||
60 | static DEFINE_PER_CPU(struct iic, iic); | ||
61 | |||
62 | void iic_local_enable(void) | ||
63 | { | ||
64 | out_be64(&__get_cpu_var(iic).regs->prio, 0xff); | ||
65 | } | ||
66 | |||
67 | void iic_local_disable(void) | ||
68 | { | ||
69 | out_be64(&__get_cpu_var(iic).regs->prio, 0x0); | ||
70 | } | ||
71 | |||
72 | static unsigned int iic_startup(unsigned int irq) | ||
73 | { | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static void iic_enable(unsigned int irq) | ||
78 | { | ||
79 | iic_local_enable(); | ||
80 | } | ||
81 | |||
82 | static void iic_disable(unsigned int irq) | ||
83 | { | ||
84 | } | ||
85 | |||
86 | static void iic_end(unsigned int irq) | ||
87 | { | ||
88 | iic_local_enable(); | ||
89 | } | ||
90 | |||
91 | static struct hw_interrupt_type iic_pic = { | ||
92 | .typename = " BPA-IIC ", | ||
93 | .startup = iic_startup, | ||
94 | .enable = iic_enable, | ||
95 | .disable = iic_disable, | ||
96 | .end = iic_end, | ||
97 | }; | ||
98 | |||
99 | static int iic_external_get_irq(struct iic_pending_bits pending) | ||
100 | { | ||
101 | int irq; | ||
102 | unsigned char node, unit; | ||
103 | |||
104 | node = pending.source >> 4; | ||
105 | unit = pending.source & 0xf; | ||
106 | irq = -1; | ||
107 | |||
108 | /* | ||
109 | * This mapping is specific to the Broadband | ||
110 | * Engine. We might need to get the numbers | ||
111 | * from the device tree to support future CPUs. | ||
112 | */ | ||
113 | switch (unit) { | ||
114 | case 0x00: | ||
115 | case 0x0b: | ||
116 | /* | ||
117 | * One of these units can be connected | ||
118 | * to an external interrupt controller. | ||
119 | */ | ||
120 | if (pending.prio > 0x3f || | ||
121 | pending.class != 2) | ||
122 | break; | ||
123 | irq = IIC_EXT_OFFSET | ||
124 | + spider_get_irq(pending.prio + node * IIC_NODE_STRIDE) | ||
125 | + node * IIC_NODE_STRIDE; | ||
126 | break; | ||
127 | case 0x01 ... 0x04: | ||
128 | case 0x07 ... 0x0a: | ||
129 | /* | ||
130 | * These units are connected to the SPEs | ||
131 | */ | ||
132 | if (pending.class > 2) | ||
133 | break; | ||
134 | irq = IIC_SPE_OFFSET | ||
135 | + pending.class * IIC_CLASS_STRIDE | ||
136 | + node * IIC_NODE_STRIDE | ||
137 | + unit; | ||
138 | break; | ||
139 | } | ||
140 | if (irq == -1) | ||
141 | printk(KERN_WARNING "Unexpected interrupt class %02x, " | ||
142 | "source %02x, prio %02x, cpu %02x\n", pending.class, | ||
143 | pending.source, pending.prio, smp_processor_id()); | ||
144 | return irq; | ||
145 | } | ||
146 | |||
147 | /* Get an IRQ number from the pending state register of the IIC */ | ||
148 | int iic_get_irq(struct pt_regs *regs) | ||
149 | { | ||
150 | struct iic *iic; | ||
151 | int irq; | ||
152 | struct iic_pending_bits pending; | ||
153 | |||
154 | iic = &__get_cpu_var(iic); | ||
155 | *(unsigned long *) &pending = | ||
156 | in_be64((unsigned long __iomem *) &iic->regs->pending_destr); | ||
157 | |||
158 | irq = -1; | ||
159 | if (pending.flags & IIC_VALID) { | ||
160 | if (pending.flags & IIC_IPI) { | ||
161 | irq = IIC_IPI_OFFSET + (pending.prio >> 4); | ||
162 | /* | ||
163 | if (irq > 0x80) | ||
164 | printk(KERN_WARNING "Unexpected IPI prio %02x" | ||
165 | "on CPU %02x\n", pending.prio, | ||
166 | smp_processor_id()); | ||
167 | */ | ||
168 | } else { | ||
169 | irq = iic_external_get_irq(pending); | ||
170 | } | ||
171 | } | ||
172 | return irq; | ||
173 | } | ||
174 | |||
175 | static struct iic_regs __iomem *find_iic(int cpu) | ||
176 | { | ||
177 | struct device_node *np; | ||
178 | int nodeid = cpu / 2; | ||
179 | unsigned long regs; | ||
180 | struct iic_regs __iomem *iic_regs; | ||
181 | |||
182 | for (np = of_find_node_by_type(NULL, "cpu"); | ||
183 | np; | ||
184 | np = of_find_node_by_type(np, "cpu")) { | ||
185 | if (nodeid == *(int *)get_property(np, "node-id", NULL)) | ||
186 | break; | ||
187 | } | ||
188 | |||
189 | if (!np) { | ||
190 | printk(KERN_WARNING "IIC: CPU %d not found\n", cpu); | ||
191 | iic_regs = NULL; | ||
192 | } else { | ||
193 | regs = *(long *)get_property(np, "iic", NULL); | ||
194 | |||
195 | /* hack until we have decided on the devtree info */ | ||
196 | regs += 0x400; | ||
197 | if (cpu & 1) | ||
198 | regs += 0x20; | ||
199 | |||
200 | printk(KERN_DEBUG "IIC for CPU %d at %lx\n", cpu, regs); | ||
201 | iic_regs = __ioremap(regs, sizeof(struct iic_regs), | ||
202 | _PAGE_NO_CACHE); | ||
203 | } | ||
204 | return iic_regs; | ||
205 | } | ||
206 | |||
207 | #ifdef CONFIG_SMP | ||
208 | void iic_setup_cpu(void) | ||
209 | { | ||
210 | out_be64(&__get_cpu_var(iic).regs->prio, 0xff); | ||
211 | } | ||
212 | |||
213 | void iic_cause_IPI(int cpu, int mesg) | ||
214 | { | ||
215 | out_be64(&per_cpu(iic, cpu).regs->generate, mesg); | ||
216 | } | ||
217 | |||
218 | static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) | ||
219 | { | ||
220 | |||
221 | smp_message_recv(irq - IIC_IPI_OFFSET, regs); | ||
222 | return IRQ_HANDLED; | ||
223 | } | ||
224 | |||
225 | static void iic_request_ipi(int irq, const char *name) | ||
226 | { | ||
227 | /* IPIs are marked SA_INTERRUPT as they must run with irqs | ||
228 | * disabled */ | ||
229 | get_irq_desc(irq)->handler = &iic_pic; | ||
230 | get_irq_desc(irq)->status |= IRQ_PER_CPU; | ||
231 | request_irq(irq, iic_ipi_action, SA_INTERRUPT, name, NULL); | ||
232 | } | ||
233 | |||
234 | void iic_request_IPIs(void) | ||
235 | { | ||
236 | iic_request_ipi(IIC_IPI_OFFSET + PPC_MSG_CALL_FUNCTION, "IPI-call"); | ||
237 | iic_request_ipi(IIC_IPI_OFFSET + PPC_MSG_RESCHEDULE, "IPI-resched"); | ||
238 | #ifdef CONFIG_DEBUGGER | ||
239 | iic_request_ipi(IIC_IPI_OFFSET + PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); | ||
240 | #endif /* CONFIG_DEBUGGER */ | ||
241 | } | ||
242 | #endif /* CONFIG_SMP */ | ||
243 | |||
244 | static void iic_setup_spe_handlers(void) | ||
245 | { | ||
246 | int be, isrc; | ||
247 | |||
248 | /* Assume two threads per BE are present */ | ||
249 | for (be=0; be < num_present_cpus() / 2; be++) { | ||
250 | for (isrc = 0; isrc < IIC_CLASS_STRIDE * 3; isrc++) { | ||
251 | int irq = IIC_NODE_STRIDE * be + IIC_SPE_OFFSET + isrc; | ||
252 | get_irq_desc(irq)->handler = &iic_pic; | ||
253 | } | ||
254 | } | ||
255 | } | ||
256 | |||
257 | void iic_init_IRQ(void) | ||
258 | { | ||
259 | int cpu, irq_offset; | ||
260 | struct iic *iic; | ||
261 | |||
262 | irq_offset = 0; | ||
263 | for_each_cpu(cpu) { | ||
264 | iic = &per_cpu(iic, cpu); | ||
265 | iic->regs = find_iic(cpu); | ||
266 | if (iic->regs) | ||
267 | out_be64(&iic->regs->prio, 0xff); | ||
268 | } | ||
269 | iic_setup_spe_handlers(); | ||
270 | } | ||
diff --git a/arch/ppc64/kernel/bpa_iic.h b/arch/ppc64/kernel/bpa_iic.h new file mode 100644 index 000000000000..6833c3022166 --- /dev/null +++ b/arch/ppc64/kernel/bpa_iic.h | |||
@@ -0,0 +1,62 @@ | |||
1 | #ifndef ASM_BPA_IIC_H | ||
2 | #define ASM_BPA_IIC_H | ||
3 | #ifdef __KERNEL__ | ||
4 | /* | ||
5 | * Mapping of IIC pending bits into per-node | ||
6 | * interrupt numbers. | ||
7 | * | ||
8 | * IRQ FF CC SS PP FF CC SS PP Description | ||
9 | * | ||
10 | * 00-3f 80 02 +0 00 - 80 02 +0 3f South Bridge | ||
11 | * 00-3f 80 02 +b 00 - 80 02 +b 3f South Bridge | ||
12 | * 41-4a 80 00 +1 ** - 80 00 +a ** SPU Class 0 | ||
13 | * 51-5a 80 01 +1 ** - 80 01 +a ** SPU Class 1 | ||
14 | * 61-6a 80 02 +1 ** - 80 02 +a ** SPU Class 2 | ||
15 | * 70-7f C0 ** ** 00 - C0 ** ** 0f IPI | ||
16 | * | ||
17 | * F flags | ||
18 | * C class | ||
19 | * S source | ||
20 | * P Priority | ||
21 | * + node number | ||
22 | * * don't care | ||
23 | * | ||
24 | * A node consists of a Broadband Engine and an optional | ||
25 | * south bridge device providing a maximum of 64 IRQs. | ||
26 | * The south bridge may be connected to either IOIF0 | ||
27 | * or IOIF1. | ||
28 | * Each SPE is represented as three IRQ lines, one per | ||
29 | * interrupt class. | ||
30 | * 16 IRQ numbers are reserved for inter-processor | ||
31 | * interrupts, although these are only used within the | ||
32 | * first node's range. | ||
33 | * | ||
34 | * This scheme needs 128 IRQ numbers per BIF node ID, | ||
35 | * which means that with a total of 512 lines | ||
36 | * available, we can have a maximum of four nodes. | ||
37 | */ | ||
38 | |||
39 | enum { | ||
40 | IIC_EXT_OFFSET = 0x00, /* Start of south bridge IRQs */ | ||
41 | IIC_NUM_EXT = 0x40, /* Number of south bridge IRQs */ | ||
42 | IIC_SPE_OFFSET = 0x40, /* Start of SPE interrupts */ | ||
43 | IIC_CLASS_STRIDE = 0x10, /* SPE IRQs per class */ | ||
44 | IIC_IPI_OFFSET = 0x70, /* Start of IPI IRQs */ | ||
45 | IIC_NUM_IPIS = 0x10, /* IRQs reserved for IPI */ | ||
46 | IIC_NODE_STRIDE = 0x80, /* Total IRQs per node */ | ||
47 | }; | ||
48 | |||
49 | extern void iic_init_IRQ(void); | ||
50 | extern int iic_get_irq(struct pt_regs *regs); | ||
51 | extern void iic_cause_IPI(int cpu, int mesg); | ||
52 | extern void iic_request_IPIs(void); | ||
53 | extern void iic_setup_cpu(void); | ||
54 | extern void iic_local_enable(void); | ||
55 | extern void iic_local_disable(void); | ||
56 | |||
57 | |||
58 | extern void spider_init_IRQ(void); | ||
59 | extern int spider_get_irq(unsigned long int_pending); | ||
60 | |||
61 | #endif | ||
62 | #endif /* ASM_BPA_IIC_H */ | ||
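Illustration (not part of the patch): the constants above combine into global IRQ numbers exactly as iic_external_get_irq() in bpa_iic.c does it. The stand-alone sketch below mirrors that arithmetic; the node/class/unit values in main() are made up.

/* Sketch of the bpa_iic.h IRQ numbering scheme described above.
 * Mirrors the arithmetic in iic_external_get_irq(); sample values
 * are hypothetical. */
#include <stdio.h>

enum {
	IIC_EXT_OFFSET   = 0x00,	/* south bridge IRQs start here */
	IIC_SPE_OFFSET   = 0x40,	/* SPE IRQs start here */
	IIC_CLASS_STRIDE = 0x10,	/* one block of 0x10 lines per class */
	IIC_IPI_OFFSET   = 0x70,	/* IPIs live in the first node's range */
	IIC_NODE_STRIDE  = 0x80,	/* 128 IRQ numbers per node */
};

static int spe_irq(int node, int cls, int unit)
{
	return IIC_SPE_OFFSET + cls * IIC_CLASS_STRIDE
		+ node * IIC_NODE_STRIDE + unit;
}

int main(void)
{
	/* class 2 source, unit 3, node 1: 0x40 + 0x20 + 0x80 + 0x3 = 0xe3 */
	printf("SPE irq: %#x\n", spe_irq(1, 2, 3));
	/* second IPI slot of the first node */
	printf("IPI irq: %#x\n", IIC_IPI_OFFSET + 1);
	return 0;
}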
diff --git a/arch/ppc64/kernel/bpa_iommu.c b/arch/ppc64/kernel/bpa_iommu.c new file mode 100644 index 000000000000..f33a7bccb0d7 --- /dev/null +++ b/arch/ppc64/kernel/bpa_iommu.c | |||
@@ -0,0 +1,377 @@ | |||
1 | /* | ||
2 | * IOMMU implementation for Broadband Processor Architecture | ||
3 | * We just establish a linear mapping at boot by setting all the | ||
4 | * IOPT cache entries in the CPU. | ||
5 | * The mapping functions should be identical to pci_direct_iommu, | ||
6 | * except for the handling of the high order bit that is required | ||
7 | * by the Spider bridge. These should be split into a separate | ||
8 | * file at the point where we get a different bridge chip. | ||
9 | * | ||
10 | * Copyright (C) 2005 IBM Deutschland Entwicklung GmbH, | ||
11 | * Arnd Bergmann <arndb@de.ibm.com> | ||
12 | * | ||
13 | * Based on linear mapping | ||
14 | * Copyright (C) 2003 Benjamin Herrenschmidt (benh@kernel.crashing.org) | ||
15 | * | ||
16 | * This program is free software; you can redistribute it and/or | ||
17 | * modify it under the terms of the GNU General Public License | ||
18 | * as published by the Free Software Foundation; either version | ||
19 | * 2 of the License, or (at your option) any later version. | ||
20 | */ | ||
21 | |||
22 | #undef DEBUG | ||
23 | |||
24 | #include <linux/kernel.h> | ||
25 | #include <linux/pci.h> | ||
26 | #include <linux/delay.h> | ||
27 | #include <linux/string.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/bootmem.h> | ||
30 | #include <linux/mm.h> | ||
31 | #include <linux/dma-mapping.h> | ||
32 | |||
33 | #include <asm/sections.h> | ||
34 | #include <asm/iommu.h> | ||
35 | #include <asm/io.h> | ||
36 | #include <asm/prom.h> | ||
37 | #include <asm/pci-bridge.h> | ||
38 | #include <asm/machdep.h> | ||
39 | #include <asm/pmac_feature.h> | ||
40 | #include <asm/abs_addr.h> | ||
41 | #include <asm/system.h> | ||
42 | |||
43 | #include "pci.h" | ||
44 | #include "bpa_iommu.h" | ||
45 | |||
46 | static inline unsigned long | ||
47 | get_iopt_entry(unsigned long real_address, unsigned long ioid, | ||
48 | unsigned long prot) | ||
49 | { | ||
50 | return (prot & IOPT_PROT_MASK) | ||
51 | | (IOPT_COHERENT) | ||
52 | | (IOPT_ORDER_VC) | ||
53 | | (real_address & IOPT_RPN_MASK) | ||
54 | | (ioid & IOPT_IOID_MASK); | ||
55 | } | ||
56 | |||
57 | typedef struct { | ||
58 | unsigned long val; | ||
59 | } ioste; | ||
60 | |||
61 | static inline ioste | ||
62 | mk_ioste(unsigned long val) | ||
63 | { | ||
64 | ioste ioste = { .val = val, }; | ||
65 | return ioste; | ||
66 | } | ||
67 | |||
68 | static inline ioste | ||
69 | get_iost_entry(unsigned long iopt_base, unsigned long io_address, unsigned page_size) | ||
70 | { | ||
71 | unsigned long ps; | ||
72 | unsigned long iostep; | ||
73 | unsigned long nnpt; | ||
74 | unsigned long shift; | ||
75 | |||
76 | switch (page_size) { | ||
77 | case 0x1000000: | ||
78 | ps = IOST_PS_16M; | ||
79 | nnpt = 0; /* one page per segment */ | ||
80 | shift = 5; /* segment has 16 iopt entries */ | ||
81 | break; | ||
82 | |||
83 | case 0x100000: | ||
84 | ps = IOST_PS_1M; | ||
85 | nnpt = 0; /* one page per segment */ | ||
86 | shift = 1; /* segment has 256 iopt entries */ | ||
87 | break; | ||
88 | |||
89 | case 0x10000: | ||
90 | ps = IOST_PS_64K; | ||
91 | nnpt = 0x07; /* 8 pages per io page table */ | ||
92 | shift = 0; /* all entries are used */ | ||
93 | break; | ||
94 | |||
95 | case 0x1000: | ||
96 | ps = IOST_PS_4K; | ||
97 | nnpt = 0x7f; /* 128 pages per io page table */ | ||
98 | shift = 0; /* all entries are used */ | ||
99 | break; | ||
100 | |||
101 | default: /* not a known compile time constant */ | ||
102 | BUILD_BUG_ON(1); | ||
103 | break; | ||
104 | } | ||
105 | |||
106 | iostep = iopt_base + | ||
107 | /* need 8 bytes per iopte */ | ||
108 | (((io_address / page_size * 8) | ||
109 | /* align io page tables on 4k page boundaries */ | ||
110 | << shift) | ||
111 | /* nnpt+1 pages go into each iopt */ | ||
112 | & ~(nnpt << 12)); | ||
113 | |||
114 | nnpt++; /* this seems to work, but the documentation is not clear | ||
115 | about whether we put nnpt or nnpt-1 into the ioste bits. | ||
116 | In theory, this can't work for 4k pages. */ | ||
117 | return mk_ioste(IOST_VALID_MASK | ||
118 | | (iostep & IOST_PT_BASE_MASK) | ||
119 | | ((nnpt << 5) & IOST_NNPT_MASK) | ||
120 | | (ps & IOST_PS_MASK)); | ||
121 | } | ||
122 | |||
123 | /* compute the address of an io pte */ | ||
124 | static inline unsigned long | ||
125 | get_ioptep(ioste iost_entry, unsigned long io_address) | ||
126 | { | ||
127 | unsigned long iopt_base; | ||
128 | unsigned long page_size; | ||
129 | unsigned long page_number; | ||
130 | unsigned long iopt_offset; | ||
131 | |||
132 | iopt_base = iost_entry.val & IOST_PT_BASE_MASK; | ||
133 | page_size = iost_entry.val & IOST_PS_MASK; | ||
134 | |||
135 | /* decode page size to compute page number */ | ||
136 | page_number = (io_address & 0x0fffffff) >> (10 + 2 * page_size); | ||
137 | /* page number is an offset into the io page table */ | ||
138 | iopt_offset = (page_number << 3) & 0x7fff8ul; | ||
139 | return iopt_base + iopt_offset; | ||
140 | } | ||
141 | |||
142 | /* compute the tag field of the iopt cache entry */ | ||
143 | static inline unsigned long | ||
144 | get_ioc_tag(ioste iost_entry, unsigned long io_address) | ||
145 | { | ||
146 | unsigned long iopte = get_ioptep(iost_entry, io_address); | ||
147 | |||
148 | return IOPT_VALID_MASK | ||
149 | | ((iopte & 0x00000000000000ff8ul) >> 3) | ||
150 | | ((iopte & 0x0000003fffffc0000ul) >> 9); | ||
151 | } | ||
152 | |||
153 | /* compute the hashed 6 bit index for the 4-way associative pte cache */ | ||
154 | static inline unsigned long | ||
155 | get_ioc_hash(ioste iost_entry, unsigned long io_address) | ||
156 | { | ||
157 | unsigned long iopte = get_ioptep(iost_entry, io_address); | ||
158 | |||
159 | return ((iopte & 0x000000000000001f8ul) >> 3) | ||
160 | ^ ((iopte & 0x00000000000020000ul) >> 17) | ||
161 | ^ ((iopte & 0x00000000000010000ul) >> 15) | ||
162 | ^ ((iopte & 0x00000000000008000ul) >> 13) | ||
163 | ^ ((iopte & 0x00000000000004000ul) >> 11) | ||
164 | ^ ((iopte & 0x00000000000002000ul) >> 9) | ||
165 | ^ ((iopte & 0x00000000000001000ul) >> 7); | ||
166 | } | ||
167 | |||
168 | /* same as above, but pretend that we have a simpler 1-way associative | ||
169 | pte cache with an 8 bit index */ | ||
170 | static inline unsigned long | ||
171 | get_ioc_hash_1way(ioste iost_entry, unsigned long io_address) | ||
172 | { | ||
173 | unsigned long iopte = get_ioptep(iost_entry, io_address); | ||
174 | |||
175 | return ((iopte & 0x000000000000001f8ul) >> 3) | ||
176 | ^ ((iopte & 0x00000000000020000ul) >> 17) | ||
177 | ^ ((iopte & 0x00000000000010000ul) >> 15) | ||
178 | ^ ((iopte & 0x00000000000008000ul) >> 13) | ||
179 | ^ ((iopte & 0x00000000000004000ul) >> 11) | ||
180 | ^ ((iopte & 0x00000000000002000ul) >> 9) | ||
181 | ^ ((iopte & 0x00000000000001000ul) >> 7) | ||
182 | ^ ((iopte & 0x0000000000000c000ul) >> 8); | ||
183 | } | ||
184 | |||
185 | static inline ioste | ||
186 | get_iost_cache(void __iomem *base, unsigned long index) | ||
187 | { | ||
188 | unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR); | ||
189 | return mk_ioste(in_be64(&p[index])); | ||
190 | } | ||
191 | |||
192 | static inline void | ||
193 | set_iost_cache(void __iomem *base, unsigned long index, ioste ste) | ||
194 | { | ||
195 | unsigned long __iomem *p = (base + IOC_ST_CACHE_DIR); | ||
196 | pr_debug("ioste %02lx was %016lx, store %016lx", index, | ||
197 | get_iost_cache(base, index).val, ste.val); | ||
198 | out_be64(&p[index], ste.val); | ||
199 | pr_debug(" now %016lx\n", get_iost_cache(base, index).val); | ||
200 | } | ||
201 | |||
202 | static inline unsigned long | ||
203 | get_iopt_cache(void __iomem *base, unsigned long index, unsigned long *tag) | ||
204 | { | ||
205 | unsigned long __iomem *tags = (void *)(base + IOC_PT_CACHE_DIR); | ||
206 | unsigned long __iomem *p = (void *)(base + IOC_PT_CACHE_REG); | ||
207 | |||
208 | *tag = tags[index]; | ||
209 | rmb(); | ||
210 | return *p; | ||
211 | } | ||
212 | |||
213 | static inline void | ||
214 | set_iopt_cache(void __iomem *base, unsigned long index, | ||
215 | unsigned long tag, unsigned long val) | ||
216 | { | ||
217 | unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR; | ||
218 | unsigned long __iomem *p = base + IOC_PT_CACHE_REG; | ||
219 | unsigned long oldtag; /* previous tag, read back for the debug message */ | ||
220 | pr_debug("iopt %02lx was v%016lx/t%016lx, store v%016lx/t%016lx\n", index, get_iopt_cache(base, index, &oldtag), oldtag, val, tag); | ||
221 | |||
222 | out_be64(p, val); | ||
223 | out_be64(&tags[index], tag); | ||
224 | } | ||
225 | |||
226 | static inline void | ||
227 | set_iost_origin(void __iomem *base) | ||
228 | { | ||
229 | unsigned long __iomem *p = base + IOC_ST_ORIGIN; | ||
230 | unsigned long origin = IOSTO_ENABLE | IOSTO_SW; | ||
231 | |||
232 | pr_debug("iost_origin %016lx, now %016lx\n", in_be64(p), origin); | ||
233 | out_be64(p, origin); | ||
234 | } | ||
235 | |||
236 | static inline void | ||
237 | set_iocmd_config(void __iomem *base) | ||
238 | { | ||
239 | unsigned long __iomem *p = base + 0xc00; | ||
240 | unsigned long conf; | ||
241 | |||
242 | conf = in_be64(p); | ||
243 | pr_debug("iost_conf %016lx, now %016lx\n", conf, conf | IOCMD_CONF_TE); | ||
244 | out_be64(p, conf | IOCMD_CONF_TE); | ||
245 | } | ||
246 | |||
247 | /* FIXME: get these from the device tree */ | ||
248 | #define ioc_base 0x20000511000ull | ||
249 | #define ioc_mmio_base 0x20000510000ull | ||
250 | #define ioid 0x48a | ||
251 | #define iopt_phys_offset (- 0x20000000) /* We have a 512MB offset from the SB */ | ||
252 | #define io_page_size 0x1000000 | ||
253 | |||
254 | static unsigned long map_iopt_entry(unsigned long address) | ||
255 | { | ||
256 | switch (address >> 20) { | ||
257 | case 0x600: | ||
258 | address = 0x24020000000ull; /* spider i/o */ | ||
259 | break; | ||
260 | default: | ||
261 | address += iopt_phys_offset; | ||
262 | break; | ||
263 | } | ||
264 | |||
265 | return get_iopt_entry(address, ioid, IOPT_PROT_RW); | ||
266 | } | ||
267 | |||
268 | static void iommu_bus_setup_null(struct pci_bus *b) { } | ||
269 | static void iommu_dev_setup_null(struct pci_dev *d) { } | ||
270 | |||
271 | /* initialize the iommu to support a simple linear mapping | ||
272 | * for each DMA window used by any device. For now, we | ||
273 | * happen to know that there is only one DMA window in use, | ||
274 | * starting at iopt_phys_offset. */ | ||
275 | static void bpa_map_iommu(void) | ||
276 | { | ||
277 | unsigned long address; | ||
278 | void __iomem *base; | ||
279 | ioste ioste; | ||
280 | unsigned long index; | ||
281 | |||
282 | base = __ioremap(ioc_base, 0x1000, _PAGE_NO_CACHE); | ||
283 | pr_debug("%lx mapped to %p\n", ioc_base, base); | ||
284 | set_iocmd_config(base); | ||
285 | iounmap(base); | ||
286 | |||
287 | base = __ioremap(ioc_mmio_base, 0x1000, _PAGE_NO_CACHE); | ||
288 | pr_debug("%lx mapped to %p\n", ioc_mmio_base, base); | ||
289 | |||
290 | set_iost_origin(base); | ||
291 | |||
292 | for (address = 0; address < 0x100000000ul; address += io_page_size) { | ||
293 | ioste = get_iost_entry(0x10000000000ul, address, io_page_size); | ||
294 | if ((address & 0xfffffff) == 0) /* segment start */ | ||
295 | set_iost_cache(base, address >> 28, ioste); | ||
296 | index = get_ioc_hash_1way(ioste, address); | ||
297 | pr_debug("addr %08lx, index %02lx, ioste %016lx\n", | ||
298 | address, index, ioste.val); | ||
299 | set_iopt_cache(base, | ||
300 | get_ioc_hash_1way(ioste, address), | ||
301 | get_ioc_tag(ioste, address), | ||
302 | map_iopt_entry(address)); | ||
303 | } | ||
304 | iounmap(base); | ||
305 | } | ||
306 | |||
307 | |||
308 | static void *bpa_alloc_coherent(struct device *hwdev, size_t size, | ||
309 | dma_addr_t *dma_handle, unsigned int __nocast flag) | ||
310 | { | ||
311 | void *ret; | ||
312 | |||
313 | ret = (void *)__get_free_pages(flag, get_order(size)); | ||
314 | if (ret != NULL) { | ||
315 | memset(ret, 0, size); | ||
316 | *dma_handle = virt_to_abs(ret) | BPA_DMA_VALID; | ||
317 | } | ||
318 | return ret; | ||
319 | } | ||
320 | |||
321 | static void bpa_free_coherent(struct device *hwdev, size_t size, | ||
322 | void *vaddr, dma_addr_t dma_handle) | ||
323 | { | ||
324 | free_pages((unsigned long)vaddr, get_order(size)); | ||
325 | } | ||
326 | |||
327 | static dma_addr_t bpa_map_single(struct device *hwdev, void *ptr, | ||
328 | size_t size, enum dma_data_direction direction) | ||
329 | { | ||
330 | return virt_to_abs(ptr) | BPA_DMA_VALID; | ||
331 | } | ||
332 | |||
333 | static void bpa_unmap_single(struct device *hwdev, dma_addr_t dma_addr, | ||
334 | size_t size, enum dma_data_direction direction) | ||
335 | { | ||
336 | } | ||
337 | |||
338 | static int bpa_map_sg(struct device *hwdev, struct scatterlist *sg, | ||
339 | int nents, enum dma_data_direction direction) | ||
340 | { | ||
341 | int i; | ||
342 | |||
343 | for (i = 0; i < nents; i++, sg++) { | ||
344 | sg->dma_address = (page_to_phys(sg->page) + sg->offset) | ||
345 | | BPA_DMA_VALID; | ||
346 | sg->dma_length = sg->length; | ||
347 | } | ||
348 | |||
349 | return nents; | ||
350 | } | ||
351 | |||
352 | static void bpa_unmap_sg(struct device *hwdev, struct scatterlist *sg, | ||
353 | int nents, enum dma_data_direction direction) | ||
354 | { | ||
355 | } | ||
356 | |||
357 | static int bpa_dma_supported(struct device *dev, u64 mask) | ||
358 | { | ||
359 | return mask < 0x100000000ull; | ||
360 | } | ||
361 | |||
362 | void bpa_init_iommu(void) | ||
363 | { | ||
364 | bpa_map_iommu(); | ||
365 | |||
366 | /* Direct I/O, IOMMU off */ | ||
367 | ppc_md.iommu_dev_setup = iommu_dev_setup_null; | ||
368 | ppc_md.iommu_bus_setup = iommu_bus_setup_null; | ||
369 | |||
370 | pci_dma_ops.alloc_coherent = bpa_alloc_coherent; | ||
371 | pci_dma_ops.free_coherent = bpa_free_coherent; | ||
372 | pci_dma_ops.map_single = bpa_map_single; | ||
373 | pci_dma_ops.unmap_single = bpa_unmap_single; | ||
374 | pci_dma_ops.map_sg = bpa_map_sg; | ||
375 | pci_dma_ops.unmap_sg = bpa_unmap_sg; | ||
376 | pci_dma_ops.dma_supported = bpa_dma_supported; | ||
377 | } | ||
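A minimal sketch (mine, not from the patch) of what the direct-mapping path above hands out: bpa_map_single() and bpa_map_sg() simply OR the Spider bridge's valid bit into the absolute physical address, so the usable bus-address payload stays below 2GB. The two constants are copied from bpa_iommu.h, which follows below.

/* Sketch only: the bus-address arithmetic of bpa_map_single()/bpa_map_sg().
 * BPA_DMA_VALID/BPA_DMA_MASK are copied from bpa_iommu.h below. */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

#define BPA_DMA_VALID 0x80000000u	/* high bit the Spider bridge expects */
#define BPA_DMA_MASK  0x7fffffffu	/* only 2GB of bus addresses remain */

static uint64_t bpa_bus_addr(uint64_t phys)
{
	return phys | BPA_DMA_VALID;	/* no TCE allocation, just set the bit */
}

int main(void)
{
	uint64_t bus = bpa_bus_addr(0x12345000);	/* hypothetical page address */

	assert(bus & BPA_DMA_VALID);			/* valid bit is set */
	assert((bus & BPA_DMA_MASK) == 0x12345000);	/* payload unchanged */
	printf("bus address %#llx\n", (unsigned long long)bus);
	return 0;
}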
diff --git a/arch/ppc64/kernel/bpa_iommu.h b/arch/ppc64/kernel/bpa_iommu.h new file mode 100644 index 000000000000..e547d77dfa04 --- /dev/null +++ b/arch/ppc64/kernel/bpa_iommu.h | |||
@@ -0,0 +1,65 @@ | |||
1 | #ifndef BPA_IOMMU_H | ||
2 | #define BPA_IOMMU_H | ||
3 | |||
4 | /* some constants */ | ||
5 | enum { | ||
6 | /* segment table entries */ | ||
7 | IOST_VALID_MASK = 0x8000000000000000ul, | ||
8 | IOST_TAG_MASK = 0x3000000000000000ul, | ||
9 | IOST_PT_BASE_MASK = 0x000003fffffff000ul, | ||
10 | IOST_NNPT_MASK = 0x0000000000000fe0ul, | ||
11 | IOST_PS_MASK = 0x000000000000000ful, | ||
12 | |||
13 | IOST_PS_4K = 0x1, | ||
14 | IOST_PS_64K = 0x3, | ||
15 | IOST_PS_1M = 0x5, | ||
16 | IOST_PS_16M = 0x7, | ||
17 | |||
18 | /* iopt tag register */ | ||
19 | IOPT_VALID_MASK = 0x0000000200000000ul, | ||
20 | IOPT_TAG_MASK = 0x00000001fffffffful, | ||
21 | |||
22 | /* iopt cache register */ | ||
23 | IOPT_PROT_MASK = 0xc000000000000000ul, | ||
24 | IOPT_PROT_NONE = 0x0000000000000000ul, | ||
25 | IOPT_PROT_READ = 0x4000000000000000ul, | ||
26 | IOPT_PROT_WRITE = 0x8000000000000000ul, | ||
27 | IOPT_PROT_RW = 0xc000000000000000ul, | ||
28 | IOPT_COHERENT = 0x2000000000000000ul, | ||
29 | |||
30 | IOPT_ORDER_MASK = 0x1800000000000000ul, | ||
31 | /* order access to same IOID/VC on same address */ | ||
32 | IOPT_ORDER_ADDR = 0x0800000000000000ul, | ||
33 | /* similar, but only after a write access */ | ||
34 | IOPT_ORDER_WRITES = 0x1000000000000000ul, | ||
35 | /* Order all accesses to same IOID/VC */ | ||
36 | IOPT_ORDER_VC = 0x1800000000000000ul, | ||
37 | |||
38 | IOPT_RPN_MASK = 0x000003fffffff000ul, | ||
39 | IOPT_HINT_MASK = 0x0000000000000800ul, | ||
40 | IOPT_IOID_MASK = 0x00000000000007fful, | ||
41 | |||
42 | IOSTO_ENABLE = 0x8000000000000000ul, | ||
43 | IOSTO_ORIGIN = 0x000003fffffff000ul, | ||
44 | IOSTO_HW = 0x0000000000000800ul, | ||
45 | IOSTO_SW = 0x0000000000000400ul, | ||
46 | |||
47 | IOCMD_CONF_TE = 0x0000800000000000ul, | ||
48 | |||
49 | /* memory mapped registers */ | ||
50 | IOC_PT_CACHE_DIR = 0x000, | ||
51 | IOC_ST_CACHE_DIR = 0x800, | ||
52 | IOC_PT_CACHE_REG = 0x910, | ||
53 | IOC_ST_ORIGIN = 0x918, | ||
54 | IOC_CONF = 0x930, | ||
55 | |||
56 | /* The high bit needs to be set on every DMA address; | ||
57 | only 2GB are addressable */ | ||
58 | BPA_DMA_VALID = 0x80000000, | ||
59 | BPA_DMA_MASK = 0x7fffffff, | ||
60 | }; | ||
61 | |||
62 | |||
63 | void bpa_init_iommu(void); | ||
64 | |||
65 | #endif | ||
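One detail worth spelling out, based on my reading of get_ioptep() in bpa_iommu.c above (an observation, not a statement from the hardware spec): the IOST_PS_* encodings double as shift amounts, because 10 + 2 * PS is exactly log2 of the page size. A quick stand-alone check:

/* Check that page size == 1 << (10 + 2 * PS) for all IOST_PS_* encodings,
 * which is the relation get_ioptep() relies on. */
#include <assert.h>

enum { IOST_PS_4K = 0x1, IOST_PS_64K = 0x3, IOST_PS_1M = 0x5, IOST_PS_16M = 0x7 };

int main(void)
{
	assert((1ul << (10 + 2 * IOST_PS_4K))  == 0x1000);	/* 4KB */
	assert((1ul << (10 + 2 * IOST_PS_64K)) == 0x10000);	/* 64KB */
	assert((1ul << (10 + 2 * IOST_PS_1M))  == 0x100000);	/* 1MB */
	assert((1ul << (10 + 2 * IOST_PS_16M)) == 0x1000000);	/* 16MB */
	return 0;
}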
diff --git a/arch/ppc64/kernel/bpa_nvram.c b/arch/ppc64/kernel/bpa_nvram.c new file mode 100644 index 000000000000..06a119cfceb5 --- /dev/null +++ b/arch/ppc64/kernel/bpa_nvram.c | |||
@@ -0,0 +1,118 @@ | |||
1 | /* | ||
2 | * NVRAM for CPBW | ||
3 | * | ||
4 | * (C) Copyright IBM Corp. 2005 | ||
5 | * | ||
6 | * Authors : Utz Bacher <utz.bacher@de.ibm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/fs.h> | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/types.h> | ||
28 | |||
29 | #include <asm/machdep.h> | ||
30 | #include <asm/nvram.h> | ||
31 | #include <asm/prom.h> | ||
32 | |||
33 | static void __iomem *bpa_nvram_start; | ||
34 | static long bpa_nvram_len; | ||
35 | static spinlock_t bpa_nvram_lock = SPIN_LOCK_UNLOCKED; | ||
36 | |||
37 | static ssize_t bpa_nvram_read(char *buf, size_t count, loff_t *index) | ||
38 | { | ||
39 | unsigned long flags; | ||
40 | |||
41 | if (*index >= bpa_nvram_len) | ||
42 | return 0; | ||
43 | if (*index + count > bpa_nvram_len) | ||
44 | count = bpa_nvram_len - *index; | ||
45 | |||
46 | spin_lock_irqsave(&bpa_nvram_lock, flags); | ||
47 | |||
48 | memcpy_fromio(buf, bpa_nvram_start + *index, count); | ||
49 | |||
50 | spin_unlock_irqrestore(&bpa_nvram_lock, flags); | ||
51 | |||
52 | *index += count; | ||
53 | return count; | ||
54 | } | ||
55 | |||
56 | static ssize_t bpa_nvram_write(char *buf, size_t count, loff_t *index) | ||
57 | { | ||
58 | unsigned long flags; | ||
59 | |||
60 | if (*index >= bpa_nvram_len) | ||
61 | return 0; | ||
62 | if (*index + count > bpa_nvram_len) | ||
63 | count = bpa_nvram_len - *index; | ||
64 | |||
65 | spin_lock_irqsave(&bpa_nvram_lock, flags); | ||
66 | |||
67 | memcpy_toio(bpa_nvram_start + *index, buf, count); | ||
68 | |||
69 | spin_unlock_irqrestore(&bpa_nvram_lock, flags); | ||
70 | |||
71 | *index += count; | ||
72 | return count; | ||
73 | } | ||
74 | |||
75 | static ssize_t bpa_nvram_get_size(void) | ||
76 | { | ||
77 | return bpa_nvram_len; | ||
78 | } | ||
79 | |||
80 | int __init bpa_nvram_init(void) | ||
81 | { | ||
82 | struct device_node *nvram_node; | ||
83 | unsigned long *buffer; | ||
84 | int proplen; | ||
85 | unsigned long nvram_addr; | ||
86 | int ret; | ||
87 | |||
88 | ret = -ENODEV; | ||
89 | nvram_node = of_find_node_by_type(NULL, "nvram"); | ||
90 | if (!nvram_node) | ||
91 | goto out; | ||
92 | |||
93 | ret = -EIO; | ||
94 | buffer = (unsigned long *)get_property(nvram_node, "reg", &proplen); | ||
95 | if (proplen != 2*sizeof(unsigned long)) | ||
96 | goto out; | ||
97 | |||
98 | ret = -ENODEV; | ||
99 | nvram_addr = buffer[0]; | ||
100 | bpa_nvram_len = buffer[1]; | ||
101 | if (!bpa_nvram_len || !nvram_addr) | ||
102 | goto out; | ||
103 | |||
104 | bpa_nvram_start = ioremap(nvram_addr, bpa_nvram_len); | ||
105 | if (!bpa_nvram_start) | ||
106 | goto out; | ||
107 | |||
108 | printk(KERN_INFO "BPA NVRAM, %luk mapped to %p\n", | ||
109 | bpa_nvram_len >> 10, bpa_nvram_start); | ||
110 | |||
111 | ppc_md.nvram_read = bpa_nvram_read; | ||
112 | ppc_md.nvram_write = bpa_nvram_write; | ||
113 | ppc_md.nvram_size = bpa_nvram_get_size; | ||
114 | ret = 0; | ||
115 | out: | ||
116 | of_node_put(nvram_node); | ||
117 | return ret; | ||
118 | } | ||
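The read and write helpers above share one piece of logic: clamp the transfer at the end of NVRAM and advance the caller's offset. A user-space restatement of just that logic (illustration only; the sizes in main() are hypothetical):

/* Restatement of the clamp-and-advance bounds handling used by
 * bpa_nvram_read() and bpa_nvram_write() above. */
#include <stdio.h>
#include <stddef.h>

static size_t nvram_clamp(size_t nvram_len, size_t count, long *index)
{
	if ((size_t)*index >= nvram_len)
		return 0;			/* already past the end */
	if (*index + count > nvram_len)
		count = nvram_len - *index;	/* truncate at the end */
	*index += count;			/* advance the caller's offset */
	return count;
}

int main(void)
{
	long pos = 0x3f00;
	/* 16KB part, 512-byte request near the end: only 0x100 bytes move */
	printf("%zu\n", nvram_clamp(0x4000, 0x200, &pos));
	return 0;
}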
diff --git a/arch/ppc64/kernel/bpa_setup.c b/arch/ppc64/kernel/bpa_setup.c new file mode 100644 index 000000000000..57b3db66f458 --- /dev/null +++ b/arch/ppc64/kernel/bpa_setup.c | |||
@@ -0,0 +1,140 @@ | |||
1 | /* | ||
2 | * linux/arch/ppc64/kernel/bpa_setup.c | ||
3 | * | ||
4 | * Copyright (C) 1995 Linus Torvalds | ||
5 | * Adapted from 'alpha' version by Gary Thomas | ||
6 | * Modified by Cort Dougan (cort@cs.nmt.edu) | ||
7 | * Modified by PPC64 Team, IBM Corp | ||
8 | * Modified by BPA Team, IBM Deutschland Entwicklung GmbH | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License | ||
12 | * as published by the Free Software Foundation; either version | ||
13 | * 2 of the License, or (at your option) any later version. | ||
14 | */ | ||
15 | #undef DEBUG | ||
16 | |||
17 | #include <linux/config.h> | ||
18 | #include <linux/sched.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/mm.h> | ||
21 | #include <linux/stddef.h> | ||
22 | #include <linux/unistd.h> | ||
23 | #include <linux/slab.h> | ||
24 | #include <linux/user.h> | ||
25 | #include <linux/reboot.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/delay.h> | ||
28 | #include <linux/irq.h> | ||
29 | #include <linux/seq_file.h> | ||
30 | #include <linux/root_dev.h> | ||
31 | #include <linux/console.h> | ||
32 | |||
33 | #include <asm/mmu.h> | ||
34 | #include <asm/processor.h> | ||
35 | #include <asm/io.h> | ||
36 | #include <asm/pgtable.h> | ||
37 | #include <asm/prom.h> | ||
38 | #include <asm/rtas.h> | ||
39 | #include <asm/pci-bridge.h> | ||
40 | #include <asm/iommu.h> | ||
41 | #include <asm/dma.h> | ||
42 | #include <asm/machdep.h> | ||
43 | #include <asm/time.h> | ||
44 | #include <asm/nvram.h> | ||
45 | #include <asm/cputable.h> | ||
46 | |||
47 | #include "pci.h" | ||
48 | #include "bpa_iic.h" | ||
49 | #include "bpa_iommu.h" | ||
50 | |||
51 | #ifdef DEBUG | ||
52 | #define DBG(fmt...) udbg_printf(fmt) | ||
53 | #else | ||
54 | #define DBG(fmt...) | ||
55 | #endif | ||
56 | |||
57 | void bpa_get_cpuinfo(struct seq_file *m) | ||
58 | { | ||
59 | struct device_node *root; | ||
60 | const char *model = ""; | ||
61 | |||
62 | root = of_find_node_by_path("/"); | ||
63 | if (root) | ||
64 | model = get_property(root, "model", NULL); | ||
65 | seq_printf(m, "machine\t\t: BPA %s\n", model); | ||
66 | of_node_put(root); | ||
67 | } | ||
68 | |||
69 | static void bpa_progress(char *s, unsigned short hex) | ||
70 | { | ||
71 | printk("*** %04x : %s\n", hex, s ? s : ""); | ||
72 | } | ||
73 | |||
74 | static void __init bpa_setup_arch(void) | ||
75 | { | ||
76 | ppc_md.init_IRQ = iic_init_IRQ; | ||
77 | ppc_md.get_irq = iic_get_irq; | ||
78 | |||
79 | #ifdef CONFIG_SMP | ||
80 | smp_init_pSeries(); | ||
81 | #endif | ||
82 | |||
83 | /* init to some ~sane value until calibrate_delay() runs */ | ||
84 | loops_per_jiffy = 50000000; | ||
85 | |||
86 | if (ROOT_DEV == 0) { | ||
87 | printk("No ramdisk, default root is /dev/hda2\n"); | ||
88 | ROOT_DEV = Root_HDA2; | ||
89 | } | ||
90 | |||
91 | /* Find and initialize PCI host bridges */ | ||
92 | init_pci_config_tokens(); | ||
93 | find_and_init_phbs(); | ||
94 | spider_init_IRQ(); | ||
95 | #ifdef CONFIG_DUMMY_CONSOLE | ||
96 | conswitchp = &dummy_con; | ||
97 | #endif | ||
98 | |||
99 | bpa_nvram_init(); | ||
100 | } | ||
101 | |||
102 | /* | ||
103 | * Early initialization. Relocation is on but do not reference unbolted pages | ||
104 | */ | ||
105 | static void __init bpa_init_early(void) | ||
106 | { | ||
107 | DBG(" -> bpa_init_early()\n"); | ||
108 | |||
109 | hpte_init_native(); | ||
110 | |||
111 | bpa_init_iommu(); | ||
112 | |||
113 | ppc64_interrupt_controller = IC_BPA_IIC; | ||
114 | |||
115 | DBG(" <- bpa_init_early()\n"); | ||
116 | } | ||
117 | |||
118 | |||
119 | static int __init bpa_probe(int platform) | ||
120 | { | ||
121 | if (platform != PLATFORM_BPA) | ||
122 | return 0; | ||
123 | |||
124 | return 1; | ||
125 | } | ||
126 | |||
127 | struct machdep_calls __initdata bpa_md = { | ||
128 | .probe = bpa_probe, | ||
129 | .setup_arch = bpa_setup_arch, | ||
130 | .init_early = bpa_init_early, | ||
131 | .get_cpuinfo = bpa_get_cpuinfo, | ||
132 | .restart = rtas_restart, | ||
133 | .power_off = rtas_power_off, | ||
134 | .halt = rtas_halt, | ||
135 | .get_boot_time = rtas_get_boot_time, | ||
136 | .get_rtc_time = rtas_get_rtc_time, | ||
137 | .set_rtc_time = rtas_set_rtc_time, | ||
138 | .calibrate_decr = generic_calibrate_decr, | ||
139 | .progress = bpa_progress, | ||
140 | }; | ||
diff --git a/arch/ppc64/kernel/cpu_setup_power4.S b/arch/ppc64/kernel/cpu_setup_power4.S index 3bd951820850..42fc08cf87a0 100644 --- a/arch/ppc64/kernel/cpu_setup_power4.S +++ b/arch/ppc64/kernel/cpu_setup_power4.S | |||
@@ -73,7 +73,21 @@ _GLOBAL(__970_cpu_preinit) | |||
73 | 73 | ||
74 | _GLOBAL(__setup_cpu_power4) | 74 | _GLOBAL(__setup_cpu_power4) |
75 | blr | 75 | blr |
76 | 76 | ||
77 | _GLOBAL(__setup_cpu_be) | ||
78 | /* Set large page sizes LP=0: 16MB, LP=1: 64KB */ | ||
79 | addi r3, 0, 0 | ||
80 | ori r3, r3, HID6_LB | ||
81 | sldi r3, r3, 32 | ||
82 | nor r3, r3, r3 | ||
83 | mfspr r4, SPRN_HID6 | ||
84 | and r4, r4, r3 | ||
85 | addi r3, 0, 0x02000 | ||
86 | sldi r3, r3, 32 | ||
87 | or r4, r4, r3 | ||
88 | mtspr SPRN_HID6, r4 | ||
89 | blr | ||
90 | |||
77 | _GLOBAL(__setup_cpu_ppc970) | 91 | _GLOBAL(__setup_cpu_ppc970) |
78 | mfspr r0,SPRN_HID0 | 92 | mfspr r0,SPRN_HID0 |
79 | li r11,5 /* clear DOZE and SLEEP */ | 93 | li r11,5 /* clear DOZE and SLEEP */ |
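A rough C paraphrase of the __setup_cpu_be sequence added above, to make the mask arithmetic explicit. HID6_LB is defined in the kernel headers and is not reproduced here, so it is a parameter; the register value in main() is purely hypothetical. This sketches what the assembly computes, it is not a drop-in replacement.

/* Paraphrase of __setup_cpu_be: clear the HID6 LB field (mask shifted into
 * the upper word) and set the LP encoding 0x02000, i.e. 16MB and 64KB
 * large pages per the comment in the assembly. */
#include <stdint.h>
#include <stdio.h>

static uint64_t setup_be_hid6(uint64_t hid6, uint64_t hid6_lb_mask)
{
	hid6 &= ~(hid6_lb_mask << 32);	/* the ori/sldi/nor/and sequence */
	hid6 |= 0x02000ull << 32;	/* the addi/sldi/or sequence */
	return hid6;			/* value written back via mtspr */
}

int main(void)
{
	/* hypothetical HID6 contents and LB mask, for illustration only */
	printf("%#llx\n", (unsigned long long)setup_be_hid6(0x123ull << 32, 0xfffull));
	return 0;
}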
diff --git a/arch/ppc64/kernel/cputable.c b/arch/ppc64/kernel/cputable.c index 8644a8648058..1d162c7c59df 100644 --- a/arch/ppc64/kernel/cputable.c +++ b/arch/ppc64/kernel/cputable.c | |||
@@ -34,6 +34,7 @@ EXPORT_SYMBOL(cur_cpu_spec); | |||
34 | extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec); | 34 | extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec* spec); |
35 | extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec); | 35 | extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec* spec); |
36 | extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); | 36 | extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec); |
37 | extern void __setup_cpu_be(unsigned long offset, struct cpu_spec* spec); | ||
37 | 38 | ||
38 | 39 | ||
39 | /* We only set the altivec features if the kernel was compiled with altivec | 40 | /* We only set the altivec features if the kernel was compiled with altivec |
@@ -162,6 +163,16 @@ struct cpu_spec cpu_specs[] = { | |||
162 | __setup_cpu_power4, | 163 | __setup_cpu_power4, |
163 | COMMON_PPC64_FW | 164 | COMMON_PPC64_FW |
164 | }, | 165 | }, |
166 | { /* BE DD1.x */ | ||
167 | 0xffff0000, 0x00700000, "Broadband Engine", | ||
168 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | | ||
169 | CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_ALTIVEC_COMP | | ||
170 | CPU_FTR_SMT, | ||
171 | COMMON_USER_PPC64 | PPC_FEATURE_HAS_ALTIVEC_COMP, | ||
172 | 128, 128, | ||
173 | __setup_cpu_be, | ||
174 | COMMON_PPC64_FW | ||
175 | }, | ||
165 | { /* default match */ | 176 | { /* default match */ |
166 | 0x00000000, 0x00000000, "POWER4 (compatible)", | 177 | 0x00000000, 0x00000000, "POWER4 (compatible)", |
167 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | | 178 | CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE | |
diff --git a/arch/ppc64/kernel/dma.c b/arch/ppc64/kernel/dma.c index ce714c927134..4da8e31b2b61 100644 --- a/arch/ppc64/kernel/dma.c +++ b/arch/ppc64/kernel/dma.c | |||
@@ -15,8 +15,10 @@ | |||
15 | 15 | ||
16 | static struct dma_mapping_ops *get_dma_ops(struct device *dev) | 16 | static struct dma_mapping_ops *get_dma_ops(struct device *dev) |
17 | { | 17 | { |
18 | #ifdef CONFIG_PCI | ||
18 | if (dev->bus == &pci_bus_type) | 19 | if (dev->bus == &pci_bus_type) |
19 | return &pci_dma_ops; | 20 | return &pci_dma_ops; |
21 | #endif | ||
20 | #ifdef CONFIG_IBMVIO | 22 | #ifdef CONFIG_IBMVIO |
21 | if (dev->bus == &vio_bus_type) | 23 | if (dev->bus == &vio_bus_type) |
22 | return &vio_dma_ops; | 24 | return &vio_dma_ops; |
@@ -37,8 +39,10 @@ EXPORT_SYMBOL(dma_supported); | |||
37 | 39 | ||
38 | int dma_set_mask(struct device *dev, u64 dma_mask) | 40 | int dma_set_mask(struct device *dev, u64 dma_mask) |
39 | { | 41 | { |
42 | #ifdef CONFIG_PCI | ||
40 | if (dev->bus == &pci_bus_type) | 43 | if (dev->bus == &pci_bus_type) |
41 | return pci_set_dma_mask(to_pci_dev(dev), dma_mask); | 44 | return pci_set_dma_mask(to_pci_dev(dev), dma_mask); |
45 | #endif | ||
42 | #ifdef CONFIG_IBMVIO | 46 | #ifdef CONFIG_IBMVIO |
43 | if (dev->bus == &vio_bus_type) | 47 | if (dev->bus == &vio_bus_type) |
44 | return -EIO; | 48 | return -EIO; |
diff --git a/arch/ppc64/kernel/eeh.c b/arch/ppc64/kernel/eeh.c index d63d41f3eecf..af5272fedadf 100644 --- a/arch/ppc64/kernel/eeh.c +++ b/arch/ppc64/kernel/eeh.c | |||
@@ -505,7 +505,7 @@ static inline unsigned long eeh_token_to_phys(unsigned long token) | |||
505 | pte_t *ptep; | 505 | pte_t *ptep; |
506 | unsigned long pa; | 506 | unsigned long pa; |
507 | 507 | ||
508 | ptep = find_linux_pte(ioremap_mm.pgd, token); | 508 | ptep = find_linux_pte(init_mm.pgd, token); |
509 | if (!ptep) | 509 | if (!ptep) |
510 | return token; | 510 | return token; |
511 | pa = pte_pfn(*ptep) << PAGE_SHIFT; | 511 | pa = pte_pfn(*ptep) << PAGE_SHIFT; |
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S index 346dbf606b5d..675c2708588f 100644 --- a/arch/ppc64/kernel/head.S +++ b/arch/ppc64/kernel/head.S | |||
@@ -1194,7 +1194,7 @@ _GLOBAL(pSeries_secondary_smp_init) | |||
1194 | bl .__restore_cpu_setup | 1194 | bl .__restore_cpu_setup |
1195 | 1195 | ||
1196 | /* Set up a paca value for this processor. Since we have the | 1196 | /* Set up a paca value for this processor. Since we have the |
1197 | * physical cpu id in r3, we need to search the pacas to find | 1197 | * physical cpu id in r24, we need to search the pacas to find |
1198 | * which logical id maps to our physical one. | 1198 | * which logical id maps to our physical one. |
1199 | */ | 1199 | */ |
1200 | LOADADDR(r13, paca) /* Get base vaddr of paca array */ | 1200 | LOADADDR(r13, paca) /* Get base vaddr of paca array */ |
@@ -1207,8 +1207,8 @@ _GLOBAL(pSeries_secondary_smp_init) | |||
1207 | cmpwi r5,NR_CPUS | 1207 | cmpwi r5,NR_CPUS |
1208 | blt 1b | 1208 | blt 1b |
1209 | 1209 | ||
1210 | 99: HMT_LOW /* Couldn't find our CPU id */ | 1210 | mr r3,r24 /* not found, copy phys to r3 */ |
1211 | b 99b | 1211 | b .kexec_wait /* next kernel might do better */ |
1212 | 1212 | ||
1213 | 2: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ | 1213 | 2: mtspr SPRG3,r13 /* Save vaddr of paca in SPRG3 */ |
1214 | /* From now on, r24 is expected to be logical cpuid */ | 1214 | /* From now on, r24 is expected to be logical cpuid */ |
@@ -2121,10 +2121,6 @@ empty_zero_page: | |||
2121 | swapper_pg_dir: | 2121 | swapper_pg_dir: |
2122 | .space 4096 | 2122 | .space 4096 |
2123 | 2123 | ||
2124 | .globl ioremap_dir | ||
2125 | ioremap_dir: | ||
2126 | .space 4096 | ||
2127 | |||
2128 | #ifdef CONFIG_SMP | 2124 | #ifdef CONFIG_SMP |
2129 | /* 1 page segment table per cpu (max 48, cpu0 allocated at STAB0_PHYS_ADDR) */ | 2125 | /* 1 page segment table per cpu (max 48, cpu0 allocated at STAB0_PHYS_ADDR) */ |
2130 | .globl stab_array | 2126 | .globl stab_array |
diff --git a/arch/ppc64/kernel/iSeries_VpdInfo.c b/arch/ppc64/kernel/iSeries_VpdInfo.c index a6f0ff2d0239..d11c732daf81 100644 --- a/arch/ppc64/kernel/iSeries_VpdInfo.c +++ b/arch/ppc64/kernel/iSeries_VpdInfo.c | |||
@@ -1,31 +1,31 @@ | |||
1 | /************************************************************************/ | 1 | /* |
2 | /* File iSeries_vpdInfo.c created by Allan Trautman on Fri Feb 2 2001. */ | 2 | * File iSeries_vpdInfo.c created by Allan Trautman on Fri Feb 2 2001. |
3 | /************************************************************************/ | 3 | * |
4 | /* This code gets the card location of the hardware */ | 4 | * This code gets the card location of the hardware |
5 | /* Copyright (C) 20yy <Allan H Trautman> <IBM Corp> */ | 5 | * Copyright (C) 2001 <Allan H Trautman> <IBM Corp> |
6 | /* */ | 6 | * Copyright (C) 2005 Stephen Rothwell, IBM Corp |
7 | /* This program is free software; you can redistribute it and/or modify */ | 7 | * |
8 | /* it under the terms of the GNU General Public License as published by */ | 8 | * This program is free software; you can redistribute it and/or modify |
9 | /* the Free Software Foundation; either version 2 of the License, or */ | 9 | * it under the terms of the GNU General Public License as published by |
10 | /* (at your option) any later version. */ | 10 | * the Free Software Foundation; either version 2 of the License, or |
11 | /* */ | 11 | * (at your option) any later version. |
12 | /* This program is distributed in the hope that it will be useful, */ | 12 | * |
13 | /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ | 13 | * This program is distributed in the hope that it will be useful, |
14 | /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ | 14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
15 | /* GNU General Public License for more details. */ | 15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
16 | /* */ | 16 | * GNU General Public License for more details. |
17 | /* You should have received a copy of the GNU General Public License */ | 17 | * |
18 | /* along with this program; if not, write to the: */ | 18 | * You should have received a copy of the GNU General Public License |
19 | /* Free Software Foundation, Inc., */ | 19 | * along with this program; if not, write to the: |
20 | /* 59 Temple Place, Suite 330, */ | 20 | * Free Software Foundation, Inc., |
21 | /* Boston, MA 02111-1307 USA */ | 21 | * 59 Temple Place, Suite 330, |
22 | /************************************************************************/ | 22 | * Boston, MA 02111-1307 USA |
23 | /* Change Activity: */ | 23 | * |
24 | /* Created, Feb 2, 2001 */ | 24 | * Change Activity: |
25 | /* Ported to ppc64, August 20, 2001 */ | 25 | * Created, Feb 2, 2001 |
26 | /* End Change Activity */ | 26 | * Ported to ppc64, August 20, 2001 |
27 | /************************************************************************/ | 27 | * End Change Activity |
28 | #include <linux/config.h> | 28 | */ |
29 | #include <linux/init.h> | 29 | #include <linux/init.h> |
30 | #include <linux/module.h> | 30 | #include <linux/module.h> |
31 | #include <linux/pci.h> | 31 | #include <linux/pci.h> |
@@ -34,30 +34,25 @@ | |||
34 | 34 | ||
35 | #include <asm/iSeries/HvCallPci.h> | 35 | #include <asm/iSeries/HvCallPci.h> |
36 | #include <asm/iSeries/HvTypes.h> | 36 | #include <asm/iSeries/HvTypes.h> |
37 | #include <asm/iSeries/mf.h> | ||
38 | #include <asm/iSeries/LparData.h> | ||
39 | #include <asm/iSeries/iSeries_pci.h> | 37 | #include <asm/iSeries/iSeries_pci.h> |
40 | #include "pci.h" | ||
41 | 38 | ||
42 | /* | 39 | /* |
43 | * Size of Bus VPD data | 40 | * Size of Bus VPD data |
44 | */ | 41 | */ |
45 | #define BUS_VPDSIZE 1024 | 42 | #define BUS_VPDSIZE 1024 |
43 | |||
46 | /* | 44 | /* |
47 | * Bus Vpd Tags | 45 | * Bus Vpd Tags |
48 | */ | 46 | */ |
49 | #define VpdEndOfDataTag 0x78 | ||
50 | #define VpdEndOfAreaTag 0x79 | 47 | #define VpdEndOfAreaTag 0x79 |
51 | #define VpdIdStringTag 0x82 | 48 | #define VpdIdStringTag 0x82 |
52 | #define VpdVendorAreaTag 0x84 | 49 | #define VpdVendorAreaTag 0x84 |
50 | |||
53 | /* | 51 | /* |
54 | * Mfg Area Tags | 52 | * Mfg Area Tags |
55 | */ | 53 | */ |
56 | #define VpdFruFlag 0x4647 // "FG" | ||
57 | #define VpdFruFrameId 0x4649 // "FI" | 54 | #define VpdFruFrameId 0x4649 // "FI" |
58 | #define VpdSlotMapFormat 0x4D46 // "MF" | 55 | #define VpdSlotMapFormat 0x4D46 // "MF" |
59 | #define VpdAsmPartNumber 0x504E // "PN" | ||
60 | #define VpdFruSerial 0x534E // "SN" | ||
61 | #define VpdSlotMap 0x534D // "SM" | 56 | #define VpdSlotMap 0x534D // "SM" |
62 | 57 | ||
63 | /* | 58 | /* |
@@ -79,74 +74,33 @@ struct SlotMapStruct { | |||
79 | char CardLocation[3]; | 74 | char CardLocation[3]; |
80 | char Parms[8]; | 75 | char Parms[8]; |
81 | char Reserved[2]; | 76 | char Reserved[2]; |
82 | }; | 77 | }; |
83 | typedef struct SlotMapStruct SlotMap; | 78 | typedef struct SlotMapStruct SlotMap; |
84 | #define SLOT_ENTRY_SIZE 16 | 79 | #define SLOT_ENTRY_SIZE 16 |
85 | 80 | ||
86 | /* | 81 | /* |
87 | * Formats the device information. | ||
88 | * - Pass in pci_dev* pointer to the device. | ||
89 | * - Pass in buffer to place the data. Danger here is the buffer must | ||
90 | * be as big as the client says it is. Should be at least 128 bytes. | ||
91 | * Return will the length of the string data put in the buffer. | ||
92 | * Format: | ||
93 | * PCI: Bus 0, Device 26, Vendor 0x12AE Frame 1, Card C10 Ethernet | ||
94 | * controller | ||
95 | */ | ||
96 | int iSeries_Device_Information(struct pci_dev *PciDev, char *buffer, | ||
97 | int BufferSize) | ||
98 | { | ||
99 | struct iSeries_Device_Node *DevNode = | ||
100 | (struct iSeries_Device_Node *)PciDev->sysdata; | ||
101 | int len; | ||
102 | |||
103 | if (DevNode == NULL) | ||
104 | return sprintf(buffer, | ||
105 | "PCI: iSeries_Device_Information DevNode is NULL"); | ||
106 | |||
107 | if (BufferSize < 128) | ||
108 | return 0; | ||
109 | |||
110 | len = sprintf(buffer, "PCI: Bus%3d, Device%3d, Vendor %04X ", | ||
111 | ISERIES_BUS(DevNode), PCI_SLOT(PciDev->devfn), | ||
112 | PciDev->vendor); | ||
113 | len += sprintf(buffer + len, "Frame%3d, Card %4s ", | ||
114 | DevNode->FrameId, DevNode->CardLocation); | ||
115 | #ifdef CONFIG_PCI | ||
116 | if (pci_class_name(PciDev->class >> 8) == 0) | ||
117 | len += sprintf(buffer + len, "0x%04X ", | ||
118 | (int)(PciDev->class >> 8)); | ||
119 | else | ||
120 | len += sprintf(buffer + len, "%s", | ||
121 | pci_class_name(PciDev->class >> 8)); | ||
122 | #endif | ||
123 | return len; | ||
124 | } | ||
125 | |||
126 | /* | ||
127 | * Parse the Slot Area | 82 | * Parse the Slot Area |
128 | */ | 83 | */ |
129 | void iSeries_Parse_SlotArea(SlotMap *MapPtr, int MapLen, | 84 | static void __init iSeries_Parse_SlotArea(SlotMap *MapPtr, int MapLen, |
130 | struct iSeries_Device_Node *DevNode) | 85 | HvAgentId agent, u8 *PhbId, char card[4]) |
131 | { | 86 | { |
132 | int SlotMapLen = MapLen; | 87 | int SlotMapLen = MapLen; |
133 | SlotMap *SlotMapPtr = MapPtr; | 88 | SlotMap *SlotMapPtr = MapPtr; |
134 | 89 | ||
135 | /* | 90 | /* |
136 | * Parse Slot label until we find the one requrested | 91 | * Parse Slot label until we find the one requested |
137 | */ | 92 | */ |
138 | while (SlotMapLen > 0) { | 93 | while (SlotMapLen > 0) { |
139 | if (SlotMapPtr->AgentId == DevNode->AgentId ) { | 94 | if (SlotMapPtr->AgentId == agent) { |
140 | /* | 95 | /* |
141 | * If Phb wasn't found, grab the first entry found. | 96 | * If Phb wasn't found, grab the first entry found. |
142 | */ | 97 | */ |
143 | if (DevNode->PhbId == 0xff) | 98 | if (*PhbId == 0xff) |
144 | DevNode->PhbId = SlotMapPtr->PhbId; | 99 | *PhbId = SlotMapPtr->PhbId; |
145 | /* Found it, extract the data. */ | 100 | /* Found it, extract the data. */ |
146 | if (SlotMapPtr->PhbId == DevNode->PhbId ) { | 101 | if (SlotMapPtr->PhbId == *PhbId) { |
147 | memcpy(&DevNode->CardLocation, | 102 | memcpy(card, &SlotMapPtr->CardLocation, 3); |
148 | &SlotMapPtr->CardLocation, 3); | 103 | card[3] = 0; |
149 | DevNode->CardLocation[3] = 0; | ||
150 | break; | 104 | break; |
151 | } | 105 | } |
152 | } | 106 | } |
@@ -159,8 +113,9 @@ void iSeries_Parse_SlotArea(SlotMap *MapPtr, int MapLen, | |||
159 | /* | 113 | /* |
160 | * Parse the Mfg Area | 114 | * Parse the Mfg Area |
161 | */ | 115 | */ |
162 | static void iSeries_Parse_MfgArea(u8 *AreaData, int AreaLen, | 116 | static void __init iSeries_Parse_MfgArea(u8 *AreaData, int AreaLen, |
163 | struct iSeries_Device_Node *DevNode) | 117 | HvAgentId agent, u8 *PhbId, |
118 | u8 *frame, char card[4]) | ||
164 | { | 119 | { |
165 | MfgArea *MfgAreaPtr = (MfgArea *)AreaData; | 120 | MfgArea *MfgAreaPtr = (MfgArea *)AreaData; |
166 | int MfgAreaLen = AreaLen; | 121 | int MfgAreaLen = AreaLen; |
@@ -171,7 +126,7 @@ static void iSeries_Parse_MfgArea(u8 *AreaData, int AreaLen, | |||
171 | int MfgTagLen = MfgAreaPtr->TagLength; | 126 | int MfgTagLen = MfgAreaPtr->TagLength; |
172 | /* Frame ID (FI 4649020310 ) */ | 127 | /* Frame ID (FI 4649020310 ) */ |
173 | if (MfgAreaPtr->Tag == VpdFruFrameId) /* FI */ | 128 | if (MfgAreaPtr->Tag == VpdFruFrameId) /* FI */ |
174 | DevNode->FrameId = MfgAreaPtr->AreaData1; | 129 | *frame = MfgAreaPtr->AreaData1; |
175 | /* Slot Map Format (MF 4D46020004 ) */ | 130 | /* Slot Map Format (MF 4D46020004 ) */ |
176 | else if (MfgAreaPtr->Tag == VpdSlotMapFormat) /* MF */ | 131 | else if (MfgAreaPtr->Tag == VpdSlotMapFormat) /* MF */ |
177 | SlotMapFmt = (MfgAreaPtr->AreaData1 * 256) | 132 | SlotMapFmt = (MfgAreaPtr->AreaData1 * 256) |
@@ -183,10 +138,11 @@ static void iSeries_Parse_MfgArea(u8 *AreaData, int AreaLen, | |||
183 | if (SlotMapFmt == 0x1004) | 138 | if (SlotMapFmt == 0x1004) |
184 | SlotMapPtr = (SlotMap *)((char *)MfgAreaPtr | 139 | SlotMapPtr = (SlotMap *)((char *)MfgAreaPtr |
185 | + MFG_ENTRY_SIZE + 1); | 140 | + MFG_ENTRY_SIZE + 1); |
186 | else | 141 | else |
187 | SlotMapPtr = (SlotMap *)((char *)MfgAreaPtr | 142 | SlotMapPtr = (SlotMap *)((char *)MfgAreaPtr |
188 | + MFG_ENTRY_SIZE); | 143 | + MFG_ENTRY_SIZE); |
189 | iSeries_Parse_SlotArea(SlotMapPtr, MfgTagLen, DevNode); | 144 | iSeries_Parse_SlotArea(SlotMapPtr, MfgTagLen, |
145 | agent, PhbId, card); | ||
190 | } | 146 | } |
191 | /* | 147 | /* |
192 | * Point to the next Mfg Area | 148 | * Point to the next Mfg Area |
@@ -194,19 +150,19 @@ static void iSeries_Parse_MfgArea(u8 *AreaData, int AreaLen, | |||
194 | */ | 150 | */ |
195 | MfgAreaPtr = (MfgArea *)((char *)MfgAreaPtr + MfgTagLen | 151 | MfgAreaPtr = (MfgArea *)((char *)MfgAreaPtr + MfgTagLen |
196 | + MFG_ENTRY_SIZE); | 152 | + MFG_ENTRY_SIZE); |
197 | MfgAreaLen -= (MfgTagLen + MFG_ENTRY_SIZE); | 153 | MfgAreaLen -= (MfgTagLen + MFG_ENTRY_SIZE); |
198 | } | 154 | } |
199 | } | 155 | } |
200 | 156 | ||
201 | /* | 157 | /* |
202 | * Look for "BUS". Data is not null terminated. | 158 | * Look for "BUS". Data is not null terminated. |
203 | * PHBID of 0xFF indicates PHB was not found in VPD Data. | 159 | * PHBID of 0xFF indicates PHB was not found in VPD Data. |
204 | */ | 160 | */ |
205 | static int iSeries_Parse_PhbId(u8 *AreaPtr, int AreaLength) | 161 | static int __init iSeries_Parse_PhbId(u8 *AreaPtr, int AreaLength) |
206 | { | 162 | { |
207 | u8 *PhbPtr = AreaPtr; | 163 | u8 *PhbPtr = AreaPtr; |
208 | int DataLen = AreaLength; | 164 | int DataLen = AreaLength; |
209 | char PhbId = 0xFF; | 165 | char PhbId = 0xFF; |
210 | 166 | ||
211 | while (DataLen > 0) { | 167 | while (DataLen > 0) { |
212 | if ((*PhbPtr == 'B') && (*(PhbPtr + 1) == 'U') | 168 | if ((*PhbPtr == 'B') && (*(PhbPtr + 1) == 'U') |
@@ -216,7 +172,7 @@ static int iSeries_Parse_PhbId(u8 *AreaPtr, int AreaLength) | |||
216 | ++PhbPtr; | 172 | ++PhbPtr; |
217 | PhbId = (*PhbPtr & 0x0F); | 173 | PhbId = (*PhbPtr & 0x0F); |
218 | break; | 174 | break; |
219 | } | 175 | } |
220 | ++PhbPtr; | 176 | ++PhbPtr; |
221 | --DataLen; | 177 | --DataLen; |
222 | } | 178 | } |
@@ -226,52 +182,90 @@ static int iSeries_Parse_PhbId(u8 *AreaPtr, int AreaLength) | |||
226 | /* | 182 | /* |
227 | * Parse out the VPD Areas | 183 | * Parse out the VPD Areas |
228 | */ | 184 | */ |
229 | static void iSeries_Parse_Vpd(u8 *VpdData, int VpdDataLen, | 185 | static void __init iSeries_Parse_Vpd(u8 *VpdData, int VpdDataLen, |
230 | struct iSeries_Device_Node *DevNode) | 186 | HvAgentId agent, u8 *frame, char card[4]) |
231 | { | 187 | { |
232 | u8 *TagPtr = VpdData; | 188 | u8 *TagPtr = VpdData; |
233 | int DataLen = VpdDataLen - 3; | 189 | int DataLen = VpdDataLen - 3; |
190 | u8 PhbId; | ||
234 | 191 | ||
235 | while ((*TagPtr != VpdEndOfAreaTag) && (DataLen > 0)) { | 192 | while ((*TagPtr != VpdEndOfAreaTag) && (DataLen > 0)) { |
236 | int AreaLen = *(TagPtr + 1) + (*(TagPtr + 2) * 256); | 193 | int AreaLen = *(TagPtr + 1) + (*(TagPtr + 2) * 256); |
237 | u8 *AreaData = TagPtr + 3; | 194 | u8 *AreaData = TagPtr + 3; |
238 | 195 | ||
239 | if (*TagPtr == VpdIdStringTag) | 196 | if (*TagPtr == VpdIdStringTag) |
240 | DevNode->PhbId = iSeries_Parse_PhbId(AreaData, AreaLen); | 197 | PhbId = iSeries_Parse_PhbId(AreaData, AreaLen); |
241 | else if (*TagPtr == VpdVendorAreaTag) | 198 | else if (*TagPtr == VpdVendorAreaTag) |
242 | iSeries_Parse_MfgArea(AreaData, AreaLen, DevNode); | 199 | iSeries_Parse_MfgArea(AreaData, AreaLen, |
200 | agent, &PhbId, frame, card); | ||
243 | /* Point to next Area. */ | 201 | /* Point to next Area. */ |
244 | TagPtr = AreaData + AreaLen; | 202 | TagPtr = AreaData + AreaLen; |
245 | DataLen -= AreaLen; | 203 | DataLen -= AreaLen; |
246 | } | 204 | } |
247 | } | 205 | } |
248 | 206 | ||
249 | void iSeries_Get_Location_Code(struct iSeries_Device_Node *DevNode) | 207 | static void __init iSeries_Get_Location_Code(u16 bus, HvAgentId agent, |
208 | u8 *frame, char card[4]) | ||
250 | { | 209 | { |
251 | int BusVpdLen = 0; | 210 | int BusVpdLen = 0; |
252 | u8 *BusVpdPtr = (u8 *)kmalloc(BUS_VPDSIZE, GFP_KERNEL); | 211 | u8 *BusVpdPtr = kmalloc(BUS_VPDSIZE, GFP_KERNEL); |
253 | 212 | ||
254 | if (BusVpdPtr == NULL) { | 213 | if (BusVpdPtr == NULL) { |
255 | printk("PCI: Bus VPD Buffer allocation failure.\n"); | 214 | printk("PCI: Bus VPD Buffer allocation failure.\n"); |
256 | return; | 215 | return; |
257 | } | 216 | } |
258 | BusVpdLen = HvCallPci_getBusVpd(ISERIES_BUS(DevNode), | 217 | BusVpdLen = HvCallPci_getBusVpd(bus, ISERIES_HV_ADDR(BusVpdPtr), |
259 | ISERIES_HV_ADDR(BusVpdPtr), | ||
260 | BUS_VPDSIZE); | 218 | BUS_VPDSIZE); |
261 | if (BusVpdLen == 0) { | 219 | if (BusVpdLen == 0) { |
262 | kfree(BusVpdPtr); | ||
263 | printk("PCI: Bus VPD Buffer zero length.\n"); | 220 | printk("PCI: Bus VPD Buffer zero length.\n"); |
264 | return; | 221 | goto out_free; |
265 | } | 222 | } |
266 | /* printk("PCI: BusVpdPtr: %p, %d\n",BusVpdPtr, BusVpdLen); */ | 223 | /* printk("PCI: BusVpdPtr: %p, %d\n",BusVpdPtr, BusVpdLen); */ |
267 | /* Make sure this is what I think it is */ | 224 | /* Make sure this is what I think it is */ |
268 | if (*BusVpdPtr != VpdIdStringTag) { /* 0x82 */ | 225 | if (*BusVpdPtr != VpdIdStringTag) { /* 0x82 */ |
269 | printk("PCI: Bus VPD Buffer missing starting tag.\n"); | 226 | printk("PCI: Bus VPD Buffer missing starting tag.\n"); |
270 | kfree(BusVpdPtr); | 227 | goto out_free; |
271 | return; | ||
272 | } | 228 | } |
273 | iSeries_Parse_Vpd(BusVpdPtr,BusVpdLen, DevNode); | 229 | iSeries_Parse_Vpd(BusVpdPtr, BusVpdLen, agent, frame, card); |
274 | sprintf(DevNode->Location, "Frame%3d, Card %-4s", DevNode->FrameId, | 230 | out_free: |
275 | DevNode->CardLocation); | ||
276 | kfree(BusVpdPtr); | 231 | kfree(BusVpdPtr); |
277 | } | 232 | } |
233 | |||
234 | /* | ||
235 | * Prints the device information. | ||
236 | * - Pass in pci_dev* pointer to the device. | ||
237 | * - Pass in the device count | ||
238 | * | ||
239 | * Format: | ||
240 | * PCI: Bus 0, Device 26, Vendor 0x12AE Frame 1, Card C10 Ethernet | ||
241 | * controller | ||
242 | */ | ||
243 | void __init iSeries_Device_Information(struct pci_dev *PciDev, int count) | ||
244 | { | ||
245 | struct iSeries_Device_Node *DevNode = PciDev->sysdata; | ||
246 | u16 bus; | ||
247 | u8 frame; | ||
248 | char card[4]; | ||
249 | HvSubBusNumber subbus; | ||
250 | HvAgentId agent; | ||
251 | |||
252 | if (DevNode == NULL) { | ||
253 | printk("%d. PCI: iSeries_Device_Information DevNode is NULL\n", | ||
254 | count); | ||
255 | return; | ||
256 | } | ||
257 | |||
258 | bus = ISERIES_BUS(DevNode); | ||
259 | subbus = ISERIES_SUBBUS(DevNode); | ||
260 | agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus), | ||
261 | ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus)); | ||
262 | iSeries_Get_Location_Code(bus, agent, &frame, card); | ||
263 | |||
264 | printk("%d. PCI: Bus%3d, Device%3d, Vendor %04X Frame%3d, Card %4s ", | ||
265 | count, bus, PCI_SLOT(PciDev->devfn), PciDev->vendor, | ||
266 | frame, card); | ||
267 | if (pci_class_name(PciDev->class >> 8) == 0) | ||
268 | printk("0x%04X\n", (int)(PciDev->class >> 8)); | ||
269 | else | ||
270 | printk("%s\n", pci_class_name(PciDev->class >> 8)); | ||
271 | } | ||
diff --git a/arch/ppc64/kernel/iSeries_iommu.c b/arch/ppc64/kernel/iSeries_iommu.c index 4e1a47c8a802..f8ff1bb054dc 100644 --- a/arch/ppc64/kernel/iSeries_iommu.c +++ b/arch/ppc64/kernel/iSeries_iommu.c | |||
@@ -83,7 +83,7 @@ static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages) | |||
83 | } | 83 | } |
84 | } | 84 | } |
85 | 85 | ||
86 | 86 | #ifdef CONFIG_PCI | |
87 | /* | 87 | /* |
88 | * This function compares the known tables to find an iommu_table | 88 | * This function compares the known tables to find an iommu_table |
89 | * that has already been built for hardware TCEs. | 89 | * that has already been built for hardware TCEs. |
@@ -159,6 +159,7 @@ void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn) | |||
159 | else | 159 | else |
160 | kfree(tbl); | 160 | kfree(tbl); |
161 | } | 161 | } |
162 | #endif | ||
162 | 163 | ||
163 | static void iommu_dev_setup_iSeries(struct pci_dev *dev) { } | 164 | static void iommu_dev_setup_iSeries(struct pci_dev *dev) { } |
164 | static void iommu_bus_setup_iSeries(struct pci_bus *bus) { } | 165 | static void iommu_bus_setup_iSeries(struct pci_bus *bus) { } |
diff --git a/arch/ppc64/kernel/iSeries_irq.c b/arch/ppc64/kernel/iSeries_irq.c index f831d259dbb7..77376c1bd611 100644 --- a/arch/ppc64/kernel/iSeries_irq.c +++ b/arch/ppc64/kernel/iSeries_irq.c | |||
@@ -1,27 +1,29 @@ | |||
1 | /************************************************************************/ | 1 | /* |
2 | /* This module supports the iSeries PCI bus interrupt handling */ | 2 | * This module supports the iSeries PCI bus interrupt handling |
3 | /* Copyright (C) 20yy <Robert L Holtorf> <IBM Corp> */ | 3 | * Copyright (C) 20yy <Robert L Holtorf> <IBM Corp> |
4 | /* */ | 4 | * Copyright (C) 2004-2005 IBM Corporation |
5 | /* This program is free software; you can redistribute it and/or modify */ | 5 | * |
6 | /* it under the terms of the GNU General Public License as published by */ | 6 | * This program is free software; you can redistribute it and/or modify |
7 | /* the Free Software Foundation; either version 2 of the License, or */ | 7 | * it under the terms of the GNU General Public License as published by |
8 | /* (at your option) any later version. */ | 8 | * the Free Software Foundation; either version 2 of the License, or |
9 | /* */ | 9 | * (at your option) any later version. |
10 | /* This program is distributed in the hope that it will be useful, */ | 10 | * |
11 | /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ | 11 | * This program is distributed in the hope that it will be useful, |
12 | /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ | 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | /* GNU General Public License for more details. */ | 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | /* */ | 14 | * GNU General Public License for more details. |
15 | /* You should have received a copy of the GNU General Public License */ | 15 | * |
16 | /* along with this program; if not, write to the: */ | 16 | * You should have received a copy of the GNU General Public License |
17 | /* Free Software Foundation, Inc., */ | 17 | * along with this program; if not, write to the: |
18 | /* 59 Temple Place, Suite 330, */ | 18 | * Free Software Foundation, Inc., |
19 | /* Boston, MA 02111-1307 USA */ | 19 | * 59 Temple Place, Suite 330, |
20 | /************************************************************************/ | 20 | * Boston, MA 02111-1307 USA |
21 | /* Change Activity: */ | 21 | * |
22 | /* Created, December 13, 2000 by Wayne Holm */ | 22 | * Change Activity: |
23 | /* End Change Activity */ | 23 | * Created, December 13, 2000 by Wayne Holm |
24 | /************************************************************************/ | 24 | * End Change Activity |
25 | */ | ||
26 | #include <linux/config.h> | ||
25 | #include <linux/pci.h> | 27 | #include <linux/pci.h> |
26 | #include <linux/init.h> | 28 | #include <linux/init.h> |
27 | #include <linux/threads.h> | 29 | #include <linux/threads.h> |
@@ -30,30 +32,15 @@ | |||
30 | #include <linux/string.h> | 32 | #include <linux/string.h> |
31 | #include <linux/bootmem.h> | 33 | #include <linux/bootmem.h> |
32 | #include <linux/ide.h> | 34 | #include <linux/ide.h> |
33 | |||
34 | #include <linux/irq.h> | 35 | #include <linux/irq.h> |
35 | #include <linux/spinlock.h> | 36 | #include <linux/spinlock.h> |
36 | #include <asm/ppcdebug.h> | ||
37 | 37 | ||
38 | #include <asm/ppcdebug.h> | ||
39 | #include <asm/iSeries/HvTypes.h> | ||
40 | #include <asm/iSeries/HvLpEvent.h> | ||
38 | #include <asm/iSeries/HvCallPci.h> | 41 | #include <asm/iSeries/HvCallPci.h> |
39 | #include <asm/iSeries/HvCallXm.h> | 42 | #include <asm/iSeries/HvCallXm.h> |
40 | #include <asm/iSeries/iSeries_irq.h> | 43 | #include <asm/iSeries/iSeries_irq.h> |
41 | #include <asm/iSeries/XmPciLpEvent.h> | ||
42 | |||
43 | static unsigned int iSeries_startup_IRQ(unsigned int irq); | ||
44 | static void iSeries_shutdown_IRQ(unsigned int irq); | ||
45 | static void iSeries_enable_IRQ(unsigned int irq); | ||
46 | static void iSeries_disable_IRQ(unsigned int irq); | ||
47 | static void iSeries_end_IRQ(unsigned int irq); | ||
48 | |||
49 | static hw_irq_controller iSeries_IRQ_handler = { | ||
50 | .typename = "iSeries irq controller", | ||
51 | .startup = iSeries_startup_IRQ, | ||
52 | .shutdown = iSeries_shutdown_IRQ, | ||
53 | .enable = iSeries_enable_IRQ, | ||
54 | .disable = iSeries_disable_IRQ, | ||
55 | .end = iSeries_end_IRQ | ||
56 | }; | ||
57 | 44 | ||
58 | /* This maps virtual irq numbers to real irqs */ | 45 | /* This maps virtual irq numbers to real irqs */ |
59 | unsigned int virt_irq_to_real_map[NR_IRQS]; | 46 | unsigned int virt_irq_to_real_map[NR_IRQS]; |
@@ -62,37 +49,187 @@ unsigned int virt_irq_to_real_map[NR_IRQS]; | |||
62 | /* Note: the pcnet32 driver assumes irq numbers < 2 aren't valid. :( */ | 49 | /* Note: the pcnet32 driver assumes irq numbers < 2 aren't valid. :( */ |
63 | static int next_virtual_irq = 2; | 50 | static int next_virtual_irq = 2; |
64 | 51 | ||
65 | /* This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c */ | 52 | static long Pci_Interrupt_Count; |
66 | void __init iSeries_init_IRQ(void) | 53 | static long Pci_Event_Count; |
54 | |||
55 | enum XmPciLpEvent_Subtype { | ||
56 | XmPciLpEvent_BusCreated = 0, // PHB has been created | ||
57 | XmPciLpEvent_BusError = 1, // PHB has failed | ||
58 | XmPciLpEvent_BusFailed = 2, // Msg to Secondary, Primary failed bus | ||
59 | XmPciLpEvent_NodeFailed = 4, // Multi-adapter bridge has failed | ||
60 | XmPciLpEvent_NodeRecovered = 5, // Multi-adapter bridge has recovered | ||
61 | XmPciLpEvent_BusRecovered = 12, // PHB has been recovered | ||
62 | XmPciLpEvent_UnQuiesceBus = 18, // Secondary bus unquiescing | ||
63 | XmPciLpEvent_BridgeError = 21, // Bridge Error | ||
64 | XmPciLpEvent_SlotInterrupt = 22 // Slot interrupt | ||
65 | }; | ||
66 | |||
67 | struct XmPciLpEvent_BusInterrupt { | ||
68 | HvBusNumber busNumber; | ||
69 | HvSubBusNumber subBusNumber; | ||
70 | }; | ||
71 | |||
72 | struct XmPciLpEvent_NodeInterrupt { | ||
73 | HvBusNumber busNumber; | ||
74 | HvSubBusNumber subBusNumber; | ||
75 | HvAgentId deviceId; | ||
76 | }; | ||
77 | |||
78 | struct XmPciLpEvent { | ||
79 | struct HvLpEvent hvLpEvent; | ||
80 | |||
81 | union { | ||
82 | u64 alignData; // Align on an 8-byte boundary | ||
83 | |||
84 | struct { | ||
85 | u32 fisr; | ||
86 | HvBusNumber busNumber; | ||
87 | HvSubBusNumber subBusNumber; | ||
88 | HvAgentId deviceId; | ||
89 | } slotInterrupt; | ||
90 | |||
91 | struct XmPciLpEvent_BusInterrupt busFailed; | ||
92 | struct XmPciLpEvent_BusInterrupt busRecovered; | ||
93 | struct XmPciLpEvent_BusInterrupt busCreated; | ||
94 | |||
95 | struct XmPciLpEvent_NodeInterrupt nodeFailed; | ||
96 | struct XmPciLpEvent_NodeInterrupt nodeRecovered; | ||
97 | |||
98 | } eventData; | ||
99 | |||
100 | }; | ||
101 | |||
102 | static void intReceived(struct XmPciLpEvent *eventParm, | ||
103 | struct pt_regs *regsParm) | ||
67 | { | 104 | { |
68 | /* Register PCI event handler and open an event path */ | 105 | int irq; |
69 | XmPciLpEvent_init(); | 106 | |
107 | ++Pci_Interrupt_Count; | ||
108 | |||
109 | switch (eventParm->hvLpEvent.xSubtype) { | ||
110 | case XmPciLpEvent_SlotInterrupt: | ||
111 | irq = eventParm->hvLpEvent.xCorrelationToken; | ||
112 | /* Dispatch the interrupt handlers for this irq */ | ||
113 | ppc_irq_dispatch_handler(regsParm, irq); | ||
114 | HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber, | ||
115 | eventParm->eventData.slotInterrupt.subBusNumber, | ||
116 | eventParm->eventData.slotInterrupt.deviceId); | ||
117 | break; | ||
118 | /* Ignore error recovery events for now */ | ||
119 | case XmPciLpEvent_BusCreated: | ||
120 | printk(KERN_INFO "intReceived: system bus %d created\n", | ||
121 | eventParm->eventData.busCreated.busNumber); | ||
122 | break; | ||
123 | case XmPciLpEvent_BusError: | ||
124 | case XmPciLpEvent_BusFailed: | ||
125 | printk(KERN_INFO "intReceived: system bus %d failed\n", | ||
126 | eventParm->eventData.busFailed.busNumber); | ||
127 | break; | ||
128 | case XmPciLpEvent_BusRecovered: | ||
129 | case XmPciLpEvent_UnQuiesceBus: | ||
130 | printk(KERN_INFO "intReceived: system bus %d recovered\n", | ||
131 | eventParm->eventData.busRecovered.busNumber); | ||
132 | break; | ||
133 | case XmPciLpEvent_NodeFailed: | ||
134 | case XmPciLpEvent_BridgeError: | ||
135 | printk(KERN_INFO | ||
136 | "intReceived: multi-adapter bridge %d/%d/%d failed\n", | ||
137 | eventParm->eventData.nodeFailed.busNumber, | ||
138 | eventParm->eventData.nodeFailed.subBusNumber, | ||
139 | eventParm->eventData.nodeFailed.deviceId); | ||
140 | break; | ||
141 | case XmPciLpEvent_NodeRecovered: | ||
142 | printk(KERN_INFO | ||
143 | "intReceived: multi-adapter bridge %d/%d/%d recovered\n", | ||
144 | eventParm->eventData.nodeRecovered.busNumber, | ||
145 | eventParm->eventData.nodeRecovered.subBusNumber, | ||
146 | eventParm->eventData.nodeRecovered.deviceId); | ||
147 | break; | ||
148 | default: | ||
149 | printk(KERN_ERR | ||
150 | "intReceived: unrecognized event subtype 0x%x\n", | ||
151 | eventParm->hvLpEvent.xSubtype); | ||
152 | break; | ||
153 | } | ||
154 | } | ||
155 | |||
156 | static void XmPciLpEvent_handler(struct HvLpEvent *eventParm, | ||
157 | struct pt_regs *regsParm) | ||
158 | { | ||
159 | #ifdef CONFIG_PCI | ||
160 | ++Pci_Event_Count; | ||
161 | |||
162 | if (eventParm && (eventParm->xType == HvLpEvent_Type_PciIo)) { | ||
163 | switch (eventParm->xFlags.xFunction) { | ||
164 | case HvLpEvent_Function_Int: | ||
165 | intReceived((struct XmPciLpEvent *)eventParm, regsParm); | ||
166 | break; | ||
167 | case HvLpEvent_Function_Ack: | ||
168 | printk(KERN_ERR | ||
169 | "XmPciLpEvent_handler: unexpected ack received\n"); | ||
170 | break; | ||
171 | default: | ||
172 | printk(KERN_ERR | ||
173 | "XmPciLpEvent_handler: unexpected event function %d\n", | ||
174 | (int)eventParm->xFlags.xFunction); | ||
175 | break; | ||
176 | } | ||
177 | } else if (eventParm) | ||
178 | printk(KERN_ERR | ||
179 | "XmPciLpEvent_handler: Unrecognized PCI event type 0x%x\n", | ||
180 | (int)eventParm->xType); | ||
181 | else | ||
182 | printk(KERN_ERR "XmPciLpEvent_handler: NULL event received\n"); | ||
183 | #endif | ||
70 | } | 184 | } |
71 | 185 | ||
72 | /* | 186 | /* |
73 | * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot | 187 | * This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c |
74 | * It calculates the irq value for the slot. | 188 | * It must be called before the bus walk. |
75 | * Note that subBusNumber is always 0 (at the moment at least). | ||
76 | */ | 189 | */ |
77 | int __init iSeries_allocate_IRQ(HvBusNumber busNumber, | 190 | void __init iSeries_init_IRQ(void) |
78 | HvSubBusNumber subBusNumber, HvAgentId deviceId) | ||
79 | { | 191 | { |
80 | unsigned int realirq, virtirq; | 192 | /* Register PCI event handler and open an event path */ |
81 | u8 idsel = (deviceId >> 4); | 193 | int xRc; |
82 | u8 function = deviceId & 7; | ||
83 | |||
84 | virtirq = next_virtual_irq++; | ||
85 | realirq = ((busNumber - 1) << 6) + ((idsel - 1) << 3) + function; | ||
86 | virt_irq_to_real_map[virtirq] = realirq; | ||
87 | 194 | ||
88 | irq_desc[virtirq].handler = &iSeries_IRQ_handler; | 195 | xRc = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, |
89 | return virtirq; | 196 | &XmPciLpEvent_handler); |
197 | if (xRc == 0) { | ||
198 | xRc = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); | ||
199 | if (xRc != 0) | ||
200 | printk(KERN_ERR "iSeries_init_IRQ: open event path " | ||
201 | "failed with rc 0x%x\n", xRc); | ||
202 | } else | ||
203 | printk(KERN_ERR "iSeries_init_IRQ: register handler " | ||
204 | "failed with rc 0x%x\n", xRc); | ||
90 | } | 205 | } |
91 | 206 | ||
92 | #define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1) | 207 | #define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1) |
93 | #define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1) | 208 | #define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1) |
94 | #define REAL_IRQ_TO_FUNC(irq) ((irq) & 7) | 209 | #define REAL_IRQ_TO_FUNC(irq) ((irq) & 7) |
95 | 210 | ||
211 | /* | ||
212 | * This will be called by device drivers (via enable_IRQ) | ||
213 | * to enable INTA in the bridge interrupt status register. | ||
214 | */ | ||
215 | static void iSeries_enable_IRQ(unsigned int irq) | ||
216 | { | ||
217 | u32 bus, deviceId, function, mask; | ||
218 | const u32 subBus = 0; | ||
219 | unsigned int rirq = virt_irq_to_real_map[irq]; | ||
220 | |||
221 | /* The IRQ has already been locked by the caller */ | ||
222 | bus = REAL_IRQ_TO_BUS(rirq); | ||
223 | function = REAL_IRQ_TO_FUNC(rirq); | ||
224 | deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; | ||
225 | |||
226 | /* Unmask secondary INTA */ | ||
227 | mask = 0x80000000; | ||
228 | HvCallPci_unmaskInterrupts(bus, subBus, deviceId, mask); | ||
229 | PPCDBG(PPCDBG_BUSWALK, "iSeries_enable_IRQ 0x%02X.%02X.%02X 0x%04X\n", | ||
230 | bus, subBus, deviceId, irq); | ||
231 | } | ||
232 | |||
96 | /* This is called by iSeries_activate_IRQs */ | 233 | /* This is called by iSeries_activate_IRQs */ |
97 | static unsigned int iSeries_startup_IRQ(unsigned int irq) | 234 | static unsigned int iSeries_startup_IRQ(unsigned int irq) |
98 | { | 235 | { |
@@ -131,7 +268,7 @@ void __init iSeries_activate_IRQs() | |||
131 | desc->handler->startup(irq); | 268 | desc->handler->startup(irq); |
132 | spin_unlock_irqrestore(&desc->lock, flags); | 269 | spin_unlock_irqrestore(&desc->lock, flags); |
133 | } | 270 | } |
134 | } | 271 | } |
135 | } | 272 | } |
136 | 273 | ||
137 | /* this is not called anywhere currently */ | 274 | /* this is not called anywhere currently */ |
@@ -173,29 +310,7 @@ static void iSeries_disable_IRQ(unsigned int irq) | |||
173 | mask = 0x80000000; | 310 | mask = 0x80000000; |
174 | HvCallPci_maskInterrupts(bus, subBus, deviceId, mask); | 311 | HvCallPci_maskInterrupts(bus, subBus, deviceId, mask); |
175 | PPCDBG(PPCDBG_BUSWALK, "iSeries_disable_IRQ 0x%02X.%02X.%02X 0x%04X\n", | 312 | PPCDBG(PPCDBG_BUSWALK, "iSeries_disable_IRQ 0x%02X.%02X.%02X 0x%04X\n", |
176 | bus, subBus, deviceId, irq); | 313 | bus, subBus, deviceId, irq); |
177 | } | ||
178 | |||
179 | /* | ||
180 | * This will be called by device drivers (via enable_IRQ) | ||
181 | * to enable INTA in the bridge interrupt status register. | ||
182 | */ | ||
183 | static void iSeries_enable_IRQ(unsigned int irq) | ||
184 | { | ||
185 | u32 bus, deviceId, function, mask; | ||
186 | const u32 subBus = 0; | ||
187 | unsigned int rirq = virt_irq_to_real_map[irq]; | ||
188 | |||
189 | /* The IRQ has already been locked by the caller */ | ||
190 | bus = REAL_IRQ_TO_BUS(rirq); | ||
191 | function = REAL_IRQ_TO_FUNC(rirq); | ||
192 | deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; | ||
193 | |||
194 | /* Unmask secondary INTA */ | ||
195 | mask = 0x80000000; | ||
196 | HvCallPci_unmaskInterrupts(bus, subBus, deviceId, mask); | ||
197 | PPCDBG(PPCDBG_BUSWALK, "iSeries_enable_IRQ 0x%02X.%02X.%02X 0x%04X\n", | ||
198 | bus, subBus, deviceId, irq); | ||
199 | } | 314 | } |
200 | 315 | ||
201 | /* | 316 | /* |
@@ -207,3 +322,32 @@ static void iSeries_enable_IRQ(unsigned int irq) | |||
207 | static void iSeries_end_IRQ(unsigned int irq) | 322 | static void iSeries_end_IRQ(unsigned int irq) |
208 | { | 323 | { |
209 | } | 324 | } |
325 | |||
326 | static hw_irq_controller iSeries_IRQ_handler = { | ||
327 | .typename = "iSeries irq controller", | ||
328 | .startup = iSeries_startup_IRQ, | ||
329 | .shutdown = iSeries_shutdown_IRQ, | ||
330 | .enable = iSeries_enable_IRQ, | ||
331 | .disable = iSeries_disable_IRQ, | ||
332 | .end = iSeries_end_IRQ | ||
333 | }; | ||
334 | |||
335 | /* | ||
336 | * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot | ||
337 | * It calculates the irq value for the slot. | ||
338 | * Note that subBusNumber is always 0 (at the moment at least). | ||
339 | */ | ||
340 | int __init iSeries_allocate_IRQ(HvBusNumber busNumber, | ||
341 | HvSubBusNumber subBusNumber, HvAgentId deviceId) | ||
342 | { | ||
343 | unsigned int realirq, virtirq; | ||
344 | u8 idsel = (deviceId >> 4); | ||
345 | u8 function = deviceId & 7; | ||
346 | |||
347 | virtirq = next_virtual_irq++; | ||
348 | realirq = ((busNumber - 1) << 6) + ((idsel - 1) << 3) + function; | ||
349 | virt_irq_to_real_map[virtirq] = realirq; | ||
350 | |||
351 | irq_desc[virtirq].handler = &iSeries_IRQ_handler; | ||
352 | return virtirq; | ||
353 | } | ||
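
The allocation above packs the bus number, IDSEL and function of an EADS slot into a single "real" IRQ value, which the REAL_IRQ_TO_* macros earlier in the file unpack again. A stand-alone sketch of that encoding (hypothetical slot values, written as ordinary user-space C rather than kernel code):

/* Round-trip of the iSeries real-IRQ encoding; values are illustrative only. */
#include <stdio.h>

#define REAL_IRQ_TO_BUS(irq)    ((((irq) >> 6) & 0xff) + 1)
#define REAL_IRQ_TO_IDSEL(irq)  ((((irq) >> 3) & 7) + 1)
#define REAL_IRQ_TO_FUNC(irq)   ((irq) & 7)

int main(void)
{
        unsigned int bus = 23, idsel = 3, function = 0;    /* hypothetical EADS slot */
        unsigned int device_id = (idsel << 4) | function;  /* HvAgentId layout used above */
        unsigned int realirq = ((bus - 1) << 6) + ((idsel - 1) << 3) + function;

        printf("agent 0x%02x -> real irq 0x%x -> bus %u idsel %u func %u\n",
               device_id, realirq,
               REAL_IRQ_TO_BUS(realirq), REAL_IRQ_TO_IDSEL(realirq),
               REAL_IRQ_TO_FUNC(realirq));
        return 0;
}
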
diff --git a/arch/ppc64/kernel/iSeries_pci.c b/arch/ppc64/kernel/iSeries_pci.c index bd4c2554f1a0..356e4fd9a94f 100644 --- a/arch/ppc64/kernel/iSeries_pci.c +++ b/arch/ppc64/kernel/iSeries_pci.c | |||
@@ -38,9 +38,7 @@ | |||
38 | #include <asm/iommu.h> | 38 | #include <asm/iommu.h> |
39 | 39 | ||
40 | #include <asm/iSeries/HvCallPci.h> | 40 | #include <asm/iSeries/HvCallPci.h> |
41 | #include <asm/iSeries/HvCallSm.h> | ||
42 | #include <asm/iSeries/HvCallXm.h> | 41 | #include <asm/iSeries/HvCallXm.h> |
43 | #include <asm/iSeries/LparData.h> | ||
44 | #include <asm/iSeries/iSeries_irq.h> | 42 | #include <asm/iSeries/iSeries_irq.h> |
45 | #include <asm/iSeries/iSeries_pci.h> | 43 | #include <asm/iSeries/iSeries_pci.h> |
46 | #include <asm/iSeries/mf.h> | 44 | #include <asm/iSeries/mf.h> |
@@ -225,10 +223,7 @@ static struct iSeries_Device_Node *build_device_node(HvBusNumber Bus, | |||
225 | node->DsaAddr.Dsa.busNumber = Bus; | 223 | node->DsaAddr.Dsa.busNumber = Bus; |
226 | node->DsaAddr.Dsa.subBusNumber = SubBus; | 224 | node->DsaAddr.Dsa.subBusNumber = SubBus; |
227 | node->DsaAddr.Dsa.deviceId = 0x10; | 225 | node->DsaAddr.Dsa.deviceId = 0x10; |
228 | node->AgentId = AgentId; | ||
229 | node->DevFn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function); | 226 | node->DevFn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function); |
230 | node->IoRetry = 0; | ||
231 | iSeries_Get_Location_Code(node); | ||
232 | return node; | 227 | return node; |
233 | } | 228 | } |
234 | 229 | ||
@@ -302,7 +297,6 @@ void __init iSeries_pci_final_fixup(void) | |||
302 | { | 297 | { |
303 | struct pci_dev *pdev = NULL; | 298 | struct pci_dev *pdev = NULL; |
304 | struct iSeries_Device_Node *node; | 299 | struct iSeries_Device_Node *node; |
305 | char Buffer[256]; | ||
306 | int DeviceCount = 0; | 300 | int DeviceCount = 0; |
307 | 301 | ||
308 | PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n"); | 302 | PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n"); |
@@ -324,9 +318,7 @@ void __init iSeries_pci_final_fixup(void) | |||
324 | "pdev 0x%p <==> DevNode 0x%p\n", | 318 | "pdev 0x%p <==> DevNode 0x%p\n", |
325 | pdev, node); | 319 | pdev, node); |
326 | allocate_device_bars(pdev); | 320 | allocate_device_bars(pdev); |
327 | iSeries_Device_Information(pdev, Buffer, | 321 | iSeries_Device_Information(pdev, DeviceCount); |
328 | sizeof(Buffer)); | ||
329 | printk("%d. %s\n", DeviceCount, Buffer); | ||
330 | iommu_devnode_init_iSeries(node); | 322 | iommu_devnode_init_iSeries(node); |
331 | } else | 323 | } else |
332 | printk("PCI: Device Tree not found for 0x%016lX\n", | 324 | printk("PCI: Device Tree not found for 0x%016lX\n", |
@@ -499,7 +491,6 @@ static int scan_bridge_slot(HvBusNumber Bus, | |||
499 | 491 | ||
500 | ++DeviceCount; | 492 | ++DeviceCount; |
501 | node = build_device_node(Bus, SubBus, EADsIdSel, Function); | 493 | node = build_device_node(Bus, SubBus, EADsIdSel, Function); |
502 | node->Vendor = VendorId; | ||
503 | node->Irq = Irq; | 494 | node->Irq = Irq; |
504 | node->LogicalSlot = BridgeInfo->logicalSlotNumber; | 495 | node->LogicalSlot = BridgeInfo->logicalSlotNumber; |
505 | 496 | ||
@@ -661,38 +652,34 @@ static struct pci_ops iSeries_pci_ops = { | |||
661 | * Check Return Code | 652 | * Check Return Code |
662 | * -> On Failure, print and log information. | 653 | * -> On Failure, print and log information. |
663 | * Increment Retry Count, if exceeds max, panic partition. | 654 | * Increment Retry Count, if exceeds max, panic partition. |
664 | * -> If in retry, print and log success | ||
665 | * | 655 | * |
666 | * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234 | 656 | * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234 |
667 | * PCI: Device 23.90 ReadL Retry( 1) | 657 | * PCI: Device 23.90 ReadL Retry( 1) |
668 | * PCI: Device 23.90 ReadL Retry Successful(1) | 658 | * PCI: Device 23.90 ReadL Retry Successful(1) |
669 | */ | 659 | */ |
670 | static int CheckReturnCode(char *TextHdr, struct iSeries_Device_Node *DevNode, | 660 | static int CheckReturnCode(char *TextHdr, struct iSeries_Device_Node *DevNode, |
671 | u64 ret) | 661 | int *retry, u64 ret) |
672 | { | 662 | { |
673 | if (ret != 0) { | 663 | if (ret != 0) { |
674 | ++Pci_Error_Count; | 664 | ++Pci_Error_Count; |
675 | ++DevNode->IoRetry; | 665 | (*retry)++; |
676 | printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n", | 666 | printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n", |
677 | TextHdr, DevNode->DsaAddr.Dsa.busNumber, DevNode->DevFn, | 667 | TextHdr, DevNode->DsaAddr.Dsa.busNumber, DevNode->DevFn, |
678 | DevNode->IoRetry, (int)ret); | 668 | *retry, (int)ret); |
679 | /* | 669 | /* |
680 | * Bump the retry and check for retry count exceeded. | 670 | * Bump the retry and check for retry count exceeded. |
681 | * If, Exceeded, panic the system. | 671 | * If, Exceeded, panic the system. |
682 | */ | 672 | */ |
683 | if ((DevNode->IoRetry > Pci_Retry_Max) && | 673 | if (((*retry) > Pci_Retry_Max) && |
684 | (Pci_Error_Flag > 0)) { | 674 | (Pci_Error_Flag > 0)) { |
685 | mf_display_src(0xB6000103); | 675 | mf_display_src(0xB6000103); |
686 | panic_timeout = 0; | 676 | panic_timeout = 0; |
687 | panic("PCI: Hardware I/O Error, SRC B6000103, " | 677 | panic("PCI: Hardware I/O Error, SRC B6000103, " |
688 | "Automatic Reboot Disabled.\n"); | 678 | "Automatic Reboot Disabled.\n"); |
689 | } | 679 | } |
690 | return -1; /* Retry Try */ | 680 | return -1; /* Retry Try */ |
691 | } | 681 | } |
692 | /* If retry was in progress, log success and rest retry count */ | 682 | return 0; |
693 | if (DevNode->IoRetry > 0) | ||
694 | DevNode->IoRetry = 0; | ||
695 | return 0; | ||
696 | } | 683 | } |
697 | 684 | ||
698 | /* | 685 | /* |
@@ -738,6 +725,7 @@ u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress) | |||
738 | { | 725 | { |
739 | u64 BarOffset; | 726 | u64 BarOffset; |
740 | u64 dsa; | 727 | u64 dsa; |
728 | int retry = 0; | ||
741 | struct HvCallPci_LoadReturn ret; | 729 | struct HvCallPci_LoadReturn ret; |
742 | struct iSeries_Device_Node *DevNode = | 730 | struct iSeries_Device_Node *DevNode = |
743 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); | 731 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); |
@@ -757,7 +745,7 @@ u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress) | |||
757 | do { | 745 | do { |
758 | ++Pci_Io_Read_Count; | 746 | ++Pci_Io_Read_Count; |
759 | HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0); | 747 | HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0); |
760 | } while (CheckReturnCode("RDB", DevNode, ret.rc) != 0); | 748 | } while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0); |
761 | 749 | ||
762 | return (u8)ret.value; | 750 | return (u8)ret.value; |
763 | } | 751 | } |
@@ -767,6 +755,7 @@ u16 iSeries_Read_Word(const volatile void __iomem *IoAddress) | |||
767 | { | 755 | { |
768 | u64 BarOffset; | 756 | u64 BarOffset; |
769 | u64 dsa; | 757 | u64 dsa; |
758 | int retry = 0; | ||
770 | struct HvCallPci_LoadReturn ret; | 759 | struct HvCallPci_LoadReturn ret; |
771 | struct iSeries_Device_Node *DevNode = | 760 | struct iSeries_Device_Node *DevNode = |
772 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); | 761 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); |
@@ -787,7 +776,7 @@ u16 iSeries_Read_Word(const volatile void __iomem *IoAddress) | |||
787 | ++Pci_Io_Read_Count; | 776 | ++Pci_Io_Read_Count; |
788 | HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa, | 777 | HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa, |
789 | BarOffset, 0); | 778 | BarOffset, 0); |
790 | } while (CheckReturnCode("RDW", DevNode, ret.rc) != 0); | 779 | } while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0); |
791 | 780 | ||
792 | return swab16((u16)ret.value); | 781 | return swab16((u16)ret.value); |
793 | } | 782 | } |
@@ -797,6 +786,7 @@ u32 iSeries_Read_Long(const volatile void __iomem *IoAddress) | |||
797 | { | 786 | { |
798 | u64 BarOffset; | 787 | u64 BarOffset; |
799 | u64 dsa; | 788 | u64 dsa; |
789 | int retry = 0; | ||
800 | struct HvCallPci_LoadReturn ret; | 790 | struct HvCallPci_LoadReturn ret; |
801 | struct iSeries_Device_Node *DevNode = | 791 | struct iSeries_Device_Node *DevNode = |
802 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); | 792 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); |
@@ -817,7 +807,7 @@ u32 iSeries_Read_Long(const volatile void __iomem *IoAddress) | |||
817 | ++Pci_Io_Read_Count; | 807 | ++Pci_Io_Read_Count; |
818 | HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa, | 808 | HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa, |
819 | BarOffset, 0); | 809 | BarOffset, 0); |
820 | } while (CheckReturnCode("RDL", DevNode, ret.rc) != 0); | 810 | } while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0); |
821 | 811 | ||
822 | return swab32((u32)ret.value); | 812 | return swab32((u32)ret.value); |
823 | } | 813 | } |
@@ -834,6 +824,7 @@ void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress) | |||
834 | { | 824 | { |
835 | u64 BarOffset; | 825 | u64 BarOffset; |
836 | u64 dsa; | 826 | u64 dsa; |
827 | int retry = 0; | ||
837 | u64 rc; | 828 | u64 rc; |
838 | struct iSeries_Device_Node *DevNode = | 829 | struct iSeries_Device_Node *DevNode = |
839 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); | 830 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); |
@@ -853,7 +844,7 @@ void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress) | |||
853 | do { | 844 | do { |
854 | ++Pci_Io_Write_Count; | 845 | ++Pci_Io_Write_Count; |
855 | rc = HvCall4(HvCallPciBarStore8, dsa, BarOffset, data, 0); | 846 | rc = HvCall4(HvCallPciBarStore8, dsa, BarOffset, data, 0); |
856 | } while (CheckReturnCode("WWB", DevNode, rc) != 0); | 847 | } while (CheckReturnCode("WWB", DevNode, &retry, rc) != 0); |
857 | } | 848 | } |
858 | EXPORT_SYMBOL(iSeries_Write_Byte); | 849 | EXPORT_SYMBOL(iSeries_Write_Byte); |
859 | 850 | ||
@@ -861,6 +852,7 @@ void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress) | |||
861 | { | 852 | { |
862 | u64 BarOffset; | 853 | u64 BarOffset; |
863 | u64 dsa; | 854 | u64 dsa; |
855 | int retry = 0; | ||
864 | u64 rc; | 856 | u64 rc; |
865 | struct iSeries_Device_Node *DevNode = | 857 | struct iSeries_Device_Node *DevNode = |
866 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); | 858 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); |
@@ -880,7 +872,7 @@ void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress) | |||
880 | do { | 872 | do { |
881 | ++Pci_Io_Write_Count; | 873 | ++Pci_Io_Write_Count; |
882 | rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0); | 874 | rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0); |
883 | } while (CheckReturnCode("WWW", DevNode, rc) != 0); | 875 | } while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0); |
884 | } | 876 | } |
885 | EXPORT_SYMBOL(iSeries_Write_Word); | 877 | EXPORT_SYMBOL(iSeries_Write_Word); |
886 | 878 | ||
@@ -888,6 +880,7 @@ void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress) | |||
888 | { | 880 | { |
889 | u64 BarOffset; | 881 | u64 BarOffset; |
890 | u64 dsa; | 882 | u64 dsa; |
883 | int retry = 0; | ||
891 | u64 rc; | 884 | u64 rc; |
892 | struct iSeries_Device_Node *DevNode = | 885 | struct iSeries_Device_Node *DevNode = |
893 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); | 886 | xlate_iomm_address(IoAddress, &dsa, &BarOffset); |
@@ -907,6 +900,6 @@ void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress) | |||
907 | do { | 900 | do { |
908 | ++Pci_Io_Write_Count; | 901 | ++Pci_Io_Write_Count; |
909 | rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0); | 902 | rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0); |
910 | } while (CheckReturnCode("WWL", DevNode, rc) != 0); | 903 | } while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0); |
911 | } | 904 | } |
912 | EXPORT_SYMBOL(iSeries_Write_Long); | 905 | EXPORT_SYMBOL(iSeries_Write_Long); |
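
The accessors above now keep their I/O retry count in a local variable and pass it to CheckReturnCode() by pointer, instead of bumping the shared DevNode->IoRetry field. A self-contained sketch of that per-call retry pattern follows; hv_load8() and PCI_RETRY_MAX are stand-ins for the hypervisor call and Pci_Retry_Max, not real kernel symbols:

#include <stdio.h>
#include <stdlib.h>

#define PCI_RETRY_MAX 3

static int hv_load8(unsigned char *val)      /* pretend hypervisor BAR load */
{
        static int failures_left = 2;        /* fail twice, then succeed */

        if (failures_left-- > 0)
                return 0x1234;               /* non-zero return code means I/O error */
        *val = 0xab;
        return 0;
}

static int check_rc(const char *hdr, int *retry, int rc)
{
        if (rc == 0)
                return 0;                    /* success, stop retrying */
        printf("PCI: %s I/O Error(%2d): 0x%04x\n", hdr, ++(*retry), rc);
        if (*retry > PCI_RETRY_MAX)
                abort();                     /* the kernel code panics here instead */
        return -1;                           /* tell the caller to retry */
}

int main(void)
{
        unsigned char val;
        int rc, retry = 0;                   /* counter is private to this access */

        do {
                rc = hv_load8(&val);
        } while (check_rc("RDB", &retry, rc) != 0);

        printf("read 0x%02x after %d retries\n", val, retry);
        return 0;
}
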
diff --git a/arch/ppc64/kernel/iSeries_pci_reset.c b/arch/ppc64/kernel/iSeries_pci_reset.c deleted file mode 100644 index 0f785e4584f7..000000000000 --- a/arch/ppc64/kernel/iSeries_pci_reset.c +++ /dev/null | |||
@@ -1,104 +0,0 @@ | |||
1 | #define PCIFR(...) | ||
2 | /************************************************************************/ | ||
3 | /* File iSeries_pci_reset.c created by Allan Trautman on Mar 21 2001. */ | ||
4 | /************************************************************************/ | ||
5 | /* This code supports the pci interface on the IBM iSeries systems. */ | ||
6 | /* Copyright (C) 20yy <Allan H Trautman> <IBM Corp> */ | ||
7 | /* */ | ||
8 | /* This program is free software; you can redistribute it and/or modify */ | ||
9 | /* it under the terms of the GNU General Public License as published by */ | ||
10 | /* the Free Software Foundation; either version 2 of the License, or */ | ||
11 | /* (at your option) any later version. */ | ||
12 | /* */ | ||
13 | /* This program is distributed in the hope that it will be useful, */ | ||
14 | /* but WITHOUT ANY WARRANTY; without even the implied warranty of */ | ||
15 | /* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the */ | ||
16 | /* GNU General Public License for more details. */ | ||
17 | /* */ | ||
18 | /* You should have received a copy of the GNU General Public License */ | ||
19 | /* along with this program; if not, write to the: */ | ||
20 | /* Free Software Foundation, Inc., */ | ||
21 | /* 59 Temple Place, Suite 330, */ | ||
22 | /* Boston, MA 02111-1307 USA */ | ||
23 | /************************************************************************/ | ||
24 | /* Change Activity: */ | ||
25 | /* Created, March 20, 2001 */ | ||
26 | /* April 30, 2001, Added return codes on functions. */ | ||
27 | /* September 10, 2001, Ported to ppc64. */ | ||
28 | /* End Change Activity */ | ||
29 | /************************************************************************/ | ||
30 | #include <linux/kernel.h> | ||
31 | #include <linux/init.h> | ||
32 | #include <linux/module.h> | ||
33 | #include <linux/pci.h> | ||
34 | #include <linux/irq.h> | ||
35 | #include <linux/delay.h> | ||
36 | |||
37 | #include <asm/io.h> | ||
38 | #include <asm/iSeries/HvCallPci.h> | ||
39 | #include <asm/iSeries/HvTypes.h> | ||
40 | #include <asm/iSeries/mf.h> | ||
41 | #include <asm/pci.h> | ||
42 | |||
43 | #include <asm/iSeries/iSeries_pci.h> | ||
44 | #include "pci.h" | ||
45 | |||
46 | /* | ||
47 | * Interface to toggle the reset line | ||
48 | * Time is in .1 seconds, need for seconds. | ||
49 | */ | ||
50 | int iSeries_Device_ToggleReset(struct pci_dev *PciDev, int AssertTime, | ||
51 | int DelayTime) | ||
52 | { | ||
53 | unsigned int AssertDelay, WaitDelay; | ||
54 | struct iSeries_Device_Node *DeviceNode = | ||
55 | (struct iSeries_Device_Node *)PciDev->sysdata; | ||
56 | |||
57 | if (DeviceNode == NULL) { | ||
58 | printk("PCI: Pci Reset Failed, Device Node not found for pci_dev %p\n", | ||
59 | PciDev); | ||
60 | return -1; | ||
61 | } | ||
62 | /* | ||
63 | * Set defaults, Assert is .5 second, Wait is 3 seconds. | ||
64 | */ | ||
65 | if (AssertTime == 0) | ||
66 | AssertDelay = 500; | ||
67 | else | ||
68 | AssertDelay = AssertTime * 100; | ||
69 | |||
70 | if (DelayTime == 0) | ||
71 | WaitDelay = 3000; | ||
72 | else | ||
73 | WaitDelay = DelayTime * 100; | ||
74 | |||
75 | /* | ||
76 | * Assert reset | ||
77 | */ | ||
78 | DeviceNode->ReturnCode = HvCallPci_setSlotReset(ISERIES_BUS(DeviceNode), | ||
79 | 0x00, DeviceNode->AgentId, 1); | ||
80 | if (DeviceNode->ReturnCode == 0) { | ||
81 | msleep(AssertDelay); /* Sleep for the time */ | ||
82 | DeviceNode->ReturnCode = | ||
83 | HvCallPci_setSlotReset(ISERIES_BUS(DeviceNode), | ||
84 | 0x00, DeviceNode->AgentId, 0); | ||
85 | |||
86 | /* | ||
87 | * Wait for device to reset | ||
88 | */ | ||
89 | msleep(WaitDelay); | ||
90 | } | ||
91 | if (DeviceNode->ReturnCode == 0) | ||
92 | PCIFR("Slot 0x%04X.%02 Reset\n", ISERIES_BUS(DeviceNode), | ||
93 | DeviceNode->AgentId); | ||
94 | else { | ||
95 | printk("PCI: Slot 0x%04X.%02X Reset Failed, RCode: %04X\n", | ||
96 | ISERIES_BUS(DeviceNode), DeviceNode->AgentId, | ||
97 | DeviceNode->ReturnCode); | ||
98 | PCIFR("Slot 0x%04X.%02X Reset Failed, RCode: %04X\n", | ||
99 | ISERIES_BUS(DeviceNode), DeviceNode->AgentId, | ||
100 | DeviceNode->ReturnCode); | ||
101 | } | ||
102 | return DeviceNode->ReturnCode; | ||
103 | } | ||
104 | EXPORT_SYMBOL(iSeries_Device_ToggleReset); | ||
diff --git a/arch/ppc64/kernel/iSeries_proc.c b/arch/ppc64/kernel/iSeries_proc.c index 0cc58ddf48de..0fe3116eba29 100644 --- a/arch/ppc64/kernel/iSeries_proc.c +++ b/arch/ppc64/kernel/iSeries_proc.c | |||
@@ -28,8 +28,7 @@ | |||
28 | #include <asm/iSeries/ItLpQueue.h> | 28 | #include <asm/iSeries/ItLpQueue.h> |
29 | #include <asm/iSeries/HvCallXm.h> | 29 | #include <asm/iSeries/HvCallXm.h> |
30 | #include <asm/iSeries/IoHriMainStore.h> | 30 | #include <asm/iSeries/IoHriMainStore.h> |
31 | #include <asm/iSeries/LparData.h> | 31 | #include <asm/iSeries/IoHriProcessorVpd.h> |
32 | #include <asm/iSeries/iSeries_proc.h> | ||
33 | 32 | ||
34 | static int __init iseries_proc_create(void) | 33 | static int __init iseries_proc_create(void) |
35 | { | 34 | { |
@@ -41,50 +40,6 @@ static int __init iseries_proc_create(void) | |||
41 | } | 40 | } |
42 | core_initcall(iseries_proc_create); | 41 | core_initcall(iseries_proc_create); |
43 | 42 | ||
44 | static char *event_types[9] = { | ||
45 | "Hypervisor\t\t", | ||
46 | "Machine Facilities\t", | ||
47 | "Session Manager\t", | ||
48 | "SPD I/O\t\t", | ||
49 | "Virtual Bus\t\t", | ||
50 | "PCI I/O\t\t", | ||
51 | "RIO I/O\t\t", | ||
52 | "Virtual Lan\t\t", | ||
53 | "Virtual I/O\t\t" | ||
54 | }; | ||
55 | |||
56 | static int proc_lpevents_show(struct seq_file *m, void *v) | ||
57 | { | ||
58 | unsigned int i; | ||
59 | |||
60 | seq_printf(m, "LpEventQueue 0\n"); | ||
61 | seq_printf(m, " events processed:\t%lu\n", | ||
62 | (unsigned long)xItLpQueue.xLpIntCount); | ||
63 | |||
64 | for (i = 0; i < 9; ++i) | ||
65 | seq_printf(m, " %s %10lu\n", event_types[i], | ||
66 | (unsigned long)xItLpQueue.xLpIntCountByType[i]); | ||
67 | |||
68 | seq_printf(m, "\n events processed by processor:\n"); | ||
69 | |||
70 | for_each_online_cpu(i) | ||
71 | seq_printf(m, " CPU%02d %10u\n", i, paca[i].lpevent_count); | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static int proc_lpevents_open(struct inode *inode, struct file *file) | ||
77 | { | ||
78 | return single_open(file, proc_lpevents_show, NULL); | ||
79 | } | ||
80 | |||
81 | static struct file_operations proc_lpevents_operations = { | ||
82 | .open = proc_lpevents_open, | ||
83 | .read = seq_read, | ||
84 | .llseek = seq_lseek, | ||
85 | .release = single_release, | ||
86 | }; | ||
87 | |||
88 | static unsigned long startTitan = 0; | 43 | static unsigned long startTitan = 0; |
89 | static unsigned long startTb = 0; | 44 | static unsigned long startTb = 0; |
90 | 45 | ||
@@ -149,10 +104,6 @@ static int __init iseries_proc_init(void) | |||
149 | { | 104 | { |
150 | struct proc_dir_entry *e; | 105 | struct proc_dir_entry *e; |
151 | 106 | ||
152 | e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL); | ||
153 | if (e) | ||
154 | e->proc_fops = &proc_lpevents_operations; | ||
155 | |||
156 | e = create_proc_entry("iSeries/titanTod", S_IFREG|S_IRUGO, NULL); | 107 | e = create_proc_entry("iSeries/titanTod", S_IFREG|S_IRUGO, NULL); |
157 | if (e) | 108 | if (e) |
158 | e->proc_fops = &proc_titantod_operations; | 109 | e->proc_fops = &proc_titantod_operations; |
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c index 6d06eb550a3f..b3f770f6d402 100644 --- a/arch/ppc64/kernel/iSeries_setup.c +++ b/arch/ppc64/kernel/iSeries_setup.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/smp.h> | 24 | #include <linux/smp.h> |
25 | #include <linux/param.h> | 25 | #include <linux/param.h> |
26 | #include <linux/string.h> | 26 | #include <linux/string.h> |
27 | #include <linux/bootmem.h> | ||
28 | #include <linux/initrd.h> | 27 | #include <linux/initrd.h> |
29 | #include <linux/seq_file.h> | 28 | #include <linux/seq_file.h> |
30 | #include <linux/kdev_t.h> | 29 | #include <linux/kdev_t.h> |
@@ -47,7 +46,7 @@ | |||
47 | #include <asm/paca.h> | 46 | #include <asm/paca.h> |
48 | #include <asm/cache.h> | 47 | #include <asm/cache.h> |
49 | #include <asm/sections.h> | 48 | #include <asm/sections.h> |
50 | #include <asm/iSeries/LparData.h> | 49 | #include <asm/abs_addr.h> |
51 | #include <asm/iSeries/HvCallHpt.h> | 50 | #include <asm/iSeries/HvCallHpt.h> |
52 | #include <asm/iSeries/HvLpConfig.h> | 51 | #include <asm/iSeries/HvLpConfig.h> |
53 | #include <asm/iSeries/HvCallEvent.h> | 52 | #include <asm/iSeries/HvCallEvent.h> |
@@ -55,10 +54,12 @@ | |||
55 | #include <asm/iSeries/HvCallXm.h> | 54 | #include <asm/iSeries/HvCallXm.h> |
56 | #include <asm/iSeries/ItLpQueue.h> | 55 | #include <asm/iSeries/ItLpQueue.h> |
57 | #include <asm/iSeries/IoHriMainStore.h> | 56 | #include <asm/iSeries/IoHriMainStore.h> |
58 | #include <asm/iSeries/iSeries_proc.h> | ||
59 | #include <asm/iSeries/mf.h> | 57 | #include <asm/iSeries/mf.h> |
60 | #include <asm/iSeries/HvLpEvent.h> | 58 | #include <asm/iSeries/HvLpEvent.h> |
61 | #include <asm/iSeries/iSeries_irq.h> | 59 | #include <asm/iSeries/iSeries_irq.h> |
60 | #include <asm/iSeries/IoHriProcessorVpd.h> | ||
61 | #include <asm/iSeries/ItVpdAreas.h> | ||
62 | #include <asm/iSeries/LparMap.h> | ||
62 | 63 | ||
63 | extern void hvlog(char *fmt, ...); | 64 | extern void hvlog(char *fmt, ...); |
64 | 65 | ||
@@ -74,7 +75,11 @@ extern void ppcdbg_initialize(void); | |||
74 | static void build_iSeries_Memory_Map(void); | 75 | static void build_iSeries_Memory_Map(void); |
75 | static void setup_iSeries_cache_sizes(void); | 76 | static void setup_iSeries_cache_sizes(void); |
76 | static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr); | 77 | static void iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr); |
78 | #ifdef CONFIG_PCI | ||
77 | extern void iSeries_pci_final_fixup(void); | 79 | extern void iSeries_pci_final_fixup(void); |
80 | #else | ||
81 | static void iSeries_pci_final_fixup(void) { } | ||
82 | #endif | ||
78 | 83 | ||
79 | /* Global Variables */ | 84 | /* Global Variables */ |
80 | static unsigned long procFreqHz; | 85 | static unsigned long procFreqHz; |
@@ -665,15 +670,11 @@ static void __init iSeries_bolt_kernel(unsigned long saddr, unsigned long eaddr) | |||
665 | } | 670 | } |
666 | } | 671 | } |
667 | 672 | ||
668 | extern unsigned long ppc_proc_freq; | ||
669 | extern unsigned long ppc_tb_freq; | ||
670 | |||
671 | /* | 673 | /* |
672 | * Document me. | 674 | * Document me. |
673 | */ | 675 | */ |
674 | static void __init iSeries_setup_arch(void) | 676 | static void __init iSeries_setup_arch(void) |
675 | { | 677 | { |
676 | void *eventStack; | ||
677 | unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index; | 678 | unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index; |
678 | 679 | ||
679 | /* Add an eye catcher and the systemcfg layout version number */ | 680 | /* Add an eye catcher and the systemcfg layout version number */ |
@@ -682,24 +683,7 @@ static void __init iSeries_setup_arch(void) | |||
682 | systemcfg->version.minor = SYSTEMCFG_MINOR; | 683 | systemcfg->version.minor = SYSTEMCFG_MINOR; |
683 | 684 | ||
684 | /* Setup the Lp Event Queue */ | 685 | /* Setup the Lp Event Queue */ |
685 | 686 | setup_hvlpevent_queue(); | |
686 | /* Allocate a page for the Event Stack | ||
687 | * The hypervisor wants the absolute real address, so | ||
688 | * we subtract out the KERNELBASE and add in the | ||
689 | * absolute real address of the kernel load area | ||
690 | */ | ||
691 | eventStack = alloc_bootmem_pages(LpEventStackSize); | ||
692 | memset(eventStack, 0, LpEventStackSize); | ||
693 | |||
694 | /* Invoke the hypervisor to initialize the event stack */ | ||
695 | HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize); | ||
696 | |||
697 | /* Initialize fields in our Lp Event Queue */ | ||
698 | xItLpQueue.xSlicEventStackPtr = (char *)eventStack; | ||
699 | xItLpQueue.xSlicCurEventPtr = (char *)eventStack; | ||
700 | xItLpQueue.xSlicLastValidEventPtr = (char *)eventStack + | ||
701 | (LpEventStackSize - LpEventMaxSize); | ||
702 | xItLpQueue.xIndex = 0; | ||
703 | 687 | ||
704 | /* Compute processor frequency */ | 688 | /* Compute processor frequency */ |
705 | procFreqHz = ((1UL << 34) * 1000000) / | 689 | procFreqHz = ((1UL << 34) * 1000000) / |
@@ -766,8 +750,6 @@ static void iSeries_halt(void) | |||
766 | mf_power_off(); | 750 | mf_power_off(); |
767 | } | 751 | } |
768 | 752 | ||
769 | extern void setup_default_decr(void); | ||
770 | |||
771 | /* | 753 | /* |
772 | * void __init iSeries_calibrate_decr() | 754 | * void __init iSeries_calibrate_decr() |
773 | * | 755 | * |
@@ -852,27 +834,9 @@ static int __init iSeries_src_init(void) | |||
852 | 834 | ||
853 | late_initcall(iSeries_src_init); | 835 | late_initcall(iSeries_src_init); |
854 | 836 | ||
855 | static int set_spread_lpevents(char *str) | 837 | #ifndef CONFIG_PCI |
856 | { | 838 | void __init iSeries_init_IRQ(void) { } |
857 | unsigned long i; | 839 | #endif |
858 | unsigned long val = simple_strtoul(str, NULL, 0); | ||
859 | |||
860 | /* | ||
861 | * The parameter is the number of processors to share in processing | ||
862 | * lp events. | ||
863 | */ | ||
864 | if (( val > 0) && (val <= NR_CPUS)) { | ||
865 | for (i = 1; i < val; ++i) | ||
866 | paca[i].lpqueue_ptr = paca[0].lpqueue_ptr; | ||
867 | |||
868 | printk("lpevent processing spread over %ld processors\n", val); | ||
869 | } else { | ||
870 | printk("invalid spread_lpevents %ld\n", val); | ||
871 | } | ||
872 | |||
873 | return 1; | ||
874 | } | ||
875 | __setup("spread_lpevents=", set_spread_lpevents); | ||
876 | 840 | ||
877 | void __init iSeries_early_setup(void) | 841 | void __init iSeries_early_setup(void) |
878 | { | 842 | { |
diff --git a/arch/ppc64/kernel/iSeries_smp.c b/arch/ppc64/kernel/iSeries_smp.c index ba1f084d5462..f74386e31638 100644 --- a/arch/ppc64/kernel/iSeries_smp.c +++ b/arch/ppc64/kernel/iSeries_smp.c | |||
@@ -38,9 +38,7 @@ | |||
38 | #include <asm/io.h> | 38 | #include <asm/io.h> |
39 | #include <asm/smp.h> | 39 | #include <asm/smp.h> |
40 | #include <asm/paca.h> | 40 | #include <asm/paca.h> |
41 | #include <asm/iSeries/LparData.h> | ||
42 | #include <asm/iSeries/HvCall.h> | 41 | #include <asm/iSeries/HvCall.h> |
43 | #include <asm/iSeries/HvCallCfg.h> | ||
44 | #include <asm/time.h> | 42 | #include <asm/time.h> |
45 | #include <asm/ppcdebug.h> | 43 | #include <asm/ppcdebug.h> |
46 | #include <asm/machdep.h> | 44 | #include <asm/machdep.h> |
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c index f24ce2b87200..08952c7e6216 100644 --- a/arch/ppc64/kernel/idle.c +++ b/arch/ppc64/kernel/idle.c | |||
@@ -42,6 +42,11 @@ static int (*idle_loop)(void); | |||
42 | static unsigned long maxYieldTime = 0; | 42 | static unsigned long maxYieldTime = 0; |
43 | static unsigned long minYieldTime = 0xffffffffffffffffUL; | 43 | static unsigned long minYieldTime = 0xffffffffffffffffUL; |
44 | 44 | ||
45 | static inline void process_iSeries_events(void) | ||
46 | { | ||
47 | asm volatile ("li 0,0x5555; sc" : : : "r0", "r3"); | ||
48 | } | ||
49 | |||
45 | static void yield_shared_processor(void) | 50 | static void yield_shared_processor(void) |
46 | { | 51 | { |
47 | unsigned long tb; | 52 | unsigned long tb; |
@@ -83,7 +88,7 @@ static int iSeries_idle(void) | |||
83 | 88 | ||
84 | while (1) { | 89 | while (1) { |
85 | if (lpaca->lppaca.shared_proc) { | 90 | if (lpaca->lppaca.shared_proc) { |
86 | if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr)) | 91 | if (hvlpevent_is_pending()) |
87 | process_iSeries_events(); | 92 | process_iSeries_events(); |
88 | if (!need_resched()) | 93 | if (!need_resched()) |
89 | yield_shared_processor(); | 94 | yield_shared_processor(); |
@@ -95,7 +100,7 @@ static int iSeries_idle(void) | |||
95 | 100 | ||
96 | while (!need_resched()) { | 101 | while (!need_resched()) { |
97 | HMT_medium(); | 102 | HMT_medium(); |
98 | if (ItLpQueue_isLpIntPending(lpaca->lpqueue_ptr)) | 103 | if (hvlpevent_is_pending()) |
99 | process_iSeries_events(); | 104 | process_iSeries_events(); |
100 | HMT_low(); | 105 | HMT_low(); |
101 | } | 106 | } |
@@ -292,7 +297,7 @@ static int native_idle(void) | |||
292 | if (need_resched()) | 297 | if (need_resched()) |
293 | schedule(); | 298 | schedule(); |
294 | 299 | ||
295 | if (cpu_is_offline(_smp_processor_id()) && | 300 | if (cpu_is_offline(raw_smp_processor_id()) && |
296 | system_state == SYSTEM_RUNNING) | 301 | system_state == SYSTEM_RUNNING) |
297 | cpu_die(); | 302 | cpu_die(); |
298 | } | 303 | } |
diff --git a/arch/ppc64/kernel/iommu.c b/arch/ppc64/kernel/iommu.c index 344164681d2c..8316426ccaf6 100644 --- a/arch/ppc64/kernel/iommu.c +++ b/arch/ppc64/kernel/iommu.c | |||
@@ -423,6 +423,9 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl) | |||
423 | tbl->it_largehint = tbl->it_halfpoint; | 423 | tbl->it_largehint = tbl->it_halfpoint; |
424 | spin_lock_init(&tbl->it_lock); | 424 | spin_lock_init(&tbl->it_lock); |
425 | 425 | ||
426 | /* Clear the hardware table in case firmware left allocations in it */ | ||
427 | ppc_md.tce_free(tbl, tbl->it_offset, tbl->it_size); | ||
428 | |||
426 | if (!welcomed) { | 429 | if (!welcomed) { |
427 | printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n", | 430 | printk(KERN_INFO "IOMMU table initialized, virtual merging %s\n", |
428 | novmerge ? "disabled" : "enabled"); | 431 | novmerge ? "disabled" : "enabled"); |
diff --git a/arch/ppc64/kernel/irq.c b/arch/ppc64/kernel/irq.c index 4fd7f203c1e3..f41afe545045 100644 --- a/arch/ppc64/kernel/irq.c +++ b/arch/ppc64/kernel/irq.c | |||
@@ -52,7 +52,7 @@ | |||
52 | #include <asm/cache.h> | 52 | #include <asm/cache.h> |
53 | #include <asm/prom.h> | 53 | #include <asm/prom.h> |
54 | #include <asm/ptrace.h> | 54 | #include <asm/ptrace.h> |
55 | #include <asm/iSeries/LparData.h> | 55 | #include <asm/iSeries/ItLpQueue.h> |
56 | #include <asm/machdep.h> | 56 | #include <asm/machdep.h> |
57 | #include <asm/paca.h> | 57 | #include <asm/paca.h> |
58 | 58 | ||
@@ -66,7 +66,6 @@ EXPORT_SYMBOL(irq_desc); | |||
66 | int distribute_irqs = 1; | 66 | int distribute_irqs = 1; |
67 | int __irq_offset_value; | 67 | int __irq_offset_value; |
68 | int ppc_spurious_interrupts; | 68 | int ppc_spurious_interrupts; |
69 | unsigned long lpevent_count; | ||
70 | u64 ppc64_interrupt_controller; | 69 | u64 ppc64_interrupt_controller; |
71 | 70 | ||
72 | int show_interrupts(struct seq_file *p, void *v) | 71 | int show_interrupts(struct seq_file *p, void *v) |
@@ -245,7 +244,7 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq) | |||
245 | 244 | ||
246 | spin_lock(&desc->lock); | 245 | spin_lock(&desc->lock); |
247 | if (!noirqdebug) | 246 | if (!noirqdebug) |
248 | note_interrupt(irq, desc, action_ret); | 247 | note_interrupt(irq, desc, action_ret, regs); |
249 | if (likely(!(desc->status & IRQ_PENDING))) | 248 | if (likely(!(desc->status & IRQ_PENDING))) |
250 | break; | 249 | break; |
251 | desc->status &= ~IRQ_PENDING; | 250 | desc->status &= ~IRQ_PENDING; |
@@ -269,7 +268,6 @@ out: | |||
269 | void do_IRQ(struct pt_regs *regs) | 268 | void do_IRQ(struct pt_regs *regs) |
270 | { | 269 | { |
271 | struct paca_struct *lpaca; | 270 | struct paca_struct *lpaca; |
272 | struct ItLpQueue *lpq; | ||
273 | 271 | ||
274 | irq_enter(); | 272 | irq_enter(); |
275 | 273 | ||
@@ -295,9 +293,8 @@ void do_IRQ(struct pt_regs *regs) | |||
295 | iSeries_smp_message_recv(regs); | 293 | iSeries_smp_message_recv(regs); |
296 | } | 294 | } |
297 | #endif /* CONFIG_SMP */ | 295 | #endif /* CONFIG_SMP */ |
298 | lpq = lpaca->lpqueue_ptr; | 296 | if (hvlpevent_is_pending()) |
299 | if (lpq && ItLpQueue_isLpIntPending(lpq)) | 297 | process_hvlpevents(regs); |
300 | lpevent_count += ItLpQueue_process(lpq, regs); | ||
301 | 298 | ||
302 | irq_exit(); | 299 | irq_exit(); |
303 | 300 | ||
@@ -395,6 +392,9 @@ int virt_irq_create_mapping(unsigned int real_irq) | |||
395 | if (ppc64_interrupt_controller == IC_OPEN_PIC) | 392 | if (ppc64_interrupt_controller == IC_OPEN_PIC) |
396 | return real_irq; /* no mapping for openpic (for now) */ | 393 | return real_irq; /* no mapping for openpic (for now) */ |
397 | 394 | ||
395 | if (ppc64_interrupt_controller == IC_BPA_IIC) | ||
396 | return real_irq; /* no mapping for iic either */ | ||
397 | |||
398 | /* don't map interrupts < MIN_VIRT_IRQ */ | 398 | /* don't map interrupts < MIN_VIRT_IRQ */ |
399 | if (real_irq < MIN_VIRT_IRQ) { | 399 | if (real_irq < MIN_VIRT_IRQ) { |
400 | virt_irq_to_real_map[real_irq] = real_irq; | 400 | virt_irq_to_real_map[real_irq] = real_irq; |
diff --git a/arch/ppc64/kernel/kprobes.c b/arch/ppc64/kernel/kprobes.c index e950a2058a19..1d2ff6d6b0b3 100644 --- a/arch/ppc64/kernel/kprobes.c +++ b/arch/ppc64/kernel/kprobes.c | |||
@@ -32,15 +32,16 @@ | |||
32 | #include <linux/ptrace.h> | 32 | #include <linux/ptrace.h> |
33 | #include <linux/spinlock.h> | 33 | #include <linux/spinlock.h> |
34 | #include <linux/preempt.h> | 34 | #include <linux/preempt.h> |
35 | #include <asm/cacheflush.h> | ||
35 | #include <asm/kdebug.h> | 36 | #include <asm/kdebug.h> |
36 | #include <asm/sstep.h> | 37 | #include <asm/sstep.h> |
37 | 38 | ||
38 | /* kprobe_status settings */ | 39 | static DECLARE_MUTEX(kprobe_mutex); |
39 | #define KPROBE_HIT_ACTIVE 0x00000001 | ||
40 | #define KPROBE_HIT_SS 0x00000002 | ||
41 | 40 | ||
42 | static struct kprobe *current_kprobe; | 41 | static struct kprobe *current_kprobe; |
43 | static unsigned long kprobe_status, kprobe_saved_msr; | 42 | static unsigned long kprobe_status, kprobe_saved_msr; |
43 | static struct kprobe *kprobe_prev; | ||
44 | static unsigned long kprobe_status_prev, kprobe_saved_msr_prev; | ||
44 | static struct pt_regs jprobe_saved_regs; | 45 | static struct pt_regs jprobe_saved_regs; |
45 | 46 | ||
46 | int arch_prepare_kprobe(struct kprobe *p) | 47 | int arch_prepare_kprobe(struct kprobe *p) |
@@ -55,32 +56,87 @@ int arch_prepare_kprobe(struct kprobe *p) | |||
55 | printk("Cannot register a kprobe on rfid or mtmsrd\n"); | 56 | printk("Cannot register a kprobe on rfid or mtmsrd\n"); |
56 | ret = -EINVAL; | 57 | ret = -EINVAL; |
57 | } | 58 | } |
59 | |||
60 | /* insn must be on a special executable page on ppc64 */ | ||
61 | if (!ret) { | ||
62 | up(&kprobe_mutex); | ||
63 | p->ainsn.insn = get_insn_slot(); | ||
64 | down(&kprobe_mutex); | ||
65 | if (!p->ainsn.insn) | ||
66 | ret = -ENOMEM; | ||
67 | } | ||
58 | return ret; | 68 | return ret; |
59 | } | 69 | } |
60 | 70 | ||
61 | void arch_copy_kprobe(struct kprobe *p) | 71 | void arch_copy_kprobe(struct kprobe *p) |
62 | { | 72 | { |
63 | memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); | 73 | memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t)); |
74 | p->opcode = *p->addr; | ||
64 | } | 75 | } |
65 | 76 | ||
66 | void arch_remove_kprobe(struct kprobe *p) | 77 | void arch_arm_kprobe(struct kprobe *p) |
67 | { | 78 | { |
79 | *p->addr = BREAKPOINT_INSTRUCTION; | ||
80 | flush_icache_range((unsigned long) p->addr, | ||
81 | (unsigned long) p->addr + sizeof(kprobe_opcode_t)); | ||
68 | } | 82 | } |
69 | 83 | ||
70 | static inline void disarm_kprobe(struct kprobe *p, struct pt_regs *regs) | 84 | void arch_disarm_kprobe(struct kprobe *p) |
71 | { | 85 | { |
72 | *p->addr = p->opcode; | 86 | *p->addr = p->opcode; |
73 | regs->nip = (unsigned long)p->addr; | 87 | flush_icache_range((unsigned long) p->addr, |
88 | (unsigned long) p->addr + sizeof(kprobe_opcode_t)); | ||
89 | } | ||
90 | |||
91 | void arch_remove_kprobe(struct kprobe *p) | ||
92 | { | ||
93 | up(&kprobe_mutex); | ||
94 | free_insn_slot(p->ainsn.insn); | ||
95 | down(&kprobe_mutex); | ||
74 | } | 96 | } |
75 | 97 | ||
76 | static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) | 98 | static inline void prepare_singlestep(struct kprobe *p, struct pt_regs *regs) |
77 | { | 99 | { |
100 | kprobe_opcode_t insn = *p->ainsn.insn; | ||
101 | |||
78 | regs->msr |= MSR_SE; | 102 | regs->msr |= MSR_SE; |
79 | /*single step inline if it a breakpoint instruction*/ | 103 | |
80 | if (p->opcode == BREAKPOINT_INSTRUCTION) | 104 | /* single step inline if it is a trap variant */ |
105 | if (IS_TW(insn) || IS_TD(insn) || IS_TWI(insn) || IS_TDI(insn)) | ||
81 | regs->nip = (unsigned long)p->addr; | 106 | regs->nip = (unsigned long)p->addr; |
82 | else | 107 | else |
83 | regs->nip = (unsigned long)&p->ainsn.insn; | 108 | regs->nip = (unsigned long)p->ainsn.insn; |
109 | } | ||
110 | |||
111 | static inline void save_previous_kprobe(void) | ||
112 | { | ||
113 | kprobe_prev = current_kprobe; | ||
114 | kprobe_status_prev = kprobe_status; | ||
115 | kprobe_saved_msr_prev = kprobe_saved_msr; | ||
116 | } | ||
117 | |||
118 | static inline void restore_previous_kprobe(void) | ||
119 | { | ||
120 | current_kprobe = kprobe_prev; | ||
121 | kprobe_status = kprobe_status_prev; | ||
122 | kprobe_saved_msr = kprobe_saved_msr_prev; | ||
123 | } | ||
124 | |||
125 | void arch_prepare_kretprobe(struct kretprobe *rp, struct pt_regs *regs) | ||
126 | { | ||
127 | struct kretprobe_instance *ri; | ||
128 | |||
129 | if ((ri = get_free_rp_inst(rp)) != NULL) { | ||
130 | ri->rp = rp; | ||
131 | ri->task = current; | ||
132 | ri->ret_addr = (kprobe_opcode_t *)regs->link; | ||
133 | |||
134 | /* Replace the return addr with trampoline addr */ | ||
135 | regs->link = (unsigned long)kretprobe_trampoline; | ||
136 | add_rp_inst(ri); | ||
137 | } else { | ||
138 | rp->nmissed++; | ||
139 | } | ||
84 | } | 140 | } |
85 | 141 | ||
86 | static inline int kprobe_handler(struct pt_regs *regs) | 142 | static inline int kprobe_handler(struct pt_regs *regs) |
@@ -101,8 +157,19 @@ static inline int kprobe_handler(struct pt_regs *regs) | |||
101 | unlock_kprobes(); | 157 | unlock_kprobes(); |
102 | goto no_kprobe; | 158 | goto no_kprobe; |
103 | } | 159 | } |
104 | disarm_kprobe(p, regs); | 160 | /* We have reentered the kprobe_handler(), since |
105 | ret = 1; | 161 | * another probe was hit while within the handler. |
162 | * Here we save the original kprobes variables and | ||
163 | * just single step on the instruction of the new probe | ||
164 | * without calling any user handlers. | ||
165 | */ | ||
166 | save_previous_kprobe(); | ||
167 | current_kprobe = p; | ||
168 | kprobe_saved_msr = regs->msr; | ||
169 | p->nmissed++; | ||
170 | prepare_singlestep(p, regs); | ||
171 | kprobe_status = KPROBE_REENTER; | ||
172 | return 1; | ||
106 | } else { | 173 | } else { |
107 | p = current_kprobe; | 174 | p = current_kprobe; |
108 | if (p->break_handler && p->break_handler(p, regs)) { | 175 | if (p->break_handler && p->break_handler(p, regs)) { |
@@ -162,6 +229,78 @@ no_kprobe: | |||
162 | } | 229 | } |
163 | 230 | ||
164 | /* | 231 | /* |
232 | * Function return probe trampoline: | ||
233 | * - init_kprobes() establishes a probepoint here | ||
234 | * - When the probed function returns, this probe | ||
235 | * causes the handlers to fire | ||
236 | */ | ||
237 | void kretprobe_trampoline_holder(void) | ||
238 | { | ||
239 | asm volatile(".global kretprobe_trampoline\n" | ||
240 | "kretprobe_trampoline:\n" | ||
241 | "nop\n"); | ||
242 | } | ||
243 | |||
244 | /* | ||
245 | * Called when the probe at kretprobe trampoline is hit | ||
246 | */ | ||
247 | int trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs) | ||
248 | { | ||
249 | struct kretprobe_instance *ri = NULL; | ||
250 | struct hlist_head *head; | ||
251 | struct hlist_node *node, *tmp; | ||
252 | unsigned long orig_ret_address = 0; | ||
253 | unsigned long trampoline_address =(unsigned long)&kretprobe_trampoline; | ||
254 | |||
255 | head = kretprobe_inst_table_head(current); | ||
256 | |||
257 | /* | ||
258 | * It is possible to have multiple instances associated with a given | ||
259 | * task either because an multiple functions in the call path | ||
260 | * have a return probe installed on them, and/or more then one return | ||
261 | * return probe was registered for a target function. | ||
262 | * | ||
263 | * We can handle this because: | ||
264 | * - instances are always inserted at the head of the list | ||
265 | * - when multiple return probes are registered for the same | ||
266 | * function, the first instance's ret_addr will point to the | ||
267 | * real return address, and all the rest will point to | ||
268 | * kretprobe_trampoline | ||
269 | */ | ||
270 | hlist_for_each_entry_safe(ri, node, tmp, head, hlist) { | ||
271 | if (ri->task != current) | ||
272 | /* another task is sharing our hash bucket */ | ||
273 | continue; | ||
274 | |||
275 | if (ri->rp && ri->rp->handler) | ||
276 | ri->rp->handler(ri, regs); | ||
277 | |||
278 | orig_ret_address = (unsigned long)ri->ret_addr; | ||
279 | recycle_rp_inst(ri); | ||
280 | |||
281 | if (orig_ret_address != trampoline_address) | ||
282 | /* | ||
283 | * This is the real return address. Any other | ||
284 | * instances associated with this task are for | ||
285 | * other calls deeper on the call stack | ||
286 | */ | ||
287 | break; | ||
288 | } | ||
289 | |||
290 | BUG_ON(!orig_ret_address || (orig_ret_address == trampoline_address)); | ||
291 | regs->nip = orig_ret_address; | ||
292 | |||
293 | unlock_kprobes(); | ||
294 | |||
295 | /* | ||
296 | * By returning a non-zero value, we are telling | ||
297 | * kprobe_handler() that we have handled unlocking | ||
298 | * and re-enabling preemption. | ||
299 | */ | ||
300 | return 1; | ||
301 | } | ||
302 | |||
303 | /* | ||
165 | * Called after single-stepping. p->addr is the address of the | 304 | * Called after single-stepping. p->addr is the address of the |
166 | * instruction whose first byte has been replaced by the "breakpoint" | 305 | * instruction whose first byte has been replaced by the "breakpoint" |
167 | * instruction. To avoid the SMP problems that can occur when we | 306 | * instruction. To avoid the SMP problems that can occur when we |
@@ -172,9 +311,10 @@ no_kprobe: | |||
172 | static void resume_execution(struct kprobe *p, struct pt_regs *regs) | 311 | static void resume_execution(struct kprobe *p, struct pt_regs *regs) |
173 | { | 312 | { |
174 | int ret; | 313 | int ret; |
314 | unsigned int insn = *p->ainsn.insn; | ||
175 | 315 | ||
176 | regs->nip = (unsigned long)p->addr; | 316 | regs->nip = (unsigned long)p->addr; |
177 | ret = emulate_step(regs, p->ainsn.insn[0]); | 317 | ret = emulate_step(regs, insn); |
178 | if (ret == 0) | 318 | if (ret == 0) |
179 | regs->nip = (unsigned long)p->addr + 4; | 319 | regs->nip = (unsigned long)p->addr + 4; |
180 | } | 320 | } |
@@ -184,13 +324,21 @@ static inline int post_kprobe_handler(struct pt_regs *regs) | |||
184 | if (!kprobe_running()) | 324 | if (!kprobe_running()) |
185 | return 0; | 325 | return 0; |
186 | 326 | ||
187 | if (current_kprobe->post_handler) | 327 | if ((kprobe_status != KPROBE_REENTER) && current_kprobe->post_handler) { |
328 | kprobe_status = KPROBE_HIT_SSDONE; | ||
188 | current_kprobe->post_handler(current_kprobe, regs, 0); | 329 | current_kprobe->post_handler(current_kprobe, regs, 0); |
330 | } | ||
189 | 331 | ||
190 | resume_execution(current_kprobe, regs); | 332 | resume_execution(current_kprobe, regs); |
191 | regs->msr |= kprobe_saved_msr; | 333 | regs->msr |= kprobe_saved_msr; |
192 | 334 | ||
335 | /* Restore the original saved kprobes variables and continue. */ | ||
336 | if (kprobe_status == KPROBE_REENTER) { | ||
337 | restore_previous_kprobe(); | ||
338 | goto out; | ||
339 | } | ||
193 | unlock_kprobes(); | 340 | unlock_kprobes(); |
341 | out: | ||
194 | preempt_enable_no_resched(); | 342 | preempt_enable_no_resched(); |
195 | 343 | ||
196 | /* | 344 | /* |
@@ -290,3 +438,13 @@ int longjmp_break_handler(struct kprobe *p, struct pt_regs *regs) | |||
290 | memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs)); | 438 | memcpy(regs, &jprobe_saved_regs, sizeof(struct pt_regs)); |
291 | return 1; | 439 | return 1; |
292 | } | 440 | } |
441 | |||
442 | static struct kprobe trampoline_p = { | ||
443 | .addr = (kprobe_opcode_t *) &kretprobe_trampoline, | ||
444 | .pre_handler = trampoline_probe_handler | ||
445 | }; | ||
446 | |||
447 | int __init arch_init(void) | ||
448 | { | ||
449 | return register_kprobe(&trampoline_p); | ||
450 | } | ||
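The kretprobe_trampoline above, together with arch_init() registering trampoline_p, is what lets the generic kretprobe code intercept function returns on ppc64. A minimal consumer might look like the sketch below; the probed symbol (do_fork), the use of kallsyms_lookup_name() to resolve it, and the module/function names are illustrative assumptions, not part of this patch.

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/kprobes.h>

static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
{
	/* ri->ret_addr is the real return address saved before the trampoline
	 * address was substituted; on ppc64 the probed function's return
	 * value is in regs->gpr[3]. */
	printk(KERN_INFO "probed function returning to %p\n", ri->ret_addr);
	return 0;
}

static struct kretprobe my_kretprobe = {
	.handler   = ret_handler,
	.maxactive = 20,	/* max concurrent return-probe instances */
};

static int __init kret_example_init(void)
{
	/* resolving the symbol this way is an assumption for the example */
	my_kretprobe.kp.addr =
		(kprobe_opcode_t *)kallsyms_lookup_name("do_fork");
	if (!my_kretprobe.kp.addr)
		return -ENOENT;
	return register_kretprobe(&my_kretprobe);
}

static void __exit kret_example_exit(void)
{
	unregister_kretprobe(&my_kretprobe);
}

module_init(kret_example_init);
module_exit(kret_example_exit);
MODULE_LICENSE("GPL");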
diff --git a/arch/ppc64/kernel/lparcfg.c b/arch/ppc64/kernel/lparcfg.c index a8fd32df848b..02e96627fa66 100644 --- a/arch/ppc64/kernel/lparcfg.c +++ b/arch/ppc64/kernel/lparcfg.c | |||
@@ -28,12 +28,13 @@ | |||
28 | #include <asm/uaccess.h> | 28 | #include <asm/uaccess.h> |
29 | #include <asm/iSeries/HvLpConfig.h> | 29 | #include <asm/iSeries/HvLpConfig.h> |
30 | #include <asm/lppaca.h> | 30 | #include <asm/lppaca.h> |
31 | #include <asm/iSeries/LparData.h> | ||
32 | #include <asm/hvcall.h> | 31 | #include <asm/hvcall.h> |
33 | #include <asm/cputable.h> | 32 | #include <asm/cputable.h> |
34 | #include <asm/rtas.h> | 33 | #include <asm/rtas.h> |
35 | #include <asm/system.h> | 34 | #include <asm/system.h> |
36 | #include <asm/time.h> | 35 | #include <asm/time.h> |
36 | #include <asm/iSeries/ItExtVpdPanel.h> | ||
37 | #include <asm/prom.h> | ||
37 | 38 | ||
38 | #define MODULE_VERS "1.6" | 39 | #define MODULE_VERS "1.6" |
39 | #define MODULE_NAME "lparcfg" | 40 | #define MODULE_NAME "lparcfg" |
diff --git a/arch/ppc64/kernel/machine_kexec.c b/arch/ppc64/kernel/machine_kexec.c new file mode 100644 index 000000000000..fdb2fc649d72 --- /dev/null +++ b/arch/ppc64/kernel/machine_kexec.c | |||
@@ -0,0 +1,302 @@ | |||
1 | /* | ||
2 | * machine_kexec.c - handle transition of Linux booting another kernel | ||
3 | * | ||
4 | * Copyright (C) 2004-2005, IBM Corp. | ||
5 | * | ||
6 | * Created by: Milton D Miller II | ||
7 | * | ||
8 | * This source code is licensed under the GNU General Public License, | ||
9 | * Version 2. See the file COPYING for more details. | ||
10 | */ | ||
11 | |||
12 | |||
13 | #include <linux/cpumask.h> | ||
14 | #include <linux/kexec.h> | ||
15 | #include <linux/smp.h> | ||
16 | #include <linux/thread_info.h> | ||
17 | #include <linux/errno.h> | ||
18 | |||
19 | #include <asm/page.h> | ||
20 | #include <asm/current.h> | ||
21 | #include <asm/machdep.h> | ||
22 | #include <asm/cacheflush.h> | ||
23 | #include <asm/paca.h> | ||
24 | #include <asm/mmu.h> | ||
25 | #include <asm/sections.h> /* _end */ | ||
26 | #include <asm/prom.h> | ||
27 | |||
28 | #define HASH_GROUP_SIZE 0x80 /* size of each hash group, asm/mmu.h */ | ||
29 | |||
30 | /* Have this around till we move it into crash specific file */ | ||
31 | note_buf_t crash_notes[NR_CPUS]; | ||
32 | |||
33 | /* Dummy for now. Not sure if we need to have a crash shutdown in here | ||
34 | * and what it would achieve. Leaving it in for now so the code compiles | ||
35 | * in the generic kexec environment. | ||
36 | */ | ||
37 | void machine_crash_shutdown(struct pt_regs *regs) | ||
38 | { | ||
39 | /* do nothing right now */ | ||
40 | /* smp_relase_cpus() if we want smp on panic kernel */ | ||
41 | /* cpu_irq_down to isolate us until we are ready */ | ||
42 | } | ||
43 | |||
44 | int machine_kexec_prepare(struct kimage *image) | ||
45 | { | ||
46 | int i; | ||
47 | unsigned long begin, end; /* limits of segment */ | ||
48 | unsigned long low, high; /* limits of blocked memory range */ | ||
49 | struct device_node *node; | ||
50 | unsigned long *basep; | ||
51 | unsigned int *sizep; | ||
52 | |||
53 | if (!ppc_md.hpte_clear_all) | ||
54 | return -ENOENT; | ||
55 | |||
56 | /* | ||
57 | * Since we use the kernel fault handlers and paging code to | ||
58 | * handle the virtual mode, we must make sure no destination | ||
59 | * overlaps kernel static data or bss. | ||
60 | */ | ||
61 | for (i = 0; i < image->nr_segments; i++) | ||
62 | if (image->segment[i].mem < __pa(_end)) | ||
63 | return -ETXTBSY; | ||
64 | |||
65 | /* | ||
66 | * For non-LPAR, we absolutely can not overwrite the mmu hash | ||
67 | * table, since we are still using the bolted entries in it to | ||
68 | * do the copy. Check that here. | ||
69 | * | ||
70 | * It is safe if the end is below the start of the blocked | ||
71 | * region (end <= low), or if the beginning is after the | ||
72 | * end of the blocked region (begin >= high). Use the | ||
73 | * boolean identity !(a || b) === (!a && !b). | ||
74 | */ | ||
75 | if (htab_address) { | ||
76 | low = __pa(htab_address); | ||
77 | high = low + (htab_hash_mask + 1) * HASH_GROUP_SIZE; | ||
78 | |||
79 | for (i = 0; i < image->nr_segments; i++) { | ||
80 | begin = image->segment[i].mem; | ||
81 | end = begin + image->segment[i].memsz; | ||
82 | |||
83 | if ((begin < high) && (end > low)) | ||
84 | return -ETXTBSY; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | /* We also should not overwrite the tce tables */ | ||
89 | for (node = of_find_node_by_type(NULL, "pci"); node != NULL; | ||
90 | node = of_find_node_by_type(node, "pci")) { | ||
91 | basep = (unsigned long *)get_property(node, "linux,tce-base", | ||
92 | NULL); | ||
93 | sizep = (unsigned int *)get_property(node, "linux,tce-size", | ||
94 | NULL); | ||
95 | if (basep == NULL || sizep == NULL) | ||
96 | continue; | ||
97 | |||
98 | low = *basep; | ||
99 | high = low + (*sizep); | ||
100 | |||
101 | for (i = 0; i < image->nr_segments; i++) { | ||
102 | begin = image->segment[i].mem; | ||
103 | end = begin + image->segment[i].memsz; | ||
104 | |||
105 | if ((begin < high) && (end > low)) | ||
106 | return -ETXTBSY; | ||
107 | } | ||
108 | } | ||
109 | |||
110 | return 0; | ||
111 | } | ||
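Both checks in machine_kexec_prepare() rest on the interval identity spelled out in the comment: a destination segment [begin, end) is safe iff (end <= low) || (begin >= high), so by De Morgan it conflicts exactly when (begin < high) && (end > low). A standalone sketch of that predicate (the helper name is hypothetical, not part of the patch):

/* Non-zero when the segment [begin, end) intersects the blocked
 * physical range [low, high), e.g. the hash table or a TCE table. */
static inline int kexec_range_conflicts(unsigned long begin, unsigned long end,
					unsigned long low, unsigned long high)
{
	/* safe iff (end <= low) || (begin >= high); negated via De Morgan */
	return (begin < high) && (end > low);
}

For example, a segment covering [0x2000000, 0x2100000) does not conflict with a hash table at [0x3000000, 0x3400000), since its end falls at or below the start of the blocked range.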
112 | |||
113 | void machine_kexec_cleanup(struct kimage *image) | ||
114 | { | ||
115 | /* we do nothing in prepare that needs to be undone */ | ||
116 | } | ||
117 | |||
118 | #define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE) | ||
119 | |||
120 | static void copy_segments(unsigned long ind) | ||
121 | { | ||
122 | unsigned long entry; | ||
123 | unsigned long *ptr; | ||
124 | void *dest; | ||
125 | void *addr; | ||
126 | |||
127 | /* | ||
128 | * We rely on kexec_load to create a list that properly | ||
129 | * initializes these pointers before they are used. | ||
130 | * We will still crash if the list is wrong, but at least | ||
131 | * the compiler will be quiet. | ||
132 | */ | ||
133 | ptr = NULL; | ||
134 | dest = NULL; | ||
135 | |||
136 | for (entry = ind; !(entry & IND_DONE); entry = *ptr++) { | ||
137 | addr = __va(entry & PAGE_MASK); | ||
138 | |||
139 | switch (entry & IND_FLAGS) { | ||
140 | case IND_DESTINATION: | ||
141 | dest = addr; | ||
142 | break; | ||
143 | case IND_INDIRECTION: | ||
144 | ptr = addr; | ||
145 | break; | ||
146 | case IND_SOURCE: | ||
147 | copy_page(dest, addr); | ||
148 | dest += PAGE_SIZE; | ||
149 | } | ||
150 | } | ||
151 | } | ||
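copy_segments() walks the indirection list that the generic kexec_load code builds: each entry is a physical address with one of the IND_* flags in its low bits, and IND_DONE terminates the walk. As a sketch of the same encoding, the hypothetical walker below counts source pages instead of copying them; it is illustrative only and makes the same assumption that the list initializes ptr before it is dereferenced.

static unsigned long count_source_pages(unsigned long ind)
{
	unsigned long entry;
	unsigned long *ptr = NULL;
	unsigned long pages = 0;

	for (entry = ind; !(entry & IND_DONE); entry = *ptr++) {
		void *addr = __va(entry & PAGE_MASK);

		if (entry & IND_INDIRECTION)
			ptr = addr;	/* continue in the next indirection page */
		else if (entry & IND_SOURCE)
			pages++;	/* one page that would be copied */
		/* IND_DESTINATION entries only move the copy cursor */
	}
	return pages;
}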
152 | |||
153 | void kexec_copy_flush(struct kimage *image) | ||
154 | { | ||
155 | long i, nr_segments = image->nr_segments; | ||
156 | struct kexec_segment ranges[KEXEC_SEGMENT_MAX]; | ||
157 | |||
158 | /* save the ranges on the stack to efficiently flush the icache */ | ||
159 | memcpy(ranges, image->segment, sizeof(ranges)); | ||
160 | |||
161 | /* | ||
162 | * After this call we may not use anything allocated in dynamic | ||
163 | * memory, including *image. | ||
164 | * | ||
165 | * Only globals and the stack are allowed. | ||
166 | */ | ||
167 | copy_segments(image->head); | ||
168 | |||
169 | /* | ||
170 | * we need to clear the icache for all dest pages sometime, | ||
171 | * including ones that were in place on the original copy | ||
172 | */ | ||
173 | for (i = 0; i < nr_segments; i++) | ||
174 | flush_icache_range(ranges[i].mem + KERNELBASE, | ||
175 | ranges[i].mem + KERNELBASE + | ||
176 | ranges[i].memsz); | ||
177 | } | ||
178 | |||
179 | #ifdef CONFIG_SMP | ||
180 | |||
181 | /* FIXME: we schedule this function on all cpus by interrupting them, but | ||
182 | * we would like to run it off irq level so that the interrupt | ||
183 | * controller is left in a clean state. | ||
184 | */ | ||
185 | void kexec_smp_down(void *arg) | ||
186 | { | ||
187 | if (ppc_md.cpu_irq_down) | ||
188 | ppc_md.cpu_irq_down(); | ||
189 | |||
190 | local_irq_disable(); | ||
191 | kexec_smp_wait(); | ||
192 | /* NOTREACHED */ | ||
193 | } | ||
194 | |||
195 | static void kexec_prepare_cpus(void) | ||
196 | { | ||
197 | int my_cpu, i, notified=-1; | ||
198 | |||
199 | smp_call_function(kexec_smp_down, NULL, 0, /* wait */0); | ||
200 | my_cpu = get_cpu(); | ||
201 | |||
202 | /* check the other cpus are now down (via paca hw cpu id == -1) */ | ||
203 | for (i=0; i < NR_CPUS; i++) { | ||
204 | if (i == my_cpu) | ||
205 | continue; | ||
206 | |||
207 | while (paca[i].hw_cpu_id != -1) { | ||
208 | if (!cpu_possible(i)) { | ||
209 | printk("kexec: cpu %d hw_cpu_id %d is not" | ||
210 | " possible, ignoring\n", | ||
211 | i, paca[i].hw_cpu_id); | ||
212 | break; | ||
213 | } | ||
214 | if (!cpu_online(i)) { | ||
215 | /* Fixme: this can be spinning in | ||
216 | * pSeries_secondary_wait with a paca | ||
217 | * waiting for it to go online. | ||
218 | */ | ||
219 | printk("kexec: cpu %d hw_cpu_id %d is not" | ||
220 | " online, ignoring\n", | ||
221 | i, paca[i].hw_cpu_id); | ||
222 | break; | ||
223 | } | ||
224 | if (i != notified) { | ||
225 | printk( "kexec: waiting for cpu %d (physical" | ||
226 | " %d) to go down\n", | ||
227 | i, paca[i].hw_cpu_id); | ||
228 | notified = i; | ||
229 | } | ||
230 | } | ||
231 | } | ||
232 | |||
233 | /* after we tell the others to go down */ | ||
234 | if (ppc_md.cpu_irq_down) | ||
235 | ppc_md.cpu_irq_down(); | ||
236 | |||
237 | put_cpu(); | ||
238 | |||
239 | local_irq_disable(); | ||
240 | } | ||
241 | |||
242 | #else /* ! SMP */ | ||
243 | |||
244 | static void kexec_prepare_cpus(void) | ||
245 | { | ||
246 | /* | ||
247 | * move the secondaries to us so that we can copy | ||
248 | * the new kernel 0-0x100 safely | ||
249 | * | ||
250 | * should this be done from setup.c when kexec is configured? | ||
251 | */ | ||
252 | smp_relase_cpus(); | ||
253 | if (ppc_md.cpu_irq_down) | ||
254 | ppc_md.cpu_irq_down(); | ||
255 | local_irq_disable(); | ||
256 | } | ||
257 | |||
258 | #endif /* SMP */ | ||
259 | |||
260 | /* | ||
261 | * kexec thread structure and stack. | ||
262 | * | ||
263 | * We need to make sure that this is 16384-byte aligned due to the | ||
264 | * way process stacks are handled. It also must be statically allocated | ||
265 | * or allocated as part of the kimage, because everything else may be | ||
266 | * overwritten when we copy the kexec image. We piggyback on the | ||
267 | * "init_task" linker section here to statically allocate a stack. | ||
268 | * | ||
269 | * We could use a smaller stack if we don't care about anything using | ||
270 | * current, but that audit has not been performed. | ||
271 | */ | ||
272 | union thread_union kexec_stack | ||
273 | __attribute__((__section__(".data.init_task"))) = { }; | ||
274 | |||
275 | /* Our assembly helper, in kexec_stub.S */ | ||
276 | extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start, | ||
277 | void *image, void *control, | ||
278 | void (*clear_all)(void)) ATTRIB_NORET; | ||
279 | |||
280 | /* too late to fail here */ | ||
281 | void machine_kexec(struct kimage *image) | ||
282 | { | ||
283 | |||
284 | /* prepare control code if any */ | ||
285 | |||
286 | /* shutdown other cpus into our wait loop and quiesce interrupts */ | ||
287 | kexec_prepare_cpus(); | ||
288 | |||
289 | /* switch to a statically allocated stack. Based on the irq stack code. | ||
290 | * XXX: the task struct will likely be invalid once we do the copy! | ||
291 | */ | ||
292 | kexec_stack.thread_info.task = current_thread_info()->task; | ||
293 | kexec_stack.thread_info.flags = 0; | ||
294 | |||
295 | /* Some things are best done in assembly. Finding globals with | ||
296 | * a toc is easier in C, so pass in what we can. | ||
297 | */ | ||
298 | kexec_sequence(&kexec_stack, image->start, image, | ||
299 | page_address(image->control_code_page), | ||
300 | ppc_md.hpte_clear_all); | ||
301 | /* NOTREACHED */ | ||
302 | } | ||
diff --git a/arch/ppc64/kernel/maple_setup.c b/arch/ppc64/kernel/maple_setup.c index 8cf95a27178e..da8900b51f40 100644 --- a/arch/ppc64/kernel/maple_setup.c +++ b/arch/ppc64/kernel/maple_setup.c | |||
@@ -78,17 +78,77 @@ extern int maple_pci_get_legacy_ide_irq(struct pci_dev *dev, int channel); | |||
78 | extern void generic_find_legacy_serial_ports(u64 *physport, | 78 | extern void generic_find_legacy_serial_ports(u64 *physport, |
79 | unsigned int *default_speed); | 79 | unsigned int *default_speed); |
80 | 80 | ||
81 | |||
82 | static void maple_restart(char *cmd) | 81 | static void maple_restart(char *cmd) |
83 | { | 82 | { |
83 | unsigned int maple_nvram_base; | ||
84 | unsigned int maple_nvram_offset; | ||
85 | unsigned int maple_nvram_command; | ||
86 | struct device_node *rtcs; | ||
87 | |||
88 | /* find NVRAM device */ | ||
89 | rtcs = find_compatible_devices("nvram", "AMD8111"); | ||
90 | if (rtcs && rtcs->addrs) { | ||
91 | maple_nvram_base = rtcs->addrs[0].address; | ||
92 | } else { | ||
93 | printk(KERN_EMERG "Maple: Unable to find NVRAM\n"); | ||
94 | printk(KERN_EMERG "Maple: Manual Restart Required\n"); | ||
95 | return; | ||
96 | } | ||
97 | |||
98 | /* find service processor device */ | ||
99 | rtcs = find_devices("service-processor"); | ||
100 | if (!rtcs) { | ||
101 | printk(KERN_EMERG "Maple: Unable to find Service Processor\n"); | ||
102 | printk(KERN_EMERG "Maple: Manual Restart Required\n"); | ||
103 | return; | ||
104 | } | ||
105 | maple_nvram_offset = *(unsigned int*) get_property(rtcs, | ||
106 | "restart-addr", NULL); | ||
107 | maple_nvram_command = *(unsigned int*) get_property(rtcs, | ||
108 | "restart-value", NULL); | ||
109 | |||
110 | /* send command */ | ||
111 | outb_p(maple_nvram_command, maple_nvram_base + maple_nvram_offset); | ||
112 | for (;;) ; | ||
84 | } | 113 | } |
85 | 114 | ||
86 | static void maple_power_off(void) | 115 | static void maple_power_off(void) |
87 | { | 116 | { |
117 | unsigned int maple_nvram_base; | ||
118 | unsigned int maple_nvram_offset; | ||
119 | unsigned int maple_nvram_command; | ||
120 | struct device_node *rtcs; | ||
121 | |||
122 | /* find NVRAM device */ | ||
123 | rtcs = find_compatible_devices("nvram", "AMD8111"); | ||
124 | if (rtcs && rtcs->addrs) { | ||
125 | maple_nvram_base = rtcs->addrs[0].address; | ||
126 | } else { | ||
127 | printk(KERN_EMERG "Maple: Unable to find NVRAM\n"); | ||
128 | printk(KERN_EMERG "Maple: Manual Power-Down Required\n"); | ||
129 | return; | ||
130 | } | ||
131 | |||
132 | /* find service processor device */ | ||
133 | rtcs = find_devices("service-processor"); | ||
134 | if (!rtcs) { | ||
135 | printk(KERN_EMERG "Maple: Unable to find Service Processor\n"); | ||
136 | printk(KERN_EMERG "Maple: Manual Power-Down Required\n"); | ||
137 | return; | ||
138 | } | ||
139 | maple_nvram_offset = *(unsigned int*) get_property(rtcs, | ||
140 | "power-off-addr", NULL); | ||
141 | maple_nvram_command = *(unsigned int*) get_property(rtcs, | ||
142 | "power-off-value", NULL); | ||
143 | |||
144 | /* send command */ | ||
145 | outb_p(maple_nvram_command, maple_nvram_base + maple_nvram_offset); | ||
146 | for (;;) ; | ||
88 | } | 147 | } |
89 | 148 | ||
90 | static void maple_halt(void) | 149 | static void maple_halt(void) |
91 | { | 150 | { |
151 | maple_power_off(); | ||
92 | } | 152 | } |
93 | 153 | ||
94 | #ifdef CONFIG_SMP | 154 | #ifdef CONFIG_SMP |
@@ -235,6 +295,6 @@ struct machdep_calls __initdata maple_md = { | |||
235 | .get_boot_time = maple_get_boot_time, | 295 | .get_boot_time = maple_get_boot_time, |
236 | .set_rtc_time = maple_set_rtc_time, | 296 | .set_rtc_time = maple_set_rtc_time, |
237 | .get_rtc_time = maple_get_rtc_time, | 297 | .get_rtc_time = maple_get_rtc_time, |
238 | .calibrate_decr = maple_calibrate_decr, | 298 | .calibrate_decr = generic_calibrate_decr, |
239 | .progress = maple_progress, | 299 | .progress = maple_progress, |
240 | }; | 300 | }; |
diff --git a/arch/ppc64/kernel/maple_time.c b/arch/ppc64/kernel/maple_time.c index 07ce7895b43d..d65210abcd03 100644 --- a/arch/ppc64/kernel/maple_time.c +++ b/arch/ppc64/kernel/maple_time.c | |||
@@ -42,11 +42,8 @@ | |||
42 | #define DBG(x...) | 42 | #define DBG(x...) |
43 | #endif | 43 | #endif |
44 | 44 | ||
45 | extern void setup_default_decr(void); | ||
46 | extern void GregorianDay(struct rtc_time * tm); | 45 | extern void GregorianDay(struct rtc_time * tm); |
47 | 46 | ||
48 | extern unsigned long ppc_tb_freq; | ||
49 | extern unsigned long ppc_proc_freq; | ||
50 | static int maple_rtc_addr; | 47 | static int maple_rtc_addr; |
51 | 48 | ||
52 | static int maple_clock_read(int addr) | 49 | static int maple_clock_read(int addr) |
@@ -176,51 +173,3 @@ void __init maple_get_boot_time(struct rtc_time *tm) | |||
176 | maple_get_rtc_time(tm); | 173 | maple_get_rtc_time(tm); |
177 | } | 174 | } |
178 | 175 | ||
179 | /* XXX FIXME: Some sane defaults: 125 MHz timebase, 1GHz processor */ | ||
180 | #define DEFAULT_TB_FREQ 125000000UL | ||
181 | #define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8) | ||
182 | |||
183 | void __init maple_calibrate_decr(void) | ||
184 | { | ||
185 | struct device_node *cpu; | ||
186 | struct div_result divres; | ||
187 | unsigned int *fp = NULL; | ||
188 | |||
189 | /* | ||
190 | * The cpu node should have a timebase-frequency property | ||
191 | * to tell us the rate at which the decrementer counts. | ||
192 | */ | ||
193 | cpu = of_find_node_by_type(NULL, "cpu"); | ||
194 | |||
195 | ppc_tb_freq = DEFAULT_TB_FREQ; | ||
196 | if (cpu != 0) | ||
197 | fp = (unsigned int *)get_property(cpu, "timebase-frequency", NULL); | ||
198 | if (fp != NULL) | ||
199 | ppc_tb_freq = *fp; | ||
200 | else | ||
201 | printk(KERN_ERR "WARNING: Estimating decrementer frequency (not found)\n"); | ||
202 | fp = NULL; | ||
203 | ppc_proc_freq = DEFAULT_PROC_FREQ; | ||
204 | if (cpu != 0) | ||
205 | fp = (unsigned int *)get_property(cpu, "clock-frequency", NULL); | ||
206 | if (fp != NULL) | ||
207 | ppc_proc_freq = *fp; | ||
208 | else | ||
209 | printk(KERN_ERR "WARNING: Estimating processor frequency (not found)\n"); | ||
210 | |||
211 | of_node_put(cpu); | ||
212 | |||
213 | printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n", | ||
214 | ppc_tb_freq/1000000, ppc_tb_freq%1000000); | ||
215 | printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n", | ||
216 | ppc_proc_freq/1000000, ppc_proc_freq%1000000); | ||
217 | |||
218 | tb_ticks_per_jiffy = ppc_tb_freq / HZ; | ||
219 | tb_ticks_per_sec = tb_ticks_per_jiffy * HZ; | ||
220 | tb_ticks_per_usec = ppc_tb_freq / 1000000; | ||
221 | tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); | ||
222 | div128_by_32(1024*1024, 0, tb_ticks_per_sec, &divres); | ||
223 | tb_to_xs = divres.result_low; | ||
224 | |||
225 | setup_default_decr(); | ||
226 | } | ||
diff --git a/arch/ppc64/kernel/mf.c b/arch/ppc64/kernel/mf.c index 5aca7e8005a8..ef4a338ebd01 100644 --- a/arch/ppc64/kernel/mf.c +++ b/arch/ppc64/kernel/mf.c | |||
@@ -40,7 +40,6 @@ | |||
40 | #include <asm/iSeries/vio.h> | 40 | #include <asm/iSeries/vio.h> |
41 | #include <asm/iSeries/mf.h> | 41 | #include <asm/iSeries/mf.h> |
42 | #include <asm/iSeries/HvLpConfig.h> | 42 | #include <asm/iSeries/HvLpConfig.h> |
43 | #include <asm/iSeries/ItSpCommArea.h> | ||
44 | #include <asm/iSeries/ItLpQueue.h> | 43 | #include <asm/iSeries/ItLpQueue.h> |
45 | 44 | ||
46 | /* | 45 | /* |
@@ -802,10 +801,8 @@ int mf_get_boot_rtc(struct rtc_time *tm) | |||
802 | return rc; | 801 | return rc; |
803 | /* We need to poll here as we are not yet taking interrupts */ | 802 | /* We need to poll here as we are not yet taking interrupts */ |
804 | while (rtc_data.busy) { | 803 | while (rtc_data.busy) { |
805 | extern unsigned long lpevent_count; | 804 | if (hvlpevent_is_pending()) |
806 | struct ItLpQueue *lpq = get_paca()->lpqueue_ptr; | 805 | process_hvlpevents(NULL); |
807 | if (lpq && ItLpQueue_isLpIntPending(lpq)) | ||
808 | lpevent_count += ItLpQueue_process(lpq, NULL); | ||
809 | } | 806 | } |
810 | return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm); | 807 | return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm); |
811 | } | 808 | } |
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S index e3c73b3425dc..f3dea0c5a88c 100644 --- a/arch/ppc64/kernel/misc.S +++ b/arch/ppc64/kernel/misc.S | |||
@@ -680,6 +680,177 @@ _GLOBAL(kernel_thread) | |||
680 | ld r30,-16(r1) | 680 | ld r30,-16(r1) |
681 | blr | 681 | blr |
682 | 682 | ||
683 | /* kexec_wait(phys_cpu) | ||
684 | * | ||
685 | * wait for the flag to change, indicating this kernel is going away but | ||
686 | * the slave code for the next one is at addresses 0 to 0x100. | ||
687 | * | ||
688 | * This is used by all slaves. | ||
689 | * | ||
690 | * Physical (hardware) cpu id should be in r3. | ||
691 | */ | ||
692 | _GLOBAL(kexec_wait) | ||
693 | bl 1f | ||
694 | 1: mflr r5 | ||
695 | addi r5,r5,kexec_flag-1b | ||
696 | |||
697 | 99: HMT_LOW | ||
698 | #ifdef CONFIG_KEXEC /* use no memory without kexec */ | ||
699 | lwz r4,0(r5) | ||
700 | cmpwi 0,r4,0 | ||
701 | bnea 0x60 | ||
702 | #endif | ||
703 | b 99b | ||
704 | |||
705 | /* this can be in text because we won't change it until we are | ||
706 | * running in real mode anyway | ||
707 | */ | ||
708 | kexec_flag: | ||
709 | .long 0 | ||
710 | |||
711 | |||
712 | #ifdef CONFIG_KEXEC | ||
713 | |||
714 | /* kexec_smp_wait(void) | ||
715 | * | ||
716 | * call with interrupts off | ||
717 | * note: this is a terminal routine, it does not save lr | ||
718 | * | ||
719 | * get phys id from paca | ||
720 | * set paca id to -1 to say we got here | ||
721 | * switch to real mode | ||
722 | * join other cpus in kexec_wait(phys_id) | ||
723 | */ | ||
724 | _GLOBAL(kexec_smp_wait) | ||
725 | lhz r3,PACAHWCPUID(r13) | ||
726 | li r4,-1 | ||
727 | sth r4,PACAHWCPUID(r13) /* let others know we left */ | ||
728 | bl real_mode | ||
729 | b .kexec_wait | ||
730 | |||
731 | /* | ||
732 | * switch to real mode (turn mmu off) | ||
733 | * we use the early kernel trick that the hardware ignores bits | ||
734 | * 0 and 1 (big endian) of the effective address in real mode | ||
735 | * | ||
736 | * don't overwrite r3 here, it is live for kexec_wait above. | ||
737 | */ | ||
738 | real_mode: /* assume normal blr return */ | ||
739 | 1: li r9,MSR_RI | ||
740 | li r10,MSR_DR|MSR_IR | ||
741 | mflr r11 /* return address to SRR0 */ | ||
742 | mfmsr r12 | ||
743 | andc r9,r12,r9 | ||
744 | andc r10,r12,r10 | ||
745 | |||
746 | mtmsrd r9,1 | ||
747 | mtspr SPRN_SRR1,r10 | ||
748 | mtspr SPRN_SRR0,r11 | ||
749 | rfid | ||
750 | |||
751 | |||
752 | /* | ||
753 | * kexec_sequence(newstack, start, image, control, clear_all()) | ||
754 | * | ||
755 | * does the grungy work with stack switching and real mode switches | ||
756 | * also does simple calls to other code | ||
757 | */ | ||
758 | |||
759 | _GLOBAL(kexec_sequence) | ||
760 | mflr r0 | ||
761 | std r0,16(r1) | ||
762 | |||
763 | /* switch stacks to newstack -- &kexec_stack.stack */ | ||
764 | stdu r1,THREAD_SIZE-112(r3) | ||
765 | mr r1,r3 | ||
766 | |||
767 | li r0,0 | ||
768 | std r0,16(r1) | ||
769 | |||
770 | /* save regs for local vars on new stack. | ||
771 | * yes, we won't go back, but ... | ||
772 | */ | ||
773 | std r31,-8(r1) | ||
774 | std r30,-16(r1) | ||
775 | std r29,-24(r1) | ||
776 | std r28,-32(r1) | ||
777 | std r27,-40(r1) | ||
778 | std r26,-48(r1) | ||
779 | std r25,-56(r1) | ||
780 | |||
781 | stdu r1,-112-64(r1) | ||
782 | |||
783 | /* save args into preserved regs */ | ||
784 | mr r31,r3 /* newstack (both) */ | ||
785 | mr r30,r4 /* start (real) */ | ||
786 | mr r29,r5 /* image (virt) */ | ||
787 | mr r28,r6 /* control, unused */ | ||
788 | mr r27,r7 /* clear_all() fn desc */ | ||
789 | mr r26,r8 /* spare */ | ||
790 | lhz r25,PACAHWCPUID(r13) /* get our phys cpu from paca */ | ||
791 | |||
792 | /* disable interrupts, we are overwriting kernel data next */ | ||
793 | mfmsr r3 | ||
794 | rlwinm r3,r3,0,17,15 | ||
795 | mtmsrd r3,1 | ||
796 | |||
797 | /* copy dest pages, flush whole dest image */ | ||
798 | mr r3,r29 | ||
799 | bl .kexec_copy_flush /* (image) */ | ||
800 | |||
801 | /* turn off mmu */ | ||
802 | bl real_mode | ||
803 | |||
804 | /* clear out hardware hash page table and tlb */ | ||
805 | ld r5,0(r27) /* deref function descriptor */ | ||
806 | mtctr r5 | ||
807 | bctrl /* ppc_md.hpte_clear_all(void); */ | ||
808 | |||
809 | /* | ||
810 | * kexec image calling is: | ||
811 | * the first 0x100 bytes of the entry point are copied to 0 | ||
812 | * | ||
813 | * all slaves branch to slave = 0x60 (absolute) | ||
814 | * slave(phys_cpu_id); | ||
815 | * | ||
816 | * master goes to start = entry point | ||
817 | * start(phys_cpu_id, start, 0); | ||
818 | * | ||
819 | * | ||
820 | * a wrapper is needed to call existing kernels, here is an approximate | ||
821 | * description of one method: | ||
822 | * | ||
823 | * v2: (2.6.10) | ||
824 | * start will be near the boot_block (maybe 0x100 bytes before it?) | ||
825 | * it will have a 0x60, which will b to boot_block, where it will wait | ||
826 | * and 0 will store phys into struct boot-block and load r3 from there, | ||
827 | * copy kernel 0-0x100 and tell slaves to back down to 0x60 again | ||
828 | * | ||
829 | * v1: (2.6.9) | ||
830 | * boot block will have all cpus scanning device tree to see if they | ||
831 | * are the boot cpu ????? | ||
832 | * other device tree differences (prop sizes, va vs pa, etc)... | ||
833 | */ | ||
834 | |||
835 | /* copy 0x100 bytes starting at start to 0 */ | ||
836 | li r3,0 | ||
837 | mr r4,r30 | ||
838 | li r5,0x100 | ||
839 | li r6,0 | ||
840 | bl .copy_and_flush /* (dest, src, copy limit, start offset) */ | ||
841 | 1: /* assume normal blr return */ | ||
842 | |||
843 | /* release other cpus to the new kernel secondary start at 0x60 */ | ||
844 | mflr r5 | ||
845 | li r6,1 | ||
846 | stw r6,kexec_flag-1b(5) | ||
847 | mr r3,r25 # my phys cpu | ||
848 | mr r4,r30 # start, aka phys mem offset | ||
849 | mtlr 4 | ||
850 | li r5,0 | ||
851 | blr /* image->start(physid, image->start, 0); */ | ||
852 | #endif /* CONFIG_KEXEC */ | ||
853 | |||
683 | /* Why isn't this a) automatic, b) written in 'C'? */ | 854 | /* Why isn't this a) automatic, b) written in 'C'? */ |
684 | .balign 8 | 855 | .balign 8 |
685 | _GLOBAL(sys_call_table32) | 856 | _GLOBAL(sys_call_table32) |
@@ -951,7 +1122,7 @@ _GLOBAL(sys_call_table32) | |||
951 | .llong .compat_sys_mq_timedreceive /* 265 */ | 1122 | .llong .compat_sys_mq_timedreceive /* 265 */ |
952 | .llong .compat_sys_mq_notify | 1123 | .llong .compat_sys_mq_notify |
953 | .llong .compat_sys_mq_getsetattr | 1124 | .llong .compat_sys_mq_getsetattr |
954 | .llong .sys_ni_syscall /* 268 reserved for sys_kexec_load */ | 1125 | .llong .compat_sys_kexec_load |
955 | .llong .sys32_add_key | 1126 | .llong .sys32_add_key |
956 | .llong .sys32_request_key | 1127 | .llong .sys32_request_key |
957 | .llong .compat_sys_keyctl | 1128 | .llong .compat_sys_keyctl |
@@ -1227,7 +1398,7 @@ _GLOBAL(sys_call_table) | |||
1227 | .llong .sys_mq_timedreceive /* 265 */ | 1398 | .llong .sys_mq_timedreceive /* 265 */ |
1228 | .llong .sys_mq_notify | 1399 | .llong .sys_mq_notify |
1229 | .llong .sys_mq_getsetattr | 1400 | .llong .sys_mq_getsetattr |
1230 | .llong .sys_ni_syscall /* 268 reserved for sys_kexec_load */ | 1401 | .llong .sys_kexec_load |
1231 | .llong .sys_add_key | 1402 | .llong .sys_add_key |
1232 | .llong .sys_request_key /* 270 */ | 1403 | .llong .sys_request_key /* 270 */ |
1233 | .llong .sys_keyctl | 1404 | .llong .sys_keyctl |
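With sys_kexec_load wired into both syscall tables above, user space can hand the kernel the segment list that machine_kexec_prepare() validates and kexec_copy_flush() later copies into place. A bare-bones user-space sketch follows; the destination address, entry point and buffer contents are placeholders, and in practice the kexec-tools package does this work.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kexec.h>

int main(void)
{
	static char buf[4096];			/* image data (placeholder) */
	struct kexec_segment seg = {
		.buf   = buf,
		.bufsz = sizeof(buf),
		.mem   = (void *) 0x4000000,	/* destination (placeholder) */
		.memsz = sizeof(buf),
	};
	unsigned long entry = 0x4000000;	/* new image entry (placeholder) */

	memset(buf, 0, sizeof(buf));
	if (syscall(__NR_kexec_load, entry, 1UL, &seg, KEXEC_ARCH_DEFAULT) < 0) {
		perror("kexec_load");
		return 1;
	}
	printf("image loaded; reboot(LINUX_REBOOT_CMD_KEXEC) would start it\n");
	return 0;
}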
diff --git a/arch/ppc64/kernel/mpic.c b/arch/ppc64/kernel/mpic.c index 593ea5b82afa..e8fbab1df37f 100644 --- a/arch/ppc64/kernel/mpic.c +++ b/arch/ppc64/kernel/mpic.c | |||
@@ -792,6 +792,35 @@ void mpic_setup_this_cpu(void) | |||
792 | #endif /* CONFIG_SMP */ | 792 | #endif /* CONFIG_SMP */ |
793 | } | 793 | } |
794 | 794 | ||
795 | /* | ||
796 | * XXX: someone who knows mpic should check this. | ||
797 | * do we need to eoi the ipi here (see xics comments)? | ||
798 | * or can we reset the mpic in the new kernel? | ||
799 | */ | ||
800 | void mpic_teardown_this_cpu(void) | ||
801 | { | ||
802 | struct mpic *mpic = mpic_primary; | ||
803 | unsigned long flags; | ||
804 | u32 msk = 1 << hard_smp_processor_id(); | ||
805 | unsigned int i; | ||
806 | |||
807 | BUG_ON(mpic == NULL); | ||
808 | |||
809 | DBG("%s: teardown_this_cpu(%d)\n", mpic->name, hard_smp_processor_id()); | ||
810 | spin_lock_irqsave(&mpic_lock, flags); | ||
811 | |||
812 | /* let the mpic know we don't want intrs. */ | ||
813 | for (i = 0; i < mpic->num_sources ; i++) | ||
814 | mpic_irq_write(i, MPIC_IRQ_DESTINATION, | ||
815 | mpic_irq_read(i, MPIC_IRQ_DESTINATION) & ~msk); | ||
816 | |||
817 | /* Set current processor priority to max */ | ||
818 | mpic_cpu_write(MPIC_CPU_CURRENT_TASK_PRI, 0xf); | ||
819 | |||
820 | spin_unlock_irqrestore(&mpic_lock, flags); | ||
821 | } | ||
822 | |||
823 | |||
795 | void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask) | 824 | void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask) |
796 | { | 825 | { |
797 | struct mpic *mpic = mpic_primary; | 826 | struct mpic *mpic = mpic_primary; |
diff --git a/arch/ppc64/kernel/mpic.h b/arch/ppc64/kernel/mpic.h index 571b3c99e062..99fbbc9a084c 100644 --- a/arch/ppc64/kernel/mpic.h +++ b/arch/ppc64/kernel/mpic.h | |||
@@ -255,6 +255,9 @@ extern unsigned int mpic_irq_get_priority(unsigned int irq); | |||
255 | /* Setup a non-boot CPU */ | 255 | /* Setup a non-boot CPU */ |
256 | extern void mpic_setup_this_cpu(void); | 256 | extern void mpic_setup_this_cpu(void); |
257 | 257 | ||
258 | /* Clean up for kexec (or cpu offline or ...) */ | ||
259 | extern void mpic_teardown_this_cpu(void); | ||
260 | |||
258 | /* Request IPIs on primary mpic */ | 261 | /* Request IPIs on primary mpic */ |
259 | extern void mpic_request_ipis(void); | 262 | extern void mpic_request_ipis(void); |
260 | 263 | ||
@@ -265,3 +268,6 @@ extern void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask); | |||
265 | extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs); | 268 | extern int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs); |
266 | /* This one gets to the primary mpic */ | 269 | /* This one gets to the primary mpic */ |
267 | extern int mpic_get_irq(struct pt_regs *regs); | 270 | extern int mpic_get_irq(struct pt_regs *regs); |
271 | |||
272 | /* global mpic for pSeries */ | ||
273 | extern struct mpic *pSeries_mpic; | ||
diff --git a/arch/ppc64/kernel/nvram.c b/arch/ppc64/kernel/nvram.c index 4e71781a4414..4fb1a9f5060d 100644 --- a/arch/ppc64/kernel/nvram.c +++ b/arch/ppc64/kernel/nvram.c | |||
@@ -338,9 +338,8 @@ static int nvram_remove_os_partition(void) | |||
338 | */ | 338 | */ |
339 | static int nvram_create_os_partition(void) | 339 | static int nvram_create_os_partition(void) |
340 | { | 340 | { |
341 | struct list_head * p; | 341 | struct nvram_partition *part; |
342 | struct nvram_partition *part = NULL; | 342 | struct nvram_partition *new_part; |
343 | struct nvram_partition *new_part = NULL; | ||
344 | struct nvram_partition *free_part = NULL; | 343 | struct nvram_partition *free_part = NULL; |
345 | int seq_init[2] = { 0, 0 }; | 344 | int seq_init[2] = { 0, 0 }; |
346 | loff_t tmp_index; | 345 | loff_t tmp_index; |
@@ -349,8 +348,7 @@ static int nvram_create_os_partition(void) | |||
349 | 348 | ||
350 | /* Find a free partition that will give us the maximum needed size | 349 | /* Find a free partition that will give us the maximum needed size |
351 | If can't find one that will give us the minimum size needed */ | 350 | If can't find one that will give us the minimum size needed */ |
352 | list_for_each(p, &nvram_part->partition) { | 351 | list_for_each_entry(part, &nvram_part->partition, partition) { |
353 | part = list_entry(p, struct nvram_partition, partition); | ||
354 | if (part->header.signature != NVRAM_SIG_FREE) | 352 | if (part->header.signature != NVRAM_SIG_FREE) |
355 | continue; | 353 | continue; |
356 | 354 | ||
diff --git a/arch/ppc64/kernel/of_device.c b/arch/ppc64/kernel/of_device.c index f4c825a69fa0..66bd5ab7c25a 100644 --- a/arch/ppc64/kernel/of_device.c +++ b/arch/ppc64/kernel/of_device.c | |||
@@ -161,7 +161,7 @@ void of_unregister_driver(struct of_platform_driver *drv) | |||
161 | } | 161 | } |
162 | 162 | ||
163 | 163 | ||
164 | static ssize_t dev_show_devspec(struct device *dev, char *buf) | 164 | static ssize_t dev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf) |
165 | { | 165 | { |
166 | struct of_device *ofdev; | 166 | struct of_device *ofdev; |
167 | 167 | ||
diff --git a/arch/ppc64/kernel/pSeries_pci.c b/arch/ppc64/kernel/pSeries_pci.c index 0b1cca281408..1f5f141fb7a1 100644 --- a/arch/ppc64/kernel/pSeries_pci.c +++ b/arch/ppc64/kernel/pSeries_pci.c | |||
@@ -1,13 +1,11 @@ | |||
1 | /* | 1 | /* |
2 | * pSeries_pci.c | 2 | * arch/ppc64/kernel/pSeries_pci.c |
3 | * | 3 | * |
4 | * Copyright (C) 2001 Dave Engebretsen, IBM Corporation | 4 | * Copyright (C) 2001 Dave Engebretsen, IBM Corporation |
5 | * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM | 5 | * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM |
6 | * | 6 | * |
7 | * pSeries specific routines for PCI. | 7 | * pSeries specific routines for PCI. |
8 | * | 8 | * |
9 | * Based on code from pci.c and chrp_pci.c | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | 9 | * This program is free software; you can redistribute it and/or modify |
12 | * it under the terms of the GNU General Public License as published by | 10 | * it under the terms of the GNU General Public License as published by |
13 | * the Free Software Foundation; either version 2 of the License, or | 11 | * the Free Software Foundation; either version 2 of the License, or |
@@ -23,430 +21,18 @@ | |||
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 21 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
24 | */ | 22 | */ |
25 | 23 | ||
24 | #include <linux/init.h> | ||
25 | #include <linux/ioport.h> | ||
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/threads.h> | ||
28 | #include <linux/pci.h> | 27 | #include <linux/pci.h> |
29 | #include <linux/string.h> | 28 | #include <linux/string.h> |
30 | #include <linux/init.h> | ||
31 | #include <linux/bootmem.h> | ||
32 | 29 | ||
33 | #include <asm/io.h> | ||
34 | #include <asm/pgtable.h> | ||
35 | #include <asm/irq.h> | ||
36 | #include <asm/prom.h> | ||
37 | #include <asm/machdep.h> | ||
38 | #include <asm/pci-bridge.h> | 30 | #include <asm/pci-bridge.h> |
39 | #include <asm/iommu.h> | 31 | #include <asm/prom.h> |
40 | #include <asm/rtas.h> | ||
41 | 32 | ||
42 | #include "mpic.h" | ||
43 | #include "pci.h" | 33 | #include "pci.h" |
44 | 34 | ||
45 | /* RTAS tokens */ | 35 | static int __initdata s7a_workaround = -1; |
46 | static int read_pci_config; | ||
47 | static int write_pci_config; | ||
48 | static int ibm_read_pci_config; | ||
49 | static int ibm_write_pci_config; | ||
50 | |||
51 | static int s7a_workaround; | ||
52 | |||
53 | extern struct mpic *pSeries_mpic; | ||
54 | |||
55 | static int config_access_valid(struct device_node *dn, int where) | ||
56 | { | ||
57 | if (where < 256) | ||
58 | return 1; | ||
59 | if (where < 4096 && dn->pci_ext_config_space) | ||
60 | return 1; | ||
61 | |||
62 | return 0; | ||
63 | } | ||
64 | |||
65 | static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val) | ||
66 | { | ||
67 | int returnval = -1; | ||
68 | unsigned long buid, addr; | ||
69 | int ret; | ||
70 | |||
71 | if (!dn) | ||
72 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
73 | if (!config_access_valid(dn, where)) | ||
74 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
75 | |||
76 | addr = ((where & 0xf00) << 20) | (dn->busno << 16) | | ||
77 | (dn->devfn << 8) | (where & 0xff); | ||
78 | buid = dn->phb->buid; | ||
79 | if (buid) { | ||
80 | ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval, | ||
81 | addr, buid >> 32, buid & 0xffffffff, size); | ||
82 | } else { | ||
83 | ret = rtas_call(read_pci_config, 2, 2, &returnval, addr, size); | ||
84 | } | ||
85 | *val = returnval; | ||
86 | |||
87 | if (ret) | ||
88 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
89 | |||
90 | if (returnval == EEH_IO_ERROR_VALUE(size) | ||
91 | && eeh_dn_check_failure (dn, NULL)) | ||
92 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
93 | |||
94 | return PCIBIOS_SUCCESSFUL; | ||
95 | } | ||
96 | |||
97 | static int rtas_pci_read_config(struct pci_bus *bus, | ||
98 | unsigned int devfn, | ||
99 | int where, int size, u32 *val) | ||
100 | { | ||
101 | struct device_node *busdn, *dn; | ||
102 | |||
103 | if (bus->self) | ||
104 | busdn = pci_device_to_OF_node(bus->self); | ||
105 | else | ||
106 | busdn = bus->sysdata; /* must be a phb */ | ||
107 | |||
108 | /* Search only direct children of the bus */ | ||
109 | for (dn = busdn->child; dn; dn = dn->sibling) | ||
110 | if (dn->devfn == devfn) | ||
111 | return rtas_read_config(dn, where, size, val); | ||
112 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
113 | } | ||
114 | |||
115 | static int rtas_write_config(struct device_node *dn, int where, int size, u32 val) | ||
116 | { | ||
117 | unsigned long buid, addr; | ||
118 | int ret; | ||
119 | |||
120 | if (!dn) | ||
121 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
122 | if (!config_access_valid(dn, where)) | ||
123 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
124 | |||
125 | addr = ((where & 0xf00) << 20) | (dn->busno << 16) | | ||
126 | (dn->devfn << 8) | (where & 0xff); | ||
127 | buid = dn->phb->buid; | ||
128 | if (buid) { | ||
129 | ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr, buid >> 32, buid & 0xffffffff, size, (ulong) val); | ||
130 | } else { | ||
131 | ret = rtas_call(write_pci_config, 3, 1, NULL, addr, size, (ulong)val); | ||
132 | } | ||
133 | |||
134 | if (ret) | ||
135 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
136 | |||
137 | return PCIBIOS_SUCCESSFUL; | ||
138 | } | ||
139 | |||
140 | static int rtas_pci_write_config(struct pci_bus *bus, | ||
141 | unsigned int devfn, | ||
142 | int where, int size, u32 val) | ||
143 | { | ||
144 | struct device_node *busdn, *dn; | ||
145 | |||
146 | if (bus->self) | ||
147 | busdn = pci_device_to_OF_node(bus->self); | ||
148 | else | ||
149 | busdn = bus->sysdata; /* must be a phb */ | ||
150 | |||
151 | /* Search only direct children of the bus */ | ||
152 | for (dn = busdn->child; dn; dn = dn->sibling) | ||
153 | if (dn->devfn == devfn) | ||
154 | return rtas_write_config(dn, where, size, val); | ||
155 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
156 | } | ||
157 | |||
158 | struct pci_ops rtas_pci_ops = { | ||
159 | rtas_pci_read_config, | ||
160 | rtas_pci_write_config | ||
161 | }; | ||
162 | |||
163 | int is_python(struct device_node *dev) | ||
164 | { | ||
165 | char *model = (char *)get_property(dev, "model", NULL); | ||
166 | |||
167 | if (model && strstr(model, "Python")) | ||
168 | return 1; | ||
169 | |||
170 | return 0; | ||
171 | } | ||
172 | |||
173 | static int get_phb_reg_prop(struct device_node *dev, | ||
174 | unsigned int addr_size_words, | ||
175 | struct reg_property64 *reg) | ||
176 | { | ||
177 | unsigned int *ui_ptr = NULL, len; | ||
178 | |||
179 | /* Found a PHB, now figure out where his registers are mapped. */ | ||
180 | ui_ptr = (unsigned int *)get_property(dev, "reg", &len); | ||
181 | if (ui_ptr == NULL) | ||
182 | return 1; | ||
183 | |||
184 | if (addr_size_words == 1) { | ||
185 | reg->address = ((struct reg_property32 *)ui_ptr)->address; | ||
186 | reg->size = ((struct reg_property32 *)ui_ptr)->size; | ||
187 | } else { | ||
188 | *reg = *((struct reg_property64 *)ui_ptr); | ||
189 | } | ||
190 | |||
191 | return 0; | ||
192 | } | ||
193 | |||
194 | static void python_countermeasures(struct device_node *dev, | ||
195 | unsigned int addr_size_words) | ||
196 | { | ||
197 | struct reg_property64 reg_struct; | ||
198 | void __iomem *chip_regs; | ||
199 | volatile u32 val; | ||
200 | |||
201 | if (get_phb_reg_prop(dev, addr_size_words, ®_struct)) | ||
202 | return; | ||
203 | |||
204 | /* Python's register file is 1 MB in size. */ | ||
205 | chip_regs = ioremap(reg_struct.address & ~(0xfffffUL), 0x100000); | ||
206 | |||
207 | /* | ||
208 | * Firmware doesn't always clear this bit which is critical | ||
209 | * for good performance - Anton | ||
210 | */ | ||
211 | |||
212 | #define PRG_CL_RESET_VALID 0x00010000 | ||
213 | |||
214 | val = in_be32(chip_regs + 0xf6030); | ||
215 | if (val & PRG_CL_RESET_VALID) { | ||
216 | printk(KERN_INFO "Python workaround: "); | ||
217 | val &= ~PRG_CL_RESET_VALID; | ||
218 | out_be32(chip_regs + 0xf6030, val); | ||
219 | /* | ||
220 | * We must read it back for changes to | ||
221 | * take effect | ||
222 | */ | ||
223 | val = in_be32(chip_regs + 0xf6030); | ||
224 | printk("reg0: %x\n", val); | ||
225 | } | ||
226 | |||
227 | iounmap(chip_regs); | ||
228 | } | ||
229 | |||
230 | void __init init_pci_config_tokens (void) | ||
231 | { | ||
232 | read_pci_config = rtas_token("read-pci-config"); | ||
233 | write_pci_config = rtas_token("write-pci-config"); | ||
234 | ibm_read_pci_config = rtas_token("ibm,read-pci-config"); | ||
235 | ibm_write_pci_config = rtas_token("ibm,write-pci-config"); | ||
236 | } | ||
237 | |||
238 | unsigned long __devinit get_phb_buid (struct device_node *phb) | ||
239 | { | ||
240 | int addr_cells; | ||
241 | unsigned int *buid_vals; | ||
242 | unsigned int len; | ||
243 | unsigned long buid; | ||
244 | |||
245 | if (ibm_read_pci_config == -1) return 0; | ||
246 | |||
247 | /* PHB's will always be children of the root node, | ||
248 | * or so it is promised by the current firmware. */ | ||
249 | if (phb->parent == NULL) | ||
250 | return 0; | ||
251 | if (phb->parent->parent) | ||
252 | return 0; | ||
253 | |||
254 | buid_vals = (unsigned int *) get_property(phb, "reg", &len); | ||
255 | if (buid_vals == NULL) | ||
256 | return 0; | ||
257 | |||
258 | addr_cells = prom_n_addr_cells(phb); | ||
259 | if (addr_cells == 1) { | ||
260 | buid = (unsigned long) buid_vals[0]; | ||
261 | } else { | ||
262 | buid = (((unsigned long)buid_vals[0]) << 32UL) | | ||
263 | (((unsigned long)buid_vals[1]) & 0xffffffff); | ||
264 | } | ||
265 | return buid; | ||
266 | } | ||
267 | |||
268 | static int phb_set_bus_ranges(struct device_node *dev, | ||
269 | struct pci_controller *phb) | ||
270 | { | ||
271 | int *bus_range; | ||
272 | unsigned int len; | ||
273 | |||
274 | bus_range = (int *) get_property(dev, "bus-range", &len); | ||
275 | if (bus_range == NULL || len < 2 * sizeof(int)) { | ||
276 | return 1; | ||
277 | } | ||
278 | |||
279 | phb->first_busno = bus_range[0]; | ||
280 | phb->last_busno = bus_range[1]; | ||
281 | |||
282 | return 0; | ||
283 | } | ||
284 | |||
285 | static int __devinit setup_phb(struct device_node *dev, | ||
286 | struct pci_controller *phb, | ||
287 | unsigned int addr_size_words) | ||
288 | { | ||
289 | pci_setup_pci_controller(phb); | ||
290 | |||
291 | if (is_python(dev)) | ||
292 | python_countermeasures(dev, addr_size_words); | ||
293 | |||
294 | if (phb_set_bus_ranges(dev, phb)) | ||
295 | return 1; | ||
296 | |||
297 | phb->arch_data = dev; | ||
298 | phb->ops = &rtas_pci_ops; | ||
299 | phb->buid = get_phb_buid(dev); | ||
300 | |||
301 | return 0; | ||
302 | } | ||
303 | |||
304 | static void __devinit add_linux_pci_domain(struct device_node *dev, | ||
305 | struct pci_controller *phb, | ||
306 | struct property *of_prop) | ||
307 | { | ||
308 | memset(of_prop, 0, sizeof(struct property)); | ||
309 | of_prop->name = "linux,pci-domain"; | ||
310 | of_prop->length = sizeof(phb->global_number); | ||
311 | of_prop->value = (unsigned char *)&of_prop[1]; | ||
312 | memcpy(of_prop->value, &phb->global_number, sizeof(phb->global_number)); | ||
313 | prom_add_property(dev, of_prop); | ||
314 | } | ||
315 | |||
316 | static struct pci_controller * __init alloc_phb(struct device_node *dev, | ||
317 | unsigned int addr_size_words) | ||
318 | { | ||
319 | struct pci_controller *phb; | ||
320 | struct property *of_prop; | ||
321 | |||
322 | phb = alloc_bootmem(sizeof(struct pci_controller)); | ||
323 | if (phb == NULL) | ||
324 | return NULL; | ||
325 | |||
326 | of_prop = alloc_bootmem(sizeof(struct property) + | ||
327 | sizeof(phb->global_number)); | ||
328 | if (!of_prop) | ||
329 | return NULL; | ||
330 | |||
331 | if (setup_phb(dev, phb, addr_size_words)) | ||
332 | return NULL; | ||
333 | |||
334 | add_linux_pci_domain(dev, phb, of_prop); | ||
335 | |||
336 | return phb; | ||
337 | } | ||
338 | |||
339 | static struct pci_controller * __devinit alloc_phb_dynamic(struct device_node *dev, unsigned int addr_size_words) | ||
340 | { | ||
341 | struct pci_controller *phb; | ||
342 | |||
343 | phb = (struct pci_controller *)kmalloc(sizeof(struct pci_controller), | ||
344 | GFP_KERNEL); | ||
345 | if (phb == NULL) | ||
346 | return NULL; | ||
347 | |||
348 | if (setup_phb(dev, phb, addr_size_words)) | ||
349 | return NULL; | ||
350 | |||
351 | phb->is_dynamic = 1; | ||
352 | |||
353 | /* TODO: linux,pci-domain? */ | ||
354 | |||
355 | return phb; | ||
356 | } | ||
357 | |||
358 | unsigned long __init find_and_init_phbs(void) | ||
359 | { | ||
360 | struct device_node *node; | ||
361 | struct pci_controller *phb; | ||
362 | unsigned int root_size_cells = 0; | ||
363 | unsigned int index; | ||
364 | unsigned int *opprop = NULL; | ||
365 | struct device_node *root = of_find_node_by_path("/"); | ||
366 | |||
367 | if (ppc64_interrupt_controller == IC_OPEN_PIC) { | ||
368 | opprop = (unsigned int *)get_property(root, | ||
369 | "platform-open-pic", NULL); | ||
370 | } | ||
371 | |||
372 | root_size_cells = prom_n_size_cells(root); | ||
373 | |||
374 | index = 0; | ||
375 | |||
376 | for (node = of_get_next_child(root, NULL); | ||
377 | node != NULL; | ||
378 | node = of_get_next_child(root, node)) { | ||
379 | if (node->type == NULL || strcmp(node->type, "pci") != 0) | ||
380 | continue; | ||
381 | |||
382 | phb = alloc_phb(node, root_size_cells); | ||
383 | if (!phb) | ||
384 | continue; | ||
385 | |||
386 | pci_process_bridge_OF_ranges(phb, node); | ||
387 | pci_setup_phb_io(phb, index == 0); | ||
388 | |||
389 | if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) { | ||
390 | int addr = root_size_cells * (index + 2) - 1; | ||
391 | mpic_assign_isu(pSeries_mpic, index, opprop[addr]); | ||
392 | } | ||
393 | |||
394 | index++; | ||
395 | } | ||
396 | |||
397 | of_node_put(root); | ||
398 | pci_devs_phb_init(); | ||
399 | |||
400 | /* | ||
401 | * pci_probe_only and pci_assign_all_buses can be set via properties | ||
402 | * in chosen. | ||
403 | */ | ||
404 | if (of_chosen) { | ||
405 | int *prop; | ||
406 | |||
407 | prop = (int *)get_property(of_chosen, "linux,pci-probe-only", | ||
408 | NULL); | ||
409 | if (prop) | ||
410 | pci_probe_only = *prop; | ||
411 | |||
412 | prop = (int *)get_property(of_chosen, | ||
413 | "linux,pci-assign-all-buses", NULL); | ||
414 | if (prop) | ||
415 | pci_assign_all_buses = *prop; | ||
416 | } | ||
417 | |||
418 | return 0; | ||
419 | } | ||
420 | |||
421 | struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn) | ||
422 | { | ||
423 | struct device_node *root = of_find_node_by_path("/"); | ||
424 | unsigned int root_size_cells = 0; | ||
425 | struct pci_controller *phb; | ||
426 | struct pci_bus *bus; | ||
427 | int primary; | ||
428 | |||
429 | root_size_cells = prom_n_size_cells(root); | ||
430 | |||
431 | primary = list_empty(&hose_list); | ||
432 | phb = alloc_phb_dynamic(dn, root_size_cells); | ||
433 | if (!phb) | ||
434 | return NULL; | ||
435 | |||
436 | pci_process_bridge_OF_ranges(phb, dn); | ||
437 | |||
438 | pci_setup_phb_io_dynamic(phb, primary); | ||
439 | of_node_put(root); | ||
440 | |||
441 | pci_devs_phb_init_dynamic(phb); | ||
442 | phb->last_busno = 0xff; | ||
443 | bus = pci_scan_bus(phb->first_busno, phb->ops, phb->arch_data); | ||
444 | phb->bus = bus; | ||
445 | phb->last_busno = bus->subordinate; | ||
446 | |||
447 | return phb; | ||
448 | } | ||
449 | EXPORT_SYMBOL(init_phb_dynamic); | ||
450 | 36 | ||
451 | #if 0 | 37 | #if 0 |
452 | void pcibios_name_device(struct pci_dev *dev) | 38 | void pcibios_name_device(struct pci_dev *dev) |
@@ -474,11 +60,12 @@ void pcibios_name_device(struct pci_dev *dev) | |||
474 | DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device); | 60 | DECLARE_PCI_FIXUP_HEADER(PCI_ANY_ID, PCI_ANY_ID, pcibios_name_device); |
475 | #endif | 61 | #endif |
476 | 62 | ||
477 | static void check_s7a(void) | 63 | static void __init check_s7a(void) |
478 | { | 64 | { |
479 | struct device_node *root; | 65 | struct device_node *root; |
480 | char *model; | 66 | char *model; |
481 | 67 | ||
68 | s7a_workaround = 0; | ||
482 | root = of_find_node_by_path("/"); | 69 | root = of_find_node_by_path("/"); |
483 | if (root) { | 70 | if (root) { |
484 | model = get_property(root, "model", NULL); | 71 | model = get_property(root, "model", NULL); |
@@ -488,55 +75,23 @@ static void check_s7a(void) | |||
488 | } | 75 | } |
489 | } | 76 | } |
490 | 77 | ||
491 | /* RPA-specific bits for removing PHBs */ | 78 | void __devinit pSeries_irq_bus_setup(struct pci_bus *bus) |
492 | int pcibios_remove_root_bus(struct pci_controller *phb) | ||
493 | { | 79 | { |
494 | struct pci_bus *b = phb->bus; | 80 | struct pci_dev *dev; |
495 | struct resource *res; | ||
496 | int rc, i; | ||
497 | |||
498 | res = b->resource[0]; | ||
499 | if (!res->flags) { | ||
500 | printk(KERN_ERR "%s: no IO resource for PHB %s\n", __FUNCTION__, | ||
501 | b->name); | ||
502 | return 1; | ||
503 | } | ||
504 | |||
505 | rc = unmap_bus_range(b); | ||
506 | if (rc) { | ||
507 | printk(KERN_ERR "%s: failed to unmap IO on bus %s\n", | ||
508 | __FUNCTION__, b->name); | ||
509 | return 1; | ||
510 | } | ||
511 | 81 | ||
512 | if (release_resource(res)) { | 82 | if (s7a_workaround < 0) |
513 | printk(KERN_ERR "%s: failed to release IO on bus %s\n", | 83 | check_s7a(); |
514 | __FUNCTION__, b->name); | 84 | list_for_each_entry(dev, &bus->devices, bus_list) { |
515 | return 1; | 85 | pci_read_irq_line(dev); |
516 | } | 86 | if (s7a_workaround) { |
517 | 87 | if (dev->irq > 16) { | |
518 | for (i = 1; i < 3; ++i) { | 88 | dev->irq -= 3; |
519 | res = b->resource[i]; | 89 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, |
520 | if (!res->flags && i == 0) { | 90 | dev->irq); |
521 | printk(KERN_ERR "%s: no MEM resource for PHB %s\n", | 91 | } |
522 | __FUNCTION__, b->name); | ||
523 | return 1; | ||
524 | } | ||
525 | if (res->flags && release_resource(res)) { | ||
526 | printk(KERN_ERR | ||
527 | "%s: failed to release IO %d on bus %s\n", | ||
528 | __FUNCTION__, i, b->name); | ||
529 | return 1; | ||
530 | } | 92 | } |
531 | } | 93 | } |
532 | |||
533 | list_del(&phb->list_node); | ||
534 | if (phb->is_dynamic) | ||
535 | kfree(phb); | ||
536 | |||
537 | return 0; | ||
538 | } | 94 | } |
539 | EXPORT_SYMBOL(pcibios_remove_root_bus); | ||
540 | 95 | ||
541 | static void __init pSeries_request_regions(void) | 96 | static void __init pSeries_request_regions(void) |
542 | { | 97 | { |
@@ -553,20 +108,6 @@ static void __init pSeries_request_regions(void) | |||
553 | 108 | ||
554 | void __init pSeries_final_fixup(void) | 109 | void __init pSeries_final_fixup(void) |
555 | { | 110 | { |
556 | struct pci_dev *dev = NULL; | ||
557 | |||
558 | check_s7a(); | ||
559 | |||
560 | for_each_pci_dev(dev) { | ||
561 | pci_read_irq_line(dev); | ||
562 | if (s7a_workaround) { | ||
563 | if (dev->irq > 16) { | ||
564 | dev->irq -= 3; | ||
565 | pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); | ||
566 | } | ||
567 | } | ||
568 | } | ||
569 | |||
570 | phbs_remap_io(); | 111 | phbs_remap_io(); |
571 | pSeries_request_regions(); | 112 | pSeries_request_regions(); |
572 | 113 | ||
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c index 6c0d1d58a552..44d9af72d225 100644 --- a/arch/ppc64/kernel/pSeries_setup.c +++ b/arch/ppc64/kernel/pSeries_setup.c | |||
@@ -71,11 +71,6 @@ | |||
71 | #define DBG(fmt...) | 71 | #define DBG(fmt...) |
72 | #endif | 72 | #endif |
73 | 73 | ||
74 | extern void pSeries_final_fixup(void); | ||
75 | |||
76 | extern void pSeries_get_boot_time(struct rtc_time *rtc_time); | ||
77 | extern void pSeries_get_rtc_time(struct rtc_time *rtc_time); | ||
78 | extern int pSeries_set_rtc_time(struct rtc_time *rtc_time); | ||
79 | extern void find_udbg_vterm(void); | 74 | extern void find_udbg_vterm(void); |
80 | extern void system_reset_fwnmi(void); /* from head.S */ | 75 | extern void system_reset_fwnmi(void); /* from head.S */ |
81 | extern void machine_check_fwnmi(void); /* from head.S */ | 76 | extern void machine_check_fwnmi(void); /* from head.S */ |
@@ -84,9 +79,6 @@ extern void generic_find_legacy_serial_ports(u64 *physport, | |||
84 | 79 | ||
85 | int fwnmi_active; /* TRUE if an FWNMI handler is present */ | 80 | int fwnmi_active; /* TRUE if an FWNMI handler is present */ |
86 | 81 | ||
87 | extern unsigned long ppc_proc_freq; | ||
88 | extern unsigned long ppc_tb_freq; | ||
89 | |||
90 | extern void pSeries_system_reset_exception(struct pt_regs *regs); | 82 | extern void pSeries_system_reset_exception(struct pt_regs *regs); |
91 | extern int pSeries_machine_check_exception(struct pt_regs *regs); | 83 | extern int pSeries_machine_check_exception(struct pt_regs *regs); |
92 | 84 | ||
@@ -195,14 +187,16 @@ static void __init pSeries_setup_arch(void) | |||
195 | { | 187 | { |
196 | /* Fixup ppc_md depending on the type of interrupt controller */ | 188 | /* Fixup ppc_md depending on the type of interrupt controller */ |
197 | if (ppc64_interrupt_controller == IC_OPEN_PIC) { | 189 | if (ppc64_interrupt_controller == IC_OPEN_PIC) { |
198 | ppc_md.init_IRQ = pSeries_init_mpic; | 190 | ppc_md.init_IRQ = pSeries_init_mpic; |
199 | ppc_md.get_irq = mpic_get_irq; | 191 | ppc_md.get_irq = mpic_get_irq; |
192 | ppc_md.cpu_irq_down = mpic_teardown_this_cpu; | ||
200 | /* Allocate the mpic now, so that find_and_init_phbs() can | 193 | /* Allocate the mpic now, so that find_and_init_phbs() can |
201 | * fill the ISUs */ | 194 | * fill the ISUs */ |
202 | pSeries_setup_mpic(); | 195 | pSeries_setup_mpic(); |
203 | } else { | 196 | } else { |
204 | ppc_md.init_IRQ = xics_init_IRQ; | 197 | ppc_md.init_IRQ = xics_init_IRQ; |
205 | ppc_md.get_irq = xics_get_irq; | 198 | ppc_md.get_irq = xics_get_irq; |
199 | ppc_md.cpu_irq_down = xics_teardown_cpu; | ||
206 | } | 200 | } |
207 | 201 | ||
208 | #ifdef CONFIG_SMP | 202 | #ifdef CONFIG_SMP |
@@ -381,171 +375,6 @@ static void __init pSeries_init_early(void) | |||
381 | } | 375 | } |
382 | 376 | ||
383 | 377 | ||
384 | static void pSeries_progress(char *s, unsigned short hex) | ||
385 | { | ||
386 | struct device_node *root; | ||
387 | int width, *p; | ||
388 | char *os; | ||
389 | static int display_character, set_indicator; | ||
390 | static int max_width; | ||
391 | static DEFINE_SPINLOCK(progress_lock); | ||
392 | static int pending_newline = 0; /* did last write end with unprinted newline? */ | ||
393 | |||
394 | if (!rtas.base) | ||
395 | return; | ||
396 | |||
397 | if (max_width == 0) { | ||
398 | if ((root = find_path_device("/rtas")) && | ||
399 | (p = (unsigned int *)get_property(root, | ||
400 | "ibm,display-line-length", | ||
401 | NULL))) | ||
402 | max_width = *p; | ||
403 | else | ||
404 | max_width = 0x10; | ||
405 | display_character = rtas_token("display-character"); | ||
406 | set_indicator = rtas_token("set-indicator"); | ||
407 | } | ||
408 | |||
409 | if (display_character == RTAS_UNKNOWN_SERVICE) { | ||
410 | /* use hex display if available */ | ||
411 | if (set_indicator != RTAS_UNKNOWN_SERVICE) | ||
412 | rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex); | ||
413 | return; | ||
414 | } | ||
415 | |||
416 | spin_lock(&progress_lock); | ||
417 | |||
418 | /* | ||
419 | * Last write ended with newline, but we didn't print it since | ||
420 | * it would just clear the bottom line of output. Print it now | ||
421 | * instead. | ||
422 | * | ||
423 | * If no newline is pending, print a CR to start output at the | ||
424 | * beginning of the line. | ||
425 | */ | ||
426 | if (pending_newline) { | ||
427 | rtas_call(display_character, 1, 1, NULL, '\r'); | ||
428 | rtas_call(display_character, 1, 1, NULL, '\n'); | ||
429 | pending_newline = 0; | ||
430 | } else { | ||
431 | rtas_call(display_character, 1, 1, NULL, '\r'); | ||
432 | } | ||
433 | |||
434 | width = max_width; | ||
435 | os = s; | ||
436 | while (*os) { | ||
437 | if (*os == '\n' || *os == '\r') { | ||
438 | /* Blank to end of line. */ | ||
439 | while (width-- > 0) | ||
440 | rtas_call(display_character, 1, 1, NULL, ' '); | ||
441 | |||
442 | /* If newline is the last character, save it | ||
443 | * until next call to avoid bumping up the | ||
444 | * display output. | ||
445 | */ | ||
446 | if (*os == '\n' && !os[1]) { | ||
447 | pending_newline = 1; | ||
448 | spin_unlock(&progress_lock); | ||
449 | return; | ||
450 | } | ||
451 | |||
452 | /* RTAS wants CR-LF, not just LF */ | ||
453 | |||
454 | if (*os == '\n') { | ||
455 | rtas_call(display_character, 1, 1, NULL, '\r'); | ||
456 | rtas_call(display_character, 1, 1, NULL, '\n'); | ||
457 | } else { | ||
458 | /* CR might be used to re-draw a line, so we'll | ||
459 | * leave it alone and not add LF. | ||
460 | */ | ||
461 | rtas_call(display_character, 1, 1, NULL, *os); | ||
462 | } | ||
463 | |||
464 | width = max_width; | ||
465 | } else { | ||
466 | width--; | ||
467 | rtas_call(display_character, 1, 1, NULL, *os); | ||
468 | } | ||
469 | |||
470 | os++; | ||
471 | |||
472 | /* if we overwrite the screen length */ | ||
473 | if (width <= 0) | ||
474 | while ((*os != 0) && (*os != '\n') && (*os != '\r')) | ||
475 | os++; | ||
476 | } | ||
477 | |||
478 | /* Blank to end of line. */ | ||
479 | while (width-- > 0) | ||
480 | rtas_call(display_character, 1, 1, NULL, ' '); | ||
481 | |||
482 | spin_unlock(&progress_lock); | ||
483 | } | ||
484 | |||
485 | extern void setup_default_decr(void); | ||
486 | |||
487 | /* Some sane defaults: 125 MHz timebase, 1GHz processor */ | ||
488 | #define DEFAULT_TB_FREQ 125000000UL | ||
489 | #define DEFAULT_PROC_FREQ (DEFAULT_TB_FREQ * 8) | ||
490 | |||
491 | static void __init pSeries_calibrate_decr(void) | ||
492 | { | ||
493 | struct device_node *cpu; | ||
494 | struct div_result divres; | ||
495 | unsigned int *fp; | ||
496 | int node_found; | ||
497 | |||
498 | /* | ||
499 | * The cpu node should have a timebase-frequency property | ||
500 | * to tell us the rate at which the decrementer counts. | ||
501 | */ | ||
502 | cpu = of_find_node_by_type(NULL, "cpu"); | ||
503 | |||
504 | ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */ | ||
505 | node_found = 0; | ||
506 | if (cpu != 0) { | ||
507 | fp = (unsigned int *)get_property(cpu, "timebase-frequency", | ||
508 | NULL); | ||
509 | if (fp != 0) { | ||
510 | node_found = 1; | ||
511 | ppc_tb_freq = *fp; | ||
512 | } | ||
513 | } | ||
514 | if (!node_found) | ||
515 | printk(KERN_ERR "WARNING: Estimating decrementer frequency " | ||
516 | "(not found)\n"); | ||
517 | |||
518 | ppc_proc_freq = DEFAULT_PROC_FREQ; | ||
519 | node_found = 0; | ||
520 | if (cpu != 0) { | ||
521 | fp = (unsigned int *)get_property(cpu, "clock-frequency", | ||
522 | NULL); | ||
523 | if (fp != 0) { | ||
524 | node_found = 1; | ||
525 | ppc_proc_freq = *fp; | ||
526 | } | ||
527 | } | ||
528 | if (!node_found) | ||
529 | printk(KERN_ERR "WARNING: Estimating processor frequency " | ||
530 | "(not found)\n"); | ||
531 | |||
532 | of_node_put(cpu); | ||
533 | |||
534 | printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n", | ||
535 | ppc_tb_freq/1000000, ppc_tb_freq%1000000); | ||
536 | printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n", | ||
537 | ppc_proc_freq/1000000, ppc_proc_freq%1000000); | ||
538 | |||
539 | tb_ticks_per_jiffy = ppc_tb_freq / HZ; | ||
540 | tb_ticks_per_sec = tb_ticks_per_jiffy * HZ; | ||
541 | tb_ticks_per_usec = ppc_tb_freq / 1000000; | ||
542 | tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); | ||
543 | div128_by_32(1024*1024, 0, tb_ticks_per_sec, &divres); | ||
544 | tb_to_xs = divres.result_low; | ||
545 | |||
546 | setup_default_decr(); | ||
547 | } | ||
548 | |||
549 | static int pSeries_check_legacy_ioport(unsigned int baseport) | 378 | static int pSeries_check_legacy_ioport(unsigned int baseport) |
550 | { | 379 | { |
551 | struct device_node *np; | 380 | struct device_node *np; |
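The deleted pSeries_calibrate_decr above (its job now falls to generic_calibrate_decr) reads timebase-frequency and clock-frequency from the cpu node and derives the tick constants. The arithmetic itself is simple; a standalone sketch using the same 125 MHz default timebase, with HZ assumed to be 1000 purely for illustration:

#include <stdio.h>

#define DEFAULT_TB_FREQ 125000000UL	/* default from the removed code */
#define HZ 1000				/* assumed tick rate for this example */

int main(void)
{
	unsigned long tb_freq = DEFAULT_TB_FREQ;
	unsigned long ticks_per_jiffy = tb_freq / HZ;      /* 125000 */
	unsigned long ticks_per_usec  = tb_freq / 1000000; /* 125 */

	printf("timebase: %lu.%06lu MHz\n", tb_freq / 1000000, tb_freq % 1000000);
	printf("ticks per jiffy: %lu, per usec: %lu\n",
	       ticks_per_jiffy, ticks_per_usec);
	return 0;
}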
@@ -596,16 +425,17 @@ struct machdep_calls __initdata pSeries_md = { | |||
596 | .get_cpuinfo = pSeries_get_cpuinfo, | 425 | .get_cpuinfo = pSeries_get_cpuinfo, |
597 | .log_error = pSeries_log_error, | 426 | .log_error = pSeries_log_error, |
598 | .pcibios_fixup = pSeries_final_fixup, | 427 | .pcibios_fixup = pSeries_final_fixup, |
428 | .irq_bus_setup = pSeries_irq_bus_setup, | ||
599 | .restart = rtas_restart, | 429 | .restart = rtas_restart, |
600 | .power_off = rtas_power_off, | 430 | .power_off = rtas_power_off, |
601 | .halt = rtas_halt, | 431 | .halt = rtas_halt, |
602 | .panic = rtas_os_term, | 432 | .panic = rtas_os_term, |
603 | .cpu_die = pSeries_mach_cpu_die, | 433 | .cpu_die = pSeries_mach_cpu_die, |
604 | .get_boot_time = pSeries_get_boot_time, | 434 | .get_boot_time = rtas_get_boot_time, |
605 | .get_rtc_time = pSeries_get_rtc_time, | 435 | .get_rtc_time = rtas_get_rtc_time, |
606 | .set_rtc_time = pSeries_set_rtc_time, | 436 | .set_rtc_time = rtas_set_rtc_time, |
607 | .calibrate_decr = pSeries_calibrate_decr, | 437 | .calibrate_decr = generic_calibrate_decr, |
608 | .progress = pSeries_progress, | 438 | .progress = rtas_progress, |
609 | .check_legacy_ioport = pSeries_check_legacy_ioport, | 439 | .check_legacy_ioport = pSeries_check_legacy_ioport, |
610 | .system_reset_exception = pSeries_system_reset_exception, | 440 | .system_reset_exception = pSeries_system_reset_exception, |
611 | .machine_check_exception = pSeries_machine_check_exception, | 441 | .machine_check_exception = pSeries_machine_check_exception, |
diff --git a/arch/ppc64/kernel/pSeries_smp.c b/arch/ppc64/kernel/pSeries_smp.c index fbad349ec58c..62c55a123560 100644 --- a/arch/ppc64/kernel/pSeries_smp.c +++ b/arch/ppc64/kernel/pSeries_smp.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * SMP support for pSeries machines. | 2 | * SMP support for pSeries and BPA machines. |
3 | * | 3 | * |
4 | * Dave Engebretsen, Peter Bergner, and | 4 | * Dave Engebretsen, Peter Bergner, and |
5 | * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com | 5 | * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com |
@@ -47,6 +47,7 @@ | |||
47 | #include <asm/pSeries_reconfig.h> | 47 | #include <asm/pSeries_reconfig.h> |
48 | 48 | ||
49 | #include "mpic.h" | 49 | #include "mpic.h" |
50 | #include "bpa_iic.h" | ||
50 | 51 | ||
51 | #ifdef DEBUG | 52 | #ifdef DEBUG |
52 | #define DBG(fmt...) udbg_printf(fmt) | 53 | #define DBG(fmt...) udbg_printf(fmt) |
@@ -92,10 +93,13 @@ static int query_cpu_stopped(unsigned int pcpu) | |||
92 | 93 | ||
93 | int pSeries_cpu_disable(void) | 94 | int pSeries_cpu_disable(void) |
94 | { | 95 | { |
96 | int cpu = smp_processor_id(); | ||
97 | |||
98 | cpu_clear(cpu, cpu_online_map); | ||
95 | systemcfg->processorCount--; | 99 | systemcfg->processorCount--; |
96 | 100 | ||
97 | /*fix boot_cpuid here*/ | 101 | /*fix boot_cpuid here*/ |
98 | if (smp_processor_id() == boot_cpuid) | 102 | if (cpu == boot_cpuid) |
99 | boot_cpuid = any_online_cpu(cpu_online_map); | 103 | boot_cpuid = any_online_cpu(cpu_online_map); |
100 | 104 | ||
101 | /* FIXME: abstract this to not be platform specific later on */ | 105 | /* FIXME: abstract this to not be platform specific later on */ |
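pSeries_cpu_disable now clears the dying CPU from cpu_online_map before anything else, so the later any_online_cpu() lookup cannot hand back the very CPU that is going away. The bitmap logic, sketched with a plain word-sized mask instead of the kernel's cpumask type:

#include <stdio.h>

/* Toy online map: bit n set means CPU n is online. */
static unsigned long online_map = 0x0fUL;	/* CPUs 0-3 online */

static void cpu_clear(int cpu) { online_map &= ~(1UL << cpu); }

static int any_online_cpu(void)
{
	for (int cpu = 0; cpu < 64; cpu++)
		if (online_map & (1UL << cpu))
			return cpu;
	return -1;	/* no CPU online */
}

int main(void)
{
	int boot_cpuid = 0;
	int dying = 0;			/* offline the boot CPU */

	cpu_clear(dying);		/* remove it from the map first */
	if (dying == boot_cpuid)
		boot_cpuid = any_online_cpu();	/* now guaranteed != dying */

	printf("new boot cpu: %d\n", boot_cpuid);	/* prints 1 */
	return 0;
}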
@@ -286,6 +290,7 @@ static inline int __devinit smp_startup_cpu(unsigned int lcpu) | |||
286 | return 1; | 290 | return 1; |
287 | } | 291 | } |
288 | 292 | ||
293 | #ifdef CONFIG_XICS | ||
289 | static inline void smp_xics_do_message(int cpu, int msg) | 294 | static inline void smp_xics_do_message(int cpu, int msg) |
290 | { | 295 | { |
291 | set_bit(msg, &xics_ipi_message[cpu].value); | 296 | set_bit(msg, &xics_ipi_message[cpu].value); |
@@ -327,6 +332,37 @@ static void __devinit smp_xics_setup_cpu(int cpu) | |||
327 | cpu_clear(cpu, of_spin_map); | 332 | cpu_clear(cpu, of_spin_map); |
328 | 333 | ||
329 | } | 334 | } |
335 | #endif /* CONFIG_XICS */ | ||
336 | #ifdef CONFIG_BPA_IIC | ||
337 | static void smp_iic_message_pass(int target, int msg) | ||
338 | { | ||
339 | unsigned int i; | ||
340 | |||
341 | if (target < NR_CPUS) { | ||
342 | iic_cause_IPI(target, msg); | ||
343 | } else { | ||
344 | for_each_online_cpu(i) { | ||
345 | if (target == MSG_ALL_BUT_SELF | ||
346 | && i == smp_processor_id()) | ||
347 | continue; | ||
348 | iic_cause_IPI(i, msg); | ||
349 | } | ||
350 | } | ||
351 | } | ||
352 | |||
353 | static int __init smp_iic_probe(void) | ||
354 | { | ||
355 | iic_request_IPIs(); | ||
356 | |||
357 | return cpus_weight(cpu_possible_map); | ||
358 | } | ||
359 | |||
360 | static void __devinit smp_iic_setup_cpu(int cpu) | ||
361 | { | ||
362 | if (cpu != boot_cpuid) | ||
363 | iic_setup_cpu(); | ||
364 | } | ||
365 | #endif /* CONFIG_BPA_IIC */ | ||
330 | 366 | ||
331 | static DEFINE_SPINLOCK(timebase_lock); | 367 | static DEFINE_SPINLOCK(timebase_lock); |
332 | static unsigned long timebase = 0; | 368 | static unsigned long timebase = 0; |
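smp_iic_message_pass above either targets a single CPU directly or walks the online CPUs, skipping the sender for MSG_ALL_BUT_SELF. The dispatch shape as a standalone sketch; the IPI call is stubbed out and the broadcast token values are placeholders, not the kernel's:

#include <stdio.h>

#define NR_CPUS          4
#define MSG_ALL          (NR_CPUS + 0)	/* placeholder broadcast targets */
#define MSG_ALL_BUT_SELF (NR_CPUS + 1)

static int self = 0;	/* pretend we run on CPU 0 */

static void cause_ipi(int cpu, int msg)
{
	printf("IPI %d -> cpu %d\n", msg, cpu);
}

static void message_pass(int target, int msg)
{
	if (target < NR_CPUS) {
		cause_ipi(target, msg);		/* single destination */
		return;
	}
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (target == MSG_ALL_BUT_SELF && cpu == self)
			continue;		/* broadcast excludes sender */
		cause_ipi(cpu, msg);
	}
}

int main(void)
{
	message_pass(2, 1);			/* one CPU */
	message_pass(MSG_ALL_BUT_SELF, 1);	/* CPUs 1..3 */
	return 0;
}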
@@ -375,20 +411,21 @@ static int smp_pSeries_cpu_bootable(unsigned int nr) | |||
375 | * cpus are assumed to be secondary threads. | 411 | * cpus are assumed to be secondary threads. |
376 | */ | 412 | */ |
377 | if (system_state < SYSTEM_RUNNING && | 413 | if (system_state < SYSTEM_RUNNING && |
378 | cur_cpu_spec->cpu_features & CPU_FTR_SMT && | 414 | cpu_has_feature(CPU_FTR_SMT) && |
379 | !smt_enabled_at_boot && nr % 2 != 0) | 415 | !smt_enabled_at_boot && nr % 2 != 0) |
380 | return 0; | 416 | return 0; |
381 | 417 | ||
382 | return 1; | 418 | return 1; |
383 | } | 419 | } |
384 | 420 | #ifdef CONFIG_MPIC | |
385 | static struct smp_ops_t pSeries_mpic_smp_ops = { | 421 | static struct smp_ops_t pSeries_mpic_smp_ops = { |
386 | .message_pass = smp_mpic_message_pass, | 422 | .message_pass = smp_mpic_message_pass, |
387 | .probe = smp_mpic_probe, | 423 | .probe = smp_mpic_probe, |
388 | .kick_cpu = smp_pSeries_kick_cpu, | 424 | .kick_cpu = smp_pSeries_kick_cpu, |
389 | .setup_cpu = smp_mpic_setup_cpu, | 425 | .setup_cpu = smp_mpic_setup_cpu, |
390 | }; | 426 | }; |
391 | 427 | #endif | |
428 | #ifdef CONFIG_XICS | ||
392 | static struct smp_ops_t pSeries_xics_smp_ops = { | 429 | static struct smp_ops_t pSeries_xics_smp_ops = { |
393 | .message_pass = smp_xics_message_pass, | 430 | .message_pass = smp_xics_message_pass, |
394 | .probe = smp_xics_probe, | 431 | .probe = smp_xics_probe, |
@@ -396,6 +433,16 @@ static struct smp_ops_t pSeries_xics_smp_ops = { | |||
396 | .setup_cpu = smp_xics_setup_cpu, | 433 | .setup_cpu = smp_xics_setup_cpu, |
397 | .cpu_bootable = smp_pSeries_cpu_bootable, | 434 | .cpu_bootable = smp_pSeries_cpu_bootable, |
398 | }; | 435 | }; |
436 | #endif | ||
437 | #ifdef CONFIG_BPA_IIC | ||
438 | static struct smp_ops_t bpa_iic_smp_ops = { | ||
439 | .message_pass = smp_iic_message_pass, | ||
440 | .probe = smp_iic_probe, | ||
441 | .kick_cpu = smp_pSeries_kick_cpu, | ||
442 | .setup_cpu = smp_iic_setup_cpu, | ||
443 | .cpu_bootable = smp_pSeries_cpu_bootable, | ||
444 | }; | ||
445 | #endif | ||
399 | 446 | ||
400 | /* This is called very early */ | 447 | /* This is called very early */ |
401 | void __init smp_init_pSeries(void) | 448 | void __init smp_init_pSeries(void) |
@@ -404,10 +451,25 @@ void __init smp_init_pSeries(void) | |||
404 | 451 | ||
405 | DBG(" -> smp_init_pSeries()\n"); | 452 | DBG(" -> smp_init_pSeries()\n"); |
406 | 453 | ||
407 | if (ppc64_interrupt_controller == IC_OPEN_PIC) | 454 | switch (ppc64_interrupt_controller) { |
455 | #ifdef CONFIG_MPIC | ||
456 | case IC_OPEN_PIC: | ||
408 | smp_ops = &pSeries_mpic_smp_ops; | 457 | smp_ops = &pSeries_mpic_smp_ops; |
409 | else | 458 | break; |
459 | #endif | ||
460 | #ifdef CONFIG_XICS | ||
461 | case IC_PPC_XIC: | ||
410 | smp_ops = &pSeries_xics_smp_ops; | 462 | smp_ops = &pSeries_xics_smp_ops; |
463 | break; | ||
464 | #endif | ||
465 | #ifdef CONFIG_BPA_IIC | ||
466 | case IC_BPA_IIC: | ||
467 | smp_ops = &bpa_iic_smp_ops; | ||
468 | break; | ||
469 | #endif | ||
470 | default: | ||
471 | panic("Invalid interrupt controller"); | ||
472 | } | ||
411 | 473 | ||
412 | #ifdef CONFIG_HOTPLUG_CPU | 474 | #ifdef CONFIG_HOTPLUG_CPU |
413 | smp_ops->cpu_disable = pSeries_cpu_disable; | 475 | smp_ops->cpu_disable = pSeries_cpu_disable; |
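smp_init_pSeries now picks its smp_ops table with a switch on ppc64_interrupt_controller; each arm is guarded by its Kconfig option so an unbuilt controller costs nothing, and an unknown value panics. The same selection pattern in miniature (controller IDs, ops contents and the guarded symbol are illustrative only):

#include <stdio.h>
#include <stdlib.h>

enum ic_type { IC_OPEN_PIC, IC_PPC_XIC, IC_BPA_IIC };

struct smp_ops {
	const char *name;
	int (*probe)(void);
};

static int probe_four(void) { return 4; }

#ifdef CONFIG_MPIC
static struct smp_ops mpic_ops = { "mpic", probe_four };
#endif
static struct smp_ops xics_ops = { "xics", probe_four };
static struct smp_ops iic_ops  = { "bpa-iic", probe_four };

static struct smp_ops *pick_smp_ops(enum ic_type ic)
{
	switch (ic) {
#ifdef CONFIG_MPIC
	case IC_OPEN_PIC: return &mpic_ops;	/* compiled out without MPIC */
#endif
	case IC_PPC_XIC:  return &xics_ops;
	case IC_BPA_IIC:  return &iic_ops;
	default:
		fprintf(stderr, "invalid interrupt controller\n");
		exit(1);
	}
}

int main(void)
{
	struct smp_ops *ops = pick_smp_ops(IC_PPC_XIC);
	printf("%s: %d cpus\n", ops->name, ops->probe());
	return 0;
}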
@@ -419,8 +481,8 @@ void __init smp_init_pSeries(void) | |||
419 | #endif | 481 | #endif |
420 | 482 | ||
421 | /* Mark threads which are still spinning in hold loops. */ | 483 | /* Mark threads which are still spinning in hold loops. */ |
422 | if (cur_cpu_spec->cpu_features & CPU_FTR_SMT) | 484 | if (cpu_has_feature(CPU_FTR_SMT)) { |
423 | for_each_present_cpu(i) { | 485 | for_each_present_cpu(i) { |
424 | if (i % 2 == 0) | 486 | if (i % 2 == 0) |
425 | /* | 487 | /* |
426 | * Even-numbered logical cpus correspond to | 488 | * Even-numbered logical cpus correspond to |
@@ -428,8 +490,9 @@ void __init smp_init_pSeries(void) | |||
428 | */ | 490 | */ |
429 | cpu_set(i, of_spin_map); | 491 | cpu_set(i, of_spin_map); |
430 | } | 492 | } |
431 | else | 493 | } else { |
432 | of_spin_map = cpu_present_map; | 494 | of_spin_map = cpu_present_map; |
495 | } | ||
433 | 496 | ||
434 | cpu_clear(boot_cpuid, of_spin_map); | 497 | cpu_clear(boot_cpuid, of_spin_map); |
435 | 498 | ||
diff --git a/arch/ppc64/kernel/pacaData.c b/arch/ppc64/kernel/pacaData.c index a3e0975c26c1..6316188737b6 100644 --- a/arch/ppc64/kernel/pacaData.c +++ b/arch/ppc64/kernel/pacaData.c | |||
@@ -42,21 +42,7 @@ extern unsigned long __toc_start; | |||
42 | * processors. The processor VPD array needs one entry per physical | 42 | * processors. The processor VPD array needs one entry per physical |
43 | * processor (not thread). | 43 | * processor (not thread). |
44 | */ | 44 | */ |
45 | #ifdef CONFIG_PPC_ISERIES | 45 | #define PACA_INIT_COMMON(number, start, asrr, asrv) \ |
46 | #define EXTRA_INITS(number, lpq) \ | ||
47 | .lppaca_ptr = &paca[number].lppaca, \ | ||
48 | .lpqueue_ptr = (lpq), /* &xItLpQueue, */ \ | ||
49 | .reg_save_ptr = &paca[number].reg_save, \ | ||
50 | .reg_save = { \ | ||
51 | .xDesc = 0xd397d9e2, /* "LpRS" */ \ | ||
52 | .xSize = sizeof(struct ItLpRegSave) \ | ||
53 | }, | ||
54 | #else | ||
55 | #define EXTRA_INITS(number, lpq) | ||
56 | #endif | ||
57 | |||
58 | #define PACAINITDATA(number,start,lpq,asrr,asrv) \ | ||
59 | { \ | ||
60 | .lock_token = 0x8000, \ | 46 | .lock_token = 0x8000, \ |
61 | .paca_index = (number), /* Paca Index */ \ | 47 | .paca_index = (number), /* Paca Index */ \ |
62 | .default_decr = 0x00ff0000, /* Initial Decr */ \ | 48 | .default_decr = 0x00ff0000, /* Initial Decr */ \ |
@@ -74,147 +60,79 @@ extern unsigned long __toc_start; | |||
74 | .end_of_quantum = 0xfffffffffffffffful, \ | 60 | .end_of_quantum = 0xfffffffffffffffful, \ |
75 | .slb_count = 64, \ | 61 | .slb_count = 64, \ |
76 | }, \ | 62 | }, \ |
77 | EXTRA_INITS((number), (lpq)) \ | ||
78 | } | ||
79 | 63 | ||
80 | struct paca_struct paca[] = { | ||
81 | #ifdef CONFIG_PPC_ISERIES | 64 | #ifdef CONFIG_PPC_ISERIES |
82 | PACAINITDATA( 0, 1, &xItLpQueue, 0, STAB0_VIRT_ADDR), | 65 | #define PACA_INIT_ISERIES(number) \ |
66 | .lppaca_ptr = &paca[number].lppaca, \ | ||
67 | .reg_save_ptr = &paca[number].reg_save, \ | ||
68 | .reg_save = { \ | ||
69 | .xDesc = 0xd397d9e2, /* "LpRS" */ \ | ||
70 | .xSize = sizeof(struct ItLpRegSave) \ | ||
71 | } | ||
72 | |||
73 | #define PACA_INIT(number) \ | ||
74 | { \ | ||
75 | PACA_INIT_COMMON(number, 0, 0, 0) \ | ||
76 | PACA_INIT_ISERIES(number) \ | ||
77 | } | ||
78 | |||
79 | #define BOOTCPU_PACA_INIT(number) \ | ||
80 | { \ | ||
81 | PACA_INIT_COMMON(number, 1, 0, STAB0_VIRT_ADDR) \ | ||
82 | PACA_INIT_ISERIES(number) \ | ||
83 | } | ||
84 | |||
83 | #else | 85 | #else |
84 | PACAINITDATA( 0, 1, NULL, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR), | 86 | #define PACA_INIT(number) \ |
87 | { \ | ||
88 | PACA_INIT_COMMON(number, 0, 0, 0) \ | ||
89 | } | ||
90 | |||
91 | #define BOOTCPU_PACA_INIT(number) \ | ||
92 | { \ | ||
93 | PACA_INIT_COMMON(number, 1, STAB0_PHYS_ADDR, STAB0_VIRT_ADDR) \ | ||
94 | } | ||
85 | #endif | 95 | #endif |
96 | |||
97 | struct paca_struct paca[] = { | ||
98 | BOOTCPU_PACA_INIT(0), | ||
86 | #if NR_CPUS > 1 | 99 | #if NR_CPUS > 1 |
87 | PACAINITDATA( 1, 0, NULL, 0, 0), | 100 | PACA_INIT( 1), PACA_INIT( 2), PACA_INIT( 3), |
88 | PACAINITDATA( 2, 0, NULL, 0, 0), | ||
89 | PACAINITDATA( 3, 0, NULL, 0, 0), | ||
90 | #if NR_CPUS > 4 | 101 | #if NR_CPUS > 4 |
91 | PACAINITDATA( 4, 0, NULL, 0, 0), | 102 | PACA_INIT( 4), PACA_INIT( 5), PACA_INIT( 6), PACA_INIT( 7), |
92 | PACAINITDATA( 5, 0, NULL, 0, 0), | ||
93 | PACAINITDATA( 6, 0, NULL, 0, 0), | ||
94 | PACAINITDATA( 7, 0, NULL, 0, 0), | ||
95 | #if NR_CPUS > 8 | 103 | #if NR_CPUS > 8 |
96 | PACAINITDATA( 8, 0, NULL, 0, 0), | 104 | PACA_INIT( 8), PACA_INIT( 9), PACA_INIT( 10), PACA_INIT( 11), |
97 | PACAINITDATA( 9, 0, NULL, 0, 0), | 105 | PACA_INIT( 12), PACA_INIT( 13), PACA_INIT( 14), PACA_INIT( 15), |
98 | PACAINITDATA(10, 0, NULL, 0, 0), | 106 | PACA_INIT( 16), PACA_INIT( 17), PACA_INIT( 18), PACA_INIT( 19), |
99 | PACAINITDATA(11, 0, NULL, 0, 0), | 107 | PACA_INIT( 20), PACA_INIT( 21), PACA_INIT( 22), PACA_INIT( 23), |
100 | PACAINITDATA(12, 0, NULL, 0, 0), | 108 | PACA_INIT( 24), PACA_INIT( 25), PACA_INIT( 26), PACA_INIT( 27), |
101 | PACAINITDATA(13, 0, NULL, 0, 0), | 109 | PACA_INIT( 28), PACA_INIT( 29), PACA_INIT( 30), PACA_INIT( 31), |
102 | PACAINITDATA(14, 0, NULL, 0, 0), | ||
103 | PACAINITDATA(15, 0, NULL, 0, 0), | ||
104 | PACAINITDATA(16, 0, NULL, 0, 0), | ||
105 | PACAINITDATA(17, 0, NULL, 0, 0), | ||
106 | PACAINITDATA(18, 0, NULL, 0, 0), | ||
107 | PACAINITDATA(19, 0, NULL, 0, 0), | ||
108 | PACAINITDATA(20, 0, NULL, 0, 0), | ||
109 | PACAINITDATA(21, 0, NULL, 0, 0), | ||
110 | PACAINITDATA(22, 0, NULL, 0, 0), | ||
111 | PACAINITDATA(23, 0, NULL, 0, 0), | ||
112 | PACAINITDATA(24, 0, NULL, 0, 0), | ||
113 | PACAINITDATA(25, 0, NULL, 0, 0), | ||
114 | PACAINITDATA(26, 0, NULL, 0, 0), | ||
115 | PACAINITDATA(27, 0, NULL, 0, 0), | ||
116 | PACAINITDATA(28, 0, NULL, 0, 0), | ||
117 | PACAINITDATA(29, 0, NULL, 0, 0), | ||
118 | PACAINITDATA(30, 0, NULL, 0, 0), | ||
119 | PACAINITDATA(31, 0, NULL, 0, 0), | ||
120 | #if NR_CPUS > 32 | 110 | #if NR_CPUS > 32 |
121 | PACAINITDATA(32, 0, NULL, 0, 0), | 111 | PACA_INIT( 32), PACA_INIT( 33), PACA_INIT( 34), PACA_INIT( 35), |
122 | PACAINITDATA(33, 0, NULL, 0, 0), | 112 | PACA_INIT( 36), PACA_INIT( 37), PACA_INIT( 38), PACA_INIT( 39), |
123 | PACAINITDATA(34, 0, NULL, 0, 0), | 113 | PACA_INIT( 40), PACA_INIT( 41), PACA_INIT( 42), PACA_INIT( 43), |
124 | PACAINITDATA(35, 0, NULL, 0, 0), | 114 | PACA_INIT( 44), PACA_INIT( 45), PACA_INIT( 46), PACA_INIT( 47), |
125 | PACAINITDATA(36, 0, NULL, 0, 0), | 115 | PACA_INIT( 48), PACA_INIT( 49), PACA_INIT( 50), PACA_INIT( 51), |
126 | PACAINITDATA(37, 0, NULL, 0, 0), | 116 | PACA_INIT( 52), PACA_INIT( 53), PACA_INIT( 54), PACA_INIT( 55), |
127 | PACAINITDATA(38, 0, NULL, 0, 0), | 117 | PACA_INIT( 56), PACA_INIT( 57), PACA_INIT( 58), PACA_INIT( 59), |
128 | PACAINITDATA(39, 0, NULL, 0, 0), | 118 | PACA_INIT( 60), PACA_INIT( 61), PACA_INIT( 62), PACA_INIT( 63), |
129 | PACAINITDATA(40, 0, NULL, 0, 0), | ||
130 | PACAINITDATA(41, 0, NULL, 0, 0), | ||
131 | PACAINITDATA(42, 0, NULL, 0, 0), | ||
132 | PACAINITDATA(43, 0, NULL, 0, 0), | ||
133 | PACAINITDATA(44, 0, NULL, 0, 0), | ||
134 | PACAINITDATA(45, 0, NULL, 0, 0), | ||
135 | PACAINITDATA(46, 0, NULL, 0, 0), | ||
136 | PACAINITDATA(47, 0, NULL, 0, 0), | ||
137 | PACAINITDATA(48, 0, NULL, 0, 0), | ||
138 | PACAINITDATA(49, 0, NULL, 0, 0), | ||
139 | PACAINITDATA(50, 0, NULL, 0, 0), | ||
140 | PACAINITDATA(51, 0, NULL, 0, 0), | ||
141 | PACAINITDATA(52, 0, NULL, 0, 0), | ||
142 | PACAINITDATA(53, 0, NULL, 0, 0), | ||
143 | PACAINITDATA(54, 0, NULL, 0, 0), | ||
144 | PACAINITDATA(55, 0, NULL, 0, 0), | ||
145 | PACAINITDATA(56, 0, NULL, 0, 0), | ||
146 | PACAINITDATA(57, 0, NULL, 0, 0), | ||
147 | PACAINITDATA(58, 0, NULL, 0, 0), | ||
148 | PACAINITDATA(59, 0, NULL, 0, 0), | ||
149 | PACAINITDATA(60, 0, NULL, 0, 0), | ||
150 | PACAINITDATA(61, 0, NULL, 0, 0), | ||
151 | PACAINITDATA(62, 0, NULL, 0, 0), | ||
152 | PACAINITDATA(63, 0, NULL, 0, 0), | ||
153 | #if NR_CPUS > 64 | 119 | #if NR_CPUS > 64 |
154 | PACAINITDATA(64, 0, NULL, 0, 0), | 120 | PACA_INIT( 64), PACA_INIT( 65), PACA_INIT( 66), PACA_INIT( 67), |
155 | PACAINITDATA(65, 0, NULL, 0, 0), | 121 | PACA_INIT( 68), PACA_INIT( 69), PACA_INIT( 70), PACA_INIT( 71), |
156 | PACAINITDATA(66, 0, NULL, 0, 0), | 122 | PACA_INIT( 72), PACA_INIT( 73), PACA_INIT( 74), PACA_INIT( 75), |
157 | PACAINITDATA(67, 0, NULL, 0, 0), | 123 | PACA_INIT( 76), PACA_INIT( 77), PACA_INIT( 78), PACA_INIT( 79), |
158 | PACAINITDATA(68, 0, NULL, 0, 0), | 124 | PACA_INIT( 80), PACA_INIT( 81), PACA_INIT( 82), PACA_INIT( 83), |
159 | PACAINITDATA(69, 0, NULL, 0, 0), | 125 | PACA_INIT( 84), PACA_INIT( 85), PACA_INIT( 86), PACA_INIT( 87), |
160 | PACAINITDATA(70, 0, NULL, 0, 0), | 126 | PACA_INIT( 88), PACA_INIT( 89), PACA_INIT( 90), PACA_INIT( 91), |
161 | PACAINITDATA(71, 0, NULL, 0, 0), | 127 | PACA_INIT( 92), PACA_INIT( 93), PACA_INIT( 94), PACA_INIT( 95), |
162 | PACAINITDATA(72, 0, NULL, 0, 0), | 128 | PACA_INIT( 96), PACA_INIT( 97), PACA_INIT( 98), PACA_INIT( 99), |
163 | PACAINITDATA(73, 0, NULL, 0, 0), | 129 | PACA_INIT(100), PACA_INIT(101), PACA_INIT(102), PACA_INIT(103), |
164 | PACAINITDATA(74, 0, NULL, 0, 0), | 130 | PACA_INIT(104), PACA_INIT(105), PACA_INIT(106), PACA_INIT(107), |
165 | PACAINITDATA(75, 0, NULL, 0, 0), | 131 | PACA_INIT(108), PACA_INIT(109), PACA_INIT(110), PACA_INIT(111), |
166 | PACAINITDATA(76, 0, NULL, 0, 0), | 132 | PACA_INIT(112), PACA_INIT(113), PACA_INIT(114), PACA_INIT(115), |
167 | PACAINITDATA(77, 0, NULL, 0, 0), | 133 | PACA_INIT(116), PACA_INIT(117), PACA_INIT(118), PACA_INIT(119), |
168 | PACAINITDATA(78, 0, NULL, 0, 0), | 134 | PACA_INIT(120), PACA_INIT(121), PACA_INIT(122), PACA_INIT(123), |
169 | PACAINITDATA(79, 0, NULL, 0, 0), | 135 | PACA_INIT(124), PACA_INIT(125), PACA_INIT(126), PACA_INIT(127), |
170 | PACAINITDATA(80, 0, NULL, 0, 0), | ||
171 | PACAINITDATA(81, 0, NULL, 0, 0), | ||
172 | PACAINITDATA(82, 0, NULL, 0, 0), | ||
173 | PACAINITDATA(83, 0, NULL, 0, 0), | ||
174 | PACAINITDATA(84, 0, NULL, 0, 0), | ||
175 | PACAINITDATA(85, 0, NULL, 0, 0), | ||
176 | PACAINITDATA(86, 0, NULL, 0, 0), | ||
177 | PACAINITDATA(87, 0, NULL, 0, 0), | ||
178 | PACAINITDATA(88, 0, NULL, 0, 0), | ||
179 | PACAINITDATA(89, 0, NULL, 0, 0), | ||
180 | PACAINITDATA(90, 0, NULL, 0, 0), | ||
181 | PACAINITDATA(91, 0, NULL, 0, 0), | ||
182 | PACAINITDATA(92, 0, NULL, 0, 0), | ||
183 | PACAINITDATA(93, 0, NULL, 0, 0), | ||
184 | PACAINITDATA(94, 0, NULL, 0, 0), | ||
185 | PACAINITDATA(95, 0, NULL, 0, 0), | ||
186 | PACAINITDATA(96, 0, NULL, 0, 0), | ||
187 | PACAINITDATA(97, 0, NULL, 0, 0), | ||
188 | PACAINITDATA(98, 0, NULL, 0, 0), | ||
189 | PACAINITDATA(99, 0, NULL, 0, 0), | ||
190 | PACAINITDATA(100, 0, NULL, 0, 0), | ||
191 | PACAINITDATA(101, 0, NULL, 0, 0), | ||
192 | PACAINITDATA(102, 0, NULL, 0, 0), | ||
193 | PACAINITDATA(103, 0, NULL, 0, 0), | ||
194 | PACAINITDATA(104, 0, NULL, 0, 0), | ||
195 | PACAINITDATA(105, 0, NULL, 0, 0), | ||
196 | PACAINITDATA(106, 0, NULL, 0, 0), | ||
197 | PACAINITDATA(107, 0, NULL, 0, 0), | ||
198 | PACAINITDATA(108, 0, NULL, 0, 0), | ||
199 | PACAINITDATA(109, 0, NULL, 0, 0), | ||
200 | PACAINITDATA(110, 0, NULL, 0, 0), | ||
201 | PACAINITDATA(111, 0, NULL, 0, 0), | ||
202 | PACAINITDATA(112, 0, NULL, 0, 0), | ||
203 | PACAINITDATA(113, 0, NULL, 0, 0), | ||
204 | PACAINITDATA(114, 0, NULL, 0, 0), | ||
205 | PACAINITDATA(115, 0, NULL, 0, 0), | ||
206 | PACAINITDATA(116, 0, NULL, 0, 0), | ||
207 | PACAINITDATA(117, 0, NULL, 0, 0), | ||
208 | PACAINITDATA(118, 0, NULL, 0, 0), | ||
209 | PACAINITDATA(119, 0, NULL, 0, 0), | ||
210 | PACAINITDATA(120, 0, NULL, 0, 0), | ||
211 | PACAINITDATA(121, 0, NULL, 0, 0), | ||
212 | PACAINITDATA(122, 0, NULL, 0, 0), | ||
213 | PACAINITDATA(123, 0, NULL, 0, 0), | ||
214 | PACAINITDATA(124, 0, NULL, 0, 0), | ||
215 | PACAINITDATA(125, 0, NULL, 0, 0), | ||
216 | PACAINITDATA(126, 0, NULL, 0, 0), | ||
217 | PACAINITDATA(127, 0, NULL, 0, 0), | ||
218 | #endif | 136 | #endif |
219 | #endif | 137 | #endif |
220 | #endif | 138 | #endif |
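The pacaData.c rework folds the old per-CPU PACAINITDATA calls into PACA_INIT()/BOOTCPU_PACA_INIT() macros that expand to designated initializers, so the table reads as one short line per group of CPUs. A toy version of the same macro-built static array, with invented fields standing in for the real paca_struct members:

#include <stdio.h>

struct paca {
	int index;
	int is_boot;
	unsigned int lock_token;
};

/* Common fields shared by every entry. */
#define PACA_INIT_COMMON(n, boot) \
	.index = (n), .is_boot = (boot), .lock_token = 0x8000,

#define PACA_INIT(n)         { PACA_INIT_COMMON(n, 0) }
#define BOOTCPU_PACA_INIT(n) { PACA_INIT_COMMON(n, 1) }

static struct paca paca[] = {
	BOOTCPU_PACA_INIT(0),
	PACA_INIT(1), PACA_INIT(2), PACA_INIT(3),
};

int main(void)
{
	for (unsigned i = 0; i < sizeof(paca) / sizeof(paca[0]); i++)
		printf("cpu %d boot=%d token=0x%x\n",
		       paca[i].index, paca[i].is_boot, paca[i].lock_token);
	return 0;
}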
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c index d786d4b6af0b..ae6f579d3fa0 100644 --- a/arch/ppc64/kernel/pci.c +++ b/arch/ppc64/kernel/pci.c | |||
@@ -351,7 +351,7 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, | |||
351 | *offset += hose->pci_mem_offset; | 351 | *offset += hose->pci_mem_offset; |
352 | res_bit = IORESOURCE_MEM; | 352 | res_bit = IORESOURCE_MEM; |
353 | } else { | 353 | } else { |
354 | io_offset = (unsigned long)hose->io_base_virt; | 354 | io_offset = (unsigned long)hose->io_base_virt - pci_io_base; |
355 | *offset += io_offset; | 355 | *offset += io_offset; |
356 | res_bit = IORESOURCE_IO; | 356 | res_bit = IORESOURCE_IO; |
357 | } | 357 | } |
@@ -378,7 +378,7 @@ static struct resource *__pci_mmap_make_offset(struct pci_dev *dev, | |||
378 | 378 | ||
379 | /* found it! construct the final physical address */ | 379 | /* found it! construct the final physical address */ |
380 | if (mmap_state == pci_mmap_io) | 380 | if (mmap_state == pci_mmap_io) |
381 | *offset += hose->io_base_phys - io_offset; | 381 | *offset += hose->io_base_phys - io_offset; |
382 | return rp; | 382 | return rp; |
383 | } | 383 | } |
384 | 384 | ||
@@ -507,7 +507,7 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | |||
507 | } | 507 | } |
508 | 508 | ||
509 | #ifdef CONFIG_PPC_MULTIPLATFORM | 509 | #ifdef CONFIG_PPC_MULTIPLATFORM |
510 | static ssize_t pci_show_devspec(struct device *dev, char *buf) | 510 | static ssize_t pci_show_devspec(struct device *dev, struct device_attribute *attr, char *buf) |
511 | { | 511 | { |
512 | struct pci_dev *pdev; | 512 | struct pci_dev *pdev; |
513 | struct device_node *np; | 513 | struct device_node *np; |
@@ -902,6 +902,9 @@ void __devinit pcibios_fixup_bus(struct pci_bus *bus) | |||
902 | list_for_each_entry(dev, &bus->devices, bus_list) | 902 | list_for_each_entry(dev, &bus->devices, bus_list) |
903 | ppc_md.iommu_dev_setup(dev); | 903 | ppc_md.iommu_dev_setup(dev); |
904 | 904 | ||
905 | if (ppc_md.irq_bus_setup) | ||
906 | ppc_md.irq_bus_setup(bus); | ||
907 | |||
905 | if (!pci_probe_only) | 908 | if (!pci_probe_only) |
906 | return; | 909 | return; |
907 | 910 | ||
@@ -941,4 +944,22 @@ int pci_read_irq_line(struct pci_dev *pci_dev) | |||
941 | } | 944 | } |
942 | EXPORT_SYMBOL(pci_read_irq_line); | 945 | EXPORT_SYMBOL(pci_read_irq_line); |
943 | 946 | ||
947 | void pci_resource_to_user(const struct pci_dev *dev, int bar, | ||
948 | const struct resource *rsrc, | ||
949 | u64 *start, u64 *end) | ||
950 | { | ||
951 | struct pci_controller *hose = pci_bus_to_host(dev->bus); | ||
952 | unsigned long offset = 0; | ||
953 | |||
954 | if (hose == NULL) | ||
955 | return; | ||
956 | |||
957 | if (rsrc->flags & IORESOURCE_IO) | ||
958 | offset = pci_io_base - (unsigned long)hose->io_base_virt + | ||
959 | hose->io_base_phys; | ||
960 | |||
961 | *start = rsrc->start + offset; | ||
962 | *end = rsrc->end + offset; | ||
963 | } | ||
964 | |||
944 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | 965 | #endif /* CONFIG_PPC_MULTIPLATFORM */ |
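The new pci_resource_to_user() above maps a kernel I/O resource back to the range user space should see: for IORESOURCE_IO it adds pci_io_base - io_base_virt + io_base_phys to the resource start. The arithmetic on its own, with made-up example addresses (all values purely illustrative, not taken from real hardware):

#include <stdio.h>

int main(void)
{
	/* Illustrative addresses only; real values come from the PHB. */
	unsigned long pci_io_base  = 0xd000080000000000UL; /* global IO window  */
	unsigned long io_base_virt = 0xd000080000100000UL; /* this PHB's slice  */
	unsigned long io_base_phys = 0x00000400f8000000UL; /* bus-physical base */
	unsigned long rsrc_start   = 0x100400UL;           /* kernel resource   */

	/* Same formula as the pci.c hunk above. */
	unsigned long offset = pci_io_base - io_base_virt + io_base_phys;
	unsigned long user_start = rsrc_start + offset;

	printf("user-visible start: 0x%lx\n", user_start);
	return 0;
}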
diff --git a/arch/ppc64/kernel/pci.h b/arch/ppc64/kernel/pci.h index 0fd7d849aa77..26be78b13af1 100644 --- a/arch/ppc64/kernel/pci.h +++ b/arch/ppc64/kernel/pci.h | |||
@@ -40,10 +40,14 @@ struct device_node *fetch_dev_dn(struct pci_dev *dev); | |||
40 | void pci_addr_cache_insert_device(struct pci_dev *dev); | 40 | void pci_addr_cache_insert_device(struct pci_dev *dev); |
41 | void pci_addr_cache_remove_device(struct pci_dev *dev); | 41 | void pci_addr_cache_remove_device(struct pci_dev *dev); |
42 | 42 | ||
43 | /* From pSeries_pci.h */ | 43 | /* From rtas_pci.h */ |
44 | void init_pci_config_tokens (void); | 44 | void init_pci_config_tokens (void); |
45 | unsigned long get_phb_buid (struct device_node *); | 45 | unsigned long get_phb_buid (struct device_node *); |
46 | 46 | ||
47 | /* From pSeries_pci.h */ | ||
48 | extern void pSeries_final_fixup(void); | ||
49 | extern void pSeries_irq_bus_setup(struct pci_bus *bus); | ||
50 | |||
47 | extern unsigned long pci_probe_only; | 51 | extern unsigned long pci_probe_only; |
48 | extern unsigned long pci_assign_all_buses; | 52 | extern unsigned long pci_assign_all_buses; |
49 | extern int pci_read_irq_line(struct pci_dev *pci_dev); | 53 | extern int pci_read_irq_line(struct pci_dev *pci_dev); |
diff --git a/arch/ppc64/kernel/pmac_time.c b/arch/ppc64/kernel/pmac_time.c index f24827581dd7..3059edb09cc8 100644 --- a/arch/ppc64/kernel/pmac_time.c +++ b/arch/ppc64/kernel/pmac_time.c | |||
@@ -40,11 +40,6 @@ | |||
40 | #define DBG(x...) | 40 | #define DBG(x...) |
41 | #endif | 41 | #endif |
42 | 42 | ||
43 | extern void setup_default_decr(void); | ||
44 | |||
45 | extern unsigned long ppc_tb_freq; | ||
46 | extern unsigned long ppc_proc_freq; | ||
47 | |||
48 | /* Apparently the RTC stores seconds since 1 Jan 1904 */ | 43 | /* Apparently the RTC stores seconds since 1 Jan 1904 */ |
49 | #define RTC_OFFSET 2082844800 | 44 | #define RTC_OFFSET 2082844800 |
50 | 45 | ||
@@ -161,8 +156,7 @@ void __init pmac_get_boot_time(struct rtc_time *tm) | |||
161 | 156 | ||
162 | /* | 157 | /* |
163 | * Query the OF and get the decr frequency. | 158 | * Query the OF and get the decr frequency. |
164 | * This was taken from the pmac time_init() when merging the prep/pmac | 159 | * FIXME: merge this with generic_calibrate_decr |
165 | * time functions. | ||
166 | */ | 160 | */ |
167 | void __init pmac_calibrate_decr(void) | 161 | void __init pmac_calibrate_decr(void) |
168 | { | 162 | { |
diff --git a/arch/ppc64/kernel/ppc_ksyms.c b/arch/ppc64/kernel/ppc_ksyms.c index b230a63fe4c8..705742f4eec6 100644 --- a/arch/ppc64/kernel/ppc_ksyms.c +++ b/arch/ppc64/kernel/ppc_ksyms.c | |||
@@ -75,6 +75,7 @@ EXPORT_SYMBOL(giveup_fpu); | |||
75 | EXPORT_SYMBOL(giveup_altivec); | 75 | EXPORT_SYMBOL(giveup_altivec); |
76 | #endif | 76 | #endif |
77 | EXPORT_SYMBOL(__flush_icache_range); | 77 | EXPORT_SYMBOL(__flush_icache_range); |
78 | EXPORT_SYMBOL(flush_dcache_range); | ||
78 | 79 | ||
79 | #ifdef CONFIG_SMP | 80 | #ifdef CONFIG_SMP |
80 | #ifdef CONFIG_PPC_ISERIES | 81 | #ifdef CONFIG_PPC_ISERIES |
diff --git a/arch/ppc64/kernel/proc_ppc64.c b/arch/ppc64/kernel/proc_ppc64.c index 0914b0669b05..a87c66a9652a 100644 --- a/arch/ppc64/kernel/proc_ppc64.c +++ b/arch/ppc64/kernel/proc_ppc64.c | |||
@@ -53,7 +53,7 @@ static int __init proc_ppc64_create(void) | |||
53 | if (!root) | 53 | if (!root) |
54 | return 1; | 54 | return 1; |
55 | 55 | ||
56 | if (!(systemcfg->platform & PLATFORM_PSERIES)) | 56 | if (!(systemcfg->platform & (PLATFORM_PSERIES | PLATFORM_BPA))) |
57 | return 0; | 57 | return 0; |
58 | 58 | ||
59 | if (!proc_mkdir("rtas", root)) | 59 | if (!proc_mkdir("rtas", root)) |
diff --git a/arch/ppc64/kernel/process.c b/arch/ppc64/kernel/process.c index cdfecbeb331f..f7cae05e40fb 100644 --- a/arch/ppc64/kernel/process.c +++ b/arch/ppc64/kernel/process.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/kallsyms.h> | 36 | #include <linux/kallsyms.h> |
37 | #include <linux/interrupt.h> | 37 | #include <linux/interrupt.h> |
38 | #include <linux/utsname.h> | 38 | #include <linux/utsname.h> |
39 | #include <linux/kprobes.h> | ||
39 | 40 | ||
40 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
41 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
@@ -58,14 +59,6 @@ struct task_struct *last_task_used_math = NULL; | |||
58 | struct task_struct *last_task_used_altivec = NULL; | 59 | struct task_struct *last_task_used_altivec = NULL; |
59 | #endif | 60 | #endif |
60 | 61 | ||
61 | struct mm_struct ioremap_mm = { | ||
62 | .pgd = ioremap_dir, | ||
63 | .mm_users = ATOMIC_INIT(2), | ||
64 | .mm_count = ATOMIC_INIT(1), | ||
65 | .cpu_vm_mask = CPU_MASK_ALL, | ||
66 | .page_table_lock = SPIN_LOCK_UNLOCKED, | ||
67 | }; | ||
68 | |||
69 | /* | 62 | /* |
70 | * Make sure the floating-point register state in the | 63 | * Make sure the floating-point register state in the |
71 | * the thread_struct is up to date for task tsk. | 64 | * the thread_struct is up to date for task tsk. |
@@ -315,6 +308,8 @@ void show_regs(struct pt_regs * regs) | |||
315 | 308 | ||
316 | void exit_thread(void) | 309 | void exit_thread(void) |
317 | { | 310 | { |
311 | kprobe_flush_task(current); | ||
312 | |||
318 | #ifndef CONFIG_SMP | 313 | #ifndef CONFIG_SMP |
319 | if (last_task_used_math == current) | 314 | if (last_task_used_math == current) |
320 | last_task_used_math = NULL; | 315 | last_task_used_math = NULL; |
@@ -329,6 +324,7 @@ void flush_thread(void) | |||
329 | { | 324 | { |
330 | struct thread_info *t = current_thread_info(); | 325 | struct thread_info *t = current_thread_info(); |
331 | 326 | ||
327 | kprobe_flush_task(current); | ||
332 | if (t->flags & _TIF_ABI_PENDING) | 328 | if (t->flags & _TIF_ABI_PENDING) |
333 | t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT); | 329 | t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT); |
334 | 330 | ||
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c index eb6538b58008..47727a6f7346 100644 --- a/arch/ppc64/kernel/prom.c +++ b/arch/ppc64/kernel/prom.c | |||
@@ -884,6 +884,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
884 | { | 884 | { |
885 | char *type = get_flat_dt_prop(node, "device_type", NULL); | 885 | char *type = get_flat_dt_prop(node, "device_type", NULL); |
886 | u32 *prop; | 886 | u32 *prop; |
887 | unsigned long size; | ||
887 | 888 | ||
888 | /* We are scanning "cpu" nodes only */ | 889 | /* We are scanning "cpu" nodes only */ |
889 | if (type == NULL || strcmp(type, "cpu") != 0) | 890 | if (type == NULL || strcmp(type, "cpu") != 0) |
@@ -929,6 +930,17 @@ static int __init early_init_dt_scan_cpus(unsigned long node, | |||
929 | cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; | 930 | cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC; |
930 | } | 931 | } |
931 | 932 | ||
933 | /* | ||
934 | * Check for an SMT capable CPU and set the CPU feature. We do | ||
935 | * this by looking at the size of the ibm,ppc-interrupt-server#s | ||
936 | * property | ||
937 | */ | ||
938 | prop = (u32 *)get_flat_dt_prop(node, "ibm,ppc-interrupt-server#s", | ||
939 | &size); | ||
940 | cur_cpu_spec->cpu_features &= ~CPU_FTR_SMT; | ||
941 | if (prop && ((size / sizeof(u32)) > 1)) | ||
942 | cur_cpu_spec->cpu_features |= CPU_FTR_SMT; | ||
943 | |||
932 | return 0; | 944 | return 0; |
933 | } | 945 | } |
934 | 946 | ||
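The prom.c hunk derives CPU_FTR_SMT from the length of the ibm,ppc-interrupt-server#s property: more than one 32-bit server number in a cpu node means the core has multiple hardware threads. The length check in isolation:

#include <stdio.h>

/* Return 1 if a cpu node's interrupt-server#s property of 'size'
 * bytes describes an SMT-capable core (more than one entry). */
static int cpu_is_smt(const unsigned int *prop, unsigned long size)
{
	return prop != NULL && (size / sizeof(unsigned int)) > 1;
}

int main(void)
{
	unsigned int one_thread[]  = { 0 };
	unsigned int two_threads[] = { 0, 1 };

	printf("%d\n", cpu_is_smt(one_thread, sizeof(one_thread)));	/* 0 */
	printf("%d\n", cpu_is_smt(two_threads, sizeof(two_threads)));	/* 1 */
	return 0;
}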
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c index b7683abfbe6a..e248a7950aeb 100644 --- a/arch/ppc64/kernel/prom_init.c +++ b/arch/ppc64/kernel/prom_init.c | |||
@@ -1915,9 +1915,9 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long | |||
1915 | prom_send_capabilities(); | 1915 | prom_send_capabilities(); |
1916 | 1916 | ||
1917 | /* | 1917 | /* |
1918 | * On pSeries, copy the CPU hold code | 1918 | * On pSeries and BPA, copy the CPU hold code |
1919 | */ | 1919 | */ |
1920 | if (RELOC(of_platform) & PLATFORM_PSERIES) | 1920 | if (RELOC(of_platform) & (PLATFORM_PSERIES | PLATFORM_BPA)) |
1921 | copy_and_flush(0, KERNELBASE - offset, 0x100, 0); | 1921 | copy_and_flush(0, KERNELBASE - offset, 0x100, 0); |
1922 | 1922 | ||
1923 | /* | 1923 | /* |
diff --git a/arch/ppc64/kernel/ptrace.c b/arch/ppc64/kernel/ptrace.c index 9f8c6087ae56..2993f108d96d 100644 --- a/arch/ppc64/kernel/ptrace.c +++ b/arch/ppc64/kernel/ptrace.c | |||
@@ -305,6 +305,8 @@ static void do_syscall_trace(void) | |||
305 | 305 | ||
306 | void do_syscall_trace_enter(struct pt_regs *regs) | 306 | void do_syscall_trace_enter(struct pt_regs *regs) |
307 | { | 307 | { |
308 | secure_computing(regs->gpr[0]); | ||
309 | |||
308 | if (test_thread_flag(TIF_SYSCALL_TRACE) | 310 | if (test_thread_flag(TIF_SYSCALL_TRACE) |
309 | && (current->ptrace & PT_PTRACED)) | 311 | && (current->ptrace & PT_PTRACED)) |
310 | do_syscall_trace(); | 312 | do_syscall_trace(); |
@@ -320,8 +322,6 @@ void do_syscall_trace_enter(struct pt_regs *regs) | |||
320 | 322 | ||
321 | void do_syscall_trace_leave(struct pt_regs *regs) | 323 | void do_syscall_trace_leave(struct pt_regs *regs) |
322 | { | 324 | { |
323 | secure_computing(regs->gpr[0]); | ||
324 | |||
325 | if (unlikely(current->audit_context)) | 325 | if (unlikely(current->audit_context)) |
326 | audit_syscall_exit(current, | 326 | audit_syscall_exit(current, |
327 | (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, | 327 | (regs->ccr&0x1000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, |
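Moving secure_computing() from do_syscall_trace_leave to do_syscall_trace_enter means the seccomp check sees the syscall number before the syscall runs rather than after, which is the only ordering that can actually block a call. A sketch of an entry-side check under that idea; the allowed-list, numbers and termination behaviour here are invented for illustration:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical strict-mode filter: only these syscall numbers pass. */
static const int allowed[] = { 3 /* read */, 4 /* write */, 1 /* exit */ };

static void secure_computing(int nr)
{
	for (unsigned i = 0; i < sizeof(allowed) / sizeof(allowed[0]); i++)
		if (allowed[i] == nr)
			return;
	fprintf(stderr, "seccomp: syscall %d rejected before execution\n", nr);
	exit(1);
}

static void syscall_enter(int nr)
{
	secure_computing(nr);			/* check first ...        */
	printf("syscall %d allowed\n", nr);	/* ... then let it run    */
}

int main(void)
{
	syscall_enter(4);
	syscall_enter(5);	/* terminates here */
	return 0;
}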
diff --git a/arch/ppc64/kernel/ras.c b/arch/ppc64/kernel/ras.c index 1c4c796b212b..3c00f7bfc1b5 100644 --- a/arch/ppc64/kernel/ras.c +++ b/arch/ppc64/kernel/ras.c | |||
@@ -47,7 +47,6 @@ | |||
47 | #include <asm/cache.h> | 47 | #include <asm/cache.h> |
48 | #include <asm/prom.h> | 48 | #include <asm/prom.h> |
49 | #include <asm/ptrace.h> | 49 | #include <asm/ptrace.h> |
50 | #include <asm/iSeries/LparData.h> | ||
51 | #include <asm/machdep.h> | 50 | #include <asm/machdep.h> |
52 | #include <asm/rtas.h> | 51 | #include <asm/rtas.h> |
53 | #include <asm/ppcdebug.h> | 52 | #include <asm/ppcdebug.h> |
diff --git a/arch/ppc64/kernel/rtas-proc.c b/arch/ppc64/kernel/rtas-proc.c index 28b1f1521f21..1f3ff860fdf0 100644 --- a/arch/ppc64/kernel/rtas-proc.c +++ b/arch/ppc64/kernel/rtas-proc.c | |||
@@ -371,11 +371,11 @@ static ssize_t ppc_rtas_progress_write(struct file *file, | |||
371 | /* Lets see if the user passed hexdigits */ | 371 | /* Lets see if the user passed hexdigits */ |
372 | hex = simple_strtoul(progress_led, NULL, 10); | 372 | hex = simple_strtoul(progress_led, NULL, 10); |
373 | 373 | ||
374 | ppc_md.progress ((char *)progress_led, hex); | 374 | rtas_progress ((char *)progress_led, hex); |
375 | return count; | 375 | return count; |
376 | 376 | ||
377 | /* clear the line */ | 377 | /* clear the line */ |
378 | /* ppc_md.progress(" ", 0xffff);*/ | 378 | /* rtas_progress(" ", 0xffff);*/ |
379 | } | 379 | } |
380 | /* ****************************************************************** */ | 380 | /* ****************************************************************** */ |
381 | static int ppc_rtas_progress_show(struct seq_file *m, void *v) | 381 | static int ppc_rtas_progress_show(struct seq_file *m, void *v) |
diff --git a/arch/ppc64/kernel/rtas.c b/arch/ppc64/kernel/rtas.c index 5575603def27..5e8eb33b8e54 100644 --- a/arch/ppc64/kernel/rtas.c +++ b/arch/ppc64/kernel/rtas.c | |||
@@ -91,6 +91,123 @@ call_rtas_display_status_delay(unsigned char c) | |||
91 | } | 91 | } |
92 | } | 92 | } |
93 | 93 | ||
94 | void | ||
95 | rtas_progress(char *s, unsigned short hex) | ||
96 | { | ||
97 | struct device_node *root; | ||
98 | int width, *p; | ||
99 | char *os; | ||
100 | static int display_character, set_indicator; | ||
101 | static int display_width, display_lines, *row_width, form_feed; | ||
102 | static DEFINE_SPINLOCK(progress_lock); | ||
103 | static int current_line; | ||
104 | static int pending_newline = 0; /* did last write end with unprinted newline? */ | ||
105 | |||
106 | if (!rtas.base) | ||
107 | return; | ||
108 | |||
109 | if (display_width == 0) { | ||
110 | display_width = 0x10; | ||
111 | if ((root = find_path_device("/rtas"))) { | ||
112 | if ((p = (unsigned int *)get_property(root, | ||
113 | "ibm,display-line-length", NULL))) | ||
114 | display_width = *p; | ||
115 | if ((p = (unsigned int *)get_property(root, | ||
116 | "ibm,form-feed", NULL))) | ||
117 | form_feed = *p; | ||
118 | if ((p = (unsigned int *)get_property(root, | ||
119 | "ibm,display-number-of-lines", NULL))) | ||
120 | display_lines = *p; | ||
121 | row_width = (unsigned int *)get_property(root, | ||
122 | "ibm,display-truncation-length", NULL); | ||
123 | } | ||
124 | display_character = rtas_token("display-character"); | ||
125 | set_indicator = rtas_token("set-indicator"); | ||
126 | } | ||
127 | |||
128 | if (display_character == RTAS_UNKNOWN_SERVICE) { | ||
129 | /* use hex display if available */ | ||
130 | if (set_indicator != RTAS_UNKNOWN_SERVICE) | ||
131 | rtas_call(set_indicator, 3, 1, NULL, 6, 0, hex); | ||
132 | return; | ||
133 | } | ||
134 | |||
135 | spin_lock(&progress_lock); | ||
136 | |||
137 | /* | ||
138 | * Last write ended with newline, but we didn't print it since | ||
139 | * it would just clear the bottom line of output. Print it now | ||
140 | * instead. | ||
141 | * | ||
142 | * If no newline is pending and form feed is supported, clear the | ||
143 | * display with a form feed; otherwise, print a CR to start output | ||
144 | * at the beginning of the line. | ||
145 | */ | ||
146 | if (pending_newline) { | ||
147 | rtas_call(display_character, 1, 1, NULL, '\r'); | ||
148 | rtas_call(display_character, 1, 1, NULL, '\n'); | ||
149 | pending_newline = 0; | ||
150 | } else { | ||
151 | current_line = 0; | ||
152 | if (form_feed) | ||
153 | rtas_call(display_character, 1, 1, NULL, | ||
154 | (char)form_feed); | ||
155 | else | ||
156 | rtas_call(display_character, 1, 1, NULL, '\r'); | ||
157 | } | ||
158 | |||
159 | if (row_width) | ||
160 | width = row_width[current_line]; | ||
161 | else | ||
162 | width = display_width; | ||
163 | os = s; | ||
164 | while (*os) { | ||
165 | if (*os == '\n' || *os == '\r') { | ||
166 | /* If newline is the last character, save it | ||
167 | * until next call to avoid bumping up the | ||
168 | * display output. | ||
169 | */ | ||
170 | if (*os == '\n' && !os[1]) { | ||
171 | pending_newline = 1; | ||
172 | current_line++; | ||
173 | if (current_line > display_lines-1) | ||
174 | current_line = display_lines-1; | ||
175 | spin_unlock(&progress_lock); | ||
176 | return; | ||
177 | } | ||
178 | |||
179 | /* RTAS wants CR-LF, not just LF */ | ||
180 | |||
181 | if (*os == '\n') { | ||
182 | rtas_call(display_character, 1, 1, NULL, '\r'); | ||
183 | rtas_call(display_character, 1, 1, NULL, '\n'); | ||
184 | } else { | ||
185 | /* CR might be used to re-draw a line, so we'll | ||
186 | * leave it alone and not add LF. | ||
187 | */ | ||
188 | rtas_call(display_character, 1, 1, NULL, *os); | ||
189 | } | ||
190 | |||
191 | if (row_width) | ||
192 | width = row_width[current_line]; | ||
193 | else | ||
194 | width = display_width; | ||
195 | } else { | ||
196 | width--; | ||
197 | rtas_call(display_character, 1, 1, NULL, *os); | ||
198 | } | ||
199 | |||
200 | os++; | ||
201 | |||
202 | /* if we overwrite the screen length */ | ||
203 | if (width <= 0) | ||
204 | while ((*os != 0) && (*os != '\n') && (*os != '\r')) | ||
205 | os++; | ||
206 | } | ||
207 | |||
208 | spin_unlock(&progress_lock); | ||
209 | } | ||
210 | |||
94 | int | 211 | int |
95 | rtas_token(const char *service) | 212 | rtas_token(const char *service) |
96 | { | 213 | { |
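The rewritten rtas_progress above consults ibm,display-line-length, ibm,display-number-of-lines, ibm,form-feed and the per-row ibm,display-truncation-length array, falling back to a 16-character line when /rtas carries none of them. The width-selection rule on its own (property values in main are examples):

#include <stdio.h>

/* Pick the usable width for a given display line: the per-row
 * truncation array wins when present, else the global line length,
 * else the 0x10 fallback used when /rtas has no such property. */
static int width_for_line(const int *row_width, int display_width, int line)
{
	if (row_width)
		return row_width[line];
	return display_width ? display_width : 0x10;
}

int main(void)
{
	int rows[] = { 16, 12 };	/* example per-row truncation lengths */

	printf("%d\n", width_for_line(rows, 16, 1));	/* 12 */
	printf("%d\n", width_for_line(NULL, 20, 0));	/* 20 */
	printf("%d\n", width_for_line(NULL, 0, 0));	/* 16 */
	return 0;
}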
@@ -425,8 +542,8 @@ rtas_flash_firmware(void) | |||
425 | 542 | ||
426 | printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size); | 543 | printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size); |
427 | printk(KERN_ALERT "FLASH: performing flash and reboot\n"); | 544 | printk(KERN_ALERT "FLASH: performing flash and reboot\n"); |
428 | ppc_md.progress("Flashing \n", 0x0); | 545 | rtas_progress("Flashing \n", 0x0); |
429 | ppc_md.progress("Please Wait... ", 0x0); | 546 | rtas_progress("Please Wait... ", 0x0); |
430 | printk(KERN_ALERT "FLASH: this will take several minutes. Do not power off!\n"); | 547 | printk(KERN_ALERT "FLASH: this will take several minutes. Do not power off!\n"); |
431 | status = rtas_call(update_token, 1, 1, NULL, rtas_block_list); | 548 | status = rtas_call(update_token, 1, 1, NULL, rtas_block_list); |
432 | switch (status) { /* should only get "bad" status */ | 549 | switch (status) { /* should only get "bad" status */ |
diff --git a/arch/ppc64/kernel/rtas_pci.c b/arch/ppc64/kernel/rtas_pci.c new file mode 100644 index 000000000000..1048817befb8 --- /dev/null +++ b/arch/ppc64/kernel/rtas_pci.c | |||
@@ -0,0 +1,495 @@ | |||
1 | /* | ||
2 | * arch/ppc64/kernel/rtas_pci.c | ||
3 | * | ||
4 | * Copyright (C) 2001 Dave Engebretsen, IBM Corporation | ||
5 | * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM | ||
6 | * | ||
7 | * RTAS specific routines for PCI. | ||
8 | * | ||
9 | * Based on code from pci.c, chrp_pci.c and pSeries_pci.c | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2 of the License, or | ||
14 | * (at your option) any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
24 | */ | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/threads.h> | ||
28 | #include <linux/pci.h> | ||
29 | #include <linux/string.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/bootmem.h> | ||
32 | |||
33 | #include <asm/io.h> | ||
34 | #include <asm/pgtable.h> | ||
35 | #include <asm/irq.h> | ||
36 | #include <asm/prom.h> | ||
37 | #include <asm/machdep.h> | ||
38 | #include <asm/pci-bridge.h> | ||
39 | #include <asm/iommu.h> | ||
40 | #include <asm/rtas.h> | ||
41 | |||
42 | #include "mpic.h" | ||
43 | #include "pci.h" | ||
44 | |||
45 | /* RTAS tokens */ | ||
46 | static int read_pci_config; | ||
47 | static int write_pci_config; | ||
48 | static int ibm_read_pci_config; | ||
49 | static int ibm_write_pci_config; | ||
50 | |||
51 | static int config_access_valid(struct device_node *dn, int where) | ||
52 | { | ||
53 | if (where < 256) | ||
54 | return 1; | ||
55 | if (where < 4096 && dn->pci_ext_config_space) | ||
56 | return 1; | ||
57 | |||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val) | ||
62 | { | ||
63 | int returnval = -1; | ||
64 | unsigned long buid, addr; | ||
65 | int ret; | ||
66 | |||
67 | if (!dn) | ||
68 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
69 | if (!config_access_valid(dn, where)) | ||
70 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
71 | |||
72 | addr = ((where & 0xf00) << 20) | (dn->busno << 16) | | ||
73 | (dn->devfn << 8) | (where & 0xff); | ||
74 | buid = dn->phb->buid; | ||
75 | if (buid) { | ||
76 | ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval, | ||
77 | addr, buid >> 32, buid & 0xffffffff, size); | ||
78 | } else { | ||
79 | ret = rtas_call(read_pci_config, 2, 2, &returnval, addr, size); | ||
80 | } | ||
81 | *val = returnval; | ||
82 | |||
83 | if (ret) | ||
84 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
85 | |||
86 | if (returnval == EEH_IO_ERROR_VALUE(size) | ||
87 | && eeh_dn_check_failure (dn, NULL)) | ||
88 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
89 | |||
90 | return PCIBIOS_SUCCESSFUL; | ||
91 | } | ||
92 | |||
93 | static int rtas_pci_read_config(struct pci_bus *bus, | ||
94 | unsigned int devfn, | ||
95 | int where, int size, u32 *val) | ||
96 | { | ||
97 | struct device_node *busdn, *dn; | ||
98 | |||
99 | if (bus->self) | ||
100 | busdn = pci_device_to_OF_node(bus->self); | ||
101 | else | ||
102 | busdn = bus->sysdata; /* must be a phb */ | ||
103 | |||
104 | /* Search only direct children of the bus */ | ||
105 | for (dn = busdn->child; dn; dn = dn->sibling) | ||
106 | if (dn->devfn == devfn) | ||
107 | return rtas_read_config(dn, where, size, val); | ||
108 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
109 | } | ||
110 | |||
111 | static int rtas_write_config(struct device_node *dn, int where, int size, u32 val) | ||
112 | { | ||
113 | unsigned long buid, addr; | ||
114 | int ret; | ||
115 | |||
116 | if (!dn) | ||
117 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
118 | if (!config_access_valid(dn, where)) | ||
119 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
120 | |||
121 | addr = ((where & 0xf00) << 20) | (dn->busno << 16) | | ||
122 | (dn->devfn << 8) | (where & 0xff); | ||
123 | buid = dn->phb->buid; | ||
124 | if (buid) { | ||
125 | ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr, buid >> 32, buid & 0xffffffff, size, (ulong) val); | ||
126 | } else { | ||
127 | ret = rtas_call(write_pci_config, 3, 1, NULL, addr, size, (ulong)val); | ||
128 | } | ||
129 | |||
130 | if (ret) | ||
131 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
132 | |||
133 | return PCIBIOS_SUCCESSFUL; | ||
134 | } | ||
135 | |||
136 | static int rtas_pci_write_config(struct pci_bus *bus, | ||
137 | unsigned int devfn, | ||
138 | int where, int size, u32 val) | ||
139 | { | ||
140 | struct device_node *busdn, *dn; | ||
141 | |||
142 | if (bus->self) | ||
143 | busdn = pci_device_to_OF_node(bus->self); | ||
144 | else | ||
145 | busdn = bus->sysdata; /* must be a phb */ | ||
146 | |||
147 | /* Search only direct children of the bus */ | ||
148 | for (dn = busdn->child; dn; dn = dn->sibling) | ||
149 | if (dn->devfn == devfn) | ||
150 | return rtas_write_config(dn, where, size, val); | ||
151 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
152 | } | ||
153 | |||
154 | struct pci_ops rtas_pci_ops = { | ||
155 | rtas_pci_read_config, | ||
156 | rtas_pci_write_config | ||
157 | }; | ||
158 | |||
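rtas_read_config/rtas_write_config above pack the config-space location into the single address argument passed to RTAS: the extended-register bits of 'where' go into the high nibble field, then bus, devfn and the low byte of the register. The packing as a standalone function, with an example device chosen only to show the bit layout:

#include <stdio.h>

/* Encode (bus, devfn, where) the way the rtas_call arguments above do:
 * bits 8-11 of 'where' land at bits 28-31, bus at 16-23, devfn at 8-15,
 * and the low byte of 'where' at bits 0-7. */
static unsigned long rtas_config_addr(unsigned int busno, unsigned int devfn,
				      unsigned int where)
{
	return ((where & 0xf00) << 20) | (busno << 16) |
	       (devfn << 8) | (where & 0xff);
}

int main(void)
{
	/* Bus 0x40, device 3 function 1 (devfn 0x19), register 0x104. */
	printf("0x%lx\n", rtas_config_addr(0x40, 0x19, 0x104));
	/* -> 0x10401904: extended bit set, bus 0x40, devfn 0x19, reg 0x04 */
	return 0;
}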
159 | int is_python(struct device_node *dev) | ||
160 | { | ||
161 | char *model = (char *)get_property(dev, "model", NULL); | ||
162 | |||
163 | if (model && strstr(model, "Python")) | ||
164 | return 1; | ||
165 | |||
166 | return 0; | ||
167 | } | ||
168 | |||
169 | static int get_phb_reg_prop(struct device_node *dev, | ||
170 | unsigned int addr_size_words, | ||
171 | struct reg_property64 *reg) | ||
172 | { | ||
173 | unsigned int *ui_ptr = NULL, len; | ||
174 | |||
175 | /* Found a PHB, now figure out where his registers are mapped. */ | ||
176 | ui_ptr = (unsigned int *)get_property(dev, "reg", &len); | ||
177 | if (ui_ptr == NULL) | ||
178 | return 1; | ||
179 | |||
180 | if (addr_size_words == 1) { | ||
181 | reg->address = ((struct reg_property32 *)ui_ptr)->address; | ||
182 | reg->size = ((struct reg_property32 *)ui_ptr)->size; | ||
183 | } else { | ||
184 | *reg = *((struct reg_property64 *)ui_ptr); | ||
185 | } | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | static void python_countermeasures(struct device_node *dev, | ||
191 | unsigned int addr_size_words) | ||
192 | { | ||
193 | struct reg_property64 reg_struct; | ||
194 | void __iomem *chip_regs; | ||
195 | volatile u32 val; | ||
196 | |||
197 | if (get_phb_reg_prop(dev, addr_size_words, ®_struct)) | ||
198 | return; | ||
199 | |||
200 | /* Python's register file is 1 MB in size. */ | ||
201 | chip_regs = ioremap(reg_struct.address & ~(0xfffffUL), 0x100000); | ||
202 | |||
203 | /* | ||
204 | * Firmware doesn't always clear this bit which is critical | ||
205 | * for good performance - Anton | ||
206 | */ | ||
207 | |||
208 | #define PRG_CL_RESET_VALID 0x00010000 | ||
209 | |||
210 | val = in_be32(chip_regs + 0xf6030); | ||
211 | if (val & PRG_CL_RESET_VALID) { | ||
212 | printk(KERN_INFO "Python workaround: "); | ||
213 | val &= ~PRG_CL_RESET_VALID; | ||
214 | out_be32(chip_regs + 0xf6030, val); | ||
215 | /* | ||
216 | * We must read it back for changes to | ||
217 | * take effect | ||
218 | */ | ||
219 | val = in_be32(chip_regs + 0xf6030); | ||
220 | printk("reg0: %x\n", val); | ||
221 | } | ||
222 | |||
223 | iounmap(chip_regs); | ||
224 | } | ||
225 | |||
226 | void __init init_pci_config_tokens (void) | ||
227 | { | ||
228 | read_pci_config = rtas_token("read-pci-config"); | ||
229 | write_pci_config = rtas_token("write-pci-config"); | ||
230 | ibm_read_pci_config = rtas_token("ibm,read-pci-config"); | ||
231 | ibm_write_pci_config = rtas_token("ibm,write-pci-config"); | ||
232 | } | ||
233 | |||
234 | unsigned long __devinit get_phb_buid (struct device_node *phb) | ||
235 | { | ||
236 | int addr_cells; | ||
237 | unsigned int *buid_vals; | ||
238 | unsigned int len; | ||
239 | unsigned long buid; | ||
240 | |||
241 | if (ibm_read_pci_config == -1) return 0; | ||
242 | |||
243 | /* PHB's will always be children of the root node, | ||
244 | * or so it is promised by the current firmware. */ | ||
245 | if (phb->parent == NULL) | ||
246 | return 0; | ||
247 | if (phb->parent->parent) | ||
248 | return 0; | ||
249 | |||
250 | buid_vals = (unsigned int *) get_property(phb, "reg", &len); | ||
251 | if (buid_vals == NULL) | ||
252 | return 0; | ||
253 | |||
254 | addr_cells = prom_n_addr_cells(phb); | ||
255 | if (addr_cells == 1) { | ||
256 | buid = (unsigned long) buid_vals[0]; | ||
257 | } else { | ||
258 | buid = (((unsigned long)buid_vals[0]) << 32UL) | | ||
259 | (((unsigned long)buid_vals[1]) & 0xffffffff); | ||
260 | } | ||
261 | return buid; | ||
262 | } | ||
263 | |||
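get_phb_buid above reads the PHB's reg property and builds the 64-bit bus unit ID from one or two 32-bit cells, depending on the parent's #address-cells. The cell-combining step in isolation, with example cell values:

#include <stdio.h>

/* Build a 64-bit BUID from 'reg' cells, as get_phb_buid does above. */
static unsigned long phb_buid(const unsigned int *cells, int addr_cells)
{
	if (addr_cells == 1)
		return cells[0];
	return ((unsigned long)cells[0] << 32) |
	       ((unsigned long)cells[1] & 0xffffffffUL);
}

int main(void)
{
	unsigned int reg[] = { 0x00000200, 0x30000000 };

	printf("0x%lx\n", phb_buid(reg, 2));	/* 0x20030000000 */
	printf("0x%lx\n", phb_buid(reg, 1));	/* 0x200 */
	return 0;
}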
264 | static int phb_set_bus_ranges(struct device_node *dev, | ||
265 | struct pci_controller *phb) | ||
266 | { | ||
267 | int *bus_range; | ||
268 | unsigned int len; | ||
269 | |||
270 | bus_range = (int *) get_property(dev, "bus-range", &len); | ||
271 | if (bus_range == NULL || len < 2 * sizeof(int)) { | ||
272 | return 1; | ||
273 | } | ||
274 | |||
275 | phb->first_busno = bus_range[0]; | ||
276 | phb->last_busno = bus_range[1]; | ||
277 | |||
278 | return 0; | ||
279 | } | ||
280 | |||
281 | static int __devinit setup_phb(struct device_node *dev, | ||
282 | struct pci_controller *phb, | ||
283 | unsigned int addr_size_words) | ||
284 | { | ||
285 | pci_setup_pci_controller(phb); | ||
286 | |||
287 | if (is_python(dev)) | ||
288 | python_countermeasures(dev, addr_size_words); | ||
289 | |||
290 | if (phb_set_bus_ranges(dev, phb)) | ||
291 | return 1; | ||
292 | |||
293 | phb->arch_data = dev; | ||
294 | phb->ops = &rtas_pci_ops; | ||
295 | phb->buid = get_phb_buid(dev); | ||
296 | |||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | static void __devinit add_linux_pci_domain(struct device_node *dev, | ||
301 | struct pci_controller *phb, | ||
302 | struct property *of_prop) | ||
303 | { | ||
304 | memset(of_prop, 0, sizeof(struct property)); | ||
305 | of_prop->name = "linux,pci-domain"; | ||
306 | of_prop->length = sizeof(phb->global_number); | ||
307 | of_prop->value = (unsigned char *)&of_prop[1]; | ||
308 | memcpy(of_prop->value, &phb->global_number, sizeof(phb->global_number)); | ||
309 | prom_add_property(dev, of_prop); | ||
310 | } | ||
311 | |||
312 | static struct pci_controller * __init alloc_phb(struct device_node *dev, | ||
313 | unsigned int addr_size_words) | ||
314 | { | ||
315 | struct pci_controller *phb; | ||
316 | struct property *of_prop; | ||
317 | |||
318 | phb = alloc_bootmem(sizeof(struct pci_controller)); | ||
319 | if (phb == NULL) | ||
320 | return NULL; | ||
321 | |||
322 | of_prop = alloc_bootmem(sizeof(struct property) + | ||
323 | sizeof(phb->global_number)); | ||
324 | if (!of_prop) | ||
325 | return NULL; | ||
326 | |||
327 | if (setup_phb(dev, phb, addr_size_words)) | ||
328 | return NULL; | ||
329 | |||
330 | add_linux_pci_domain(dev, phb, of_prop); | ||
331 | |||
332 | return phb; | ||
333 | } | ||
334 | |||
335 | static struct pci_controller * __devinit alloc_phb_dynamic(struct device_node *dev, unsigned int addr_size_words) | ||
336 | { | ||
337 | struct pci_controller *phb; | ||
338 | |||
339 | phb = (struct pci_controller *)kmalloc(sizeof(struct pci_controller), | ||
340 | GFP_KERNEL); | ||
341 | if (phb == NULL) | ||
342 | return NULL; | ||
343 | |||
344 | if (setup_phb(dev, phb, addr_size_words)) | ||
345 | return NULL; | ||
346 | |||
347 | phb->is_dynamic = 1; | ||
348 | |||
349 | /* TODO: linux,pci-domain? */ | ||
350 | |||
351 | return phb; | ||
352 | } | ||
353 | |||
354 | unsigned long __init find_and_init_phbs(void) | ||
355 | { | ||
356 | struct device_node *node; | ||
357 | struct pci_controller *phb; | ||
358 | unsigned int root_size_cells = 0; | ||
359 | unsigned int index; | ||
360 | unsigned int *opprop = NULL; | ||
361 | struct device_node *root = of_find_node_by_path("/"); | ||
362 | |||
363 | if (ppc64_interrupt_controller == IC_OPEN_PIC) { | ||
364 | opprop = (unsigned int *)get_property(root, | ||
365 | "platform-open-pic", NULL); | ||
366 | } | ||
367 | |||
368 | root_size_cells = prom_n_size_cells(root); | ||
369 | |||
370 | index = 0; | ||
371 | |||
372 | for (node = of_get_next_child(root, NULL); | ||
373 | node != NULL; | ||
374 | node = of_get_next_child(root, node)) { | ||
375 | if (node->type == NULL || strcmp(node->type, "pci") != 0) | ||
376 | continue; | ||
377 | |||
378 | phb = alloc_phb(node, root_size_cells); | ||
379 | if (!phb) | ||
380 | continue; | ||
381 | |||
382 | pci_process_bridge_OF_ranges(phb, node); | ||
383 | pci_setup_phb_io(phb, index == 0); | ||
384 | #ifdef CONFIG_PPC_PSERIES | ||
385 | if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) { | ||
386 | int addr = root_size_cells * (index + 2) - 1; | ||
387 | mpic_assign_isu(pSeries_mpic, index, opprop[addr]); | ||
388 | } | ||
389 | #endif | ||
390 | index++; | ||
391 | } | ||
392 | |||
393 | of_node_put(root); | ||
394 | pci_devs_phb_init(); | ||
395 | |||
396 | /* | ||
397 | * pci_probe_only and pci_assign_all_buses can be set via properties | ||
398 | * in the /chosen node. | ||
399 | */ | ||
400 | if (of_chosen) { | ||
401 | int *prop; | ||
402 | |||
403 | prop = (int *)get_property(of_chosen, "linux,pci-probe-only", | ||
404 | NULL); | ||
405 | if (prop) | ||
406 | pci_probe_only = *prop; | ||
407 | |||
408 | prop = (int *)get_property(of_chosen, | ||
409 | "linux,pci-assign-all-buses", NULL); | ||
410 | if (prop) | ||
411 | pci_assign_all_buses = *prop; | ||
412 | } | ||
413 | |||
414 | return 0; | ||
415 | } | ||
416 | |||
417 | struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn) | ||
418 | { | ||
419 | struct device_node *root = of_find_node_by_path("/"); | ||
420 | unsigned int root_size_cells = 0; | ||
421 | struct pci_controller *phb; | ||
422 | struct pci_bus *bus; | ||
423 | int primary; | ||
424 | |||
425 | root_size_cells = prom_n_size_cells(root); | ||
426 | |||
427 | primary = list_empty(&hose_list); | ||
428 | phb = alloc_phb_dynamic(dn, root_size_cells); | ||
429 | if (!phb) | ||
430 | return NULL; | ||
431 | |||
432 | pci_process_bridge_OF_ranges(phb, dn); | ||
433 | |||
434 | pci_setup_phb_io_dynamic(phb, primary); | ||
435 | of_node_put(root); | ||
436 | |||
437 | pci_devs_phb_init_dynamic(phb); | ||
438 | phb->last_busno = 0xff; | ||
439 | bus = pci_scan_bus(phb->first_busno, phb->ops, phb->arch_data); | ||
440 | phb->bus = bus; | ||
441 | phb->last_busno = bus->subordinate; | ||
442 | |||
443 | return phb; | ||
444 | } | ||
445 | EXPORT_SYMBOL(init_phb_dynamic); | ||
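init_phb_dynamic() is exported for the dynamic PHB add path (e.g. DLPAR-style hotplug). A minimal caller sketch, assuming the new PHB's device node has already appeared in the device tree; the node path is left as a parameter because any concrete value here would be hypothetical:

	/* Hypothetical caller: look up a newly added PHB node and register it. */
	static struct pci_controller *example_add_phb(const char *path)
	{
		struct device_node *dn;
		struct pci_controller *phb;

		dn = of_find_node_by_path(path);
		if (dn == NULL)
			return NULL;

		phb = init_phb_dynamic(dn);
		of_node_put(dn);
		return phb;
	}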
446 | |||
447 | /* RPA-specific bits for removing PHBs */ | ||
448 | int pcibios_remove_root_bus(struct pci_controller *phb) | ||
449 | { | ||
450 | struct pci_bus *b = phb->bus; | ||
451 | struct resource *res; | ||
452 | int rc, i; | ||
453 | |||
454 | res = b->resource[0]; | ||
455 | if (!res->flags) { | ||
456 | printk(KERN_ERR "%s: no IO resource for PHB %s\n", __FUNCTION__, | ||
457 | b->name); | ||
458 | return 1; | ||
459 | } | ||
460 | |||
461 | rc = unmap_bus_range(b); | ||
462 | if (rc) { | ||
463 | printk(KERN_ERR "%s: failed to unmap IO on bus %s\n", | ||
464 | __FUNCTION__, b->name); | ||
465 | return 1; | ||
466 | } | ||
467 | |||
468 | if (release_resource(res)) { | ||
469 | printk(KERN_ERR "%s: failed to release IO on bus %s\n", | ||
470 | __FUNCTION__, b->name); | ||
471 | return 1; | ||
472 | } | ||
473 | |||
474 | for (i = 1; i < 3; ++i) { | ||
475 | res = b->resource[i]; | ||
476 | if (!res->flags && i == 0) { | ||
477 | printk(KERN_ERR "%s: no MEM resource for PHB %s\n", | ||
478 | __FUNCTION__, b->name); | ||
479 | return 1; | ||
480 | } | ||
481 | if (res->flags && release_resource(res)) { | ||
482 | printk(KERN_ERR | ||
483 | "%s: failed to release IO %d on bus %s\n", | ||
484 | __FUNCTION__, i, b->name); | ||
485 | return 1; | ||
486 | } | ||
487 | } | ||
488 | |||
489 | list_del(&phb->list_node); | ||
490 | if (phb->is_dynamic) | ||
491 | kfree(phb); | ||
492 | |||
493 | return 0; | ||
494 | } | ||
495 | EXPORT_SYMBOL(pcibios_remove_root_bus); | ||
diff --git a/arch/ppc64/kernel/rtasd.c b/arch/ppc64/kernel/rtasd.c index ff65dc33320e..b0c3b829fe47 100644 --- a/arch/ppc64/kernel/rtasd.c +++ b/arch/ppc64/kernel/rtasd.c | |||
@@ -440,7 +440,7 @@ static int rtasd(void *unused) | |||
440 | goto error; | 440 | goto error; |
441 | } | 441 | } |
442 | 442 | ||
443 | printk(KERN_ERR "RTAS daemon started\n"); | 443 | printk(KERN_INFO "RTAS daemon started\n"); |
444 | 444 | ||
445 | DEBUG("will sleep for %d jiffies\n", (HZ*60/rtas_event_scan_rate) / 2); | 445 | DEBUG("will sleep for %d jiffies\n", (HZ*60/rtas_event_scan_rate) / 2); |
446 | 446 | ||
@@ -485,7 +485,7 @@ static int __init rtas_init(void) | |||
485 | /* No RTAS, only warn if we are on a pSeries box */ | 485 | /* No RTAS, only warn if we are on a pSeries box */ |
486 | if (rtas_token("event-scan") == RTAS_UNKNOWN_SERVICE) { | 486 | if (rtas_token("event-scan") == RTAS_UNKNOWN_SERVICE) { |
487 | if (systemcfg->platform & PLATFORM_PSERIES) | 487 | if (systemcfg->platform & PLATFORM_PSERIES) |
488 | printk(KERN_ERR "rtasd: no event-scan on system\n"); | 488 | printk(KERN_INFO "rtasd: no event-scan on system\n"); |
489 | return 1; | 489 | return 1; |
490 | } | 490 | } |
491 | 491 | ||
diff --git a/arch/ppc64/kernel/rtc.c b/arch/ppc64/kernel/rtc.c index 67989055a9fe..d729fefa0df5 100644 --- a/arch/ppc64/kernel/rtc.c +++ b/arch/ppc64/kernel/rtc.c | |||
@@ -42,10 +42,8 @@ | |||
42 | #include <asm/time.h> | 42 | #include <asm/time.h> |
43 | #include <asm/rtas.h> | 43 | #include <asm/rtas.h> |
44 | 44 | ||
45 | #include <asm/iSeries/LparData.h> | ||
46 | #include <asm/iSeries/mf.h> | 45 | #include <asm/iSeries/mf.h> |
47 | #include <asm/machdep.h> | 46 | #include <asm/machdep.h> |
48 | #include <asm/iSeries/ItSpCommArea.h> | ||
49 | 47 | ||
50 | extern int piranha_simulator; | 48 | extern int piranha_simulator; |
51 | 49 | ||
@@ -303,7 +301,7 @@ void iSeries_get_boot_time(struct rtc_time *tm) | |||
303 | #ifdef CONFIG_PPC_RTAS | 301 | #ifdef CONFIG_PPC_RTAS |
304 | #define MAX_RTC_WAIT 5000 /* 5 sec */ | 302 | #define MAX_RTC_WAIT 5000 /* 5 sec */ |
305 | #define RTAS_CLOCK_BUSY (-2) | 303 | #define RTAS_CLOCK_BUSY (-2) |
306 | void pSeries_get_boot_time(struct rtc_time *rtc_tm) | 304 | void rtas_get_boot_time(struct rtc_time *rtc_tm) |
307 | { | 305 | { |
308 | int ret[8]; | 306 | int ret[8]; |
309 | int error, wait_time; | 307 | int error, wait_time; |
@@ -338,7 +336,7 @@ void pSeries_get_boot_time(struct rtc_time *rtc_tm) | |||
338 | * and if a delay is needed to read the clock. In this case we just | 336 | * and if a delay is needed to read the clock. In this case we just |
339 | * silently return without updating rtc_tm. | 337 | * silently return without updating rtc_tm. |
340 | */ | 338 | */ |
341 | void pSeries_get_rtc_time(struct rtc_time *rtc_tm) | 339 | void rtas_get_rtc_time(struct rtc_time *rtc_tm) |
342 | { | 340 | { |
343 | int ret[8]; | 341 | int ret[8]; |
344 | int error, wait_time; | 342 | int error, wait_time; |
@@ -373,7 +371,7 @@ void pSeries_get_rtc_time(struct rtc_time *rtc_tm) | |||
373 | rtc_tm->tm_year = ret[0] - 1900; | 371 | rtc_tm->tm_year = ret[0] - 1900; |
374 | } | 372 | } |
375 | 373 | ||
376 | int pSeries_set_rtc_time(struct rtc_time *tm) | 374 | int rtas_set_rtc_time(struct rtc_time *tm) |
377 | { | 375 | { |
378 | int error, wait_time; | 376 | int error, wait_time; |
379 | unsigned long max_wait_tb; | 377 | unsigned long max_wait_tb; |
diff --git a/arch/ppc64/kernel/setup.c b/arch/ppc64/kernel/setup.c index dce198d39328..d5e4866e9ac2 100644 --- a/arch/ppc64/kernel/setup.c +++ b/arch/ppc64/kernel/setup.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #include <asm/smp.h> | 41 | #include <asm/smp.h> |
42 | #include <asm/elf.h> | 42 | #include <asm/elf.h> |
43 | #include <asm/machdep.h> | 43 | #include <asm/machdep.h> |
44 | #include <asm/iSeries/LparData.h> | ||
45 | #include <asm/paca.h> | 44 | #include <asm/paca.h> |
46 | #include <asm/ppcdebug.h> | 45 | #include <asm/ppcdebug.h> |
47 | #include <asm/time.h> | 46 | #include <asm/time.h> |
@@ -57,6 +56,8 @@ | |||
57 | #include <asm/cache.h> | 56 | #include <asm/cache.h> |
58 | #include <asm/page.h> | 57 | #include <asm/page.h> |
59 | #include <asm/mmu.h> | 58 | #include <asm/mmu.h> |
59 | #include <asm/lmb.h> | ||
60 | #include <asm/iSeries/ItLpNaca.h> | ||
60 | 61 | ||
61 | #ifdef DEBUG | 62 | #ifdef DEBUG |
62 | #define DBG(fmt...) udbg_printf(fmt) | 63 | #define DBG(fmt...) udbg_printf(fmt) |
@@ -343,6 +344,7 @@ static void __init setup_cpu_maps(void) | |||
343 | extern struct machdep_calls pSeries_md; | 344 | extern struct machdep_calls pSeries_md; |
344 | extern struct machdep_calls pmac_md; | 345 | extern struct machdep_calls pmac_md; |
345 | extern struct machdep_calls maple_md; | 346 | extern struct machdep_calls maple_md; |
347 | extern struct machdep_calls bpa_md; | ||
346 | 348 | ||
347 | /* Ultimately, stuff them in an elf section like initcalls... */ | 349 | /* Ultimately, stuff them in an elf section like initcalls... */ |
348 | static struct machdep_calls __initdata *machines[] = { | 350 | static struct machdep_calls __initdata *machines[] = { |
@@ -355,6 +357,9 @@ static struct machdep_calls __initdata *machines[] = { | |||
355 | #ifdef CONFIG_PPC_MAPLE | 357 | #ifdef CONFIG_PPC_MAPLE |
356 | &maple_md, | 358 | &maple_md, |
357 | #endif /* CONFIG_PPC_MAPLE */ | 359 | #endif /* CONFIG_PPC_MAPLE */ |
360 | #ifdef CONFIG_PPC_BPA | ||
361 | &bpa_md, | ||
362 | #endif | ||
358 | NULL | 363 | NULL |
359 | }; | 364 | }; |
360 | 365 | ||
@@ -672,37 +677,52 @@ void __init setup_system(void) | |||
672 | DBG(" <- setup_system()\n"); | 677 | DBG(" <- setup_system()\n"); |
673 | } | 678 | } |
674 | 679 | ||
675 | 680 | /* also used by kexec */ | |
676 | void machine_restart(char *cmd) | 681 | void machine_shutdown(void) |
677 | { | 682 | { |
678 | if (ppc_md.nvram_sync) | 683 | if (ppc_md.nvram_sync) |
679 | ppc_md.nvram_sync(); | 684 | ppc_md.nvram_sync(); |
680 | ppc_md.restart(cmd); | ||
681 | } | 685 | } |
682 | 686 | ||
687 | void machine_restart(char *cmd) | ||
688 | { | ||
689 | machine_shutdown(); | ||
690 | ppc_md.restart(cmd); | ||
691 | #ifdef CONFIG_SMP | ||
692 | smp_send_stop(); | ||
693 | #endif | ||
694 | printk(KERN_EMERG "System Halted, OK to turn off power\n"); | ||
695 | local_irq_disable(); | ||
696 | while (1) ; | ||
697 | } | ||
683 | EXPORT_SYMBOL(machine_restart); | 698 | EXPORT_SYMBOL(machine_restart); |
684 | 699 | ||
685 | void machine_power_off(void) | 700 | void machine_power_off(void) |
686 | { | 701 | { |
687 | if (ppc_md.nvram_sync) | 702 | machine_shutdown(); |
688 | ppc_md.nvram_sync(); | ||
689 | ppc_md.power_off(); | 703 | ppc_md.power_off(); |
704 | #ifdef CONFIG_SMP | ||
705 | smp_send_stop(); | ||
706 | #endif | ||
707 | printk(KERN_EMERG "System Halted, OK to turn off power\n"); | ||
708 | local_irq_disable(); | ||
709 | while (1) ; | ||
690 | } | 710 | } |
691 | |||
692 | EXPORT_SYMBOL(machine_power_off); | 711 | EXPORT_SYMBOL(machine_power_off); |
693 | 712 | ||
694 | void machine_halt(void) | 713 | void machine_halt(void) |
695 | { | 714 | { |
696 | if (ppc_md.nvram_sync) | 715 | machine_shutdown(); |
697 | ppc_md.nvram_sync(); | ||
698 | ppc_md.halt(); | 716 | ppc_md.halt(); |
717 | #ifdef CONFIG_SMP | ||
718 | smp_send_stop(); | ||
719 | #endif | ||
720 | printk(KERN_EMERG "System Halted, OK to turn off power\n"); | ||
721 | local_irq_disable(); | ||
722 | while (1) ; | ||
699 | } | 723 | } |
700 | |||
701 | EXPORT_SYMBOL(machine_halt); | 724 | EXPORT_SYMBOL(machine_halt); |
702 | 725 | ||
703 | unsigned long ppc_proc_freq; | ||
704 | unsigned long ppc_tb_freq; | ||
705 | |||
706 | static int ppc64_panic_event(struct notifier_block *this, | 726 | static int ppc64_panic_event(struct notifier_block *this, |
707 | unsigned long event, void *ptr) | 727 | unsigned long event, void *ptr) |
708 | { | 728 | { |
@@ -1054,6 +1074,7 @@ void __init setup_arch(char **cmdline_p) | |||
1054 | 1074 | ||
1055 | /* set up the bootmem stuff with available memory */ | 1075 | /* set up the bootmem stuff with available memory */ |
1056 | do_init_bootmem(); | 1076 | do_init_bootmem(); |
1077 | sparse_init(); | ||
1057 | 1078 | ||
1058 | /* initialize the syscall map in systemcfg */ | 1079 | /* initialize the syscall map in systemcfg */ |
1059 | setup_syscall_map(); | 1080 | setup_syscall_map(); |
@@ -1078,11 +1099,11 @@ void __init setup_arch(char **cmdline_p) | |||
1078 | static void ppc64_do_msg(unsigned int src, const char *msg) | 1099 | static void ppc64_do_msg(unsigned int src, const char *msg) |
1079 | { | 1100 | { |
1080 | if (ppc_md.progress) { | 1101 | if (ppc_md.progress) { |
1081 | char buf[32]; | 1102 | char buf[128]; |
1082 | 1103 | ||
1083 | sprintf(buf, "%08x \n", src); | 1104 | sprintf(buf, "%08X\n", src); |
1084 | ppc_md.progress(buf, 0); | 1105 | ppc_md.progress(buf, 0); |
1085 | sprintf(buf, "%-16s", msg); | 1106 | snprintf(buf, 128, "%s", msg); |
1086 | ppc_md.progress(buf, 0); | 1107 | ppc_md.progress(buf, 0); |
1087 | } | 1108 | } |
1088 | } | 1109 | } |
@@ -1116,7 +1137,7 @@ void ppc64_dump_msg(unsigned int src, const char *msg) | |||
1116 | } | 1137 | } |
1117 | 1138 | ||
1118 | /* This should only be called on processor 0 during calibrate decr */ | 1139 | /* This should only be called on processor 0 during calibrate decr */ |
1119 | void setup_default_decr(void) | 1140 | void __init setup_default_decr(void) |
1120 | { | 1141 | { |
1121 | struct paca_struct *lpaca = get_paca(); | 1142 | struct paca_struct *lpaca = get_paca(); |
1122 | 1143 | ||
diff --git a/arch/ppc64/kernel/smp.c b/arch/ppc64/kernel/smp.c index 9ef5d36d6b25..2fcddfcb594d 100644 --- a/arch/ppc64/kernel/smp.c +++ b/arch/ppc64/kernel/smp.c | |||
@@ -71,7 +71,7 @@ void smp_call_function_interrupt(void); | |||
71 | 71 | ||
72 | int smt_enabled_at_boot = 1; | 72 | int smt_enabled_at_boot = 1; |
73 | 73 | ||
74 | #ifdef CONFIG_PPC_MULTIPLATFORM | 74 | #ifdef CONFIG_MPIC |
75 | void smp_mpic_message_pass(int target, int msg) | 75 | void smp_mpic_message_pass(int target, int msg) |
76 | { | 76 | { |
77 | /* make sure we're sending something that translates to an IPI */ | 77 | /* make sure we're sending something that translates to an IPI */ |
@@ -128,7 +128,7 @@ void __devinit smp_generic_kick_cpu(int nr) | |||
128 | smp_mb(); | 128 | smp_mb(); |
129 | } | 129 | } |
130 | 130 | ||
131 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | 131 | #endif /* CONFIG_MPIC */ |
132 | 132 | ||
133 | static void __init smp_space_timers(unsigned int max_cpus) | 133 | static void __init smp_space_timers(unsigned int max_cpus) |
134 | { | 134 | { |
diff --git a/arch/ppc64/kernel/spider-pic.c b/arch/ppc64/kernel/spider-pic.c new file mode 100644 index 000000000000..d5c9a02fb119 --- /dev/null +++ b/arch/ppc64/kernel/spider-pic.c | |||
@@ -0,0 +1,191 @@ | |||
1 | /* | ||
2 | * External Interrupt Controller on Spider South Bridge | ||
3 | * | ||
4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 | ||
5 | * | ||
6 | * Author: Arnd Bergmann <arndb@de.ibm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/interrupt.h> | ||
24 | #include <linux/irq.h> | ||
25 | |||
26 | #include <asm/pgtable.h> | ||
27 | #include <asm/prom.h> | ||
28 | #include <asm/io.h> | ||
29 | |||
30 | #include "bpa_iic.h" | ||
31 | |||
32 | /* register layout taken from Spider spec, table 7.4-4 */ | ||
33 | enum { | ||
34 | TIR_DEN = 0x004, /* Detection Enable Register */ | ||
35 | TIR_MSK = 0x084, /* Mask Level Register */ | ||
36 | TIR_EDC = 0x0c0, /* Edge Detection Clear Register */ | ||
37 | TIR_PNDA = 0x100, /* Pending Register A */ | ||
38 | TIR_PNDB = 0x104, /* Pending Register B */ | ||
39 | TIR_CS = 0x144, /* Current Status Register */ | ||
40 | TIR_LCSA = 0x150, /* Level Current Status Register A */ | ||
41 | TIR_LCSB = 0x154, /* Level Current Status Register B */ | ||
42 | TIR_LCSC = 0x158, /* Level Current Status Register C */ | ||
43 | TIR_LCSD = 0x15c, /* Level Current Status Register D */ | ||
44 | TIR_CFGA = 0x200, /* Setting Register A0 */ | ||
45 | TIR_CFGB = 0x204, /* Setting Register B0 */ | ||
46 | /* 0x208 ... 0x3ff Setting Register An/Bn */ | ||
47 | TIR_PPNDA = 0x400, /* Packet Pending Register A */ | ||
48 | TIR_PPNDB = 0x404, /* Packet Pending Register B */ | ||
49 | TIR_PIERA = 0x408, /* Packet Output Error Register A */ | ||
50 | TIR_PIERB = 0x40c, /* Packet Output Error Register B */ | ||
51 | TIR_PIEN = 0x444, /* Packet Output Enable Register */ | ||
52 | TIR_PIPND = 0x454, /* Packet Output Pending Register */ | ||
53 | TIRDID = 0x484, /* Spider Device ID Register */ | ||
54 | REISTIM = 0x500, /* Reissue Command Timeout Time Setting */ | ||
55 | REISTIMEN = 0x504, /* Reissue Command Timeout Setting */ | ||
56 | REISWAITEN = 0x508, /* Reissue Wait Control*/ | ||
57 | }; | ||
58 | |||
59 | static void __iomem *spider_pics[4]; | ||
60 | |||
61 | static void __iomem *spider_get_pic(int irq) | ||
62 | { | ||
63 | int node = irq / IIC_NODE_STRIDE; | ||
64 | irq %= IIC_NODE_STRIDE; | ||
65 | |||
66 | if (irq >= IIC_EXT_OFFSET && | ||
67 | irq < IIC_EXT_OFFSET + IIC_NUM_EXT && | ||
68 | spider_pics) | ||
69 | return spider_pics[node]; | ||
70 | return NULL; | ||
71 | } | ||
72 | |||
73 | static int spider_get_nr(unsigned int irq) | ||
74 | { | ||
75 | return (irq % IIC_NODE_STRIDE) - IIC_EXT_OFFSET; | ||
76 | } | ||
77 | |||
78 | static void __iomem *spider_get_irq_config(int irq) | ||
79 | { | ||
80 | void __iomem *pic; | ||
81 | pic = spider_get_pic(irq); | ||
82 | return pic + TIR_CFGA + 8 * spider_get_nr(irq); | ||
83 | } | ||
84 | |||
85 | static void spider_enable_irq(unsigned int irq) | ||
86 | { | ||
87 | void __iomem *cfg = spider_get_irq_config(irq); | ||
88 | irq = spider_get_nr(irq); | ||
89 | |||
90 | out_be32(cfg, in_be32(cfg) | 0x3107000eu); | ||
91 | out_be32(cfg + 4, in_be32(cfg + 4) | 0x00020000u | irq); | ||
92 | } | ||
93 | |||
94 | static void spider_disable_irq(unsigned int irq) | ||
95 | { | ||
96 | void __iomem *cfg = spider_get_irq_config(irq); | ||
97 | irq = spider_get_nr(irq); | ||
98 | |||
99 | out_be32(cfg, in_be32(cfg) & ~0x30000000u); | ||
100 | } | ||
101 | |||
102 | static unsigned int spider_startup_irq(unsigned int irq) | ||
103 | { | ||
104 | spider_enable_irq(irq); | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | static void spider_shutdown_irq(unsigned int irq) | ||
109 | { | ||
110 | spider_disable_irq(irq); | ||
111 | } | ||
112 | |||
113 | static void spider_end_irq(unsigned int irq) | ||
114 | { | ||
115 | spider_enable_irq(irq); | ||
116 | } | ||
117 | |||
118 | static void spider_ack_irq(unsigned int irq) | ||
119 | { | ||
120 | spider_disable_irq(irq); | ||
121 | iic_local_enable(); | ||
122 | } | ||
123 | |||
124 | static struct hw_interrupt_type spider_pic = { | ||
125 | .typename = " SPIDER ", | ||
126 | .startup = spider_startup_irq, | ||
127 | .shutdown = spider_shutdown_irq, | ||
128 | .enable = spider_enable_irq, | ||
129 | .disable = spider_disable_irq, | ||
130 | .ack = spider_ack_irq, | ||
131 | .end = spider_end_irq, | ||
132 | }; | ||
133 | |||
134 | |||
135 | int spider_get_irq(unsigned long int_pending) | ||
136 | { | ||
137 | void __iomem *regs = spider_get_pic(int_pending); | ||
138 | unsigned long cs; | ||
139 | int irq; | ||
140 | |||
141 | cs = in_be32(regs + TIR_CS); | ||
142 | |||
143 | irq = cs >> 24; | ||
144 | if (irq != 63) | ||
145 | return irq; | ||
146 | |||
147 | return -1; | ||
148 | } | ||
149 | |||
150 | void spider_init_IRQ(void) | ||
151 | { | ||
152 | int node; | ||
153 | struct device_node *dn; | ||
154 | unsigned int *property; | ||
155 | long spiderpic; | ||
156 | int n; | ||
157 | |||
158 | /* FIXME: detect multiple PICs as soon as the device tree has them */ | ||
159 | for (node = 0; node < 1; node++) { | ||
160 | dn = of_find_node_by_path("/"); | ||
161 | n = prom_n_addr_cells(dn); | ||
162 | property = (unsigned int *) get_property(dn, | ||
163 | "platform-spider-pic", NULL); | ||
164 | |||
165 | if (!property) | ||
166 | continue; | ||
167 | for (spiderpic = 0; n > 0; --n) | ||
168 | spiderpic = (spiderpic << 32) + *property++; | ||
169 | printk(KERN_DEBUG "SPIDER addr: %lx\n", spiderpic); | ||
170 | spider_pics[node] = __ioremap(spiderpic, 0x800, _PAGE_NO_CACHE); | ||
171 | for (n = 0; n < IIC_NUM_EXT; n++) { | ||
172 | int irq = n + IIC_EXT_OFFSET + node * IIC_NODE_STRIDE; | ||
173 | get_irq_desc(irq)->handler = &spider_pic; | ||
174 | |||
175 | /* do not mask any interrupts in the level mask register */ | ||
176 | out_be32(spider_pics[node] + TIR_MSK, 0x0); | ||
177 | |||
178 | /* disable edge detection clear */ | ||
179 | /* out_be32(spider_pics[node] + TIR_EDC, 0x0); */ | ||
180 | |||
181 | /* enable interrupt packets to be output */ | ||
182 | out_be32(spider_pics[node] + TIR_PIEN, | ||
183 | in_be32(spider_pics[node] + TIR_PIEN) | 0x1); | ||
184 | |||
185 | /* Enable the interrupt detection enable bit. Do this last! */ | ||
186 | out_be32(spider_pics[node] + TIR_DEN, | ||
187 | in_be32(spider_pics[node] + TIR_DEN) | 0x1); | ||
188 | |||
189 | } | ||
190 | } | ||
191 | } | ||
diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/ppc64/kernel/sys_ppc32.c index 9c8e317c598d..118436e8085a 100644 --- a/arch/ppc64/kernel/sys_ppc32.c +++ b/arch/ppc64/kernel/sys_ppc32.c | |||
@@ -741,6 +741,7 @@ asmlinkage int sys32_pciconfig_write(u32 bus, u32 dfn, u32 off, u32 len, u32 ubu | |||
741 | 741 | ||
742 | asmlinkage int sys32_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn) | 742 | asmlinkage int sys32_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn) |
743 | { | 743 | { |
744 | #ifdef CONFIG_PCI | ||
744 | struct pci_controller* hose; | 745 | struct pci_controller* hose; |
745 | struct list_head *ln; | 746 | struct list_head *ln; |
746 | struct pci_bus *bus = NULL; | 747 | struct pci_bus *bus = NULL; |
@@ -786,7 +787,7 @@ asmlinkage int sys32_pciconfig_iobase(u32 which, u32 in_bus, u32 in_devfn) | |||
786 | case IOBASE_ISA_MEM: | 787 | case IOBASE_ISA_MEM: |
787 | return -EINVAL; | 788 | return -EINVAL; |
788 | } | 789 | } |
789 | 790 | #endif /* CONFIG_PCI */ | |
790 | return -EOPNOTSUPP; | 791 | return -EOPNOTSUPP; |
791 | } | 792 | } |
792 | 793 | ||
diff --git a/arch/ppc64/kernel/sysfs.c b/arch/ppc64/kernel/sysfs.c index c8fa6569b2fd..2f704a2cafb1 100644 --- a/arch/ppc64/kernel/sysfs.c +++ b/arch/ppc64/kernel/sysfs.c | |||
@@ -400,7 +400,12 @@ static int __init topology_init(void) | |||
400 | struct cpu *c = &per_cpu(cpu_devices, cpu); | 400 | struct cpu *c = &per_cpu(cpu_devices, cpu); |
401 | 401 | ||
402 | #ifdef CONFIG_NUMA | 402 | #ifdef CONFIG_NUMA |
403 | parent = &node_devices[cpu_to_node(cpu)]; | 403 | /* The node to which a cpu belongs can't be known |
404 | * until the cpu is made present. | ||
405 | */ | ||
406 | parent = NULL; | ||
407 | if (cpu_present(cpu)) | ||
408 | parent = &node_devices[cpu_to_node(cpu)]; | ||
404 | #endif | 409 | #endif |
405 | /* | 410 | /* |
406 | * For now, we just see if the system supports making | 411 | * For now, we just see if the system supports making |
diff --git a/arch/ppc64/kernel/time.c b/arch/ppc64/kernel/time.c index 33364a7d2cd2..909462e1adea 100644 --- a/arch/ppc64/kernel/time.c +++ b/arch/ppc64/kernel/time.c | |||
@@ -91,6 +91,7 @@ unsigned long tb_to_xs; | |||
91 | unsigned tb_to_us; | 91 | unsigned tb_to_us; |
92 | unsigned long processor_freq; | 92 | unsigned long processor_freq; |
93 | DEFINE_SPINLOCK(rtc_lock); | 93 | DEFINE_SPINLOCK(rtc_lock); |
94 | EXPORT_SYMBOL_GPL(rtc_lock); | ||
94 | 95 | ||
95 | unsigned long tb_to_ns_scale; | 96 | unsigned long tb_to_ns_scale; |
96 | unsigned long tb_to_ns_shift; | 97 | unsigned long tb_to_ns_shift; |
@@ -98,7 +99,6 @@ unsigned long tb_to_ns_shift; | |||
98 | struct gettimeofday_struct do_gtod; | 99 | struct gettimeofday_struct do_gtod; |
99 | 100 | ||
100 | extern unsigned long wall_jiffies; | 101 | extern unsigned long wall_jiffies; |
101 | extern unsigned long lpevent_count; | ||
102 | extern int smp_tb_synchronized; | 102 | extern int smp_tb_synchronized; |
103 | 103 | ||
104 | extern struct timezone sys_tz; | 104 | extern struct timezone sys_tz; |
@@ -107,6 +107,9 @@ void ppc_adjtimex(void); | |||
107 | 107 | ||
108 | static unsigned adjusting_time = 0; | 108 | static unsigned adjusting_time = 0; |
109 | 109 | ||
110 | unsigned long ppc_proc_freq; | ||
111 | unsigned long ppc_tb_freq; | ||
112 | |||
110 | static __inline__ void timer_check_rtc(void) | 113 | static __inline__ void timer_check_rtc(void) |
111 | { | 114 | { |
112 | /* | 115 | /* |
@@ -363,11 +366,8 @@ int timer_interrupt(struct pt_regs * regs) | |||
363 | set_dec(next_dec); | 366 | set_dec(next_dec); |
364 | 367 | ||
365 | #ifdef CONFIG_PPC_ISERIES | 368 | #ifdef CONFIG_PPC_ISERIES |
366 | { | 369 | if (hvlpevent_is_pending()) |
367 | struct ItLpQueue *lpq = lpaca->lpqueue_ptr; | 370 | process_hvlpevents(regs); |
368 | if (lpq && ItLpQueue_isLpIntPending(lpq)) | ||
369 | lpevent_count += ItLpQueue_process(lpq, regs); | ||
370 | } | ||
371 | #endif | 371 | #endif |
372 | 372 | ||
373 | /* collect purr register values often, for accurate calculations */ | 373 | /* collect purr register values often, for accurate calculations */ |
@@ -472,6 +472,66 @@ int do_settimeofday(struct timespec *tv) | |||
472 | 472 | ||
473 | EXPORT_SYMBOL(do_settimeofday); | 473 | EXPORT_SYMBOL(do_settimeofday); |
474 | 474 | ||
475 | #if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_MAPLE) || defined(CONFIG_PPC_BPA) | ||
476 | void __init generic_calibrate_decr(void) | ||
477 | { | ||
478 | struct device_node *cpu; | ||
479 | struct div_result divres; | ||
480 | unsigned int *fp; | ||
481 | int node_found; | ||
482 | |||
483 | /* | ||
484 | * The cpu node should have a timebase-frequency property | ||
485 | * to tell us the rate at which the decrementer counts. | ||
486 | */ | ||
487 | cpu = of_find_node_by_type(NULL, "cpu"); | ||
488 | |||
489 | ppc_tb_freq = DEFAULT_TB_FREQ; /* hardcoded default */ | ||
490 | node_found = 0; | ||
491 | if (cpu != 0) { | ||
492 | fp = (unsigned int *)get_property(cpu, "timebase-frequency", | ||
493 | NULL); | ||
494 | if (fp != 0) { | ||
495 | node_found = 1; | ||
496 | ppc_tb_freq = *fp; | ||
497 | } | ||
498 | } | ||
499 | if (!node_found) | ||
500 | printk(KERN_ERR "WARNING: Estimating decrementer frequency " | ||
501 | "(not found)\n"); | ||
502 | |||
503 | ppc_proc_freq = DEFAULT_PROC_FREQ; | ||
504 | node_found = 0; | ||
505 | if (cpu != 0) { | ||
506 | fp = (unsigned int *)get_property(cpu, "clock-frequency", | ||
507 | NULL); | ||
508 | if (fp != 0) { | ||
509 | node_found = 1; | ||
510 | ppc_proc_freq = *fp; | ||
511 | } | ||
512 | } | ||
513 | if (!node_found) | ||
514 | printk(KERN_ERR "WARNING: Estimating processor frequency " | ||
515 | "(not found)\n"); | ||
516 | |||
517 | of_node_put(cpu); | ||
518 | |||
519 | printk(KERN_INFO "time_init: decrementer frequency = %lu.%.6lu MHz\n", | ||
520 | ppc_tb_freq/1000000, ppc_tb_freq%1000000); | ||
521 | printk(KERN_INFO "time_init: processor frequency = %lu.%.6lu MHz\n", | ||
522 | ppc_proc_freq/1000000, ppc_proc_freq%1000000); | ||
523 | |||
524 | tb_ticks_per_jiffy = ppc_tb_freq / HZ; | ||
525 | tb_ticks_per_sec = tb_ticks_per_jiffy * HZ; | ||
526 | tb_ticks_per_usec = ppc_tb_freq / 1000000; | ||
527 | tb_to_us = mulhwu_scale_factor(ppc_tb_freq, 1000000); | ||
528 | div128_by_32(1024*1024, 0, tb_ticks_per_sec, &divres); | ||
529 | tb_to_xs = divres.result_low; | ||
530 | |||
531 | setup_default_decr(); | ||
532 | } | ||
533 | #endif | ||
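As a worked example of the conversions generic_calibrate_decr() derives (the numbers below are illustrative assumptions, not values taken from this patch): with a timebase-frequency of 512000000 and HZ of 1000, tb_ticks_per_jiffy is 512000 and tb_ticks_per_usec is 512.

	/* Illustrative arithmetic only; 512 MHz timebase and HZ=1000 are assumptions. */
	#include <stdio.h>

	int main(void)
	{
		unsigned long ppc_tb_freq = 512000000UL;	/* assumed timebase-frequency */
		unsigned long hz = 1000UL;			/* assumed HZ */
		unsigned long tb_ticks_per_jiffy = ppc_tb_freq / hz;		/* 512000 */
		unsigned long tb_ticks_per_sec = tb_ticks_per_jiffy * hz;	/* 512000000 */
		unsigned long tb_ticks_per_usec = ppc_tb_freq / 1000000;	/* 512 */

		printf("%lu ticks/jiffy, %lu ticks/s, %lu ticks/us\n",
		       tb_ticks_per_jiffy, tb_ticks_per_sec, tb_ticks_per_usec);
		return 0;
	}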
534 | |||
475 | void __init time_init(void) | 535 | void __init time_init(void) |
476 | { | 536 | { |
477 | /* This function is only called on the boot processor */ | 537 | /* This function is only called on the boot processor */ |
diff --git a/arch/ppc64/kernel/traps.c b/arch/ppc64/kernel/traps.c index 7e52cb2605e0..a8d5e83ee89f 100644 --- a/arch/ppc64/kernel/traps.c +++ b/arch/ppc64/kernel/traps.c | |||
@@ -126,6 +126,10 @@ int die(const char *str, struct pt_regs *regs, long err) | |||
126 | printk("POWERMAC "); | 126 | printk("POWERMAC "); |
127 | nl = 1; | 127 | nl = 1; |
128 | break; | 128 | break; |
129 | case PLATFORM_BPA: | ||
130 | printk("BPA "); | ||
131 | nl = 1; | ||
132 | break; | ||
129 | } | 133 | } |
130 | if (nl) | 134 | if (nl) |
131 | printk("\n"); | 135 | printk("\n"); |
diff --git a/arch/ppc64/kernel/vio.c b/arch/ppc64/kernel/vio.c index cdd830cb2768..0c0ba71ac0e8 100644 --- a/arch/ppc64/kernel/vio.c +++ b/arch/ppc64/kernel/vio.c | |||
@@ -41,20 +41,25 @@ static const struct vio_device_id *vio_match_device( | |||
41 | static struct iommu_table *vio_build_iommu_table(struct vio_dev *); | 41 | static struct iommu_table *vio_build_iommu_table(struct vio_dev *); |
42 | static int vio_num_address_cells; | 42 | static int vio_num_address_cells; |
43 | #endif | 43 | #endif |
44 | static struct vio_dev *vio_bus_device; /* fake "parent" device */ | ||
45 | |||
46 | #ifdef CONFIG_PPC_ISERIES | 44 | #ifdef CONFIG_PPC_ISERIES |
47 | static struct vio_dev *__init vio_register_device_iseries(char *type, | ||
48 | uint32_t unit_num); | ||
49 | |||
50 | static struct iommu_table veth_iommu_table; | 45 | static struct iommu_table veth_iommu_table; |
51 | static struct iommu_table vio_iommu_table; | 46 | static struct iommu_table vio_iommu_table; |
52 | 47 | #endif | |
53 | static struct vio_dev _vio_dev = { | 48 | static struct vio_dev vio_bus_device = { /* fake "parent" device */ |
49 | .name = vio_bus_device.dev.bus_id, | ||
50 | .type = "", | ||
51 | #ifdef CONFIG_PPC_ISERIES | ||
54 | .iommu_table = &vio_iommu_table, | 52 | .iommu_table = &vio_iommu_table, |
55 | .dev.bus = &vio_bus_type | 53 | #endif |
54 | .dev.bus_id = "vio", | ||
55 | .dev.bus = &vio_bus_type, | ||
56 | }; | 56 | }; |
57 | struct device *iSeries_vio_dev = &_vio_dev.dev; | 57 | |
58 | #ifdef CONFIG_PPC_ISERIES | ||
59 | static struct vio_dev *__init vio_register_device_iseries(char *type, | ||
60 | uint32_t unit_num); | ||
61 | |||
62 | struct device *iSeries_vio_dev = &vio_bus_device.dev; | ||
58 | EXPORT_SYMBOL(iSeries_vio_dev); | 63 | EXPORT_SYMBOL(iSeries_vio_dev); |
59 | 64 | ||
60 | #define device_is_compatible(a, b) 1 | 65 | #define device_is_compatible(a, b) 1 |
@@ -260,18 +265,10 @@ static int __init vio_bus_init(void) | |||
260 | } | 265 | } |
261 | 266 | ||
262 | /* the fake parent of all vio devices, just to give us a nice directory */ | 267 | /* the fake parent of all vio devices, just to give us a nice directory */ |
263 | vio_bus_device = kmalloc(sizeof(struct vio_dev), GFP_KERNEL); | 268 | err = device_register(&vio_bus_device.dev); |
264 | if (!vio_bus_device) { | ||
265 | return 1; | ||
266 | } | ||
267 | memset(vio_bus_device, 0, sizeof(struct vio_dev)); | ||
268 | strcpy(vio_bus_device->dev.bus_id, "vio"); | ||
269 | |||
270 | err = device_register(&vio_bus_device->dev); | ||
271 | if (err) { | 269 | if (err) { |
272 | printk(KERN_WARNING "%s: device_register returned %i\n", __FUNCTION__, | 270 | printk(KERN_WARNING "%s: device_register returned %i\n", __FUNCTION__, |
273 | err); | 271 | err); |
274 | kfree(vio_bus_device); | ||
275 | return err; | 272 | return err; |
276 | } | 273 | } |
277 | 274 | ||
@@ -300,7 +297,7 @@ static void __devinit vio_dev_release(struct device *dev) | |||
300 | } | 297 | } |
301 | 298 | ||
302 | #ifdef CONFIG_PPC_PSERIES | 299 | #ifdef CONFIG_PPC_PSERIES |
303 | static ssize_t viodev_show_devspec(struct device *dev, char *buf) | 300 | static ssize_t viodev_show_devspec(struct device *dev, struct device_attribute *attr, char *buf) |
304 | { | 301 | { |
305 | struct device_node *of_node = dev->platform_data; | 302 | struct device_node *of_node = dev->platform_data; |
306 | 303 | ||
@@ -309,7 +306,7 @@ static ssize_t viodev_show_devspec(struct device *dev, char *buf) | |||
309 | DEVICE_ATTR(devspec, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_devspec, NULL); | 306 | DEVICE_ATTR(devspec, S_IRUSR | S_IRGRP | S_IROTH, viodev_show_devspec, NULL); |
310 | #endif | 307 | #endif |
311 | 308 | ||
312 | static ssize_t viodev_show_name(struct device *dev, char *buf) | 309 | static ssize_t viodev_show_name(struct device *dev, struct device_attribute *attr, char *buf) |
313 | { | 310 | { |
314 | return sprintf(buf, "%s\n", to_vio_dev(dev)->name); | 311 | return sprintf(buf, "%s\n", to_vio_dev(dev)->name); |
315 | } | 312 | } |
@@ -326,7 +323,7 @@ static struct vio_dev * __devinit vio_register_device_common( | |||
326 | viodev->unit_address = unit_address; | 323 | viodev->unit_address = unit_address; |
327 | viodev->iommu_table = iommu_table; | 324 | viodev->iommu_table = iommu_table; |
328 | /* init generic 'struct device' fields: */ | 325 | /* init generic 'struct device' fields: */ |
329 | viodev->dev.parent = &vio_bus_device->dev; | 326 | viodev->dev.parent = &vio_bus_device.dev; |
330 | viodev->dev.bus = &vio_bus_type; | 327 | viodev->dev.bus = &vio_bus_type; |
331 | viodev->dev.release = vio_dev_release; | 328 | viodev->dev.release = vio_dev_release; |
332 | 329 | ||
@@ -636,5 +633,3 @@ struct bus_type vio_bus_type = { | |||
636 | .name = "vio", | 633 | .name = "vio", |
637 | .match = vio_bus_match, | 634 | .match = vio_bus_match, |
638 | }; | 635 | }; |
639 | |||
640 | EXPORT_SYMBOL(vio_bus_type); | ||
diff --git a/arch/ppc64/kernel/viopath.c b/arch/ppc64/kernel/viopath.c index 2ed8ee075680..2a6c4f01c45e 100644 --- a/arch/ppc64/kernel/viopath.c +++ b/arch/ppc64/kernel/viopath.c | |||
@@ -43,12 +43,10 @@ | |||
43 | #include <asm/system.h> | 43 | #include <asm/system.h> |
44 | #include <asm/uaccess.h> | 44 | #include <asm/uaccess.h> |
45 | #include <asm/iSeries/HvTypes.h> | 45 | #include <asm/iSeries/HvTypes.h> |
46 | #include <asm/iSeries/LparData.h> | 46 | #include <asm/iSeries/ItExtVpdPanel.h> |
47 | #include <asm/iSeries/HvLpEvent.h> | 47 | #include <asm/iSeries/HvLpEvent.h> |
48 | #include <asm/iSeries/HvLpConfig.h> | 48 | #include <asm/iSeries/HvLpConfig.h> |
49 | #include <asm/iSeries/HvCallCfg.h> | ||
50 | #include <asm/iSeries/mf.h> | 49 | #include <asm/iSeries/mf.h> |
51 | #include <asm/iSeries/iSeries_proc.h> | ||
52 | #include <asm/iSeries/vio.h> | 50 | #include <asm/iSeries/vio.h> |
53 | 51 | ||
54 | /* Status of the path to each other partition in the system. | 52 | /* Status of the path to each other partition in the system. |
@@ -365,7 +363,7 @@ void vio_set_hostlp(void) | |||
365 | * while we're active | 363 | * while we're active |
366 | */ | 364 | */ |
367 | viopath_ourLp = HvLpConfig_getLpIndex(); | 365 | viopath_ourLp = HvLpConfig_getLpIndex(); |
368 | viopath_hostLp = HvCallCfg_getHostingLpIndex(viopath_ourLp); | 366 | viopath_hostLp = HvLpConfig_getHostingLpIndex(viopath_ourLp); |
369 | 367 | ||
370 | if (viopath_hostLp != HvLpIndexInvalid) | 368 | if (viopath_hostLp != HvLpIndexInvalid) |
371 | vio_setHandler(viomajorsubtype_config, handleConfig); | 369 | vio_setHandler(viomajorsubtype_config, handleConfig); |
@@ -487,7 +485,7 @@ int viopath_open(HvLpIndex remoteLp, int subtype, int numReq) | |||
487 | unsigned long flags; | 485 | unsigned long flags; |
488 | int tempNumAllocated; | 486 | int tempNumAllocated; |
489 | 487 | ||
490 | if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid)) | 488 | if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid)) |
491 | return -EINVAL; | 489 | return -EINVAL; |
492 | 490 | ||
493 | subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT; | 491 | subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT; |
@@ -558,7 +556,7 @@ int viopath_close(HvLpIndex remoteLp, int subtype, int numReq) | |||
558 | int numOpen; | 556 | int numOpen; |
559 | struct alloc_parms parms; | 557 | struct alloc_parms parms; |
560 | 558 | ||
561 | if ((remoteLp >= HvMaxArchitectedLps) || (remoteLp == HvLpIndexInvalid)) | 559 | if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid)) |
562 | return -EINVAL; | 560 | return -EINVAL; |
563 | 561 | ||
564 | subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT; | 562 | subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT; |
diff --git a/arch/ppc64/kernel/xics.c b/arch/ppc64/kernel/xics.c index 879f39b90a33..677c4450984a 100644 --- a/arch/ppc64/kernel/xics.c +++ b/arch/ppc64/kernel/xics.c | |||
@@ -647,6 +647,31 @@ static void xics_set_affinity(unsigned int virq, cpumask_t cpumask) | |||
647 | } | 647 | } |
648 | } | 648 | } |
649 | 649 | ||
650 | void xics_teardown_cpu(void) | ||
651 | { | ||
652 | int cpu = smp_processor_id(); | ||
653 | int status; | ||
654 | |||
655 | ops->cppr_info(cpu, 0x00); | ||
656 | iosync(); | ||
657 | |||
658 | /* | ||
659 | * we need to EOI the IPI if we got here from kexec down IPI | ||
660 | * | ||
661 | * xics doesn't care if we duplicate an EOI as long as we | ||
662 | * don't EOI and raise priority. | ||
663 | * | ||
664 | * probably need to check all the other interrupts too | ||
665 | * should we be flagging idle loop instead? | ||
666 | * or creating some task to be scheduled? | ||
667 | */ | ||
668 | ops->xirr_info_set(cpu, XICS_IPI); | ||
669 | |||
670 | status = rtas_set_indicator(GLOBAL_INTERRUPT_QUEUE, | ||
671 | (1UL << interrupt_server_size) - 1 - default_distrib_server, 0); | ||
672 | WARN_ON(status != 0); | ||
673 | } | ||
674 | |||
650 | #ifdef CONFIG_HOTPLUG_CPU | 675 | #ifdef CONFIG_HOTPLUG_CPU |
651 | 676 | ||
652 | /* Interrupts are disabled. */ | 677 | /* Interrupts are disabled. */ |
diff --git a/arch/ppc64/lib/Makefile b/arch/ppc64/lib/Makefile index bf7b5bbfc04e..76fbfa9f706f 100644 --- a/arch/ppc64/lib/Makefile +++ b/arch/ppc64/lib/Makefile | |||
@@ -12,7 +12,7 @@ lib-$(CONFIG_SMP) += locks.o | |||
12 | 12 | ||
13 | # e2a provides EBCDIC to ASCII conversions. | 13 | # e2a provides EBCDIC to ASCII conversions. |
14 | ifdef CONFIG_PPC_ISERIES | 14 | ifdef CONFIG_PPC_ISERIES |
15 | obj-$(CONFIG_PCI) += e2a.o | 15 | obj-y += e2a.o |
16 | endif | 16 | endif |
17 | 17 | ||
18 | lib-$(CONFIG_DEBUG_KERNEL) += sstep.o | 18 | lib-$(CONFIG_DEBUG_KERNEL) += sstep.o |
diff --git a/arch/ppc64/mm/Makefile b/arch/ppc64/mm/Makefile index ac522d57b2a7..3695d00d347f 100644 --- a/arch/ppc64/mm/Makefile +++ b/arch/ppc64/mm/Makefile | |||
@@ -6,6 +6,6 @@ EXTRA_CFLAGS += -mno-minimal-toc | |||
6 | 6 | ||
7 | obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o \ | 7 | obj-y := fault.o init.o imalloc.o hash_utils.o hash_low.o tlb.o \ |
8 | slb_low.o slb.o stab.o mmap.o | 8 | slb_low.o slb.o stab.o mmap.o |
9 | obj-$(CONFIG_DISCONTIGMEM) += numa.o | 9 | obj-$(CONFIG_NEED_MULTIPLE_NODES) += numa.o |
10 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o | 10 | obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o |
11 | obj-$(CONFIG_PPC_MULTIPLATFORM) += hash_native.o | 11 | obj-$(CONFIG_PPC_MULTIPLATFORM) += hash_native.o |
diff --git a/arch/ppc64/mm/hash_native.c b/arch/ppc64/mm/hash_native.c index 52b6b9305341..4fec05817d66 100644 --- a/arch/ppc64/mm/hash_native.c +++ b/arch/ppc64/mm/hash_native.c | |||
@@ -304,6 +304,50 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long va, | |||
304 | local_irq_restore(flags); | 304 | local_irq_restore(flags); |
305 | } | 305 | } |
306 | 306 | ||
307 | /* | ||
308 | * Clear all mappings on kexec. All cpus are in real mode (or they will | ||
309 | * be when they take an isi), and we are the only one left. We rely on our | ||
310 | * kernel mapping being 0xC0's and the hardware ignoring those two real bits. | ||
311 | * | ||
312 | * TODO: add batching support when enabled. Remember, no dynamic memory here, | ||
313 | * although there is the control page available... | ||
314 | */ | ||
315 | static void native_hpte_clear(void) | ||
316 | { | ||
317 | unsigned long slot, slots, flags; | ||
318 | HPTE *hptep = htab_address; | ||
319 | Hpte_dword0 dw0; | ||
320 | unsigned long pteg_count; | ||
321 | |||
322 | pteg_count = htab_hash_mask + 1; | ||
323 | |||
324 | local_irq_save(flags); | ||
325 | |||
326 | /* we take the tlbie lock and hold it. Some hardware will | ||
327 | * deadlock if we try to tlbie from two processors at once. | ||
328 | */ | ||
329 | spin_lock(&native_tlbie_lock); | ||
330 | |||
331 | slots = pteg_count * HPTES_PER_GROUP; | ||
332 | |||
333 | for (slot = 0; slot < slots; slot++, hptep++) { | ||
334 | /* | ||
335 | * We could lock the pte here, but we are the only cpu | ||
336 | * running, right? And for a crash dump we probably | ||
337 | * don't want to wait for a possibly bad cpu. | ||
338 | */ | ||
339 | dw0 = hptep->dw0.dw0; | ||
340 | |||
341 | if (dw0.v) { | ||
342 | hptep->dw0.dword0 = 0; | ||
343 | tlbie(slot2va(dw0.avpn, dw0.l, dw0.h, slot), dw0.l); | ||
344 | } | ||
345 | } | ||
346 | |||
347 | spin_unlock(&native_tlbie_lock); | ||
348 | local_irq_restore(flags); | ||
349 | } | ||
350 | |||
307 | static void native_flush_hash_range(unsigned long context, | 351 | static void native_flush_hash_range(unsigned long context, |
308 | unsigned long number, int local) | 352 | unsigned long number, int local) |
309 | { | 353 | { |
@@ -415,7 +459,8 @@ void hpte_init_native(void) | |||
415 | ppc_md.hpte_updatepp = native_hpte_updatepp; | 459 | ppc_md.hpte_updatepp = native_hpte_updatepp; |
416 | ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp; | 460 | ppc_md.hpte_updateboltedpp = native_hpte_updateboltedpp; |
417 | ppc_md.hpte_insert = native_hpte_insert; | 461 | ppc_md.hpte_insert = native_hpte_insert; |
418 | ppc_md.hpte_remove = native_hpte_remove; | 462 | ppc_md.hpte_remove = native_hpte_remove; |
463 | ppc_md.hpte_clear_all = native_hpte_clear; | ||
419 | if (tlb_batching_enabled()) | 464 | if (tlb_batching_enabled()) |
420 | ppc_md.flush_hash_range = native_flush_hash_range; | 465 | ppc_md.flush_hash_range = native_flush_hash_range; |
421 | htab_finish_init(); | 466 | htab_finish_init(); |
diff --git a/arch/ppc64/mm/hash_utils.c b/arch/ppc64/mm/hash_utils.c index 0a0f97008d02..1647b1c6f28e 100644 --- a/arch/ppc64/mm/hash_utils.c +++ b/arch/ppc64/mm/hash_utils.c | |||
@@ -195,7 +195,7 @@ void __init htab_initialize(void) | |||
195 | memset((void *)table, 0, htab_size_bytes); | 195 | memset((void *)table, 0, htab_size_bytes); |
196 | } | 196 | } |
197 | 197 | ||
198 | mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX; | 198 | mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX; |
199 | 199 | ||
200 | /* On U3 based machines, we need to reserve the DART area and | 200 | /* On U3 based machines, we need to reserve the DART area and |
201 | * _NOT_ map it to avoid cache paradoxes as it's remapped non | 201 | * _NOT_ map it to avoid cache paradoxes as it's remapped non |
@@ -310,10 +310,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
310 | 310 | ||
311 | vsid = get_vsid(mm->context.id, ea); | 311 | vsid = get_vsid(mm->context.id, ea); |
312 | break; | 312 | break; |
313 | case IO_REGION_ID: | ||
314 | mm = &ioremap_mm; | ||
315 | vsid = get_kernel_vsid(ea); | ||
316 | break; | ||
317 | case VMALLOC_REGION_ID: | 313 | case VMALLOC_REGION_ID: |
318 | mm = &init_mm; | 314 | mm = &init_mm; |
319 | vsid = get_kernel_vsid(ea); | 315 | vsid = get_kernel_vsid(ea); |
diff --git a/arch/ppc64/mm/hugetlbpage.c b/arch/ppc64/mm/hugetlbpage.c index d3bf86a5c1ad..fdcfe97c75c1 100644 --- a/arch/ppc64/mm/hugetlbpage.c +++ b/arch/ppc64/mm/hugetlbpage.c | |||
@@ -121,7 +121,7 @@ static pte_t *hugepte_alloc(struct mm_struct *mm, pud_t *dir, unsigned long addr | |||
121 | return hugepte_offset(dir, addr); | 121 | return hugepte_offset(dir, addr); |
122 | } | 122 | } |
123 | 123 | ||
124 | static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | 124 | pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) |
125 | { | 125 | { |
126 | pud_t *pud; | 126 | pud_t *pud; |
127 | 127 | ||
@@ -134,7 +134,7 @@ static pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) | |||
134 | return hugepte_offset(pud, addr); | 134 | return hugepte_offset(pud, addr); |
135 | } | 135 | } |
136 | 136 | ||
137 | static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) | 137 | pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) |
138 | { | 138 | { |
139 | pud_t *pud; | 139 | pud_t *pud; |
140 | 140 | ||
@@ -147,25 +147,6 @@ static pte_t *huge_pte_alloc(struct mm_struct *mm, unsigned long addr) | |||
147 | return hugepte_alloc(mm, pud, addr); | 147 | return hugepte_alloc(mm, pud, addr); |
148 | } | 148 | } |
149 | 149 | ||
150 | static void set_huge_pte(struct mm_struct *mm, struct vm_area_struct *vma, | ||
151 | unsigned long addr, struct page *page, | ||
152 | pte_t *ptep, int write_access) | ||
153 | { | ||
154 | pte_t entry; | ||
155 | |||
156 | add_mm_counter(mm, rss, HPAGE_SIZE / PAGE_SIZE); | ||
157 | if (write_access) { | ||
158 | entry = | ||
159 | pte_mkwrite(pte_mkdirty(mk_pte(page, vma->vm_page_prot))); | ||
160 | } else { | ||
161 | entry = pte_wrprotect(mk_pte(page, vma->vm_page_prot)); | ||
162 | } | ||
163 | entry = pte_mkyoung(entry); | ||
164 | entry = pte_mkhuge(entry); | ||
165 | |||
166 | set_pte_at(mm, addr, ptep, entry); | ||
167 | } | ||
168 | |||
169 | /* | 150 | /* |
170 | * This function checks for proper alignment of input addr and len parameters. | 151 | * This function checks for proper alignment of input addr and len parameters. |
171 | */ | 152 | */ |
@@ -259,80 +240,6 @@ int prepare_hugepage_range(unsigned long addr, unsigned long len) | |||
259 | return -EINVAL; | 240 | return -EINVAL; |
260 | } | 241 | } |
261 | 242 | ||
262 | int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, | ||
263 | struct vm_area_struct *vma) | ||
264 | { | ||
265 | pte_t *src_pte, *dst_pte, entry; | ||
266 | struct page *ptepage; | ||
267 | unsigned long addr = vma->vm_start; | ||
268 | unsigned long end = vma->vm_end; | ||
269 | int err = -ENOMEM; | ||
270 | |||
271 | while (addr < end) { | ||
272 | dst_pte = huge_pte_alloc(dst, addr); | ||
273 | if (!dst_pte) | ||
274 | goto out; | ||
275 | |||
276 | src_pte = huge_pte_offset(src, addr); | ||
277 | entry = *src_pte; | ||
278 | |||
279 | ptepage = pte_page(entry); | ||
280 | get_page(ptepage); | ||
281 | add_mm_counter(dst, rss, HPAGE_SIZE / PAGE_SIZE); | ||
282 | set_pte_at(dst, addr, dst_pte, entry); | ||
283 | |||
284 | addr += HPAGE_SIZE; | ||
285 | } | ||
286 | |||
287 | err = 0; | ||
288 | out: | ||
289 | return err; | ||
290 | } | ||
291 | |||
292 | int | ||
293 | follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, | ||
294 | struct page **pages, struct vm_area_struct **vmas, | ||
295 | unsigned long *position, int *length, int i) | ||
296 | { | ||
297 | unsigned long vpfn, vaddr = *position; | ||
298 | int remainder = *length; | ||
299 | |||
300 | WARN_ON(!is_vm_hugetlb_page(vma)); | ||
301 | |||
302 | vpfn = vaddr/PAGE_SIZE; | ||
303 | while (vaddr < vma->vm_end && remainder) { | ||
304 | if (pages) { | ||
305 | pte_t *pte; | ||
306 | struct page *page; | ||
307 | |||
308 | pte = huge_pte_offset(mm, vaddr); | ||
309 | |||
310 | /* hugetlb should be locked, and hence, prefaulted */ | ||
311 | WARN_ON(!pte || pte_none(*pte)); | ||
312 | |||
313 | page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)]; | ||
314 | |||
315 | WARN_ON(!PageCompound(page)); | ||
316 | |||
317 | get_page(page); | ||
318 | pages[i] = page; | ||
319 | } | ||
320 | |||
321 | if (vmas) | ||
322 | vmas[i] = vma; | ||
323 | |||
324 | vaddr += PAGE_SIZE; | ||
325 | ++vpfn; | ||
326 | --remainder; | ||
327 | ++i; | ||
328 | } | ||
329 | |||
330 | *length = remainder; | ||
331 | *position = vaddr; | ||
332 | |||
333 | return i; | ||
334 | } | ||
335 | |||
336 | struct page * | 243 | struct page * |
337 | follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) | 244 | follow_huge_addr(struct mm_struct *mm, unsigned long address, int write) |
338 | { | 245 | { |
@@ -363,89 +270,6 @@ follow_huge_pmd(struct mm_struct *mm, unsigned long address, | |||
363 | return NULL; | 270 | return NULL; |
364 | } | 271 | } |
365 | 272 | ||
366 | void unmap_hugepage_range(struct vm_area_struct *vma, | ||
367 | unsigned long start, unsigned long end) | ||
368 | { | ||
369 | struct mm_struct *mm = vma->vm_mm; | ||
370 | unsigned long addr; | ||
371 | pte_t *ptep; | ||
372 | struct page *page; | ||
373 | |||
374 | WARN_ON(!is_vm_hugetlb_page(vma)); | ||
375 | BUG_ON((start % HPAGE_SIZE) != 0); | ||
376 | BUG_ON((end % HPAGE_SIZE) != 0); | ||
377 | |||
378 | for (addr = start; addr < end; addr += HPAGE_SIZE) { | ||
379 | pte_t pte; | ||
380 | |||
381 | ptep = huge_pte_offset(mm, addr); | ||
382 | if (!ptep || pte_none(*ptep)) | ||
383 | continue; | ||
384 | |||
385 | pte = *ptep; | ||
386 | page = pte_page(pte); | ||
387 | pte_clear(mm, addr, ptep); | ||
388 | |||
389 | put_page(page); | ||
390 | } | ||
391 | add_mm_counter(mm, rss, -((end - start) >> PAGE_SHIFT)); | ||
392 | flush_tlb_pending(); | ||
393 | } | ||
394 | |||
395 | int hugetlb_prefault(struct address_space *mapping, struct vm_area_struct *vma) | ||
396 | { | ||
397 | struct mm_struct *mm = current->mm; | ||
398 | unsigned long addr; | ||
399 | int ret = 0; | ||
400 | |||
401 | WARN_ON(!is_vm_hugetlb_page(vma)); | ||
402 | BUG_ON((vma->vm_start % HPAGE_SIZE) != 0); | ||
403 | BUG_ON((vma->vm_end % HPAGE_SIZE) != 0); | ||
404 | |||
405 | spin_lock(&mm->page_table_lock); | ||
406 | for (addr = vma->vm_start; addr < vma->vm_end; addr += HPAGE_SIZE) { | ||
407 | unsigned long idx; | ||
408 | pte_t *pte = huge_pte_alloc(mm, addr); | ||
409 | struct page *page; | ||
410 | |||
411 | if (!pte) { | ||
412 | ret = -ENOMEM; | ||
413 | goto out; | ||
414 | } | ||
415 | if (! pte_none(*pte)) | ||
416 | continue; | ||
417 | |||
418 | idx = ((addr - vma->vm_start) >> HPAGE_SHIFT) | ||
419 | + (vma->vm_pgoff >> (HPAGE_SHIFT - PAGE_SHIFT)); | ||
420 | page = find_get_page(mapping, idx); | ||
421 | if (!page) { | ||
422 | /* charge the fs quota first */ | ||
423 | if (hugetlb_get_quota(mapping)) { | ||
424 | ret = -ENOMEM; | ||
425 | goto out; | ||
426 | } | ||
427 | page = alloc_huge_page(); | ||
428 | if (!page) { | ||
429 | hugetlb_put_quota(mapping); | ||
430 | ret = -ENOMEM; | ||
431 | goto out; | ||
432 | } | ||
433 | ret = add_to_page_cache(page, mapping, idx, GFP_ATOMIC); | ||
434 | if (! ret) { | ||
435 | unlock_page(page); | ||
436 | } else { | ||
437 | hugetlb_put_quota(mapping); | ||
438 | free_huge_page(page); | ||
439 | goto out; | ||
440 | } | ||
441 | } | ||
442 | set_huge_pte(mm, vma, addr, page, pte, vma->vm_flags & VM_WRITE); | ||
443 | } | ||
444 | out: | ||
445 | spin_unlock(&mm->page_table_lock); | ||
446 | return ret; | ||
447 | } | ||
448 | |||
449 | /* Because we have an exclusive hugepage region which lies within the | 273 | /* Because we have an exclusive hugepage region which lies within the |
450 | * normal user address space, we have to take special measures to make | 274 | * normal user address space, we have to take special measures to make |
451 | * non-huge mmap()s evade the hugepage reserved regions. */ | 275 | * non-huge mmap()s evade the hugepage reserved regions. */ |
@@ -468,7 +292,12 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, | |||
468 | && !is_hugepage_only_range(mm, addr,len)) | 292 | && !is_hugepage_only_range(mm, addr,len)) |
469 | return addr; | 293 | return addr; |
470 | } | 294 | } |
471 | start_addr = addr = mm->free_area_cache; | 295 | if (len > mm->cached_hole_size) { |
296 | start_addr = addr = mm->free_area_cache; | ||
297 | } else { | ||
298 | start_addr = addr = TASK_UNMAPPED_BASE; | ||
299 | mm->cached_hole_size = 0; | ||
300 | } | ||
472 | 301 | ||
473 | full_search: | 302 | full_search: |
474 | vma = find_vma(mm, addr); | 303 | vma = find_vma(mm, addr); |
@@ -492,6 +321,8 @@ full_search: | |||
492 | mm->free_area_cache = addr + len; | 321 | mm->free_area_cache = addr + len; |
493 | return addr; | 322 | return addr; |
494 | } | 323 | } |
324 | if (addr + mm->cached_hole_size < vma->vm_start) | ||
325 | mm->cached_hole_size = vma->vm_start - addr; | ||
495 | addr = vma->vm_end; | 326 | addr = vma->vm_end; |
496 | vma = vma->vm_next; | 327 | vma = vma->vm_next; |
497 | } | 328 | } |
@@ -499,6 +330,7 @@ full_search: | |||
499 | /* Make sure we didn't miss any holes */ | 330 | /* Make sure we didn't miss any holes */ |
500 | if (start_addr != TASK_UNMAPPED_BASE) { | 331 | if (start_addr != TASK_UNMAPPED_BASE) { |
501 | start_addr = addr = TASK_UNMAPPED_BASE; | 332 | start_addr = addr = TASK_UNMAPPED_BASE; |
333 | mm->cached_hole_size = 0; | ||
502 | goto full_search; | 334 | goto full_search; |
503 | } | 335 | } |
504 | return -ENOMEM; | 336 | return -ENOMEM; |
@@ -520,6 +352,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
520 | struct vm_area_struct *vma, *prev_vma; | 352 | struct vm_area_struct *vma, *prev_vma; |
521 | struct mm_struct *mm = current->mm; | 353 | struct mm_struct *mm = current->mm; |
522 | unsigned long base = mm->mmap_base, addr = addr0; | 354 | unsigned long base = mm->mmap_base, addr = addr0; |
355 | unsigned long largest_hole = mm->cached_hole_size; | ||
523 | int first_time = 1; | 356 | int first_time = 1; |
524 | 357 | ||
525 | /* requested length too big for entire address space */ | 358 | /* requested length too big for entire address space */ |
@@ -540,6 +373,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, | |||
540 | return addr; | 373 | return addr; |
541 | } | 374 | } |
542 | 375 | ||
376 | if (len <= largest_hole) { | ||
377 | largest_hole = 0; | ||
378 | mm->free_area_cache = base; | ||
379 | } | ||
543 | try_again: | 380 | try_again: |
544 | /* make sure it can fit in the remaining address space */ | 381 | /* make sure it can fit in the remaining address space */ |
545 | if (mm->free_area_cache < len) | 382 | if (mm->free_area_cache < len) |
@@ -568,13 +405,21 @@ hugepage_recheck: | |||
568 | * vma->vm_start, use it: | 405 | * vma->vm_start, use it: |
569 | */ | 406 | */ |
570 | if (addr+len <= vma->vm_start && | 407 | if (addr+len <= vma->vm_start && |
571 | (!prev_vma || (addr >= prev_vma->vm_end))) | 408 | (!prev_vma || (addr >= prev_vma->vm_end))) { |
572 | /* remember the address as a hint for next time */ | 409 | /* remember the address as a hint for next time */ |
573 | return (mm->free_area_cache = addr); | 410 | mm->cached_hole_size = largest_hole; |
574 | else | 411 | return (mm->free_area_cache = addr); |
412 | } else { | ||
575 | /* pull free_area_cache down to the first hole */ | 413 | /* pull free_area_cache down to the first hole */ |
576 | if (mm->free_area_cache == vma->vm_end) | 414 | if (mm->free_area_cache == vma->vm_end) { |
577 | mm->free_area_cache = vma->vm_start; | 415 | mm->free_area_cache = vma->vm_start; |
416 | mm->cached_hole_size = largest_hole; | ||
417 | } | ||
418 | } | ||
419 | |||
420 | /* remember the largest hole we saw so far */ | ||
421 | if (addr + largest_hole < vma->vm_start) | ||
422 | largest_hole = vma->vm_start - addr; | ||
578 | 423 | ||
579 | /* try just below the current vma->vm_start */ | 424 | /* try just below the current vma->vm_start */ |
580 | addr = vma->vm_start-len; | 425 | addr = vma->vm_start-len; |
@@ -587,6 +432,7 @@ fail: | |||
587 | */ | 432 | */ |
588 | if (first_time) { | 433 | if (first_time) { |
589 | mm->free_area_cache = base; | 434 | mm->free_area_cache = base; |
435 | largest_hole = 0; | ||
590 | first_time = 0; | 436 | first_time = 0; |
591 | goto try_again; | 437 | goto try_again; |
592 | } | 438 | } |
@@ -597,11 +443,13 @@ fail: | |||
597 | * allocations. | 443 | * allocations. |
598 | */ | 444 | */ |
599 | mm->free_area_cache = TASK_UNMAPPED_BASE; | 445 | mm->free_area_cache = TASK_UNMAPPED_BASE; |
446 | mm->cached_hole_size = ~0UL; | ||
600 | addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); | 447 | addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); |
601 | /* | 448 | /* |
602 | * Restore the topdown base: | 449 | * Restore the topdown base: |
603 | */ | 450 | */ |
604 | mm->free_area_cache = base; | 451 | mm->free_area_cache = base; |
452 | mm->cached_hole_size = ~0UL; | ||
605 | 453 | ||
606 | return addr; | 454 | return addr; |
607 | } | 455 | } |
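The hunks above are the heart of the allocator change: mm->cached_hole_size remembers the largest hole that was skipped during a scan, so a later request no larger than that hole may restart from the base instead of walking every vma again. Below is a minimal userspace sketch of that heuristic (bottom-up variant), assuming a hand-rolled vma list and made-up addresses; it illustrates the idea only, is not the kernel code, and does not insert the returned range back into the list.

/* Userspace sketch of the free_area_cache/cached_hole_size heuristic
 * (bottom-up variant).  All names and addresses here are made up. */
#include <stdio.h>

struct fake_vma { unsigned long start, end; };          /* stand-in for vm_area_struct */

static struct fake_vma vmas[] = {                       /* sorted, non-overlapping */
	{ 0x1000, 0x3000 }, { 0x4000, 0x9000 }, { 0xb000, 0x20000 },
};
#define NR_VMAS (sizeof(vmas) / sizeof(vmas[0]))

#define UNMAPPED_BASE 0x1000UL                          /* stand-in for TASK_UNMAPPED_BASE */
static unsigned long free_area_cache = UNMAPPED_BASE;   /* where the last scan stopped */
static unsigned long cached_hole_size;                  /* largest hole skipped below it */

static unsigned long get_unmapped(unsigned long len)
{
	unsigned long addr;
	unsigned int i;

	/* If a hole at least this big was skipped earlier, restart low. */
	if (len <= cached_hole_size) {
		cached_hole_size = 0;
		free_area_cache = UNMAPPED_BASE;
	}
	addr = free_area_cache;

	for (i = 0; i < NR_VMAS; i++) {
		if (addr + len <= vmas[i].start) {
			free_area_cache = addr + len;
			return addr;                    /* hole before this vma fits */
		}
		if (addr + cached_hole_size < vmas[i].start)
			cached_hole_size = vmas[i].start - addr;  /* biggest hole so far */
		if (addr < vmas[i].end)
			addr = vmas[i].end;
	}
	free_area_cache = addr + len;
	return addr;                                    /* above the last vma */
}

int main(void)
{
	/* 0x3000 fits nowhere below, so it lands at 0x20000; the scan records
	 * that the biggest hole it skipped was 0x2000 (0x9000..0xb000). */
	printf("%#lx\n", get_unmapped(0x3000));

	/* 0x1000 <= cached_hole_size, so the search may restart at the base,
	 * knowing a hole that big exists down there, and returns 0x3000. */
	printf("%#lx\n", get_unmapped(0x1000));
	return 0;
}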
diff --git a/arch/ppc64/mm/imalloc.c b/arch/ppc64/mm/imalloc.c index cb8727f3267a..b6e75b891ac0 100644 --- a/arch/ppc64/mm/imalloc.c +++ b/arch/ppc64/mm/imalloc.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/pgtable.h> | 15 | #include <asm/pgtable.h> |
16 | #include <asm/semaphore.h> | 16 | #include <asm/semaphore.h> |
17 | #include <asm/imalloc.h> | 17 | #include <asm/imalloc.h> |
18 | #include <asm/cacheflush.h> | ||
18 | 19 | ||
19 | static DECLARE_MUTEX(imlist_sem); | 20 | static DECLARE_MUTEX(imlist_sem); |
20 | struct vm_struct * imlist = NULL; | 21 | struct vm_struct * imlist = NULL; |
@@ -285,29 +286,32 @@ struct vm_struct * im_get_area(unsigned long v_addr, unsigned long size, | |||
285 | return area; | 286 | return area; |
286 | } | 287 | } |
287 | 288 | ||
288 | unsigned long im_free(void * addr) | 289 | void im_free(void * addr) |
289 | { | 290 | { |
290 | struct vm_struct **p, *tmp; | 291 | struct vm_struct **p, *tmp; |
291 | unsigned long ret_size = 0; | ||
292 | 292 | ||
293 | if (!addr) | 293 | if (!addr) |
294 | return ret_size; | 294 | return; |
295 | if ((PAGE_SIZE-1) & (unsigned long) addr) { | 295 | if ((unsigned long) addr & ~PAGE_MASK) { |
296 | printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr); | 296 | printk(KERN_ERR "Trying to %s bad address (%p)\n", __FUNCTION__, addr); |
297 | return ret_size; | 297 | return; |
298 | } | 298 | } |
299 | down(&imlist_sem); | 299 | down(&imlist_sem); |
300 | for (p = &imlist ; (tmp = *p) ; p = &tmp->next) { | 300 | for (p = &imlist ; (tmp = *p) ; p = &tmp->next) { |
301 | if (tmp->addr == addr) { | 301 | if (tmp->addr == addr) { |
302 | ret_size = tmp->size; | ||
303 | *p = tmp->next; | 302 | *p = tmp->next; |
303 | |||
304 | /* XXX: do we need the lock? */ | ||
305 | spin_lock(&init_mm.page_table_lock); | ||
306 | unmap_vm_area(tmp); | ||
307 | spin_unlock(&init_mm.page_table_lock); | ||
308 | |||
304 | kfree(tmp); | 309 | kfree(tmp); |
305 | up(&imlist_sem); | 310 | up(&imlist_sem); |
306 | return ret_size; | 311 | return; |
307 | } | 312 | } |
308 | } | 313 | } |
309 | up(&imlist_sem); | 314 | up(&imlist_sem); |
310 | printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__, | 315 | printk(KERN_ERR "Trying to %s nonexistent area (%p)\n", __FUNCTION__, |
311 | addr); | 316 | addr); |
312 | return ret_size; | ||
313 | } | 317 | } |
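Two things change in im_free() above: it now returns void and tears the mapping down itself via unmap_vm_area() under init_mm.page_table_lock, and the alignment test is spelled addr & ~PAGE_MASK (identical to the old (PAGE_SIZE-1) & addr, since PAGE_MASK is ~(PAGE_SIZE-1)). The list walk it keeps is the classic pointer-to-pointer unlink; here is a small standalone sketch of that idiom with made-up names, not the imalloc code itself.

/* The pointer-to-pointer unlink pattern that im_free() uses on imlist,
 * shown on a toy singly linked list.  Names here are invented. */
#include <stdio.h>
#include <stdlib.h>

struct node {
	void *addr;
	struct node *next;
};

static struct node *list;

static void list_push(void *addr)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		abort();
	n->addr = addr;
	n->next = list;
	list = n;
}

/* Walk with a pointer to the link itself, so unlinking needs no
 * special case for the head of the list. */
static int list_remove(void *addr)
{
	struct node **p, *tmp;

	for (p = &list; (tmp = *p); p = &tmp->next) {
		if (tmp->addr == addr) {
			*p = tmp->next;
			free(tmp);
			return 1;
		}
	}
	return 0;
}

int main(void)
{
	list_push((void *)0x1000);
	list_push((void *)0x2000);

	printf("removed head: %d\n", list_remove((void *)0x2000));
	printf("removed missing: %d\n", list_remove((void *)0x3000));
	return 0;
}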
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c index 4b42aff74d73..b50b3a446dbe 100644 --- a/arch/ppc64/mm/init.c +++ b/arch/ppc64/mm/init.c | |||
@@ -73,9 +73,6 @@ static unsigned long phbs_io_bot = PHBS_IO_BASE; | |||
73 | extern pgd_t swapper_pg_dir[]; | 73 | extern pgd_t swapper_pg_dir[]; |
74 | extern struct task_struct *current_set[NR_CPUS]; | 74 | extern struct task_struct *current_set[NR_CPUS]; |
75 | 75 | ||
76 | extern pgd_t ioremap_dir[]; | ||
77 | pgd_t * ioremap_pgd = (pgd_t *)&ioremap_dir; | ||
78 | |||
79 | unsigned long klimit = (unsigned long)_end; | 76 | unsigned long klimit = (unsigned long)_end; |
80 | 77 | ||
81 | unsigned long _SDR1=0; | 78 | unsigned long _SDR1=0; |
@@ -101,7 +98,7 @@ void show_mem(void) | |||
101 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); | 98 | printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); |
102 | for_each_pgdat(pgdat) { | 99 | for_each_pgdat(pgdat) { |
103 | for (i = 0; i < pgdat->node_spanned_pages; i++) { | 100 | for (i = 0; i < pgdat->node_spanned_pages; i++) { |
104 | page = pgdat->node_mem_map + i; | 101 | page = pgdat_page_nr(pgdat, i); |
105 | total++; | 102 | total++; |
106 | if (PageReserved(page)) | 103 | if (PageReserved(page)) |
107 | reserved++; | 104 | reserved++; |
@@ -137,69 +134,6 @@ void iounmap(volatile void __iomem *addr) | |||
137 | 134 | ||
138 | #else | 135 | #else |
139 | 136 | ||
140 | static void unmap_im_area_pte(pmd_t *pmd, unsigned long addr, | ||
141 | unsigned long end) | ||
142 | { | ||
143 | pte_t *pte; | ||
144 | |||
145 | pte = pte_offset_kernel(pmd, addr); | ||
146 | do { | ||
147 | pte_t ptent = ptep_get_and_clear(&ioremap_mm, addr, pte); | ||
148 | WARN_ON(!pte_none(ptent) && !pte_present(ptent)); | ||
149 | } while (pte++, addr += PAGE_SIZE, addr != end); | ||
150 | } | ||
151 | |||
152 | static inline void unmap_im_area_pmd(pud_t *pud, unsigned long addr, | ||
153 | unsigned long end) | ||
154 | { | ||
155 | pmd_t *pmd; | ||
156 | unsigned long next; | ||
157 | |||
158 | pmd = pmd_offset(pud, addr); | ||
159 | do { | ||
160 | next = pmd_addr_end(addr, end); | ||
161 | if (pmd_none_or_clear_bad(pmd)) | ||
162 | continue; | ||
163 | unmap_im_area_pte(pmd, addr, next); | ||
164 | } while (pmd++, addr = next, addr != end); | ||
165 | } | ||
166 | |||
167 | static inline void unmap_im_area_pud(pgd_t *pgd, unsigned long addr, | ||
168 | unsigned long end) | ||
169 | { | ||
170 | pud_t *pud; | ||
171 | unsigned long next; | ||
172 | |||
173 | pud = pud_offset(pgd, addr); | ||
174 | do { | ||
175 | next = pud_addr_end(addr, end); | ||
176 | if (pud_none_or_clear_bad(pud)) | ||
177 | continue; | ||
178 | unmap_im_area_pmd(pud, addr, next); | ||
179 | } while (pud++, addr = next, addr != end); | ||
180 | } | ||
181 | |||
182 | static void unmap_im_area(unsigned long addr, unsigned long end) | ||
183 | { | ||
184 | struct mm_struct *mm = &ioremap_mm; | ||
185 | unsigned long next; | ||
186 | pgd_t *pgd; | ||
187 | |||
188 | spin_lock(&mm->page_table_lock); | ||
189 | |||
190 | pgd = pgd_offset_i(addr); | ||
191 | flush_cache_vunmap(addr, end); | ||
192 | do { | ||
193 | next = pgd_addr_end(addr, end); | ||
194 | if (pgd_none_or_clear_bad(pgd)) | ||
195 | continue; | ||
196 | unmap_im_area_pud(pgd, addr, next); | ||
197 | } while (pgd++, addr = next, addr != end); | ||
198 | flush_tlb_kernel_range(start, end); | ||
199 | |||
200 | spin_unlock(&mm->page_table_lock); | ||
201 | } | ||
202 | |||
203 | /* | 137 | /* |
204 | * map_io_page currently only called by __ioremap | 138 | * map_io_page currently only called by __ioremap |
205 | * map_io_page adds an entry to the ioremap page table | 139 | * map_io_page adds an entry to the ioremap page table |
@@ -214,21 +148,21 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags) | |||
214 | unsigned long vsid; | 148 | unsigned long vsid; |
215 | 149 | ||
216 | if (mem_init_done) { | 150 | if (mem_init_done) { |
217 | spin_lock(&ioremap_mm.page_table_lock); | 151 | spin_lock(&init_mm.page_table_lock); |
218 | pgdp = pgd_offset_i(ea); | 152 | pgdp = pgd_offset_k(ea); |
219 | pudp = pud_alloc(&ioremap_mm, pgdp, ea); | 153 | pudp = pud_alloc(&init_mm, pgdp, ea); |
220 | if (!pudp) | 154 | if (!pudp) |
221 | return -ENOMEM; | 155 | return -ENOMEM; |
222 | pmdp = pmd_alloc(&ioremap_mm, pudp, ea); | 156 | pmdp = pmd_alloc(&init_mm, pudp, ea); |
223 | if (!pmdp) | 157 | if (!pmdp) |
224 | return -ENOMEM; | 158 | return -ENOMEM; |
225 | ptep = pte_alloc_kernel(&ioremap_mm, pmdp, ea); | 159 | ptep = pte_alloc_kernel(&init_mm, pmdp, ea); |
226 | if (!ptep) | 160 | if (!ptep) |
227 | return -ENOMEM; | 161 | return -ENOMEM; |
228 | pa = abs_to_phys(pa); | 162 | pa = abs_to_phys(pa); |
229 | set_pte_at(&ioremap_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, | 163 | set_pte_at(&init_mm, ea, ptep, pfn_pte(pa >> PAGE_SHIFT, |
230 | __pgprot(flags))); | 164 | __pgprot(flags))); |
231 | spin_unlock(&ioremap_mm.page_table_lock); | 165 | spin_unlock(&init_mm.page_table_lock); |
232 | } else { | 166 | } else { |
233 | unsigned long va, vpn, hash, hpteg; | 167 | unsigned long va, vpn, hash, hpteg; |
234 | 168 | ||
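map_io_page() now builds its mappings in init_mm with the standard pud_alloc()/pmd_alloc()/pte_alloc_kernel() descent, allocating any missing intermediate level on the way down before setting the PTE. The toy two-level table below sketches that allocate-on-demand pattern in plain C; the sizes and names are invented and it is not the kernel's page-table code.

/* Toy two-level table mirroring the allocate-on-demand descent in
 * map_io_page(): missing intermediate levels are created on the way
 * down, then the leaf slot is filled (like set_pte_at()). */
#include <stdio.h>
#include <stdlib.h>

#define TOP_BITS   4                            /* 16 top-level slots  */
#define LEAF_BITS  4                            /* 16 leaves per table */
#define LEAF_MASK  ((1u << LEAF_BITS) - 1)

struct leaf_table { unsigned long entry[1 << LEAF_BITS]; };
static struct leaf_table *top[1 << TOP_BITS];   /* the "pgd" */

static int map_entry(unsigned int index, unsigned long value)
{
	unsigned int hi = index >> LEAF_BITS;
	struct leaf_table *lt = top[hi];

	if (!lt) {                              /* like pud/pmd/pte_alloc: build the level */
		lt = calloc(1, sizeof(*lt));
		if (!lt)
			return -1;              /* -ENOMEM in the kernel version */
		top[hi] = lt;
	}
	lt->entry[index & LEAF_MASK] = value;   /* install the leaf entry */
	return 0;
}

int main(void)
{
	if (map_entry(0x13, 0xdeadbeef))
		return 1;
	printf("entry 0x13 = %lx\n", top[0x1]->entry[0x3]);
	return 0;
}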
@@ -267,13 +201,9 @@ static void __iomem * __ioremap_com(unsigned long addr, unsigned long pa, | |||
267 | 201 | ||
268 | for (i = 0; i < size; i += PAGE_SIZE) | 202 | for (i = 0; i < size; i += PAGE_SIZE) |
269 | if (map_io_page(ea+i, pa+i, flags)) | 203 | if (map_io_page(ea+i, pa+i, flags)) |
270 | goto failure; | 204 | return NULL; |
271 | 205 | ||
272 | return (void __iomem *) (ea + (addr & ~PAGE_MASK)); | 206 | return (void __iomem *) (ea + (addr & ~PAGE_MASK)); |
273 | failure: | ||
274 | if (mem_init_done) | ||
275 | unmap_im_area(ea, ea + size); | ||
276 | return NULL; | ||
277 | } | 207 | } |
278 | 208 | ||
279 | 209 | ||
@@ -381,19 +311,14 @@ int __ioremap_explicit(unsigned long pa, unsigned long ea, | |||
381 | */ | 311 | */ |
382 | void iounmap(volatile void __iomem *token) | 312 | void iounmap(volatile void __iomem *token) |
383 | { | 313 | { |
384 | unsigned long address, size; | ||
385 | void *addr; | 314 | void *addr; |
386 | 315 | ||
387 | if (!mem_init_done) | 316 | if (!mem_init_done) |
388 | return; | 317 | return; |
389 | 318 | ||
390 | addr = (void *) ((unsigned long __force) token & PAGE_MASK); | 319 | addr = (void *) ((unsigned long __force) token & PAGE_MASK); |
391 | |||
392 | if ((size = im_free(addr)) == 0) | ||
393 | return; | ||
394 | 320 | ||
395 | address = (unsigned long)addr; | 321 | im_free(addr); |
396 | unmap_im_area(address, address + size); | ||
397 | } | 322 | } |
398 | 323 | ||
399 | static int iounmap_subset_regions(unsigned long addr, unsigned long size) | 324 | static int iounmap_subset_regions(unsigned long addr, unsigned long size) |
@@ -606,7 +531,7 @@ EXPORT_SYMBOL(page_is_ram); | |||
606 | * Initialize the bootmem system and give it all the memory we | 531 | * Initialize the bootmem system and give it all the memory we |
607 | * have available. | 532 | * have available. |
608 | */ | 533 | */ |
609 | #ifndef CONFIG_DISCONTIGMEM | 534 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
610 | void __init do_init_bootmem(void) | 535 | void __init do_init_bootmem(void) |
611 | { | 536 | { |
612 | unsigned long i; | 537 | unsigned long i; |
@@ -628,12 +553,20 @@ void __init do_init_bootmem(void) | |||
628 | 553 | ||
629 | max_pfn = max_low_pfn; | 554 | max_pfn = max_low_pfn; |
630 | 555 | ||
631 | /* add all physical memory to the bootmem map. Also find the first */ | 556 | /* Add all physical memory to the bootmem map, mark each area |
557 | * present. | ||
558 | */ | ||
632 | for (i=0; i < lmb.memory.cnt; i++) { | 559 | for (i=0; i < lmb.memory.cnt; i++) { |
633 | unsigned long physbase, size; | 560 | unsigned long physbase, size; |
561 | unsigned long start_pfn, end_pfn; | ||
634 | 562 | ||
635 | physbase = lmb.memory.region[i].physbase; | 563 | physbase = lmb.memory.region[i].physbase; |
636 | size = lmb.memory.region[i].size; | 564 | size = lmb.memory.region[i].size; |
565 | |||
566 | start_pfn = physbase >> PAGE_SHIFT; | ||
567 | end_pfn = start_pfn + (size >> PAGE_SHIFT); | ||
568 | memory_present(0, start_pfn, end_pfn); | ||
569 | |||
637 | free_bootmem(physbase, size); | 570 | free_bootmem(physbase, size); |
638 | } | 571 | } |
639 | 572 | ||
@@ -672,7 +605,7 @@ void __init paging_init(void) | |||
672 | free_area_init_node(0, NODE_DATA(0), zones_size, | 605 | free_area_init_node(0, NODE_DATA(0), zones_size, |
673 | __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size); | 606 | __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size); |
674 | } | 607 | } |
675 | #endif /* CONFIG_DISCONTIGMEM */ | 608 | #endif /* ! CONFIG_NEED_MULTIPLE_NODES */ |
676 | 609 | ||
677 | static struct kcore_list kcore_vmem; | 610 | static struct kcore_list kcore_vmem; |
678 | 611 | ||
@@ -703,7 +636,7 @@ module_init(setup_kcore); | |||
703 | 636 | ||
704 | void __init mem_init(void) | 637 | void __init mem_init(void) |
705 | { | 638 | { |
706 | #ifdef CONFIG_DISCONTIGMEM | 639 | #ifdef CONFIG_NEED_MULTIPLE_NODES |
707 | int nid; | 640 | int nid; |
708 | #endif | 641 | #endif |
709 | pg_data_t *pgdat; | 642 | pg_data_t *pgdat; |
@@ -714,7 +647,7 @@ void __init mem_init(void) | |||
714 | num_physpages = max_low_pfn; /* RAM is assumed contiguous */ | 647 | num_physpages = max_low_pfn; /* RAM is assumed contiguous */ |
715 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); | 648 | high_memory = (void *) __va(max_low_pfn * PAGE_SIZE); |
716 | 649 | ||
717 | #ifdef CONFIG_DISCONTIGMEM | 650 | #ifdef CONFIG_NEED_MULTIPLE_NODES |
718 | for_each_online_node(nid) { | 651 | for_each_online_node(nid) { |
719 | if (NODE_DATA(nid)->node_spanned_pages != 0) { | 652 | if (NODE_DATA(nid)->node_spanned_pages != 0) { |
720 | printk("freeing bootmem node %x\n", nid); | 653 | printk("freeing bootmem node %x\n", nid); |
@@ -729,7 +662,7 @@ void __init mem_init(void) | |||
729 | 662 | ||
730 | for_each_pgdat(pgdat) { | 663 | for_each_pgdat(pgdat) { |
731 | for (i = 0; i < pgdat->node_spanned_pages; i++) { | 664 | for (i = 0; i < pgdat->node_spanned_pages; i++) { |
732 | page = pgdat->node_mem_map + i; | 665 | page = pgdat_page_nr(pgdat, i); |
733 | if (PageReserved(page)) | 666 | if (PageReserved(page)) |
734 | reservedpages++; | 667 | reservedpages++; |
735 | } | 668 | } |
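The show_mem() and mem_init() loops switch from pgdat->node_mem_map + i to the pgdat_page_nr() accessor, so they no longer assume a node's page array is one contiguous block, an assumption sparse memory layouts break. The sketch below shows the same indirection on a toy structure; struct toy_page and toy_page_nr() are invented names, not kernel interfaces.

/* Sketch of why an accessor beats raw pointer arithmetic: the loop
 * keeps working whether the "page array" is one block or split into
 * sections.  All names here are made up. */
#include <stdio.h>

struct toy_page { int reserved; };

#define SECTION_PAGES 4
static struct toy_page section_a[SECTION_PAGES];
static struct toy_page section_b[SECTION_PAGES];

/* Accessor hides the discontiguous layout, like pgdat_page_nr(). */
static struct toy_page *toy_page_nr(unsigned int nr)
{
	return nr < SECTION_PAGES ? &section_a[nr]
				  : &section_b[nr - SECTION_PAGES];
}

int main(void)
{
	unsigned int i, reserved = 0;

	section_b[1].reserved = 1;
	for (i = 0; i < 2 * SECTION_PAGES; i++)
		if (toy_page_nr(i)->reserved)
			reserved++;

	printf("%u reserved pages\n", reserved);        /* prints 1 */
	return 0;
}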
diff --git a/arch/ppc64/mm/numa.c b/arch/ppc64/mm/numa.c index ea862ec643d3..cafd91aef289 100644 --- a/arch/ppc64/mm/numa.c +++ b/arch/ppc64/mm/numa.c | |||
@@ -440,6 +440,8 @@ new_range: | |||
440 | for (i = start ; i < (start+size); i += MEMORY_INCREMENT) | 440 | for (i = start ; i < (start+size); i += MEMORY_INCREMENT) |
441 | numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = | 441 | numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = |
442 | numa_domain; | 442 | numa_domain; |
443 | memory_present(numa_domain, start >> PAGE_SHIFT, | ||
444 | (start + size) >> PAGE_SHIFT); | ||
443 | 445 | ||
444 | if (--ranges) | 446 | if (--ranges) |
445 | goto new_range; | 447 | goto new_range; |
@@ -481,6 +483,7 @@ static void __init setup_nonnuma(void) | |||
481 | 483 | ||
482 | for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT) | 484 | for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT) |
483 | numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0; | 485 | numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0; |
486 | memory_present(0, 0, init_node_data[0].node_end_pfn); | ||
484 | } | 487 | } |
485 | 488 | ||
486 | static void __init dump_numa_topology(void) | 489 | static void __init dump_numa_topology(void) |
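Both setup_nonnuma() and the NUMA property parser record node ownership by filling numa_memory_lookup_table in MEMORY_INCREMENT-sized buckets, so translating an address to a node later costs one shift plus an array index. A small runnable illustration of that bucketing, with an assumed 16 MB increment and a two-node layout (both values invented for the example):

/* The coarse address-to-node lookup-table pattern used above. */
#include <stdio.h>

#define MEMORY_INCREMENT_SHIFT 24                     /* 16 MB buckets */
#define MEMORY_INCREMENT (1UL << MEMORY_INCREMENT_SHIFT)

static char lookup_table[16];                         /* covers 256 MB */

static void mark_range(unsigned long start, unsigned long size, int node)
{
	unsigned long i;

	for (i = start; i < start + size; i += MEMORY_INCREMENT)
		lookup_table[i >> MEMORY_INCREMENT_SHIFT] = node;
}

int main(void)
{
	mark_range(0x00000000, 0x08000000, 0);            /* first 128 MB -> node 0 */
	mark_range(0x08000000, 0x08000000, 1);            /* next 128 MB  -> node 1 */

	printf("0x0a000000 is on node %d\n",
	       lookup_table[0x0a000000 >> MEMORY_INCREMENT_SHIFT]);  /* node 1 */
	return 0;
}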
diff --git a/arch/ppc64/xmon/xmon.c b/arch/ppc64/xmon/xmon.c index 3c0ccb2623ae..7f6e13a4b71e 100644 --- a/arch/ppc64/xmon/xmon.c +++ b/arch/ppc64/xmon/xmon.c | |||
@@ -2247,7 +2247,14 @@ scanhex(unsigned long *vp) | |||
2247 | tmpstr[i] = c; | 2247 | tmpstr[i] = c; |
2248 | } | 2248 | } |
2249 | tmpstr[i++] = 0; | 2249 | tmpstr[i++] = 0; |
2250 | *vp = kallsyms_lookup_name(tmpstr); | 2250 | *vp = 0; |
2251 | if (setjmp(bus_error_jmp) == 0) { | ||
2252 | catch_memory_errors = 1; | ||
2253 | sync(); | ||
2254 | *vp = kallsyms_lookup_name(tmpstr); | ||
2255 | sync(); | ||
2256 | } | ||
2257 | catch_memory_errors = 0; | ||
2251 | if (!(*vp)) { | 2258 | if (!(*vp)) { |
2252 | printf("unknown symbol '%s'\n", tmpstr); | 2259 | printf("unknown symbol '%s'\n", tmpstr); |
2253 | return 0; | 2260 | return 0; |
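The xmon change wraps kallsyms_lookup_name() in the debugger's usual setjmp/catch_memory_errors guard so a fault during the lookup bounces back to the prompt instead of taking the machine down. The userspace analogue below uses sigsetjmp() and a signal handler to show the same recover-by-longjmp pattern; risky_lookup() and the handler are illustrative stand-ins, not xmon code.

/* Userspace analogue of the guard added above: arm a fault handler,
 * set a jump point, and longjmp back instead of crashing when the
 * guarded operation touches a bad address. */
#include <setjmp.h>
#include <signal.h>
#include <stdio.h>

static sigjmp_buf bus_error_jmp;
static volatile sig_atomic_t catch_memory_errors;

static void fault_handler(int sig)
{
	if (catch_memory_errors)
		siglongjmp(bus_error_jmp, 1);   /* resume after the guarded block */
	signal(sig, SIG_DFL);                   /* not ours: fall back to default */
	raise(sig);
}

static unsigned long risky_lookup(const char *name)
{
	/* stand-in for kallsyms_lookup_name(): deliberately touches an
	 * unmapped address to provoke a fault */
	(void)name;
	return *(volatile unsigned long *)8;
}

int main(void)
{
	unsigned long val = 0;

	signal(SIGSEGV, fault_handler);
	signal(SIGBUS, fault_handler);

	if (sigsetjmp(bus_error_jmp, 1) == 0) {
		catch_memory_errors = 1;
		val = risky_lookup("symbol");
	}
	catch_memory_errors = 0;

	if (val)
		printf("found: %lx\n", val);
	else
		printf("lookup faulted, still alive\n");
	return 0;
}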