Diffstat (limited to 'arch/powerpc')
131 files changed, 19574 insertions, 3538 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 331483ace0d9..28004f002ec9 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -47,7 +47,7 @@ config PPC
 
 config EARLY_PRINTK
 	bool
-	default y if PPC64
+	default y
 
 config COMPAT
 	bool
@@ -297,6 +297,7 @@ config PPC_PMAC64
 	bool
 	depends on PPC_PMAC && POWER4
 	select U3_DART
+	select MPIC_BROKEN_U3
 	select GENERIC_TBSYNC
 	default y
 
@@ -325,9 +326,7 @@ config PPC_CELL
 	select MMIO_NVRAM
 
 config PPC_OF
-	bool
-	depends on PPC_MULTIPLATFORM # for now
-	default y
+	def_bool y
 
 config XICS
 	depends on PPC_PSERIES
@@ -376,11 +375,28 @@ config CELL_IIC
 	bool
 	default y
 
+config CRASH_DUMP
+	bool "kernel crash dumps (EXPERIMENTAL)"
+	depends on PPC_MULTIPLATFORM
+	depends on EXPERIMENTAL
+	help
+	  Build a kernel suitable for use as a kdump capture kernel.
+	  The kernel will be linked at a different address than normal, and
+	  so can only be used for Kdump.
+
+	  Don't change this unless you know what you are doing.
+
 config IBMVIO
 	depends on PPC_PSERIES || PPC_ISERIES
 	bool
 	default y
 
+config IBMEBUS
+	depends on PPC_PSERIES
+	bool "Support for GX bus based adapters"
+	help
+	  Bus device driver for GX bus based adapters.
+
 config PPC_MPC106
 	bool
 	default n
@@ -472,6 +488,7 @@ source arch/powerpc/platforms/embedded6xx/Kconfig
 source arch/powerpc/platforms/4xx/Kconfig
 source arch/powerpc/platforms/85xx/Kconfig
 source arch/powerpc/platforms/8xx/Kconfig
+source arch/powerpc/platforms/cell/Kconfig
 
 menu "Kernel options"
 
@@ -575,11 +592,12 @@ config ARCH_SELECT_MEMORY_MODEL
 	depends on PPC64
 
 config ARCH_FLATMEM_ENABLE
 	def_bool y
-	depends on PPC64 && !NUMA
+	depends on (PPC64 && !NUMA) || PPC32
 
 config ARCH_SPARSEMEM_ENABLE
 	def_bool y
+	depends on PPC64
 
 config ARCH_SPARSEMEM_DEFAULT
 	def_bool y
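Among the Kconfig additions above, CRASH_DUMP builds a capture kernel that is linked at a non-standard address and only used for kdump. Such a kernel is normally loaded from the running system with kexec's panic-kernel option, once memory has been reserved with a crashkernel= boot parameter. A minimal sketch, assuming kexec-tools is installed; the image path, initrd and command line below are only placeholders, not part of this patch:

    kexec -p /boot/vmlinux-kdump \
        --initrd=/boot/initrd-kdump.img \
        --append="root=/dev/sda2 maxcpus=1"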
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index a13eb575f834..5f80e58e5cb3 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -151,7 +151,7 @@ CPPFLAGS_vmlinux.lds := -Upowerpc
 # All the instructions talk about "make bzImage".
 bzImage: zImage
 
-BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm
+BOOT_TARGETS = zImage zImage.initrd znetboot znetboot.initrd vmlinux.sm uImage
 
 .PHONY: $(BOOT_TARGETS)
 
diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile
index 9770f587af73..22726aefc8ea 100644
--- a/arch/powerpc/boot/Makefile
+++ b/arch/powerpc/boot/Makefile
@@ -143,6 +143,36 @@ $(obj)/zImage.initrd: $(obj)/zImage.initrd.vmode $(obj)/addnote
 	@cp -f $< $@
 	$(call if_changed,addnote)
 
+#-----------------------------------------------------------
+# build u-boot images
+#-----------------------------------------------------------
+quiet_cmd_mygzip = GZIP $@
+cmd_mygzip = gzip -f -9 < $< > $@.$$$$ && mv $@.$$$$ $@
+
+quiet_cmd_objbin = OBJCOPY $@
+cmd_objbin = $(OBJCOPY) -O binary $< $@
+
+quiet_cmd_uimage = UIMAGE $@
+cmd_uimage = $(CONFIG_SHELL) $(MKIMAGE) -A ppc -O linux -T kernel \
+	-C gzip -a 00000000 -e 00000000 -n 'Linux-$(KERNELRELEASE)' \
+	-d $< $@
+
+MKIMAGE := $(srctree)/scripts/mkuboot.sh
+targets += uImage
+extra-y += vmlinux.bin vmlinux.gz
+
+$(obj)/vmlinux.bin: vmlinux FORCE
+	$(call if_changed,objbin)
+
+$(obj)/vmlinux.gz: $(obj)/vmlinux.bin FORCE
+	$(call if_changed,mygzip)
+
+$(obj)/uImage: $(obj)/vmlinux.gz
+	$(Q)rm -f $@
+	$(call cmd,uimage)
+	@echo -n ' Image: $@ '
+	@if [ -f $@ ]; then echo 'is ready' ; else echo 'not made'; fi
+
 install: $(CONFIGURE) $(BOOTIMAGE)
 	sh -x $(srctree)/$(src)/install.sh "$(KERNELRELEASE)" vmlinux System.map "$(INSTALL_PATH)" "$(BOOTIMAGE)"
 
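The rules above gzip the raw kernel binary and wrap it with U-Boot's mkimage via scripts/mkuboot.sh, and the top-level Makefile change adds uImage to BOOT_TARGETS, so the image can be built with the usual cross invocation. A minimal sketch, assuming a powerpc cross toolchain and U-Boot's mkimage are on PATH (the CROSS_COMPILE prefix is only an example):

    make ARCH=powerpc CROSS_COMPILE=powerpc-linux- uImage

The result lands in arch/powerpc/boot/uImage and can then be loaded from the U-Boot prompt (for example with tftp and bootm).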
diff --git a/arch/powerpc/configs/pmac32_defconfig b/arch/powerpc/configs/pmac32_defconfig
new file mode 100644
index 000000000000..398203bd98eb
--- /dev/null
+++ b/arch/powerpc/configs/pmac32_defconfig
@@ -0,0 +1,1729 @@
1 | # | ||
2 | # Automatically generated make config: don't edit | ||
3 | # Linux kernel version: 2.6.15-rc5 | ||
4 | # Tue Dec 13 17:24:05 2005 | ||
5 | # | ||
6 | # CONFIG_PPC64 is not set | ||
7 | CONFIG_PPC32=y | ||
8 | CONFIG_PPC_MERGE=y | ||
9 | CONFIG_MMU=y | ||
10 | CONFIG_GENERIC_HARDIRQS=y | ||
11 | CONFIG_RWSEM_XCHGADD_ALGORITHM=y | ||
12 | CONFIG_GENERIC_CALIBRATE_DELAY=y | ||
13 | CONFIG_PPC=y | ||
14 | CONFIG_EARLY_PRINTK=y | ||
15 | CONFIG_GENERIC_NVRAM=y | ||
16 | CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y | ||
17 | CONFIG_ARCH_MAY_HAVE_PC_FDC=y | ||
18 | |||
19 | # | ||
20 | # Processor support | ||
21 | # | ||
22 | CONFIG_6xx=y | ||
23 | # CONFIG_PPC_52xx is not set | ||
24 | # CONFIG_PPC_82xx is not set | ||
25 | # CONFIG_PPC_83xx is not set | ||
26 | # CONFIG_40x is not set | ||
27 | # CONFIG_44x is not set | ||
28 | # CONFIG_8xx is not set | ||
29 | # CONFIG_E200 is not set | ||
30 | # CONFIG_E500 is not set | ||
31 | CONFIG_PPC_FPU=y | ||
32 | CONFIG_ALTIVEC=y | ||
33 | CONFIG_PPC_STD_MMU=y | ||
34 | CONFIG_PPC_STD_MMU_32=y | ||
35 | # CONFIG_SMP is not set | ||
36 | |||
37 | # | ||
38 | # Code maturity level options | ||
39 | # | ||
40 | CONFIG_EXPERIMENTAL=y | ||
41 | CONFIG_CLEAN_COMPILE=y | ||
42 | CONFIG_BROKEN_ON_SMP=y | ||
43 | CONFIG_INIT_ENV_ARG_LIMIT=32 | ||
44 | |||
45 | # | ||
46 | # General setup | ||
47 | # | ||
48 | CONFIG_LOCALVERSION="" | ||
49 | # CONFIG_LOCALVERSION_AUTO is not set | ||
50 | CONFIG_SWAP=y | ||
51 | CONFIG_SYSVIPC=y | ||
52 | CONFIG_POSIX_MQUEUE=y | ||
53 | # CONFIG_BSD_PROCESS_ACCT is not set | ||
54 | CONFIG_SYSCTL=y | ||
55 | # CONFIG_AUDIT is not set | ||
56 | CONFIG_HOTPLUG=y | ||
57 | CONFIG_KOBJECT_UEVENT=y | ||
58 | CONFIG_IKCONFIG=y | ||
59 | CONFIG_IKCONFIG_PROC=y | ||
60 | CONFIG_INITRAMFS_SOURCE="" | ||
61 | # CONFIG_EMBEDDED is not set | ||
62 | CONFIG_KALLSYMS=y | ||
63 | # CONFIG_KALLSYMS_ALL is not set | ||
64 | # CONFIG_KALLSYMS_EXTRA_PASS is not set | ||
65 | CONFIG_PRINTK=y | ||
66 | CONFIG_BUG=y | ||
67 | CONFIG_BASE_FULL=y | ||
68 | CONFIG_FUTEX=y | ||
69 | CONFIG_EPOLL=y | ||
70 | CONFIG_SHMEM=y | ||
71 | CONFIG_CC_ALIGN_FUNCTIONS=0 | ||
72 | CONFIG_CC_ALIGN_LABELS=0 | ||
73 | CONFIG_CC_ALIGN_LOOPS=0 | ||
74 | CONFIG_CC_ALIGN_JUMPS=0 | ||
75 | # CONFIG_TINY_SHMEM is not set | ||
76 | CONFIG_BASE_SMALL=0 | ||
77 | |||
78 | # | ||
79 | # Loadable module support | ||
80 | # | ||
81 | CONFIG_MODULES=y | ||
82 | CONFIG_MODULE_UNLOAD=y | ||
83 | CONFIG_MODULE_FORCE_UNLOAD=y | ||
84 | CONFIG_OBSOLETE_MODPARM=y | ||
85 | # CONFIG_MODVERSIONS is not set | ||
86 | # CONFIG_MODULE_SRCVERSION_ALL is not set | ||
87 | CONFIG_KMOD=y | ||
88 | |||
89 | # | ||
90 | # Block layer | ||
91 | # | ||
92 | CONFIG_LBD=y | ||
93 | |||
94 | # | ||
95 | # IO Schedulers | ||
96 | # | ||
97 | CONFIG_IOSCHED_NOOP=y | ||
98 | CONFIG_IOSCHED_AS=y | ||
99 | CONFIG_IOSCHED_DEADLINE=y | ||
100 | CONFIG_IOSCHED_CFQ=y | ||
101 | CONFIG_DEFAULT_AS=y | ||
102 | # CONFIG_DEFAULT_DEADLINE is not set | ||
103 | # CONFIG_DEFAULT_CFQ is not set | ||
104 | # CONFIG_DEFAULT_NOOP is not set | ||
105 | CONFIG_DEFAULT_IOSCHED="anticipatory" | ||
106 | |||
107 | # | ||
108 | # Platform support | ||
109 | # | ||
110 | CONFIG_PPC_MULTIPLATFORM=y | ||
111 | # CONFIG_PPC_ISERIES is not set | ||
112 | # CONFIG_EMBEDDED6xx is not set | ||
113 | # CONFIG_APUS is not set | ||
114 | # CONFIG_PPC_CHRP is not set | ||
115 | CONFIG_PPC_PMAC=y | ||
116 | CONFIG_PPC_OF=y | ||
117 | CONFIG_MPIC=y | ||
118 | # CONFIG_PPC_RTAS is not set | ||
119 | # CONFIG_MMIO_NVRAM is not set | ||
120 | # CONFIG_CRASH_DUMP is not set | ||
121 | CONFIG_PPC_MPC106=y | ||
122 | # CONFIG_GENERIC_TBSYNC is not set | ||
123 | CONFIG_CPU_FREQ=y | ||
124 | CONFIG_CPU_FREQ_TABLE=y | ||
125 | # CONFIG_CPU_FREQ_DEBUG is not set | ||
126 | CONFIG_CPU_FREQ_STAT=y | ||
127 | # CONFIG_CPU_FREQ_STAT_DETAILS is not set | ||
128 | CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y | ||
129 | # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set | ||
130 | CONFIG_CPU_FREQ_GOV_PERFORMANCE=y | ||
131 | CONFIG_CPU_FREQ_GOV_POWERSAVE=y | ||
132 | CONFIG_CPU_FREQ_GOV_USERSPACE=y | ||
133 | # CONFIG_CPU_FREQ_GOV_ONDEMAND is not set | ||
134 | # CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set | ||
135 | CONFIG_CPU_FREQ_PMAC=y | ||
136 | CONFIG_PPC601_SYNC_FIX=y | ||
137 | # CONFIG_TAU is not set | ||
138 | # CONFIG_WANT_EARLY_SERIAL is not set | ||
139 | |||
140 | # | ||
141 | # Kernel options | ||
142 | # | ||
143 | # CONFIG_HIGHMEM is not set | ||
144 | # CONFIG_HZ_100 is not set | ||
145 | CONFIG_HZ_250=y | ||
146 | # CONFIG_HZ_1000 is not set | ||
147 | CONFIG_HZ=250 | ||
148 | CONFIG_PREEMPT_NONE=y | ||
149 | # CONFIG_PREEMPT_VOLUNTARY is not set | ||
150 | # CONFIG_PREEMPT is not set | ||
151 | CONFIG_BINFMT_ELF=y | ||
152 | CONFIG_BINFMT_MISC=m | ||
153 | # CONFIG_KEXEC is not set | ||
154 | CONFIG_ARCH_FLATMEM_ENABLE=y | ||
155 | CONFIG_SELECT_MEMORY_MODEL=y | ||
156 | CONFIG_FLATMEM_MANUAL=y | ||
157 | # CONFIG_DISCONTIGMEM_MANUAL is not set | ||
158 | # CONFIG_SPARSEMEM_MANUAL is not set | ||
159 | CONFIG_FLATMEM=y | ||
160 | CONFIG_FLAT_NODE_MEM_MAP=y | ||
161 | # CONFIG_SPARSEMEM_STATIC is not set | ||
162 | CONFIG_SPLIT_PTLOCK_CPUS=4 | ||
163 | CONFIG_PROC_DEVICETREE=y | ||
164 | # CONFIG_CMDLINE_BOOL is not set | ||
165 | CONFIG_PM=y | ||
166 | # CONFIG_PM_LEGACY is not set | ||
167 | CONFIG_PM_DEBUG=y | ||
168 | CONFIG_SOFTWARE_SUSPEND=y | ||
169 | CONFIG_PM_STD_PARTITION="" | ||
170 | CONFIG_SECCOMP=y | ||
171 | CONFIG_ISA_DMA_API=y | ||
172 | |||
173 | # | ||
174 | # Bus options | ||
175 | # | ||
176 | CONFIG_GENERIC_ISA_DMA=y | ||
177 | # CONFIG_PPC_I8259 is not set | ||
178 | CONFIG_PPC_INDIRECT_PCI=y | ||
179 | CONFIG_PCI=y | ||
180 | CONFIG_PCI_DOMAINS=y | ||
181 | CONFIG_PCI_LEGACY_PROC=y | ||
182 | # CONFIG_PCI_DEBUG is not set | ||
183 | |||
184 | # | ||
185 | # PCCARD (PCMCIA/CardBus) support | ||
186 | # | ||
187 | CONFIG_PCCARD=m | ||
188 | # CONFIG_PCMCIA_DEBUG is not set | ||
189 | CONFIG_PCMCIA=m | ||
190 | CONFIG_PCMCIA_LOAD_CIS=y | ||
191 | CONFIG_PCMCIA_IOCTL=y | ||
192 | CONFIG_CARDBUS=y | ||
193 | |||
194 | # | ||
195 | # PC-card bridges | ||
196 | # | ||
197 | CONFIG_YENTA=m | ||
198 | # CONFIG_PD6729 is not set | ||
199 | # CONFIG_I82092 is not set | ||
200 | CONFIG_PCCARD_NONSTATIC=m | ||
201 | |||
202 | # | ||
203 | # PCI Hotplug Support | ||
204 | # | ||
205 | # CONFIG_HOTPLUG_PCI is not set | ||
206 | |||
207 | # | ||
208 | # Advanced setup | ||
209 | # | ||
210 | # CONFIG_ADVANCED_OPTIONS is not set | ||
211 | |||
212 | # | ||
213 | # Default settings for advanced configuration options are used | ||
214 | # | ||
215 | CONFIG_HIGHMEM_START=0xfe000000 | ||
216 | CONFIG_LOWMEM_SIZE=0x30000000 | ||
217 | CONFIG_KERNEL_START=0xc0000000 | ||
218 | CONFIG_TASK_SIZE=0x80000000 | ||
219 | CONFIG_BOOT_LOAD=0x00800000 | ||
220 | |||
221 | # | ||
222 | # Networking | ||
223 | # | ||
224 | CONFIG_NET=y | ||
225 | |||
226 | # | ||
227 | # Networking options | ||
228 | # | ||
229 | CONFIG_PACKET=y | ||
230 | # CONFIG_PACKET_MMAP is not set | ||
231 | CONFIG_UNIX=y | ||
232 | # CONFIG_NET_KEY is not set | ||
233 | CONFIG_INET=y | ||
234 | CONFIG_IP_MULTICAST=y | ||
235 | # CONFIG_IP_ADVANCED_ROUTER is not set | ||
236 | CONFIG_IP_FIB_HASH=y | ||
237 | # CONFIG_IP_PNP is not set | ||
238 | # CONFIG_NET_IPIP is not set | ||
239 | # CONFIG_NET_IPGRE is not set | ||
240 | # CONFIG_IP_MROUTE is not set | ||
241 | # CONFIG_ARPD is not set | ||
242 | CONFIG_SYN_COOKIES=y | ||
243 | # CONFIG_INET_AH is not set | ||
244 | # CONFIG_INET_ESP is not set | ||
245 | # CONFIG_INET_IPCOMP is not set | ||
246 | # CONFIG_INET_TUNNEL is not set | ||
247 | CONFIG_INET_DIAG=y | ||
248 | CONFIG_INET_TCP_DIAG=y | ||
249 | # CONFIG_TCP_CONG_ADVANCED is not set | ||
250 | CONFIG_TCP_CONG_BIC=y | ||
251 | |||
252 | # | ||
253 | # IP: Virtual Server Configuration | ||
254 | # | ||
255 | # CONFIG_IP_VS is not set | ||
256 | # CONFIG_IPV6 is not set | ||
257 | CONFIG_NETFILTER=y | ||
258 | # CONFIG_NETFILTER_DEBUG is not set | ||
259 | |||
260 | # | ||
261 | # Core Netfilter Configuration | ||
262 | # | ||
263 | # CONFIG_NETFILTER_NETLINK is not set | ||
264 | |||
265 | # | ||
266 | # IP: Netfilter Configuration | ||
267 | # | ||
268 | CONFIG_IP_NF_CONNTRACK=m | ||
269 | # CONFIG_IP_NF_CT_ACCT is not set | ||
270 | # CONFIG_IP_NF_CONNTRACK_MARK is not set | ||
271 | # CONFIG_IP_NF_CONNTRACK_EVENTS is not set | ||
272 | # CONFIG_IP_NF_CT_PROTO_SCTP is not set | ||
273 | CONFIG_IP_NF_FTP=m | ||
274 | CONFIG_IP_NF_IRC=m | ||
275 | CONFIG_IP_NF_NETBIOS_NS=m | ||
276 | CONFIG_IP_NF_TFTP=m | ||
277 | CONFIG_IP_NF_AMANDA=m | ||
278 | CONFIG_IP_NF_PPTP=m | ||
279 | # CONFIG_IP_NF_QUEUE is not set | ||
280 | CONFIG_IP_NF_IPTABLES=m | ||
281 | CONFIG_IP_NF_MATCH_LIMIT=m | ||
282 | CONFIG_IP_NF_MATCH_IPRANGE=m | ||
283 | CONFIG_IP_NF_MATCH_MAC=m | ||
284 | CONFIG_IP_NF_MATCH_PKTTYPE=m | ||
285 | CONFIG_IP_NF_MATCH_MARK=m | ||
286 | CONFIG_IP_NF_MATCH_MULTIPORT=m | ||
287 | CONFIG_IP_NF_MATCH_TOS=m | ||
288 | CONFIG_IP_NF_MATCH_RECENT=m | ||
289 | CONFIG_IP_NF_MATCH_ECN=m | ||
290 | CONFIG_IP_NF_MATCH_DSCP=m | ||
291 | CONFIG_IP_NF_MATCH_AH_ESP=m | ||
292 | CONFIG_IP_NF_MATCH_LENGTH=m | ||
293 | CONFIG_IP_NF_MATCH_TTL=m | ||
294 | CONFIG_IP_NF_MATCH_TCPMSS=m | ||
295 | CONFIG_IP_NF_MATCH_HELPER=m | ||
296 | CONFIG_IP_NF_MATCH_STATE=m | ||
297 | CONFIG_IP_NF_MATCH_CONNTRACK=m | ||
298 | CONFIG_IP_NF_MATCH_OWNER=m | ||
299 | # CONFIG_IP_NF_MATCH_ADDRTYPE is not set | ||
300 | # CONFIG_IP_NF_MATCH_REALM is not set | ||
301 | # CONFIG_IP_NF_MATCH_SCTP is not set | ||
302 | CONFIG_IP_NF_MATCH_DCCP=m | ||
303 | # CONFIG_IP_NF_MATCH_COMMENT is not set | ||
304 | # CONFIG_IP_NF_MATCH_HASHLIMIT is not set | ||
305 | CONFIG_IP_NF_MATCH_STRING=m | ||
306 | CONFIG_IP_NF_FILTER=m | ||
307 | CONFIG_IP_NF_TARGET_REJECT=m | ||
308 | # CONFIG_IP_NF_TARGET_LOG is not set | ||
309 | CONFIG_IP_NF_TARGET_ULOG=m | ||
310 | CONFIG_IP_NF_TARGET_TCPMSS=m | ||
311 | # CONFIG_IP_NF_TARGET_NFQUEUE is not set | ||
312 | CONFIG_IP_NF_NAT=m | ||
313 | CONFIG_IP_NF_NAT_NEEDED=y | ||
314 | CONFIG_IP_NF_TARGET_MASQUERADE=m | ||
315 | CONFIG_IP_NF_TARGET_REDIRECT=m | ||
316 | CONFIG_IP_NF_TARGET_NETMAP=m | ||
317 | CONFIG_IP_NF_TARGET_SAME=m | ||
318 | CONFIG_IP_NF_NAT_SNMP_BASIC=m | ||
319 | CONFIG_IP_NF_NAT_IRC=m | ||
320 | CONFIG_IP_NF_NAT_FTP=m | ||
321 | CONFIG_IP_NF_NAT_TFTP=m | ||
322 | CONFIG_IP_NF_NAT_AMANDA=m | ||
323 | CONFIG_IP_NF_NAT_PPTP=m | ||
324 | # CONFIG_IP_NF_MANGLE is not set | ||
325 | CONFIG_IP_NF_RAW=m | ||
326 | CONFIG_IP_NF_TARGET_NOTRACK=m | ||
327 | CONFIG_IP_NF_ARPTABLES=m | ||
328 | CONFIG_IP_NF_ARPFILTER=m | ||
329 | CONFIG_IP_NF_ARP_MANGLE=m | ||
330 | |||
331 | # | ||
332 | # DCCP Configuration (EXPERIMENTAL) | ||
333 | # | ||
334 | CONFIG_IP_DCCP=m | ||
335 | CONFIG_INET_DCCP_DIAG=m | ||
336 | |||
337 | # | ||
338 | # DCCP CCIDs Configuration (EXPERIMENTAL) | ||
339 | # | ||
340 | CONFIG_IP_DCCP_CCID3=m | ||
341 | CONFIG_IP_DCCP_TFRC_LIB=m | ||
342 | |||
343 | # | ||
344 | # DCCP Kernel Hacking | ||
345 | # | ||
346 | # CONFIG_IP_DCCP_DEBUG is not set | ||
347 | # CONFIG_IP_DCCP_UNLOAD_HACK is not set | ||
348 | |||
349 | # | ||
350 | # SCTP Configuration (EXPERIMENTAL) | ||
351 | # | ||
352 | # CONFIG_IP_SCTP is not set | ||
353 | # CONFIG_ATM is not set | ||
354 | # CONFIG_BRIDGE is not set | ||
355 | # CONFIG_VLAN_8021Q is not set | ||
356 | # CONFIG_DECNET is not set | ||
357 | # CONFIG_LLC2 is not set | ||
358 | # CONFIG_IPX is not set | ||
359 | # CONFIG_ATALK is not set | ||
360 | # CONFIG_X25 is not set | ||
361 | # CONFIG_LAPB is not set | ||
362 | # CONFIG_NET_DIVERT is not set | ||
363 | # CONFIG_ECONET is not set | ||
364 | # CONFIG_WAN_ROUTER is not set | ||
365 | |||
366 | # | ||
367 | # QoS and/or fair queueing | ||
368 | # | ||
369 | # CONFIG_NET_SCHED is not set | ||
370 | |||
371 | # | ||
372 | # Network testing | ||
373 | # | ||
374 | # CONFIG_NET_PKTGEN is not set | ||
375 | # CONFIG_HAMRADIO is not set | ||
376 | CONFIG_IRDA=m | ||
377 | |||
378 | # | ||
379 | # IrDA protocols | ||
380 | # | ||
381 | CONFIG_IRLAN=m | ||
382 | CONFIG_IRNET=m | ||
383 | CONFIG_IRCOMM=m | ||
384 | # CONFIG_IRDA_ULTRA is not set | ||
385 | |||
386 | # | ||
387 | # IrDA options | ||
388 | # | ||
389 | CONFIG_IRDA_CACHE_LAST_LSAP=y | ||
390 | CONFIG_IRDA_FAST_RR=y | ||
391 | # CONFIG_IRDA_DEBUG is not set | ||
392 | |||
393 | # | ||
394 | # Infrared-port device drivers | ||
395 | # | ||
396 | |||
397 | # | ||
398 | # SIR device drivers | ||
399 | # | ||
400 | CONFIG_IRTTY_SIR=m | ||
401 | |||
402 | # | ||
403 | # Dongle support | ||
404 | # | ||
405 | # CONFIG_DONGLE is not set | ||
406 | |||
407 | # | ||
408 | # Old SIR device drivers | ||
409 | # | ||
410 | # CONFIG_IRPORT_SIR is not set | ||
411 | |||
412 | # | ||
413 | # Old Serial dongle support | ||
414 | # | ||
415 | |||
416 | # | ||
417 | # FIR device drivers | ||
418 | # | ||
419 | # CONFIG_USB_IRDA is not set | ||
420 | # CONFIG_SIGMATEL_FIR is not set | ||
421 | # CONFIG_NSC_FIR is not set | ||
422 | # CONFIG_WINBOND_FIR is not set | ||
423 | # CONFIG_TOSHIBA_FIR is not set | ||
424 | # CONFIG_SMC_IRCC_FIR is not set | ||
425 | # CONFIG_ALI_FIR is not set | ||
426 | # CONFIG_VLSI_FIR is not set | ||
427 | # CONFIG_VIA_FIR is not set | ||
428 | CONFIG_BT=m | ||
429 | CONFIG_BT_L2CAP=m | ||
430 | CONFIG_BT_SCO=m | ||
431 | CONFIG_BT_RFCOMM=m | ||
432 | CONFIG_BT_RFCOMM_TTY=y | ||
433 | CONFIG_BT_BNEP=m | ||
434 | CONFIG_BT_BNEP_MC_FILTER=y | ||
435 | CONFIG_BT_BNEP_PROTO_FILTER=y | ||
436 | CONFIG_BT_HIDP=m | ||
437 | |||
438 | # | ||
439 | # Bluetooth device drivers | ||
440 | # | ||
441 | CONFIG_BT_HCIUSB=m | ||
442 | # CONFIG_BT_HCIUSB_SCO is not set | ||
443 | # CONFIG_BT_HCIUART is not set | ||
444 | CONFIG_BT_HCIBCM203X=m | ||
445 | # CONFIG_BT_HCIBPA10X is not set | ||
446 | CONFIG_BT_HCIBFUSB=m | ||
447 | # CONFIG_BT_HCIDTL1 is not set | ||
448 | # CONFIG_BT_HCIBT3C is not set | ||
449 | # CONFIG_BT_HCIBLUECARD is not set | ||
450 | # CONFIG_BT_HCIBTUART is not set | ||
451 | # CONFIG_BT_HCIVHCI is not set | ||
452 | CONFIG_IEEE80211=m | ||
453 | # CONFIG_IEEE80211_DEBUG is not set | ||
454 | CONFIG_IEEE80211_CRYPT_WEP=m | ||
455 | CONFIG_IEEE80211_CRYPT_CCMP=m | ||
456 | CONFIG_IEEE80211_CRYPT_TKIP=m | ||
457 | |||
458 | # | ||
459 | # Device Drivers | ||
460 | # | ||
461 | |||
462 | # | ||
463 | # Generic Driver Options | ||
464 | # | ||
465 | # CONFIG_STANDALONE is not set | ||
466 | CONFIG_PREVENT_FIRMWARE_BUILD=y | ||
467 | CONFIG_FW_LOADER=m | ||
468 | # CONFIG_DEBUG_DRIVER is not set | ||
469 | |||
470 | # | ||
471 | # Connector - unified userspace <-> kernelspace linker | ||
472 | # | ||
473 | CONFIG_CONNECTOR=y | ||
474 | CONFIG_PROC_EVENTS=y | ||
475 | |||
476 | # | ||
477 | # Memory Technology Devices (MTD) | ||
478 | # | ||
479 | # CONFIG_MTD is not set | ||
480 | |||
481 | # | ||
482 | # Parallel port support | ||
483 | # | ||
484 | # CONFIG_PARPORT is not set | ||
485 | |||
486 | # | ||
487 | # Plug and Play support | ||
488 | # | ||
489 | |||
490 | # | ||
491 | # Block devices | ||
492 | # | ||
493 | # CONFIG_BLK_DEV_FD is not set | ||
494 | CONFIG_MAC_FLOPPY=y | ||
495 | # CONFIG_BLK_CPQ_DA is not set | ||
496 | # CONFIG_BLK_CPQ_CISS_DA is not set | ||
497 | # CONFIG_BLK_DEV_DAC960 is not set | ||
498 | # CONFIG_BLK_DEV_UMEM is not set | ||
499 | # CONFIG_BLK_DEV_COW_COMMON is not set | ||
500 | CONFIG_BLK_DEV_LOOP=y | ||
501 | # CONFIG_BLK_DEV_CRYPTOLOOP is not set | ||
502 | # CONFIG_BLK_DEV_NBD is not set | ||
503 | # CONFIG_BLK_DEV_SX8 is not set | ||
504 | CONFIG_BLK_DEV_UB=m | ||
505 | CONFIG_BLK_DEV_RAM=y | ||
506 | CONFIG_BLK_DEV_RAM_COUNT=16 | ||
507 | CONFIG_BLK_DEV_RAM_SIZE=4096 | ||
508 | CONFIG_BLK_DEV_INITRD=y | ||
509 | # CONFIG_CDROM_PKTCDVD is not set | ||
510 | # CONFIG_ATA_OVER_ETH is not set | ||
511 | |||
512 | # | ||
513 | # ATA/ATAPI/MFM/RLL support | ||
514 | # | ||
515 | CONFIG_IDE=y | ||
516 | CONFIG_BLK_DEV_IDE=y | ||
517 | |||
518 | # | ||
519 | # Please see Documentation/ide.txt for help/info on IDE drives | ||
520 | # | ||
521 | # CONFIG_BLK_DEV_IDE_SATA is not set | ||
522 | CONFIG_BLK_DEV_IDEDISK=y | ||
523 | # CONFIG_IDEDISK_MULTI_MODE is not set | ||
524 | CONFIG_BLK_DEV_IDECS=m | ||
525 | CONFIG_BLK_DEV_IDECD=y | ||
526 | # CONFIG_BLK_DEV_IDETAPE is not set | ||
527 | CONFIG_BLK_DEV_IDEFLOPPY=y | ||
528 | CONFIG_BLK_DEV_IDESCSI=y | ||
529 | # CONFIG_IDE_TASK_IOCTL is not set | ||
530 | |||
531 | # | ||
532 | # IDE chipset support/bugfixes | ||
533 | # | ||
534 | # CONFIG_IDE_GENERIC is not set | ||
535 | CONFIG_BLK_DEV_IDEPCI=y | ||
536 | CONFIG_IDEPCI_SHARE_IRQ=y | ||
537 | # CONFIG_BLK_DEV_OFFBOARD is not set | ||
538 | CONFIG_BLK_DEV_GENERIC=y | ||
539 | # CONFIG_BLK_DEV_OPTI621 is not set | ||
540 | CONFIG_BLK_DEV_SL82C105=y | ||
541 | CONFIG_BLK_DEV_IDEDMA_PCI=y | ||
542 | # CONFIG_BLK_DEV_IDEDMA_FORCED is not set | ||
543 | CONFIG_IDEDMA_PCI_AUTO=y | ||
544 | # CONFIG_IDEDMA_ONLYDISK is not set | ||
545 | # CONFIG_BLK_DEV_AEC62XX is not set | ||
546 | # CONFIG_BLK_DEV_ALI15X3 is not set | ||
547 | # CONFIG_BLK_DEV_AMD74XX is not set | ||
548 | # CONFIG_BLK_DEV_CMD64X is not set | ||
549 | # CONFIG_BLK_DEV_TRIFLEX is not set | ||
550 | # CONFIG_BLK_DEV_CY82C693 is not set | ||
551 | # CONFIG_BLK_DEV_CS5520 is not set | ||
552 | # CONFIG_BLK_DEV_CS5530 is not set | ||
553 | # CONFIG_BLK_DEV_HPT34X is not set | ||
554 | # CONFIG_BLK_DEV_HPT366 is not set | ||
555 | # CONFIG_BLK_DEV_SC1200 is not set | ||
556 | # CONFIG_BLK_DEV_PIIX is not set | ||
557 | # CONFIG_BLK_DEV_IT821X is not set | ||
558 | # CONFIG_BLK_DEV_NS87415 is not set | ||
559 | # CONFIG_BLK_DEV_PDC202XX_OLD is not set | ||
560 | CONFIG_BLK_DEV_PDC202XX_NEW=y | ||
561 | # CONFIG_PDC202XX_FORCE is not set | ||
562 | # CONFIG_BLK_DEV_SVWKS is not set | ||
563 | # CONFIG_BLK_DEV_SIIMAGE is not set | ||
564 | # CONFIG_BLK_DEV_SLC90E66 is not set | ||
565 | # CONFIG_BLK_DEV_TRM290 is not set | ||
566 | # CONFIG_BLK_DEV_VIA82CXXX is not set | ||
567 | CONFIG_BLK_DEV_IDE_PMAC=y | ||
568 | CONFIG_BLK_DEV_IDE_PMAC_ATA100FIRST=y | ||
569 | CONFIG_BLK_DEV_IDEDMA_PMAC=y | ||
570 | CONFIG_BLK_DEV_IDE_PMAC_BLINK=y | ||
571 | # CONFIG_IDE_ARM is not set | ||
572 | CONFIG_BLK_DEV_IDEDMA=y | ||
573 | # CONFIG_IDEDMA_IVB is not set | ||
574 | CONFIG_IDEDMA_AUTO=y | ||
575 | # CONFIG_BLK_DEV_HD is not set | ||
576 | |||
577 | # | ||
578 | # SCSI device support | ||
579 | # | ||
580 | # CONFIG_RAID_ATTRS is not set | ||
581 | CONFIG_SCSI=y | ||
582 | CONFIG_SCSI_PROC_FS=y | ||
583 | |||
584 | # | ||
585 | # SCSI support type (disk, tape, CD-ROM) | ||
586 | # | ||
587 | CONFIG_BLK_DEV_SD=y | ||
588 | CONFIG_CHR_DEV_ST=y | ||
589 | # CONFIG_CHR_DEV_OSST is not set | ||
590 | CONFIG_BLK_DEV_SR=y | ||
591 | CONFIG_BLK_DEV_SR_VENDOR=y | ||
592 | CONFIG_CHR_DEV_SG=y | ||
593 | # CONFIG_CHR_DEV_SCH is not set | ||
594 | |||
595 | # | ||
596 | # Some SCSI devices (e.g. CD jukebox) support multiple LUNs | ||
597 | # | ||
598 | # CONFIG_SCSI_MULTI_LUN is not set | ||
599 | CONFIG_SCSI_CONSTANTS=y | ||
600 | # CONFIG_SCSI_LOGGING is not set | ||
601 | |||
602 | # | ||
603 | # SCSI Transport Attributes | ||
604 | # | ||
605 | CONFIG_SCSI_SPI_ATTRS=y | ||
606 | # CONFIG_SCSI_FC_ATTRS is not set | ||
607 | # CONFIG_SCSI_ISCSI_ATTRS is not set | ||
608 | # CONFIG_SCSI_SAS_ATTRS is not set | ||
609 | |||
610 | # | ||
611 | # SCSI low-level drivers | ||
612 | # | ||
613 | # CONFIG_ISCSI_TCP is not set | ||
614 | # CONFIG_BLK_DEV_3W_XXXX_RAID is not set | ||
615 | # CONFIG_SCSI_3W_9XXX is not set | ||
616 | # CONFIG_SCSI_ACARD is not set | ||
617 | # CONFIG_SCSI_AACRAID is not set | ||
618 | CONFIG_SCSI_AIC7XXX=m | ||
619 | CONFIG_AIC7XXX_CMDS_PER_DEVICE=253 | ||
620 | CONFIG_AIC7XXX_RESET_DELAY_MS=15000 | ||
621 | CONFIG_AIC7XXX_DEBUG_ENABLE=y | ||
622 | CONFIG_AIC7XXX_DEBUG_MASK=0 | ||
623 | CONFIG_AIC7XXX_REG_PRETTY_PRINT=y | ||
624 | CONFIG_SCSI_AIC7XXX_OLD=m | ||
625 | # CONFIG_SCSI_AIC79XX is not set | ||
626 | # CONFIG_SCSI_DPT_I2O is not set | ||
627 | # CONFIG_MEGARAID_NEWGEN is not set | ||
628 | # CONFIG_MEGARAID_LEGACY is not set | ||
629 | # CONFIG_MEGARAID_SAS is not set | ||
630 | # CONFIG_SCSI_SATA is not set | ||
631 | # CONFIG_SCSI_BUSLOGIC is not set | ||
632 | # CONFIG_SCSI_DMX3191D is not set | ||
633 | # CONFIG_SCSI_EATA is not set | ||
634 | # CONFIG_SCSI_FUTURE_DOMAIN is not set | ||
635 | # CONFIG_SCSI_GDTH is not set | ||
636 | # CONFIG_SCSI_IPS is not set | ||
637 | # CONFIG_SCSI_INITIO is not set | ||
638 | # CONFIG_SCSI_INIA100 is not set | ||
639 | CONFIG_SCSI_SYM53C8XX_2=y | ||
640 | CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=0 | ||
641 | CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 | ||
642 | CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 | ||
643 | # CONFIG_SCSI_SYM53C8XX_IOMAPPED is not set | ||
644 | # CONFIG_SCSI_IPR is not set | ||
645 | # CONFIG_SCSI_QLOGIC_FC is not set | ||
646 | # CONFIG_SCSI_QLOGIC_1280 is not set | ||
647 | CONFIG_SCSI_QLA2XXX=y | ||
648 | # CONFIG_SCSI_QLA21XX is not set | ||
649 | # CONFIG_SCSI_QLA22XX is not set | ||
650 | # CONFIG_SCSI_QLA2300 is not set | ||
651 | # CONFIG_SCSI_QLA2322 is not set | ||
652 | # CONFIG_SCSI_QLA6312 is not set | ||
653 | # CONFIG_SCSI_QLA24XX is not set | ||
654 | # CONFIG_SCSI_LPFC is not set | ||
655 | # CONFIG_SCSI_DC395x is not set | ||
656 | # CONFIG_SCSI_DC390T is not set | ||
657 | # CONFIG_SCSI_NSP32 is not set | ||
658 | # CONFIG_SCSI_DEBUG is not set | ||
659 | CONFIG_SCSI_MESH=y | ||
660 | CONFIG_SCSI_MESH_SYNC_RATE=5 | ||
661 | CONFIG_SCSI_MESH_RESET_DELAY_MS=1000 | ||
662 | CONFIG_SCSI_MAC53C94=y | ||
663 | |||
664 | # | ||
665 | # PCMCIA SCSI adapter support | ||
666 | # | ||
667 | # CONFIG_PCMCIA_AHA152X is not set | ||
668 | # CONFIG_PCMCIA_FDOMAIN is not set | ||
669 | # CONFIG_PCMCIA_NINJA_SCSI is not set | ||
670 | # CONFIG_PCMCIA_QLOGIC is not set | ||
671 | # CONFIG_PCMCIA_SYM53C500 is not set | ||
672 | |||
673 | # | ||
674 | # Multi-device support (RAID and LVM) | ||
675 | # | ||
676 | CONFIG_MD=y | ||
677 | CONFIG_BLK_DEV_MD=m | ||
678 | CONFIG_MD_LINEAR=m | ||
679 | CONFIG_MD_RAID0=m | ||
680 | CONFIG_MD_RAID1=m | ||
681 | # CONFIG_MD_RAID10 is not set | ||
682 | CONFIG_MD_RAID5=m | ||
683 | CONFIG_MD_RAID6=m | ||
684 | CONFIG_MD_MULTIPATH=m | ||
685 | CONFIG_MD_FAULTY=m | ||
686 | CONFIG_BLK_DEV_DM=m | ||
687 | CONFIG_DM_CRYPT=m | ||
688 | # CONFIG_DM_SNAPSHOT is not set | ||
689 | # CONFIG_DM_MIRROR is not set | ||
690 | # CONFIG_DM_ZERO is not set | ||
691 | # CONFIG_DM_MULTIPATH is not set | ||
692 | |||
693 | # | ||
694 | # Fusion MPT device support | ||
695 | # | ||
696 | # CONFIG_FUSION is not set | ||
697 | # CONFIG_FUSION_SPI is not set | ||
698 | # CONFIG_FUSION_FC is not set | ||
699 | # CONFIG_FUSION_SAS is not set | ||
700 | |||
701 | # | ||
702 | # IEEE 1394 (FireWire) support | ||
703 | # | ||
704 | CONFIG_IEEE1394=m | ||
705 | |||
706 | # | ||
707 | # Subsystem Options | ||
708 | # | ||
709 | # CONFIG_IEEE1394_VERBOSEDEBUG is not set | ||
710 | # CONFIG_IEEE1394_OUI_DB is not set | ||
711 | CONFIG_IEEE1394_EXTRA_CONFIG_ROMS=y | ||
712 | CONFIG_IEEE1394_CONFIG_ROM_IP1394=y | ||
713 | # CONFIG_IEEE1394_EXPORT_FULL_API is not set | ||
714 | |||
715 | # | ||
716 | # Device Drivers | ||
717 | # | ||
718 | # CONFIG_IEEE1394_PCILYNX is not set | ||
719 | CONFIG_IEEE1394_OHCI1394=m | ||
720 | |||
721 | # | ||
722 | # Protocol Drivers | ||
723 | # | ||
724 | CONFIG_IEEE1394_VIDEO1394=m | ||
725 | CONFIG_IEEE1394_SBP2=m | ||
726 | # CONFIG_IEEE1394_SBP2_PHYS_DMA is not set | ||
727 | CONFIG_IEEE1394_ETH1394=m | ||
728 | CONFIG_IEEE1394_DV1394=m | ||
729 | CONFIG_IEEE1394_RAWIO=m | ||
730 | # CONFIG_IEEE1394_CMP is not set | ||
731 | |||
732 | # | ||
733 | # I2O device support | ||
734 | # | ||
735 | # CONFIG_I2O is not set | ||
736 | |||
737 | # | ||
738 | # Macintosh device drivers | ||
739 | # | ||
740 | CONFIG_ADB=y | ||
741 | CONFIG_ADB_CUDA=y | ||
742 | CONFIG_ADB_PMU=y | ||
743 | CONFIG_PMAC_APM_EMU=y | ||
744 | CONFIG_PMAC_MEDIABAY=y | ||
745 | CONFIG_PMAC_BACKLIGHT=y | ||
746 | CONFIG_INPUT_ADBHID=y | ||
747 | CONFIG_MAC_EMUMOUSEBTN=y | ||
748 | CONFIG_THERM_WINDTUNNEL=m | ||
749 | CONFIG_THERM_ADT746X=m | ||
750 | # CONFIG_WINDFARM is not set | ||
751 | # CONFIG_ANSLCD is not set | ||
752 | |||
753 | # | ||
754 | # Network device support | ||
755 | # | ||
756 | CONFIG_NETDEVICES=y | ||
757 | # CONFIG_DUMMY is not set | ||
758 | # CONFIG_BONDING is not set | ||
759 | # CONFIG_EQUALIZER is not set | ||
760 | # CONFIG_TUN is not set | ||
761 | |||
762 | # | ||
763 | # ARCnet devices | ||
764 | # | ||
765 | # CONFIG_ARCNET is not set | ||
766 | |||
767 | # | ||
768 | # PHY device support | ||
769 | # | ||
770 | # CONFIG_PHYLIB is not set | ||
771 | |||
772 | # | ||
773 | # Ethernet (10 or 100Mbit) | ||
774 | # | ||
775 | CONFIG_NET_ETHERNET=y | ||
776 | CONFIG_MII=y | ||
777 | CONFIG_MACE=y | ||
778 | # CONFIG_MACE_AAUI_PORT is not set | ||
779 | CONFIG_BMAC=y | ||
780 | # CONFIG_HAPPYMEAL is not set | ||
781 | CONFIG_SUNGEM=y | ||
782 | # CONFIG_CASSINI is not set | ||
783 | # CONFIG_NET_VENDOR_3COM is not set | ||
784 | |||
785 | # | ||
786 | # Tulip family network device support | ||
787 | # | ||
788 | # CONFIG_NET_TULIP is not set | ||
789 | # CONFIG_HP100 is not set | ||
790 | CONFIG_NET_PCI=y | ||
791 | CONFIG_PCNET32=y | ||
792 | # CONFIG_AMD8111_ETH is not set | ||
793 | # CONFIG_ADAPTEC_STARFIRE is not set | ||
794 | # CONFIG_B44 is not set | ||
795 | # CONFIG_FORCEDETH is not set | ||
796 | # CONFIG_DGRS is not set | ||
797 | # CONFIG_EEPRO100 is not set | ||
798 | # CONFIG_E100 is not set | ||
799 | # CONFIG_FEALNX is not set | ||
800 | # CONFIG_NATSEMI is not set | ||
801 | # CONFIG_NE2K_PCI is not set | ||
802 | # CONFIG_8139CP is not set | ||
803 | # CONFIG_8139TOO is not set | ||
804 | # CONFIG_SIS900 is not set | ||
805 | # CONFIG_EPIC100 is not set | ||
806 | # CONFIG_SUNDANCE is not set | ||
807 | # CONFIG_TLAN is not set | ||
808 | # CONFIG_VIA_RHINE is not set | ||
809 | |||
810 | # | ||
811 | # Ethernet (1000 Mbit) | ||
812 | # | ||
813 | # CONFIG_ACENIC is not set | ||
814 | # CONFIG_DL2K is not set | ||
815 | # CONFIG_E1000 is not set | ||
816 | # CONFIG_NS83820 is not set | ||
817 | # CONFIG_HAMACHI is not set | ||
818 | # CONFIG_YELLOWFIN is not set | ||
819 | # CONFIG_R8169 is not set | ||
820 | # CONFIG_SIS190 is not set | ||
821 | # CONFIG_SKGE is not set | ||
822 | # CONFIG_SK98LIN is not set | ||
823 | # CONFIG_VIA_VELOCITY is not set | ||
824 | # CONFIG_TIGON3 is not set | ||
825 | # CONFIG_BNX2 is not set | ||
826 | # CONFIG_MV643XX_ETH is not set | ||
827 | |||
828 | # | ||
829 | # Ethernet (10000 Mbit) | ||
830 | # | ||
831 | # CONFIG_CHELSIO_T1 is not set | ||
832 | # CONFIG_IXGB is not set | ||
833 | # CONFIG_S2IO is not set | ||
834 | |||
835 | # | ||
836 | # Token Ring devices | ||
837 | # | ||
838 | # CONFIG_TR is not set | ||
839 | |||
840 | # | ||
841 | # Wireless LAN (non-hamradio) | ||
842 | # | ||
843 | CONFIG_NET_RADIO=y | ||
844 | |||
845 | # | ||
846 | # Obsolete Wireless cards support (pre-802.11) | ||
847 | # | ||
848 | # CONFIG_STRIP is not set | ||
849 | # CONFIG_PCMCIA_WAVELAN is not set | ||
850 | # CONFIG_PCMCIA_NETWAVE is not set | ||
851 | |||
852 | # | ||
853 | # Wireless 802.11 Frequency Hopping cards support | ||
854 | # | ||
855 | # CONFIG_PCMCIA_RAYCS is not set | ||
856 | |||
857 | # | ||
858 | # Wireless 802.11b ISA/PCI cards support | ||
859 | # | ||
860 | # CONFIG_IPW2100 is not set | ||
861 | # CONFIG_IPW2200 is not set | ||
862 | # CONFIG_AIRO is not set | ||
863 | CONFIG_HERMES=m | ||
864 | CONFIG_APPLE_AIRPORT=m | ||
865 | # CONFIG_PLX_HERMES is not set | ||
866 | # CONFIG_TMD_HERMES is not set | ||
867 | # CONFIG_NORTEL_HERMES is not set | ||
868 | # CONFIG_PCI_HERMES is not set | ||
869 | # CONFIG_ATMEL is not set | ||
870 | |||
871 | # | ||
872 | # Wireless 802.11b Pcmcia/Cardbus cards support | ||
873 | # | ||
874 | # CONFIG_PCMCIA_HERMES is not set | ||
875 | # CONFIG_PCMCIA_SPECTRUM is not set | ||
876 | # CONFIG_AIRO_CS is not set | ||
877 | # CONFIG_PCMCIA_WL3501 is not set | ||
878 | |||
879 | # | ||
880 | # Prism GT/Duette 802.11(a/b/g) PCI/Cardbus support | ||
881 | # | ||
882 | CONFIG_PRISM54=m | ||
883 | # CONFIG_HOSTAP is not set | ||
884 | CONFIG_NET_WIRELESS=y | ||
885 | |||
886 | # | ||
887 | # PCMCIA network device support | ||
888 | # | ||
889 | # CONFIG_NET_PCMCIA is not set | ||
890 | |||
891 | # | ||
892 | # Wan interfaces | ||
893 | # | ||
894 | # CONFIG_WAN is not set | ||
895 | # CONFIG_FDDI is not set | ||
896 | # CONFIG_HIPPI is not set | ||
897 | CONFIG_PPP=y | ||
898 | CONFIG_PPP_MULTILINK=y | ||
899 | # CONFIG_PPP_FILTER is not set | ||
900 | CONFIG_PPP_ASYNC=y | ||
901 | CONFIG_PPP_SYNC_TTY=m | ||
902 | CONFIG_PPP_DEFLATE=y | ||
903 | CONFIG_PPP_BSDCOMP=m | ||
904 | # CONFIG_PPP_MPPE is not set | ||
905 | # CONFIG_PPPOE is not set | ||
906 | # CONFIG_SLIP is not set | ||
907 | # CONFIG_NET_FC is not set | ||
908 | # CONFIG_SHAPER is not set | ||
909 | # CONFIG_NETCONSOLE is not set | ||
910 | # CONFIG_NETPOLL is not set | ||
911 | # CONFIG_NET_POLL_CONTROLLER is not set | ||
912 | |||
913 | # | ||
914 | # ISDN subsystem | ||
915 | # | ||
916 | # CONFIG_ISDN is not set | ||
917 | |||
918 | # | ||
919 | # Telephony Support | ||
920 | # | ||
921 | # CONFIG_PHONE is not set | ||
922 | |||
923 | # | ||
924 | # Input device support | ||
925 | # | ||
926 | CONFIG_INPUT=y | ||
927 | |||
928 | # | ||
929 | # Userland interfaces | ||
930 | # | ||
931 | CONFIG_INPUT_MOUSEDEV=y | ||
932 | CONFIG_INPUT_MOUSEDEV_PSAUX=y | ||
933 | CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 | ||
934 | CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 | ||
935 | # CONFIG_INPUT_JOYDEV is not set | ||
936 | # CONFIG_INPUT_TSDEV is not set | ||
937 | CONFIG_INPUT_EVDEV=y | ||
938 | # CONFIG_INPUT_EVBUG is not set | ||
939 | |||
940 | # | ||
941 | # Input Device Drivers | ||
942 | # | ||
943 | CONFIG_INPUT_KEYBOARD=y | ||
944 | # CONFIG_KEYBOARD_ATKBD is not set | ||
945 | # CONFIG_KEYBOARD_SUNKBD is not set | ||
946 | # CONFIG_KEYBOARD_LKKBD is not set | ||
947 | # CONFIG_KEYBOARD_XTKBD is not set | ||
948 | # CONFIG_KEYBOARD_NEWTON is not set | ||
949 | CONFIG_INPUT_MOUSE=y | ||
950 | # CONFIG_MOUSE_PS2 is not set | ||
951 | # CONFIG_MOUSE_SERIAL is not set | ||
952 | # CONFIG_MOUSE_VSXXXAA is not set | ||
953 | # CONFIG_INPUT_JOYSTICK is not set | ||
954 | # CONFIG_INPUT_TOUCHSCREEN is not set | ||
955 | # CONFIG_INPUT_MISC is not set | ||
956 | |||
957 | # | ||
958 | # Hardware I/O ports | ||
959 | # | ||
960 | CONFIG_SERIO=y | ||
961 | # CONFIG_SERIO_I8042 is not set | ||
962 | # CONFIG_SERIO_SERPORT is not set | ||
963 | # CONFIG_SERIO_PCIPS2 is not set | ||
964 | # CONFIG_SERIO_RAW is not set | ||
965 | # CONFIG_GAMEPORT is not set | ||
966 | |||
967 | # | ||
968 | # Character devices | ||
969 | # | ||
970 | CONFIG_VT=y | ||
971 | CONFIG_VT_CONSOLE=y | ||
972 | CONFIG_HW_CONSOLE=y | ||
973 | # CONFIG_SERIAL_NONSTANDARD is not set | ||
974 | |||
975 | # | ||
976 | # Serial drivers | ||
977 | # | ||
978 | CONFIG_SERIAL_8250=m | ||
979 | # CONFIG_SERIAL_8250_CS is not set | ||
980 | CONFIG_SERIAL_8250_NR_UARTS=4 | ||
981 | # CONFIG_SERIAL_8250_EXTENDED is not set | ||
982 | |||
983 | # | ||
984 | # Non-8250 serial port support | ||
985 | # | ||
986 | CONFIG_SERIAL_CORE=m | ||
987 | # CONFIG_SERIAL_PMACZILOG is not set | ||
988 | # CONFIG_SERIAL_JSM is not set | ||
989 | CONFIG_UNIX98_PTYS=y | ||
990 | CONFIG_LEGACY_PTYS=y | ||
991 | CONFIG_LEGACY_PTY_COUNT=256 | ||
992 | |||
993 | # | ||
994 | # IPMI | ||
995 | # | ||
996 | # CONFIG_IPMI_HANDLER is not set | ||
997 | |||
998 | # | ||
999 | # Watchdog Cards | ||
1000 | # | ||
1001 | # CONFIG_WATCHDOG is not set | ||
1002 | CONFIG_NVRAM=y | ||
1003 | CONFIG_GEN_RTC=y | ||
1004 | # CONFIG_GEN_RTC_X is not set | ||
1005 | # CONFIG_DTLK is not set | ||
1006 | # CONFIG_R3964 is not set | ||
1007 | # CONFIG_APPLICOM is not set | ||
1008 | |||
1009 | # | ||
1010 | # Ftape, the floppy tape device driver | ||
1011 | # | ||
1012 | CONFIG_AGP=m | ||
1013 | CONFIG_AGP_UNINORTH=m | ||
1014 | CONFIG_DRM=m | ||
1015 | # CONFIG_DRM_TDFX is not set | ||
1016 | CONFIG_DRM_R128=m | ||
1017 | CONFIG_DRM_RADEON=m | ||
1018 | # CONFIG_DRM_MGA is not set | ||
1019 | # CONFIG_DRM_SIS is not set | ||
1020 | # CONFIG_DRM_VIA is not set | ||
1021 | # CONFIG_DRM_SAVAGE is not set | ||
1022 | |||
1023 | # | ||
1024 | # PCMCIA character devices | ||
1025 | # | ||
1026 | # CONFIG_SYNCLINK_CS is not set | ||
1027 | # CONFIG_CARDMAN_4000 is not set | ||
1028 | # CONFIG_CARDMAN_4040 is not set | ||
1029 | # CONFIG_RAW_DRIVER is not set | ||
1030 | |||
1031 | # | ||
1032 | # TPM devices | ||
1033 | # | ||
1034 | # CONFIG_TCG_TPM is not set | ||
1035 | # CONFIG_TELCLOCK is not set | ||
1036 | |||
1037 | # | ||
1038 | # I2C support | ||
1039 | # | ||
1040 | CONFIG_I2C=y | ||
1041 | CONFIG_I2C_CHARDEV=m | ||
1042 | |||
1043 | # | ||
1044 | # I2C Algorithms | ||
1045 | # | ||
1046 | CONFIG_I2C_ALGOBIT=y | ||
1047 | # CONFIG_I2C_ALGOPCF is not set | ||
1048 | # CONFIG_I2C_ALGOPCA is not set | ||
1049 | |||
1050 | # | ||
1051 | # I2C Hardware Bus support | ||
1052 | # | ||
1053 | # CONFIG_I2C_ALI1535 is not set | ||
1054 | # CONFIG_I2C_ALI1563 is not set | ||
1055 | # CONFIG_I2C_ALI15X3 is not set | ||
1056 | # CONFIG_I2C_AMD756 is not set | ||
1057 | # CONFIG_I2C_AMD8111 is not set | ||
1058 | # CONFIG_I2C_I801 is not set | ||
1059 | # CONFIG_I2C_I810 is not set | ||
1060 | # CONFIG_I2C_PIIX4 is not set | ||
1061 | CONFIG_I2C_KEYWEST=m | ||
1062 | # CONFIG_I2C_MPC is not set | ||
1063 | # CONFIG_I2C_NFORCE2 is not set | ||
1064 | # CONFIG_I2C_PARPORT_LIGHT is not set | ||
1065 | # CONFIG_I2C_PROSAVAGE is not set | ||
1066 | # CONFIG_I2C_SAVAGE4 is not set | ||
1067 | # CONFIG_SCx200_ACB is not set | ||
1068 | # CONFIG_I2C_SIS5595 is not set | ||
1069 | # CONFIG_I2C_SIS630 is not set | ||
1070 | # CONFIG_I2C_SIS96X is not set | ||
1071 | # CONFIG_I2C_STUB is not set | ||
1072 | # CONFIG_I2C_VIA is not set | ||
1073 | # CONFIG_I2C_VIAPRO is not set | ||
1074 | # CONFIG_I2C_VOODOO3 is not set | ||
1075 | # CONFIG_I2C_PCA_ISA is not set | ||
1076 | |||
1077 | # | ||
1078 | # Miscellaneous I2C Chip support | ||
1079 | # | ||
1080 | # CONFIG_SENSORS_DS1337 is not set | ||
1081 | # CONFIG_SENSORS_DS1374 is not set | ||
1082 | # CONFIG_SENSORS_EEPROM is not set | ||
1083 | # CONFIG_SENSORS_PCF8574 is not set | ||
1084 | # CONFIG_SENSORS_PCA9539 is not set | ||
1085 | # CONFIG_SENSORS_PCF8591 is not set | ||
1086 | # CONFIG_SENSORS_RTC8564 is not set | ||
1087 | # CONFIG_SENSORS_M41T00 is not set | ||
1088 | # CONFIG_SENSORS_MAX6875 is not set | ||
1089 | # CONFIG_RTC_X1205_I2C is not set | ||
1090 | # CONFIG_I2C_DEBUG_CORE is not set | ||
1091 | # CONFIG_I2C_DEBUG_ALGO is not set | ||
1092 | # CONFIG_I2C_DEBUG_BUS is not set | ||
1093 | # CONFIG_I2C_DEBUG_CHIP is not set | ||
1094 | |||
1095 | # | ||
1096 | # Dallas's 1-wire bus | ||
1097 | # | ||
1098 | # CONFIG_W1 is not set | ||
1099 | |||
1100 | # | ||
1101 | # Hardware Monitoring support | ||
1102 | # | ||
1103 | # CONFIG_HWMON is not set | ||
1104 | # CONFIG_HWMON_VID is not set | ||
1105 | |||
1106 | # | ||
1107 | # Misc devices | ||
1108 | # | ||
1109 | |||
1110 | # | ||
1111 | # Multimedia Capabilities Port drivers | ||
1112 | # | ||
1113 | |||
1114 | # | ||
1115 | # Multimedia devices | ||
1116 | # | ||
1117 | # CONFIG_VIDEO_DEV is not set | ||
1118 | |||
1119 | # | ||
1120 | # Digital Video Broadcasting Devices | ||
1121 | # | ||
1122 | # CONFIG_DVB is not set | ||
1123 | |||
1124 | # | ||
1125 | # Graphics support | ||
1126 | # | ||
1127 | CONFIG_FB=y | ||
1128 | CONFIG_FB_CFB_FILLRECT=y | ||
1129 | CONFIG_FB_CFB_COPYAREA=y | ||
1130 | CONFIG_FB_CFB_IMAGEBLIT=y | ||
1131 | CONFIG_FB_MACMODES=y | ||
1132 | CONFIG_FB_MODE_HELPERS=y | ||
1133 | CONFIG_FB_TILEBLITTING=y | ||
1134 | # CONFIG_FB_CIRRUS is not set | ||
1135 | # CONFIG_FB_PM2 is not set | ||
1136 | # CONFIG_FB_CYBER2000 is not set | ||
1137 | CONFIG_FB_OF=y | ||
1138 | CONFIG_FB_CONTROL=y | ||
1139 | CONFIG_FB_PLATINUM=y | ||
1140 | CONFIG_FB_VALKYRIE=y | ||
1141 | CONFIG_FB_CT65550=y | ||
1142 | # CONFIG_FB_ASILIANT is not set | ||
1143 | CONFIG_FB_IMSTT=y | ||
1144 | # CONFIG_FB_VGA16 is not set | ||
1145 | # CONFIG_FB_S1D13XXX is not set | ||
1146 | CONFIG_FB_NVIDIA=y | ||
1147 | CONFIG_FB_NVIDIA_I2C=y | ||
1148 | # CONFIG_FB_RIVA is not set | ||
1149 | CONFIG_FB_MATROX=y | ||
1150 | CONFIG_FB_MATROX_MILLENIUM=y | ||
1151 | CONFIG_FB_MATROX_MYSTIQUE=y | ||
1152 | # CONFIG_FB_MATROX_G is not set | ||
1153 | # CONFIG_FB_MATROX_I2C is not set | ||
1154 | # CONFIG_FB_MATROX_MULTIHEAD is not set | ||
1155 | # CONFIG_FB_RADEON_OLD is not set | ||
1156 | CONFIG_FB_RADEON=y | ||
1157 | CONFIG_FB_RADEON_I2C=y | ||
1158 | # CONFIG_FB_RADEON_DEBUG is not set | ||
1159 | CONFIG_FB_ATY128=y | ||
1160 | CONFIG_FB_ATY=y | ||
1161 | CONFIG_FB_ATY_CT=y | ||
1162 | # CONFIG_FB_ATY_GENERIC_LCD is not set | ||
1163 | # CONFIG_FB_ATY_XL_INIT is not set | ||
1164 | CONFIG_FB_ATY_GX=y | ||
1165 | # CONFIG_FB_SAVAGE is not set | ||
1166 | # CONFIG_FB_SIS is not set | ||
1167 | # CONFIG_FB_NEOMAGIC is not set | ||
1168 | # CONFIG_FB_KYRO is not set | ||
1169 | CONFIG_FB_3DFX=y | ||
1170 | # CONFIG_FB_3DFX_ACCEL is not set | ||
1171 | # CONFIG_FB_VOODOO1 is not set | ||
1172 | # CONFIG_FB_CYBLA is not set | ||
1173 | # CONFIG_FB_TRIDENT is not set | ||
1174 | # CONFIG_FB_VIRTUAL is not set | ||
1175 | |||
1176 | # | ||
1177 | # Console display driver support | ||
1178 | # | ||
1179 | # CONFIG_VGA_CONSOLE is not set | ||
1180 | CONFIG_DUMMY_CONSOLE=y | ||
1181 | CONFIG_FRAMEBUFFER_CONSOLE=y | ||
1182 | # CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set | ||
1183 | # CONFIG_FONTS is not set | ||
1184 | CONFIG_FONT_8x8=y | ||
1185 | CONFIG_FONT_8x16=y | ||
1186 | |||
1187 | # | ||
1188 | # Logo configuration | ||
1189 | # | ||
1190 | CONFIG_LOGO=y | ||
1191 | CONFIG_LOGO_LINUX_MONO=y | ||
1192 | CONFIG_LOGO_LINUX_VGA16=y | ||
1193 | CONFIG_LOGO_LINUX_CLUT224=y | ||
1194 | # CONFIG_BACKLIGHT_LCD_SUPPORT is not set | ||
1195 | |||
1196 | # | ||
1197 | # Sound | ||
1198 | # | ||
1199 | CONFIG_SOUND=m | ||
1200 | CONFIG_DMASOUND_PMAC=m | ||
1201 | CONFIG_DMASOUND=m | ||
1202 | |||
1203 | # | ||
1204 | # Advanced Linux Sound Architecture | ||
1205 | # | ||
1206 | CONFIG_SND=m | ||
1207 | CONFIG_SND_TIMER=m | ||
1208 | CONFIG_SND_PCM=m | ||
1209 | CONFIG_SND_HWDEP=m | ||
1210 | CONFIG_SND_RAWMIDI=m | ||
1211 | CONFIG_SND_SEQUENCER=m | ||
1212 | CONFIG_SND_SEQ_DUMMY=m | ||
1213 | CONFIG_SND_OSSEMUL=y | ||
1214 | CONFIG_SND_MIXER_OSS=m | ||
1215 | CONFIG_SND_PCM_OSS=m | ||
1216 | CONFIG_SND_SEQUENCER_OSS=y | ||
1217 | # CONFIG_SND_VERBOSE_PRINTK is not set | ||
1218 | # CONFIG_SND_DEBUG is not set | ||
1219 | CONFIG_SND_GENERIC_DRIVER=y | ||
1220 | |||
1221 | # | ||
1222 | # Generic devices | ||
1223 | # | ||
1224 | CONFIG_SND_DUMMY=m | ||
1225 | # CONFIG_SND_VIRMIDI is not set | ||
1226 | # CONFIG_SND_MTPAV is not set | ||
1227 | # CONFIG_SND_SERIAL_U16550 is not set | ||
1228 | # CONFIG_SND_MPU401 is not set | ||
1229 | |||
1230 | # | ||
1231 | # PCI devices | ||
1232 | # | ||
1233 | # CONFIG_SND_ALI5451 is not set | ||
1234 | # CONFIG_SND_ATIIXP is not set | ||
1235 | # CONFIG_SND_ATIIXP_MODEM is not set | ||
1236 | # CONFIG_SND_AU8810 is not set | ||
1237 | # CONFIG_SND_AU8820 is not set | ||
1238 | # CONFIG_SND_AU8830 is not set | ||
1239 | # CONFIG_SND_AZT3328 is not set | ||
1240 | # CONFIG_SND_BT87X is not set | ||
1241 | # CONFIG_SND_CS46XX is not set | ||
1242 | # CONFIG_SND_CS4281 is not set | ||
1243 | # CONFIG_SND_EMU10K1 is not set | ||
1244 | # CONFIG_SND_EMU10K1X is not set | ||
1245 | # CONFIG_SND_CA0106 is not set | ||
1246 | # CONFIG_SND_KORG1212 is not set | ||
1247 | # CONFIG_SND_MIXART is not set | ||
1248 | # CONFIG_SND_NM256 is not set | ||
1249 | # CONFIG_SND_RME32 is not set | ||
1250 | # CONFIG_SND_RME96 is not set | ||
1251 | # CONFIG_SND_RME9652 is not set | ||
1252 | # CONFIG_SND_HDSP is not set | ||
1253 | # CONFIG_SND_HDSPM is not set | ||
1254 | # CONFIG_SND_TRIDENT is not set | ||
1255 | # CONFIG_SND_YMFPCI is not set | ||
1256 | # CONFIG_SND_AD1889 is not set | ||
1257 | # CONFIG_SND_ALS4000 is not set | ||
1258 | # CONFIG_SND_CMIPCI is not set | ||
1259 | # CONFIG_SND_ENS1370 is not set | ||
1260 | # CONFIG_SND_ENS1371 is not set | ||
1261 | # CONFIG_SND_ES1938 is not set | ||
1262 | # CONFIG_SND_ES1968 is not set | ||
1263 | # CONFIG_SND_MAESTRO3 is not set | ||
1264 | # CONFIG_SND_FM801 is not set | ||
1265 | # CONFIG_SND_ICE1712 is not set | ||
1266 | # CONFIG_SND_ICE1724 is not set | ||
1267 | # CONFIG_SND_INTEL8X0 is not set | ||
1268 | # CONFIG_SND_INTEL8X0M is not set | ||
1269 | # CONFIG_SND_SONICVIBES is not set | ||
1270 | # CONFIG_SND_VIA82XX is not set | ||
1271 | # CONFIG_SND_VIA82XX_MODEM is not set | ||
1272 | # CONFIG_SND_VX222 is not set | ||
1273 | # CONFIG_SND_HDA_INTEL is not set | ||
1274 | |||
1275 | # | ||
1276 | # ALSA PowerMac devices | ||
1277 | # | ||
1278 | CONFIG_SND_POWERMAC=m | ||
1279 | # CONFIG_SND_POWERMAC_AUTO_DRC is not set | ||
1280 | |||
1281 | # | ||
1282 | # USB devices | ||
1283 | # | ||
1284 | CONFIG_SND_USB_AUDIO=m | ||
1285 | # CONFIG_SND_USB_USX2Y is not set | ||
1286 | |||
1287 | # | ||
1288 | # PCMCIA devices | ||
1289 | # | ||
1290 | |||
1291 | # | ||
1292 | # Open Sound System | ||
1293 | # | ||
1294 | # CONFIG_SOUND_PRIME is not set | ||
1295 | |||
1296 | # | ||
1297 | # USB support | ||
1298 | # | ||
1299 | CONFIG_USB_ARCH_HAS_HCD=y | ||
1300 | CONFIG_USB_ARCH_HAS_OHCI=y | ||
1301 | CONFIG_USB=y | ||
1302 | # CONFIG_USB_DEBUG is not set | ||
1303 | |||
1304 | # | ||
1305 | # Miscellaneous USB options | ||
1306 | # | ||
1307 | CONFIG_USB_DEVICEFS=y | ||
1308 | # CONFIG_USB_BANDWIDTH is not set | ||
1309 | CONFIG_USB_DYNAMIC_MINORS=y | ||
1310 | # CONFIG_USB_SUSPEND is not set | ||
1311 | # CONFIG_USB_OTG is not set | ||
1312 | |||
1313 | # | ||
1314 | # USB Host Controller Drivers | ||
1315 | # | ||
1316 | # CONFIG_USB_EHCI_HCD is not set | ||
1317 | # CONFIG_USB_ISP116X_HCD is not set | ||
1318 | CONFIG_USB_OHCI_HCD=y | ||
1319 | # CONFIG_USB_OHCI_BIG_ENDIAN is not set | ||
1320 | CONFIG_USB_OHCI_LITTLE_ENDIAN=y | ||
1321 | # CONFIG_USB_UHCI_HCD is not set | ||
1322 | # CONFIG_USB_SL811_HCD is not set | ||
1323 | |||
1324 | # | ||
1325 | # USB Device Class drivers | ||
1326 | # | ||
1327 | # CONFIG_OBSOLETE_OSS_USB_DRIVER is not set | ||
1328 | CONFIG_USB_ACM=m | ||
1329 | CONFIG_USB_PRINTER=m | ||
1330 | |||
1331 | # | ||
1332 | # NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' | ||
1333 | # | ||
1334 | |||
1335 | # | ||
1336 | # may also be needed; see USB_STORAGE Help for more information | ||
1337 | # | ||
1338 | # CONFIG_USB_STORAGE is not set | ||
1339 | |||
1340 | # | ||
1341 | # USB Input Devices | ||
1342 | # | ||
1343 | CONFIG_USB_HID=y | ||
1344 | CONFIG_USB_HIDINPUT=y | ||
1345 | # CONFIG_HID_FF is not set | ||
1346 | # CONFIG_USB_HIDDEV is not set | ||
1347 | # CONFIG_USB_AIPTEK is not set | ||
1348 | # CONFIG_USB_WACOM is not set | ||
1349 | # CONFIG_USB_ACECAD is not set | ||
1350 | # CONFIG_USB_KBTAB is not set | ||
1351 | # CONFIG_USB_POWERMATE is not set | ||
1352 | # CONFIG_USB_MTOUCH is not set | ||
1353 | # CONFIG_USB_ITMTOUCH is not set | ||
1354 | # CONFIG_USB_EGALAX is not set | ||
1355 | # CONFIG_USB_YEALINK is not set | ||
1356 | # CONFIG_USB_XPAD is not set | ||
1357 | # CONFIG_USB_ATI_REMOTE is not set | ||
1358 | # CONFIG_USB_KEYSPAN_REMOTE is not set | ||
1359 | CONFIG_USB_APPLETOUCH=y | ||
1360 | |||
1361 | # | ||
1362 | # USB Imaging devices | ||
1363 | # | ||
1364 | # CONFIG_USB_MDC800 is not set | ||
1365 | # CONFIG_USB_MICROTEK is not set | ||
1366 | |||
1367 | # | ||
1368 | # USB Multimedia devices | ||
1369 | # | ||
1370 | # CONFIG_USB_DABUSB is not set | ||
1371 | |||
1372 | # | ||
1373 | # Video4Linux support is needed for USB Multimedia device support | ||
1374 | # | ||
1375 | |||
1376 | # | ||
1377 | # USB Network Adapters | ||
1378 | # | ||
1379 | # CONFIG_USB_CATC is not set | ||
1380 | # CONFIG_USB_KAWETH is not set | ||
1381 | # CONFIG_USB_PEGASUS is not set | ||
1382 | # CONFIG_USB_RTL8150 is not set | ||
1383 | CONFIG_USB_USBNET=m | ||
1384 | CONFIG_USB_NET_AX8817X=m | ||
1385 | CONFIG_USB_NET_CDCETHER=m | ||
1386 | # CONFIG_USB_NET_GL620A is not set | ||
1387 | CONFIG_USB_NET_NET1080=m | ||
1388 | # CONFIG_USB_NET_PLUSB is not set | ||
1389 | # CONFIG_USB_NET_RNDIS_HOST is not set | ||
1390 | # CONFIG_USB_NET_CDC_SUBSET is not set | ||
1391 | CONFIG_USB_NET_ZAURUS=m | ||
1392 | # CONFIG_USB_ZD1201 is not set | ||
1393 | CONFIG_USB_MON=y | ||
1394 | |||
1395 | # | ||
1396 | # USB port drivers | ||
1397 | # | ||
1398 | |||
1399 | # | ||
1400 | # USB Serial Converter support | ||
1401 | # | ||
1402 | CONFIG_USB_SERIAL=m | ||
1403 | # CONFIG_USB_SERIAL_GENERIC is not set | ||
1404 | # CONFIG_USB_SERIAL_AIRPRIME is not set | ||
1405 | # CONFIG_USB_SERIAL_ANYDATA is not set | ||
1406 | # CONFIG_USB_SERIAL_BELKIN is not set | ||
1407 | # CONFIG_USB_SERIAL_WHITEHEAT is not set | ||
1408 | # CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set | ||
1409 | # CONFIG_USB_SERIAL_CP2101 is not set | ||
1410 | # CONFIG_USB_SERIAL_CYPRESS_M8 is not set | ||
1411 | # CONFIG_USB_SERIAL_EMPEG is not set | ||
1412 | # CONFIG_USB_SERIAL_FTDI_SIO is not set | ||
1413 | CONFIG_USB_SERIAL_VISOR=m | ||
1414 | CONFIG_USB_SERIAL_IPAQ=m | ||
1415 | # CONFIG_USB_SERIAL_IR is not set | ||
1416 | # CONFIG_USB_SERIAL_EDGEPORT is not set | ||
1417 | # CONFIG_USB_SERIAL_EDGEPORT_TI is not set | ||
1418 | # CONFIG_USB_SERIAL_GARMIN is not set | ||
1419 | # CONFIG_USB_SERIAL_IPW is not set | ||
1420 | CONFIG_USB_SERIAL_KEYSPAN_PDA=m | ||
1421 | CONFIG_USB_SERIAL_KEYSPAN=m | ||
1422 | CONFIG_USB_SERIAL_KEYSPAN_MPR=y | ||
1423 | CONFIG_USB_SERIAL_KEYSPAN_USA28=y | ||
1424 | CONFIG_USB_SERIAL_KEYSPAN_USA28X=y | ||
1425 | CONFIG_USB_SERIAL_KEYSPAN_USA28XA=y | ||
1426 | CONFIG_USB_SERIAL_KEYSPAN_USA28XB=y | ||
1427 | CONFIG_USB_SERIAL_KEYSPAN_USA19=y | ||
1428 | CONFIG_USB_SERIAL_KEYSPAN_USA18X=y | ||
1429 | CONFIG_USB_SERIAL_KEYSPAN_USA19W=y | ||
1430 | CONFIG_USB_SERIAL_KEYSPAN_USA19QW=y | ||
1431 | CONFIG_USB_SERIAL_KEYSPAN_USA19QI=y | ||
1432 | CONFIG_USB_SERIAL_KEYSPAN_USA49W=y | ||
1433 | CONFIG_USB_SERIAL_KEYSPAN_USA49WLC=y | ||
1434 | # CONFIG_USB_SERIAL_KLSI is not set | ||
1435 | # CONFIG_USB_SERIAL_KOBIL_SCT is not set | ||
1436 | # CONFIG_USB_SERIAL_MCT_U232 is not set | ||
1437 | # CONFIG_USB_SERIAL_PL2303 is not set | ||
1438 | # CONFIG_USB_SERIAL_HP4X is not set | ||
1439 | # CONFIG_USB_SERIAL_SAFE is not set | ||
1440 | # CONFIG_USB_SERIAL_TI is not set | ||
1441 | # CONFIG_USB_SERIAL_CYBERJACK is not set | ||
1442 | # CONFIG_USB_SERIAL_XIRCOM is not set | ||
1443 | # CONFIG_USB_SERIAL_OPTION is not set | ||
1444 | # CONFIG_USB_SERIAL_OMNINET is not set | ||
1445 | CONFIG_USB_EZUSB=y | ||
1446 | |||
1447 | # | ||
1448 | # USB Miscellaneous drivers | ||
1449 | # | ||
1450 | # CONFIG_USB_EMI62 is not set | ||
1451 | # CONFIG_USB_EMI26 is not set | ||
1452 | # CONFIG_USB_AUERSWALD is not set | ||
1453 | # CONFIG_USB_RIO500 is not set | ||
1454 | # CONFIG_USB_LEGOTOWER is not set | ||
1455 | # CONFIG_USB_LCD is not set | ||
1456 | # CONFIG_USB_LED is not set | ||
1457 | # CONFIG_USB_CYTHERM is not set | ||
1458 | # CONFIG_USB_PHIDGETKIT is not set | ||
1459 | # CONFIG_USB_PHIDGETSERVO is not set | ||
1460 | # CONFIG_USB_IDMOUSE is not set | ||
1461 | # CONFIG_USB_LD is not set | ||
1462 | # CONFIG_USB_TEST is not set | ||
1463 | |||
1464 | # | ||
1465 | # USB DSL modem support | ||
1466 | # | ||
1467 | |||
1468 | # | ||
1469 | # USB Gadget Support | ||
1470 | # | ||
1471 | # CONFIG_USB_GADGET is not set | ||
1472 | |||
1473 | # | ||
1474 | # MMC/SD Card support | ||
1475 | # | ||
1476 | # CONFIG_MMC is not set | ||
1477 | |||
1478 | # | ||
1479 | # InfiniBand support | ||
1480 | # | ||
1481 | # CONFIG_INFINIBAND is not set | ||
1482 | |||
1483 | # | ||
1484 | # SN Devices | ||
1485 | # | ||
1486 | |||
1487 | # | ||
1488 | # File systems | ||
1489 | # | ||
1490 | CONFIG_EXT2_FS=y | ||
1491 | # CONFIG_EXT2_FS_XATTR is not set | ||
1492 | # CONFIG_EXT2_FS_XIP is not set | ||
1493 | CONFIG_EXT3_FS=y | ||
1494 | CONFIG_EXT3_FS_XATTR=y | ||
1495 | # CONFIG_EXT3_FS_POSIX_ACL is not set | ||
1496 | # CONFIG_EXT3_FS_SECURITY is not set | ||
1497 | CONFIG_JBD=y | ||
1498 | # CONFIG_JBD_DEBUG is not set | ||
1499 | CONFIG_FS_MBCACHE=y | ||
1500 | # CONFIG_REISERFS_FS is not set | ||
1501 | # CONFIG_JFS_FS is not set | ||
1502 | # CONFIG_FS_POSIX_ACL is not set | ||
1503 | # CONFIG_XFS_FS is not set | ||
1504 | # CONFIG_MINIX_FS is not set | ||
1505 | # CONFIG_ROMFS_FS is not set | ||
1506 | CONFIG_INOTIFY=y | ||
1507 | # CONFIG_QUOTA is not set | ||
1508 | CONFIG_DNOTIFY=y | ||
1509 | # CONFIG_AUTOFS_FS is not set | ||
1510 | # CONFIG_AUTOFS4_FS is not set | ||
1511 | CONFIG_FUSE_FS=m | ||
1512 | |||
1513 | # | ||
1514 | # CD-ROM/DVD Filesystems | ||
1515 | # | ||
1516 | CONFIG_ISO9660_FS=y | ||
1517 | CONFIG_JOLIET=y | ||
1518 | CONFIG_ZISOFS=y | ||
1519 | CONFIG_ZISOFS_FS=y | ||
1520 | CONFIG_UDF_FS=m | ||
1521 | CONFIG_UDF_NLS=y | ||
1522 | |||
1523 | # | ||
1524 | # DOS/FAT/NT Filesystems | ||
1525 | # | ||
1526 | CONFIG_FAT_FS=m | ||
1527 | CONFIG_MSDOS_FS=m | ||
1528 | CONFIG_VFAT_FS=m | ||
1529 | CONFIG_FAT_DEFAULT_CODEPAGE=437 | ||
1530 | CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" | ||
1531 | # CONFIG_NTFS_FS is not set | ||
1532 | |||
1533 | # | ||
1534 | # Pseudo filesystems | ||
1535 | # | ||
1536 | CONFIG_PROC_FS=y | ||
1537 | CONFIG_PROC_KCORE=y | ||
1538 | CONFIG_SYSFS=y | ||
1539 | CONFIG_TMPFS=y | ||
1540 | # CONFIG_HUGETLB_PAGE is not set | ||
1541 | CONFIG_RAMFS=y | ||
1542 | CONFIG_RELAYFS_FS=m | ||
1543 | |||
1544 | # | ||
1545 | # Miscellaneous filesystems | ||
1546 | # | ||
1547 | # CONFIG_ADFS_FS is not set | ||
1548 | # CONFIG_AFFS_FS is not set | ||
1549 | CONFIG_HFS_FS=m | ||
1550 | CONFIG_HFSPLUS_FS=m | ||
1551 | # CONFIG_BEFS_FS is not set | ||
1552 | # CONFIG_BFS_FS is not set | ||
1553 | # CONFIG_EFS_FS is not set | ||
1554 | # CONFIG_CRAMFS is not set | ||
1555 | # CONFIG_VXFS_FS is not set | ||
1556 | # CONFIG_HPFS_FS is not set | ||
1557 | # CONFIG_QNX4FS_FS is not set | ||
1558 | # CONFIG_SYSV_FS is not set | ||
1559 | # CONFIG_UFS_FS is not set | ||
1560 | |||
1561 | # | ||
1562 | # Network File Systems | ||
1563 | # | ||
1564 | CONFIG_NFS_FS=y | ||
1565 | # CONFIG_NFS_V3 is not set | ||
1566 | # CONFIG_NFS_V4 is not set | ||
1567 | # CONFIG_NFS_DIRECTIO is not set | ||
1568 | CONFIG_NFSD=y | ||
1569 | # CONFIG_NFSD_V3 is not set | ||
1570 | # CONFIG_NFSD_TCP is not set | ||
1571 | CONFIG_LOCKD=y | ||
1572 | CONFIG_EXPORTFS=y | ||
1573 | CONFIG_NFS_COMMON=y | ||
1574 | CONFIG_SUNRPC=y | ||
1575 | # CONFIG_RPCSEC_GSS_KRB5 is not set | ||
1576 | # CONFIG_RPCSEC_GSS_SPKM3 is not set | ||
1577 | CONFIG_SMB_FS=m | ||
1578 | # CONFIG_SMB_NLS_DEFAULT is not set | ||
1579 | # CONFIG_CIFS is not set | ||
1580 | # CONFIG_NCP_FS is not set | ||
1581 | # CONFIG_CODA_FS is not set | ||
1582 | # CONFIG_AFS_FS is not set | ||
1583 | # CONFIG_9P_FS is not set | ||
1584 | |||
1585 | # | ||
1586 | # Partition Types | ||
1587 | # | ||
1588 | CONFIG_PARTITION_ADVANCED=y | ||
1589 | # CONFIG_ACORN_PARTITION is not set | ||
1590 | # CONFIG_OSF_PARTITION is not set | ||
1591 | # CONFIG_AMIGA_PARTITION is not set | ||
1592 | # CONFIG_ATARI_PARTITION is not set | ||
1593 | CONFIG_MAC_PARTITION=y | ||
1594 | CONFIG_MSDOS_PARTITION=y | ||
1595 | # CONFIG_BSD_DISKLABEL is not set | ||
1596 | # CONFIG_MINIX_SUBPARTITION is not set | ||
1597 | # CONFIG_SOLARIS_X86_PARTITION is not set | ||
1598 | # CONFIG_UNIXWARE_DISKLABEL is not set | ||
1599 | # CONFIG_LDM_PARTITION is not set | ||
1600 | # CONFIG_SGI_PARTITION is not set | ||
1601 | # CONFIG_ULTRIX_PARTITION is not set | ||
1602 | # CONFIG_SUN_PARTITION is not set | ||
1603 | # CONFIG_EFI_PARTITION is not set | ||
1604 | |||
1605 | # | ||
1606 | # Native Language Support | ||
1607 | # | ||
1608 | CONFIG_NLS=y | ||
1609 | CONFIG_NLS_DEFAULT="iso8859-1" | ||
1610 | CONFIG_NLS_CODEPAGE_437=m | ||
1611 | # CONFIG_NLS_CODEPAGE_737 is not set | ||
1612 | # CONFIG_NLS_CODEPAGE_775 is not set | ||
1613 | # CONFIG_NLS_CODEPAGE_850 is not set | ||
1614 | # CONFIG_NLS_CODEPAGE_852 is not set | ||
1615 | # CONFIG_NLS_CODEPAGE_855 is not set | ||
1616 | # CONFIG_NLS_CODEPAGE_857 is not set | ||
1617 | # CONFIG_NLS_CODEPAGE_860 is not set | ||
1618 | # CONFIG_NLS_CODEPAGE_861 is not set | ||
1619 | # CONFIG_NLS_CODEPAGE_862 is not set | ||
1620 | # CONFIG_NLS_CODEPAGE_863 is not set | ||
1621 | # CONFIG_NLS_CODEPAGE_864 is not set | ||
1622 | # CONFIG_NLS_CODEPAGE_865 is not set | ||
1623 | # CONFIG_NLS_CODEPAGE_866 is not set | ||
1624 | # CONFIG_NLS_CODEPAGE_869 is not set | ||
1625 | # CONFIG_NLS_CODEPAGE_936 is not set | ||
1626 | # CONFIG_NLS_CODEPAGE_950 is not set | ||
1627 | # CONFIG_NLS_CODEPAGE_932 is not set | ||
1628 | # CONFIG_NLS_CODEPAGE_949 is not set | ||
1629 | # CONFIG_NLS_CODEPAGE_874 is not set | ||
1630 | # CONFIG_NLS_ISO8859_8 is not set | ||
1631 | # CONFIG_NLS_CODEPAGE_1250 is not set | ||
1632 | # CONFIG_NLS_CODEPAGE_1251 is not set | ||
1633 | # CONFIG_NLS_ASCII is not set | ||
1634 | CONFIG_NLS_ISO8859_1=m | ||
1635 | # CONFIG_NLS_ISO8859_2 is not set | ||
1636 | # CONFIG_NLS_ISO8859_3 is not set | ||
1637 | # CONFIG_NLS_ISO8859_4 is not set | ||
1638 | # CONFIG_NLS_ISO8859_5 is not set | ||
1639 | # CONFIG_NLS_ISO8859_6 is not set | ||
1640 | # CONFIG_NLS_ISO8859_7 is not set | ||
1641 | # CONFIG_NLS_ISO8859_9 is not set | ||
1642 | # CONFIG_NLS_ISO8859_13 is not set | ||
1643 | # CONFIG_NLS_ISO8859_14 is not set | ||
1644 | # CONFIG_NLS_ISO8859_15 is not set | ||
1645 | # CONFIG_NLS_KOI8_R is not set | ||
1646 | # CONFIG_NLS_KOI8_U is not set | ||
1647 | CONFIG_NLS_UTF8=m | ||
1648 | |||
1649 | # | ||
1650 | # Library routines | ||
1651 | # | ||
1652 | CONFIG_CRC_CCITT=y | ||
1653 | CONFIG_CRC16=y | ||
1654 | CONFIG_CRC32=y | ||
1655 | # CONFIG_LIBCRC32C is not set | ||
1656 | CONFIG_ZLIB_INFLATE=y | ||
1657 | CONFIG_ZLIB_DEFLATE=y | ||
1658 | CONFIG_TEXTSEARCH=y | ||
1659 | CONFIG_TEXTSEARCH_KMP=m | ||
1660 | CONFIG_TEXTSEARCH_BM=m | ||
1661 | CONFIG_TEXTSEARCH_FSM=m | ||
1662 | |||
1663 | # | ||
1664 | # Instrumentation Support | ||
1665 | # | ||
1666 | CONFIG_PROFILING=y | ||
1667 | CONFIG_OPROFILE=y | ||
1668 | |||
1669 | # | ||
1670 | # Kernel hacking | ||
1671 | # | ||
1672 | # CONFIG_PRINTK_TIME is not set | ||
1673 | CONFIG_DEBUG_KERNEL=y | ||
1674 | # CONFIG_MAGIC_SYSRQ is not set | ||
1675 | CONFIG_LOG_BUF_SHIFT=14 | ||
1676 | CONFIG_DETECT_SOFTLOCKUP=y | ||
1677 | # CONFIG_SCHEDSTATS is not set | ||
1678 | # CONFIG_DEBUG_SLAB is not set | ||
1679 | # CONFIG_DEBUG_SPINLOCK is not set | ||
1680 | # CONFIG_DEBUG_SPINLOCK_SLEEP is not set | ||
1681 | # CONFIG_DEBUG_KOBJECT is not set | ||
1682 | # CONFIG_DEBUG_INFO is not set | ||
1683 | # CONFIG_DEBUG_FS is not set | ||
1684 | # CONFIG_DEBUG_VM is not set | ||
1685 | # CONFIG_RCU_TORTURE_TEST is not set | ||
1686 | CONFIG_DEBUGGER=y | ||
1687 | CONFIG_XMON=y | ||
1688 | CONFIG_XMON_DEFAULT=y | ||
1689 | # CONFIG_BDI_SWITCH is not set | ||
1690 | CONFIG_BOOTX_TEXT=y | ||
1691 | |||
1692 | # | ||
1693 | # Security options | ||
1694 | # | ||
1695 | # CONFIG_KEYS is not set | ||
1696 | # CONFIG_SECURITY is not set | ||
1697 | |||
1698 | # | ||
1699 | # Cryptographic options | ||
1700 | # | ||
1701 | CONFIG_CRYPTO=y | ||
1702 | # CONFIG_CRYPTO_HMAC is not set | ||
1703 | # CONFIG_CRYPTO_NULL is not set | ||
1704 | # CONFIG_CRYPTO_MD4 is not set | ||
1705 | # CONFIG_CRYPTO_MD5 is not set | ||
1706 | # CONFIG_CRYPTO_SHA1 is not set | ||
1707 | # CONFIG_CRYPTO_SHA256 is not set | ||
1708 | # CONFIG_CRYPTO_SHA512 is not set | ||
1709 | # CONFIG_CRYPTO_WP512 is not set | ||
1710 | # CONFIG_CRYPTO_TGR192 is not set | ||
1711 | # CONFIG_CRYPTO_DES is not set | ||
1712 | # CONFIG_CRYPTO_BLOWFISH is not set | ||
1713 | # CONFIG_CRYPTO_TWOFISH is not set | ||
1714 | # CONFIG_CRYPTO_SERPENT is not set | ||
1715 | CONFIG_CRYPTO_AES=m | ||
1716 | # CONFIG_CRYPTO_CAST5 is not set | ||
1717 | # CONFIG_CRYPTO_CAST6 is not set | ||
1718 | # CONFIG_CRYPTO_TEA is not set | ||
1719 | CONFIG_CRYPTO_ARC4=m | ||
1720 | # CONFIG_CRYPTO_KHAZAD is not set | ||
1721 | # CONFIG_CRYPTO_ANUBIS is not set | ||
1722 | # CONFIG_CRYPTO_DEFLATE is not set | ||
1723 | CONFIG_CRYPTO_MICHAEL_MIC=m | ||
1724 | # CONFIG_CRYPTO_CRC32C is not set | ||
1725 | # CONFIG_CRYPTO_TEST is not set | ||
1726 | |||
1727 | # | ||
1728 | # Hardware crypto devices | ||
1729 | # | ||
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 9ed551b6c172..6e03b595b6c8 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile | |||
@@ -17,11 +17,11 @@ obj-y += vdso32/ | |||
17 | obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ | 17 | obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \ |
18 | signal_64.o ptrace32.o systbl.o \ | 18 | signal_64.o ptrace32.o systbl.o \ |
19 | paca.o ioctl32.o cpu_setup_power4.o \ | 19 | paca.o ioctl32.o cpu_setup_power4.o \ |
20 | firmware.o sysfs.o udbg.o idle_64.o | 20 | firmware.o sysfs.o idle_64.o |
21 | obj-$(CONFIG_PPC64) += vdso64/ | 21 | obj-$(CONFIG_PPC64) += vdso64/ |
22 | obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o | 22 | obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o |
23 | obj-$(CONFIG_POWER4) += idle_power4.o | 23 | obj-$(CONFIG_POWER4) += idle_power4.o |
24 | obj-$(CONFIG_PPC_OF) += of_device.o | 24 | obj-$(CONFIG_PPC_OF) += of_device.o prom_parse.o |
25 | procfs-$(CONFIG_PPC64) := proc_ppc64.o | 25 | procfs-$(CONFIG_PPC64) := proc_ppc64.o |
26 | obj-$(CONFIG_PROC_FS) += $(procfs-y) | 26 | obj-$(CONFIG_PROC_FS) += $(procfs-y) |
27 | rtaspci-$(CONFIG_PPC64) := rtas_pci.o | 27 | rtaspci-$(CONFIG_PPC64) := rtas_pci.o |
@@ -30,12 +30,10 @@ obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o | |||
30 | obj-$(CONFIG_RTAS_PROC) += rtas-proc.o | 30 | obj-$(CONFIG_RTAS_PROC) += rtas-proc.o |
31 | obj-$(CONFIG_LPARCFG) += lparcfg.o | 31 | obj-$(CONFIG_LPARCFG) += lparcfg.o |
32 | obj-$(CONFIG_IBMVIO) += vio.o | 32 | obj-$(CONFIG_IBMVIO) += vio.o |
33 | obj-$(CONFIG_IBMEBUS) += ibmebus.o | ||
33 | obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o | 34 | obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o |
34 | obj-$(CONFIG_PPC_PSERIES) += udbg_16550.o | ||
35 | obj-$(CONFIG_PPC_MAPLE) += udbg_16550.o | ||
36 | udbgscc-$(CONFIG_PPC64) := udbg_scc.o | ||
37 | obj-$(CONFIG_PPC_PMAC) += $(udbgscc-y) | ||
38 | obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o | 35 | obj64-$(CONFIG_PPC_MULTIPLATFORM) += nvram_64.o |
36 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | ||
39 | 37 | ||
40 | ifeq ($(CONFIG_PPC_MERGE),y) | 38 | ifeq ($(CONFIG_PPC_MERGE),y) |
41 | 39 | ||
@@ -48,25 +46,25 @@ extra-$(CONFIG_8xx) := head_8xx.o | |||
48 | extra-y += vmlinux.lds | 46 | extra-y += vmlinux.lds |
49 | 47 | ||
50 | obj-y += process.o init_task.o time.o \ | 48 | obj-y += process.o init_task.o time.o \ |
51 | prom.o traps.o setup-common.o | 49 | prom.o traps.o setup-common.o udbg.o |
52 | obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o systbl.o | 50 | obj-$(CONFIG_PPC32) += entry_32.o setup_32.o misc_32.o systbl.o |
53 | obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o | 51 | obj-$(CONFIG_PPC64) += misc_64.o dma_64.o iommu.o |
54 | obj-$(CONFIG_PPC_OF) += prom_init.o | 52 | obj-$(CONFIG_PPC_MULTIPLATFORM) += prom_init.o |
55 | obj-$(CONFIG_MODULES) += ppc_ksyms.o | 53 | obj-$(CONFIG_MODULES) += ppc_ksyms.o |
56 | obj-$(CONFIG_BOOTX_TEXT) += btext.o | 54 | obj-$(CONFIG_BOOTX_TEXT) += btext.o |
57 | obj-$(CONFIG_6xx) += idle_6xx.o | 55 | obj-$(CONFIG_6xx) += idle_6xx.o |
58 | obj-$(CONFIG_SMP) += smp.o | 56 | obj-$(CONFIG_SMP) += smp.o |
59 | obj-$(CONFIG_KPROBES) += kprobes.o | 57 | obj-$(CONFIG_KPROBES) += kprobes.o |
60 | 58 | obj-$(CONFIG_SERIAL_8250) += legacy_serial.o udbg_16550.o | |
61 | module-$(CONFIG_PPC64) += module_64.o | 59 | module-$(CONFIG_PPC64) += module_64.o |
62 | obj-$(CONFIG_MODULES) += $(module-y) | 60 | obj-$(CONFIG_MODULES) += $(module-y) |
63 | 61 | ||
64 | pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o pci_iommu.o \ | 62 | pci64-$(CONFIG_PPC64) += pci_64.o pci_dn.o pci_iommu.o \ |
65 | pci_direct_iommu.o iomap.o | 63 | pci_direct_iommu.o iomap.o |
66 | obj-$(CONFIG_PCI) += $(pci64-y) | 64 | obj-$(CONFIG_PCI) += $(pci64-y) |
67 | 65 | kexec-$(CONFIG_PPC64) := machine_kexec_64.o | |
68 | kexec64-$(CONFIG_PPC64) += machine_kexec_64.o | 66 | kexec-$(CONFIG_PPC32) := machine_kexec_32.o |
69 | obj-$(CONFIG_KEXEC) += $(kexec64-y) | 67 | obj-$(CONFIG_KEXEC) += machine_kexec.o crash.o $(kexec-y) |
70 | 68 | ||
71 | ifeq ($(CONFIG_PPC_ISERIES),y) | 69 | ifeq ($(CONFIG_PPC_ISERIES),y) |
72 | $(obj)/head_64.o: $(obj)/lparmap.s | 70 | $(obj)/head_64.o: $(obj)/lparmap.s |
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c index 91538d2445bf..56399c5c931a 100644 --- a/arch/powerpc/kernel/asm-offsets.c +++ b/arch/powerpc/kernel/asm-offsets.c | |||
@@ -92,9 +92,9 @@ int main(void) | |||
92 | 92 | ||
93 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); | 93 | DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); |
94 | DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); | 94 | DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count)); |
95 | DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror)); | 95 | DEFINE(TI_SIGFRAME, offsetof(struct thread_info, nvgprs_frame)); |
96 | #ifdef CONFIG_PPC32 | ||
97 | DEFINE(TI_TASK, offsetof(struct thread_info, task)); | 96 | DEFINE(TI_TASK, offsetof(struct thread_info, task)); |
97 | #ifdef CONFIG_PPC32 | ||
98 | DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); | 98 | DEFINE(TI_EXECDOMAIN, offsetof(struct thread_info, exec_domain)); |
99 | DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); | 99 | DEFINE(TI_CPU, offsetof(struct thread_info, cpu)); |
100 | #endif /* CONFIG_PPC32 */ | 100 | #endif /* CONFIG_PPC32 */ |
@@ -131,11 +131,9 @@ int main(void) | |||
131 | DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas)); | 131 | DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas)); |
132 | DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas)); | 132 | DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas)); |
133 | #endif /* CONFIG_HUGETLB_PAGE */ | 133 | #endif /* CONFIG_HUGETLB_PAGE */ |
134 | DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr)); | ||
135 | DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); | 134 | DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen)); |
136 | DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); | 135 | DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc)); |
137 | DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb)); | 136 | DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb)); |
138 | DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi)); | ||
139 | DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); | 137 | DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp)); |
140 | DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca)); | 138 | DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca)); |
141 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); | 139 | DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id)); |
diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index bdfba92b2b38..6223d39177cb 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c | |||
@@ -31,15 +31,18 @@ static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb); | |||
31 | static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb); | 31 | static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb); |
32 | static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb); | 32 | static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb); |
33 | 33 | ||
34 | static int g_loc_X; | 34 | #define __force_data __attribute__((__section__(".data"))) |
35 | static int g_loc_Y; | ||
36 | static int g_max_loc_X; | ||
37 | static int g_max_loc_Y; | ||
38 | 35 | ||
39 | static int dispDeviceRowBytes; | 36 | static int g_loc_X __force_data; |
40 | static int dispDeviceDepth; | 37 | static int g_loc_Y __force_data; |
41 | static int dispDeviceRect[4]; | 38 | static int g_max_loc_X __force_data; |
42 | static unsigned char *dispDeviceBase, *logicalDisplayBase; | 39 | static int g_max_loc_Y __force_data; |
40 | |||
41 | static int dispDeviceRowBytes __force_data; | ||
42 | static int dispDeviceDepth __force_data; | ||
43 | static int dispDeviceRect[4] __force_data; | ||
44 | static unsigned char *dispDeviceBase __force_data; | ||
45 | static unsigned char *logicalDisplayBase __force_data; | ||
43 | 46 | ||
44 | unsigned long disp_BAT[2] __initdata = {0, 0}; | 47 | unsigned long disp_BAT[2] __initdata = {0, 0}; |
45 | 48 | ||
@@ -47,7 +50,7 @@ unsigned long disp_BAT[2] __initdata = {0, 0}; | |||
47 | 50 | ||
48 | static unsigned char vga_font[cmapsz]; | 51 | static unsigned char vga_font[cmapsz]; |
49 | 52 | ||
50 | int boot_text_mapped; | 53 | int boot_text_mapped __force_data = 0; |
51 | int force_printk_to_btext = 0; | 54 | int force_printk_to_btext = 0; |
52 | 55 | ||
53 | #ifdef CONFIG_PPC32 | 56 | #ifdef CONFIG_PPC32 |
@@ -57,7 +60,7 @@ int force_printk_to_btext = 0; | |||
57 | * | 60 | * |
58 | * The display is mapped to virtual address 0xD0000000, rather | 61 | * The display is mapped to virtual address 0xD0000000, rather |
59 | * than 1:1, because some CHRP machines put the frame buffer | 62 | * than 1:1, because some CHRP machines put the frame buffer |
60 | * in the region starting at 0xC0000000 (KERNELBASE). | 63 | * in the region starting at 0xC0000000 (PAGE_OFFSET). |
61 | * This mapping is temporary and will disappear as soon as the | 64 | * This mapping is temporary and will disappear as soon as the |
62 | * setup done by MMU_Init() is applied. | 65 | * setup done by MMU_Init() is applied. |
63 | * | 66 | * |
@@ -66,10 +69,9 @@ int force_printk_to_btext = 0; | |||
66 | * is really badly aligned, but I didn't encounter this case | 69 | * is really badly aligned, but I didn't encounter this case |
67 | * yet. | 70 | * yet. |
68 | */ | 71 | */ |
69 | void __init | 72 | void __init btext_prepare_BAT(void) |
70 | btext_prepare_BAT(void) | ||
71 | { | 73 | { |
72 | unsigned long vaddr = KERNELBASE + 0x10000000; | 74 | unsigned long vaddr = PAGE_OFFSET + 0x10000000; |
73 | unsigned long addr; | 75 | unsigned long addr; |
74 | unsigned long lowbits; | 76 | unsigned long lowbits; |
75 | 77 | ||
@@ -95,12 +97,13 @@ btext_prepare_BAT(void) | |||
95 | } | 97 | } |
96 | #endif | 98 | #endif |
97 | 99 | ||
98 | /* This function will enable the early boot text when doing OF booting. This | 100 | |
99 | * way, xmon output should work too | 101 | /* This function can be used to enable the early boot text when doing |
102 | * OF booting or within bootx init. It must be followed by a btext_unmap() | ||
103 | * call before the logical address becomes unusable. | ||
100 | */ | 104 | */ |
101 | void __init | 105 | void __init btext_setup_display(int width, int height, int depth, int pitch, |
102 | btext_setup_display(int width, int height, int depth, int pitch, | 106 | unsigned long address) |
103 | unsigned long address) | ||
104 | { | 107 | { |
105 | g_loc_X = 0; | 108 | g_loc_X = 0; |
106 | g_loc_Y = 0; | 109 | g_loc_Y = 0; |
@@ -116,6 +119,11 @@ btext_setup_display(int width, int height, int depth, int pitch, | |||
116 | boot_text_mapped = 1; | 119 | boot_text_mapped = 1; |
117 | } | 120 | } |
118 | 121 | ||
122 | void __init btext_unmap(void) | ||
123 | { | ||
124 | boot_text_mapped = 0; | ||
125 | } | ||
126 | |||
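As a rough illustration of the contract described in the comment above, a hypothetical early-boot caller would pair the two calls like this (sketch only; the geometry, pitch and framebuffer address are made up, in practice they come from the firmware or the bootx info block):

	/* Set up btext on a hypothetical 1024x768, 8bpp display, print a
	 * line, then unmap before the temporary mapping goes away.
	 */
	btext_setup_display(1024, 768, 8, 1024, 0x84000000UL);
	btext_drawstring("early boot console up\n");
	btext_unmap();
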
119 | /* Here's a small text engine to use during early boot | 127 | /* Here's a small text engine to use during early boot |
120 | * or for debugging purposes | 128 | * or for debugging purposes |
121 | * | 129 | * |
@@ -127,7 +135,7 @@ btext_setup_display(int width, int height, int depth, int pitch, | |||
127 | * changes. | 135 | * changes. |
128 | */ | 136 | */ |
129 | 137 | ||
130 | void map_boot_text(void) | 138 | static void map_boot_text(void) |
131 | { | 139 | { |
132 | unsigned long base, offset, size; | 140 | unsigned long base, offset, size; |
133 | unsigned char *vbase; | 141 | unsigned char *vbase; |
@@ -175,8 +183,9 @@ int btext_initialize(struct device_node *np) | |||
175 | if (prop) | 183 | if (prop) |
176 | address = *prop; | 184 | address = *prop; |
177 | 185 | ||
178 | /* FIXME: Add support for PCI reg properties */ | 186 | /* FIXME: Add support for PCI reg properties. Right now, only |
179 | 187 | * reliable on macs | |
188 | */ | ||
180 | if (address == 0) | 189 | if (address == 0) |
181 | return -EINVAL; | 190 | return -EINVAL; |
182 | 191 | ||
@@ -184,7 +193,6 @@ int btext_initialize(struct device_node *np) | |||
184 | g_loc_Y = 0; | 193 | g_loc_Y = 0; |
185 | g_max_loc_X = width / 8; | 194 | g_max_loc_X = width / 8; |
186 | g_max_loc_Y = height / 16; | 195 | g_max_loc_Y = height / 16; |
187 | logicalDisplayBase = (unsigned char *)address; | ||
188 | dispDeviceBase = (unsigned char *)address; | 196 | dispDeviceBase = (unsigned char *)address; |
189 | dispDeviceRowBytes = pitch; | 197 | dispDeviceRowBytes = pitch; |
190 | dispDeviceDepth = depth; | 198 | dispDeviceDepth = depth; |
@@ -197,14 +205,12 @@ int btext_initialize(struct device_node *np) | |||
197 | return 0; | 205 | return 0; |
198 | } | 206 | } |
199 | 207 | ||
200 | void __init init_boot_display(void) | 208 | int __init btext_find_display(int allow_nonstdout) |
201 | { | 209 | { |
202 | char *name; | 210 | char *name; |
203 | struct device_node *np = NULL; | 211 | struct device_node *np = NULL; |
204 | int rc = -ENODEV; | 212 | int rc = -ENODEV; |
205 | 213 | ||
206 | printk("trying to initialize btext ...\n"); | ||
207 | |||
208 | name = (char *)get_property(of_chosen, "linux,stdout-path", NULL); | 214 | name = (char *)get_property(of_chosen, "linux,stdout-path", NULL); |
209 | if (name != NULL) { | 215 | if (name != NULL) { |
210 | np = of_find_node_by_path(name); | 216 | np = of_find_node_by_path(name); |
@@ -218,8 +224,8 @@ void __init init_boot_display(void) | |||
218 | } | 224 | } |
219 | if (np) | 225 | if (np) |
220 | rc = btext_initialize(np); | 226 | rc = btext_initialize(np); |
221 | if (rc == 0) | 227 | if (rc == 0 || !allow_nonstdout) |
222 | return; | 228 | return rc; |
223 | 229 | ||
224 | for (np = NULL; (np = of_find_node_by_type(np, "display"));) { | 230 | for (np = NULL; (np = of_find_node_by_type(np, "display"));) { |
225 | if (get_property(np, "linux,opened", NULL)) { | 231 | if (get_property(np, "linux,opened", NULL)) { |
@@ -228,8 +234,9 @@ void __init init_boot_display(void) | |||
228 | printk("result: %d\n", rc); | 234 | printk("result: %d\n", rc); |
229 | } | 235 | } |
230 | if (rc == 0) | 236 | if (rc == 0) |
231 | return; | 237 | break; |
232 | } | 238 | } |
239 | return rc; | ||
233 | } | 240 | } |
234 | 241 | ||
235 | /* Calc the base address of a given point (x,y) */ | 242 | /* Calc the base address of a given point (x,y) */ |
@@ -277,44 +284,83 @@ EXPORT_SYMBOL(btext_update_display); | |||
277 | 284 | ||
278 | void btext_clearscreen(void) | 285 | void btext_clearscreen(void) |
279 | { | 286 | { |
280 | unsigned long *base = (unsigned long *)calc_base(0, 0); | 287 | unsigned int *base = (unsigned int *)calc_base(0, 0); |
281 | unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * | 288 | unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * |
282 | (dispDeviceDepth >> 3)) >> 3; | 289 | (dispDeviceDepth >> 3)) >> 2; |
283 | int i,j; | 290 | int i,j; |
284 | 291 | ||
285 | for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++) | 292 | for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1]); i++) |
286 | { | 293 | { |
287 | unsigned long *ptr = base; | 294 | unsigned int *ptr = base; |
288 | for(j=width; j; --j) | 295 | for(j=width; j; --j) |
289 | *(ptr++) = 0; | 296 | *(ptr++) = 0; |
290 | base += (dispDeviceRowBytes >> 3); | 297 | base += (dispDeviceRowBytes >> 2); |
291 | } | 298 | } |
292 | } | 299 | } |
293 | 300 | ||
301 | void btext_flushscreen(void) | ||
302 | { | ||
303 | unsigned int *base = (unsigned int *)calc_base(0, 0); | ||
304 | unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * | ||
305 | (dispDeviceDepth >> 3)) >> 2; | ||
306 | int i,j; | ||
307 | |||
308 | for (i=0; i < (dispDeviceRect[3] - dispDeviceRect[1]); i++) | ||
309 | { | ||
310 | unsigned int *ptr = base; | ||
311 | for(j = width; j > 0; j -= 8) { | ||
312 | __asm__ __volatile__ ("dcbst 0,%0" :: "r" (ptr)); | ||
313 | ptr += 8; | ||
314 | } | ||
315 | base += (dispDeviceRowBytes >> 2); | ||
316 | } | ||
317 | __asm__ __volatile__ ("sync" ::: "memory"); | ||
318 | } | ||
319 | |||
320 | void btext_flushline(void) | ||
321 | { | ||
322 | unsigned int *base = (unsigned int *)calc_base(0, g_loc_Y << 4); | ||
323 | unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * | ||
324 | (dispDeviceDepth >> 3)) >> 2; | ||
325 | int i,j; | ||
326 | |||
327 | for (i=0; i < 16; i++) | ||
328 | { | ||
329 | unsigned int *ptr = base; | ||
330 | for(j = width; j > 0; j -= 8) { | ||
331 | __asm__ __volatile__ ("dcbst 0,%0" :: "r" (ptr)); | ||
332 | ptr += 8; | ||
333 | } | ||
334 | base += (dispDeviceRowBytes >> 2); | ||
335 | } | ||
336 | __asm__ __volatile__ ("sync" ::: "memory"); | ||
337 | } | ||
338 | |||
339 | |||
294 | #ifndef NO_SCROLL | 340 | #ifndef NO_SCROLL |
295 | static void scrollscreen(void) | 341 | static void scrollscreen(void) |
296 | { | 342 | { |
297 | unsigned long *src = (unsigned long *)calc_base(0,16); | 343 | unsigned int *src = (unsigned int *)calc_base(0,16); |
298 | unsigned long *dst = (unsigned long *)calc_base(0,0); | 344 | unsigned int *dst = (unsigned int *)calc_base(0,0); |
299 | unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * | 345 | unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) * |
300 | (dispDeviceDepth >> 3)) >> 3; | 346 | (dispDeviceDepth >> 3)) >> 2; |
301 | int i,j; | 347 | int i,j; |
302 | 348 | ||
303 | for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++) | 349 | for (i=0; i<(dispDeviceRect[3] - dispDeviceRect[1] - 16); i++) |
304 | { | 350 | { |
305 | unsigned long *src_ptr = src; | 351 | unsigned int *src_ptr = src; |
306 | unsigned long *dst_ptr = dst; | 352 | unsigned int *dst_ptr = dst; |
307 | for(j=width; j; --j) | 353 | for(j=width; j; --j) |
308 | *(dst_ptr++) = *(src_ptr++); | 354 | *(dst_ptr++) = *(src_ptr++); |
309 | src += (dispDeviceRowBytes >> 3); | 355 | src += (dispDeviceRowBytes >> 2); |
310 | dst += (dispDeviceRowBytes >> 3); | 356 | dst += (dispDeviceRowBytes >> 2); |
311 | } | 357 | } |
312 | for (i=0; i<16; i++) | 358 | for (i=0; i<16; i++) |
313 | { | 359 | { |
314 | unsigned long *dst_ptr = dst; | 360 | unsigned int *dst_ptr = dst; |
315 | for(j=width; j; --j) | 361 | for(j=width; j; --j) |
316 | *(dst_ptr++) = 0; | 362 | *(dst_ptr++) = 0; |
317 | dst += (dispDeviceRowBytes >> 3); | 363 | dst += (dispDeviceRowBytes >> 2); |
318 | } | 364 | } |
319 | } | 365 | } |
320 | #endif /* ndef NO_SCROLL */ | 366 | #endif /* ndef NO_SCROLL */ |
@@ -377,6 +423,14 @@ void btext_drawstring(const char *c) | |||
377 | btext_drawchar(*c++); | 423 | btext_drawchar(*c++); |
378 | } | 424 | } |
379 | 425 | ||
426 | void btext_drawtext(const char *c, unsigned int len) | ||
427 | { | ||
428 | if (!boot_text_mapped) | ||
429 | return; | ||
430 | while (len--) | ||
431 | btext_drawchar(*c++); | ||
432 | } | ||
433 | |||
380 | void btext_drawhex(unsigned long v) | 434 | void btext_drawhex(unsigned long v) |
381 | { | 435 | { |
382 | char *hex_table = "0123456789abcdef"; | 436 | char *hex_table = "0123456789abcdef"; |
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c index 1d85cedbbb7b..43c74a6b07b1 100644 --- a/arch/powerpc/kernel/cputable.c +++ b/arch/powerpc/kernel/cputable.c | |||
@@ -78,10 +78,8 @@ struct cpu_spec cpu_specs[] = { | |||
78 | .dcache_bsize = 128, | 78 | .dcache_bsize = 128, |
79 | .num_pmcs = 8, | 79 | .num_pmcs = 8, |
80 | .cpu_setup = __setup_cpu_power3, | 80 | .cpu_setup = __setup_cpu_power3, |
81 | #ifdef CONFIG_OPROFILE | ||
82 | .oprofile_cpu_type = "ppc64/power3", | 81 | .oprofile_cpu_type = "ppc64/power3", |
83 | .oprofile_model = &op_model_rs64, | 82 | .oprofile_type = RS64, |
84 | #endif | ||
85 | }, | 83 | }, |
86 | { /* Power3+ */ | 84 | { /* Power3+ */ |
87 | .pvr_mask = 0xffff0000, | 85 | .pvr_mask = 0xffff0000, |
@@ -93,10 +91,8 @@ struct cpu_spec cpu_specs[] = { | |||
93 | .dcache_bsize = 128, | 91 | .dcache_bsize = 128, |
94 | .num_pmcs = 8, | 92 | .num_pmcs = 8, |
95 | .cpu_setup = __setup_cpu_power3, | 93 | .cpu_setup = __setup_cpu_power3, |
96 | #ifdef CONFIG_OPROFILE | ||
97 | .oprofile_cpu_type = "ppc64/power3", | 94 | .oprofile_cpu_type = "ppc64/power3", |
98 | .oprofile_model = &op_model_rs64, | 95 | .oprofile_type = RS64, |
99 | #endif | ||
100 | }, | 96 | }, |
101 | { /* Northstar */ | 97 | { /* Northstar */ |
102 | .pvr_mask = 0xffff0000, | 98 | .pvr_mask = 0xffff0000, |
@@ -108,10 +104,8 @@ struct cpu_spec cpu_specs[] = { | |||
108 | .dcache_bsize = 128, | 104 | .dcache_bsize = 128, |
109 | .num_pmcs = 8, | 105 | .num_pmcs = 8, |
110 | .cpu_setup = __setup_cpu_power3, | 106 | .cpu_setup = __setup_cpu_power3, |
111 | #ifdef CONFIG_OPROFILE | ||
112 | .oprofile_cpu_type = "ppc64/rs64", | 107 | .oprofile_cpu_type = "ppc64/rs64", |
113 | .oprofile_model = &op_model_rs64, | 108 | .oprofile_type = RS64, |
114 | #endif | ||
115 | }, | 109 | }, |
116 | { /* Pulsar */ | 110 | { /* Pulsar */ |
117 | .pvr_mask = 0xffff0000, | 111 | .pvr_mask = 0xffff0000, |
@@ -123,10 +117,8 @@ struct cpu_spec cpu_specs[] = { | |||
123 | .dcache_bsize = 128, | 117 | .dcache_bsize = 128, |
124 | .num_pmcs = 8, | 118 | .num_pmcs = 8, |
125 | .cpu_setup = __setup_cpu_power3, | 119 | .cpu_setup = __setup_cpu_power3, |
126 | #ifdef CONFIG_OPROFILE | ||
127 | .oprofile_cpu_type = "ppc64/rs64", | 120 | .oprofile_cpu_type = "ppc64/rs64", |
128 | .oprofile_model = &op_model_rs64, | 121 | .oprofile_type = RS64, |
129 | #endif | ||
130 | }, | 122 | }, |
131 | { /* I-star */ | 123 | { /* I-star */ |
132 | .pvr_mask = 0xffff0000, | 124 | .pvr_mask = 0xffff0000, |
@@ -138,10 +130,8 @@ struct cpu_spec cpu_specs[] = { | |||
138 | .dcache_bsize = 128, | 130 | .dcache_bsize = 128, |
139 | .num_pmcs = 8, | 131 | .num_pmcs = 8, |
140 | .cpu_setup = __setup_cpu_power3, | 132 | .cpu_setup = __setup_cpu_power3, |
141 | #ifdef CONFIG_OPROFILE | ||
142 | .oprofile_cpu_type = "ppc64/rs64", | 133 | .oprofile_cpu_type = "ppc64/rs64", |
143 | .oprofile_model = &op_model_rs64, | 134 | .oprofile_type = RS64, |
144 | #endif | ||
145 | }, | 135 | }, |
146 | { /* S-star */ | 136 | { /* S-star */ |
147 | .pvr_mask = 0xffff0000, | 137 | .pvr_mask = 0xffff0000, |
@@ -153,10 +143,8 @@ struct cpu_spec cpu_specs[] = { | |||
153 | .dcache_bsize = 128, | 143 | .dcache_bsize = 128, |
154 | .num_pmcs = 8, | 144 | .num_pmcs = 8, |
155 | .cpu_setup = __setup_cpu_power3, | 145 | .cpu_setup = __setup_cpu_power3, |
156 | #ifdef CONFIG_OPROFILE | ||
157 | .oprofile_cpu_type = "ppc64/rs64", | 146 | .oprofile_cpu_type = "ppc64/rs64", |
158 | .oprofile_model = &op_model_rs64, | 147 | .oprofile_type = RS64, |
159 | #endif | ||
160 | }, | 148 | }, |
161 | { /* Power4 */ | 149 | { /* Power4 */ |
162 | .pvr_mask = 0xffff0000, | 150 | .pvr_mask = 0xffff0000, |
@@ -168,10 +156,8 @@ struct cpu_spec cpu_specs[] = { | |||
168 | .dcache_bsize = 128, | 156 | .dcache_bsize = 128, |
169 | .num_pmcs = 8, | 157 | .num_pmcs = 8, |
170 | .cpu_setup = __setup_cpu_power4, | 158 | .cpu_setup = __setup_cpu_power4, |
171 | #ifdef CONFIG_OPROFILE | ||
172 | .oprofile_cpu_type = "ppc64/power4", | 159 | .oprofile_cpu_type = "ppc64/power4", |
173 | .oprofile_model = &op_model_rs64, | 160 | .oprofile_type = POWER4, |
174 | #endif | ||
175 | }, | 161 | }, |
176 | { /* Power4+ */ | 162 | { /* Power4+ */ |
177 | .pvr_mask = 0xffff0000, | 163 | .pvr_mask = 0xffff0000, |
@@ -183,10 +169,8 @@ struct cpu_spec cpu_specs[] = { | |||
183 | .dcache_bsize = 128, | 169 | .dcache_bsize = 128, |
184 | .num_pmcs = 8, | 170 | .num_pmcs = 8, |
185 | .cpu_setup = __setup_cpu_power4, | 171 | .cpu_setup = __setup_cpu_power4, |
186 | #ifdef CONFIG_OPROFILE | ||
187 | .oprofile_cpu_type = "ppc64/power4", | 172 | .oprofile_cpu_type = "ppc64/power4", |
188 | .oprofile_model = &op_model_power4, | 173 | .oprofile_type = POWER4, |
189 | #endif | ||
190 | }, | 174 | }, |
191 | { /* PPC970 */ | 175 | { /* PPC970 */ |
192 | .pvr_mask = 0xffff0000, | 176 | .pvr_mask = 0xffff0000, |
@@ -199,10 +183,8 @@ struct cpu_spec cpu_specs[] = { | |||
199 | .dcache_bsize = 128, | 183 | .dcache_bsize = 128, |
200 | .num_pmcs = 8, | 184 | .num_pmcs = 8, |
201 | .cpu_setup = __setup_cpu_ppc970, | 185 | .cpu_setup = __setup_cpu_ppc970, |
202 | #ifdef CONFIG_OPROFILE | ||
203 | .oprofile_cpu_type = "ppc64/970", | 186 | .oprofile_cpu_type = "ppc64/970", |
204 | .oprofile_model = &op_model_power4, | 187 | .oprofile_type = POWER4, |
205 | #endif | ||
206 | }, | 188 | }, |
207 | #endif /* CONFIG_PPC64 */ | 189 | #endif /* CONFIG_PPC64 */ |
208 | #if defined(CONFIG_PPC64) || defined(CONFIG_POWER4) | 190 | #if defined(CONFIG_PPC64) || defined(CONFIG_POWER4) |
@@ -221,10 +203,8 @@ struct cpu_spec cpu_specs[] = { | |||
221 | .dcache_bsize = 128, | 203 | .dcache_bsize = 128, |
222 | .num_pmcs = 8, | 204 | .num_pmcs = 8, |
223 | .cpu_setup = __setup_cpu_ppc970, | 205 | .cpu_setup = __setup_cpu_ppc970, |
224 | #ifdef CONFIG_OPROFILE | ||
225 | .oprofile_cpu_type = "ppc64/970", | 206 | .oprofile_cpu_type = "ppc64/970", |
226 | .oprofile_model = &op_model_power4, | 207 | .oprofile_type = POWER4, |
227 | #endif | ||
228 | }, | 208 | }, |
229 | #endif /* defined(CONFIG_PPC64) || defined(CONFIG_POWER4) */ | 209 | #endif /* defined(CONFIG_PPC64) || defined(CONFIG_POWER4) */ |
230 | #ifdef CONFIG_PPC64 | 210 | #ifdef CONFIG_PPC64 |
@@ -238,10 +218,8 @@ struct cpu_spec cpu_specs[] = { | |||
238 | .icache_bsize = 128, | 218 | .icache_bsize = 128, |
239 | .dcache_bsize = 128, | 219 | .dcache_bsize = 128, |
240 | .cpu_setup = __setup_cpu_ppc970, | 220 | .cpu_setup = __setup_cpu_ppc970, |
241 | #ifdef CONFIG_OPROFILE | ||
242 | .oprofile_cpu_type = "ppc64/970", | 221 | .oprofile_cpu_type = "ppc64/970", |
243 | .oprofile_model = &op_model_power4, | 222 | .oprofile_type = POWER4, |
244 | #endif | ||
245 | }, | 223 | }, |
246 | { /* Power5 GR */ | 224 | { /* Power5 GR */ |
247 | .pvr_mask = 0xffff0000, | 225 | .pvr_mask = 0xffff0000, |
@@ -253,27 +231,23 @@ struct cpu_spec cpu_specs[] = { | |||
253 | .dcache_bsize = 128, | 231 | .dcache_bsize = 128, |
254 | .num_pmcs = 6, | 232 | .num_pmcs = 6, |
255 | .cpu_setup = __setup_cpu_power4, | 233 | .cpu_setup = __setup_cpu_power4, |
256 | #ifdef CONFIG_OPROFILE | ||
257 | .oprofile_cpu_type = "ppc64/power5", | 234 | .oprofile_cpu_type = "ppc64/power5", |
258 | .oprofile_model = &op_model_power4, | 235 | .oprofile_type = POWER4, |
259 | #endif | ||
260 | }, | 236 | }, |
261 | { /* Power5 GS */ | 237 | { /* Power5 GS */ |
262 | .pvr_mask = 0xffff0000, | 238 | .pvr_mask = 0xffff0000, |
263 | .pvr_value = 0x003b0000, | 239 | .pvr_value = 0x003b0000, |
264 | .cpu_name = "POWER5 (gs)", | 240 | .cpu_name = "POWER5+ (gs)", |
265 | .cpu_features = CPU_FTRS_POWER5, | 241 | .cpu_features = CPU_FTRS_POWER5, |
266 | .cpu_user_features = COMMON_USER_POWER5_PLUS, | 242 | .cpu_user_features = COMMON_USER_POWER5_PLUS, |
267 | .icache_bsize = 128, | 243 | .icache_bsize = 128, |
268 | .dcache_bsize = 128, | 244 | .dcache_bsize = 128, |
269 | .num_pmcs = 6, | 245 | .num_pmcs = 6, |
270 | .cpu_setup = __setup_cpu_power4, | 246 | .cpu_setup = __setup_cpu_power4, |
271 | #ifdef CONFIG_OPROFILE | 247 | .oprofile_cpu_type = "ppc64/power5+", |
272 | .oprofile_cpu_type = "ppc64/power5", | 248 | .oprofile_type = POWER4, |
273 | .oprofile_model = &op_model_power4, | ||
274 | #endif | ||
275 | }, | 249 | }, |
276 | { /* BE DD1.x */ | 250 | { /* Cell Broadband Engine */ |
277 | .pvr_mask = 0xffff0000, | 251 | .pvr_mask = 0xffff0000, |
278 | .pvr_value = 0x00700000, | 252 | .pvr_value = 0x00700000, |
279 | .cpu_name = "Cell Broadband Engine", | 253 | .cpu_name = "Cell Broadband Engine", |
@@ -545,7 +519,9 @@ struct cpu_spec cpu_specs[] = { | |||
545 | .icache_bsize = 32, | 519 | .icache_bsize = 32, |
546 | .dcache_bsize = 32, | 520 | .dcache_bsize = 32, |
547 | .num_pmcs = 6, | 521 | .num_pmcs = 6, |
548 | .cpu_setup = __setup_cpu_745x | 522 | .cpu_setup = __setup_cpu_745x, |
523 | .oprofile_cpu_type = "ppc/7450", | ||
524 | .oprofile_type = G4, | ||
549 | }, | 525 | }, |
550 | { /* 7450 2.1 */ | 526 | { /* 7450 2.1 */ |
551 | .pvr_mask = 0xffffffff, | 527 | .pvr_mask = 0xffffffff, |
@@ -556,7 +532,9 @@ struct cpu_spec cpu_specs[] = { | |||
556 | .icache_bsize = 32, | 532 | .icache_bsize = 32, |
557 | .dcache_bsize = 32, | 533 | .dcache_bsize = 32, |
558 | .num_pmcs = 6, | 534 | .num_pmcs = 6, |
559 | .cpu_setup = __setup_cpu_745x | 535 | .cpu_setup = __setup_cpu_745x, |
536 | .oprofile_cpu_type = "ppc/7450", | ||
537 | .oprofile_type = G4, | ||
560 | }, | 538 | }, |
561 | { /* 7450 2.3 and newer */ | 539 | { /* 7450 2.3 and newer */ |
562 | .pvr_mask = 0xffff0000, | 540 | .pvr_mask = 0xffff0000, |
@@ -567,7 +545,9 @@ struct cpu_spec cpu_specs[] = { | |||
567 | .icache_bsize = 32, | 545 | .icache_bsize = 32, |
568 | .dcache_bsize = 32, | 546 | .dcache_bsize = 32, |
569 | .num_pmcs = 6, | 547 | .num_pmcs = 6, |
570 | .cpu_setup = __setup_cpu_745x | 548 | .cpu_setup = __setup_cpu_745x, |
549 | .oprofile_cpu_type = "ppc/7450", | ||
550 | .oprofile_type = G4, | ||
571 | }, | 551 | }, |
572 | { /* 7455 rev 1.x */ | 552 | { /* 7455 rev 1.x */ |
573 | .pvr_mask = 0xffffff00, | 553 | .pvr_mask = 0xffffff00, |
@@ -578,7 +558,9 @@ struct cpu_spec cpu_specs[] = { | |||
578 | .icache_bsize = 32, | 558 | .icache_bsize = 32, |
579 | .dcache_bsize = 32, | 559 | .dcache_bsize = 32, |
580 | .num_pmcs = 6, | 560 | .num_pmcs = 6, |
581 | .cpu_setup = __setup_cpu_745x | 561 | .cpu_setup = __setup_cpu_745x, |
562 | .oprofile_cpu_type = "ppc/7450", | ||
563 | .oprofile_type = G4, | ||
582 | }, | 564 | }, |
583 | { /* 7455 rev 2.0 */ | 565 | { /* 7455 rev 2.0 */ |
584 | .pvr_mask = 0xffffffff, | 566 | .pvr_mask = 0xffffffff, |
@@ -589,7 +571,9 @@ struct cpu_spec cpu_specs[] = { | |||
589 | .icache_bsize = 32, | 571 | .icache_bsize = 32, |
590 | .dcache_bsize = 32, | 572 | .dcache_bsize = 32, |
591 | .num_pmcs = 6, | 573 | .num_pmcs = 6, |
592 | .cpu_setup = __setup_cpu_745x | 574 | .cpu_setup = __setup_cpu_745x, |
575 | .oprofile_cpu_type = "ppc/7450", | ||
576 | .oprofile_type = G4, | ||
593 | }, | 577 | }, |
594 | { /* 7455 others */ | 578 | { /* 7455 others */ |
595 | .pvr_mask = 0xffff0000, | 579 | .pvr_mask = 0xffff0000, |
@@ -600,7 +584,9 @@ struct cpu_spec cpu_specs[] = { | |||
600 | .icache_bsize = 32, | 584 | .icache_bsize = 32, |
601 | .dcache_bsize = 32, | 585 | .dcache_bsize = 32, |
602 | .num_pmcs = 6, | 586 | .num_pmcs = 6, |
603 | .cpu_setup = __setup_cpu_745x | 587 | .cpu_setup = __setup_cpu_745x, |
588 | .oprofile_cpu_type = "ppc/7450", | ||
589 | .oprofile_type = G4, | ||
604 | }, | 590 | }, |
605 | { /* 7447/7457 Rev 1.0 */ | 591 | { /* 7447/7457 Rev 1.0 */ |
606 | .pvr_mask = 0xffffffff, | 592 | .pvr_mask = 0xffffffff, |
@@ -611,7 +597,9 @@ struct cpu_spec cpu_specs[] = { | |||
611 | .icache_bsize = 32, | 597 | .icache_bsize = 32, |
612 | .dcache_bsize = 32, | 598 | .dcache_bsize = 32, |
613 | .num_pmcs = 6, | 599 | .num_pmcs = 6, |
614 | .cpu_setup = __setup_cpu_745x | 600 | .cpu_setup = __setup_cpu_745x, |
601 | .oprofile_cpu_type = "ppc/7450", | ||
602 | .oprofile_type = G4, | ||
615 | }, | 603 | }, |
616 | { /* 7447/7457 Rev 1.1 */ | 604 | { /* 7447/7457 Rev 1.1 */ |
617 | .pvr_mask = 0xffffffff, | 605 | .pvr_mask = 0xffffffff, |
@@ -622,7 +610,9 @@ struct cpu_spec cpu_specs[] = { | |||
622 | .icache_bsize = 32, | 610 | .icache_bsize = 32, |
623 | .dcache_bsize = 32, | 611 | .dcache_bsize = 32, |
624 | .num_pmcs = 6, | 612 | .num_pmcs = 6, |
625 | .cpu_setup = __setup_cpu_745x | 613 | .cpu_setup = __setup_cpu_745x, |
614 | .oprofile_cpu_type = "ppc/7450", | ||
615 | .oprofile_type = G4, | ||
626 | }, | 616 | }, |
627 | { /* 7447/7457 Rev 1.2 and later */ | 617 | { /* 7447/7457 Rev 1.2 and later */ |
628 | .pvr_mask = 0xffff0000, | 618 | .pvr_mask = 0xffff0000, |
@@ -633,7 +623,9 @@ struct cpu_spec cpu_specs[] = { | |||
633 | .icache_bsize = 32, | 623 | .icache_bsize = 32, |
634 | .dcache_bsize = 32, | 624 | .dcache_bsize = 32, |
635 | .num_pmcs = 6, | 625 | .num_pmcs = 6, |
636 | .cpu_setup = __setup_cpu_745x | 626 | .cpu_setup = __setup_cpu_745x, |
627 | .oprofile_cpu_type = "ppc/7450", | ||
628 | .oprofile_type = G4, | ||
637 | }, | 629 | }, |
638 | { /* 7447A */ | 630 | { /* 7447A */ |
639 | .pvr_mask = 0xffff0000, | 631 | .pvr_mask = 0xffff0000, |
@@ -644,7 +636,9 @@ struct cpu_spec cpu_specs[] = { | |||
644 | .icache_bsize = 32, | 636 | .icache_bsize = 32, |
645 | .dcache_bsize = 32, | 637 | .dcache_bsize = 32, |
646 | .num_pmcs = 6, | 638 | .num_pmcs = 6, |
647 | .cpu_setup = __setup_cpu_745x | 639 | .cpu_setup = __setup_cpu_745x, |
640 | .oprofile_cpu_type = "ppc/7450", | ||
641 | .oprofile_type = G4, | ||
648 | }, | 642 | }, |
649 | { /* 7448 */ | 643 | { /* 7448 */ |
650 | .pvr_mask = 0xffff0000, | 644 | .pvr_mask = 0xffff0000, |
@@ -655,7 +649,9 @@ struct cpu_spec cpu_specs[] = { | |||
655 | .icache_bsize = 32, | 649 | .icache_bsize = 32, |
656 | .dcache_bsize = 32, | 650 | .dcache_bsize = 32, |
657 | .num_pmcs = 6, | 651 | .num_pmcs = 6, |
658 | .cpu_setup = __setup_cpu_745x | 652 | .cpu_setup = __setup_cpu_745x, |
653 | .oprofile_cpu_type = "ppc/7450", | ||
654 | .oprofile_type = G4, | ||
659 | }, | 655 | }, |
660 | { /* 82xx (8240, 8245, 8260 are all 603e cores) */ | 656 | { /* 82xx (8240, 8245, 8260 are all 603e cores) */ |
661 | .pvr_mask = 0x7fff0000, | 657 | .pvr_mask = 0x7fff0000, |
@@ -979,6 +975,8 @@ struct cpu_spec cpu_specs[] = { | |||
979 | .icache_bsize = 32, | 975 | .icache_bsize = 32, |
980 | .dcache_bsize = 32, | 976 | .dcache_bsize = 32, |
981 | .num_pmcs = 4, | 977 | .num_pmcs = 4, |
978 | .oprofile_cpu_type = "ppc/e500", | ||
979 | .oprofile_type = BOOKE, | ||
982 | }, | 980 | }, |
983 | { /* e500v2 */ | 981 | { /* e500v2 */ |
984 | .pvr_mask = 0xffff0000, | 982 | .pvr_mask = 0xffff0000, |
@@ -992,6 +990,8 @@ struct cpu_spec cpu_specs[] = { | |||
992 | .icache_bsize = 32, | 990 | .icache_bsize = 32, |
993 | .dcache_bsize = 32, | 991 | .dcache_bsize = 32, |
994 | .num_pmcs = 4, | 992 | .num_pmcs = 4, |
993 | .oprofile_cpu_type = "ppc/e500", | ||
994 | .oprofile_type = BOOKE, | ||
995 | }, | 995 | }, |
996 | #endif | 996 | #endif |
997 | #if !CLASSIC_PPC | 997 | #if !CLASSIC_PPC |
diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c new file mode 100644 index 000000000000..4681155121ef --- /dev/null +++ b/arch/powerpc/kernel/crash.c | |||
@@ -0,0 +1,264 @@ | |||
1 | /* | ||
2 | * Architecture specific (PPC64) functions for kexec based crash dumps. | ||
3 | * | ||
4 | * Copyright (C) 2005, IBM Corp. | ||
5 | * | ||
6 | * Created by: Haren Myneni | ||
7 | * | ||
8 | * This source code is licensed under the GNU General Public License, | ||
9 | * Version 2. See the file COPYING for more details. | ||
10 | * | ||
11 | */ | ||
12 | |||
13 | #undef DEBUG | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/smp.h> | ||
17 | #include <linux/reboot.h> | ||
18 | #include <linux/kexec.h> | ||
19 | #include <linux/bootmem.h> | ||
20 | #include <linux/crash_dump.h> | ||
21 | #include <linux/irq.h> | ||
22 | #include <linux/delay.h> | ||
23 | #include <linux/elf.h> | ||
24 | #include <linux/elfcore.h> | ||
25 | #include <linux/init.h> | ||
26 | #include <linux/types.h> | ||
27 | |||
28 | #include <asm/processor.h> | ||
29 | #include <asm/machdep.h> | ||
30 | #include <asm/kdump.h> | ||
31 | #include <asm/lmb.h> | ||
32 | #include <asm/firmware.h> | ||
33 | |||
34 | #ifdef DEBUG | ||
35 | #include <asm/udbg.h> | ||
36 | #define DBG(fmt...) udbg_printf(fmt) | ||
37 | #else | ||
38 | #define DBG(fmt...) | ||
39 | #endif | ||
40 | |||
41 | /* This keeps track of which cpu is the crashing one. */ | ||
42 | int crashing_cpu = -1; | ||
43 | |||
44 | static u32 *append_elf_note(u32 *buf, char *name, unsigned type, void *data, | ||
45 | size_t data_len) | ||
46 | { | ||
47 | struct elf_note note; | ||
48 | |||
49 | note.n_namesz = strlen(name) + 1; | ||
50 | note.n_descsz = data_len; | ||
51 | note.n_type = type; | ||
52 | memcpy(buf, ¬e, sizeof(note)); | ||
53 | buf += (sizeof(note) +3)/4; | ||
54 | memcpy(buf, name, note.n_namesz); | ||
55 | buf += (note.n_namesz + 3)/4; | ||
56 | memcpy(buf, data, note.n_descsz); | ||
57 | buf += (note.n_descsz + 3)/4; | ||
58 | |||
59 | return buf; | ||
60 | } | ||
61 | |||
62 | static void final_note(u32 *buf) | ||
63 | { | ||
64 | struct elf_note note; | ||
65 | |||
66 | note.n_namesz = 0; | ||
67 | note.n_descsz = 0; | ||
68 | note.n_type = 0; | ||
69 | memcpy(buf, ¬e, sizeof(note)); | ||
70 | } | ||
71 | |||
72 | static void crash_save_this_cpu(struct pt_regs *regs, int cpu) | ||
73 | { | ||
74 | struct elf_prstatus prstatus; | ||
75 | u32 *buf; | ||
76 | |||
77 | if ((cpu < 0) || (cpu >= NR_CPUS)) | ||
78 | return; | ||
79 | |||
80 | /* Using ELF notes here is opportunistic. | ||
81 | * I need a well defined structure format | ||
82 | * for the data I pass, and I need tags | ||
83 | * on the data to indicate what information I have | ||
84 | * squirrelled away. ELF notes happen to provide | ||
85 | * all of that, so there is no need to invent something new. | ||
86 | */ | ||
87 | buf = &crash_notes[cpu][0]; | ||
88 | memset(&prstatus, 0, sizeof(prstatus)); | ||
89 | prstatus.pr_pid = current->pid; | ||
90 | elf_core_copy_regs(&prstatus.pr_reg, regs); | ||
91 | buf = append_elf_note(buf, "CORE", NT_PRSTATUS, &prstatus, | ||
92 | sizeof(prstatus)); | ||
93 | final_note(buf); | ||
94 | } | ||
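To make the buffer arithmetic above concrete, the space one note consumes in crash_notes[cpu][] can be sketched as below (same 4-byte rounding as the (x + 3)/4 word steps in append_elf_note(); note_size() is a hypothetical helper, not part of this patch):

	/* Bytes occupied by one ELF note: the fixed note header plus the
	 * name and descriptor, each padded to a 4-byte boundary.
	 */
	static size_t note_size(size_t namesz, size_t descsz)
	{
		return sizeof(struct elf_note)	/* n_namesz, n_descsz, n_type */
			+ roundup(namesz, 4)	/* "CORE" + NUL, padded */
			+ roundup(descsz, 4);	/* struct elf_prstatus, padded */
	}

	/* The NT_PRSTATUS note written above takes
	 * note_size(strlen("CORE") + 1, sizeof(struct elf_prstatus)) bytes,
	 * followed by the all-zero terminating note from final_note().
	 */
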
95 | |||
96 | /* FIXME Merge this with xmon_save_regs ?? */ | ||
97 | static inline void crash_get_current_regs(struct pt_regs *regs) | ||
98 | { | ||
99 | unsigned long tmp1, tmp2; | ||
100 | |||
101 | __asm__ __volatile__ ( | ||
102 | "std 0,0(%2)\n" | ||
103 | "std 1,8(%2)\n" | ||
104 | "std 2,16(%2)\n" | ||
105 | "std 3,24(%2)\n" | ||
106 | "std 4,32(%2)\n" | ||
107 | "std 5,40(%2)\n" | ||
108 | "std 6,48(%2)\n" | ||
109 | "std 7,56(%2)\n" | ||
110 | "std 8,64(%2)\n" | ||
111 | "std 9,72(%2)\n" | ||
112 | "std 10,80(%2)\n" | ||
113 | "std 11,88(%2)\n" | ||
114 | "std 12,96(%2)\n" | ||
115 | "std 13,104(%2)\n" | ||
116 | "std 14,112(%2)\n" | ||
117 | "std 15,120(%2)\n" | ||
118 | "std 16,128(%2)\n" | ||
119 | "std 17,136(%2)\n" | ||
120 | "std 18,144(%2)\n" | ||
121 | "std 19,152(%2)\n" | ||
122 | "std 20,160(%2)\n" | ||
123 | "std 21,168(%2)\n" | ||
124 | "std 22,176(%2)\n" | ||
125 | "std 23,184(%2)\n" | ||
126 | "std 24,192(%2)\n" | ||
127 | "std 25,200(%2)\n" | ||
128 | "std 26,208(%2)\n" | ||
129 | "std 27,216(%2)\n" | ||
130 | "std 28,224(%2)\n" | ||
131 | "std 29,232(%2)\n" | ||
132 | "std 30,240(%2)\n" | ||
133 | "std 31,248(%2)\n" | ||
134 | "mfmsr %0\n" | ||
135 | "std %0, 264(%2)\n" | ||
136 | "mfctr %0\n" | ||
137 | "std %0, 280(%2)\n" | ||
138 | "mflr %0\n" | ||
139 | "std %0, 288(%2)\n" | ||
140 | "bl 1f\n" | ||
141 | "1: mflr %1\n" | ||
142 | "std %1, 256(%2)\n" | ||
143 | "mtlr %0\n" | ||
144 | "mfxer %0\n" | ||
145 | "std %0, 296(%2)\n" | ||
146 | : "=&r" (tmp1), "=&r" (tmp2) | ||
147 | : "b" (regs)); | ||
148 | } | ||
149 | |||
150 | /* We may have saved_regs from where the error came from, | ||
151 | * or NULL if we got here via a direct panic(). | ||
152 | */ | ||
153 | static void crash_save_self(struct pt_regs *saved_regs) | ||
154 | { | ||
155 | struct pt_regs regs; | ||
156 | int cpu; | ||
157 | |||
158 | cpu = smp_processor_id(); | ||
159 | if (saved_regs) | ||
160 | memcpy(®s, saved_regs, sizeof(regs)); | ||
161 | else | ||
162 | crash_get_current_regs(®s); | ||
163 | crash_save_this_cpu(®s, cpu); | ||
164 | } | ||
165 | |||
166 | #ifdef CONFIG_SMP | ||
167 | static atomic_t waiting_for_crash_ipi; | ||
168 | |||
169 | void crash_ipi_callback(struct pt_regs *regs) | ||
170 | { | ||
171 | int cpu = smp_processor_id(); | ||
172 | |||
173 | if (cpu == crashing_cpu) | ||
174 | return; | ||
175 | |||
176 | if (!cpu_online(cpu)) | ||
177 | return; | ||
178 | |||
179 | if (ppc_md.kexec_cpu_down) | ||
180 | ppc_md.kexec_cpu_down(1, 1); | ||
181 | |||
182 | local_irq_disable(); | ||
183 | |||
184 | crash_save_this_cpu(regs, cpu); | ||
185 | atomic_dec(&waiting_for_crash_ipi); | ||
186 | kexec_smp_wait(); | ||
187 | /* NOTREACHED */ | ||
188 | } | ||
189 | |||
190 | static void crash_kexec_prepare_cpus(void) | ||
191 | { | ||
192 | unsigned int msecs; | ||
193 | |||
194 | atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1); | ||
195 | |||
196 | crash_send_ipi(crash_ipi_callback); | ||
197 | smp_wmb(); | ||
198 | |||
199 | /* | ||
200 | * FIXME: Until we have a way to stop the other CPUs reliably, the | ||
201 | * crash CPU will send an IPI and wait for the other CPUs to | ||
202 | * respond. If they don't, proceed with the kexec boot even though | ||
203 | * we failed to capture the other CPUs' state. | ||
204 | */ | ||
205 | msecs = 1000000; | ||
206 | while ((atomic_read(&waiting_for_crash_ipi) > 0) && (--msecs > 0)) { | ||
207 | barrier(); | ||
208 | mdelay(1); | ||
209 | } | ||
210 | |||
211 | /* Would it be better to replace the trap vector here? */ | ||
212 | |||
213 | /* | ||
214 | * FIXME: If we do not get all CPUs, one possibility is to ask the | ||
215 | * user to do a soft reset so that we get all of them. | ||
216 | * The IPI handler is already set by the panic CPU initially; therefore, | ||
217 | * all CPUs could invoke this handler from die() and the panic CPU | ||
218 | * will call machine_kexec() directly from this handler to do the | ||
219 | * kexec boot. | ||
220 | */ | ||
221 | if (atomic_read(&waiting_for_crash_ipi)) | ||
222 | printk(KERN_ALERT "done waiting: %d cpus not responding\n", | ||
223 | atomic_read(&waiting_for_crash_ipi)); | ||
224 | /* Leave the IPI callback set */ | ||
225 | } | ||
226 | #else | ||
227 | static void crash_kexec_prepare_cpus(void) | ||
228 | { | ||
229 | /* | ||
230 | * Move the secondaries to us so that we can copy | ||
231 | * the new kernel to 0-0x100 safely. | ||
232 | * | ||
233 | * do this if kexec in setup.c ? | ||
234 | */ | ||
235 | smp_release_cpus(); | ||
236 | } | ||
237 | |||
238 | #endif | ||
239 | |||
240 | void default_machine_crash_shutdown(struct pt_regs *regs) | ||
241 | { | ||
242 | /* | ||
243 | * This function is only called after the system | ||
244 | * has paniced or is otherwise in a critical state. | ||
245 | * The minimum amount of code to allow a kexec'd kernel | ||
246 | * to run successfully needs to happen here. | ||
247 | * | ||
248 | * In practice this means stopping other cpus in | ||
249 | * an SMP system. | ||
250 | * The kernel is broken so disable interrupts. | ||
251 | */ | ||
252 | local_irq_disable(); | ||
253 | |||
254 | if (ppc_md.kexec_cpu_down) | ||
255 | ppc_md.kexec_cpu_down(1, 0); | ||
256 | |||
257 | /* | ||
258 | * Make a note of the crashing cpu. Will be used in machine_kexec | ||
259 | * such that another IPI will not be sent. | ||
260 | */ | ||
261 | crashing_cpu = smp_processor_id(); | ||
262 | crash_kexec_prepare_cpus(); | ||
263 | crash_save_self(regs); | ||
264 | } | ||
diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c new file mode 100644 index 000000000000..87effa3f21a7 --- /dev/null +++ b/arch/powerpc/kernel/crash_dump.c | |||
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * Routines for doing kexec-based kdump. | ||
3 | * | ||
4 | * Copyright (C) 2005, IBM Corp. | ||
5 | * | ||
6 | * Created by: Michael Ellerman | ||
7 | * | ||
8 | * This source code is licensed under the GNU General Public License, | ||
9 | * Version 2. See the file COPYING for more details. | ||
10 | */ | ||
11 | |||
12 | #undef DEBUG | ||
13 | |||
14 | #include <linux/crash_dump.h> | ||
15 | #include <linux/bootmem.h> | ||
16 | #include <asm/kdump.h> | ||
17 | #include <asm/lmb.h> | ||
18 | #include <asm/firmware.h> | ||
19 | #include <asm/uaccess.h> | ||
20 | |||
21 | #ifdef DEBUG | ||
22 | #include <asm/udbg.h> | ||
23 | #define DBG(fmt...) udbg_printf(fmt) | ||
24 | #else | ||
25 | #define DBG(fmt...) | ||
26 | #endif | ||
27 | |||
28 | static void __init create_trampoline(unsigned long addr) | ||
29 | { | ||
30 | /* The maximum range of a single-instruction branch is the current | ||
31 | * instruction's address + (32 MB - 4) bytes. For the trampoline we | ||
32 | * need to branch to current address + 32 MB. So we insert a nop at | ||
33 | * the trampoline address, then the next instruction (+ 4 bytes) | ||
34 | * does a branch to (32 MB - 4). The net effect is that when we | ||
35 | * branch to "addr" we jump to ("addr" + 32 MB). Although it requires | ||
36 | * two instructions, it doesn't require any registers. | ||
37 | */ | ||
38 | create_instruction(addr, 0x60000000); /* nop */ | ||
39 | create_branch(addr + 4, addr + PHYSICAL_START, 0); | ||
40 | } | ||
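For reference, a sketch of the two instruction words each trampoline slot ends up holding, assuming the standard PowerPC I-form branch encoding; emit_trampoline() is a hypothetical stand-in for the create_instruction()/create_branch() pair used above:

	#include <stdint.h>

	/* The I-form branch carries a signed 26-bit byte displacement with
	 * the low two bits zero, so the furthest forward branch is
	 * 0x01FFFFFC bytes (32 MB - 4), hence the leading nop.
	 */
	static void emit_trampoline(uint32_t *slot, uint32_t displacement)
	{
		slot[0] = 0x60000000;                               /* nop (ori 0,0,0) */
		slot[1] = 0x48000000 | (displacement & 0x03FFFFFC); /* b +displacement */
	}

	/* e.g. emit_trampoline(slot, 0x02000000 - 4) branches 32 MB - 4
	 * bytes forward from &slot[1], which lands at slot + 32 MB.
	 */
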
41 | |||
42 | void __init kdump_setup(void) | ||
43 | { | ||
44 | unsigned long i; | ||
45 | |||
46 | DBG(" -> kdump_setup()\n"); | ||
47 | |||
48 | for (i = KDUMP_TRAMPOLINE_START; i < KDUMP_TRAMPOLINE_END; i += 8) { | ||
49 | create_trampoline(i); | ||
50 | } | ||
51 | |||
52 | create_trampoline(__pa(system_reset_fwnmi) - PHYSICAL_START); | ||
53 | create_trampoline(__pa(machine_check_fwnmi) - PHYSICAL_START); | ||
54 | |||
55 | DBG(" <- kdump_setup()\n"); | ||
56 | } | ||
57 | |||
58 | static int __init parse_elfcorehdr(char *p) | ||
59 | { | ||
60 | if (p) | ||
61 | elfcorehdr_addr = memparse(p, &p); | ||
62 | |||
63 | return 0; | ||
64 | } | ||
65 | __setup("elfcorehdr=", parse_elfcorehdr); | ||
66 | |||
67 | static int __init parse_savemaxmem(char *p) | ||
68 | { | ||
69 | if (p) | ||
70 | saved_max_pfn = (memparse(p, &p) >> PAGE_SHIFT) - 1; | ||
71 | |||
72 | return 0; | ||
73 | } | ||
74 | __setup("savemaxmem=", parse_savemaxmem); | ||
75 | |||
76 | /* | ||
77 | * copy_oldmem_page - copy one page from "oldmem" | ||
78 | * @pfn: page frame number to be copied | ||
79 | * @buf: target memory address for the copy; this can be in kernel address | ||
80 | * space or user address space (see @userbuf) | ||
81 | * @csize: number of bytes to copy | ||
82 | * @offset: offset in bytes into the page (based on pfn) to begin the copy | ||
83 | * @userbuf: if set, @buf is in user address space, use copy_to_user(), | ||
84 | * otherwise @buf is in kernel address space, use memcpy(). | ||
85 | * | ||
86 | * Copy a page from "oldmem". For this page, there is no pte mapped | ||
87 | * in the current kernel. We stitch up a pte, similar to kmap_atomic. | ||
88 | */ | ||
89 | ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | ||
90 | size_t csize, unsigned long offset, int userbuf) | ||
91 | { | ||
92 | void *vaddr; | ||
93 | |||
94 | if (!csize) | ||
95 | return 0; | ||
96 | |||
97 | vaddr = __ioremap(pfn << PAGE_SHIFT, PAGE_SIZE, 0); | ||
98 | |||
99 | if (userbuf) { | ||
100 | if (copy_to_user((char __user *)buf, (vaddr + offset), csize)) { | ||
101 | iounmap(vaddr); | ||
102 | return -EFAULT; | ||
103 | } | ||
104 | } else | ||
105 | memcpy(buf, (vaddr + offset), csize); | ||
106 | |||
107 | iounmap(vaddr); | ||
108 | return csize; | ||
109 | } | ||
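As a sketch of how this routine is typically consumed (not part of this patch; a simplified, hypothetical stand-in for the generic /proc/vmcore read path):

	/* Copy 'count' bytes of old-kernel memory starting at physical
	 * address 'paddr' into a user buffer, one page at a time, using
	 * copy_oldmem_page() above.
	 */
	static ssize_t read_oldmem_range(char __user *ubuf, size_t count, u64 paddr)
	{
		size_t done = 0;

		while (count) {
			unsigned long pfn = paddr >> PAGE_SHIFT;
			unsigned long off = paddr & (PAGE_SIZE - 1);
			size_t nr = min_t(size_t, count, PAGE_SIZE - off);
			ssize_t ret;

			ret = copy_oldmem_page(pfn, (char *)ubuf + done, nr, off, 1);
			if (ret < 0)
				return ret;

			paddr += nr;
			done += nr;
			count -= nr;
		}
		return done;
	}
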
diff --git a/arch/powerpc/kernel/dma_64.c b/arch/powerpc/kernel/dma_64.c index 7c3419656ccc..36aaa7663f02 100644 --- a/arch/powerpc/kernel/dma_64.c +++ b/arch/powerpc/kernel/dma_64.c | |||
@@ -10,6 +10,7 @@ | |||
10 | /* Include the busses we support */ | 10 | /* Include the busses we support */ |
11 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
12 | #include <asm/vio.h> | 12 | #include <asm/vio.h> |
13 | #include <asm/ibmebus.h> | ||
13 | #include <asm/scatterlist.h> | 14 | #include <asm/scatterlist.h> |
14 | #include <asm/bug.h> | 15 | #include <asm/bug.h> |
15 | 16 | ||
@@ -23,6 +24,10 @@ static struct dma_mapping_ops *get_dma_ops(struct device *dev) | |||
23 | if (dev->bus == &vio_bus_type) | 24 | if (dev->bus == &vio_bus_type) |
24 | return &vio_dma_ops; | 25 | return &vio_dma_ops; |
25 | #endif | 26 | #endif |
27 | #ifdef CONFIG_IBMEBUS | ||
28 | if (dev->bus == &ibmebus_bus_type) | ||
29 | return &ibmebus_dma_ops; | ||
30 | #endif | ||
26 | return NULL; | 31 | return NULL; |
27 | } | 32 | } |
28 | 33 | ||
@@ -47,6 +52,10 @@ int dma_set_mask(struct device *dev, u64 dma_mask) | |||
47 | if (dev->bus == &vio_bus_type) | 52 | if (dev->bus == &vio_bus_type) |
48 | return -EIO; | 53 | return -EIO; |
49 | #endif /* CONFIG_IBMVIO */ | 54 | #endif /* CONFIG_IBMVIO */ |
55 | #ifdef CONFIG_IBMEBUS | ||
56 | if (dev->bus == &ibmebus_bus_type) | ||
57 | return -EIO; | ||
58 | #endif | ||
50 | BUG(); | 59 | BUG(); |
51 | return 0; | 60 | return 0; |
52 | } | 61 | } |
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S index 2e99ae41723c..036b71d2adfc 100644 --- a/arch/powerpc/kernel/entry_32.S +++ b/arch/powerpc/kernel/entry_32.S | |||
@@ -200,8 +200,6 @@ _GLOBAL(DoSyscall) | |||
200 | bl do_show_syscall | 200 | bl do_show_syscall |
201 | #endif /* SHOW_SYSCALLS */ | 201 | #endif /* SHOW_SYSCALLS */ |
202 | rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ | 202 | rlwinm r10,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ |
203 | li r11,0 | ||
204 | stb r11,TI_SC_NOERR(r10) | ||
205 | lwz r11,TI_FLAGS(r10) | 203 | lwz r11,TI_FLAGS(r10) |
206 | andi. r11,r11,_TIF_SYSCALL_T_OR_A | 204 | andi. r11,r11,_TIF_SYSCALL_T_OR_A |
207 | bne- syscall_dotrace | 205 | bne- syscall_dotrace |
@@ -222,25 +220,21 @@ ret_from_syscall: | |||
222 | bl do_show_syscall_exit | 220 | bl do_show_syscall_exit |
223 | #endif | 221 | #endif |
224 | mr r6,r3 | 222 | mr r6,r3 |
225 | li r11,-_LAST_ERRNO | ||
226 | cmplw 0,r3,r11 | ||
227 | rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ | 223 | rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ |
228 | blt+ 30f | ||
229 | lbz r11,TI_SC_NOERR(r12) | ||
230 | cmpwi r11,0 | ||
231 | bne 30f | ||
232 | neg r3,r3 | ||
233 | lwz r10,_CCR(r1) /* Set SO bit in CR */ | ||
234 | oris r10,r10,0x1000 | ||
235 | stw r10,_CCR(r1) | ||
236 | |||
237 | /* disable interrupts so current_thread_info()->flags can't change */ | 224 | /* disable interrupts so current_thread_info()->flags can't change */ |
238 | 30: LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */ | 225 | LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */ |
239 | SYNC | 226 | SYNC |
240 | MTMSRD(r10) | 227 | MTMSRD(r10) |
241 | lwz r9,TI_FLAGS(r12) | 228 | lwz r9,TI_FLAGS(r12) |
242 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED) | 229 | li r8,-_LAST_ERRNO |
230 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL) | ||
243 | bne- syscall_exit_work | 231 | bne- syscall_exit_work |
232 | cmplw 0,r3,r8 | ||
233 | blt+ syscall_exit_cont | ||
234 | lwz r11,_CCR(r1) /* Load CR */ | ||
235 | neg r3,r3 | ||
236 | oris r11,r11,0x1000 /* Set SO bit in CR */ | ||
237 | stw r11,_CCR(r1) | ||
244 | syscall_exit_cont: | 238 | syscall_exit_cont: |
245 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) | 239 | #if defined(CONFIG_4xx) || defined(CONFIG_BOOKE) |
246 | /* If the process has its own DBCR0 value, load it up. The single | 240 | /* If the process has its own DBCR0 value, load it up. The single |
@@ -292,46 +286,113 @@ syscall_dotrace: | |||
292 | b syscall_dotrace_cont | 286 | b syscall_dotrace_cont |
293 | 287 | ||
294 | syscall_exit_work: | 288 | syscall_exit_work: |
295 | stw r6,RESULT(r1) /* Save result */ | 289 | andi. r0,r9,_TIF_RESTOREALL |
290 | bne- 2f | ||
291 | cmplw 0,r3,r8 | ||
292 | blt+ 1f | ||
293 | andi. r0,r9,_TIF_NOERROR | ||
294 | bne- 1f | ||
295 | lwz r11,_CCR(r1) /* Load CR */ | ||
296 | neg r3,r3 | ||
297 | oris r11,r11,0x1000 /* Set SO bit in CR */ | ||
298 | stw r11,_CCR(r1) | ||
299 | |||
300 | 1: stw r6,RESULT(r1) /* Save result */ | ||
296 | stw r3,GPR3(r1) /* Update return value */ | 301 | stw r3,GPR3(r1) /* Update return value */ |
297 | andi. r0,r9,_TIF_SYSCALL_T_OR_A | 302 | 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK) |
298 | beq 5f | 303 | beq 4f |
299 | ori r10,r10,MSR_EE | 304 | |
300 | SYNC | 305 | /* Clear per-syscall TIF flags if any are set, but _leave_ |
301 | MTMSRD(r10) /* re-enable interrupts */ | 306 | _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that |
307 | yet. */ | ||
308 | |||
309 | li r11,_TIF_PERSYSCALL_MASK | ||
310 | addi r12,r12,TI_FLAGS | ||
311 | 3: lwarx r8,0,r12 | ||
312 | andc r8,r8,r11 | ||
313 | #ifdef CONFIG_IBM405_ERR77 | ||
314 | dcbt 0,r12 | ||
315 | #endif | ||
316 | stwcx. r8,0,r12 | ||
317 | bne- 3b | ||
318 | subi r12,r12,TI_FLAGS | ||
319 | |||
320 | 4: /* Anything which requires enabling interrupts? */ | ||
321 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS) | ||
322 | beq 7f | ||
323 | |||
324 | /* Save NVGPRS if they're not saved already */ | ||
302 | lwz r4,_TRAP(r1) | 325 | lwz r4,_TRAP(r1) |
303 | andi. r4,r4,1 | 326 | andi. r4,r4,1 |
304 | beq 4f | 327 | beq 5f |
305 | SAVE_NVGPRS(r1) | 328 | SAVE_NVGPRS(r1) |
306 | li r4,0xc00 | 329 | li r4,0xc00 |
307 | stw r4,_TRAP(r1) | 330 | stw r4,_TRAP(r1) |
308 | 4: | 331 | |
332 | /* Re-enable interrupts */ | ||
333 | 5: ori r10,r10,MSR_EE | ||
334 | SYNC | ||
335 | MTMSRD(r10) | ||
336 | |||
337 | andi. r0,r9,_TIF_SAVE_NVGPRS | ||
338 | bne save_user_nvgprs | ||
339 | |||
340 | save_user_nvgprs_cont: | ||
341 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) | ||
342 | beq 7f | ||
343 | |||
309 | addi r3,r1,STACK_FRAME_OVERHEAD | 344 | addi r3,r1,STACK_FRAME_OVERHEAD |
310 | bl do_syscall_trace_leave | 345 | bl do_syscall_trace_leave |
311 | REST_NVGPRS(r1) | 346 | REST_NVGPRS(r1) |
312 | 2: | 347 | |
313 | lwz r3,GPR3(r1) | 348 | 6: lwz r3,GPR3(r1) |
314 | LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */ | 349 | LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */ |
315 | SYNC | 350 | SYNC |
316 | MTMSRD(r10) /* disable interrupts again */ | 351 | MTMSRD(r10) /* disable interrupts again */ |
317 | rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ | 352 | rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ |
318 | lwz r9,TI_FLAGS(r12) | 353 | lwz r9,TI_FLAGS(r12) |
319 | 5: | 354 | 7: |
320 | andi. r0,r9,_TIF_NEED_RESCHED | 355 | andi. r0,r9,_TIF_NEED_RESCHED |
321 | bne 1f | 356 | bne 8f |
322 | lwz r5,_MSR(r1) | 357 | lwz r5,_MSR(r1) |
323 | andi. r5,r5,MSR_PR | 358 | andi. r5,r5,MSR_PR |
324 | beq syscall_exit_cont | 359 | beq ret_from_except |
325 | andi. r0,r9,_TIF_SIGPENDING | 360 | andi. r0,r9,_TIF_SIGPENDING |
326 | beq syscall_exit_cont | 361 | beq ret_from_except |
327 | b do_user_signal | 362 | b do_user_signal |
328 | 1: | 363 | 8: |
329 | ori r10,r10,MSR_EE | 364 | ori r10,r10,MSR_EE |
330 | SYNC | 365 | SYNC |
331 | MTMSRD(r10) /* re-enable interrupts */ | 366 | MTMSRD(r10) /* re-enable interrupts */ |
332 | bl schedule | 367 | bl schedule |
333 | b 2b | 368 | b 6b |
369 | |||
370 | save_user_nvgprs: | ||
371 | lwz r8,TI_SIGFRAME(r12) | ||
372 | |||
373 | .macro savewords start, end | ||
374 | 1: stw \start,4*(\start)(r8) | ||
375 | .section __ex_table,"a" | ||
376 | .align 2 | ||
377 | .long 1b,save_user_nvgprs_fault | ||
378 | .previous | ||
379 | .if \end - \start | ||
380 | savewords "(\start+1)",\end | ||
381 | .endif | ||
382 | .endm | ||
383 | savewords 14,31 | ||
384 | b save_user_nvgprs_cont | ||
385 | |||
386 | |||
387 | save_user_nvgprs_fault: | ||
388 | li r3,11 /* SIGSEGV */ | ||
389 | lwz r4,TI_TASK(r12) | ||
390 | bl force_sigsegv | ||
334 | 391 | ||
392 | rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ | ||
393 | lwz r9,TI_FLAGS(r12) | ||
394 | b save_user_nvgprs_cont | ||
395 | |||
335 | #ifdef SHOW_SYSCALLS | 396 | #ifdef SHOW_SYSCALLS |
336 | do_show_syscall: | 397 | do_show_syscall: |
337 | #ifdef SHOW_SYSCALLS_TASK | 398 | #ifdef SHOW_SYSCALLS_TASK |
@@ -401,28 +462,10 @@ show_syscalls_task: | |||
401 | #endif /* SHOW_SYSCALLS */ | 462 | #endif /* SHOW_SYSCALLS */ |
402 | 463 | ||
403 | /* | 464 | /* |
404 | * The sigsuspend and rt_sigsuspend system calls can call do_signal | 465 | * The fork/clone functions need to copy the full register set into |
405 | * and thus put the process into the stopped state where we might | 466 | * the child process. Therefore we need to save all the nonvolatile |
406 | * want to examine its user state with ptrace. Therefore we need | 467 | * registers (r13 - r31) before calling the C code. |
407 | * to save all the nonvolatile registers (r13 - r31) before calling | ||
408 | * the C code. | ||
409 | */ | 468 | */ |
410 | .globl ppc_sigsuspend | ||
411 | ppc_sigsuspend: | ||
412 | SAVE_NVGPRS(r1) | ||
413 | lwz r0,_TRAP(r1) | ||
414 | rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ | ||
415 | stw r0,_TRAP(r1) /* register set saved */ | ||
416 | b sys_sigsuspend | ||
417 | |||
418 | .globl ppc_rt_sigsuspend | ||
419 | ppc_rt_sigsuspend: | ||
420 | SAVE_NVGPRS(r1) | ||
421 | lwz r0,_TRAP(r1) | ||
422 | rlwinm r0,r0,0,0,30 | ||
423 | stw r0,_TRAP(r1) | ||
424 | b sys_rt_sigsuspend | ||
425 | |||
426 | .globl ppc_fork | 469 | .globl ppc_fork |
427 | ppc_fork: | 470 | ppc_fork: |
428 | SAVE_NVGPRS(r1) | 471 | SAVE_NVGPRS(r1) |
@@ -447,14 +490,6 @@ ppc_clone: | |||
447 | stw r0,_TRAP(r1) /* register set saved */ | 490 | stw r0,_TRAP(r1) /* register set saved */ |
448 | b sys_clone | 491 | b sys_clone |
449 | 492 | ||
450 | .globl ppc_swapcontext | ||
451 | ppc_swapcontext: | ||
452 | SAVE_NVGPRS(r1) | ||
453 | lwz r0,_TRAP(r1) | ||
454 | rlwinm r0,r0,0,0,30 /* clear LSB to indicate full */ | ||
455 | stw r0,_TRAP(r1) /* register set saved */ | ||
456 | b sys_swapcontext | ||
457 | |||
458 | /* | 493 | /* |
459 | * Top-level page fault handling. | 494 | * Top-level page fault handling. |
460 | * This is in assembler because if do_page_fault tells us that | 495 | * This is in assembler because if do_page_fault tells us that |
@@ -626,16 +661,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_601) | |||
626 | .long ret_from_except | 661 | .long ret_from_except |
627 | #endif | 662 | #endif |
628 | 663 | ||
629 | .globl sigreturn_exit | ||
630 | sigreturn_exit: | ||
631 | subi r1,r3,STACK_FRAME_OVERHEAD | ||
632 | rlwinm r12,r1,0,0,(31-THREAD_SHIFT) /* current_thread_info() */ | ||
633 | lwz r9,TI_FLAGS(r12) | ||
634 | andi. r0,r9,_TIF_SYSCALL_T_OR_A | ||
635 | beq+ ret_from_except_full | ||
636 | bl do_syscall_trace_leave | ||
637 | /* fall through */ | ||
638 | |||
639 | .globl ret_from_except_full | 664 | .globl ret_from_except_full |
640 | ret_from_except_full: | 665 | ret_from_except_full: |
641 | REST_NVGPRS(r1) | 666 | REST_NVGPRS(r1) |
@@ -658,7 +683,7 @@ user_exc_return: /* r10 contains MSR_KERNEL here */ | |||
658 | /* Check current_thread_info()->flags */ | 683 | /* Check current_thread_info()->flags */ |
659 | rlwinm r9,r1,0,0,(31-THREAD_SHIFT) | 684 | rlwinm r9,r1,0,0,(31-THREAD_SHIFT) |
660 | lwz r9,TI_FLAGS(r9) | 685 | lwz r9,TI_FLAGS(r9) |
661 | andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED) | 686 | andi. r0,r9,(_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL) |
662 | bne do_work | 687 | bne do_work |
663 | 688 | ||
664 | restore_user: | 689 | restore_user: |
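The 32-bit syscall exit rework above drops the old TI_SC_NOERR byte and drives the error fixup from thread_info flags instead. Stripped of register allocation, the logic amounts to the following C sketch (flag and field names are the ones visible in the diff; this is illustrative only, the real code is the assembly above):

	/* Pseudo-C sketch of the result fixup at syscall exit. */
	static long syscall_exit_fixup(long ret, unsigned long ti_flags,
				       struct pt_regs *regs)
	{
		if (!(ti_flags & _TIF_RESTOREALL)) {
			if ((unsigned long)ret >= (unsigned long)(-_LAST_ERRNO) &&
			    !(ti_flags & _TIF_NOERROR)) {
				regs->ccr |= 0x10000000;  /* CR0.SO: syscall failed   */
				ret = -ret;               /* positive errno for users */
			}
			regs->gpr[3] = ret;
		}
		/* The _TIF_PERSYSCALL_MASK bits are then cleared with an atomic
		 * lwarx/stwcx. loop, since other TI_FLAGS bits may change
		 * concurrently; _TIF_SAVE_NVGPRS is handled separately. */
		return ret;
	}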
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S index bce33a38399f..aacebb33e98a 100644 --- a/arch/powerpc/kernel/entry_64.S +++ b/arch/powerpc/kernel/entry_64.S | |||
@@ -113,9 +113,7 @@ system_call_common: | |||
113 | addi r9,r1,STACK_FRAME_OVERHEAD | 113 | addi r9,r1,STACK_FRAME_OVERHEAD |
114 | #endif | 114 | #endif |
115 | clrrdi r11,r1,THREAD_SHIFT | 115 | clrrdi r11,r1,THREAD_SHIFT |
116 | li r12,0 | ||
117 | ld r10,TI_FLAGS(r11) | 116 | ld r10,TI_FLAGS(r11) |
118 | stb r12,TI_SC_NOERR(r11) | ||
119 | andi. r11,r10,_TIF_SYSCALL_T_OR_A | 117 | andi. r11,r10,_TIF_SYSCALL_T_OR_A |
120 | bne- syscall_dotrace | 118 | bne- syscall_dotrace |
121 | syscall_dotrace_cont: | 119 | syscall_dotrace_cont: |
@@ -144,24 +142,12 @@ system_call: /* label this so stack traces look sane */ | |||
144 | bctrl /* Call handler */ | 142 | bctrl /* Call handler */ |
145 | 143 | ||
146 | syscall_exit: | 144 | syscall_exit: |
145 | std r3,RESULT(r1) | ||
147 | #ifdef SHOW_SYSCALLS | 146 | #ifdef SHOW_SYSCALLS |
148 | std r3,GPR3(r1) | ||
149 | bl .do_show_syscall_exit | 147 | bl .do_show_syscall_exit |
150 | ld r3,GPR3(r1) | 148 | ld r3,RESULT(r1) |
151 | #endif | 149 | #endif |
152 | std r3,RESULT(r1) | ||
153 | ld r5,_CCR(r1) | ||
154 | li r10,-_LAST_ERRNO | ||
155 | cmpld r3,r10 | ||
156 | clrrdi r12,r1,THREAD_SHIFT | 150 | clrrdi r12,r1,THREAD_SHIFT |
157 | bge- syscall_error | ||
158 | syscall_error_cont: | ||
159 | |||
160 | /* check for syscall tracing or audit */ | ||
161 | ld r9,TI_FLAGS(r12) | ||
162 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) | ||
163 | bne- syscall_exit_trace | ||
164 | syscall_exit_trace_cont: | ||
165 | 151 | ||
166 | /* disable interrupts so current_thread_info()->flags can't change, | 152 | /* disable interrupts so current_thread_info()->flags can't change, |
167 | and so that we don't get interrupted after loading SRR0/1. */ | 153 | and so that we don't get interrupted after loading SRR0/1. */ |
@@ -173,8 +159,13 @@ syscall_exit_trace_cont: | |||
173 | rotldi r10,r10,16 | 159 | rotldi r10,r10,16 |
174 | mtmsrd r10,1 | 160 | mtmsrd r10,1 |
175 | ld r9,TI_FLAGS(r12) | 161 | ld r9,TI_FLAGS(r12) |
176 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SIGPENDING|_TIF_NEED_RESCHED) | 162 | li r11,-_LAST_ERRNO |
163 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SIGPENDING|_TIF_NEED_RESCHED|_TIF_RESTOREALL|_TIF_SAVE_NVGPRS|_TIF_NOERROR) | ||
177 | bne- syscall_exit_work | 164 | bne- syscall_exit_work |
165 | cmpld r3,r11 | ||
166 | ld r5,_CCR(r1) | ||
167 | bge- syscall_error | ||
168 | syscall_error_cont: | ||
178 | ld r7,_NIP(r1) | 169 | ld r7,_NIP(r1) |
179 | stdcx. r0,0,r1 /* to clear the reservation */ | 170 | stdcx. r0,0,r1 /* to clear the reservation */ |
180 | andi. r6,r8,MSR_PR | 171 | andi. r6,r8,MSR_PR |
@@ -193,21 +184,12 @@ syscall_exit_trace_cont: | |||
193 | rfid | 184 | rfid |
194 | b . /* prevent speculative execution */ | 185 | b . /* prevent speculative execution */ |
195 | 186 | ||
196 | syscall_enosys: | 187 | syscall_error: |
197 | li r3,-ENOSYS | ||
198 | std r3,RESULT(r1) | ||
199 | clrrdi r12,r1,THREAD_SHIFT | ||
200 | ld r5,_CCR(r1) | ||
201 | |||
202 | syscall_error: | ||
203 | lbz r11,TI_SC_NOERR(r12) | ||
204 | cmpwi 0,r11,0 | ||
205 | bne- syscall_error_cont | ||
206 | neg r3,r3 | ||
207 | oris r5,r5,0x1000 /* Set SO bit in CR */ | 188 | oris r5,r5,0x1000 /* Set SO bit in CR */ |
189 | neg r3,r3 | ||
208 | std r5,_CCR(r1) | 190 | std r5,_CCR(r1) |
209 | b syscall_error_cont | 191 | b syscall_error_cont |
210 | 192 | ||
211 | /* Traced system call support */ | 193 | /* Traced system call support */ |
212 | syscall_dotrace: | 194 | syscall_dotrace: |
213 | bl .save_nvgprs | 195 | bl .save_nvgprs |
@@ -225,21 +207,69 @@ syscall_dotrace: | |||
225 | ld r10,TI_FLAGS(r10) | 207 | ld r10,TI_FLAGS(r10) |
226 | b syscall_dotrace_cont | 208 | b syscall_dotrace_cont |
227 | 209 | ||
228 | syscall_exit_trace: | 210 | syscall_enosys: |
229 | std r3,GPR3(r1) | 211 | li r3,-ENOSYS |
230 | bl .save_nvgprs | 212 | b syscall_exit |
213 | |||
214 | syscall_exit_work: | ||
215 | /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr. | ||
216 | If TIF_NOERROR is set, just save r3 as it is. */ | ||
217 | |||
218 | andi. r0,r9,_TIF_RESTOREALL | ||
219 | bne- 2f | ||
220 | cmpld r3,r11 /* r11 is -LAST_ERRNO */ | ||
221 | blt+ 1f | ||
222 | andi. r0,r9,_TIF_NOERROR | ||
223 | bne- 1f | ||
224 | ld r5,_CCR(r1) | ||
225 | neg r3,r3 | ||
226 | oris r5,r5,0x1000 /* Set SO bit in CR */ | ||
227 | std r5,_CCR(r1) | ||
228 | 1: std r3,GPR3(r1) | ||
229 | 2: andi. r0,r9,(_TIF_PERSYSCALL_MASK) | ||
230 | beq 4f | ||
231 | |||
232 | /* Clear per-syscall TIF flags if any are set, but _leave_ | ||
233 | _TIF_SAVE_NVGPRS set in r9 since we haven't dealt with that | ||
234 | yet. */ | ||
235 | |||
236 | li r11,_TIF_PERSYSCALL_MASK | ||
237 | addi r12,r12,TI_FLAGS | ||
238 | 3: ldarx r10,0,r12 | ||
239 | andc r10,r10,r11 | ||
240 | stdcx. r10,0,r12 | ||
241 | bne- 3b | ||
242 | subi r12,r12,TI_FLAGS | ||
243 | |||
244 | 4: bl .save_nvgprs | ||
245 | /* Anything else left to do? */ | ||
246 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_SAVE_NVGPRS) | ||
247 | beq .ret_from_except_lite | ||
248 | |||
249 | /* Re-enable interrupts */ | ||
250 | mfmsr r10 | ||
251 | ori r10,r10,MSR_EE | ||
252 | mtmsrd r10,1 | ||
253 | |||
254 | andi. r0,r9,_TIF_SAVE_NVGPRS | ||
255 | bne save_user_nvgprs | ||
256 | |||
257 | /* If tracing, re-enable interrupts and do it */ | ||
258 | save_user_nvgprs_cont: | ||
259 | andi. r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) | ||
260 | beq 5f | ||
261 | |||
231 | addi r3,r1,STACK_FRAME_OVERHEAD | 262 | addi r3,r1,STACK_FRAME_OVERHEAD |
232 | bl .do_syscall_trace_leave | 263 | bl .do_syscall_trace_leave |
233 | REST_NVGPRS(r1) | 264 | REST_NVGPRS(r1) |
234 | ld r3,GPR3(r1) | ||
235 | ld r5,_CCR(r1) | ||
236 | clrrdi r12,r1,THREAD_SHIFT | 265 | clrrdi r12,r1,THREAD_SHIFT |
237 | b syscall_exit_trace_cont | ||
238 | 266 | ||
239 | /* Stuff to do on exit from a system call. */ | 267 | /* Disable interrupts again and handle other work if any */ |
240 | syscall_exit_work: | 268 | 5: mfmsr r10 |
241 | std r3,GPR3(r1) | 269 | rldicl r10,r10,48,1 |
242 | std r5,_CCR(r1) | 270 | rotldi r10,r10,16 |
271 | mtmsrd r10,1 | ||
272 | |||
243 | b .ret_from_except_lite | 273 | b .ret_from_except_lite |
244 | 274 | ||
245 | /* Save non-volatile GPRs, if not already saved. */ | 275 | /* Save non-volatile GPRs, if not already saved. */ |
@@ -252,6 +282,52 @@ _GLOBAL(save_nvgprs) | |||
252 | std r0,_TRAP(r1) | 282 | std r0,_TRAP(r1) |
253 | blr | 283 | blr |
254 | 284 | ||
285 | |||
286 | save_user_nvgprs: | ||
287 | ld r10,TI_SIGFRAME(r12) | ||
288 | andi. r0,r9,_TIF_32BIT | ||
289 | beq- save_user_nvgprs_64 | ||
290 | |||
291 | /* 32-bit save to userspace */ | ||
292 | |||
293 | .macro savewords start, end | ||
294 | 1: stw \start,4*(\start)(r10) | ||
295 | .section __ex_table,"a" | ||
296 | .align 3 | ||
297 | .llong 1b,save_user_nvgprs_fault | ||
298 | .previous | ||
299 | .if \end - \start | ||
300 | savewords "(\start+1)",\end | ||
301 | .endif | ||
302 | .endm | ||
303 | savewords 14,31 | ||
304 | b save_user_nvgprs_cont | ||
305 | |||
306 | save_user_nvgprs_64: | ||
307 | /* 64-bit save to userspace */ | ||
308 | |||
309 | .macro savelongs start, end | ||
310 | 1: std \start,8*(\start)(r10) | ||
311 | .section __ex_table,"a" | ||
312 | .align 3 | ||
313 | .llong 1b,save_user_nvgprs_fault | ||
314 | .previous | ||
315 | .if \end - \start | ||
316 | savelongs "(\start+1)",\end | ||
317 | .endif | ||
318 | .endm | ||
319 | savelongs 14,31 | ||
320 | b save_user_nvgprs_cont | ||
321 | |||
322 | save_user_nvgprs_fault: | ||
323 | li r3,11 /* SIGSEGV */ | ||
324 | ld r4,TI_TASK(r12) | ||
325 | bl .force_sigsegv | ||
326 | |||
327 | clrrdi r12,r1,THREAD_SHIFT | ||
328 | ld r9,TI_FLAGS(r12) | ||
329 | b save_user_nvgprs_cont | ||
330 | |||
255 | /* | 331 | /* |
256 | * The sigsuspend and rt_sigsuspend system calls can call do_signal | 332 | * The sigsuspend and rt_sigsuspend system calls can call do_signal |
257 | * and thus put the process into the stopped state where we might | 333 | * and thus put the process into the stopped state where we might |
@@ -260,35 +336,6 @@ _GLOBAL(save_nvgprs) | |||
260 | * the C code. Similarly, fork, vfork and clone need the full | 336 | * the C code. Similarly, fork, vfork and clone need the full |
261 | * register state on the stack so that it can be copied to the child. | 337 | * register state on the stack so that it can be copied to the child. |
262 | */ | 338 | */ |
263 | _GLOBAL(ppc32_sigsuspend) | ||
264 | bl .save_nvgprs | ||
265 | bl .compat_sys_sigsuspend | ||
266 | b 70f | ||
267 | |||
268 | _GLOBAL(ppc64_rt_sigsuspend) | ||
269 | bl .save_nvgprs | ||
270 | bl .sys_rt_sigsuspend | ||
271 | b 70f | ||
272 | |||
273 | _GLOBAL(ppc32_rt_sigsuspend) | ||
274 | bl .save_nvgprs | ||
275 | bl .compat_sys_rt_sigsuspend | ||
276 | 70: cmpdi 0,r3,0 | ||
277 | /* If it returned an error, we need to return via syscall_exit to set | ||
278 | the SO bit in cr0 and potentially stop for ptrace. */ | ||
279 | bne syscall_exit | ||
280 | /* If sigsuspend() returns zero, we are going into a signal handler. We | ||
281 | may need to call audit_syscall_exit() to mark the exit from sigsuspend() */ | ||
282 | #ifdef CONFIG_AUDITSYSCALL | ||
283 | ld r3,PACACURRENT(r13) | ||
284 | ld r4,AUDITCONTEXT(r3) | ||
285 | cmpdi 0,r4,0 | ||
286 | beq .ret_from_except /* No audit_context: Leave immediately. */ | ||
287 | li r4, 2 /* AUDITSC_FAILURE */ | ||
288 | li r5,-4 /* It's always -EINTR */ | ||
289 | bl .audit_syscall_exit | ||
290 | #endif | ||
291 | b .ret_from_except | ||
292 | 339 | ||
293 | _GLOBAL(ppc_fork) | 340 | _GLOBAL(ppc_fork) |
294 | bl .save_nvgprs | 341 | bl .save_nvgprs |
@@ -305,37 +352,6 @@ _GLOBAL(ppc_clone) | |||
305 | bl .sys_clone | 352 | bl .sys_clone |
306 | b syscall_exit | 353 | b syscall_exit |
307 | 354 | ||
308 | _GLOBAL(ppc32_swapcontext) | ||
309 | bl .save_nvgprs | ||
310 | bl .compat_sys_swapcontext | ||
311 | b 80f | ||
312 | |||
313 | _GLOBAL(ppc64_swapcontext) | ||
314 | bl .save_nvgprs | ||
315 | bl .sys_swapcontext | ||
316 | b 80f | ||
317 | |||
318 | _GLOBAL(ppc32_sigreturn) | ||
319 | bl .compat_sys_sigreturn | ||
320 | b 80f | ||
321 | |||
322 | _GLOBAL(ppc32_rt_sigreturn) | ||
323 | bl .compat_sys_rt_sigreturn | ||
324 | b 80f | ||
325 | |||
326 | _GLOBAL(ppc64_rt_sigreturn) | ||
327 | bl .sys_rt_sigreturn | ||
328 | |||
329 | 80: cmpdi 0,r3,0 | ||
330 | blt syscall_exit | ||
331 | clrrdi r4,r1,THREAD_SHIFT | ||
332 | ld r4,TI_FLAGS(r4) | ||
333 | andi. r4,r4,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP) | ||
334 | beq+ 81f | ||
335 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
336 | bl .do_syscall_trace_leave | ||
337 | 81: b .ret_from_except | ||
338 | |||
339 | _GLOBAL(ret_from_fork) | 355 | _GLOBAL(ret_from_fork) |
340 | bl .schedule_tail | 356 | bl .schedule_tail |
341 | REST_NVGPRS(r1) | 357 | REST_NVGPRS(r1) |
@@ -674,7 +690,7 @@ _GLOBAL(enter_rtas) | |||
674 | 690 | ||
675 | /* Setup our real return addr */ | 691 | /* Setup our real return addr */ |
676 | SET_REG_TO_LABEL(r4,.rtas_return_loc) | 692 | SET_REG_TO_LABEL(r4,.rtas_return_loc) |
677 | SET_REG_TO_CONST(r9,KERNELBASE) | 693 | SET_REG_TO_CONST(r9,PAGE_OFFSET) |
678 | sub r4,r4,r9 | 694 | sub r4,r4,r9 |
679 | mtlr r4 | 695 | mtlr r4 |
680 | 696 | ||
@@ -702,7 +718,7 @@ _GLOBAL(enter_rtas) | |||
702 | _STATIC(rtas_return_loc) | 718 | _STATIC(rtas_return_loc) |
703 | /* relocation is off at this point */ | 719 | /* relocation is off at this point */ |
704 | mfspr r4,SPRN_SPRG3 /* Get PACA */ | 720 | mfspr r4,SPRN_SPRG3 /* Get PACA */ |
705 | SET_REG_TO_CONST(r5, KERNELBASE) | 721 | SET_REG_TO_CONST(r5, PAGE_OFFSET) |
706 | sub r4,r4,r5 /* RELOC the PACA base pointer */ | 722 | sub r4,r4,r5 /* RELOC the PACA base pointer */ |
707 | 723 | ||
708 | mfmsr r6 | 724 | mfmsr r6 |
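Both the 32-bit and 64-bit paths above gain save_user_nvgprs, which spills r14-r31 directly into the user signal frame recorded in thread_info (TI_SIGFRAME); the __ex_table entries turn a faulting store into a SIGSEGV via force_sigsegv. In C the same operation would look roughly like this, a sketch under the assumption that the frame layout is simply 4- or 8-byte register slots indexed by register number, as the 4*(\start)/8*(\start) offsets suggest:

	/* Sketch of what the savewords/savelongs macros accomplish. */
	static int save_user_nvgprs_sketch(struct pt_regs *regs,
					   void __user *sigframe, int is_32bit)
	{
		int i, err = 0;

		for (i = 14; i <= 31; i++) {
			if (is_32bit)		/* stw rN,4*N(frame) */
				err = __put_user((u32)regs->gpr[i],
						 (u32 __user *)sigframe + i);
			else			/* std rN,8*N(frame) */
				err = __put_user(regs->gpr[i],
						 (u64 __user *)sigframe + i);
			if (err) {		/* save_user_nvgprs_fault */
				force_sigsegv(SIGSEGV, current);
				return -EFAULT;
			}
		}
		return 0;
	}

The assembly version avoids a C call per register and reuses the existing exception-table machinery for the fault case.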
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index ccdf94731e30..03b25f9359f8 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S | |||
@@ -120,10 +120,25 @@ __start: | |||
120 | * because OF may have I/O devices mapped into that area | 120 | * because OF may have I/O devices mapped into that area |
121 | * (particularly on CHRP). | 121 | * (particularly on CHRP). |
122 | */ | 122 | */ |
123 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
123 | cmpwi 0,r5,0 | 124 | cmpwi 0,r5,0 |
124 | beq 1f | 125 | beq 1f |
125 | bl prom_init | 126 | bl prom_init |
126 | trap | 127 | trap |
128 | #endif | ||
129 | |||
130 | /* | ||
131 | * Check for BootX signature when supporting PowerMac and branch to | ||
132 | * appropriate trampoline if it's present | ||
133 | */ | ||
134 | #ifdef CONFIG_PPC_PMAC | ||
135 | 1: lis r31,0x426f | ||
136 | ori r31,r31,0x6f58 | ||
137 | cmpw 0,r3,r31 | ||
138 | bne 1f | ||
139 | bl bootx_init | ||
140 | trap | ||
141 | #endif /* CONFIG_PPC_PMAC */ | ||
127 | 142 | ||
128 | 1: mr r31,r3 /* save parameters */ | 143 | 1: mr r31,r3 /* save parameters */ |
129 | mr r30,r4 | 144 | mr r30,r4 |
@@ -153,6 +168,9 @@ __after_mmu_off: | |||
153 | bl flush_tlbs | 168 | bl flush_tlbs |
154 | 169 | ||
155 | bl initial_bats | 170 | bl initial_bats |
171 | #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) | ||
172 | bl setup_disp_bat | ||
173 | #endif | ||
156 | 174 | ||
157 | /* | 175 | /* |
158 | * Call setup_cpu for CPU 0 and initialize 6xx Idle | 176 | * Call setup_cpu for CPU 0 and initialize 6xx Idle |
@@ -450,16 +468,11 @@ SystemCall: | |||
450 | * by executing an altivec instruction. | 468 | * by executing an altivec instruction. |
451 | */ | 469 | */ |
452 | . = 0xf00 | 470 | . = 0xf00 |
453 | b Trap_0f | 471 | b PerformanceMonitor |
454 | 472 | ||
455 | . = 0xf20 | 473 | . = 0xf20 |
456 | b AltiVecUnavailable | 474 | b AltiVecUnavailable |
457 | 475 | ||
458 | Trap_0f: | ||
459 | EXCEPTION_PROLOG | ||
460 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
461 | EXC_XFER_EE(0xf00, unknown_exception) | ||
462 | |||
463 | /* | 476 | /* |
464 | * Handle TLB miss for instruction on 603/603e. | 477 | * Handle TLB miss for instruction on 603/603e. |
465 | * Note: we get an alternate set of r0 - r3 to use automatically. | 478 | * Note: we get an alternate set of r0 - r3 to use automatically. |
@@ -703,6 +716,11 @@ AltiVecUnavailable: | |||
703 | #endif /* CONFIG_ALTIVEC */ | 716 | #endif /* CONFIG_ALTIVEC */ |
704 | EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception) | 717 | EXC_XFER_EE_LITE(0xf20, altivec_unavailable_exception) |
705 | 718 | ||
719 | PerformanceMonitor: | ||
720 | EXCEPTION_PROLOG | ||
721 | addi r3,r1,STACK_FRAME_OVERHEAD | ||
722 | EXC_XFER_STD(0xf00, performance_monitor_exception) | ||
723 | |||
706 | #ifdef CONFIG_ALTIVEC | 724 | #ifdef CONFIG_ALTIVEC |
707 | /* Note that the AltiVec support is closely modeled after the FP | 725 | /* Note that the AltiVec support is closely modeled after the FP |
708 | * support. Changes to one are likely to be applicable to the | 726 | * support. Changes to one are likely to be applicable to the |
@@ -1306,6 +1324,32 @@ initial_bats: | |||
1306 | blr | 1324 | blr |
1307 | 1325 | ||
1308 | 1326 | ||
1327 | #if !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) | ||
1328 | setup_disp_bat: | ||
1329 | /* | ||
1330 | * setup the display bat prepared for us in prom.c | ||
1331 | */ | ||
1332 | mflr r8 | ||
1333 | bl reloc_offset | ||
1334 | mtlr r8 | ||
1335 | addis r8,r3,disp_BAT@ha | ||
1336 | addi r8,r8,disp_BAT@l | ||
1337 | cmpwi cr0,r8,0 | ||
1338 | beqlr | ||
1339 | lwz r11,0(r8) | ||
1340 | lwz r8,4(r8) | ||
1341 | mfspr r9,SPRN_PVR | ||
1342 | rlwinm r9,r9,16,16,31 /* r9 = 1 for 601, 4 for 604 */ | ||
1343 | cmpwi 0,r9,1 | ||
1344 | beq 1f | ||
1345 | mtspr SPRN_DBAT3L,r8 | ||
1346 | mtspr SPRN_DBAT3U,r11 | ||
1347 | blr | ||
1348 | 1: mtspr SPRN_IBAT3L,r8 | ||
1349 | mtspr SPRN_IBAT3U,r11 | ||
1350 | blr | ||
1351 | #endif /* !defined(CONFIG_APUS) && defined(CONFIG_BOOTX_TEXT) */ | ||
1352 | |||
1309 | #ifdef CONFIG_8260 | 1353 | #ifdef CONFIG_8260 |
1310 | /* Jump into the system reset for the rom. | 1354 | /* Jump into the system reset for the rom. |
1311 | * We first disable the MMU, and then jump to the ROM reset address. | 1355 | * We first disable the MMU, and then jump to the ROM reset address. |
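The magic loaded into r31 in the __start change above is 0x426f6f58, i.e. the ASCII bytes 'B','o','o','X': the signature the BootX bootloader leaves in r3 on PowerMacs. A one-line C rendering of the check, purely for illustration (the bootx_init argument registers are assumed here, not taken from the diff):

	#define BOOTX_SIG	0x426f6f58	/* ASCII "BooX" */

	if (r3 == BOOTX_SIG)
		bootx_init(r3, r4);	/* hand over to the BootX trampoline */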
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index 8a8bf79ef044..1c066d125375 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S | |||
@@ -154,11 +154,15 @@ _GLOBAL(__secondary_hold) | |||
154 | bne 100b | 154 | bne 100b |
155 | 155 | ||
156 | #ifdef CONFIG_HMT | 156 | #ifdef CONFIG_HMT |
157 | b .hmt_init | 157 | LOADADDR(r4, .hmt_init) |
158 | mtctr r4 | ||
159 | bctr | ||
158 | #else | 160 | #else |
159 | #ifdef CONFIG_SMP | 161 | #ifdef CONFIG_SMP |
162 | LOADADDR(r4, .pSeries_secondary_smp_init) | ||
163 | mtctr r4 | ||
160 | mr r3,r24 | 164 | mr r3,r24 |
161 | b .pSeries_secondary_smp_init | 165 | bctr |
162 | #else | 166 | #else |
163 | BUG_OPCODE | 167 | BUG_OPCODE |
164 | #endif | 168 | #endif |
@@ -200,6 +204,20 @@ exception_marker: | |||
200 | #define EX_R3 64 | 204 | #define EX_R3 64 |
201 | #define EX_LR 72 | 205 | #define EX_LR 72 |
202 | 206 | ||
207 | /* | ||
208 | * We're short on space and time in the exception prolog, so we can't use | ||
209 | * the normal LOADADDR macro. Normally we just need the low halfword of the | ||
210 | * address, but for Kdump we need the whole low word. | ||
211 | */ | ||
212 | #ifdef CONFIG_CRASH_DUMP | ||
213 | #define LOAD_HANDLER(reg, label) \ | ||
214 | oris reg,reg,(label)@h; /* virt addr of handler ... */ \ | ||
215 | ori reg,reg,(label)@l; /* .. and the rest */ | ||
216 | #else | ||
217 | #define LOAD_HANDLER(reg, label) \ | ||
218 | ori reg,reg,(label)@l; /* virt addr of handler ... */ | ||
219 | #endif | ||
220 | |||
203 | #define EXCEPTION_PROLOG_PSERIES(area, label) \ | 221 | #define EXCEPTION_PROLOG_PSERIES(area, label) \ |
204 | mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \ | 222 | mfspr r13,SPRN_SPRG3; /* get paca address into r13 */ \ |
205 | std r9,area+EX_R9(r13); /* save r9 - r12 */ \ | 223 | std r9,area+EX_R9(r13); /* save r9 - r12 */ \ |
@@ -212,7 +230,7 @@ exception_marker: | |||
212 | clrrdi r12,r13,32; /* get high part of &label */ \ | 230 | clrrdi r12,r13,32; /* get high part of &label */ \ |
213 | mfmsr r10; \ | 231 | mfmsr r10; \ |
214 | mfspr r11,SPRN_SRR0; /* save SRR0 */ \ | 232 | mfspr r11,SPRN_SRR0; /* save SRR0 */ \ |
215 | ori r12,r12,(label)@l; /* virt addr of handler */ \ | 233 | LOAD_HANDLER(r12,label) \ |
216 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ | 234 | ori r10,r10,MSR_IR|MSR_DR|MSR_RI; \ |
217 | mtspr SPRN_SRR0,r12; \ | 235 | mtspr SPRN_SRR0,r12; \ |
218 | mfspr r12,SPRN_SRR1; /* and SRR1 */ \ | 236 | mfspr r12,SPRN_SRR1; /* and SRR1 */ \ |
@@ -553,6 +571,7 @@ slb_miss_user_pseries: | |||
553 | * Vectors for the FWNMI option. Share common code. | 571 | * Vectors for the FWNMI option. Share common code. |
554 | */ | 572 | */ |
555 | .globl system_reset_fwnmi | 573 | .globl system_reset_fwnmi |
574 | .align 7 | ||
556 | system_reset_fwnmi: | 575 | system_reset_fwnmi: |
557 | HMT_MEDIUM | 576 | HMT_MEDIUM |
558 | mtspr SPRN_SPRG1,r13 /* save r13 */ | 577 | mtspr SPRN_SPRG1,r13 /* save r13 */ |
@@ -560,6 +579,7 @@ system_reset_fwnmi: | |||
560 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) | 579 | EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common) |
561 | 580 | ||
562 | .globl machine_check_fwnmi | 581 | .globl machine_check_fwnmi |
582 | .align 7 | ||
563 | machine_check_fwnmi: | 583 | machine_check_fwnmi: |
564 | HMT_MEDIUM | 584 | HMT_MEDIUM |
565 | mtspr SPRN_SPRG1,r13 /* save r13 */ | 585 | mtspr SPRN_SPRG1,r13 /* save r13 */ |
@@ -726,7 +746,8 @@ iSeries_secondary_smp_loop: | |||
726 | decrementer_iSeries_masked: | 746 | decrementer_iSeries_masked: |
727 | li r11,1 | 747 | li r11,1 |
728 | stb r11,PACALPPACA+LPPACADECRINT(r13) | 748 | stb r11,PACALPPACA+LPPACADECRINT(r13) |
729 | lwz r12,PACADEFAULTDECR(r13) | 749 | LOADBASE(r12,tb_ticks_per_jiffy) |
750 | lwz r12,OFF(tb_ticks_per_jiffy)(r12) | ||
730 | mtspr SPRN_DEC,r12 | 751 | mtspr SPRN_DEC,r12 |
731 | /* fall through */ | 752 | /* fall through */ |
732 | 753 | ||
@@ -1345,7 +1366,7 @@ _GLOBAL(do_stab_bolted) | |||
1345 | * fixed address (the linker can't compute (u64)&initial_stab >> | 1366 | * fixed address (the linker can't compute (u64)&initial_stab >> |
1346 | * PAGE_SHIFT). | 1367 | * PAGE_SHIFT). |
1347 | */ | 1368 | */ |
1348 | . = STAB0_PHYS_ADDR /* 0x6000 */ | 1369 | . = STAB0_OFFSET /* 0x6000 */ |
1349 | .globl initial_stab | 1370 | .globl initial_stab |
1350 | initial_stab: | 1371 | initial_stab: |
1351 | .space 4096 | 1372 | .space 4096 |
@@ -1485,11 +1506,13 @@ _STATIC(__mmu_off) | |||
1485 | * | 1506 | * |
1486 | */ | 1507 | */ |
1487 | _GLOBAL(__start_initialization_multiplatform) | 1508 | _GLOBAL(__start_initialization_multiplatform) |
1509 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
1488 | /* | 1510 | /* |
1489 | * Are we booted from a PROM Of-type client-interface ? | 1511 | * Are we booted from a PROM Of-type client-interface ? |
1490 | */ | 1512 | */ |
1491 | cmpldi cr0,r5,0 | 1513 | cmpldi cr0,r5,0 |
1492 | bne .__boot_from_prom /* yes -> prom */ | 1514 | bne .__boot_from_prom /* yes -> prom */ |
1515 | #endif | ||
1493 | 1516 | ||
1494 | /* Save parameters */ | 1517 | /* Save parameters */ |
1495 | mr r31,r3 | 1518 | mr r31,r3 |
@@ -1510,6 +1533,7 @@ _GLOBAL(__start_initialization_multiplatform) | |||
1510 | bl .__mmu_off | 1533 | bl .__mmu_off |
1511 | b .__after_prom_start | 1534 | b .__after_prom_start |
1512 | 1535 | ||
1536 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
1513 | _STATIC(__boot_from_prom) | 1537 | _STATIC(__boot_from_prom) |
1514 | /* Save parameters */ | 1538 | /* Save parameters */ |
1515 | mr r31,r3 | 1539 | mr r31,r3 |
@@ -1542,6 +1566,7 @@ _STATIC(__boot_from_prom) | |||
1542 | bl .prom_init | 1566 | bl .prom_init |
1543 | /* We never return */ | 1567 | /* We never return */ |
1544 | trap | 1568 | trap |
1569 | #endif | ||
1545 | 1570 | ||
1546 | /* | 1571 | /* |
1547 | * At this point, r3 contains the physical address we are running at, | 1572 | * At this point, r3 contains the physical address we are running at, |
@@ -1550,7 +1575,7 @@ _STATIC(__boot_from_prom) | |||
1550 | _STATIC(__after_prom_start) | 1575 | _STATIC(__after_prom_start) |
1551 | 1576 | ||
1552 | /* | 1577 | /* |
1553 | * We need to run with __start at physical address 0. | 1578 | * We need to run with __start at physical address PHYSICAL_START. |
1554 | * This will leave some code in the first 256B of | 1579 | * This will leave some code in the first 256B of |
1555 | * real memory, which are reserved for software use. | 1580 | * real memory, which are reserved for software use. |
1556 | * The remainder of the first page is loaded with the fixed | 1581 | * The remainder of the first page is loaded with the fixed |
@@ -1565,7 +1590,7 @@ _STATIC(__after_prom_start) | |||
1565 | mr r26,r3 | 1590 | mr r26,r3 |
1566 | SET_REG_TO_CONST(r27,KERNELBASE) | 1591 | SET_REG_TO_CONST(r27,KERNELBASE) |
1567 | 1592 | ||
1568 | li r3,0 /* target addr */ | 1593 | LOADADDR(r3, PHYSICAL_START) /* target addr */ |
1569 | 1594 | ||
1570 | // XXX FIXME: Use phys returned by OF (r30) | 1595 | // XXX FIXME: Use phys returned by OF (r30) |
1571 | add r4,r27,r26 /* source addr */ | 1596 | add r4,r27,r26 /* source addr */ |
@@ -1846,7 +1871,7 @@ _STATIC(start_here_multiplatform) | |||
1846 | mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */ | 1871 | mulli r13,r27,PACA_SIZE /* Calculate vaddr of right paca */ |
1847 | add r13,r13,r24 /* for this processor. */ | 1872 | add r13,r13,r24 /* for this processor. */ |
1848 | add r13,r13,r26 /* convert to physical addr */ | 1873 | add r13,r13,r26 /* convert to physical addr */ |
1849 | mtspr SPRN_SPRG3,r13 /* PPPBBB: Temp... -Peter */ | 1874 | mtspr SPRN_SPRG3,r13 |
1850 | 1875 | ||
1851 | /* Do very early kernel initializations, including initial hash table, | 1876 | /* Do very early kernel initializations, including initial hash table, |
1852 | * stab and slb setup before we turn on relocation. */ | 1877 | * stab and slb setup before we turn on relocation. */ |
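On the LOAD_HANDLER change above: the exception prolog builds the handler's virtual address by keeping the top 32 bits from the PACA pointer (clrrdi r12,r13,32) and OR-ing in the label's link-time offset. A normal kernel keeps its handlers within the first 64 KB, so the @l halfword is enough; a kdump capture kernel is linked at PHYSICAL_START (a higher address, per the CRASH_DUMP help text), so the offset can exceed 16 bits and the @h half must be OR-ed in as well. A small C illustration of the difference (the ~32 MB link address used in the comment is an assumed example):

	/* Illustration only: composing the handler address as LOAD_HANDLER does. */
	static unsigned long load_handler_addr(unsigned long paca, unsigned long off,
					       int crash_dump)
	{
		unsigned long top = paca & ~0xffffffffUL;	/* clrrdi r12,r13,32 */

		if (crash_dump)					/* oris @h; ori @l */
			return top | (off & 0xffffffffUL);
		return top | (off & 0xffffUL);			/* ori @l only */
	}

	/* With off = 0x02004100 (a label in a kernel linked at ~32 MB), the short
	 * form keeps only 0x4100 and the exception would branch to the wrong place. */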
diff --git a/arch/powerpc/kernel/ibmebus.c b/arch/powerpc/kernel/ibmebus.c new file mode 100644 index 000000000000..e47d40ac6f39 --- /dev/null +++ b/arch/powerpc/kernel/ibmebus.c | |||
@@ -0,0 +1,396 @@ | |||
1 | /* | ||
2 | * IBM PowerPC IBM eBus Infrastructure Support. | ||
3 | * | ||
4 | * Copyright (c) 2005 IBM Corporation | ||
5 | * Heiko J Schick <schickhj@de.ibm.com> | ||
6 | * | ||
7 | * All rights reserved. | ||
8 | * | ||
9 | * This source code is distributed under a dual license of GPL v2.0 and OpenIB | ||
10 | * BSD. | ||
11 | * | ||
12 | * OpenIB BSD License | ||
13 | * | ||
14 | * Redistribution and use in source and binary forms, with or without | ||
15 | * modification, are permitted provided that the following conditions are met: | ||
16 | * | ||
17 | * Redistributions of source code must retain the above copyright notice, this | ||
18 | * list of conditions and the following disclaimer. | ||
19 | * | ||
20 | * Redistributions in binary form must reproduce the above copyright notice, | ||
21 | * this list of conditions and the following disclaimer in the documentation | ||
22 | * and/or other materials | ||
23 | * provided with the distribution. | ||
24 | * | ||
25 | * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" | ||
26 | * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | ||
27 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | ||
28 | * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE | ||
29 | * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR | ||
30 | * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF | ||
31 | * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR | ||
32 | * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER | ||
33 | * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) | ||
34 | * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE | ||
35 | * POSSIBILITY OF SUCH DAMAGE. | ||
36 | */ | ||
37 | |||
38 | #include <linux/init.h> | ||
39 | #include <linux/console.h> | ||
40 | #include <linux/kobject.h> | ||
41 | #include <linux/dma-mapping.h> | ||
42 | #include <linux/interrupt.h> | ||
43 | #include <asm/ibmebus.h> | ||
44 | #include <asm/abs_addr.h> | ||
45 | |||
46 | static struct ibmebus_dev ibmebus_bus_device = { /* fake "parent" device */ | ||
47 | .name = ibmebus_bus_device.ofdev.dev.bus_id, | ||
48 | .ofdev.dev.bus_id = "ibmebus", | ||
49 | .ofdev.dev.bus = &ibmebus_bus_type, | ||
50 | }; | ||
51 | |||
52 | static void *ibmebus_alloc_coherent(struct device *dev, | ||
53 | size_t size, | ||
54 | dma_addr_t *dma_handle, | ||
55 | gfp_t flag) | ||
56 | { | ||
57 | void *mem; | ||
58 | |||
59 | mem = kmalloc(size, flag); | ||
60 | *dma_handle = (dma_addr_t)mem; | ||
61 | |||
62 | return mem; | ||
63 | } | ||
64 | |||
65 | static void ibmebus_free_coherent(struct device *dev, | ||
66 | size_t size, void *vaddr, | ||
67 | dma_addr_t dma_handle) | ||
68 | { | ||
69 | kfree(vaddr); | ||
70 | } | ||
71 | |||
72 | static dma_addr_t ibmebus_map_single(struct device *dev, | ||
73 | void *ptr, | ||
74 | size_t size, | ||
75 | enum dma_data_direction direction) | ||
76 | { | ||
77 | return (dma_addr_t)(ptr); | ||
78 | } | ||
79 | |||
80 | static void ibmebus_unmap_single(struct device *dev, | ||
81 | dma_addr_t dma_addr, | ||
82 | size_t size, | ||
83 | enum dma_data_direction direction) | ||
84 | { | ||
85 | return; | ||
86 | } | ||
87 | |||
88 | static int ibmebus_map_sg(struct device *dev, | ||
89 | struct scatterlist *sg, | ||
90 | int nents, enum dma_data_direction direction) | ||
91 | { | ||
92 | int i; | ||
93 | |||
94 | for (i = 0; i < nents; i++) { | ||
95 | sg[i].dma_address = (dma_addr_t)page_address(sg[i].page) | ||
96 | + sg[i].offset; | ||
97 | sg[i].dma_length = sg[i].length; | ||
98 | } | ||
99 | |||
100 | return nents; | ||
101 | } | ||
102 | |||
103 | static void ibmebus_unmap_sg(struct device *dev, | ||
104 | struct scatterlist *sg, | ||
105 | int nents, enum dma_data_direction direction) | ||
106 | { | ||
107 | return; | ||
108 | } | ||
109 | |||
110 | static int ibmebus_dma_supported(struct device *dev, u64 mask) | ||
111 | { | ||
112 | return 1; | ||
113 | } | ||
114 | |||
115 | struct dma_mapping_ops ibmebus_dma_ops = { | ||
116 | .alloc_coherent = ibmebus_alloc_coherent, | ||
117 | .free_coherent = ibmebus_free_coherent, | ||
118 | .map_single = ibmebus_map_single, | ||
119 | .unmap_single = ibmebus_unmap_single, | ||
120 | .map_sg = ibmebus_map_sg, | ||
121 | .unmap_sg = ibmebus_unmap_sg, | ||
122 | .dma_supported = ibmebus_dma_supported, | ||
123 | }; | ||
124 | |||
125 | static int ibmebus_bus_probe(struct device *dev) | ||
126 | { | ||
127 | struct ibmebus_dev *ibmebusdev = to_ibmebus_dev(dev); | ||
128 | struct ibmebus_driver *ibmebusdrv = to_ibmebus_driver(dev->driver); | ||
129 | const struct of_device_id *id; | ||
130 | int error = -ENODEV; | ||
131 | |||
132 | if (!ibmebusdrv->probe) | ||
133 | return error; | ||
134 | |||
135 | id = of_match_device(ibmebusdrv->id_table, &ibmebusdev->ofdev); | ||
136 | if (id) { | ||
137 | error = ibmebusdrv->probe(ibmebusdev, id); | ||
138 | } | ||
139 | |||
140 | return error; | ||
141 | } | ||
142 | |||
143 | static int ibmebus_bus_remove(struct device *dev) | ||
144 | { | ||
145 | struct ibmebus_dev *ibmebusdev = to_ibmebus_dev(dev); | ||
146 | struct ibmebus_driver *ibmebusdrv = to_ibmebus_driver(dev->driver); | ||
147 | |||
148 | if (ibmebusdrv->remove) { | ||
149 | return ibmebusdrv->remove(ibmebusdev); | ||
150 | } | ||
151 | |||
152 | return 0; | ||
153 | } | ||
154 | |||
155 | static void __devinit ibmebus_dev_release(struct device *dev) | ||
156 | { | ||
157 | of_node_put(to_ibmebus_dev(dev)->ofdev.node); | ||
158 | kfree(to_ibmebus_dev(dev)); | ||
159 | } | ||
160 | |||
161 | static ssize_t ibmebusdev_show_name(struct device *dev, | ||
162 | struct device_attribute *attr, char *buf) | ||
163 | { | ||
164 | return sprintf(buf, "%s\n", to_ibmebus_dev(dev)->name); | ||
165 | } | ||
166 | static DEVICE_ATTR(name, S_IRUSR | S_IRGRP | S_IROTH, ibmebusdev_show_name, | ||
167 | NULL); | ||
168 | |||
169 | static struct ibmebus_dev* __devinit ibmebus_register_device_common( | ||
170 | struct ibmebus_dev *dev, char *name) | ||
171 | { | ||
172 | int err = 0; | ||
173 | |||
174 | dev->name = name; | ||
175 | dev->ofdev.dev.parent = &ibmebus_bus_device.ofdev.dev; | ||
176 | dev->ofdev.dev.bus = &ibmebus_bus_type; | ||
177 | dev->ofdev.dev.release = ibmebus_dev_release; | ||
178 | |||
179 | /* An ibmebusdev is based on an of_device. We have to change the | ||
180 | * bus type to use our own DMA mapping operations. | ||
181 | */ | ||
182 | if ((err = of_device_register(&dev->ofdev)) != 0) { | ||
183 | printk(KERN_ERR "%s: failed to register device (%d).\n", | ||
184 | __FUNCTION__, err); | ||
185 | return NULL; | ||
186 | } | ||
187 | |||
188 | device_create_file(&dev->ofdev.dev, &dev_attr_name); | ||
189 | |||
190 | return dev; | ||
191 | } | ||
192 | |||
193 | static struct ibmebus_dev* __devinit ibmebus_register_device_node( | ||
194 | struct device_node *dn) | ||
195 | { | ||
196 | struct ibmebus_dev *dev; | ||
197 | char *loc_code; | ||
198 | int length; | ||
199 | |||
200 | loc_code = (char *)get_property(dn, "ibm,loc-code", NULL); | ||
201 | if (!loc_code) { | ||
202 | printk(KERN_WARNING "%s: node %s missing 'ibm,loc-code'\n", | ||
203 | __FUNCTION__, dn->name ? dn->name : "<unknown>"); | ||
204 | return NULL; | ||
205 | } | ||
206 | |||
207 | if (strlen(loc_code) == 0) { | ||
208 | printk(KERN_WARNING "%s: 'ibm,loc-code' is invalid\n", | ||
209 | __FUNCTION__); | ||
210 | return NULL; | ||
211 | } | ||
212 | |||
213 | dev = kmalloc(sizeof(struct ibmebus_dev), GFP_KERNEL); | ||
214 | if (!dev) { | ||
215 | return NULL; | ||
216 | } | ||
217 | memset(dev, 0, sizeof(struct ibmebus_dev)); | ||
218 | |||
219 | dev->ofdev.node = of_node_get(dn); | ||
220 | |||
221 | length = strlen(loc_code); | ||
222 | memcpy(dev->ofdev.dev.bus_id, loc_code | ||
223 | + (length - min(length, BUS_ID_SIZE - 1)), | ||
224 | min(length, BUS_ID_SIZE - 1)); | ||
225 | |||
226 | /* Register with generic device framework. */ | ||
227 | if (ibmebus_register_device_common(dev, dn->name) == NULL) { | ||
228 | kfree(dev); | ||
229 | return NULL; | ||
230 | } | ||
231 | |||
232 | return dev; | ||
233 | } | ||
234 | |||
235 | static void ibmebus_probe_of_nodes(char* name) | ||
236 | { | ||
237 | struct device_node *dn = NULL; | ||
238 | |||
239 | while ((dn = of_find_node_by_name(dn, name))) { | ||
240 | if (ibmebus_register_device_node(dn) == NULL) { | ||
241 | of_node_put(dn); | ||
242 | |||
243 | return; | ||
244 | } | ||
245 | } | ||
246 | |||
247 | of_node_put(dn); | ||
248 | |||
249 | return; | ||
250 | } | ||
251 | |||
252 | static void ibmebus_add_devices_by_id(struct of_device_id *idt) | ||
253 | { | ||
254 | while (strlen(idt->name) > 0) { | ||
255 | ibmebus_probe_of_nodes(idt->name); | ||
256 | idt++; | ||
257 | } | ||
258 | |||
259 | return; | ||
260 | } | ||
261 | |||
262 | static int ibmebus_match_helper(struct device *dev, void *data) | ||
263 | { | ||
264 | if (strcmp((char*)data, to_ibmebus_dev(dev)->name) == 0) | ||
265 | return 1; | ||
266 | |||
267 | return 0; | ||
268 | } | ||
269 | |||
270 | static int ibmebus_unregister_device(struct device *dev) | ||
271 | { | ||
272 | device_remove_file(dev, &dev_attr_name); | ||
273 | of_device_unregister(to_of_device(dev)); | ||
274 | |||
275 | return 0; | ||
276 | } | ||
277 | |||
278 | static void ibmebus_remove_devices_by_id(struct of_device_id *idt) | ||
279 | { | ||
280 | struct device *dev; | ||
281 | |||
282 | while (strlen(idt->name) > 0) { | ||
283 | while ((dev = bus_find_device(&ibmebus_bus_type, NULL, | ||
284 | (void*)idt->name, | ||
285 | ibmebus_match_helper))) { | ||
286 | ibmebus_unregister_device(dev); | ||
287 | } | ||
288 | idt++; | ||
289 | |||
290 | } | ||
291 | |||
292 | return; | ||
293 | } | ||
294 | |||
295 | int ibmebus_register_driver(struct ibmebus_driver *drv) | ||
296 | { | ||
297 | int err = 0; | ||
298 | |||
299 | drv->driver.name = drv->name; | ||
300 | drv->driver.bus = &ibmebus_bus_type; | ||
301 | drv->driver.probe = ibmebus_bus_probe; | ||
302 | drv->driver.remove = ibmebus_bus_remove; | ||
303 | |||
304 | if ((err = driver_register(&drv->driver) != 0)) | ||
305 | return err; | ||
306 | |||
307 | ibmebus_add_devices_by_id(drv->id_table); | ||
308 | |||
309 | return 0; | ||
310 | } | ||
311 | EXPORT_SYMBOL(ibmebus_register_driver); | ||
312 | |||
313 | void ibmebus_unregister_driver(struct ibmebus_driver *drv) | ||
314 | { | ||
315 | driver_unregister(&drv->driver); | ||
316 | ibmebus_remove_devices_by_id(drv->id_table); | ||
317 | } | ||
318 | EXPORT_SYMBOL(ibmebus_unregister_driver); | ||
319 | |||
320 | int ibmebus_request_irq(struct ibmebus_dev *dev, | ||
321 | u32 ist, | ||
322 | irqreturn_t (*handler)(int, void*, struct pt_regs *), | ||
323 | unsigned long irq_flags, const char * devname, | ||
324 | void *dev_id) | ||
325 | { | ||
326 | unsigned int irq = virt_irq_create_mapping(ist); | ||
327 | |||
328 | if (irq == NO_IRQ) | ||
329 | return -EINVAL; | ||
330 | |||
331 | irq = irq_offset_up(irq); | ||
332 | |||
333 | return request_irq(irq, handler, | ||
334 | irq_flags, devname, dev_id); | ||
335 | } | ||
336 | EXPORT_SYMBOL(ibmebus_request_irq); | ||
337 | |||
338 | void ibmebus_free_irq(struct ibmebus_dev *dev, u32 ist, void *dev_id) | ||
339 | { | ||
340 | unsigned int irq = virt_irq_create_mapping(ist); | ||
341 | |||
342 | irq = irq_offset_up(irq); | ||
343 | free_irq(irq, dev_id); | ||
344 | |||
345 | return; | ||
346 | } | ||
347 | EXPORT_SYMBOL(ibmebus_free_irq); | ||
348 | |||
349 | static int ibmebus_bus_match(struct device *dev, struct device_driver *drv) | ||
350 | { | ||
351 | const struct ibmebus_dev *ebus_dev = to_ibmebus_dev(dev); | ||
352 | struct ibmebus_driver *ebus_drv = to_ibmebus_driver(drv); | ||
353 | const struct of_device_id *ids = ebus_drv->id_table; | ||
354 | const struct of_device_id *found_id; | ||
355 | |||
356 | if (!ids) | ||
357 | return 0; | ||
358 | |||
359 | found_id = of_match_device(ids, &ebus_dev->ofdev); | ||
360 | if (found_id) | ||
361 | return 1; | ||
362 | |||
363 | return 0; | ||
364 | } | ||
365 | |||
366 | struct bus_type ibmebus_bus_type = { | ||
367 | .name = "ibmebus", | ||
368 | .match = ibmebus_bus_match, | ||
369 | }; | ||
370 | EXPORT_SYMBOL(ibmebus_bus_type); | ||
371 | |||
372 | static int __init ibmebus_bus_init(void) | ||
373 | { | ||
374 | int err; | ||
375 | |||
376 | printk(KERN_INFO "IBM eBus Device Driver\n"); | ||
377 | |||
378 | err = bus_register(&ibmebus_bus_type); | ||
379 | if (err) { | ||
380 | printk(KERN_ERR ":%s: failed to register IBM eBus.\n", | ||
381 | __FUNCTION__); | ||
382 | return err; | ||
383 | } | ||
384 | |||
385 | err = device_register(&ibmebus_bus_device.ofdev.dev); | ||
386 | if (err) { | ||
387 | printk(KERN_WARNING "%s: device_register returned %i\n", | ||
388 | __FUNCTION__, err); | ||
389 | bus_unregister(&ibmebus_bus_type); | ||
390 | |||
391 | return err; | ||
392 | } | ||
393 | |||
394 | return 0; | ||
395 | } | ||
396 | __initcall(ibmebus_bus_init); | ||
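For reference, a client driver would attach to the new bus roughly as below. This is a hypothetical skeleton pieced together only from what is visible above (ibmebus_register_driver(), plus the name/id_table/probe/remove fields implied by ibmebus_bus_probe() and ibmebus_bus_remove()); the authoritative structure layout is in asm/ibmebus.h, and the "lhca" node name is just an example:

	/* Hypothetical ibmebus client driver skeleton. */
	static struct of_device_id example_ids[] = {
		{ .name = "lhca" },	/* device-tree node name to bind (example) */
		{ },			/* empty name terminates the table, see
					   ibmebus_add_devices_by_id() above */
	};

	static int example_probe(struct ibmebus_dev *dev,
				 const struct of_device_id *id)
	{
		/* set up the adapter; DMA on this bus is a straight 1:1 mapping
		 * of kernel virtual addresses (see ibmebus_dma_ops above) */
		return 0;
	}

	static int example_remove(struct ibmebus_dev *dev)
	{
		return 0;
	}

	static struct ibmebus_driver example_driver = {
		.name		= "example",
		.id_table	= example_ids,
		.probe		= example_probe,
		.remove		= example_remove,
	};

	static int __init example_init(void)
	{
		return ibmebus_register_driver(&example_driver);
	}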
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index 5a71ed9612fe..5651032d8706 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -31,7 +31,6 @@ | |||
31 | * to reduce code space and undefined function references. | 31 | * to reduce code space and undefined function references. |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #include <linux/errno.h> | ||
35 | #include <linux/module.h> | 34 | #include <linux/module.h> |
36 | #include <linux/threads.h> | 35 | #include <linux/threads.h> |
37 | #include <linux/kernel_stat.h> | 36 | #include <linux/kernel_stat.h> |
@@ -44,18 +43,12 @@ | |||
44 | #include <linux/config.h> | 43 | #include <linux/config.h> |
45 | #include <linux/init.h> | 44 | #include <linux/init.h> |
46 | #include <linux/slab.h> | 45 | #include <linux/slab.h> |
47 | #include <linux/pci.h> | ||
48 | #include <linux/delay.h> | 46 | #include <linux/delay.h> |
49 | #include <linux/irq.h> | 47 | #include <linux/irq.h> |
50 | #include <linux/proc_fs.h> | ||
51 | #include <linux/random.h> | ||
52 | #include <linux/seq_file.h> | 48 | #include <linux/seq_file.h> |
53 | #include <linux/cpumask.h> | 49 | #include <linux/cpumask.h> |
54 | #include <linux/profile.h> | 50 | #include <linux/profile.h> |
55 | #include <linux/bitops.h> | 51 | #include <linux/bitops.h> |
56 | #ifdef CONFIG_PPC64 | ||
57 | #include <linux/kallsyms.h> | ||
58 | #endif | ||
59 | 52 | ||
60 | #include <asm/uaccess.h> | 53 | #include <asm/uaccess.h> |
61 | #include <asm/system.h> | 54 | #include <asm/system.h> |
@@ -66,8 +59,7 @@ | |||
66 | #include <asm/prom.h> | 59 | #include <asm/prom.h> |
67 | #include <asm/ptrace.h> | 60 | #include <asm/ptrace.h> |
68 | #include <asm/machdep.h> | 61 | #include <asm/machdep.h> |
69 | #ifdef CONFIG_PPC64 | 62 | #ifdef CONFIG_PPC_ISERIES |
70 | #include <asm/iseries/it_lp_queue.h> | ||
71 | #include <asm/paca.h> | 63 | #include <asm/paca.h> |
72 | #endif | 64 | #endif |
73 | 65 | ||
@@ -78,10 +70,6 @@ EXPORT_SYMBOL(__irq_offset_value); | |||
78 | 70 | ||
79 | static int ppc_spurious_interrupts; | 71 | static int ppc_spurious_interrupts; |
80 | 72 | ||
81 | #if defined(CONFIG_PPC_ISERIES) && defined(CONFIG_SMP) | ||
82 | extern void iSeries_smp_message_recv(struct pt_regs *); | ||
83 | #endif | ||
84 | |||
85 | #ifdef CONFIG_PPC32 | 73 | #ifdef CONFIG_PPC32 |
86 | #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) | 74 | #define NR_MASK_WORDS ((NR_IRQS + 31) / 32) |
87 | 75 | ||
@@ -195,49 +183,6 @@ void fixup_irqs(cpumask_t map) | |||
195 | } | 183 | } |
196 | #endif | 184 | #endif |
197 | 185 | ||
198 | #ifdef CONFIG_PPC_ISERIES | ||
199 | void do_IRQ(struct pt_regs *regs) | ||
200 | { | ||
201 | struct paca_struct *lpaca; | ||
202 | |||
203 | irq_enter(); | ||
204 | |||
205 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | ||
206 | /* Debugging check for stack overflow: is there less than 2KB free? */ | ||
207 | { | ||
208 | long sp; | ||
209 | |||
210 | sp = __get_SP() & (THREAD_SIZE-1); | ||
211 | |||
212 | if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { | ||
213 | printk("do_IRQ: stack overflow: %ld\n", | ||
214 | sp - sizeof(struct thread_info)); | ||
215 | dump_stack(); | ||
216 | } | ||
217 | } | ||
218 | #endif | ||
219 | |||
220 | lpaca = get_paca(); | ||
221 | #ifdef CONFIG_SMP | ||
222 | if (lpaca->lppaca.int_dword.fields.ipi_cnt) { | ||
223 | lpaca->lppaca.int_dword.fields.ipi_cnt = 0; | ||
224 | iSeries_smp_message_recv(regs); | ||
225 | } | ||
226 | #endif /* CONFIG_SMP */ | ||
227 | if (hvlpevent_is_pending()) | ||
228 | process_hvlpevents(regs); | ||
229 | |||
230 | irq_exit(); | ||
231 | |||
232 | if (lpaca->lppaca.int_dword.fields.decr_int) { | ||
233 | lpaca->lppaca.int_dword.fields.decr_int = 0; | ||
234 | /* Signal a fake decrementer interrupt */ | ||
235 | timer_interrupt(regs); | ||
236 | } | ||
237 | } | ||
238 | |||
239 | #else /* CONFIG_PPC_ISERIES */ | ||
240 | |||
241 | void do_IRQ(struct pt_regs *regs) | 186 | void do_IRQ(struct pt_regs *regs) |
242 | { | 187 | { |
243 | int irq; | 188 | int irq; |
@@ -286,16 +231,24 @@ void do_IRQ(struct pt_regs *regs) | |||
286 | } else | 231 | } else |
287 | #endif | 232 | #endif |
288 | __do_IRQ(irq, regs); | 233 | __do_IRQ(irq, regs); |
289 | } else | 234 | } else if (irq != -2) |
290 | #ifdef CONFIG_PPC32 | 235 | /* That's not SMP safe ... but who cares ? */ |
291 | if (irq != -2) | 236 | ppc_spurious_interrupts++; |
292 | #endif | 237 | |
293 | /* That's not SMP safe ... but who cares ? */ | ||
294 | ppc_spurious_interrupts++; | ||
295 | irq_exit(); | 238 | irq_exit(); |
296 | } | ||
297 | 239 | ||
298 | #endif /* CONFIG_PPC_ISERIES */ | 240 | #ifdef CONFIG_PPC_ISERIES |
241 | { | ||
242 | struct paca_struct *lpaca = get_paca(); | ||
243 | |||
244 | if (lpaca->lppaca.int_dword.fields.decr_int) { | ||
245 | lpaca->lppaca.int_dword.fields.decr_int = 0; | ||
246 | /* Signal a fake decrementer interrupt */ | ||
247 | timer_interrupt(regs); | ||
248 | } | ||
249 | } | ||
250 | #endif | ||
251 | } | ||
299 | 252 | ||
300 | void __init init_IRQ(void) | 253 | void __init init_IRQ(void) |
301 | { | 254 | { |
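The irq.c hunks remove the separate iSeries do_IRQ() entirely: every platform now runs the common one, with the iSeries "fake decrementer" check demoted to a tail step after irq_exit(). Condensed into C (a sketch of the surviving flow; the interrupt is fetched from the platform's get_irq hook just above the visible hunk, assumed here):

	void do_IRQ(struct pt_regs *regs)		/* condensed sketch */
	{
		int irq;

		irq_enter();
		irq = ppc_md.get_irq(regs);		/* platform hook (not in the hunk) */
		if (irq >= 0)
			__do_IRQ(irq, regs);		/* possibly on a separate irq stack */
		else if (irq != -2)			/* -2: ignore, don't count */
			ppc_spurious_interrupts++;	/* "not SMP safe", as noted */
		irq_exit();

	#ifdef CONFIG_PPC_ISERIES
		if (get_paca()->lppaca.int_dword.fields.decr_int) {
			get_paca()->lppaca.int_dword.fields.decr_int = 0;
			timer_interrupt(regs);		/* signal a fake decrementer */
		}
	#endif
	}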
diff --git a/arch/powerpc/kernel/legacy_serial.c b/arch/powerpc/kernel/legacy_serial.c new file mode 100644 index 000000000000..f970ace208d3 --- /dev/null +++ b/arch/powerpc/kernel/legacy_serial.c | |||
@@ -0,0 +1,557 @@ | |||
1 | #include <linux/config.h> | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/serial.h> | ||
4 | #include <linux/serial_8250.h> | ||
5 | #include <linux/serial_core.h> | ||
6 | #include <linux/console.h> | ||
7 | #include <linux/pci.h> | ||
8 | #include <asm/io.h> | ||
9 | #include <asm/mmu.h> | ||
10 | #include <asm/prom.h> | ||
11 | #include <asm/serial.h> | ||
12 | #include <asm/udbg.h> | ||
13 | #include <asm/pci-bridge.h> | ||
14 | #include <asm/ppc-pci.h> | ||
15 | |||
16 | #undef DEBUG | ||
17 | |||
18 | #ifdef DEBUG | ||
19 | #define DBG(fmt...) do { printk(fmt); } while(0) | ||
20 | #else | ||
21 | #define DBG(fmt...) do { } while(0) | ||
22 | #endif | ||
23 | |||
24 | #define MAX_LEGACY_SERIAL_PORTS 8 | ||
25 | |||
26 | static struct plat_serial8250_port | ||
27 | legacy_serial_ports[MAX_LEGACY_SERIAL_PORTS+1]; | ||
28 | static struct legacy_serial_info { | ||
29 | struct device_node *np; | ||
30 | unsigned int speed; | ||
31 | unsigned int clock; | ||
32 | phys_addr_t taddr; | ||
33 | } legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS]; | ||
34 | static unsigned int legacy_serial_count; | ||
35 | static int legacy_serial_console = -1; | ||
36 | |||
37 | static int __init add_legacy_port(struct device_node *np, int want_index, | ||
38 | int iotype, phys_addr_t base, | ||
39 | phys_addr_t taddr, unsigned long irq, | ||
40 | unsigned int flags) | ||
41 | { | ||
42 | u32 *clk, *spd, clock = BASE_BAUD * 16; | ||
43 | int index; | ||
44 | |||
45 | /* get clock freq. if present */ | ||
46 | clk = (u32 *)get_property(np, "clock-frequency", NULL); | ||
47 | if (clk && *clk) | ||
48 | clock = *clk; | ||
49 | |||
50 | /* get default speed if present */ | ||
51 | spd = (u32 *)get_property(np, "current-speed", NULL); | ||
52 | |||
53 | /* If we have a location index, then try to use it */ | ||
54 | if (want_index >= 0 && want_index < MAX_LEGACY_SERIAL_PORTS) | ||
55 | index = want_index; | ||
56 | else | ||
57 | index = legacy_serial_count; | ||
58 | |||
59 | /* if our index is still out of range, that means the | ||
60 | * array is full; we could scan for a free slot but it | ||
61 | * makes little sense to bother, so just skip the port | ||
62 | */ | ||
63 | if (index >= MAX_LEGACY_SERIAL_PORTS) | ||
64 | return -1; | ||
65 | if (index >= legacy_serial_count) | ||
66 | legacy_serial_count = index + 1; | ||
67 | |||
68 | /* Check if there is a port that already claimed our slot */ | ||
69 | if (legacy_serial_infos[index].np != 0) { | ||
70 | /* if we still have some room, move it, else override */ | ||
71 | if (legacy_serial_count < MAX_LEGACY_SERIAL_PORTS) { | ||
72 | printk(KERN_INFO "Moved legacy port %d -> %d\n", | ||
73 | index, legacy_serial_count); | ||
74 | legacy_serial_ports[legacy_serial_count] = | ||
75 | legacy_serial_ports[index]; | ||
76 | legacy_serial_infos[legacy_serial_count] = | ||
77 | legacy_serial_infos[index]; | ||
78 | legacy_serial_count++; | ||
79 | } else { | ||
80 | printk(KERN_INFO "Replacing legacy port %d\n", index); | ||
81 | } | ||
82 | } | ||
83 | |||
84 | /* Now fill the entry */ | ||
85 | memset(&legacy_serial_ports[index], 0, | ||
86 | sizeof(struct plat_serial8250_port)); | ||
87 | if (iotype == UPIO_PORT) | ||
88 | legacy_serial_ports[index].iobase = base; | ||
89 | else | ||
90 | legacy_serial_ports[index].mapbase = base; | ||
91 | legacy_serial_ports[index].iotype = iotype; | ||
92 | legacy_serial_ports[index].uartclk = clock; | ||
93 | legacy_serial_ports[index].irq = irq; | ||
94 | legacy_serial_ports[index].flags = flags; | ||
95 | legacy_serial_infos[index].taddr = taddr; | ||
96 | legacy_serial_infos[index].np = of_node_get(np); | ||
97 | legacy_serial_infos[index].clock = clock; | ||
98 | legacy_serial_infos[index].speed = spd ? *spd : 0; | ||
99 | |||
100 | printk(KERN_INFO "Found legacy serial port %d for %s\n", | ||
101 | index, np->full_name); | ||
102 | printk(KERN_INFO " %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n", | ||
103 | (iotype == UPIO_PORT) ? "port" : "mem", | ||
104 | (unsigned long long)base, (unsigned long long)taddr, irq, | ||
105 | legacy_serial_ports[index].uartclk, | ||
106 | legacy_serial_infos[index].speed); | ||
107 | |||
108 | return index; | ||
109 | } | ||
110 | |||
111 | static int __init add_legacy_soc_port(struct device_node *np, | ||
112 | struct device_node *soc_dev) | ||
113 | { | ||
114 | phys_addr_t addr; | ||
115 | u32 *addrp; | ||
116 | unsigned int flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ; | ||
117 | |||
118 | /* We only support ports that have a clock frequency properly | ||
119 | * encoded in the device-tree. | ||
120 | */ | ||
121 | if (get_property(np, "clock-frequency", NULL) == NULL) | ||
122 | return -1; | ||
123 | |||
124 | /* Get the address */ | ||
125 | addrp = of_get_address(soc_dev, 0, NULL, NULL); | ||
126 | if (addrp == NULL) | ||
127 | return -1; | ||
128 | |||
129 | addr = of_translate_address(soc_dev, addrp); | ||
130 | |||
131 | /* Add port, irq will be dealt with later. We passed a translated | ||
132 | * IO port value. It will be fixed up later along with the irq | ||
133 | */ | ||
134 | return add_legacy_port(np, -1, UPIO_MEM, addr, addr, NO_IRQ, flags); | ||
135 | } | ||
136 | |||
137 | #ifdef CONFIG_ISA | ||
138 | static int __init add_legacy_isa_port(struct device_node *np, | ||
139 | struct device_node *isa_brg) | ||
140 | { | ||
141 | u32 *reg; | ||
142 | char *typep; | ||
143 | int index = -1; | ||
144 | phys_addr_t taddr; | ||
145 | |||
146 | /* Get the ISA port number */ | ||
147 | reg = (u32 *)get_property(np, "reg", NULL); | ||
148 | if (reg == NULL) | ||
149 | return -1; | ||
150 | |||
151 | /* Verify it's an IO port, we don't support anything else */ | ||
152 | if (!(reg[0] & 0x00000001)) | ||
153 | return -1; | ||
154 | |||
155 | /* Now look for an "ibm,aix-loc" property that gives us ordering | ||
156 | * if any... | ||
157 | */ | ||
158 | typep = (char *)get_property(np, "ibm,aix-loc", NULL); | ||
159 | |||
160 | /* If we have a location index, then use it */ | ||
161 | if (typep && *typep == 'S') | ||
162 | index = simple_strtol(typep+1, NULL, 0) - 1; | ||
163 | |||
164 | /* Translate ISA address */ | ||
165 | taddr = of_translate_address(np, reg); | ||
166 | |||
167 | /* Add port, irq will be dealt with later */ | ||
168 | return add_legacy_port(np, index, UPIO_PORT, reg[1], taddr, NO_IRQ, UPF_BOOT_AUTOCONF); | ||
169 | |||
170 | } | ||
171 | #endif | ||
172 | |||
173 | #ifdef CONFIG_PCI | ||
174 | static int __init add_legacy_pci_port(struct device_node *np, | ||
175 | struct device_node *pci_dev) | ||
176 | { | ||
177 | phys_addr_t addr, base; | ||
178 | u32 *addrp; | ||
179 | unsigned int flags; | ||
180 | int iotype, index = -1, lindex = 0; | ||
181 | |||
182 | /* We only support ports that have a clock frequency properly | ||
183 | * encoded in the device-tree (that is, have an fcode). Anything | ||
184 | * else can't be used that early and will normally be probed by | ||
185 | * the generic 8250_pci driver later on. The reason is that 8250- | ||
186 | * compatible UARTs on PCI need all sorts of quirks (port offsets | ||
187 | * etc...) that this code doesn't know about | ||
188 | */ | ||
189 | if (get_property(np, "clock-frequency", NULL) == NULL) | ||
190 | return -1; | ||
191 | |||
192 | /* Get the PCI address. Assume BAR 0 */ | ||
193 | addrp = of_get_pci_address(pci_dev, 0, NULL, &flags); | ||
194 | if (addrp == NULL) | ||
195 | return -1; | ||
196 | |||
197 | /* We only support BAR 0 for now */ | ||
198 | iotype = (flags & IORESOURCE_MEM) ? UPIO_MEM : UPIO_PORT; | ||
199 | addr = of_translate_address(pci_dev, addrp); | ||
200 | |||
201 | /* Set the IO base to the same as the translated address for MMIO, | ||
202 | * or to the domain local IO base for PIO (it will be fixed up later) | ||
203 | */ | ||
204 | if (iotype == UPIO_MEM) | ||
205 | base = addr; | ||
206 | else | ||
207 | base = addrp[2]; | ||
208 | |||
209 | /* Try to guess an index... If we have subdevices of the pci dev, | ||
210 | * we look at their "reg" property | ||
211 | */ | ||
212 | if (np != pci_dev) { | ||
213 | u32 *reg = (u32 *)get_property(np, "reg", NULL); | ||
214 | if (reg && (*reg < 4)) | ||
215 | index = lindex = *reg; | ||
216 | } | ||
217 | |||
218 | /* Local index means it's the Nth port in the PCI chip. Unfortunately | ||
219 | * the offset to add here is device specific. We know about those | ||
220 | * EXAR ports and we default to the most common case. If your UART | ||
221 | * doesn't work for these settings, you'll have to add your own special | ||
222 | * cases here | ||
223 | */ | ||
224 | if (device_is_compatible(pci_dev, "pci13a8,152") || | ||
225 | device_is_compatible(pci_dev, "pci13a8,154") || | ||
226 | device_is_compatible(pci_dev, "pci13a8,158")) { | ||
227 | addr += 0x200 * lindex; | ||
228 | base += 0x200 * lindex; | ||
229 | } else { | ||
230 | addr += 8 * lindex; | ||
231 | base += 8 * lindex; | ||
232 | } | ||
233 | |||
234 | /* Add port; the irq will be dealt with later. We pass a translated | ||
235 | * IO port value; it will be fixed up later along with the irq | ||
236 | */ | ||
237 | return add_legacy_port(np, index, iotype, base, addr, NO_IRQ, UPF_BOOT_AUTOCONF); | ||
238 | } | ||
239 | #endif | ||
240 | |||
241 | /* | ||
242 | * This is called very early, as part of setup_system() or eventually | ||
243 | * setup_arch(), basically before anything else in this file. This function | ||
244 | * will try to build a list of all the available 8250-compatible serial ports | ||
245 | * in the machine using the Open Firmware device-tree. It currently only deals | ||
246 | * with ISA and PCI busses but could be extended. It allows a very early boot | ||
247 | * console to be initialized; that list is also used later to provide 8250 with | ||
248 | * the machine's non-PCI ports and to properly pick the default console port | ||
249 | */ | ||
250 | void __init find_legacy_serial_ports(void) | ||
251 | { | ||
252 | struct device_node *np, *stdout = NULL; | ||
253 | char *path; | ||
254 | int index; | ||
255 | |||
256 | DBG(" -> find_legacy_serial_port()\n"); | ||
257 | |||
258 | /* Now find out if one of these is our firmware console */ | ||
259 | path = (char *)get_property(of_chosen, "linux,stdout-path", NULL); | ||
260 | if (path != NULL) { | ||
261 | stdout = of_find_node_by_path(path); | ||
262 | if (stdout) | ||
263 | DBG("stdout is %s\n", stdout->full_name); | ||
264 | } else { | ||
265 | DBG(" no linux,stdout-path !\n"); | ||
266 | } | ||
267 | |||
268 | /* First fill our array with SOC ports */ | ||
269 | for (np = NULL; (np = of_find_compatible_node(np, "serial", "ns16550")) != NULL;) { | ||
270 | struct device_node *soc = of_get_parent(np); | ||
271 | if (soc && !strcmp(soc->type, "soc")) { | ||
272 | index = add_legacy_soc_port(np, np); | ||
273 | if (index >= 0 && np == stdout) | ||
274 | legacy_serial_console = index; | ||
275 | } | ||
276 | of_node_put(soc); | ||
277 | } | ||
278 | |||
279 | #ifdef CONFIG_ISA | ||
280 | /* Next, fill our array with ISA ports */ | ||
281 | for (np = NULL; (np = of_find_node_by_type(np, "serial"));) { | ||
282 | struct device_node *isa = of_get_parent(np); | ||
283 | if (isa && !strcmp(isa->name, "isa")) { | ||
284 | index = add_legacy_isa_port(np, isa); | ||
285 | if (index >= 0 && np == stdout) | ||
286 | legacy_serial_console = index; | ||
287 | } | ||
288 | of_node_put(isa); | ||
289 | } | ||
290 | #endif | ||
291 | |||
292 | #ifdef CONFIG_PCI | ||
293 | /* Next, try to locate PCI ports */ | ||
294 | for (np = NULL; (np = of_find_all_nodes(np));) { | ||
295 | struct device_node *pci, *parent = of_get_parent(np); | ||
296 | if (parent && !strcmp(parent->name, "isa")) { | ||
297 | of_node_put(parent); | ||
298 | continue; | ||
299 | } | ||
300 | if (strcmp(np->name, "serial") && strcmp(np->type, "serial")) { | ||
301 | of_node_put(parent); | ||
302 | continue; | ||
303 | } | ||
304 | /* Check for known pciclass, and also check whether we have | ||
305 | * a device with child nodes for ports or not | ||
306 | */ | ||
307 | if (device_is_compatible(np, "pciclass,0700") || | ||
308 | device_is_compatible(np, "pciclass,070002")) | ||
309 | pci = np; | ||
310 | else if (device_is_compatible(parent, "pciclass,0700") || | ||
311 | device_is_compatible(parent, "pciclass,070002")) | ||
312 | pci = parent; | ||
313 | else { | ||
314 | of_node_put(parent); | ||
315 | continue; | ||
316 | } | ||
317 | index = add_legacy_pci_port(np, pci); | ||
318 | if (index >= 0 && np == stdout) | ||
319 | legacy_serial_console = index; | ||
320 | of_node_put(parent); | ||
321 | } | ||
322 | #endif | ||
323 | |||
324 | DBG("legacy_serial_console = %d\n", legacy_serial_console); | ||
325 | |||
326 | /* udbg is 64 bits only for now, that will change soon though ... */ | ||
327 | while (legacy_serial_console >= 0) { | ||
328 | struct legacy_serial_info *info = | ||
329 | &legacy_serial_infos[legacy_serial_console]; | ||
330 | void __iomem *addr; | ||
331 | |||
332 | if (info->taddr == 0) | ||
333 | break; | ||
334 | addr = ioremap(info->taddr, 0x1000); | ||
335 | if (addr == NULL) | ||
336 | break; | ||
337 | if (info->speed == 0) | ||
338 | info->speed = udbg_probe_uart_speed(addr, info->clock); | ||
339 | DBG("default console speed = %d\n", info->speed); | ||
340 | udbg_init_uart(addr, info->speed, info->clock); | ||
341 | break; | ||
342 | } | ||
343 | |||
344 | DBG(" <- find_legacy_serial_port()\n"); | ||
345 | } | ||
346 | |||
347 | static struct platform_device serial_device = { | ||
348 | .name = "serial8250", | ||
349 | .id = PLAT8250_DEV_PLATFORM, | ||
350 | .dev = { | ||
351 | .platform_data = legacy_serial_ports, | ||
352 | }, | ||
353 | }; | ||
354 | |||
355 | static void __init fixup_port_irq(int index, | ||
356 | struct device_node *np, | ||
357 | struct plat_serial8250_port *port) | ||
358 | { | ||
359 | DBG("fixup_port_irq(%d)\n", index); | ||
360 | |||
361 | /* Check for interrupts in that node */ | ||
362 | if (np->n_intrs > 0) { | ||
363 | port->irq = np->intrs[0].line; | ||
364 | DBG(" port %d (%s), irq=%d\n", | ||
365 | index, np->full_name, port->irq); | ||
366 | return; | ||
367 | } | ||
368 | |||
369 | /* Check for interrupts in the parent */ | ||
370 | np = of_get_parent(np); | ||
371 | if (np == NULL) | ||
372 | return; | ||
373 | |||
374 | if (np->n_intrs > 0) { | ||
375 | port->irq = np->intrs[0].line; | ||
376 | DBG(" port %d (%s), irq=%d\n", | ||
377 | index, np->full_name, port->irq); | ||
378 | } | ||
379 | of_node_put(np); | ||
380 | } | ||
381 | |||
382 | static void __init fixup_port_pio(int index, | ||
383 | struct device_node *np, | ||
384 | struct plat_serial8250_port *port) | ||
385 | { | ||
386 | #ifdef CONFIG_PCI | ||
387 | struct pci_controller *hose; | ||
388 | |||
389 | DBG("fixup_port_pio(%d)\n", index); | ||
390 | |||
391 | hose = pci_find_hose_for_OF_device(np); | ||
392 | if (hose) { | ||
393 | unsigned long offset = (unsigned long)hose->io_base_virt - | ||
394 | #ifdef CONFIG_PPC64 | ||
395 | pci_io_base; | ||
396 | #else | ||
397 | isa_io_base; | ||
398 | #endif | ||
399 | DBG("port %d, IO %lx -> %lx\n", | ||
400 | index, port->iobase, port->iobase + offset); | ||
401 | port->iobase += offset; | ||
402 | } | ||
403 | #endif | ||
404 | } | ||
405 | |||
406 | static void __init fixup_port_mmio(int index, | ||
407 | struct device_node *np, | ||
408 | struct plat_serial8250_port *port) | ||
409 | { | ||
410 | DBG("fixup_port_mmio(%d)\n", index); | ||
411 | |||
412 | port->membase = ioremap(port->mapbase, 0x100); | ||
413 | } | ||
414 | |||
415 | /* | ||
416 | * This is called as an arch initcall, hopefully before the PCI bus is | ||
417 | * probed and/or the 8250 driver loaded since we need to register our | ||
418 | * platform devices before 8250 PCI ones are detected as some of them | ||
419 | * must properly "override" the platform ones. | ||
420 | * | ||
421 | * This function fixes up the interrupt value for platform ports as it | ||
422 | * couldn't be done earlier before interrupt maps have been parsed. It | ||
423 | * also "corrects" the IO address for PIO ports for the same reason, | ||
424 | * since earlier the PHBs' virtual IO space wasn't assigned yet. It then | ||
425 | * registers all those platform ports for use by the 8250 driver when it | ||
426 | * finally loads. | ||
427 | */ | ||
428 | static int __init serial_dev_init(void) | ||
429 | { | ||
430 | int i; | ||
431 | |||
432 | if (legacy_serial_count == 0) | ||
433 | return -ENODEV; | ||
434 | |||
435 | /* | ||
436 | * Before we register the platform serial devices, we need | ||
437 | * to fix up their interrupts and their IO ports. | ||
438 | */ | ||
439 | DBG("Fixing serial ports interrupts and IO ports ...\n"); | ||
440 | |||
441 | for (i = 0; i < legacy_serial_count; i++) { | ||
442 | struct plat_serial8250_port *port = &legacy_serial_ports[i]; | ||
443 | struct device_node *np = legacy_serial_infos[i].np; | ||
444 | |||
445 | if (port->irq == NO_IRQ) | ||
446 | fixup_port_irq(i, np, port); | ||
447 | if (port->iotype == UPIO_PORT) | ||
448 | fixup_port_pio(i, np, port); | ||
449 | if (port->iotype == UPIO_MEM) | ||
450 | fixup_port_mmio(i, np, port); | ||
451 | } | ||
452 | |||
453 | DBG("Registering platform serial ports\n"); | ||
454 | |||
455 | return platform_device_register(&serial_device); | ||
456 | } | ||
457 | arch_initcall(serial_dev_init); | ||
458 | |||
459 | |||
460 | /* | ||
461 | * This is called very early, as part of console_init() (typically just after | ||
462 | * time_init()). This function is responsible for trying to find a good | ||
463 | * default console on serial ports. It tries to match the open firmware | ||
464 | * default output with one of the available serial console drivers, either | ||
465 | * one of the platform serial ports that have been probed earlier by | ||
466 | * find_legacy_serial_ports() or some more platform specific ones. | ||
467 | */ | ||
468 | static int __init check_legacy_serial_console(void) | ||
469 | { | ||
470 | struct device_node *prom_stdout = NULL; | ||
471 | int speed = 0, offset = 0; | ||
472 | char *name; | ||
473 | u32 *spd; | ||
474 | |||
475 | DBG(" -> check_legacy_serial_console()\n"); | ||
476 | |||
477 | /* The user has requested a console so this is already set up. */ | ||
478 | if (strstr(saved_command_line, "console=")) { | ||
479 | DBG(" console was specified !\n"); | ||
480 | return -EBUSY; | ||
481 | } | ||
482 | |||
483 | if (!of_chosen) { | ||
484 | DBG(" of_chosen is NULL !\n"); | ||
485 | return -ENODEV; | ||
486 | } | ||
487 | |||
488 | if (legacy_serial_console < 0) { | ||
489 | DBG(" legacy_serial_console not found !\n"); | ||
490 | return -ENODEV; | ||
491 | } | ||
492 | /* We are getting a weird phandle from OF ... */ | ||
493 | /* ... So use the full path instead */ | ||
494 | name = (char *)get_property(of_chosen, "linux,stdout-path", NULL); | ||
495 | if (name == NULL) { | ||
496 | DBG(" no linux,stdout-path !\n"); | ||
497 | return -ENODEV; | ||
498 | } | ||
499 | prom_stdout = of_find_node_by_path(name); | ||
500 | if (!prom_stdout) { | ||
501 | DBG(" can't find stdout package %s !\n", name); | ||
502 | return -ENODEV; | ||
503 | } | ||
504 | DBG("stdout is %s\n", prom_stdout->full_name); | ||
505 | |||
506 | name = (char *)get_property(prom_stdout, "name", NULL); | ||
507 | if (!name) { | ||
508 | DBG(" stdout package has no name !\n"); | ||
509 | goto not_found; | ||
510 | } | ||
511 | spd = (u32 *)get_property(prom_stdout, "current-speed", NULL); | ||
512 | if (spd) | ||
513 | speed = *spd; | ||
514 | |||
515 | if (0) | ||
516 | ; | ||
517 | #ifdef CONFIG_SERIAL_8250_CONSOLE | ||
518 | else if (strcmp(name, "serial") == 0) { | ||
519 | int i; | ||
520 | /* Look for it in probed array */ | ||
521 | for (i = 0; i < legacy_serial_count; i++) { | ||
522 | if (prom_stdout != legacy_serial_infos[i].np) | ||
523 | continue; | ||
524 | offset = i; | ||
525 | speed = legacy_serial_infos[i].speed; | ||
526 | break; | ||
527 | } | ||
528 | if (i >= legacy_serial_count) | ||
529 | goto not_found; | ||
530 | } | ||
531 | #endif /* CONFIG_SERIAL_8250_CONSOLE */ | ||
532 | #ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE | ||
533 | else if (strcmp(name, "ch-a") == 0) | ||
534 | offset = 0; | ||
535 | else if (strcmp(name, "ch-b") == 0) | ||
536 | offset = 1; | ||
537 | #endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */ | ||
538 | else | ||
539 | goto not_found; | ||
540 | of_node_put(prom_stdout); | ||
541 | |||
542 | DBG("Found serial console at ttyS%d\n", offset); | ||
543 | |||
544 | if (speed) { | ||
545 | static char __initdata opt[16]; | ||
546 | sprintf(opt, "%d", speed); | ||
547 | return add_preferred_console("ttyS", offset, opt); | ||
548 | } else | ||
549 | return add_preferred_console("ttyS", offset, NULL); | ||
550 | |||
551 | not_found: | ||
552 | DBG("No preferred console found !\n"); | ||
553 | of_node_put(prom_stdout); | ||
554 | return -ENODEV; | ||
555 | } | ||
556 | console_initcall(check_legacy_serial_console); | ||
557 | |||
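
For context, add_legacy_port() above ends up filling one plat_serial8250_port entry per discovered UART; a minimal sketch of what such an entry might look like for a memory-mapped 16550 follows (the address, clock and flag values are illustrative assumptions, not taken from the patch):

    /* Hedged sketch: the kind of entry add_legacy_port() builds for a
     * memory-mapped 16550. Address and clock are made-up example values;
     * the irq is left as NO_IRQ just as in the patch, to be filled in by
     * fixup_port_irq() when serial_dev_init() runs. */
    static struct plat_serial8250_port example_port = {
            .mapbase = 0xf8002000,          /* translated MMIO address (example) */
            .iotype  = UPIO_MEM,            /* UPIO_PORT would use .iobase instead */
            .uartclk = 33333333,            /* from the "clock-frequency" property */
            .irq     = NO_IRQ,              /* fixed up in serial_dev_init() */
            .flags   = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ,
    };
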
diff --git a/arch/powerpc/kernel/lparmap.c b/arch/powerpc/kernel/lparmap.c index 5a05a797485f..584d1e3c013d 100644 --- a/arch/powerpc/kernel/lparmap.c +++ b/arch/powerpc/kernel/lparmap.c | |||
@@ -7,7 +7,7 @@ | |||
7 | * 2 of the License, or (at your option) any later version. | 7 | * 2 of the License, or (at your option) any later version. |
8 | */ | 8 | */ |
9 | #include <asm/mmu.h> | 9 | #include <asm/mmu.h> |
10 | #include <asm/page.h> | 10 | #include <asm/pgtable.h> |
11 | #include <asm/iseries/lpar_map.h> | 11 | #include <asm/iseries/lpar_map.h> |
12 | 12 | ||
13 | const struct LparMap __attribute__((__section__(".text"))) xLparMap = { | 13 | const struct LparMap __attribute__((__section__(".text"))) xLparMap = { |
@@ -16,16 +16,16 @@ const struct LparMap __attribute__((__section__(".text"))) xLparMap = { | |||
16 | .xSegmentTableOffs = STAB0_PAGE, | 16 | .xSegmentTableOffs = STAB0_PAGE, |
17 | 17 | ||
18 | .xEsids = { | 18 | .xEsids = { |
19 | { .xKernelEsid = GET_ESID(KERNELBASE), | 19 | { .xKernelEsid = GET_ESID(PAGE_OFFSET), |
20 | .xKernelVsid = KERNEL_VSID(KERNELBASE), }, | 20 | .xKernelVsid = KERNEL_VSID(PAGE_OFFSET), }, |
21 | { .xKernelEsid = GET_ESID(VMALLOCBASE), | 21 | { .xKernelEsid = GET_ESID(VMALLOC_START), |
22 | .xKernelVsid = KERNEL_VSID(VMALLOCBASE), }, | 22 | .xKernelVsid = KERNEL_VSID(VMALLOC_START), }, |
23 | }, | 23 | }, |
24 | 24 | ||
25 | .xRanges = { | 25 | .xRanges = { |
26 | { .xPages = HvPagesToMap, | 26 | { .xPages = HvPagesToMap, |
27 | .xOffset = 0, | 27 | .xOffset = 0, |
28 | .xVPN = KERNEL_VSID(KERNELBASE) << (SID_SHIFT - HW_PAGE_SHIFT), | 28 | .xVPN = KERNEL_VSID(PAGE_OFFSET) << (SID_SHIFT - HW_PAGE_SHIFT), |
29 | }, | 29 | }, |
30 | }, | 30 | }, |
31 | }; | 31 | }; |
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c new file mode 100644 index 000000000000..a91e40c9ae45 --- /dev/null +++ b/arch/powerpc/kernel/machine_kexec.c | |||
@@ -0,0 +1,67 @@ | |||
1 | /* | ||
2 | * Code to handle transition of Linux booting another kernel. | ||
3 | * | ||
4 | * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> | ||
5 | * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz | ||
6 | * Copyright (C) 2005 IBM Corporation. | ||
7 | * | ||
8 | * This source code is licensed under the GNU General Public License, | ||
9 | * Version 2. See the file COPYING for more details. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kexec.h> | ||
13 | #include <linux/reboot.h> | ||
14 | #include <linux/threads.h> | ||
15 | #include <asm/machdep.h> | ||
16 | |||
17 | /* | ||
18 | * Provide a dummy crash_notes definition until crash dump is implemented. | ||
19 | * This prevents breakage of crash_notes attribute in kernel/ksysfs.c. | ||
20 | */ | ||
21 | note_buf_t crash_notes[NR_CPUS]; | ||
22 | |||
23 | void machine_crash_shutdown(struct pt_regs *regs) | ||
24 | { | ||
25 | if (ppc_md.machine_crash_shutdown) | ||
26 | ppc_md.machine_crash_shutdown(regs); | ||
27 | } | ||
28 | |||
29 | /* | ||
30 | * Do whatever setup is needed on the image and the | ||
31 | * reboot code buffer to allow us to avoid allocations | ||
32 | * later. | ||
33 | */ | ||
34 | int machine_kexec_prepare(struct kimage *image) | ||
35 | { | ||
36 | if (ppc_md.machine_kexec_prepare) | ||
37 | return ppc_md.machine_kexec_prepare(image); | ||
38 | /* | ||
39 | * Fail if platform doesn't provide its own machine_kexec_prepare | ||
40 | * implementation. | ||
41 | */ | ||
42 | return -ENOSYS; | ||
43 | } | ||
44 | |||
45 | void machine_kexec_cleanup(struct kimage *image) | ||
46 | { | ||
47 | if (ppc_md.machine_kexec_cleanup) | ||
48 | ppc_md.machine_kexec_cleanup(image); | ||
49 | } | ||
50 | |||
51 | /* | ||
52 | * Do not allocate memory (or fail in any way) in machine_kexec(). | ||
53 | * We are past the point of no return, committed to rebooting now. | ||
54 | */ | ||
55 | NORET_TYPE void machine_kexec(struct kimage *image) | ||
56 | { | ||
57 | if (ppc_md.machine_kexec) | ||
58 | ppc_md.machine_kexec(image); | ||
59 | else { | ||
60 | /* | ||
61 | * Fall back to normal restart if platform doesn't provide | ||
62 | * its own kexec function and the user insists on kexec... | ||
63 | */ | ||
64 | machine_restart(NULL); | ||
65 | } | ||
66 | for(;;); | ||
67 | } | ||
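
The machine_kexec*() entry points above simply dispatch through ppc_md, so a platform that is content with the generic behaviour only has to wire up the hooks; a hedged sketch (where exactly in its setup code a platform makes these assignments is an assumption for illustration):

    /* Sketch: hooking the generic implementations into a platform's
     * machdep_calls. default_machine_kexec and default_machine_kexec_prepare
     * are the ppc32/ppc64 functions added by this patch;
     * machine_kexec_cleanup may be left NULL if nothing needs undoing. */
    ppc_md.machine_kexec         = default_machine_kexec;
    ppc_md.machine_kexec_prepare = default_machine_kexec_prepare;
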
diff --git a/arch/powerpc/kernel/machine_kexec_32.c b/arch/powerpc/kernel/machine_kexec_32.c new file mode 100644 index 000000000000..443606134dff --- /dev/null +++ b/arch/powerpc/kernel/machine_kexec_32.c | |||
@@ -0,0 +1,65 @@ | |||
1 | /* | ||
2 | * PPC32 code to handle Linux booting another kernel. | ||
3 | * | ||
4 | * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> | ||
5 | * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz | ||
6 | * Copyright (C) 2005 IBM Corporation. | ||
7 | * | ||
8 | * This source code is licensed under the GNU General Public License, | ||
9 | * Version 2. See the file COPYING for more details. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kexec.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/string.h> | ||
15 | #include <asm/cacheflush.h> | ||
16 | #include <asm/hw_irq.h> | ||
17 | #include <asm/io.h> | ||
18 | |||
19 | typedef NORET_TYPE void (*relocate_new_kernel_t)( | ||
20 | unsigned long indirection_page, | ||
21 | unsigned long reboot_code_buffer, | ||
22 | unsigned long start_address) ATTRIB_NORET; | ||
23 | |||
24 | /* | ||
25 | * This is a generic machine_kexec function suitable at least for | ||
26 | * non-OpenFirmware embedded platforms. | ||
27 | * It merely copies the image relocation code to the control page and | ||
28 | * jumps to it. | ||
29 | * A platform-specific function may just call this one. | ||
30 | */ | ||
31 | void default_machine_kexec(struct kimage *image) | ||
32 | { | ||
33 | const extern unsigned char relocate_new_kernel[]; | ||
34 | const extern unsigned int relocate_new_kernel_size; | ||
35 | unsigned long page_list; | ||
36 | unsigned long reboot_code_buffer, reboot_code_buffer_phys; | ||
37 | relocate_new_kernel_t rnk; | ||
38 | |||
39 | /* Interrupts aren't acceptable while we reboot */ | ||
40 | local_irq_disable(); | ||
41 | |||
42 | page_list = image->head; | ||
43 | |||
44 | /* we need both effective and real address here */ | ||
45 | reboot_code_buffer = | ||
46 | (unsigned long)page_address(image->control_code_page); | ||
47 | reboot_code_buffer_phys = virt_to_phys((void *)reboot_code_buffer); | ||
48 | |||
49 | /* copy our kernel relocation code to the control code page */ | ||
50 | memcpy((void *)reboot_code_buffer, relocate_new_kernel, | ||
51 | relocate_new_kernel_size); | ||
52 | |||
53 | flush_icache_range(reboot_code_buffer, | ||
54 | reboot_code_buffer + KEXEC_CONTROL_CODE_SIZE); | ||
55 | printk(KERN_INFO "Bye!\n"); | ||
56 | |||
57 | /* now call it */ | ||
58 | rnk = (relocate_new_kernel_t) reboot_code_buffer; | ||
59 | (*rnk)(page_list, reboot_code_buffer_phys, image->start); | ||
60 | } | ||
61 | |||
62 | int default_machine_kexec_prepare(struct kimage *image) | ||
63 | { | ||
64 | return 0; | ||
65 | } | ||
diff --git a/arch/powerpc/kernel/machine_kexec_64.c b/arch/powerpc/kernel/machine_kexec_64.c index 97c51e452be7..d6431440c54f 100644 --- a/arch/powerpc/kernel/machine_kexec_64.c +++ b/arch/powerpc/kernel/machine_kexec_64.c | |||
@@ -1,5 +1,5 @@ | |||
1 | /* | 1 | /* |
2 | * machine_kexec.c - handle transition of Linux booting another kernel | 2 | * PPC64 code to handle Linux booting another kernel. |
3 | * | 3 | * |
4 | * Copyright (C) 2004-2005, IBM Corp. | 4 | * Copyright (C) 2004-2005, IBM Corp. |
5 | * | 5 | * |
@@ -28,21 +28,7 @@ | |||
28 | 28 | ||
29 | #define HASH_GROUP_SIZE 0x80 /* size of each hash group, asm/mmu.h */ | 29 | #define HASH_GROUP_SIZE 0x80 /* size of each hash group, asm/mmu.h */ |
30 | 30 | ||
31 | /* Have this around till we move it into crash specific file */ | 31 | int default_machine_kexec_prepare(struct kimage *image) |
32 | note_buf_t crash_notes[NR_CPUS]; | ||
33 | |||
34 | /* Dummy for now. Not sure if we need to have a crash shutdown in here | ||
35 | * and if what it will achieve. Letting it be now to compile the code | ||
36 | * in generic kexec environment | ||
37 | */ | ||
38 | void machine_crash_shutdown(struct pt_regs *regs) | ||
39 | { | ||
40 | /* do nothing right now */ | ||
41 | /* smp_relase_cpus() if we want smp on panic kernel */ | ||
42 | /* cpu_irq_down to isolate us until we are ready */ | ||
43 | } | ||
44 | |||
45 | int machine_kexec_prepare(struct kimage *image) | ||
46 | { | 32 | { |
47 | int i; | 33 | int i; |
48 | unsigned long begin, end; /* limits of segment */ | 34 | unsigned long begin, end; /* limits of segment */ |
@@ -111,11 +97,6 @@ int machine_kexec_prepare(struct kimage *image) | |||
111 | return 0; | 97 | return 0; |
112 | } | 98 | } |
113 | 99 | ||
114 | void machine_kexec_cleanup(struct kimage *image) | ||
115 | { | ||
116 | /* we do nothing in prepare that needs to be undone */ | ||
117 | } | ||
118 | |||
119 | #define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE) | 100 | #define IND_FLAGS (IND_DESTINATION | IND_INDIRECTION | IND_DONE | IND_SOURCE) |
120 | 101 | ||
121 | static void copy_segments(unsigned long ind) | 102 | static void copy_segments(unsigned long ind) |
@@ -172,9 +153,8 @@ void kexec_copy_flush(struct kimage *image) | |||
172 | * including ones that were in place on the original copy | 153 | * including ones that were in place on the original copy |
173 | */ | 154 | */ |
174 | for (i = 0; i < nr_segments; i++) | 155 | for (i = 0; i < nr_segments; i++) |
175 | flush_icache_range(ranges[i].mem + KERNELBASE, | 156 | flush_icache_range((unsigned long)__va(ranges[i].mem), |
176 | ranges[i].mem + KERNELBASE + | 157 | (unsigned long)__va(ranges[i].mem + ranges[i].memsz)); |
177 | ranges[i].memsz); | ||
178 | } | 158 | } |
179 | 159 | ||
180 | #ifdef CONFIG_SMP | 160 | #ifdef CONFIG_SMP |
@@ -283,13 +263,20 @@ extern NORET_TYPE void kexec_sequence(void *newstack, unsigned long start, | |||
283 | void (*clear_all)(void)) ATTRIB_NORET; | 263 | void (*clear_all)(void)) ATTRIB_NORET; |
284 | 264 | ||
285 | /* too late to fail here */ | 265 | /* too late to fail here */ |
286 | void machine_kexec(struct kimage *image) | 266 | void default_machine_kexec(struct kimage *image) |
287 | { | 267 | { |
288 | |||
289 | /* prepare control code if any */ | 268 | /* prepare control code if any */ |
290 | 269 | ||
291 | /* shutdown other cpus into our wait loop and quiesce interrupts */ | 270 | /* |
292 | kexec_prepare_cpus(); | 271 | * If the kexec boot is the normal one, need to shutdown other cpus |
272 | * into our wait loop and quiesce interrupts. | ||
273 | * Otherwise, in the case of crashed mode (crashing_cpu >= 0), | ||
274 | * stopping other CPUs and collecting their pt_regs is done before | ||
275 | * using debugger IPI. | ||
276 | */ | ||
277 | |||
278 | if (crashing_cpu == -1) | ||
279 | kexec_prepare_cpus(); | ||
293 | 280 | ||
294 | /* switch to a staticly allocated stack. Based on irq stack code. | 281 | /* switch to a staticly allocated stack. Based on irq stack code. |
295 | * XXX: the task struct will likely be invalid once we do the copy! | 282 | * XXX: the task struct will likely be invalid once we do the copy! |
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S index 624a983a9676..01d0d97a16e1 100644 --- a/arch/powerpc/kernel/misc_32.S +++ b/arch/powerpc/kernel/misc_32.S | |||
@@ -5,6 +5,10 @@ | |||
5 | * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) | 5 | * Largely rewritten by Cort Dougan (cort@cs.nmt.edu) |
6 | * and Paul Mackerras. | 6 | * and Paul Mackerras. |
7 | * | 7 | * |
8 | * kexec bits: | ||
9 | * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> | ||
10 | * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz | ||
11 | * | ||
8 | * This program is free software; you can redistribute it and/or | 12 | * This program is free software; you can redistribute it and/or |
9 | * modify it under the terms of the GNU General Public License | 13 | * modify it under the terms of the GNU General Public License |
10 | * as published by the Free Software Foundation; either version | 14 | * as published by the Free Software Foundation; either version |
@@ -24,6 +28,8 @@ | |||
24 | #include <asm/ppc_asm.h> | 28 | #include <asm/ppc_asm.h> |
25 | #include <asm/thread_info.h> | 29 | #include <asm/thread_info.h> |
26 | #include <asm/asm-offsets.h> | 30 | #include <asm/asm-offsets.h> |
31 | #include <asm/processor.h> | ||
32 | #include <asm/kexec.h> | ||
27 | 33 | ||
28 | .text | 34 | .text |
29 | 35 | ||
@@ -1006,3 +1012,110 @@ _GLOBAL(execve) | |||
1006 | */ | 1012 | */ |
1007 | _GLOBAL(__main) | 1013 | _GLOBAL(__main) |
1008 | blr | 1014 | blr |
1015 | |||
1016 | #ifdef CONFIG_KEXEC | ||
1017 | /* | ||
1018 | * Must be relocatable PIC code callable as a C function. | ||
1019 | */ | ||
1020 | .globl relocate_new_kernel | ||
1021 | relocate_new_kernel: | ||
1022 | /* r3 = page_list */ | ||
1023 | /* r4 = reboot_code_buffer */ | ||
1024 | /* r5 = start_address */ | ||
1025 | |||
1026 | li r0, 0 | ||
1027 | |||
1028 | /* | ||
1029 | * Set the Machine State Register to a known state, | ||
1030 | * switch the MMU off and jump to 1: in a single step. | ||
1031 | */ | ||
1032 | |||
1033 | mr r8, r0 | ||
1034 | ori r8, r8, MSR_RI|MSR_ME | ||
1035 | mtspr SPRN_SRR1, r8 | ||
1036 | addi r8, r4, 1f - relocate_new_kernel | ||
1037 | mtspr SPRN_SRR0, r8 | ||
1038 | sync | ||
1039 | rfi | ||
1040 | |||
1041 | 1: | ||
1042 | /* from this point address translation is turned off */ | ||
1043 | /* and interrupts are disabled */ | ||
1044 | |||
1045 | /* set a new stack at the bottom of our page... */ | ||
1046 | /* (not really needed now) */ | ||
1047 | addi r1, r4, KEXEC_CONTROL_CODE_SIZE - 8 /* for LR Save+Back Chain */ | ||
1048 | stw r0, 0(r1) | ||
1049 | |||
1050 | /* Do the copies */ | ||
1051 | li r6, 0 /* checksum */ | ||
1052 | mr r0, r3 | ||
1053 | b 1f | ||
1054 | |||
1055 | 0: /* top, read another word for the indirection page */ | ||
1056 | lwzu r0, 4(r3) | ||
1057 | |||
1058 | 1: | ||
1059 | /* is it a destination page? (r8) */ | ||
1060 | rlwinm. r7, r0, 0, 31, 31 /* IND_DESTINATION (1<<0) */ | ||
1061 | beq 2f | ||
1062 | |||
1063 | rlwinm r8, r0, 0, 0, 19 /* clear kexec flags, page align */ | ||
1064 | b 0b | ||
1065 | |||
1066 | 2: /* is it an indirection page? (r3) */ | ||
1067 | rlwinm. r7, r0, 0, 30, 30 /* IND_INDIRECTION (1<<1) */ | ||
1068 | beq 2f | ||
1069 | |||
1070 | rlwinm r3, r0, 0, 0, 19 /* clear kexec flags, page align */ | ||
1071 | subi r3, r3, 4 | ||
1072 | b 0b | ||
1073 | |||
1074 | 2: /* are we done? */ | ||
1075 | rlwinm. r7, r0, 0, 29, 29 /* IND_DONE (1<<2) */ | ||
1076 | beq 2f | ||
1077 | b 3f | ||
1078 | |||
1079 | 2: /* is it a source page? (r9) */ | ||
1080 | rlwinm. r7, r0, 0, 28, 28 /* IND_SOURCE (1<<3) */ | ||
1081 | beq 0b | ||
1082 | |||
1083 | rlwinm r9, r0, 0, 0, 19 /* clear kexec flags, page align */ | ||
1084 | |||
1085 | li r7, PAGE_SIZE / 4 | ||
1086 | mtctr r7 | ||
1087 | subi r9, r9, 4 | ||
1088 | subi r8, r8, 4 | ||
1089 | 9: | ||
1090 | lwzu r0, 4(r9) /* do the copy */ | ||
1091 | xor r6, r6, r0 | ||
1092 | stwu r0, 4(r8) | ||
1093 | dcbst 0, r8 | ||
1094 | sync | ||
1095 | icbi 0, r8 | ||
1096 | bdnz 9b | ||
1097 | |||
1098 | addi r9, r9, 4 | ||
1099 | addi r8, r8, 4 | ||
1100 | b 0b | ||
1101 | |||
1102 | 3: | ||
1103 | |||
1104 | /* To be certain of avoiding problems with self-modifying code, | ||
1105 | * execute a serializing instruction here. | ||
1106 | */ | ||
1107 | isync | ||
1108 | sync | ||
1109 | |||
1110 | /* jump to the entry point, usually the setup routine */ | ||
1111 | mtlr r5 | ||
1112 | blrl | ||
1113 | |||
1114 | 1: b 1b | ||
1115 | |||
1116 | relocate_new_kernel_end: | ||
1117 | |||
1118 | .globl relocate_new_kernel_size | ||
1119 | relocate_new_kernel_size: | ||
1120 | .long relocate_new_kernel_end - relocate_new_kernel | ||
1121 | #endif | ||
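
The copy loop in relocate_new_kernel above walks the standard kexec indirection chain; the same logic, written as a hedged C sketch (flag values are taken from the comments in the assembly, and copy_and_flush_page() is a made-up placeholder for the lwzu/stwu/dcbst/icbi sequence):

    /* Sketch of the indirection-page walk done by relocate_new_kernel.
     * 'head' is the image->head value passed in r3; as in the assembly,
     * the first entry is expected to be an indirection (or done) entry. */
    unsigned long word = head, dest = 0;
    unsigned long *entry = NULL;

    for (;;) {
            if (word & 0x1) {                       /* IND_DESTINATION */
                    dest = word & PAGE_MASK;        /* next page to write */
            } else if (word & 0x2) {                /* IND_INDIRECTION */
                    entry = (unsigned long *)(word & PAGE_MASK);
                    word = *entry;
                    continue;
            } else if (word & 0x4) {                /* IND_DONE */
                    break;
            } else if (word & 0x8) {                /* IND_SOURCE */
                    copy_and_flush_page((void *)dest,
                                        (void *)(word & PAGE_MASK));
                    dest += PAGE_SIZE;              /* destination advances one page */
            }
            word = *++entry;                        /* next chain entry */
    }
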
diff --git a/arch/powerpc/kernel/nvram_64.c b/arch/powerpc/kernel/nvram_64.c index c0fcd29918ce..fd7db8d542db 100644 --- a/arch/powerpc/kernel/nvram_64.c +++ b/arch/powerpc/kernel/nvram_64.c | |||
@@ -80,80 +80,74 @@ static loff_t dev_nvram_llseek(struct file *file, loff_t offset, int origin) | |||
80 | static ssize_t dev_nvram_read(struct file *file, char __user *buf, | 80 | static ssize_t dev_nvram_read(struct file *file, char __user *buf, |
81 | size_t count, loff_t *ppos) | 81 | size_t count, loff_t *ppos) |
82 | { | 82 | { |
83 | ssize_t len; | 83 | ssize_t ret; |
84 | char *tmp_buffer; | 84 | char *tmp = NULL; |
85 | int size; | 85 | ssize_t size; |
86 | 86 | ||
87 | if (ppc_md.nvram_size == NULL) | 87 | ret = -ENODEV; |
88 | return -ENODEV; | 88 | if (!ppc_md.nvram_size) |
89 | goto out; | ||
90 | |||
91 | ret = 0; | ||
89 | size = ppc_md.nvram_size(); | 92 | size = ppc_md.nvram_size(); |
93 | if (*ppos >= size || size < 0) | ||
94 | goto out; | ||
90 | 95 | ||
91 | if (!access_ok(VERIFY_WRITE, buf, count)) | 96 | count = min_t(size_t, count, size - *ppos); |
92 | return -EFAULT; | 97 | count = min(count, PAGE_SIZE); |
93 | if (*ppos >= size) | ||
94 | return 0; | ||
95 | if (count > size) | ||
96 | count = size; | ||
97 | 98 | ||
98 | tmp_buffer = (char *) kmalloc(count, GFP_KERNEL); | 99 | ret = -ENOMEM; |
99 | if (!tmp_buffer) { | 100 | tmp = kmalloc(count, GFP_KERNEL); |
100 | printk(KERN_ERR "dev_read_nvram: kmalloc failed\n"); | 101 | if (!tmp) |
101 | return -ENOMEM; | 102 | goto out; |
102 | } | ||
103 | 103 | ||
104 | len = ppc_md.nvram_read(tmp_buffer, count, ppos); | 104 | ret = ppc_md.nvram_read(tmp, count, ppos); |
105 | if ((long)len <= 0) { | 105 | if (ret <= 0) |
106 | kfree(tmp_buffer); | 106 | goto out; |
107 | return len; | ||
108 | } | ||
109 | 107 | ||
110 | if (copy_to_user(buf, tmp_buffer, len)) { | 108 | if (copy_to_user(buf, tmp, ret)) |
111 | kfree(tmp_buffer); | 109 | ret = -EFAULT; |
112 | return -EFAULT; | ||
113 | } | ||
114 | 110 | ||
115 | kfree(tmp_buffer); | 111 | out: |
116 | return len; | 112 | kfree(tmp); |
113 | return ret; | ||
117 | 114 | ||
118 | } | 115 | } |
119 | 116 | ||
120 | static ssize_t dev_nvram_write(struct file *file, const char __user *buf, | 117 | static ssize_t dev_nvram_write(struct file *file, const char __user *buf, |
121 | size_t count, loff_t *ppos) | 118 | size_t count, loff_t *ppos) |
122 | { | 119 | { |
123 | ssize_t len; | 120 | ssize_t ret; |
124 | char * tmp_buffer; | 121 | char *tmp = NULL; |
125 | int size; | 122 | ssize_t size; |
126 | 123 | ||
127 | if (ppc_md.nvram_size == NULL) | 124 | ret = -ENODEV; |
128 | return -ENODEV; | 125 | if (!ppc_md.nvram_size) |
126 | goto out; | ||
127 | |||
128 | ret = 0; | ||
129 | size = ppc_md.nvram_size(); | 129 | size = ppc_md.nvram_size(); |
130 | if (*ppos >= size || size < 0) | ||
131 | goto out; | ||
130 | 132 | ||
131 | if (!access_ok(VERIFY_READ, buf, count)) | 133 | count = min_t(size_t, count, size - *ppos); |
132 | return -EFAULT; | 134 | count = min(count, PAGE_SIZE); |
133 | if (*ppos >= size) | ||
134 | return 0; | ||
135 | if (count > size) | ||
136 | count = size; | ||
137 | 135 | ||
138 | tmp_buffer = (char *) kmalloc(count, GFP_KERNEL); | 136 | ret = -ENOMEM; |
139 | if (!tmp_buffer) { | 137 | tmp = kmalloc(count, GFP_KERNEL); |
140 | printk(KERN_ERR "dev_nvram_write: kmalloc failed\n"); | 138 | if (!tmp) |
141 | return -ENOMEM; | 139 | goto out; |
142 | } | ||
143 | |||
144 | if (copy_from_user(tmp_buffer, buf, count)) { | ||
145 | kfree(tmp_buffer); | ||
146 | return -EFAULT; | ||
147 | } | ||
148 | 140 | ||
149 | len = ppc_md.nvram_write(tmp_buffer, count, ppos); | 141 | ret = -EFAULT; |
150 | if ((long)len <= 0) { | 142 | if (copy_from_user(tmp, buf, count)) |
151 | kfree(tmp_buffer); | 143 | goto out; |
152 | return len; | 144 | |
153 | } | 145 | ret = ppc_md.nvram_write(tmp, count, ppos); |
146 | |||
147 | out: | ||
148 | kfree(tmp); | ||
149 | return ret; | ||
154 | 150 | ||
155 | kfree(tmp_buffer); | ||
156 | return len; | ||
157 | } | 151 | } |
158 | 152 | ||
159 | static int dev_nvram_ioctl(struct inode *inode, struct file *file, | 153 | static int dev_nvram_ioctl(struct inode *inode, struct file *file, |
diff --git a/arch/powerpc/kernel/paca.c b/arch/powerpc/kernel/paca.c index a7b68f911eb1..999bdd816769 100644 --- a/arch/powerpc/kernel/paca.c +++ b/arch/powerpc/kernel/paca.c | |||
@@ -17,6 +17,7 @@ | |||
17 | #include <asm/page.h> | 17 | #include <asm/page.h> |
18 | #include <asm/lppaca.h> | 18 | #include <asm/lppaca.h> |
19 | #include <asm/iseries/it_lp_queue.h> | 19 | #include <asm/iseries/it_lp_queue.h> |
20 | #include <asm/iseries/it_lp_reg_save.h> | ||
20 | #include <asm/paca.h> | 21 | #include <asm/paca.h> |
21 | 22 | ||
22 | 23 | ||
@@ -26,8 +27,7 @@ extern unsigned long __toc_start; | |||
26 | 27 | ||
27 | /* The Paca is an array with one entry per processor. Each contains an | 28 | /* The Paca is an array with one entry per processor. Each contains an |
28 | * lppaca, which contains the information shared between the | 29 | * lppaca, which contains the information shared between the |
29 | * hypervisor and Linux. Each also contains an ItLpRegSave area which | 30 | * hypervisor and Linux. |
30 | * is used by the hypervisor to save registers. | ||
31 | * On systems with hardware multi-threading, there are two threads | 31 | * On systems with hardware multi-threading, there are two threads |
32 | * per processor. The Paca array must contain an entry for each thread. | 32 | * per processor. The Paca array must contain an entry for each thread. |
33 | * The VPD Areas will give a max logical processors = 2 * max physical | 33 | * The VPD Areas will give a max logical processors = 2 * max physical |
@@ -37,7 +37,6 @@ extern unsigned long __toc_start; | |||
37 | #define PACA_INIT_COMMON(number, start, asrr, asrv) \ | 37 | #define PACA_INIT_COMMON(number, start, asrr, asrv) \ |
38 | .lock_token = 0x8000, \ | 38 | .lock_token = 0x8000, \ |
39 | .paca_index = (number), /* Paca Index */ \ | 39 | .paca_index = (number), /* Paca Index */ \ |
40 | .default_decr = 0x00ff0000, /* Initial Decr */ \ | ||
41 | .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \ | 40 | .kernel_toc = (unsigned long)(&__toc_start) + 0x8000UL, \ |
42 | .stab_real = (asrr), /* Real pointer to segment table */ \ | 41 | .stab_real = (asrr), /* Real pointer to segment table */ \ |
43 | .stab_addr = (asrv), /* Virt pointer to segment table */ \ | 42 | .stab_addr = (asrv), /* Virt pointer to segment table */ \ |
@@ -57,11 +56,7 @@ extern unsigned long __toc_start; | |||
57 | #ifdef CONFIG_PPC_ISERIES | 56 | #ifdef CONFIG_PPC_ISERIES |
58 | #define PACA_INIT_ISERIES(number) \ | 57 | #define PACA_INIT_ISERIES(number) \ |
59 | .lppaca_ptr = &paca[number].lppaca, \ | 58 | .lppaca_ptr = &paca[number].lppaca, \ |
60 | .reg_save_ptr = &paca[number].reg_save, \ | 59 | .reg_save_ptr = &iseries_reg_save[number], |
61 | .reg_save = { \ | ||
62 | .xDesc = 0xd397d9e2, /* "LpRS" */ \ | ||
63 | .xSize = sizeof(struct ItLpRegSave) \ | ||
64 | } | ||
65 | 60 | ||
66 | #define PACA_INIT(number) \ | 61 | #define PACA_INIT(number) \ |
67 | { \ | 62 | { \ |
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c index 8b6008ab217d..fc60a773af7d 100644 --- a/arch/powerpc/kernel/pci_64.c +++ b/arch/powerpc/kernel/pci_64.c | |||
@@ -34,7 +34,7 @@ | |||
34 | 34 | ||
35 | #ifdef DEBUG | 35 | #ifdef DEBUG |
36 | #include <asm/udbg.h> | 36 | #include <asm/udbg.h> |
37 | #define DBG(fmt...) udbg_printf(fmt) | 37 | #define DBG(fmt...) printk(fmt) |
38 | #else | 38 | #else |
39 | #define DBG(fmt...) | 39 | #define DBG(fmt...) |
40 | #endif | 40 | #endif |
@@ -251,7 +251,7 @@ void pcibios_free_controller(struct pci_controller *phb) | |||
251 | kfree(phb); | 251 | kfree(phb); |
252 | } | 252 | } |
253 | 253 | ||
254 | static void __init pcibios_claim_one_bus(struct pci_bus *b) | 254 | void __devinit pcibios_claim_one_bus(struct pci_bus *b) |
255 | { | 255 | { |
256 | struct pci_dev *dev; | 256 | struct pci_dev *dev; |
257 | struct pci_bus *child_bus; | 257 | struct pci_bus *child_bus; |
@@ -323,6 +323,7 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev) | |||
323 | addrs = (u32 *) get_property(node, "assigned-addresses", &proplen); | 323 | addrs = (u32 *) get_property(node, "assigned-addresses", &proplen); |
324 | if (!addrs) | 324 | if (!addrs) |
325 | return; | 325 | return; |
326 | DBG(" parse addresses (%d bytes) @ %p\n", proplen, addrs); | ||
326 | for (; proplen >= 20; proplen -= 20, addrs += 5) { | 327 | for (; proplen >= 20; proplen -= 20, addrs += 5) { |
327 | flags = pci_parse_of_flags(addrs[0]); | 328 | flags = pci_parse_of_flags(addrs[0]); |
328 | if (!flags) | 329 | if (!flags) |
@@ -332,6 +333,9 @@ static void pci_parse_of_addrs(struct device_node *node, struct pci_dev *dev) | |||
332 | if (!size) | 333 | if (!size) |
333 | continue; | 334 | continue; |
334 | i = addrs[0] & 0xff; | 335 | i = addrs[0] & 0xff; |
336 | DBG(" base: %llx, size: %llx, i: %x\n", | ||
337 | (unsigned long long)base, (unsigned long long)size, i); | ||
338 | |||
335 | if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { | 339 | if (PCI_BASE_ADDRESS_0 <= i && i <= PCI_BASE_ADDRESS_5) { |
336 | res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; | 340 | res = &dev->resource[(i - PCI_BASE_ADDRESS_0) >> 2]; |
337 | } else if (i == dev->rom_base_reg) { | 341 | } else if (i == dev->rom_base_reg) { |
@@ -362,6 +366,8 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, | |||
362 | if (type == NULL) | 366 | if (type == NULL) |
363 | type = ""; | 367 | type = ""; |
364 | 368 | ||
369 | DBG(" create device, devfn: %x, type: %s\n", devfn, type); | ||
370 | |||
365 | memset(dev, 0, sizeof(struct pci_dev)); | 371 | memset(dev, 0, sizeof(struct pci_dev)); |
366 | dev->bus = bus; | 372 | dev->bus = bus; |
367 | dev->sysdata = node; | 373 | dev->sysdata = node; |
@@ -381,6 +387,8 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, | |||
381 | dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); | 387 | dev->bus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); |
382 | dev->class = get_int_prop(node, "class-code", 0); | 388 | dev->class = get_int_prop(node, "class-code", 0); |
383 | 389 | ||
390 | DBG(" class: 0x%x\n", dev->class); | ||
391 | |||
384 | dev->current_state = 4; /* unknown power state */ | 392 | dev->current_state = 4; /* unknown power state */ |
385 | 393 | ||
386 | if (!strcmp(type, "pci")) { | 394 | if (!strcmp(type, "pci")) { |
@@ -402,6 +410,8 @@ struct pci_dev *of_create_pci_dev(struct device_node *node, | |||
402 | 410 | ||
403 | pci_parse_of_addrs(node, dev); | 411 | pci_parse_of_addrs(node, dev); |
404 | 412 | ||
413 | DBG(" adding to system ...\n"); | ||
414 | |||
405 | pci_device_add(dev, bus); | 415 | pci_device_add(dev, bus); |
406 | 416 | ||
407 | /* XXX pci_scan_msi_device(dev); */ | 417 | /* XXX pci_scan_msi_device(dev); */ |
@@ -418,15 +428,21 @@ void __devinit of_scan_bus(struct device_node *node, | |||
418 | int reglen, devfn; | 428 | int reglen, devfn; |
419 | struct pci_dev *dev; | 429 | struct pci_dev *dev; |
420 | 430 | ||
431 | DBG("of_scan_bus(%s) bus no %d... \n", node->full_name, bus->number); | ||
432 | |||
421 | while ((child = of_get_next_child(node, child)) != NULL) { | 433 | while ((child = of_get_next_child(node, child)) != NULL) { |
434 | DBG(" * %s\n", child->full_name); | ||
422 | reg = (u32 *) get_property(child, "reg", ®len); | 435 | reg = (u32 *) get_property(child, "reg", ®len); |
423 | if (reg == NULL || reglen < 20) | 436 | if (reg == NULL || reglen < 20) |
424 | continue; | 437 | continue; |
425 | devfn = (reg[0] >> 8) & 0xff; | 438 | devfn = (reg[0] >> 8) & 0xff; |
439 | |||
426 | /* create a new pci_dev for this device */ | 440 | /* create a new pci_dev for this device */ |
427 | dev = of_create_pci_dev(child, bus, devfn); | 441 | dev = of_create_pci_dev(child, bus, devfn); |
428 | if (!dev) | 442 | if (!dev) |
429 | continue; | 443 | continue; |
444 | DBG("dev header type: %x\n", dev->hdr_type); | ||
445 | |||
430 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || | 446 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || |
431 | dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) | 447 | dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) |
432 | of_scan_pci_bridge(child, dev); | 448 | of_scan_pci_bridge(child, dev); |
@@ -446,16 +462,18 @@ void __devinit of_scan_pci_bridge(struct device_node *node, | |||
446 | unsigned int flags; | 462 | unsigned int flags; |
447 | u64 size; | 463 | u64 size; |
448 | 464 | ||
465 | DBG("of_scan_pci_bridge(%s)\n", node->full_name); | ||
466 | |||
449 | /* parse bus-range property */ | 467 | /* parse bus-range property */ |
450 | busrange = (u32 *) get_property(node, "bus-range", &len); | 468 | busrange = (u32 *) get_property(node, "bus-range", &len); |
451 | if (busrange == NULL || len != 8) { | 469 | if (busrange == NULL || len != 8) { |
452 | printk(KERN_ERR "Can't get bus-range for PCI-PCI bridge %s\n", | 470 | printk(KERN_DEBUG "Can't get bus-range for PCI-PCI bridge %s\n", |
453 | node->full_name); | 471 | node->full_name); |
454 | return; | 472 | return; |
455 | } | 473 | } |
456 | ranges = (u32 *) get_property(node, "ranges", &len); | 474 | ranges = (u32 *) get_property(node, "ranges", &len); |
457 | if (ranges == NULL) { | 475 | if (ranges == NULL) { |
458 | printk(KERN_ERR "Can't get ranges for PCI-PCI bridge %s\n", | 476 | printk(KERN_DEBUG "Can't get ranges for PCI-PCI bridge %s\n", |
459 | node->full_name); | 477 | node->full_name); |
460 | return; | 478 | return; |
461 | } | 479 | } |
@@ -509,10 +527,13 @@ void __devinit of_scan_pci_bridge(struct device_node *node, | |||
509 | } | 527 | } |
510 | sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), | 528 | sprintf(bus->name, "PCI Bus %04x:%02x", pci_domain_nr(bus), |
511 | bus->number); | 529 | bus->number); |
530 | DBG(" bus name: %s\n", bus->name); | ||
512 | 531 | ||
513 | mode = PCI_PROBE_NORMAL; | 532 | mode = PCI_PROBE_NORMAL; |
514 | if (ppc_md.pci_probe_mode) | 533 | if (ppc_md.pci_probe_mode) |
515 | mode = ppc_md.pci_probe_mode(bus); | 534 | mode = ppc_md.pci_probe_mode(bus); |
535 | DBG(" probe mode: %d\n", mode); | ||
536 | |||
516 | if (mode == PCI_PROBE_DEVTREE) | 537 | if (mode == PCI_PROBE_DEVTREE) |
517 | of_scan_bus(node, bus); | 538 | of_scan_bus(node, bus); |
518 | else if (mode == PCI_PROBE_NORMAL) | 539 | else if (mode == PCI_PROBE_NORMAL) |
@@ -528,6 +549,8 @@ void __devinit scan_phb(struct pci_controller *hose) | |||
528 | int i, mode; | 549 | int i, mode; |
529 | struct resource *res; | 550 | struct resource *res; |
530 | 551 | ||
552 | DBG("Scanning PHB %s\n", node ? node->full_name : "<NO NAME>"); | ||
553 | |||
531 | bus = pci_create_bus(NULL, hose->first_busno, hose->ops, node); | 554 | bus = pci_create_bus(NULL, hose->first_busno, hose->ops, node); |
532 | if (bus == NULL) { | 555 | if (bus == NULL) { |
533 | printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", | 556 | printk(KERN_ERR "Failed to create bus for PCI domain %04x\n", |
@@ -552,8 +575,9 @@ void __devinit scan_phb(struct pci_controller *hose) | |||
552 | 575 | ||
553 | mode = PCI_PROBE_NORMAL; | 576 | mode = PCI_PROBE_NORMAL; |
554 | #ifdef CONFIG_PPC_MULTIPLATFORM | 577 | #ifdef CONFIG_PPC_MULTIPLATFORM |
555 | if (ppc_md.pci_probe_mode) | 578 | if (node && ppc_md.pci_probe_mode) |
556 | mode = ppc_md.pci_probe_mode(bus); | 579 | mode = ppc_md.pci_probe_mode(bus); |
580 | DBG(" probe mode: %d\n", mode); | ||
557 | if (mode == PCI_PROBE_DEVTREE) { | 581 | if (mode == PCI_PROBE_DEVTREE) { |
558 | bus->subordinate = hose->last_busno; | 582 | bus->subordinate = hose->last_busno; |
559 | of_scan_bus(node, bus); | 583 | of_scan_bus(node, bus); |
@@ -842,8 +866,7 @@ pgprot_t pci_phys_mem_access_prot(struct file *file, | |||
842 | * Returns a negative error code on failure, zero on success. | 866 | * Returns a negative error code on failure, zero on success. |
843 | */ | 867 | */ |
844 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, | 868 | int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma, |
845 | enum pci_mmap_state mmap_state, | 869 | enum pci_mmap_state mmap_state, int write_combine) |
846 | int write_combine) | ||
847 | { | 870 | { |
848 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; | 871 | unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; |
849 | struct resource *rp; | 872 | struct resource *rp; |
@@ -896,6 +919,25 @@ static void __devinit pci_process_ISA_OF_ranges(struct device_node *isa_node, | |||
896 | unsigned long phb_io_base_phys, | 919 | unsigned long phb_io_base_phys, |
897 | void __iomem * phb_io_base_virt) | 920 | void __iomem * phb_io_base_virt) |
898 | { | 921 | { |
922 | /* Remove these asap */ | ||
923 | |||
924 | struct pci_address { | ||
925 | u32 a_hi; | ||
926 | u32 a_mid; | ||
927 | u32 a_lo; | ||
928 | }; | ||
929 | |||
930 | struct isa_address { | ||
931 | u32 a_hi; | ||
932 | u32 a_lo; | ||
933 | }; | ||
934 | |||
935 | struct isa_range { | ||
936 | struct isa_address isa_addr; | ||
937 | struct pci_address pci_addr; | ||
938 | unsigned int size; | ||
939 | }; | ||
940 | |||
899 | struct isa_range *range; | 941 | struct isa_range *range; |
900 | unsigned long pci_addr; | 942 | unsigned long pci_addr; |
901 | unsigned int isa_addr; | 943 | unsigned int isa_addr; |
@@ -1223,6 +1265,7 @@ void __devinit pcibios_fixup_device_resources(struct pci_dev *dev, | |||
1223 | } | 1265 | } |
1224 | EXPORT_SYMBOL(pcibios_fixup_device_resources); | 1266 | EXPORT_SYMBOL(pcibios_fixup_device_resources); |
1225 | 1267 | ||
1268 | |||
1226 | static void __devinit do_bus_setup(struct pci_bus *bus) | 1269 | static void __devinit do_bus_setup(struct pci_bus *bus) |
1227 | { | 1270 | { |
1228 | struct pci_dev *dev; | 1271 | struct pci_dev *dev; |
@@ -1306,8 +1349,38 @@ void pci_resource_to_user(const struct pci_dev *dev, int bar, | |||
1306 | *end = rsrc->end + offset; | 1349 | *end = rsrc->end + offset; |
1307 | } | 1350 | } |
1308 | 1351 | ||
1352 | struct pci_controller* pci_find_hose_for_OF_device(struct device_node* node) | ||
1353 | { | ||
1354 | if (!have_of) | ||
1355 | return NULL; | ||
1356 | while(node) { | ||
1357 | struct pci_controller *hose, *tmp; | ||
1358 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) | ||
1359 | if (hose->arch_data == node) | ||
1360 | return hose; | ||
1361 | node = node->parent; | ||
1362 | } | ||
1363 | return NULL; | ||
1364 | } | ||
1365 | |||
1309 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | 1366 | #endif /* CONFIG_PPC_MULTIPLATFORM */ |
1310 | 1367 | ||
1368 | unsigned long pci_address_to_pio(phys_addr_t address) | ||
1369 | { | ||
1370 | struct pci_controller *hose, *tmp; | ||
1371 | |||
1372 | list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { | ||
1373 | if (address >= hose->io_base_phys && | ||
1374 | address < (hose->io_base_phys + hose->pci_io_size)) { | ||
1375 | unsigned long base = | ||
1376 | (unsigned long)hose->io_base_virt - pci_io_base; | ||
1377 | return base + (address - hose->io_base_phys); | ||
1378 | } | ||
1379 | } | ||
1380 | return (unsigned int)-1; | ||
1381 | } | ||
1382 | EXPORT_SYMBOL_GPL(pci_address_to_pio); | ||
1383 | |||
1311 | 1384 | ||
1312 | #define IOBASE_BRIDGE_NUMBER 0 | 1385 | #define IOBASE_BRIDGE_NUMBER 0 |
1313 | #define IOBASE_MEMORY 1 | 1386 | #define IOBASE_MEMORY 1 |
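
pci_address_to_pio() added above turns a CPU physical address that falls inside some host bridge's IO window into the PIO "cookie" (an offset from pci_io_base) that drivers store in fields such as iobase; a hedged usage sketch, where taddr and port are assumed to exist in the caller (for example a legacy-serial style fixup):

    /* Sketch only: converting a translated IO-space address (e.g. the
     * result of of_translate_address() on an ISA or PCI UART node) into
     * an iobase. 'taddr' and 'port' are assumptions for illustration. */
    unsigned long pio = pci_address_to_pio(taddr);

    if (pio != 0xffffffffUL)        /* 0xffffffff is the "no PHB window matched" sentinel */
            port->iobase = pio;
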
diff --git a/arch/powerpc/kernel/pmc.c b/arch/powerpc/kernel/pmc.c index 2d333cc84082..e6fb194fe537 100644 --- a/arch/powerpc/kernel/pmc.c +++ b/arch/powerpc/kernel/pmc.c | |||
@@ -43,8 +43,13 @@ static void dummy_perf(struct pt_regs *regs) | |||
43 | mtspr(SPRN_MMCR0, mmcr0); | 43 | mtspr(SPRN_MMCR0, mmcr0); |
44 | } | 44 | } |
45 | #else | 45 | #else |
46 | /* Ensure exceptions are disabled */ | ||
46 | static void dummy_perf(struct pt_regs *regs) | 47 | static void dummy_perf(struct pt_regs *regs) |
47 | { | 48 | { |
49 | unsigned int mmcr0 = mfspr(SPRN_MMCR0); | ||
50 | |||
51 | mmcr0 &= ~(MMCR0_PMXE); | ||
52 | mtspr(SPRN_MMCR0, mmcr0); | ||
48 | } | 53 | } |
49 | #endif | 54 | #endif |
50 | 55 | ||
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c index 94db25708456..b2758148a0de 100644 --- a/arch/powerpc/kernel/ppc_ksyms.c +++ b/arch/powerpc/kernel/ppc_ksyms.c | |||
@@ -76,11 +76,6 @@ EXPORT_SYMBOL(single_step_exception); | |||
76 | EXPORT_SYMBOL(sys_sigreturn); | 76 | EXPORT_SYMBOL(sys_sigreturn); |
77 | #endif | 77 | #endif |
78 | 78 | ||
79 | #if defined(CONFIG_PPC_PREP) | ||
80 | EXPORT_SYMBOL(_prep_type); | ||
81 | EXPORT_SYMBOL(ucSystemType); | ||
82 | #endif | ||
83 | |||
84 | EXPORT_SYMBOL(strcpy); | 79 | EXPORT_SYMBOL(strcpy); |
85 | EXPORT_SYMBOL(strncpy); | 80 | EXPORT_SYMBOL(strncpy); |
86 | EXPORT_SYMBOL(strcat); | 81 | EXPORT_SYMBOL(strcat); |
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 3bf968e74095..977ee3adaf2d 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/initrd.h> | 29 | #include <linux/initrd.h> |
30 | #include <linux/bitops.h> | 30 | #include <linux/bitops.h> |
31 | #include <linux/module.h> | 31 | #include <linux/module.h> |
32 | #include <linux/kexec.h> | ||
32 | 33 | ||
33 | #include <asm/prom.h> | 34 | #include <asm/prom.h> |
34 | #include <asm/rtas.h> | 35 | #include <asm/rtas.h> |
@@ -37,6 +38,7 @@ | |||
37 | #include <asm/processor.h> | 38 | #include <asm/processor.h> |
38 | #include <asm/irq.h> | 39 | #include <asm/irq.h> |
39 | #include <asm/io.h> | 40 | #include <asm/io.h> |
41 | #include <asm/kdump.h> | ||
40 | #include <asm/smp.h> | 42 | #include <asm/smp.h> |
41 | #include <asm/system.h> | 43 | #include <asm/system.h> |
42 | #include <asm/mmu.h> | 44 | #include <asm/mmu.h> |
@@ -55,21 +57,6 @@ | |||
55 | #define DBG(fmt...) | 57 | #define DBG(fmt...) |
56 | #endif | 58 | #endif |
57 | 59 | ||
58 | struct pci_reg_property { | ||
59 | struct pci_address addr; | ||
60 | u32 size_hi; | ||
61 | u32 size_lo; | ||
62 | }; | ||
63 | |||
64 | struct isa_reg_property { | ||
65 | u32 space; | ||
66 | u32 address; | ||
67 | u32 size; | ||
68 | }; | ||
69 | |||
70 | |||
71 | typedef int interpret_func(struct device_node *, unsigned long *, | ||
72 | int, int, int); | ||
73 | 60 | ||
74 | static int __initdata dt_root_addr_cells; | 61 | static int __initdata dt_root_addr_cells; |
75 | static int __initdata dt_root_size_cells; | 62 | static int __initdata dt_root_size_cells; |
@@ -311,6 +298,16 @@ static int __devinit finish_node_interrupts(struct device_node *np, | |||
311 | int i, j, n, sense; | 298 | int i, j, n, sense; |
312 | unsigned int *irq, virq; | 299 | unsigned int *irq, virq; |
313 | struct device_node *ic; | 300 | struct device_node *ic; |
301 | int trace = 0; | ||
302 | |||
303 | //#define TRACE(fmt...) do { if (trace) { printk(fmt); mdelay(1000); } } while(0) | ||
304 | #define TRACE(fmt...) | ||
305 | |||
306 | if (!strcmp(np->name, "smu-doorbell")) | ||
307 | trace = 1; | ||
308 | |||
309 | TRACE("Finishing SMU doorbell ! num_interrupt_controllers = %d\n", | ||
310 | num_interrupt_controllers); | ||
314 | 311 | ||
315 | if (num_interrupt_controllers == 0) { | 312 | if (num_interrupt_controllers == 0) { |
316 | /* | 313 | /* |
@@ -345,11 +342,12 @@ static int __devinit finish_node_interrupts(struct device_node *np, | |||
345 | } | 342 | } |
346 | 343 | ||
347 | ints = (unsigned int *) get_property(np, "interrupts", &intlen); | 344 | ints = (unsigned int *) get_property(np, "interrupts", &intlen); |
345 | TRACE("ints=%p, intlen=%d\n", ints, intlen); | ||
348 | if (ints == NULL) | 346 | if (ints == NULL) |
349 | return 0; | 347 | return 0; |
350 | intrcells = prom_n_intr_cells(np); | 348 | intrcells = prom_n_intr_cells(np); |
351 | intlen /= intrcells * sizeof(unsigned int); | 349 | intlen /= intrcells * sizeof(unsigned int); |
352 | 350 | TRACE("intrcells=%d, new intlen=%d\n", intrcells, intlen); | |
353 | np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start); | 351 | np->intrs = prom_alloc(intlen * sizeof(*(np->intrs)), mem_start); |
354 | if (!np->intrs) | 352 | if (!np->intrs) |
355 | return -ENOMEM; | 353 | return -ENOMEM; |
@@ -360,6 +358,7 @@ static int __devinit finish_node_interrupts(struct device_node *np, | |||
360 | intrcount = 0; | 358 | intrcount = 0; |
361 | for (i = 0; i < intlen; ++i, ints += intrcells) { | 359 | for (i = 0; i < intlen; ++i, ints += intrcells) { |
362 | n = map_interrupt(&irq, &ic, np, ints, intrcells); | 360 | n = map_interrupt(&irq, &ic, np, ints, intrcells); |
361 | TRACE("map, irq=%d, ic=%p, n=%d\n", irq, ic, n); | ||
363 | if (n <= 0) | 362 | if (n <= 0) |
364 | continue; | 363 | continue; |
365 | 364 | ||
@@ -370,6 +369,7 @@ static int __devinit finish_node_interrupts(struct device_node *np, | |||
370 | np->intrs[intrcount].sense = map_isa_senses[sense]; | 369 | np->intrs[intrcount].sense = map_isa_senses[sense]; |
371 | } else { | 370 | } else { |
372 | virq = virt_irq_create_mapping(irq[0]); | 371 | virq = virt_irq_create_mapping(irq[0]); |
372 | TRACE("virq=%d\n", virq); | ||
373 | #ifdef CONFIG_PPC64 | 373 | #ifdef CONFIG_PPC64 |
374 | if (virq == NO_IRQ) { | 374 | if (virq == NO_IRQ) { |
375 | printk(KERN_CRIT "Could not allocate interrupt" | 375 | printk(KERN_CRIT "Could not allocate interrupt" |
@@ -379,6 +379,12 @@ static int __devinit finish_node_interrupts(struct device_node *np, | |||
379 | #endif | 379 | #endif |
380 | np->intrs[intrcount].line = irq_offset_up(virq); | 380 | np->intrs[intrcount].line = irq_offset_up(virq); |
381 | sense = (n > 1)? (irq[1] & 3): 1; | 381 | sense = (n > 1)? (irq[1] & 3): 1; |
382 | |||
383 | /* Apple uses bits in there in a different way, let's | ||
384 | * only keep the real sense bit on macs | ||
385 | */ | ||
386 | if (_machine == PLATFORM_POWERMAC) | ||
387 | sense &= 0x1; | ||
382 | np->intrs[intrcount].sense = map_mpic_senses[sense]; | 388 | np->intrs[intrcount].sense = map_mpic_senses[sense]; |
383 | } | 389 | } |
384 | 390 | ||
@@ -388,12 +394,13 @@ static int __devinit finish_node_interrupts(struct device_node *np, | |||
388 | char *name = get_property(ic->parent, "name", NULL); | 394 | char *name = get_property(ic->parent, "name", NULL); |
389 | if (name && !strcmp(name, "u3")) | 395 | if (name && !strcmp(name, "u3")) |
390 | np->intrs[intrcount].line += 128; | 396 | np->intrs[intrcount].line += 128; |
391 | else if (!(name && !strcmp(name, "mac-io"))) | 397 | else if (!(name && (!strcmp(name, "mac-io") || |
398 | !strcmp(name, "u4")))) | ||
392 | /* ignore other cascaded controllers, such as | 399 | /* ignore other cascaded controllers, such as |
393 | the k2-sata-root */ | 400 | the k2-sata-root */ |
394 | break; | 401 | break; |
395 | } | 402 | } |
396 | #endif | 403 | #endif /* CONFIG_PPC64 */ |
397 | if (n > 2) { | 404 | if (n > 2) { |
398 | printk("hmmm, got %d intr cells for %s:", n, | 405 | printk("hmmm, got %d intr cells for %s:", n, |
399 | np->full_name); | 406 | np->full_name); |
@@ -408,234 +415,19 @@ static int __devinit finish_node_interrupts(struct device_node *np, | |||
408 | return 0; | 415 | return 0; |
409 | } | 416 | } |
410 | 417 | ||
411 | static int __devinit interpret_pci_props(struct device_node *np, | ||
412 | unsigned long *mem_start, | ||
413 | int naddrc, int nsizec, | ||
414 | int measure_only) | ||
415 | { | ||
416 | struct address_range *adr; | ||
417 | struct pci_reg_property *pci_addrs; | ||
418 | int i, l, n_addrs; | ||
419 | |||
420 | pci_addrs = (struct pci_reg_property *) | ||
421 | get_property(np, "assigned-addresses", &l); | ||
422 | if (!pci_addrs) | ||
423 | return 0; | ||
424 | |||
425 | n_addrs = l / sizeof(*pci_addrs); | ||
426 | |||
427 | adr = prom_alloc(n_addrs * sizeof(*adr), mem_start); | ||
428 | if (!adr) | ||
429 | return -ENOMEM; | ||
430 | |||
431 | if (measure_only) | ||
432 | return 0; | ||
433 | |||
434 | np->addrs = adr; | ||
435 | np->n_addrs = n_addrs; | ||
436 | |||
437 | for (i = 0; i < n_addrs; i++) { | ||
438 | adr[i].space = pci_addrs[i].addr.a_hi; | ||
439 | adr[i].address = pci_addrs[i].addr.a_lo | | ||
440 | ((u64)pci_addrs[i].addr.a_mid << 32); | ||
441 | adr[i].size = pci_addrs[i].size_lo; | ||
442 | } | ||
443 | |||
444 | return 0; | ||
445 | } | ||
446 | |||
447 | static int __init interpret_dbdma_props(struct device_node *np, | ||
448 | unsigned long *mem_start, | ||
449 | int naddrc, int nsizec, | ||
450 | int measure_only) | ||
451 | { | ||
452 | struct reg_property32 *rp; | ||
453 | struct address_range *adr; | ||
454 | unsigned long base_address; | ||
455 | int i, l; | ||
456 | struct device_node *db; | ||
457 | |||
458 | base_address = 0; | ||
459 | if (!measure_only) { | ||
460 | for (db = np->parent; db != NULL; db = db->parent) { | ||
461 | if (!strcmp(db->type, "dbdma") && db->n_addrs != 0) { | ||
462 | base_address = db->addrs[0].address; | ||
463 | break; | ||
464 | } | ||
465 | } | ||
466 | } | ||
467 | |||
468 | rp = (struct reg_property32 *) get_property(np, "reg", &l); | ||
469 | if (rp != 0 && l >= sizeof(struct reg_property32)) { | ||
470 | i = 0; | ||
471 | adr = (struct address_range *) (*mem_start); | ||
472 | while ((l -= sizeof(struct reg_property32)) >= 0) { | ||
473 | if (!measure_only) { | ||
474 | adr[i].space = 2; | ||
475 | adr[i].address = rp[i].address + base_address; | ||
476 | adr[i].size = rp[i].size; | ||
477 | } | ||
478 | ++i; | ||
479 | } | ||
480 | np->addrs = adr; | ||
481 | np->n_addrs = i; | ||
482 | (*mem_start) += i * sizeof(struct address_range); | ||
483 | } | ||
484 | |||
485 | return 0; | ||
486 | } | ||
487 | |||
488 | static int __init interpret_macio_props(struct device_node *np, | ||
489 | unsigned long *mem_start, | ||
490 | int naddrc, int nsizec, | ||
491 | int measure_only) | ||
492 | { | ||
493 | struct reg_property32 *rp; | ||
494 | struct address_range *adr; | ||
495 | unsigned long base_address; | ||
496 | int i, l; | ||
497 | struct device_node *db; | ||
498 | |||
499 | base_address = 0; | ||
500 | if (!measure_only) { | ||
501 | for (db = np->parent; db != NULL; db = db->parent) { | ||
502 | if (!strcmp(db->type, "mac-io") && db->n_addrs != 0) { | ||
503 | base_address = db->addrs[0].address; | ||
504 | break; | ||
505 | } | ||
506 | } | ||
507 | } | ||
508 | |||
509 | rp = (struct reg_property32 *) get_property(np, "reg", &l); | ||
510 | if (rp != 0 && l >= sizeof(struct reg_property32)) { | ||
511 | i = 0; | ||
512 | adr = (struct address_range *) (*mem_start); | ||
513 | while ((l -= sizeof(struct reg_property32)) >= 0) { | ||
514 | if (!measure_only) { | ||
515 | adr[i].space = 2; | ||
516 | adr[i].address = rp[i].address + base_address; | ||
517 | adr[i].size = rp[i].size; | ||
518 | } | ||
519 | ++i; | ||
520 | } | ||
521 | np->addrs = adr; | ||
522 | np->n_addrs = i; | ||
523 | (*mem_start) += i * sizeof(struct address_range); | ||
524 | } | ||
525 | |||
526 | return 0; | ||
527 | } | ||
528 | |||
529 | static int __init interpret_isa_props(struct device_node *np, | ||
530 | unsigned long *mem_start, | ||
531 | int naddrc, int nsizec, | ||
532 | int measure_only) | ||
533 | { | ||
534 | struct isa_reg_property *rp; | ||
535 | struct address_range *adr; | ||
536 | int i, l; | ||
537 | |||
538 | rp = (struct isa_reg_property *) get_property(np, "reg", &l); | ||
539 | if (rp != 0 && l >= sizeof(struct isa_reg_property)) { | ||
540 | i = 0; | ||
541 | adr = (struct address_range *) (*mem_start); | ||
542 | while ((l -= sizeof(struct isa_reg_property)) >= 0) { | ||
543 | if (!measure_only) { | ||
544 | adr[i].space = rp[i].space; | ||
545 | adr[i].address = rp[i].address; | ||
546 | adr[i].size = rp[i].size; | ||
547 | } | ||
548 | ++i; | ||
549 | } | ||
550 | np->addrs = adr; | ||
551 | np->n_addrs = i; | ||
552 | (*mem_start) += i * sizeof(struct address_range); | ||
553 | } | ||
554 | |||
555 | return 0; | ||
556 | } | ||
557 | |||
558 | static int __init interpret_root_props(struct device_node *np, | ||
559 | unsigned long *mem_start, | ||
560 | int naddrc, int nsizec, | ||
561 | int measure_only) | ||
562 | { | ||
563 | struct address_range *adr; | ||
564 | int i, l; | ||
565 | unsigned int *rp; | ||
566 | int rpsize = (naddrc + nsizec) * sizeof(unsigned int); | ||
567 | |||
568 | rp = (unsigned int *) get_property(np, "reg", &l); | ||
569 | if (rp != 0 && l >= rpsize) { | ||
570 | i = 0; | ||
571 | adr = (struct address_range *) (*mem_start); | ||
572 | while ((l -= rpsize) >= 0) { | ||
573 | if (!measure_only) { | ||
574 | adr[i].space = 0; | ||
575 | adr[i].address = rp[naddrc - 1]; | ||
576 | adr[i].size = rp[naddrc + nsizec - 1]; | ||
577 | } | ||
578 | ++i; | ||
579 | rp += naddrc + nsizec; | ||
580 | } | ||
581 | np->addrs = adr; | ||
582 | np->n_addrs = i; | ||
583 | (*mem_start) += i * sizeof(struct address_range); | ||
584 | } | ||
585 | |||
586 | return 0; | ||
587 | } | ||
588 | |||
589 | static int __devinit finish_node(struct device_node *np, | 418 | static int __devinit finish_node(struct device_node *np, |
590 | unsigned long *mem_start, | 419 | unsigned long *mem_start, |
591 | interpret_func *ifunc, | ||
592 | int naddrc, int nsizec, | ||
593 | int measure_only) | 420 | int measure_only) |
594 | { | 421 | { |
595 | struct device_node *child; | 422 | struct device_node *child; |
596 | int *ip, rc = 0; | 423 | int rc = 0; |
597 | |||
598 | /* get the device addresses and interrupts */ | ||
599 | if (ifunc != NULL) | ||
600 | rc = ifunc(np, mem_start, naddrc, nsizec, measure_only); | ||
601 | if (rc) | ||
602 | goto out; | ||
603 | 424 | ||
604 | rc = finish_node_interrupts(np, mem_start, measure_only); | 425 | rc = finish_node_interrupts(np, mem_start, measure_only); |
605 | if (rc) | 426 | if (rc) |
606 | goto out; | 427 | goto out; |
607 | 428 | ||
608 | /* Look for #address-cells and #size-cells properties. */ | ||
609 | ip = (int *) get_property(np, "#address-cells", NULL); | ||
610 | if (ip != NULL) | ||
611 | naddrc = *ip; | ||
612 | ip = (int *) get_property(np, "#size-cells", NULL); | ||
613 | if (ip != NULL) | ||
614 | nsizec = *ip; | ||
615 | |||
616 | if (!strcmp(np->name, "device-tree") || np->parent == NULL) | ||
617 | ifunc = interpret_root_props; | ||
618 | else if (np->type == 0) | ||
619 | ifunc = NULL; | ||
620 | else if (!strcmp(np->type, "pci") || !strcmp(np->type, "vci")) | ||
621 | ifunc = interpret_pci_props; | ||
622 | else if (!strcmp(np->type, "dbdma")) | ||
623 | ifunc = interpret_dbdma_props; | ||
624 | else if (!strcmp(np->type, "mac-io") || ifunc == interpret_macio_props) | ||
625 | ifunc = interpret_macio_props; | ||
626 | else if (!strcmp(np->type, "isa")) | ||
627 | ifunc = interpret_isa_props; | ||
628 | else if (!strcmp(np->name, "uni-n") || !strcmp(np->name, "u3")) | ||
629 | ifunc = interpret_root_props; | ||
630 | else if (!((ifunc == interpret_dbdma_props | ||
631 | || ifunc == interpret_macio_props) | ||
632 | && (!strcmp(np->type, "escc") | ||
633 | || !strcmp(np->type, "media-bay")))) | ||
634 | ifunc = NULL; | ||
635 | |||
636 | for (child = np->child; child != NULL; child = child->sibling) { | 429 | for (child = np->child; child != NULL; child = child->sibling) { |
637 | rc = finish_node(child, mem_start, ifunc, | 430 | rc = finish_node(child, mem_start, measure_only); |
638 | naddrc, nsizec, measure_only); | ||
639 | if (rc) | 431 | if (rc) |
640 | goto out; | 432 | goto out; |
641 | } | 433 | } |
@@ -697,10 +489,10 @@ void __init finish_device_tree(void) | |||
697 | * reason and then remove those additional 16 bytes | 489 | * reason and then remove those additional 16 bytes |
698 | */ | 490 | */ |
699 | size = 16; | 491 | size = 16; |
700 | finish_node(allnodes, &size, NULL, 0, 0, 1); | 492 | finish_node(allnodes, &size, 1); |
701 | size -= 16; | 493 | size -= 16; |
702 | end = start = (unsigned long) __va(lmb_alloc(size, 128)); | 494 | end = start = (unsigned long) __va(lmb_alloc(size, 128)); |
703 | finish_node(allnodes, &end, NULL, 0, 0, 0); | 495 | finish_node(allnodes, &end, 0); |
704 | BUG_ON(end != start + size); | 496 | BUG_ON(end != start + size); |
705 | 497 | ||
706 | DBG(" <- finish_device_tree\n"); | 498 | DBG(" <- finish_device_tree\n"); |
@@ -1197,6 +989,16 @@ static int __init early_init_dt_scan_chosen(unsigned long node, | |||
1197 | } | 989 | } |
1198 | #endif /* CONFIG_PPC_RTAS */ | 990 | #endif /* CONFIG_PPC_RTAS */ |
1199 | 991 | ||
992 | #ifdef CONFIG_KEXEC | ||
993 | lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL); | ||
994 | if (lprop) | ||
995 | crashk_res.start = *lprop; | ||
996 | |||
997 | lprop = (u64*)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL); | ||
998 | if (lprop) | ||
999 | crashk_res.end = crashk_res.start + *lprop - 1; | ||
1000 | #endif | ||
1001 | |||
1200 | /* break now */ | 1002 | /* break now */ |
1201 | return 1; | 1003 | return 1; |
1202 | } | 1004 | } |
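The two /chosen properties read in this hunk carry the crash-kernel reservation into the flattened-tree scan; the producer side appears later in this patch in prom_init.c. A minimal sketch of the same consumer, factored into a hypothetical helper (of_get_flat_dt_prop and the kexec crashk_res resource are assumed to behave as in the code above):

#ifdef CONFIG_KEXEC
/* Sketch: pull the crash-kernel window out of /chosen (illustrative only). */
static void __init read_crashk_window(unsigned long node)
{
	u64 *base, *size;

	base = (u64 *)of_get_flat_dt_prop(node, "linux,crashkernel-base", NULL);
	size = (u64 *)of_get_flat_dt_prop(node, "linux,crashkernel-size", NULL);
	if (base && size) {
		crashk_res.start = *base;
		crashk_res.end   = *base + *size - 1;
	}
}
#endif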
@@ -1263,7 +1065,9 @@ static int __init early_init_dt_scan_memory(unsigned long node, | |||
1263 | } else if (strcmp(type, "memory") != 0) | 1065 | } else if (strcmp(type, "memory") != 0) |
1264 | return 0; | 1066 | return 0; |
1265 | 1067 | ||
1266 | reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l); | 1068 | reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", &l); |
1069 | if (reg == NULL) | ||
1070 | reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l); | ||
1267 | if (reg == NULL) | 1071 | if (reg == NULL) |
1268 | return 0; | 1072 | return 0; |
1269 | 1073 | ||
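The "linux,usable-memory" property lets a kdump capture kernel see only the memory it is allowed to touch; when the property is absent, the normal "reg" property is used unchanged. A sketch of that lookup order, pulled out into a hypothetical helper (cell_t and of_get_flat_dt_prop as used above):

/* Sketch: prefer the kdump-restricted range, fall back to the full one. */
static cell_t *__init get_usable_memory(unsigned long node, unsigned long *len)
{
	cell_t *reg;

	reg = (cell_t *)of_get_flat_dt_prop(node, "linux,usable-memory", len);
	if (reg == NULL)
		reg = (cell_t *)of_get_flat_dt_prop(node, "reg", len);
	return reg;
}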
@@ -1335,11 +1139,14 @@ void __init early_init_devtree(void *params) | |||
1335 | of_scan_flat_dt(early_init_dt_scan_memory, NULL); | 1139 | of_scan_flat_dt(early_init_dt_scan_memory, NULL); |
1336 | lmb_enforce_memory_limit(memory_limit); | 1140 | lmb_enforce_memory_limit(memory_limit); |
1337 | lmb_analyze(); | 1141 | lmb_analyze(); |
1338 | lmb_reserve(0, __pa(klimit)); | ||
1339 | 1142 | ||
1340 | DBG("Phys. mem: %lx\n", lmb_phys_mem_size()); | 1143 | DBG("Phys. mem: %lx\n", lmb_phys_mem_size()); |
1341 | 1144 | ||
1342 | /* Reserve LMB regions used by kernel, initrd, dt, etc... */ | 1145 | /* Reserve LMB regions used by kernel, initrd, dt, etc... */ |
1146 | lmb_reserve(PHYSICAL_START, __pa(klimit) - PHYSICAL_START); | ||
1147 | #ifdef CONFIG_CRASH_DUMP | ||
1148 | lmb_reserve(0, KDUMP_RESERVE_LIMIT); | ||
1149 | #endif | ||
1343 | early_reserve_mem(); | 1150 | early_reserve_mem(); |
1344 | 1151 | ||
1345 | DBG("Scanning CPUs ...\n"); | 1152 | DBG("Scanning CPUs ...\n"); |
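The reservation now starts at PHYSICAL_START rather than 0 because a CONFIG_CRASH_DUMP kernel is linked and loaded at a non-zero physical address (32 MB in this series); the extra low reservation presumably keeps the capture kernel's allocator away from the region holding the first kernel's vectors and the kdump trampolines. An illustrative layout, not a specification:

/*
 * Rough physical layout for a CONFIG_CRASH_DUMP capture kernel:
 *
 *   0 .. KDUMP_RESERVE_LIMIT      reserved; not handed out by the LMB allocator
 *   PHYSICAL_START .. __pa(klimit) capture kernel image (32 MB link address here)
 */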
@@ -1802,7 +1609,6 @@ static void of_node_release(struct kref *kref) | |||
1802 | prop = next; | 1609 | prop = next; |
1803 | } | 1610 | } |
1804 | kfree(node->intrs); | 1611 | kfree(node->intrs); |
1805 | kfree(node->addrs); | ||
1806 | kfree(node->full_name); | 1612 | kfree(node->full_name); |
1807 | kfree(node->data); | 1613 | kfree(node->data); |
1808 | kfree(node); | 1614 | kfree(node); |
@@ -1884,9 +1690,7 @@ void of_detach_node(const struct device_node *np) | |||
1884 | * This should probably be split up into smaller chunks. | 1690 | * This should probably be split up into smaller chunks. |
1885 | */ | 1691 | */ |
1886 | 1692 | ||
1887 | static int of_finish_dynamic_node(struct device_node *node, | 1693 | static int of_finish_dynamic_node(struct device_node *node) |
1888 | unsigned long *unused1, int unused2, | ||
1889 | int unused3, int unused4) | ||
1890 | { | 1694 | { |
1891 | struct device_node *parent = of_get_parent(node); | 1695 | struct device_node *parent = of_get_parent(node); |
1892 | int err = 0; | 1696 | int err = 0; |
@@ -1907,7 +1711,8 @@ static int of_finish_dynamic_node(struct device_node *node, | |||
1907 | return -ENODEV; | 1711 | return -ENODEV; |
1908 | 1712 | ||
1909 | /* fix up new node's linux_phandle field */ | 1713 | /* fix up new node's linux_phandle field */ |
1910 | if ((ibm_phandle = (unsigned int *)get_property(node, "ibm,phandle", NULL))) | 1714 | if ((ibm_phandle = (unsigned int *)get_property(node, |
1715 | "ibm,phandle", NULL))) | ||
1911 | node->linux_phandle = *ibm_phandle; | 1716 | node->linux_phandle = *ibm_phandle; |
1912 | 1717 | ||
1913 | out: | 1718 | out: |
@@ -1922,7 +1727,9 @@ static int prom_reconfig_notifier(struct notifier_block *nb, | |||
1922 | 1727 | ||
1923 | switch (action) { | 1728 | switch (action) { |
1924 | case PSERIES_RECONFIG_ADD: | 1729 | case PSERIES_RECONFIG_ADD: |
1925 | err = finish_node(node, NULL, of_finish_dynamic_node, 0, 0, 0); | 1730 | err = of_finish_dynamic_node(node); |
1731 | if (!err) | ||
1732 | finish_node(node, NULL, 0); | ||
1926 | if (err < 0) { | 1733 | if (err < 0) { |
1927 | printk(KERN_ERR "finish_node returned %d\n", err); | 1734 | printk(KERN_ERR "finish_node returned %d\n", err); |
1928 | err = NOTIFY_BAD; | 1735 | err = NOTIFY_BAD; |
@@ -1996,175 +1803,4 @@ int prom_add_property(struct device_node* np, struct property* prop) | |||
1996 | return 0; | 1803 | return 0; |
1997 | } | 1804 | } |
1998 | 1805 | ||
1999 | /* I quickly hacked that one, check against spec ! */ | ||
2000 | static inline unsigned long | ||
2001 | bus_space_to_resource_flags(unsigned int bus_space) | ||
2002 | { | ||
2003 | u8 space = (bus_space >> 24) & 0xf; | ||
2004 | if (space == 0) | ||
2005 | space = 0x02; | ||
2006 | if (space == 0x02) | ||
2007 | return IORESOURCE_MEM; | ||
2008 | else if (space == 0x01) | ||
2009 | return IORESOURCE_IO; | ||
2010 | else { | ||
2011 | printk(KERN_WARNING "prom.c: bus_space_to_resource_flags(), space: %x\n", | ||
2012 | bus_space); | ||
2013 | return 0; | ||
2014 | } | ||
2015 | } | ||
2016 | |||
2017 | #ifdef CONFIG_PCI | ||
2018 | static struct resource *find_parent_pci_resource(struct pci_dev* pdev, | ||
2019 | struct address_range *range) | ||
2020 | { | ||
2021 | unsigned long mask; | ||
2022 | int i; | ||
2023 | |||
2024 | /* Check this one */ | ||
2025 | mask = bus_space_to_resource_flags(range->space); | ||
2026 | for (i=0; i<DEVICE_COUNT_RESOURCE; i++) { | ||
2027 | if ((pdev->resource[i].flags & mask) == mask && | ||
2028 | pdev->resource[i].start <= range->address && | ||
2029 | pdev->resource[i].end > range->address) { | ||
2030 | if ((range->address + range->size - 1) > pdev->resource[i].end) { | ||
2031 | /* Add better message */ | ||
2032 | printk(KERN_WARNING "PCI/OF resource overlap !\n"); | ||
2033 | return NULL; | ||
2034 | } | ||
2035 | break; | ||
2036 | } | ||
2037 | } | ||
2038 | if (i == DEVICE_COUNT_RESOURCE) | ||
2039 | return NULL; | ||
2040 | return &pdev->resource[i]; | ||
2041 | } | ||
2042 | |||
2043 | /* | ||
2044 | * Request an OF device resource. Currently handles child of PCI devices, | ||
2045 | * or other nodes attached to the root node. Ultimately, put some | ||
2046 | * link to resources in the OF node. | ||
2047 | */ | ||
2048 | struct resource *request_OF_resource(struct device_node* node, int index, | ||
2049 | const char* name_postfix) | ||
2050 | { | ||
2051 | struct pci_dev* pcidev; | ||
2052 | u8 pci_bus, pci_devfn; | ||
2053 | unsigned long iomask; | ||
2054 | struct device_node* nd; | ||
2055 | struct resource* parent; | ||
2056 | struct resource *res = NULL; | ||
2057 | int nlen, plen; | ||
2058 | |||
2059 | if (index >= node->n_addrs) | ||
2060 | goto fail; | ||
2061 | |||
2062 | /* Sanity check on bus space */ | ||
2063 | iomask = bus_space_to_resource_flags(node->addrs[index].space); | ||
2064 | if (iomask & IORESOURCE_MEM) | ||
2065 | parent = &iomem_resource; | ||
2066 | else if (iomask & IORESOURCE_IO) | ||
2067 | parent = &ioport_resource; | ||
2068 | else | ||
2069 | goto fail; | ||
2070 | |||
2071 | /* Find a PCI parent if any */ | ||
2072 | nd = node; | ||
2073 | pcidev = NULL; | ||
2074 | while (nd) { | ||
2075 | if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn)) | ||
2076 | pcidev = pci_find_slot(pci_bus, pci_devfn); | ||
2077 | if (pcidev) break; | ||
2078 | nd = nd->parent; | ||
2079 | } | ||
2080 | if (pcidev) | ||
2081 | parent = find_parent_pci_resource(pcidev, &node->addrs[index]); | ||
2082 | if (!parent) { | ||
2083 | printk(KERN_WARNING "request_OF_resource(%s), parent not found\n", | ||
2084 | node->name); | ||
2085 | goto fail; | ||
2086 | } | ||
2087 | 1806 | ||
2088 | res = __request_region(parent, node->addrs[index].address, | ||
2089 | node->addrs[index].size, NULL); | ||
2090 | if (!res) | ||
2091 | goto fail; | ||
2092 | nlen = strlen(node->name); | ||
2093 | plen = name_postfix ? strlen(name_postfix) : 0; | ||
2094 | res->name = (const char *)kmalloc(nlen+plen+1, GFP_KERNEL); | ||
2095 | if (res->name) { | ||
2096 | strcpy((char *)res->name, node->name); | ||
2097 | if (plen) | ||
2098 | strcpy((char *)res->name+nlen, name_postfix); | ||
2099 | } | ||
2100 | return res; | ||
2101 | fail: | ||
2102 | return NULL; | ||
2103 | } | ||
2104 | EXPORT_SYMBOL(request_OF_resource); | ||
2105 | |||
2106 | int release_OF_resource(struct device_node *node, int index) | ||
2107 | { | ||
2108 | struct pci_dev* pcidev; | ||
2109 | u8 pci_bus, pci_devfn; | ||
2110 | unsigned long iomask, start, end; | ||
2111 | struct device_node* nd; | ||
2112 | struct resource* parent; | ||
2113 | struct resource *res = NULL; | ||
2114 | |||
2115 | if (index >= node->n_addrs) | ||
2116 | return -EINVAL; | ||
2117 | |||
2118 | /* Sanity check on bus space */ | ||
2119 | iomask = bus_space_to_resource_flags(node->addrs[index].space); | ||
2120 | if (iomask & IORESOURCE_MEM) | ||
2121 | parent = &iomem_resource; | ||
2122 | else if (iomask & IORESOURCE_IO) | ||
2123 | parent = &ioport_resource; | ||
2124 | else | ||
2125 | return -EINVAL; | ||
2126 | |||
2127 | /* Find a PCI parent if any */ | ||
2128 | nd = node; | ||
2129 | pcidev = NULL; | ||
2130 | while(nd) { | ||
2131 | if (!pci_device_from_OF_node(nd, &pci_bus, &pci_devfn)) | ||
2132 | pcidev = pci_find_slot(pci_bus, pci_devfn); | ||
2133 | if (pcidev) break; | ||
2134 | nd = nd->parent; | ||
2135 | } | ||
2136 | if (pcidev) | ||
2137 | parent = find_parent_pci_resource(pcidev, &node->addrs[index]); | ||
2138 | if (!parent) { | ||
2139 | printk(KERN_WARNING "release_OF_resource(%s), parent not found\n", | ||
2140 | node->name); | ||
2141 | return -ENODEV; | ||
2142 | } | ||
2143 | |||
2144 | /* Find us in the parent and its childs */ | ||
2145 | res = parent->child; | ||
2146 | start = node->addrs[index].address; | ||
2147 | end = start + node->addrs[index].size - 1; | ||
2148 | while (res) { | ||
2149 | if (res->start == start && res->end == end && | ||
2150 | (res->flags & IORESOURCE_BUSY)) | ||
2151 | break; | ||
2152 | if (res->start <= start && res->end >= end) | ||
2153 | res = res->child; | ||
2154 | else | ||
2155 | res = res->sibling; | ||
2156 | } | ||
2157 | if (!res) | ||
2158 | return -ENODEV; | ||
2159 | |||
2160 | if (res->name) { | ||
2161 | kfree(res->name); | ||
2162 | res->name = NULL; | ||
2163 | } | ||
2164 | release_resource(res); | ||
2165 | kfree(res); | ||
2166 | |||
2167 | return 0; | ||
2168 | } | ||
2169 | EXPORT_SYMBOL(release_OF_resource); | ||
2170 | #endif /* CONFIG_PCI */ | ||
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c index bcdc209dca85..e381f2fc121c 100644 --- a/arch/powerpc/kernel/prom_init.c +++ b/arch/powerpc/kernel/prom_init.c | |||
@@ -192,6 +192,11 @@ static unsigned long __initdata alloc_bottom; | |||
192 | static unsigned long __initdata rmo_top; | 192 | static unsigned long __initdata rmo_top; |
193 | static unsigned long __initdata ram_top; | 193 | static unsigned long __initdata ram_top; |
194 | 194 | ||
195 | #ifdef CONFIG_KEXEC | ||
196 | static unsigned long __initdata prom_crashk_base; | ||
197 | static unsigned long __initdata prom_crashk_size; | ||
198 | #endif | ||
199 | |||
195 | static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE]; | 200 | static struct mem_map_entry __initdata mem_reserve_map[MEM_RESERVE_MAP_SIZE]; |
196 | static int __initdata mem_reserve_cnt; | 201 | static int __initdata mem_reserve_cnt; |
197 | 202 | ||
@@ -553,7 +558,8 @@ unsigned long prom_memparse(const char *ptr, const char **retptr) | |||
553 | static void __init early_cmdline_parse(void) | 558 | static void __init early_cmdline_parse(void) |
554 | { | 559 | { |
555 | struct prom_t *_prom = &RELOC(prom); | 560 | struct prom_t *_prom = &RELOC(prom); |
556 | char *opt, *p; | 561 | const char *opt; |
562 | char *p; | ||
557 | int l = 0; | 563 | int l = 0; |
558 | 564 | ||
559 | RELOC(prom_cmd_line[0]) = 0; | 565 | RELOC(prom_cmd_line[0]) = 0; |
@@ -590,6 +596,34 @@ static void __init early_cmdline_parse(void) | |||
590 | RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000); | 596 | RELOC(prom_memory_limit) = ALIGN(RELOC(prom_memory_limit), 0x1000000); |
591 | #endif | 597 | #endif |
592 | } | 598 | } |
599 | |||
600 | #ifdef CONFIG_KEXEC | ||
601 | /* | ||
602 | * crashkernel=size@addr specifies the location to reserve for | ||
603 | * the crash kernel. | ||
604 | */ | ||
605 | opt = strstr(RELOC(prom_cmd_line), RELOC("crashkernel=")); | ||
606 | if (opt) { | ||
607 | opt += 12; | ||
608 | RELOC(prom_crashk_size) = prom_memparse(opt, &opt); | ||
609 | |||
610 | if (ALIGN(RELOC(prom_crashk_size), 0x1000000) != | ||
611 | RELOC(prom_crashk_size)) { | ||
612 | prom_printf("Warning: crashkernel size is not " | ||
613 | "aligned to 16MB\n"); | ||
614 | } | ||
615 | |||
616 | /* | ||
617 | * At present, the crash kernel always runs at 32MB. | ||
618 | * Just ignore whatever the user passed. | ||
619 | */ | ||
620 | RELOC(prom_crashk_base) = 0x2000000; | ||
621 | if (*opt == '@') { | ||
622 | prom_printf("Warning: PPC64 kdump kernel always runs " | ||
623 | "at 32 MB\n"); | ||
624 | } | ||
625 | } | ||
626 | #endif | ||
593 | } | 627 | } |
594 | 628 | ||
595 | #ifdef CONFIG_PPC_PSERIES | 629 | #ifdef CONFIG_PPC_PSERIES |
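The crashkernel= option is parsed before the kernel proper is relocated, hence the RELOC() wrappers above. A stripped-down sketch of the same parse in ordinary C, with memparse standing in for prom_memparse and the 32 MB base hard-coded as this series does:

/* Sketch: parse "crashkernel=128M@addr"; the @addr part is ignored here. */
static void parse_crashkernel(char *cmdline,
			      unsigned long *base, unsigned long *size)
{
	char *opt = strstr(cmdline, "crashkernel=");

	if (!opt)
		return;
	opt += 12;			/* skip "crashkernel=" */
	*size = memparse(opt, &opt);
	*base = 0x2000000;		/* this series pins the capture kernel at 32 MB */
	if (*opt == '@')
		printk(KERN_WARNING "crashkernel: address ignored, using 32 MB\n");
}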
@@ -1011,6 +1045,12 @@ static void __init prom_init_mem(void) | |||
1011 | prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high)); | 1045 | prom_printf(" alloc_top_hi : %x\n", RELOC(alloc_top_high)); |
1012 | prom_printf(" rmo_top : %x\n", RELOC(rmo_top)); | 1046 | prom_printf(" rmo_top : %x\n", RELOC(rmo_top)); |
1013 | prom_printf(" ram_top : %x\n", RELOC(ram_top)); | 1047 | prom_printf(" ram_top : %x\n", RELOC(ram_top)); |
1048 | #ifdef CONFIG_KEXEC | ||
1049 | if (RELOC(prom_crashk_base)) { | ||
1050 | prom_printf(" crashk_base : %x\n", RELOC(prom_crashk_base)); | ||
1051 | prom_printf(" crashk_size : %x\n", RELOC(prom_crashk_size)); | ||
1052 | } | ||
1053 | #endif | ||
1014 | } | 1054 | } |
1015 | 1055 | ||
1016 | 1056 | ||
@@ -1500,6 +1540,8 @@ static int __init prom_find_machine_type(void) | |||
1500 | #ifdef CONFIG_PPC64 | 1540 | #ifdef CONFIG_PPC64 |
1501 | if (strstr(p, RELOC("Momentum,Maple"))) | 1541 | if (strstr(p, RELOC("Momentum,Maple"))) |
1502 | return PLATFORM_MAPLE; | 1542 | return PLATFORM_MAPLE; |
1543 | if (strstr(p, RELOC("IBM,CPB"))) | ||
1544 | return PLATFORM_CELL; | ||
1503 | #endif | 1545 | #endif |
1504 | i += sl + 1; | 1546 | i += sl + 1; |
1505 | } | 1547 | } |
@@ -1994,7 +2036,7 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4) | |||
1994 | if (r3 && r4 && r4 != 0xdeadbeef) { | 2036 | if (r3 && r4 && r4 != 0xdeadbeef) { |
1995 | unsigned long val; | 2037 | unsigned long val; |
1996 | 2038 | ||
1997 | RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3; | 2039 | RELOC(prom_initrd_start) = is_kernel_addr(r3) ? __pa(r3) : r3; |
1998 | RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4; | 2040 | RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4; |
1999 | 2041 | ||
2000 | val = RELOC(prom_initrd_start); | 2042 | val = RELOC(prom_initrd_start); |
@@ -2094,6 +2136,10 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, | |||
2094 | */ | 2136 | */ |
2095 | prom_init_mem(); | 2137 | prom_init_mem(); |
2096 | 2138 | ||
2139 | #ifdef CONFIG_KEXEC | ||
2140 | if (RELOC(prom_crashk_base)) | ||
2141 | reserve_mem(RELOC(prom_crashk_base), RELOC(prom_crashk_size)); | ||
2142 | #endif | ||
2097 | /* | 2143 | /* |
2098 | * Determine which cpu is actually running right _now_ | 2144 | * Determine which cpu is actually running right _now_ |
2099 | */ | 2145 | */ |
@@ -2150,6 +2196,16 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, | |||
2150 | } | 2196 | } |
2151 | #endif | 2197 | #endif |
2152 | 2198 | ||
2199 | #ifdef CONFIG_KEXEC | ||
2200 | if (RELOC(prom_crashk_base)) { | ||
2201 | prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-base", | ||
2202 | PTRRELOC(&prom_crashk_base), | ||
2203 | sizeof(RELOC(prom_crashk_base))); | ||
2204 | prom_setprop(_prom->chosen, "/chosen", "linux,crashkernel-size", | ||
2205 | PTRRELOC(&prom_crashk_size), | ||
2206 | sizeof(RELOC(prom_crashk_size))); | ||
2207 | } | ||
2208 | #endif | ||
2153 | /* | 2209 | /* |
2154 | * Fixup any known bugs in the device-tree | 2210 | * Fixup any known bugs in the device-tree |
2155 | */ | 2211 | */ |
diff --git a/arch/powerpc/kernel/prom_parse.c b/arch/powerpc/kernel/prom_parse.c new file mode 100644 index 000000000000..309ae1d5fa77 --- /dev/null +++ b/arch/powerpc/kernel/prom_parse.c | |||
@@ -0,0 +1,547 @@ | |||
1 | #undef DEBUG | ||
2 | |||
3 | #include <linux/kernel.h> | ||
4 | #include <linux/string.h> | ||
5 | #include <linux/pci_regs.h> | ||
6 | #include <linux/module.h> | ||
7 | #include <linux/ioport.h> | ||
8 | #include <asm/prom.h> | ||
9 | #include <asm/pci-bridge.h> | ||
10 | |||
11 | #ifdef DEBUG | ||
12 | #define DBG(fmt...) do { printk(fmt); } while(0) | ||
13 | #else | ||
14 | #define DBG(fmt...) do { } while(0) | ||
15 | #endif | ||
16 | |||
17 | #ifdef CONFIG_PPC64 | ||
18 | #define PRu64 "%lx" | ||
19 | #else | ||
20 | #define PRu64 "%llx" | ||
21 | #endif | ||
22 | |||
23 | /* Max address size we deal with */ | ||
24 | #define OF_MAX_ADDR_CELLS 4 | ||
25 | #define OF_CHECK_COUNTS(na, ns) ((na) > 0 && (na) <= OF_MAX_ADDR_CELLS && \ | ||
26 | (ns) > 0) | ||
27 | |||
28 | /* Debug utility */ | ||
29 | #ifdef DEBUG | ||
30 | static void of_dump_addr(const char *s, u32 *addr, int na) | ||
31 | { | ||
32 | printk("%s", s); | ||
33 | while(na--) | ||
34 | printk(" %08x", *(addr++)); | ||
35 | printk("\n"); | ||
36 | } | ||
37 | #else | ||
38 | static void of_dump_addr(const char *s, u32 *addr, int na) { } | ||
39 | #endif | ||
40 | |||
41 | /* Read a big address */ | ||
42 | static inline u64 of_read_addr(u32 *cell, int size) | ||
43 | { | ||
44 | u64 r = 0; | ||
45 | while (size--) | ||
46 | r = (r << 32) | *(cell++); | ||
47 | return r; | ||
48 | } | ||
49 | |||
50 | /* Callbacks for bus specific translators */ | ||
51 | struct of_bus { | ||
52 | const char *name; | ||
53 | const char *addresses; | ||
54 | int (*match)(struct device_node *parent); | ||
55 | void (*count_cells)(struct device_node *child, | ||
56 | int *addrc, int *sizec); | ||
57 | u64 (*map)(u32 *addr, u32 *range, int na, int ns, int pna); | ||
58 | int (*translate)(u32 *addr, u64 offset, int na); | ||
59 | unsigned int (*get_flags)(u32 *addr); | ||
60 | }; | ||
61 | |||
62 | |||
63 | /* | ||
64 | * Default translator (generic bus) | ||
65 | */ | ||
66 | |||
67 | static void of_bus_default_count_cells(struct device_node *dev, | ||
68 | int *addrc, int *sizec) | ||
69 | { | ||
70 | if (addrc) | ||
71 | *addrc = prom_n_addr_cells(dev); | ||
72 | if (sizec) | ||
73 | *sizec = prom_n_size_cells(dev); | ||
74 | } | ||
75 | |||
76 | static u64 of_bus_default_map(u32 *addr, u32 *range, int na, int ns, int pna) | ||
77 | { | ||
78 | u64 cp, s, da; | ||
79 | |||
80 | cp = of_read_addr(range, na); | ||
81 | s = of_read_addr(range + na + pna, ns); | ||
82 | da = of_read_addr(addr, na); | ||
83 | |||
84 | DBG("OF: default map, cp="PRu64", s="PRu64", da="PRu64"\n", | ||
85 | cp, s, da); | ||
86 | |||
87 | if (da < cp || da >= (cp + s)) | ||
88 | return OF_BAD_ADDR; | ||
89 | return da - cp; | ||
90 | } | ||
91 | |||
92 | static int of_bus_default_translate(u32 *addr, u64 offset, int na) | ||
93 | { | ||
94 | u64 a = of_read_addr(addr, na); | ||
95 | memset(addr, 0, na * 4); | ||
96 | a += offset; | ||
97 | if (na > 1) | ||
98 | addr[na - 2] = a >> 32; | ||
99 | addr[na - 1] = a & 0xffffffffu; | ||
100 | |||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static unsigned int of_bus_default_get_flags(u32 *addr) | ||
105 | { | ||
106 | return IORESOURCE_MEM; | ||
107 | } | ||
108 | |||
109 | |||
110 | /* | ||
111 | * PCI bus specific translator | ||
112 | */ | ||
113 | |||
114 | static int of_bus_pci_match(struct device_node *np) | ||
115 | { | ||
116 | return !strcmp(np->type, "pci"); | ||
117 | } | ||
118 | |||
119 | static void of_bus_pci_count_cells(struct device_node *np, | ||
120 | int *addrc, int *sizec) | ||
121 | { | ||
122 | if (addrc) | ||
123 | *addrc = 3; | ||
124 | if (sizec) | ||
125 | *sizec = 2; | ||
126 | } | ||
127 | |||
128 | static u64 of_bus_pci_map(u32 *addr, u32 *range, int na, int ns, int pna) | ||
129 | { | ||
130 | u64 cp, s, da; | ||
131 | |||
132 | /* Check address type match */ | ||
133 | if ((addr[0] ^ range[0]) & 0x03000000) | ||
134 | return OF_BAD_ADDR; | ||
135 | |||
136 | /* Read address values, skipping high cell */ | ||
137 | cp = of_read_addr(range + 1, na - 1); | ||
138 | s = of_read_addr(range + na + pna, ns); | ||
139 | da = of_read_addr(addr + 1, na - 1); | ||
140 | |||
141 | DBG("OF: PCI map, cp="PRu64", s="PRu64", da="PRu64"\n", cp, s, da); | ||
142 | |||
143 | if (da < cp || da >= (cp + s)) | ||
144 | return OF_BAD_ADDR; | ||
145 | return da - cp; | ||
146 | } | ||
147 | |||
148 | static int of_bus_pci_translate(u32 *addr, u64 offset, int na) | ||
149 | { | ||
150 | return of_bus_default_translate(addr + 1, offset, na - 1); | ||
151 | } | ||
152 | |||
153 | static unsigned int of_bus_pci_get_flags(u32 *addr) | ||
154 | { | ||
155 | unsigned int flags = 0; | ||
156 | u32 w = addr[0]; | ||
157 | |||
158 | switch((w >> 24) & 0x03) { | ||
159 | case 0x01: | ||
160 | flags |= IORESOURCE_IO; | ||
161 | case 0x02: /* 32 bits */ | ||
162 | case 0x03: /* 64 bits */ | ||
163 | flags |= IORESOURCE_MEM; | ||
164 | } | ||
165 | if (w & 0x40000000) | ||
166 | flags |= IORESOURCE_PREFETCH; | ||
167 | return flags; | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * ISA bus specific translator | ||
172 | */ | ||
173 | |||
174 | static int of_bus_isa_match(struct device_node *np) | ||
175 | { | ||
176 | return !strcmp(np->name, "isa"); | ||
177 | } | ||
178 | |||
179 | static void of_bus_isa_count_cells(struct device_node *child, | ||
180 | int *addrc, int *sizec) | ||
181 | { | ||
182 | if (addrc) | ||
183 | *addrc = 2; | ||
184 | if (sizec) | ||
185 | *sizec = 1; | ||
186 | } | ||
187 | |||
188 | static u64 of_bus_isa_map(u32 *addr, u32 *range, int na, int ns, int pna) | ||
189 | { | ||
190 | u64 cp, s, da; | ||
191 | |||
192 | /* Check address type match */ | ||
193 | if ((addr[0] ^ range[0]) & 0x00000001) | ||
194 | return OF_BAD_ADDR; | ||
195 | |||
196 | /* Read address values, skipping high cell */ | ||
197 | cp = of_read_addr(range + 1, na - 1); | ||
198 | s = of_read_addr(range + na + pna, ns); | ||
199 | da = of_read_addr(addr + 1, na - 1); | ||
200 | |||
201 | DBG("OF: ISA map, cp="PRu64", s="PRu64", da="PRu64"\n", cp, s, da); | ||
202 | |||
203 | if (da < cp || da >= (cp + s)) | ||
204 | return OF_BAD_ADDR; | ||
205 | return da - cp; | ||
206 | } | ||
207 | |||
208 | static int of_bus_isa_translate(u32 *addr, u64 offset, int na) | ||
209 | { | ||
210 | return of_bus_default_translate(addr + 1, offset, na - 1); | ||
211 | } | ||
212 | |||
213 | static unsigned int of_bus_isa_get_flags(u32 *addr) | ||
214 | { | ||
215 | unsigned int flags = 0; | ||
216 | u32 w = addr[0]; | ||
217 | |||
218 | if (w & 1) | ||
219 | flags |= IORESOURCE_IO; | ||
220 | else | ||
221 | flags |= IORESOURCE_MEM; | ||
222 | return flags; | ||
223 | } | ||
224 | |||
225 | |||
226 | /* | ||
227 | * Array of bus specific translators | ||
228 | */ | ||
229 | |||
230 | static struct of_bus of_busses[] = { | ||
231 | /* PCI */ | ||
232 | { | ||
233 | .name = "pci", | ||
234 | .addresses = "assigned-addresses", | ||
235 | .match = of_bus_pci_match, | ||
236 | .count_cells = of_bus_pci_count_cells, | ||
237 | .map = of_bus_pci_map, | ||
238 | .translate = of_bus_pci_translate, | ||
239 | .get_flags = of_bus_pci_get_flags, | ||
240 | }, | ||
241 | /* ISA */ | ||
242 | { | ||
243 | .name = "isa", | ||
244 | .addresses = "reg", | ||
245 | .match = of_bus_isa_match, | ||
246 | .count_cells = of_bus_isa_count_cells, | ||
247 | .map = of_bus_isa_map, | ||
248 | .translate = of_bus_isa_translate, | ||
249 | .get_flags = of_bus_isa_get_flags, | ||
250 | }, | ||
251 | /* Default */ | ||
252 | { | ||
253 | .name = "default", | ||
254 | .addresses = "reg", | ||
255 | .match = NULL, | ||
256 | .count_cells = of_bus_default_count_cells, | ||
257 | .map = of_bus_default_map, | ||
258 | .translate = of_bus_default_translate, | ||
259 | .get_flags = of_bus_default_get_flags, | ||
260 | }, | ||
261 | }; | ||
262 | |||
263 | static struct of_bus *of_match_bus(struct device_node *np) | ||
264 | { | ||
265 | int i; | ||
266 | |||
267 | for (i = 0; i < ARRAY_SIZE(of_busses); i ++) | ||
268 | if (!of_busses[i].match || of_busses[i].match(np)) | ||
269 | return &of_busses[i]; | ||
270 | BUG(); | ||
271 | return NULL; | ||
272 | } | ||
273 | |||
274 | static int of_translate_one(struct device_node *parent, struct of_bus *bus, | ||
275 | struct of_bus *pbus, u32 *addr, | ||
276 | int na, int ns, int pna) | ||
277 | { | ||
278 | u32 *ranges; | ||
279 | unsigned int rlen; | ||
280 | int rone; | ||
281 | u64 offset = OF_BAD_ADDR; | ||
282 | |||
283 | /* Normally, an absence of a "ranges" property means we are | ||
284 | * crossing a non-translatable boundary, and thus the addresses | ||
285 | * below the current node cannot be converted to CPU physical ones. | ||
286 | * Unfortunately, while this is very clear in the spec, it's not | ||
287 | * what Apple understood, and they do have things like /uni-n or | ||
288 | * /ht nodes with no "ranges" property and a lot of perfectly | ||
289 | * usable mapped devices below them. Thus we treat the absence of | ||
290 | * "ranges" as equivalent to an empty "ranges" property which means | ||
291 | * a 1:1 translation at that level. It's up to the caller not to try | ||
292 | * to translate addresses that aren't supposed to be translated in | ||
293 | * the first place. --BenH. | ||
294 | */ | ||
295 | ranges = (u32 *)get_property(parent, "ranges", &rlen); | ||
296 | if (ranges == NULL || rlen == 0) { | ||
297 | offset = of_read_addr(addr, na); | ||
298 | memset(addr, 0, pna * 4); | ||
299 | DBG("OF: no ranges, 1:1 translation\n"); | ||
300 | goto finish; | ||
301 | } | ||
302 | |||
303 | DBG("OF: walking ranges...\n"); | ||
304 | |||
305 | /* Now walk through the ranges */ | ||
306 | rlen /= 4; | ||
307 | rone = na + pna + ns; | ||
308 | for (; rlen >= rone; rlen -= rone, ranges += rone) { | ||
309 | offset = bus->map(addr, ranges, na, ns, pna); | ||
310 | if (offset != OF_BAD_ADDR) | ||
311 | break; | ||
312 | } | ||
313 | if (offset == OF_BAD_ADDR) { | ||
314 | DBG("OF: not found !\n"); | ||
315 | return 1; | ||
316 | } | ||
317 | memcpy(addr, ranges + na, 4 * pna); | ||
318 | |||
319 | finish: | ||
320 | of_dump_addr("OF: parent translation for:", addr, pna); | ||
321 | DBG("OF: with offset: "PRu64"\n", offset); | ||
322 | |||
323 | /* Translate it into parent bus space */ | ||
324 | return pbus->translate(addr, offset, pna); | ||
325 | } | ||
326 | |||
327 | |||
328 | /* | ||
329 | * Translate an address from the device-tree into a CPU physical address, | ||
330 | * this walks up the tree and applies the various bus mappings on the | ||
331 | * way. | ||
332 | * | ||
333 | * Note: We consider that crossing any level with #size-cells == 0 to mean | ||
334 | * that translation is impossible (that is we are not dealing with a value | ||
335 | * that can be mapped to a cpu physical address). This is not really specified | ||
336 | * that way, but this is traditionally the way IBM at least do things | ||
337 | */ | ||
338 | u64 of_translate_address(struct device_node *dev, u32 *in_addr) | ||
339 | { | ||
340 | struct device_node *parent = NULL; | ||
341 | struct of_bus *bus, *pbus; | ||
342 | u32 addr[OF_MAX_ADDR_CELLS]; | ||
343 | int na, ns, pna, pns; | ||
344 | u64 result = OF_BAD_ADDR; | ||
345 | |||
346 | DBG("OF: ** translation for device %s **\n", dev->full_name); | ||
347 | |||
348 | /* Increase refcount at current level */ | ||
349 | of_node_get(dev); | ||
350 | |||
351 | /* Get parent & match bus type */ | ||
352 | parent = of_get_parent(dev); | ||
353 | if (parent == NULL) | ||
354 | goto bail; | ||
355 | bus = of_match_bus(parent); | ||
356 | |||
357 | /* Count address cells & copy address locally */ | ||
358 | bus->count_cells(dev, &na, &ns); | ||
359 | if (!OF_CHECK_COUNTS(na, ns)) { | ||
360 | printk(KERN_ERR "prom_parse: Bad cell count for %s\n", | ||
361 | dev->full_name); | ||
362 | goto bail; | ||
363 | } | ||
364 | memcpy(addr, in_addr, na * 4); | ||
365 | |||
366 | DBG("OF: bus is %s (na=%d, ns=%d) on %s\n", | ||
367 | bus->name, na, ns, parent->full_name); | ||
368 | of_dump_addr("OF: translating address:", addr, na); | ||
369 | |||
370 | /* Translate */ | ||
371 | for (;;) { | ||
372 | /* Switch to parent bus */ | ||
373 | of_node_put(dev); | ||
374 | dev = parent; | ||
375 | parent = of_get_parent(dev); | ||
376 | |||
377 | /* If root, we have finished */ | ||
378 | if (parent == NULL) { | ||
379 | DBG("OF: reached root node\n"); | ||
380 | result = of_read_addr(addr, na); | ||
381 | break; | ||
382 | } | ||
383 | |||
384 | /* Get new parent bus and counts */ | ||
385 | pbus = of_match_bus(parent); | ||
386 | pbus->count_cells(dev, &pna, &pns); | ||
387 | if (!OF_CHECK_COUNTS(pna, pns)) { | ||
388 | printk(KERN_ERR "prom_parse: Bad cell count for %s\n", | ||
389 | dev->full_name); | ||
390 | break; | ||
391 | } | ||
392 | |||
393 | DBG("OF: parent bus is %s (na=%d, ns=%d) on %s\n", | ||
394 | pbus->name, pna, pns, parent->full_name); | ||
395 | |||
396 | /* Apply bus translation */ | ||
397 | if (of_translate_one(dev, bus, pbus, addr, na, ns, pna)) | ||
398 | break; | ||
399 | |||
400 | /* Complete the move up one level */ | ||
401 | na = pna; | ||
402 | ns = pns; | ||
403 | bus = pbus; | ||
404 | |||
405 | of_dump_addr("OF: one level translation:", addr, na); | ||
406 | } | ||
407 | bail: | ||
408 | of_node_put(parent); | ||
409 | of_node_put(dev); | ||
410 | |||
411 | return result; | ||
412 | } | ||
413 | EXPORT_SYMBOL(of_translate_address); | ||
414 | |||
415 | u32 *of_get_address(struct device_node *dev, int index, u64 *size, | ||
416 | unsigned int *flags) | ||
417 | { | ||
418 | u32 *prop; | ||
419 | unsigned int psize; | ||
420 | struct device_node *parent; | ||
421 | struct of_bus *bus; | ||
422 | int onesize, i, na, ns; | ||
423 | |||
424 | /* Get parent & match bus type */ | ||
425 | parent = of_get_parent(dev); | ||
426 | if (parent == NULL) | ||
427 | return NULL; | ||
428 | bus = of_match_bus(parent); | ||
429 | bus->count_cells(dev, &na, &ns); | ||
430 | of_node_put(parent); | ||
431 | if (!OF_CHECK_COUNTS(na, ns)) | ||
432 | return NULL; | ||
433 | |||
434 | /* Get "reg" or "assigned-addresses" property */ | ||
435 | prop = (u32 *)get_property(dev, bus->addresses, &psize); | ||
436 | if (prop == NULL) | ||
437 | return NULL; | ||
438 | psize /= 4; | ||
439 | |||
440 | onesize = na + ns; | ||
441 | for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) | ||
442 | if (i == index) { | ||
443 | if (size) | ||
444 | *size = of_read_addr(prop + na, ns); | ||
445 | if (flags) | ||
446 | *flags = bus->get_flags(prop); | ||
447 | return prop; | ||
448 | } | ||
449 | return NULL; | ||
450 | } | ||
451 | EXPORT_SYMBOL(of_get_address); | ||
452 | |||
453 | u32 *of_get_pci_address(struct device_node *dev, int bar_no, u64 *size, | ||
454 | unsigned int *flags) | ||
455 | { | ||
456 | u32 *prop; | ||
457 | unsigned int psize; | ||
458 | struct device_node *parent; | ||
459 | struct of_bus *bus; | ||
460 | int onesize, i, na, ns; | ||
461 | |||
462 | /* Get parent & match bus type */ | ||
463 | parent = of_get_parent(dev); | ||
464 | if (parent == NULL) | ||
465 | return NULL; | ||
466 | bus = of_match_bus(parent); | ||
467 | if (strcmp(bus->name, "pci")) | ||
468 | return NULL; | ||
469 | bus->count_cells(dev, &na, &ns); | ||
470 | of_node_put(parent); | ||
471 | if (!OF_CHECK_COUNTS(na, ns)) | ||
472 | return NULL; | ||
473 | |||
474 | /* Get "reg" or "assigned-addresses" property */ | ||
475 | prop = (u32 *)get_property(dev, bus->addresses, &psize); | ||
476 | if (prop == NULL) | ||
477 | return NULL; | ||
478 | psize /= 4; | ||
479 | |||
480 | onesize = na + ns; | ||
481 | for (i = 0; psize >= onesize; psize -= onesize, prop += onesize, i++) | ||
482 | if ((prop[0] & 0xff) == ((bar_no * 4) + PCI_BASE_ADDRESS_0)) { | ||
483 | if (size) | ||
484 | *size = of_read_addr(prop + na, ns); | ||
485 | if (flags) | ||
486 | *flags = bus->get_flags(prop); | ||
487 | return prop; | ||
488 | } | ||
489 | return NULL; | ||
490 | } | ||
491 | EXPORT_SYMBOL(of_get_pci_address); | ||
492 | |||
493 | static int __of_address_to_resource(struct device_node *dev, u32 *addrp, | ||
494 | u64 size, unsigned int flags, | ||
495 | struct resource *r) | ||
496 | { | ||
497 | u64 taddr; | ||
498 | |||
499 | if ((flags & (IORESOURCE_IO | IORESOURCE_MEM)) == 0) | ||
500 | return -EINVAL; | ||
501 | taddr = of_translate_address(dev, addrp); | ||
502 | if (taddr == OF_BAD_ADDR) | ||
503 | return -EINVAL; | ||
504 | memset(r, 0, sizeof(struct resource)); | ||
505 | if (flags & IORESOURCE_IO) { | ||
506 | unsigned long port; | ||
507 | port = pci_address_to_pio(taddr); | ||
508 | if (port == (unsigned long)-1) | ||
509 | return -EINVAL; | ||
510 | r->start = port; | ||
511 | r->end = port + size - 1; | ||
512 | } else { | ||
513 | r->start = taddr; | ||
514 | r->end = taddr + size - 1; | ||
515 | } | ||
516 | r->flags = flags; | ||
517 | r->name = dev->name; | ||
518 | return 0; | ||
519 | } | ||
520 | |||
521 | int of_address_to_resource(struct device_node *dev, int index, | ||
522 | struct resource *r) | ||
523 | { | ||
524 | u32 *addrp; | ||
525 | u64 size; | ||
526 | unsigned int flags; | ||
527 | |||
528 | addrp = of_get_address(dev, index, &size, &flags); | ||
529 | if (addrp == NULL) | ||
530 | return -EINVAL; | ||
531 | return __of_address_to_resource(dev, addrp, size, flags, r); | ||
532 | } | ||
533 | EXPORT_SYMBOL_GPL(of_address_to_resource); | ||
534 | |||
535 | int of_pci_address_to_resource(struct device_node *dev, int bar, | ||
536 | struct resource *r) | ||
537 | { | ||
538 | u32 *addrp; | ||
539 | u64 size; | ||
540 | unsigned int flags; | ||
541 | |||
542 | addrp = of_get_pci_address(dev, bar, &size, &flags); | ||
543 | if (addrp == NULL) | ||
544 | return -EINVAL; | ||
545 | return __of_address_to_resource(dev, addrp, size, flags, r); | ||
546 | } | ||
547 | EXPORT_SYMBOL_GPL(of_pci_address_to_resource); | ||
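The new prom_parse.c replaces the per-bus interpret_*_props tables removed from prom.c earlier in this patch: instead of pre-decoding every node's addresses into node->addrs, callers translate "reg"/"assigned-addresses" on demand. A hedged example of how a driver might use the new helpers (map_first_reg is made up; ioremap and IORESOURCE_MEM come from the usual kernel headers):

/* Sketch: map the first "reg" entry of a device node. */
static void __iomem *map_first_reg(struct device_node *np)
{
	struct resource res;

	if (of_address_to_resource(np, 0, &res))
		return NULL;		/* no address or not translatable */
	if (!(res.flags & IORESOURCE_MEM))
		return NULL;		/* only MMIO handled in this sketch */
	return ioremap(res.start, res.end - res.start + 1);
}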
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c index 60dec2401c26..45b8109951fe 100644 --- a/arch/powerpc/kernel/rtas_pci.c +++ b/arch/powerpc/kernel/rtas_pci.c | |||
@@ -188,39 +188,19 @@ int is_python(struct device_node *dev) | |||
188 | return 0; | 188 | return 0; |
189 | } | 189 | } |
190 | 190 | ||
191 | static int get_phb_reg_prop(struct device_node *dev, | 191 | static void python_countermeasures(struct device_node *dev) |
192 | unsigned int addr_size_words, | ||
193 | struct reg_property64 *reg) | ||
194 | { | 192 | { |
195 | unsigned int *ui_ptr = NULL, len; | 193 | struct resource registers; |
196 | |||
197 | /* Found a PHB, now figure out where his registers are mapped. */ | ||
198 | ui_ptr = (unsigned int *)get_property(dev, "reg", &len); | ||
199 | if (ui_ptr == NULL) | ||
200 | return 1; | ||
201 | |||
202 | if (addr_size_words == 1) { | ||
203 | reg->address = ((struct reg_property32 *)ui_ptr)->address; | ||
204 | reg->size = ((struct reg_property32 *)ui_ptr)->size; | ||
205 | } else { | ||
206 | *reg = *((struct reg_property64 *)ui_ptr); | ||
207 | } | ||
208 | |||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | static void python_countermeasures(struct device_node *dev, | ||
213 | unsigned int addr_size_words) | ||
214 | { | ||
215 | struct reg_property64 reg_struct; | ||
216 | void __iomem *chip_regs; | 194 | void __iomem *chip_regs; |
217 | volatile u32 val; | 195 | volatile u32 val; |
218 | 196 | ||
219 | if (get_phb_reg_prop(dev, addr_size_words, ®_struct)) | 197 | if (of_address_to_resource(dev, 0, ®isters)) { |
198 | printk(KERN_ERR "Can't get address for Python workarounds !\n"); | ||
220 | return; | 199 | return; |
200 | } | ||
221 | 201 | ||
222 | /* Python's register file is 1 MB in size. */ | 202 | /* Python's register file is 1 MB in size. */ |
223 | chip_regs = ioremap(reg_struct.address & ~(0xfffffUL), 0x100000); | 203 | chip_regs = ioremap(registers.start & ~(0xfffffUL), 0x100000); |
224 | 204 | ||
225 | /* | 205 | /* |
226 | * Firmware doesn't always clear this bit which is critical | 206 | * Firmware doesn't always clear this bit which is critical |
@@ -301,11 +281,10 @@ static int phb_set_bus_ranges(struct device_node *dev, | |||
301 | } | 281 | } |
302 | 282 | ||
303 | static int __devinit setup_phb(struct device_node *dev, | 283 | static int __devinit setup_phb(struct device_node *dev, |
304 | struct pci_controller *phb, | 284 | struct pci_controller *phb) |
305 | unsigned int addr_size_words) | ||
306 | { | 285 | { |
307 | if (is_python(dev)) | 286 | if (is_python(dev)) |
308 | python_countermeasures(dev, addr_size_words); | 287 | python_countermeasures(dev); |
309 | 288 | ||
310 | if (phb_set_bus_ranges(dev, phb)) | 289 | if (phb_set_bus_ranges(dev, phb)) |
311 | return 1; | 290 | return 1; |
@@ -320,8 +299,8 @@ unsigned long __init find_and_init_phbs(void) | |||
320 | { | 299 | { |
321 | struct device_node *node; | 300 | struct device_node *node; |
322 | struct pci_controller *phb; | 301 | struct pci_controller *phb; |
323 | unsigned int root_size_cells = 0; | ||
324 | unsigned int index; | 302 | unsigned int index; |
303 | unsigned int root_size_cells = 0; | ||
325 | unsigned int *opprop = NULL; | 304 | unsigned int *opprop = NULL; |
326 | struct device_node *root = of_find_node_by_path("/"); | 305 | struct device_node *root = of_find_node_by_path("/"); |
327 | 306 | ||
@@ -343,10 +322,11 @@ unsigned long __init find_and_init_phbs(void) | |||
343 | phb = pcibios_alloc_controller(node); | 322 | phb = pcibios_alloc_controller(node); |
344 | if (!phb) | 323 | if (!phb) |
345 | continue; | 324 | continue; |
346 | setup_phb(node, phb, root_size_cells); | 325 | setup_phb(node, phb); |
347 | pci_process_bridge_OF_ranges(phb, node, 0); | 326 | pci_process_bridge_OF_ranges(phb, node, 0); |
348 | pci_setup_phb_io(phb, index == 0); | 327 | pci_setup_phb_io(phb, index == 0); |
349 | #ifdef CONFIG_PPC_PSERIES | 328 | #ifdef CONFIG_PPC_PSERIES |
329 | /* XXX This code needs serious fixing ... --BenH */ | ||
350 | if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) { | 330 | if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) { |
351 | int addr = root_size_cells * (index + 2) - 1; | 331 | int addr = root_size_cells * (index + 2) - 1; |
352 | mpic_assign_isu(pSeries_mpic, index, opprop[addr]); | 332 | mpic_assign_isu(pSeries_mpic, index, opprop[addr]); |
@@ -381,22 +361,17 @@ unsigned long __init find_and_init_phbs(void) | |||
381 | 361 | ||
382 | struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn) | 362 | struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn) |
383 | { | 363 | { |
384 | struct device_node *root = of_find_node_by_path("/"); | ||
385 | unsigned int root_size_cells = 0; | ||
386 | struct pci_controller *phb; | 364 | struct pci_controller *phb; |
387 | int primary; | 365 | int primary; |
388 | 366 | ||
389 | root_size_cells = prom_n_size_cells(root); | ||
390 | |||
391 | primary = list_empty(&hose_list); | 367 | primary = list_empty(&hose_list); |
392 | phb = pcibios_alloc_controller(dn); | 368 | phb = pcibios_alloc_controller(dn); |
393 | if (!phb) | 369 | if (!phb) |
394 | return NULL; | 370 | return NULL; |
395 | setup_phb(dn, phb, root_size_cells); | 371 | setup_phb(dn, phb); |
396 | pci_process_bridge_OF_ranges(phb, dn, primary); | 372 | pci_process_bridge_OF_ranges(phb, dn, primary); |
397 | 373 | ||
398 | pci_setup_phb_io_dynamic(phb, primary); | 374 | pci_setup_phb_io_dynamic(phb, primary); |
399 | of_node_put(root); | ||
400 | 375 | ||
401 | pci_devs_phb_init_dynamic(phb); | 376 | pci_devs_phb_init_dynamic(phb); |
402 | scan_phb(phb); | 377 | scan_phb(phb); |
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index bd3eb4292b53..d5c52fae023a 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c | |||
@@ -93,8 +93,8 @@ EXPORT_SYMBOL(ppc_do_canonicalize_irqs); | |||
93 | /* also used by kexec */ | 93 | /* also used by kexec */ |
94 | void machine_shutdown(void) | 94 | void machine_shutdown(void) |
95 | { | 95 | { |
96 | if (ppc_md.nvram_sync) | 96 | if (ppc_md.machine_shutdown) |
97 | ppc_md.nvram_sync(); | 97 | ppc_md.machine_shutdown(); |
98 | } | 98 | } |
99 | 99 | ||
100 | void machine_restart(char *cmd) | 100 | void machine_restart(char *cmd) |
@@ -294,129 +294,6 @@ struct seq_operations cpuinfo_op = { | |||
294 | .show = show_cpuinfo, | 294 | .show = show_cpuinfo, |
295 | }; | 295 | }; |
296 | 296 | ||
297 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
298 | static int __init set_preferred_console(void) | ||
299 | { | ||
300 | struct device_node *prom_stdout = NULL; | ||
301 | char *name; | ||
302 | u32 *spd; | ||
303 | int offset = 0; | ||
304 | |||
305 | DBG(" -> set_preferred_console()\n"); | ||
306 | |||
307 | /* The user has requested a console so this is already set up. */ | ||
308 | if (strstr(saved_command_line, "console=")) { | ||
309 | DBG(" console was specified !\n"); | ||
310 | return -EBUSY; | ||
311 | } | ||
312 | |||
313 | if (!of_chosen) { | ||
314 | DBG(" of_chosen is NULL !\n"); | ||
315 | return -ENODEV; | ||
316 | } | ||
317 | /* We are getting a weird phandle from OF ... */ | ||
318 | /* ... So use the full path instead */ | ||
319 | name = (char *)get_property(of_chosen, "linux,stdout-path", NULL); | ||
320 | if (name == NULL) { | ||
321 | DBG(" no linux,stdout-path !\n"); | ||
322 | return -ENODEV; | ||
323 | } | ||
324 | prom_stdout = of_find_node_by_path(name); | ||
325 | if (!prom_stdout) { | ||
326 | DBG(" can't find stdout package %s !\n", name); | ||
327 | return -ENODEV; | ||
328 | } | ||
329 | DBG("stdout is %s\n", prom_stdout->full_name); | ||
330 | |||
331 | name = (char *)get_property(prom_stdout, "name", NULL); | ||
332 | if (!name) { | ||
333 | DBG(" stdout package has no name !\n"); | ||
334 | goto not_found; | ||
335 | } | ||
336 | spd = (u32 *)get_property(prom_stdout, "current-speed", NULL); | ||
337 | |||
338 | if (0) | ||
339 | ; | ||
340 | #ifdef CONFIG_SERIAL_8250_CONSOLE | ||
341 | else if (strcmp(name, "serial") == 0) { | ||
342 | int i; | ||
343 | u32 *reg = (u32 *)get_property(prom_stdout, "reg", &i); | ||
344 | if (i > 8) { | ||
345 | switch (reg[1]) { | ||
346 | case 0x3f8: | ||
347 | offset = 0; | ||
348 | break; | ||
349 | case 0x2f8: | ||
350 | offset = 1; | ||
351 | break; | ||
352 | case 0x898: | ||
353 | offset = 2; | ||
354 | break; | ||
355 | case 0x890: | ||
356 | offset = 3; | ||
357 | break; | ||
358 | default: | ||
359 | /* We dont recognise the serial port */ | ||
360 | goto not_found; | ||
361 | } | ||
362 | } | ||
363 | } | ||
364 | #endif /* CONFIG_SERIAL_8250_CONSOLE */ | ||
365 | #ifdef CONFIG_PPC_PSERIES | ||
366 | else if (strcmp(name, "vty") == 0) { | ||
367 | u32 *reg = (u32 *)get_property(prom_stdout, "reg", NULL); | ||
368 | char *compat = (char *)get_property(prom_stdout, "compatible", NULL); | ||
369 | |||
370 | if (reg && compat && (strcmp(compat, "hvterm-protocol") == 0)) { | ||
371 | /* Host Virtual Serial Interface */ | ||
372 | switch (reg[0]) { | ||
373 | case 0x30000000: | ||
374 | offset = 0; | ||
375 | break; | ||
376 | case 0x30000001: | ||
377 | offset = 1; | ||
378 | break; | ||
379 | default: | ||
380 | goto not_found; | ||
381 | } | ||
382 | of_node_put(prom_stdout); | ||
383 | DBG("Found hvsi console at offset %d\n", offset); | ||
384 | return add_preferred_console("hvsi", offset, NULL); | ||
385 | } else { | ||
386 | /* pSeries LPAR virtual console */ | ||
387 | of_node_put(prom_stdout); | ||
388 | DBG("Found hvc console\n"); | ||
389 | return add_preferred_console("hvc", 0, NULL); | ||
390 | } | ||
391 | } | ||
392 | #endif /* CONFIG_PPC_PSERIES */ | ||
393 | #ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE | ||
394 | else if (strcmp(name, "ch-a") == 0) | ||
395 | offset = 0; | ||
396 | else if (strcmp(name, "ch-b") == 0) | ||
397 | offset = 1; | ||
398 | #endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */ | ||
399 | else | ||
400 | goto not_found; | ||
401 | of_node_put(prom_stdout); | ||
402 | |||
403 | DBG("Found serial console at ttyS%d\n", offset); | ||
404 | |||
405 | if (spd) { | ||
406 | static char __initdata opt[16]; | ||
407 | sprintf(opt, "%d", *spd); | ||
408 | return add_preferred_console("ttyS", offset, opt); | ||
409 | } else | ||
410 | return add_preferred_console("ttyS", offset, NULL); | ||
411 | |||
412 | not_found: | ||
413 | DBG("No preferred console found !\n"); | ||
414 | of_node_put(prom_stdout); | ||
415 | return -ENODEV; | ||
416 | } | ||
417 | console_initcall(set_preferred_console); | ||
418 | #endif /* CONFIG_PPC_MULTIPLATFORM */ | ||
419 | |||
420 | void __init check_for_initrd(void) | 297 | void __init check_for_initrd(void) |
421 | { | 298 | { |
422 | #ifdef CONFIG_BLK_DEV_INITRD | 299 | #ifdef CONFIG_BLK_DEV_INITRD |
@@ -442,7 +319,7 @@ void __init check_for_initrd(void) | |||
442 | /* If we were passed an initrd, set the ROOT_DEV properly if the values | 319 | /* If we were passed an initrd, set the ROOT_DEV properly if the values |
443 | * look sensible. If not, clear initrd reference. | 320 | * look sensible. If not, clear initrd reference. |
444 | */ | 321 | */ |
445 | if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE && | 322 | if (is_kernel_addr(initrd_start) && is_kernel_addr(initrd_end) && |
446 | initrd_end > initrd_start) | 323 | initrd_end > initrd_start) |
447 | ROOT_DEV = Root_RAM0; | 324 | ROOT_DEV = Root_RAM0; |
448 | else | 325 | else |
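is_kernel_addr() replaces the open-coded comparisons against KERNELBASE so the same test works on 32-bit and 64-bit, and with a kdump kernel linked at a non-zero address. A minimal sketch of what such a predicate looks like; the real definition and the exact bound live in asm/page.h:

/* Sketch only -- see include/asm-powerpc/page.h for the real macro. */
static inline int is_kernel_addr(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}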
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c index e5694335bf10..e5d285adb496 100644 --- a/arch/powerpc/kernel/setup_32.c +++ b/arch/powerpc/kernel/setup_32.c | |||
@@ -39,6 +39,8 @@ | |||
39 | #include <asm/nvram.h> | 39 | #include <asm/nvram.h> |
40 | #include <asm/xmon.h> | 40 | #include <asm/xmon.h> |
41 | #include <asm/time.h> | 41 | #include <asm/time.h> |
42 | #include <asm/serial.h> | ||
43 | #include <asm/udbg.h> | ||
42 | 44 | ||
43 | #include "setup.h" | 45 | #include "setup.h" |
44 | 46 | ||
@@ -172,12 +174,23 @@ void __init platform_init(void) | |||
172 | */ | 174 | */ |
173 | void __init machine_init(unsigned long dt_ptr, unsigned long phys) | 175 | void __init machine_init(unsigned long dt_ptr, unsigned long phys) |
174 | { | 176 | { |
177 | /* If btext is enabled, we might have a BAT setup for early display, | ||
178 | * thus we do enable some very basic udbg output | ||
179 | */ | ||
180 | #ifdef CONFIG_BOOTX_TEXT | ||
181 | udbg_putc = btext_drawchar; | ||
182 | #endif | ||
183 | |||
184 | /* Do some early initialization based on the flat device tree */ | ||
175 | early_init_devtree(__va(dt_ptr)); | 185 | early_init_devtree(__va(dt_ptr)); |
176 | 186 | ||
187 | /* Check default command line */ | ||
177 | #ifdef CONFIG_CMDLINE | 188 | #ifdef CONFIG_CMDLINE |
178 | strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line)); | 189 | if (cmd_line[0] == 0) |
190 | strlcpy(cmd_line, CONFIG_CMDLINE, sizeof(cmd_line)); | ||
179 | #endif /* CONFIG_CMDLINE */ | 191 | #endif /* CONFIG_CMDLINE */ |
180 | 192 | ||
193 | /* Base init based on machine type */ | ||
181 | platform_init(); | 194 | platform_init(); |
182 | 195 | ||
183 | #ifdef CONFIG_6xx | 196 | #ifdef CONFIG_6xx |
@@ -282,25 +295,22 @@ void __init setup_arch(char **cmdline_p) | |||
282 | 295 | ||
283 | unflatten_device_tree(); | 296 | unflatten_device_tree(); |
284 | check_for_initrd(); | 297 | check_for_initrd(); |
285 | finish_device_tree(); | ||
286 | 298 | ||
287 | smp_setup_cpu_maps(); | 299 | if (ppc_md.init_early) |
300 | ppc_md.init_early(); | ||
288 | 301 | ||
289 | #ifdef CONFIG_BOOTX_TEXT | 302 | #ifdef CONFIG_SERIAL_8250 |
290 | init_boot_display(); | 303 | find_legacy_serial_ports(); |
291 | #endif | 304 | #endif |
305 | finish_device_tree(); | ||
292 | 306 | ||
293 | #ifdef CONFIG_PPC_PMAC | 307 | smp_setup_cpu_maps(); |
294 | /* This could be called "early setup arch", it must be done | ||
295 | * now because xmon need it | ||
296 | */ | ||
297 | if (_machine == _MACH_Pmac) | ||
298 | pmac_feature_init(); /* New cool way */ | ||
299 | #endif | ||
300 | 308 | ||
301 | #ifdef CONFIG_XMON_DEFAULT | 309 | #ifdef CONFIG_XMON_DEFAULT |
302 | xmon_init(1); | 310 | xmon_init(1); |
303 | #endif | 311 | #endif |
312 | /* Register early console */ | ||
313 | register_early_udbg_console(); | ||
304 | 314 | ||
305 | #if defined(CONFIG_KGDB) | 315 | #if defined(CONFIG_KGDB) |
306 | if (ppc_md.kgdb_map_scc) | 316 | if (ppc_md.kgdb_map_scc) |
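
One detail worth noting in the machine_init() hunk: the built-in CONFIG_CMDLINE is now a fallback rather than an override. A minimal sketch of that behaviour (buffer size and strings are made up; the kernel itself uses strlcpy into its own cmd_line buffer):

    #include <stdio.h>
    #include <string.h>

    #define COMMAND_LINE_SIZE 512            /* illustrative size only */
    static char cmd_line[COMMAND_LINE_SIZE]; /* normally filled from the device tree */

    /* Mirror of the new CONFIG_CMDLINE behaviour: the compiled-in default is
     * only used when the firmware/bootloader supplied nothing. */
    static void apply_default_cmdline(const char *builtin_default)
    {
            if (cmd_line[0] == 0)
                    snprintf(cmd_line, sizeof(cmd_line), "%s", builtin_default);
    }

    int main(void)
    {
            apply_default_cmdline("root=/dev/sda1 console=ttyS0");
            printf("%s\n", cmd_line);
            return 0;
    }
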
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index e3fb78397dc6..98e9f0595dd8 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/serial.h> | 34 | #include <linux/serial.h> |
35 | #include <linux/serial_8250.h> | 35 | #include <linux/serial_8250.h> |
36 | #include <asm/io.h> | 36 | #include <asm/io.h> |
37 | #include <asm/kdump.h> | ||
37 | #include <asm/prom.h> | 38 | #include <asm/prom.h> |
38 | #include <asm/processor.h> | 39 | #include <asm/processor.h> |
39 | #include <asm/pgtable.h> | 40 | #include <asm/pgtable.h> |
@@ -268,6 +269,10 @@ void __init early_setup(unsigned long dt_ptr) | |||
268 | } | 269 | } |
269 | ppc_md = **mach; | 270 | ppc_md = **mach; |
270 | 271 | ||
272 | #ifdef CONFIG_CRASH_DUMP | ||
273 | kdump_setup(); | ||
274 | #endif | ||
275 | |||
271 | DBG("Found, Initializing memory management...\n"); | 276 | DBG("Found, Initializing memory management...\n"); |
272 | 277 | ||
273 | /* | 278 | /* |
@@ -317,6 +322,7 @@ void early_setup_secondary(void) | |||
317 | void smp_release_cpus(void) | 322 | void smp_release_cpus(void) |
318 | { | 323 | { |
319 | extern unsigned long __secondary_hold_spinloop; | 324 | extern unsigned long __secondary_hold_spinloop; |
325 | unsigned long *ptr; | ||
320 | 326 | ||
321 | DBG(" -> smp_release_cpus()\n"); | 327 | DBG(" -> smp_release_cpus()\n"); |
322 | 328 | ||
@@ -327,7 +333,9 @@ void smp_release_cpus(void) | |||
327 | * This is useless but harmless on iSeries, secondaries are already | 333 | * This is useless but harmless on iSeries, secondaries are already |
328 | * waiting on their paca spinloops. */ | 334 | * waiting on their paca spinloops. */ |
329 | 335 | ||
330 | __secondary_hold_spinloop = 1; | 336 | ptr = (unsigned long *)((unsigned long)&__secondary_hold_spinloop |
337 | - PHYSICAL_START); | ||
338 | *ptr = 1; | ||
331 | mb(); | 339 | mb(); |
332 | 340 | ||
333 | DBG(" <- smp_release_cpus()\n"); | 341 | DBG(" <- smp_release_cpus()\n"); |
@@ -459,16 +467,21 @@ void __init setup_system(void) | |||
459 | */ | 467 | */ |
460 | ppc_md.init_early(); | 468 | ppc_md.init_early(); |
461 | 469 | ||
470 | /* | ||
471 | * We can discover serial ports now since the above set up the | ||
472 | * hash table management for us, so ioremap works. We do this early | ||
473 | * so that further code can be debugged. | ||
474 | */ | ||
475 | #ifdef CONFIG_SERIAL_8250 | ||
476 | find_legacy_serial_ports(); | ||
477 | #endif | ||
478 | |||
462 | /* | 479 | /* |
463 | * "Finish" the device-tree, that is do the actual parsing of | 480 | * "Finish" the device-tree, that is do the actual parsing of |
464 | * some of the properties like the interrupt map | 481 | * some of the properties like the interrupt map |
465 | */ | 482 | */ |
466 | finish_device_tree(); | 483 | finish_device_tree(); |
467 | 484 | ||
468 | #ifdef CONFIG_BOOTX_TEXT | ||
469 | init_boot_display(); | ||
470 | #endif | ||
471 | |||
472 | /* | 485 | /* |
473 | * Initialize xmon | 486 | * Initialize xmon |
474 | */ | 487 | */ |
@@ -507,6 +520,9 @@ void __init setup_system(void) | |||
507 | ppc64_caches.iline_size); | 520 | ppc64_caches.iline_size); |
508 | printk("htab_address = 0x%p\n", htab_address); | 521 | printk("htab_address = 0x%p\n", htab_address); |
509 | printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); | 522 | printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); |
523 | #if PHYSICAL_START > 0 | ||
524 | printk("physical_start = 0x%x\n", PHYSICAL_START); | ||
525 | #endif | ||
510 | printk("-----------------------------------------------------\n"); | 526 | printk("-----------------------------------------------------\n"); |
511 | 527 | ||
512 | mm_init_ppc64(); | 528 | mm_init_ppc64(); |
@@ -657,187 +673,6 @@ void ppc64_terminate_msg(unsigned int src, const char *msg) | |||
657 | printk("[terminate]%04x %s\n", src, msg); | 673 | printk("[terminate]%04x %s\n", src, msg); |
658 | } | 674 | } |
659 | 675 | ||
660 | #ifndef CONFIG_PPC_ISERIES | ||
661 | /* | ||
662 | * This function can be used by platforms to "find" legacy serial ports. | ||
663 | * It works for "serial" nodes under an "isa" node, and will try to | ||
664 | * respect the "ibm,aix-loc" property if any. It works with up to 8 | ||
665 | * ports. | ||
666 | */ | ||
667 | |||
668 | #define MAX_LEGACY_SERIAL_PORTS 8 | ||
669 | static struct plat_serial8250_port serial_ports[MAX_LEGACY_SERIAL_PORTS+1]; | ||
670 | static unsigned int old_serial_count; | ||
671 | |||
672 | void __init generic_find_legacy_serial_ports(u64 *physport, | ||
673 | unsigned int *default_speed) | ||
674 | { | ||
675 | struct device_node *np; | ||
676 | u32 *sizeprop; | ||
677 | |||
678 | struct isa_reg_property { | ||
679 | u32 space; | ||
680 | u32 address; | ||
681 | u32 size; | ||
682 | }; | ||
683 | struct pci_reg_property { | ||
684 | struct pci_address addr; | ||
685 | u32 size_hi; | ||
686 | u32 size_lo; | ||
687 | }; | ||
688 | |||
689 | DBG(" -> generic_find_legacy_serial_port()\n"); | ||
690 | |||
691 | *physport = 0; | ||
692 | if (default_speed) | ||
693 | *default_speed = 0; | ||
694 | |||
695 | np = of_find_node_by_path("/"); | ||
696 | if (!np) | ||
697 | return; | ||
698 | |||
699 | /* First fill our array */ | ||
700 | for (np = NULL; (np = of_find_node_by_type(np, "serial"));) { | ||
701 | struct device_node *isa, *pci; | ||
702 | struct isa_reg_property *reg; | ||
703 | unsigned long phys_size, addr_size, io_base; | ||
704 | u32 *rangesp; | ||
705 | u32 *interrupts, *clk, *spd; | ||
706 | char *typep; | ||
707 | int index, rlen, rentsize; | ||
708 | |||
709 | /* Ok, first check if it's under an "isa" parent */ | ||
710 | isa = of_get_parent(np); | ||
711 | if (!isa || strcmp(isa->name, "isa")) { | ||
712 | DBG("%s: no isa parent found\n", np->full_name); | ||
713 | continue; | ||
714 | } | ||
715 | |||
716 | /* Now look for an "ibm,aix-loc" property that gives us ordering | ||
717 | * if any... | ||
718 | */ | ||
719 | typep = (char *)get_property(np, "ibm,aix-loc", NULL); | ||
720 | |||
721 | /* Get the ISA port number */ | ||
722 | reg = (struct isa_reg_property *)get_property(np, "reg", NULL); | ||
723 | if (reg == NULL) | ||
724 | goto next_port; | ||
725 | /* We assume the interrupt number isn't translated ... */ | ||
726 | interrupts = (u32 *)get_property(np, "interrupts", NULL); | ||
727 | /* get clock freq. if present */ | ||
728 | clk = (u32 *)get_property(np, "clock-frequency", NULL); | ||
729 | /* get default speed if present */ | ||
730 | spd = (u32 *)get_property(np, "current-speed", NULL); | ||
731 | /* Default to locate at end of array */ | ||
732 | index = old_serial_count; /* end of the array by default */ | ||
733 | |||
734 | /* If we have a location index, then use it */ | ||
735 | if (typep && *typep == 'S') { | ||
736 | index = simple_strtol(typep+1, NULL, 0) - 1; | ||
737 | /* if index is out of range, use end of array instead */ | ||
738 | if (index >= MAX_LEGACY_SERIAL_PORTS) | ||
739 | index = old_serial_count; | ||
740 | /* if our index is still out of range, that mean that | ||
741 | * array is full, we could scan for a free slot but that | ||
742 | * make little sense to bother, just skip the port | ||
743 | */ | ||
744 | if (index >= MAX_LEGACY_SERIAL_PORTS) | ||
745 | goto next_port; | ||
746 | if (index >= old_serial_count) | ||
747 | old_serial_count = index + 1; | ||
748 | /* Check if there is a port who already claimed our slot */ | ||
749 | if (serial_ports[index].iobase != 0) { | ||
750 | /* if we still have some room, move it, else override */ | ||
751 | if (old_serial_count < MAX_LEGACY_SERIAL_PORTS) { | ||
752 | DBG("Moved legacy port %d -> %d\n", index, | ||
753 | old_serial_count); | ||
754 | serial_ports[old_serial_count++] = | ||
755 | serial_ports[index]; | ||
756 | } else { | ||
757 | DBG("Replacing legacy port %d\n", index); | ||
758 | } | ||
759 | } | ||
760 | } | ||
761 | if (index >= MAX_LEGACY_SERIAL_PORTS) | ||
762 | goto next_port; | ||
763 | if (index >= old_serial_count) | ||
764 | old_serial_count = index + 1; | ||
765 | |||
766 | /* Now fill the entry */ | ||
767 | memset(&serial_ports[index], 0, sizeof(struct plat_serial8250_port)); | ||
768 | serial_ports[index].uartclk = clk ? *clk : BASE_BAUD * 16; | ||
769 | serial_ports[index].iobase = reg->address; | ||
770 | serial_ports[index].irq = interrupts ? interrupts[0] : 0; | ||
771 | serial_ports[index].flags = ASYNC_BOOT_AUTOCONF; | ||
772 | |||
773 | DBG("Added legacy port, index: %d, port: %x, irq: %d, clk: %d\n", | ||
774 | index, | ||
775 | serial_ports[index].iobase, | ||
776 | serial_ports[index].irq, | ||
777 | serial_ports[index].uartclk); | ||
778 | |||
779 | /* Get phys address of IO reg for port 1 */ | ||
780 | if (index != 0) | ||
781 | goto next_port; | ||
782 | |||
783 | pci = of_get_parent(isa); | ||
784 | if (!pci) { | ||
785 | DBG("%s: no pci parent found\n", np->full_name); | ||
786 | goto next_port; | ||
787 | } | ||
788 | |||
789 | rangesp = (u32 *)get_property(pci, "ranges", &rlen); | ||
790 | if (rangesp == NULL) { | ||
791 | of_node_put(pci); | ||
792 | goto next_port; | ||
793 | } | ||
794 | rlen /= 4; | ||
795 | |||
796 | /* we need the #size-cells of the PCI bridge node itself */ | ||
797 | phys_size = 1; | ||
798 | sizeprop = (u32 *)get_property(pci, "#size-cells", NULL); | ||
799 | if (sizeprop != NULL) | ||
800 | phys_size = *sizeprop; | ||
801 | /* we need the parent #addr-cells */ | ||
802 | addr_size = prom_n_addr_cells(pci); | ||
803 | rentsize = 3 + addr_size + phys_size; | ||
804 | io_base = 0; | ||
805 | for (;rlen >= rentsize; rlen -= rentsize,rangesp += rentsize) { | ||
806 | if (((rangesp[0] >> 24) & 0x3) != 1) | ||
807 | continue; /* not IO space */ | ||
808 | io_base = rangesp[3]; | ||
809 | if (addr_size == 2) | ||
810 | io_base = (io_base << 32) | rangesp[4]; | ||
811 | } | ||
812 | if (io_base != 0) { | ||
813 | *physport = io_base + reg->address; | ||
814 | if (default_speed && spd) | ||
815 | *default_speed = *spd; | ||
816 | } | ||
817 | of_node_put(pci); | ||
818 | next_port: | ||
819 | of_node_put(isa); | ||
820 | } | ||
821 | |||
822 | DBG(" <- generic_find_legacy_serial_port()\n"); | ||
823 | } | ||
824 | |||
825 | static struct platform_device serial_device = { | ||
826 | .name = "serial8250", | ||
827 | .id = PLAT8250_DEV_PLATFORM, | ||
828 | .dev = { | ||
829 | .platform_data = serial_ports, | ||
830 | }, | ||
831 | }; | ||
832 | |||
833 | static int __init serial_dev_init(void) | ||
834 | { | ||
835 | return platform_device_register(&serial_device); | ||
836 | } | ||
837 | arch_initcall(serial_dev_init); | ||
838 | |||
839 | #endif /* CONFIG_PPC_ISERIES */ | ||
840 | |||
841 | int check_legacy_ioport(unsigned long base_port) | 676 | int check_legacy_ioport(unsigned long base_port) |
842 | { | 677 | { |
843 | if (ppc_md.check_legacy_ioport == NULL) | 678 | if (ppc_md.check_legacy_ioport == NULL) |
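
In smp_release_cpus() above, the write to __secondary_hold_spinloop now goes through an address adjusted by PHYSICAL_START: a kdump capture kernel is linked to run at a non-zero physical offset, so the flag the secondaries poll still lives at its unrelocated address. A toy illustration of the address arithmetic, with made-up values:

    #include <stdio.h>

    /* Illustrative values only: a capture kernel linked to run at physical
     * 32MB still needs to poke a flag that sits where the symbol would have
     * been in a zero-based kernel image. */
    #define PHYSICAL_START 0x2000000UL

    int main(void)
    {
            unsigned long spinloop_vaddr = 0xc000000002000100UL; /* assumed symbol address */
            unsigned long poke_addr = spinloop_vaddr - PHYSICAL_START;

            printf("symbol at %#lx, write goes to %#lx\n",
                   spinloop_vaddr, poke_addr);
            return 0;
    }
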
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c index 5a2eba60dd39..d3f0b6d452fb 100644 --- a/arch/powerpc/kernel/signal_32.c +++ b/arch/powerpc/kernel/signal_32.c | |||
@@ -76,7 +76,6 @@ | |||
76 | * registers from *regs. This is what we need | 76 | * registers from *regs. This is what we need |
77 | * to do when a signal has been delivered. | 77 | * to do when a signal has been delivered. |
78 | */ | 78 | */ |
79 | #define sigreturn_exit(regs) return 0 | ||
80 | 79 | ||
81 | #define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32)) | 80 | #define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32)) |
82 | #undef __SIGNAL_FRAMESIZE | 81 | #undef __SIGNAL_FRAMESIZE |
@@ -156,9 +155,17 @@ static inline int save_general_regs(struct pt_regs *regs, | |||
156 | elf_greg_t64 *gregs = (elf_greg_t64 *)regs; | 155 | elf_greg_t64 *gregs = (elf_greg_t64 *)regs; |
157 | int i; | 156 | int i; |
158 | 157 | ||
159 | for (i = 0; i <= PT_RESULT; i ++) | 158 | if (!FULL_REGS(regs)) { |
159 | set_thread_flag(TIF_SAVE_NVGPRS); | ||
160 | current_thread_info()->nvgprs_frame = frame->mc_gregs; | ||
161 | } | ||
162 | |||
163 | for (i = 0; i <= PT_RESULT; i ++) { | ||
164 | if (i == 14 && !FULL_REGS(regs)) | ||
165 | i = 32; | ||
160 | if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i])) | 166 | if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i])) |
161 | return -EFAULT; | 167 | return -EFAULT; |
168 | } | ||
162 | return 0; | 169 | return 0; |
163 | } | 170 | } |
164 | 171 | ||
@@ -179,8 +186,6 @@ static inline int restore_general_regs(struct pt_regs *regs, | |||
179 | 186 | ||
180 | #else /* CONFIG_PPC64 */ | 187 | #else /* CONFIG_PPC64 */ |
181 | 188 | ||
182 | extern void sigreturn_exit(struct pt_regs *); | ||
183 | |||
184 | #define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs)) | 189 | #define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs)) |
185 | 190 | ||
186 | static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set) | 191 | static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set) |
@@ -214,6 +219,15 @@ static inline int get_old_sigaction(struct k_sigaction *new_ka, | |||
214 | static inline int save_general_regs(struct pt_regs *regs, | 219 | static inline int save_general_regs(struct pt_regs *regs, |
215 | struct mcontext __user *frame) | 220 | struct mcontext __user *frame) |
216 | { | 221 | { |
222 | if (!FULL_REGS(regs)) { | ||
223 | /* Zero out the unsaved GPRs to avoid information | ||
224 | leak, and set TIF_SAVE_NVGPRS to ensure that the | ||
225 | registers do actually get saved later. */ | ||
226 | memset(®s->gpr[14], 0, 18 * sizeof(unsigned long)); | ||
227 | current_thread_info()->nvgprs_frame = &frame->mc_gregs; | ||
228 | set_thread_flag(TIF_SAVE_NVGPRS); | ||
229 | } | ||
230 | |||
217 | return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE); | 231 | return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE); |
218 | } | 232 | } |
219 | 233 | ||
@@ -256,8 +270,10 @@ long sys_sigsuspend(old_sigset_t mask, int p2, int p3, int p4, int p6, int p7, | |||
256 | while (1) { | 270 | while (1) { |
257 | current->state = TASK_INTERRUPTIBLE; | 271 | current->state = TASK_INTERRUPTIBLE; |
258 | schedule(); | 272 | schedule(); |
259 | if (do_signal(&saveset, regs)) | 273 | if (do_signal(&saveset, regs)) { |
260 | sigreturn_exit(regs); | 274 | set_thread_flag(TIF_RESTOREALL); |
275 | return 0; | ||
276 | } | ||
261 | } | 277 | } |
262 | } | 278 | } |
263 | 279 | ||
@@ -292,8 +308,10 @@ long sys_rt_sigsuspend( | |||
292 | while (1) { | 308 | while (1) { |
293 | current->state = TASK_INTERRUPTIBLE; | 309 | current->state = TASK_INTERRUPTIBLE; |
294 | schedule(); | 310 | schedule(); |
295 | if (do_signal(&saveset, regs)) | 311 | if (do_signal(&saveset, regs)) { |
296 | sigreturn_exit(regs); | 312 | set_thread_flag(TIF_RESTOREALL); |
313 | return 0; | ||
314 | } | ||
297 | } | 315 | } |
298 | } | 316 | } |
299 | 317 | ||
@@ -391,9 +409,6 @@ struct rt_sigframe { | |||
391 | static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, | 409 | static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame, |
392 | int sigret) | 410 | int sigret) |
393 | { | 411 | { |
394 | #ifdef CONFIG_PPC32 | ||
395 | CHECK_FULL_REGS(regs); | ||
396 | #endif | ||
397 | /* Make sure floating point registers are stored in regs */ | 412 | /* Make sure floating point registers are stored in regs */ |
398 | flush_fp_to_thread(current); | 413 | flush_fp_to_thread(current); |
399 | 414 | ||
@@ -828,12 +843,6 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka, | |||
828 | regs->gpr[6] = (unsigned long) rt_sf; | 843 | regs->gpr[6] = (unsigned long) rt_sf; |
829 | regs->nip = (unsigned long) ka->sa.sa_handler; | 844 | regs->nip = (unsigned long) ka->sa.sa_handler; |
830 | regs->trap = 0; | 845 | regs->trap = 0; |
831 | #ifdef CONFIG_PPC64 | ||
832 | regs->result = 0; | ||
833 | |||
834 | if (test_thread_flag(TIF_SINGLESTEP)) | ||
835 | ptrace_notify(SIGTRAP); | ||
836 | #endif | ||
837 | return 1; | 846 | return 1; |
838 | 847 | ||
839 | badframe: | 848 | badframe: |
@@ -911,8 +920,8 @@ long sys_swapcontext(struct ucontext __user *old_ctx, | |||
911 | */ | 920 | */ |
912 | if (do_setcontext(new_ctx, regs, 0)) | 921 | if (do_setcontext(new_ctx, regs, 0)) |
913 | do_exit(SIGSEGV); | 922 | do_exit(SIGSEGV); |
914 | sigreturn_exit(regs); | 923 | |
915 | /* doesn't actually return back to here */ | 924 | set_thread_flag(TIF_RESTOREALL); |
916 | return 0; | 925 | return 0; |
917 | } | 926 | } |
918 | 927 | ||
@@ -945,12 +954,11 @@ long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, | |||
945 | * nobody does any... | 954 | * nobody does any... |
946 | */ | 955 | */ |
947 | compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs); | 956 | compat_sys_sigaltstack((u32)(u64)&rt_sf->uc.uc_stack, 0, 0, 0, 0, 0, regs); |
948 | return (int)regs->result; | ||
949 | #else | 957 | #else |
950 | do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]); | 958 | do_sigaltstack(&rt_sf->uc.uc_stack, NULL, regs->gpr[1]); |
951 | sigreturn_exit(regs); /* doesn't return here */ | ||
952 | return 0; | ||
953 | #endif | 959 | #endif |
960 | set_thread_flag(TIF_RESTOREALL); | ||
961 | return 0; | ||
954 | 962 | ||
955 | bad: | 963 | bad: |
956 | force_sig(SIGSEGV, current); | 964 | force_sig(SIGSEGV, current); |
@@ -1041,9 +1049,7 @@ int sys_debug_setcontext(struct ucontext __user *ctx, | |||
1041 | */ | 1049 | */ |
1042 | do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]); | 1050 | do_sigaltstack(&ctx->uc_stack, NULL, regs->gpr[1]); |
1043 | 1051 | ||
1044 | sigreturn_exit(regs); | 1052 | set_thread_flag(TIF_RESTOREALL); |
1045 | /* doesn't actually return back to here */ | ||
1046 | |||
1047 | out: | 1053 | out: |
1048 | return 0; | 1054 | return 0; |
1049 | } | 1055 | } |
@@ -1107,12 +1113,6 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka, | |||
1107 | regs->gpr[4] = (unsigned long) sc; | 1113 | regs->gpr[4] = (unsigned long) sc; |
1108 | regs->nip = (unsigned long) ka->sa.sa_handler; | 1114 | regs->nip = (unsigned long) ka->sa.sa_handler; |
1109 | regs->trap = 0; | 1115 | regs->trap = 0; |
1110 | #ifdef CONFIG_PPC64 | ||
1111 | regs->result = 0; | ||
1112 | |||
1113 | if (test_thread_flag(TIF_SINGLESTEP)) | ||
1114 | ptrace_notify(SIGTRAP); | ||
1115 | #endif | ||
1116 | 1116 | ||
1117 | return 1; | 1117 | return 1; |
1118 | 1118 | ||
@@ -1160,12 +1160,8 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8, | |||
1160 | || restore_user_regs(regs, sr, 1)) | 1160 | || restore_user_regs(regs, sr, 1)) |
1161 | goto badframe; | 1161 | goto badframe; |
1162 | 1162 | ||
1163 | #ifdef CONFIG_PPC64 | 1163 | set_thread_flag(TIF_RESTOREALL); |
1164 | return (int)regs->result; | ||
1165 | #else | ||
1166 | sigreturn_exit(regs); /* doesn't return */ | ||
1167 | return 0; | 1164 | return 0; |
1168 | #endif | ||
1169 | 1165 | ||
1170 | badframe: | 1166 | badframe: |
1171 | force_sig(SIGSEGV, current); | 1167 | force_sig(SIGSEGV, current); |
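
The signal_32.c changes replace the old sigreturn_exit()/regs->result dance with two thread flags: TIF_RESTOREALL makes the syscall exit path reload the full register set from pt_regs, and TIF_SAVE_NVGPRS defers copying the non-volatile GPRs (r14-r31) into the signal frame until they have actually been saved. A simplified, self-contained sketch of the save side; the types and flag handling are stand-ins, only the control flow mirrors the patch:

    #include <string.h>

    /* Simplified stand-ins for the kernel types used in the diff. */
    struct pt_regs { unsigned long gpr[32]; int full_regs; };
    struct mcontext { unsigned long mc_gregs[48]; };
    struct thread_info { unsigned long *nvgprs_frame; unsigned long flags; };

    #define FULL_REGS(r)      ((r)->full_regs)
    #define TIF_SAVE_NVGPRS   (1UL << 0)

    static struct thread_info current_ti;

    /* Mirrors the new save_general_regs(): if the non-volatile GPRs were not
     * saved on kernel entry, zero them in the register image (no stale kernel
     * data leaks to user space) and ask the exit path to fill in the user
     * frame later. */
    static int save_general_regs(struct pt_regs *regs, struct mcontext *frame)
    {
            if (!FULL_REGS(regs)) {
                    memset(&regs->gpr[14], 0, 18 * sizeof(unsigned long));
                    current_ti.nvgprs_frame = frame->mc_gregs;
                    current_ti.flags |= TIF_SAVE_NVGPRS;
            }
            memcpy(frame->mc_gregs, regs->gpr, sizeof(regs->gpr));
            return 0;
    }

    int main(void)
    {
            struct pt_regs regs = { .full_regs = 0 };
            struct mcontext frame;

            save_general_regs(&regs, &frame);
            return (current_ti.flags & TIF_SAVE_NVGPRS) ? 0 : 1;
    }
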
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c index 1decf2785530..5462bef898f6 100644 --- a/arch/powerpc/kernel/signal_64.c +++ b/arch/powerpc/kernel/signal_64.c | |||
@@ -96,8 +96,10 @@ long sys_rt_sigsuspend(sigset_t __user *unewset, size_t sigsetsize, int p3, int | |||
96 | while (1) { | 96 | while (1) { |
97 | current->state = TASK_INTERRUPTIBLE; | 97 | current->state = TASK_INTERRUPTIBLE; |
98 | schedule(); | 98 | schedule(); |
99 | if (do_signal(&saveset, regs)) | 99 | if (do_signal(&saveset, regs)) { |
100 | set_thread_flag(TIF_RESTOREALL); | ||
100 | return 0; | 101 | return 0; |
102 | } | ||
101 | } | 103 | } |
102 | } | 104 | } |
103 | 105 | ||
@@ -152,6 +154,14 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, | |||
152 | err |= __put_user(0, &sc->v_regs); | 154 | err |= __put_user(0, &sc->v_regs); |
153 | #endif /* CONFIG_ALTIVEC */ | 155 | #endif /* CONFIG_ALTIVEC */ |
154 | err |= __put_user(&sc->gp_regs, &sc->regs); | 156 | err |= __put_user(&sc->gp_regs, &sc->regs); |
157 | if (!FULL_REGS(regs)) { | ||
158 | /* Zero out the unsaved GPRs to avoid information | ||
159 | leak, and set TIF_SAVE_NVGPRS to ensure that the | ||
160 | registers do actually get saved later. */ | ||
161 | memset(®s->gpr[14], 0, 18 * sizeof(unsigned long)); | ||
162 | set_thread_flag(TIF_SAVE_NVGPRS); | ||
163 | current_thread_info()->nvgprs_frame = &sc->gp_regs; | ||
164 | } | ||
155 | err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE); | 165 | err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE); |
156 | err |= __copy_to_user(&sc->fp_regs, ¤t->thread.fpr, FP_REGS_SIZE); | 166 | err |= __copy_to_user(&sc->fp_regs, ¤t->thread.fpr, FP_REGS_SIZE); |
157 | err |= __put_user(signr, &sc->signal); | 167 | err |= __put_user(signr, &sc->signal); |
@@ -340,6 +350,7 @@ int sys_swapcontext(struct ucontext __user *old_ctx, | |||
340 | do_exit(SIGSEGV); | 350 | do_exit(SIGSEGV); |
341 | 351 | ||
342 | /* This returns like rt_sigreturn */ | 352 | /* This returns like rt_sigreturn */ |
353 | set_thread_flag(TIF_RESTOREALL); | ||
343 | return 0; | 354 | return 0; |
344 | } | 355 | } |
345 | 356 | ||
@@ -372,7 +383,8 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5, | |||
372 | */ | 383 | */ |
373 | do_sigaltstack(&uc->uc_stack, NULL, regs->gpr[1]); | 384 | do_sigaltstack(&uc->uc_stack, NULL, regs->gpr[1]); |
374 | 385 | ||
375 | return regs->result; | 386 | set_thread_flag(TIF_RESTOREALL); |
387 | return 0; | ||
376 | 388 | ||
377 | badframe: | 389 | badframe: |
378 | #if DEBUG_SIG | 390 | #if DEBUG_SIG |
@@ -454,9 +466,6 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info, | |||
454 | if (err) | 466 | if (err) |
455 | goto badframe; | 467 | goto badframe; |
456 | 468 | ||
457 | if (test_thread_flag(TIF_SINGLESTEP)) | ||
458 | ptrace_notify(SIGTRAP); | ||
459 | |||
460 | return 1; | 469 | return 1; |
461 | 470 | ||
462 | badframe: | 471 | badframe: |
@@ -502,6 +511,8 @@ static inline void syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) | |||
502 | * we only get here if there is a handler, we dont restart. | 511 | * we only get here if there is a handler, we dont restart. |
503 | */ | 512 | */ |
504 | regs->result = -EINTR; | 513 | regs->result = -EINTR; |
514 | regs->gpr[3] = EINTR; | ||
515 | regs->ccr |= 0x10000000; | ||
505 | break; | 516 | break; |
506 | case -ERESTARTSYS: | 517 | case -ERESTARTSYS: |
507 | /* ERESTARTSYS means to restart the syscall if there is no | 518 | /* ERESTARTSYS means to restart the syscall if there is no |
@@ -509,6 +520,8 @@ static inline void syscall_restart(struct pt_regs *regs, struct k_sigaction *ka) | |||
509 | */ | 520 | */ |
510 | if (!(ka->sa.sa_flags & SA_RESTART)) { | 521 | if (!(ka->sa.sa_flags & SA_RESTART)) { |
511 | regs->result = -EINTR; | 522 | regs->result = -EINTR; |
523 | regs->gpr[3] = EINTR; | ||
524 | regs->ccr |= 0x10000000; | ||
512 | break; | 525 | break; |
513 | } | 526 | } |
514 | /* fallthrough */ | 527 | /* fallthrough */ |
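
The syscall_restart() hunks now encode the failure directly in the user-visible registers instead of relying on regs->result at exit: a positive errno in r3 plus the summary-overflow bit of CR0 (mask 0x10000000 in the CR image), which is how a powerpc syscall reports an error to userspace. A compact illustration with a fake register structure:

    #include <stdio.h>
    #include <errno.h>

    struct fake_regs { unsigned long gpr[32]; unsigned long ccr; long result; };

    /* On powerpc a failed syscall is reported by setting CR0.SO and putting
     * the positive errno value in r3; libc then negates it into errno.
     * 0x10000000 is the SO bit of CR field 0 in the 32-bit CR image. */
    static void fail_with_eintr(struct fake_regs *regs)
    {
            regs->result = -EINTR;
            regs->gpr[3] = EINTR;
            regs->ccr |= 0x10000000;
    }

    int main(void)
    {
            struct fake_regs regs = { .ccr = 0 };

            fail_with_eintr(&regs);
            printf("r3=%lu ccr=%#lx\n", regs.gpr[3], regs.ccr);
            return 0;
    }
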
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 30374d2f88e5..d381ec90b759 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/sysdev.h> | 31 | #include <linux/sysdev.h> |
32 | #include <linux/cpu.h> | 32 | #include <linux/cpu.h> |
33 | #include <linux/notifier.h> | 33 | #include <linux/notifier.h> |
34 | #include <linux/topology.h> | ||
34 | 35 | ||
35 | #include <asm/ptrace.h> | 36 | #include <asm/ptrace.h> |
36 | #include <asm/atomic.h> | 37 | #include <asm/atomic.h> |
@@ -75,6 +76,8 @@ void smp_call_function_interrupt(void); | |||
75 | 76 | ||
76 | int smt_enabled_at_boot = 1; | 77 | int smt_enabled_at_boot = 1; |
77 | 78 | ||
79 | static void (*crash_ipi_function_ptr)(struct pt_regs *) = NULL; | ||
80 | |||
78 | #ifdef CONFIG_MPIC | 81 | #ifdef CONFIG_MPIC |
79 | int __init smp_mpic_probe(void) | 82 | int __init smp_mpic_probe(void) |
80 | { | 83 | { |
@@ -123,11 +126,16 @@ void smp_message_recv(int msg, struct pt_regs *regs) | |||
123 | /* XXX Do we have to do this? */ | 126 | /* XXX Do we have to do this? */ |
124 | set_need_resched(); | 127 | set_need_resched(); |
125 | break; | 128 | break; |
126 | #ifdef CONFIG_DEBUGGER | ||
127 | case PPC_MSG_DEBUGGER_BREAK: | 129 | case PPC_MSG_DEBUGGER_BREAK: |
130 | if (crash_ipi_function_ptr) { | ||
131 | crash_ipi_function_ptr(regs); | ||
132 | break; | ||
133 | } | ||
134 | #ifdef CONFIG_DEBUGGER | ||
128 | debugger_ipi(regs); | 135 | debugger_ipi(regs); |
129 | break; | 136 | break; |
130 | #endif | 137 | #endif /* CONFIG_DEBUGGER */ |
138 | /* FALLTHROUGH */ | ||
131 | default: | 139 | default: |
132 | printk("SMP %d: smp_message_recv(): unknown msg %d\n", | 140 | printk("SMP %d: smp_message_recv(): unknown msg %d\n", |
133 | smp_processor_id(), msg); | 141 | smp_processor_id(), msg); |
@@ -147,6 +155,17 @@ void smp_send_debugger_break(int cpu) | |||
147 | } | 155 | } |
148 | #endif | 156 | #endif |
149 | 157 | ||
158 | #ifdef CONFIG_KEXEC | ||
159 | void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) | ||
160 | { | ||
161 | crash_ipi_function_ptr = crash_ipi_callback; | ||
162 | if (crash_ipi_callback) { | ||
163 | mb(); | ||
164 | smp_ops->message_pass(MSG_ALL_BUT_SELF, PPC_MSG_DEBUGGER_BREAK); | ||
165 | } | ||
166 | } | ||
167 | #endif | ||
168 | |||
150 | static void stop_this_cpu(void *dummy) | 169 | static void stop_this_cpu(void *dummy) |
151 | { | 170 | { |
152 | local_irq_disable(); | 171 | local_irq_disable(); |
@@ -452,10 +471,6 @@ int __devinit __cpu_up(unsigned int cpu) | |||
452 | if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)) | 471 | if (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu)) |
453 | return -EINVAL; | 472 | return -EINVAL; |
454 | 473 | ||
455 | #ifdef CONFIG_PPC64 | ||
456 | paca[cpu].default_decr = tb_ticks_per_jiffy; | ||
457 | #endif | ||
458 | |||
459 | /* Make sure callin-map entry is 0 (can be leftover a CPU | 474 | /* Make sure callin-map entry is 0 (can be leftover a CPU |
460 | * hotplug | 475 | * hotplug |
461 | */ | 476 | */ |
@@ -554,6 +569,8 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
554 | smp_ops->setup_cpu(boot_cpuid); | 569 | smp_ops->setup_cpu(boot_cpuid); |
555 | 570 | ||
556 | set_cpus_allowed(current, old_mask); | 571 | set_cpus_allowed(current, old_mask); |
572 | |||
573 | dump_numa_cpu_topology(); | ||
557 | } | 574 | } |
558 | 575 | ||
559 | #ifdef CONFIG_HOTPLUG_CPU | 576 | #ifdef CONFIG_HOTPLUG_CPU |
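
crash_send_ipi() piggybacks on the existing debugger-break IPI: the crash callback is published first, a barrier orders the store, and the broadcast then steers every other CPU into the callback rather than the debugger hook. A single-file sketch of that flow; the message-passing layer is faked by a direct call:

    #include <stdio.h>

    struct pt_regs;

    static void (*crash_ipi_function_ptr)(struct pt_regs *);

    /* Receive side, as in smp_message_recv(): a registered crash handler
     * takes priority over the debugger hook.  In the kernel this runs on
     * every CPU that gets PPC_MSG_DEBUGGER_BREAK. */
    static void debugger_break_ipi(struct pt_regs *regs)
    {
            if (crash_ipi_function_ptr) {
                    crash_ipi_function_ptr(regs);
                    return;
            }
            printf("no crash handler, falling through to debugger\n");
    }

    static void crash_handler(struct pt_regs *regs)
    {
            (void)regs;
            printf("CPU entering crash dump path\n");
    }

    int main(void)
    {
            crash_ipi_function_ptr = crash_handler;
            /* In the kernel an mb() here orders the pointer store before the
             * IPI is sent; a plain function call suffices for this sketch. */
            debugger_break_ipi(NULL);
            return 0;
    }
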
diff --git a/arch/powerpc/kernel/syscalls.c b/arch/powerpc/kernel/syscalls.c index 91b93d917b64..ad895c99813b 100644 --- a/arch/powerpc/kernel/syscalls.c +++ b/arch/powerpc/kernel/syscalls.c | |||
@@ -43,9 +43,6 @@ | |||
43 | #include <asm/time.h> | 43 | #include <asm/time.h> |
44 | #include <asm/unistd.h> | 44 | #include <asm/unistd.h> |
45 | 45 | ||
46 | extern unsigned long wall_jiffies; | ||
47 | |||
48 | |||
49 | /* | 46 | /* |
50 | * sys_ipc() is the de-multiplexer for the SysV IPC calls.. | 47 | * sys_ipc() is the de-multiplexer for the SysV IPC calls.. |
51 | * | 48 | * |
@@ -311,31 +308,6 @@ int sys_olduname(struct oldold_utsname __user *name) | |||
311 | return error? -EFAULT: 0; | 308 | return error? -EFAULT: 0; |
312 | } | 309 | } |
313 | 310 | ||
314 | #ifdef CONFIG_PPC64 | ||
315 | time_t sys64_time(time_t __user * tloc) | ||
316 | { | ||
317 | time_t secs; | ||
318 | time_t usecs; | ||
319 | |||
320 | long tb_delta = tb_ticks_since(tb_last_stamp); | ||
321 | tb_delta += (jiffies - wall_jiffies) * tb_ticks_per_jiffy; | ||
322 | |||
323 | secs = xtime.tv_sec; | ||
324 | usecs = (xtime.tv_nsec/1000) + tb_delta / tb_ticks_per_usec; | ||
325 | while (usecs >= USEC_PER_SEC) { | ||
326 | ++secs; | ||
327 | usecs -= USEC_PER_SEC; | ||
328 | } | ||
329 | |||
330 | if (tloc) { | ||
331 | if (put_user(secs,tloc)) | ||
332 | secs = -EFAULT; | ||
333 | } | ||
334 | |||
335 | return secs; | ||
336 | } | ||
337 | #endif | ||
338 | |||
339 | long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, | 311 | long ppc_fadvise64_64(int fd, int advice, u32 offset_high, u32 offset_low, |
340 | u32 len_high, u32 len_low) | 312 | u32 len_high, u32 len_low) |
341 | { | 313 | { |
diff --git a/arch/powerpc/kernel/systbl.S b/arch/powerpc/kernel/systbl.S index 65eaea91b499..65463a1076e8 100644 --- a/arch/powerpc/kernel/systbl.S +++ b/arch/powerpc/kernel/systbl.S | |||
@@ -54,7 +54,7 @@ SYSCALL(link) | |||
54 | SYSCALL(unlink) | 54 | SYSCALL(unlink) |
55 | COMPAT_SYS(execve) | 55 | COMPAT_SYS(execve) |
56 | SYSCALL(chdir) | 56 | SYSCALL(chdir) |
57 | SYSX(sys64_time,compat_sys_time,sys_time) | 57 | COMPAT_SYS(time) |
58 | SYSCALL(mknod) | 58 | SYSCALL(mknod) |
59 | SYSCALL(chmod) | 59 | SYSCALL(chmod) |
60 | SYSCALL(lchown) | 60 | SYSCALL(lchown) |
@@ -113,7 +113,7 @@ SYSCALL(sgetmask) | |||
113 | COMPAT_SYS(ssetmask) | 113 | COMPAT_SYS(ssetmask) |
114 | SYSCALL(setreuid) | 114 | SYSCALL(setreuid) |
115 | SYSCALL(setregid) | 115 | SYSCALL(setregid) |
116 | SYSX(sys_ni_syscall,ppc32_sigsuspend,ppc_sigsuspend) | 116 | SYS32ONLY(sigsuspend) |
117 | COMPAT_SYS(sigpending) | 117 | COMPAT_SYS(sigpending) |
118 | COMPAT_SYS(sethostname) | 118 | COMPAT_SYS(sethostname) |
119 | COMPAT_SYS(setrlimit) | 119 | COMPAT_SYS(setrlimit) |
@@ -160,7 +160,7 @@ SYSCALL(swapoff) | |||
160 | COMPAT_SYS(sysinfo) | 160 | COMPAT_SYS(sysinfo) |
161 | COMPAT_SYS(ipc) | 161 | COMPAT_SYS(ipc) |
162 | SYSCALL(fsync) | 162 | SYSCALL(fsync) |
163 | SYSX(sys_ni_syscall,ppc32_sigreturn,sys_sigreturn) | 163 | SYS32ONLY(sigreturn) |
164 | PPC_SYS(clone) | 164 | PPC_SYS(clone) |
165 | COMPAT_SYS(setdomainname) | 165 | COMPAT_SYS(setdomainname) |
166 | PPC_SYS(newuname) | 166 | PPC_SYS(newuname) |
@@ -213,13 +213,13 @@ COMPAT_SYS(nfsservctl) | |||
213 | SYSCALL(setresgid) | 213 | SYSCALL(setresgid) |
214 | SYSCALL(getresgid) | 214 | SYSCALL(getresgid) |
215 | COMPAT_SYS(prctl) | 215 | COMPAT_SYS(prctl) |
216 | SYSX(ppc64_rt_sigreturn,ppc32_rt_sigreturn,sys_rt_sigreturn) | 216 | COMPAT_SYS(rt_sigreturn) |
217 | COMPAT_SYS(rt_sigaction) | 217 | COMPAT_SYS(rt_sigaction) |
218 | COMPAT_SYS(rt_sigprocmask) | 218 | COMPAT_SYS(rt_sigprocmask) |
219 | COMPAT_SYS(rt_sigpending) | 219 | COMPAT_SYS(rt_sigpending) |
220 | COMPAT_SYS(rt_sigtimedwait) | 220 | COMPAT_SYS(rt_sigtimedwait) |
221 | COMPAT_SYS(rt_sigqueueinfo) | 221 | COMPAT_SYS(rt_sigqueueinfo) |
222 | SYSX(ppc64_rt_sigsuspend,ppc32_rt_sigsuspend,ppc_rt_sigsuspend) | 222 | COMPAT_SYS(rt_sigsuspend) |
223 | COMPAT_SYS(pread64) | 223 | COMPAT_SYS(pread64) |
224 | COMPAT_SYS(pwrite64) | 224 | COMPAT_SYS(pwrite64) |
225 | SYSCALL(chown) | 225 | SYSCALL(chown) |
@@ -290,7 +290,7 @@ COMPAT_SYS(clock_settime) | |||
290 | COMPAT_SYS(clock_gettime) | 290 | COMPAT_SYS(clock_gettime) |
291 | COMPAT_SYS(clock_getres) | 291 | COMPAT_SYS(clock_getres) |
292 | COMPAT_SYS(clock_nanosleep) | 292 | COMPAT_SYS(clock_nanosleep) |
293 | SYSX(ppc64_swapcontext,ppc32_swapcontext,ppc_swapcontext) | 293 | COMPAT_SYS(swapcontext) |
294 | COMPAT_SYS(tgkill) | 294 | COMPAT_SYS(tgkill) |
295 | COMPAT_SYS(utimes) | 295 | COMPAT_SYS(utimes) |
296 | COMPAT_SYS(statfs64) | 296 | COMPAT_SYS(statfs64) |
@@ -319,3 +319,5 @@ COMPAT_SYS(ioprio_get) | |||
319 | SYSCALL(inotify_init) | 319 | SYSCALL(inotify_init) |
320 | SYSCALL(inotify_add_watch) | 320 | SYSCALL(inotify_add_watch) |
321 | SYSCALL(inotify_rm_watch) | 321 | SYSCALL(inotify_rm_watch) |
322 | SYSCALL(spu_run) | ||
323 | SYSCALL(spu_create) | ||
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c index de8479769bb7..56f50e91bddb 100644 --- a/arch/powerpc/kernel/time.c +++ b/arch/powerpc/kernel/time.c | |||
@@ -699,10 +699,6 @@ void __init time_init(void) | |||
699 | div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res); | 699 | div128_by_32(1024*1024, 0, tb_ticks_per_sec, &res); |
700 | tb_to_xs = res.result_low; | 700 | tb_to_xs = res.result_low; |
701 | 701 | ||
702 | #ifdef CONFIG_PPC64 | ||
703 | get_paca()->default_decr = tb_ticks_per_jiffy; | ||
704 | #endif | ||
705 | |||
706 | /* | 702 | /* |
707 | * Compute scale factor for sched_clock. | 703 | * Compute scale factor for sched_clock. |
708 | * The calibrate_decr() function has set tb_ticks_per_sec, | 704 | * The calibrate_decr() function has set tb_ticks_per_sec, |
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index 1511454c4690..7509aa6474f2 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/prctl.h> | 31 | #include <linux/prctl.h> |
32 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
33 | #include <linux/kprobes.h> | 33 | #include <linux/kprobes.h> |
34 | #include <linux/kexec.h> | ||
34 | 35 | ||
35 | #include <asm/kdebug.h> | 36 | #include <asm/kdebug.h> |
36 | #include <asm/pgtable.h> | 37 | #include <asm/pgtable.h> |
@@ -95,7 +96,7 @@ static DEFINE_SPINLOCK(die_lock); | |||
95 | 96 | ||
96 | int die(const char *str, struct pt_regs *regs, long err) | 97 | int die(const char *str, struct pt_regs *regs, long err) |
97 | { | 98 | { |
98 | static int die_counter; | 99 | static int die_counter, crash_dump_start = 0; |
99 | int nl = 0; | 100 | int nl = 0; |
100 | 101 | ||
101 | if (debugger(regs)) | 102 | if (debugger(regs)) |
@@ -156,7 +157,21 @@ int die(const char *str, struct pt_regs *regs, long err) | |||
156 | print_modules(); | 157 | print_modules(); |
157 | show_regs(regs); | 158 | show_regs(regs); |
158 | bust_spinlocks(0); | 159 | bust_spinlocks(0); |
160 | |||
161 | if (!crash_dump_start && kexec_should_crash(current)) { | ||
162 | crash_dump_start = 1; | ||
163 | spin_unlock_irq(&die_lock); | ||
164 | crash_kexec(regs); | ||
165 | /* NOTREACHED */ | ||
166 | } | ||
159 | spin_unlock_irq(&die_lock); | 167 | spin_unlock_irq(&die_lock); |
168 | if (crash_dump_start) | ||
169 | /* | ||
170 | * Only for soft-reset: Other CPUs will be responded to an IPI | ||
171 | * sent by first kexec CPU. | ||
172 | */ | ||
173 | for(;;) | ||
174 | ; | ||
160 | 175 | ||
161 | if (in_interrupt()) | 176 | if (in_interrupt()) |
162 | panic("Fatal exception in interrupt"); | 177 | panic("Fatal exception in interrupt"); |
@@ -215,8 +230,10 @@ void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr) | |||
215 | void system_reset_exception(struct pt_regs *regs) | 230 | void system_reset_exception(struct pt_regs *regs) |
216 | { | 231 | { |
217 | /* See if any machine dependent calls */ | 232 | /* See if any machine dependent calls */ |
218 | if (ppc_md.system_reset_exception) | 233 | if (ppc_md.system_reset_exception) { |
219 | ppc_md.system_reset_exception(regs); | 234 | if (ppc_md.system_reset_exception(regs)) |
235 | return; | ||
236 | } | ||
220 | 237 | ||
221 | die("System Reset", regs, SIGABRT); | 238 | die("System Reset", regs, SIGABRT); |
222 | 239 | ||
@@ -886,12 +903,10 @@ void altivec_unavailable_exception(struct pt_regs *regs) | |||
886 | die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); | 903 | die("Unrecoverable VMX/Altivec Unavailable Exception", regs, SIGABRT); |
887 | } | 904 | } |
888 | 905 | ||
889 | #if defined(CONFIG_PPC64) || defined(CONFIG_E500) | ||
890 | void performance_monitor_exception(struct pt_regs *regs) | 906 | void performance_monitor_exception(struct pt_regs *regs) |
891 | { | 907 | { |
892 | perf_irq(regs); | 908 | perf_irq(regs); |
893 | } | 909 | } |
894 | #endif | ||
895 | 910 | ||
896 | #ifdef CONFIG_8xx | 911 | #ifdef CONFIG_8xx |
897 | void SoftwareEmulation(struct pt_regs *regs) | 912 | void SoftwareEmulation(struct pt_regs *regs) |
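
The die() change latches a crash_dump_start flag so that only the first dying CPU kicks off crash_kexec(), while any CPU that reaches die() afterwards parks itself until it is collected by the crash IPI (for instance after a soft reset). A much-reduced model of that control flow, with stub functions standing in for the kexec machinery:

    #include <stdio.h>
    #include <stdbool.h>

    static bool crash_dump_start;

    static int kexec_should_crash(void) { return 1; } /* assume a crash kernel is loaded */
    static void crash_kexec(void) { printf("jumping to capture kernel\n"); }

    /* Very reduced model of the new die() logic: first CPU in starts the
     * dump, later CPUs spin and wait to be rounded up. */
    static void die_model(void)
    {
            if (!crash_dump_start && kexec_should_crash()) {
                    crash_dump_start = true;
                    crash_kexec();
                    return;         /* NOTREACHED in the real kernel */
            }
            if (crash_dump_start)
                    for (;;)
                            ;       /* parked until the crash IPI arrives */
    }

    int main(void) { die_model(); return 0; }
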
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c index 0d878e72fc44..558c1ceb2b93 100644 --- a/arch/powerpc/kernel/udbg.c +++ b/arch/powerpc/kernel/udbg.c | |||
@@ -16,8 +16,8 @@ | |||
16 | #include <linux/console.h> | 16 | #include <linux/console.h> |
17 | #include <asm/processor.h> | 17 | #include <asm/processor.h> |
18 | 18 | ||
19 | void (*udbg_putc)(unsigned char c); | 19 | void (*udbg_putc)(char c); |
20 | unsigned char (*udbg_getc)(void); | 20 | int (*udbg_getc)(void); |
21 | int (*udbg_getc_poll)(void); | 21 | int (*udbg_getc_poll)(void); |
22 | 22 | ||
23 | /* udbg library, used by xmon et al */ | 23 | /* udbg library, used by xmon et al */ |
@@ -57,8 +57,8 @@ int udbg_write(const char *s, int n) | |||
57 | 57 | ||
58 | int udbg_read(char *buf, int buflen) | 58 | int udbg_read(char *buf, int buflen) |
59 | { | 59 | { |
60 | char c, *p = buf; | 60 | char *p = buf; |
61 | int i; | 61 | int i, c; |
62 | 62 | ||
63 | if (!udbg_getc) | 63 | if (!udbg_getc) |
64 | return 0; | 64 | return 0; |
@@ -66,8 +66,11 @@ int udbg_read(char *buf, int buflen) | |||
66 | for (i = 0; i < buflen; ++i) { | 66 | for (i = 0; i < buflen; ++i) { |
67 | do { | 67 | do { |
68 | c = udbg_getc(); | 68 | c = udbg_getc(); |
69 | if (c == -1 && i == 0) | ||
70 | return -1; | ||
71 | |||
69 | } while (c == 0x11 || c == 0x13); | 72 | } while (c == 0x11 || c == 0x13); |
70 | if (c == 0) | 73 | if (c == 0 || c == -1) |
71 | break; | 74 | break; |
72 | *p++ = c; | 75 | *p++ = c; |
73 | } | 76 | } |
@@ -78,7 +81,7 @@ int udbg_read(char *buf, int buflen) | |||
78 | #define UDBG_BUFSIZE 256 | 81 | #define UDBG_BUFSIZE 256 |
79 | void udbg_printf(const char *fmt, ...) | 82 | void udbg_printf(const char *fmt, ...) |
80 | { | 83 | { |
81 | unsigned char buf[UDBG_BUFSIZE]; | 84 | char buf[UDBG_BUFSIZE]; |
82 | va_list args; | 85 | va_list args; |
83 | 86 | ||
84 | va_start(args, fmt); | 87 | va_start(args, fmt); |
@@ -87,6 +90,12 @@ void udbg_printf(const char *fmt, ...) | |||
87 | va_end(args); | 90 | va_end(args); |
88 | } | 91 | } |
89 | 92 | ||
93 | void __init udbg_progress(char *s, unsigned short hex) | ||
94 | { | ||
95 | udbg_puts(s); | ||
96 | udbg_puts("\n"); | ||
97 | } | ||
98 | |||
90 | /* | 99 | /* |
91 | * Early boot console based on udbg | 100 | * Early boot console based on udbg |
92 | */ | 101 | */ |
@@ -99,7 +108,7 @@ static void udbg_console_write(struct console *con, const char *s, | |||
99 | static struct console udbg_console = { | 108 | static struct console udbg_console = { |
100 | .name = "udbg", | 109 | .name = "udbg", |
101 | .write = udbg_console_write, | 110 | .write = udbg_console_write, |
102 | .flags = CON_PRINTBUFFER, | 111 | .flags = CON_PRINTBUFFER | CON_ENABLED, |
103 | .index = -1, | 112 | .index = -1, |
104 | }; | 113 | }; |
105 | 114 | ||
@@ -107,15 +116,19 @@ static int early_console_initialized; | |||
107 | 116 | ||
108 | void __init disable_early_printk(void) | 117 | void __init disable_early_printk(void) |
109 | { | 118 | { |
119 | #if 1 | ||
110 | if (!early_console_initialized) | 120 | if (!early_console_initialized) |
111 | return; | 121 | return; |
112 | unregister_console(&udbg_console); | 122 | unregister_console(&udbg_console); |
113 | early_console_initialized = 0; | 123 | early_console_initialized = 0; |
124 | #endif | ||
114 | } | 125 | } |
115 | 126 | ||
116 | /* called by setup_system */ | 127 | /* called by setup_system */ |
117 | void register_early_udbg_console(void) | 128 | void register_early_udbg_console(void) |
118 | { | 129 | { |
130 | if (early_console_initialized) | ||
131 | return; | ||
119 | early_console_initialized = 1; | 132 | early_console_initialized = 1; |
120 | register_console(&udbg_console); | 133 | register_console(&udbg_console); |
121 | } | 134 | } |
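
udbg is just a trio of function pointers that a back end (16550, btext, hvterm, ...) fills in; the prototype change lets udbg_getc() return -1 for "no input" instead of overloading '\0'. A self-contained sketch of a back end plus a read loop following the new convention (the XON/XOFF skipping done by the real udbg_read() is omitted):

    #include <stdio.h>

    static void (*udbg_putc)(char c);
    static int (*udbg_getc)(void);

    static void demo_putc(char c) { putchar(c); }
    static int demo_getc(void) { return -1; }  /* pretend no input is wired up */

    /* Simplified udbg_read(): -1 from the back end on the very first
     * character means "nothing to read", otherwise it terminates the loop. */
    static int udbg_read_model(char *buf, int buflen)
    {
            int i, c;
            char *p = buf;

            if (!udbg_getc)
                    return 0;
            for (i = 0; i < buflen; ++i) {
                    c = udbg_getc();
                    if (c == -1 && i == 0)
                            return -1;
                    if (c == 0 || c == -1)
                            break;
                    *p++ = c;
            }
            return i;
    }

    int main(void)
    {
            char buf[16];

            udbg_putc = demo_putc;
            udbg_getc = demo_getc;
            udbg_putc('!');
            printf("\nread returned %d\n", udbg_read_model(buf, sizeof(buf)));
            return 0;
    }
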
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c index 9313574ab935..7541bf44d2da 100644 --- a/arch/powerpc/kernel/udbg_16550.c +++ b/arch/powerpc/kernel/udbg_16550.c | |||
@@ -43,9 +43,11 @@ struct NS16550 { | |||
43 | #define LSR_TEMT 0x40 /* Xmitter empty */ | 43 | #define LSR_TEMT 0x40 /* Xmitter empty */ |
44 | #define LSR_ERR 0x80 /* Error */ | 44 | #define LSR_ERR 0x80 /* Error */ |
45 | 45 | ||
46 | #define LCR_DLAB 0x80 | ||
47 | |||
46 | static volatile struct NS16550 __iomem *udbg_comport; | 48 | static volatile struct NS16550 __iomem *udbg_comport; |
47 | 49 | ||
48 | static void udbg_550_putc(unsigned char c) | 50 | static void udbg_550_putc(char c) |
49 | { | 51 | { |
50 | if (udbg_comport) { | 52 | if (udbg_comport) { |
51 | while ((in_8(&udbg_comport->lsr) & LSR_THRE) == 0) | 53 | while ((in_8(&udbg_comport->lsr) & LSR_THRE) == 0) |
@@ -67,39 +69,80 @@ static int udbg_550_getc_poll(void) | |||
67 | return -1; | 69 | return -1; |
68 | } | 70 | } |
69 | 71 | ||
70 | static unsigned char udbg_550_getc(void) | 72 | static int udbg_550_getc(void) |
71 | { | 73 | { |
72 | if (udbg_comport) { | 74 | if (udbg_comport) { |
73 | while ((in_8(&udbg_comport->lsr) & LSR_DR) == 0) | 75 | while ((in_8(&udbg_comport->lsr) & LSR_DR) == 0) |
74 | /* wait for char */; | 76 | /* wait for char */; |
75 | return in_8(&udbg_comport->rbr); | 77 | return in_8(&udbg_comport->rbr); |
76 | } | 78 | } |
77 | return 0; | 79 | return -1; |
78 | } | 80 | } |
79 | 81 | ||
80 | void udbg_init_uart(void __iomem *comport, unsigned int speed) | 82 | void udbg_init_uart(void __iomem *comport, unsigned int speed, |
83 | unsigned int clock) | ||
81 | { | 84 | { |
82 | u16 dll = speed ? (115200 / speed) : 12; | 85 | unsigned int dll, base_bauds = clock / 16; |
86 | |||
87 | if (speed == 0) | ||
88 | speed = 9600; | ||
89 | dll = base_bauds / speed; | ||
83 | 90 | ||
84 | if (comport) { | 91 | if (comport) { |
85 | udbg_comport = (struct NS16550 __iomem *)comport; | 92 | udbg_comport = (struct NS16550 __iomem *)comport; |
86 | out_8(&udbg_comport->lcr, 0x00); | 93 | out_8(&udbg_comport->lcr, 0x00); |
87 | out_8(&udbg_comport->ier, 0xff); | 94 | out_8(&udbg_comport->ier, 0xff); |
88 | out_8(&udbg_comport->ier, 0x00); | 95 | out_8(&udbg_comport->ier, 0x00); |
89 | out_8(&udbg_comport->lcr, 0x80); /* Access baud rate */ | 96 | out_8(&udbg_comport->lcr, LCR_DLAB); |
90 | out_8(&udbg_comport->dll, dll & 0xff); /* 1 = 115200, 2 = 57600, | 97 | out_8(&udbg_comport->dll, dll & 0xff); |
91 | 3 = 38400, 12 = 9600 baud */ | 98 | out_8(&udbg_comport->dlm, dll >> 8); |
92 | out_8(&udbg_comport->dlm, dll >> 8); /* dll >> 8 which should be zero | 99 | /* 8 data, 1 stop, no parity */ |
93 | for fast rates; */ | 100 | out_8(&udbg_comport->lcr, 0x03); |
94 | out_8(&udbg_comport->lcr, 0x03); /* 8 data, 1 stop, no parity */ | 101 | /* RTS/DTR */ |
95 | out_8(&udbg_comport->mcr, 0x03); /* RTS/DTR */ | 102 | out_8(&udbg_comport->mcr, 0x03); |
96 | out_8(&udbg_comport->fcr ,0x07); /* Clear & enable FIFOs */ | 103 | /* Clear & enable FIFOs */ |
104 | out_8(&udbg_comport->fcr ,0x07); | ||
97 | udbg_putc = udbg_550_putc; | 105 | udbg_putc = udbg_550_putc; |
98 | udbg_getc = udbg_550_getc; | 106 | udbg_getc = udbg_550_getc; |
99 | udbg_getc_poll = udbg_550_getc_poll; | 107 | udbg_getc_poll = udbg_550_getc_poll; |
100 | } | 108 | } |
101 | } | 109 | } |
102 | 110 | ||
111 | unsigned int udbg_probe_uart_speed(void __iomem *comport, unsigned int clock) | ||
112 | { | ||
113 | unsigned int dll, dlm, divisor, prescaler, speed; | ||
114 | u8 old_lcr; | ||
115 | volatile struct NS16550 __iomem *port = comport; | ||
116 | |||
117 | old_lcr = in_8(&port->lcr); | ||
118 | |||
119 | /* select divisor latch registers. */ | ||
120 | out_8(&port->lcr, LCR_DLAB); | ||
121 | |||
122 | /* now, read the divisor */ | ||
123 | dll = in_8(&port->dll); | ||
124 | dlm = in_8(&port->dlm); | ||
125 | divisor = dlm << 8 | dll; | ||
126 | |||
127 | /* check prescaling */ | ||
128 | if (in_8(&port->mcr) & 0x80) | ||
129 | prescaler = 4; | ||
130 | else | ||
131 | prescaler = 1; | ||
132 | |||
133 | /* restore the LCR */ | ||
134 | out_8(&port->lcr, old_lcr); | ||
135 | |||
136 | /* calculate speed */ | ||
137 | speed = (clock / prescaler) / (divisor * 16); | ||
138 | |||
139 | /* sanity check */ | ||
140 | if (speed < 0 || speed > (clock / 16)) | ||
141 | speed = 9600; | ||
142 | |||
143 | return speed; | ||
144 | } | ||
145 | |||
103 | #ifdef CONFIG_PPC_MAPLE | 146 | #ifdef CONFIG_PPC_MAPLE |
104 | void udbg_maple_real_putc(unsigned char c) | 147 | void udbg_maple_real_putc(unsigned char c) |
105 | { | 148 | { |
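
The 16550 setup and probe above are just divisor arithmetic: the divisor latch holds clock / (16 * speed), and probing reverses it, allowing for the /4 prescaler that the code assumes is flagged by MCR bit 7. A small standalone check of both directions, using the classic 1.8432 MHz UART clock:

    #include <stdio.h>

    static unsigned int divisor_for(unsigned int clock, unsigned int speed)
    {
            return (clock / 16) / speed;
    }

    static unsigned int speed_from(unsigned int clock, unsigned int divisor,
                                   unsigned int prescaler)
    {
            return (clock / prescaler) / (divisor * 16);
    }

    int main(void)
    {
            unsigned int clock = 1843200;           /* classic PC UART clock */
            unsigned int div = divisor_for(clock, 9600);

            printf("9600 baud -> divisor %u\n", div);                 /* 12 */
            printf("divisor %u -> %u baud\n", div, speed_from(clock, div, 1));
            return 0;
    }
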
diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c index 93d4fbfdb724..a4815d316722 100644 --- a/arch/powerpc/mm/fault.c +++ b/arch/powerpc/mm/fault.c | |||
@@ -81,7 +81,8 @@ static int store_updates_sp(struct pt_regs *regs) | |||
81 | } | 81 | } |
82 | 82 | ||
83 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) | 83 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) |
84 | static void do_dabr(struct pt_regs *regs, unsigned long error_code) | 84 | static void do_dabr(struct pt_regs *regs, unsigned long address, |
85 | unsigned long error_code) | ||
85 | { | 86 | { |
86 | siginfo_t info; | 87 | siginfo_t info; |
87 | 88 | ||
@@ -99,7 +100,7 @@ static void do_dabr(struct pt_regs *regs, unsigned long error_code) | |||
99 | info.si_signo = SIGTRAP; | 100 | info.si_signo = SIGTRAP; |
100 | info.si_errno = 0; | 101 | info.si_errno = 0; |
101 | info.si_code = TRAP_HWBKPT; | 102 | info.si_code = TRAP_HWBKPT; |
102 | info.si_addr = (void __user *)regs->nip; | 103 | info.si_addr = (void __user *)address; |
103 | force_sig_info(SIGTRAP, &info, current); | 104 | force_sig_info(SIGTRAP, &info, current); |
104 | } | 105 | } |
105 | #endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/ | 106 | #endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/ |
@@ -159,7 +160,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address, | |||
159 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) | 160 | #if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE)) |
160 | if (error_code & DSISR_DABRMATCH) { | 161 | if (error_code & DSISR_DABRMATCH) { |
161 | /* DABR match */ | 162 | /* DABR match */ |
162 | do_dabr(regs, error_code); | 163 | do_dabr(regs, address, error_code); |
163 | return 0; | 164 | return 0; |
164 | } | 165 | } |
165 | #endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/ | 166 | #endif /* !(CONFIG_4xx || CONFIG_BOOKE)*/ |
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index a606504678bd..5bb433cbe41b 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c | |||
@@ -456,7 +456,7 @@ void __init htab_initialize(void) | |||
456 | 456 | ||
457 | /* create bolted the linear mapping in the hash table */ | 457 | /* create bolted the linear mapping in the hash table */ |
458 | for (i=0; i < lmb.memory.cnt; i++) { | 458 | for (i=0; i < lmb.memory.cnt; i++) { |
459 | base = lmb.memory.region[i].base + KERNELBASE; | 459 | base = (unsigned long)__va(lmb.memory.region[i].base); |
460 | size = lmb.memory.region[i].size; | 460 | size = lmb.memory.region[i].size; |
461 | 461 | ||
462 | DBG("creating mapping for region: %lx : %lx\n", base, size); | 462 | DBG("creating mapping for region: %lx : %lx\n", base, size); |
@@ -498,8 +498,8 @@ void __init htab_initialize(void) | |||
498 | * for either 4K or 16MB pages. | 498 | * for either 4K or 16MB pages. |
499 | */ | 499 | */ |
500 | if (tce_alloc_start) { | 500 | if (tce_alloc_start) { |
501 | tce_alloc_start += KERNELBASE; | 501 | tce_alloc_start = (unsigned long)__va(tce_alloc_start); |
502 | tce_alloc_end += KERNELBASE; | 502 | tce_alloc_end = (unsigned long)__va(tce_alloc_end); |
503 | 503 | ||
504 | if (base + size >= tce_alloc_start) | 504 | if (base + size >= tce_alloc_start) |
505 | tce_alloc_start = base + size + 1; | 505 | tce_alloc_start = base + size + 1; |
@@ -644,6 +644,7 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) | |||
644 | DBG_LOW(" -> rc=%d\n", rc); | 644 | DBG_LOW(" -> rc=%d\n", rc); |
645 | return rc; | 645 | return rc; |
646 | } | 646 | } |
647 | EXPORT_SYMBOL_GPL(hash_page); | ||
647 | 648 | ||
648 | void hash_preload(struct mm_struct *mm, unsigned long ea, | 649 | void hash_preload(struct mm_struct *mm, unsigned long ea, |
649 | unsigned long access, unsigned long trap) | 650 | unsigned long access, unsigned long trap) |
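
Swapping "+ KERNELBASE" for __va() in htab_initialize() matters once the kernel can be linked at a non-zero PHYSICAL_START (the kdump case): the linear-mapping offset is then no longer the same as KERNELBASE. A toy comparison under the assumption that KERNELBASE = PAGE_OFFSET + PHYSICAL_START; the constants are illustrative, not the kernel's:

    #include <stdio.h>

    #define PAGE_OFFSET     0xc000000000000000UL
    #define PHYSICAL_START  0x2000000UL            /* e.g. a capture kernel at 32MB */
    #define KERNELBASE      (PAGE_OFFSET + PHYSICAL_START)

    /* __va()-style conversion: the linear mapping of RAM starts at
     * PAGE_OFFSET regardless of where the kernel image is linked. */
    static unsigned long toy_va(unsigned long phys)
    {
            return phys + PAGE_OFFSET;
    }

    int main(void)
    {
            unsigned long phys = 0x3000000UL;

            printf("__va-style:        %#lx\n", toy_va(phys));
            printf("phys + KERNELBASE: %#lx\n", phys + KERNELBASE);
            return 0;
    }
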
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c index 54131b877da3..b51bb28c054b 100644 --- a/arch/powerpc/mm/hugetlbpage.c +++ b/arch/powerpc/mm/hugetlbpage.c | |||
@@ -549,6 +549,17 @@ fail: | |||
549 | return addr; | 549 | return addr; |
550 | } | 550 | } |
551 | 551 | ||
552 | static int htlb_check_hinted_area(unsigned long addr, unsigned long len) | ||
553 | { | ||
554 | struct vm_area_struct *vma; | ||
555 | |||
556 | vma = find_vma(current->mm, addr); | ||
557 | if (!vma || ((addr + len) <= vma->vm_start)) | ||
558 | return 0; | ||
559 | |||
560 | return -ENOMEM; | ||
561 | } | ||
562 | |||
552 | static unsigned long htlb_get_low_area(unsigned long len, u16 segmask) | 563 | static unsigned long htlb_get_low_area(unsigned long len, u16 segmask) |
553 | { | 564 | { |
554 | unsigned long addr = 0; | 565 | unsigned long addr = 0; |
@@ -618,15 +629,28 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
618 | if (!cpu_has_feature(CPU_FTR_16M_PAGE)) | 629 | if (!cpu_has_feature(CPU_FTR_16M_PAGE)) |
619 | return -EINVAL; | 630 | return -EINVAL; |
620 | 631 | ||
632 | /* Paranoia, caller should have dealt with this */ | ||
633 | BUG_ON((addr + len) < addr); | ||
634 | |||
621 | if (test_thread_flag(TIF_32BIT)) { | 635 | if (test_thread_flag(TIF_32BIT)) { |
636 | /* Paranoia, caller should have dealt with this */ | ||
637 | BUG_ON((addr + len) > 0x100000000UL); | ||
638 | |||
622 | curareas = current->mm->context.low_htlb_areas; | 639 | curareas = current->mm->context.low_htlb_areas; |
623 | 640 | ||
624 | /* First see if we can do the mapping in the existing | 641 | /* First see if we can use the hint address */ |
625 | * low areas */ | 642 | if (addr && (htlb_check_hinted_area(addr, len) == 0)) { |
643 | areamask = LOW_ESID_MASK(addr, len); | ||
644 | if (open_low_hpage_areas(current->mm, areamask) == 0) | ||
645 | return addr; | ||
646 | } | ||
647 | |||
648 | /* Next see if we can map in the existing low areas */ | ||
626 | addr = htlb_get_low_area(len, curareas); | 649 | addr = htlb_get_low_area(len, curareas); |
627 | if (addr != -ENOMEM) | 650 | if (addr != -ENOMEM) |
628 | return addr; | 651 | return addr; |
629 | 652 | ||
653 | /* Finally go looking for areas to open */ | ||
630 | lastshift = 0; | 654 | lastshift = 0; |
631 | for (areamask = LOW_ESID_MASK(0x100000000UL-len, len); | 655 | for (areamask = LOW_ESID_MASK(0x100000000UL-len, len); |
632 | ! lastshift; areamask >>=1) { | 656 | ! lastshift; areamask >>=1) { |
@@ -641,12 +665,22 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, | |||
641 | } else { | 665 | } else { |
642 | curareas = current->mm->context.high_htlb_areas; | 666 | curareas = current->mm->context.high_htlb_areas; |
643 | 667 | ||
644 | /* First see if we can do the mapping in the existing | 668 | /* First see if we can use the hint address */ |
645 | * high areas */ | 669 | /* We discourage 64-bit processes from doing hugepage |
670 | * mappings below 4GB (must use MAP_FIXED) */ | ||
671 | if ((addr >= 0x100000000UL) | ||
672 | && (htlb_check_hinted_area(addr, len) == 0)) { | ||
673 | areamask = HTLB_AREA_MASK(addr, len); | ||
674 | if (open_high_hpage_areas(current->mm, areamask) == 0) | ||
675 | return addr; | ||
676 | } | ||
677 | |||
678 | /* Next see if we can map in the existing high areas */ | ||
646 | addr = htlb_get_high_area(len, curareas); | 679 | addr = htlb_get_high_area(len, curareas); |
647 | if (addr != -ENOMEM) | 680 | if (addr != -ENOMEM) |
648 | return addr; | 681 | return addr; |
649 | 682 | ||
683 | /* Finally go looking for areas to open */ | ||
650 | lastshift = 0; | 684 | lastshift = 0; |
651 | for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len); | 685 | for (areamask = HTLB_AREA_MASK(TASK_SIZE_USER64-len, len); |
652 | ! lastshift; areamask >>=1) { | 686 | ! lastshift; areamask >>=1) { |
diff --git a/arch/powerpc/mm/imalloc.c b/arch/powerpc/mm/imalloc.c index f9587bcc6a48..8b0c132bc163 100644 --- a/arch/powerpc/mm/imalloc.c +++ b/arch/powerpc/mm/imalloc.c | |||
@@ -107,6 +107,7 @@ static int im_region_status(unsigned long v_addr, unsigned long size, | |||
107 | if (v_addr < (unsigned long) tmp->addr + tmp->size) | 107 | if (v_addr < (unsigned long) tmp->addr + tmp->size) |
108 | break; | 108 | break; |
109 | 109 | ||
110 | *vm = NULL; | ||
110 | if (tmp) { | 111 | if (tmp) { |
111 | if (im_region_overlaps(v_addr, size, tmp)) | 112 | if (im_region_overlaps(v_addr, size, tmp)) |
112 | return IM_REGION_OVERLAP; | 113 | return IM_REGION_OVERLAP; |
@@ -127,7 +128,6 @@ static int im_region_status(unsigned long v_addr, unsigned long size, | |||
127 | } | 128 | } |
128 | } | 129 | } |
129 | 130 | ||
130 | *vm = NULL; | ||
131 | return IM_REGION_UNUSED; | 131 | return IM_REGION_UNUSED; |
132 | } | 132 | } |
133 | 133 | ||
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 7d4b8b5f0606..7d0d75c11848 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c | |||
@@ -188,6 +188,11 @@ void __init MMU_init(void) | |||
188 | 188 | ||
189 | if (ppc_md.progress) | 189 | if (ppc_md.progress) |
190 | ppc_md.progress("MMU:exit", 0x211); | 190 | ppc_md.progress("MMU:exit", 0x211); |
191 | |||
192 | /* From now on, btext is no longer BAT mapped if it was at all */ | ||
193 | #ifdef CONFIG_BOOTX_TEXT | ||
194 | btext_unmap(); | ||
195 | #endif | ||
191 | } | 196 | } |
192 | 197 | ||
193 | /* This is only called until mem_init is done. */ | 198 | /* This is only called until mem_init is done. */ |
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index ed6ed2e30dac..15aac0d78dfa 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c | |||
@@ -114,19 +114,18 @@ void online_page(struct page *page) | |||
114 | num_physpages++; | 114 | num_physpages++; |
115 | } | 115 | } |
116 | 116 | ||
117 | /* | ||
118 | * This works only for the non-NUMA case. Later, we'll need a lookup | ||
119 | * to convert from real physical addresses to nid, that doesn't use | ||
120 | * pfn_to_nid(). | ||
121 | */ | ||
122 | int __devinit add_memory(u64 start, u64 size) | 117 | int __devinit add_memory(u64 start, u64 size) |
123 | { | 118 | { |
124 | struct pglist_data *pgdata = NODE_DATA(0); | 119 | struct pglist_data *pgdata; |
125 | struct zone *zone; | 120 | struct zone *zone; |
121 | int nid; | ||
126 | unsigned long start_pfn = start >> PAGE_SHIFT; | 122 | unsigned long start_pfn = start >> PAGE_SHIFT; |
127 | unsigned long nr_pages = size >> PAGE_SHIFT; | 123 | unsigned long nr_pages = size >> PAGE_SHIFT; |
128 | 124 | ||
129 | start += KERNELBASE; | 125 | nid = hot_add_scn_to_nid(start); |
126 | pgdata = NODE_DATA(nid); | ||
127 | |||
128 | start = __va(start); | ||
130 | create_section_mapping(start, start + size); | 129 | create_section_mapping(start, start + size); |
131 | 130 | ||
132 | /* this should work for most non-highmem platforms */ | 131 | /* this should work for most non-highmem platforms */ |
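Two changes meet here: the destination node for the hot-added range now comes from hot_add_scn_to_nid() (added to numa.c below), and the physical start address is turned into a linear-map virtual address with __va() instead of by adding KERNELBASE, in line with the PAGE_OFFSET conversions elsewhere in this series. As a rough sketch of what __va()/__pa() amount to on ppc64 (the offset value is an assumption for the example, not taken from the headers):

/* illustration only: linear-map conversion as plain offset arithmetic */
#define EX_PAGE_OFFSET  0xc000000000000000UL    /* assumed ppc64 value */

static inline void *ex_va(unsigned long phys)
{
        return (void *)(phys + EX_PAGE_OFFSET);
}

static inline unsigned long ex_pa(const void *virt)
{
        return (unsigned long)virt - EX_PAGE_OFFSET;
}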
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index ba7a3055a9fc..2863a912bcd0 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
@@ -37,6 +37,7 @@ EXPORT_SYMBOL(node_data); | |||
37 | 37 | ||
38 | static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES]; | 38 | static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES]; |
39 | static int min_common_depth; | 39 | static int min_common_depth; |
40 | static int n_mem_addr_cells, n_mem_size_cells; | ||
40 | 41 | ||
41 | /* | 42 | /* |
42 | * We need somewhere to store start/end/node for each region until we have | 43 | * We need somewhere to store start/end/node for each region until we have |
@@ -254,32 +255,20 @@ static int __init find_min_common_depth(void) | |||
254 | return depth; | 255 | return depth; |
255 | } | 256 | } |
256 | 257 | ||
257 | static int __init get_mem_addr_cells(void) | 258 | static void __init get_n_mem_cells(int *n_addr_cells, int *n_size_cells) |
258 | { | 259 | { |
259 | struct device_node *memory = NULL; | 260 | struct device_node *memory = NULL; |
260 | int rc; | ||
261 | 261 | ||
262 | memory = of_find_node_by_type(memory, "memory"); | 262 | memory = of_find_node_by_type(memory, "memory"); |
263 | if (!memory) | 263 | if (!memory) |
264 | return 0; /* it won't matter */ | 264 | panic("numa.c: No memory nodes found!"); |
265 | 265 | ||
266 | rc = prom_n_addr_cells(memory); | 266 | *n_addr_cells = prom_n_addr_cells(memory); |
267 | return rc; | 267 | *n_size_cells = prom_n_size_cells(memory); |
268 | of_node_put(memory); | ||
268 | } | 269 | } |
269 | 270 | ||
270 | static int __init get_mem_size_cells(void) | 271 | static unsigned long __devinit read_n_cells(int n, unsigned int **buf) |
271 | { | ||
272 | struct device_node *memory = NULL; | ||
273 | int rc; | ||
274 | |||
275 | memory = of_find_node_by_type(memory, "memory"); | ||
276 | if (!memory) | ||
277 | return 0; /* it won't matter */ | ||
278 | rc = prom_n_size_cells(memory); | ||
279 | return rc; | ||
280 | } | ||
281 | |||
282 | static unsigned long __init read_n_cells(int n, unsigned int **buf) | ||
283 | { | 272 | { |
284 | unsigned long result = 0; | 273 | unsigned long result = 0; |
285 | 274 | ||
@@ -386,7 +375,6 @@ static int __init parse_numa_properties(void) | |||
386 | { | 375 | { |
387 | struct device_node *cpu = NULL; | 376 | struct device_node *cpu = NULL; |
388 | struct device_node *memory = NULL; | 377 | struct device_node *memory = NULL; |
389 | int addr_cells, size_cells; | ||
390 | int max_domain; | 378 | int max_domain; |
391 | unsigned long i; | 379 | unsigned long i; |
392 | 380 | ||
@@ -425,8 +413,7 @@ static int __init parse_numa_properties(void) | |||
425 | } | 413 | } |
426 | } | 414 | } |
427 | 415 | ||
428 | addr_cells = get_mem_addr_cells(); | 416 | get_n_mem_cells(&n_mem_addr_cells, &n_mem_size_cells); |
429 | size_cells = get_mem_size_cells(); | ||
430 | memory = NULL; | 417 | memory = NULL; |
431 | while ((memory = of_find_node_by_type(memory, "memory")) != NULL) { | 418 | while ((memory = of_find_node_by_type(memory, "memory")) != NULL) { |
432 | unsigned long start; | 419 | unsigned long start; |
@@ -436,15 +423,21 @@ static int __init parse_numa_properties(void) | |||
436 | unsigned int *memcell_buf; | 423 | unsigned int *memcell_buf; |
437 | unsigned int len; | 424 | unsigned int len; |
438 | 425 | ||
439 | memcell_buf = (unsigned int *)get_property(memory, "reg", &len); | 426 | memcell_buf = (unsigned int *)get_property(memory, |
427 | "linux,usable-memory", &len); | ||
428 | if (!memcell_buf || len <= 0) | ||
429 | memcell_buf = | ||
430 | (unsigned int *)get_property(memory, "reg", | ||
431 | &len); | ||
440 | if (!memcell_buf || len <= 0) | 432 | if (!memcell_buf || len <= 0) |
441 | continue; | 433 | continue; |
442 | 434 | ||
443 | ranges = memory->n_addrs; | 435 | /* ranges in cell */ |
436 | ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells); | ||
444 | new_range: | 437 | new_range: |
445 | /* these are order-sensitive, and modify the buffer pointer */ | 438 | /* these are order-sensitive, and modify the buffer pointer */ |
446 | start = read_n_cells(addr_cells, &memcell_buf); | 439 | start = read_n_cells(n_mem_addr_cells, &memcell_buf); |
447 | size = read_n_cells(size_cells, &memcell_buf); | 440 | size = read_n_cells(n_mem_size_cells, &memcell_buf); |
448 | 441 | ||
449 | numa_domain = of_node_numa_domain(memory); | 442 | numa_domain = of_node_numa_domain(memory); |
450 | 443 | ||
@@ -497,7 +490,41 @@ static void __init setup_nonnuma(void) | |||
497 | node_set_online(0); | 490 | node_set_online(0); |
498 | } | 491 | } |
499 | 492 | ||
500 | static void __init dump_numa_topology(void) | 493 | void __init dump_numa_cpu_topology(void) |
494 | { | ||
495 | unsigned int node; | ||
496 | unsigned int cpu, count; | ||
497 | |||
498 | if (min_common_depth == -1 || !numa_enabled) | ||
499 | return; | ||
500 | |||
501 | for_each_online_node(node) { | ||
502 | printk(KERN_INFO "Node %d CPUs:", node); | ||
503 | |||
504 | count = 0; | ||
505 | /* | ||
506 | * If we used a CPU iterator here we would miss printing | ||
507 | * the holes in the cpumap. | ||
508 | */ | ||
509 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
510 | if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) { | ||
511 | if (count == 0) | ||
512 | printk(" %u", cpu); | ||
513 | ++count; | ||
514 | } else { | ||
515 | if (count > 1) | ||
516 | printk("-%u", cpu - 1); | ||
517 | count = 0; | ||
518 | } | ||
519 | } | ||
520 | |||
521 | if (count > 1) | ||
522 | printk("-%u", NR_CPUS - 1); | ||
523 | printk("\n"); | ||
524 | } | ||
525 | } | ||
526 | |||
527 | static void __init dump_numa_memory_topology(void) | ||
501 | { | 528 | { |
502 | unsigned int node; | 529 | unsigned int node; |
503 | unsigned int count; | 530 | unsigned int count; |
@@ -529,7 +556,6 @@ static void __init dump_numa_topology(void) | |||
529 | printk("-0x%lx", i); | 556 | printk("-0x%lx", i); |
530 | printk("\n"); | 557 | printk("\n"); |
531 | } | 558 | } |
532 | return; | ||
533 | } | 559 | } |
534 | 560 | ||
535 | /* | 561 | /* |
@@ -591,7 +617,7 @@ void __init do_init_bootmem(void) | |||
591 | if (parse_numa_properties()) | 617 | if (parse_numa_properties()) |
592 | setup_nonnuma(); | 618 | setup_nonnuma(); |
593 | else | 619 | else |
594 | dump_numa_topology(); | 620 | dump_numa_memory_topology(); |
595 | 621 | ||
596 | register_cpu_notifier(&ppc64_numa_nb); | 622 | register_cpu_notifier(&ppc64_numa_nb); |
597 | 623 | ||
@@ -730,3 +756,60 @@ static int __init early_numa(char *p) | |||
730 | return 0; | 756 | return 0; |
731 | } | 757 | } |
732 | early_param("numa", early_numa); | 758 | early_param("numa", early_numa); |
759 | |||
760 | #ifdef CONFIG_MEMORY_HOTPLUG | ||
761 | /* | ||
762 | * Find the node associated with a hot added memory section. Section | ||
763 | * corresponds to a SPARSEMEM section, not an LMB. It is assumed that | ||
764 | * sections are fully contained within a single LMB. | ||
765 | */ | ||
766 | int hot_add_scn_to_nid(unsigned long scn_addr) | ||
767 | { | ||
768 | struct device_node *memory = NULL; | ||
769 | nodemask_t nodes; | ||
770 | int numa_domain = 0; | ||
771 | |||
772 | if (!numa_enabled || (min_common_depth < 0)) | ||
773 | return numa_domain; | ||
774 | |||
775 | while ((memory = of_find_node_by_type(memory, "memory")) != NULL) { | ||
776 | unsigned long start, size; | ||
777 | int ranges; | ||
778 | unsigned int *memcell_buf; | ||
779 | unsigned int len; | ||
780 | |||
781 | memcell_buf = (unsigned int *)get_property(memory, "reg", &len); | ||
782 | if (!memcell_buf || len <= 0) | ||
783 | continue; | ||
784 | |||
785 | /* ranges in cell */ | ||
786 | ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells); | ||
787 | ha_new_range: | ||
788 | start = read_n_cells(n_mem_addr_cells, &memcell_buf); | ||
789 | size = read_n_cells(n_mem_size_cells, &memcell_buf); | ||
790 | numa_domain = of_node_numa_domain(memory); | ||
791 | |||
792 | /* Domains not present at boot default to 0 */ | ||
793 | if (!node_online(numa_domain)) | ||
794 | numa_domain = any_online_node(NODE_MASK_ALL); | ||
795 | |||
796 | if ((scn_addr >= start) && (scn_addr < (start + size))) { | ||
797 | of_node_put(memory); | ||
798 | goto got_numa_domain; | ||
799 | } | ||
800 | |||
801 | if (--ranges) /* process all ranges in cell */ | ||
802 | goto ha_new_range; | ||
803 | } | ||
804 | BUG(); /* section address should be found above */ | ||
805 | |||
806 | /* Temporary code to ensure that returned node is not empty */ | ||
807 | got_numa_domain: | ||
808 | nodes_setall(nodes); | ||
809 | while (NODE_DATA(numa_domain)->node_spanned_pages == 0) { | ||
810 | node_clear(numa_domain, nodes); | ||
811 | numa_domain = any_online_node(nodes); | ||
812 | } | ||
813 | return numa_domain; | ||
814 | } | ||
815 | #endif /* CONFIG_MEMORY_HOTPLUG */ | ||
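Both parse_numa_properties() and the new hot_add_scn_to_nid() read (address, size) pairs out of a memory node's property as a flat array of 32-bit cells; read_n_cells() folds n of them into one value and advances the buffer, and the ranges computation divides the property length (in cells) by the width of one pair. A self-contained sketch with hypothetical property data (a 64-bit host is assumed for the 32-bit shifts; the kernel reads the real property with get_property()):

#include <stdio.h>

static unsigned long read_n_cells(int n, const unsigned int **buf)
{
        unsigned long result = 0;

        while (n--) {
                result = (result << 32) | **buf;
                (*buf)++;
        }
        return result;
}

int main(void)
{
        /* one (addr, size) range with 2 address cells and 2 size cells:
         * start = 0x100000000, size = 0x80000000 (2GB at 4GB) */
        static const unsigned int reg[] = { 0x1, 0x0, 0x0, 0x80000000 };
        const unsigned int *p = reg;
        int n_addr_cells = 2, n_size_cells = 2;
        int len = sizeof(reg);
        int ranges = (len >> 2) / (n_addr_cells + n_size_cells);

        while (ranges--) {
                unsigned long start = read_n_cells(n_addr_cells, &p);
                unsigned long size = read_n_cells(n_size_cells, &p);
                printf("start=%#lx size=%#lx\n", start, size);
        }
        return 0;
}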
diff --git a/arch/powerpc/mm/slb.c b/arch/powerpc/mm/slb.c index 60e852f2f8e5..ffc8ed4de62d 100644 --- a/arch/powerpc/mm/slb.c +++ b/arch/powerpc/mm/slb.c | |||
@@ -75,7 +75,7 @@ static void slb_flush_and_rebolt(void) | |||
75 | vflags = SLB_VSID_KERNEL | virtual_llp; | 75 | vflags = SLB_VSID_KERNEL | virtual_llp; |
76 | 76 | ||
77 | ksp_esid_data = mk_esid_data(get_paca()->kstack, 2); | 77 | ksp_esid_data = mk_esid_data(get_paca()->kstack, 2); |
78 | if ((ksp_esid_data & ESID_MASK) == KERNELBASE) | 78 | if ((ksp_esid_data & ESID_MASK) == PAGE_OFFSET) |
79 | ksp_esid_data &= ~SLB_ESID_V; | 79 | ksp_esid_data &= ~SLB_ESID_V; |
80 | 80 | ||
81 | /* We need to do this all in asm, so we're sure we don't touch | 81 | /* We need to do this all in asm, so we're sure we don't touch |
@@ -87,8 +87,8 @@ static void slb_flush_and_rebolt(void) | |||
87 | /* Slot 2 - kernel stack */ | 87 | /* Slot 2 - kernel stack */ |
88 | "slbmte %2,%3\n" | 88 | "slbmte %2,%3\n" |
89 | "isync" | 89 | "isync" |
90 | :: "r"(mk_vsid_data(VMALLOCBASE, vflags)), | 90 | :: "r"(mk_vsid_data(VMALLOC_START, vflags)), |
91 | "r"(mk_esid_data(VMALLOCBASE, 1)), | 91 | "r"(mk_esid_data(VMALLOC_START, 1)), |
92 | "r"(mk_vsid_data(ksp_esid_data, lflags)), | 92 | "r"(mk_vsid_data(ksp_esid_data, lflags)), |
93 | "r"(ksp_esid_data) | 93 | "r"(ksp_esid_data) |
94 | : "memory"); | 94 | : "memory"); |
@@ -134,14 +134,14 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) | |||
134 | else | 134 | else |
135 | unmapped_base = TASK_UNMAPPED_BASE_USER64; | 135 | unmapped_base = TASK_UNMAPPED_BASE_USER64; |
136 | 136 | ||
137 | if (pc >= KERNELBASE) | 137 | if (is_kernel_addr(pc)) |
138 | return; | 138 | return; |
139 | slb_allocate(pc); | 139 | slb_allocate(pc); |
140 | 140 | ||
141 | if (GET_ESID(pc) == GET_ESID(stack)) | 141 | if (GET_ESID(pc) == GET_ESID(stack)) |
142 | return; | 142 | return; |
143 | 143 | ||
144 | if (stack >= KERNELBASE) | 144 | if (is_kernel_addr(stack)) |
145 | return; | 145 | return; |
146 | slb_allocate(stack); | 146 | slb_allocate(stack); |
147 | 147 | ||
@@ -149,7 +149,7 @@ void switch_slb(struct task_struct *tsk, struct mm_struct *mm) | |||
149 | || (GET_ESID(stack) == GET_ESID(unmapped_base))) | 149 | || (GET_ESID(stack) == GET_ESID(unmapped_base))) |
150 | return; | 150 | return; |
151 | 151 | ||
152 | if (unmapped_base >= KERNELBASE) | 152 | if (is_kernel_addr(unmapped_base)) |
153 | return; | 153 | return; |
154 | slb_allocate(unmapped_base); | 154 | slb_allocate(unmapped_base); |
155 | } | 155 | } |
@@ -213,10 +213,10 @@ void slb_initialize(void) | |||
213 | asm volatile("isync":::"memory"); | 213 | asm volatile("isync":::"memory"); |
214 | asm volatile("slbmte %0,%0"::"r" (0) : "memory"); | 214 | asm volatile("slbmte %0,%0"::"r" (0) : "memory"); |
215 | asm volatile("isync; slbia; isync":::"memory"); | 215 | asm volatile("isync; slbia; isync":::"memory"); |
216 | create_slbe(KERNELBASE, lflags, 0); | 216 | create_slbe(PAGE_OFFSET, lflags, 0); |
217 | 217 | ||
218 | /* VMALLOC space has 4K pages always for now */ | 218 | /* VMALLOC space has 4K pages always for now */ |
219 | create_slbe(VMALLOCBASE, vflags, 1); | 219 | create_slbe(VMALLOC_START, vflags, 1); |
220 | 220 | ||
221 | /* We don't bolt the stack for the time being - we're in boot, | 221 | /* We don't bolt the stack for the time being - we're in boot, |
222 | * so the stack is in the bolted segment. By the time it goes | 222 | * so the stack is in the bolted segment. By the time it goes |
diff --git a/arch/powerpc/mm/slb_low.S b/arch/powerpc/mm/slb_low.S index 950ffc5848c7..d1acee38f163 100644 --- a/arch/powerpc/mm/slb_low.S +++ b/arch/powerpc/mm/slb_low.S | |||
@@ -37,9 +37,9 @@ _GLOBAL(slb_allocate_realmode) | |||
37 | 37 | ||
38 | srdi r9,r3,60 /* get region */ | 38 | srdi r9,r3,60 /* get region */ |
39 | srdi r10,r3,28 /* get esid */ | 39 | srdi r10,r3,28 /* get esid */ |
40 | cmpldi cr7,r9,0xc /* cmp KERNELBASE for later use */ | 40 | cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */ |
41 | 41 | ||
42 | /* r3 = address, r10 = esid, cr7 = <>KERNELBASE */ | 42 | /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */ |
43 | blt cr7,0f /* user or kernel? */ | 43 | blt cr7,0f /* user or kernel? */ |
44 | 44 | ||
45 | /* kernel address: proto-VSID = ESID */ | 45 | /* kernel address: proto-VSID = ESID */ |
@@ -166,7 +166,7 @@ _GLOBAL(slb_allocate_user) | |||
166 | /* | 166 | /* |
167 | * Finish loading of an SLB entry and return | 167 | * Finish loading of an SLB entry and return |
168 | * | 168 | * |
169 | * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <>KERNELBASE | 169 | * r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET |
170 | */ | 170 | */ |
171 | slb_finish_load: | 171 | slb_finish_load: |
172 | ASM_VSID_SCRAMBLE(r10,r9) | 172 | ASM_VSID_SCRAMBLE(r10,r9) |
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c index 51e7951414e5..82e4951826bc 100644 --- a/arch/powerpc/mm/stab.c +++ b/arch/powerpc/mm/stab.c | |||
@@ -40,7 +40,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid) | |||
40 | unsigned long entry, group, old_esid, castout_entry, i; | 40 | unsigned long entry, group, old_esid, castout_entry, i; |
41 | unsigned int global_entry; | 41 | unsigned int global_entry; |
42 | struct stab_entry *ste, *castout_ste; | 42 | struct stab_entry *ste, *castout_ste; |
43 | unsigned long kernel_segment = (esid << SID_SHIFT) >= KERNELBASE; | 43 | unsigned long kernel_segment = (esid << SID_SHIFT) >= PAGE_OFFSET; |
44 | 44 | ||
45 | vsid_data = vsid << STE_VSID_SHIFT; | 45 | vsid_data = vsid << STE_VSID_SHIFT; |
46 | esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V; | 46 | esid_data = esid << SID_SHIFT | STE_ESID_KP | STE_ESID_V; |
@@ -83,7 +83,7 @@ static int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid) | |||
83 | } | 83 | } |
84 | 84 | ||
85 | /* Dont cast out the first kernel segment */ | 85 | /* Dont cast out the first kernel segment */ |
86 | if ((castout_ste->esid_data & ESID_MASK) != KERNELBASE) | 86 | if ((castout_ste->esid_data & ESID_MASK) != PAGE_OFFSET) |
87 | break; | 87 | break; |
88 | 88 | ||
89 | castout_entry = (castout_entry + 1) & 0xf; | 89 | castout_entry = (castout_entry + 1) & 0xf; |
@@ -122,7 +122,7 @@ static int __ste_allocate(unsigned long ea, struct mm_struct *mm) | |||
122 | unsigned long offset; | 122 | unsigned long offset; |
123 | 123 | ||
124 | /* Kernel or user address? */ | 124 | /* Kernel or user address? */ |
125 | if (ea >= KERNELBASE) { | 125 | if (is_kernel_addr(ea)) { |
126 | vsid = get_kernel_vsid(ea); | 126 | vsid = get_kernel_vsid(ea); |
127 | } else { | 127 | } else { |
128 | if ((ea >= TASK_SIZE_USER64) || (! mm)) | 128 | if ((ea >= TASK_SIZE_USER64) || (! mm)) |
@@ -133,7 +133,7 @@ static int __ste_allocate(unsigned long ea, struct mm_struct *mm) | |||
133 | 133 | ||
134 | stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid); | 134 | stab_entry = make_ste(get_paca()->stab_addr, GET_ESID(ea), vsid); |
135 | 135 | ||
136 | if (ea < KERNELBASE) { | 136 | if (!is_kernel_addr(ea)) { |
137 | offset = __get_cpu_var(stab_cache_ptr); | 137 | offset = __get_cpu_var(stab_cache_ptr); |
138 | if (offset < NR_STAB_CACHE_ENTRIES) | 138 | if (offset < NR_STAB_CACHE_ENTRIES) |
139 | __get_cpu_var(stab_cache[offset++]) = stab_entry; | 139 | __get_cpu_var(stab_cache[offset++]) = stab_entry; |
@@ -190,7 +190,7 @@ void switch_stab(struct task_struct *tsk, struct mm_struct *mm) | |||
190 | entry++, ste++) { | 190 | entry++, ste++) { |
191 | unsigned long ea; | 191 | unsigned long ea; |
192 | ea = ste->esid_data & ESID_MASK; | 192 | ea = ste->esid_data & ESID_MASK; |
193 | if (ea < KERNELBASE) { | 193 | if (!is_kernel_addr(ea)) { |
194 | ste->esid_data = 0; | 194 | ste->esid_data = 0; |
195 | } | 195 | } |
196 | } | 196 | } |
@@ -251,7 +251,7 @@ void stabs_alloc(void) | |||
251 | panic("Unable to allocate segment table for CPU %d.\n", | 251 | panic("Unable to allocate segment table for CPU %d.\n", |
252 | cpu); | 252 | cpu); |
253 | 253 | ||
254 | newstab += KERNELBASE; | 254 | newstab = (unsigned long)__va(newstab); |
255 | 255 | ||
256 | memset((void *)newstab, 0, HW_PAGE_SIZE); | 256 | memset((void *)newstab, 0, HW_PAGE_SIZE); |
257 | 257 | ||
@@ -270,11 +270,11 @@ void stabs_alloc(void) | |||
270 | */ | 270 | */ |
271 | void stab_initialize(unsigned long stab) | 271 | void stab_initialize(unsigned long stab) |
272 | { | 272 | { |
273 | unsigned long vsid = get_kernel_vsid(KERNELBASE); | 273 | unsigned long vsid = get_kernel_vsid(PAGE_OFFSET); |
274 | unsigned long stabreal; | 274 | unsigned long stabreal; |
275 | 275 | ||
276 | asm volatile("isync; slbia; isync":::"memory"); | 276 | asm volatile("isync; slbia; isync":::"memory"); |
277 | make_ste(stab, GET_ESID(KERNELBASE), vsid); | 277 | make_ste(stab, GET_ESID(PAGE_OFFSET), vsid); |
278 | 278 | ||
279 | /* Order update */ | 279 | /* Order update */ |
280 | asm volatile("sync":::"memory"); | 280 | asm volatile("sync":::"memory"); |
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c index 859d29a0cac5..bb3afb6e6317 100644 --- a/arch/powerpc/mm/tlb_64.c +++ b/arch/powerpc/mm/tlb_64.c | |||
@@ -168,7 +168,7 @@ void hpte_update(struct mm_struct *mm, unsigned long addr, | |||
168 | batch->mm = mm; | 168 | batch->mm = mm; |
169 | batch->psize = psize; | 169 | batch->psize = psize; |
170 | } | 170 | } |
171 | if (addr < KERNELBASE) { | 171 | if (!is_kernel_addr(addr)) { |
172 | vsid = get_vsid(mm->context.id, addr); | 172 | vsid = get_vsid(mm->context.id, addr); |
173 | WARN_ON(vsid == 0); | 173 | WARN_ON(vsid == 0); |
174 | } else | 174 | } else |
diff --git a/arch/powerpc/oprofile/Makefile b/arch/powerpc/oprofile/Makefile index 0782d0cca89c..554cd7c75321 100644 --- a/arch/powerpc/oprofile/Makefile +++ b/arch/powerpc/oprofile/Makefile | |||
@@ -9,3 +9,4 @@ DRIVER_OBJS := $(addprefix ../../../drivers/oprofile/, \ | |||
9 | oprofile-y := $(DRIVER_OBJS) common.o | 9 | oprofile-y := $(DRIVER_OBJS) common.o |
10 | oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o | 10 | oprofile-$(CONFIG_PPC64) += op_model_rs64.o op_model_power4.o |
11 | oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o | 11 | oprofile-$(CONFIG_FSL_BOOKE) += op_model_fsl_booke.o |
12 | oprofile-$(CONFIG_PPC32) += op_model_7450.o | ||

diff --git a/arch/powerpc/oprofile/common.c b/arch/powerpc/oprofile/common.c index af2c05d20ba5..71615eb70b2b 100644 --- a/arch/powerpc/oprofile/common.c +++ b/arch/powerpc/oprofile/common.c | |||
@@ -14,9 +14,6 @@ | |||
14 | */ | 14 | */ |
15 | 15 | ||
16 | #include <linux/oprofile.h> | 16 | #include <linux/oprofile.h> |
17 | #ifndef __powerpc64__ | ||
18 | #include <linux/slab.h> | ||
19 | #endif /* ! __powerpc64__ */ | ||
20 | #include <linux/init.h> | 17 | #include <linux/init.h> |
21 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
22 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
@@ -31,10 +28,6 @@ static struct op_powerpc_model *model; | |||
31 | static struct op_counter_config ctr[OP_MAX_COUNTER]; | 28 | static struct op_counter_config ctr[OP_MAX_COUNTER]; |
32 | static struct op_system_config sys; | 29 | static struct op_system_config sys; |
33 | 30 | ||
34 | #ifndef __powerpc64__ | ||
35 | static char *cpu_type; | ||
36 | #endif /* ! __powerpc64__ */ | ||
37 | |||
38 | static void op_handle_interrupt(struct pt_regs *regs) | 31 | static void op_handle_interrupt(struct pt_regs *regs) |
39 | { | 32 | { |
40 | model->handle_interrupt(regs, ctr); | 33 | model->handle_interrupt(regs, ctr); |
@@ -53,14 +46,7 @@ static int op_powerpc_setup(void) | |||
53 | model->reg_setup(ctr, &sys, model->num_counters); | 46 | model->reg_setup(ctr, &sys, model->num_counters); |
54 | 47 | ||
55 | /* Configure the registers on all cpus. */ | 48 | /* Configure the registers on all cpus. */ |
56 | #ifdef __powerpc64__ | ||
57 | on_each_cpu(model->cpu_setup, NULL, 0, 1); | 49 | on_each_cpu(model->cpu_setup, NULL, 0, 1); |
58 | #else /* __powerpc64__ */ | ||
59 | #if 0 | ||
60 | /* FIXME: Make multi-cpu work */ | ||
61 | on_each_cpu(model->reg_setup, NULL, 0, 1); | ||
62 | #endif | ||
63 | #endif /* __powerpc64__ */ | ||
64 | 50 | ||
65 | return 0; | 51 | return 0; |
66 | } | 52 | } |
@@ -95,7 +81,7 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root) | |||
95 | { | 81 | { |
96 | int i; | 82 | int i; |
97 | 83 | ||
98 | #ifdef __powerpc64__ | 84 | #ifdef CONFIG_PPC64 |
99 | /* | 85 | /* |
100 | * There is one mmcr0, mmcr1 and mmcra for setting the events for | 86 | * There is one mmcr0, mmcr1 and mmcra for setting the events for |
101 | * all of the counters. | 87 | * all of the counters. |
@@ -103,7 +89,7 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root) | |||
103 | oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0); | 89 | oprofilefs_create_ulong(sb, root, "mmcr0", &sys.mmcr0); |
104 | oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1); | 90 | oprofilefs_create_ulong(sb, root, "mmcr1", &sys.mmcr1); |
105 | oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra); | 91 | oprofilefs_create_ulong(sb, root, "mmcra", &sys.mmcra); |
106 | #endif /* __powerpc64__ */ | 92 | #endif |
107 | 93 | ||
108 | for (i = 0; i < model->num_counters; ++i) { | 94 | for (i = 0; i < model->num_counters; ++i) { |
109 | struct dentry *dir; | 95 | struct dentry *dir; |
@@ -115,65 +101,68 @@ static int op_powerpc_create_files(struct super_block *sb, struct dentry *root) | |||
115 | oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled); | 101 | oprofilefs_create_ulong(sb, dir, "enabled", &ctr[i].enabled); |
116 | oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event); | 102 | oprofilefs_create_ulong(sb, dir, "event", &ctr[i].event); |
117 | oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count); | 103 | oprofilefs_create_ulong(sb, dir, "count", &ctr[i].count); |
118 | #ifdef __powerpc64__ | 104 | |
119 | /* | 105 | /* |
120 | * We dont support per counter user/kernel selection, but | 106 | * Classic PowerPC doesn't support per-counter |
121 | * we leave the entries because userspace expects them | 107 | * control like this, but the options are |
108 | * expected, so they remain. For Freescale | ||
109 | * Book-E style performance monitors, we do | ||
110 | * support them. | ||
122 | */ | 111 | */ |
123 | #endif /* __powerpc64__ */ | ||
124 | oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel); | 112 | oprofilefs_create_ulong(sb, dir, "kernel", &ctr[i].kernel); |
125 | oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user); | 113 | oprofilefs_create_ulong(sb, dir, "user", &ctr[i].user); |
126 | 114 | ||
127 | #ifndef __powerpc64__ | ||
128 | /* FIXME: Not sure if this is used */ | ||
129 | #endif /* ! __powerpc64__ */ | ||
130 | oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask); | 115 | oprofilefs_create_ulong(sb, dir, "unit_mask", &ctr[i].unit_mask); |
131 | } | 116 | } |
132 | 117 | ||
133 | oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel); | 118 | oprofilefs_create_ulong(sb, root, "enable_kernel", &sys.enable_kernel); |
134 | oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user); | 119 | oprofilefs_create_ulong(sb, root, "enable_user", &sys.enable_user); |
135 | #ifdef __powerpc64__ | 120 | #ifdef CONFIG_PPC64 |
136 | oprofilefs_create_ulong(sb, root, "backtrace_spinlocks", | 121 | oprofilefs_create_ulong(sb, root, "backtrace_spinlocks", |
137 | &sys.backtrace_spinlocks); | 122 | &sys.backtrace_spinlocks); |
138 | #endif /* __powerpc64__ */ | 123 | #endif |
139 | 124 | ||
140 | /* Default to tracing both kernel and user */ | 125 | /* Default to tracing both kernel and user */ |
141 | sys.enable_kernel = 1; | 126 | sys.enable_kernel = 1; |
142 | sys.enable_user = 1; | 127 | sys.enable_user = 1; |
143 | #ifdef __powerpc64__ | 128 | #ifdef CONFIG_PPC64 |
144 | /* Turn on backtracing through spinlocks by default */ | 129 | /* Turn on backtracing through spinlocks by default */ |
145 | sys.backtrace_spinlocks = 1; | 130 | sys.backtrace_spinlocks = 1; |
146 | #endif /* __powerpc64__ */ | 131 | #endif |
147 | 132 | ||
148 | return 0; | 133 | return 0; |
149 | } | 134 | } |
150 | 135 | ||
151 | int __init oprofile_arch_init(struct oprofile_operations *ops) | 136 | int __init oprofile_arch_init(struct oprofile_operations *ops) |
152 | { | 137 | { |
153 | #ifndef __powerpc64__ | 138 | if (!cur_cpu_spec->oprofile_cpu_type) |
154 | #ifdef CONFIG_FSL_BOOKE | 139 | return -ENODEV; |
155 | model = &op_model_fsl_booke; | 140 | |
141 | switch (cur_cpu_spec->oprofile_type) { | ||
142 | #ifdef CONFIG_PPC64 | ||
143 | case RS64: | ||
144 | model = &op_model_rs64; | ||
145 | break; | ||
146 | case POWER4: | ||
147 | model = &op_model_power4; | ||
148 | break; | ||
156 | #else | 149 | #else |
157 | return -ENODEV; | 150 | case G4: |
151 | model = &op_model_7450; | ||
152 | break; | ||
158 | #endif | 153 | #endif |
154 | #ifdef CONFIG_FSL_BOOKE | ||
155 | case BOOKE: | ||
156 | model = &op_model_fsl_booke; | ||
157 | break; | ||
158 | #endif | ||
159 | default: | ||
160 | return -ENODEV; | ||
161 | } | ||
159 | 162 | ||
160 | cpu_type = kmalloc(32, GFP_KERNEL); | ||
161 | if (NULL == cpu_type) | ||
162 | return -ENOMEM; | ||
163 | |||
164 | sprintf(cpu_type, "ppc/%s", cur_cpu_spec->cpu_name); | ||
165 | |||
166 | model->num_counters = cur_cpu_spec->num_pmcs; | ||
167 | |||
168 | ops->cpu_type = cpu_type; | ||
169 | #else /* __powerpc64__ */ | ||
170 | if (!cur_cpu_spec->oprofile_model || !cur_cpu_spec->oprofile_cpu_type) | ||
171 | return -ENODEV; | ||
172 | model = cur_cpu_spec->oprofile_model; | ||
173 | model->num_counters = cur_cpu_spec->num_pmcs; | 163 | model->num_counters = cur_cpu_spec->num_pmcs; |
174 | 164 | ||
175 | ops->cpu_type = cur_cpu_spec->oprofile_cpu_type; | 165 | ops->cpu_type = cur_cpu_spec->oprofile_cpu_type; |
176 | #endif /* __powerpc64__ */ | ||
177 | ops->create_files = op_powerpc_create_files; | 166 | ops->create_files = op_powerpc_create_files; |
178 | ops->setup = op_powerpc_setup; | 167 | ops->setup = op_powerpc_setup; |
179 | ops->shutdown = op_powerpc_shutdown; | 168 | ops->shutdown = op_powerpc_shutdown; |
@@ -188,8 +177,4 @@ int __init oprofile_arch_init(struct oprofile_operations *ops) | |||
188 | 177 | ||
189 | void oprofile_arch_exit(void) | 178 | void oprofile_arch_exit(void) |
190 | { | 179 | { |
191 | #ifndef __powerpc64__ | ||
192 | kfree(cpu_type); | ||
193 | cpu_type = NULL; | ||
194 | #endif /* ! __powerpc64__ */ | ||
195 | } | 180 | } |
diff --git a/arch/powerpc/oprofile/op_model_7450.c b/arch/powerpc/oprofile/op_model_7450.c new file mode 100644 index 000000000000..32abfdbb0eb1 --- /dev/null +++ b/arch/powerpc/oprofile/op_model_7450.c | |||
@@ -0,0 +1,206 @@ | |||
1 | /* | ||
2 | * oprofile/op_model_7450.c | ||
3 | * | ||
4 | * Freescale 745x/744x oprofile support, based on fsl_booke support | ||
5 | * Copyright (C) 2004 Anton Blanchard <anton@au.ibm.com>, IBM | ||
6 | * | ||
7 | * Copyright (c) 2004 Freescale Semiconductor, Inc | ||
8 | * | ||
9 | * Author: Andy Fleming | ||
10 | * Maintainer: Kumar Gala <galak@kernel.crashing.org> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version | ||
15 | * 2 of the License, or (at your option) any later version. | ||
16 | */ | ||
17 | |||
18 | #include <linux/oprofile.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/smp.h> | ||
21 | #include <asm/ptrace.h> | ||
22 | #include <asm/system.h> | ||
23 | #include <asm/processor.h> | ||
24 | #include <asm/cputable.h> | ||
25 | #include <asm/page.h> | ||
26 | #include <asm/pmc.h> | ||
27 | #include <asm/oprofile_impl.h> | ||
28 | |||
29 | static unsigned long reset_value[OP_MAX_COUNTER]; | ||
30 | |||
31 | static int oprofile_running; | ||
32 | static u32 mmcr0_val, mmcr1_val, mmcr2_val; | ||
33 | |||
34 | #define MMCR0_PMC1_SHIFT 6 | ||
35 | #define MMCR0_PMC2_SHIFT 0 | ||
36 | #define MMCR1_PMC3_SHIFT 27 | ||
37 | #define MMCR1_PMC4_SHIFT 22 | ||
38 | #define MMCR1_PMC5_SHIFT 17 | ||
39 | #define MMCR1_PMC6_SHIFT 11 | ||
40 | |||
41 | #define mmcr0_event1(event) \ | ||
42 | ((event << MMCR0_PMC1_SHIFT) & MMCR0_PMC1SEL) | ||
43 | #define mmcr0_event2(event) \ | ||
44 | ((event << MMCR0_PMC2_SHIFT) & MMCR0_PMC2SEL) | ||
45 | |||
46 | #define mmcr1_event3(event) \ | ||
47 | ((event << MMCR1_PMC3_SHIFT) & MMCR1_PMC3SEL) | ||
48 | #define mmcr1_event4(event) \ | ||
49 | ((event << MMCR1_PMC4_SHIFT) & MMCR1_PMC4SEL) | ||
50 | #define mmcr1_event5(event) \ | ||
51 | ((event << MMCR1_PMC5_SHIFT) & MMCR1_PMC5SEL) | ||
52 | #define mmcr1_event6(event) \ | ||
53 | ((event << MMCR1_PMC6_SHIFT) & MMCR1_PMC6SEL) | ||
54 | |||
55 | #define MMCR0_INIT (MMCR0_FC | MMCR0_FCS | MMCR0_FCP | MMCR0_FCM1 | MMCR0_FCM0) | ||
56 | |||
57 | /* Unfreezes the counters on this CPU, enables the interrupt, | ||
58 | * enables the counters to trigger the interrupt, and sets the | ||
59 | * counters to only count when the mark bit is not set. | ||
60 | */ | ||
61 | static void pmc_start_ctrs(void) | ||
62 | { | ||
63 | u32 mmcr0 = mfspr(SPRN_MMCR0); | ||
64 | |||
65 | mmcr0 &= ~(MMCR0_FC | MMCR0_FCM0); | ||
66 | mmcr0 |= (MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE); | ||
67 | |||
68 | mtspr(SPRN_MMCR0, mmcr0); | ||
69 | } | ||
70 | |||
71 | /* Disables the counters on this CPU, and freezes them */ | ||
72 | static void pmc_stop_ctrs(void) | ||
73 | { | ||
74 | u32 mmcr0 = mfspr(SPRN_MMCR0); | ||
75 | |||
76 | mmcr0 |= MMCR0_FC; | ||
77 | mmcr0 &= ~(MMCR0_FCECE | MMCR0_PMC1CE | MMCR0_PMCnCE | MMCR0_PMXE); | ||
78 | |||
79 | mtspr(SPRN_MMCR0, mmcr0); | ||
80 | } | ||
81 | |||
82 | /* Configures the counters on this CPU based on the global | ||
83 | * settings */ | ||
84 | static void fsl7450_cpu_setup(void *unused) | ||
85 | { | ||
86 | /* freeze all counters */ | ||
87 | pmc_stop_ctrs(); | ||
88 | |||
89 | mtspr(SPRN_MMCR0, mmcr0_val); | ||
90 | mtspr(SPRN_MMCR1, mmcr1_val); | ||
91 | mtspr(SPRN_MMCR2, mmcr2_val); | ||
92 | } | ||
93 | |||
94 | #define NUM_CTRS 6 | ||
95 | |||
96 | /* Configures the global settings for the counters on all CPUs. */ | ||
97 | static void fsl7450_reg_setup(struct op_counter_config *ctr, | ||
98 | struct op_system_config *sys, | ||
99 | int num_ctrs) | ||
100 | { | ||
101 | int i; | ||
102 | |||
103 | /* Our counters count up, and "count" refers to | ||
104 | * how much before the next interrupt, and we interrupt | ||
105 | * on overflow. So we calculate the starting value | ||
106 | * which will give us "count" until overflow. | ||
107 | * Then we set the events on the enabled counters */ | ||
108 | for (i = 0; i < NUM_CTRS; ++i) | ||
109 | reset_value[i] = 0x80000000UL - ctr[i].count; | ||
110 | |||
111 | /* Set events for Counters 1 & 2 */ | ||
112 | mmcr0_val = MMCR0_INIT | mmcr0_event1(ctr[0].event) | ||
113 | | mmcr0_event2(ctr[1].event); | ||
114 | |||
115 | /* Setup user/kernel bits */ | ||
116 | if (sys->enable_kernel) | ||
117 | mmcr0_val &= ~(MMCR0_FCS); | ||
118 | |||
119 | if (sys->enable_user) | ||
120 | mmcr0_val &= ~(MMCR0_FCP); | ||
121 | |||
122 | /* Set events for Counters 3-6 */ | ||
123 | mmcr1_val = mmcr1_event3(ctr[2].event) | ||
124 | | mmcr1_event4(ctr[3].event) | ||
125 | | mmcr1_event5(ctr[4].event) | ||
126 | | mmcr1_event6(ctr[5].event); | ||
127 | |||
128 | mmcr2_val = 0; | ||
129 | } | ||
130 | |||
131 | /* Sets the counters on this CPU to the chosen values, and starts them */ | ||
132 | static void fsl7450_start(struct op_counter_config *ctr) | ||
133 | { | ||
134 | int i; | ||
135 | |||
136 | mtmsr(mfmsr() | MSR_PMM); | ||
137 | |||
138 | for (i = 0; i < NUM_CTRS; ++i) { | ||
139 | if (ctr[i].enabled) | ||
140 | ctr_write(i, reset_value[i]); | ||
141 | else | ||
142 | ctr_write(i, 0); | ||
143 | } | ||
144 | |||
145 | /* Clear the freeze bit, and enable the interrupt. | ||
146 | * The counters won't actually start until the rfi clears | ||
147 | * the PMM bit */ | ||
148 | pmc_start_ctrs(); | ||
149 | |||
150 | oprofile_running = 1; | ||
151 | } | ||
152 | |||
153 | /* Stop the counters on this CPU */ | ||
154 | static void fsl7450_stop(void) | ||
155 | { | ||
156 | /* freeze counters */ | ||
157 | pmc_stop_ctrs(); | ||
158 | |||
159 | oprofile_running = 0; | ||
160 | |||
161 | mb(); | ||
162 | } | ||
163 | |||
164 | |||
165 | /* Handle the interrupt on this CPU, and log a sample for each | ||
166 | * event that triggered the interrupt */ | ||
167 | static void fsl7450_handle_interrupt(struct pt_regs *regs, | ||
168 | struct op_counter_config *ctr) | ||
169 | { | ||
170 | unsigned long pc; | ||
171 | int is_kernel; | ||
172 | int val; | ||
173 | int i; | ||
174 | |||
175 | /* set the PMM bit (see comment below) */ | ||
176 | mtmsr(mfmsr() | MSR_PMM); | ||
177 | |||
178 | pc = mfspr(SPRN_SIAR); | ||
179 | is_kernel = (pc >= KERNELBASE); | ||
180 | |||
181 | for (i = 0; i < NUM_CTRS; ++i) { | ||
182 | val = ctr_read(i); | ||
183 | if (val < 0) { | ||
184 | if (oprofile_running && ctr[i].enabled) { | ||
185 | oprofile_add_pc(pc, is_kernel, i); | ||
186 | ctr_write(i, reset_value[i]); | ||
187 | } else { | ||
188 | ctr_write(i, 0); | ||
189 | } | ||
190 | } | ||
191 | } | ||
192 | |||
193 | /* The freeze bit was set by the interrupt. */ | ||
194 | /* Clear the freeze bit, and reenable the interrupt. | ||
195 | * The counters won't actually start until the rfi clears | ||
196 | * the PMM bit */ | ||
197 | pmc_start_ctrs(); | ||
198 | } | ||
199 | |||
200 | struct op_powerpc_model op_model_7450= { | ||
201 | .reg_setup = fsl7450_reg_setup, | ||
202 | .cpu_setup = fsl7450_cpu_setup, | ||
203 | .start = fsl7450_start, | ||
204 | .stop = fsl7450_stop, | ||
205 | .handle_interrupt = fsl7450_handle_interrupt, | ||
206 | }; | ||
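fsl7450_reg_setup() preloads each enabled PMC with 0x80000000 - count: the 745x counters count upward and the performance-monitor exception fires when the most significant bit becomes set, so after exactly count events the handler reads a negative value (ctr_read(i) < 0) and rewrites the preload. A tiny stand-alone illustration of that arithmetic (two's-complement int is assumed; the count value is arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned long count = 100000;
        unsigned int reset = 0x80000000UL - count;      /* preload value */
        int after = (int)(reset + count);               /* value at overflow */

        printf("preload %#x, after %lu events reads %d (negative => sample)\n",
               reset, count, after);
        return 0;
}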
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c index a3401b46f3ba..659a021da0c7 100644 --- a/arch/powerpc/oprofile/op_model_power4.c +++ b/arch/powerpc/oprofile/op_model_power4.c | |||
@@ -252,7 +252,7 @@ static unsigned long get_pc(struct pt_regs *regs) | |||
252 | return (unsigned long)__va(pc); | 252 | return (unsigned long)__va(pc); |
253 | 253 | ||
254 | /* Not sure where we were */ | 254 | /* Not sure where we were */ |
255 | if (pc < KERNELBASE) | 255 | if (!is_kernel_addr(pc)) |
256 | /* function descriptor madness */ | 256 | /* function descriptor madness */ |
257 | return *((unsigned long *)kernel_unknown_bucket); | 257 | return *((unsigned long *)kernel_unknown_bucket); |
258 | 258 | ||
@@ -264,7 +264,7 @@ static int get_kernel(unsigned long pc) | |||
264 | int is_kernel; | 264 | int is_kernel; |
265 | 265 | ||
266 | if (!mmcra_has_sihv) { | 266 | if (!mmcra_has_sihv) { |
267 | is_kernel = (pc >= KERNELBASE); | 267 | is_kernel = is_kernel_addr(pc); |
268 | } else { | 268 | } else { |
269 | unsigned long mmcra = mfspr(SPRN_MMCRA); | 269 | unsigned long mmcra = mfspr(SPRN_MMCRA); |
270 | is_kernel = ((mmcra & MMCRA_SIPR) == 0); | 270 | is_kernel = ((mmcra & MMCRA_SIPR) == 0); |
diff --git a/arch/powerpc/oprofile/op_model_rs64.c b/arch/powerpc/oprofile/op_model_rs64.c index e010b85996e8..5c909ee609fe 100644 --- a/arch/powerpc/oprofile/op_model_rs64.c +++ b/arch/powerpc/oprofile/op_model_rs64.c | |||
@@ -178,7 +178,6 @@ static void rs64_handle_interrupt(struct pt_regs *regs, | |||
178 | int val; | 178 | int val; |
179 | int i; | 179 | int i; |
180 | unsigned long pc = mfspr(SPRN_SIAR); | 180 | unsigned long pc = mfspr(SPRN_SIAR); |
181 | int is_kernel = (pc >= KERNELBASE); | ||
182 | 181 | ||
183 | /* set the PMM bit (see comment below) */ | 182 | /* set the PMM bit (see comment below) */ |
184 | mtmsrd(mfmsr() | MSR_PMM); | 183 | mtmsrd(mfmsr() | MSR_PMM); |
@@ -187,7 +186,7 @@ static void rs64_handle_interrupt(struct pt_regs *regs, | |||
187 | val = ctr_read(i); | 186 | val = ctr_read(i); |
188 | if (val < 0) { | 187 | if (val < 0) { |
189 | if (ctr[i].enabled) { | 188 | if (ctr[i].enabled) { |
190 | oprofile_add_pc(pc, is_kernel, i); | 189 | oprofile_add_pc(pc, is_kernel_addr(pc), i); |
191 | ctr_write(i, reset_value[i]); | 190 | ctr_write(i, reset_value[i]); |
192 | } else { | 191 | } else { |
193 | ctr_write(i, 0); | 192 | ctr_write(i, 0); |
diff --git a/arch/powerpc/platforms/cell/Kconfig b/arch/powerpc/platforms/cell/Kconfig new file mode 100644 index 000000000000..3157071e241c --- /dev/null +++ b/arch/powerpc/platforms/cell/Kconfig | |||
@@ -0,0 +1,13 @@ | |||
1 | menu "Cell Broadband Engine options" | ||
2 | depends on PPC_CELL | ||
3 | |||
4 | config SPU_FS | ||
5 | tristate "SPU file system" | ||
6 | default m | ||
7 | depends on PPC_CELL | ||
8 | help | ||
9 | The SPU file system is used to access Synergistic Processing | ||
10 | Units on machines implementing the Broadband Processor | ||
11 | Architecture. | ||
12 | |||
13 | endmenu | ||
diff --git a/arch/powerpc/platforms/cell/Makefile b/arch/powerpc/platforms/cell/Makefile index 55e094b96bc0..16031b565be4 100644 --- a/arch/powerpc/platforms/cell/Makefile +++ b/arch/powerpc/platforms/cell/Makefile | |||
@@ -1,2 +1,10 @@ | |||
1 | obj-y += interrupt.o iommu.o setup.o spider-pic.o | 1 | obj-y += interrupt.o iommu.o setup.o spider-pic.o |
2 | obj-y += pervasive.o | ||
3 | |||
2 | obj-$(CONFIG_SMP) += smp.o | 4 | obj-$(CONFIG_SMP) += smp.o |
5 | obj-$(CONFIG_SPU_FS) += spufs/ spu-base.o | ||
6 | |||
7 | spu-base-y += spu_base.o spu_priv1.o | ||
8 | |||
9 | builtin-spufs-$(CONFIG_SPU_FS) += spu_syscalls.o | ||
10 | obj-y += $(builtin-spufs-m) | ||
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 7fbe78a9327d..63aa52acf441 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/config.h> | 23 | #include <linux/config.h> |
24 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
25 | #include <linux/irq.h> | 25 | #include <linux/irq.h> |
26 | #include <linux/module.h> | ||
26 | #include <linux/percpu.h> | 27 | #include <linux/percpu.h> |
27 | #include <linux/types.h> | 28 | #include <linux/types.h> |
28 | 29 | ||
@@ -55,6 +56,7 @@ struct iic_regs { | |||
55 | 56 | ||
56 | struct iic { | 57 | struct iic { |
57 | struct iic_regs __iomem *regs; | 58 | struct iic_regs __iomem *regs; |
59 | u8 target_id; | ||
58 | }; | 60 | }; |
59 | 61 | ||
60 | static DEFINE_PER_CPU(struct iic, iic); | 62 | static DEFINE_PER_CPU(struct iic, iic); |
@@ -172,12 +174,11 @@ int iic_get_irq(struct pt_regs *regs) | |||
172 | return irq; | 174 | return irq; |
173 | } | 175 | } |
174 | 176 | ||
175 | static struct iic_regs __iomem *find_iic(int cpu) | 177 | static int setup_iic(int cpu, struct iic *iic) |
176 | { | 178 | { |
177 | struct device_node *np; | 179 | struct device_node *np; |
178 | int nodeid = cpu / 2; | 180 | int nodeid = cpu / 2; |
179 | unsigned long regs; | 181 | unsigned long regs; |
180 | struct iic_regs __iomem *iic_regs; | ||
181 | 182 | ||
182 | for (np = of_find_node_by_type(NULL, "cpu"); | 183 | for (np = of_find_node_by_type(NULL, "cpu"); |
183 | np; | 184 | np; |
@@ -188,20 +189,23 @@ static struct iic_regs __iomem *find_iic(int cpu) | |||
188 | 189 | ||
189 | if (!np) { | 190 | if (!np) { |
190 | printk(KERN_WARNING "IIC: CPU %d not found\n", cpu); | 191 | printk(KERN_WARNING "IIC: CPU %d not found\n", cpu); |
191 | iic_regs = NULL; | 192 | iic->regs = NULL; |
192 | } else { | 193 | iic->target_id = 0xff; |
193 | regs = *(long *)get_property(np, "iic", NULL); | 194 | return -ENODEV; |
194 | |||
195 | /* hack until we have decided on the devtree info */ | ||
196 | regs += 0x400; | ||
197 | if (cpu & 1) | ||
198 | regs += 0x20; | ||
199 | |||
200 | printk(KERN_DEBUG "IIC for CPU %d at %lx\n", cpu, regs); | ||
201 | iic_regs = __ioremap(regs, sizeof(struct iic_regs), | ||
202 | _PAGE_NO_CACHE); | ||
203 | } | 195 | } |
204 | return iic_regs; | 196 | |
197 | regs = *(long *)get_property(np, "iic", NULL); | ||
198 | |||
199 | /* hack until we have decided on the devtree info */ | ||
200 | regs += 0x400; | ||
201 | if (cpu & 1) | ||
202 | regs += 0x20; | ||
203 | |||
204 | printk(KERN_DEBUG "IIC for CPU %d at %lx\n", cpu, regs); | ||
205 | iic->regs = __ioremap(regs, sizeof(struct iic_regs), | ||
206 | _PAGE_NO_CACHE); | ||
207 | iic->target_id = (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe); | ||
208 | return 0; | ||
205 | } | 209 | } |
206 | 210 | ||
207 | #ifdef CONFIG_SMP | 211 | #ifdef CONFIG_SMP |
@@ -227,6 +231,12 @@ void iic_cause_IPI(int cpu, int mesg) | |||
227 | out_be64(&per_cpu(iic, cpu).regs->generate, (IIC_NUM_IPIS - 1 - mesg) << 4); | 231 | out_be64(&per_cpu(iic, cpu).regs->generate, (IIC_NUM_IPIS - 1 - mesg) << 4); |
228 | } | 232 | } |
229 | 233 | ||
234 | u8 iic_get_target_id(int cpu) | ||
235 | { | ||
236 | return per_cpu(iic, cpu).target_id; | ||
237 | } | ||
238 | EXPORT_SYMBOL_GPL(iic_get_target_id); | ||
239 | |||
230 | static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) | 240 | static irqreturn_t iic_ipi_action(int irq, void *dev_id, struct pt_regs *regs) |
231 | { | 241 | { |
232 | smp_message_recv(iic_irq_to_ipi(irq), regs); | 242 | smp_message_recv(iic_irq_to_ipi(irq), regs); |
@@ -276,7 +286,7 @@ void iic_init_IRQ(void) | |||
276 | irq_offset = 0; | 286 | irq_offset = 0; |
277 | for_each_cpu(cpu) { | 287 | for_each_cpu(cpu) { |
278 | iic = &per_cpu(iic, cpu); | 288 | iic = &per_cpu(iic, cpu); |
279 | iic->regs = find_iic(cpu); | 289 | setup_iic(cpu, iic); |
280 | if (iic->regs) | 290 | if (iic->regs) |
281 | out_be64(&iic->regs->prio, 0xff); | 291 | out_be64(&iic->regs->prio, 0xff); |
282 | } | 292 | } |
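setup_iic() now also records a target_id for each CPU, exported through iic_get_target_id(); the encoding in the diff puts the Cell node number (cpu / 2) in the upper nibble and selects unit 0xe or 0xf for thread 0 or 1 in the lower nibble. A quick stand-alone check of that encoding (the example CPU numbers are arbitrary):

#include <stdio.h>

static unsigned char iic_target_id(int cpu)
{
        int nodeid = cpu / 2;

        return (nodeid << 4) + ((cpu & 1) ? 0xf : 0xe);
}

int main(void)
{
        int cpu;

        for (cpu = 0; cpu < 4; cpu++)
                printf("cpu %d -> target id 0x%02x\n", cpu, iic_target_id(cpu));
        return 0;
}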
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h index 37d58e6fd0c6..a14bd38791c0 100644 --- a/arch/powerpc/platforms/cell/interrupt.h +++ b/arch/powerpc/platforms/cell/interrupt.h | |||
@@ -54,6 +54,7 @@ extern void iic_setup_cpu(void); | |||
54 | extern void iic_local_enable(void); | 54 | extern void iic_local_enable(void); |
55 | extern void iic_local_disable(void); | 55 | extern void iic_local_disable(void); |
56 | 56 | ||
57 | extern u8 iic_get_target_id(int cpu); | ||
57 | 58 | ||
58 | extern void spider_init_IRQ(void); | 59 | extern void spider_init_IRQ(void); |
59 | extern int spider_get_irq(unsigned long int_pending); | 60 | extern int spider_get_irq(unsigned long int_pending); |
diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index 74f999b4ac9e..46e7cb9c3e64 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c | |||
@@ -29,6 +29,8 @@ | |||
29 | #include <linux/bootmem.h> | 29 | #include <linux/bootmem.h> |
30 | #include <linux/mm.h> | 30 | #include <linux/mm.h> |
31 | #include <linux/dma-mapping.h> | 31 | #include <linux/dma-mapping.h> |
32 | #include <linux/kernel.h> | ||
33 | #include <linux/compiler.h> | ||
32 | 34 | ||
33 | #include <asm/sections.h> | 35 | #include <asm/sections.h> |
34 | #include <asm/iommu.h> | 36 | #include <asm/iommu.h> |
@@ -40,6 +42,7 @@ | |||
40 | #include <asm/abs_addr.h> | 42 | #include <asm/abs_addr.h> |
41 | #include <asm/system.h> | 43 | #include <asm/system.h> |
42 | #include <asm/ppc-pci.h> | 44 | #include <asm/ppc-pci.h> |
45 | #include <asm/udbg.h> | ||
43 | 46 | ||
44 | #include "iommu.h" | 47 | #include "iommu.h" |
45 | 48 | ||
@@ -220,8 +223,6 @@ set_iopt_cache(void __iomem *base, unsigned long index, | |||
220 | { | 223 | { |
221 | unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR; | 224 | unsigned long __iomem *tags = base + IOC_PT_CACHE_DIR; |
222 | unsigned long __iomem *p = base + IOC_PT_CACHE_REG; | 225 | unsigned long __iomem *p = base + IOC_PT_CACHE_REG; |
223 | pr_debug("iopt %02lx was v%016lx/t%016lx, store v%016lx/t%016lx\n", | ||
224 | index, get_iopt_cache(base, index, &oldtag), oldtag, val, tag); | ||
225 | 226 | ||
226 | out_be64(p, val); | 227 | out_be64(p, val); |
227 | out_be64(&tags[index], tag); | 228 | out_be64(&tags[index], tag); |
@@ -248,67 +249,176 @@ set_iocmd_config(void __iomem *base) | |||
248 | out_be64(p, conf | IOCMD_CONF_TE); | 249 | out_be64(p, conf | IOCMD_CONF_TE); |
249 | } | 250 | } |
250 | 251 | ||
251 | /* FIXME: get these from the device tree */ | 252 | static void enable_mapping(void __iomem *base, void __iomem *mmio_base) |
252 | #define ioc_base 0x20000511000ull | ||
253 | #define ioc_mmio_base 0x20000510000ull | ||
254 | #define ioid 0x48a | ||
255 | #define iopt_phys_offset (- 0x20000000) /* We have a 512MB offset from the SB */ | ||
256 | #define io_page_size 0x1000000 | ||
257 | |||
258 | static unsigned long map_iopt_entry(unsigned long address) | ||
259 | { | 253 | { |
260 | switch (address >> 20) { | 254 | set_iocmd_config(base); |
261 | case 0x600: | 255 | set_iost_origin(mmio_base); |
262 | address = 0x24020000000ull; /* spider i/o */ | ||
263 | break; | ||
264 | default: | ||
265 | address += iopt_phys_offset; | ||
266 | break; | ||
267 | } | ||
268 | |||
269 | return get_iopt_entry(address, ioid, IOPT_PROT_RW); | ||
270 | } | 256 | } |
271 | 257 | ||
272 | static void iommu_bus_setup_null(struct pci_bus *b) { } | ||
273 | static void iommu_dev_setup_null(struct pci_dev *d) { } | 258 | static void iommu_dev_setup_null(struct pci_dev *d) { } |
259 | static void iommu_bus_setup_null(struct pci_bus *b) { } | ||
260 | |||
261 | struct cell_iommu { | ||
262 | unsigned long base; | ||
263 | unsigned long mmio_base; | ||
264 | void __iomem *mapped_base; | ||
265 | void __iomem *mapped_mmio_base; | ||
266 | }; | ||
267 | |||
268 | static struct cell_iommu cell_iommus[NR_CPUS]; | ||
274 | 269 | ||
275 | /* initialize the iommu to support a simple linear mapping | 270 | /* initialize the iommu to support a simple linear mapping |
276 | * for each DMA window used by any device. For now, we | 271 | * for each DMA window used by any device. For now, we |
277 | * happen to know that there is only one DMA window in use, | 272 | * happen to know that there is only one DMA window in use, |
278 | * starting at iopt_phys_offset. */ | 273 | * starting at iopt_phys_offset. */ |
279 | static void cell_map_iommu(void) | 274 | static void cell_do_map_iommu(struct cell_iommu *iommu, |
275 | unsigned int ioid, | ||
276 | unsigned long map_start, | ||
277 | unsigned long map_size) | ||
280 | { | 278 | { |
281 | unsigned long address; | 279 | unsigned long io_address, real_address; |
282 | void __iomem *base; | 280 | void __iomem *ioc_base, *ioc_mmio_base; |
283 | ioste ioste; | 281 | ioste ioste; |
284 | unsigned long index; | 282 | unsigned long index; |
285 | 283 | ||
286 | base = __ioremap(ioc_base, 0x1000, _PAGE_NO_CACHE); | 284 | /* we pretend the io page table was at a very high address */ |
287 | pr_debug("%lx mapped to %p\n", ioc_base, base); | 285 | const unsigned long fake_iopt = 0x10000000000ul; |
288 | set_iocmd_config(base); | 286 | const unsigned long io_page_size = 0x1000000; /* use 16M pages */ |
289 | iounmap(base); | 287 | const unsigned long io_segment_size = 0x10000000; /* 256M */ |
288 | |||
289 | ioc_base = iommu->mapped_base; | ||
290 | ioc_mmio_base = iommu->mapped_mmio_base; | ||
291 | |||
292 | for (real_address = 0, io_address = 0; | ||
293 | io_address <= map_start + map_size; | ||
294 | real_address += io_page_size, io_address += io_page_size) { | ||
295 | ioste = get_iost_entry(fake_iopt, io_address, io_page_size); | ||
296 | if ((real_address % io_segment_size) == 0) /* segment start */ | ||
297 | set_iost_cache(ioc_mmio_base, | ||
298 | io_address >> 28, ioste); | ||
299 | index = get_ioc_hash_1way(ioste, io_address); | ||
300 | pr_debug("addr %08lx, index %02lx, ioste %016lx\n", | ||
301 | io_address, index, ioste.val); | ||
302 | set_iopt_cache(ioc_mmio_base, | ||
303 | get_ioc_hash_1way(ioste, io_address), | ||
304 | get_ioc_tag(ioste, io_address), | ||
305 | get_iopt_entry(real_address-map_start, ioid, IOPT_PROT_RW)); | ||
306 | } | ||
307 | } | ||
290 | 308 | ||
291 | base = __ioremap(ioc_mmio_base, 0x1000, _PAGE_NO_CACHE); | 309 | static void iommu_devnode_setup(struct device_node *d) |
292 | pr_debug("%lx mapped to %p\n", ioc_mmio_base, base); | 310 | { |
311 | unsigned int *ioid; | ||
312 | unsigned long *dma_window, map_start, map_size, token; | ||
313 | struct cell_iommu *iommu; | ||
293 | 314 | ||
294 | set_iost_origin(base); | 315 | ioid = (unsigned int *)get_property(d, "ioid", NULL); |
316 | if (!ioid) | ||
317 | pr_debug("No ioid entry found !\n"); | ||
295 | 318 | ||
296 | for (address = 0; address < 0x100000000ul; address += io_page_size) { | 319 | dma_window = (unsigned long *)get_property(d, "ibm,dma-window", NULL); |
297 | ioste = get_iost_entry(0x10000000000ul, address, io_page_size); | 320 | if (!dma_window) |
298 | if ((address & 0xfffffff) == 0) /* segment start */ | 321 | pr_debug("No ibm,dma-window entry found !\n"); |
299 | set_iost_cache(base, address >> 28, ioste); | 322 | |
300 | index = get_ioc_hash_1way(ioste, address); | 323 | map_start = dma_window[1]; |
301 | pr_debug("addr %08lx, index %02lx, ioste %016lx\n", | 324 | map_size = dma_window[2]; |
302 | address, index, ioste.val); | 325 | token = dma_window[0] >> 32; |
303 | set_iopt_cache(base, | 326 | |
304 | get_ioc_hash_1way(ioste, address), | 327 | iommu = &cell_iommus[token]; |
305 | get_ioc_tag(ioste, address), | 328 | |
306 | map_iopt_entry(address)); | 329 | cell_do_map_iommu(iommu, *ioid, map_start, map_size); |
307 | } | 330 | } |
308 | iounmap(base); | 331 | |
332 | static void iommu_bus_setup(struct pci_bus *b) | ||
333 | { | ||
334 | struct device_node *d = (struct device_node *)b->sysdata; | ||
335 | iommu_devnode_setup(d); | ||
336 | } | ||
337 | |||
338 | |||
339 | static int cell_map_iommu_hardcoded(int num_nodes) | ||
340 | { | ||
341 | struct cell_iommu *iommu = NULL; | ||
342 | |||
343 | pr_debug("%s(%d): Using hardcoded defaults\n", __FUNCTION__, __LINE__); | ||
344 | |||
345 | /* node 0 */ | ||
346 | iommu = &cell_iommus[0]; | ||
347 | iommu->mapped_base = __ioremap(0x20000511000, 0x1000, _PAGE_NO_CACHE); | ||
348 | iommu->mapped_mmio_base = __ioremap(0x20000510000, 0x1000, _PAGE_NO_CACHE); | ||
349 | |||
350 | enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base); | ||
351 | |||
352 | cell_do_map_iommu(iommu, 0x048a, | ||
353 | 0x20000000ul,0x20000000ul); | ||
354 | |||
355 | if (num_nodes < 2) | ||
356 | return 0; | ||
357 | |||
358 | /* node 1 */ | ||
359 | iommu = &cell_iommus[1]; | ||
360 | iommu->mapped_base = __ioremap(0x30000511000, 0x1000, _PAGE_NO_CACHE); | ||
361 | iommu->mapped_mmio_base = __ioremap(0x30000510000, 0x1000, _PAGE_NO_CACHE); | ||
362 | |||
363 | enable_mapping(iommu->mapped_base, iommu->mapped_mmio_base); | ||
364 | |||
365 | cell_do_map_iommu(iommu, 0x048a, | ||
366 | 0x20000000,0x20000000ul); | ||
367 | |||
368 | return 0; | ||
309 | } | 369 | } |
310 | 370 | ||
311 | 371 | ||
372 | static int cell_map_iommu(void) | ||
373 | { | ||
374 | unsigned int num_nodes = 0, *node_id; | ||
375 | unsigned long *base, *mmio_base; | ||
376 | struct device_node *dn; | ||
377 | struct cell_iommu *iommu = NULL; | ||
378 | |||
379 | /* determine number of nodes (=iommus) */ | ||
380 | pr_debug("%s(%d): determining number of nodes...", __FUNCTION__, __LINE__); | ||
381 | for(dn = of_find_node_by_type(NULL, "cpu"); | ||
382 | dn; | ||
383 | dn = of_find_node_by_type(dn, "cpu")) { | ||
384 | node_id = (unsigned int *)get_property(dn, "node-id", NULL); | ||
385 | |||
386 | if (num_nodes < *node_id) | ||
387 | num_nodes = *node_id; | ||
388 | } | ||
389 | |||
390 | num_nodes++; | ||
391 | pr_debug("%i found.\n", num_nodes); | ||
392 | |||
393 | /* map the iommu registers for each node */ | ||
394 | pr_debug("%s(%d): Looping through nodes\n", __FUNCTION__, __LINE__); | ||
395 | for(dn = of_find_node_by_type(NULL, "cpu"); | ||
396 | dn; | ||
397 | dn = of_find_node_by_type(dn, "cpu")) { | ||
398 | |||
399 | node_id = (unsigned int *)get_property(dn, "node-id", NULL); | ||
400 | base = (unsigned long *)get_property(dn, "ioc-cache", NULL); | ||
401 | mmio_base = (unsigned long *)get_property(dn, "ioc-translation", NULL); | ||
402 | |||
403 | if (!base || !mmio_base || !node_id) | ||
404 | return cell_map_iommu_hardcoded(num_nodes); | ||
405 | |||
406 | iommu = &cell_iommus[*node_id]; | ||
407 | iommu->base = *base; | ||
408 | iommu->mmio_base = *mmio_base; | ||
409 | |||
410 | iommu->mapped_base = __ioremap(*base, 0x1000, _PAGE_NO_CACHE); | ||
411 | iommu->mapped_mmio_base = __ioremap(*mmio_base, 0x1000, _PAGE_NO_CACHE); | ||
412 | |||
413 | enable_mapping(iommu->mapped_base, | ||
414 | iommu->mapped_mmio_base); | ||
415 | |||
416 | /* everything else will be done in iommu_bus_setup */ | ||
417 | } | ||
418 | |||
419 | return 1; | ||
420 | } | ||
421 | |||
312 | static void *cell_alloc_coherent(struct device *hwdev, size_t size, | 422 | static void *cell_alloc_coherent(struct device *hwdev, size_t size, |
313 | dma_addr_t *dma_handle, gfp_t flag) | 423 | dma_addr_t *dma_handle, gfp_t flag) |
314 | { | 424 | { |
@@ -365,11 +475,28 @@ static int cell_dma_supported(struct device *dev, u64 mask) | |||
365 | 475 | ||
366 | void cell_init_iommu(void) | 476 | void cell_init_iommu(void) |
367 | { | 477 | { |
368 | cell_map_iommu(); | 478 | int setup_bus = 0; |
369 | 479 | ||
370 | /* Direct I/O, IOMMU off */ | 480 | if (of_find_node_by_path("/mambo")) { |
371 | ppc_md.iommu_dev_setup = iommu_dev_setup_null; | 481 | pr_info("Not using iommu on systemsim\n"); |
372 | ppc_md.iommu_bus_setup = iommu_bus_setup_null; | 482 | } else { |
483 | |||
484 | if (!(of_chosen && | ||
485 | get_property(of_chosen, "linux,iommu-off", NULL))) | ||
486 | setup_bus = cell_map_iommu(); | ||
487 | |||
488 | if (setup_bus) { | ||
489 | pr_debug("%s: IOMMU mapping activated\n", __FUNCTION__); | ||
490 | ppc_md.iommu_dev_setup = iommu_dev_setup_null; | ||
491 | ppc_md.iommu_bus_setup = iommu_bus_setup; | ||
492 | } else { | ||
493 | pr_debug("%s: IOMMU mapping activated, " | ||
494 | "no device action necessary\n", __FUNCTION__); | ||
495 | /* Direct I/O, IOMMU off */ | ||
496 | ppc_md.iommu_dev_setup = iommu_dev_setup_null; | ||
497 | ppc_md.iommu_bus_setup = iommu_bus_setup_null; | ||
498 | } | ||
499 | } | ||
373 | 500 | ||
374 | pci_dma_ops.alloc_coherent = cell_alloc_coherent; | 501 | pci_dma_ops.alloc_coherent = cell_alloc_coherent; |
375 | pci_dma_ops.free_coherent = cell_free_coherent; | 502 | pci_dma_ops.free_coherent = cell_free_coherent; |
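When the firmware provides the properties, iommu_devnode_setup() pulls the DMA window for a bus out of "ibm,dma-window": the upper 32 bits of the first 64-bit value select which cell_iommus[] entry to program, and the next two values give the window start and size handed to cell_do_map_iommu(). A minimal sketch of that decoding, with made-up property contents:

#include <stdio.h>

int main(void)
{
        /* hypothetical ibm,dma-window contents for node 1 */
        unsigned long dma_window[3] = {
                0x0000000100000000UL,   /* token 1 in the upper 32 bits */
                0x20000000UL,           /* map_start: window begins at 512MB */
                0x20000000UL,           /* map_size:  512MB window */
        };
        unsigned long token = dma_window[0] >> 32;
        unsigned long map_start = dma_window[1];
        unsigned long map_size = dma_window[2];

        printf("token %lu, window %#lx + %#lx\n", token, map_start, map_size);
        return 0;
}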
diff --git a/arch/powerpc/platforms/cell/pervasive.c b/arch/powerpc/platforms/cell/pervasive.c new file mode 100644 index 000000000000..85152544c153 --- /dev/null +++ b/arch/powerpc/platforms/cell/pervasive.c | |||
@@ -0,0 +1,229 @@ | |||
1 | /* | ||
2 | * CBE Pervasive Monitor and Debug | ||
3 | * | ||
4 | * (C) Copyright IBM Corporation 2005 | ||
5 | * | ||
6 | * Authors: Maximino Aguilar (maguilar@us.ibm.com) | ||
7 | * Michael N. Day (mnday@us.ibm.com) | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2, or (at your option) | ||
12 | * any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
22 | */ | ||
23 | |||
24 | #undef DEBUG | ||
25 | |||
26 | #include <linux/config.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/irq.h> | ||
29 | #include <linux/percpu.h> | ||
30 | #include <linux/types.h> | ||
31 | #include <linux/kallsyms.h> | ||
32 | |||
33 | #include <asm/io.h> | ||
34 | #include <asm/machdep.h> | ||
35 | #include <asm/prom.h> | ||
36 | #include <asm/pgtable.h> | ||
37 | #include <asm/reg.h> | ||
38 | |||
39 | #include "pervasive.h" | ||
40 | |||
41 | static DEFINE_SPINLOCK(cbe_pervasive_lock); | ||
42 | struct cbe_pervasive { | ||
43 | struct pmd_regs __iomem *regs; | ||
44 | unsigned int thread; | ||
45 | }; | ||
46 | |||
47 | /* can't use per_cpu from setup_arch */ | ||
48 | static struct cbe_pervasive cbe_pervasive[NR_CPUS]; | ||
49 | |||
50 | static void __init cbe_enable_pause_zero(void) | ||
51 | { | ||
52 | unsigned long thread_switch_control; | ||
53 | unsigned long temp_register; | ||
54 | struct cbe_pervasive *p; | ||
55 | int thread; | ||
56 | |||
57 | spin_lock_irq(&cbe_pervasive_lock); | ||
58 | p = &cbe_pervasive[smp_processor_id()]; | ||
59 | |||
60 | if (!cbe_pervasive->regs) | ||
61 | goto out; | ||
62 | |||
63 | pr_debug("Power Management: CPU %d\n", smp_processor_id()); | ||
64 | |||
65 | /* Enable Pause(0) control bit */ | ||
66 | temp_register = in_be64(&p->regs->pm_control); | ||
67 | |||
68 | out_be64(&p->regs->pm_control, | ||
69 | temp_register|PMD_PAUSE_ZERO_CONTROL); | ||
70 | |||
71 | /* Enable DEC and EE interrupt request */ | ||
72 | thread_switch_control = mfspr(SPRN_TSC_CELL); | ||
73 | thread_switch_control |= TSC_CELL_EE_ENABLE | TSC_CELL_EE_BOOST; | ||
74 | |||
75 | switch ((mfspr(SPRN_CTRLF) & CTRL_CT)) { | ||
76 | case CTRL_CT0: | ||
77 | thread_switch_control |= TSC_CELL_DEC_ENABLE_0; | ||
78 | thread = 0; | ||
79 | break; | ||
80 | case CTRL_CT1: | ||
81 | thread_switch_control |= TSC_CELL_DEC_ENABLE_1; | ||
82 | thread = 1; | ||
83 | break; | ||
84 | default: | ||
85 | printk(KERN_WARNING "%s: unknown configuration\n", | ||
86 | __FUNCTION__); | ||
87 | thread = -1; | ||
88 | break; | ||
89 | } | ||
90 | |||
91 | if (p->thread != thread) | ||
92 | printk(KERN_WARNING "%s: device tree inconsistent, " | ||
93 | "cpu %i: %d/%d\n", __FUNCTION__, | ||
94 | smp_processor_id(), | ||
95 | p->thread, thread); | ||
96 | |||
97 | mtspr(SPRN_TSC_CELL, thread_switch_control); | ||
98 | |||
99 | out: | ||
100 | spin_unlock_irq(&cbe_pervasive_lock); | ||
101 | } | ||
102 | |||
103 | static void cbe_idle(void) | ||
104 | { | ||
105 | unsigned long ctrl; | ||
106 | |||
107 | cbe_enable_pause_zero(); | ||
108 | |||
109 | while (1) { | ||
110 | if (!need_resched()) { | ||
111 | local_irq_disable(); | ||
112 | while (!need_resched()) { | ||
113 | /* go into low thread priority */ | ||
114 | HMT_low(); | ||
115 | |||
116 | /* | ||
117 | * atomically disable thread execution | ||
118 | * and runlatch. | ||
119 | * External and Decrementer exceptions | ||
120 | * are still handled when the thread | ||
121 | * is disabled, but they now enter | ||
122 | * cbe_system_reset_exception() | ||
123 | */ | ||
124 | ctrl = mfspr(SPRN_CTRLF); | ||
125 | ctrl &= ~(CTRL_RUNLATCH | CTRL_TE); | ||
126 | mtspr(SPRN_CTRLT, ctrl); | ||
127 | } | ||
128 | /* restore thread prio */ | ||
129 | HMT_medium(); | ||
130 | local_irq_enable(); | ||
131 | } | ||
132 | |||
133 | /* | ||
134 | * turn runlatch on again before scheduling the | ||
135 | * process we just woke up | ||
136 | */ | ||
137 | ppc64_runlatch_on(); | ||
138 | |||
139 | preempt_enable_no_resched(); | ||
140 | schedule(); | ||
141 | preempt_disable(); | ||
142 | } | ||
143 | } | ||
144 | |||
145 | int cbe_system_reset_exception(struct pt_regs *regs) | ||
146 | { | ||
147 | switch (regs->msr & SRR1_WAKEMASK) { | ||
148 | case SRR1_WAKEEE: | ||
149 | do_IRQ(regs); | ||
150 | break; | ||
151 | case SRR1_WAKEDEC: | ||
152 | timer_interrupt(regs); | ||
153 | break; | ||
154 | case SRR1_WAKEMT: | ||
155 | /* no action required */ | ||
156 | break; | ||
157 | default: | ||
158 | /* do system reset */ | ||
159 | return 0; | ||
160 | } | ||
161 | /* everything handled */ | ||
162 | return 1; | ||
163 | } | ||
164 | |||
165 | static int __init cbe_find_pmd_mmio(int cpu, struct cbe_pervasive *p) | ||
166 | { | ||
167 | struct device_node *node; | ||
168 | unsigned int *int_servers; | ||
169 | char *addr; | ||
170 | unsigned long real_address; | ||
171 | unsigned int size; | ||
172 | |||
173 | struct pmd_regs __iomem *pmd_mmio_area; | ||
174 | int hardid, thread; | ||
175 | int proplen; | ||
176 | |||
177 | pmd_mmio_area = NULL; | ||
178 | hardid = get_hard_smp_processor_id(cpu); | ||
179 | for (node = NULL; (node = of_find_node_by_type(node, "cpu"));) { | ||
180 | int_servers = (void *) get_property(node, | ||
181 | "ibm,ppc-interrupt-server#s", &proplen); | ||
182 | if (!int_servers) { | ||
183 | printk(KERN_WARNING "%s is missing the " | ||
184 | "ibm,ppc-interrupt-server#s property\n", | ||
185 | node->full_name); | ||
186 | continue; | ||
187 | } | ||
188 | for (thread = 0; thread < proplen / sizeof (int); thread++) { | ||
189 | if (hardid == int_servers[thread]) { | ||
190 | addr = get_property(node, "pervasive", NULL); | ||
191 | goto found; | ||
192 | } | ||
193 | } | ||
194 | } | ||
195 | |||
196 | printk(KERN_WARNING "%s: CPU %d not found\n", __FUNCTION__, cpu); | ||
197 | return -EINVAL; | ||
198 | |||
199 | found: | ||
200 | real_address = *(unsigned long*) addr; | ||
201 | addr += sizeof (unsigned long); | ||
202 | size = *(unsigned int*) addr; | ||
203 | |||
204 | pr_debug("pervasive area for CPU %d at %lx, size %x\n", | ||
205 | cpu, real_address, size); | ||
206 | p->regs = __ioremap(real_address, size, _PAGE_NO_CACHE); | ||
207 | p->thread = thread; | ||
208 | return 0; | ||
209 | } | ||
210 | |||
211 | void __init cell_pervasive_init(void) | ||
212 | { | ||
213 | struct cbe_pervasive *p; | ||
214 | int cpu; | ||
215 | int ret; | ||
216 | |||
217 | if (!cpu_has_feature(CPU_FTR_PAUSE_ZERO)) | ||
218 | return; | ||
219 | |||
220 | for_each_cpu(cpu) { | ||
221 | p = &cbe_pervasive[cpu]; | ||
222 | ret = cbe_find_pmd_mmio(cpu, p); | ||
223 | if (ret) | ||
224 | return; | ||
225 | } | ||
226 | |||
227 | ppc_md.idle_loop = cbe_idle; | ||
228 | ppc_md.system_reset_exception = cbe_system_reset_exception; | ||
229 | } | ||
diff --git a/arch/powerpc/platforms/cell/pervasive.h b/arch/powerpc/platforms/cell/pervasive.h new file mode 100644 index 000000000000..da1fb85ca3e8 --- /dev/null +++ b/arch/powerpc/platforms/cell/pervasive.h | |||
@@ -0,0 +1,62 @@ | |||
1 | /* | ||
2 | * Cell Pervasive Monitor and Debug interface and HW structures | ||
3 | * | ||
4 | * (C) Copyright IBM Corporation 2005 | ||
5 | * | ||
6 | * Authors: Maximino Aguilar (maguilar@us.ibm.com) | ||
7 | * David J. Erb (djerb@us.ibm.com) | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2, or (at your option) | ||
12 | * any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
22 | */ | ||
23 | |||
24 | |||
25 | #ifndef PERVASIVE_H | ||
26 | #define PERVASIVE_H | ||
27 | |||
28 | struct pmd_regs { | ||
29 | u8 pad_0x0000_0x0800[0x0800 - 0x0000]; /* 0x0000 */ | ||
30 | |||
31 | /* Thermal Sensor Registers */ | ||
32 | u64 ts_ctsr1; /* 0x0800 */ | ||
33 | u64 ts_ctsr2; /* 0x0808 */ | ||
34 | u64 ts_mtsr1; /* 0x0810 */ | ||
35 | u64 ts_mtsr2; /* 0x0818 */ | ||
36 | u64 ts_itr1; /* 0x0820 */ | ||
37 | u64 ts_itr2; /* 0x0828 */ | ||
38 | u64 ts_gitr; /* 0x0830 */ | ||
39 | u64 ts_isr; /* 0x0838 */ | ||
40 | u64 ts_imr; /* 0x0840 */ | ||
41 | u64 tm_cr1; /* 0x0848 */ | ||
42 | u64 tm_cr2; /* 0x0850 */ | ||
43 | u64 tm_simr; /* 0x0858 */ | ||
44 | u64 tm_tpr; /* 0x0860 */ | ||
45 | u64 tm_str1; /* 0x0868 */ | ||
46 | u64 tm_str2; /* 0x0870 */ | ||
47 | u64 tm_tsr; /* 0x0878 */ | ||
48 | |||
49 | /* Power Management */ | ||
50 | u64 pm_control; /* 0x0880 */ | ||
51 | #define PMD_PAUSE_ZERO_CONTROL 0x10000 | ||
52 | u64 pm_status; /* 0x0888 */ | ||
53 | |||
54 | /* Time Base Register */ | ||
55 | u64 tbr; /* 0x0890 */ | ||
56 | |||
57 | u8 pad_0x0898_0x1000 [0x1000 - 0x0898]; /* 0x0898 */ | ||
58 | }; | ||
59 | |||
60 | void __init cell_pervasive_init(void); | ||
61 | |||
62 | #endif | ||
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c index 9a495634d0c2..18e25e65c04b 100644 --- a/arch/powerpc/platforms/cell/setup.c +++ b/arch/powerpc/platforms/cell/setup.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <asm/mmu.h> | 33 | #include <asm/mmu.h> |
34 | #include <asm/processor.h> | 34 | #include <asm/processor.h> |
35 | #include <asm/io.h> | 35 | #include <asm/io.h> |
36 | #include <asm/kexec.h> | ||
36 | #include <asm/pgtable.h> | 37 | #include <asm/pgtable.h> |
37 | #include <asm/prom.h> | 38 | #include <asm/prom.h> |
38 | #include <asm/rtas.h> | 39 | #include <asm/rtas.h> |
@@ -48,6 +49,7 @@ | |||
48 | 49 | ||
49 | #include "interrupt.h" | 50 | #include "interrupt.h" |
50 | #include "iommu.h" | 51 | #include "iommu.h" |
52 | #include "pervasive.h" | ||
51 | 53 | ||
52 | #ifdef DEBUG | 54 | #ifdef DEBUG |
53 | #define DBG(fmt...) udbg_printf(fmt) | 55 | #define DBG(fmt...) udbg_printf(fmt) |
@@ -67,6 +69,77 @@ void cell_show_cpuinfo(struct seq_file *m) | |||
67 | of_node_put(root); | 69 | of_node_put(root); |
68 | } | 70 | } |
69 | 71 | ||
72 | #ifdef CONFIG_SPARSEMEM | ||
73 | static int __init find_spu_node_id(struct device_node *spe) | ||
74 | { | ||
75 | unsigned int *id; | ||
76 | #ifdef CONFIG_NUMA | ||
77 | struct device_node *cpu; | ||
78 | cpu = spe->parent->parent; | ||
79 | id = (unsigned int *)get_property(cpu, "node-id", NULL); | ||
80 | #else | ||
81 | id = NULL; | ||
82 | #endif | ||
83 | return id ? *id : 0; | ||
84 | } | ||
85 | |||
86 | static void __init cell_spuprop_present(struct device_node *spe, | ||
87 | const char *prop, int early) | ||
88 | { | ||
89 | struct address_prop { | ||
90 | unsigned long address; | ||
91 | unsigned int len; | ||
92 | } __attribute__((packed)) *p; | ||
93 | int proplen; | ||
94 | |||
95 | unsigned long start_pfn, end_pfn, pfn; | ||
96 | int node_id; | ||
97 | |||
98 | p = (void*)get_property(spe, prop, &proplen); | ||
99 | WARN_ON(proplen != sizeof (*p)); | ||
100 | |||
101 | node_id = find_spu_node_id(spe); | ||
102 | |||
103 | start_pfn = p->address >> PAGE_SHIFT; | ||
104 | end_pfn = (p->address + p->len + PAGE_SIZE - 1) >> PAGE_SHIFT; | ||
105 | |||
106 | /* We need to call memory_present *before* the call to sparse_init, | ||
107 | but we can initialize the page structs only *after* that call. | ||
108 | Thus, we're being called twice. */ | ||
109 | if (early) | ||
110 | memory_present(node_id, start_pfn, end_pfn); | ||
111 | else { | ||
112 | /* As the pages backing SPU LS and I/O are outside the range | ||
113 | of regular memory, their page structs were not initialized | ||
114 | by free_area_init. Do it here instead. */ | ||
115 | for (pfn = start_pfn; pfn < end_pfn; pfn++) { | ||
116 | struct page *page = pfn_to_page(pfn); | ||
117 | set_page_links(page, ZONE_DMA, node_id, pfn); | ||
118 | set_page_count(page, 1); | ||
119 | reset_page_mapcount(page); | ||
120 | SetPageReserved(page); | ||
121 | INIT_LIST_HEAD(&page->lru); | ||
122 | } | ||
123 | } | ||
124 | } | ||
125 | |||
126 | static void __init cell_spumem_init(int early) | ||
127 | { | ||
128 | struct device_node *node; | ||
129 | for (node = of_find_node_by_type(NULL, "spe"); | ||
130 | node; node = of_find_node_by_type(node, "spe")) { | ||
131 | cell_spuprop_present(node, "local-store", early); | ||
132 | cell_spuprop_present(node, "problem", early); | ||
133 | cell_spuprop_present(node, "priv1", early); | ||
134 | cell_spuprop_present(node, "priv2", early); | ||
135 | } | ||
136 | } | ||
137 | #else | ||
138 | static void __init cell_spumem_init(int early) | ||
139 | { | ||
140 | } | ||
141 | #endif | ||
142 | |||
70 | static void cell_progress(char *s, unsigned short hex) | 143 | static void cell_progress(char *s, unsigned short hex) |
71 | { | 144 | { |
72 | printk("*** %04x : %s\n", hex, s ? s : ""); | 145 | printk("*** %04x : %s\n", hex, s ? s : ""); |
@@ -93,11 +166,14 @@ static void __init cell_setup_arch(void) | |||
93 | init_pci_config_tokens(); | 166 | init_pci_config_tokens(); |
94 | find_and_init_phbs(); | 167 | find_and_init_phbs(); |
95 | spider_init_IRQ(); | 168 | spider_init_IRQ(); |
169 | cell_pervasive_init(); | ||
96 | #ifdef CONFIG_DUMMY_CONSOLE | 170 | #ifdef CONFIG_DUMMY_CONSOLE |
97 | conswitchp = &dummy_con; | 171 | conswitchp = &dummy_con; |
98 | #endif | 172 | #endif |
99 | 173 | ||
100 | mmio_nvram_init(); | 174 | mmio_nvram_init(); |
175 | |||
176 | cell_spumem_init(0); | ||
101 | } | 177 | } |
102 | 178 | ||
103 | /* | 179 | /* |
@@ -113,6 +189,8 @@ static void __init cell_init_early(void) | |||
113 | 189 | ||
114 | ppc64_interrupt_controller = IC_CELL_PIC; | 190 | ppc64_interrupt_controller = IC_CELL_PIC; |
115 | 191 | ||
192 | cell_spumem_init(1); | ||
193 | |||
116 | DBG(" <- cell_init_early()\n"); | 194 | DBG(" <- cell_init_early()\n"); |
117 | } | 195 | } |
118 | 196 | ||
@@ -125,6 +203,15 @@ static int __init cell_probe(int platform) | |||
125 | return 1; | 203 | return 1; |
126 | } | 204 | } |
127 | 205 | ||
206 | /* | ||
207 | * Cell has no legacy IO; anything calling this function has to | ||
208 | * fail or bad things will happen | ||
209 | */ | ||
210 | static int cell_check_legacy_ioport(unsigned int baseport) | ||
211 | { | ||
212 | return -ENODEV; | ||
213 | } | ||
214 | |||
128 | struct machdep_calls __initdata cell_md = { | 215 | struct machdep_calls __initdata cell_md = { |
129 | .probe = cell_probe, | 216 | .probe = cell_probe, |
130 | .setup_arch = cell_setup_arch, | 217 | .setup_arch = cell_setup_arch, |
@@ -137,5 +224,11 @@ struct machdep_calls __initdata cell_md = { | |||
137 | .get_rtc_time = rtas_get_rtc_time, | 224 | .get_rtc_time = rtas_get_rtc_time, |
138 | .set_rtc_time = rtas_set_rtc_time, | 225 | .set_rtc_time = rtas_set_rtc_time, |
139 | .calibrate_decr = generic_calibrate_decr, | 226 | .calibrate_decr = generic_calibrate_decr, |
227 | .check_legacy_ioport = cell_check_legacy_ioport, | ||
140 | .progress = cell_progress, | 228 | .progress = cell_progress, |
229 | #ifdef CONFIG_KEXEC | ||
230 | .machine_kexec = default_machine_kexec, | ||
231 | .machine_kexec_prepare = default_machine_kexec_prepare, | ||
232 | .machine_crash_shutdown = default_machine_crash_shutdown, | ||
233 | #endif | ||
141 | }; | 234 | }; |
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c new file mode 100644 index 000000000000..d75ae03df686 --- /dev/null +++ b/arch/powerpc/platforms/cell/spu_base.c | |||
@@ -0,0 +1,711 @@ | |||
1 | /* | ||
2 | * Low-level SPU handling | ||
3 | * | ||
4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 | ||
5 | * | ||
6 | * Author: Arnd Bergmann <arndb@de.ibm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | ||
22 | |||
23 | #undef DEBUG | ||
24 | |||
25 | #include <linux/interrupt.h> | ||
26 | #include <linux/list.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/poll.h> | ||
29 | #include <linux/ptrace.h> | ||
30 | #include <linux/slab.h> | ||
31 | #include <linux/wait.h> | ||
32 | |||
33 | #include <asm/io.h> | ||
34 | #include <asm/prom.h> | ||
35 | #include <asm/semaphore.h> | ||
36 | #include <asm/spu.h> | ||
37 | #include <asm/mmu_context.h> | ||
38 | |||
39 | #include "interrupt.h" | ||
40 | |||
41 | static int __spu_trap_invalid_dma(struct spu *spu) | ||
42 | { | ||
43 | pr_debug("%s\n", __FUNCTION__); | ||
44 | force_sig(SIGBUS, /* info, */ current); | ||
45 | return 0; | ||
46 | } | ||
47 | |||
48 | static int __spu_trap_dma_align(struct spu *spu) | ||
49 | { | ||
50 | pr_debug("%s\n", __FUNCTION__); | ||
51 | force_sig(SIGBUS, /* info, */ current); | ||
52 | return 0; | ||
53 | } | ||
54 | |||
55 | static int __spu_trap_error(struct spu *spu) | ||
56 | { | ||
57 | pr_debug("%s\n", __FUNCTION__); | ||
58 | force_sig(SIGILL, /* info, */ current); | ||
59 | return 0; | ||
60 | } | ||
61 | |||
62 | static void spu_restart_dma(struct spu *spu) | ||
63 | { | ||
64 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
65 | |||
66 | if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags)) | ||
67 | out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND); | ||
68 | } | ||
69 | |||
70 | static int __spu_trap_data_seg(struct spu *spu, unsigned long ea) | ||
71 | { | ||
72 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
73 | struct mm_struct *mm = spu->mm; | ||
74 | u64 esid, vsid; | ||
75 | |||
76 | pr_debug("%s\n", __FUNCTION__); | ||
77 | |||
78 | if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) { | ||
79 | /* SLBs are pre-loaded for context switch, so | ||
80 | * we should never get here! | ||
81 | */ | ||
82 | printk("%s: invalid access during switch!\n", __func__); | ||
83 | return 1; | ||
84 | } | ||
85 | if (!mm || (REGION_ID(ea) != USER_REGION_ID)) { | ||
86 | /* Future: support kernel segments so that drivers | ||
87 | * can use SPUs. | ||
88 | */ | ||
89 | pr_debug("invalid region access at %016lx\n", ea); | ||
90 | return 1; | ||
91 | } | ||
92 | |||
93 | esid = (ea & ESID_MASK) | SLB_ESID_V; | ||
94 | vsid = (get_vsid(mm->context.id, ea) << SLB_VSID_SHIFT) | SLB_VSID_USER; | ||
95 | if (in_hugepage_area(mm->context, ea)) | ||
96 | vsid |= SLB_VSID_L; | ||
97 | |||
98 | out_be64(&priv2->slb_index_W, spu->slb_replace); | ||
99 | out_be64(&priv2->slb_vsid_RW, vsid); | ||
100 | out_be64(&priv2->slb_esid_RW, esid); | ||
101 | |||
102 | spu->slb_replace++; | ||
103 | if (spu->slb_replace >= 8) | ||
104 | spu->slb_replace = 0; | ||
105 | |||
106 | spu_restart_dma(spu); | ||
107 | |||
108 | return 0; | ||
109 | } | ||
110 | |||
111 | extern int hash_page(unsigned long ea, unsigned long access, unsigned long trap); //XXX | ||
112 | static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr) | ||
113 | { | ||
114 | pr_debug("%s\n", __FUNCTION__); | ||
115 | |||
116 | /* Handle kernel space hash faults immediately. | ||
117 | User hash faults need to be deferred to process context. */ | ||
118 | if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) | ||
119 | && REGION_ID(ea) != USER_REGION_ID | ||
120 | && hash_page(ea, _PAGE_PRESENT, 0x300) == 0) { | ||
121 | spu_restart_dma(spu); | ||
122 | return 0; | ||
123 | } | ||
124 | |||
125 | if (test_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags)) { | ||
126 | printk("%s: invalid access during switch!\n", __func__); | ||
127 | return 1; | ||
128 | } | ||
129 | |||
130 | spu->dar = ea; | ||
131 | spu->dsisr = dsisr; | ||
132 | mb(); | ||
133 | if (spu->stop_callback) | ||
134 | spu->stop_callback(spu); | ||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | static int __spu_trap_mailbox(struct spu *spu) | ||
139 | { | ||
140 | if (spu->ibox_callback) | ||
141 | spu->ibox_callback(spu); | ||
142 | |||
143 | /* atomically disable SPU mailbox interrupts */ | ||
144 | spin_lock(&spu->register_lock); | ||
145 | spu_int_mask_and(spu, 2, ~0x1); | ||
146 | spin_unlock(&spu->register_lock); | ||
147 | return 0; | ||
148 | } | ||
149 | |||
150 | static int __spu_trap_stop(struct spu *spu) | ||
151 | { | ||
152 | pr_debug("%s\n", __FUNCTION__); | ||
153 | spu->stop_code = in_be32(&spu->problem->spu_status_R); | ||
154 | if (spu->stop_callback) | ||
155 | spu->stop_callback(spu); | ||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | static int __spu_trap_halt(struct spu *spu) | ||
160 | { | ||
161 | pr_debug("%s\n", __FUNCTION__); | ||
162 | spu->stop_code = in_be32(&spu->problem->spu_status_R); | ||
163 | if (spu->stop_callback) | ||
164 | spu->stop_callback(spu); | ||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | static int __spu_trap_tag_group(struct spu *spu) | ||
169 | { | ||
170 | pr_debug("%s\n", __FUNCTION__); | ||
171 | /* wake_up(&spu->dma_wq); */ | ||
172 | return 0; | ||
173 | } | ||
174 | |||
175 | static int __spu_trap_spubox(struct spu *spu) | ||
176 | { | ||
177 | if (spu->wbox_callback) | ||
178 | spu->wbox_callback(spu); | ||
179 | |||
180 | /* atomically disable SPU mailbox interrupts */ | ||
181 | spin_lock(&spu->register_lock); | ||
182 | spu_int_mask_and(spu, 2, ~0x10); | ||
183 | spin_unlock(&spu->register_lock); | ||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | static irqreturn_t | ||
188 | spu_irq_class_0(int irq, void *data, struct pt_regs *regs) | ||
189 | { | ||
190 | struct spu *spu; | ||
191 | |||
192 | spu = data; | ||
193 | spu->class_0_pending = 1; | ||
194 | if (spu->stop_callback) | ||
195 | spu->stop_callback(spu); | ||
196 | |||
197 | return IRQ_HANDLED; | ||
198 | } | ||
199 | |||
200 | int | ||
201 | spu_irq_class_0_bottom(struct spu *spu) | ||
202 | { | ||
203 | unsigned long stat, mask; | ||
204 | |||
205 | spu->class_0_pending = 0; | ||
206 | |||
207 | mask = spu_int_mask_get(spu, 0); | ||
208 | stat = spu_int_stat_get(spu, 0); | ||
209 | |||
210 | stat &= mask; | ||
211 | |||
212 | if (stat & 1) /* invalid MFC DMA */ | ||
213 | __spu_trap_invalid_dma(spu); | ||
214 | |||
215 | if (stat & 2) /* invalid DMA alignment */ | ||
216 | __spu_trap_dma_align(spu); | ||
217 | |||
218 | if (stat & 4) /* error on SPU */ | ||
219 | __spu_trap_error(spu); | ||
220 | |||
221 | spu_int_stat_clear(spu, 0, stat); | ||
222 | |||
223 | return (stat & 0x7) ? -EIO : 0; | ||
224 | } | ||
225 | EXPORT_SYMBOL_GPL(spu_irq_class_0_bottom); | ||
226 | |||
227 | static irqreturn_t | ||
228 | spu_irq_class_1(int irq, void *data, struct pt_regs *regs) | ||
229 | { | ||
230 | struct spu *spu; | ||
231 | unsigned long stat, mask, dar, dsisr; | ||
232 | |||
233 | spu = data; | ||
234 | |||
235 | /* atomically read & clear class1 status. */ | ||
236 | spin_lock(&spu->register_lock); | ||
237 | mask = spu_int_mask_get(spu, 1); | ||
238 | stat = spu_int_stat_get(spu, 1) & mask; | ||
239 | dar = spu_mfc_dar_get(spu); | ||
240 | dsisr = spu_mfc_dsisr_get(spu); | ||
241 | if (stat & 2) /* mapping fault */ | ||
242 | spu_mfc_dsisr_set(spu, 0ul); | ||
243 | spu_int_stat_clear(spu, 1, stat); | ||
244 | spin_unlock(&spu->register_lock); | ||
245 | |||
246 | if (stat & 1) /* segment fault */ | ||
247 | __spu_trap_data_seg(spu, dar); | ||
248 | |||
249 | if (stat & 2) { /* mapping fault */ | ||
250 | __spu_trap_data_map(spu, dar, dsisr); | ||
251 | } | ||
252 | |||
253 | if (stat & 4) /* ls compare & suspend on get */ | ||
254 | ; | ||
255 | |||
256 | if (stat & 8) /* ls compare & suspend on put */ | ||
257 | ; | ||
258 | |||
259 | return stat ? IRQ_HANDLED : IRQ_NONE; | ||
260 | } | ||
261 | EXPORT_SYMBOL_GPL(spu_irq_class_1_bottom); | ||
262 | |||
263 | static irqreturn_t | ||
264 | spu_irq_class_2(int irq, void *data, struct pt_regs *regs) | ||
265 | { | ||
266 | struct spu *spu; | ||
267 | unsigned long stat; | ||
268 | unsigned long mask; | ||
269 | |||
270 | spu = data; | ||
271 | stat = spu_int_stat_get(spu, 2); | ||
272 | mask = spu_int_mask_get(spu, 2); | ||
273 | |||
274 | pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask); | ||
275 | |||
276 | stat &= mask; | ||
277 | |||
278 | if (stat & 1) /* PPC core mailbox */ | ||
279 | __spu_trap_mailbox(spu); | ||
280 | |||
281 | if (stat & 2) /* SPU stop-and-signal */ | ||
282 | __spu_trap_stop(spu); | ||
283 | |||
284 | if (stat & 4) /* SPU halted */ | ||
285 | __spu_trap_halt(spu); | ||
286 | |||
287 | if (stat & 8) /* DMA tag group complete */ | ||
288 | __spu_trap_tag_group(spu); | ||
289 | |||
290 | if (stat & 0x10) /* SPU mailbox threshold */ | ||
291 | __spu_trap_spubox(spu); | ||
292 | |||
293 | spu_int_stat_clear(spu, 2, stat); | ||
294 | return stat ? IRQ_HANDLED : IRQ_NONE; | ||
295 | } | ||
296 | |||
297 | static int | ||
298 | spu_request_irqs(struct spu *spu) | ||
299 | { | ||
300 | int ret; | ||
301 | int irq_base; | ||
302 | |||
303 | irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET; | ||
304 | |||
305 | snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", spu->number); | ||
306 | ret = request_irq(irq_base + spu->isrc, | ||
307 | spu_irq_class_0, 0, spu->irq_c0, spu); | ||
308 | if (ret) | ||
309 | goto out; | ||
310 | |||
311 | snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", spu->number); | ||
312 | ret = request_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, | ||
313 | spu_irq_class_1, 0, spu->irq_c1, spu); | ||
314 | if (ret) | ||
315 | goto out1; | ||
316 | |||
317 | snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", spu->number); | ||
318 | ret = request_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, | ||
319 | spu_irq_class_2, 0, spu->irq_c2, spu); | ||
320 | if (ret) | ||
321 | goto out2; | ||
322 | goto out; | ||
323 | |||
324 | out2: | ||
325 | free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu); | ||
326 | out1: | ||
327 | free_irq(irq_base + spu->isrc, spu); | ||
328 | out: | ||
329 | return ret; | ||
330 | } | ||
331 | |||
332 | static void | ||
333 | spu_free_irqs(struct spu *spu) | ||
334 | { | ||
335 | int irq_base; | ||
336 | |||
337 | irq_base = IIC_NODE_STRIDE * spu->node + IIC_SPE_OFFSET; | ||
338 | |||
339 | free_irq(irq_base + spu->isrc, spu); | ||
340 | free_irq(irq_base + IIC_CLASS_STRIDE + spu->isrc, spu); | ||
341 | free_irq(irq_base + 2*IIC_CLASS_STRIDE + spu->isrc, spu); | ||
342 | } | ||
343 | |||
344 | static LIST_HEAD(spu_list); | ||
345 | static DECLARE_MUTEX(spu_mutex); | ||
346 | |||
347 | static void spu_init_channels(struct spu *spu) | ||
348 | { | ||
349 | static const struct { | ||
350 | unsigned channel; | ||
351 | unsigned count; | ||
352 | } zero_list[] = { | ||
353 | { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, }, | ||
354 | { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, }, | ||
355 | }, count_list[] = { | ||
356 | { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, }, | ||
357 | { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, }, | ||
358 | { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, }, | ||
359 | }; | ||
360 | struct spu_priv2 __iomem *priv2; | ||
361 | int i; | ||
362 | |||
363 | priv2 = spu->priv2; | ||
364 | |||
365 | /* initialize all channel data to zero */ | ||
366 | for (i = 0; i < ARRAY_SIZE(zero_list); i++) { | ||
367 | int count; | ||
368 | |||
369 | out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel); | ||
370 | for (count = 0; count < zero_list[i].count; count++) | ||
371 | out_be64(&priv2->spu_chnldata_RW, 0); | ||
372 | } | ||
373 | |||
374 | /* initialize channel counts to meaningful values */ | ||
375 | for (i = 0; i < ARRAY_SIZE(count_list); i++) { | ||
376 | out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel); | ||
377 | out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count); | ||
378 | } | ||
379 | } | ||
380 | |||
381 | struct spu *spu_alloc(void) | ||
382 | { | ||
383 | struct spu *spu; | ||
384 | |||
385 | down(&spu_mutex); | ||
386 | if (!list_empty(&spu_list)) { | ||
387 | spu = list_entry(spu_list.next, struct spu, list); | ||
388 | list_del_init(&spu->list); | ||
389 | pr_debug("Got SPU %x %d\n", spu->isrc, spu->number); | ||
390 | } else { | ||
391 | pr_debug("No SPU left\n"); | ||
392 | spu = NULL; | ||
393 | } | ||
394 | up(&spu_mutex); | ||
395 | |||
396 | if (spu) | ||
397 | spu_init_channels(spu); | ||
398 | |||
399 | return spu; | ||
400 | } | ||
401 | EXPORT_SYMBOL_GPL(spu_alloc); | ||
402 | |||
403 | void spu_free(struct spu *spu) | ||
404 | { | ||
405 | down(&spu_mutex); | ||
406 | list_add_tail(&spu->list, &spu_list); | ||
407 | up(&spu_mutex); | ||
408 | } | ||
409 | EXPORT_SYMBOL_GPL(spu_free); | ||
410 | |||
411 | static int spu_handle_mm_fault(struct spu *spu) | ||
412 | { | ||
413 | struct mm_struct *mm = spu->mm; | ||
414 | struct vm_area_struct *vma; | ||
415 | u64 ea, dsisr, is_write; | ||
416 | int ret; | ||
417 | |||
418 | ea = spu->dar; | ||
419 | dsisr = spu->dsisr; | ||
420 | #if 0 | ||
421 | if (!IS_VALID_EA(ea)) { | ||
422 | return -EFAULT; | ||
423 | } | ||
424 | #endif /* XXX */ | ||
425 | if (mm == NULL) { | ||
426 | return -EFAULT; | ||
427 | } | ||
428 | if (mm->pgd == NULL) { | ||
429 | return -EFAULT; | ||
430 | } | ||
431 | |||
432 | down_read(&mm->mmap_sem); | ||
433 | vma = find_vma(mm, ea); | ||
434 | if (!vma) | ||
435 | goto bad_area; | ||
436 | if (vma->vm_start <= ea) | ||
437 | goto good_area; | ||
438 | if (!(vma->vm_flags & VM_GROWSDOWN)) | ||
439 | goto bad_area; | ||
440 | #if 0 | ||
441 | if (expand_stack(vma, ea)) | ||
442 | goto bad_area; | ||
443 | #endif /* XXX */ | ||
444 | good_area: | ||
445 | is_write = dsisr & MFC_DSISR_ACCESS_PUT; | ||
446 | if (is_write) { | ||
447 | if (!(vma->vm_flags & VM_WRITE)) | ||
448 | goto bad_area; | ||
449 | } else { | ||
450 | if (dsisr & MFC_DSISR_ACCESS_DENIED) | ||
451 | goto bad_area; | ||
452 | if (!(vma->vm_flags & (VM_READ | VM_EXEC))) | ||
453 | goto bad_area; | ||
454 | } | ||
455 | ret = 0; | ||
456 | switch (handle_mm_fault(mm, vma, ea, is_write)) { | ||
457 | case VM_FAULT_MINOR: | ||
458 | current->min_flt++; | ||
459 | break; | ||
460 | case VM_FAULT_MAJOR: | ||
461 | current->maj_flt++; | ||
462 | break; | ||
463 | case VM_FAULT_SIGBUS: | ||
464 | ret = -EFAULT; | ||
465 | goto bad_area; | ||
466 | case VM_FAULT_OOM: | ||
467 | ret = -ENOMEM; | ||
468 | goto bad_area; | ||
469 | default: | ||
470 | BUG(); | ||
471 | } | ||
472 | up_read(&mm->mmap_sem); | ||
473 | return ret; | ||
474 | |||
475 | bad_area: | ||
476 | up_read(&mm->mmap_sem); | ||
477 | return -EFAULT; | ||
478 | } | ||
479 | |||
480 | int spu_irq_class_1_bottom(struct spu *spu) | ||
481 | { | ||
482 | u64 ea, dsisr, access, error = 0UL; | ||
483 | int ret = 0; | ||
484 | |||
485 | ea = spu->dar; | ||
486 | dsisr = spu->dsisr; | ||
487 | if (dsisr & MFC_DSISR_PTE_NOT_FOUND) { | ||
488 | access = (_PAGE_PRESENT | _PAGE_USER); | ||
489 | access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_RW : 0UL; | ||
490 | if (hash_page(ea, access, 0x300) != 0) | ||
491 | error |= CLASS1_ENABLE_STORAGE_FAULT_INTR; | ||
492 | } | ||
493 | if ((error & CLASS1_ENABLE_STORAGE_FAULT_INTR) || | ||
494 | (dsisr & MFC_DSISR_ACCESS_DENIED)) { | ||
495 | if ((ret = spu_handle_mm_fault(spu)) != 0) | ||
496 | error |= CLASS1_ENABLE_STORAGE_FAULT_INTR; | ||
497 | else | ||
498 | error &= ~CLASS1_ENABLE_STORAGE_FAULT_INTR; | ||
499 | } | ||
500 | spu->dar = 0UL; | ||
501 | spu->dsisr = 0UL; | ||
502 | if (!error) { | ||
503 | spu_restart_dma(spu); | ||
504 | } else { | ||
505 | __spu_trap_invalid_dma(spu); | ||
506 | } | ||
507 | return ret; | ||
508 | } | ||
509 | |||
510 | void spu_irq_setaffinity(struct spu *spu, int cpu) | ||
511 | { | ||
512 | u64 target = iic_get_target_id(cpu); | ||
513 | u64 route = target << 48 | target << 32 | target << 16; | ||
514 | spu_int_route_set(spu, route); | ||
515 | } | ||
516 | EXPORT_SYMBOL_GPL(spu_irq_setaffinity); | ||
517 | |||
518 | static void __iomem * __init map_spe_prop(struct device_node *n, | ||
519 | const char *name) | ||
520 | { | ||
521 | struct address_prop { | ||
522 | unsigned long address; | ||
523 | unsigned int len; | ||
524 | } __attribute__((packed)) *prop; | ||
525 | |||
526 | void *p; | ||
527 | int proplen; | ||
528 | |||
529 | p = get_property(n, name, &proplen); | ||
530 | if (proplen != sizeof (struct address_prop)) | ||
531 | return NULL; | ||
532 | |||
533 | prop = p; | ||
534 | |||
535 | return ioremap(prop->address, prop->len); | ||
536 | } | ||
537 | |||
538 | static void spu_unmap(struct spu *spu) | ||
539 | { | ||
540 | iounmap(spu->priv2); | ||
541 | iounmap(spu->priv1); | ||
542 | iounmap(spu->problem); | ||
543 | iounmap((u8 __iomem *)spu->local_store); | ||
544 | } | ||
545 | |||
546 | static int __init spu_map_device(struct spu *spu, struct device_node *spe) | ||
547 | { | ||
548 | char *prop; | ||
549 | int ret; | ||
550 | |||
551 | ret = -ENODEV; | ||
552 | prop = get_property(spe, "isrc", NULL); | ||
553 | if (!prop) | ||
554 | goto out; | ||
555 | spu->isrc = *(unsigned int *)prop; | ||
556 | |||
557 | spu->name = get_property(spe, "name", NULL); | ||
558 | if (!spu->name) | ||
559 | goto out; | ||
560 | |||
561 | prop = get_property(spe, "local-store", NULL); | ||
562 | if (!prop) | ||
563 | goto out; | ||
564 | spu->local_store_phys = *(unsigned long *)prop; | ||
565 | |||
566 | /* we use local store as ram, not io memory */ | ||
567 | spu->local_store = (void __force *)map_spe_prop(spe, "local-store"); | ||
568 | if (!spu->local_store) | ||
569 | goto out; | ||
570 | |||
571 | spu->problem = map_spe_prop(spe, "problem"); | ||
572 | if (!spu->problem) | ||
573 | goto out_unmap; | ||
574 | |||
575 | spu->priv1 = map_spe_prop(spe, "priv1"); | ||
576 | /* priv1 is not available on a hypervisor */ | ||
577 | |||
578 | spu->priv2 = map_spe_prop(spe, "priv2"); | ||
579 | if (!spu->priv2) | ||
580 | goto out_unmap; | ||
581 | ret = 0; | ||
582 | goto out; | ||
583 | |||
584 | out_unmap: | ||
585 | spu_unmap(spu); | ||
586 | out: | ||
587 | return ret; | ||
588 | } | ||
589 | |||
590 | static int __init find_spu_node_id(struct device_node *spe) | ||
591 | { | ||
592 | unsigned int *id; | ||
593 | struct device_node *cpu; | ||
594 | |||
595 | cpu = spe->parent->parent; | ||
596 | id = (unsigned int *)get_property(cpu, "node-id", NULL); | ||
597 | |||
598 | return id ? *id : 0; | ||
599 | } | ||
600 | |||
601 | static int __init create_spu(struct device_node *spe) | ||
602 | { | ||
603 | struct spu *spu; | ||
604 | int ret; | ||
605 | static int number; | ||
606 | |||
607 | ret = -ENOMEM; | ||
608 | spu = kmalloc(sizeof (*spu), GFP_KERNEL); | ||
609 | if (!spu) | ||
610 | goto out; | ||
611 | |||
612 | ret = spu_map_device(spu, spe); | ||
613 | if (ret) | ||
614 | goto out_free; | ||
615 | |||
616 | spu->node = find_spu_node_id(spe); | ||
617 | spu->stop_code = 0; | ||
618 | spu->slb_replace = 0; | ||
619 | spu->mm = NULL; | ||
620 | spu->ctx = NULL; | ||
621 | spu->rq = NULL; | ||
622 | spu->pid = 0; | ||
623 | spu->class_0_pending = 0; | ||
624 | spu->flags = 0UL; | ||
625 | spu->dar = 0UL; | ||
626 | spu->dsisr = 0UL; | ||
627 | spin_lock_init(&spu->register_lock); | ||
628 | |||
629 | spu_mfc_sdr_set(spu, mfspr(SPRN_SDR1)); | ||
630 | spu_mfc_sr1_set(spu, 0x33); | ||
631 | |||
632 | spu->ibox_callback = NULL; | ||
633 | spu->wbox_callback = NULL; | ||
634 | spu->stop_callback = NULL; | ||
635 | |||
636 | down(&spu_mutex); | ||
637 | spu->number = number++; | ||
638 | ret = spu_request_irqs(spu); | ||
639 | if (ret) | ||
640 | goto out_unmap; | ||
641 | |||
642 | list_add(&spu->list, &spu_list); | ||
643 | up(&spu_mutex); | ||
644 | |||
645 | pr_debug("Using SPE %s %02x %p %p %p %p %d\n", | ||
646 | spu->name, spu->isrc, spu->local_store, | ||
647 | spu->problem, spu->priv1, spu->priv2, spu->number); | ||
648 | goto out; | ||
649 | |||
650 | out_unmap: | ||
651 | up(&spu_mutex); | ||
652 | spu_unmap(spu); | ||
653 | out_free: | ||
654 | kfree(spu); | ||
655 | out: | ||
656 | return ret; | ||
657 | } | ||
658 | |||
659 | static void destroy_spu(struct spu *spu) | ||
660 | { | ||
661 | list_del_init(&spu->list); | ||
662 | |||
663 | spu_free_irqs(spu); | ||
664 | spu_unmap(spu); | ||
665 | kfree(spu); | ||
666 | } | ||
667 | |||
668 | static void cleanup_spu_base(void) | ||
669 | { | ||
670 | struct spu *spu, *tmp; | ||
671 | down(&spu_mutex); | ||
672 | list_for_each_entry_safe(spu, tmp, &spu_list, list) | ||
673 | destroy_spu(spu); | ||
674 | up(&spu_mutex); | ||
675 | } | ||
676 | module_exit(cleanup_spu_base); | ||
677 | |||
678 | static int __init init_spu_base(void) | ||
679 | { | ||
680 | struct device_node *node; | ||
681 | int ret; | ||
682 | |||
683 | ret = -ENODEV; | ||
684 | for (node = of_find_node_by_type(NULL, "spe"); | ||
685 | node; node = of_find_node_by_type(node, "spe")) { | ||
686 | ret = create_spu(node); | ||
687 | if (ret) { | ||
688 | printk(KERN_WARNING "%s: Error initializing %s\n", | ||
689 | __FUNCTION__, node->name); | ||
690 | cleanup_spu_base(); | ||
691 | break; | ||
692 | } | ||
693 | } | ||
694 | /* in some old firmware versions, the spe is called 'spc', so we | ||
695 | look for that as well */ | ||
696 | for (node = of_find_node_by_type(NULL, "spc"); | ||
697 | node; node = of_find_node_by_type(node, "spc")) { | ||
698 | ret = create_spu(node); | ||
699 | if (ret) { | ||
700 | printk(KERN_WARNING "%s: Error initializing %s\n", | ||
701 | __FUNCTION__, node->name); | ||
702 | cleanup_spu_base(); | ||
703 | break; | ||
704 | } | ||
705 | } | ||
706 | return ret; | ||
707 | } | ||
708 | module_init(init_spu_base); | ||
709 | |||
710 | MODULE_LICENSE("GPL"); | ||
711 | MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); | ||
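
spu_alloc(), spu_free() and the three callback pointers above form the low-level interface that spufs builds on. A minimal sketch of a hypothetical client follows, assuming kernel context with asm/spu.h available; all my_* names are placeholders and not part of this patch:

	#include <linux/errno.h>
	#include <asm/spu.h>

	static void my_stop(struct spu *spu)
	{
		/* invoked from the class 0/1/2 interrupt handlers in spu_base.c */
	}

	static int my_grab_spu(void)
	{
		struct spu *spu = spu_alloc();	/* NULL when no SPU is left */

		if (!spu)
			return -ENOSPC;
		spu->stop_callback = my_stop;
		/* ... run work on the SPU ... */
		spu->stop_callback = NULL;
		spu_free(spu);
		return 0;
	}
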
diff --git a/arch/powerpc/platforms/cell/spu_priv1.c b/arch/powerpc/platforms/cell/spu_priv1.c new file mode 100644 index 000000000000..b2656421c7b5 --- /dev/null +++ b/arch/powerpc/platforms/cell/spu_priv1.c | |||
@@ -0,0 +1,133 @@ | |||
1 | /* | ||
2 | * access to SPU privileged registers | ||
3 | */ | ||
4 | #include <linux/module.h> | ||
5 | |||
6 | #include <asm/io.h> | ||
7 | #include <asm/spu.h> | ||
8 | |||
9 | void spu_int_mask_and(struct spu *spu, int class, u64 mask) | ||
10 | { | ||
11 | u64 old_mask; | ||
12 | |||
13 | old_mask = in_be64(&spu->priv1->int_mask_RW[class]); | ||
14 | out_be64(&spu->priv1->int_mask_RW[class], old_mask & mask); | ||
15 | } | ||
16 | EXPORT_SYMBOL_GPL(spu_int_mask_and); | ||
17 | |||
18 | void spu_int_mask_or(struct spu *spu, int class, u64 mask) | ||
19 | { | ||
20 | u64 old_mask; | ||
21 | |||
22 | old_mask = in_be64(&spu->priv1->int_mask_RW[class]); | ||
23 | out_be64(&spu->priv1->int_mask_RW[class], old_mask | mask); | ||
24 | } | ||
25 | EXPORT_SYMBOL_GPL(spu_int_mask_or); | ||
26 | |||
27 | void spu_int_mask_set(struct spu *spu, int class, u64 mask) | ||
28 | { | ||
29 | out_be64(&spu->priv1->int_mask_RW[class], mask); | ||
30 | } | ||
31 | EXPORT_SYMBOL_GPL(spu_int_mask_set); | ||
32 | |||
33 | u64 spu_int_mask_get(struct spu *spu, int class) | ||
34 | { | ||
35 | return in_be64(&spu->priv1->int_mask_RW[class]); | ||
36 | } | ||
37 | EXPORT_SYMBOL_GPL(spu_int_mask_get); | ||
38 | |||
39 | void spu_int_stat_clear(struct spu *spu, int class, u64 stat) | ||
40 | { | ||
41 | out_be64(&spu->priv1->int_stat_RW[class], stat); | ||
42 | } | ||
43 | EXPORT_SYMBOL_GPL(spu_int_stat_clear); | ||
44 | |||
45 | u64 spu_int_stat_get(struct spu *spu, int class) | ||
46 | { | ||
47 | return in_be64(&spu->priv1->int_stat_RW[class]); | ||
48 | } | ||
49 | EXPORT_SYMBOL_GPL(spu_int_stat_get); | ||
50 | |||
51 | void spu_int_route_set(struct spu *spu, u64 route) | ||
52 | { | ||
53 | out_be64(&spu->priv1->int_route_RW, route); | ||
54 | } | ||
55 | EXPORT_SYMBOL_GPL(spu_int_route_set); | ||
56 | |||
57 | u64 spu_mfc_dar_get(struct spu *spu) | ||
58 | { | ||
59 | return in_be64(&spu->priv1->mfc_dar_RW); | ||
60 | } | ||
61 | EXPORT_SYMBOL_GPL(spu_mfc_dar_get); | ||
62 | |||
63 | u64 spu_mfc_dsisr_get(struct spu *spu) | ||
64 | { | ||
65 | return in_be64(&spu->priv1->mfc_dsisr_RW); | ||
66 | } | ||
67 | EXPORT_SYMBOL_GPL(spu_mfc_dsisr_get); | ||
68 | |||
69 | void spu_mfc_dsisr_set(struct spu *spu, u64 dsisr) | ||
70 | { | ||
71 | out_be64(&spu->priv1->mfc_dsisr_RW, dsisr); | ||
72 | } | ||
73 | EXPORT_SYMBOL_GPL(spu_mfc_dsisr_set); | ||
74 | |||
75 | void spu_mfc_sdr_set(struct spu *spu, u64 sdr) | ||
76 | { | ||
77 | out_be64(&spu->priv1->mfc_sdr_RW, sdr); | ||
78 | } | ||
79 | EXPORT_SYMBOL_GPL(spu_mfc_sdr_set); | ||
80 | |||
81 | void spu_mfc_sr1_set(struct spu *spu, u64 sr1) | ||
82 | { | ||
83 | out_be64(&spu->priv1->mfc_sr1_RW, sr1); | ||
84 | } | ||
85 | EXPORT_SYMBOL_GPL(spu_mfc_sr1_set); | ||
86 | |||
87 | u64 spu_mfc_sr1_get(struct spu *spu) | ||
88 | { | ||
89 | return in_be64(&spu->priv1->mfc_sr1_RW); | ||
90 | } | ||
91 | EXPORT_SYMBOL_GPL(spu_mfc_sr1_get); | ||
92 | |||
93 | void spu_mfc_tclass_id_set(struct spu *spu, u64 tclass_id) | ||
94 | { | ||
95 | out_be64(&spu->priv1->mfc_tclass_id_RW, tclass_id); | ||
96 | } | ||
97 | EXPORT_SYMBOL_GPL(spu_mfc_tclass_id_set); | ||
98 | |||
99 | u64 spu_mfc_tclass_id_get(struct spu *spu) | ||
100 | { | ||
101 | return in_be64(&spu->priv1->mfc_tclass_id_RW); | ||
102 | } | ||
103 | EXPORT_SYMBOL_GPL(spu_mfc_tclass_id_get); | ||
104 | |||
105 | void spu_tlb_invalidate(struct spu *spu) | ||
106 | { | ||
107 | out_be64(&spu->priv1->tlb_invalidate_entry_W, 0ul); | ||
108 | } | ||
109 | EXPORT_SYMBOL_GPL(spu_tlb_invalidate); | ||
110 | |||
111 | void spu_resource_allocation_groupID_set(struct spu *spu, u64 id) | ||
112 | { | ||
113 | out_be64(&spu->priv1->resource_allocation_groupID_RW, id); | ||
114 | } | ||
115 | EXPORT_SYMBOL_GPL(spu_resource_allocation_groupID_set); | ||
116 | |||
117 | u64 spu_resource_allocation_groupID_get(struct spu *spu) | ||
118 | { | ||
119 | return in_be64(&spu->priv1->resource_allocation_groupID_RW); | ||
120 | } | ||
121 | EXPORT_SYMBOL_GPL(spu_resource_allocation_groupID_get); | ||
122 | |||
123 | void spu_resource_allocation_enable_set(struct spu *spu, u64 enable) | ||
124 | { | ||
125 | out_be64(&spu->priv1->resource_allocation_enable_RW, enable); | ||
126 | } | ||
127 | EXPORT_SYMBOL_GPL(spu_resource_allocation_enable_set); | ||
128 | |||
129 | u64 spu_resource_allocation_enable_get(struct spu *spu) | ||
130 | { | ||
131 | return in_be64(&spu->priv1->resource_allocation_enable_RW); | ||
132 | } | ||
133 | EXPORT_SYMBOL_GPL(spu_resource_allocation_enable_get); | ||
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c new file mode 100644 index 000000000000..261b507a901a --- /dev/null +++ b/arch/powerpc/platforms/cell/spu_syscalls.c | |||
@@ -0,0 +1,88 @@ | |||
1 | /* | ||
2 | * SPU file system -- system call stubs | ||
3 | * | ||
4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 | ||
5 | * | ||
6 | * Author: Arnd Bergmann <arndb@de.ibm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | ||
22 | #include <linux/file.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/syscalls.h> | ||
25 | |||
26 | #include <asm/spu.h> | ||
27 | |||
28 | struct spufs_calls spufs_calls = { | ||
29 | .owner = NULL, | ||
30 | }; | ||
31 | |||
32 | /* These stub syscalls are needed to have the actual implementation | ||
33 | * within a loadable module. When spufs is built into the kernel, | ||
34 | * this file is not used and the syscalls directly enter the fs code */ | ||
35 | |||
36 | asmlinkage long sys_spu_create(const char __user *name, | ||
37 | unsigned int flags, mode_t mode) | ||
38 | { | ||
39 | long ret; | ||
40 | struct module *owner = spufs_calls.owner; | ||
41 | |||
42 | ret = -ENOSYS; | ||
43 | if (owner && try_module_get(owner)) { | ||
44 | ret = spufs_calls.create_thread(name, flags, mode); | ||
45 | module_put(owner); | ||
46 | } | ||
47 | return ret; | ||
48 | } | ||
49 | |||
50 | asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus) | ||
51 | { | ||
52 | long ret; | ||
53 | struct file *filp; | ||
54 | int fput_needed; | ||
55 | struct module *owner = spufs_calls.owner; | ||
56 | |||
57 | ret = -ENOSYS; | ||
58 | if (owner && try_module_get(owner)) { | ||
59 | ret = -EBADF; | ||
60 | filp = fget_light(fd, &fput_needed); | ||
61 | if (filp) { | ||
62 | ret = spufs_calls.spu_run(filp, unpc, ustatus); | ||
63 | fput_light(filp, fput_needed); | ||
64 | } | ||
65 | module_put(owner); | ||
66 | } | ||
67 | return ret; | ||
68 | } | ||
69 | |||
70 | int register_spu_syscalls(struct spufs_calls *calls) | ||
71 | { | ||
72 | if (spufs_calls.owner) | ||
73 | return -EBUSY; | ||
74 | |||
75 | spufs_calls.create_thread = calls->create_thread; | ||
76 | spufs_calls.spu_run = calls->spu_run; | ||
77 | smp_mb(); | ||
78 | spufs_calls.owner = calls->owner; | ||
79 | return 0; | ||
80 | } | ||
81 | EXPORT_SYMBOL_GPL(register_spu_syscalls); | ||
82 | |||
83 | void unregister_spu_syscalls(struct spufs_calls *calls) | ||
84 | { | ||
85 | BUG_ON(spufs_calls.owner != calls->owner); | ||
86 | spufs_calls.owner = NULL; | ||
87 | } | ||
88 | EXPORT_SYMBOL_GPL(unregister_spu_syscalls); | ||
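
The stubs above only dispatch through spufs_calls; the real implementation registers itself at module load time. A rough sketch of that registration in a loadable module is shown below; everything prefixed my_ is a placeholder, and the callback signatures are inferred from the stub call sites. Only struct spufs_calls, register_spu_syscalls() and unregister_spu_syscalls() come from the file above:

	#include <linux/file.h>
	#include <linux/module.h>
	#include <asm/spu.h>

	/* placeholder implementations, signatures inferred from the stubs */
	extern long my_create_thread(const char __user *name,
				     unsigned int flags, mode_t mode);
	extern long my_spu_run(struct file *filp,
			       __u32 __user *unpc, __u32 __user *ustatus);

	static struct spufs_calls my_calls = {
		.create_thread	= my_create_thread,
		.spu_run	= my_spu_run,
		.owner		= THIS_MODULE,
	};

	static int __init my_module_init(void)
	{
		/* fails with -EBUSY if another provider is already registered */
		return register_spu_syscalls(&my_calls);
	}

	static void __exit my_module_exit(void)
	{
		unregister_spu_syscalls(&my_calls);
	}

	module_init(my_module_init);
	module_exit(my_module_exit);
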
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile new file mode 100644 index 000000000000..a7cddf40e3d9 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/Makefile | |||
@@ -0,0 +1,54 @@ | |||
1 | obj-$(CONFIG_SPU_FS) += spufs.o | ||
2 | spufs-y += inode.o file.o context.o switch.o syscalls.o | ||
3 | spufs-y += sched.o backing_ops.o hw_ops.o run.o | ||
4 | |||
5 | # Rules to build switch.o with the help of SPU tool chain | ||
6 | SPU_CROSS := spu- | ||
7 | SPU_CC := $(SPU_CROSS)gcc | ||
8 | SPU_AS := $(SPU_CROSS)gcc | ||
9 | SPU_LD := $(SPU_CROSS)ld | ||
10 | SPU_OBJCOPY := $(SPU_CROSS)objcopy | ||
11 | SPU_CFLAGS := -O2 -Wall -I$(srctree)/include -I$(objtree)/include2 | ||
12 | SPU_AFLAGS := -c -D__ASSEMBLY__ -I$(srctree)/include -I$(objtree)/include2 | ||
13 | SPU_LDFLAGS := -N -Ttext=0x0 | ||
14 | |||
15 | $(obj)/switch.o: $(obj)/spu_save_dump.h $(obj)/spu_restore_dump.h | ||
16 | |||
17 | # Compile SPU files | ||
18 | cmd_spu_cc = $(SPU_CC) $(SPU_CFLAGS) -c -o $@ $< | ||
19 | quiet_cmd_spu_cc = SPU_CC $@ | ||
20 | $(obj)/spu_%.o: $(src)/spu_%.c | ||
21 | $(call if_changed,spu_cc) | ||
22 | |||
23 | # Assemble SPU files | ||
24 | cmd_spu_as = $(SPU_AS) $(SPU_AFLAGS) -o $@ $< | ||
25 | quiet_cmd_spu_as = SPU_AS $@ | ||
26 | $(obj)/spu_%.o: $(src)/spu_%.S | ||
27 | $(call if_changed,spu_as) | ||
28 | |||
29 | # Link SPU Executables | ||
30 | cmd_spu_ld = $(SPU_LD) $(SPU_LDFLAGS) -o $@ $^ | ||
31 | quiet_cmd_spu_ld = SPU_LD $@ | ||
32 | $(obj)/spu_%: $(obj)/spu_%_crt0.o $(obj)/spu_%.o | ||
33 | $(call if_changed,spu_ld) | ||
34 | |||
35 | # Copy into binary format | ||
36 | cmd_spu_objcopy = $(SPU_OBJCOPY) -O binary $< $@ | ||
37 | quiet_cmd_spu_objcopy = OBJCOPY $@ | ||
38 | $(obj)/spu_%.bin: $(src)/spu_% | ||
39 | $(call if_changed,spu_objcopy) | ||
40 | |||
41 | # create C code from ELF executable | ||
42 | cmd_hexdump = ( \ | ||
43 | echo "/*" ; \ | ||
44 | echo " * $*_dump.h: Copyright (C) 2005 IBM." ; \ | ||
45 | echo " * Hex-dump auto generated from $*.c." ; \ | ||
46 | echo " * Do not edit!" ; \ | ||
47 | echo " */" ; \ | ||
48 | echo "static unsigned int $*_code[] __page_aligned = {" ; \ | ||
49 | hexdump -v -e '"0x" 4/1 "%02x" "," "\n"' $< ; \ | ||
50 | echo "};" ; \ | ||
51 | ) > $@ | ||
52 | quiet_cmd_hexdump = HEXDUMP $@ | ||
53 | $(obj)/%_dump.h: $(obj)/%.bin | ||
54 | $(call if_changed,hexdump) | ||
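
The HEXDUMP rule turns each SPU-side binary into a C array that switch.o pulls in via spu_save_dump.h and spu_restore_dump.h. For spu_save.bin the generated header has roughly this shape, with one 32-bit word per 4 bytes of the binary; the single word shown is an illustrative placeholder, not real output:

	/*
	 * spu_save_dump.h: Copyright (C) 2005 IBM.
	 * Hex-dump auto generated from spu_save.c.
	 * Do not edit!
	 */
	static unsigned int spu_save_code[] __page_aligned = {
	0x00000000, /* placeholder; the real words come from the hexdump rule */
	};
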
diff --git a/arch/powerpc/platforms/cell/spufs/backing_ops.c b/arch/powerpc/platforms/cell/spufs/backing_ops.c new file mode 100644 index 000000000000..a5c489a53c61 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/backing_ops.c | |||
@@ -0,0 +1,308 @@ | |||
1 | /* backing_ops.c - query/set operations on saved SPU context. | ||
2 | * | ||
3 | * Copyright (C) IBM 2005 | ||
4 | * Author: Mark Nutter <mnutter@us.ibm.com> | ||
5 | * | ||
6 | * These register operations allow SPUFS to operate on saved | ||
7 | * SPU contexts rather than hardware. | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License as published by | ||
11 | * the Free Software Foundation; either version 2, or (at your option) | ||
12 | * any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, | ||
15 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | * GNU General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License | ||
20 | * along with this program; if not, write to the Free Software | ||
21 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
22 | */ | ||
23 | |||
24 | #include <linux/config.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/errno.h> | ||
27 | #include <linux/sched.h> | ||
28 | #include <linux/kernel.h> | ||
29 | #include <linux/mm.h> | ||
30 | #include <linux/vmalloc.h> | ||
31 | #include <linux/smp.h> | ||
32 | #include <linux/smp_lock.h> | ||
33 | #include <linux/stddef.h> | ||
34 | #include <linux/unistd.h> | ||
35 | #include <linux/poll.h> | ||
36 | |||
37 | #include <asm/io.h> | ||
38 | #include <asm/spu.h> | ||
39 | #include <asm/spu_csa.h> | ||
40 | #include <asm/mmu_context.h> | ||
41 | #include "spufs.h" | ||
42 | |||
43 | /* | ||
44 | * Reads/writes to various problem and priv2 registers require | ||
45 | * state changes, i.e. generate SPU events, modify channel | ||
46 | * counts, etc. | ||
47 | */ | ||
48 | |||
49 | static void gen_spu_event(struct spu_context *ctx, u32 event) | ||
50 | { | ||
51 | u64 ch0_cnt; | ||
52 | u64 ch0_data; | ||
53 | u64 ch1_data; | ||
54 | |||
55 | ch0_cnt = ctx->csa.spu_chnlcnt_RW[0]; | ||
56 | ch0_data = ctx->csa.spu_chnldata_RW[0]; | ||
57 | ch1_data = ctx->csa.spu_chnldata_RW[1]; | ||
58 | ctx->csa.spu_chnldata_RW[0] |= event; | ||
59 | if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) { | ||
60 | ctx->csa.spu_chnlcnt_RW[0] = 1; | ||
61 | } | ||
62 | } | ||
63 | |||
64 | static int spu_backing_mbox_read(struct spu_context *ctx, u32 * data) | ||
65 | { | ||
66 | u32 mbox_stat; | ||
67 | int ret = 0; | ||
68 | |||
69 | spin_lock(&ctx->csa.register_lock); | ||
70 | mbox_stat = ctx->csa.prob.mb_stat_R; | ||
71 | if (mbox_stat & 0x0000ff) { | ||
72 | /* Read the first available word. | ||
73 | * Implementation note: the depth | ||
74 | * of pu_mb_R is currently 1. | ||
75 | */ | ||
76 | *data = ctx->csa.prob.pu_mb_R; | ||
77 | ctx->csa.prob.mb_stat_R &= ~(0x0000ff); | ||
78 | ctx->csa.spu_chnlcnt_RW[28] = 1; | ||
79 | gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT); | ||
80 | ret = 4; | ||
81 | } | ||
82 | spin_unlock(&ctx->csa.register_lock); | ||
83 | return ret; | ||
84 | } | ||
85 | |||
86 | static u32 spu_backing_mbox_stat_read(struct spu_context *ctx) | ||
87 | { | ||
88 | return ctx->csa.prob.mb_stat_R; | ||
89 | } | ||
90 | |||
91 | static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx, | ||
92 | unsigned int events) | ||
93 | { | ||
94 | int ret; | ||
95 | u32 stat; | ||
96 | |||
97 | ret = 0; | ||
98 | spin_lock_irq(&ctx->csa.register_lock); | ||
99 | stat = ctx->csa.prob.mb_stat_R; | ||
100 | |||
101 | /* if the requested event is there, return the poll | ||
102 | mask, otherwise enable the interrupt to get notified, | ||
103 | but first mark any pending interrupts as done so | ||
104 | we don't get woken up unnecessarily */ | ||
105 | |||
106 | if (events & (POLLIN | POLLRDNORM)) { | ||
107 | if (stat & 0xff0000) | ||
108 | ret |= POLLIN | POLLRDNORM; | ||
109 | else { | ||
110 | ctx->csa.priv1.int_stat_class2_RW &= ~0x1; | ||
111 | ctx->csa.priv1.int_mask_class2_RW |= 0x1; | ||
112 | } | ||
113 | } | ||
114 | if (events & (POLLOUT | POLLWRNORM)) { | ||
115 | if (stat & 0x00ff00) | ||
116 | ret |= POLLOUT | POLLWRNORM; | ||
117 | else { | ||
118 | ctx->csa.priv1.int_stat_class2_RW &= ~0x10; | ||
119 | ctx->csa.priv1.int_mask_class2_RW |= 0x10; | ||
120 | } | ||
121 | } | ||
122 | spin_unlock_irq(&ctx->csa.register_lock); | ||
123 | return ret; | ||
124 | } | ||
125 | |||
126 | static int spu_backing_ibox_read(struct spu_context *ctx, u32 * data) | ||
127 | { | ||
128 | int ret; | ||
129 | |||
130 | spin_lock(&ctx->csa.register_lock); | ||
131 | if (ctx->csa.prob.mb_stat_R & 0xff0000) { | ||
132 | /* Read the first available word. | ||
133 | * Implementation note: the depth | ||
134 | * of puint_mb_R is currently 1. | ||
135 | */ | ||
136 | *data = ctx->csa.priv2.puint_mb_R; | ||
137 | ctx->csa.prob.mb_stat_R &= ~(0xff0000); | ||
138 | ctx->csa.spu_chnlcnt_RW[30] = 1; | ||
139 | gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT); | ||
140 | ret = 4; | ||
141 | } else { | ||
142 | /* make sure we get woken up by the interrupt */ | ||
143 | ctx->csa.priv1.int_mask_class2_RW |= 0x1UL; | ||
144 | ret = 0; | ||
145 | } | ||
146 | spin_unlock(&ctx->csa.register_lock); | ||
147 | return ret; | ||
148 | } | ||
149 | |||
150 | static int spu_backing_wbox_write(struct spu_context *ctx, u32 data) | ||
151 | { | ||
152 | int ret; | ||
153 | |||
154 | spin_lock(&ctx->csa.register_lock); | ||
155 | if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) { | ||
156 | int slot = ctx->csa.spu_chnlcnt_RW[29]; | ||
157 | int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8; | ||
158 | |||
159 | /* We have space to write wbox_data. | ||
160 | * Implementation note: the depth | ||
161 | * of spu_mb_W is currently 4. | ||
162 | */ | ||
163 | BUG_ON(avail != (4 - slot)); | ||
164 | ctx->csa.spu_mailbox_data[slot] = data; | ||
165 | ctx->csa.spu_chnlcnt_RW[29] = ++slot; | ||
166 | ctx->csa.prob.mb_stat_R = (((4 - slot) & 0xff) << 8); | ||
167 | gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT); | ||
168 | ret = 4; | ||
169 | } else { | ||
170 | /* make sure we get woken up by the interrupt when space | ||
171 | becomes available */ | ||
172 | ctx->csa.priv1.int_mask_class2_RW |= 0x10; | ||
173 | ret = 0; | ||
174 | } | ||
175 | spin_unlock(&ctx->csa.register_lock); | ||
176 | return ret; | ||
177 | } | ||
178 | |||
179 | static u32 spu_backing_signal1_read(struct spu_context *ctx) | ||
180 | { | ||
181 | return ctx->csa.spu_chnldata_RW[3]; | ||
182 | } | ||
183 | |||
184 | static void spu_backing_signal1_write(struct spu_context *ctx, u32 data) | ||
185 | { | ||
186 | spin_lock(&ctx->csa.register_lock); | ||
187 | if (ctx->csa.priv2.spu_cfg_RW & 0x1) | ||
188 | ctx->csa.spu_chnldata_RW[3] |= data; | ||
189 | else | ||
190 | ctx->csa.spu_chnldata_RW[3] = data; | ||
191 | ctx->csa.spu_chnlcnt_RW[3] = 1; | ||
192 | gen_spu_event(ctx, MFC_SIGNAL_1_EVENT); | ||
193 | spin_unlock(&ctx->csa.register_lock); | ||
194 | } | ||
195 | |||
196 | static u32 spu_backing_signal2_read(struct spu_context *ctx) | ||
197 | { | ||
198 | return ctx->csa.spu_chnldata_RW[4]; | ||
199 | } | ||
200 | |||
201 | static void spu_backing_signal2_write(struct spu_context *ctx, u32 data) | ||
202 | { | ||
203 | spin_lock(&ctx->csa.register_lock); | ||
204 | if (ctx->csa.priv2.spu_cfg_RW & 0x2) | ||
205 | ctx->csa.spu_chnldata_RW[4] |= data; | ||
206 | else | ||
207 | ctx->csa.spu_chnldata_RW[4] = data; | ||
208 | ctx->csa.spu_chnlcnt_RW[4] = 1; | ||
209 | gen_spu_event(ctx, MFC_SIGNAL_2_EVENT); | ||
210 | spin_unlock(&ctx->csa.register_lock); | ||
211 | } | ||
212 | |||
213 | static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val) | ||
214 | { | ||
215 | u64 tmp; | ||
216 | |||
217 | spin_lock(&ctx->csa.register_lock); | ||
218 | tmp = ctx->csa.priv2.spu_cfg_RW; | ||
219 | if (val) | ||
220 | tmp |= 1; | ||
221 | else | ||
222 | tmp &= ~1; | ||
223 | ctx->csa.priv2.spu_cfg_RW = tmp; | ||
224 | spin_unlock(&ctx->csa.register_lock); | ||
225 | } | ||
226 | |||
227 | static u64 spu_backing_signal1_type_get(struct spu_context *ctx) | ||
228 | { | ||
229 | return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0); | ||
230 | } | ||
231 | |||
232 | static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val) | ||
233 | { | ||
234 | u64 tmp; | ||
235 | |||
236 | spin_lock(&ctx->csa.register_lock); | ||
237 | tmp = ctx->csa.priv2.spu_cfg_RW; | ||
238 | if (val) | ||
239 | tmp |= 2; | ||
240 | else | ||
241 | tmp &= ~2; | ||
242 | ctx->csa.priv2.spu_cfg_RW = tmp; | ||
243 | spin_unlock(&ctx->csa.register_lock); | ||
244 | } | ||
245 | |||
246 | static u64 spu_backing_signal2_type_get(struct spu_context *ctx) | ||
247 | { | ||
248 | return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0); | ||
249 | } | ||
250 | |||
251 | static u32 spu_backing_npc_read(struct spu_context *ctx) | ||
252 | { | ||
253 | return ctx->csa.prob.spu_npc_RW; | ||
254 | } | ||
255 | |||
256 | static void spu_backing_npc_write(struct spu_context *ctx, u32 val) | ||
257 | { | ||
258 | ctx->csa.prob.spu_npc_RW = val; | ||
259 | } | ||
260 | |||
261 | static u32 spu_backing_status_read(struct spu_context *ctx) | ||
262 | { | ||
263 | return ctx->csa.prob.spu_status_R; | ||
264 | } | ||
265 | |||
266 | static char *spu_backing_get_ls(struct spu_context *ctx) | ||
267 | { | ||
268 | return ctx->csa.lscsa->ls; | ||
269 | } | ||
270 | |||
271 | static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val) | ||
272 | { | ||
273 | spin_lock(&ctx->csa.register_lock); | ||
274 | ctx->csa.prob.spu_runcntl_RW = val; | ||
275 | if (val & SPU_RUNCNTL_RUNNABLE) { | ||
276 | ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING; | ||
277 | } else { | ||
278 | ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING; | ||
279 | } | ||
280 | spin_unlock(&ctx->csa.register_lock); | ||
281 | } | ||
282 | |||
283 | static void spu_backing_runcntl_stop(struct spu_context *ctx) | ||
284 | { | ||
285 | spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP); | ||
286 | } | ||
287 | |||
288 | struct spu_context_ops spu_backing_ops = { | ||
289 | .mbox_read = spu_backing_mbox_read, | ||
290 | .mbox_stat_read = spu_backing_mbox_stat_read, | ||
291 | .mbox_stat_poll = spu_backing_mbox_stat_poll, | ||
292 | .ibox_read = spu_backing_ibox_read, | ||
293 | .wbox_write = spu_backing_wbox_write, | ||
294 | .signal1_read = spu_backing_signal1_read, | ||
295 | .signal1_write = spu_backing_signal1_write, | ||
296 | .signal2_read = spu_backing_signal2_read, | ||
297 | .signal2_write = spu_backing_signal2_write, | ||
298 | .signal1_type_set = spu_backing_signal1_type_set, | ||
299 | .signal1_type_get = spu_backing_signal1_type_get, | ||
300 | .signal2_type_set = spu_backing_signal2_type_set, | ||
301 | .signal2_type_get = spu_backing_signal2_type_get, | ||
302 | .npc_read = spu_backing_npc_read, | ||
303 | .npc_write = spu_backing_npc_write, | ||
304 | .status_read = spu_backing_status_read, | ||
305 | .get_ls = spu_backing_get_ls, | ||
306 | .runcntl_write = spu_backing_runcntl_write, | ||
307 | .runcntl_stop = spu_backing_runcntl_stop, | ||
308 | }; | ||
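
The backing ops above hard-code the layout of the mailbox status word: bits 0-7 carry the number of entries in the SPU-to-PU mailbox, bits 8-15 the number of free slots in the PU-to-SPU (wbox) mailbox, and bits 16-23 the number of entries in the interrupting (ibox) mailbox. The same masks reappear unchanged in hw_ops.c further down, where the status is read from the problem-state register instead of the saved context. A minimal sketch of that decoding, using helper names that are not part of the patch:

#include <linux/types.h>

static inline unsigned int mb_stat_mbox_count(u32 stat)
{
	return stat & 0xff;		/* SPU -> PU mailbox entries pending */
}

static inline unsigned int mb_stat_wbox_space(u32 stat)
{
	return (stat >> 8) & 0xff;	/* free PU -> SPU (wbox) slots, depth 4 */
}

static inline unsigned int mb_stat_ibox_count(u32 stat)
{
	return (stat >> 16) & 0xff;	/* interrupting mailbox entries, depth 1 */
}
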
diff --git a/arch/powerpc/platforms/cell/spufs/context.c b/arch/powerpc/platforms/cell/spufs/context.c new file mode 100644 index 000000000000..336f238102fd --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/context.c | |||
@@ -0,0 +1,167 @@ | |||
1 | /* | ||
2 | * SPU file system -- SPU context management | ||
3 | * | ||
4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 | ||
5 | * | ||
6 | * Author: Arnd Bergmann <arndb@de.ibm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/fs.h> | ||
24 | #include <linux/mm.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <asm/spu.h> | ||
27 | #include <asm/spu_csa.h> | ||
28 | #include "spufs.h" | ||
29 | |||
30 | struct spu_context *alloc_spu_context(struct address_space *local_store) | ||
31 | { | ||
32 | struct spu_context *ctx; | ||
33 | ctx = kmalloc(sizeof *ctx, GFP_KERNEL); | ||
34 | if (!ctx) | ||
35 | goto out; | ||
36 | /* Binding to physical processor deferred | ||
37 | * until spu_activate(). | ||
38 | */ | ||
39 | spu_init_csa(&ctx->csa); | ||
40 | if (!ctx->csa.lscsa) { | ||
41 | goto out_free; | ||
42 | } | ||
43 | spin_lock_init(&ctx->mmio_lock); | ||
44 | kref_init(&ctx->kref); | ||
45 | init_rwsem(&ctx->state_sema); | ||
46 | init_MUTEX(&ctx->run_sema); | ||
47 | init_waitqueue_head(&ctx->ibox_wq); | ||
48 | init_waitqueue_head(&ctx->wbox_wq); | ||
49 | init_waitqueue_head(&ctx->stop_wq); | ||
50 | ctx->ibox_fasync = NULL; | ||
51 | ctx->wbox_fasync = NULL; | ||
52 | ctx->state = SPU_STATE_SAVED; | ||
53 | ctx->local_store = local_store; | ||
54 | ctx->spu = NULL; | ||
55 | ctx->ops = &spu_backing_ops; | ||
56 | ctx->owner = get_task_mm(current); | ||
57 | goto out; | ||
58 | out_free: | ||
59 | kfree(ctx); | ||
60 | ctx = NULL; | ||
61 | out: | ||
62 | return ctx; | ||
63 | } | ||
64 | |||
65 | void destroy_spu_context(struct kref *kref) | ||
66 | { | ||
67 | struct spu_context *ctx; | ||
68 | ctx = container_of(kref, struct spu_context, kref); | ||
69 | down_write(&ctx->state_sema); | ||
70 | spu_deactivate(ctx); | ||
71 | ctx->ibox_fasync = NULL; | ||
72 | ctx->wbox_fasync = NULL; | ||
73 | up_write(&ctx->state_sema); | ||
74 | spu_fini_csa(&ctx->csa); | ||
75 | kfree(ctx); | ||
76 | } | ||
77 | |||
78 | struct spu_context * get_spu_context(struct spu_context *ctx) | ||
79 | { | ||
80 | kref_get(&ctx->kref); | ||
81 | return ctx; | ||
82 | } | ||
83 | |||
84 | int put_spu_context(struct spu_context *ctx) | ||
85 | { | ||
86 | return kref_put(&ctx->kref, &destroy_spu_context); | ||
87 | } | ||
88 | |||
89 | /* give up the mm reference when the context is about to be destroyed */ | ||
90 | void spu_forget(struct spu_context *ctx) | ||
91 | { | ||
92 | struct mm_struct *mm; | ||
93 | spu_acquire_saved(ctx); | ||
94 | mm = ctx->owner; | ||
95 | ctx->owner = NULL; | ||
96 | mmput(mm); | ||
97 | spu_release(ctx); | ||
98 | } | ||
99 | |||
100 | void spu_acquire(struct spu_context *ctx) | ||
101 | { | ||
102 | down_read(&ctx->state_sema); | ||
103 | } | ||
104 | |||
105 | void spu_release(struct spu_context *ctx) | ||
106 | { | ||
107 | up_read(&ctx->state_sema); | ||
108 | } | ||
109 | |||
110 | void spu_unmap_mappings(struct spu_context *ctx) | ||
111 | { | ||
112 | unmap_mapping_range(ctx->local_store, 0, LS_SIZE, 1); | ||
113 | } | ||
114 | |||
115 | int spu_acquire_runnable(struct spu_context *ctx) | ||
116 | { | ||
117 | int ret = 0; | ||
118 | |||
119 | down_read(&ctx->state_sema); | ||
120 | if (ctx->state == SPU_STATE_RUNNABLE) { | ||
121 | ctx->spu->prio = current->prio; | ||
122 | return 0; | ||
123 | } | ||
124 | up_read(&ctx->state_sema); | ||
125 | |||
126 | down_write(&ctx->state_sema); | ||
127 | /* ctx is about to be freed, can't acquire any more */ | ||
128 | if (!ctx->owner) { | ||
129 | ret = -EINVAL; | ||
130 | goto out; | ||
131 | } | ||
132 | |||
133 | if (ctx->state == SPU_STATE_SAVED) { | ||
134 | ret = spu_activate(ctx, 0); | ||
135 | if (ret) | ||
136 | goto out; | ||
137 | ctx->state = SPU_STATE_RUNNABLE; | ||
138 | } | ||
139 | |||
140 | downgrade_write(&ctx->state_sema); | ||
141 | /* On success, we return holding the lock */ | ||
142 | |||
143 | return ret; | ||
144 | out: | ||
145 | /* Release here, to simplify calling code. */ | ||
146 | up_write(&ctx->state_sema); | ||
147 | |||
148 | return ret; | ||
149 | } | ||
150 | |||
151 | void spu_acquire_saved(struct spu_context *ctx) | ||
152 | { | ||
153 | down_read(&ctx->state_sema); | ||
154 | |||
155 | if (ctx->state == SPU_STATE_SAVED) | ||
156 | return; | ||
157 | |||
158 | up_read(&ctx->state_sema); | ||
159 | down_write(&ctx->state_sema); | ||
160 | |||
161 | if (ctx->state == SPU_STATE_RUNNABLE) { | ||
162 | spu_deactivate(ctx); | ||
163 | ctx->state = SPU_STATE_SAVED; | ||
164 | } | ||
165 | |||
166 | downgrade_write(&ctx->state_sema); | ||
167 | } | ||
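
context.c centres on the state_sema read/write semaphore: spu_acquire() takes it shared for a plain register access, while spu_acquire_saved() and spu_acquire_runnable() upgrade to the write side to force a state change and then downgrade, so that every successful caller ends up holding a read lock. A minimal sketch of the caller-side pattern this implies (the helper name is illustrative, not from the patch):

#include "spufs.h"

/* illustrative only: read a register through whichever ops table is active */
static u32 example_read_npc(struct spu_context *ctx)
{
	u32 npc;

	spu_acquire(ctx);		/* shared hold on ctx->state_sema */
	npc = ctx->ops->npc_read(ctx);	/* backing or hw ops, depending on state */
	spu_release(ctx);

	return npc;
}
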
diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c new file mode 100644 index 000000000000..dfa649c9b956 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/file.c | |||
@@ -0,0 +1,794 @@ | |||
1 | /* | ||
2 | * SPU file system -- file contents | ||
3 | * | ||
4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 | ||
5 | * | ||
6 | * Author: Arnd Bergmann <arndb@de.ibm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/fs.h> | ||
24 | #include <linux/ioctl.h> | ||
25 | #include <linux/module.h> | ||
26 | #include <linux/pagemap.h> | ||
27 | #include <linux/poll.h> | ||
28 | #include <linux/ptrace.h> | ||
29 | |||
30 | #include <asm/io.h> | ||
31 | #include <asm/semaphore.h> | ||
32 | #include <asm/spu.h> | ||
33 | #include <asm/uaccess.h> | ||
34 | |||
35 | #include "spufs.h" | ||
36 | |||
37 | |||
38 | static int | ||
39 | spufs_mem_open(struct inode *inode, struct file *file) | ||
40 | { | ||
41 | struct spufs_inode_info *i = SPUFS_I(inode); | ||
42 | file->private_data = i->i_ctx; | ||
43 | file->f_mapping = i->i_ctx->local_store; | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | static ssize_t | ||
48 | spufs_mem_read(struct file *file, char __user *buffer, | ||
49 | size_t size, loff_t *pos) | ||
50 | { | ||
51 | struct spu_context *ctx = file->private_data; | ||
52 | char *local_store; | ||
53 | int ret; | ||
54 | |||
55 | spu_acquire(ctx); | ||
56 | |||
57 | local_store = ctx->ops->get_ls(ctx); | ||
58 | ret = simple_read_from_buffer(buffer, size, pos, local_store, LS_SIZE); | ||
59 | |||
60 | spu_release(ctx); | ||
61 | return ret; | ||
62 | } | ||
63 | |||
64 | static ssize_t | ||
65 | spufs_mem_write(struct file *file, const char __user *buffer, | ||
66 | size_t size, loff_t *pos) | ||
67 | { | ||
68 | struct spu_context *ctx = file->private_data; | ||
69 | char *local_store; | ||
70 | int ret; | ||
71 | |||
72 | size = min_t(ssize_t, LS_SIZE - *pos, size); | ||
73 | if (size <= 0) | ||
74 | return -EFBIG; | ||
75 | *pos += size; | ||
76 | |||
77 | spu_acquire(ctx); | ||
78 | |||
79 | local_store = ctx->ops->get_ls(ctx); | ||
80 | ret = copy_from_user(local_store + *pos - size, | ||
81 | buffer, size) ? -EFAULT : size; | ||
82 | |||
83 | spu_release(ctx); | ||
84 | return ret; | ||
85 | } | ||
86 | |||
87 | #ifdef CONFIG_SPARSEMEM | ||
88 | static struct page * | ||
89 | spufs_mem_mmap_nopage(struct vm_area_struct *vma, | ||
90 | unsigned long address, int *type) | ||
91 | { | ||
92 | struct page *page = NOPAGE_SIGBUS; | ||
93 | |||
94 | struct spu_context *ctx = vma->vm_file->private_data; | ||
95 | unsigned long offset = address - vma->vm_start; | ||
96 | offset += vma->vm_pgoff << PAGE_SHIFT; | ||
97 | |||
98 | spu_acquire(ctx); | ||
99 | |||
100 | if (ctx->state == SPU_STATE_SAVED) | ||
101 | page = vmalloc_to_page(ctx->csa.lscsa->ls + offset); | ||
102 | else | ||
103 | page = pfn_to_page((ctx->spu->local_store_phys + offset) | ||
104 | >> PAGE_SHIFT); | ||
105 | |||
106 | spu_release(ctx); | ||
107 | |||
108 | if (type) | ||
109 | *type = VM_FAULT_MINOR; | ||
110 | |||
111 | page_cache_get(page); | ||
112 | return page; | ||
113 | } | ||
114 | |||
115 | static struct vm_operations_struct spufs_mem_mmap_vmops = { | ||
116 | .nopage = spufs_mem_mmap_nopage, | ||
117 | }; | ||
118 | |||
119 | static int | ||
120 | spufs_mem_mmap(struct file *file, struct vm_area_struct *vma) | ||
121 | { | ||
122 | if (!(vma->vm_flags & VM_SHARED)) | ||
123 | return -EINVAL; | ||
124 | |||
125 | /* FIXME: */ | ||
126 | vma->vm_page_prot = __pgprot(pgprot_val(vma->vm_page_prot) | ||
127 | | _PAGE_NO_CACHE); | ||
128 | |||
129 | vma->vm_ops = &spufs_mem_mmap_vmops; | ||
130 | return 0; | ||
131 | } | ||
132 | #endif | ||
133 | |||
134 | static struct file_operations spufs_mem_fops = { | ||
135 | .open = spufs_mem_open, | ||
136 | .read = spufs_mem_read, | ||
137 | .write = spufs_mem_write, | ||
138 | .llseek = generic_file_llseek, | ||
139 | #ifdef CONFIG_SPARSEMEM | ||
140 | .mmap = spufs_mem_mmap, | ||
141 | #endif | ||
142 | }; | ||
143 | |||
144 | static int | ||
145 | spufs_regs_open(struct inode *inode, struct file *file) | ||
146 | { | ||
147 | struct spufs_inode_info *i = SPUFS_I(inode); | ||
148 | file->private_data = i->i_ctx; | ||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | static ssize_t | ||
153 | spufs_regs_read(struct file *file, char __user *buffer, | ||
154 | size_t size, loff_t *pos) | ||
155 | { | ||
156 | struct spu_context *ctx = file->private_data; | ||
157 | struct spu_lscsa *lscsa = ctx->csa.lscsa; | ||
158 | int ret; | ||
159 | |||
160 | spu_acquire_saved(ctx); | ||
161 | |||
162 | ret = simple_read_from_buffer(buffer, size, pos, | ||
163 | lscsa->gprs, sizeof lscsa->gprs); | ||
164 | |||
165 | spu_release(ctx); | ||
166 | return ret; | ||
167 | } | ||
168 | |||
169 | static ssize_t | ||
170 | spufs_regs_write(struct file *file, const char __user *buffer, | ||
171 | size_t size, loff_t *pos) | ||
172 | { | ||
173 | struct spu_context *ctx = file->private_data; | ||
174 | struct spu_lscsa *lscsa = ctx->csa.lscsa; | ||
175 | int ret; | ||
176 | |||
177 | size = min_t(ssize_t, sizeof lscsa->gprs - *pos, size); | ||
178 | if (size <= 0) | ||
179 | return -EFBIG; | ||
180 | *pos += size; | ||
181 | |||
182 | spu_acquire_saved(ctx); | ||
183 | |||
184 | ret = copy_from_user(lscsa->gprs + *pos - size, | ||
185 | buffer, size) ? -EFAULT : size; | ||
186 | |||
187 | spu_release(ctx); | ||
188 | return ret; | ||
189 | } | ||
190 | |||
191 | static struct file_operations spufs_regs_fops = { | ||
192 | .open = spufs_regs_open, | ||
193 | .read = spufs_regs_read, | ||
194 | .write = spufs_regs_write, | ||
195 | .llseek = generic_file_llseek, | ||
196 | }; | ||
197 | |||
198 | static ssize_t | ||
199 | spufs_fpcr_read(struct file *file, char __user * buffer, | ||
200 | size_t size, loff_t * pos) | ||
201 | { | ||
202 | struct spu_context *ctx = file->private_data; | ||
203 | struct spu_lscsa *lscsa = ctx->csa.lscsa; | ||
204 | int ret; | ||
205 | |||
206 | spu_acquire_saved(ctx); | ||
207 | |||
208 | ret = simple_read_from_buffer(buffer, size, pos, | ||
209 | &lscsa->fpcr, sizeof(lscsa->fpcr)); | ||
210 | |||
211 | spu_release(ctx); | ||
212 | return ret; | ||
213 | } | ||
214 | |||
215 | static ssize_t | ||
216 | spufs_fpcr_write(struct file *file, const char __user * buffer, | ||
217 | size_t size, loff_t * pos) | ||
218 | { | ||
219 | struct spu_context *ctx = file->private_data; | ||
220 | struct spu_lscsa *lscsa = ctx->csa.lscsa; | ||
221 | int ret; | ||
222 | |||
223 | size = min_t(ssize_t, sizeof(lscsa->fpcr) - *pos, size); | ||
224 | if (size <= 0) | ||
225 | return -EFBIG; | ||
226 | *pos += size; | ||
227 | |||
228 | spu_acquire_saved(ctx); | ||
229 | |||
230 | ret = copy_from_user((char *)&lscsa->fpcr + *pos - size, | ||
231 | buffer, size) ? -EFAULT : size; | ||
232 | |||
233 | spu_release(ctx); | ||
234 | return ret; | ||
235 | } | ||
236 | |||
237 | static struct file_operations spufs_fpcr_fops = { | ||
238 | .open = spufs_regs_open, | ||
239 | .read = spufs_fpcr_read, | ||
240 | .write = spufs_fpcr_write, | ||
241 | .llseek = generic_file_llseek, | ||
242 | }; | ||
243 | |||
244 | /* generic open function for all pipe-like files */ | ||
245 | static int spufs_pipe_open(struct inode *inode, struct file *file) | ||
246 | { | ||
247 | struct spufs_inode_info *i = SPUFS_I(inode); | ||
248 | file->private_data = i->i_ctx; | ||
249 | |||
250 | return nonseekable_open(inode, file); | ||
251 | } | ||
252 | |||
253 | static ssize_t spufs_mbox_read(struct file *file, char __user *buf, | ||
254 | size_t len, loff_t *pos) | ||
255 | { | ||
256 | struct spu_context *ctx = file->private_data; | ||
257 | u32 mbox_data; | ||
258 | int ret; | ||
259 | |||
260 | if (len < 4) | ||
261 | return -EINVAL; | ||
262 | |||
263 | spu_acquire(ctx); | ||
264 | ret = ctx->ops->mbox_read(ctx, &mbox_data); | ||
265 | spu_release(ctx); | ||
266 | |||
267 | if (!ret) | ||
268 | return -EAGAIN; | ||
269 | |||
270 | if (copy_to_user(buf, &mbox_data, sizeof mbox_data)) | ||
271 | return -EFAULT; | ||
272 | |||
273 | return 4; | ||
274 | } | ||
275 | |||
276 | static struct file_operations spufs_mbox_fops = { | ||
277 | .open = spufs_pipe_open, | ||
278 | .read = spufs_mbox_read, | ||
279 | }; | ||
280 | |||
281 | static ssize_t spufs_mbox_stat_read(struct file *file, char __user *buf, | ||
282 | size_t len, loff_t *pos) | ||
283 | { | ||
284 | struct spu_context *ctx = file->private_data; | ||
285 | u32 mbox_stat; | ||
286 | |||
287 | if (len < 4) | ||
288 | return -EINVAL; | ||
289 | |||
290 | spu_acquire(ctx); | ||
291 | |||
292 | mbox_stat = ctx->ops->mbox_stat_read(ctx) & 0xff; | ||
293 | |||
294 | spu_release(ctx); | ||
295 | |||
296 | if (copy_to_user(buf, &mbox_stat, sizeof mbox_stat)) | ||
297 | return -EFAULT; | ||
298 | |||
299 | return 4; | ||
300 | } | ||
301 | |||
302 | static struct file_operations spufs_mbox_stat_fops = { | ||
303 | .open = spufs_pipe_open, | ||
304 | .read = spufs_mbox_stat_read, | ||
305 | }; | ||
306 | |||
307 | /* low-level ibox access function */ | ||
308 | size_t spu_ibox_read(struct spu_context *ctx, u32 *data) | ||
309 | { | ||
310 | return ctx->ops->ibox_read(ctx, data); | ||
311 | } | ||
312 | |||
313 | static int spufs_ibox_fasync(int fd, struct file *file, int on) | ||
314 | { | ||
315 | struct spu_context *ctx = file->private_data; | ||
316 | |||
317 | return fasync_helper(fd, file, on, &ctx->ibox_fasync); | ||
318 | } | ||
319 | |||
320 | /* interrupt-level ibox callback function. */ | ||
321 | void spufs_ibox_callback(struct spu *spu) | ||
322 | { | ||
323 | struct spu_context *ctx = spu->ctx; | ||
324 | |||
325 | wake_up_all(&ctx->ibox_wq); | ||
326 | kill_fasync(&ctx->ibox_fasync, SIGIO, POLLIN); | ||
327 | } | ||
328 | |||
329 | static ssize_t spufs_ibox_read(struct file *file, char __user *buf, | ||
330 | size_t len, loff_t *pos) | ||
331 | { | ||
332 | struct spu_context *ctx = file->private_data; | ||
333 | u32 ibox_data; | ||
334 | ssize_t ret; | ||
335 | |||
336 | if (len < 4) | ||
337 | return -EINVAL; | ||
338 | |||
339 | spu_acquire(ctx); | ||
340 | |||
341 | ret = 0; | ||
342 | if (file->f_flags & O_NONBLOCK) { | ||
343 | if (!spu_ibox_read(ctx, &ibox_data)) | ||
344 | ret = -EAGAIN; | ||
345 | } else { | ||
346 | ret = spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, &ibox_data)); | ||
347 | } | ||
348 | |||
349 | spu_release(ctx); | ||
350 | |||
351 | if (ret) | ||
352 | return ret; | ||
353 | |||
354 | ret = 4; | ||
355 | if (copy_to_user(buf, &ibox_data, sizeof ibox_data)) | ||
356 | ret = -EFAULT; | ||
357 | |||
358 | return ret; | ||
359 | } | ||
360 | |||
361 | static unsigned int spufs_ibox_poll(struct file *file, poll_table *wait) | ||
362 | { | ||
363 | struct spu_context *ctx = file->private_data; | ||
364 | unsigned int mask; | ||
365 | |||
366 | poll_wait(file, &ctx->ibox_wq, wait); | ||
367 | |||
368 | spu_acquire(ctx); | ||
369 | mask = ctx->ops->mbox_stat_poll(ctx, POLLIN | POLLRDNORM); | ||
370 | spu_release(ctx); | ||
371 | |||
372 | return mask; | ||
373 | } | ||
374 | |||
375 | static struct file_operations spufs_ibox_fops = { | ||
376 | .open = spufs_pipe_open, | ||
377 | .read = spufs_ibox_read, | ||
378 | .poll = spufs_ibox_poll, | ||
379 | .fasync = spufs_ibox_fasync, | ||
380 | }; | ||
381 | |||
382 | static ssize_t spufs_ibox_stat_read(struct file *file, char __user *buf, | ||
383 | size_t len, loff_t *pos) | ||
384 | { | ||
385 | struct spu_context *ctx = file->private_data; | ||
386 | u32 ibox_stat; | ||
387 | |||
388 | if (len < 4) | ||
389 | return -EINVAL; | ||
390 | |||
391 | spu_acquire(ctx); | ||
392 | ibox_stat = (ctx->ops->mbox_stat_read(ctx) >> 16) & 0xff; | ||
393 | spu_release(ctx); | ||
394 | |||
395 | if (copy_to_user(buf, &ibox_stat, sizeof ibox_stat)) | ||
396 | return -EFAULT; | ||
397 | |||
398 | return 4; | ||
399 | } | ||
400 | |||
401 | static struct file_operations spufs_ibox_stat_fops = { | ||
402 | .open = spufs_pipe_open, | ||
403 | .read = spufs_ibox_stat_read, | ||
404 | }; | ||
405 | |||
406 | /* low-level mailbox write */ | ||
407 | size_t spu_wbox_write(struct spu_context *ctx, u32 data) | ||
408 | { | ||
409 | return ctx->ops->wbox_write(ctx, data); | ||
410 | } | ||
411 | |||
412 | static int spufs_wbox_fasync(int fd, struct file *file, int on) | ||
413 | { | ||
414 | struct spu_context *ctx = file->private_data; | ||
415 | int ret; | ||
416 | |||
417 | ret = fasync_helper(fd, file, on, &ctx->wbox_fasync); | ||
418 | |||
419 | return ret; | ||
420 | } | ||
421 | |||
422 | /* interrupt-level wbox callback function. */ | ||
423 | void spufs_wbox_callback(struct spu *spu) | ||
424 | { | ||
425 | struct spu_context *ctx = spu->ctx; | ||
426 | |||
427 | wake_up_all(&ctx->wbox_wq); | ||
428 | kill_fasync(&ctx->wbox_fasync, SIGIO, POLLOUT); | ||
429 | } | ||
430 | |||
431 | static ssize_t spufs_wbox_write(struct file *file, const char __user *buf, | ||
432 | size_t len, loff_t *pos) | ||
433 | { | ||
434 | struct spu_context *ctx = file->private_data; | ||
435 | u32 wbox_data; | ||
436 | int ret; | ||
437 | |||
438 | if (len < 4) | ||
439 | return -EINVAL; | ||
440 | |||
441 | if (copy_from_user(&wbox_data, buf, sizeof wbox_data)) | ||
442 | return -EFAULT; | ||
443 | |||
444 | spu_acquire(ctx); | ||
445 | |||
446 | ret = 0; | ||
447 | if (file->f_flags & O_NONBLOCK) { | ||
448 | if (!spu_wbox_write(ctx, wbox_data)) | ||
449 | ret = -EAGAIN; | ||
450 | } else { | ||
451 | ret = spufs_wait(ctx->wbox_wq, spu_wbox_write(ctx, wbox_data)); | ||
452 | } | ||
453 | |||
454 | spu_release(ctx); | ||
455 | |||
456 | return ret ? ret : sizeof wbox_data; | ||
457 | } | ||
458 | |||
459 | static unsigned int spufs_wbox_poll(struct file *file, poll_table *wait) | ||
460 | { | ||
461 | struct spu_context *ctx = file->private_data; | ||
462 | unsigned int mask; | ||
463 | |||
464 | poll_wait(file, &ctx->wbox_wq, wait); | ||
465 | |||
466 | spu_acquire(ctx); | ||
467 | mask = ctx->ops->mbox_stat_poll(ctx, POLLOUT | POLLWRNORM); | ||
468 | spu_release(ctx); | ||
469 | |||
470 | return mask; | ||
471 | } | ||
472 | |||
473 | static struct file_operations spufs_wbox_fops = { | ||
474 | .open = spufs_pipe_open, | ||
475 | .write = spufs_wbox_write, | ||
476 | .poll = spufs_wbox_poll, | ||
477 | .fasync = spufs_wbox_fasync, | ||
478 | }; | ||
479 | |||
480 | static ssize_t spufs_wbox_stat_read(struct file *file, char __user *buf, | ||
481 | size_t len, loff_t *pos) | ||
482 | { | ||
483 | struct spu_context *ctx = file->private_data; | ||
484 | u32 wbox_stat; | ||
485 | |||
486 | if (len < 4) | ||
487 | return -EINVAL; | ||
488 | |||
489 | spu_acquire(ctx); | ||
490 | wbox_stat = (ctx->ops->mbox_stat_read(ctx) >> 8) & 0xff; | ||
491 | spu_release(ctx); | ||
492 | |||
493 | if (copy_to_user(buf, &wbox_stat, sizeof wbox_stat)) | ||
494 | return -EFAULT; | ||
495 | |||
496 | return 4; | ||
497 | } | ||
498 | |||
499 | static struct file_operations spufs_wbox_stat_fops = { | ||
500 | .open = spufs_pipe_open, | ||
501 | .read = spufs_wbox_stat_read, | ||
502 | }; | ||
503 | |||
504 | static ssize_t spufs_signal1_read(struct file *file, char __user *buf, | ||
505 | size_t len, loff_t *pos) | ||
506 | { | ||
507 | struct spu_context *ctx = file->private_data; | ||
508 | u32 data; | ||
509 | |||
510 | if (len < 4) | ||
511 | return -EINVAL; | ||
512 | |||
513 | spu_acquire(ctx); | ||
514 | data = ctx->ops->signal1_read(ctx); | ||
515 | spu_release(ctx); | ||
516 | |||
517 | if (copy_to_user(buf, &data, 4)) | ||
518 | return -EFAULT; | ||
519 | |||
520 | return 4; | ||
521 | } | ||
522 | |||
523 | static ssize_t spufs_signal1_write(struct file *file, const char __user *buf, | ||
524 | size_t len, loff_t *pos) | ||
525 | { | ||
526 | struct spu_context *ctx; | ||
527 | u32 data; | ||
528 | |||
529 | ctx = file->private_data; | ||
530 | |||
531 | if (len < 4) | ||
532 | return -EINVAL; | ||
533 | |||
534 | if (copy_from_user(&data, buf, 4)) | ||
535 | return -EFAULT; | ||
536 | |||
537 | spu_acquire(ctx); | ||
538 | ctx->ops->signal1_write(ctx, data); | ||
539 | spu_release(ctx); | ||
540 | |||
541 | return 4; | ||
542 | } | ||
543 | |||
544 | static struct file_operations spufs_signal1_fops = { | ||
545 | .open = spufs_pipe_open, | ||
546 | .read = spufs_signal1_read, | ||
547 | .write = spufs_signal1_write, | ||
548 | }; | ||
549 | |||
550 | static ssize_t spufs_signal2_read(struct file *file, char __user *buf, | ||
551 | size_t len, loff_t *pos) | ||
552 | { | ||
553 | struct spu_context *ctx; | ||
554 | u32 data; | ||
555 | |||
556 | ctx = file->private_data; | ||
557 | |||
558 | if (len < 4) | ||
559 | return -EINVAL; | ||
560 | |||
561 | spu_acquire(ctx); | ||
562 | data = ctx->ops->signal2_read(ctx); | ||
563 | spu_release(ctx); | ||
564 | |||
565 | if (copy_to_user(buf, &data, 4)) | ||
566 | return -EFAULT; | ||
567 | |||
568 | return 4; | ||
569 | } | ||
570 | |||
571 | static ssize_t spufs_signal2_write(struct file *file, const char __user *buf, | ||
572 | size_t len, loff_t *pos) | ||
573 | { | ||
574 | struct spu_context *ctx; | ||
575 | u32 data; | ||
576 | |||
577 | ctx = file->private_data; | ||
578 | |||
579 | if (len < 4) | ||
580 | return -EINVAL; | ||
581 | |||
582 | if (copy_from_user(&data, buf, 4)) | ||
583 | return -EFAULT; | ||
584 | |||
585 | spu_acquire(ctx); | ||
586 | ctx->ops->signal2_write(ctx, data); | ||
587 | spu_release(ctx); | ||
588 | |||
589 | return 4; | ||
590 | } | ||
591 | |||
592 | static struct file_operations spufs_signal2_fops = { | ||
593 | .open = spufs_pipe_open, | ||
594 | .read = spufs_signal2_read, | ||
595 | .write = spufs_signal2_write, | ||
596 | }; | ||
597 | |||
598 | static void spufs_signal1_type_set(void *data, u64 val) | ||
599 | { | ||
600 | struct spu_context *ctx = data; | ||
601 | |||
602 | spu_acquire(ctx); | ||
603 | ctx->ops->signal1_type_set(ctx, val); | ||
604 | spu_release(ctx); | ||
605 | } | ||
606 | |||
607 | static u64 spufs_signal1_type_get(void *data) | ||
608 | { | ||
609 | struct spu_context *ctx = data; | ||
610 | u64 ret; | ||
611 | |||
612 | spu_acquire(ctx); | ||
613 | ret = ctx->ops->signal1_type_get(ctx); | ||
614 | spu_release(ctx); | ||
615 | |||
616 | return ret; | ||
617 | } | ||
618 | DEFINE_SIMPLE_ATTRIBUTE(spufs_signal1_type, spufs_signal1_type_get, | ||
619 | spufs_signal1_type_set, "%llu"); | ||
620 | |||
621 | static void spufs_signal2_type_set(void *data, u64 val) | ||
622 | { | ||
623 | struct spu_context *ctx = data; | ||
624 | |||
625 | spu_acquire(ctx); | ||
626 | ctx->ops->signal2_type_set(ctx, val); | ||
627 | spu_release(ctx); | ||
628 | } | ||
629 | |||
630 | static u64 spufs_signal2_type_get(void *data) | ||
631 | { | ||
632 | struct spu_context *ctx = data; | ||
633 | u64 ret; | ||
634 | |||
635 | spu_acquire(ctx); | ||
636 | ret = ctx->ops->signal2_type_get(ctx); | ||
637 | spu_release(ctx); | ||
638 | |||
639 | return ret; | ||
640 | } | ||
641 | DEFINE_SIMPLE_ATTRIBUTE(spufs_signal2_type, spufs_signal2_type_get, | ||
642 | spufs_signal2_type_set, "%llu"); | ||
643 | |||
644 | static void spufs_npc_set(void *data, u64 val) | ||
645 | { | ||
646 | struct spu_context *ctx = data; | ||
647 | spu_acquire(ctx); | ||
648 | ctx->ops->npc_write(ctx, val); | ||
649 | spu_release(ctx); | ||
650 | } | ||
651 | |||
652 | static u64 spufs_npc_get(void *data) | ||
653 | { | ||
654 | struct spu_context *ctx = data; | ||
655 | u64 ret; | ||
656 | spu_acquire(ctx); | ||
657 | ret = ctx->ops->npc_read(ctx); | ||
658 | spu_release(ctx); | ||
659 | return ret; | ||
660 | } | ||
661 | DEFINE_SIMPLE_ATTRIBUTE(spufs_npc_ops, spufs_npc_get, spufs_npc_set, "%llx\n") | ||
662 | |||
663 | static void spufs_decr_set(void *data, u64 val) | ||
664 | { | ||
665 | struct spu_context *ctx = data; | ||
666 | struct spu_lscsa *lscsa = ctx->csa.lscsa; | ||
667 | spu_acquire_saved(ctx); | ||
668 | lscsa->decr.slot[0] = (u32) val; | ||
669 | spu_release(ctx); | ||
670 | } | ||
671 | |||
672 | static u64 spufs_decr_get(void *data) | ||
673 | { | ||
674 | struct spu_context *ctx = data; | ||
675 | struct spu_lscsa *lscsa = ctx->csa.lscsa; | ||
676 | u64 ret; | ||
677 | spu_acquire_saved(ctx); | ||
678 | ret = lscsa->decr.slot[0]; | ||
679 | spu_release(ctx); | ||
680 | return ret; | ||
681 | } | ||
682 | DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_ops, spufs_decr_get, spufs_decr_set, | ||
683 | "%llx\n") | ||
684 | |||
685 | static void spufs_decr_status_set(void *data, u64 val) | ||
686 | { | ||
687 | struct spu_context *ctx = data; | ||
688 | struct spu_lscsa *lscsa = ctx->csa.lscsa; | ||
689 | spu_acquire_saved(ctx); | ||
690 | lscsa->decr_status.slot[0] = (u32) val; | ||
691 | spu_release(ctx); | ||
692 | } | ||
693 | |||
694 | static u64 spufs_decr_status_get(void *data) | ||
695 | { | ||
696 | struct spu_context *ctx = data; | ||
697 | struct spu_lscsa *lscsa = ctx->csa.lscsa; | ||
698 | u64 ret; | ||
699 | spu_acquire_saved(ctx); | ||
700 | ret = lscsa->decr_status.slot[0]; | ||
701 | spu_release(ctx); | ||
702 | return ret; | ||
703 | } | ||
704 | DEFINE_SIMPLE_ATTRIBUTE(spufs_decr_status_ops, spufs_decr_status_get, | ||
705 | spufs_decr_status_set, "%llx\n") | ||
706 | |||
707 | static void spufs_spu_tag_mask_set(void *data, u64 val) | ||
708 | { | ||
709 | struct spu_context *ctx = data; | ||
710 | struct spu_lscsa *lscsa = ctx->csa.lscsa; | ||
711 | spu_acquire_saved(ctx); | ||
712 | lscsa->tag_mask.slot[0] = (u32) val; | ||
713 | spu_release(ctx); | ||
714 | } | ||
715 | |||
716 | static u64 spufs_spu_tag_mask_get(void *data) | ||
717 | { | ||
718 | struct spu_context *ctx = data; | ||
719 | struct spu_lscsa *lscsa = ctx->csa.lscsa; | ||
720 | u64 ret; | ||
721 | spu_acquire_saved(ctx); | ||
722 | ret = lscsa->tag_mask.slot[0]; | ||
723 | spu_release(ctx); | ||
724 | return ret; | ||
725 | } | ||
726 | DEFINE_SIMPLE_ATTRIBUTE(spufs_spu_tag_mask_ops, spufs_spu_tag_mask_get, | ||
727 | spufs_spu_tag_mask_set, "%llx\n") | ||
728 | |||
729 | static void spufs_event_mask_set(void *data, u64 val) | ||
730 | { | ||
731 | struct spu_context *ctx = data; | ||
732 | struct spu_lscsa *lscsa = ctx->csa.lscsa; | ||
733 | spu_acquire_saved(ctx); | ||
734 | lscsa->event_mask.slot[0] = (u32) val; | ||
735 | spu_release(ctx); | ||
736 | } | ||
737 | |||
738 | static u64 spufs_event_mask_get(void *data) | ||
739 | { | ||
740 | struct spu_context *ctx = data; | ||
741 | struct spu_lscsa *lscsa = ctx->csa.lscsa; | ||
742 | u64 ret; | ||
743 | spu_acquire_saved(ctx); | ||
744 | ret = lscsa->event_mask.slot[0]; | ||
745 | spu_release(ctx); | ||
746 | return ret; | ||
747 | } | ||
748 | DEFINE_SIMPLE_ATTRIBUTE(spufs_event_mask_ops, spufs_event_mask_get, | ||
749 | spufs_event_mask_set, "%llx\n") | ||
750 | |||
751 | static void spufs_srr0_set(void *data, u64 val) | ||
752 | { | ||
753 | struct spu_context *ctx = data; | ||
754 | struct spu_lscsa *lscsa = ctx->csa.lscsa; | ||
755 | spu_acquire_saved(ctx); | ||
756 | lscsa->srr0.slot[0] = (u32) val; | ||
757 | spu_release(ctx); | ||
758 | } | ||
759 | |||
760 | static u64 spufs_srr0_get(void *data) | ||
761 | { | ||
762 | struct spu_context *ctx = data; | ||
763 | struct spu_lscsa *lscsa = ctx->csa.lscsa; | ||
764 | u64 ret; | ||
765 | spu_acquire_saved(ctx); | ||
766 | ret = lscsa->srr0.slot[0]; | ||
767 | spu_release(ctx); | ||
768 | return ret; | ||
769 | } | ||
770 | DEFINE_SIMPLE_ATTRIBUTE(spufs_srr0_ops, spufs_srr0_get, spufs_srr0_set, | ||
771 | "%llx\n") | ||
772 | |||
773 | struct tree_descr spufs_dir_contents[] = { | ||
774 | { "mem", &spufs_mem_fops, 0666, }, | ||
775 | { "regs", &spufs_regs_fops, 0666, }, | ||
776 | { "mbox", &spufs_mbox_fops, 0444, }, | ||
777 | { "ibox", &spufs_ibox_fops, 0444, }, | ||
778 | { "wbox", &spufs_wbox_fops, 0222, }, | ||
779 | { "mbox_stat", &spufs_mbox_stat_fops, 0444, }, | ||
780 | { "ibox_stat", &spufs_ibox_stat_fops, 0444, }, | ||
781 | { "wbox_stat", &spufs_wbox_stat_fops, 0444, }, | ||
782 | { "signal1", &spufs_signal1_fops, 0666, }, | ||
783 | { "signal2", &spufs_signal2_fops, 0666, }, | ||
784 | { "signal1_type", &spufs_signal1_type, 0666, }, | ||
785 | { "signal2_type", &spufs_signal2_type, 0666, }, | ||
786 | { "npc", &spufs_npc_ops, 0666, }, | ||
787 | { "fpcr", &spufs_fpcr_fops, 0666, }, | ||
788 | { "decr", &spufs_decr_ops, 0666, }, | ||
789 | { "decr_status", &spufs_decr_status_ops, 0666, }, | ||
790 | { "spu_tag_mask", &spufs_spu_tag_mask_ops, 0666, }, | ||
791 | { "event_mask", &spufs_event_mask_ops, 0666, }, | ||
792 | { "srr0", &spufs_srr0_ops, 0666, }, | ||
793 | {}, | ||
794 | }; | ||
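
The table above defines the user-visible interface of a context directory: local store, the register image, the three mailboxes with their status words, the two signal-notification channels, and a set of text attributes such as npc. A hypothetical user-space sketch of loading a program, assuming spufs is mounted at /spu and a context directory /spu/ctx0 already exists (neither the mount point nor the context name comes from the patch):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int example_load_program(const void *image, size_t len)
{
	int fd;

	/* copy the program image into local store (spufs_mem_write) */
	fd = open("/spu/ctx0/mem", O_WRONLY);
	if (fd < 0)
		return -1;
	if (write(fd, image, len) != (ssize_t)len) {
		close(fd);
		return -1;
	}
	close(fd);

	/* point the next program counter at the entry point; npc is a
	 * text attribute printed and parsed as "%llx\n" */
	fd = open("/spu/ctx0/npc", O_WRONLY);
	if (fd < 0)
		return -1;
	dprintf(fd, "0\n");
	close(fd);

	return 0;
}
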
diff --git a/arch/powerpc/platforms/cell/spufs/hw_ops.c b/arch/powerpc/platforms/cell/spufs/hw_ops.c new file mode 100644 index 000000000000..5445719bff79 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/hw_ops.c | |||
@@ -0,0 +1,255 @@ | |||
1 | /* hw_ops.c - query/set operations on active SPU context. | ||
2 | * | ||
3 | * Copyright (C) IBM 2005 | ||
4 | * Author: Mark Nutter <mnutter@us.ibm.com> | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2, or (at your option) | ||
9 | * any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | ||
20 | |||
21 | #include <linux/config.h> | ||
22 | #include <linux/module.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/sched.h> | ||
25 | #include <linux/kernel.h> | ||
26 | #include <linux/mm.h> | ||
27 | #include <linux/poll.h> | ||
28 | #include <linux/smp.h> | ||
29 | #include <linux/smp_lock.h> | ||
30 | #include <linux/stddef.h> | ||
31 | #include <linux/unistd.h> | ||
32 | |||
33 | #include <asm/io.h> | ||
34 | #include <asm/spu.h> | ||
35 | #include <asm/spu_csa.h> | ||
36 | #include <asm/mmu_context.h> | ||
37 | #include "spufs.h" | ||
38 | |||
39 | static int spu_hw_mbox_read(struct spu_context *ctx, u32 * data) | ||
40 | { | ||
41 | struct spu *spu = ctx->spu; | ||
42 | struct spu_problem __iomem *prob = spu->problem; | ||
43 | u32 mbox_stat; | ||
44 | int ret = 0; | ||
45 | |||
46 | spin_lock_irq(&spu->register_lock); | ||
47 | mbox_stat = in_be32(&prob->mb_stat_R); | ||
48 | if (mbox_stat & 0x0000ff) { | ||
49 | *data = in_be32(&prob->pu_mb_R); | ||
50 | ret = 4; | ||
51 | } | ||
52 | spin_unlock_irq(&spu->register_lock); | ||
53 | return ret; | ||
54 | } | ||
55 | |||
56 | static u32 spu_hw_mbox_stat_read(struct spu_context *ctx) | ||
57 | { | ||
58 | return in_be32(&ctx->spu->problem->mb_stat_R); | ||
59 | } | ||
60 | |||
61 | static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx, | ||
62 | unsigned int events) | ||
63 | { | ||
64 | struct spu *spu = ctx->spu; | ||
65 | int ret = 0; | ||
66 | u32 stat; | ||
67 | |||
68 | spin_lock_irq(&spu->register_lock); | ||
69 | stat = in_be32(&spu->problem->mb_stat_R); | ||
70 | |||
71 | /* if the requested event is there, return the poll | ||
72 | mask, otherwise enable the interrupt to get notified, | ||
73 | but first mark any pending interrupts as done so | ||
74 | we don't get woken up unnecessarily */ | ||
75 | |||
76 | if (events & (POLLIN | POLLRDNORM)) { | ||
77 | if (stat & 0xff0000) | ||
78 | ret |= POLLIN | POLLRDNORM; | ||
79 | else { | ||
80 | spu_int_stat_clear(spu, 2, 0x1); | ||
81 | spu_int_mask_or(spu, 2, 0x1); | ||
82 | } | ||
83 | } | ||
84 | if (events & (POLLOUT | POLLWRNORM)) { | ||
85 | if (stat & 0x00ff00) | ||
86 | ret |= POLLOUT | POLLWRNORM; | ||
87 | else { | ||
88 | spu_int_stat_clear(spu, 2, 0x10); | ||
89 | spu_int_mask_or(spu, 2, 0x10); | ||
90 | } | ||
91 | } | ||
92 | spin_unlock_irq(&spu->register_lock); | ||
93 | return ret; | ||
94 | } | ||
95 | |||
96 | static int spu_hw_ibox_read(struct spu_context *ctx, u32 * data) | ||
97 | { | ||
98 | struct spu *spu = ctx->spu; | ||
99 | struct spu_problem __iomem *prob = spu->problem; | ||
100 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
101 | int ret; | ||
102 | |||
103 | spin_lock_irq(&spu->register_lock); | ||
104 | if (in_be32(&prob->mb_stat_R) & 0xff0000) { | ||
105 | /* read the first available word */ | ||
106 | *data = in_be64(&priv2->puint_mb_R); | ||
107 | ret = 4; | ||
108 | } else { | ||
109 | /* make sure we get woken up by the interrupt */ | ||
110 | spu_int_mask_or(spu, 2, 0x1); | ||
111 | ret = 0; | ||
112 | } | ||
113 | spin_unlock_irq(&spu->register_lock); | ||
114 | return ret; | ||
115 | } | ||
116 | |||
117 | static int spu_hw_wbox_write(struct spu_context *ctx, u32 data) | ||
118 | { | ||
119 | struct spu *spu = ctx->spu; | ||
120 | struct spu_problem __iomem *prob = spu->problem; | ||
121 | int ret; | ||
122 | |||
123 | spin_lock_irq(&spu->register_lock); | ||
124 | if (in_be32(&prob->mb_stat_R) & 0x00ff00) { | ||
125 | /* we have space to write wbox_data to */ | ||
126 | out_be32(&prob->spu_mb_W, data); | ||
127 | ret = 4; | ||
128 | } else { | ||
129 | /* make sure we get woken up by the interrupt when space | ||
130 | becomes available */ | ||
131 | spu_int_mask_or(spu, 2, 0x10); | ||
132 | ret = 0; | ||
133 | } | ||
134 | spin_unlock_irq(&spu->register_lock); | ||
135 | return ret; | ||
136 | } | ||
137 | |||
138 | static u32 spu_hw_signal1_read(struct spu_context *ctx) | ||
139 | { | ||
140 | return in_be32(&ctx->spu->problem->signal_notify1); | ||
141 | } | ||
142 | |||
143 | static void spu_hw_signal1_write(struct spu_context *ctx, u32 data) | ||
144 | { | ||
145 | out_be32(&ctx->spu->problem->signal_notify1, data); | ||
146 | } | ||
147 | |||
148 | static u32 spu_hw_signal2_read(struct spu_context *ctx) | ||
149 | { | ||
150 | return in_be32(&ctx->spu->problem->signal_notify2); | ||
151 | } | ||
152 | |||
153 | static void spu_hw_signal2_write(struct spu_context *ctx, u32 data) | ||
154 | { | ||
155 | out_be32(&ctx->spu->problem->signal_notify2, data); | ||
156 | } | ||
157 | |||
158 | static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val) | ||
159 | { | ||
160 | struct spu *spu = ctx->spu; | ||
161 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
162 | u64 tmp; | ||
163 | |||
164 | spin_lock_irq(&spu->register_lock); | ||
165 | tmp = in_be64(&priv2->spu_cfg_RW); | ||
166 | if (val) | ||
167 | tmp |= 1; | ||
168 | else | ||
169 | tmp &= ~1; | ||
170 | out_be64(&priv2->spu_cfg_RW, tmp); | ||
171 | spin_unlock_irq(&spu->register_lock); | ||
172 | } | ||
173 | |||
174 | static u64 spu_hw_signal1_type_get(struct spu_context *ctx) | ||
175 | { | ||
176 | return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0); | ||
177 | } | ||
178 | |||
179 | static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val) | ||
180 | { | ||
181 | struct spu *spu = ctx->spu; | ||
182 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
183 | u64 tmp; | ||
184 | |||
185 | spin_lock_irq(&spu->register_lock); | ||
186 | tmp = in_be64(&priv2->spu_cfg_RW); | ||
187 | if (val) | ||
188 | tmp |= 2; | ||
189 | else | ||
190 | tmp &= ~2; | ||
191 | out_be64(&priv2->spu_cfg_RW, tmp); | ||
192 | spin_unlock_irq(&spu->register_lock); | ||
193 | } | ||
194 | |||
195 | static u64 spu_hw_signal2_type_get(struct spu_context *ctx) | ||
196 | { | ||
197 | return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0); | ||
198 | } | ||
199 | |||
200 | static u32 spu_hw_npc_read(struct spu_context *ctx) | ||
201 | { | ||
202 | return in_be32(&ctx->spu->problem->spu_npc_RW); | ||
203 | } | ||
204 | |||
205 | static void spu_hw_npc_write(struct spu_context *ctx, u32 val) | ||
206 | { | ||
207 | out_be32(&ctx->spu->problem->spu_npc_RW, val); | ||
208 | } | ||
209 | |||
210 | static u32 spu_hw_status_read(struct spu_context *ctx) | ||
211 | { | ||
212 | return in_be32(&ctx->spu->problem->spu_status_R); | ||
213 | } | ||
214 | |||
215 | static char *spu_hw_get_ls(struct spu_context *ctx) | ||
216 | { | ||
217 | return ctx->spu->local_store; | ||
218 | } | ||
219 | |||
220 | static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val) | ||
221 | { | ||
222 | eieio(); | ||
223 | out_be32(&ctx->spu->problem->spu_runcntl_RW, val); | ||
224 | } | ||
225 | |||
226 | static void spu_hw_runcntl_stop(struct spu_context *ctx) | ||
227 | { | ||
228 | spin_lock_irq(&ctx->spu->register_lock); | ||
229 | out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP); | ||
230 | while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING) | ||
231 | cpu_relax(); | ||
232 | spin_unlock_irq(&ctx->spu->register_lock); | ||
233 | } | ||
234 | |||
235 | struct spu_context_ops spu_hw_ops = { | ||
236 | .mbox_read = spu_hw_mbox_read, | ||
237 | .mbox_stat_read = spu_hw_mbox_stat_read, | ||
238 | .mbox_stat_poll = spu_hw_mbox_stat_poll, | ||
239 | .ibox_read = spu_hw_ibox_read, | ||
240 | .wbox_write = spu_hw_wbox_write, | ||
241 | .signal1_read = spu_hw_signal1_read, | ||
242 | .signal1_write = spu_hw_signal1_write, | ||
243 | .signal2_read = spu_hw_signal2_read, | ||
244 | .signal2_write = spu_hw_signal2_write, | ||
245 | .signal1_type_set = spu_hw_signal1_type_set, | ||
246 | .signal1_type_get = spu_hw_signal1_type_get, | ||
247 | .signal2_type_set = spu_hw_signal2_type_set, | ||
248 | .signal2_type_get = spu_hw_signal2_type_get, | ||
249 | .npc_read = spu_hw_npc_read, | ||
250 | .npc_write = spu_hw_npc_write, | ||
251 | .status_read = spu_hw_status_read, | ||
252 | .get_ls = spu_hw_get_ls, | ||
253 | .runcntl_write = spu_hw_runcntl_write, | ||
254 | .runcntl_stop = spu_hw_runcntl_stop, | ||
255 | }; | ||
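
hw_ops.c implements the same spu_context_ops interface as backing_ops.c, but against the memory-mapped problem-state and priv2 registers of a physical SPU instead of the saved context image. alloc_spu_context() starts every context on spu_backing_ops; presumably the scheduler (not part of this hunk) swaps the ops pointer when a context is loaded onto or saved from hardware. A minimal sketch of that hand-off, under that assumption:

#include "spufs.h"

/* illustrative only: how the two ops tables would be switched */
static void example_mark_loaded(struct spu_context *ctx, struct spu *spu)
{
	ctx->spu = spu;			/* a physical SPU now backs the context */
	ctx->ops = &spu_hw_ops;		/* register accesses go straight to MMIO */
}

static void example_mark_saved(struct spu_context *ctx)
{
	ctx->ops = &spu_backing_ops;	/* register accesses hit ctx->csa instead */
	ctx->spu = NULL;
}
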
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c new file mode 100644 index 000000000000..1f3507c75e90 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/inode.c | |||
@@ -0,0 +1,486 @@ | |||
1 | /* | ||
2 | * SPU file system | ||
3 | * | ||
4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 | ||
5 | * | ||
6 | * Author: Arnd Bergmann <arndb@de.ibm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | ||
22 | |||
23 | #include <linux/file.h> | ||
24 | #include <linux/fs.h> | ||
25 | #include <linux/backing-dev.h> | ||
26 | #include <linux/init.h> | ||
27 | #include <linux/ioctl.h> | ||
28 | #include <linux/module.h> | ||
29 | #include <linux/mount.h> | ||
30 | #include <linux/namei.h> | ||
31 | #include <linux/pagemap.h> | ||
32 | #include <linux/poll.h> | ||
33 | #include <linux/slab.h> | ||
34 | #include <linux/parser.h> | ||
35 | |||
36 | #include <asm/io.h> | ||
37 | #include <asm/semaphore.h> | ||
38 | #include <asm/spu.h> | ||
39 | #include <asm/uaccess.h> | ||
40 | |||
41 | #include "spufs.h" | ||
42 | |||
43 | static kmem_cache_t *spufs_inode_cache; | ||
44 | |||
45 | static struct inode * | ||
46 | spufs_alloc_inode(struct super_block *sb) | ||
47 | { | ||
48 | struct spufs_inode_info *ei; | ||
49 | |||
50 | ei = kmem_cache_alloc(spufs_inode_cache, SLAB_KERNEL); | ||
51 | if (!ei) | ||
52 | return NULL; | ||
53 | return &ei->vfs_inode; | ||
54 | } | ||
55 | |||
56 | static void | ||
57 | spufs_destroy_inode(struct inode *inode) | ||
58 | { | ||
59 | kmem_cache_free(spufs_inode_cache, SPUFS_I(inode)); | ||
60 | } | ||
61 | |||
62 | static void | ||
63 | spufs_init_once(void *p, kmem_cache_t * cachep, unsigned long flags) | ||
64 | { | ||
65 | struct spufs_inode_info *ei = p; | ||
66 | |||
67 | if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) == | ||
68 | SLAB_CTOR_CONSTRUCTOR) { | ||
69 | inode_init_once(&ei->vfs_inode); | ||
70 | } | ||
71 | } | ||
72 | |||
73 | static struct inode * | ||
74 | spufs_new_inode(struct super_block *sb, int mode) | ||
75 | { | ||
76 | struct inode *inode; | ||
77 | |||
78 | inode = new_inode(sb); | ||
79 | if (!inode) | ||
80 | goto out; | ||
81 | |||
82 | inode->i_mode = mode; | ||
83 | inode->i_uid = current->fsuid; | ||
84 | inode->i_gid = current->fsgid; | ||
85 | inode->i_blksize = PAGE_CACHE_SIZE; | ||
86 | inode->i_blocks = 0; | ||
87 | inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; | ||
88 | out: | ||
89 | return inode; | ||
90 | } | ||
91 | |||
92 | static int | ||
93 | spufs_setattr(struct dentry *dentry, struct iattr *attr) | ||
94 | { | ||
95 | struct inode *inode = dentry->d_inode; | ||
96 | |||
97 | if ((attr->ia_valid & ATTR_SIZE) && | ||
98 | (attr->ia_size != inode->i_size)) | ||
99 | return -EINVAL; | ||
100 | return inode_setattr(inode, attr); | ||
101 | } | ||
102 | |||
103 | |||
104 | static int | ||
105 | spufs_new_file(struct super_block *sb, struct dentry *dentry, | ||
106 | struct file_operations *fops, int mode, | ||
107 | struct spu_context *ctx) | ||
108 | { | ||
109 | static struct inode_operations spufs_file_iops = { | ||
110 | .setattr = spufs_setattr, | ||
111 | }; | ||
112 | struct inode *inode; | ||
113 | int ret; | ||
114 | |||
115 | ret = -ENOSPC; | ||
116 | inode = spufs_new_inode(sb, S_IFREG | mode); | ||
117 | if (!inode) | ||
118 | goto out; | ||
119 | |||
120 | ret = 0; | ||
121 | inode->i_op = &spufs_file_iops; | ||
122 | inode->i_fop = fops; | ||
123 | inode->u.generic_ip = SPUFS_I(inode)->i_ctx = get_spu_context(ctx); | ||
124 | d_add(dentry, inode); | ||
125 | out: | ||
126 | return ret; | ||
127 | } | ||
128 | |||
129 | static void | ||
130 | spufs_delete_inode(struct inode *inode) | ||
131 | { | ||
132 | if (SPUFS_I(inode)->i_ctx) | ||
133 | put_spu_context(SPUFS_I(inode)->i_ctx); | ||
134 | clear_inode(inode); | ||
135 | } | ||
136 | |||
137 | static void spufs_prune_dir(struct dentry *dir) | ||
138 | { | ||
139 | struct dentry *dentry, *tmp; | ||
140 | down(&dir->d_inode->i_sem); | ||
141 | list_for_each_entry_safe(dentry, tmp, &dir->d_subdirs, d_child) { | ||
142 | spin_lock(&dcache_lock); | ||
143 | spin_lock(&dentry->d_lock); | ||
144 | if (!(d_unhashed(dentry)) && dentry->d_inode) { | ||
145 | dget_locked(dentry); | ||
146 | __d_drop(dentry); | ||
147 | spin_unlock(&dentry->d_lock); | ||
148 | simple_unlink(dir->d_inode, dentry); | ||
149 | spin_unlock(&dcache_lock); | ||
150 | dput(dentry); | ||
151 | } else { | ||
152 | spin_unlock(&dentry->d_lock); | ||
153 | spin_unlock(&dcache_lock); | ||
154 | } | ||
155 | } | ||
156 | shrink_dcache_parent(dir); | ||
157 | up(&dir->d_inode->i_sem); | ||
158 | } | ||
159 | |||
160 | static int spufs_rmdir(struct inode *root, struct dentry *dir_dentry) | ||
161 | { | ||
162 | struct spu_context *ctx; | ||
163 | |||
164 | /* remove all entries */ | ||
165 | down(&root->i_sem); | ||
166 | spufs_prune_dir(dir_dentry); | ||
167 | up(&root->i_sem); | ||
168 | |||
169 | /* We have to give up the mm_struct */ | ||
170 | ctx = SPUFS_I(dir_dentry->d_inode)->i_ctx; | ||
171 | spu_forget(ctx); | ||
172 | |||
173 | /* XXX Do we need to hold i_sem here ? */ | ||
174 | return simple_rmdir(root, dir_dentry); | ||
175 | } | ||
176 | |||
177 | static int spufs_fill_dir(struct dentry *dir, struct tree_descr *files, | ||
178 | int mode, struct spu_context *ctx) | ||
179 | { | ||
180 | struct dentry *dentry; | ||
181 | int ret; | ||
182 | |||
183 | while (files->name && files->name[0]) { | ||
184 | ret = -ENOMEM; | ||
185 | dentry = d_alloc_name(dir, files->name); | ||
186 | if (!dentry) | ||
187 | goto out; | ||
188 | ret = spufs_new_file(dir->d_sb, dentry, files->ops, | ||
189 | files->mode & mode, ctx); | ||
190 | if (ret) | ||
191 | goto out; | ||
192 | files++; | ||
193 | } | ||
194 | return 0; | ||
195 | out: | ||
196 | spufs_prune_dir(dir); | ||
197 | return ret; | ||
198 | } | ||
199 | |||
200 | static int spufs_dir_close(struct inode *inode, struct file *file) | ||
201 | { | ||
202 | struct inode *dir; | ||
203 | struct dentry *dentry; | ||
204 | int ret; | ||
205 | |||
206 | dentry = file->f_dentry; | ||
207 | dir = dentry->d_parent->d_inode; | ||
208 | |||
209 | ret = spufs_rmdir(dir, dentry); | ||
210 | WARN_ON(ret); | ||
211 | |||
212 | return dcache_dir_close(inode, file); | ||
213 | } | ||
214 | |||
215 | struct inode_operations spufs_dir_inode_operations = { | ||
216 | .lookup = simple_lookup, | ||
217 | }; | ||
218 | |||
219 | struct file_operations spufs_context_fops = { | ||
220 | .open = dcache_dir_open, | ||
221 | .release = spufs_dir_close, | ||
222 | .llseek = dcache_dir_lseek, | ||
223 | .read = generic_read_dir, | ||
224 | .readdir = dcache_readdir, | ||
225 | .fsync = simple_sync_file, | ||
226 | }; | ||
227 | |||
228 | static int | ||
229 | spufs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | ||
230 | { | ||
231 | int ret; | ||
232 | struct inode *inode; | ||
233 | struct spu_context *ctx; | ||
234 | |||
235 | ret = -ENOSPC; | ||
236 | inode = spufs_new_inode(dir->i_sb, mode | S_IFDIR); | ||
237 | if (!inode) | ||
238 | goto out; | ||
239 | |||
240 | if (dir->i_mode & S_ISGID) { | ||
241 | inode->i_gid = dir->i_gid; | ||
242 | inode->i_mode |= S_ISGID; | ||
243 | } | ||
244 | ctx = alloc_spu_context(inode->i_mapping); | ||
245 | SPUFS_I(inode)->i_ctx = ctx; | ||
246 | if (!ctx) | ||
247 | goto out_iput; | ||
248 | |||
249 | inode->i_op = &spufs_dir_inode_operations; | ||
250 | inode->i_fop = &simple_dir_operations; | ||
251 | ret = spufs_fill_dir(dentry, spufs_dir_contents, mode, ctx); | ||
252 | if (ret) | ||
253 | goto out_free_ctx; | ||
254 | |||
255 | d_instantiate(dentry, inode); | ||
256 | dget(dentry); | ||
257 | dir->i_nlink++; | ||
258 | dentry->d_inode->i_nlink++; | ||
259 | goto out; | ||
260 | |||
261 | out_free_ctx: | ||
262 | put_spu_context(ctx); | ||
263 | out_iput: | ||
264 | iput(inode); | ||
265 | out: | ||
266 | return ret; | ||
267 | } | ||
268 | |||
269 | static int spufs_context_open(struct dentry *dentry, struct vfsmount *mnt) | ||
270 | { | ||
271 | int ret; | ||
272 | struct file *filp; | ||
273 | |||
274 | ret = get_unused_fd(); | ||
275 | if (ret < 0) { | ||
276 | dput(dentry); | ||
277 | mntput(mnt); | ||
278 | goto out; | ||
279 | } | ||
280 | |||
281 | filp = dentry_open(dentry, mnt, O_RDONLY); | ||
282 | if (IS_ERR(filp)) { | ||
283 | put_unused_fd(ret); | ||
284 | ret = PTR_ERR(filp); | ||
285 | goto out; | ||
286 | } | ||
287 | |||
288 | filp->f_op = &spufs_context_fops; | ||
289 | fd_install(ret, filp); | ||
290 | out: | ||
291 | return ret; | ||
292 | } | ||
293 | |||
294 | static struct file_system_type spufs_type; | ||
295 | |||
296 | long spufs_create_thread(struct nameidata *nd, | ||
297 | unsigned int flags, mode_t mode) | ||
298 | { | ||
299 | struct dentry *dentry; | ||
300 | int ret; | ||
301 | |||
302 | /* need to be at the root of spufs */ | ||
303 | ret = -EINVAL; | ||
304 | if (nd->dentry->d_sb->s_type != &spufs_type || | ||
305 | nd->dentry != nd->dentry->d_sb->s_root) | ||
306 | goto out; | ||
307 | |||
308 | dentry = lookup_create(nd, 1); | ||
309 | ret = PTR_ERR(dentry); | ||
310 | if (IS_ERR(dentry)) | ||
311 | goto out_dir; | ||
312 | |||
313 | ret = -EEXIST; | ||
314 | if (dentry->d_inode) | ||
315 | goto out_dput; | ||
316 | |||
317 | mode &= ~current->fs->umask; | ||
318 | ret = spufs_mkdir(nd->dentry->d_inode, dentry, mode & S_IRWXUGO); | ||
319 | if (ret) | ||
320 | goto out_dput; | ||
321 | |||
322 | /* | ||
323 | * get references for dget and mntget, will be released | ||
324 | * in error path of *_open(). | ||
325 | */ | ||
326 | ret = spufs_context_open(dget(dentry), mntget(nd->mnt)); | ||
327 | if (ret < 0) | ||
328 | spufs_rmdir(nd->dentry->d_inode, dentry); | ||
329 | |||
330 | out_dput: | ||
331 | dput(dentry); | ||
332 | out_dir: | ||
333 | up(&nd->dentry->d_inode->i_sem); | ||
334 | out: | ||
335 | return ret; | ||
336 | } | ||
337 | |||
338 | /* File system initialization */ | ||
339 | enum { | ||
340 | Opt_uid, Opt_gid, Opt_err, | ||
341 | }; | ||
342 | |||
343 | static match_table_t spufs_tokens = { | ||
344 | { Opt_uid, "uid=%d" }, | ||
345 | { Opt_gid, "gid=%d" }, | ||
346 | { Opt_err, NULL }, | ||
347 | }; | ||
348 | |||
349 | static int | ||
350 | spufs_parse_options(char *options, struct inode *root) | ||
351 | { | ||
352 | char *p; | ||
353 | substring_t args[MAX_OPT_ARGS]; | ||
354 | |||
355 | while ((p = strsep(&options, ",")) != NULL) { | ||
356 | int token, option; | ||
357 | |||
358 | if (!*p) | ||
359 | continue; | ||
360 | |||
361 | token = match_token(p, spufs_tokens, args); | ||
362 | switch (token) { | ||
363 | case Opt_uid: | ||
364 | if (match_int(&args[0], &option)) | ||
365 | return 0; | ||
366 | root->i_uid = option; | ||
367 | break; | ||
368 | case Opt_gid: | ||
369 | if (match_int(&args[0], &option)) | ||
370 | return 0; | ||
371 | root->i_gid = option; | ||
372 | break; | ||
373 | default: | ||
374 | return 0; | ||
375 | } | ||
376 | } | ||
377 | return 1; | ||
378 | } | ||
379 | |||
380 | static int | ||
381 | spufs_create_root(struct super_block *sb, void *data) | ||
382 | { | ||
383 | struct inode *inode; | ||
384 | int ret; | ||
385 | |||
386 | ret = -ENOMEM; | ||
387 | inode = spufs_new_inode(sb, S_IFDIR | 0775); | ||
388 | if (!inode) | ||
389 | goto out; | ||
390 | |||
391 | inode->i_op = &spufs_dir_inode_operations; | ||
392 | inode->i_fop = &simple_dir_operations; | ||
393 | SPUFS_I(inode)->i_ctx = NULL; | ||
394 | |||
395 | ret = -EINVAL; | ||
396 | if (!spufs_parse_options(data, inode)) | ||
397 | goto out_iput; | ||
398 | |||
399 | ret = -ENOMEM; | ||
400 | sb->s_root = d_alloc_root(inode); | ||
401 | if (!sb->s_root) | ||
402 | goto out_iput; | ||
403 | |||
404 | return 0; | ||
405 | out_iput: | ||
406 | iput(inode); | ||
407 | out: | ||
408 | return ret; | ||
409 | } | ||
410 | |||
411 | static int | ||
412 | spufs_fill_super(struct super_block *sb, void *data, int silent) | ||
413 | { | ||
414 | static struct super_operations s_ops = { | ||
415 | .alloc_inode = spufs_alloc_inode, | ||
416 | .destroy_inode = spufs_destroy_inode, | ||
417 | .statfs = simple_statfs, | ||
418 | .delete_inode = spufs_delete_inode, | ||
419 | .drop_inode = generic_delete_inode, | ||
420 | }; | ||
421 | |||
422 | sb->s_maxbytes = MAX_LFS_FILESIZE; | ||
423 | sb->s_blocksize = PAGE_CACHE_SIZE; | ||
424 | sb->s_blocksize_bits = PAGE_CACHE_SHIFT; | ||
425 | sb->s_magic = SPUFS_MAGIC; | ||
426 | sb->s_op = &s_ops; | ||
427 | |||
428 | return spufs_create_root(sb, data); | ||
429 | } | ||
430 | |||
431 | static struct super_block * | ||
432 | spufs_get_sb(struct file_system_type *fstype, int flags, | ||
433 | const char *name, void *data) | ||
434 | { | ||
435 | return get_sb_single(fstype, flags, data, spufs_fill_super); | ||
436 | } | ||
437 | |||
438 | static struct file_system_type spufs_type = { | ||
439 | .owner = THIS_MODULE, | ||
440 | .name = "spufs", | ||
441 | .get_sb = spufs_get_sb, | ||
442 | .kill_sb = kill_litter_super, | ||
443 | }; | ||
444 | |||
445 | static int spufs_init(void) | ||
446 | { | ||
447 | int ret; | ||
448 | ret = -ENOMEM; | ||
449 | spufs_inode_cache = kmem_cache_create("spufs_inode_cache", | ||
450 | sizeof(struct spufs_inode_info), 0, | ||
451 | SLAB_HWCACHE_ALIGN, spufs_init_once, NULL); | ||
452 | |||
453 | if (!spufs_inode_cache) | ||
454 | goto out; | ||
455 | if (spu_sched_init() != 0) { | ||
456 | kmem_cache_destroy(spufs_inode_cache); | ||
457 | goto out; | ||
458 | } | ||
459 | ret = register_filesystem(&spufs_type); | ||
460 | if (ret) | ||
461 | goto out_cache; | ||
462 | ret = register_spu_syscalls(&spufs_calls); | ||
463 | if (ret) | ||
464 | goto out_fs; | ||
465 | return 0; | ||
466 | out_fs: | ||
467 | unregister_filesystem(&spufs_type); | ||
468 | out_cache: | ||
469 | kmem_cache_destroy(spufs_inode_cache); | ||
470 | out: | ||
471 | return ret; | ||
472 | } | ||
473 | module_init(spufs_init); | ||
474 | |||
475 | static void spufs_exit(void) | ||
476 | { | ||
477 | spu_sched_exit(); | ||
478 | unregister_spu_syscalls(&spufs_calls); | ||
479 | unregister_filesystem(&spufs_type); | ||
480 | kmem_cache_destroy(spufs_inode_cache); | ||
481 | } | ||
482 | module_exit(spufs_exit); | ||
483 | |||
484 | MODULE_LICENSE("GPL"); | ||
485 | MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>"); | ||
486 | |||
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c new file mode 100644 index 000000000000..18ea8866c61a --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/run.c | |||
@@ -0,0 +1,131 @@ | |||
1 | #include <linux/wait.h> | ||
2 | #include <linux/ptrace.h> | ||
3 | |||
4 | #include <asm/spu.h> | ||
5 | |||
6 | #include "spufs.h" | ||
7 | |||
8 | /* interrupt-level stop callback function. */ | ||
9 | void spufs_stop_callback(struct spu *spu) | ||
10 | { | ||
11 | struct spu_context *ctx = spu->ctx; | ||
12 | |||
13 | wake_up_all(&ctx->stop_wq); | ||
14 | } | ||
15 | |||
16 | static inline int spu_stopped(struct spu_context *ctx, u32 * stat) | ||
17 | { | ||
18 | struct spu *spu; | ||
19 | u64 pte_fault; | ||
20 | |||
21 | *stat = ctx->ops->status_read(ctx); | ||
22 | if (ctx->state != SPU_STATE_RUNNABLE) | ||
23 | return 1; | ||
24 | spu = ctx->spu; | ||
25 | pte_fault = spu->dsisr & | ||
26 | (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED); | ||
27 | return (!(*stat & 0x1) || pte_fault || spu->class_0_pending) ? 1 : 0; | ||
28 | } | ||
29 | |||
30 | static inline int spu_run_init(struct spu_context *ctx, u32 * npc, | ||
31 | u32 * status) | ||
32 | { | ||
33 | int ret; | ||
34 | |||
35 | if ((ret = spu_acquire_runnable(ctx)) != 0) | ||
36 | return ret; | ||
37 | ctx->ops->npc_write(ctx, *npc); | ||
38 | ctx->ops->runcntl_write(ctx, SPU_RUNCNTL_RUNNABLE); | ||
39 | return 0; | ||
40 | } | ||
41 | |||
42 | static inline int spu_run_fini(struct spu_context *ctx, u32 * npc, | ||
43 | u32 * status) | ||
44 | { | ||
45 | int ret = 0; | ||
46 | |||
47 | *status = ctx->ops->status_read(ctx); | ||
48 | *npc = ctx->ops->npc_read(ctx); | ||
49 | spu_release(ctx); | ||
50 | |||
51 | if (signal_pending(current)) | ||
52 | ret = -ERESTARTSYS; | ||
53 | if (unlikely(current->ptrace & PT_PTRACED)) { | ||
54 | if ((*status & SPU_STATUS_STOPPED_BY_STOP) | ||
55 | && (*status >> SPU_STOP_STATUS_SHIFT) == 0x3fff) { | ||
56 | force_sig(SIGTRAP, current); | ||
57 | ret = -ERESTARTSYS; | ||
58 | } | ||
59 | } | ||
60 | return ret; | ||
61 | } | ||
62 | |||
63 | static inline int spu_reacquire_runnable(struct spu_context *ctx, u32 *npc, | ||
64 | u32 *status) | ||
65 | { | ||
66 | int ret; | ||
67 | |||
68 | if ((ret = spu_run_fini(ctx, npc, status)) != 0) | ||
69 | return ret; | ||
70 | if (*status & (SPU_STATUS_STOPPED_BY_STOP | | ||
71 | SPU_STATUS_STOPPED_BY_HALT)) { | ||
72 | return *status; | ||
73 | } | ||
74 | if ((ret = spu_run_init(ctx, npc, status)) != 0) | ||
75 | return ret; | ||
76 | return 0; | ||
77 | } | ||
78 | |||
79 | static inline int spu_process_events(struct spu_context *ctx) | ||
80 | { | ||
81 | struct spu *spu = ctx->spu; | ||
82 | u64 pte_fault = MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED; | ||
83 | int ret = 0; | ||
84 | |||
85 | if (spu->dsisr & pte_fault) | ||
86 | ret = spu_irq_class_1_bottom(spu); | ||
87 | if (spu->class_0_pending) | ||
88 | ret = spu_irq_class_0_bottom(spu); | ||
89 | if (!ret && signal_pending(current)) | ||
90 | ret = -ERESTARTSYS; | ||
91 | return ret; | ||
92 | } | ||
93 | |||
94 | long spufs_run_spu(struct file *file, struct spu_context *ctx, | ||
95 | u32 * npc, u32 * status) | ||
96 | { | ||
97 | int ret; | ||
98 | |||
99 | if (down_interruptible(&ctx->run_sema)) | ||
100 | return -ERESTARTSYS; | ||
101 | |||
102 | ret = spu_run_init(ctx, npc, status); | ||
103 | if (ret) | ||
104 | goto out; | ||
105 | |||
106 | do { | ||
107 | ret = spufs_wait(ctx->stop_wq, spu_stopped(ctx, status)); | ||
108 | if (unlikely(ret)) | ||
109 | break; | ||
110 | if (unlikely(ctx->state != SPU_STATE_RUNNABLE)) { | ||
111 | ret = spu_reacquire_runnable(ctx, npc, status); | ||
112 | if (ret) | ||
113 | goto out; | ||
114 | continue; | ||
115 | } | ||
116 | ret = spu_process_events(ctx); | ||
117 | |||
118 | } while (!ret && !(*status & (SPU_STATUS_STOPPED_BY_STOP | | ||
119 | SPU_STATUS_STOPPED_BY_HALT))); | ||
120 | |||
121 | ctx->ops->runcntl_stop(ctx); | ||
122 | ret = spu_run_fini(ctx, npc, status); | ||
123 | if (!ret) | ||
124 | ret = *status; | ||
125 | spu_yield(ctx); | ||
126 | |||
127 | out: | ||
128 | up(&ctx->run_sema); | ||
129 | return ret; | ||
130 | } | ||
131 | |||
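The status word handed back by spufs_run_spu() can be interpreted with the same bits the loop above tests. A hedged sketch, assuming only the SPU_STATUS_* and SPU_STOP_STATUS_SHIFT definitions that run.c already relies on from <asm/spu.h>:

/* Illustration only: decode the *status value produced above. */
static void report_spu_stop(u32 status)
{
        if (status & SPU_STATUS_STOPPED_BY_HALT) {
                pr_debug("SPU stopped by halt\n");
        } else if (status & SPU_STATUS_STOPPED_BY_STOP) {
                /* stop-and-signal code; 0x3fff is the value the loop
                 * above treats as a debugger breakpoint. */
                pr_debug("SPU stop-and-signal, code 0x%x\n",
                         status >> SPU_STOP_STATUS_SHIFT);
        } else {
                pr_debug("SPU is not stopped\n");
        }
}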
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c new file mode 100644 index 000000000000..963182fbd1aa --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/sched.c | |||
@@ -0,0 +1,461 @@ | |||
1 | /* sched.c - SPU scheduler. | ||
2 | * | ||
3 | * Copyright (C) IBM 2005 | ||
4 | * Author: Mark Nutter <mnutter@us.ibm.com> | ||
5 | * | ||
6 | * SPU scheduler, based on Linux thread priority. For now use | ||
7 | * a simple "cooperative" yield model with no preemption. SPU | ||
8 | * scheduling will eventually be preemptive: When a thread with | ||
9 | * a higher static priority gets ready to run, then an active SPU | ||
10 | * context will be preempted and returned to the waitq. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2, or (at your option) | ||
15 | * any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
25 | */ | ||
26 | |||
27 | #undef DEBUG | ||
28 | |||
29 | #include <linux/config.h> | ||
30 | #include <linux/module.h> | ||
31 | #include <linux/errno.h> | ||
32 | #include <linux/sched.h> | ||
33 | #include <linux/kernel.h> | ||
34 | #include <linux/mm.h> | ||
35 | #include <linux/completion.h> | ||
36 | #include <linux/vmalloc.h> | ||
37 | #include <linux/smp.h> | ||
38 | #include <linux/smp_lock.h> | ||
39 | #include <linux/stddef.h> | ||
40 | #include <linux/unistd.h> | ||
41 | |||
42 | #include <asm/io.h> | ||
43 | #include <asm/mmu_context.h> | ||
44 | #include <asm/spu.h> | ||
45 | #include <asm/spu_csa.h> | ||
46 | #include "spufs.h" | ||
47 | |||
48 | #define SPU_MIN_TIMESLICE (100 * HZ / 1000) | ||
49 | |||
50 | #define SPU_BITMAP_SIZE (((MAX_PRIO+BITS_PER_LONG)/BITS_PER_LONG)+1) | ||
51 | struct spu_prio_array { | ||
52 | atomic_t nr_blocked; | ||
53 | unsigned long bitmap[SPU_BITMAP_SIZE]; | ||
54 | wait_queue_head_t waitq[MAX_PRIO]; | ||
55 | }; | ||
56 | |||
57 | /* spu_runqueue - This is the main runqueue data structure for SPUs. */ | ||
58 | struct spu_runqueue { | ||
59 | struct semaphore sem; | ||
60 | unsigned long nr_active; | ||
61 | unsigned long nr_idle; | ||
62 | unsigned long nr_switches; | ||
63 | struct list_head active_list; | ||
64 | struct list_head idle_list; | ||
65 | struct spu_prio_array prio; | ||
66 | }; | ||
67 | |||
68 | static struct spu_runqueue *spu_runqueues = NULL; | ||
69 | |||
70 | static inline struct spu_runqueue *spu_rq(void) | ||
71 | { | ||
72 | /* Future: make this a per-NODE array, | ||
73 | * and use cpu_to_node(smp_processor_id()) | ||
74 | */ | ||
75 | return spu_runqueues; | ||
76 | } | ||
77 | |||
78 | static inline struct spu *del_idle(struct spu_runqueue *rq) | ||
79 | { | ||
80 | struct spu *spu; | ||
81 | |||
82 | BUG_ON(rq->nr_idle <= 0); | ||
83 | BUG_ON(list_empty(&rq->idle_list)); | ||
84 | /* Future: Move SPU out of low-power SRI state. */ | ||
85 | spu = list_entry(rq->idle_list.next, struct spu, sched_list); | ||
86 | list_del_init(&spu->sched_list); | ||
87 | rq->nr_idle--; | ||
88 | return spu; | ||
89 | } | ||
90 | |||
91 | static inline void del_active(struct spu_runqueue *rq, struct spu *spu) | ||
92 | { | ||
93 | BUG_ON(rq->nr_active <= 0); | ||
94 | BUG_ON(list_empty(&rq->active_list)); | ||
95 | list_del_init(&spu->sched_list); | ||
96 | rq->nr_active--; | ||
97 | } | ||
98 | |||
99 | static inline void add_idle(struct spu_runqueue *rq, struct spu *spu) | ||
100 | { | ||
101 | /* Future: Put SPU into low-power SRI state. */ | ||
102 | list_add_tail(&spu->sched_list, &rq->idle_list); | ||
103 | rq->nr_idle++; | ||
104 | } | ||
105 | |||
106 | static inline void add_active(struct spu_runqueue *rq, struct spu *spu) | ||
107 | { | ||
108 | rq->nr_active++; | ||
109 | rq->nr_switches++; | ||
110 | list_add_tail(&spu->sched_list, &rq->active_list); | ||
111 | } | ||
112 | |||
113 | static void prio_wakeup(struct spu_runqueue *rq) | ||
114 | { | ||
115 | if (atomic_read(&rq->prio.nr_blocked) && rq->nr_idle) { | ||
116 | int best = sched_find_first_bit(rq->prio.bitmap); | ||
117 | if (best < MAX_PRIO) { | ||
118 | wait_queue_head_t *wq = &rq->prio.waitq[best]; | ||
119 | wake_up_interruptible_nr(wq, 1); | ||
120 | } | ||
121 | } | ||
122 | } | ||
123 | |||
124 | static void prio_wait(struct spu_runqueue *rq, struct spu_context *ctx, | ||
125 | u64 flags) | ||
126 | { | ||
127 | int prio = current->prio; | ||
128 | wait_queue_head_t *wq = &rq->prio.waitq[prio]; | ||
129 | DEFINE_WAIT(wait); | ||
130 | |||
131 | __set_bit(prio, rq->prio.bitmap); | ||
132 | atomic_inc(&rq->prio.nr_blocked); | ||
133 | prepare_to_wait_exclusive(wq, &wait, TASK_INTERRUPTIBLE); | ||
134 | if (!signal_pending(current)) { | ||
135 | up(&rq->sem); | ||
136 | up_write(&ctx->state_sema); | ||
137 | pr_debug("%s: pid=%d prio=%d\n", __FUNCTION__, | ||
138 | current->pid, current->prio); | ||
139 | schedule(); | ||
140 | down_write(&ctx->state_sema); | ||
141 | down(&rq->sem); | ||
142 | } | ||
143 | finish_wait(wq, &wait); | ||
144 | atomic_dec(&rq->prio.nr_blocked); | ||
145 | if (!waitqueue_active(wq)) | ||
146 | __clear_bit(prio, rq->prio.bitmap); | ||
147 | } | ||
148 | |||
149 | static inline int is_best_prio(struct spu_runqueue *rq) | ||
150 | { | ||
151 | int best_prio; | ||
152 | |||
153 | best_prio = sched_find_first_bit(rq->prio.bitmap); | ||
154 | return (current->prio < best_prio) ? 1 : 0; | ||
155 | } | ||
156 | |||
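prio_wait() marks a blocked thread by setting its priority bit, while prio_wakeup() and is_best_prio() use sched_find_first_bit() to locate the lowest set bit, i.e. the most urgent waiter. A stand-alone sketch of that bitmap idea (illustration only, plain C instead of the kernel helpers):

#include <stdio.h>

#define NPRIO 64                /* one long's worth of priority levels */

static int find_best_prio(unsigned long bitmap)
{
        int prio;

        for (prio = 0; prio < NPRIO; prio++)
                if (bitmap & (1UL << prio))
                        return prio;
        return NPRIO;           /* empty: plays the role of the MAX_PRIO sentinel */
}

int main(void)
{
        unsigned long bitmap = 0;

        bitmap |= 1UL << 20;    /* a waiter at priority 20 */
        bitmap |= 1UL << 5;     /* a waiter at priority 5 (more urgent) */
        printf("best priority: %d\n", find_best_prio(bitmap));
        return 0;
}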
157 | static inline void mm_needs_global_tlbie(struct mm_struct *mm) | ||
158 | { | ||
159 | /* Global TLBIE broadcast required with SPEs. */ | ||
160 | #if (NR_CPUS > 1) | ||
161 | __cpus_setall(&mm->cpu_vm_mask, NR_CPUS); | ||
162 | #else | ||
163 | __cpus_setall(&mm->cpu_vm_mask, NR_CPUS+1); /* is this ok? */ | ||
164 | #endif | ||
165 | } | ||
166 | |||
167 | static inline void bind_context(struct spu *spu, struct spu_context *ctx) | ||
168 | { | ||
169 | pr_debug("%s: pid=%d SPU=%d\n", __FUNCTION__, current->pid, | ||
170 | spu->number); | ||
171 | spu->ctx = ctx; | ||
172 | spu->flags = 0; | ||
173 | ctx->flags = 0; | ||
174 | ctx->spu = spu; | ||
175 | ctx->ops = &spu_hw_ops; | ||
176 | spu->pid = current->pid; | ||
177 | spu->prio = current->prio; | ||
178 | spu->mm = ctx->owner; | ||
179 | mm_needs_global_tlbie(spu->mm); | ||
180 | spu->ibox_callback = spufs_ibox_callback; | ||
181 | spu->wbox_callback = spufs_wbox_callback; | ||
182 | spu->stop_callback = spufs_stop_callback; | ||
183 | mb(); | ||
184 | spu_unmap_mappings(ctx); | ||
185 | spu_restore(&ctx->csa, spu); | ||
186 | spu->timestamp = jiffies; | ||
187 | } | ||
188 | |||
189 | static inline void unbind_context(struct spu *spu, struct spu_context *ctx) | ||
190 | { | ||
191 | pr_debug("%s: unbind pid=%d SPU=%d\n", __FUNCTION__, | ||
192 | spu->pid, spu->number); | ||
193 | spu_unmap_mappings(ctx); | ||
194 | spu_save(&ctx->csa, spu); | ||
195 | spu->timestamp = jiffies; | ||
196 | ctx->state = SPU_STATE_SAVED; | ||
197 | spu->ibox_callback = NULL; | ||
198 | spu->wbox_callback = NULL; | ||
199 | spu->stop_callback = NULL; | ||
200 | spu->mm = NULL; | ||
201 | spu->pid = 0; | ||
202 | spu->prio = MAX_PRIO; | ||
203 | ctx->ops = &spu_backing_ops; | ||
204 | ctx->spu = NULL; | ||
205 | ctx->flags = 0; | ||
206 | spu->flags = 0; | ||
207 | spu->ctx = NULL; | ||
208 | } | ||
209 | |||
210 | static void spu_reaper(void *data) | ||
211 | { | ||
212 | struct spu_context *ctx = data; | ||
213 | struct spu *spu; | ||
214 | |||
215 | down_write(&ctx->state_sema); | ||
216 | spu = ctx->spu; | ||
217 | if (spu && test_bit(SPU_CONTEXT_PREEMPT, &ctx->flags)) { | ||
218 | if (atomic_read(&spu->rq->prio.nr_blocked)) { | ||
219 | pr_debug("%s: spu=%d\n", __func__, spu->number); | ||
220 | ctx->ops->runcntl_stop(ctx); | ||
221 | spu_deactivate(ctx); | ||
222 | wake_up_all(&ctx->stop_wq); | ||
223 | } else { | ||
224 | clear_bit(SPU_CONTEXT_PREEMPT, &ctx->flags); | ||
225 | } | ||
226 | } | ||
227 | up_write(&ctx->state_sema); | ||
228 | put_spu_context(ctx); | ||
229 | } | ||
230 | |||
231 | static void schedule_spu_reaper(struct spu_runqueue *rq, struct spu *spu) | ||
232 | { | ||
233 | struct spu_context *ctx = get_spu_context(spu->ctx); | ||
234 | unsigned long now = jiffies; | ||
235 | unsigned long expire = spu->timestamp + SPU_MIN_TIMESLICE; | ||
236 | |||
237 | set_bit(SPU_CONTEXT_PREEMPT, &ctx->flags); | ||
238 | INIT_WORK(&ctx->reap_work, spu_reaper, ctx); | ||
239 | if (time_after(now, expire)) | ||
240 | schedule_work(&ctx->reap_work); | ||
241 | else | ||
242 | schedule_delayed_work(&ctx->reap_work, expire - now); | ||
243 | } | ||
244 | |||
245 | static void check_preempt_active(struct spu_runqueue *rq) | ||
246 | { | ||
247 | struct list_head *p; | ||
248 | struct spu *worst = NULL; | ||
249 | |||
250 | list_for_each(p, &rq->active_list) { | ||
251 | struct spu *spu = list_entry(p, struct spu, sched_list); | ||
252 | struct spu_context *ctx = spu->ctx; | ||
253 | if (!test_bit(SPU_CONTEXT_PREEMPT, &ctx->flags)) { | ||
254 | if (!worst || (spu->prio > worst->prio)) { | ||
255 | worst = spu; | ||
256 | } | ||
257 | } | ||
258 | } | ||
259 | if (worst && (current->prio < worst->prio)) | ||
260 | schedule_spu_reaper(rq, worst); | ||
261 | } | ||
262 | |||
263 | static struct spu *get_idle_spu(struct spu_context *ctx, u64 flags) | ||
264 | { | ||
265 | struct spu_runqueue *rq; | ||
266 | struct spu *spu = NULL; | ||
267 | |||
268 | rq = spu_rq(); | ||
269 | down(&rq->sem); | ||
270 | for (;;) { | ||
271 | if (rq->nr_idle > 0) { | ||
272 | if (is_best_prio(rq)) { | ||
273 | /* Fall through. */ | ||
274 | spu = del_idle(rq); | ||
275 | break; | ||
276 | } else { | ||
277 | prio_wakeup(rq); | ||
278 | up(&rq->sem); | ||
279 | yield(); | ||
280 | if (signal_pending(current)) { | ||
281 | return NULL; | ||
282 | } | ||
283 | rq = spu_rq(); | ||
284 | down(&rq->sem); | ||
285 | continue; | ||
286 | } | ||
287 | } else { | ||
288 | check_preempt_active(rq); | ||
289 | prio_wait(rq, ctx, flags); | ||
290 | if (signal_pending(current)) { | ||
291 | prio_wakeup(rq); | ||
292 | spu = NULL; | ||
293 | break; | ||
294 | } | ||
295 | continue; | ||
296 | } | ||
297 | } | ||
298 | up(&rq->sem); | ||
299 | return spu; | ||
300 | } | ||
301 | |||
302 | static void put_idle_spu(struct spu *spu) | ||
303 | { | ||
304 | struct spu_runqueue *rq = spu->rq; | ||
305 | |||
306 | down(&rq->sem); | ||
307 | add_idle(rq, spu); | ||
308 | prio_wakeup(rq); | ||
309 | up(&rq->sem); | ||
310 | } | ||
311 | |||
312 | static int get_active_spu(struct spu *spu) | ||
313 | { | ||
314 | struct spu_runqueue *rq = spu->rq; | ||
315 | struct list_head *p; | ||
316 | struct spu *tmp; | ||
317 | int rc = 0; | ||
318 | |||
319 | down(&rq->sem); | ||
320 | list_for_each(p, &rq->active_list) { | ||
321 | tmp = list_entry(p, struct spu, sched_list); | ||
322 | if (tmp == spu) { | ||
323 | del_active(rq, spu); | ||
324 | rc = 1; | ||
325 | break; | ||
326 | } | ||
327 | } | ||
328 | up(&rq->sem); | ||
329 | return rc; | ||
330 | } | ||
331 | |||
332 | static void put_active_spu(struct spu *spu) | ||
333 | { | ||
334 | struct spu_runqueue *rq = spu->rq; | ||
335 | |||
336 | down(&rq->sem); | ||
337 | add_active(rq, spu); | ||
338 | up(&rq->sem); | ||
339 | } | ||
340 | |||
341 | /* Lock order: | ||
342 | * spu_activate() & spu_deactivate() require the | ||
343 | * caller to have down_write(&ctx->state_sema). | ||
344 | * | ||
345 | * The rq->sem is briefly held (inside or outside a | ||
346 | * given ctx lock) for list management, but is never | ||
347 | * held during save/restore. | ||
348 | */ | ||
349 | |||
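A sketch of a caller honouring the lock order documented above; illustrative only, the real callers live in the context management code and are not shown in this hunk:

static int example_make_runnable(struct spu_context *ctx, u64 flags)
{
        int ret;

        down_write(&ctx->state_sema);   /* required before spu_activate() */
        ret = spu_activate(ctx, flags); /* rq->sem is taken internally */
        up_write(&ctx->state_sema);
        return ret;
}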
350 | int spu_activate(struct spu_context *ctx, u64 flags) | ||
351 | { | ||
352 | struct spu *spu; | ||
353 | |||
354 | if (ctx->spu) | ||
355 | return 0; | ||
356 | spu = get_idle_spu(ctx, flags); | ||
357 | if (!spu) | ||
358 | return (signal_pending(current)) ? -ERESTARTSYS : -EAGAIN; | ||
359 | bind_context(spu, ctx); | ||
360 | /* | ||
361 | * We're likely to wait for interrupts on the same | ||
362 | * CPU that we are now on, so send them here. | ||
363 | */ | ||
364 | spu_irq_setaffinity(spu, raw_smp_processor_id()); | ||
365 | put_active_spu(spu); | ||
366 | return 0; | ||
367 | } | ||
368 | |||
369 | void spu_deactivate(struct spu_context *ctx) | ||
370 | { | ||
371 | struct spu *spu; | ||
372 | int needs_idle; | ||
373 | |||
374 | spu = ctx->spu; | ||
375 | if (!spu) | ||
376 | return; | ||
377 | needs_idle = get_active_spu(spu); | ||
378 | unbind_context(spu, ctx); | ||
379 | if (needs_idle) | ||
380 | put_idle_spu(spu); | ||
381 | } | ||
382 | |||
383 | void spu_yield(struct spu_context *ctx) | ||
384 | { | ||
385 | struct spu *spu; | ||
386 | int need_yield = 0; | ||
387 | |||
388 | down_write(&ctx->state_sema); | ||
389 | spu = ctx->spu; | ||
390 | if (spu && (sched_find_first_bit(spu->rq->prio.bitmap) < MAX_PRIO)) { | ||
391 | pr_debug("%s: yielding SPU %d\n", __FUNCTION__, spu->number); | ||
392 | spu_deactivate(ctx); | ||
393 | ctx->state = SPU_STATE_SAVED; | ||
394 | need_yield = 1; | ||
395 | } else if (spu) { | ||
396 | spu->prio = MAX_PRIO; | ||
397 | } | ||
398 | up_write(&ctx->state_sema); | ||
399 | if (unlikely(need_yield)) | ||
400 | yield(); | ||
401 | } | ||
402 | |||
403 | int __init spu_sched_init(void) | ||
404 | { | ||
405 | struct spu_runqueue *rq; | ||
406 | struct spu *spu; | ||
407 | int i; | ||
408 | |||
409 | rq = spu_runqueues = kmalloc(sizeof(struct spu_runqueue), GFP_KERNEL); | ||
410 | if (!rq) { | ||
411 | printk(KERN_WARNING "%s: Unable to allocate runqueues.\n", | ||
412 | __FUNCTION__); | ||
413 | return 1; | ||
414 | } | ||
415 | memset(rq, 0, sizeof(struct spu_runqueue)); | ||
416 | init_MUTEX(&rq->sem); | ||
417 | INIT_LIST_HEAD(&rq->active_list); | ||
418 | INIT_LIST_HEAD(&rq->idle_list); | ||
419 | rq->nr_active = 0; | ||
420 | rq->nr_idle = 0; | ||
421 | rq->nr_switches = 0; | ||
422 | atomic_set(&rq->prio.nr_blocked, 0); | ||
423 | for (i = 0; i < MAX_PRIO; i++) { | ||
424 | init_waitqueue_head(&rq->prio.waitq[i]); | ||
425 | __clear_bit(i, rq->prio.bitmap); | ||
426 | } | ||
427 | __set_bit(MAX_PRIO, rq->prio.bitmap); | ||
428 | for (;;) { | ||
429 | spu = spu_alloc(); | ||
430 | if (!spu) | ||
431 | break; | ||
432 | pr_debug("%s: adding SPU[%d]\n", __FUNCTION__, spu->number); | ||
433 | add_idle(rq, spu); | ||
434 | spu->rq = rq; | ||
435 | spu->timestamp = jiffies; | ||
436 | } | ||
437 | if (!rq->nr_idle) { | ||
438 | printk(KERN_WARNING "%s: No available SPUs.\n", __FUNCTION__); | ||
439 | kfree(rq); | ||
440 | return 1; | ||
441 | } | ||
442 | return 0; | ||
443 | } | ||
444 | |||
445 | void __exit spu_sched_exit(void) | ||
446 | { | ||
447 | struct spu_runqueue *rq = spu_rq(); | ||
448 | struct spu *spu; | ||
449 | |||
450 | if (!rq) { | ||
451 | printk(KERN_WARNING "%s: no runqueues!\n", __FUNCTION__); | ||
452 | return; | ||
453 | } | ||
454 | while (rq->nr_idle > 0) { | ||
455 | spu = del_idle(rq); | ||
456 | if (!spu) | ||
457 | break; | ||
458 | spu_free(spu); | ||
459 | } | ||
460 | kfree(rq); | ||
461 | } | ||
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore.c b/arch/powerpc/platforms/cell/spufs/spu_restore.c new file mode 100644 index 000000000000..0bf723dcd677 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spu_restore.c | |||
@@ -0,0 +1,336 @@ | |||
1 | /* | ||
2 | * spu_restore.c | ||
3 | * | ||
4 | * (C) Copyright IBM Corp. 2005 | ||
5 | * | ||
6 | * SPU-side context restore sequence outlined in | ||
7 | * Synergistic Processor Element Book IV | ||
8 | * | ||
9 | * Author: Mark Nutter <mnutter@us.ibm.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2, or (at your option) | ||
14 | * any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #ifndef LS_SIZE | ||
29 | #define LS_SIZE 0x40000 /* 256K (in bytes) */ | ||
30 | #endif | ||
31 | |||
32 | typedef unsigned int u32; | ||
33 | typedef unsigned long long u64; | ||
34 | |||
35 | #include <spu_intrinsics.h> | ||
36 | #include <asm/spu_csa.h> | ||
37 | #include "spu_utils.h" | ||
38 | |||
39 | #define BR_INSTR 0x327fff80 /* br -4 */ | ||
40 | #define NOP_INSTR 0x40200000 /* nop */ | ||
41 | #define HEQ_INSTR 0x7b000000 /* heq $0, $0 */ | ||
42 | #define STOP_INSTR 0x00000000 /* stop 0x0 */ | ||
43 | #define ILLEGAL_INSTR 0x00800000 /* illegal instr */ | ||
44 | #define RESTORE_COMPLETE 0x00003ffc /* stop 0x3ffc */ | ||
45 | |||
46 | static inline void fetch_regs_from_mem(addr64 lscsa_ea) | ||
47 | { | ||
48 | unsigned int ls = (unsigned int)®s_spill[0]; | ||
49 | unsigned int size = sizeof(regs_spill); | ||
50 | unsigned int tag_id = 0; | ||
51 | unsigned int cmd = 0x40; /* GET */ | ||
52 | |||
53 | spu_writech(MFC_LSA, ls); | ||
54 | spu_writech(MFC_EAH, lscsa_ea.ui[0]); | ||
55 | spu_writech(MFC_EAL, lscsa_ea.ui[1]); | ||
56 | spu_writech(MFC_Size, size); | ||
57 | spu_writech(MFC_TagID, tag_id); | ||
58 | spu_writech(MFC_Cmd, cmd); | ||
59 | } | ||
60 | |||
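Every DMA helper in this file (and its counterparts in spu_save.c) issues the same six MFC channel writes; purely as an illustration, the pattern could be factored like this:

/* Illustration only: the channel-write sequence shared by
 * fetch_regs_from_mem(), restore_upper_240kb() and friends. */
static inline void enqueue_mfc_dma(unsigned int ls, unsigned int eah,
                                   unsigned int eal, unsigned int size,
                                   unsigned int tag_id, unsigned int cmd)
{
        spu_writech(MFC_LSA, ls);
        spu_writech(MFC_EAH, eah);
        spu_writech(MFC_EAL, eal);
        spu_writech(MFC_Size, size);
        spu_writech(MFC_TagID, tag_id);
        spu_writech(MFC_Cmd, cmd);
}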
61 | static inline void restore_upper_240kb(addr64 lscsa_ea) | ||
62 | { | ||
63 | unsigned int ls = 16384; | ||
64 | unsigned int list = (unsigned int)&dma_list[0]; | ||
65 | unsigned int size = sizeof(dma_list); | ||
66 | unsigned int tag_id = 0; | ||
67 | unsigned int cmd = 0x44; /* GETL */ | ||
68 | |||
69 | /* Restore, Step 4: | ||
70 | * Enqueue the GETL command (tag 0) to the MFC SPU command | ||
71 | * queue to transfer the upper 240 kb of LS from CSA. | ||
72 | */ | ||
73 | spu_writech(MFC_LSA, ls); | ||
74 | spu_writech(MFC_EAH, lscsa_ea.ui[0]); | ||
75 | spu_writech(MFC_EAL, list); | ||
76 | spu_writech(MFC_Size, size); | ||
77 | spu_writech(MFC_TagID, tag_id); | ||
78 | spu_writech(MFC_Cmd, cmd); | ||
79 | } | ||
80 | |||
81 | static inline void restore_decr(void) | ||
82 | { | ||
83 | unsigned int offset; | ||
84 | unsigned int decr_running; | ||
85 | unsigned int decr; | ||
86 | |||
87 | /* Restore, Step 6: | ||
88 | * If the LSCSA "decrementer running" flag is set | ||
89 | * then write the SPU_WrDec channel with the | ||
90 | * decrementer value from LSCSA. | ||
91 | */ | ||
92 | offset = LSCSA_QW_OFFSET(decr_status); | ||
93 | decr_running = regs_spill[offset].slot[0]; | ||
94 | if (decr_running) { | ||
95 | offset = LSCSA_QW_OFFSET(decr); | ||
96 | decr = regs_spill[offset].slot[0]; | ||
97 | spu_writech(SPU_WrDec, decr); | ||
98 | } | ||
99 | } | ||
100 | |||
101 | static inline void write_ppu_mb(void) | ||
102 | { | ||
103 | unsigned int offset; | ||
104 | unsigned int data; | ||
105 | |||
106 | /* Restore, Step 11: | ||
107 | * Write the MFC_WrOut_MB channel with the PPU_MB | ||
108 | * data from LSCSA. | ||
109 | */ | ||
110 | offset = LSCSA_QW_OFFSET(ppu_mb); | ||
111 | data = regs_spill[offset].slot[0]; | ||
112 | spu_writech(SPU_WrOutMbox, data); | ||
113 | } | ||
114 | |||
115 | static inline void write_ppuint_mb(void) | ||
116 | { | ||
117 | unsigned int offset; | ||
118 | unsigned int data; | ||
119 | |||
120 | /* Restore, Step 12: | ||
121 | * Write the MFC_WrInt_MB channel with the PPUINT_MB | ||
122 | * data from LSCSA. | ||
123 | */ | ||
124 | offset = LSCSA_QW_OFFSET(ppuint_mb); | ||
125 | data = regs_spill[offset].slot[0]; | ||
126 | spu_writech(SPU_WrOutIntrMbox, data); | ||
127 | } | ||
128 | |||
129 | static inline void restore_fpcr(void) | ||
130 | { | ||
131 | unsigned int offset; | ||
132 | vector unsigned int fpcr; | ||
133 | |||
134 | /* Restore, Step 13: | ||
135 | * Restore the floating-point status and control | ||
136 | * register from the LSCSA. | ||
137 | */ | ||
138 | offset = LSCSA_QW_OFFSET(fpcr); | ||
139 | fpcr = regs_spill[offset].v; | ||
140 | spu_mtfpscr(fpcr); | ||
141 | } | ||
142 | |||
143 | static inline void restore_srr0(void) | ||
144 | { | ||
145 | unsigned int offset; | ||
146 | unsigned int srr0; | ||
147 | |||
148 | /* Restore, Step 14: | ||
149 | * Restore the SPU SRR0 data from the LSCSA. | ||
150 | */ | ||
151 | offset = LSCSA_QW_OFFSET(srr0); | ||
152 | srr0 = regs_spill[offset].slot[0]; | ||
153 | spu_writech(SPU_WrSRR0, srr0); | ||
154 | } | ||
155 | |||
156 | static inline void restore_event_mask(void) | ||
157 | { | ||
158 | unsigned int offset; | ||
159 | unsigned int event_mask; | ||
160 | |||
161 | /* Restore, Step 15: | ||
162 | * Restore the SPU_RdEventMsk data from the LSCSA. | ||
163 | */ | ||
164 | offset = LSCSA_QW_OFFSET(event_mask); | ||
165 | event_mask = regs_spill[offset].slot[0]; | ||
166 | spu_writech(SPU_WrEventMask, event_mask); | ||
167 | } | ||
168 | |||
169 | static inline void restore_tag_mask(void) | ||
170 | { | ||
171 | unsigned int offset; | ||
172 | unsigned int tag_mask; | ||
173 | |||
174 | /* Restore, Step 16: | ||
175 | * Restore the SPU_RdTagMsk data from the LSCSA. | ||
176 | */ | ||
177 | offset = LSCSA_QW_OFFSET(tag_mask); | ||
178 | tag_mask = regs_spill[offset].slot[0]; | ||
179 | spu_writech(MFC_WrTagMask, tag_mask); | ||
180 | } | ||
181 | |||
182 | static inline void restore_complete(void) | ||
183 | { | ||
184 | extern void exit_fini(void); | ||
185 | unsigned int *exit_instrs = (unsigned int *)exit_fini; | ||
186 | unsigned int offset; | ||
187 | unsigned int stopped_status; | ||
188 | unsigned int stopped_code; | ||
189 | |||
190 | /* Restore, Step 18: | ||
191 | * Issue a stop-and-signal instruction with | ||
192 | * "good context restore" signal value. | ||
193 | * | ||
194 | * Restore, Step 19: | ||
195 | * There may be additional instructions placed | ||
196 | * here by the PPE Sequence for SPU Context | ||
197 | * Restore in order to restore the correct | ||
198 | * "stopped state". | ||
199 | * | ||
200 | * This step is handled here by analyzing the | ||
201 | * LSCSA.stopped_status and then modifying the | ||
202 | * exit() function to behave appropriately. | ||
203 | */ | ||
204 | |||
205 | offset = LSCSA_QW_OFFSET(stopped_status); | ||
206 | stopped_status = regs_spill[offset].slot[0]; | ||
207 | stopped_code = regs_spill[offset].slot[1]; | ||
208 | |||
209 | switch (stopped_status) { | ||
210 | case SPU_STOPPED_STATUS_P_I: | ||
211 | /* SPU_Status[P,I]=1. Add illegal instruction | ||
212 | * followed by stop-and-signal instruction after | ||
213 | * end of restore code. | ||
214 | */ | ||
215 | exit_instrs[0] = RESTORE_COMPLETE; | ||
216 | exit_instrs[1] = ILLEGAL_INSTR; | ||
217 | exit_instrs[2] = STOP_INSTR | stopped_code; | ||
218 | break; | ||
219 | case SPU_STOPPED_STATUS_P_H: | ||
220 | /* SPU_Status[P,H]=1. Add 'heq $0, $0' followed | ||
221 | * by stop-and-signal instruction after end of | ||
222 | * restore code. | ||
223 | */ | ||
224 | exit_instrs[0] = RESTORE_COMPLETE; | ||
225 | exit_instrs[1] = HEQ_INSTR; | ||
226 | exit_instrs[2] = STOP_INSTR | stopped_code; | ||
227 | break; | ||
228 | case SPU_STOPPED_STATUS_S_P: | ||
229 | /* SPU_Status[S,P]=1. Add nop instruction | ||
230 | * followed by 'br -4' after end of restore | ||
231 | * code. | ||
232 | */ | ||
233 | exit_instrs[0] = RESTORE_COMPLETE; | ||
234 | exit_instrs[1] = STOP_INSTR | stopped_code; | ||
235 | exit_instrs[2] = NOP_INSTR; | ||
236 | exit_instrs[3] = BR_INSTR; | ||
237 | break; | ||
238 | case SPU_STOPPED_STATUS_S_I: | ||
239 | /* SPU_Status[S,I]=1. Add illegal instruction | ||
240 | * followed by 'br -4' after end of restore code. | ||
241 | */ | ||
242 | exit_instrs[0] = RESTORE_COMPLETE; | ||
243 | exit_instrs[1] = ILLEGAL_INSTR; | ||
244 | exit_instrs[2] = NOP_INSTR; | ||
245 | exit_instrs[3] = BR_INSTR; | ||
246 | break; | ||
247 | case SPU_STOPPED_STATUS_I: | ||
248 | /* SPU_Status[I]=1. Add illegal instruction followed | ||
249 | * by infinite loop after end of restore sequence. | ||
250 | */ | ||
251 | exit_instrs[0] = RESTORE_COMPLETE; | ||
252 | exit_instrs[1] = ILLEGAL_INSTR; | ||
253 | exit_instrs[2] = NOP_INSTR; | ||
254 | exit_instrs[3] = BR_INSTR; | ||
255 | break; | ||
256 | case SPU_STOPPED_STATUS_S: | ||
257 | /* SPU_Status[S]=1. Add two 'nop' instructions. */ | ||
258 | exit_instrs[0] = RESTORE_COMPLETE; | ||
259 | exit_instrs[1] = NOP_INSTR; | ||
260 | exit_instrs[2] = NOP_INSTR; | ||
261 | exit_instrs[3] = BR_INSTR; | ||
262 | break; | ||
263 | case SPU_STOPPED_STATUS_H: | ||
264 | /* SPU_Status[H]=1. Add 'heq $0, $0' instruction | ||
265 | * after end of restore code. | ||
266 | */ | ||
267 | exit_instrs[0] = RESTORE_COMPLETE; | ||
268 | exit_instrs[1] = HEQ_INSTR; | ||
269 | exit_instrs[2] = NOP_INSTR; | ||
270 | exit_instrs[3] = BR_INSTR; | ||
271 | break; | ||
272 | case SPU_STOPPED_STATUS_P: | ||
273 | /* SPU_Status[P]=1. Add stop-and-signal instruction | ||
274 | * after end of restore code. | ||
275 | */ | ||
276 | exit_instrs[0] = RESTORE_COMPLETE; | ||
277 | exit_instrs[1] = STOP_INSTR | stopped_code; | ||
278 | break; | ||
279 | case SPU_STOPPED_STATUS_R: | ||
280 | /* SPU_Status[I,S,H,P,R]=0. Add infinite loop. */ | ||
281 | exit_instrs[0] = RESTORE_COMPLETE; | ||
282 | exit_instrs[1] = NOP_INSTR; | ||
283 | exit_instrs[2] = NOP_INSTR; | ||
284 | exit_instrs[3] = BR_INSTR; | ||
285 | break; | ||
286 | default: | ||
287 | /* SPU_Status[R]=1. No additional instructions. */ | ||
288 | break; | ||
289 | } | ||
290 | spu_sync(); | ||
291 | } | ||
292 | |||
293 | /** | ||
294 | * main - entry point for SPU-side context restore. | ||
295 | * | ||
296 | * This code deviates from the documented sequence in the | ||
297 | * following aspects: | ||
298 | * | ||
299 | * 1. The EA for LSCSA is passed from PPE in the | ||
300 | * signal notification channels. | ||
301 | * 2. The register spill area is pulled by SPU | ||
302 | * into LS, rather than pushed by PPE. | ||
303 | * 3. All 128 registers are restored by exit(). | ||
304 | * 4. The exit() function is modified at run | ||
305 | * time in order to properly restore the | ||
306 | * SPU_Status register. | ||
307 | */ | ||
308 | int main() | ||
309 | { | ||
310 | addr64 lscsa_ea; | ||
311 | |||
312 | lscsa_ea.ui[0] = spu_readch(SPU_RdSigNotify1); | ||
313 | lscsa_ea.ui[1] = spu_readch(SPU_RdSigNotify2); | ||
314 | fetch_regs_from_mem(lscsa_ea); | ||
315 | |||
316 | set_event_mask(); /* Step 1. */ | ||
317 | set_tag_mask(); /* Step 2. */ | ||
318 | build_dma_list(lscsa_ea); /* Step 3. */ | ||
319 | restore_upper_240kb(lscsa_ea); /* Step 4. */ | ||
320 | /* Step 5: done by 'exit'. */ | ||
321 | restore_decr(); /* Step 6. */ | ||
322 | enqueue_putllc(lscsa_ea); /* Step 7. */ | ||
323 | set_tag_update(); /* Step 8. */ | ||
324 | read_tag_status(); /* Step 9. */ | ||
325 | read_llar_status(); /* Step 10. */ | ||
326 | write_ppu_mb(); /* Step 11. */ | ||
327 | write_ppuint_mb(); /* Step 12. */ | ||
328 | restore_fpcr(); /* Step 13. */ | ||
329 | restore_srr0(); /* Step 14. */ | ||
330 | restore_event_mask(); /* Step 15. */ | ||
331 | restore_tag_mask(); /* Step 16. */ | ||
332 | /* Step 17. done by 'exit'. */ | ||
333 | restore_complete(); /* Step 18. */ | ||
334 | |||
335 | return 0; | ||
336 | } | ||
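main() above receives the 64-bit LSCSA effective address split across the two 32-bit signal-notification channels: SigNotify1 carries the high word (later written to MFC_EAH) and SigNotify2 the low word (MFC_EAL). A stand-alone sketch of that split, using a made-up address, for illustration only:

#include <stdio.h>

int main(void)
{
        unsigned long long lscsa_ea = 0x0000000012340080ULL;   /* made-up EA */
        unsigned int hi = lscsa_ea >> 32;         /* ui[0]: SigNotify1 -> MFC_EAH */
        unsigned int lo = (unsigned int)lscsa_ea; /* ui[1]: SigNotify2 -> MFC_EAL */

        printf("notify1=0x%08x notify2=0x%08x\n", hi, lo);
        return 0;
}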
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S b/arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S new file mode 100644 index 000000000000..2905949debe1 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spu_restore_crt0.S | |||
@@ -0,0 +1,116 @@ | |||
1 | /* | ||
2 | * crt0_r.S: Entry function for SPU-side context restore. | ||
3 | * | ||
4 | * Copyright (C) 2005 IBM | ||
5 | * | ||
6 | * Entry and exit function for SPU-side of the context restore | ||
7 | * sequence. Sets up an initial stack frame, then branches to | ||
8 | * 'main'. On return, restores all 128 registers from the LSCSA | ||
9 | * and exits. | ||
10 | * | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2, or (at your option) | ||
15 | * any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
25 | */ | ||
26 | |||
27 | #include <asm/spu_csa.h> | ||
28 | |||
29 | .data | ||
30 | .align 7 | ||
31 | .globl regs_spill | ||
32 | regs_spill: | ||
33 | .space SIZEOF_SPU_SPILL_REGS, 0x0 | ||
34 | |||
35 | .text | ||
36 | .global _start | ||
37 | _start: | ||
38 | /* Initialize the stack pointer to point to 16368 | ||
39 | * (16kb-16). The back chain pointer is initialized | ||
40 | * to NULL. | ||
41 | */ | ||
42 | il $0, 0 | ||
43 | il $SP, 16368 | ||
44 | stqd $0, 0($SP) | ||
45 | |||
46 | /* Allocate a minimum stack frame for the called main. | ||
47 | * This is needed so that main has a place to save the | ||
48 | * link register when it calls another function. | ||
49 | */ | ||
50 | stqd $SP, -160($SP) | ||
51 | ai $SP, $SP, -160 | ||
52 | |||
53 | /* Call the program's main function. */ | ||
54 | brsl $0, main | ||
55 | |||
56 | .global exit | ||
57 | .global _exit | ||
58 | exit: | ||
59 | _exit: | ||
60 | /* SPU Context Restore, Step 5: Restore the remaining 112 GPRs. */ | ||
61 | ila $3, regs_spill + 256 | ||
62 | restore_regs: | ||
63 | lqr $4, restore_reg_insts | ||
64 | restore_reg_loop: | ||
65 | ai $4, $4, 4 | ||
66 | .balignl 16, 0x40200000 | ||
67 | restore_reg_insts: /* must be quad-word aligned. */ | ||
68 | lqd $16, 0($3) | ||
69 | lqd $17, 16($3) | ||
70 | lqd $18, 32($3) | ||
71 | lqd $19, 48($3) | ||
72 | andi $5, $4, 0x7F | ||
73 | stqr $4, restore_reg_insts | ||
74 | ai $3, $3, 64 | ||
75 | brnz $5, restore_reg_loop | ||
76 | |||
77 | /* SPU Context Restore Step 17: Restore the first 16 GPRs. */ | ||
78 | lqa $0, regs_spill + 0 | ||
79 | lqa $1, regs_spill + 16 | ||
80 | lqa $2, regs_spill + 32 | ||
81 | lqa $3, regs_spill + 48 | ||
82 | lqa $4, regs_spill + 64 | ||
83 | lqa $5, regs_spill + 80 | ||
84 | lqa $6, regs_spill + 96 | ||
85 | lqa $7, regs_spill + 112 | ||
86 | lqa $8, regs_spill + 128 | ||
87 | lqa $9, regs_spill + 144 | ||
88 | lqa $10, regs_spill + 160 | ||
89 | lqa $11, regs_spill + 176 | ||
90 | lqa $12, regs_spill + 192 | ||
91 | lqa $13, regs_spill + 208 | ||
92 | lqa $14, regs_spill + 224 | ||
93 | lqa $15, regs_spill + 240 | ||
94 | |||
95 | /* Under normal circumstances, the 'exit' function | ||
96 | * terminates with 'stop SPU_RESTORE_COMPLETE', | ||
97 | * indicating that the SPU-side restore code has | ||
98 | * completed. | ||
99 | * | ||
100 | * However it is possible that instructions immediately | ||
101 | * following the 'stop 0x3ffc' have been modified at run | ||
102 | * time so as to recreate the exact SPU_Status settings | ||
103 | * from the application, e.g. illegal instruciton, halt, | ||
104 | * etc. | ||
105 | */ | ||
106 | .global exit_fini | ||
107 | .global _exit_fini | ||
108 | exit_fini: | ||
109 | _exit_fini: | ||
110 | stop SPU_RESTORE_COMPLETE | ||
111 | stop 0 | ||
112 | stop 0 | ||
113 | stop 0 | ||
114 | |||
115 | /* Pad the size of this crt0.o to be multiple of 16 bytes. */ | ||
116 | .balignl 16, 0x0 | ||
diff --git a/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped b/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped new file mode 100644 index 000000000000..1b2355ff7036 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spu_restore_dump.h_shipped | |||
@@ -0,0 +1,231 @@ | |||
1 | /* | ||
2 | * spu_restore_dump.h: Copyright (C) 2005 IBM. | ||
3 | * Hex-dump auto generated from spu_restore.c. | ||
4 | * Do not edit! | ||
5 | */ | ||
6 | static unsigned int spu_restore_code[] __page_aligned = { | ||
7 | 0x40800000, 0x409ff801, 0x24000080, 0x24fd8081, | ||
8 | 0x1cd80081, 0x33001180, 0x42030003, 0x33800284, | ||
9 | 0x1c010204, 0x40200000, 0x40200000, 0x40200000, | ||
10 | 0x34000190, 0x34004191, 0x34008192, 0x3400c193, | ||
11 | 0x141fc205, 0x23fffd84, 0x1c100183, 0x217ffa85, | ||
12 | 0x3080a000, 0x3080a201, 0x3080a402, 0x3080a603, | ||
13 | 0x3080a804, 0x3080aa05, 0x3080ac06, 0x3080ae07, | ||
14 | 0x3080b008, 0x3080b209, 0x3080b40a, 0x3080b60b, | ||
15 | 0x3080b80c, 0x3080ba0d, 0x3080bc0e, 0x3080be0f, | ||
16 | 0x00003ffc, 0x00000000, 0x00000000, 0x00000000, | ||
17 | 0x01a00182, 0x3ec00083, 0xb0a14103, 0x01a00204, | ||
18 | 0x3ec10082, 0x4202800e, 0x04000703, 0xb0a14202, | ||
19 | 0x21a00803, 0x3fbf028d, 0x3f20068d, 0x3fbe0682, | ||
20 | 0x3fe30102, 0x21a00882, 0x3f82028f, 0x3fe3078f, | ||
21 | 0x3fbf0784, 0x3f200204, 0x3fbe0204, 0x3fe30204, | ||
22 | 0x04000203, 0x21a00903, 0x40848002, 0x21a00982, | ||
23 | 0x40800003, 0x21a00a03, 0x40802002, 0x21a00a82, | ||
24 | 0x21a00083, 0x40800082, 0x21a00b02, 0x10002818, | ||
25 | 0x40a80002, 0x32800007, 0x4207000c, 0x18008208, | ||
26 | 0x40a0000b, 0x4080020a, 0x40800709, 0x00200000, | ||
27 | 0x42070002, 0x3ac30384, 0x1cffc489, 0x00200000, | ||
28 | 0x18008383, 0x38830382, 0x4cffc486, 0x3ac28185, | ||
29 | 0xb0408584, 0x28830382, 0x1c020387, 0x38828182, | ||
30 | 0xb0408405, 0x1802c408, 0x28828182, 0x217ff886, | ||
31 | 0x04000583, 0x21a00803, 0x3fbe0682, 0x3fe30102, | ||
32 | 0x04000106, 0x21a00886, 0x04000603, 0x21a00903, | ||
33 | 0x40803c02, 0x21a00982, 0x40800003, 0x04000184, | ||
34 | 0x21a00a04, 0x40802202, 0x21a00a82, 0x42028005, | ||
35 | 0x34208702, 0x21002282, 0x21a00804, 0x21a00886, | ||
36 | 0x3fbf0782, 0x3f200102, 0x3fbe0102, 0x3fe30102, | ||
37 | 0x21a00902, 0x40804003, 0x21a00983, 0x21a00a04, | ||
38 | 0x40805a02, 0x21a00a82, 0x40800083, 0x21a00b83, | ||
39 | 0x01a00c02, 0x01a00d83, 0x3420c282, 0x21a00e02, | ||
40 | 0x34210283, 0x21a00f03, 0x34200284, 0x77400200, | ||
41 | 0x3421c282, 0x21a00702, 0x34218283, 0x21a00083, | ||
42 | 0x34214282, 0x21a00b02, 0x4200480c, 0x00200000, | ||
43 | 0x1c010286, 0x34220284, 0x34220302, 0x0f608203, | ||
44 | 0x5c024204, 0x3b81810b, 0x42013c02, 0x00200000, | ||
45 | 0x18008185, 0x38808183, 0x3b814182, 0x21004e84, | ||
46 | 0x4020007f, 0x35000100, 0x000004e0, 0x000002a0, | ||
47 | 0x000002e8, 0x00000428, 0x00000360, 0x000002e8, | ||
48 | 0x000004a0, 0x00000468, 0x000003c8, 0x00000360, | ||
49 | 0x409ffe02, 0x30801203, 0x40800204, 0x3ec40085, | ||
50 | 0x10009c09, 0x3ac10606, 0xb060c105, 0x4020007f, | ||
51 | 0x4020007f, 0x20801203, 0x38810602, 0xb0408586, | ||
52 | 0x28810602, 0x32004180, 0x34204702, 0x21a00382, | ||
53 | 0x4020007f, 0x327fdc80, 0x409ffe02, 0x30801203, | ||
54 | 0x40800204, 0x3ec40087, 0x40800405, 0x00200000, | ||
55 | 0x40800606, 0x3ac10608, 0x3ac14609, 0x3ac1860a, | ||
56 | 0xb060c107, 0x20801203, 0x41004003, 0x38810602, | ||
57 | 0x4020007f, 0xb0408188, 0x4020007f, 0x28810602, | ||
58 | 0x41201002, 0x38814603, 0x10009c09, 0xb060c109, | ||
59 | 0x4020007f, 0x28814603, 0x41193f83, 0x38818602, | ||
60 | 0x60ffc003, 0xb040818a, 0x28818602, 0x32003080, | ||
61 | 0x409ffe02, 0x30801203, 0x40800204, 0x3ec40087, | ||
62 | 0x41201008, 0x10009c14, 0x40800405, 0x3ac10609, | ||
63 | 0x40800606, 0x3ac1460a, 0xb060c107, 0x3ac1860b, | ||
64 | 0x20801203, 0x38810602, 0xb0408409, 0x28810602, | ||
65 | 0x38814603, 0xb060c40a, 0x4020007f, 0x28814603, | ||
66 | 0x41193f83, 0x38818602, 0x60ffc003, 0xb040818b, | ||
67 | 0x28818602, 0x32002380, 0x409ffe02, 0x30801204, | ||
68 | 0x40800205, 0x3ec40083, 0x40800406, 0x3ac14607, | ||
69 | 0x3ac18608, 0xb0810103, 0x41004002, 0x20801204, | ||
70 | 0x4020007f, 0x38814603, 0x10009c0b, 0xb060c107, | ||
71 | 0x4020007f, 0x4020007f, 0x28814603, 0x38818602, | ||
72 | 0x4020007f, 0x4020007f, 0xb0408588, 0x28818602, | ||
73 | 0x4020007f, 0x32001780, 0x409ffe02, 0x1000640e, | ||
74 | 0x40800204, 0x30801203, 0x40800405, 0x3ec40087, | ||
75 | 0x40800606, 0x3ac10608, 0x3ac14609, 0x3ac1860a, | ||
76 | 0xb060c107, 0x20801203, 0x413d8003, 0x38810602, | ||
77 | 0x4020007f, 0x327fd780, 0x409ffe02, 0x10007f0c, | ||
78 | 0x40800205, 0x30801204, 0x40800406, 0x3ec40083, | ||
79 | 0x3ac14607, 0x3ac18608, 0xb0810103, 0x413d8002, | ||
80 | 0x20801204, 0x38814603, 0x4020007f, 0x327feb80, | ||
81 | 0x409ffe02, 0x30801203, 0x40800204, 0x3ec40087, | ||
82 | 0x40800405, 0x1000650a, 0x40800606, 0x3ac10608, | ||
83 | 0x3ac14609, 0x3ac1860a, 0xb060c107, 0x20801203, | ||
84 | 0x38810602, 0xb0408588, 0x4020007f, 0x327fc980, | ||
85 | 0x00400000, 0x40800003, 0x4020007f, 0x35000000, | ||
86 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
87 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
88 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
89 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
90 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
91 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
92 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
93 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
94 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
95 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
96 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
97 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
98 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
99 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
100 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
101 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
102 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
103 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
104 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
105 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
106 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
107 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
108 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
109 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
110 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
111 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
112 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
113 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
114 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
115 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
116 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
117 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
118 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
119 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
120 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
121 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
122 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
123 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
124 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
125 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
126 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
127 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
128 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
129 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
130 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
131 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
132 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
133 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
134 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
135 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
136 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
137 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
138 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
139 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
140 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
141 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
142 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
143 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
144 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
145 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
146 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
147 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
148 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
149 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
150 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
151 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
152 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
153 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
154 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
155 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
156 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
157 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
158 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
159 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
160 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
161 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
162 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
163 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
164 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
165 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
166 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
167 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
168 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
169 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
170 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
171 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
172 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
173 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
174 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
175 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
176 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
177 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
178 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
179 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
180 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
181 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
182 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
183 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
184 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
185 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
186 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
187 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
188 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
189 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
190 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
191 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
192 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
193 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
194 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
195 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
196 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
197 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
198 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
199 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
200 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
201 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
202 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
203 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
204 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
205 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
206 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
207 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
208 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
209 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
210 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
211 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
212 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
213 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
214 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
215 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
216 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
217 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
218 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
219 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
220 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
221 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
222 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
223 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
224 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
225 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
226 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
227 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
228 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
229 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
230 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
231 | }; | ||
diff --git a/arch/powerpc/platforms/cell/spufs/spu_save.c b/arch/powerpc/platforms/cell/spufs/spu_save.c new file mode 100644 index 000000000000..196033b8a579 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spu_save.c | |||
@@ -0,0 +1,195 @@ | |||
1 | /* | ||
2 | * spu_save.c | ||
3 | * | ||
4 | * (C) Copyright IBM Corp. 2005 | ||
5 | * | ||
6 | * SPU-side context save sequence outlined in | ||
7 | * Synergistic Processor Element Book IV | ||
8 | * | ||
9 | * Author: Mark Nutter <mnutter@us.ibm.com> | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2, or (at your option) | ||
14 | * any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | * | ||
25 | */ | ||
26 | |||
27 | |||
28 | #ifndef LS_SIZE | ||
29 | #define LS_SIZE 0x40000 /* 256K (in bytes) */ | ||
30 | #endif | ||
31 | |||
32 | typedef unsigned int u32; | ||
33 | typedef unsigned long long u64; | ||
34 | |||
35 | #include <spu_intrinsics.h> | ||
36 | #include <asm/spu_csa.h> | ||
37 | #include "spu_utils.h" | ||
38 | |||
39 | static inline void save_event_mask(void) | ||
40 | { | ||
41 | unsigned int offset; | ||
42 | |||
43 | /* Save, Step 2: | ||
44 | * Read the SPU_RdEventMsk channel and save to the LSCSA. | ||
45 | */ | ||
46 | offset = LSCSA_QW_OFFSET(event_mask); | ||
47 | regs_spill[offset].slot[0] = spu_readch(SPU_RdEventStatMask); | ||
48 | } | ||
49 | |||
50 | static inline void save_tag_mask(void) | ||
51 | { | ||
52 | unsigned int offset; | ||
53 | |||
54 | /* Save, Step 3: | ||
55 | * Read the SPU_RdTagMsk channel and save to the LSCSA. | ||
56 | */ | ||
57 | offset = LSCSA_QW_OFFSET(tag_mask); | ||
58 | regs_spill[offset].slot[0] = spu_readch(MFC_RdTagMask); | ||
59 | } | ||
60 | |||
61 | static inline void save_upper_240kb(addr64 lscsa_ea) | ||
62 | { | ||
63 | unsigned int ls = 16384; | ||
64 | unsigned int list = (unsigned int)&dma_list[0]; | ||
65 | unsigned int size = sizeof(dma_list); | ||
66 | unsigned int tag_id = 0; | ||
67 | unsigned int cmd = 0x24; /* PUTL */ | ||
68 | |||
69 | /* Save, Step 7: | ||
70 | * Enqueue the PUTL command (tag 0) to the MFC SPU command | ||
71 | * queue to transfer the remaining 240 kb of LS to CSA. | ||
72 | */ | ||
73 | spu_writech(MFC_LSA, ls); | ||
74 | spu_writech(MFC_EAH, lscsa_ea.ui[0]); | ||
75 | spu_writech(MFC_EAL, list); | ||
76 | spu_writech(MFC_Size, size); | ||
77 | spu_writech(MFC_TagID, tag_id); | ||
78 | spu_writech(MFC_Cmd, cmd); | ||
79 | } | ||
80 | |||
81 | static inline void save_fpcr(void) | ||
82 | { | ||
83 | // vector unsigned int fpcr; | ||
84 | unsigned int offset; | ||
85 | |||
86 | /* Save, Step 9: | ||
87 | * Issue the floating-point status and control register | ||
88 | * read instruction, and save to the LSCSA. | ||
89 | */ | ||
90 | offset = LSCSA_QW_OFFSET(fpcr); | ||
91 | regs_spill[offset].v = spu_mffpscr(); | ||
92 | } | ||
93 | |||
94 | static inline void save_decr(void) | ||
95 | { | ||
96 | unsigned int offset; | ||
97 | |||
98 | /* Save, Step 10: | ||
99 | * Read and save the SPU_RdDec channel data to | ||
100 | * the LSCSA. | ||
101 | */ | ||
102 | offset = LSCSA_QW_OFFSET(decr); | ||
103 | regs_spill[offset].slot[0] = spu_readch(SPU_RdDec); | ||
104 | } | ||
105 | |||
106 | static inline void save_srr0(void) | ||
107 | { | ||
108 | unsigned int offset; | ||
109 | |||
110 | /* Save, Step 11: | ||
111 | * Read and save the SPU_WSRR0 channel data to | ||
112 | * the LSCSA. | ||
113 | */ | ||
114 | offset = LSCSA_QW_OFFSET(srr0); | ||
115 | regs_spill[offset].slot[0] = spu_readch(SPU_RdSRR0); | ||
116 | } | ||
117 | |||
118 | static inline void spill_regs_to_mem(addr64 lscsa_ea) | ||
119 | { | ||
120 | unsigned int ls = (unsigned int)®s_spill[0]; | ||
121 | unsigned int size = sizeof(regs_spill); | ||
122 | unsigned int tag_id = 0; | ||
123 | unsigned int cmd = 0x20; /* PUT */ | ||
124 | |||
125 | /* Save, Step 13: | ||
126 | * Enqueue a PUT command (tag 0) to send the LSCSA | ||
127 | * to the CSA. | ||
128 | */ | ||
129 | spu_writech(MFC_LSA, ls); | ||
130 | spu_writech(MFC_EAH, lscsa_ea.ui[0]); | ||
131 | spu_writech(MFC_EAL, lscsa_ea.ui[1]); | ||
132 | spu_writech(MFC_Size, size); | ||
133 | spu_writech(MFC_TagID, tag_id); | ||
134 | spu_writech(MFC_Cmd, cmd); | ||
135 | } | ||
136 | |||
137 | static inline void enqueue_sync(addr64 lscsa_ea) | ||
138 | { | ||
139 | unsigned int tag_id = 0; | ||
140 | unsigned int cmd = 0xCC; /* MFC_SYNC */ | ||
141 | |||
142 | /* Save, Step 14: | ||
143 | * Enqueue an MFC_SYNC command (tag 0). | ||
144 | */ | ||
145 | spu_writech(MFC_TagID, tag_id); | ||
146 | spu_writech(MFC_Cmd, cmd); | ||
147 | } | ||
148 | |||
149 | static inline void save_complete(void) | ||
150 | { | ||
151 | /* Save, Step 18: | ||
152 | * Issue a stop-and-signal instruction indicating | ||
153 | * "save complete". Note: This function will not | ||
154 | * return!! | ||
155 | */ | ||
156 | spu_stop(SPU_SAVE_COMPLETE); | ||
157 | } | ||
158 | |||
159 | /** | ||
160 | * main - entry point for SPU-side context save. | ||
161 | * | ||
162 | * This code deviates from the documented sequence as follows: | ||
163 | * | ||
164 | * 1. The EA for LSCSA is passed from PPE in the | ||
165 | * signal notification channels. | ||
166 | * 2. All 128 registers are saved by crt0.o. | ||
167 | */ | ||
168 | int main() | ||
169 | { | ||
170 | addr64 lscsa_ea; | ||
171 | |||
172 | lscsa_ea.ui[0] = spu_readch(SPU_RdSigNotify1); | ||
173 | lscsa_ea.ui[1] = spu_readch(SPU_RdSigNotify2); | ||
174 | |||
175 | /* Step 1: done by exit(). */ | ||
176 | save_event_mask(); /* Step 2. */ | ||
177 | save_tag_mask(); /* Step 3. */ | ||
178 | set_event_mask(); /* Step 4. */ | ||
179 | set_tag_mask(); /* Step 5. */ | ||
180 | build_dma_list(lscsa_ea); /* Step 6. */ | ||
181 | save_upper_240kb(lscsa_ea); /* Step 7. */ | ||
182 | /* Step 8: done by exit(). */ | ||
183 | save_fpcr(); /* Step 9. */ | ||
184 | save_decr(); /* Step 10. */ | ||
185 | save_srr0(); /* Step 11. */ | ||
186 | enqueue_putllc(lscsa_ea); /* Step 12. */ | ||
187 | spill_regs_to_mem(lscsa_ea); /* Step 13. */ | ||
188 | enqueue_sync(lscsa_ea); /* Step 14. */ | ||
189 | set_tag_update(); /* Step 15. */ | ||
190 | read_tag_status(); /* Step 16. */ | ||
191 | read_llar_status(); /* Step 17. */ | ||
192 | save_complete(); /* Step 18. */ | ||
193 | |||
194 | return 0; | ||
195 | } | ||
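The entry point above relies on the PPE having written the LSCSA effective address into the two signal-notification registers before this program is started. A minimal sketch of that hand-off, assuming the spu_problem field names and out_be32() accessor used elsewhere in this patch (the helper name itself is illustrative, not part of the commit):

static void send_lscsa_ea(struct spu *spu, unsigned long lscsa_ea)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* SigNotify1 carries the high word, SigNotify2 the low word,
	 * matching lscsa_ea.ui[0]/ui[1] as read back in main() above.
	 */
	out_be32(&prob->signal_notify1, (u32)(lscsa_ea >> 32));
	out_be32(&prob->signal_notify2, (u32)(lscsa_ea & 0xffffffff));
}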
diff --git a/arch/powerpc/platforms/cell/spufs/spu_save_crt0.S b/arch/powerpc/platforms/cell/spufs/spu_save_crt0.S new file mode 100644 index 000000000000..6659d6a66faa --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spu_save_crt0.S | |||
@@ -0,0 +1,102 @@ | |||
1 | /* | ||
2 | * crt0_s.S: Entry function for SPU-side context save. | ||
3 | * | ||
4 | * Copyright (C) 2005 IBM | ||
5 | * | ||
6 | * Entry function for the SPU side of the context save sequence. | ||
7 | * Saves all 128 GPRs, sets up an initial stack frame, then | ||
8 | * branches to 'main'. | ||
9 | * | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or modify | ||
12 | * it under the terms of the GNU General Public License as published by | ||
13 | * the Free Software Foundation; either version 2, or (at your option) | ||
14 | * any later version. | ||
15 | * | ||
16 | * This program is distributed in the hope that it will be useful, | ||
17 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | * GNU General Public License for more details. | ||
20 | * | ||
21 | * You should have received a copy of the GNU General Public License | ||
22 | * along with this program; if not, write to the Free Software | ||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
24 | */ | ||
25 | |||
26 | #include <asm/spu_csa.h> | ||
27 | |||
28 | .data | ||
29 | .align 7 | ||
30 | .globl regs_spill | ||
31 | regs_spill: | ||
32 | .space SIZEOF_SPU_SPILL_REGS, 0x0 | ||
33 | |||
34 | .text | ||
35 | .global _start | ||
36 | _start: | ||
37 | /* SPU Context Save Step 1: Save the first 16 GPRs. */ | ||
38 | stqa $0, regs_spill + 0 | ||
39 | stqa $1, regs_spill + 16 | ||
40 | stqa $2, regs_spill + 32 | ||
41 | stqa $3, regs_spill + 48 | ||
42 | stqa $4, regs_spill + 64 | ||
43 | stqa $5, regs_spill + 80 | ||
44 | stqa $6, regs_spill + 96 | ||
45 | stqa $7, regs_spill + 112 | ||
46 | stqa $8, regs_spill + 128 | ||
47 | stqa $9, regs_spill + 144 | ||
48 | stqa $10, regs_spill + 160 | ||
49 | stqa $11, regs_spill + 176 | ||
50 | stqa $12, regs_spill + 192 | ||
51 | stqa $13, regs_spill + 208 | ||
52 | stqa $14, regs_spill + 224 | ||
53 | stqa $15, regs_spill + 240 | ||
54 | |||
55 | /* SPU Context Save, Step 8: Save the remaining 112 GPRs. */ | ||
56 | ila $3, regs_spill + 256 | ||
57 | save_regs: | ||
58 | lqr $4, save_reg_insts | ||
59 | save_reg_loop: | ||
60 | ai $4, $4, 4 | ||
61 | .balignl 16, 0x40200000 | ||
62 | save_reg_insts: /* must be quad-word aligned. */ | ||
63 | stqd $16, 0($3) | ||
64 | stqd $17, 16($3) | ||
65 | stqd $18, 32($3) | ||
66 | stqd $19, 48($3) | ||
67 | andi $5, $4, 0x7F | ||
68 | stqr $4, save_reg_insts | ||
69 | ai $3, $3, 64 | ||
70 | brnz $5, save_reg_loop | ||
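	/* The save_reg_loop above is self-modifying: the quadword holding the
	 * four stqd instructions at save_reg_insts is loaded into $4 once,
	 * then each pass adds 4 to every instruction word (bumping the
	 * target-register field in the low bits of the encoding), executes
	 * the copy currently in local store to spill four more registers,
	 * and writes the incremented copy back for the next pass.  The
	 * 'andi $5, $4, 0x7F' test ends the loop once the register number
	 * wraps past $127, i.e. after the remaining 112 GPRs are stored.
	 */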
71 | |||
72 | /* Initialize the stack pointer to point to 16368 | ||
73 | * (16kb-16). The back chain pointer is initialized | ||
74 | * to NULL. | ||
75 | */ | ||
76 | il $0, 0 | ||
77 | il $SP, 16368 | ||
78 | stqd $0, 0($SP) | ||
79 | |||
80 | /* Allocate a minimum stack frame for the called main. | ||
81 | * This is needed so that main has a place to save the | ||
82 | * link register when it calls another function. | ||
83 | */ | ||
84 | stqd $SP, -160($SP) | ||
85 | ai $SP, $SP, -160 | ||
86 | |||
87 | /* Call the program's main function. */ | ||
88 | brsl $0, main | ||
89 | |||
90 | /* In this case main should not return; if it does | ||
91 | * there has been an error in the sequence. Execute | ||
92 | * stop-and-signal with code=0. | ||
93 | */ | ||
94 | .global exit | ||
95 | .global _exit | ||
96 | exit: | ||
97 | _exit: | ||
98 | stop 0x0 | ||
99 | |||
100 | /* Pad the size of this crt0.o to be multiple of 16 bytes. */ | ||
101 | .balignl 16, 0x0 | ||
102 | |||
diff --git a/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped b/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped new file mode 100644 index 000000000000..39e54003f1df --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spu_save_dump.h_shipped | |||
@@ -0,0 +1,191 @@ | |||
1 | /* | ||
2 | * spu_save_dump.h: Copyright (C) 2005 IBM. | ||
3 | * Hex-dump auto generated from spu_save.c. | ||
4 | * Do not edit! | ||
5 | */ | ||
6 | static unsigned int spu_save_code[] __page_aligned = { | ||
7 | 0x20805000, 0x20805201, 0x20805402, 0x20805603, | ||
8 | 0x20805804, 0x20805a05, 0x20805c06, 0x20805e07, | ||
9 | 0x20806008, 0x20806209, 0x2080640a, 0x2080660b, | ||
10 | 0x2080680c, 0x20806a0d, 0x20806c0e, 0x20806e0f, | ||
11 | 0x4201c003, 0x33800184, 0x1c010204, 0x40200000, | ||
12 | 0x24000190, 0x24004191, 0x24008192, 0x2400c193, | ||
13 | 0x141fc205, 0x23fffd84, 0x1c100183, 0x217ffb85, | ||
14 | 0x40800000, 0x409ff801, 0x24000080, 0x24fd8081, | ||
15 | 0x1cd80081, 0x33000180, 0x00000000, 0x00000000, | ||
16 | 0x01a00182, 0x3ec00083, 0xb1c38103, 0x01a00204, | ||
17 | 0x3ec10082, 0x4201400d, 0xb1c38202, 0x01a00583, | ||
18 | 0x34218682, 0x3ed80684, 0xb0408184, 0x24218682, | ||
19 | 0x01a00603, 0x00200000, 0x34214682, 0x3ed40684, | ||
20 | 0xb0408184, 0x40800003, 0x24214682, 0x21a00083, | ||
21 | 0x40800082, 0x21a00b02, 0x4020007f, 0x1000251e, | ||
22 | 0x40a80002, 0x32800008, 0x4205c00c, 0x00200000, | ||
23 | 0x40a0000b, 0x3f82070f, 0x4080020a, 0x40800709, | ||
24 | 0x3fe3078f, 0x3fbf0783, 0x3f200183, 0x3fbe0183, | ||
25 | 0x3fe30187, 0x18008387, 0x4205c002, 0x3ac30404, | ||
26 | 0x1cffc489, 0x00200000, 0x18008403, 0x38830402, | ||
27 | 0x4cffc486, 0x3ac28185, 0xb0408584, 0x28830402, | ||
28 | 0x1c020408, 0x38828182, 0xb0408385, 0x1802c387, | ||
29 | 0x28828182, 0x217ff886, 0x04000582, 0x32800007, | ||
30 | 0x21a00802, 0x3fbf0705, 0x3f200285, 0x3fbe0285, | ||
31 | 0x3fe30285, 0x21a00885, 0x04000603, 0x21a00903, | ||
32 | 0x40803c02, 0x21a00982, 0x04000386, 0x21a00a06, | ||
33 | 0x40801202, 0x21a00a82, 0x73000003, 0x24200683, | ||
34 | 0x01a00404, 0x00200000, 0x34204682, 0x3ec40683, | ||
35 | 0xb0408203, 0x24204682, 0x01a00783, 0x00200000, | ||
36 | 0x3421c682, 0x3edc0684, 0xb0408184, 0x2421c682, | ||
37 | 0x21a00806, 0x21a00885, 0x3fbf0784, 0x3f200204, | ||
38 | 0x3fbe0204, 0x3fe30204, 0x21a00904, 0x40804002, | ||
39 | 0x21a00982, 0x21a00a06, 0x40805a02, 0x21a00a82, | ||
40 | 0x04000683, 0x21a00803, 0x21a00885, 0x21a00904, | ||
41 | 0x40848002, 0x21a00982, 0x21a00a06, 0x40801002, | ||
42 | 0x21a00a82, 0x21a00a06, 0x40806602, 0x00200000, | ||
43 | 0x35800009, 0x21a00a82, 0x40800083, 0x21a00b83, | ||
44 | 0x01a00c02, 0x01a00d83, 0x00003ffb, 0x40800003, | ||
45 | 0x4020007f, 0x35000000, 0x00000000, 0x00000000, | ||
46 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
47 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
48 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
49 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
50 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
51 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
52 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
53 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
54 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
55 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
56 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
57 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
58 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
59 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
60 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
61 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
62 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
63 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
64 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
65 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
66 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
67 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
68 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
69 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
70 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
71 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
72 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
73 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
74 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
75 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
76 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
77 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
78 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
79 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
80 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
81 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
82 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
83 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
84 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
85 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
86 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
87 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
88 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
89 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
90 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
91 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
92 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
93 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
94 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
95 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
96 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
97 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
98 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
99 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
100 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
101 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
102 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
103 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
104 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
105 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
106 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
107 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
108 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
109 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
110 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
111 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
112 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
113 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
114 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
115 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
116 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
117 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
118 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
119 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
120 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
121 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
122 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
123 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
124 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
125 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
126 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
127 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
128 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
129 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
130 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
131 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
132 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
133 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
134 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
135 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
136 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
137 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
138 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
139 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
140 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
141 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
142 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
143 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
144 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
145 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
146 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
147 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
148 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
149 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
150 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
151 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
152 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
153 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
154 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
155 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
156 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
157 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
158 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
159 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
160 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
161 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
162 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
163 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
164 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
165 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
166 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
167 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
168 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
169 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
170 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
171 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
172 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
173 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
174 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
175 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
176 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
177 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
178 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
179 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
180 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
181 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
182 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
183 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
184 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
185 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
186 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
187 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
188 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
189 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
190 | 0x00000000, 0x00000000, 0x00000000, 0x00000000, | ||
191 | }; | ||
diff --git a/arch/powerpc/platforms/cell/spufs/spu_utils.h b/arch/powerpc/platforms/cell/spufs/spu_utils.h new file mode 100644 index 000000000000..58359feb6c95 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spu_utils.h | |||
@@ -0,0 +1,160 @@ | |||
1 | /* | ||
2 | * utils.h: Utilities for the SPU side of the context switch operation. | ||
3 | * | ||
4 | * (C) Copyright IBM 2005 | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2, or (at your option) | ||
9 | * any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
19 | */ | ||
20 | |||
21 | #ifndef _SPU_CONTEXT_UTILS_H_ | ||
22 | #define _SPU_CONTEXT_UTILS_H_ | ||
23 | |||
24 | /* | ||
25 | * 64-bit safe EA. | ||
26 | */ | ||
27 | typedef union { | ||
28 | unsigned long long ull; | ||
29 | unsigned int ui[2]; | ||
30 | } addr64; | ||
31 | |||
32 | /* | ||
33 | * 128-bit register template. | ||
34 | */ | ||
35 | typedef union { | ||
36 | unsigned int slot[4]; | ||
37 | vector unsigned int v; | ||
38 | } spu_reg128v; | ||
39 | |||
40 | /* | ||
41 | * DMA list structure. | ||
42 | */ | ||
43 | struct dma_list_elem { | ||
44 | unsigned int size; | ||
45 | unsigned int ea_low; | ||
46 | }; | ||
47 | |||
48 | /* | ||
49 | * Declare storage for 8-byte aligned DMA list. | ||
50 | */ | ||
51 | struct dma_list_elem dma_list[15] __attribute__ ((aligned(8))); | ||
52 | |||
53 | /* | ||
54 | * External definition for storage | ||
55 | * declared in crt0. | ||
56 | */ | ||
57 | extern spu_reg128v regs_spill[NR_SPU_SPILL_REGS]; | ||
58 | |||
59 | /* | ||
60 | * Compute LSCSA byte offset for a given field. | ||
61 | */ | ||
62 | static struct spu_lscsa *dummy = (struct spu_lscsa *)0; | ||
63 | #define LSCSA_BYTE_OFFSET(_field) \ | ||
64 | ((char *)(&(dummy->_field)) - (char *)(&(dummy->gprs[0].slot[0]))) | ||
65 | #define LSCSA_QW_OFFSET(_field) (LSCSA_BYTE_OFFSET(_field) >> 4) | ||
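/* Illustrative reading of the macros above: LSCSA_BYTE_OFFSET(_field) is the
 * byte offset of _field relative to gprs[0] in struct spu_lscsa, and
 * LSCSA_QW_OFFSET(_field) divides that by 16 so it can index the
 * quadword-sized regs_spill[] slots, e.g.:
 *
 *	regs_spill[LSCSA_QW_OFFSET(decr)].slot[0] = spu_readch(SPU_RdDec);
 */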
66 | |||
67 | static inline void set_event_mask(void) | ||
68 | { | ||
69 | unsigned int event_mask = 0; | ||
70 | |||
71 | /* Save, Step 4: | ||
72 | * Restore, Step 1: | ||
73 | * Set the SPU_RdEventMsk channel to zero to mask | ||
74 | * all events. | ||
75 | */ | ||
76 | spu_writech(SPU_WrEventMask, event_mask); | ||
77 | } | ||
78 | |||
79 | static inline void set_tag_mask(void) | ||
80 | { | ||
81 | unsigned int tag_mask = 1; | ||
82 | |||
83 | /* Save, Step 5: | ||
84 | * Restore, Step 2: | ||
85 | * Set the SPU_WrTagMsk channel to '01' to unmask | ||
86 | * only tag group 0. | ||
87 | */ | ||
88 | spu_writech(MFC_WrTagMask, tag_mask); | ||
89 | } | ||
90 | |||
91 | static inline void build_dma_list(addr64 lscsa_ea) | ||
92 | { | ||
93 | unsigned int ea_low; | ||
94 | int i; | ||
95 | |||
96 | /* Save, Step 6: | ||
97 | * Restore, Step 3: | ||
98 | * Update the effective address for the CSA in the | ||
99 | * pre-canned DMA-list in local storage. | ||
100 | */ | ||
101 | ea_low = lscsa_ea.ui[1]; | ||
102 | ea_low += LSCSA_BYTE_OFFSET(ls[16384]); | ||
103 | |||
104 | for (i = 0; i < 15; i++, ea_low += 16384) { | ||
105 | dma_list[i].size = 16384; | ||
106 | dma_list[i].ea_low = ea_low; | ||
107 | } | ||
108 | } | ||
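	/* Worked example of the loop above: with ea_low starting at
	 * lscsa_ea.ui[1] plus the byte offset of ls[16384] in the LSCSA,
	 * dma_list[0] covers the first 16384 bytes of that region,
	 * dma_list[1] the next 16384 bytes, and so on; the fifteen entries
	 * together span the upper 240 KB that save_upper_240kb() (and the
	 * corresponding restore step) move with a single list command.
	 */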
109 | |||
110 | static inline void enqueue_putllc(addr64 lscsa_ea) | ||
111 | { | ||
112 | unsigned int ls = 0; | ||
113 | unsigned int size = 128; | ||
114 | unsigned int tag_id = 0; | ||
115 | unsigned int cmd = 0xB4; /* PUTLLC */ | ||
116 | |||
117 | /* Save, Step 12: | ||
118 | * Restore, Step 7: | ||
119 | * Send a PUTLLC (tag 0) command to the MFC using | ||
120 | * an effective address in the CSA in order to | ||
121 | * remove any possible lock-line reservation. | ||
122 | */ | ||
123 | spu_writech(MFC_LSA, ls); | ||
124 | spu_writech(MFC_EAH, lscsa_ea.ui[0]); | ||
125 | spu_writech(MFC_EAL, lscsa_ea.ui[1]); | ||
126 | spu_writech(MFC_Size, size); | ||
127 | spu_writech(MFC_TagID, tag_id); | ||
128 | spu_writech(MFC_Cmd, cmd); | ||
129 | } | ||
130 | |||
131 | static inline void set_tag_update(void) | ||
132 | { | ||
133 | unsigned int update_any = 1; | ||
134 | |||
135 | /* Save, Step 15: | ||
136 | * Restore, Step 8: | ||
137 | * Write the MFC_TagUpdate channel with '01'. | ||
138 | */ | ||
139 | spu_writech(MFC_WrTagUpdate, update_any); | ||
140 | } | ||
141 | |||
142 | static inline void read_tag_status(void) | ||
143 | { | ||
144 | /* Save, Step 16: | ||
145 | * Restore, Step 9: | ||
146 | * Read the MFC_TagStat channel data. | ||
147 | */ | ||
148 | spu_readch(MFC_RdTagStat); | ||
149 | } | ||
150 | |||
151 | static inline void read_llar_status(void) | ||
152 | { | ||
153 | /* Save, Step 17: | ||
154 | * Restore, Step 10: | ||
155 | * Read the MFC_AtomicStat channel data. | ||
156 | */ | ||
157 | spu_readch(MFC_RdAtomicStat); | ||
158 | } | ||
159 | |||
160 | #endif /* _SPU_CONTEXT_UTILS_H_ */ | ||
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h new file mode 100644 index 000000000000..db2601f0abd5 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/spufs.h | |||
@@ -0,0 +1,163 @@ | |||
1 | /* | ||
2 | * SPU file system | ||
3 | * | ||
4 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 | ||
5 | * | ||
6 | * Author: Arnd Bergmann <arndb@de.ibm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License as published by | ||
10 | * the Free Software Foundation; either version 2, or (at your option) | ||
11 | * any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
21 | */ | ||
22 | #ifndef SPUFS_H | ||
23 | #define SPUFS_H | ||
24 | |||
25 | #include <linux/kref.h> | ||
26 | #include <linux/rwsem.h> | ||
27 | #include <linux/spinlock.h> | ||
28 | #include <linux/fs.h> | ||
29 | |||
30 | #include <asm/spu.h> | ||
31 | #include <asm/spu_csa.h> | ||
32 | |||
33 | /* The magic number for our file system */ | ||
34 | enum { | ||
35 | SPUFS_MAGIC = 0x23c9b64e, | ||
36 | }; | ||
37 | |||
38 | struct spu_context_ops; | ||
39 | |||
40 | #define SPU_CONTEXT_PREEMPT 0UL | ||
41 | |||
42 | struct spu_context { | ||
43 | struct spu *spu; /* pointer to a physical SPU */ | ||
44 | struct spu_state csa; /* SPU context save area. */ | ||
45 | spinlock_t mmio_lock; /* protects mmio access */ | ||
46 | struct address_space *local_store;/* local store backing store */ | ||
47 | |||
48 | enum { SPU_STATE_RUNNABLE, SPU_STATE_SAVED } state; | ||
49 | struct rw_semaphore state_sema; | ||
50 | struct semaphore run_sema; | ||
51 | |||
52 | struct mm_struct *owner; | ||
53 | |||
54 | struct kref kref; | ||
55 | wait_queue_head_t ibox_wq; | ||
56 | wait_queue_head_t wbox_wq; | ||
57 | wait_queue_head_t stop_wq; | ||
58 | struct fasync_struct *ibox_fasync; | ||
59 | struct fasync_struct *wbox_fasync; | ||
60 | struct spu_context_ops *ops; | ||
61 | struct work_struct reap_work; | ||
62 | u64 flags; | ||
63 | }; | ||
64 | |||
65 | /* SPU context query/set operations. */ | ||
66 | struct spu_context_ops { | ||
67 | int (*mbox_read) (struct spu_context * ctx, u32 * data); | ||
68 | u32(*mbox_stat_read) (struct spu_context * ctx); | ||
69 | unsigned int (*mbox_stat_poll)(struct spu_context *ctx, | ||
70 | unsigned int events); | ||
71 | int (*ibox_read) (struct spu_context * ctx, u32 * data); | ||
72 | int (*wbox_write) (struct spu_context * ctx, u32 data); | ||
73 | u32(*signal1_read) (struct spu_context * ctx); | ||
74 | void (*signal1_write) (struct spu_context * ctx, u32 data); | ||
75 | u32(*signal2_read) (struct spu_context * ctx); | ||
76 | void (*signal2_write) (struct spu_context * ctx, u32 data); | ||
77 | void (*signal1_type_set) (struct spu_context * ctx, u64 val); | ||
78 | u64(*signal1_type_get) (struct spu_context * ctx); | ||
79 | void (*signal2_type_set) (struct spu_context * ctx, u64 val); | ||
80 | u64(*signal2_type_get) (struct spu_context * ctx); | ||
81 | u32(*npc_read) (struct spu_context * ctx); | ||
82 | void (*npc_write) (struct spu_context * ctx, u32 data); | ||
83 | u32(*status_read) (struct spu_context * ctx); | ||
84 | char*(*get_ls) (struct spu_context * ctx); | ||
85 | void (*runcntl_write) (struct spu_context * ctx, u32 data); | ||
86 | void (*runcntl_stop) (struct spu_context * ctx); | ||
87 | }; | ||
88 | |||
89 | extern struct spu_context_ops spu_hw_ops; | ||
90 | extern struct spu_context_ops spu_backing_ops; | ||
91 | |||
92 | struct spufs_inode_info { | ||
93 | struct spu_context *i_ctx; | ||
94 | struct inode vfs_inode; | ||
95 | }; | ||
96 | #define SPUFS_I(inode) \ | ||
97 | container_of(inode, struct spufs_inode_info, vfs_inode) | ||
98 | |||
99 | extern struct tree_descr spufs_dir_contents[]; | ||
100 | |||
101 | /* system call implementation */ | ||
102 | long spufs_run_spu(struct file *file, | ||
103 | struct spu_context *ctx, u32 *npc, u32 *status); | ||
104 | long spufs_create_thread(struct nameidata *nd, | ||
105 | unsigned int flags, mode_t mode); | ||
106 | extern struct file_operations spufs_context_fops; | ||
107 | |||
108 | /* context management */ | ||
109 | struct spu_context * alloc_spu_context(struct address_space *local_store); | ||
110 | void destroy_spu_context(struct kref *kref); | ||
111 | struct spu_context * get_spu_context(struct spu_context *ctx); | ||
112 | int put_spu_context(struct spu_context *ctx); | ||
113 | void spu_unmap_mappings(struct spu_context *ctx); | ||
114 | |||
115 | void spu_forget(struct spu_context *ctx); | ||
116 | void spu_acquire(struct spu_context *ctx); | ||
117 | void spu_release(struct spu_context *ctx); | ||
118 | int spu_acquire_runnable(struct spu_context *ctx); | ||
119 | void spu_acquire_saved(struct spu_context *ctx); | ||
120 | |||
121 | int spu_activate(struct spu_context *ctx, u64 flags); | ||
122 | void spu_deactivate(struct spu_context *ctx); | ||
123 | void spu_yield(struct spu_context *ctx); | ||
124 | int __init spu_sched_init(void); | ||
125 | void __exit spu_sched_exit(void); | ||
126 | |||
127 | /* | ||
128 | * spufs_wait | ||
129 | * Same as wait_event_interruptible(), except that here | ||
130 | * we need to call spu_release(ctx) before sleeping, and | ||
131 | * then spu_acquire(ctx) when awoken. | ||
132 | */ | ||
133 | |||
134 | #define spufs_wait(wq, condition) \ | ||
135 | ({ \ | ||
136 | int __ret = 0; \ | ||
137 | DEFINE_WAIT(__wait); \ | ||
138 | for (;;) { \ | ||
139 | prepare_to_wait(&(wq), &__wait, TASK_INTERRUPTIBLE); \ | ||
140 | if (condition) \ | ||
141 | break; \ | ||
142 | if (!signal_pending(current)) { \ | ||
143 | spu_release(ctx); \ | ||
144 | schedule(); \ | ||
145 | spu_acquire(ctx); \ | ||
146 | continue; \ | ||
147 | } \ | ||
148 | __ret = -ERESTARTSYS; \ | ||
149 | break; \ | ||
150 | } \ | ||
151 | finish_wait(&(wq), &__wait); \ | ||
152 | __ret; \ | ||
153 | }) | ||
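As a shape-of-use illustration only (the real callers live in the spufs file I/O code; the helper below is hypothetical): a sleeper built on spufs_wait() drops the context while blocked and returns -ERESTARTSYS if interrupted by a signal.

static int wait_for_ibox_data(struct spu_context *ctx, u32 *data)
{
	/* Caller is expected to hold the context via spu_acquire();
	 * spufs_wait() releases and re-acquires it around the sleep.
	 */
	return spufs_wait(ctx->ibox_wq, spu_ibox_read(ctx, data));
}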
154 | |||
155 | size_t spu_wbox_write(struct spu_context *ctx, u32 data); | ||
156 | size_t spu_ibox_read(struct spu_context *ctx, u32 *data); | ||
157 | |||
158 | /* irq callback funcs. */ | ||
159 | void spufs_ibox_callback(struct spu *spu); | ||
160 | void spufs_wbox_callback(struct spu *spu); | ||
161 | void spufs_stop_callback(struct spu *spu); | ||
162 | |||
163 | #endif | ||
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c new file mode 100644 index 000000000000..1061c12b2edb --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/switch.c | |||
@@ -0,0 +1,2180 @@ | |||
1 | /* | ||
2 | * spu_switch.c | ||
3 | * | ||
4 | * (C) Copyright IBM Corp. 2005 | ||
5 | * | ||
6 | * Author: Mark Nutter <mnutter@us.ibm.com> | ||
7 | * | ||
8 | * Host-side part of SPU context switch sequence outlined in | ||
9 | * Synergistic Processor Element, Book IV. | ||
10 | * | ||
11 | * A fully preemptive switch of an SPE is very expensive in terms | ||
12 | * of time and system resources. SPE Book IV indicates that SPE | ||
13 | * allocation should follow a "serially reusable device" model, | ||
14 | * in which the SPE is assigned a task until it completes. When | ||
15 | * this is not possible, this sequence may be used to preemptively | ||
16 | * save, and then later (optionally) restore the context of a | ||
17 | * program executing on an SPE. | ||
18 | * | ||
19 | * | ||
20 | * This program is free software; you can redistribute it and/or modify | ||
21 | * it under the terms of the GNU General Public License as published by | ||
22 | * the Free Software Foundation; either version 2, or (at your option) | ||
23 | * any later version. | ||
24 | * | ||
25 | * This program is distributed in the hope that it will be useful, | ||
26 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
27 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
28 | * GNU General Public License for more details. | ||
29 | * | ||
30 | * You should have received a copy of the GNU General Public License | ||
31 | * along with this program; if not, write to the Free Software | ||
32 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
33 | */ | ||
34 | |||
35 | #include <linux/config.h> | ||
36 | #include <linux/module.h> | ||
37 | #include <linux/errno.h> | ||
38 | #include <linux/sched.h> | ||
39 | #include <linux/kernel.h> | ||
40 | #include <linux/mm.h> | ||
41 | #include <linux/vmalloc.h> | ||
42 | #include <linux/smp.h> | ||
43 | #include <linux/smp_lock.h> | ||
44 | #include <linux/stddef.h> | ||
45 | #include <linux/unistd.h> | ||
46 | |||
47 | #include <asm/io.h> | ||
48 | #include <asm/spu.h> | ||
49 | #include <asm/spu_csa.h> | ||
50 | #include <asm/mmu_context.h> | ||
51 | |||
52 | #include "spu_save_dump.h" | ||
53 | #include "spu_restore_dump.h" | ||
54 | |||
55 | #if 0 | ||
56 | #define POLL_WHILE_TRUE(_c) { \ | ||
57 | do { \ | ||
58 | } while (_c); \ | ||
59 | } | ||
60 | #else | ||
61 | #define RELAX_SPIN_COUNT 1000 | ||
62 | #define POLL_WHILE_TRUE(_c) { \ | ||
63 | do { \ | ||
64 | int _i; \ | ||
65 | for (_i=0; _i<RELAX_SPIN_COUNT && (_c); _i++) { \ | ||
66 | cpu_relax(); \ | ||
67 | } \ | ||
68 | if (unlikely(_c)) yield(); \ | ||
69 | else break; \ | ||
70 | } while (_c); \ | ||
71 | } | ||
72 | #endif /* debug */ | ||
73 | |||
74 | #define POLL_WHILE_FALSE(_c) POLL_WHILE_TRUE(!(_c)) | ||
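/* Note on the macros above: the enabled variant spins with cpu_relax() for up
 * to RELAX_SPIN_COUNT iterations, then calls yield() and re-checks, so a slow
 * SPU or MFC response does not hog the CPU; the "#if 0" version is the plain
 * spin loop, kept as the debug alternative.
 */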
75 | |||
76 | static inline void acquire_spu_lock(struct spu *spu) | ||
77 | { | ||
78 | /* Save, Step 1: | ||
79 | * Restore, Step 1: | ||
80 | * Acquire SPU-specific mutual exclusion lock. | ||
81 | * TBD. | ||
82 | */ | ||
83 | } | ||
84 | |||
85 | static inline void release_spu_lock(struct spu *spu) | ||
86 | { | ||
87 | /* Restore, Step 76: | ||
88 | * Release SPU-specific mutual exclusion lock. | ||
89 | * TBD. | ||
90 | */ | ||
91 | } | ||
92 | |||
93 | static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu) | ||
94 | { | ||
95 | struct spu_problem __iomem *prob = spu->problem; | ||
96 | u32 isolate_state; | ||
97 | |||
98 | /* Save, Step 2: | ||
99 | * Save, Step 6: | ||
100 | * If any field of SPU_Status[E,L,IS] is '1', this | ||
101 | * SPU is in isolate state and cannot be context | ||
102 | * saved at this time. | ||
103 | */ | ||
104 | isolate_state = SPU_STATUS_ISOLATED_STATE | | ||
105 | SPU_STATUS_ISOLATED_LOAD_STAUTUS | SPU_STATUS_ISOLATED_EXIT_STAUTUS; | ||
106 | return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0; | ||
107 | } | ||
108 | |||
109 | static inline void disable_interrupts(struct spu_state *csa, struct spu *spu) | ||
110 | { | ||
111 | /* Save, Step 3: | ||
112 | * Restore, Step 2: | ||
113 | * Save INT_Mask_class0 in CSA. | ||
114 | * Write INT_MASK_class0 with value of 0. | ||
115 | * Save INT_Mask_class1 in CSA. | ||
116 | * Write INT_MASK_class1 with value of 0. | ||
117 | * Save INT_Mask_class2 in CSA. | ||
118 | * Write INT_MASK_class2 with value of 0. | ||
119 | */ | ||
120 | spin_lock_irq(&spu->register_lock); | ||
121 | if (csa) { | ||
122 | csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0); | ||
123 | csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1); | ||
124 | csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2); | ||
125 | } | ||
126 | spu_int_mask_set(spu, 0, 0ul); | ||
127 | spu_int_mask_set(spu, 1, 0ul); | ||
128 | spu_int_mask_set(spu, 2, 0ul); | ||
129 | eieio(); | ||
130 | spin_unlock_irq(&spu->register_lock); | ||
131 | } | ||
132 | |||
133 | static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu) | ||
134 | { | ||
135 | /* Save, Step 4: | ||
136 | * Restore, Step 25. | ||
137 | * Set a software watchdog timer, which specifies the | ||
138 | * maximum allowable time for a context save sequence. | ||
139 | * | ||
140 | * At present, this implementation does not set a global | ||
141 | * watchdog timer, as virtualization & variable system load | ||
142 | * may cause unpredictable execution times. | ||
143 | */ | ||
144 | } | ||
145 | |||
146 | static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu) | ||
147 | { | ||
148 | /* Save, Step 5: | ||
149 | * Restore, Step 3: | ||
150 | * Inhibit user-space access (if provided) to this | ||
151 | * SPU by unmapping the virtual pages assigned to | ||
152 | * the SPU memory-mapped I/O (MMIO) for problem | ||
153 | * state. TBD. | ||
154 | */ | ||
155 | } | ||
156 | |||
157 | static inline void set_switch_pending(struct spu_state *csa, struct spu *spu) | ||
158 | { | ||
159 | /* Save, Step 7: | ||
160 | * Restore, Step 5: | ||
161 | * Set a software context switch pending flag. | ||
162 | */ | ||
163 | set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); | ||
164 | mb(); | ||
165 | } | ||
166 | |||
167 | static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu) | ||
168 | { | ||
169 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
170 | |||
171 | /* Save, Step 8: | ||
172 | * Read and save MFC_CNTL[Ss]. | ||
173 | */ | ||
174 | if (csa) { | ||
175 | csa->priv2.mfc_control_RW = in_be64(&priv2->mfc_control_RW) & | ||
176 | MFC_CNTL_SUSPEND_DMA_STATUS_MASK; | ||
177 | } | ||
178 | } | ||
179 | |||
180 | static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu) | ||
181 | { | ||
182 | struct spu_problem __iomem *prob = spu->problem; | ||
183 | |||
184 | /* Save, Step 9: | ||
185 | * Save SPU_Runcntl in the CSA. This value contains | ||
186 | * the "Application Desired State". | ||
187 | */ | ||
188 | csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW); | ||
189 | } | ||
190 | |||
191 | static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu) | ||
192 | { | ||
193 | /* Save, Step 10: | ||
194 | * Save MFC_SR1 in the CSA. | ||
195 | */ | ||
196 | csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu); | ||
197 | } | ||
198 | |||
199 | static inline void save_spu_status(struct spu_state *csa, struct spu *spu) | ||
200 | { | ||
201 | struct spu_problem __iomem *prob = spu->problem; | ||
202 | |||
203 | /* Save, Step 11: | ||
204 | * Read SPU_Status[R], and save to CSA. | ||
205 | */ | ||
206 | if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) { | ||
207 | csa->prob.spu_status_R = in_be32(&prob->spu_status_R); | ||
208 | } else { | ||
209 | u32 stopped; | ||
210 | |||
211 | out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); | ||
212 | eieio(); | ||
213 | POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & | ||
214 | SPU_STATUS_RUNNING); | ||
215 | stopped = | ||
216 | SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP | | ||
217 | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; | ||
218 | if ((in_be32(&prob->spu_status_R) & stopped) == 0) | ||
219 | csa->prob.spu_status_R = SPU_STATUS_RUNNING; | ||
220 | else | ||
221 | csa->prob.spu_status_R = in_be32(&prob->spu_status_R); | ||
222 | } | ||
223 | } | ||
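	/* Note on the logic above: if the SPU is still running it is stopped
	 * first; when none of the "stopped for a reason" bits (invalid
	 * instruction, single step, halt, stop-and-signal) is latched
	 * afterwards, the saved status is recorded as SPU_STATUS_RUNNING so
	 * the restore sequence knows to restart the program rather than
	 * leave it stopped.
	 */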
224 | |||
225 | static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu) | ||
226 | { | ||
227 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
228 | |||
229 | /* Save, Step 12: | ||
230 | * Read MFC_CNTL[Ds]. Update saved copy of | ||
231 | * CSA.MFC_CNTL[Ds]. | ||
232 | */ | ||
233 | if (in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING) { | ||
234 | csa->priv2.mfc_control_RW |= MFC_CNTL_DECREMENTER_RUNNING; | ||
235 | csa->suspend_time = get_cycles(); | ||
236 | out_be64(&priv2->spu_chnlcntptr_RW, 7ULL); | ||
237 | eieio(); | ||
238 | csa->spu_chnldata_RW[7] = in_be64(&priv2->spu_chnldata_RW); | ||
239 | eieio(); | ||
240 | } | ||
241 | } | ||
242 | |||
243 | static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu) | ||
244 | { | ||
245 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
246 | |||
247 | /* Save, Step 13: | ||
248 | * Write MFC_CNTL[Dh] set to a '1' to halt | ||
249 | * the decrementer. | ||
250 | */ | ||
251 | out_be64(&priv2->mfc_control_RW, MFC_CNTL_DECREMENTER_HALTED); | ||
252 | eieio(); | ||
253 | } | ||
254 | |||
255 | static inline void save_timebase(struct spu_state *csa, struct spu *spu) | ||
256 | { | ||
257 | /* Save, Step 14: | ||
258 | * Read PPE Timebase High and Timebase low registers | ||
259 | * and save in CSA. TBD. | ||
260 | */ | ||
261 | csa->suspend_time = get_cycles(); | ||
262 | } | ||
263 | |||
264 | static inline void remove_other_spu_access(struct spu_state *csa, | ||
265 | struct spu *spu) | ||
266 | { | ||
267 | /* Save, Step 15: | ||
268 | * Remove other SPU access to this SPU by unmapping | ||
269 | * this SPU's pages from their address space. TBD. | ||
270 | */ | ||
271 | } | ||
272 | |||
273 | static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu) | ||
274 | { | ||
275 | struct spu_problem __iomem *prob = spu->problem; | ||
276 | |||
277 | /* Save, Step 16: | ||
278 | * Restore, Step 11. | ||
279 | * Write SPU_MSSync register. Poll SPU_MSSync[P] | ||
280 | * for a value of 0. | ||
281 | */ | ||
282 | out_be64(&prob->spc_mssync_RW, 1UL); | ||
283 | POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING); | ||
284 | } | ||
285 | |||
286 | static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu) | ||
287 | { | ||
288 | /* Save, Step 17: | ||
289 | * Restore, Step 12. | ||
290 | * Restore, Step 48. | ||
291 | * Write TLB_Invalidate_Entry[IS,VPN,L,Lp]=0 register. | ||
292 | * Then issue a PPE sync instruction. | ||
293 | */ | ||
294 | spu_tlb_invalidate(spu); | ||
295 | mb(); | ||
296 | } | ||
297 | |||
298 | static inline void handle_pending_interrupts(struct spu_state *csa, | ||
299 | struct spu *spu) | ||
300 | { | ||
301 | /* Save, Step 18: | ||
302 | * Handle any pending interrupts from this SPU | ||
303 | * here. This is OS or hypervisor specific. One | ||
304 | * option is to re-enable interrupts to handle any | ||
305 | * pending interrupts, with the interrupt handlers | ||
306 | * recognizing the software Context Switch Pending | ||
307 | * flag, to ensure the SPU execution or MFC command | ||
308 | * queue is not restarted. TBD. | ||
309 | */ | ||
310 | } | ||
311 | |||
312 | static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu) | ||
313 | { | ||
314 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
315 | int i; | ||
316 | |||
317 | /* Save, Step 19: | ||
318 | * If MFC_Cntl[Se]=0 then save | ||
319 | * MFC command queues. | ||
320 | */ | ||
321 | if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) { | ||
322 | for (i = 0; i < 8; i++) { | ||
323 | csa->priv2.puq[i].mfc_cq_data0_RW = | ||
324 | in_be64(&priv2->puq[i].mfc_cq_data0_RW); | ||
325 | csa->priv2.puq[i].mfc_cq_data1_RW = | ||
326 | in_be64(&priv2->puq[i].mfc_cq_data1_RW); | ||
327 | csa->priv2.puq[i].mfc_cq_data2_RW = | ||
328 | in_be64(&priv2->puq[i].mfc_cq_data2_RW); | ||
329 | csa->priv2.puq[i].mfc_cq_data3_RW = | ||
330 | in_be64(&priv2->puq[i].mfc_cq_data3_RW); | ||
331 | } | ||
332 | for (i = 0; i < 16; i++) { | ||
333 | csa->priv2.spuq[i].mfc_cq_data0_RW = | ||
334 | in_be64(&priv2->spuq[i].mfc_cq_data0_RW); | ||
335 | csa->priv2.spuq[i].mfc_cq_data1_RW = | ||
336 | in_be64(&priv2->spuq[i].mfc_cq_data1_RW); | ||
337 | csa->priv2.spuq[i].mfc_cq_data2_RW = | ||
338 | in_be64(&priv2->spuq[i].mfc_cq_data2_RW); | ||
339 | csa->priv2.spuq[i].mfc_cq_data3_RW = | ||
340 | in_be64(&priv2->spuq[i].mfc_cq_data3_RW); | ||
341 | } | ||
342 | } | ||
343 | } | ||
344 | |||
345 | static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu) | ||
346 | { | ||
347 | struct spu_problem __iomem *prob = spu->problem; | ||
348 | |||
349 | /* Save, Step 20: | ||
350 | * Save the PPU_QueryMask register | ||
351 | * in the CSA. | ||
352 | */ | ||
353 | csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW); | ||
354 | } | ||
355 | |||
356 | static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu) | ||
357 | { | ||
358 | struct spu_problem __iomem *prob = spu->problem; | ||
359 | |||
360 | /* Save, Step 21: | ||
361 | * Save the PPU_QueryType register | ||
362 | * in the CSA. | ||
363 | */ | ||
364 | csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW); | ||
365 | } | ||
366 | |||
367 | static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu) | ||
368 | { | ||
369 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
370 | |||
371 | /* Save, Step 22: | ||
372 | * Save the MFC_CSR_TSQ register | ||
373 | * in the LSCSA. | ||
374 | */ | ||
375 | csa->priv2.spu_tag_status_query_RW = | ||
376 | in_be64(&priv2->spu_tag_status_query_RW); | ||
377 | } | ||
378 | |||
379 | static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu) | ||
380 | { | ||
381 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
382 | |||
383 | /* Save, Step 23: | ||
384 | * Save the MFC_CSR_CMD1 and MFC_CSR_CMD2 | ||
385 | * registers in the CSA. | ||
386 | */ | ||
387 | csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW); | ||
388 | csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW); | ||
389 | } | ||
390 | |||
391 | static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu) | ||
392 | { | ||
393 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
394 | |||
395 | /* Save, Step 24: | ||
396 | * Save the MFC_CSR_ATO register in | ||
397 | * the CSA. | ||
398 | */ | ||
399 | csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW); | ||
400 | } | ||
401 | |||
402 | static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu) | ||
403 | { | ||
404 | /* Save, Step 25: | ||
405 | * Save the MFC_TCLASS_ID register in | ||
406 | * the CSA. | ||
407 | */ | ||
408 | csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu); | ||
409 | } | ||
410 | |||
411 | static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu) | ||
412 | { | ||
413 | /* Save, Step 26: | ||
414 | * Restore, Step 23. | ||
415 | * Write the MFC_TCLASS_ID register with | ||
416 | * the value 0x10000000. | ||
417 | */ | ||
418 | spu_mfc_tclass_id_set(spu, 0x10000000); | ||
419 | eieio(); | ||
420 | } | ||
421 | |||
422 | static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu) | ||
423 | { | ||
424 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
425 | |||
426 | /* Save, Step 27: | ||
427 | * Restore, Step 14. | ||
428 | * Write MFC_CNTL[Pc]=1 (purge queue). | ||
429 | */ | ||
430 | out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST); | ||
431 | eieio(); | ||
432 | } | ||
433 | |||
434 | static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu) | ||
435 | { | ||
436 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
437 | |||
438 | /* Save, Step 28: | ||
439 | * Poll MFC_CNTL[Ps] until value '11' is read | ||
440 | * (purge complete). | ||
441 | */ | ||
442 | POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) & | ||
443 | MFC_CNTL_PURGE_DMA_COMPLETE); | ||
444 | } | ||
445 | |||
446 | static inline void save_mfc_slbs(struct spu_state *csa, struct spu *spu) | ||
447 | { | ||
448 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
449 | int i; | ||
450 | |||
451 | /* Save, Step 29: | ||
452 | * If MFC_SR1[R]='1', save SLBs in CSA. | ||
453 | */ | ||
454 | if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) { | ||
455 | csa->priv2.slb_index_W = in_be64(&priv2->slb_index_W); | ||
456 | for (i = 0; i < 8; i++) { | ||
457 | out_be64(&priv2->slb_index_W, i); | ||
458 | eieio(); | ||
459 | csa->slb_esid_RW[i] = in_be64(&priv2->slb_esid_RW); | ||
460 | csa->slb_vsid_RW[i] = in_be64(&priv2->slb_vsid_RW); | ||
461 | eieio(); | ||
462 | } | ||
463 | } | ||
464 | } | ||
465 | |||
466 | static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu) | ||
467 | { | ||
468 | /* Save, Step 30: | ||
469 | * Restore, Step 18: | ||
470 | * Write MFC_SR1 with MFC_SR1[D=0,S=1] and | ||
471 | * MFC_SR1[TL,R,Pr,T] set correctly for the | ||
472 | * OS specific environment. | ||
473 | * | ||
474 | * Implementation note: The SPU-side code | ||
475 | * for save/restore is privileged, so the | ||
476 | * MFC_SR1[Pr] bit is not set. | ||
477 | * | ||
478 | */ | ||
479 | spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK | | ||
480 | MFC_STATE1_RELOCATE_MASK | | ||
481 | MFC_STATE1_BUS_TLBIE_MASK)); | ||
482 | } | ||
483 | |||
484 | static inline void save_spu_npc(struct spu_state *csa, struct spu *spu) | ||
485 | { | ||
486 | struct spu_problem __iomem *prob = spu->problem; | ||
487 | |||
488 | /* Save, Step 31: | ||
489 | * Save SPU_NPC in the CSA. | ||
490 | */ | ||
491 | csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW); | ||
492 | } | ||
493 | |||
494 | static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu) | ||
495 | { | ||
496 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
497 | |||
498 | /* Save, Step 32: | ||
499 | * Save SPU_PrivCntl in the CSA. | ||
500 | */ | ||
501 | csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW); | ||
502 | } | ||
503 | |||
504 | static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu) | ||
505 | { | ||
506 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
507 | |||
508 | /* Save, Step 33: | ||
509 | * Restore, Step 16: | ||
510 | * Write SPU_PrivCntl[S,Le,A] fields reset to 0. | ||
511 | */ | ||
512 | out_be64(&priv2->spu_privcntl_RW, 0UL); | ||
513 | eieio(); | ||
514 | } | ||
515 | |||
516 | static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu) | ||
517 | { | ||
518 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
519 | |||
520 | /* Save, Step 34: | ||
521 | * Save SPU_LSLR in the CSA. | ||
522 | */ | ||
523 | csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW); | ||
524 | } | ||
525 | |||
526 | static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu) | ||
527 | { | ||
528 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
529 | |||
530 | /* Save, Step 35: | ||
531 | * Restore, Step 17. | ||
532 | * Reset SPU_LSLR. | ||
533 | */ | ||
534 | out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK); | ||
535 | eieio(); | ||
536 | } | ||
537 | |||
538 | static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu) | ||
539 | { | ||
540 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
541 | |||
542 | /* Save, Step 36: | ||
543 | * Save SPU_Cfg in the CSA. | ||
544 | */ | ||
545 | csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW); | ||
546 | } | ||
547 | |||
548 | static inline void save_pm_trace(struct spu_state *csa, struct spu *spu) | ||
549 | { | ||
550 | /* Save, Step 37: | ||
551 | * Save PM_Trace_Tag_Wait_Mask in the CSA. | ||
552 | * Not performed by this implementation. | ||
553 | */ | ||
554 | } | ||
555 | |||
556 | static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu) | ||
557 | { | ||
558 | /* Save, Step 38: | ||
559 | * Save RA_GROUP_ID register and the | ||
560 | * RA_ENABLE register in the CSA. | ||
561 | */ | ||
562 | csa->priv1.resource_allocation_groupID_RW = | ||
563 | spu_resource_allocation_groupID_get(spu); | ||
564 | csa->priv1.resource_allocation_enable_RW = | ||
565 | spu_resource_allocation_enable_get(spu); | ||
566 | } | ||
567 | |||
568 | static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu) | ||
569 | { | ||
570 | struct spu_problem __iomem *prob = spu->problem; | ||
571 | |||
572 | /* Save, Step 39: | ||
573 | * Save MB_Stat register in the CSA. | ||
574 | */ | ||
575 | csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R); | ||
576 | } | ||
577 | |||
578 | static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu) | ||
579 | { | ||
580 | struct spu_problem __iomem *prob = spu->problem; | ||
581 | |||
582 | /* Save, Step 40: | ||
583 | * Save the PPU_MB register in the CSA. | ||
584 | */ | ||
585 | csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R); | ||
586 | } | ||
587 | |||
588 | static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu) | ||
589 | { | ||
590 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
591 | |||
592 | /* Save, Step 41: | ||
593 | * Save the PPUINT_MB register in the CSA. | ||
594 | */ | ||
595 | csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R); | ||
596 | } | ||
597 | |||
598 | static inline void save_ch_part1(struct spu_state *csa, struct spu *spu) | ||
599 | { | ||
600 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
601 | u64 idx, ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL }; | ||
602 | int i; | ||
603 | |||
604 | /* Save, Step 42: | ||
605 | * Save the following CH: [0,1,3,4,24,25,27] | ||
606 | */ | ||
607 | for (i = 0; i < 7; i++) { | ||
608 | idx = ch_indices[i]; | ||
609 | out_be64(&priv2->spu_chnlcntptr_RW, idx); | ||
610 | eieio(); | ||
611 | csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW); | ||
612 | csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW); | ||
613 | out_be64(&priv2->spu_chnldata_RW, 0UL); | ||
614 | out_be64(&priv2->spu_chnlcnt_RW, 0UL); | ||
615 | eieio(); | ||
616 | } | ||
617 | } | ||
618 | |||
619 | static inline void save_spu_mb(struct spu_state *csa, struct spu *spu) | ||
620 | { | ||
621 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
622 | int i; | ||
623 | |||
624 | /* Save, Step 43: | ||
625 | * Save SPU Read Mailbox Channel. | ||
626 | */ | ||
627 | out_be64(&priv2->spu_chnlcntptr_RW, 29UL); | ||
628 | eieio(); | ||
629 | csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW); | ||
630 | for (i = 0; i < 4; i++) { | ||
631 | csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW); | ||
632 | } | ||
633 | out_be64(&priv2->spu_chnlcnt_RW, 0UL); | ||
634 | eieio(); | ||
635 | } | ||
636 | |||
637 | static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu) | ||
638 | { | ||
639 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
640 | |||
641 | /* Save, Step 44: | ||
642 | * Save MFC_CMD Channel. | ||
643 | */ | ||
644 | out_be64(&priv2->spu_chnlcntptr_RW, 21UL); | ||
645 | eieio(); | ||
646 | csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW); | ||
647 | eieio(); | ||
648 | } | ||
649 | |||
650 | static inline void reset_ch(struct spu_state *csa, struct spu *spu) | ||
651 | { | ||
652 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
653 | u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL }; | ||
654 | u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL }; | ||
655 | u64 idx; | ||
656 | int i; | ||
657 | |||
658 | /* Save, Step 45: | ||
659 | * Reset the following CH: [21, 23, 28, 30] | ||
660 | */ | ||
661 | for (i = 0; i < 4; i++) { | ||
662 | idx = ch_indices[i]; | ||
663 | out_be64(&priv2->spu_chnlcntptr_RW, idx); | ||
664 | eieio(); | ||
665 | out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); | ||
666 | eieio(); | ||
667 | } | ||
668 | } | ||
669 | |||
670 | static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu) | ||
671 | { | ||
672 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
673 | |||
674 | /* Save, Step 46: | ||
675 | * Restore, Step 25. | ||
676 | * Write MFC_CNTL[Sc]=0 (resume queue processing). | ||
677 | */ | ||
678 | out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE); | ||
679 | } | ||
680 | |||
681 | static inline void invalidate_slbs(struct spu_state *csa, struct spu *spu) | ||
682 | { | ||
683 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
684 | |||
685 | /* Save, Step 45: | ||
686 | * Restore, Step 19: | ||
687 | * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All. | ||
688 | */ | ||
689 | if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) { | ||
690 | out_be64(&priv2->slb_invalidate_all_W, 0UL); | ||
691 | eieio(); | ||
692 | } | ||
693 | } | ||
694 | |||
695 | static inline void get_kernel_slb(u64 ea, u64 slb[2]) | ||
696 | { | ||
697 | slb[0] = (get_kernel_vsid(ea) << SLB_VSID_SHIFT) | SLB_VSID_KERNEL; | ||
698 | slb[1] = (ea & ESID_MASK) | SLB_ESID_V; | ||
699 | |||
700 | /* Large pages are used for kernel text/data, but not vmalloc. */ | ||
701 | if (cpu_has_feature(CPU_FTR_16M_PAGE) | ||
702 | && REGION_ID(ea) == KERNEL_REGION_ID) | ||
703 | slb[0] |= SLB_VSID_L; | ||
704 | } | ||
705 | |||
706 | static inline void load_mfc_slb(struct spu *spu, u64 slb[2], int slbe) | ||
707 | { | ||
708 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
709 | |||
710 | out_be64(&priv2->slb_index_W, slbe); | ||
711 | eieio(); | ||
712 | out_be64(&priv2->slb_vsid_RW, slb[0]); | ||
713 | out_be64(&priv2->slb_esid_RW, slb[1]); | ||
714 | eieio(); | ||
715 | } | ||
716 | |||
717 | static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu) | ||
718 | { | ||
719 | u64 code_slb[2]; | ||
720 | u64 lscsa_slb[2]; | ||
721 | |||
722 | /* Save, Step 47: | ||
723 | * Restore, Step 30. | ||
724 | * If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All | ||
725 | * register, then initialize SLB_VSID and SLB_ESID | ||
726 | * to provide access to SPU context save code and | ||
727 | * LSCSA. | ||
728 | * | ||
729 | * This implementation places both the context | ||
730 | * switch code and LSCSA in kernel address space. | ||
731 | * | ||
732 | * Further, this implementation assumes that | ||
733 | * MFC_SR1[R]=1 (in other words, that | ||
734 | * translation is desired by the OS environment). | ||
735 | */ | ||
736 | invalidate_slbs(csa, spu); | ||
737 | get_kernel_slb((unsigned long)&spu_save_code[0], code_slb); | ||
738 | get_kernel_slb((unsigned long)csa->lscsa, lscsa_slb); | ||
739 | load_mfc_slb(spu, code_slb, 0); | ||
740 | if ((lscsa_slb[0] != code_slb[0]) || (lscsa_slb[1] != code_slb[1])) | ||
741 | load_mfc_slb(spu, lscsa_slb, 1); | ||
742 | } | ||
743 | |||
744 | static inline void set_switch_active(struct spu_state *csa, struct spu *spu) | ||
745 | { | ||
746 | /* Save, Step 48: | ||
747 | * Restore, Step 23. | ||
748 | * Change the software context switch pending flag | ||
749 | * to context switch active. | ||
750 | */ | ||
751 | set_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags); | ||
752 | clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags); | ||
753 | mb(); | ||
754 | } | ||
755 | |||
756 | static inline void enable_interrupts(struct spu_state *csa, struct spu *spu) | ||
757 | { | ||
758 | unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR | | ||
759 | CLASS1_ENABLE_STORAGE_FAULT_INTR; | ||
760 | |||
761 | /* Save, Step 49: | ||
762 | * Restore, Step 22: | ||
763 | * Reset and then enable interrupts, as | ||
764 | * needed by OS. | ||
765 | * | ||
766 | * This implementation enables only class1 | ||
767 | * (translation) interrupts. | ||
768 | */ | ||
769 | spin_lock_irq(&spu->register_lock); | ||
770 | spu_int_stat_clear(spu, 0, ~0ul); | ||
771 | spu_int_stat_clear(spu, 1, ~0ul); | ||
772 | spu_int_stat_clear(spu, 2, ~0ul); | ||
773 | spu_int_mask_set(spu, 0, 0ul); | ||
774 | spu_int_mask_set(spu, 1, class1_mask); | ||
775 | spu_int_mask_set(spu, 2, 0ul); | ||
776 | spin_unlock_irq(&spu->register_lock); | ||
777 | } | ||
778 | |||
779 | static inline int send_mfc_dma(struct spu *spu, unsigned long ea, | ||
780 | unsigned int ls_offset, unsigned int size, | ||
781 | unsigned int tag, unsigned int rclass, | ||
782 | unsigned int cmd) | ||
783 | { | ||
784 | struct spu_problem __iomem *prob = spu->problem; | ||
785 | union mfc_tag_size_class_cmd command; | ||
786 | unsigned int transfer_size; | ||
787 | volatile unsigned int status = 0x0; | ||
788 | |||
789 | while (size > 0) { | ||
790 | transfer_size = | ||
791 | (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size; | ||
792 | command.u.mfc_size = transfer_size; | ||
793 | command.u.mfc_tag = tag; | ||
794 | command.u.mfc_rclassid = rclass; | ||
795 | command.u.mfc_cmd = cmd; | ||
796 | do { | ||
797 | out_be32(&prob->mfc_lsa_W, ls_offset); | ||
798 | out_be64(&prob->mfc_ea_W, ea); | ||
799 | out_be64(&prob->mfc_union_W.all64, command.all64); | ||
800 | status = | ||
801 | in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32); | ||
802 | if (unlikely(status & 0x2)) { | ||
803 | cpu_relax(); | ||
804 | } | ||
805 | } while (status & 0x3); | ||
806 | size -= transfer_size; | ||
807 | ea += transfer_size; | ||
808 | ls_offset += transfer_size; | ||
809 | } | ||
810 | return 0; | ||
811 | } | ||
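
The loop above splits a large transfer into MFC-sized chunks before queueing each command. A minimal stand-alone sketch of that chunking arithmetic, assuming MFC_MAX_DMA_SIZE is 16 KiB as on Cell (the real constant comes from the kernel headers, and the addresses here are illustrative only):

#include <stdio.h>

#define MFC_MAX_DMA_SIZE 0x4000		/* assumption: 16 KiB per MFC command */

int main(void)
{
	unsigned int size = 20480;	/* e.g. a 20 KiB local-store region */
	unsigned int ea = 0, ls = 0;

	while (size > 0) {
		unsigned int chunk = (size > MFC_MAX_DMA_SIZE) ?
				     MFC_MAX_DMA_SIZE : size;

		printf("queue DMA: %u bytes, ea=0x%x, ls=0x%x\n", chunk, ea, ls);
		size -= chunk;
		ea += chunk;
		ls += chunk;
	}
	return 0;
}

With the values above this yields two queued commands, 16384 bytes followed by 4096 bytes, which mirrors how send_mfc_dma() advances ea and ls_offset per chunk.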
812 | |||
813 | static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu) | ||
814 | { | ||
815 | unsigned long addr = (unsigned long)&csa->lscsa->ls[0]; | ||
816 | unsigned int ls_offset = 0x0; | ||
817 | unsigned int size = 16384; | ||
818 | unsigned int tag = 0; | ||
819 | unsigned int rclass = 0; | ||
820 | unsigned int cmd = MFC_PUT_CMD; | ||
821 | |||
822 | /* Save, Step 50: | ||
823 | * Issue a DMA command to copy the first 16K bytes | ||
824 | * of local storage to the CSA. | ||
825 | */ | ||
826 | send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); | ||
827 | } | ||
828 | |||
829 | static inline void set_spu_npc(struct spu_state *csa, struct spu *spu) | ||
830 | { | ||
831 | struct spu_problem __iomem *prob = spu->problem; | ||
832 | |||
833 | /* Save, Step 51: | ||
834 | * Restore, Step 31. | ||
835 | * Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to entry | ||
836 | * point address of context save code in local | ||
837 | * storage. | ||
838 | * | ||
839 | * This implementation uses SPU-side save/restore | ||
840 | * programs with entry points at LSA of 0. | ||
841 | */ | ||
842 | out_be32(&prob->spu_npc_RW, 0); | ||
843 | eieio(); | ||
844 | } | ||
845 | |||
846 | static inline void set_signot1(struct spu_state *csa, struct spu *spu) | ||
847 | { | ||
848 | struct spu_problem __iomem *prob = spu->problem; | ||
849 | union { | ||
850 | u64 ull; | ||
851 | u32 ui[2]; | ||
852 | } addr64; | ||
853 | |||
854 | /* Save, Step 52: | ||
855 | * Restore, Step 32: | ||
856 | * Write SPU_Sig_Notify_1 register with upper 32-bits | ||
857 | * of the CSA.LSCSA effective address. | ||
858 | */ | ||
859 | addr64.ull = (u64) csa->lscsa; | ||
860 | out_be32(&prob->signal_notify1, addr64.ui[0]); | ||
861 | eieio(); | ||
862 | } | ||
863 | |||
864 | static inline void set_signot2(struct spu_state *csa, struct spu *spu) | ||
865 | { | ||
866 | struct spu_problem __iomem *prob = spu->problem; | ||
867 | union { | ||
868 | u64 ull; | ||
869 | u32 ui[2]; | ||
870 | } addr64; | ||
871 | |||
872 | /* Save, Step 53: | ||
873 | * Restore, Step 33: | ||
874 | * Write SPU_Sig_Notify_2 register with lower 32-bits | ||
875 | * of the CSA.LSCSA effective address. | ||
876 | */ | ||
877 | addr64.ull = (u64) csa->lscsa; | ||
878 | out_be32(&prob->signal_notify2, addr64.ui[1]); | ||
879 | eieio(); | ||
880 | } | ||
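
set_signot1() and set_signot2() rely on the union to split the 64-bit LSCSA effective address into two 32-bit halves. A small user-space sketch of the same trick, assuming a big-endian layout as on the PPE (on a little-endian host the halves would come out swapped; the address value is made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	union {
		uint64_t ull;
		uint32_t ui[2];
	} addr64;

	addr64.ull = 0x00000100deadbeefULL;	/* example effective address */

	/* Big-endian: ui[0] holds the upper word, ui[1] the lower word. */
	printf("upper=0x%08x lower=0x%08x\n",
	       (unsigned)addr64.ui[0], (unsigned)addr64.ui[1]);
	return 0;
}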
881 | |||
882 | static inline void send_save_code(struct spu_state *csa, struct spu *spu) | ||
883 | { | ||
884 | unsigned long addr = (unsigned long)&spu_save_code[0]; | ||
885 | unsigned int ls_offset = 0x0; | ||
886 | unsigned int size = sizeof(spu_save_code); | ||
887 | unsigned int tag = 0; | ||
888 | unsigned int rclass = 0; | ||
889 | unsigned int cmd = MFC_GETFS_CMD; | ||
890 | |||
891 | /* Save, Step 54: | ||
892 | * Issue a DMA command to copy context save code | ||
893 | * to local storage and start SPU. | ||
894 | */ | ||
895 | send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); | ||
896 | } | ||
897 | |||
898 | static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu) | ||
899 | { | ||
900 | struct spu_problem __iomem *prob = spu->problem; | ||
901 | |||
902 | /* Save, Step 55: | ||
903 | * Restore, Step 38. | ||
904 | * Write PPU_QueryMask=1 (enable Tag Group 0) | ||
905 | * and issue eieio instruction. | ||
906 | */ | ||
907 | out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0)); | ||
908 | eieio(); | ||
909 | } | ||
910 | |||
911 | static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu) | ||
912 | { | ||
913 | struct spu_problem __iomem *prob = spu->problem; | ||
914 | u32 mask = MFC_TAGID_TO_TAGMASK(0); | ||
915 | unsigned long flags; | ||
916 | |||
917 | /* Save, Step 56: | ||
918 | * Restore, Step 39. | ||
919 | * Restore, Step 39. | ||
920 | * Restore, Step 46. | ||
921 | * Poll PPU_TagStatus[gn] until 01 (Tag group 0 complete) | ||
922 | * or write PPU_QueryType[TS]=01 and wait for Tag Group | ||
923 | * Complete Interrupt. Write INT_Stat_Class0 or | ||
924 | * INT_Stat_Class2 with value of 'handled'. | ||
925 | */ | ||
926 | POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask); | ||
927 | |||
928 | local_irq_save(flags); | ||
929 | spu_int_stat_clear(spu, 0, ~(0ul)); | ||
930 | spu_int_stat_clear(spu, 2, ~(0ul)); | ||
931 | local_irq_restore(flags); | ||
932 | } | ||
933 | |||
934 | static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu) | ||
935 | { | ||
936 | struct spu_problem __iomem *prob = spu->problem; | ||
937 | unsigned long flags; | ||
938 | |||
939 | /* Save, Step 57: | ||
940 | * Restore, Step 40. | ||
941 | * Poll until SPU_Status[R]=0 or wait for SPU Class 0 | ||
942 | * or SPU Class 2 interrupt. Write INT_Stat_class0 | ||
943 | * or INT_Stat_class2 with value of handled. | ||
944 | */ | ||
945 | POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING); | ||
946 | |||
947 | local_irq_save(flags); | ||
948 | spu_int_stat_clear(spu, 0, ~(0ul)); | ||
949 | spu_int_stat_clear(spu, 2, ~(0ul)); | ||
950 | local_irq_restore(flags); | ||
951 | } | ||
952 | |||
953 | static inline int check_save_status(struct spu_state *csa, struct spu *spu) | ||
954 | { | ||
955 | struct spu_problem __iomem *prob = spu->problem; | ||
956 | u32 complete; | ||
957 | |||
958 | /* Save, Step 54: | ||
959 | * If SPU_Status[P]=1 and SPU_Status[SC] = "success", | ||
960 | * context save succeeded, otherwise context save | ||
961 | * failed. | ||
962 | */ | ||
963 | complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) | | ||
964 | SPU_STATUS_STOPPED_BY_STOP); | ||
965 | return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0; | ||
966 | } | ||
967 | |||
968 | static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu) | ||
969 | { | ||
970 | /* Restore, Step 4: | ||
971 | * If required, notify the "using application" that | ||
972 | * the SPU task has been terminated. TBD. | ||
973 | */ | ||
974 | } | ||
975 | |||
976 | static inline void suspend_mfc(struct spu_state *csa, struct spu *spu) | ||
977 | { | ||
978 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
979 | |||
980 | /* Restore, Step 7: | ||
981 |  * Restore, Step 46. | ||
982 | * Write MFC_Cntl[Dh,Sc]='1','1' to suspend | ||
983 | * the queue and halt the decrementer. | ||
984 | */ | ||
985 | out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE | | ||
986 | MFC_CNTL_DECREMENTER_HALTED); | ||
987 | eieio(); | ||
988 | } | ||
989 | |||
990 | static inline void wait_suspend_mfc_complete(struct spu_state *csa, | ||
991 | struct spu *spu) | ||
992 | { | ||
993 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
994 | |||
995 | /* Restore, Step 8: | ||
996 | * Restore, Step 47. | ||
997 | * Poll MFC_CNTL[Ss] until 11 is returned. | ||
998 | */ | ||
999 | POLL_WHILE_FALSE(in_be64(&priv2->mfc_control_RW) & | ||
1000 | MFC_CNTL_SUSPEND_COMPLETE); | ||
1001 | } | ||
1002 | |||
1003 | static inline int suspend_spe(struct spu_state *csa, struct spu *spu) | ||
1004 | { | ||
1005 | struct spu_problem __iomem *prob = spu->problem; | ||
1006 | |||
1007 | /* Restore, Step 9: | ||
1008 | * If SPU_Status[R]=1, stop SPU execution | ||
1009 | * and wait for stop to complete. | ||
1010 | * | ||
1011 | * Returns 1 if SPU_Status[R]=1 on entry. | ||
1012 | * 0 otherwise | ||
1013 | */ | ||
1014 | if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) { | ||
1015 | if (in_be32(&prob->spu_status_R) & | ||
1016 | SPU_STATUS_ISOLATED_EXIT_STAUTUS) { | ||
1017 | POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & | ||
1018 | SPU_STATUS_RUNNING); | ||
1019 | } | ||
1020 | if ((in_be32(&prob->spu_status_R) & | ||
1021 | SPU_STATUS_ISOLATED_LOAD_STAUTUS) | ||
1022 | || (in_be32(&prob->spu_status_R) & | ||
1023 | SPU_STATUS_ISOLATED_STATE)) { | ||
1024 | out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); | ||
1025 | eieio(); | ||
1026 | POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & | ||
1027 | SPU_STATUS_RUNNING); | ||
1028 | out_be32(&prob->spu_runcntl_RW, 0x2); | ||
1029 | eieio(); | ||
1030 | POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & | ||
1031 | SPU_STATUS_RUNNING); | ||
1032 | } | ||
1033 | if (in_be32(&prob->spu_status_R) & | ||
1034 | SPU_STATUS_WAITING_FOR_CHANNEL) { | ||
1035 | out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); | ||
1036 | eieio(); | ||
1037 | POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & | ||
1038 | SPU_STATUS_RUNNING); | ||
1039 | } | ||
1040 | return 1; | ||
1041 | } | ||
1042 | return 0; | ||
1043 | } | ||
1044 | |||
1045 | static inline void clear_spu_status(struct spu_state *csa, struct spu *spu) | ||
1046 | { | ||
1047 | struct spu_problem __iomem *prob = spu->problem; | ||
1048 | |||
1049 | /* Restore, Step 10: | ||
1050 | * If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1, | ||
1051 | * release SPU from isolate state. | ||
1052 | */ | ||
1053 | if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) { | ||
1054 | if (in_be32(&prob->spu_status_R) & | ||
1055 | SPU_STATUS_ISOLATED_EXIT_STAUTUS) { | ||
1056 | spu_mfc_sr1_set(spu, | ||
1057 | MFC_STATE1_MASTER_RUN_CONTROL_MASK); | ||
1058 | eieio(); | ||
1059 | out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); | ||
1060 | eieio(); | ||
1061 | POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & | ||
1062 | SPU_STATUS_RUNNING); | ||
1063 | } | ||
1064 | if ((in_be32(&prob->spu_status_R) & | ||
1065 | SPU_STATUS_ISOLATED_LOAD_STAUTUS) | ||
1066 | || (in_be32(&prob->spu_status_R) & | ||
1067 | SPU_STATUS_ISOLATED_STATE)) { | ||
1068 | spu_mfc_sr1_set(spu, | ||
1069 | MFC_STATE1_MASTER_RUN_CONTROL_MASK); | ||
1070 | eieio(); | ||
1071 | out_be32(&prob->spu_runcntl_RW, 0x2); | ||
1072 | eieio(); | ||
1073 | POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & | ||
1074 | SPU_STATUS_RUNNING); | ||
1075 | } | ||
1076 | } | ||
1077 | } | ||
1078 | |||
1079 | static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu) | ||
1080 | { | ||
1081 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1082 | u64 ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL }; | ||
1083 | u64 idx; | ||
1084 | int i; | ||
1085 | |||
1086 | /* Restore, Step 20: | ||
1087 | * Reset the following CH: [0,1,3,4,24,25,27] | ||
1088 | */ | ||
1089 | for (i = 0; i < 7; i++) { | ||
1090 | idx = ch_indices[i]; | ||
1091 | out_be64(&priv2->spu_chnlcntptr_RW, idx); | ||
1092 | eieio(); | ||
1093 | out_be64(&priv2->spu_chnldata_RW, 0UL); | ||
1094 | out_be64(&priv2->spu_chnlcnt_RW, 0UL); | ||
1095 | eieio(); | ||
1096 | } | ||
1097 | } | ||
1098 | |||
1099 | static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu) | ||
1100 | { | ||
1101 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1102 | u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL }; | ||
1103 | u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL }; | ||
1104 | u64 idx; | ||
1105 | int i; | ||
1106 | |||
1107 | /* Restore, Step 21: | ||
1108 | * Reset the following CH: [21, 23, 28, 29, 30] | ||
1109 | */ | ||
1110 | for (i = 0; i < 5; i++) { | ||
1111 | idx = ch_indices[i]; | ||
1112 | out_be64(&priv2->spu_chnlcntptr_RW, idx); | ||
1113 | eieio(); | ||
1114 | out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); | ||
1115 | eieio(); | ||
1116 | } | ||
1117 | } | ||
1118 | |||
1119 | static inline void setup_spu_status_part1(struct spu_state *csa, | ||
1120 | struct spu *spu) | ||
1121 | { | ||
1122 | u32 status_P = SPU_STATUS_STOPPED_BY_STOP; | ||
1123 | u32 status_I = SPU_STATUS_INVALID_INSTR; | ||
1124 | u32 status_H = SPU_STATUS_STOPPED_BY_HALT; | ||
1125 | u32 status_S = SPU_STATUS_SINGLE_STEP; | ||
1126 | u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR; | ||
1127 | u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP; | ||
1128 | u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT |SPU_STATUS_STOPPED_BY_STOP; | ||
1129 | u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP |SPU_STATUS_INVALID_INSTR; | ||
1130 | u32 status_code; | ||
1131 | |||
1132 | /* Restore, Step 27: | ||
1133 | * If the CSA.SPU_Status[I,S,H,P]=1 then add the correct | ||
1134 | * instruction sequence to the end of the SPU based restore | ||
1135 | * code (after the "context restored" stop and signal) to | ||
1136 | * restore the correct SPU status. | ||
1137 | * | ||
1138 | * NOTE: Rather than modifying the SPU executable, we | ||
1139 | * instead add a new 'stopped_status' field to the | ||
1140 | * LSCSA. The SPU-side restore reads this field and | ||
1141 | * takes the appropriate action when exiting. | ||
1142 | */ | ||
1143 | |||
1144 | status_code = | ||
1145 | (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF; | ||
1146 | if ((csa->prob.spu_status_R & status_P_I) == status_P_I) { | ||
1147 | |||
1148 | /* SPU_Status[P,I]=1 - Illegal Instruction followed | ||
1149 | * by Stop and Signal instruction, followed by 'br -4'. | ||
1150 | * | ||
1151 | */ | ||
1152 | csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I; | ||
1153 | csa->lscsa->stopped_status.slot[1] = status_code; | ||
1154 | |||
1155 | } else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) { | ||
1156 | |||
1157 | /* SPU_Status[P,H]=1 - Halt Conditional, followed | ||
1158 | * by Stop and Signal instruction, followed by | ||
1159 | * 'br -4'. | ||
1160 | */ | ||
1161 | csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H; | ||
1162 | csa->lscsa->stopped_status.slot[1] = status_code; | ||
1163 | |||
1164 | } else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) { | ||
1165 | |||
1166 | /* SPU_Status[S,P]=1 - Stop and Signal instruction | ||
1167 | * followed by 'br -4'. | ||
1168 | */ | ||
1169 | csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P; | ||
1170 | csa->lscsa->stopped_status.slot[1] = status_code; | ||
1171 | |||
1172 | } else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) { | ||
1173 | |||
1174 | /* SPU_Status[S,I]=1 - Illegal instruction followed | ||
1175 | * by 'br -4'. | ||
1176 | */ | ||
1177 | csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I; | ||
1178 | csa->lscsa->stopped_status.slot[1] = status_code; | ||
1179 | |||
1180 | } else if ((csa->prob.spu_status_R & status_P) == status_P) { | ||
1181 | |||
1182 | /* SPU_Status[P]=1 - Stop and Signal instruction | ||
1183 | * followed by 'br -4'. | ||
1184 | */ | ||
1185 | csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P; | ||
1186 | csa->lscsa->stopped_status.slot[1] = status_code; | ||
1187 | |||
1188 | } else if ((csa->prob.spu_status_R & status_H) == status_H) { | ||
1189 | |||
1190 | /* SPU_Status[H]=1 - Halt Conditional, followed | ||
1191 | * by 'br -4'. | ||
1192 | */ | ||
1193 | csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H; | ||
1194 | |||
1195 | } else if ((csa->prob.spu_status_R & status_S) == status_S) { | ||
1196 | |||
1197 | /* SPU_Status[S]=1 - Two nop instructions. | ||
1198 | */ | ||
1199 | csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S; | ||
1200 | |||
1201 | } else if ((csa->prob.spu_status_R & status_I) == status_I) { | ||
1202 | |||
1203 | /* SPU_Status[I]=1 - Illegal instruction followed | ||
1204 | * by 'br -4'. | ||
1205 | */ | ||
1206 | csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I; | ||
1207 | |||
1208 | } | ||
1209 | } | ||
1210 | |||
1211 | static inline void setup_spu_status_part2(struct spu_state *csa, | ||
1212 | struct spu *spu) | ||
1213 | { | ||
1214 | u32 mask; | ||
1215 | |||
1216 | /* Restore, Step 28: | ||
1217 | * If the CSA.SPU_Status[I,S,H,P,R]=0 then | ||
1218 | * add a 'br *' instruction to the end of | ||
1219 | * the SPU based restore code. | ||
1220 | * | ||
1221 | * NOTE: Rather than modifying the SPU executable, we | ||
1222 | * instead add a new 'stopped_status' field to the | ||
1223 | * LSCSA. The SPU-side restore reads this field and | ||
1224 | * takes the appropriate action when exiting. | ||
1225 | */ | ||
1226 | mask = SPU_STATUS_INVALID_INSTR | | ||
1227 | SPU_STATUS_SINGLE_STEP | | ||
1228 | SPU_STATUS_STOPPED_BY_HALT | | ||
1229 | SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING; | ||
1230 | if (!(csa->prob.spu_status_R & mask)) { | ||
1231 | csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R; | ||
1232 | } | ||
1233 | } | ||
1234 | |||
1235 | static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu) | ||
1236 | { | ||
1237 | /* Restore, Step 29: | ||
1238 | * Restore RA_GROUP_ID register and the | ||
1239 |  * RA_ENABLE register from the CSA. | ||
1240 | */ | ||
1241 | spu_resource_allocation_groupID_set(spu, | ||
1242 | csa->priv1.resource_allocation_groupID_RW); | ||
1243 | spu_resource_allocation_enable_set(spu, | ||
1244 | csa->priv1.resource_allocation_enable_RW); | ||
1245 | } | ||
1246 | |||
1247 | static inline void send_restore_code(struct spu_state *csa, struct spu *spu) | ||
1248 | { | ||
1249 | unsigned long addr = (unsigned long)&spu_restore_code[0]; | ||
1250 | unsigned int ls_offset = 0x0; | ||
1251 | unsigned int size = sizeof(spu_restore_code); | ||
1252 | unsigned int tag = 0; | ||
1253 | unsigned int rclass = 0; | ||
1254 | unsigned int cmd = MFC_GETFS_CMD; | ||
1255 | |||
1256 | /* Restore, Step 37: | ||
1257 | * Issue MFC DMA command to copy context | ||
1258 | * restore code to local storage. | ||
1259 | */ | ||
1260 | send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); | ||
1261 | } | ||
1262 | |||
1263 | static inline void setup_decr(struct spu_state *csa, struct spu *spu) | ||
1264 | { | ||
1265 | /* Restore, Step 34: | ||
1266 | * If CSA.MFC_CNTL[Ds]=1 (decrementer was | ||
1267 | * running) then adjust decrementer, set | ||
1268 | * decrementer running status in LSCSA, | ||
1269 | * and set decrementer "wrapped" status | ||
1270 | * in LSCSA. | ||
1271 | */ | ||
1272 | if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) { | ||
1273 | cycles_t resume_time = get_cycles(); | ||
1274 | cycles_t delta_time = resume_time - csa->suspend_time; | ||
1275 | |||
1276 | csa->lscsa->decr.slot[0] = delta_time; | ||
1277 | } | ||
1278 | } | ||
1279 | |||
1280 | static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu) | ||
1281 | { | ||
1282 | /* Restore, Step 35: | ||
1283 | * Copy the CSA.PU_MB data into the LSCSA. | ||
1284 | */ | ||
1285 | csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R; | ||
1286 | } | ||
1287 | |||
1288 | static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu) | ||
1289 | { | ||
1290 | /* Restore, Step 36: | ||
1291 | * Copy the CSA.PUINT_MB data into the LSCSA. | ||
1292 | */ | ||
1293 | csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R; | ||
1294 | } | ||
1295 | |||
1296 | static inline int check_restore_status(struct spu_state *csa, struct spu *spu) | ||
1297 | { | ||
1298 | struct spu_problem __iomem *prob = spu->problem; | ||
1299 | u32 complete; | ||
1300 | |||
1301 | /* Restore, Step 40: | ||
1302 | * If SPU_Status[P]=1 and SPU_Status[SC] = "success", | ||
1303 | * context restore succeeded, otherwise context restore | ||
1304 | * failed. | ||
1305 | */ | ||
1306 | complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) | | ||
1307 | SPU_STATUS_STOPPED_BY_STOP); | ||
1308 | return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0; | ||
1309 | } | ||
1310 | |||
1311 | static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu) | ||
1312 | { | ||
1313 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1314 | |||
1315 | /* Restore, Step 41: | ||
1316 | * Restore SPU_PrivCntl from the CSA. | ||
1317 | */ | ||
1318 | out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW); | ||
1319 | eieio(); | ||
1320 | } | ||
1321 | |||
1322 | static inline void restore_status_part1(struct spu_state *csa, struct spu *spu) | ||
1323 | { | ||
1324 | struct spu_problem __iomem *prob = spu->problem; | ||
1325 | u32 mask; | ||
1326 | |||
1327 | /* Restore, Step 42: | ||
1328 | * If any CSA.SPU_Status[I,S,H,P]=1, then | ||
1329 | * restore the error or single step state. | ||
1330 | */ | ||
1331 | mask = SPU_STATUS_INVALID_INSTR | | ||
1332 | SPU_STATUS_SINGLE_STEP | | ||
1333 | SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP; | ||
1334 | if (csa->prob.spu_status_R & mask) { | ||
1335 | out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); | ||
1336 | eieio(); | ||
1337 | POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & | ||
1338 | SPU_STATUS_RUNNING); | ||
1339 | } | ||
1340 | } | ||
1341 | |||
1342 | static inline void restore_status_part2(struct spu_state *csa, struct spu *spu) | ||
1343 | { | ||
1344 | struct spu_problem __iomem *prob = spu->problem; | ||
1345 | u32 mask; | ||
1346 | |||
1347 | /* Restore, Step 43: | ||
1348 | * If all CSA.SPU_Status[I,S,H,P,R]=0 then write | ||
1349 | * SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1, | ||
1350 | * then write '00' to SPU_RunCntl[R0R1] and wait | ||
1351 | * for SPU_Status[R]=0. | ||
1352 | */ | ||
1353 | mask = SPU_STATUS_INVALID_INSTR | | ||
1354 | SPU_STATUS_SINGLE_STEP | | ||
1355 | SPU_STATUS_STOPPED_BY_HALT | | ||
1356 | SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING; | ||
1357 | if (!(csa->prob.spu_status_R & mask)) { | ||
1358 | out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); | ||
1359 | eieio(); | ||
1360 | POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) & | ||
1361 | SPU_STATUS_RUNNING); | ||
1362 | out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP); | ||
1363 | eieio(); | ||
1364 | POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & | ||
1365 | SPU_STATUS_RUNNING); | ||
1366 | } | ||
1367 | } | ||
1368 | |||
1369 | static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu) | ||
1370 | { | ||
1371 | unsigned long addr = (unsigned long)&csa->lscsa->ls[0]; | ||
1372 | unsigned int ls_offset = 0x0; | ||
1373 | unsigned int size = 16384; | ||
1374 | unsigned int tag = 0; | ||
1375 | unsigned int rclass = 0; | ||
1376 | unsigned int cmd = MFC_GET_CMD; | ||
1377 | |||
1378 | /* Restore, Step 44: | ||
1379 | * Issue a DMA command to restore the first | ||
1380 | * 16kb of local storage from CSA. | ||
1381 | */ | ||
1382 | send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd); | ||
1383 | } | ||
1384 | |||
1385 | static inline void clear_interrupts(struct spu_state *csa, struct spu *spu) | ||
1386 | { | ||
1387 | /* Restore, Step 49: | ||
1388 | * Write INT_MASK_class0 with value of 0. | ||
1389 | * Write INT_MASK_class1 with value of 0. | ||
1390 | * Write INT_MASK_class2 with value of 0. | ||
1391 | * Write INT_STAT_class0 with value of -1. | ||
1392 | * Write INT_STAT_class1 with value of -1. | ||
1393 | * Write INT_STAT_class2 with value of -1. | ||
1394 | */ | ||
1395 | spin_lock_irq(&spu->register_lock); | ||
1396 | spu_int_mask_set(spu, 0, 0ul); | ||
1397 | spu_int_mask_set(spu, 1, 0ul); | ||
1398 | spu_int_mask_set(spu, 2, 0ul); | ||
1399 | spu_int_stat_clear(spu, 0, ~0ul); | ||
1400 | spu_int_stat_clear(spu, 1, ~0ul); | ||
1401 | spu_int_stat_clear(spu, 2, ~0ul); | ||
1402 | spin_unlock_irq(&spu->register_lock); | ||
1403 | } | ||
1404 | |||
1405 | static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu) | ||
1406 | { | ||
1407 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1408 | int i; | ||
1409 | |||
1410 | /* Restore, Step 50: | ||
1411 | * If MFC_Cntl[Se]!=0 then restore | ||
1412 | * MFC command queues. | ||
1413 | */ | ||
1414 | if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) { | ||
1415 | for (i = 0; i < 8; i++) { | ||
1416 | out_be64(&priv2->puq[i].mfc_cq_data0_RW, | ||
1417 | csa->priv2.puq[i].mfc_cq_data0_RW); | ||
1418 | out_be64(&priv2->puq[i].mfc_cq_data1_RW, | ||
1419 | csa->priv2.puq[i].mfc_cq_data1_RW); | ||
1420 | out_be64(&priv2->puq[i].mfc_cq_data2_RW, | ||
1421 | csa->priv2.puq[i].mfc_cq_data2_RW); | ||
1422 | out_be64(&priv2->puq[i].mfc_cq_data3_RW, | ||
1423 | csa->priv2.puq[i].mfc_cq_data3_RW); | ||
1424 | } | ||
1425 | for (i = 0; i < 16; i++) { | ||
1426 | out_be64(&priv2->spuq[i].mfc_cq_data0_RW, | ||
1427 | csa->priv2.spuq[i].mfc_cq_data0_RW); | ||
1428 | out_be64(&priv2->spuq[i].mfc_cq_data1_RW, | ||
1429 | csa->priv2.spuq[i].mfc_cq_data1_RW); | ||
1430 | out_be64(&priv2->spuq[i].mfc_cq_data2_RW, | ||
1431 | csa->priv2.spuq[i].mfc_cq_data2_RW); | ||
1432 | out_be64(&priv2->spuq[i].mfc_cq_data3_RW, | ||
1433 | csa->priv2.spuq[i].mfc_cq_data3_RW); | ||
1434 | } | ||
1435 | } | ||
1436 | eieio(); | ||
1437 | } | ||
1438 | |||
1439 | static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu) | ||
1440 | { | ||
1441 | struct spu_problem __iomem *prob = spu->problem; | ||
1442 | |||
1443 | /* Restore, Step 51: | ||
1444 | * Restore the PPU_QueryMask register from CSA. | ||
1445 | */ | ||
1446 | out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW); | ||
1447 | eieio(); | ||
1448 | } | ||
1449 | |||
1450 | static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu) | ||
1451 | { | ||
1452 | struct spu_problem __iomem *prob = spu->problem; | ||
1453 | |||
1454 | /* Restore, Step 52: | ||
1455 | * Restore the PPU_QueryType register from CSA. | ||
1456 | */ | ||
1457 | out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW); | ||
1458 | eieio(); | ||
1459 | } | ||
1460 | |||
1461 | static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu) | ||
1462 | { | ||
1463 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1464 | |||
1465 | /* Restore, Step 53: | ||
1466 | * Restore the MFC_CSR_TSQ register from CSA. | ||
1467 | */ | ||
1468 | out_be64(&priv2->spu_tag_status_query_RW, | ||
1469 | csa->priv2.spu_tag_status_query_RW); | ||
1470 | eieio(); | ||
1471 | } | ||
1472 | |||
1473 | static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu) | ||
1474 | { | ||
1475 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1476 | |||
1477 | /* Restore, Step 54: | ||
1478 | * Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2 | ||
1479 | * registers from CSA. | ||
1480 | */ | ||
1481 | out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW); | ||
1482 | out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW); | ||
1483 | eieio(); | ||
1484 | } | ||
1485 | |||
1486 | static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu) | ||
1487 | { | ||
1488 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1489 | |||
1490 | /* Restore, Step 55: | ||
1491 | * Restore the MFC_CSR_ATO register from CSA. | ||
1492 | */ | ||
1493 | out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW); | ||
1494 | } | ||
1495 | |||
1496 | static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu) | ||
1497 | { | ||
1498 | /* Restore, Step 56: | ||
1499 | * Restore the MFC_TCLASS_ID register from CSA. | ||
1500 | */ | ||
1501 | spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW); | ||
1502 | eieio(); | ||
1503 | } | ||
1504 | |||
1505 | static inline void set_llr_event(struct spu_state *csa, struct spu *spu) | ||
1506 | { | ||
1507 | u64 ch0_cnt, ch0_data; | ||
1508 | u64 ch1_data; | ||
1509 | |||
1510 | /* Restore, Step 57: | ||
1511 | * Set the Lock Line Reservation Lost Event by: | ||
1512 | * 1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1. | ||
1513 | * 2. If CSA.SPU_Channel_0_Count=0 and | ||
1514 | * CSA.SPU_Wr_Event_Mask[Lr]=1 and | ||
1515 | * CSA.SPU_Event_Status[Lr]=0 then set | ||
1516 | * CSA.SPU_Event_Status_Count=1. | ||
1517 | */ | ||
1518 | ch0_cnt = csa->spu_chnlcnt_RW[0]; | ||
1519 | ch0_data = csa->spu_chnldata_RW[0]; | ||
1520 | ch1_data = csa->spu_chnldata_RW[1]; | ||
1521 | csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT; | ||
1522 | if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) && | ||
1523 | (ch1_data & MFC_LLR_LOST_EVENT)) { | ||
1524 | csa->spu_chnlcnt_RW[0] = 1; | ||
1525 | } | ||
1526 | } | ||
1527 | |||
1528 | static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu) | ||
1529 | { | ||
1530 | /* Restore, Step 58: | ||
1531 | * If the status of the CSA software decrementer | ||
1532 | * "wrapped" flag is set, OR in a '1' to | ||
1533 | * CSA.SPU_Event_Status[Tm]. | ||
1534 | */ | ||
1535 | if (csa->lscsa->decr_status.slot[0] == 1) { | ||
1536 | csa->spu_chnldata_RW[0] |= 0x20; | ||
1537 | } | ||
1538 | if ((csa->lscsa->decr_status.slot[0] == 1) && | ||
1539 | (csa->spu_chnlcnt_RW[0] == 0 && | ||
1540 | ((csa->spu_chnldata_RW[2] & 0x20) == 0x0) && | ||
1541 | ((csa->spu_chnldata_RW[0] & 0x20) != 0x1))) { | ||
1542 | csa->spu_chnlcnt_RW[0] = 1; | ||
1543 | } | ||
1544 | } | ||
1545 | |||
1546 | static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu) | ||
1547 | { | ||
1548 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1549 | u64 idx, ch_indices[7] = { 0UL, 1UL, 3UL, 4UL, 24UL, 25UL, 27UL }; | ||
1550 | int i; | ||
1551 | |||
1552 | /* Restore, Step 59: | ||
1553 | * Restore the following CH: [0,1,3,4,24,25,27] | ||
1554 | */ | ||
1555 | for (i = 0; i < 7; i++) { | ||
1556 | idx = ch_indices[i]; | ||
1557 | out_be64(&priv2->spu_chnlcntptr_RW, idx); | ||
1558 | eieio(); | ||
1559 | out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]); | ||
1560 | out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]); | ||
1561 | eieio(); | ||
1562 | } | ||
1563 | } | ||
1564 | |||
1565 | static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu) | ||
1566 | { | ||
1567 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1568 | u64 ch_indices[3] = { 9UL, 21UL, 23UL }; | ||
1569 | u64 ch_counts[3] = { 1UL, 16UL, 1UL }; | ||
1570 | u64 idx; | ||
1571 | int i; | ||
1572 | |||
1573 | /* Restore, Step 60: | ||
1574 | * Restore the following CH: [9,21,23]. | ||
1575 | */ | ||
1576 | ch_counts[0] = 1UL; | ||
1577 | ch_counts[1] = csa->spu_chnlcnt_RW[21]; | ||
1578 | ch_counts[2] = 1UL; | ||
1579 | for (i = 0; i < 3; i++) { | ||
1580 | idx = ch_indices[i]; | ||
1581 | out_be64(&priv2->spu_chnlcntptr_RW, idx); | ||
1582 | eieio(); | ||
1583 | out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]); | ||
1584 | eieio(); | ||
1585 | } | ||
1586 | } | ||
1587 | |||
1588 | static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu) | ||
1589 | { | ||
1590 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1591 | |||
1592 | /* Restore, Step 61: | ||
1593 | * Restore the SPU_LSLR register from CSA. | ||
1594 | */ | ||
1595 | out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW); | ||
1596 | eieio(); | ||
1597 | } | ||
1598 | |||
1599 | static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu) | ||
1600 | { | ||
1601 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1602 | |||
1603 | /* Restore, Step 62: | ||
1604 | * Restore the SPU_Cfg register from CSA. | ||
1605 | */ | ||
1606 | out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW); | ||
1607 | eieio(); | ||
1608 | } | ||
1609 | |||
1610 | static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu) | ||
1611 | { | ||
1612 | /* Restore, Step 63: | ||
1613 | * Restore PM_Trace_Tag_Wait_Mask from CSA. | ||
1614 | * Not performed by this implementation. | ||
1615 | */ | ||
1616 | } | ||
1617 | |||
1618 | static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu) | ||
1619 | { | ||
1620 | struct spu_problem __iomem *prob = spu->problem; | ||
1621 | |||
1622 | /* Restore, Step 64: | ||
1623 | * Restore SPU_NPC from CSA. | ||
1624 | */ | ||
1625 | out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW); | ||
1626 | eieio(); | ||
1627 | } | ||
1628 | |||
1629 | static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu) | ||
1630 | { | ||
1631 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1632 | int i; | ||
1633 | |||
1634 | /* Restore, Step 65: | ||
1635 | * Restore MFC_RdSPU_MB from CSA. | ||
1636 | */ | ||
1637 | out_be64(&priv2->spu_chnlcntptr_RW, 29UL); | ||
1638 | eieio(); | ||
1639 | out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]); | ||
1640 | for (i = 0; i < 4; i++) { | ||
1641 | out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]); | ||
1642 | } | ||
1643 | eieio(); | ||
1644 | } | ||
1645 | |||
1646 | static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu) | ||
1647 | { | ||
1648 | struct spu_problem __iomem *prob = spu->problem; | ||
1649 | u32 dummy = 0; | ||
1650 | |||
1651 | /* Restore, Step 66: | ||
1652 | * If CSA.MB_Stat[P]=0 (mailbox empty) then | ||
1653 | * read from the PPU_MB register. | ||
1654 | */ | ||
1655 | if ((csa->prob.mb_stat_R & 0xFF) == 0) { | ||
1656 | dummy = in_be32(&prob->pu_mb_R); | ||
1657 | eieio(); | ||
1658 | } | ||
1659 | } | ||
1660 | |||
1661 | static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu) | ||
1662 | { | ||
1663 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1664 | u64 dummy = 0UL; | ||
1665 | |||
1666 | /* Restore, Step 67: | ||
1667 | * If CSA.MB_Stat[I]=0 (mailbox empty) then | ||
1668 | * read from the PPUINT_MB register. | ||
1669 | */ | ||
1670 | if ((csa->prob.mb_stat_R & 0xFF0000) == 0) { | ||
1671 | dummy = in_be64(&priv2->puint_mb_R); | ||
1672 | eieio(); | ||
1673 | spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR); | ||
1674 | eieio(); | ||
1675 | } | ||
1676 | } | ||
1677 | |||
1678 | static inline void restore_mfc_slbs(struct spu_state *csa, struct spu *spu) | ||
1679 | { | ||
1680 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1681 | int i; | ||
1682 | |||
1683 | /* Restore, Step 68: | ||
1684 | * If MFC_SR1[R]='1', restore SLBs from CSA. | ||
1685 | */ | ||
1686 | if (csa->priv1.mfc_sr1_RW & MFC_STATE1_RELOCATE_MASK) { | ||
1687 | for (i = 0; i < 8; i++) { | ||
1688 | out_be64(&priv2->slb_index_W, i); | ||
1689 | eieio(); | ||
1690 | out_be64(&priv2->slb_esid_RW, csa->slb_esid_RW[i]); | ||
1691 | out_be64(&priv2->slb_vsid_RW, csa->slb_vsid_RW[i]); | ||
1692 | eieio(); | ||
1693 | } | ||
1694 | out_be64(&priv2->slb_index_W, csa->priv2.slb_index_W); | ||
1695 | eieio(); | ||
1696 | } | ||
1697 | } | ||
1698 | |||
1699 | static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu) | ||
1700 | { | ||
1701 | /* Restore, Step 69: | ||
1702 | * Restore the MFC_SR1 register from CSA. | ||
1703 | */ | ||
1704 | spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW); | ||
1705 | eieio(); | ||
1706 | } | ||
1707 | |||
1708 | static inline void restore_other_spu_access(struct spu_state *csa, | ||
1709 | struct spu *spu) | ||
1710 | { | ||
1711 | /* Restore, Step 70: | ||
1712 | * Restore other SPU mappings to this SPU. TBD. | ||
1713 | */ | ||
1714 | } | ||
1715 | |||
1716 | static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu) | ||
1717 | { | ||
1718 | struct spu_problem __iomem *prob = spu->problem; | ||
1719 | |||
1720 | /* Restore, Step 71: | ||
1721 | * If CSA.SPU_Status[R]=1 then write | ||
1722 | * SPU_RunCntl[R0R1]='01'. | ||
1723 | */ | ||
1724 | if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) { | ||
1725 | out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE); | ||
1726 | eieio(); | ||
1727 | } | ||
1728 | } | ||
1729 | |||
1730 | static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu) | ||
1731 | { | ||
1732 | struct spu_priv2 __iomem *priv2 = spu->priv2; | ||
1733 | |||
1734 | /* Restore, Step 72: | ||
1735 | * Restore the MFC_CNTL register for the CSA. | ||
1736 | */ | ||
1737 | out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW); | ||
1738 | eieio(); | ||
1739 | } | ||
1740 | |||
1741 | static inline void enable_user_access(struct spu_state *csa, struct spu *spu) | ||
1742 | { | ||
1743 | /* Restore, Step 73: | ||
1744 | * Enable user-space access (if provided) to this | ||
1745 | * SPU by mapping the virtual pages assigned to | ||
1746 | * the SPU memory-mapped I/O (MMIO) for problem | ||
1747 | * state. TBD. | ||
1748 | */ | ||
1749 | } | ||
1750 | |||
1751 | static inline void reset_switch_active(struct spu_state *csa, struct spu *spu) | ||
1752 | { | ||
1753 | /* Restore, Step 74: | ||
1754 | * Reset the "context switch active" flag. | ||
1755 | */ | ||
1756 | clear_bit(SPU_CONTEXT_SWITCH_ACTIVE, &spu->flags); | ||
1757 | mb(); | ||
1758 | } | ||
1759 | |||
1760 | static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu) | ||
1761 | { | ||
1762 | /* Restore, Step 75: | ||
1763 | * Re-enable SPU interrupts. | ||
1764 | */ | ||
1765 | spin_lock_irq(&spu->register_lock); | ||
1766 | spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW); | ||
1767 | spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW); | ||
1768 | spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW); | ||
1769 | spin_unlock_irq(&spu->register_lock); | ||
1770 | } | ||
1771 | |||
1772 | static int quiece_spu(struct spu_state *prev, struct spu *spu) | ||
1773 | { | ||
1774 | /* | ||
1775 | * Combined steps 2-18 of SPU context save sequence, which | ||
1776 | * quiesce the SPU state (disable SPU execution, MFC command | ||
1777 | * queues, decrementer, SPU interrupts, etc.). | ||
1778 | * | ||
1779 | * Returns 0 on success. | ||
1780 | * 2 if failed step 2. | ||
1781 | * 6 if failed step 6. | ||
1782 | */ | ||
1783 | |||
1784 | if (check_spu_isolate(prev, spu)) { /* Step 2. */ | ||
1785 | return 2; | ||
1786 | } | ||
1787 | disable_interrupts(prev, spu); /* Step 3. */ | ||
1788 | set_watchdog_timer(prev, spu); /* Step 4. */ | ||
1789 | inhibit_user_access(prev, spu); /* Step 5. */ | ||
1790 | if (check_spu_isolate(prev, spu)) { /* Step 6. */ | ||
1791 | return 6; | ||
1792 | } | ||
1793 | set_switch_pending(prev, spu); /* Step 7. */ | ||
1794 | save_mfc_cntl(prev, spu); /* Step 8. */ | ||
1795 | save_spu_runcntl(prev, spu); /* Step 9. */ | ||
1796 | save_mfc_sr1(prev, spu); /* Step 10. */ | ||
1797 | save_spu_status(prev, spu); /* Step 11. */ | ||
1798 | save_mfc_decr(prev, spu); /* Step 12. */ | ||
1799 | halt_mfc_decr(prev, spu); /* Step 13. */ | ||
1800 | save_timebase(prev, spu); /* Step 14. */ | ||
1801 | remove_other_spu_access(prev, spu); /* Step 15. */ | ||
1802 | do_mfc_mssync(prev, spu); /* Step 16. */ | ||
1803 | issue_mfc_tlbie(prev, spu); /* Step 17. */ | ||
1804 | handle_pending_interrupts(prev, spu); /* Step 18. */ | ||
1805 | |||
1806 | return 0; | ||
1807 | } | ||
1808 | |||
1809 | static void save_csa(struct spu_state *prev, struct spu *spu) | ||
1810 | { | ||
1811 | /* | ||
1812 |  * Combine steps 19-45 of the SPU context save sequence, which | ||
1813 | * save regions of the privileged & problem state areas. | ||
1814 | */ | ||
1815 | |||
1816 | save_mfc_queues(prev, spu); /* Step 19. */ | ||
1817 | save_ppu_querymask(prev, spu); /* Step 20. */ | ||
1818 | save_ppu_querytype(prev, spu); /* Step 21. */ | ||
1819 | save_mfc_csr_tsq(prev, spu); /* Step 22. */ | ||
1820 | save_mfc_csr_cmd(prev, spu); /* Step 23. */ | ||
1821 | save_mfc_csr_ato(prev, spu); /* Step 24. */ | ||
1822 | save_mfc_tclass_id(prev, spu); /* Step 25. */ | ||
1823 | set_mfc_tclass_id(prev, spu); /* Step 26. */ | ||
1824 | purge_mfc_queue(prev, spu); /* Step 27. */ | ||
1825 | wait_purge_complete(prev, spu); /* Step 28. */ | ||
1826 | save_mfc_slbs(prev, spu); /* Step 29. */ | ||
1827 | setup_mfc_sr1(prev, spu); /* Step 30. */ | ||
1828 | save_spu_npc(prev, spu); /* Step 31. */ | ||
1829 | save_spu_privcntl(prev, spu); /* Step 32. */ | ||
1830 | reset_spu_privcntl(prev, spu); /* Step 33. */ | ||
1831 | save_spu_lslr(prev, spu); /* Step 34. */ | ||
1832 | reset_spu_lslr(prev, spu); /* Step 35. */ | ||
1833 | save_spu_cfg(prev, spu); /* Step 36. */ | ||
1834 | save_pm_trace(prev, spu); /* Step 37. */ | ||
1835 | save_mfc_rag(prev, spu); /* Step 38. */ | ||
1836 | save_ppu_mb_stat(prev, spu); /* Step 39. */ | ||
1837 | save_ppu_mb(prev, spu); /* Step 40. */ | ||
1838 | save_ppuint_mb(prev, spu); /* Step 41. */ | ||
1839 | save_ch_part1(prev, spu); /* Step 42. */ | ||
1840 | save_spu_mb(prev, spu); /* Step 43. */ | ||
1841 | save_mfc_cmd(prev, spu); /* Step 44. */ | ||
1842 | reset_ch(prev, spu); /* Step 45. */ | ||
1843 | } | ||
1844 | |||
1845 | static void save_lscsa(struct spu_state *prev, struct spu *spu) | ||
1846 | { | ||
1847 | /* | ||
1848 | * Perform steps 46-57 of SPU context save sequence, | ||
1849 | * which save regions of the local store and register | ||
1850 | * file. | ||
1851 | */ | ||
1852 | |||
1853 | resume_mfc_queue(prev, spu); /* Step 46. */ | ||
1854 | setup_mfc_slbs(prev, spu); /* Step 47. */ | ||
1855 | set_switch_active(prev, spu); /* Step 48. */ | ||
1856 | enable_interrupts(prev, spu); /* Step 49. */ | ||
1857 | save_ls_16kb(prev, spu); /* Step 50. */ | ||
1858 | set_spu_npc(prev, spu); /* Step 51. */ | ||
1859 | set_signot1(prev, spu); /* Step 52. */ | ||
1860 | set_signot2(prev, spu); /* Step 53. */ | ||
1861 | send_save_code(prev, spu); /* Step 54. */ | ||
1862 | set_ppu_querymask(prev, spu); /* Step 55. */ | ||
1863 | wait_tag_complete(prev, spu); /* Step 56. */ | ||
1864 | wait_spu_stopped(prev, spu); /* Step 57. */ | ||
1865 | } | ||
1866 | |||
1867 | static void harvest(struct spu_state *prev, struct spu *spu) | ||
1868 | { | ||
1869 | /* | ||
1870 | * Perform steps 2-25 of SPU context restore sequence, | ||
1871 | * which resets an SPU either after a failed save, or | ||
1872 | * when using SPU for first time. | ||
1873 | */ | ||
1874 | |||
1875 | disable_interrupts(prev, spu); /* Step 2. */ | ||
1876 | inhibit_user_access(prev, spu); /* Step 3. */ | ||
1877 | terminate_spu_app(prev, spu); /* Step 4. */ | ||
1878 | set_switch_pending(prev, spu); /* Step 5. */ | ||
1879 | remove_other_spu_access(prev, spu); /* Step 6. */ | ||
1880 | suspend_mfc(prev, spu); /* Step 7. */ | ||
1881 | wait_suspend_mfc_complete(prev, spu); /* Step 8. */ | ||
1882 | if (!suspend_spe(prev, spu)) /* Step 9. */ | ||
1883 | clear_spu_status(prev, spu); /* Step 10. */ | ||
1884 | do_mfc_mssync(prev, spu); /* Step 11. */ | ||
1885 | issue_mfc_tlbie(prev, spu); /* Step 12. */ | ||
1886 | handle_pending_interrupts(prev, spu); /* Step 13. */ | ||
1887 | purge_mfc_queue(prev, spu); /* Step 14. */ | ||
1888 | wait_purge_complete(prev, spu); /* Step 15. */ | ||
1889 | reset_spu_privcntl(prev, spu); /* Step 16. */ | ||
1890 | reset_spu_lslr(prev, spu); /* Step 17. */ | ||
1891 | setup_mfc_sr1(prev, spu); /* Step 18. */ | ||
1892 | invalidate_slbs(prev, spu); /* Step 19. */ | ||
1893 | reset_ch_part1(prev, spu); /* Step 20. */ | ||
1894 | reset_ch_part2(prev, spu); /* Step 21. */ | ||
1895 | enable_interrupts(prev, spu); /* Step 22. */ | ||
1896 | set_switch_active(prev, spu); /* Step 23. */ | ||
1897 | set_mfc_tclass_id(prev, spu); /* Step 24. */ | ||
1898 | resume_mfc_queue(prev, spu); /* Step 25. */ | ||
1899 | } | ||
1900 | |||
1901 | static void restore_lscsa(struct spu_state *next, struct spu *spu) | ||
1902 | { | ||
1903 | /* | ||
1904 | * Perform steps 26-40 of SPU context restore sequence, | ||
1905 | * which restores regions of the local store and register | ||
1906 | * file. | ||
1907 | */ | ||
1908 | |||
1909 | set_watchdog_timer(next, spu); /* Step 26. */ | ||
1910 | setup_spu_status_part1(next, spu); /* Step 27. */ | ||
1911 | setup_spu_status_part2(next, spu); /* Step 28. */ | ||
1912 | restore_mfc_rag(next, spu); /* Step 29. */ | ||
1913 | setup_mfc_slbs(next, spu); /* Step 30. */ | ||
1914 | set_spu_npc(next, spu); /* Step 31. */ | ||
1915 | set_signot1(next, spu); /* Step 32. */ | ||
1916 | set_signot2(next, spu); /* Step 33. */ | ||
1917 | setup_decr(next, spu); /* Step 34. */ | ||
1918 | setup_ppu_mb(next, spu); /* Step 35. */ | ||
1919 | setup_ppuint_mb(next, spu); /* Step 36. */ | ||
1920 | send_restore_code(next, spu); /* Step 37. */ | ||
1921 | set_ppu_querymask(next, spu); /* Step 38. */ | ||
1922 | wait_tag_complete(next, spu); /* Step 39. */ | ||
1923 | wait_spu_stopped(next, spu); /* Step 40. */ | ||
1924 | } | ||
1925 | |||
1926 | static void restore_csa(struct spu_state *next, struct spu *spu) | ||
1927 | { | ||
1928 | /* | ||
1929 | * Combine steps 41-76 of SPU context restore sequence, which | ||
1930 | * restore regions of the privileged & problem state areas. | ||
1931 | */ | ||
1932 | |||
1933 | restore_spu_privcntl(next, spu); /* Step 41. */ | ||
1934 | restore_status_part1(next, spu); /* Step 42. */ | ||
1935 | restore_status_part2(next, spu); /* Step 43. */ | ||
1936 | restore_ls_16kb(next, spu); /* Step 44. */ | ||
1937 | wait_tag_complete(next, spu); /* Step 45. */ | ||
1938 | suspend_mfc(next, spu); /* Step 46. */ | ||
1939 | wait_suspend_mfc_complete(next, spu); /* Step 47. */ | ||
1940 | issue_mfc_tlbie(next, spu); /* Step 48. */ | ||
1941 | clear_interrupts(next, spu); /* Step 49. */ | ||
1942 | restore_mfc_queues(next, spu); /* Step 50. */ | ||
1943 | restore_ppu_querymask(next, spu); /* Step 51. */ | ||
1944 | restore_ppu_querytype(next, spu); /* Step 52. */ | ||
1945 | restore_mfc_csr_tsq(next, spu); /* Step 53. */ | ||
1946 | restore_mfc_csr_cmd(next, spu); /* Step 54. */ | ||
1947 | restore_mfc_csr_ato(next, spu); /* Step 55. */ | ||
1948 | restore_mfc_tclass_id(next, spu); /* Step 56. */ | ||
1949 | set_llr_event(next, spu); /* Step 57. */ | ||
1950 | restore_decr_wrapped(next, spu); /* Step 58. */ | ||
1951 | restore_ch_part1(next, spu); /* Step 59. */ | ||
1952 | restore_ch_part2(next, spu); /* Step 60. */ | ||
1953 | restore_spu_lslr(next, spu); /* Step 61. */ | ||
1954 | restore_spu_cfg(next, spu); /* Step 62. */ | ||
1955 | restore_pm_trace(next, spu); /* Step 63. */ | ||
1956 | restore_spu_npc(next, spu); /* Step 64. */ | ||
1957 | restore_spu_mb(next, spu); /* Step 65. */ | ||
1958 | check_ppu_mb_stat(next, spu); /* Step 66. */ | ||
1959 | check_ppuint_mb_stat(next, spu); /* Step 67. */ | ||
1960 | restore_mfc_slbs(next, spu); /* Step 68. */ | ||
1961 | restore_mfc_sr1(next, spu); /* Step 69. */ | ||
1962 | restore_other_spu_access(next, spu); /* Step 70. */ | ||
1963 | restore_spu_runcntl(next, spu); /* Step 71. */ | ||
1964 | restore_mfc_cntl(next, spu); /* Step 72. */ | ||
1965 | enable_user_access(next, spu); /* Step 73. */ | ||
1966 | reset_switch_active(next, spu); /* Step 74. */ | ||
1967 | reenable_interrupts(next, spu); /* Step 75. */ | ||
1968 | } | ||
1969 | |||
1970 | static int __do_spu_save(struct spu_state *prev, struct spu *spu) | ||
1971 | { | ||
1972 | int rc; | ||
1973 | |||
1974 | /* | ||
1975 | * SPU context save can be broken into three phases: | ||
1976 | * | ||
1977 |  * (a) quiesce [steps 2-18]. | ||
1978 |  * (b) save of CSA, performed by PPE [steps 19-45]. | ||
1979 |  * (c) save of LSCSA, mostly performed by SPU [steps 46-57]. | ||
1980 | * | ||
1981 | * Returns 0 on success. | ||
1982 |  * 2 or 6 if it failed to quiesce the SPU. | ||
1983 |  * 1 if the SPU-side of the save failed. | ||
1984 | */ | ||
1985 | |||
1986 | rc = quiece_spu(prev, spu); /* Steps 2-16. */ | ||
1987 | switch (rc) { | ||
1988 | default: | ||
1989 | case 2: | ||
1990 | case 6: | ||
1991 | harvest(prev, spu); | ||
1992 | return rc; | ||
1993 | break; | ||
1994 | case 0: | ||
1995 | break; | ||
1996 | } | ||
1997 | save_csa(prev, spu); /* Steps 19-45. */ | ||
1998 | save_lscsa(prev, spu); /* Steps 46-57. */ | ||
1999 | return check_save_status(prev, spu); /* Step 54. */ | ||
2000 | } | ||
2001 | |||
2002 | static int __do_spu_restore(struct spu_state *next, struct spu *spu) | ||
2003 | { | ||
2004 | int rc; | ||
2005 | |||
2006 | /* | ||
2007 | * SPU context restore can be broken into three phases: | ||
2008 | * | ||
2009 |  * (a) harvest (or reset) SPU [steps 2-25]. | ||
2010 |  * (b) restore LSCSA [steps 26-40], mostly performed by SPU. | ||
2011 | * (c) restore CSA [steps 41-76], performed by PPE. | ||
2012 | * | ||
2013 | * The 'harvest' step is not performed here, but rather | ||
2014 | * as needed below. | ||
2015 | */ | ||
2016 | |||
2017 | restore_lscsa(next, spu); /* Steps 26-40. */ | ||
2018 | rc = check_restore_status(next, spu); /* Step 40. */ | ||
2019 | switch (rc) { | ||
2020 | default: | ||
2021 | /* Failed. Return now. */ | ||
2022 | return rc; | ||
2023 | break; | ||
2024 | case 0: | ||
2025 | /* Fall through to next step. */ | ||
2026 | break; | ||
2027 | } | ||
2028 | restore_csa(next, spu); | ||
2029 | |||
2030 | return 0; | ||
2031 | } | ||
2032 | |||
2033 | /** | ||
2034 | * spu_save - SPU context save, with locking. | ||
2035 | * @prev: pointer to SPU context save area, to be saved. | ||
2036 | * @spu: pointer to SPU iomem structure. | ||
2037 | * | ||
2038 | * Acquire locks, perform the save operation then return. | ||
2039 | */ | ||
2040 | int spu_save(struct spu_state *prev, struct spu *spu) | ||
2041 | { | ||
2042 | int rc; | ||
2043 | |||
2044 | acquire_spu_lock(spu); /* Step 1. */ | ||
2045 | rc = __do_spu_save(prev, spu); /* Steps 2-53. */ | ||
2046 | release_spu_lock(spu); | ||
2047 | if (rc) { | ||
2048 | panic("%s failed on SPU[%d], rc=%d.\n", | ||
2049 | __func__, spu->number, rc); | ||
2050 | } | ||
2051 | return rc; | ||
2052 | } | ||
2053 | |||
2054 | /** | ||
2055 | * spu_restore - SPU context restore, with harvest and locking. | ||
2056 | * @new: pointer to SPU context save area, to be restored. | ||
2057 | * @spu: pointer to SPU iomem structure. | ||
2058 | * | ||
2059 | * Perform harvest + restore, as we may not be coming | ||
2060 |  * from a previous successful save operation, and the | ||
2061 | * hardware state is unknown. | ||
2062 | */ | ||
2063 | int spu_restore(struct spu_state *new, struct spu *spu) | ||
2064 | { | ||
2065 | int rc; | ||
2066 | |||
2067 | acquire_spu_lock(spu); | ||
2068 | harvest(NULL, spu); | ||
2069 | spu->stop_code = 0; | ||
2070 | spu->dar = 0; | ||
2071 | spu->dsisr = 0; | ||
2072 | spu->slb_replace = 0; | ||
2073 | spu->class_0_pending = 0; | ||
2074 | rc = __do_spu_restore(new, spu); | ||
2075 | release_spu_lock(spu); | ||
2076 | if (rc) { | ||
2077 | panic("%s failed on SPU[%d] rc=%d.\n", | ||
2078 | __func__, spu->number, rc); | ||
2079 | } | ||
2080 | return rc; | ||
2081 | } | ||
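
Taken together, spu_save() and spu_restore() are the entry points a scheduler would use to time-slice SPUs. A hypothetical caller sketch (not part of this patch; error handling is omitted since both routines currently panic on failure):

/* Hypothetical caller: how a scheduler might time-slice an SPU
 * using the entry points above.
 */
static void example_spu_context_switch(struct spu_state *prev,
				       struct spu_state *next,
				       struct spu *spu)
{
	spu_save(prev, spu);		/* quiesce, then save CSA and LSCSA */
	spu_restore(next, spu);		/* harvest, then restore LSCSA and CSA */
}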
2082 | |||
2083 | /** | ||
2084 | * spu_harvest - SPU harvest (reset) operation | ||
2085 | * @spu: pointer to SPU iomem structure. | ||
2086 | * | ||
2087 | * Perform SPU harvest (reset) operation. | ||
2088 | */ | ||
2089 | void spu_harvest(struct spu *spu) | ||
2090 | { | ||
2091 | acquire_spu_lock(spu); | ||
2092 | harvest(NULL, spu); | ||
2093 | release_spu_lock(spu); | ||
2094 | } | ||
2095 | |||
2096 | static void init_prob(struct spu_state *csa) | ||
2097 | { | ||
2098 | csa->spu_chnlcnt_RW[9] = 1; | ||
2099 | csa->spu_chnlcnt_RW[21] = 16; | ||
2100 | csa->spu_chnlcnt_RW[23] = 1; | ||
2101 | csa->spu_chnlcnt_RW[28] = 1; | ||
2102 | csa->spu_chnlcnt_RW[30] = 1; | ||
2103 | csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP; | ||
2104 | } | ||
2105 | |||
2106 | static void init_priv1(struct spu_state *csa) | ||
2107 | { | ||
2108 | /* Enable decode, relocate, tlbie response, master runcntl. */ | ||
2109 | csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK | | ||
2110 | MFC_STATE1_MASTER_RUN_CONTROL_MASK | | ||
2111 | MFC_STATE1_PROBLEM_STATE_MASK | | ||
2112 | MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK; | ||
2113 | |||
2114 | /* Set storage description. */ | ||
2115 | csa->priv1.mfc_sdr_RW = mfspr(SPRN_SDR1); | ||
2116 | |||
2117 | /* Enable OS-specific set of interrupts. */ | ||
2118 | csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR | | ||
2119 | CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR | | ||
2120 | CLASS0_ENABLE_SPU_ERROR_INTR; | ||
2121 | csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR | | ||
2122 | CLASS1_ENABLE_STORAGE_FAULT_INTR; | ||
2123 | csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR | | ||
2124 | CLASS2_ENABLE_SPU_HALT_INTR; | ||
2125 | } | ||
2126 | |||
2127 | static void init_priv2(struct spu_state *csa) | ||
2128 | { | ||
2129 | csa->priv2.spu_lslr_RW = LS_ADDR_MASK; | ||
2130 | csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE | | ||
2131 | MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION | | ||
2132 | MFC_CNTL_DMA_QUEUES_EMPTY_MASK; | ||
2133 | } | ||
2134 | |||
2135 | /** | ||
2136 |  * spu_init_csa - allocate and initialize an SPU context save area. | ||
2137 | * | ||
2138 | * Allocate and initialize the contents of an SPU context save area. | ||
2139 | * This includes enabling address translation, interrupt masks, etc., | ||
2140 | * as appropriate for the given OS environment. | ||
2141 | * | ||
2142 | * Note that storage for the 'lscsa' is allocated separately, | ||
2143 | * as it is by far the largest of the context save regions, | ||
2144 | * and may need to be pinned or otherwise specially aligned. | ||
2145 | */ | ||
2146 | void spu_init_csa(struct spu_state *csa) | ||
2147 | { | ||
2148 | struct spu_lscsa *lscsa; | ||
2149 | unsigned char *p; | ||
2150 | |||
2151 | if (!csa) | ||
2152 | return; | ||
2153 | memset(csa, 0, sizeof(struct spu_state)); | ||
2154 | |||
2155 | lscsa = vmalloc(sizeof(struct spu_lscsa)); | ||
2156 | if (!lscsa) | ||
2157 | return; | ||
2158 | |||
2159 | memset(lscsa, 0, sizeof(struct spu_lscsa)); | ||
2160 | csa->lscsa = lscsa; | ||
2161 | csa->register_lock = SPIN_LOCK_UNLOCKED; | ||
2162 | |||
2163 | /* Set LS pages reserved to allow for user-space mapping. */ | ||
2164 | for (p = lscsa->ls; p < lscsa->ls + LS_SIZE; p += PAGE_SIZE) | ||
2165 | SetPageReserved(vmalloc_to_page(p)); | ||
2166 | |||
2167 | init_prob(csa); | ||
2168 | init_priv1(csa); | ||
2169 | init_priv2(csa); | ||
2170 | } | ||
2171 | |||
2172 | void spu_fini_csa(struct spu_state *csa) | ||
2173 | { | ||
2174 | /* Clear reserved bit before vfree. */ | ||
2175 | unsigned char *p; | ||
2176 | for (p = csa->lscsa->ls; p < csa->lscsa->ls + LS_SIZE; p += PAGE_SIZE) | ||
2177 | ClearPageReserved(vmalloc_to_page(p)); | ||
2178 | |||
2179 | vfree(csa->lscsa); | ||
2180 | } | ||
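
spu_init_csa() and spu_fini_csa() bracket the lifetime of a context save area: the spu_state itself is owned by the caller, while the large LSCSA is vmalloc'd and page-reserved internally. A hypothetical lifecycle sketch, assuming the spufs headers for struct spu_state and the CSA helpers (the wrapper names are illustrative, not part of this patch):

#include <linux/slab.h>
#include <linux/string.h>

#include "spufs.h"	/* struct spu_state, spu_init_csa(), spu_fini_csa() */

struct example_ctx {
	struct spu_state csa;
	/* ... other per-context state ... */
};

static struct example_ctx *example_ctx_create(void)
{
	struct example_ctx *ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);

	if (!ctx)
		return NULL;
	memset(ctx, 0, sizeof(*ctx));
	spu_init_csa(&ctx->csa);	/* vmalloc + reserve the LSCSA pages */
	return ctx;
}

static void example_ctx_destroy(struct example_ctx *ctx)
{
	spu_fini_csa(&ctx->csa);	/* unreserve LS pages and vfree the LSCSA */
	kfree(ctx);
}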
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c new file mode 100644 index 000000000000..d549aa7ebea6 --- /dev/null +++ b/arch/powerpc/platforms/cell/spufs/syscalls.c | |||
@@ -0,0 +1,101 @@ | |||
1 | #include <linux/file.h> | ||
2 | #include <linux/fs.h> | ||
3 | #include <linux/module.h> | ||
4 | #include <linux/mount.h> | ||
5 | #include <linux/namei.h> | ||
6 | |||
7 | #include <asm/uaccess.h> | ||
8 | |||
9 | #include "spufs.h" | ||
10 | |||
11 | /** | ||
12 | * sys_spu_run - run code loaded into an SPU | ||
13 | * | ||
14 | * @unpc: next program counter for the SPU | ||
15 | * @ustatus: status of the SPU | ||
16 | * | ||
17 | * This system call transfers control of execution of a | ||
18 | * user space thread to an SPU. It returns when the SPU | ||
19 | * has finished executing or hits an error condition, and | ||
20 | * it is interrupted if a signal needs to be delivered to | ||
21 | * a handler in user space. | ||
22 | * | ||
23 | * The next program counter is set to the passed value | ||
24 | * before the SPU starts fetching code and the user space | ||
25 | * pointer gets updated with the new value when returning | ||
26 | * from kernel space. | ||
27 | * | ||
28 | * The status value returned from spu_run reflects the | ||
29 | * value of the spu_status register after the SPU has stopped. | ||
30 | * | ||
31 | */ | ||
32 | long do_spu_run(struct file *filp, __u32 __user *unpc, __u32 __user *ustatus) | ||
33 | { | ||
34 | long ret; | ||
35 | struct spufs_inode_info *i; | ||
36 | u32 npc, status; | ||
37 | |||
38 | ret = -EFAULT; | ||
39 | if (get_user(npc, unpc) || get_user(status, ustatus)) | ||
40 | goto out; | ||
41 | |||
42 | /* check if this file was created by spu_create */ | ||
43 | ret = -EINVAL; | ||
44 | if (filp->f_op != &spufs_context_fops) | ||
45 | goto out; | ||
46 | |||
47 | i = SPUFS_I(filp->f_dentry->d_inode); | ||
48 | ret = spufs_run_spu(filp, i->i_ctx, &npc, &status); | ||
49 | |||
50 | if (put_user(npc, unpc) || put_user(status, ustatus)) | ||
51 | ret = -EFAULT; | ||
52 | out: | ||
53 | return ret; | ||
54 | } | ||
55 | |||
56 | #ifndef MODULE | ||
57 | asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus) | ||
58 | { | ||
59 | int fput_needed; | ||
60 | struct file *filp; | ||
61 | long ret; | ||
62 | |||
63 | ret = -EBADF; | ||
64 | filp = fget_light(fd, &fput_needed); | ||
65 | if (filp) { | ||
66 | ret = do_spu_run(filp, unpc, ustatus); | ||
67 | fput_light(filp, fput_needed); | ||
68 | } | ||
69 | |||
70 | return ret; | ||
71 | } | ||
72 | #endif | ||
73 | |||
74 | asmlinkage long sys_spu_create(const char __user *pathname, | ||
75 | unsigned int flags, mode_t mode) | ||
76 | { | ||
77 | char *tmp; | ||
78 | int ret; | ||
79 | |||
80 | tmp = getname(pathname); | ||
81 | ret = PTR_ERR(tmp); | ||
82 | if (!IS_ERR(tmp)) { | ||
83 | struct nameidata nd; | ||
84 | |||
85 | ret = path_lookup(tmp, LOOKUP_PARENT| | ||
86 | LOOKUP_OPEN|LOOKUP_CREATE, &nd); | ||
87 | if (!ret) { | ||
88 | ret = spufs_create_thread(&nd, flags, mode); | ||
89 | path_release(&nd); | ||
90 | } | ||
91 | putname(tmp); | ||
92 | } | ||
93 | |||
94 | return ret; | ||
95 | } | ||
96 | |||
97 | struct spufs_calls spufs_calls = { | ||
98 | .create_thread = sys_spu_create, | ||
99 | .spu_run = do_spu_run, | ||
100 | .owner = THIS_MODULE, | ||
101 | }; | ||
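Taken together, sys_spu_create() and sys_spu_run() imply a user-space calling sequence along the following lines. This is only a sketch: the spufs mount point, the syscall numbers and the step that loads code into the SPU local store are assumptions, not part of this patch.

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef __NR_spu_run
#define __NR_spu_run	278	/* powerpc number, assumed */
#endif
#ifndef __NR_spu_create
#define __NR_spu_create	279	/* powerpc number, assumed */
#endif

int main(void)
{
	unsigned int npc = 0, status = 0;
	/* Create an SPU context; "/spu" as the spufs mount point is assumed. */
	int fd = syscall(__NR_spu_create, "/spu/myctx", 0, 0755);

	if (fd < 0) {
		perror("spu_create");
		return 1;
	}

	/* ... load the SPU program through the context's files here ... */

	/* Hand control to the SPU; npc and status are written back on return. */
	if (syscall(__NR_spu_run, fd, &npc, &status) < 0)
		perror("spu_run");
	else
		printf("SPU stopped, npc=0x%x status=0x%x\n", npc, status);

	close(fd);
	return 0;
}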
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c index dda5f2c72c25..4ec8ba737e7d 100644 --- a/arch/powerpc/platforms/chrp/setup.c +++ b/arch/powerpc/platforms/chrp/setup.c | |||
@@ -49,7 +49,6 @@ | |||
49 | #include <asm/hydra.h> | 49 | #include <asm/hydra.h> |
50 | #include <asm/sections.h> | 50 | #include <asm/sections.h> |
51 | #include <asm/time.h> | 51 | #include <asm/time.h> |
52 | #include <asm/btext.h> | ||
53 | #include <asm/i8259.h> | 52 | #include <asm/i8259.h> |
54 | #include <asm/mpic.h> | 53 | #include <asm/mpic.h> |
55 | #include <asm/rtas.h> | 54 | #include <asm/rtas.h> |
@@ -58,7 +57,6 @@ | |||
58 | #include "chrp.h" | 57 | #include "chrp.h" |
59 | 58 | ||
60 | void rtas_indicator_progress(char *, unsigned short); | 59 | void rtas_indicator_progress(char *, unsigned short); |
61 | void btext_progress(char *, unsigned short); | ||
62 | 60 | ||
63 | int _chrp_type; | 61 | int _chrp_type; |
64 | EXPORT_SYMBOL(_chrp_type); | 62 | EXPORT_SYMBOL(_chrp_type); |
@@ -264,11 +262,6 @@ void __init chrp_setup_arch(void) | |||
264 | ppc_md.set_rtc_time = rtas_set_rtc_time; | 262 | ppc_md.set_rtc_time = rtas_set_rtc_time; |
265 | } | 263 | } |
266 | 264 | ||
267 | #ifdef CONFIG_BOOTX_TEXT | ||
268 | if (ppc_md.progress == NULL && boot_text_mapped) | ||
269 | ppc_md.progress = btext_progress; | ||
270 | #endif | ||
271 | |||
272 | #ifdef CONFIG_BLK_DEV_INITRD | 265 | #ifdef CONFIG_BLK_DEV_INITRD |
273 | /* this is fine for chrp */ | 266 | /* this is fine for chrp */ |
274 | initrd_below_start_ok = 1; | 267 | initrd_below_start_ok = 1; |
@@ -522,12 +515,3 @@ void __init chrp_init(void) | |||
522 | smp_ops = &chrp_smp_ops; | 515 | smp_ops = &chrp_smp_ops; |
523 | #endif /* CONFIG_SMP */ | 516 | #endif /* CONFIG_SMP */ |
524 | } | 517 | } |
525 | |||
526 | #ifdef CONFIG_BOOTX_TEXT | ||
527 | void | ||
528 | btext_progress(char *s, unsigned short hex) | ||
529 | { | ||
530 | btext_drawstring(s); | ||
531 | btext_drawstring("\n"); | ||
532 | } | ||
533 | #endif /* CONFIG_BOOTX_TEXT */ | ||
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c index a58daa153686..42e978e4897a 100644 --- a/arch/powerpc/platforms/iseries/irq.c +++ b/arch/powerpc/platforms/iseries/irq.c | |||
@@ -35,161 +35,138 @@ | |||
35 | #include <linux/irq.h> | 35 | #include <linux/irq.h> |
36 | #include <linux/spinlock.h> | 36 | #include <linux/spinlock.h> |
37 | 37 | ||
38 | #include <asm/paca.h> | ||
38 | #include <asm/iseries/hv_types.h> | 39 | #include <asm/iseries/hv_types.h> |
39 | #include <asm/iseries/hv_lp_event.h> | 40 | #include <asm/iseries/hv_lp_event.h> |
40 | #include <asm/iseries/hv_call_xm.h> | 41 | #include <asm/iseries/hv_call_xm.h> |
42 | #include <asm/iseries/it_lp_queue.h> | ||
41 | 43 | ||
42 | #include "irq.h" | 44 | #include "irq.h" |
43 | #include "call_pci.h" | 45 | #include "call_pci.h" |
44 | 46 | ||
45 | static long Pci_Interrupt_Count; | 47 | #if defined(CONFIG_SMP) |
46 | static long Pci_Event_Count; | 48 | extern void iSeries_smp_message_recv(struct pt_regs *); |
47 | 49 | #endif | |
48 | enum XmPciLpEvent_Subtype { | ||
49 | XmPciLpEvent_BusCreated = 0, // PHB has been created | ||
50 | XmPciLpEvent_BusError = 1, // PHB has failed | ||
51 | XmPciLpEvent_BusFailed = 2, // Msg to Secondary, Primary failed bus | ||
52 | XmPciLpEvent_NodeFailed = 4, // Multi-adapter bridge has failed | ||
53 | XmPciLpEvent_NodeRecovered = 5, // Multi-adapter bridge has recovered | ||
54 | XmPciLpEvent_BusRecovered = 12, // PHB has been recovered | ||
55 | XmPciLpEvent_UnQuiesceBus = 18, // Secondary bus unqiescing | ||
56 | XmPciLpEvent_BridgeError = 21, // Bridge Error | ||
57 | XmPciLpEvent_SlotInterrupt = 22 // Slot interrupt | ||
58 | }; | ||
59 | |||
60 | struct XmPciLpEvent_BusInterrupt { | ||
61 | HvBusNumber busNumber; | ||
62 | HvSubBusNumber subBusNumber; | ||
63 | }; | ||
64 | 50 | ||
65 | struct XmPciLpEvent_NodeInterrupt { | 51 | enum pci_event_type { |
66 | HvBusNumber busNumber; | 52 | pe_bus_created = 0, /* PHB has been created */ |
67 | HvSubBusNumber subBusNumber; | 53 | pe_bus_error = 1, /* PHB has failed */ |
68 | HvAgentId deviceId; | 54 | pe_bus_failed = 2, /* Msg to Secondary, Primary failed bus */ |
55 | pe_node_failed = 4, /* Multi-adapter bridge has failed */ | ||
56 | pe_node_recovered = 5, /* Multi-adapter bridge has recovered */ | ||
57 | pe_bus_recovered = 12, /* PHB has been recovered */ | ||
58 | pe_unquiese_bus = 18, /* Secondary bus unquiescing */ | ||
59 | pe_bridge_error = 21, /* Bridge Error */ | ||
60 | pe_slot_interrupt = 22 /* Slot interrupt */ | ||
69 | }; | 61 | }; |
70 | 62 | ||
71 | struct XmPciLpEvent { | 63 | struct pci_event { |
72 | struct HvLpEvent hvLpEvent; | 64 | struct HvLpEvent event; |
73 | |||
74 | union { | 65 | union { |
75 | u64 alignData; // Align on an 8-byte boundary | 66 | u64 __align; /* Align on an 8-byte boundary */ |
76 | |||
77 | struct { | 67 | struct { |
78 | u32 fisr; | 68 | u32 fisr; |
79 | HvBusNumber busNumber; | 69 | HvBusNumber bus_number; |
80 | HvSubBusNumber subBusNumber; | 70 | HvSubBusNumber sub_bus_number; |
81 | HvAgentId deviceId; | 71 | HvAgentId dev_id; |
82 | } slotInterrupt; | 72 | } slot; |
83 | 73 | struct { | |
84 | struct XmPciLpEvent_BusInterrupt busFailed; | 74 | HvBusNumber bus_number; |
85 | struct XmPciLpEvent_BusInterrupt busRecovered; | 75 | HvSubBusNumber sub_bus_number; |
86 | struct XmPciLpEvent_BusInterrupt busCreated; | 76 | } bus; |
87 | 77 | struct { | |
88 | struct XmPciLpEvent_NodeInterrupt nodeFailed; | 78 | HvBusNumber bus_number; |
89 | struct XmPciLpEvent_NodeInterrupt nodeRecovered; | 79 | HvSubBusNumber sub_bus_number; |
90 | 80 | HvAgentId dev_id; | |
91 | } eventData; | 81 | } node; |
92 | 82 | } data; | |
93 | }; | 83 | }; |
94 | 84 | ||
95 | static void intReceived(struct XmPciLpEvent *eventParm, | 85 | static DEFINE_SPINLOCK(pending_irqs_lock); |
96 | struct pt_regs *regsParm) | 86 | static int num_pending_irqs; |
87 | static int pending_irqs[NR_IRQS]; | ||
88 | |||
89 | static void int_received(struct pci_event *event, struct pt_regs *regs) | ||
97 | { | 90 | { |
98 | int irq; | 91 | int irq; |
99 | #ifdef CONFIG_IRQSTACKS | ||
100 | struct thread_info *curtp, *irqtp; | ||
101 | #endif | ||
102 | 92 | ||
103 | ++Pci_Interrupt_Count; | 93 | switch (event->event.xSubtype) { |
104 | 94 | case pe_slot_interrupt: | |
105 | switch (eventParm->hvLpEvent.xSubtype) { | 95 | irq = event->event.xCorrelationToken; |
106 | case XmPciLpEvent_SlotInterrupt: | 96 | if (irq < NR_IRQS) { |
107 | irq = eventParm->hvLpEvent.xCorrelationToken; | 97 | spin_lock(&pending_irqs_lock); |
108 | /* Dispatch the interrupt handlers for this irq */ | 98 | pending_irqs[irq]++; |
109 | #ifdef CONFIG_IRQSTACKS | 99 | num_pending_irqs++; |
110 | /* Switch to the irq stack to handle this */ | 100 | spin_unlock(&pending_irqs_lock); |
111 | curtp = current_thread_info(); | 101 | } else { |
112 | irqtp = hardirq_ctx[smp_processor_id()]; | 102 | printk(KERN_WARNING "int_received: bad irq number %d\n", |
113 | if (curtp != irqtp) { | 103 | irq); |
114 | irqtp->task = curtp->task; | 104 | HvCallPci_eoi(event->data.slot.bus_number, |
115 | irqtp->flags = 0; | 105 | event->data.slot.sub_bus_number, |
116 | call___do_IRQ(irq, regsParm, irqtp); | 106 | event->data.slot.dev_id); |
117 | irqtp->task = NULL; | 107 | } |
118 | if (irqtp->flags) | ||
119 | set_bits(irqtp->flags, &curtp->flags); | ||
120 | } else | ||
121 | #endif | ||
122 | __do_IRQ(irq, regsParm); | ||
123 | HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber, | ||
124 | eventParm->eventData.slotInterrupt.subBusNumber, | ||
125 | eventParm->eventData.slotInterrupt.deviceId); | ||
126 | break; | 108 | break; |
127 | /* Ignore error recovery events for now */ | 109 | /* Ignore error recovery events for now */ |
128 | case XmPciLpEvent_BusCreated: | 110 | case pe_bus_created: |
129 | printk(KERN_INFO "intReceived: system bus %d created\n", | 111 | printk(KERN_INFO "int_received: system bus %d created\n", |
130 | eventParm->eventData.busCreated.busNumber); | 112 | event->data.bus.bus_number); |
131 | break; | 113 | break; |
132 | case XmPciLpEvent_BusError: | 114 | case pe_bus_error: |
133 | case XmPciLpEvent_BusFailed: | 115 | case pe_bus_failed: |
134 | printk(KERN_INFO "intReceived: system bus %d failed\n", | 116 | printk(KERN_INFO "int_received: system bus %d failed\n", |
135 | eventParm->eventData.busFailed.busNumber); | 117 | event->data.bus.bus_number); |
136 | break; | 118 | break; |
137 | case XmPciLpEvent_BusRecovered: | 119 | case pe_bus_recovered: |
138 | case XmPciLpEvent_UnQuiesceBus: | 120 | case pe_unquiese_bus: |
139 | printk(KERN_INFO "intReceived: system bus %d recovered\n", | 121 | printk(KERN_INFO "int_received: system bus %d recovered\n", |
140 | eventParm->eventData.busRecovered.busNumber); | 122 | event->data.bus.bus_number); |
141 | break; | 123 | break; |
142 | case XmPciLpEvent_NodeFailed: | 124 | case pe_node_failed: |
143 | case XmPciLpEvent_BridgeError: | 125 | case pe_bridge_error: |
144 | printk(KERN_INFO | 126 | printk(KERN_INFO |
145 | "intReceived: multi-adapter bridge %d/%d/%d failed\n", | 127 | "int_received: multi-adapter bridge %d/%d/%d failed\n", |
146 | eventParm->eventData.nodeFailed.busNumber, | 128 | event->data.node.bus_number, |
147 | eventParm->eventData.nodeFailed.subBusNumber, | 129 | event->data.node.sub_bus_number, |
148 | eventParm->eventData.nodeFailed.deviceId); | 130 | event->data.node.dev_id); |
149 | break; | 131 | break; |
150 | case XmPciLpEvent_NodeRecovered: | 132 | case pe_node_recovered: |
151 | printk(KERN_INFO | 133 | printk(KERN_INFO |
152 | "intReceived: multi-adapter bridge %d/%d/%d recovered\n", | 134 | "int_received: multi-adapter bridge %d/%d/%d recovered\n", |
153 | eventParm->eventData.nodeRecovered.busNumber, | 135 | event->data.node.bus_number, |
154 | eventParm->eventData.nodeRecovered.subBusNumber, | 136 | event->data.node.sub_bus_number, |
155 | eventParm->eventData.nodeRecovered.deviceId); | 137 | event->data.node.dev_id); |
156 | break; | 138 | break; |
157 | default: | 139 | default: |
158 | printk(KERN_ERR | 140 | printk(KERN_ERR |
159 | "intReceived: unrecognized event subtype 0x%x\n", | 141 | "int_received: unrecognized event subtype 0x%x\n", |
160 | eventParm->hvLpEvent.xSubtype); | 142 | event->event.xSubtype); |
161 | break; | 143 | break; |
162 | } | 144 | } |
163 | } | 145 | } |
164 | 146 | ||
165 | static void XmPciLpEvent_handler(struct HvLpEvent *eventParm, | 147 | static void pci_event_handler(struct HvLpEvent *event, struct pt_regs *regs) |
166 | struct pt_regs *regsParm) | ||
167 | { | 148 | { |
168 | #ifdef CONFIG_PCI | 149 | if (event && (event->xType == HvLpEvent_Type_PciIo)) { |
169 | ++Pci_Event_Count; | 150 | switch (event->xFlags.xFunction) { |
170 | |||
171 | if (eventParm && (eventParm->xType == HvLpEvent_Type_PciIo)) { | ||
172 | switch (eventParm->xFlags.xFunction) { | ||
173 | case HvLpEvent_Function_Int: | 151 | case HvLpEvent_Function_Int: |
174 | intReceived((struct XmPciLpEvent *)eventParm, regsParm); | 152 | int_received((struct pci_event *)event, regs); |
175 | break; | 153 | break; |
176 | case HvLpEvent_Function_Ack: | 154 | case HvLpEvent_Function_Ack: |
177 | printk(KERN_ERR | 155 | printk(KERN_ERR |
178 | "XmPciLpEvent_handler: unexpected ack received\n"); | 156 | "pci_event_handler: unexpected ack received\n"); |
179 | break; | 157 | break; |
180 | default: | 158 | default: |
181 | printk(KERN_ERR | 159 | printk(KERN_ERR |
182 | "XmPciLpEvent_handler: unexpected event function %d\n", | 160 | "pci_event_handler: unexpected event function %d\n", |
183 | (int)eventParm->xFlags.xFunction); | 161 | (int)event->xFlags.xFunction); |
184 | break; | 162 | break; |
185 | } | 163 | } |
186 | } else if (eventParm) | 164 | } else if (event) |
187 | printk(KERN_ERR | 165 | printk(KERN_ERR |
188 | "XmPciLpEvent_handler: Unrecognized PCI event type 0x%x\n", | 166 | "pci_event_handler: Unrecognized PCI event type 0x%x\n", |
189 | (int)eventParm->xType); | 167 | (int)event->xType); |
190 | else | 168 | else |
191 | printk(KERN_ERR "XmPciLpEvent_handler: NULL event received\n"); | 169 | printk(KERN_ERR "pci_event_handler: NULL event received\n"); |
192 | #endif | ||
193 | } | 170 | } |
194 | 171 | ||
195 | /* | 172 | /* |
@@ -199,20 +176,21 @@ static void XmPciLpEvent_handler(struct HvLpEvent *eventParm, | |||
199 | void __init iSeries_init_IRQ(void) | 176 | void __init iSeries_init_IRQ(void) |
200 | { | 177 | { |
201 | /* Register PCI event handler and open an event path */ | 178 | /* Register PCI event handler and open an event path */ |
202 | int xRc; | 179 | int ret; |
203 | 180 | ||
204 | xRc = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, | 181 | ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo, |
205 | &XmPciLpEvent_handler); | 182 | &pci_event_handler); |
206 | if (xRc == 0) { | 183 | if (ret == 0) { |
207 | xRc = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); | 184 | ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0); |
208 | if (xRc != 0) | 185 | if (ret != 0) |
209 | printk(KERN_ERR "iSeries_init_IRQ: open event path " | 186 | printk(KERN_ERR "iseries_init_IRQ: open event path " |
210 | "failed with rc 0x%x\n", xRc); | 187 | "failed with rc 0x%x\n", ret); |
211 | } else | 188 | } else |
212 | printk(KERN_ERR "iSeries_init_IRQ: register handler " | 189 | printk(KERN_ERR "iseries_init_IRQ: register handler " |
213 | "failed with rc 0x%x\n", xRc); | 190 | "failed with rc 0x%x\n", ret); |
214 | } | 191 | } |
215 | 192 | ||
193 | #define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff) | ||
216 | #define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1) | 194 | #define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1) |
217 | #define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1) | 195 | #define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1) |
218 | #define REAL_IRQ_TO_FUNC(irq) ((irq) & 7) | 196 | #define REAL_IRQ_TO_FUNC(irq) ((irq) & 7) |
@@ -221,40 +199,40 @@ void __init iSeries_init_IRQ(void) | |||
221 | * This will be called by device drivers (via enable_IRQ) | 199 | * This will be called by device drivers (via enable_IRQ) |
222 | * to enable INTA in the bridge interrupt status register. | 200 | * to enable INTA in the bridge interrupt status register. |
223 | */ | 201 | */ |
224 | static void iSeries_enable_IRQ(unsigned int irq) | 202 | static void iseries_enable_IRQ(unsigned int irq) |
225 | { | 203 | { |
226 | u32 bus, deviceId, function, mask; | 204 | u32 bus, dev_id, function, mask; |
227 | const u32 subBus = 0; | 205 | const u32 sub_bus = 0; |
228 | unsigned int rirq = virt_irq_to_real_map[irq]; | 206 | unsigned int rirq = virt_irq_to_real_map[irq]; |
229 | 207 | ||
230 | /* The IRQ has already been locked by the caller */ | 208 | /* The IRQ has already been locked by the caller */ |
231 | bus = REAL_IRQ_TO_BUS(rirq); | 209 | bus = REAL_IRQ_TO_BUS(rirq); |
232 | function = REAL_IRQ_TO_FUNC(rirq); | 210 | function = REAL_IRQ_TO_FUNC(rirq); |
233 | deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; | 211 | dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; |
234 | 212 | ||
235 | /* Unmask secondary INTA */ | 213 | /* Unmask secondary INTA */ |
236 | mask = 0x80000000; | 214 | mask = 0x80000000; |
237 | HvCallPci_unmaskInterrupts(bus, subBus, deviceId, mask); | 215 | HvCallPci_unmaskInterrupts(bus, sub_bus, dev_id, mask); |
238 | } | 216 | } |
239 | 217 | ||
240 | /* This is called by iSeries_activate_IRQs */ | 218 | /* This is called by iseries_activate_IRQs */ |
241 | static unsigned int iSeries_startup_IRQ(unsigned int irq) | 219 | static unsigned int iseries_startup_IRQ(unsigned int irq) |
242 | { | 220 | { |
243 | u32 bus, deviceId, function, mask; | 221 | u32 bus, dev_id, function, mask; |
244 | const u32 subBus = 0; | 222 | const u32 sub_bus = 0; |
245 | unsigned int rirq = virt_irq_to_real_map[irq]; | 223 | unsigned int rirq = virt_irq_to_real_map[irq]; |
246 | 224 | ||
247 | bus = REAL_IRQ_TO_BUS(rirq); | 225 | bus = REAL_IRQ_TO_BUS(rirq); |
248 | function = REAL_IRQ_TO_FUNC(rirq); | 226 | function = REAL_IRQ_TO_FUNC(rirq); |
249 | deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; | 227 | dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; |
250 | 228 | ||
251 | /* Link the IRQ number to the bridge */ | 229 | /* Link the IRQ number to the bridge */ |
252 | HvCallXm_connectBusUnit(bus, subBus, deviceId, irq); | 230 | HvCallXm_connectBusUnit(bus, sub_bus, dev_id, irq); |
253 | 231 | ||
254 | /* Unmask bridge interrupts in the FISR */ | 232 | /* Unmask bridge interrupts in the FISR */ |
255 | mask = 0x01010000 << function; | 233 | mask = 0x01010000 << function; |
256 | HvCallPci_unmaskFisr(bus, subBus, deviceId, mask); | 234 | HvCallPci_unmaskFisr(bus, sub_bus, dev_id, mask); |
257 | iSeries_enable_IRQ(irq); | 235 | iseries_enable_IRQ(irq); |
258 | return 0; | 236 | return 0; |
259 | } | 237 | } |
260 | 238 | ||
@@ -279,78 +257,115 @@ void __init iSeries_activate_IRQs() | |||
279 | } | 257 | } |
280 | 258 | ||
281 | /* this is not called anywhere currently */ | 259 | /* this is not called anywhere currently */ |
282 | static void iSeries_shutdown_IRQ(unsigned int irq) | 260 | static void iseries_shutdown_IRQ(unsigned int irq) |
283 | { | 261 | { |
284 | u32 bus, deviceId, function, mask; | 262 | u32 bus, dev_id, function, mask; |
285 | const u32 subBus = 0; | 263 | const u32 sub_bus = 0; |
286 | unsigned int rirq = virt_irq_to_real_map[irq]; | 264 | unsigned int rirq = virt_irq_to_real_map[irq]; |
287 | 265 | ||
288 | /* irq should be locked by the caller */ | 266 | /* irq should be locked by the caller */ |
289 | bus = REAL_IRQ_TO_BUS(rirq); | 267 | bus = REAL_IRQ_TO_BUS(rirq); |
290 | function = REAL_IRQ_TO_FUNC(rirq); | 268 | function = REAL_IRQ_TO_FUNC(rirq); |
291 | deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; | 269 | dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; |
292 | 270 | ||
293 | /* Invalidate the IRQ number in the bridge */ | 271 | /* Invalidate the IRQ number in the bridge */ |
294 | HvCallXm_connectBusUnit(bus, subBus, deviceId, 0); | 272 | HvCallXm_connectBusUnit(bus, sub_bus, dev_id, 0); |
295 | 273 | ||
296 | /* Mask bridge interrupts in the FISR */ | 274 | /* Mask bridge interrupts in the FISR */ |
297 | mask = 0x01010000 << function; | 275 | mask = 0x01010000 << function; |
298 | HvCallPci_maskFisr(bus, subBus, deviceId, mask); | 276 | HvCallPci_maskFisr(bus, sub_bus, dev_id, mask); |
299 | } | 277 | } |
300 | 278 | ||
301 | /* | 279 | /* |
302 | * This will be called by device drivers (via disable_IRQ) | 280 | * This will be called by device drivers (via disable_IRQ) |
303 | * to disable INTA in the bridge interrupt status register. | 281 | * to disable INTA in the bridge interrupt status register. |
304 | */ | 282 | */ |
305 | static void iSeries_disable_IRQ(unsigned int irq) | 283 | static void iseries_disable_IRQ(unsigned int irq) |
306 | { | 284 | { |
307 | u32 bus, deviceId, function, mask; | 285 | u32 bus, dev_id, function, mask; |
308 | const u32 subBus = 0; | 286 | const u32 sub_bus = 0; |
309 | unsigned int rirq = virt_irq_to_real_map[irq]; | 287 | unsigned int rirq = virt_irq_to_real_map[irq]; |
310 | 288 | ||
311 | /* The IRQ has already been locked by the caller */ | 289 | /* The IRQ has already been locked by the caller */ |
312 | bus = REAL_IRQ_TO_BUS(rirq); | 290 | bus = REAL_IRQ_TO_BUS(rirq); |
313 | function = REAL_IRQ_TO_FUNC(rirq); | 291 | function = REAL_IRQ_TO_FUNC(rirq); |
314 | deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; | 292 | dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function; |
315 | 293 | ||
316 | /* Mask secondary INTA */ | 294 | /* Mask secondary INTA */ |
317 | mask = 0x80000000; | 295 | mask = 0x80000000; |
318 | HvCallPci_maskInterrupts(bus, subBus, deviceId, mask); | 296 | HvCallPci_maskInterrupts(bus, sub_bus, dev_id, mask); |
319 | } | 297 | } |
320 | 298 | ||
321 | /* | 299 | static void iseries_end_IRQ(unsigned int irq) |
322 | * This does nothing because there is not enough information | ||
323 | * provided to do the EOI HvCall. This is done by XmPciLpEvent.c | ||
324 | */ | ||
325 | static void iSeries_end_IRQ(unsigned int irq) | ||
326 | { | 300 | { |
301 | unsigned int rirq = virt_irq_to_real_map[irq]; | ||
302 | |||
303 | HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq), | ||
304 | (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq)); | ||
327 | } | 305 | } |
328 | 306 | ||
329 | static hw_irq_controller iSeries_IRQ_handler = { | 307 | static hw_irq_controller iSeries_IRQ_handler = { |
330 | .typename = "iSeries irq controller", | 308 | .typename = "iSeries irq controller", |
331 | .startup = iSeries_startup_IRQ, | 309 | .startup = iseries_startup_IRQ, |
332 | .shutdown = iSeries_shutdown_IRQ, | 310 | .shutdown = iseries_shutdown_IRQ, |
333 | .enable = iSeries_enable_IRQ, | 311 | .enable = iseries_enable_IRQ, |
334 | .disable = iSeries_disable_IRQ, | 312 | .disable = iseries_disable_IRQ, |
335 | .end = iSeries_end_IRQ | 313 | .end = iseries_end_IRQ |
336 | }; | 314 | }; |
337 | 315 | ||
338 | /* | 316 | /* |
339 | * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot | 317 | * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot |
340 | * It calculates the irq value for the slot. | 318 | * It calculates the irq value for the slot. |
341 | * Note that subBusNumber is always 0 (at the moment at least). | 319 | * Note that sub_bus is always 0 (at the moment at least). |
342 | */ | 320 | */ |
343 | int __init iSeries_allocate_IRQ(HvBusNumber busNumber, | 321 | int __init iSeries_allocate_IRQ(HvBusNumber bus, |
344 | HvSubBusNumber subBusNumber, HvAgentId deviceId) | 322 | HvSubBusNumber sub_bus, HvAgentId dev_id) |
345 | { | 323 | { |
346 | int virtirq; | 324 | int virtirq; |
347 | unsigned int realirq; | 325 | unsigned int realirq; |
348 | u8 idsel = (deviceId >> 4); | 326 | u8 idsel = (dev_id >> 4); |
349 | u8 function = deviceId & 7; | 327 | u8 function = dev_id & 7; |
350 | 328 | ||
351 | realirq = ((busNumber - 1) << 6) + ((idsel - 1) << 3) + function; | 329 | realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3) |
330 | + function; | ||
352 | virtirq = virt_irq_create_mapping(realirq); | 331 | virtirq = virt_irq_create_mapping(realirq); |
353 | 332 | ||
354 | irq_desc[virtirq].handler = &iSeries_IRQ_handler; | 333 | irq_desc[virtirq].handler = &iSeries_IRQ_handler; |
355 | return virtirq; | 334 | return virtirq; |
356 | } | 335 | } |
336 | |||
337 | /* | ||
338 | * Get the next pending IRQ. | ||
339 | */ | ||
340 | int iSeries_get_irq(struct pt_regs *regs) | ||
341 | { | ||
342 | struct paca_struct *lpaca; | ||
343 | /* -2 means ignore this interrupt */ | ||
344 | int irq = -2; | ||
345 | |||
346 | lpaca = get_paca(); | ||
347 | #ifdef CONFIG_SMP | ||
348 | if (lpaca->lppaca.int_dword.fields.ipi_cnt) { | ||
349 | lpaca->lppaca.int_dword.fields.ipi_cnt = 0; | ||
350 | iSeries_smp_message_recv(regs); | ||
351 | } | ||
352 | #endif /* CONFIG_SMP */ | ||
353 | if (hvlpevent_is_pending()) | ||
354 | process_hvlpevents(regs); | ||
355 | |||
356 | if (num_pending_irqs) { | ||
357 | spin_lock(&pending_irqs_lock); | ||
358 | for (irq = 0; irq < NR_IRQS; irq++) { | ||
359 | if (pending_irqs[irq]) { | ||
360 | pending_irqs[irq]--; | ||
361 | num_pending_irqs--; | ||
362 | break; | ||
363 | } | ||
364 | } | ||
365 | spin_unlock(&pending_irqs_lock); | ||
366 | if (irq >= NR_IRQS) | ||
367 | irq = -2; | ||
368 | } | ||
369 | |||
370 | return irq; | ||
371 | } | ||
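The new REAL_IRQ_TO_SUBBUS() macro and the widened encoding in iSeries_allocate_IRQ() pack sub-bus, bus, IDSEL and function into a single real IRQ number. A quick stand-alone check of the round trip; the encode expression is copied from iSeries_allocate_IRQ() above and the sample values are arbitrary:

#include <assert.h>

#define REAL_IRQ_TO_SUBBUS(irq)	(((irq) >> 14) & 0xff)
#define REAL_IRQ_TO_BUS(irq)	((((irq) >> 6) & 0xff) + 1)
#define REAL_IRQ_TO_IDSEL(irq)	((((irq) >> 3) & 7) + 1)
#define REAL_IRQ_TO_FUNC(irq)	((irq) & 7)

int main(void)
{
	unsigned int sub_bus = 0, bus = 1, idsel = 2, function = 3;
	/* Same packing as iSeries_allocate_IRQ(): sub_bus, bus-1, idsel-1, func */
	unsigned int realirq =
		(((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3)
		+ function;

	assert(REAL_IRQ_TO_SUBBUS(realirq) == sub_bus);
	assert(REAL_IRQ_TO_BUS(realirq) == bus);
	assert(REAL_IRQ_TO_IDSEL(realirq) == idsel);
	assert(REAL_IRQ_TO_FUNC(realirq) == function);
	return 0;
}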
diff --git a/arch/powerpc/platforms/iseries/irq.h b/arch/powerpc/platforms/iseries/irq.h index 5f643f16ecc0..b9c801ba5a47 100644 --- a/arch/powerpc/platforms/iseries/irq.h +++ b/arch/powerpc/platforms/iseries/irq.h | |||
@@ -4,5 +4,6 @@ | |||
4 | extern void iSeries_init_IRQ(void); | 4 | extern void iSeries_init_IRQ(void); |
5 | extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId); | 5 | extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, HvAgentId); |
6 | extern void iSeries_activate_IRQs(void); | 6 | extern void iSeries_activate_IRQs(void); |
7 | extern int iSeries_get_irq(struct pt_regs *); | ||
7 | 8 | ||
8 | #endif /* _ISERIES_IRQ_H */ | 9 | #endif /* _ISERIES_IRQ_H */ |
diff --git a/arch/powerpc/platforms/iseries/lpardata.c b/arch/powerpc/platforms/iseries/lpardata.c index bb8c91537f35..ea72385aaf0a 100644 --- a/arch/powerpc/platforms/iseries/lpardata.c +++ b/arch/powerpc/platforms/iseries/lpardata.c | |||
@@ -225,3 +225,10 @@ struct ItVpdAreas itVpdAreas = { | |||
225 | 0,0 | 225 | 0,0 |
226 | } | 226 | } |
227 | }; | 227 | }; |
228 | |||
229 | struct ItLpRegSave iseries_reg_save[] = { | ||
230 | [0 ... (NR_CPUS-1)] = { | ||
231 | .xDesc = 0xd397d9e2, /* "LpRS" */ | ||
232 | .xSize = sizeof(struct ItLpRegSave), | ||
233 | }, | ||
234 | }; | ||
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c index da26639190db..ad5ef80500ce 100644 --- a/arch/powerpc/platforms/iseries/setup.c +++ b/arch/powerpc/platforms/iseries/setup.c | |||
@@ -571,16 +571,6 @@ static void iSeries_show_cpuinfo(struct seq_file *m) | |||
571 | 571 | ||
572 | /* | 572 | /* |
573 | * Document me. | 573 | * Document me. |
574 | * and Implement me. | ||
575 | */ | ||
576 | static int iSeries_get_irq(struct pt_regs *regs) | ||
577 | { | ||
578 | /* -2 means ignore this interrupt */ | ||
579 | return -2; | ||
580 | } | ||
581 | |||
582 | /* | ||
583 | * Document me. | ||
584 | */ | 574 | */ |
585 | static void iSeries_restart(char *cmd) | 575 | static void iSeries_restart(char *cmd) |
586 | { | 576 | { |
diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index 7ece8983a105..dd73e38bfb7d 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c | |||
@@ -51,6 +51,7 @@ | |||
51 | #include <asm/pgtable.h> | 51 | #include <asm/pgtable.h> |
52 | #include <asm/bitops.h> | 52 | #include <asm/bitops.h> |
53 | #include <asm/io.h> | 53 | #include <asm/io.h> |
54 | #include <asm/kexec.h> | ||
54 | #include <asm/pci-bridge.h> | 55 | #include <asm/pci-bridge.h> |
55 | #include <asm/iommu.h> | 56 | #include <asm/iommu.h> |
56 | #include <asm/machdep.h> | 57 | #include <asm/machdep.h> |
@@ -191,24 +192,10 @@ static void __init maple_init_early(void) | |||
191 | */ | 192 | */ |
192 | hpte_init_native(); | 193 | hpte_init_native(); |
193 | 194 | ||
194 | /* Find the serial port */ | ||
195 | generic_find_legacy_serial_ports(&physport, &default_speed); | ||
196 | |||
197 | DBG("phys port addr: %lx\n", (long)physport); | ||
198 | |||
199 | if (physport) { | ||
200 | void *comport; | ||
201 | /* Map the uart for udbg. */ | ||
202 | comport = (void *)ioremap(physport, 16); | ||
203 | udbg_init_uart(comport, default_speed); | ||
204 | |||
205 | DBG("Hello World !\n"); | ||
206 | } | ||
207 | |||
208 | /* Setup interrupt mapping options */ | 195 | /* Setup interrupt mapping options */ |
209 | ppc64_interrupt_controller = IC_OPEN_PIC; | 196 | ppc64_interrupt_controller = IC_OPEN_PIC; |
210 | 197 | ||
211 | iommu_init_early_u3(); | 198 | iommu_init_early_dart(); |
212 | 199 | ||
213 | DBG(" <- maple_init_early\n"); | 200 | DBG(" <- maple_init_early\n"); |
214 | } | 201 | } |
@@ -270,7 +257,7 @@ static int __init maple_probe(int platform) | |||
270 | * occupies having to be broken up so the DART itself is not | 257 | * occupies having to be broken up so the DART itself is not |
271 | * part of the cacheable linear mapping | 258 | * part of the cacheable linear mapping |
272 | */ | 259 | */ |
273 | alloc_u3_dart_table(); | 260 | alloc_dart_table(); |
274 | 261 | ||
275 | return 1; | 262 | return 1; |
276 | } | 263 | } |
@@ -292,4 +279,9 @@ struct machdep_calls __initdata maple_md = { | |||
292 | .calibrate_decr = generic_calibrate_decr, | 279 | .calibrate_decr = generic_calibrate_decr, |
293 | .progress = maple_progress, | 280 | .progress = maple_progress, |
294 | .idle_loop = native_idle, | 281 | .idle_loop = native_idle, |
282 | #ifdef CONFIG_KEXEC | ||
283 | .machine_kexec = default_machine_kexec, | ||
284 | .machine_kexec_prepare = default_machine_kexec_prepare, | ||
285 | .machine_crash_shutdown = default_machine_crash_shutdown, | ||
286 | #endif | ||
295 | }; | 287 | }; |
diff --git a/arch/powerpc/platforms/powermac/Makefile b/arch/powerpc/platforms/powermac/Makefile index c9df44fcf571..78093d7f97af 100644 --- a/arch/powerpc/platforms/powermac/Makefile +++ b/arch/powerpc/platforms/powermac/Makefile | |||
@@ -1,9 +1,14 @@ | |||
1 | CFLAGS_bootx_init.o += -fPIC | ||
2 | |||
1 | obj-y += pic.o setup.o time.o feature.o pci.o \ | 3 | obj-y += pic.o setup.o time.o feature.o pci.o \ |
2 | sleep.o low_i2c.o cache.o | 4 | sleep.o low_i2c.o cache.o pfunc_core.o \ |
5 | pfunc_base.o | ||
3 | obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o | 6 | obj-$(CONFIG_PMAC_BACKLIGHT) += backlight.o |
4 | obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o | 7 | obj-$(CONFIG_CPU_FREQ_PMAC) += cpufreq_32.o |
5 | obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o | 8 | obj-$(CONFIG_CPU_FREQ_PMAC64) += cpufreq_64.o |
6 | obj-$(CONFIG_NVRAM) += nvram.o | 9 | obj-$(CONFIG_NVRAM) += nvram.o |
7 | # ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff | 10 | # ppc64 pmac doesn't define CONFIG_NVRAM but needs nvram stuff |
8 | obj-$(CONFIG_PPC64) += nvram.o | 11 | obj-$(CONFIG_PPC64) += nvram.o |
12 | obj-$(CONFIG_PPC32) += bootx_init.o | ||
9 | obj-$(CONFIG_SMP) += smp.o | 13 | obj-$(CONFIG_SMP) += smp.o |
14 | obj-$(CONFIG_PPC_MERGE) += udbg_scc.o udbg_adb.o | ||
diff --git a/arch/powerpc/platforms/powermac/bootx_init.c b/arch/powerpc/platforms/powermac/bootx_init.c new file mode 100644 index 000000000000..fa8b4d7b5ded --- /dev/null +++ b/arch/powerpc/platforms/powermac/bootx_init.c | |||
@@ -0,0 +1,547 @@ | |||
1 | /* | ||
2 | * Early boot support code for BootX bootloader | ||
3 | * | ||
4 | * Copyright (C) 2005 Ben. Herrenschmidt (benh@kernel.crashing.org) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/string.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/version.h> | ||
17 | #include <asm/sections.h> | ||
18 | #include <asm/prom.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/bootx.h> | ||
21 | #include <asm/bootinfo.h> | ||
22 | #include <asm/btext.h> | ||
23 | #include <asm/io.h> | ||
24 | |||
25 | #undef DEBUG | ||
26 | #define SET_BOOT_BAT | ||
27 | |||
28 | #ifdef DEBUG | ||
29 | #define DBG(fmt...) do { bootx_printf(fmt); } while(0) | ||
30 | #else | ||
31 | #define DBG(fmt...) do { } while(0) | ||
32 | #endif | ||
33 | |||
34 | extern void __start(unsigned long r3, unsigned long r4, unsigned long r5); | ||
35 | |||
36 | static unsigned long __initdata bootx_dt_strbase; | ||
37 | static unsigned long __initdata bootx_dt_strend; | ||
38 | static unsigned long __initdata bootx_node_chosen; | ||
39 | static boot_infos_t * __initdata bootx_info; | ||
40 | static char __initdata bootx_disp_path[256]; | ||
41 | |||
42 | /* Is boot-info compatible ? */ | ||
43 | #define BOOT_INFO_IS_COMPATIBLE(bi) \ | ||
44 | ((bi)->compatible_version <= BOOT_INFO_VERSION) | ||
45 | #define BOOT_INFO_IS_V2_COMPATIBLE(bi) ((bi)->version >= 2) | ||
46 | #define BOOT_INFO_IS_V4_COMPATIBLE(bi) ((bi)->version >= 4) | ||
47 | |||
48 | #ifdef CONFIG_BOOTX_TEXT | ||
49 | static void __init bootx_printf(const char *format, ...) | ||
50 | { | ||
51 | const char *p, *q, *s; | ||
52 | va_list args; | ||
53 | unsigned long v; | ||
54 | |||
55 | va_start(args, format); | ||
56 | for (p = format; *p != 0; p = q) { | ||
57 | for (q = p; *q != 0 && *q != '\n' && *q != '%'; ++q) | ||
58 | ; | ||
59 | if (q > p) | ||
60 | btext_drawtext(p, q - p); | ||
61 | if (*q == 0) | ||
62 | break; | ||
63 | if (*q == '\n') { | ||
64 | ++q; | ||
65 | btext_flushline(); | ||
66 | btext_drawstring("\r\n"); | ||
67 | btext_flushline(); | ||
68 | continue; | ||
69 | } | ||
70 | ++q; | ||
71 | if (*q == 0) | ||
72 | break; | ||
73 | switch (*q) { | ||
74 | case 's': | ||
75 | ++q; | ||
76 | s = va_arg(args, const char *); | ||
77 | if (s == NULL) | ||
78 | s = "<NULL>"; | ||
79 | btext_drawstring(s); | ||
80 | break; | ||
81 | case 'x': | ||
82 | ++q; | ||
83 | v = va_arg(args, unsigned long); | ||
84 | btext_drawhex(v); | ||
85 | break; | ||
86 | } | ||
87 | } | ||
88 | } | ||
89 | #else /* CONFIG_BOOTX_TEXT */ | ||
90 | static void __init bootx_printf(const char *format, ...) {} | ||
91 | #endif /* CONFIG_BOOTX_TEXT */ | ||
92 | |||
93 | static void * __init bootx_early_getprop(unsigned long base, | ||
94 | unsigned long node, | ||
95 | char *prop) | ||
96 | { | ||
97 | struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node); | ||
98 | u32 *ppp = &np->properties; | ||
99 | |||
100 | while(*ppp) { | ||
101 | struct bootx_dt_prop *pp = | ||
102 | (struct bootx_dt_prop *)(base + *ppp); | ||
103 | |||
104 | if (strcmp((char *)((unsigned long)pp->name + base), | ||
105 | prop) == 0) { | ||
106 | return (void *)((unsigned long)pp->value + base); | ||
107 | } | ||
108 | ppp = &pp->next; | ||
109 | } | ||
110 | return NULL; | ||
111 | } | ||
112 | |||
113 | #define dt_push_token(token, mem) \ | ||
114 | do { \ | ||
115 | *(mem) = _ALIGN_UP(*(mem),4); \ | ||
116 | *((u32 *)*(mem)) = token; \ | ||
117 | *(mem) += 4; \ | ||
118 | } while(0) | ||
119 | |||
120 | static unsigned long __init bootx_dt_find_string(char *str) | ||
121 | { | ||
122 | char *s, *os; | ||
123 | |||
124 | s = os = (char *)bootx_dt_strbase; | ||
125 | s += 4; | ||
126 | while (s < (char *)bootx_dt_strend) { | ||
127 | if (strcmp(s, str) == 0) | ||
128 | return s - os; | ||
129 | s += strlen(s) + 1; | ||
130 | } | ||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | static void __init bootx_dt_add_prop(char *name, void *data, int size, | ||
135 | unsigned long *mem_end) | ||
136 | { | ||
137 | unsigned long soff = bootx_dt_find_string(name); | ||
138 | if (data == NULL) | ||
139 | size = 0; | ||
140 | if (soff == 0) { | ||
141 | bootx_printf("WARNING: Can't find string index for <%s>\n", | ||
142 | name); | ||
143 | return; | ||
144 | } | ||
145 | if (size > 0x20000) { | ||
146 | bootx_printf("WARNING: ignoring large property "); | ||
147 | bootx_printf("%s length 0x%x\n", name, size); | ||
148 | return; | ||
149 | } | ||
150 | dt_push_token(OF_DT_PROP, mem_end); | ||
151 | dt_push_token(size, mem_end); | ||
152 | dt_push_token(soff, mem_end); | ||
153 | |||
154 | /* push property content */ | ||
155 | if (size && data) { | ||
156 | memcpy((void *)*mem_end, data, size); | ||
157 | *mem_end = _ALIGN_UP(*mem_end + size, 4); | ||
158 | } | ||
159 | } | ||
160 | |||
161 | static void __init bootx_add_chosen_props(unsigned long base, | ||
162 | unsigned long *mem_end) | ||
163 | { | ||
164 | u32 val = _MACH_Pmac; | ||
165 | |||
166 | bootx_dt_add_prop("linux,platform", &val, 4, mem_end); | ||
167 | |||
168 | if (bootx_info->kernelParamsOffset) { | ||
169 | char *args = (char *)((unsigned long)bootx_info) + | ||
170 | bootx_info->kernelParamsOffset; | ||
171 | bootx_dt_add_prop("bootargs", args, strlen(args) + 1, mem_end); | ||
172 | } | ||
173 | if (bootx_info->ramDisk) { | ||
174 | val = ((unsigned long)bootx_info) + bootx_info->ramDisk; | ||
175 | bootx_dt_add_prop("linux,initrd-start", &val, 4, mem_end); | ||
176 | val += bootx_info->ramDiskSize; | ||
177 | bootx_dt_add_prop("linux,initrd-end", &val, 4, mem_end); | ||
178 | } | ||
179 | if (strlen(bootx_disp_path)) | ||
180 | bootx_dt_add_prop("linux,stdout-path", bootx_disp_path, | ||
181 | strlen(bootx_disp_path) + 1, mem_end); | ||
182 | } | ||
183 | |||
184 | static void __init bootx_add_display_props(unsigned long base, | ||
185 | unsigned long *mem_end) | ||
186 | { | ||
187 | bootx_dt_add_prop("linux,boot-display", NULL, 0, mem_end); | ||
188 | bootx_dt_add_prop("linux,opened", NULL, 0, mem_end); | ||
189 | } | ||
190 | |||
191 | static void __init bootx_dt_add_string(char *s, unsigned long *mem_end) | ||
192 | { | ||
193 | unsigned int l = strlen(s) + 1; | ||
194 | memcpy((void *)*mem_end, s, l); | ||
195 | bootx_dt_strend = *mem_end = *mem_end + l; | ||
196 | } | ||
197 | |||
198 | static void __init bootx_scan_dt_build_strings(unsigned long base, | ||
199 | unsigned long node, | ||
200 | unsigned long *mem_end) | ||
201 | { | ||
202 | struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node); | ||
203 | u32 *cpp, *ppp = &np->properties; | ||
204 | unsigned long soff; | ||
205 | char *namep; | ||
206 | |||
207 | /* Keep refs to known nodes */ | ||
208 | namep = np->full_name ? (char *)(base + np->full_name) : NULL; | ||
209 | if (namep == NULL) { | ||
210 | bootx_printf("Node without a full name !\n"); | ||
211 | namep = ""; | ||
212 | } | ||
213 | DBG("* strings: %s\n", namep); | ||
214 | |||
215 | if (!strcmp(namep, "/chosen")) { | ||
216 | DBG(" detected /chosen ! adding properties names !\n"); | ||
217 | bootx_dt_add_string("linux,platform", mem_end); | ||
218 | bootx_dt_add_string("linux,stdout-path", mem_end); | ||
219 | bootx_dt_add_string("linux,initrd-start", mem_end); | ||
220 | bootx_dt_add_string("linux,initrd-end", mem_end); | ||
221 | bootx_dt_add_string("bootargs", mem_end); | ||
222 | bootx_node_chosen = node; | ||
223 | } | ||
224 | if (node == bootx_info->dispDeviceRegEntryOffset) { | ||
225 | DBG(" detected display ! adding properties names !\n"); | ||
226 | bootx_dt_add_string("linux,boot-display", mem_end); | ||
227 | bootx_dt_add_string("linux,opened", mem_end); | ||
228 | strncpy(bootx_disp_path, namep, 255); | ||
229 | } | ||
230 | |||
231 | /* get and store all property names */ | ||
232 | while (*ppp) { | ||
233 | struct bootx_dt_prop *pp = | ||
234 | (struct bootx_dt_prop *)(base + *ppp); | ||
235 | |||
236 | namep = pp->name ? (char *)(base + pp->name) : NULL; | ||
237 | if (namep == NULL || strcmp(namep, "name") == 0) | ||
238 | goto next; | ||
239 | /* get/create string entry */ | ||
240 | soff = bootx_dt_find_string(namep); | ||
241 | if (soff == 0) | ||
242 | bootx_dt_add_string(namep, mem_end); | ||
243 | next: | ||
244 | ppp = &pp->next; | ||
245 | } | ||
246 | |||
247 | /* do all our children */ | ||
248 | cpp = &np->child; | ||
249 | while(*cpp) { | ||
250 | np = (struct bootx_dt_node *)(base + *cpp); | ||
251 | bootx_scan_dt_build_strings(base, *cpp, mem_end); | ||
252 | cpp = &np->sibling; | ||
253 | } | ||
254 | } | ||
255 | |||
256 | static void __init bootx_scan_dt_build_struct(unsigned long base, | ||
257 | unsigned long node, | ||
258 | unsigned long *mem_end) | ||
259 | { | ||
260 | struct bootx_dt_node *np = (struct bootx_dt_node *)(base + node); | ||
261 | u32 *cpp, *ppp = &np->properties; | ||
262 | char *namep, *p, *ep, *lp; | ||
263 | int l; | ||
264 | |||
265 | dt_push_token(OF_DT_BEGIN_NODE, mem_end); | ||
266 | |||
267 | /* get the node's full name */ | ||
268 | namep = np->full_name ? (char *)(base + np->full_name) : NULL; | ||
269 | if (namep == NULL) | ||
270 | namep = ""; | ||
271 | l = strlen(namep); | ||
272 | |||
273 | DBG("* struct: %s\n", namep); | ||
274 | |||
275 | /* Fixup an Apple bug where they have bogus \0 chars in the | ||
276 | * middle of the path in some properties, and extract | ||
277 | * the unit name (everything after the last '/'). | ||
278 | */ | ||
279 | memcpy((void *)*mem_end, namep, l + 1); | ||
280 | namep = (char *)*mem_end; | ||
281 | for (lp = p = namep, ep = namep + l; p < ep; p++) { | ||
282 | if (*p == '/') | ||
283 | lp = namep; | ||
284 | else if (*p != 0) | ||
285 | *lp++ = *p; | ||
286 | } | ||
287 | *lp = 0; | ||
288 | *mem_end = _ALIGN_UP((unsigned long)lp + 1, 4); | ||
289 | |||
290 | /* get and store all properties */ | ||
291 | while (*ppp) { | ||
292 | struct bootx_dt_prop *pp = | ||
293 | (struct bootx_dt_prop *)(base + *ppp); | ||
294 | |||
295 | namep = pp->name ? (char *)(base + pp->name) : NULL; | ||
296 | /* Skip "name" */ | ||
297 | if (namep == NULL || !strcmp(namep, "name")) | ||
298 | goto next; | ||
299 | /* Skip "bootargs" in /chosen too as we replace it */ | ||
300 | if (node == bootx_node_chosen && !strcmp(namep, "bootargs")) | ||
301 | goto next; | ||
302 | |||
303 | /* push property head */ | ||
304 | bootx_dt_add_prop(namep, | ||
305 | pp->value ? (void *)(base + pp->value): NULL, | ||
306 | pp->length, mem_end); | ||
307 | next: | ||
308 | ppp = &pp->next; | ||
309 | } | ||
310 | |||
311 | if (node == bootx_node_chosen) | ||
312 | bootx_add_chosen_props(base, mem_end); | ||
313 | if (node == bootx_info->dispDeviceRegEntryOffset) | ||
314 | bootx_add_display_props(base, mem_end); | ||
315 | |||
316 | /* do all our children */ | ||
317 | cpp = &np->child; | ||
318 | while(*cpp) { | ||
319 | np = (struct bootx_dt_node *)(base + *cpp); | ||
320 | bootx_scan_dt_build_struct(base, *cpp, mem_end); | ||
321 | cpp = &np->sibling; | ||
322 | } | ||
323 | |||
324 | dt_push_token(OF_DT_END_NODE, mem_end); | ||
325 | } | ||
326 | |||
327 | static unsigned long __init bootx_flatten_dt(unsigned long start) | ||
328 | { | ||
329 | boot_infos_t *bi = bootx_info; | ||
330 | unsigned long mem_start, mem_end; | ||
331 | struct boot_param_header *hdr; | ||
332 | unsigned long base; | ||
333 | u64 *rsvmap; | ||
334 | |||
335 | /* Start using memory after the big blob passed by BootX, get | ||
336 | * some space for the header | ||
337 | */ | ||
338 | mem_start = mem_end = _ALIGN_UP(((unsigned long)bi) + start, 4); | ||
339 | DBG("Boot params header at: %x\n", mem_start); | ||
340 | hdr = (struct boot_param_header *)mem_start; | ||
341 | mem_end += sizeof(struct boot_param_header); | ||
342 | rsvmap = (u64 *)(_ALIGN_UP(mem_end, 8)); | ||
343 | hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - mem_start; | ||
344 | mem_end = ((unsigned long)rsvmap) + 8 * sizeof(u64); | ||
345 | |||
346 | /* Get base of tree */ | ||
347 | base = ((unsigned long)bi) + bi->deviceTreeOffset; | ||
348 | |||
349 | /* Build string array */ | ||
350 | DBG("Building string array at: %x\n", mem_end); | ||
351 | DBG("Device Tree Base=%x\n", base); | ||
352 | bootx_dt_strbase = mem_end; | ||
353 | mem_end += 4; | ||
354 | bootx_dt_strend = mem_end; | ||
355 | bootx_scan_dt_build_strings(base, 4, &mem_end); | ||
356 | hdr->off_dt_strings = bootx_dt_strbase - mem_start; | ||
357 | hdr->dt_strings_size = bootx_dt_strend - bootx_dt_strbase; | ||
358 | |||
359 | /* Build structure */ | ||
360 | mem_end = _ALIGN(mem_end, 16); | ||
361 | DBG("Building device tree structure at: %x\n", mem_end); | ||
362 | hdr->off_dt_struct = mem_end - mem_start; | ||
363 | bootx_scan_dt_build_struct(base, 4, &mem_end); | ||
364 | dt_push_token(OF_DT_END, &mem_end); | ||
365 | |||
366 | /* Finish header */ | ||
367 | hdr->boot_cpuid_phys = 0; | ||
368 | hdr->magic = OF_DT_HEADER; | ||
369 | hdr->totalsize = mem_end - mem_start; | ||
370 | hdr->version = OF_DT_VERSION; | ||
371 | /* Version 16 is not backward compatible */ | ||
372 | hdr->last_comp_version = 0x10; | ||
373 | |||
374 | /* Reserve the whole thing and copy the reserve map in, we | ||
375 | * also bump mem_reserve_cnt to cause further reservations to | ||
376 | * fail since it's too late. | ||
377 | */ | ||
378 | mem_end = _ALIGN(mem_end, PAGE_SIZE); | ||
379 | DBG("End of boot params: %x\n", mem_end); | ||
380 | rsvmap[0] = mem_start; | ||
381 | rsvmap[1] = mem_end; | ||
382 | rsvmap[2] = 0; | ||
383 | rsvmap[3] = 0; | ||
384 | |||
385 | return (unsigned long)hdr; | ||
386 | } | ||
387 | |||
388 | |||
389 | #ifdef CONFIG_BOOTX_TEXT | ||
390 | static void __init btext_welcome(boot_infos_t *bi) | ||
391 | { | ||
392 | unsigned long flags; | ||
393 | unsigned long pvr; | ||
394 | |||
395 | bootx_printf("Welcome to Linux, kernel " UTS_RELEASE "\n"); | ||
396 | bootx_printf("\nlinked at : 0x%x", KERNELBASE); | ||
397 | bootx_printf("\nframe buffer at : 0x%x", bi->dispDeviceBase); | ||
398 | bootx_printf(" (phys), 0x%x", bi->logicalDisplayBase); | ||
399 | bootx_printf(" (log)"); | ||
400 | bootx_printf("\nklimit : 0x%x",(unsigned long)klimit); | ||
401 | bootx_printf("\nboot_info at : 0x%x", bi); | ||
402 | __asm__ __volatile__ ("mfmsr %0" : "=r" (flags)); | ||
403 | bootx_printf("\nMSR : 0x%x", flags); | ||
404 | __asm__ __volatile__ ("mfspr %0, 287" : "=r" (pvr)); | ||
405 | bootx_printf("\nPVR : 0x%x", pvr); | ||
406 | pvr >>= 16; | ||
407 | if (pvr > 1) { | ||
408 | __asm__ __volatile__ ("mfspr %0, 1008" : "=r" (flags)); | ||
409 | bootx_printf("\nHID0 : 0x%x", flags); | ||
410 | } | ||
411 | if (pvr == 8 || pvr == 12 || pvr == 0x800c) { | ||
412 | __asm__ __volatile__ ("mfspr %0, 1019" : "=r" (flags)); | ||
413 | bootx_printf("\nICTC : 0x%x", flags); | ||
414 | } | ||
415 | #ifdef DEBUG | ||
416 | bootx_printf("\n\n"); | ||
417 | bootx_printf("bi->deviceTreeOffset : 0x%x\n", | ||
418 | bi->deviceTreeOffset); | ||
419 | bootx_printf("bi->deviceTreeSize : 0x%x\n", | ||
420 | bi->deviceTreeSize); | ||
421 | #endif | ||
422 | bootx_printf("\n\n"); | ||
423 | } | ||
424 | #endif /* CONFIG_BOOTX_TEXT */ | ||
425 | |||
426 | void __init bootx_init(unsigned long r3, unsigned long r4) | ||
427 | { | ||
428 | boot_infos_t *bi = (boot_infos_t *) r4; | ||
429 | unsigned long hdr; | ||
430 | unsigned long space; | ||
431 | unsigned long ptr, x; | ||
432 | char *model; | ||
433 | unsigned long offset = reloc_offset(); | ||
434 | |||
435 | reloc_got2(offset); | ||
436 | |||
437 | bootx_info = bi; | ||
438 | |||
439 | /* We haven't cleared any bss at this point, make sure | ||
440 | * what we need is initialized | ||
441 | */ | ||
442 | bootx_dt_strbase = bootx_dt_strend = 0; | ||
443 | bootx_node_chosen = 0; | ||
444 | bootx_disp_path[0] = 0; | ||
445 | |||
446 | if (!BOOT_INFO_IS_V2_COMPATIBLE(bi)) | ||
447 | bi->logicalDisplayBase = bi->dispDeviceBase; | ||
448 | |||
449 | #ifdef CONFIG_BOOTX_TEXT | ||
450 | btext_setup_display(bi->dispDeviceRect[2] - bi->dispDeviceRect[0], | ||
451 | bi->dispDeviceRect[3] - bi->dispDeviceRect[1], | ||
452 | bi->dispDeviceDepth, bi->dispDeviceRowBytes, | ||
453 | (unsigned long)bi->logicalDisplayBase); | ||
454 | btext_clearscreen(); | ||
455 | btext_flushscreen(); | ||
456 | #endif /* CONFIG_BOOTX_TEXT */ | ||
457 | |||
458 | /* | ||
459 | * Test if boot-info is compatible. Done only in config | ||
460 | * CONFIG_BOOTX_TEXT since there is nothing much we can do | ||
461 | * with an incompatible version, except display a message | ||
462 | * and eventually hang the processor... | ||
463 | * | ||
464 | * I'll try to keep enough of boot-info compatible in the | ||
465 | * future to always allow display of this message; | ||
466 | */ | ||
467 | if (!BOOT_INFO_IS_COMPATIBLE(bi)) { | ||
468 | bootx_printf(" !!! WARNING - Incompatible version" | ||
469 | " of BootX !!!\n\n\n"); | ||
470 | for (;;) | ||
471 | ; | ||
472 | } | ||
473 | if (bi->architecture != BOOT_ARCH_PCI) { | ||
474 | bootx_printf(" !!! WARNING - Unsupported machine" | ||
475 | " architecture !\n"); | ||
476 | for (;;) | ||
477 | ; | ||
478 | } | ||
479 | |||
480 | #ifdef CONFIG_BOOTX_TEXT | ||
481 | btext_welcome(bi); | ||
482 | #endif | ||
483 | /* New BootX enters the kernel with the MMU off; I/Os are not allowed | ||
484 | * here. This hack will have been done by the bootstrap anyway. | ||
485 | */ | ||
486 | if (bi->version < 4) { | ||
487 | /* | ||
488 | * XXX If this is an iMac, turn off the USB controller. | ||
489 | */ | ||
490 | model = (char *) bootx_early_getprop(r4 + bi->deviceTreeOffset, | ||
491 | 4, "model"); | ||
492 | if (model | ||
493 | && (strcmp(model, "iMac,1") == 0 | ||
494 | || strcmp(model, "PowerMac1,1") == 0)) { | ||
495 | bootx_printf("iMac,1 detected, shutting down USB \n"); | ||
496 | out_le32((unsigned *)0x80880008, 1); /* XXX */ | ||
497 | } | ||
498 | } | ||
499 | |||
500 | /* Get a pointer that points above the device tree, args, ramdisk, | ||
501 | * etc... to use for generating the flattened tree | ||
502 | */ | ||
503 | if (bi->version < 5) { | ||
504 | space = bi->deviceTreeOffset + bi->deviceTreeSize; | ||
505 | if (bi->ramDisk) | ||
506 | space = bi->ramDisk + bi->ramDiskSize; | ||
507 | } else | ||
508 | space = bi->totalParamsSize; | ||
509 | |||
510 | bootx_printf("Total space used by parameters & ramdisk: %x \n", space); | ||
511 | |||
512 | /* New BootX will have flushed all TLBs and enters kernel with | ||
513 | * MMU switched OFF, so this should not be useful anymore. | ||
514 | */ | ||
515 | if (bi->version < 4) { | ||
516 | bootx_printf("Touching pages...\n"); | ||
517 | |||
518 | /* | ||
519 | * Touch each page to make sure the PTEs for them | ||
520 | * are in the hash table - the aim is to try to avoid | ||
521 | * getting DSI exceptions while copying the kernel image. | ||
522 | */ | ||
523 | for (ptr = ((unsigned long) &_stext) & PAGE_MASK; | ||
524 | ptr < (unsigned long)bi + space; ptr += PAGE_SIZE) | ||
525 | x = *(volatile unsigned long *)ptr; | ||
526 | } | ||
527 | |||
528 | /* Ok, now we need to generate a flattened device-tree to pass | ||
529 | * to the kernel | ||
530 | */ | ||
531 | bootx_printf("Preparing boot params...\n"); | ||
532 | |||
533 | hdr = bootx_flatten_dt(space); | ||
534 | |||
535 | #ifdef CONFIG_BOOTX_TEXT | ||
536 | #ifdef SET_BOOT_BAT | ||
537 | bootx_printf("Preparing BAT...\n"); | ||
538 | btext_prepare_BAT(); | ||
539 | #else | ||
540 | btext_unmap(); | ||
541 | #endif | ||
542 | #endif | ||
543 | |||
544 | reloc_got2(-offset); | ||
545 | |||
546 | __start(hdr, KERNELBASE + offset, 0); | ||
547 | } | ||
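bootx_flatten_dt() emits the flattened tree as a stream of 4-byte tokens: OF_DT_BEGIN_NODE, the unit name, one OF_DT_PROP record per property (length, string-block offset, value), OF_DT_END_NODE, and a final OF_DT_END. A rough sketch of how one node with a single property would be emitted with the helpers above; the function name, the property and its value are made up for illustration:

/* Illustrative only: emit one node carrying a single "bootargs" property,
 * using dt_push_token() and _ALIGN_UP() as defined in bootx_init.c.
 * strings_off is the precomputed offset of "bootargs" in the string block. */
static void __init emit_example_node(unsigned long *mem_end,
				     unsigned long strings_off)
{
	const char *args = "root=/dev/hda3";
	unsigned int len = strlen(args) + 1;

	dt_push_token(OF_DT_BEGIN_NODE, mem_end);	/* open the node */
	/* ... the node's unit name would be copied here, NUL terminated
	 * and padded to a 4-byte boundary ... */

	dt_push_token(OF_DT_PROP, mem_end);		/* one property */
	dt_push_token(len, mem_end);			/* value length */
	dt_push_token(strings_off, mem_end);		/* name offset  */
	memcpy((void *)*mem_end, args, len);		/* value bytes  */
	*mem_end = _ALIGN_UP(*mem_end + len, 4);

	dt_push_token(OF_DT_END_NODE, mem_end);		/* close the node */
}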
diff --git a/arch/powerpc/platforms/powermac/cpufreq_64.c b/arch/powerpc/platforms/powermac/cpufreq_64.c index 39150342c6f1..a4b50c4109c2 100644 --- a/arch/powerpc/platforms/powermac/cpufreq_64.c +++ b/arch/powerpc/platforms/powermac/cpufreq_64.c | |||
@@ -28,6 +28,7 @@ | |||
28 | #include <asm/cputable.h> | 28 | #include <asm/cputable.h> |
29 | #include <asm/time.h> | 29 | #include <asm/time.h> |
30 | #include <asm/smu.h> | 30 | #include <asm/smu.h> |
31 | #include <asm/pmac_pfunc.h> | ||
31 | 32 | ||
32 | #undef DEBUG | 33 | #undef DEBUG |
33 | 34 | ||
@@ -85,6 +86,10 @@ static u32 *g5_pmode_data; | |||
85 | static int g5_pmode_max; | 86 | static int g5_pmode_max; |
86 | static int g5_pmode_cur; | 87 | static int g5_pmode_cur; |
87 | 88 | ||
89 | static void (*g5_switch_volt)(int speed_mode); | ||
90 | static int (*g5_switch_freq)(int speed_mode); | ||
91 | static int (*g5_query_freq)(void); | ||
92 | |||
88 | static DECLARE_MUTEX(g5_switch_mutex); | 93 | static DECLARE_MUTEX(g5_switch_mutex); |
89 | 94 | ||
90 | 95 | ||
@@ -92,9 +97,11 @@ static struct smu_sdbp_fvt *g5_fvt_table; /* table of op. points */ | |||
92 | static int g5_fvt_count; /* number of op. points */ | 97 | static int g5_fvt_count; /* number of op. points */ |
93 | static int g5_fvt_cur; /* current op. point */ | 98 | static int g5_fvt_cur; /* current op. point */ |
94 | 99 | ||
95 | /* ----------------- real hardware interface */ | 100 | /* |
101 | * SMU based voltage switching for Neo2 platforms | ||
102 | */ | ||
96 | 103 | ||
97 | static void g5_switch_volt(int speed_mode) | 104 | static void g5_smu_switch_volt(int speed_mode) |
98 | { | 105 | { |
99 | struct smu_simple_cmd cmd; | 106 | struct smu_simple_cmd cmd; |
100 | 107 | ||
@@ -105,26 +112,57 @@ static void g5_switch_volt(int speed_mode) | |||
105 | wait_for_completion(&comp); | 112 | wait_for_completion(&comp); |
106 | } | 113 | } |
107 | 114 | ||
108 | static int g5_switch_freq(int speed_mode) | 115 | /* |
116 | * Platform function based voltage/vdnap switching for Neo2 | ||
117 | */ | ||
118 | |||
119 | static struct pmf_function *pfunc_set_vdnap0; | ||
120 | static struct pmf_function *pfunc_vdnap0_complete; | ||
121 | |||
122 | static void g5_vdnap_switch_volt(int speed_mode) | ||
109 | { | 123 | { |
110 | struct cpufreq_freqs freqs; | 124 | struct pmf_args args; |
111 | int to; | 125 | u32 slew, done = 0; |
126 | unsigned long timeout; | ||
112 | 127 | ||
113 | if (g5_pmode_cur == speed_mode) | 128 | slew = (speed_mode == CPUFREQ_LOW) ? 1 : 0; |
114 | return 0; | 129 | args.count = 1; |
130 | args.u[0].p = &slew; | ||
115 | 131 | ||
116 | down(&g5_switch_mutex); | 132 | pmf_call_one(pfunc_set_vdnap0, &args); |
117 | 133 | ||
118 | freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; | 134 | /* It's an irq GPIO so we should be able to just block here, |
119 | freqs.new = g5_cpu_freqs[speed_mode].frequency; | 135 | * I'll do that later after I've properly tested the IRQ code for |
120 | freqs.cpu = 0; | 136 | * platform functions |
137 | */ | ||
138 | timeout = jiffies + HZ/10; | ||
139 | while(!time_after(jiffies, timeout)) { | ||
140 | args.count = 1; | ||
141 | args.u[0].p = &done; | ||
142 | pmf_call_one(pfunc_vdnap0_complete, &args); | ||
143 | if (done) | ||
144 | break; | ||
145 | msleep(1); | ||
146 | } | ||
147 | if (done == 0) | ||
148 | printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); | ||
149 | } | ||
121 | 150 | ||
122 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | 151 | |
152 | /* | ||
153 | * SCOM based frequency switching for 970FX rev3 | ||
154 | */ | ||
155 | static int g5_scom_switch_freq(int speed_mode) | ||
156 | { | ||
157 | unsigned long flags; | ||
158 | int to; | ||
123 | 159 | ||
124 | /* If frequency is going up, first ramp up the voltage */ | 160 | /* If frequency is going up, first ramp up the voltage */ |
125 | if (speed_mode < g5_pmode_cur) | 161 | if (speed_mode < g5_pmode_cur) |
126 | g5_switch_volt(speed_mode); | 162 | g5_switch_volt(speed_mode); |
127 | 163 | ||
164 | local_irq_save(flags); | ||
165 | |||
128 | /* Clear PCR high */ | 166 | /* Clear PCR high */ |
129 | scom970_write(SCOM_PCR, 0); | 167 | scom970_write(SCOM_PCR, 0); |
130 | /* Clear PCR low */ | 168 | /* Clear PCR low */ |
@@ -147,6 +185,8 @@ static int g5_switch_freq(int speed_mode) | |||
147 | udelay(100); | 185 | udelay(100); |
148 | } | 186 | } |
149 | 187 | ||
188 | local_irq_restore(flags); | ||
189 | |||
150 | /* If frequency is going down, last ramp the voltage */ | 190 | /* If frequency is going down, last ramp the voltage */ |
151 | if (speed_mode > g5_pmode_cur) | 191 | if (speed_mode > g5_pmode_cur) |
152 | g5_switch_volt(speed_mode); | 192 | g5_switch_volt(speed_mode); |
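The ordering in the hunk above matters: when the frequency goes up, the voltage is ramped first; when it goes down, the voltage is dropped only after the slower frequency is in effect. A minimal, self-contained C sketch of that ordering, with hypothetical set_voltage()/set_frequency() stand-ins instead of the real SCOM and SMU accessors:

    #include <stdio.h>

    enum { FREQ_HIGH = 0, FREQ_LOW = 1 };       /* lower index = higher speed */

    static int cur_mode = FREQ_LOW;

    static void set_voltage(int mode)   { printf("voltage for mode %d\n", mode); }
    static void set_frequency(int mode) { printf("frequency for mode %d\n", mode); }

    /* Hypothetical stand-in for g5_scom_switch_freq(): the voltage is always
     * kept high enough for whichever of the two operating points is faster. */
    static void switch_freq(int mode)
    {
        if (mode < cur_mode)            /* going faster: raise voltage first */
            set_voltage(mode);
        set_frequency(mode);
        if (mode > cur_mode)            /* going slower: drop voltage last */
            set_voltage(mode);
        cur_mode = mode;
    }

    int main(void) { switch_freq(FREQ_HIGH); switch_freq(FREQ_LOW); return 0; }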
@@ -154,14 +194,10 @@ static int g5_switch_freq(int speed_mode) | |||
154 | g5_pmode_cur = speed_mode; | 194 | g5_pmode_cur = speed_mode; |
155 | ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul; | 195 | ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul; |
156 | 196 | ||
157 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
158 | |||
159 | up(&g5_switch_mutex); | ||
160 | |||
161 | return 0; | 197 | return 0; |
162 | } | 198 | } |
163 | 199 | ||
164 | static int g5_query_freq(void) | 200 | static int g5_scom_query_freq(void) |
165 | { | 201 | { |
166 | unsigned long psr = scom970_read(SCOM_PSR); | 202 | unsigned long psr = scom970_read(SCOM_PSR); |
167 | int i; | 203 | int i; |
@@ -173,7 +209,104 @@ static int g5_query_freq(void) | |||
173 | return i; | 209 | return i; |
174 | } | 210 | } |
175 | 211 | ||
176 | /* ----------------- cpufreq bookkeeping */ | 212 | /* |
213 | * Platform function based voltage switching for PowerMac7,2 & 7,3 | ||
214 | */ | ||
215 | |||
216 | static struct pmf_function *pfunc_cpu0_volt_high; | ||
217 | static struct pmf_function *pfunc_cpu0_volt_low; | ||
218 | static struct pmf_function *pfunc_cpu1_volt_high; | ||
219 | static struct pmf_function *pfunc_cpu1_volt_low; | ||
220 | |||
221 | static void g5_pfunc_switch_volt(int speed_mode) | ||
222 | { | ||
223 | if (speed_mode == CPUFREQ_HIGH) { | ||
224 | if (pfunc_cpu0_volt_high) | ||
225 | pmf_call_one(pfunc_cpu0_volt_high, NULL); | ||
226 | if (pfunc_cpu1_volt_high) | ||
227 | pmf_call_one(pfunc_cpu1_volt_high, NULL); | ||
228 | } else { | ||
229 | if (pfunc_cpu0_volt_low) | ||
230 | pmf_call_one(pfunc_cpu0_volt_low, NULL); | ||
231 | if (pfunc_cpu1_volt_low) | ||
232 | pmf_call_one(pfunc_cpu1_volt_low, NULL); | ||
233 | } | ||
234 | msleep(10); /* should be faster, to fix */ | ||
235 | } | ||
236 | |||
237 | /* | ||
238 | * Platform function based frequency switching for PowerMac7,2 & 7,3 | ||
239 | */ | ||
240 | |||
241 | static struct pmf_function *pfunc_cpu_setfreq_high; | ||
242 | static struct pmf_function *pfunc_cpu_setfreq_low; | ||
243 | static struct pmf_function *pfunc_cpu_getfreq; | ||
244 | static struct pmf_function *pfunc_slewing_done; | ||
245 | |||
246 | static int g5_pfunc_switch_freq(int speed_mode) | ||
247 | { | ||
248 | struct pmf_args args; | ||
249 | u32 done = 0; | ||
250 | unsigned long timeout; | ||
251 | |||
252 | /* If frequency is going up, first ramp up the voltage */ | ||
253 | if (speed_mode < g5_pmode_cur) | ||
254 | g5_switch_volt(speed_mode); | ||
255 | |||
256 | /* Do it */ | ||
257 | if (speed_mode == CPUFREQ_HIGH) | ||
258 | pmf_call_one(pfunc_cpu_setfreq_high, NULL); | ||
259 | else | ||
260 | pmf_call_one(pfunc_cpu_setfreq_low, NULL); | ||
261 | |||
262 | /* It's an irq GPIO so we should be able to just block here; | ||
263 | * I'll do that later after I've properly tested the IRQ code for | ||
264 | * platform functions | ||
265 | */ | ||
266 | timeout = jiffies + HZ/10; | ||
267 | while(!time_after(jiffies, timeout)) { | ||
268 | args.count = 1; | ||
269 | args.u[0].p = &done; | ||
270 | pmf_call_one(pfunc_slewing_done, &args); | ||
271 | if (done) | ||
272 | break; | ||
273 | msleep(1); | ||
274 | } | ||
275 | if (done == 0) | ||
276 | printk(KERN_WARNING "cpufreq: Timeout in clock slewing !\n"); | ||
277 | |||
278 | /* If frequency is going down, last ramp the voltage */ | ||
279 | if (speed_mode > g5_pmode_cur) | ||
280 | g5_switch_volt(speed_mode); | ||
281 | |||
282 | g5_pmode_cur = speed_mode; | ||
283 | ppc_proc_freq = g5_cpu_freqs[speed_mode].frequency * 1000ul; | ||
284 | |||
285 | return 0; | ||
286 | } | ||
287 | |||
288 | static int g5_pfunc_query_freq(void) | ||
289 | { | ||
290 | struct pmf_args args; | ||
291 | u32 val = 0; | ||
292 | |||
293 | args.count = 1; | ||
294 | args.u[0].p = &val; | ||
295 | pmf_call_one(pfunc_cpu_getfreq, &args); | ||
296 | return val ? CPUFREQ_HIGH : CPUFREQ_LOW; | ||
297 | } | ||
298 | |||
299 | /* | ||
300 | * Fake voltage switching for platforms with missing support | ||
301 | */ | ||
302 | |||
303 | static void g5_dummy_switch_volt(int speed_mode) | ||
304 | { | ||
305 | } | ||
306 | |||
307 | /* | ||
308 | * Common interface to the cpufreq core | ||
309 | */ | ||
177 | 310 | ||
178 | static int g5_cpufreq_verify(struct cpufreq_policy *policy) | 311 | static int g5_cpufreq_verify(struct cpufreq_policy *policy) |
179 | { | 312 | { |
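Both the vdnap and the pfunc paths wait for the slew to finish by polling a completion flag for at most about HZ/10 before warning. A self-contained sketch of that bounded-poll idiom in plain C, with a hypothetical poll_done() standing in for the pmf_call_one() readback and wall-clock sleeps in place of jiffies:

    #define _POSIX_C_SOURCE 200809L
    #include <stdio.h>
    #include <time.h>

    /* Hypothetical: reports whether the hardware finished slewing. */
    static int poll_done(void) { static int n; return ++n > 3; }

    /* Poll roughly every millisecond, give up after about 100 ms -- the same
     * shape as the jiffies + HZ/10 loop in the hunk above. */
    static int wait_for_slew(void)
    {
        int i;

        for (i = 0; i < 100; i++) {
            if (poll_done())
                return 0;
            nanosleep(&(struct timespec){ .tv_nsec = 1000000 }, NULL);
        }
        return -1;
    }

    int main(void)
    {
        if (wait_for_slew())
            fprintf(stderr, "cpufreq: timeout waiting for clock slew\n");
        return 0;
    }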
@@ -183,13 +316,30 @@ static int g5_cpufreq_verify(struct cpufreq_policy *policy) | |||
183 | static int g5_cpufreq_target(struct cpufreq_policy *policy, | 316 | static int g5_cpufreq_target(struct cpufreq_policy *policy, |
184 | unsigned int target_freq, unsigned int relation) | 317 | unsigned int target_freq, unsigned int relation) |
185 | { | 318 | { |
186 | unsigned int newstate = 0; | 319 | unsigned int newstate = 0; |
320 | struct cpufreq_freqs freqs; | ||
321 | int rc; | ||
187 | 322 | ||
188 | if (cpufreq_frequency_table_target(policy, g5_cpu_freqs, | 323 | if (cpufreq_frequency_table_target(policy, g5_cpu_freqs, |
189 | target_freq, relation, &newstate)) | 324 | target_freq, relation, &newstate)) |
190 | return -EINVAL; | 325 | return -EINVAL; |
191 | 326 | ||
192 | return g5_switch_freq(newstate); | 327 | if (g5_pmode_cur == newstate) |
328 | return 0; | ||
329 | |||
330 | down(&g5_switch_mutex); | ||
331 | |||
332 | freqs.old = g5_cpu_freqs[g5_pmode_cur].frequency; | ||
333 | freqs.new = g5_cpu_freqs[newstate].frequency; | ||
334 | freqs.cpu = 0; | ||
335 | |||
336 | cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE); | ||
337 | rc = g5_switch_freq(newstate); | ||
338 | cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE); | ||
339 | |||
340 | up(&g5_switch_mutex); | ||
341 | |||
342 | return rc; | ||
193 | } | 343 | } |
194 | 344 | ||
195 | static unsigned int g5_cpufreq_get_speed(unsigned int cpu) | 345 | static unsigned int g5_cpufreq_get_speed(unsigned int cpu) |
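After this hunk the low-level switch helpers only touch the hardware; the PRECHANGE/POSTCHANGE notifications and the mutex are done once, in the ->target() hook that calls them. A compact sketch of that notify-around-the-transition pattern, with a hypothetical notifier and switch routine in place of the cpufreq core calls:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t switch_mutex = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int cur_khz = 2700000;

    static void notify(const char *phase, unsigned int old, unsigned int new)
    { printf("%s: %u -> %u kHz\n", phase, old, new); }

    /* Hypothetical hardware switch, analogous to g5_switch_freq(). */
    static int do_switch(unsigned int khz) { cur_khz = khz; return 0; }

    static int target(unsigned int new_khz)
    {
        unsigned int old;
        int rc;

        if (cur_khz == new_khz)
            return 0;
        pthread_mutex_lock(&switch_mutex);
        old = cur_khz;
        notify("PRECHANGE", old, new_khz);
        rc = do_switch(new_khz);
        notify("POSTCHANGE", old, new_khz);
        pthread_mutex_unlock(&switch_mutex);
        return rc;
    }

    int main(void) { return target(1350000); }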
@@ -205,6 +355,7 @@ static int g5_cpufreq_cpu_init(struct cpufreq_policy *policy) | |||
205 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; | 355 | policy->governor = CPUFREQ_DEFAULT_GOVERNOR; |
206 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; | 356 | policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; |
207 | policy->cur = g5_cpu_freqs[g5_query_freq()].frequency; | 357 | policy->cur = g5_cpu_freqs[g5_query_freq()].frequency; |
358 | policy->cpus = cpu_possible_map; | ||
208 | cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu); | 359 | cpufreq_frequency_table_get_attr(g5_cpu_freqs, policy->cpu); |
209 | 360 | ||
210 | return cpufreq_frequency_table_cpuinfo(policy, | 361 | return cpufreq_frequency_table_cpuinfo(policy, |
@@ -224,19 +375,39 @@ static struct cpufreq_driver g5_cpufreq_driver = { | |||
224 | }; | 375 | }; |
225 | 376 | ||
226 | 377 | ||
227 | static int __init g5_cpufreq_init(void) | 378 | static int __init g5_neo2_cpufreq_init(struct device_node *cpus) |
228 | { | 379 | { |
229 | struct device_node *cpunode; | 380 | struct device_node *cpunode; |
230 | unsigned int psize, ssize; | 381 | unsigned int psize, ssize; |
231 | struct smu_sdbp_header *shdr; | ||
232 | unsigned long max_freq; | 382 | unsigned long max_freq; |
233 | u32 *valp; | 383 | char *freq_method, *volt_method; |
384 | u32 *valp, pvr_hi; | ||
385 | int use_volts_vdnap = 0; | ||
386 | int use_volts_smu = 0; | ||
234 | int rc = -ENODEV; | 387 | int rc = -ENODEV; |
235 | 388 | ||
236 | /* Look for CPU and SMU nodes */ | 389 | /* Check supported platforms */ |
237 | cpunode = of_find_node_by_type(NULL, "cpu"); | 390 | if (machine_is_compatible("PowerMac8,1") || |
238 | if (!cpunode) { | 391 | machine_is_compatible("PowerMac8,2") || |
239 | DBG("No CPU node !\n"); | 392 | machine_is_compatible("PowerMac9,1")) |
393 | use_volts_smu = 1; | ||
394 | else if (machine_is_compatible("PowerMac11,2")) | ||
395 | use_volts_vdnap = 1; | ||
396 | else | ||
397 | return -ENODEV; | ||
398 | |||
399 | /* Get first CPU node */ | ||
400 | for (cpunode = NULL; | ||
401 | (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) { | ||
402 | u32 *reg = | ||
403 | (u32 *)get_property(cpunode, "reg", NULL); | ||
404 | if (reg == NULL || (*reg) != 0) | ||
405 | continue; | ||
406 | if (!strcmp(cpunode->type, "cpu")) | ||
407 | break; | ||
408 | } | ||
409 | if (cpunode == NULL) { | ||
410 | printk(KERN_ERR "cpufreq: Can't find any CPU 0 node\n"); | ||
240 | return -ENODEV; | 411 | return -ENODEV; |
241 | } | 412 | } |
242 | 413 | ||
@@ -246,8 +417,9 @@ static int __init g5_cpufreq_init(void) | |||
246 | DBG("No cpu-version property !\n"); | 417 | DBG("No cpu-version property !\n"); |
247 | goto bail_noprops; | 418 | goto bail_noprops; |
248 | } | 419 | } |
249 | if (((*valp) >> 16) != 0x3c) { | 420 | pvr_hi = (*valp) >> 16; |
250 | DBG("Wrong CPU version: %08x\n", *valp); | 421 | if (pvr_hi != 0x3c && pvr_hi != 0x44) { |
422 | printk(KERN_ERR "cpufreq: Unsupported CPU version\n"); | ||
251 | goto bail_noprops; | 423 | goto bail_noprops; |
252 | } | 424 | } |
253 | 425 | ||
@@ -259,18 +431,50 @@ static int __init g5_cpufreq_init(void) | |||
259 | } | 431 | } |
260 | g5_pmode_max = psize / sizeof(u32) - 1; | 432 | g5_pmode_max = psize / sizeof(u32) - 1; |
261 | 433 | ||
262 | /* Look for the FVT table */ | 434 | if (use_volts_smu) { |
263 | shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL); | 435 | struct smu_sdbp_header *shdr; |
264 | if (!shdr) | 436 | |
265 | goto bail_noprops; | 437 | /* Look for the FVT table */ |
266 | g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1]; | 438 | shdr = smu_get_sdb_partition(SMU_SDB_FVT_ID, NULL); |
267 | ssize = (shdr->len * sizeof(u32)) - sizeof(struct smu_sdbp_header); | 439 | if (!shdr) |
268 | g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt); | 440 | goto bail_noprops; |
269 | g5_fvt_cur = 0; | 441 | g5_fvt_table = (struct smu_sdbp_fvt *)&shdr[1]; |
270 | 442 | ssize = (shdr->len * sizeof(u32)) - | |
271 | /* Sanity checking */ | 443 | sizeof(struct smu_sdbp_header); |
272 | if (g5_fvt_count < 1 || g5_pmode_max < 1) | 444 | g5_fvt_count = ssize / sizeof(struct smu_sdbp_fvt); |
273 | goto bail_noprops; | 445 | g5_fvt_cur = 0; |
446 | |||
447 | /* Sanity checking */ | ||
448 | if (g5_fvt_count < 1 || g5_pmode_max < 1) | ||
449 | goto bail_noprops; | ||
450 | |||
451 | g5_switch_volt = g5_smu_switch_volt; | ||
452 | volt_method = "SMU"; | ||
453 | } else if (use_volts_vdnap) { | ||
454 | struct device_node *root; | ||
455 | |||
456 | root = of_find_node_by_path("/"); | ||
457 | if (root == NULL) { | ||
458 | printk(KERN_ERR "cpufreq: Can't find root of " | ||
459 | "device tree\n"); | ||
460 | goto bail_noprops; | ||
461 | } | ||
462 | pfunc_set_vdnap0 = pmf_find_function(root, "set-vdnap0"); | ||
463 | pfunc_vdnap0_complete = | ||
464 | pmf_find_function(root, "slewing-done"); | ||
465 | if (pfunc_set_vdnap0 == NULL || | ||
466 | pfunc_vdnap0_complete == NULL) { | ||
467 | printk(KERN_ERR "cpufreq: Can't find required " | ||
468 | "platform function\n"); | ||
469 | goto bail_noprops; | ||
470 | } | ||
471 | |||
472 | g5_switch_volt = g5_vdnap_switch_volt; | ||
473 | volt_method = "GPIO"; | ||
474 | } else { | ||
475 | g5_switch_volt = g5_dummy_switch_volt; | ||
476 | volt_method = "none"; | ||
477 | } | ||
274 | 478 | ||
275 | /* | 479 | /* |
276 | * From what I see, clock-frequency is always the maximal frequency. | 480 | * From what I see, clock-frequency is always the maximal frequency. |
@@ -286,19 +490,23 @@ static int __init g5_cpufreq_init(void) | |||
286 | g5_cpu_freqs[0].frequency = max_freq; | 490 | g5_cpu_freqs[0].frequency = max_freq; |
287 | g5_cpu_freqs[1].frequency = max_freq/2; | 491 | g5_cpu_freqs[1].frequency = max_freq/2; |
288 | 492 | ||
289 | /* Check current frequency */ | 493 | /* Set callbacks */ |
290 | g5_pmode_cur = g5_query_freq(); | 494 | g5_switch_freq = g5_scom_switch_freq; |
291 | if (g5_pmode_cur > 1) | 495 | g5_query_freq = g5_scom_query_freq; |
292 | /* We don't support anything but 1:1 and 1:2, fixup ... */ | 496 | freq_method = "SCOM"; |
293 | g5_pmode_cur = 1; | ||
294 | 497 | ||
295 | /* Force apply current frequency to make sure everything is in | 498 | /* Force apply current frequency to make sure everything is in |
296 | * sync (voltage is right for example). Firmware may leave us with | 499 | * sync (voltage is right for example). Firmware may leave us with |
297 | * a strange setting ... | 500 | * a strange setting ... |
298 | */ | 501 | */ |
299 | g5_switch_freq(g5_pmode_cur); | 502 | g5_switch_volt(CPUFREQ_HIGH); |
503 | msleep(10); | ||
504 | g5_pmode_cur = -1; | ||
505 | g5_switch_freq(g5_query_freq()); | ||
300 | 506 | ||
301 | printk(KERN_INFO "Registering G5 CPU frequency driver\n"); | 507 | printk(KERN_INFO "Registering G5 CPU frequency driver\n"); |
508 | printk(KERN_INFO "Frequency method: %s, Voltage method: %s\n", | ||
509 | freq_method, volt_method); | ||
302 | printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", | 510 | printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", |
303 | g5_cpu_freqs[1].frequency/1000, | 511 | g5_cpu_freqs[1].frequency/1000, |
304 | g5_cpu_freqs[0].frequency/1000, | 512 | g5_cpu_freqs[0].frequency/1000, |
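The Neo2 init path now probes which voltage method the machine supports (SMU FVT table, vdnap GPIO platform function, or none) and records the choice in a function pointer that the frequency code calls blindly. A small sketch of that strategy-selection pattern; the enum and printouts are invented for illustration:

    #include <stdio.h>

    static void smu_switch_volt(int m)   { printf("SMU volt, mode %d\n", m); }
    static void vdnap_switch_volt(int m) { printf("GPIO volt, mode %d\n", m); }
    static void dummy_switch_volt(int m) { (void)m; }

    /* Hypothetical result of probing the platform at init time. */
    enum volt_method { VOLT_SMU, VOLT_VDNAP, VOLT_NONE };

    static void (*switch_volt)(int mode);

    static const char *pick_volt_method(enum volt_method m)
    {
        switch (m) {
        case VOLT_SMU:   switch_volt = smu_switch_volt;   return "SMU";
        case VOLT_VDNAP: switch_volt = vdnap_switch_volt; return "GPIO";
        default:         switch_volt = dummy_switch_volt; return "none";
        }
    }

    int main(void)
    {
        printf("Voltage method: %s\n", pick_volt_method(VOLT_VDNAP));
        switch_volt(0);
        return 0;
    }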
@@ -317,6 +525,200 @@ static int __init g5_cpufreq_init(void) | |||
317 | return rc; | 525 | return rc; |
318 | } | 526 | } |
319 | 527 | ||
528 | static int __init g5_pm72_cpufreq_init(struct device_node *cpus) | ||
529 | { | ||
530 | struct device_node *cpuid = NULL, *hwclock = NULL, *cpunode = NULL; | ||
531 | u8 *eeprom = NULL; | ||
532 | u32 *valp; | ||
533 | u64 max_freq, min_freq, ih, il; | ||
534 | int has_volt = 1, rc = 0; | ||
535 | |||
536 | /* Get first CPU node */ | ||
537 | for (cpunode = NULL; | ||
538 | (cpunode = of_get_next_child(cpus, cpunode)) != NULL;) { | ||
539 | if (!strcmp(cpunode->type, "cpu")) | ||
540 | break; | ||
541 | } | ||
542 | if (cpunode == NULL) { | ||
543 | printk(KERN_ERR "cpufreq: Can't find any CPU node\n"); | ||
544 | return -ENODEV; | ||
545 | } | ||
546 | |||
547 | /* Lookup the cpuid eeprom node */ | ||
548 | cpuid = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/cpuid@a0"); | ||
549 | if (cpuid != NULL) | ||
550 | eeprom = (u8 *)get_property(cpuid, "cpuid", NULL); | ||
551 | if (eeprom == NULL) { | ||
552 | printk(KERN_ERR "cpufreq: Can't find cpuid EEPROM !\n"); | ||
553 | rc = -ENODEV; | ||
554 | goto bail; | ||
555 | } | ||
556 | |||
557 | /* Lookup the i2c hwclock */ | ||
558 | for (hwclock = NULL; | ||
559 | (hwclock = of_find_node_by_name(hwclock, "i2c-hwclock")) != NULL;){ | ||
560 | char *loc = get_property(hwclock, "hwctrl-location", NULL); | ||
561 | if (loc == NULL) | ||
562 | continue; | ||
563 | if (strcmp(loc, "CPU CLOCK")) | ||
564 | continue; | ||
565 | if (!get_property(hwclock, "platform-get-frequency", NULL)) | ||
566 | continue; | ||
567 | break; | ||
568 | } | ||
569 | if (hwclock == NULL) { | ||
570 | printk(KERN_ERR "cpufreq: Can't find i2c clock chip !\n"); | ||
571 | rc = -ENODEV; | ||
572 | goto bail; | ||
573 | } | ||
574 | |||
575 | DBG("cpufreq: i2c clock chip found: %s\n", hwclock->full_name); | ||
576 | |||
577 | /* Now get all the platform functions */ | ||
578 | pfunc_cpu_getfreq = | ||
579 | pmf_find_function(hwclock, "get-frequency"); | ||
580 | pfunc_cpu_setfreq_high = | ||
581 | pmf_find_function(hwclock, "set-frequency-high"); | ||
582 | pfunc_cpu_setfreq_low = | ||
583 | pmf_find_function(hwclock, "set-frequency-low"); | ||
584 | pfunc_slewing_done = | ||
585 | pmf_find_function(hwclock, "slewing-done"); | ||
586 | pfunc_cpu0_volt_high = | ||
587 | pmf_find_function(hwclock, "set-voltage-high-0"); | ||
588 | pfunc_cpu0_volt_low = | ||
589 | pmf_find_function(hwclock, "set-voltage-low-0"); | ||
590 | pfunc_cpu1_volt_high = | ||
591 | pmf_find_function(hwclock, "set-voltage-high-1"); | ||
592 | pfunc_cpu1_volt_low = | ||
593 | pmf_find_function(hwclock, "set-voltage-low-1"); | ||
594 | |||
595 | /* Check we have minimum requirements */ | ||
596 | if (pfunc_cpu_getfreq == NULL || pfunc_cpu_setfreq_high == NULL || | ||
597 | pfunc_cpu_setfreq_low == NULL || pfunc_slewing_done == NULL) { | ||
598 | printk(KERN_ERR "cpufreq: Can't find platform functions !\n"); | ||
599 | rc = -ENODEV; | ||
600 | goto bail; | ||
601 | } | ||
602 | |||
603 | /* Check that we have complete sets */ | ||
604 | if (pfunc_cpu0_volt_high == NULL || pfunc_cpu0_volt_low == NULL) { | ||
605 | pmf_put_function(pfunc_cpu0_volt_high); | ||
606 | pmf_put_function(pfunc_cpu0_volt_low); | ||
607 | pfunc_cpu0_volt_high = pfunc_cpu0_volt_low = NULL; | ||
608 | has_volt = 0; | ||
609 | } | ||
610 | if (!has_volt || | ||
611 | pfunc_cpu1_volt_high == NULL || pfunc_cpu1_volt_low == NULL) { | ||
612 | pmf_put_function(pfunc_cpu1_volt_high); | ||
613 | pmf_put_function(pfunc_cpu1_volt_low); | ||
614 | pfunc_cpu1_volt_high = pfunc_cpu1_volt_low = NULL; | ||
615 | } | ||
616 | |||
617 | /* Note: The device tree also contains a "platform-set-values" | ||
618 | * function for which I haven't quite figured out the usage. It | ||
619 | * might have to be called on init and/or wakeup, I'm not too sure | ||
620 | * but things seem to work fine without it so far ... | ||
621 | */ | ||
622 | |||
623 | /* Get max frequency from device-tree */ | ||
624 | valp = (u32 *)get_property(cpunode, "clock-frequency", NULL); | ||
625 | if (!valp) { | ||
626 | printk(KERN_ERR "cpufreq: Can't find CPU frequency !\n"); | ||
627 | rc = -ENODEV; | ||
628 | goto bail; | ||
629 | } | ||
630 | |||
631 | max_freq = (*valp)/1000; | ||
632 | |||
633 | /* Now calculate reduced frequency by using the cpuid input freq | ||
634 | * ratio. This requires 64-bit math unless we are willing to lose | ||
635 | * some precision | ||
636 | */ | ||
637 | ih = *((u32 *)(eeprom + 0x10)); | ||
638 | il = *((u32 *)(eeprom + 0x20)); | ||
639 | min_freq = 0; | ||
640 | if (ih != 0 && il != 0) | ||
641 | min_freq = (max_freq * il) / ih; | ||
642 | |||
643 | /* Sanity check */ | ||
644 | if (min_freq >= max_freq || min_freq < 1000) { | ||
645 | printk(KERN_ERR "cpufreq: Can't calculate low frequency !\n"); | ||
646 | rc = -ENODEV; | ||
647 | goto bail; | ||
648 | } | ||
649 | g5_cpu_freqs[0].frequency = max_freq; | ||
650 | g5_cpu_freqs[1].frequency = min_freq; | ||
651 | |||
652 | /* Set callbacks */ | ||
653 | g5_switch_volt = g5_pfunc_switch_volt; | ||
654 | g5_switch_freq = g5_pfunc_switch_freq; | ||
655 | g5_query_freq = g5_pfunc_query_freq; | ||
656 | |||
657 | /* Force apply current frequency to make sure everything is in | ||
658 | * sync (voltage is right for example). Firmware may leave us with | ||
659 | * a strange setting ... | ||
660 | */ | ||
661 | g5_switch_volt(CPUFREQ_HIGH); | ||
662 | msleep(10); | ||
663 | g5_pmode_cur = -1; | ||
664 | g5_switch_freq(g5_query_freq()); | ||
665 | |||
666 | printk(KERN_INFO "Registering G5 CPU frequency driver\n"); | ||
667 | printk(KERN_INFO "Frequency method: i2c/pfunc, " | ||
668 | "Voltage method: %s\n", has_volt ? "i2c/pfunc" : "none"); | ||
669 | printk(KERN_INFO "Low: %d Mhz, High: %d Mhz, Cur: %d MHz\n", | ||
670 | g5_cpu_freqs[1].frequency/1000, | ||
671 | g5_cpu_freqs[0].frequency/1000, | ||
672 | g5_cpu_freqs[g5_pmode_cur].frequency/1000); | ||
673 | |||
674 | rc = cpufreq_register_driver(&g5_cpufreq_driver); | ||
675 | bail: | ||
676 | if (rc != 0) { | ||
677 | pmf_put_function(pfunc_cpu_getfreq); | ||
678 | pmf_put_function(pfunc_cpu_setfreq_high); | ||
679 | pmf_put_function(pfunc_cpu_setfreq_low); | ||
680 | pmf_put_function(pfunc_slewing_done); | ||
681 | pmf_put_function(pfunc_cpu0_volt_high); | ||
682 | pmf_put_function(pfunc_cpu0_volt_low); | ||
683 | pmf_put_function(pfunc_cpu1_volt_high); | ||
684 | pmf_put_function(pfunc_cpu1_volt_low); | ||
685 | } | ||
686 | of_node_put(hwclock); | ||
687 | of_node_put(cpuid); | ||
688 | of_node_put(cpunode); | ||
689 | |||
690 | return rc; | ||
691 | } | ||
692 | |||
693 | static int __init g5_rm31_cpufreq_init(struct device_node *cpus) | ||
694 | { | ||
695 | /* NYI */ | ||
696 | return 0; | ||
697 | } | ||
698 | |||
699 | static int __init g5_cpufreq_init(void) | ||
700 | { | ||
701 | struct device_node *cpus; | ||
702 | int rc; | ||
703 | |||
704 | cpus = of_find_node_by_path("/cpus"); | ||
705 | if (cpus == NULL) { | ||
706 | DBG("No /cpus node !\n"); | ||
707 | return -ENODEV; | ||
708 | } | ||
709 | |||
710 | if (machine_is_compatible("PowerMac7,2") || | ||
711 | machine_is_compatible("PowerMac7,3")) | ||
712 | rc = g5_pm72_cpufreq_init(cpus); | ||
713 | else if (machine_is_compatible("RackMac3,1")) | ||
714 | rc = g5_rm31_cpufreq_init(cpus); | ||
715 | else | ||
716 | rc = g5_neo2_cpufreq_init(cpus); | ||
717 | |||
718 | of_node_put(cpus); | ||
719 | return rc; | ||
720 | } | ||
721 | |||
320 | module_init(g5_cpufreq_init); | 722 | module_init(g5_cpufreq_init); |
321 | 723 | ||
322 | 724 | ||
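In the PowerMac7,2/7,3 path the low operating point is not listed in the device tree; it is derived from two ratio words read out of the cpuid EEPROM, using 64-bit arithmetic so the intermediate product cannot overflow, then sanity checked. A self-contained sketch of that computation with made-up EEPROM values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t max_khz = 2000000;      /* from "clock-frequency", in kHz  */
        uint32_t ih = 0x00040000;        /* hypothetical ratio words, as    */
        uint32_t il = 0x00028000;        /* read from the cpuid EEPROM      */
        uint64_t min_khz = 0;

        if (ih != 0 && il != 0)
            min_khz = (max_khz * il) / ih;   /* 64-bit product, no overflow */

        if (min_khz >= max_khz || min_khz < 1000) {
            fprintf(stderr, "cpufreq: can't calculate low frequency\n");
            return 1;
        }
        printf("high %llu kHz, low %llu kHz\n",
               (unsigned long long)max_khz, (unsigned long long)min_khz);
        return 0;
    }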
diff --git a/arch/powerpc/platforms/powermac/feature.c b/arch/powerpc/platforms/powermac/feature.c index f6e22da2a5da..558dd0692092 100644 --- a/arch/powerpc/platforms/powermac/feature.c +++ b/arch/powerpc/platforms/powermac/feature.c | |||
@@ -58,12 +58,11 @@ extern int powersave_lowspeed; | |||
58 | extern int powersave_nap; | 58 | extern int powersave_nap; |
59 | extern struct device_node *k2_skiplist[2]; | 59 | extern struct device_node *k2_skiplist[2]; |
60 | 60 | ||
61 | |||
62 | /* | 61 | /* |
63 | * We use a single global lock to protect accesses. Each driver has | 62 | * We use a single global lock to protect accesses. Each driver has |
64 | * to take care of its own locking | 63 | * to take care of its own locking |
65 | */ | 64 | */ |
66 | static DEFINE_SPINLOCK(feature_lock); | 65 | DEFINE_SPINLOCK(feature_lock); |
67 | 66 | ||
68 | #define LOCK(flags) spin_lock_irqsave(&feature_lock, flags); | 67 | #define LOCK(flags) spin_lock_irqsave(&feature_lock, flags); |
69 | #define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags); | 68 | #define UNLOCK(flags) spin_unlock_irqrestore(&feature_lock, flags); |
@@ -101,26 +100,17 @@ static const char *macio_names[] = | |||
101 | "Keylargo", | 100 | "Keylargo", |
102 | "Pangea", | 101 | "Pangea", |
103 | "Intrepid", | 102 | "Intrepid", |
104 | "K2" | 103 | "K2", |
104 | "Shasta", | ||
105 | }; | 105 | }; |
106 | 106 | ||
107 | 107 | ||
108 | struct device_node *uninorth_node; | ||
109 | u32 __iomem *uninorth_base; | ||
108 | 110 | ||
109 | /* | ||
110 | * Uninorth reg. access. Note that Uni-N regs are big endian | ||
111 | */ | ||
112 | |||
113 | #define UN_REG(r) (uninorth_base + ((r) >> 2)) | ||
114 | #define UN_IN(r) (in_be32(UN_REG(r))) | ||
115 | #define UN_OUT(r,v) (out_be32(UN_REG(r), (v))) | ||
116 | #define UN_BIS(r,v) (UN_OUT((r), UN_IN(r) | (v))) | ||
117 | #define UN_BIC(r,v) (UN_OUT((r), UN_IN(r) & ~(v))) | ||
118 | |||
119 | static struct device_node *uninorth_node; | ||
120 | static u32 __iomem *uninorth_base; | ||
121 | static u32 uninorth_rev; | 111 | static u32 uninorth_rev; |
122 | static int uninorth_u3; | 112 | static int uninorth_maj; |
123 | static void __iomem *u3_ht; | 113 | static void __iomem *u3_ht_base; |
124 | 114 | ||
125 | /* | 115 | /* |
126 | * For each motherboard family, we have a table of functions pointers | 116 | * For each motherboard family, we have a table of functions pointers |
@@ -1399,8 +1389,15 @@ static long g5_fw_enable(struct device_node *node, long param, long value) | |||
1399 | static long g5_mpic_enable(struct device_node *node, long param, long value) | 1389 | static long g5_mpic_enable(struct device_node *node, long param, long value) |
1400 | { | 1390 | { |
1401 | unsigned long flags; | 1391 | unsigned long flags; |
1392 | struct device_node *parent = of_get_parent(node); | ||
1393 | int is_u3; | ||
1402 | 1394 | ||
1403 | if (node->parent == NULL || strcmp(node->parent->name, "u3")) | 1395 | if (parent == NULL) |
1396 | return 0; | ||
1397 | is_u3 = strcmp(parent->name, "u3") == 0 || | ||
1398 | strcmp(parent->name, "u4") == 0; | ||
1399 | of_node_put(parent); | ||
1400 | if (!is_u3) | ||
1404 | return 0; | 1401 | return 0; |
1405 | 1402 | ||
1406 | LOCK(flags); | 1403 | LOCK(flags); |
@@ -1445,20 +1442,53 @@ static long g5_i2s_enable(struct device_node *node, long param, long value) | |||
1445 | /* Very crude implementation for now */ | 1442 | /* Very crude implementation for now */ |
1446 | struct macio_chip *macio = &macio_chips[0]; | 1443 | struct macio_chip *macio = &macio_chips[0]; |
1447 | unsigned long flags; | 1444 | unsigned long flags; |
1448 | 1445 | int cell; | |
1449 | if (value == 0) | 1446 | u32 fcrs[3][3] = { |
1450 | return 0; /* don't disable yet */ | 1447 | { 0, |
1448 | K2_FCR1_I2S0_CELL_ENABLE | | ||
1449 | K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE, | ||
1450 | KL3_I2S0_CLK18_ENABLE | ||
1451 | }, | ||
1452 | { KL0_SCC_A_INTF_ENABLE, | ||
1453 | K2_FCR1_I2S1_CELL_ENABLE | | ||
1454 | K2_FCR1_I2S1_CLK_ENABLE_BIT | K2_FCR1_I2S1_ENABLE, | ||
1455 | KL3_I2S1_CLK18_ENABLE | ||
1456 | }, | ||
1457 | { KL0_SCC_B_INTF_ENABLE, | ||
1458 | SH_FCR1_I2S2_CELL_ENABLE | | ||
1459 | SH_FCR1_I2S2_CLK_ENABLE_BIT | SH_FCR1_I2S2_ENABLE, | ||
1460 | SH_FCR3_I2S2_CLK18_ENABLE | ||
1461 | }, | ||
1462 | }; | ||
1463 | |||
1464 | if (macio->type != macio_keylargo2 && macio->type != macio_shasta) | ||
1465 | return -ENODEV; | ||
1466 | if (strncmp(node->name, "i2s-", 4)) | ||
1467 | return -ENODEV; | ||
1468 | cell = node->name[4] - 'a'; | ||
1469 | switch(cell) { | ||
1470 | case 0: | ||
1471 | case 1: | ||
1472 | break; | ||
1473 | case 2: | ||
1474 | if (macio->type == macio_shasta) | ||
1475 | break; | ||
1476 | default: | ||
1477 | return -ENODEV; | ||
1478 | } | ||
1451 | 1479 | ||
1452 | LOCK(flags); | 1480 | LOCK(flags); |
1453 | MACIO_BIS(KEYLARGO_FCR3, KL3_CLK45_ENABLE | KL3_CLK49_ENABLE | | 1481 | if (value) { |
1454 | KL3_I2S0_CLK18_ENABLE); | 1482 | MACIO_BIC(KEYLARGO_FCR0, fcrs[cell][0]); |
1455 | udelay(10); | 1483 | MACIO_BIS(KEYLARGO_FCR1, fcrs[cell][1]); |
1456 | MACIO_BIS(KEYLARGO_FCR1, K2_FCR1_I2S0_CELL_ENABLE | | 1484 | MACIO_BIS(KEYLARGO_FCR3, fcrs[cell][2]); |
1457 | K2_FCR1_I2S0_CLK_ENABLE_BIT | K2_FCR1_I2S0_ENABLE); | 1485 | } else { |
1486 | MACIO_BIC(KEYLARGO_FCR3, fcrs[cell][2]); | ||
1487 | MACIO_BIC(KEYLARGO_FCR1, fcrs[cell][1]); | ||
1488 | MACIO_BIS(KEYLARGO_FCR0, fcrs[cell][0]); | ||
1489 | } | ||
1458 | udelay(10); | 1490 | udelay(10); |
1459 | MACIO_BIC(KEYLARGO_FCR1, K2_FCR1_I2S0_RESET); | ||
1460 | UNLOCK(flags); | 1491 | UNLOCK(flags); |
1461 | udelay(10); | ||
1462 | 1492 | ||
1463 | return 0; | 1493 | return 0; |
1464 | } | 1494 | } |
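The rewritten g5_i2s_enable() is table driven: each i2s cell has a triple of FCR bit masks that are applied in one order to enable the cell and cleared in the reverse order to disable it. A toy sketch of that symmetric, table-driven enable/disable; the registers are modelled as plain words and the masks are invented:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fcr[4];                     /* stand-ins for FCR0..FCR3 */

    static const uint32_t cell_bits[2][3] = {
        /* FCR0 bits to clear, FCR1 bits to set, FCR3 bits to set */
        { 0x0001, 0x0300, 0x0010 },             /* i2s-a (invented masks)   */
        { 0x0002, 0x0c00, 0x0020 },             /* i2s-b (invented masks)   */
    };

    static void i2s_enable(int cell, int on)
    {
        const uint32_t *b = cell_bits[cell];

        if (on) {
            fcr[0] &= ~b[0];
            fcr[1] |=  b[1];
            fcr[3] |=  b[2];
        } else {                                /* reverse order on disable */
            fcr[3] &= ~b[2];
            fcr[1] &= ~b[1];
            fcr[0] |=  b[0];
        }
    }

    int main(void)
    {
        i2s_enable(0, 1);
        printf("FCR1 = %08x\n", fcr[1]);
        i2s_enable(0, 0);
        return 0;
    }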
@@ -1473,7 +1503,7 @@ static long g5_reset_cpu(struct device_node *node, long param, long value) | |||
1473 | struct device_node *np; | 1503 | struct device_node *np; |
1474 | 1504 | ||
1475 | macio = &macio_chips[0]; | 1505 | macio = &macio_chips[0]; |
1476 | if (macio->type != macio_keylargo2) | 1506 | if (macio->type != macio_keylargo2 && macio->type != macio_shasta) |
1477 | return -ENODEV; | 1507 | return -ENODEV; |
1478 | 1508 | ||
1479 | np = find_path_device("/cpus"); | 1509 | np = find_path_device("/cpus"); |
@@ -1512,14 +1542,17 @@ static long g5_reset_cpu(struct device_node *node, long param, long value) | |||
1512 | */ | 1542 | */ |
1513 | void g5_phy_disable_cpu1(void) | 1543 | void g5_phy_disable_cpu1(void) |
1514 | { | 1544 | { |
1515 | UN_OUT(U3_API_PHY_CONFIG_1, 0); | 1545 | if (uninorth_maj == 3) |
1546 | UN_OUT(U3_API_PHY_CONFIG_1, 0); | ||
1516 | } | 1547 | } |
1517 | #endif /* CONFIG_POWER4 */ | 1548 | #endif /* CONFIG_POWER4 */ |
1518 | 1549 | ||
1519 | #ifndef CONFIG_POWER4 | 1550 | #ifndef CONFIG_POWER4 |
1520 | 1551 | ||
1521 | static void | 1552 | |
1522 | keylargo_shutdown(struct macio_chip *macio, int sleep_mode) | 1553 | #ifdef CONFIG_PM |
1554 | |||
1555 | static void keylargo_shutdown(struct macio_chip *macio, int sleep_mode) | ||
1523 | { | 1556 | { |
1524 | u32 temp; | 1557 | u32 temp; |
1525 | 1558 | ||
@@ -1572,8 +1605,7 @@ keylargo_shutdown(struct macio_chip *macio, int sleep_mode) | |||
1572 | (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1); | 1605 | (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1); |
1573 | } | 1606 | } |
1574 | 1607 | ||
1575 | static void | 1608 | static void pangea_shutdown(struct macio_chip *macio, int sleep_mode) |
1576 | pangea_shutdown(struct macio_chip *macio, int sleep_mode) | ||
1577 | { | 1609 | { |
1578 | u32 temp; | 1610 | u32 temp; |
1579 | 1611 | ||
@@ -1606,8 +1638,7 @@ pangea_shutdown(struct macio_chip *macio, int sleep_mode) | |||
1606 | (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1); | 1638 | (void)MACIO_IN32(KEYLARGO_FCR0); mdelay(1); |
1607 | } | 1639 | } |
1608 | 1640 | ||
1609 | static void | 1641 | static void intrepid_shutdown(struct macio_chip *macio, int sleep_mode) |
1610 | intrepid_shutdown(struct macio_chip *macio, int sleep_mode) | ||
1611 | { | 1642 | { |
1612 | u32 temp; | 1643 | u32 temp; |
1613 | 1644 | ||
@@ -1635,124 +1666,6 @@ intrepid_shutdown(struct macio_chip *macio, int sleep_mode) | |||
1635 | } | 1666 | } |
1636 | 1667 | ||
1637 | 1668 | ||
1638 | void pmac_tweak_clock_spreading(int enable) | ||
1639 | { | ||
1640 | struct macio_chip *macio = &macio_chips[0]; | ||
1641 | |||
1642 | /* Hack for doing clock spreading on some machines PowerBooks and | ||
1643 | * iBooks. This implements the "platform-do-clockspreading" OF | ||
1644 | * property as decoded manually on various models. For safety, we also | ||
1645 | * check the product ID in the device-tree in cases we'll whack the i2c | ||
1646 | * chip to make reasonably sure we won't set wrong values in there | ||
1647 | * | ||
1648 | * Of course, ultimately, we have to implement a real parser for | ||
1649 | * the platform-do-* stuff... | ||
1650 | */ | ||
1651 | |||
1652 | if (macio->type == macio_intrepid) { | ||
1653 | struct device_node *clock = | ||
1654 | of_find_node_by_path("/uni-n@f8000000/hw-clock"); | ||
1655 | if (clock && get_property(clock, "platform-do-clockspreading", | ||
1656 | NULL)) { | ||
1657 | printk(KERN_INFO "%sabling clock spreading on Intrepid" | ||
1658 | " ASIC\n", enable ? "En" : "Dis"); | ||
1659 | if (enable) | ||
1660 | UN_OUT(UNI_N_CLOCK_SPREADING, 2); | ||
1661 | else | ||
1662 | UN_OUT(UNI_N_CLOCK_SPREADING, 0); | ||
1663 | mdelay(40); | ||
1664 | } | ||
1665 | of_node_put(clock); | ||
1666 | } | ||
1667 | |||
1668 | while (machine_is_compatible("PowerBook5,2") || | ||
1669 | machine_is_compatible("PowerBook5,3") || | ||
1670 | machine_is_compatible("PowerBook6,2") || | ||
1671 | machine_is_compatible("PowerBook6,3")) { | ||
1672 | struct device_node *ui2c = of_find_node_by_type(NULL, "i2c"); | ||
1673 | struct device_node *dt = of_find_node_by_name(NULL, "device-tree"); | ||
1674 | u8 buffer[9]; | ||
1675 | u32 *productID; | ||
1676 | int i, rc, changed = 0; | ||
1677 | |||
1678 | if (dt == NULL) | ||
1679 | break; | ||
1680 | productID = (u32 *)get_property(dt, "pid#", NULL); | ||
1681 | if (productID == NULL) | ||
1682 | break; | ||
1683 | while(ui2c) { | ||
1684 | struct device_node *p = of_get_parent(ui2c); | ||
1685 | if (p && !strcmp(p->name, "uni-n")) | ||
1686 | break; | ||
1687 | ui2c = of_find_node_by_type(ui2c, "i2c"); | ||
1688 | } | ||
1689 | if (ui2c == NULL) | ||
1690 | break; | ||
1691 | DBG("Trying to bump clock speed for PID: %08x...\n", *productID); | ||
1692 | rc = pmac_low_i2c_open(ui2c, 1); | ||
1693 | if (rc != 0) | ||
1694 | break; | ||
1695 | pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined); | ||
1696 | rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9); | ||
1697 | DBG("read result: %d,", rc); | ||
1698 | if (rc != 0) { | ||
1699 | pmac_low_i2c_close(ui2c); | ||
1700 | break; | ||
1701 | } | ||
1702 | for (i=0; i<9; i++) | ||
1703 | DBG(" %02x", buffer[i]); | ||
1704 | DBG("\n"); | ||
1705 | |||
1706 | switch(*productID) { | ||
1707 | case 0x1182: /* AlBook 12" rev 2 */ | ||
1708 | case 0x1183: /* iBook G4 12" */ | ||
1709 | buffer[0] = (buffer[0] & 0x8f) | 0x70; | ||
1710 | buffer[2] = (buffer[2] & 0x7f) | 0x00; | ||
1711 | buffer[5] = (buffer[5] & 0x80) | 0x31; | ||
1712 | buffer[6] = (buffer[6] & 0x40) | 0xb0; | ||
1713 | buffer[7] = (buffer[7] & 0x00) | (enable ? 0xc0 : 0xba); | ||
1714 | buffer[8] = (buffer[8] & 0x00) | 0x30; | ||
1715 | changed = 1; | ||
1716 | break; | ||
1717 | case 0x3142: /* AlBook 15" (ATI M10) */ | ||
1718 | case 0x3143: /* AlBook 17" (ATI M10) */ | ||
1719 | buffer[0] = (buffer[0] & 0xaf) | 0x50; | ||
1720 | buffer[2] = (buffer[2] & 0x7f) | 0x00; | ||
1721 | buffer[5] = (buffer[5] & 0x80) | 0x31; | ||
1722 | buffer[6] = (buffer[6] & 0x40) | 0xb0; | ||
1723 | buffer[7] = (buffer[7] & 0x00) | (enable ? 0xd0 : 0xc0); | ||
1724 | buffer[8] = (buffer[8] & 0x00) | 0x30; | ||
1725 | changed = 1; | ||
1726 | break; | ||
1727 | default: | ||
1728 | DBG("i2c-hwclock: Machine model not handled\n"); | ||
1729 | break; | ||
1730 | } | ||
1731 | if (!changed) { | ||
1732 | pmac_low_i2c_close(ui2c); | ||
1733 | break; | ||
1734 | } | ||
1735 | printk(KERN_INFO "%sabling clock spreading on i2c clock chip\n", | ||
1736 | enable ? "En" : "Dis"); | ||
1737 | |||
1738 | pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_stdsub); | ||
1739 | rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_write, 0x80, buffer, 9); | ||
1740 | DBG("write result: %d,", rc); | ||
1741 | pmac_low_i2c_setmode(ui2c, pmac_low_i2c_mode_combined); | ||
1742 | rc = pmac_low_i2c_xfer(ui2c, 0xd2 | pmac_low_i2c_read, 0x80, buffer, 9); | ||
1743 | DBG("read result: %d,", rc); | ||
1744 | if (rc != 0) { | ||
1745 | pmac_low_i2c_close(ui2c); | ||
1746 | break; | ||
1747 | } | ||
1748 | for (i=0; i<9; i++) | ||
1749 | DBG(" %02x", buffer[i]); | ||
1750 | pmac_low_i2c_close(ui2c); | ||
1751 | break; | ||
1752 | } | ||
1753 | } | ||
1754 | |||
1755 | |||
1756 | static int | 1669 | static int |
1757 | core99_sleep(void) | 1670 | core99_sleep(void) |
1758 | { | 1671 | { |
@@ -1909,6 +1822,8 @@ core99_wake_up(void) | |||
1909 | return 0; | 1822 | return 0; |
1910 | } | 1823 | } |
1911 | 1824 | ||
1825 | #endif /* CONFIG_PM */ | ||
1826 | |||
1912 | static long | 1827 | static long |
1913 | core99_sleep_state(struct device_node *node, long param, long value) | 1828 | core99_sleep_state(struct device_node *node, long param, long value) |
1914 | { | 1829 | { |
@@ -1930,10 +1845,13 @@ core99_sleep_state(struct device_node *node, long param, long value) | |||
1930 | if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0) | 1845 | if ((pmac_mb.board_flags & PMAC_MB_CAN_SLEEP) == 0) |
1931 | return -EPERM; | 1846 | return -EPERM; |
1932 | 1847 | ||
1848 | #ifdef CONFIG_PM | ||
1933 | if (value == 1) | 1849 | if (value == 1) |
1934 | return core99_sleep(); | 1850 | return core99_sleep(); |
1935 | else if (value == 0) | 1851 | else if (value == 0) |
1936 | return core99_wake_up(); | 1852 | return core99_wake_up(); |
1853 | |||
1854 | #endif /* CONFIG_PM */ | ||
1937 | return 0; | 1855 | return 0; |
1938 | } | 1856 | } |
1939 | 1857 | ||
@@ -2057,7 +1975,9 @@ static struct feature_table_entry core99_features[] = { | |||
2057 | { PMAC_FTR_USB_ENABLE, core99_usb_enable }, | 1975 | { PMAC_FTR_USB_ENABLE, core99_usb_enable }, |
2058 | { PMAC_FTR_1394_ENABLE, core99_firewire_enable }, | 1976 | { PMAC_FTR_1394_ENABLE, core99_firewire_enable }, |
2059 | { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power }, | 1977 | { PMAC_FTR_1394_CABLE_POWER, core99_firewire_cable_power }, |
1978 | #ifdef CONFIG_PM | ||
2060 | { PMAC_FTR_SLEEP_STATE, core99_sleep_state }, | 1979 | { PMAC_FTR_SLEEP_STATE, core99_sleep_state }, |
1980 | #endif | ||
2061 | #ifdef CONFIG_SMP | 1981 | #ifdef CONFIG_SMP |
2062 | { PMAC_FTR_RESET_CPU, core99_reset_cpu }, | 1982 | { PMAC_FTR_RESET_CPU, core99_reset_cpu }, |
2063 | #endif /* CONFIG_SMP */ | 1983 | #endif /* CONFIG_SMP */ |
@@ -2427,6 +2347,14 @@ static struct pmac_mb_def pmac_mb_defs[] = { | |||
2427 | PMAC_TYPE_POWERMAC_G5_U3L, g5_features, | 2347 | PMAC_TYPE_POWERMAC_G5_U3L, g5_features, |
2428 | 0, | 2348 | 0, |
2429 | }, | 2349 | }, |
2350 | { "PowerMac11,2", "PowerMac G5 Dual Core", | ||
2351 | PMAC_TYPE_POWERMAC_G5_U3L, g5_features, | ||
2352 | 0, | ||
2353 | }, | ||
2354 | { "PowerMac12,1", "iMac G5 (iSight)", | ||
2355 | PMAC_TYPE_POWERMAC_G5_U3L, g5_features, | ||
2356 | 0, | ||
2357 | }, | ||
2430 | { "RackMac3,1", "XServe G5", | 2358 | { "RackMac3,1", "XServe G5", |
2431 | PMAC_TYPE_XSERVE_G5, g5_features, | 2359 | PMAC_TYPE_XSERVE_G5, g5_features, |
2432 | 0, | 2360 | 0, |
@@ -2539,6 +2467,11 @@ static int __init probe_motherboard(void) | |||
2539 | pmac_mb.model_name = "Unknown K2-based"; | 2467 | pmac_mb.model_name = "Unknown K2-based"; |
2540 | pmac_mb.features = g5_features; | 2468 | pmac_mb.features = g5_features; |
2541 | break; | 2469 | break; |
2470 | case macio_shasta: | ||
2471 | pmac_mb.model_id = PMAC_TYPE_UNKNOWN_SHASTA; | ||
2472 | pmac_mb.model_name = "Unknown Shasta-based"; | ||
2473 | pmac_mb.features = g5_features; | ||
2474 | break; | ||
2542 | #endif /* CONFIG_POWER4 */ | 2475 | #endif /* CONFIG_POWER4 */ |
2543 | default: | 2476 | default: |
2544 | return -ENODEV; | 2477 | return -ENODEV; |
@@ -2607,6 +2540,8 @@ found: | |||
2607 | */ | 2540 | */ |
2608 | static void __init probe_uninorth(void) | 2541 | static void __init probe_uninorth(void) |
2609 | { | 2542 | { |
2543 | u32 *addrp; | ||
2544 | phys_addr_t address; | ||
2610 | unsigned long actrl; | 2545 | unsigned long actrl; |
2611 | 2546 | ||
2612 | /* Locate core99 Uni-N */ | 2547 | /* Locate core99 Uni-N */ |
@@ -2614,22 +2549,31 @@ static void __init probe_uninorth(void) | |||
2614 | /* Locate G5 u3 */ | 2549 | /* Locate G5 u3 */ |
2615 | if (uninorth_node == NULL) { | 2550 | if (uninorth_node == NULL) { |
2616 | uninorth_node = of_find_node_by_name(NULL, "u3"); | 2551 | uninorth_node = of_find_node_by_name(NULL, "u3"); |
2617 | uninorth_u3 = 1; | 2552 | uninorth_maj = 3; |
2618 | } | 2553 | } |
2619 | if (uninorth_node && uninorth_node->n_addrs > 0) { | 2554 | /* Locate G5 u4 */ |
2620 | unsigned long address = uninorth_node->addrs[0].address; | 2555 | if (uninorth_node == NULL) { |
2621 | uninorth_base = ioremap(address, 0x40000); | 2556 | uninorth_node = of_find_node_by_name(NULL, "u4"); |
2622 | uninorth_rev = in_be32(UN_REG(UNI_N_VERSION)); | 2557 | uninorth_maj = 4; |
2623 | if (uninorth_u3) | 2558 | } |
2624 | u3_ht = ioremap(address + U3_HT_CONFIG_BASE, 0x1000); | 2559 | if (uninorth_node == NULL) |
2625 | } else | ||
2626 | uninorth_node = NULL; | ||
2627 | |||
2628 | if (!uninorth_node) | ||
2629 | return; | 2560 | return; |
2630 | 2561 | ||
2631 | printk(KERN_INFO "Found %s memory controller & host bridge, revision: %d\n", | 2562 | addrp = (u32 *)get_property(uninorth_node, "reg", NULL); |
2632 | uninorth_u3 ? "U3" : "UniNorth", uninorth_rev); | 2563 | if (addrp == NULL) |
2564 | return; | ||
2565 | address = of_translate_address(uninorth_node, addrp); | ||
2566 | if (address == 0) | ||
2567 | return; | ||
2568 | uninorth_base = ioremap(address, 0x40000); | ||
2569 | uninorth_rev = in_be32(UN_REG(UNI_N_VERSION)); | ||
2570 | if (uninorth_maj == 3 || uninorth_maj == 4) | ||
2571 | u3_ht_base = ioremap(address + U3_HT_CONFIG_BASE, 0x1000); | ||
2572 | |||
2573 | printk(KERN_INFO "Found %s memory controller & host bridge" | ||
2574 | " @ 0x%08x revision: 0x%02x\n", uninorth_maj == 3 ? "U3" : | ||
2575 | uninorth_maj == 4 ? "U4" : "UniNorth", | ||
2576 | (unsigned int)address, uninorth_rev); | ||
2633 | printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base); | 2577 | printk(KERN_INFO "Mapped at 0x%08lx\n", (unsigned long)uninorth_base); |
2634 | 2578 | ||
2635 | /* Set the arbiter QAck delay according to what Apple does | 2579 | /* Set the arbiter QAck delay according to what Apple does |
@@ -2637,7 +2581,8 @@ static void __init probe_uninorth(void) | |||
2637 | if (uninorth_rev < 0x11) { | 2581 | if (uninorth_rev < 0x11) { |
2638 | actrl = UN_IN(UNI_N_ARB_CTRL) & ~UNI_N_ARB_CTRL_QACK_DELAY_MASK; | 2582 | actrl = UN_IN(UNI_N_ARB_CTRL) & ~UNI_N_ARB_CTRL_QACK_DELAY_MASK; |
2639 | actrl |= ((uninorth_rev < 3) ? UNI_N_ARB_CTRL_QACK_DELAY105 : | 2583 | actrl |= ((uninorth_rev < 3) ? UNI_N_ARB_CTRL_QACK_DELAY105 : |
2640 | UNI_N_ARB_CTRL_QACK_DELAY) << UNI_N_ARB_CTRL_QACK_DELAY_SHIFT; | 2584 | UNI_N_ARB_CTRL_QACK_DELAY) << |
2585 | UNI_N_ARB_CTRL_QACK_DELAY_SHIFT; | ||
2641 | UN_OUT(UNI_N_ARB_CTRL, actrl); | 2586 | UN_OUT(UNI_N_ARB_CTRL, actrl); |
2642 | } | 2587 | } |
2643 | 2588 | ||
@@ -2645,7 +2590,8 @@ static void __init probe_uninorth(void) | |||
2645 | * revs 1.5 to 2.0 and Pangea. Seem to toggle the UniN Maxbus/PCI | 2590 | * revs 1.5 to 2.0 and Pangea. Seem to toggle the UniN Maxbus/PCI |
2646 | * memory timeout | 2591 | * memory timeout |
2647 | */ | 2592 | */ |
2648 | if ((uninorth_rev >= 0x11 && uninorth_rev <= 0x24) || uninorth_rev == 0xc0) | 2593 | if ((uninorth_rev >= 0x11 && uninorth_rev <= 0x24) || |
2594 | uninorth_rev == 0xc0) | ||
2649 | UN_OUT(0x2160, UN_IN(0x2160) & 0x00ffffff); | 2595 | UN_OUT(0x2160, UN_IN(0x2160) & 0x00ffffff); |
2650 | } | 2596 | } |
2651 | 2597 | ||
@@ -2653,18 +2599,17 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ | |||
2653 | { | 2599 | { |
2654 | struct device_node* node; | 2600 | struct device_node* node; |
2655 | int i; | 2601 | int i; |
2656 | volatile u32 __iomem * base; | 2602 | volatile u32 __iomem *base; |
2657 | u32* revp; | 2603 | u32 *addrp, *revp; |
2604 | phys_addr_t addr; | ||
2605 | u64 size; | ||
2658 | 2606 | ||
2659 | node = find_devices(name); | 2607 | for (node = NULL; (node = of_find_node_by_name(node, name)) != NULL;) { |
2660 | if (!node || !node->n_addrs) | 2608 | if (!compat) |
2661 | return; | 2609 | break; |
2662 | if (compat) | 2610 | if (device_is_compatible(node, compat)) |
2663 | do { | 2611 | break; |
2664 | if (device_is_compatible(node, compat)) | 2612 | } |
2665 | break; | ||
2666 | node = node->next; | ||
2667 | } while (node); | ||
2668 | if (!node) | 2613 | if (!node) |
2669 | return; | 2614 | return; |
2670 | for(i=0; i<MAX_MACIO_CHIPS; i++) { | 2615 | for(i=0; i<MAX_MACIO_CHIPS; i++) { |
@@ -2673,22 +2618,38 @@ static void __init probe_one_macio(const char *name, const char *compat, int typ | |||
2673 | if (macio_chips[i].of_node == node) | 2618 | if (macio_chips[i].of_node == node) |
2674 | return; | 2619 | return; |
2675 | } | 2620 | } |
2621 | |||
2676 | if (i >= MAX_MACIO_CHIPS) { | 2622 | if (i >= MAX_MACIO_CHIPS) { |
2677 | printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n"); | 2623 | printk(KERN_ERR "pmac_feature: Please increase MAX_MACIO_CHIPS !\n"); |
2678 | printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name); | 2624 | printk(KERN_ERR "pmac_feature: %s skipped\n", node->full_name); |
2679 | return; | 2625 | return; |
2680 | } | 2626 | } |
2681 | base = ioremap(node->addrs[0].address, node->addrs[0].size); | 2627 | addrp = of_get_pci_address(node, 0, &size, NULL); |
2628 | if (addrp == NULL) { | ||
2629 | printk(KERN_ERR "pmac_feature: %s: can't find base !\n", | ||
2630 | node->full_name); | ||
2631 | return; | ||
2632 | } | ||
2633 | addr = of_translate_address(node, addrp); | ||
2634 | if (addr == 0) { | ||
2635 | printk(KERN_ERR "pmac_feature: %s, can't translate base !\n", | ||
2636 | node->full_name); | ||
2637 | return; | ||
2638 | } | ||
2639 | base = ioremap(addr, (unsigned long)size); | ||
2682 | if (!base) { | 2640 | if (!base) { |
2683 | printk(KERN_ERR "pmac_feature: Can't map mac-io chip !\n"); | 2641 | printk(KERN_ERR "pmac_feature: %s, can't map mac-io chip !\n", |
2642 | node->full_name); | ||
2684 | return; | 2643 | return; |
2685 | } | 2644 | } |
2686 | if (type == macio_keylargo) { | 2645 | if (type == macio_keylargo || type == macio_keylargo2) { |
2687 | u32 *did = (u32 *)get_property(node, "device-id", NULL); | 2646 | u32 *did = (u32 *)get_property(node, "device-id", NULL); |
2688 | if (*did == 0x00000025) | 2647 | if (*did == 0x00000025) |
2689 | type = macio_pangea; | 2648 | type = macio_pangea; |
2690 | if (*did == 0x0000003e) | 2649 | if (*did == 0x0000003e) |
2691 | type = macio_intrepid; | 2650 | type = macio_intrepid; |
2651 | if (*did == 0x0000004f) | ||
2652 | type = macio_shasta; | ||
2692 | } | 2653 | } |
2693 | macio_chips[i].of_node = node; | 2654 | macio_chips[i].of_node = node; |
2694 | macio_chips[i].type = type; | 2655 | macio_chips[i].type = type; |
@@ -2787,7 +2748,8 @@ set_initial_features(void) | |||
2787 | } | 2748 | } |
2788 | 2749 | ||
2789 | #ifdef CONFIG_POWER4 | 2750 | #ifdef CONFIG_POWER4 |
2790 | if (macio_chips[0].type == macio_keylargo2) { | 2751 | if (macio_chips[0].type == macio_keylargo2 || |
2752 | macio_chips[0].type == macio_shasta) { | ||
2791 | #ifndef CONFIG_SMP | 2753 | #ifndef CONFIG_SMP |
2792 | /* On SMP machines running UP, we have the second CPU eating | 2754 | /* On SMP machines running UP, we have the second CPU eating |
2793 | * bus cycles. We need to take it off the bus. This is done | 2755 | * bus cycles. We need to take it off the bus. This is done |
@@ -2896,12 +2858,6 @@ set_initial_features(void) | |||
2896 | MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N); | 2858 | MACIO_BIC(HEATHROW_FCR, HRW_SOUND_POWER_N); |
2897 | } | 2859 | } |
2898 | 2860 | ||
2899 | /* Some machine models need the clock chip to be properly setup for | ||
2900 | * clock spreading now. This should be a platform function but we | ||
2901 | * don't do these at the moment | ||
2902 | */ | ||
2903 | pmac_tweak_clock_spreading(1); | ||
2904 | |||
2905 | #endif /* CONFIG_POWER4 */ | 2861 | #endif /* CONFIG_POWER4 */ |
2906 | 2862 | ||
2907 | /* On all machines, switch modem & serial ports off */ | 2863 | /* On all machines, switch modem & serial ports off */ |
@@ -2929,9 +2885,6 @@ pmac_feature_init(void) | |||
2929 | return; | 2885 | return; |
2930 | } | 2886 | } |
2931 | 2887 | ||
2932 | /* Setup low-level i2c stuffs */ | ||
2933 | pmac_init_low_i2c(); | ||
2934 | |||
2935 | /* Probe machine type */ | 2888 | /* Probe machine type */ |
2936 | if (probe_motherboard()) | 2889 | if (probe_motherboard()) |
2937 | printk(KERN_WARNING "Unknown PowerMac !\n"); | 2890 | printk(KERN_WARNING "Unknown PowerMac !\n"); |
@@ -2942,26 +2895,6 @@ pmac_feature_init(void) | |||
2942 | set_initial_features(); | 2895 | set_initial_features(); |
2943 | } | 2896 | } |
2944 | 2897 | ||
2945 | int __init pmac_feature_late_init(void) | ||
2946 | { | ||
2947 | #if 0 | ||
2948 | struct device_node *np; | ||
2949 | |||
2950 | /* Request some resources late */ | ||
2951 | if (uninorth_node) | ||
2952 | request_OF_resource(uninorth_node, 0, NULL); | ||
2953 | np = find_devices("hammerhead"); | ||
2954 | if (np) | ||
2955 | request_OF_resource(np, 0, NULL); | ||
2956 | np = find_devices("interrupt-controller"); | ||
2957 | if (np) | ||
2958 | request_OF_resource(np, 0, NULL); | ||
2959 | #endif | ||
2960 | return 0; | ||
2961 | } | ||
2962 | |||
2963 | device_initcall(pmac_feature_late_init); | ||
2964 | |||
2965 | #if 0 | 2898 | #if 0 |
2966 | static void dump_HT_speeds(char *name, u32 cfg, u32 frq) | 2899 | static void dump_HT_speeds(char *name, u32 cfg, u32 frq) |
2967 | { | 2900 | { |
@@ -2984,9 +2917,9 @@ void __init pmac_check_ht_link(void) | |||
2984 | u8 px_bus, px_devfn; | 2917 | u8 px_bus, px_devfn; |
2985 | struct pci_controller *px_hose; | 2918 | struct pci_controller *px_hose; |
2986 | 2919 | ||
2987 | (void)in_be32(u3_ht + U3_HT_LINK_COMMAND); | 2920 | (void)in_be32(u3_ht_base + U3_HT_LINK_COMMAND); |
2988 | ucfg = cfg = in_be32(u3_ht + U3_HT_LINK_CONFIG); | 2921 | ucfg = cfg = in_be32(u3_ht_base + U3_HT_LINK_CONFIG); |
2989 | ufreq = freq = in_be32(u3_ht + U3_HT_LINK_FREQ); | 2922 | ufreq = freq = in_be32(u3_ht_base + U3_HT_LINK_FREQ); |
2990 | dump_HT_speeds("U3 HyperTransport", cfg, freq); | 2923 | dump_HT_speeds("U3 HyperTransport", cfg, freq); |
2991 | 2924 | ||
2992 | pcix_node = of_find_compatible_node(NULL, "pci", "pci-x"); | 2925 | pcix_node = of_find_compatible_node(NULL, "pci", "pci-x"); |
diff --git a/arch/powerpc/platforms/powermac/low_i2c.c b/arch/powerpc/platforms/powermac/low_i2c.c index f3f39e8e337a..535c802b369f 100644 --- a/arch/powerpc/platforms/powermac/low_i2c.c +++ b/arch/powerpc/platforms/powermac/low_i2c.c | |||
@@ -1,22 +1,34 @@ | |||
1 | /* | 1 | /* |
2 | * arch/ppc/platforms/pmac_low_i2c.c | 2 | * arch/powerpc/platforms/powermac/low_i2c.c |
3 | * | 3 | * |
4 | * Copyright (C) 2003 Ben. Herrenschmidt (benh@kernel.crashing.org) | 4 | * Copyright (C) 2003-2005 Ben. Herrenschmidt (benh@kernel.crashing.org) |
5 | * | 5 | * |
6 | * This program is free software; you can redistribute it and/or | 6 | * This program is free software; you can redistribute it and/or |
7 | * modify it under the terms of the GNU General Public License | 7 | * modify it under the terms of the GNU General Public License |
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | * | 10 | * |
11 | * This file contains some low-level i2c access routines that | 11 | * The linux i2c layer isn't completely suitable for our needs for various |
12 | * need to be used by various bits of the PowerMac platform code | 12 | * reasons ranging from too late initialisation to semantics not perfectly |
13 | * at times where the real asynchronous & interrupt driven driver | 13 | * matching some requirements of the apple platform functions etc... |
14 | * cannot be used. The API borrows some semantics from the darwin | 14 | * |
15 | * driver in order to ease the implementation of the platform | 15 | * This file thus provides a simple low level unified i2c interface for |
16 | * properties parser | 16 | * powermac that covers the various types of i2c busses used in Apple machines. |
17 | * For now, keywest, PMU and SMU, though we could add Cuda, or other bit | ||
18 | * banging busses found on older chipstes in earlier machines if we ever need | ||
19 | * one of them. | ||
20 | * | ||
21 | * The drivers in this file are synchronous/blocking. In addition, the | ||
22 | * keywest one is fairly slow due to the use of msleep instead of interrupts | ||
23 | * as the interrupt is currently used by i2c-keywest. In the long run, we | ||
24 | * might want to get rid of those high-level interfaces to the linux i2c layer | ||
25 | * either completely (converting all drivers) or by replacing them all with a | ||
26 | * single stub driver on top of this one. Once done, the interrupt will be | ||
27 | * available for our use. | ||
17 | */ | 28 | */ |
18 | 29 | ||
19 | #undef DEBUG | 30 | #undef DEBUG |
31 | #undef DEBUG_LOW | ||
20 | 32 | ||
21 | #include <linux/config.h> | 33 | #include <linux/config.h> |
22 | #include <linux/types.h> | 34 | #include <linux/types.h> |
@@ -25,66 +37,91 @@ | |||
25 | #include <linux/module.h> | 37 | #include <linux/module.h> |
26 | #include <linux/adb.h> | 38 | #include <linux/adb.h> |
27 | #include <linux/pmu.h> | 39 | #include <linux/pmu.h> |
40 | #include <linux/delay.h> | ||
41 | #include <linux/completion.h> | ||
42 | #include <linux/platform_device.h> | ||
43 | #include <linux/interrupt.h> | ||
44 | #include <linux/completion.h> | ||
45 | #include <linux/timer.h> | ||
28 | #include <asm/keylargo.h> | 46 | #include <asm/keylargo.h> |
29 | #include <asm/uninorth.h> | 47 | #include <asm/uninorth.h> |
30 | #include <asm/io.h> | 48 | #include <asm/io.h> |
31 | #include <asm/prom.h> | 49 | #include <asm/prom.h> |
32 | #include <asm/machdep.h> | 50 | #include <asm/machdep.h> |
51 | #include <asm/smu.h> | ||
52 | #include <asm/pmac_pfunc.h> | ||
33 | #include <asm/pmac_low_i2c.h> | 53 | #include <asm/pmac_low_i2c.h> |
34 | 54 | ||
35 | #define MAX_LOW_I2C_HOST 4 | ||
36 | |||
37 | #ifdef DEBUG | 55 | #ifdef DEBUG |
38 | #define DBG(x...) do {\ | 56 | #define DBG(x...) do {\ |
39 | printk(KERN_DEBUG "KW:" x); \ | 57 | printk(KERN_DEBUG "low_i2c:" x); \ |
40 | } while(0) | 58 | } while(0) |
41 | #else | 59 | #else |
42 | #define DBG(x...) | 60 | #define DBG(x...) |
43 | #endif | 61 | #endif |
44 | 62 | ||
45 | struct low_i2c_host; | 63 | #ifdef DEBUG_LOW |
46 | 64 | #define DBG_LOW(x...) do {\ | |
47 | typedef int (*low_i2c_func_t)(struct low_i2c_host *host, u8 addr, u8 sub, u8 *data, int len); | 65 | printk(KERN_DEBUG "low_i2c:" x); \ |
66 | } while(0) | ||
67 | #else | ||
68 | #define DBG_LOW(x...) | ||
69 | #endif | ||
48 | 70 | ||
49 | struct low_i2c_host | ||
50 | { | ||
51 | struct device_node *np; /* OF device node */ | ||
52 | struct semaphore mutex; /* Access mutex for use by i2c-keywest */ | ||
53 | low_i2c_func_t func; /* Access function */ | ||
54 | unsigned int is_open : 1; /* Poor man's access control */ | ||
55 | int mode; /* Current mode */ | ||
56 | int channel; /* Current channel */ | ||
57 | int num_channels; /* Number of channels */ | ||
58 | void __iomem *base; /* For keywest-i2c, base address */ | ||
59 | int bsteps; /* And register stepping */ | ||
60 | int speed; /* And speed */ | ||
61 | }; | ||
62 | 71 | ||
63 | static struct low_i2c_host low_i2c_hosts[MAX_LOW_I2C_HOST]; | 72 | static int pmac_i2c_force_poll = 1; |
64 | 73 | ||
65 | /* No locking is necessary on allocation, we are running way before | 74 | /* |
66 | * anything can race with us | 75 | * A bus structure. Each bus in the system has such a structure associated. |
67 | */ | 76 | */ |
68 | static struct low_i2c_host *find_low_i2c_host(struct device_node *np) | 77 | struct pmac_i2c_bus |
69 | { | 78 | { |
70 | int i; | 79 | struct list_head link; |
80 | struct device_node *controller; | ||
81 | struct device_node *busnode; | ||
82 | int type; | ||
83 | int flags; | ||
84 | struct i2c_adapter *adapter; | ||
85 | void *hostdata; | ||
86 | int channel; /* some hosts have multiple */ | ||
87 | int mode; /* current mode */ | ||
88 | struct semaphore sem; | ||
89 | int opened; | ||
90 | int polled; /* open mode */ | ||
91 | struct platform_device *platform_dev; | ||
92 | |||
93 | /* ops */ | ||
94 | int (*open)(struct pmac_i2c_bus *bus); | ||
95 | void (*close)(struct pmac_i2c_bus *bus); | ||
96 | int (*xfer)(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, | ||
97 | u32 subaddr, u8 *data, int len); | ||
98 | }; | ||
71 | 99 | ||
72 | for (i = 0; i < MAX_LOW_I2C_HOST; i++) | 100 | static LIST_HEAD(pmac_i2c_busses); |
73 | if (low_i2c_hosts[i].np == np) | ||
74 | return &low_i2c_hosts[i]; | ||
75 | return NULL; | ||
76 | } | ||
77 | 101 | ||
78 | /* | 102 | /* |
79 | * | 103 | * Keywest implementation |
80 | * i2c-keywest implementation (UniNorth, U2, U3, Keylargo's) | ||
81 | * | ||
82 | */ | 104 | */ |
83 | 105 | ||
84 | /* | 106 | struct pmac_i2c_host_kw |
85 | * Keywest i2c definitions borrowed from drivers/i2c/i2c-keywest.h, | 107 | { |
86 | * should be moved somewhere in include/asm-ppc/ | 108 | struct semaphore mutex; /* Access mutex for use by |
87 | */ | 109 | * i2c-keywest */ |
110 | void __iomem *base; /* register base address */ | ||
111 | int bsteps; /* register stepping */ | ||
112 | int speed; /* speed */ | ||
113 | int irq; | ||
114 | u8 *data; | ||
115 | unsigned len; | ||
116 | int state; | ||
117 | int rw; | ||
118 | int polled; | ||
119 | int result; | ||
120 | struct completion complete; | ||
121 | spinlock_t lock; | ||
122 | struct timer_list timeout_timer; | ||
123 | }; | ||
124 | |||
88 | /* Register indices */ | 125 | /* Register indices */ |
89 | typedef enum { | 126 | typedef enum { |
90 | reg_mode = 0, | 127 | reg_mode = 0, |
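The rewritten low_i2c keeps one pmac_i2c_bus per bus on a global list, and each bus carries open/close/xfer function pointers so the keywest, PMU and SMU back ends can share a single front end. A minimal self-contained C sketch of that list-of-busses, dispatch-through-ops shape; the bus fields and the back end here are invented for illustration:

    #include <stdio.h>

    struct bus;
    struct bus_ops {
        int  (*open)(struct bus *b);
        void (*close)(struct bus *b);
        int  (*xfer)(struct bus *b, unsigned char addrdir,
                     unsigned char *data, int len);
    };

    struct bus {
        const char           *name;
        const struct bus_ops *ops;
        struct bus           *next;         /* simple singly-linked list */
    };

    static struct bus *busses;

    static int  kw_open(struct bus *b)  { printf("open %s\n", b->name); return 0; }
    static void kw_close(struct bus *b) { printf("close %s\n", b->name); }
    static int  kw_xfer(struct bus *b, unsigned char a, unsigned char *d, int l)
    { (void)d; printf("%s: xfer addr 0x%02x len %d\n", b->name, a, l); return 0; }

    static const struct bus_ops kw_ops = { kw_open, kw_close, kw_xfer };
    static struct bus kw_bus = { "keywest@0", &kw_ops, NULL };

    int main(void)
    {
        unsigned char buf[2] = { 0 };
        struct bus *b;

        kw_bus.next = busses;               /* register the bus */
        busses = &kw_bus;

        for (b = busses; b; b = b->next) {  /* find and use a bus */
            if (b->ops->open(b) == 0) {
                b->ops->xfer(b, 0xd2, buf, sizeof(buf));
                b->ops->close(b);
            }
        }
        return 0;
    }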
@@ -97,6 +134,8 @@ typedef enum { | |||
97 | reg_data | 134 | reg_data |
98 | } reg_t; | 135 | } reg_t; |
99 | 136 | ||
137 | /* The Tumbler audio equalizer can be really slow sometimes */ | ||
138 | #define KW_POLL_TIMEOUT (2*HZ) | ||
100 | 139 | ||
101 | /* Mode register */ | 140 | /* Mode register */ |
102 | #define KW_I2C_MODE_100KHZ 0x00 | 141 | #define KW_I2C_MODE_100KHZ 0x00 |
@@ -140,8 +179,9 @@ enum { | |||
140 | }; | 179 | }; |
141 | 180 | ||
142 | #define WRONG_STATE(name) do {\ | 181 | #define WRONG_STATE(name) do {\ |
143 | printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s (isr: %02x)\n", \ | 182 | printk(KERN_DEBUG "KW: wrong state. Got %s, state: %s " \ |
144 | name, __kw_state_names[state], isr); \ | 183 | "(isr: %02x)\n", \ |
184 | name, __kw_state_names[host->state], isr); \ | ||
145 | } while(0) | 185 | } while(0) |
146 | 186 | ||
147 | static const char *__kw_state_names[] = { | 187 | static const char *__kw_state_names[] = { |
@@ -153,120 +193,137 @@ static const char *__kw_state_names[] = { | |||
153 | "state_dead" | 193 | "state_dead" |
154 | }; | 194 | }; |
155 | 195 | ||
156 | static inline u8 __kw_read_reg(struct low_i2c_host *host, reg_t reg) | 196 | static inline u8 __kw_read_reg(struct pmac_i2c_host_kw *host, reg_t reg) |
157 | { | 197 | { |
158 | return readb(host->base + (((unsigned int)reg) << host->bsteps)); | 198 | return readb(host->base + (((unsigned int)reg) << host->bsteps)); |
159 | } | 199 | } |
160 | 200 | ||
161 | static inline void __kw_write_reg(struct low_i2c_host *host, reg_t reg, u8 val) | 201 | static inline void __kw_write_reg(struct pmac_i2c_host_kw *host, |
202 | reg_t reg, u8 val) | ||
162 | { | 203 | { |
163 | writeb(val, host->base + (((unsigned)reg) << host->bsteps)); | 204 | writeb(val, host->base + (((unsigned)reg) << host->bsteps)); |
164 | (void)__kw_read_reg(host, reg_subaddr); | 205 | (void)__kw_read_reg(host, reg_subaddr); |
165 | } | 206 | } |
166 | 207 | ||
167 | #define kw_write_reg(reg, val) __kw_write_reg(host, reg, val) | 208 | #define kw_write_reg(reg, val) __kw_write_reg(host, reg, val) |
168 | #define kw_read_reg(reg) __kw_read_reg(host, reg) | 209 | #define kw_read_reg(reg) __kw_read_reg(host, reg) |
169 | |||
170 | 210 | ||
171 | /* Don't schedule, the g5 fan controller is too | 211 | static u8 kw_i2c_wait_interrupt(struct pmac_i2c_host_kw *host) |
172 | * timing sensitive | ||
173 | */ | ||
174 | static u8 kw_wait_interrupt(struct low_i2c_host* host) | ||
175 | { | 212 | { |
176 | int i, j; | 213 | int i, j; |
177 | u8 isr; | 214 | u8 isr; |
178 | 215 | ||
179 | for (i = 0; i < 100000; i++) { | 216 | for (i = 0; i < 1000; i++) { |
180 | isr = kw_read_reg(reg_isr) & KW_I2C_IRQ_MASK; | 217 | isr = kw_read_reg(reg_isr) & KW_I2C_IRQ_MASK; |
181 | if (isr != 0) | 218 | if (isr != 0) |
182 | return isr; | 219 | return isr; |
183 | 220 | ||
184 | /* This code is used with the timebase frozen, we cannot rely | 221 | /* This code is used with the timebase frozen, we cannot rely |
185 | * on udelay ! For now, just use a bogus loop | 222 | * on udelay nor schedule when in polled mode ! |
223 | * For now, just use a bogus loop.... | ||
186 | */ | 224 | */ |
187 | for (j = 1; j < 10000; j++) | 225 | if (host->polled) { |
188 | mb(); | 226 | for (j = 1; j < 100000; j++) |
227 | mb(); | ||
228 | } else | ||
229 | msleep(1); | ||
189 | } | 230 | } |
190 | return isr; | 231 | return isr; |
191 | } | 232 | } |
192 | 233 | ||
193 | static int kw_handle_interrupt(struct low_i2c_host *host, int state, int rw, int *rc, u8 **data, int *len, u8 isr) | 234 | static void kw_i2c_handle_interrupt(struct pmac_i2c_host_kw *host, u8 isr) |
194 | { | 235 | { |
195 | u8 ack; | 236 | u8 ack; |
196 | 237 | ||
197 | DBG("kw_handle_interrupt(%s, isr: %x)\n", __kw_state_names[state], isr); | 238 | DBG_LOW("kw_handle_interrupt(%s, isr: %x)\n", |
239 | __kw_state_names[host->state], isr); | ||
240 | |||
241 | if (host->state == state_idle) { | ||
242 | printk(KERN_WARNING "low_i2c: Keywest got an out of state" | ||
243 | " interrupt, ignoring\n"); | ||
244 | kw_write_reg(reg_isr, isr); | ||
245 | return; | ||
246 | } | ||
198 | 247 | ||
199 | if (isr == 0) { | 248 | if (isr == 0) { |
200 | if (state != state_stop) { | 249 | if (host->state != state_stop) { |
201 | DBG("KW: Timeout !\n"); | 250 | DBG_LOW("KW: Timeout !\n"); |
202 | *rc = -EIO; | 251 | host->result = -EIO; |
203 | goto stop; | 252 | goto stop; |
204 | } | 253 | } |
205 | if (state == state_stop) { | 254 | if (host->state == state_stop) { |
206 | ack = kw_read_reg(reg_status); | 255 | ack = kw_read_reg(reg_status); |
207 | if (!(ack & KW_I2C_STAT_BUSY)) { | 256 | if (ack & KW_I2C_STAT_BUSY) |
208 | state = state_idle; | 257 | kw_write_reg(reg_status, 0); |
209 | kw_write_reg(reg_ier, 0x00); | 258 | host->state = state_idle; |
210 | } | 259 | kw_write_reg(reg_ier, 0x00); |
260 | if (!host->polled) | ||
261 | complete(&host->complete); | ||
211 | } | 262 | } |
212 | return state; | 263 | return; |
213 | } | 264 | } |
214 | 265 | ||
215 | if (isr & KW_I2C_IRQ_ADDR) { | 266 | if (isr & KW_I2C_IRQ_ADDR) { |
216 | ack = kw_read_reg(reg_status); | 267 | ack = kw_read_reg(reg_status); |
217 | if (state != state_addr) { | 268 | if (host->state != state_addr) { |
218 | kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR); | 269 | kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR); |
219 | WRONG_STATE("KW_I2C_IRQ_ADDR"); | 270 | WRONG_STATE("KW_I2C_IRQ_ADDR"); |
220 | *rc = -EIO; | 271 | host->result = -EIO; |
221 | goto stop; | 272 | goto stop; |
222 | } | 273 | } |
223 | if ((ack & KW_I2C_STAT_LAST_AAK) == 0) { | 274 | if ((ack & KW_I2C_STAT_LAST_AAK) == 0) { |
224 | *rc = -ENODEV; | 275 | host->result = -ENODEV; |
225 | DBG("KW: NAK on address\n"); | 276 | DBG_LOW("KW: NAK on address\n"); |
226 | return state_stop; | 277 | host->state = state_stop; |
278 | return; | ||
227 | } else { | 279 | } else { |
228 | if (rw) { | 280 | if (host->len == 0) { |
229 | state = state_read; | 281 | kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR); |
230 | if (*len > 1) | 282 | goto stop; |
231 | kw_write_reg(reg_control, KW_I2C_CTL_AAK); | 283 | } |
284 | if (host->rw) { | ||
285 | host->state = state_read; | ||
286 | if (host->len > 1) | ||
287 | kw_write_reg(reg_control, | ||
288 | KW_I2C_CTL_AAK); | ||
232 | } else { | 289 | } else { |
233 | state = state_write; | 290 | host->state = state_write; |
234 | kw_write_reg(reg_data, **data); | 291 | kw_write_reg(reg_data, *(host->data++)); |
235 | (*data)++; (*len)--; | 292 | host->len--; |
236 | } | 293 | } |
237 | } | 294 | } |
238 | kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR); | 295 | kw_write_reg(reg_isr, KW_I2C_IRQ_ADDR); |
239 | } | 296 | } |
240 | 297 | ||
241 | if (isr & KW_I2C_IRQ_DATA) { | 298 | if (isr & KW_I2C_IRQ_DATA) { |
242 | if (state == state_read) { | 299 | if (host->state == state_read) { |
243 | **data = kw_read_reg(reg_data); | 300 | *(host->data++) = kw_read_reg(reg_data); |
244 | (*data)++; (*len)--; | 301 | host->len--; |
245 | kw_write_reg(reg_isr, KW_I2C_IRQ_DATA); | 302 | kw_write_reg(reg_isr, KW_I2C_IRQ_DATA); |
246 | if ((*len) == 0) | 303 | if (host->len == 0) |
247 | state = state_stop; | 304 | host->state = state_stop; |
248 | else if ((*len) == 1) | 305 | else if (host->len == 1) |
249 | kw_write_reg(reg_control, 0); | 306 | kw_write_reg(reg_control, 0); |
250 | } else if (state == state_write) { | 307 | } else if (host->state == state_write) { |
251 | ack = kw_read_reg(reg_status); | 308 | ack = kw_read_reg(reg_status); |
252 | if ((ack & KW_I2C_STAT_LAST_AAK) == 0) { | 309 | if ((ack & KW_I2C_STAT_LAST_AAK) == 0) { |
253 | DBG("KW: nack on data write\n"); | 310 | DBG_LOW("KW: nack on data write\n"); |
254 | *rc = -EIO; | 311 | host->result = -EIO; |
255 | goto stop; | 312 | goto stop; |
256 | } else if (*len) { | 313 | } else if (host->len) { |
257 | kw_write_reg(reg_data, **data); | 314 | kw_write_reg(reg_data, *(host->data++)); |
258 | (*data)++; (*len)--; | 315 | host->len--; |
259 | } else { | 316 | } else { |
260 | kw_write_reg(reg_control, KW_I2C_CTL_STOP); | 317 | kw_write_reg(reg_control, KW_I2C_CTL_STOP); |
261 | state = state_stop; | 318 | host->state = state_stop; |
262 | *rc = 0; | 319 | host->result = 0; |
263 | } | 320 | } |
264 | kw_write_reg(reg_isr, KW_I2C_IRQ_DATA); | 321 | kw_write_reg(reg_isr, KW_I2C_IRQ_DATA); |
265 | } else { | 322 | } else { |
266 | kw_write_reg(reg_isr, KW_I2C_IRQ_DATA); | 323 | kw_write_reg(reg_isr, KW_I2C_IRQ_DATA); |
267 | WRONG_STATE("KW_I2C_IRQ_DATA"); | 324 | WRONG_STATE("KW_I2C_IRQ_DATA"); |
268 | if (state != state_stop) { | 325 | if (host->state != state_stop) { |
269 | *rc = -EIO; | 326 | host->result = -EIO; |
270 | goto stop; | 327 | goto stop; |
271 | } | 328 | } |
272 | } | 329 | } |
@@ -274,98 +331,194 @@ static int kw_handle_interrupt(struct low_i2c_host *host, int state, int rw, int | |||
274 | 331 | ||
275 | if (isr & KW_I2C_IRQ_STOP) { | 332 | if (isr & KW_I2C_IRQ_STOP) { |
276 | kw_write_reg(reg_isr, KW_I2C_IRQ_STOP); | 333 | kw_write_reg(reg_isr, KW_I2C_IRQ_STOP); |
277 | if (state != state_stop) { | 334 | if (host->state != state_stop) { |
278 | WRONG_STATE("KW_I2C_IRQ_STOP"); | 335 | WRONG_STATE("KW_I2C_IRQ_STOP"); |
279 | *rc = -EIO; | 336 | host->result = -EIO; |
280 | } | 337 | } |
281 | return state_idle; | 338 | host->state = state_idle; |
339 | if (!host->polled) | ||
340 | complete(&host->complete); | ||
282 | } | 341 | } |
283 | 342 | ||
284 | if (isr & KW_I2C_IRQ_START) | 343 | if (isr & KW_I2C_IRQ_START) |
285 | kw_write_reg(reg_isr, KW_I2C_IRQ_START); | 344 | kw_write_reg(reg_isr, KW_I2C_IRQ_START); |
286 | 345 | ||
287 | return state; | 346 | return; |
288 | |||
289 | stop: | 347 | stop: |
290 | kw_write_reg(reg_control, KW_I2C_CTL_STOP); | 348 | kw_write_reg(reg_control, KW_I2C_CTL_STOP); |
291 | return state_stop; | 349 | host->state = state_stop; |
350 | return; | ||
292 | } | 351 | } |
293 | 352 | ||
294 | static int keywest_low_i2c_func(struct low_i2c_host *host, u8 addr, u8 subaddr, u8 *data, int len) | 353 | /* Interrupt handler */ |
354 | static irqreturn_t kw_i2c_irq(int irq, void *dev_id, struct pt_regs *regs) | ||
295 | { | 355 | { |
356 | struct pmac_i2c_host_kw *host = dev_id; | ||
357 | unsigned long flags; | ||
358 | |||
359 | spin_lock_irqsave(&host->lock, flags); | ||
360 | del_timer(&host->timeout_timer); | ||
361 | kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr)); | ||
362 | if (host->state != state_idle) { | ||
363 | host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT; | ||
364 | add_timer(&host->timeout_timer); | ||
365 | } | ||
366 | spin_unlock_irqrestore(&host->lock, flags); | ||
367 | return IRQ_HANDLED; | ||
368 | } | ||
369 | |||
370 | static void kw_i2c_timeout(unsigned long data) | ||
371 | { | ||
372 | struct pmac_i2c_host_kw *host = (struct pmac_i2c_host_kw *)data; | ||
373 | unsigned long flags; | ||
374 | |||
375 | spin_lock_irqsave(&host->lock, flags); | ||
376 | kw_i2c_handle_interrupt(host, kw_read_reg(reg_isr)); | ||
377 | if (host->state != state_idle) { | ||
378 | host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT; | ||
379 | add_timer(&host->timeout_timer); | ||
380 | } | ||
381 | spin_unlock_irqrestore(&host->lock, flags); | ||
382 | } | ||
383 | |||
384 | static int kw_i2c_open(struct pmac_i2c_bus *bus) | ||
385 | { | ||
386 | struct pmac_i2c_host_kw *host = bus->hostdata; | ||
387 | down(&host->mutex); | ||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | static void kw_i2c_close(struct pmac_i2c_bus *bus) | ||
392 | { | ||
393 | struct pmac_i2c_host_kw *host = bus->hostdata; | ||
394 | up(&host->mutex); | ||
395 | } | ||
396 | |||
397 | static int kw_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, | ||
398 | u32 subaddr, u8 *data, int len) | ||
399 | { | ||
400 | struct pmac_i2c_host_kw *host = bus->hostdata; | ||
296 | u8 mode_reg = host->speed; | 401 | u8 mode_reg = host->speed; |
297 | int state = state_addr; | 402 | int use_irq = host->irq != NO_IRQ && !bus->polled; |
298 | int rc = 0; | ||
299 | 403 | ||
300 | /* Setup mode & subaddress if any */ | 404 | /* Setup mode & subaddress if any */ |
301 | switch(host->mode) { | 405 | switch(bus->mode) { |
302 | case pmac_low_i2c_mode_dumb: | 406 | case pmac_i2c_mode_dumb: |
303 | printk(KERN_ERR "low_i2c: Dumb mode not supported !\n"); | ||
304 | return -EINVAL; | 407 | return -EINVAL; |
305 | case pmac_low_i2c_mode_std: | 408 | case pmac_i2c_mode_std: |
306 | mode_reg |= KW_I2C_MODE_STANDARD; | 409 | mode_reg |= KW_I2C_MODE_STANDARD; |
410 | if (subsize != 0) | ||
411 | return -EINVAL; | ||
307 | break; | 412 | break; |
308 | case pmac_low_i2c_mode_stdsub: | 413 | case pmac_i2c_mode_stdsub: |
309 | mode_reg |= KW_I2C_MODE_STANDARDSUB; | 414 | mode_reg |= KW_I2C_MODE_STANDARDSUB; |
415 | if (subsize != 1) | ||
416 | return -EINVAL; | ||
310 | break; | 417 | break; |
311 | case pmac_low_i2c_mode_combined: | 418 | case pmac_i2c_mode_combined: |
312 | mode_reg |= KW_I2C_MODE_COMBINED; | 419 | mode_reg |= KW_I2C_MODE_COMBINED; |
420 | if (subsize != 1) | ||
421 | return -EINVAL; | ||
313 | break; | 422 | break; |
314 | } | 423 | } |
315 | 424 | ||
316 | /* Setup channel & clear pending irqs */ | 425 | /* Setup channel & clear pending irqs */ |
317 | kw_write_reg(reg_isr, kw_read_reg(reg_isr)); | 426 | kw_write_reg(reg_isr, kw_read_reg(reg_isr)); |
318 | kw_write_reg(reg_mode, mode_reg | (host->channel << 4)); | 427 | kw_write_reg(reg_mode, mode_reg | (bus->channel << 4)); |
319 | kw_write_reg(reg_status, 0); | 428 | kw_write_reg(reg_status, 0); |
320 | 429 | ||
321 | /* Set up address and r/w bit */ | 430 | /* Set up address and r/w bit, strip possible stale bus number from |
322 | kw_write_reg(reg_addr, addr); | 431 | * address top bits |
432 | */ | ||
433 | kw_write_reg(reg_addr, addrdir & 0xff); | ||
323 | 434 | ||
324 | /* Set up the sub address */ | 435 | /* Set up the sub address */ |
325 | if ((mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB | 436 | if ((mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_STANDARDSUB |
326 | || (mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED) | 437 | || (mode_reg & KW_I2C_MODE_MODE_MASK) == KW_I2C_MODE_COMBINED) |
327 | kw_write_reg(reg_subaddr, subaddr); | 438 | kw_write_reg(reg_subaddr, subaddr); |
328 | 439 | ||
329 | /* Start sending address & disable interrupt*/ | 440 | /* Prepare for async operations */ |
330 | kw_write_reg(reg_ier, 0 /*KW_I2C_IRQ_MASK*/); | 441 | host->data = data; |
442 | host->len = len; | ||
443 | host->state = state_addr; | ||
444 | host->result = 0; | ||
445 | host->rw = (addrdir & 1); | ||
446 | host->polled = bus->polled; | ||
447 | |||
448 | /* Enable interrupt if not using polled mode and interrupt is | ||
449 | * available | ||
450 | */ | ||
451 | if (use_irq) { | ||
452 | /* Clear completion */ | ||
453 | INIT_COMPLETION(host->complete); | ||
454 | /* Ack stale interrupts */ | ||
455 | kw_write_reg(reg_isr, kw_read_reg(reg_isr)); | ||
456 | /* Arm timeout */ | ||
457 | host->timeout_timer.expires = jiffies + KW_POLL_TIMEOUT; | ||
458 | add_timer(&host->timeout_timer); | ||
459 | /* Enable emission */ | ||
460 | kw_write_reg(reg_ier, KW_I2C_IRQ_MASK); | ||
461 | } | ||
462 | |||
463 | /* Start sending address */ | ||
331 | kw_write_reg(reg_control, KW_I2C_CTL_XADDR); | 464 | kw_write_reg(reg_control, KW_I2C_CTL_XADDR); |
332 | 465 | ||
333 | /* State machine, to turn into an interrupt handler */ | 466 | /* Wait for completion */ |
334 | while(state != state_idle) { | 467 | if (use_irq) |
335 | u8 isr = kw_wait_interrupt(host); | 468 | wait_for_completion(&host->complete); |
336 | state = kw_handle_interrupt(host, state, addr & 1, &rc, &data, &len, isr); | 469 | else { |
470 | while(host->state != state_idle) { | ||
471 | unsigned long flags; | ||
472 | |||
473 | u8 isr = kw_i2c_wait_interrupt(host); | ||
474 | spin_lock_irqsave(&host->lock, flags); | ||
475 | kw_i2c_handle_interrupt(host, isr); | ||
476 | spin_unlock_irqrestore(&host->lock, flags); | ||
477 | } | ||
337 | } | 478 | } |
338 | 479 | ||
339 | return rc; | 480 | /* Disable emission */ |
481 | kw_write_reg(reg_ier, 0); | ||
482 | |||
483 | return host->result; | ||
340 | } | 484 | } |
341 | 485 | ||
342 | static void keywest_low_i2c_add(struct device_node *np) | 486 | static struct pmac_i2c_host_kw *__init kw_i2c_host_init(struct device_node *np) |
343 | { | 487 | { |
344 | struct low_i2c_host *host = find_low_i2c_host(NULL); | 488 | struct pmac_i2c_host_kw *host; |
345 | u32 *psteps, *prate, steps, aoffset = 0; | 489 | u32 *psteps, *prate, *addrp, steps; |
346 | struct device_node *parent; | ||
347 | 490 | ||
491 | host = kzalloc(sizeof(struct pmac_i2c_host_kw), GFP_KERNEL); | ||
348 | if (host == NULL) { | 492 | if (host == NULL) { |
349 | printk(KERN_ERR "low_i2c: Can't allocate host for %s\n", | 493 | printk(KERN_ERR "low_i2c: Can't allocate host for %s\n", |
350 | np->full_name); | 494 | np->full_name); |
351 | return; | 495 | return NULL; |
352 | } | 496 | } |
353 | memset(host, 0, sizeof(*host)); | ||
354 | 497 | ||
498 | /* Apple is kind enough to provide a valid AAPL,address property | ||
499 | * on all i2c keywest nodes so far ... we would have to fall back | ||
500 | * to macio parsing if that wasn't the case | ||
501 | */ | ||
502 | addrp = (u32 *)get_property(np, "AAPL,address", NULL); | ||
503 | if (addrp == NULL) { | ||
504 | printk(KERN_ERR "low_i2c: Can't find address for %s\n", | ||
505 | np->full_name); | ||
506 | kfree(host); | ||
507 | return NULL; | ||
508 | } | ||
355 | init_MUTEX(&host->mutex); | 509 | init_MUTEX(&host->mutex); |
356 | host->np = of_node_get(np); | 510 | init_completion(&host->complete); |
511 | spin_lock_init(&host->lock); | ||
512 | init_timer(&host->timeout_timer); | ||
513 | host->timeout_timer.function = kw_i2c_timeout; | ||
514 | host->timeout_timer.data = (unsigned long)host; | ||
515 | |||
357 | psteps = (u32 *)get_property(np, "AAPL,address-step", NULL); | 516 | psteps = (u32 *)get_property(np, "AAPL,address-step", NULL); |
358 | steps = psteps ? (*psteps) : 0x10; | 517 | steps = psteps ? (*psteps) : 0x10; |
359 | for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++) | 518 | for (host->bsteps = 0; (steps & 0x01) == 0; host->bsteps++) |
360 | steps >>= 1; | 519 | steps >>= 1; |
361 | parent = of_get_parent(np); | ||
362 | host->num_channels = 1; | ||
363 | if (parent && parent->name[0] == 'u') { | ||
364 | host->num_channels = 2; | ||
365 | aoffset = 3; | ||
366 | } | ||
367 | /* Select interface rate */ | 520 | /* Select interface rate */ |
368 | host->speed = KW_I2C_MODE_100KHZ; | 521 | host->speed = KW_I2C_MODE_25KHZ; |
369 | prate = (u32 *)get_property(np, "AAPL,i2c-rate", NULL); | 522 | prate = (u32 *)get_property(np, "AAPL,i2c-rate", NULL); |
370 | if (prate) switch(*prate) { | 523 | if (prate) switch(*prate) { |
371 | case 100: | 524 | case 100: |
@@ -378,146 +531,981 @@ static void keywest_low_i2c_add(struct device_node *np) | |||
378 | host->speed = KW_I2C_MODE_25KHZ; | 531 | host->speed = KW_I2C_MODE_25KHZ; |
379 | break; | 532 | break; |
380 | } | 533 | } |
534 | if (np->n_intrs > 0) | ||
535 | host->irq = np->intrs[0].line; | ||
536 | else | ||
537 | host->irq = NO_IRQ; | ||
538 | |||
539 | host->base = ioremap((*addrp), 0x1000); | ||
540 | if (host->base == NULL) { | ||
541 | printk(KERN_ERR "low_i2c: Can't map registers for %s\n", | ||
542 | np->full_name); | ||
543 | kfree(host); | ||
544 | return NULL; | ||
545 | } | ||
546 | |||
547 | /* Make sure interrupts are disabled */ | ||
548 | kw_write_reg(reg_ier, 0); | ||
549 | |||
550 | /* Request chip interrupt */ | ||
551 | if (request_irq(host->irq, kw_i2c_irq, SA_SHIRQ, "keywest i2c", host)) | ||
552 | host->irq = NO_IRQ; | ||
553 | |||
554 | printk(KERN_INFO "KeyWest i2c @0x%08x irq %d %s\n", | ||
555 | *addrp, host->irq, np->full_name); | ||
381 | 556 | ||
382 | host->mode = pmac_low_i2c_mode_std; | 557 | return host; |
383 | host->base = ioremap(np->addrs[0].address + aoffset, | ||
384 | np->addrs[0].size); | ||
385 | host->func = keywest_low_i2c_func; | ||
386 | } | 558 | } |
387 | 559 | ||
560 | |||
561 | static void __init kw_i2c_add(struct pmac_i2c_host_kw *host, | ||
562 | struct device_node *controller, | ||
563 | struct device_node *busnode, | ||
564 | int channel) | ||
565 | { | ||
566 | struct pmac_i2c_bus *bus; | ||
567 | |||
568 | bus = kzalloc(sizeof(struct pmac_i2c_bus), GFP_KERNEL); | ||
569 | if (bus == NULL) | ||
570 | return; | ||
571 | |||
572 | bus->controller = of_node_get(controller); | ||
573 | bus->busnode = of_node_get(busnode); | ||
574 | bus->type = pmac_i2c_bus_keywest; | ||
575 | bus->hostdata = host; | ||
576 | bus->channel = channel; | ||
577 | bus->mode = pmac_i2c_mode_std; | ||
578 | bus->open = kw_i2c_open; | ||
579 | bus->close = kw_i2c_close; | ||
580 | bus->xfer = kw_i2c_xfer; | ||
581 | init_MUTEX(&bus->sem); | ||
582 | if (controller == busnode) | ||
583 | bus->flags = pmac_i2c_multibus; | ||
584 | list_add(&bus->link, &pmac_i2c_busses); | ||
585 | |||
586 | printk(KERN_INFO " channel %d bus %s\n", channel, | ||
587 | (controller == busnode) ? "<multibus>" : busnode->full_name); | ||
588 | } | ||
589 | |||
590 | static void __init kw_i2c_probe(void) | ||
591 | { | ||
592 | struct device_node *np, *child, *parent; | ||
593 | |||
594 | /* Probe keywest-i2c busses */ | ||
595 | for (np = NULL; | ||
596 | (np = of_find_compatible_node(np, "i2c","keywest-i2c")) != NULL;){ | ||
597 | struct pmac_i2c_host_kw *host; | ||
598 | int multibus, chans, i; | ||
599 | |||
600 | /* Found one, init a host structure */ | ||
601 | host = kw_i2c_host_init(np); | ||
602 | if (host == NULL) | ||
603 | continue; | ||
604 | |||
605 | /* Now check if we have a multibus setup (old style) or if we | ||
606 | * have proper bus nodes. Note that the "new" way (proper bus | ||
607 | * nodes) might cause us to not create some busses that are | ||
608 | * kept hidden in the device-tree. In the future, we might | ||
609 | * want to work around that by creating busses without a node | ||
610 | * but not for now | ||
611 | */ | ||
612 | child = of_get_next_child(np, NULL); | ||
613 | multibus = !child || strcmp(child->name, "i2c-bus"); | ||
614 | of_node_put(child); | ||
615 | |||
616 | /* For a multibus setup, we get the bus count based on the | ||
617 | * parent type | ||
618 | */ | ||
619 | if (multibus) { | ||
620 | parent = of_get_parent(np); | ||
621 | if (parent == NULL) | ||
622 | continue; | ||
623 | chans = parent->name[0] == 'u' ? 2 : 1; | ||
624 | for (i = 0; i < chans; i++) | ||
625 | kw_i2c_add(host, np, np, i); | ||
626 | } else { | ||
627 | for (child = NULL; | ||
628 | (child = of_get_next_child(np, child)) != NULL;) { | ||
629 | u32 *reg = | ||
630 | (u32 *)get_property(child, "reg", NULL); | ||
631 | if (reg == NULL) | ||
632 | continue; | ||
633 | kw_i2c_add(host, np, child, *reg); | ||
634 | } | ||
635 | } | ||
636 | } | ||
637 | } | ||
638 | |||
639 | |||
388 | /* | 640 | /* |
389 | * | 641 | * |
390 | * PMU implementation | 642 | * PMU implementation |
391 | * | 643 | * |
392 | */ | 644 | */ |
393 | 645 | ||
394 | |||
395 | #ifdef CONFIG_ADB_PMU | 646 | #ifdef CONFIG_ADB_PMU |
396 | 647 | ||
397 | static int pmu_low_i2c_func(struct low_i2c_host *host, u8 addr, u8 sub, u8 *data, int len) | 648 | /* |
649 | * i2c command block to the PMU | ||
650 | */ | ||
651 | struct pmu_i2c_hdr { | ||
652 | u8 bus; | ||
653 | u8 mode; | ||
654 | u8 bus2; | ||
655 | u8 address; | ||
656 | u8 sub_addr; | ||
657 | u8 comb_addr; | ||
658 | u8 count; | ||
659 | u8 data[]; | ||
660 | }; | ||
661 | |||
662 | static void pmu_i2c_complete(struct adb_request *req) | ||
398 | { | 663 | { |
399 | // TODO | 664 | complete(req->arg); |
400 | return -ENODEV; | ||
401 | } | 665 | } |
402 | 666 | ||
403 | static void pmu_low_i2c_add(struct device_node *np) | 667 | static int pmu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, |
668 | u32 subaddr, u8 *data, int len) | ||
404 | { | 669 | { |
405 | struct low_i2c_host *host = find_low_i2c_host(NULL); | 670 | struct adb_request *req = bus->hostdata; |
671 | struct pmu_i2c_hdr *hdr = (struct pmu_i2c_hdr *)&req->data[1]; | ||
672 | struct completion comp; | ||
673 | int read = addrdir & 1; | ||
674 | int retry; | ||
675 | int rc = 0; | ||
406 | 676 | ||
407 | if (host == NULL) { | 677 | /* For now, limit ourselves to 16 bytes transfers */ |
408 | printk(KERN_ERR "low_i2c: Can't allocate host for %s\n", | 678 | if (len > 16) |
409 | np->full_name); | 679 | return -EINVAL; |
410 | return; | 680 | |
681 | init_completion(&comp); | ||
682 | |||
683 | for (retry = 0; retry < 16; retry++) { | ||
684 | memset(req, 0, sizeof(struct adb_request)); | ||
685 | hdr->bus = bus->channel; | ||
686 | hdr->count = len; | ||
687 | |||
688 | switch(bus->mode) { | ||
689 | case pmac_i2c_mode_std: | ||
690 | if (subsize != 0) | ||
691 | return -EINVAL; | ||
692 | hdr->address = addrdir; | ||
693 | hdr->mode = PMU_I2C_MODE_SIMPLE; | ||
694 | break; | ||
695 | case pmac_i2c_mode_stdsub: | ||
696 | case pmac_i2c_mode_combined: | ||
697 | if (subsize != 1) | ||
698 | return -EINVAL; | ||
699 | hdr->address = addrdir & 0xfe; | ||
700 | hdr->comb_addr = addrdir; | ||
701 | hdr->sub_addr = subaddr; | ||
702 | if (bus->mode == pmac_i2c_mode_stdsub) | ||
703 | hdr->mode = PMU_I2C_MODE_STDSUB; | ||
704 | else | ||
705 | hdr->mode = PMU_I2C_MODE_COMBINED; | ||
706 | break; | ||
707 | default: | ||
708 | return -EINVAL; | ||
709 | } | ||
710 | |||
711 | INIT_COMPLETION(comp); | ||
712 | req->data[0] = PMU_I2C_CMD; | ||
713 | req->reply[0] = 0xff; | ||
714 | req->nbytes = sizeof(struct pmu_i2c_hdr) + 1; | ||
715 | req->done = pmu_i2c_complete; | ||
716 | req->arg = &comp; | ||
717 | if (!read && len) { | ||
718 | memcpy(hdr->data, data, len); | ||
719 | req->nbytes += len; | ||
720 | } | ||
721 | rc = pmu_queue_request(req); | ||
722 | if (rc) | ||
723 | return rc; | ||
724 | wait_for_completion(&comp); | ||
725 | if (req->reply[0] == PMU_I2C_STATUS_OK) | ||
726 | break; | ||
727 | msleep(15); | ||
411 | } | 728 | } |
412 | memset(host, 0, sizeof(*host)); | 729 | if (req->reply[0] != PMU_I2C_STATUS_OK) |
730 | return -EIO; | ||
413 | 731 | ||
414 | init_MUTEX(&host->mutex); | 732 | for (retry = 0; retry < 16; retry++) { |
415 | host->np = of_node_get(np); | 733 | memset(req, 0, sizeof(struct adb_request)); |
416 | host->num_channels = 3; | 734 | |
417 | host->mode = pmac_low_i2c_mode_std; | 735 | /* I know that looks like a lot, slow as hell, but darwin |
418 | host->func = pmu_low_i2c_func; | 736 | * does it so let's be on the safe side for now |
737 | */ | ||
738 | msleep(15); | ||
739 | |||
740 | hdr->bus = PMU_I2C_BUS_STATUS; | ||
741 | |||
742 | INIT_COMPLETION(comp); | ||
743 | req->data[0] = PMU_I2C_CMD; | ||
744 | req->reply[0] = 0xff; | ||
745 | req->nbytes = 2; | ||
746 | req->done = pmu_i2c_complete; | ||
747 | req->arg = &comp; | ||
748 | rc = pmu_queue_request(req); | ||
749 | if (rc) | ||
750 | return rc; | ||
751 | wait_for_completion(&comp); | ||
752 | |||
753 | if (req->reply[0] == PMU_I2C_STATUS_OK && !read) | ||
754 | return 0; | ||
755 | if (req->reply[0] == PMU_I2C_STATUS_DATAREAD && read) { | ||
756 | int rlen = req->reply_len - 1; | ||
757 | |||
758 | if (rlen != len) { | ||
759 | printk(KERN_WARNING "low_i2c: PMU returned %d" | ||
760 | " bytes, expected %d !\n", rlen, len); | ||
761 | return -EIO; | ||
762 | } | ||
763 | if (len) | ||
764 | memcpy(data, &req->reply[1], len); | ||
765 | return 0; | ||
766 | } | ||
767 | } | ||
768 | return -EIO; | ||
769 | } | ||
770 | |||
771 | static void __init pmu_i2c_probe(void) | ||
772 | { | ||
773 | struct pmac_i2c_bus *bus; | ||
774 | struct device_node *busnode; | ||
775 | int channel, sz; | ||
776 | |||
777 | if (!pmu_present()) | ||
778 | return; | ||
779 | |||
780 | /* There might or might not be a "pmu-i2c" node; we use that | ||
781 | * or via-pmu itself, whatever we find. I haven't seen a machine | ||
782 | * with separate bus nodes, so we assume a multibus setup | ||
783 | */ | ||
784 | busnode = of_find_node_by_name(NULL, "pmu-i2c"); | ||
785 | if (busnode == NULL) | ||
786 | busnode = of_find_node_by_name(NULL, "via-pmu"); | ||
787 | if (busnode == NULL) | ||
788 | return; | ||
789 | |||
790 | printk(KERN_INFO "PMU i2c %s\n", busnode->full_name); | ||
791 | |||
792 | /* | ||
793 | * We add bus 1 and 2 only for now, bus 0 is "special" | ||
794 | */ | ||
795 | for (channel = 1; channel <= 2; channel++) { | ||
796 | sz = sizeof(struct pmac_i2c_bus) + sizeof(struct adb_request); | ||
797 | bus = kzalloc(sz, GFP_KERNEL); | ||
798 | if (bus == NULL) | ||
799 | return; | ||
800 | |||
801 | bus->controller = busnode; | ||
802 | bus->busnode = busnode; | ||
803 | bus->type = pmac_i2c_bus_pmu; | ||
804 | bus->channel = channel; | ||
805 | bus->mode = pmac_i2c_mode_std; | ||
806 | bus->hostdata = bus + 1; | ||
807 | bus->xfer = pmu_i2c_xfer; | ||
808 | init_MUTEX(&bus->sem); | ||
809 | bus->flags = pmac_i2c_multibus; | ||
810 | list_add(&bus->link, &pmac_i2c_busses); | ||
811 | |||
812 | printk(KERN_INFO " channel %d bus <multibus>\n", channel); | ||
813 | } | ||
419 | } | 814 | } |
420 | 815 | ||
421 | #endif /* CONFIG_ADB_PMU */ | 816 | #endif /* CONFIG_ADB_PMU */ |
422 | 817 | ||
423 | void __init pmac_init_low_i2c(void) | 818 | |
819 | /* | ||
820 | * | ||
821 | * SMU implementation | ||
822 | * | ||
823 | */ | ||
824 | |||
825 | #ifdef CONFIG_PMAC_SMU | ||
826 | |||
827 | static void smu_i2c_complete(struct smu_i2c_cmd *cmd, void *misc) | ||
424 | { | 828 | { |
425 | struct device_node *np; | 829 | complete(misc); |
830 | } | ||
426 | 831 | ||
427 | /* Probe keywest-i2c busses */ | 832 | static int smu_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, |
428 | np = of_find_compatible_node(NULL, "i2c", "keywest-i2c"); | 833 | u32 subaddr, u8 *data, int len) |
429 | while(np) { | 834 | { |
430 | keywest_low_i2c_add(np); | 835 | struct smu_i2c_cmd *cmd = bus->hostdata; |
431 | np = of_find_compatible_node(np, "i2c", "keywest-i2c"); | 836 | struct completion comp; |
837 | int read = addrdir & 1; | ||
838 | int rc = 0; | ||
839 | |||
840 | if ((read && len > SMU_I2C_READ_MAX) || | ||
841 | ((!read) && len > SMU_I2C_WRITE_MAX)) | ||
842 | return -EINVAL; | ||
843 | |||
844 | memset(cmd, 0, sizeof(struct smu_i2c_cmd)); | ||
845 | cmd->info.bus = bus->channel; | ||
846 | cmd->info.devaddr = addrdir; | ||
847 | cmd->info.datalen = len; | ||
848 | |||
849 | switch(bus->mode) { | ||
850 | case pmac_i2c_mode_std: | ||
851 | if (subsize != 0) | ||
852 | return -EINVAL; | ||
853 | cmd->info.type = SMU_I2C_TRANSFER_SIMPLE; | ||
854 | break; | ||
855 | case pmac_i2c_mode_stdsub: | ||
856 | case pmac_i2c_mode_combined: | ||
857 | if (subsize > 3 || subsize < 1) | ||
858 | return -EINVAL; | ||
859 | cmd->info.sublen = subsize; | ||
860 | /* that's big-endian only but heh ! */ | ||
861 | memcpy(&cmd->info.subaddr, ((char *)&subaddr) + (4 - subsize), | ||
862 | subsize); | ||
863 | if (bus->mode == pmac_i2c_mode_stdsub) | ||
864 | cmd->info.type = SMU_I2C_TRANSFER_STDSUB; | ||
865 | else | ||
866 | cmd->info.type = SMU_I2C_TRANSFER_COMBINED; | ||
867 | break; | ||
868 | default: | ||
869 | return -EINVAL; | ||
432 | } | 870 | } |
871 | if (!read && len) | ||
872 | memcpy(cmd->info.data, data, len); | ||
873 | |||
874 | init_completion(&comp); | ||
875 | cmd->done = smu_i2c_complete; | ||
876 | cmd->misc = &comp; | ||
877 | rc = smu_queue_i2c(cmd); | ||
878 | if (rc < 0) | ||
879 | return rc; | ||
880 | wait_for_completion(&comp); | ||
881 | rc = cmd->status; | ||
882 | |||
883 | if (read && len) | ||
884 | memcpy(data, cmd->info.data, len); | ||
885 | return rc < 0 ? rc : 0; | ||
886 | } | ||
887 | |||
888 | static void __init smu_i2c_probe(void) | ||
889 | { | ||
890 | struct device_node *controller, *busnode; | ||
891 | struct pmac_i2c_bus *bus; | ||
892 | u32 *reg; | ||
893 | int sz; | ||
894 | |||
895 | if (!smu_present()) | ||
896 | return; | ||
897 | |||
898 | controller = of_find_node_by_name(NULL, "smu-i2c-control"); | ||
899 | if (controller == NULL) | ||
900 | controller = of_find_node_by_name(NULL, "smu"); | ||
901 | if (controller == NULL) | ||
902 | return; | ||
903 | |||
904 | printk(KERN_INFO "SMU i2c %s\n", controller->full_name); | ||
905 | |||
906 | /* Look for children; note that they might not be of the right | ||
907 | * type as older device trees mix i2c busses and other things | ||
908 | * at the same level | ||
909 | */ | ||
910 | for (busnode = NULL; | ||
911 | (busnode = of_get_next_child(controller, busnode)) != NULL;) { | ||
912 | if (strcmp(busnode->type, "i2c") && | ||
913 | strcmp(busnode->type, "i2c-bus")) | ||
914 | continue; | ||
915 | reg = (u32 *)get_property(busnode, "reg", NULL); | ||
916 | if (reg == NULL) | ||
917 | continue; | ||
918 | |||
919 | sz = sizeof(struct pmac_i2c_bus) + sizeof(struct smu_i2c_cmd); | ||
920 | bus = kzalloc(sz, GFP_KERNEL); | ||
921 | if (bus == NULL) | ||
922 | return; | ||
923 | |||
924 | bus->controller = controller; | ||
925 | bus->busnode = of_node_get(busnode); | ||
926 | bus->type = pmac_i2c_bus_smu; | ||
927 | bus->channel = *reg; | ||
928 | bus->mode = pmac_i2c_mode_std; | ||
929 | bus->hostdata = bus + 1; | ||
930 | bus->xfer = smu_i2c_xfer; | ||
931 | init_MUTEX(&bus->sem); | ||
932 | bus->flags = 0; | ||
933 | list_add(&bus->link, &pmac_i2c_busses); | ||
934 | |||
935 | printk(KERN_INFO " channel %x bus %s\n", | ||
936 | bus->channel, busnode->full_name); | ||
937 | } | ||
938 | } | ||
939 | |||
940 | #endif /* CONFIG_PMAC_SMU */ | ||
941 | |||
942 | /* | ||
943 | * | ||
944 | * Core code | ||
945 | * | ||
946 | */ | ||
433 | 947 | ||
434 | #ifdef CONFIG_ADB_PMU | ||
435 | /* Probe PMU busses */ | ||
436 | np = of_find_node_by_name(NULL, "via-pmu"); | ||
437 | if (np) | ||
438 | pmu_low_i2c_add(np); | ||
439 | #endif /* CONFIG_ADB_PMU */ | ||
440 | 948 | ||
441 | /* TODO: Add CUDA support as well */ | 949 | struct pmac_i2c_bus *pmac_i2c_find_bus(struct device_node *node) |
950 | { | ||
951 | struct device_node *p = of_node_get(node); | ||
952 | struct device_node *prev = NULL; | ||
953 | struct pmac_i2c_bus *bus; | ||
954 | |||
955 | while(p) { | ||
956 | list_for_each_entry(bus, &pmac_i2c_busses, link) { | ||
957 | if (p == bus->busnode) { | ||
958 | if (prev && bus->flags & pmac_i2c_multibus) { | ||
959 | u32 *reg; | ||
960 | reg = (u32 *)get_property(prev, "reg", | ||
961 | NULL); | ||
962 | if (!reg) | ||
963 | continue; | ||
964 | if (((*reg) >> 8) != bus->channel) | ||
965 | continue; | ||
966 | } | ||
967 | of_node_put(p); | ||
968 | of_node_put(prev); | ||
969 | return bus; | ||
970 | } | ||
971 | } | ||
972 | of_node_put(prev); | ||
973 | prev = p; | ||
974 | p = of_get_parent(p); | ||
975 | } | ||
976 | return NULL; | ||
442 | } | 977 | } |
978 | EXPORT_SYMBOL_GPL(pmac_i2c_find_bus); | ||
979 | |||
980 | u8 pmac_i2c_get_dev_addr(struct device_node *device) | ||
981 | { | ||
982 | u32 *reg = (u32 *)get_property(device, "reg", NULL); | ||
983 | |||
984 | if (reg == NULL) | ||
985 | return 0; | ||
986 | |||
987 | return (*reg) & 0xff; | ||
988 | } | ||
989 | EXPORT_SYMBOL_GPL(pmac_i2c_get_dev_addr); | ||
990 | |||
991 | struct device_node *pmac_i2c_get_controller(struct pmac_i2c_bus *bus) | ||
992 | { | ||
993 | return bus->controller; | ||
994 | } | ||
995 | EXPORT_SYMBOL_GPL(pmac_i2c_get_controller); | ||
996 | |||
997 | struct device_node *pmac_i2c_get_bus_node(struct pmac_i2c_bus *bus) | ||
998 | { | ||
999 | return bus->busnode; | ||
1000 | } | ||
1001 | EXPORT_SYMBOL_GPL(pmac_i2c_get_bus_node); | ||
1002 | |||
1003 | int pmac_i2c_get_type(struct pmac_i2c_bus *bus) | ||
1004 | { | ||
1005 | return bus->type; | ||
1006 | } | ||
1007 | EXPORT_SYMBOL_GPL(pmac_i2c_get_type); | ||
1008 | |||
1009 | int pmac_i2c_get_flags(struct pmac_i2c_bus *bus) | ||
1010 | { | ||
1011 | return bus->flags; | ||
1012 | } | ||
1013 | EXPORT_SYMBOL_GPL(pmac_i2c_get_flags); | ||
1014 | |||
1015 | int pmac_i2c_get_channel(struct pmac_i2c_bus *bus) | ||
1016 | { | ||
1017 | return bus->channel; | ||
1018 | } | ||
1019 | EXPORT_SYMBOL_GPL(pmac_i2c_get_channel); | ||
1020 | |||
1021 | |||
1022 | void pmac_i2c_attach_adapter(struct pmac_i2c_bus *bus, | ||
1023 | struct i2c_adapter *adapter) | ||
1024 | { | ||
1025 | WARN_ON(bus->adapter != NULL); | ||
1026 | bus->adapter = adapter; | ||
1027 | } | ||
1028 | EXPORT_SYMBOL_GPL(pmac_i2c_attach_adapter); | ||
1029 | |||
1030 | void pmac_i2c_detach_adapter(struct pmac_i2c_bus *bus, | ||
1031 | struct i2c_adapter *adapter) | ||
1032 | { | ||
1033 | WARN_ON(bus->adapter != adapter); | ||
1034 | bus->adapter = NULL; | ||
1035 | } | ||
1036 | EXPORT_SYMBOL_GPL(pmac_i2c_detach_adapter); | ||
1037 | |||
1038 | struct i2c_adapter *pmac_i2c_get_adapter(struct pmac_i2c_bus *bus) | ||
1039 | { | ||
1040 | return bus->adapter; | ||
1041 | } | ||
1042 | EXPORT_SYMBOL_GPL(pmac_i2c_get_adapter); | ||
1043 | |||
1044 | struct pmac_i2c_bus *pmac_i2c_adapter_to_bus(struct i2c_adapter *adapter) | ||
1045 | { | ||
1046 | struct pmac_i2c_bus *bus; | ||
1047 | |||
1048 | list_for_each_entry(bus, &pmac_i2c_busses, link) | ||
1049 | if (bus->adapter == adapter) | ||
1050 | return bus; | ||
1051 | return NULL; | ||
1052 | } | ||
1053 | EXPORT_SYMBOL_GPL(pmac_i2c_adapter_to_bus); | ||
1054 | |||
1055 | extern int pmac_i2c_match_adapter(struct device_node *dev, | ||
1056 | struct i2c_adapter *adapter) | ||
1057 | { | ||
1058 | struct pmac_i2c_bus *bus = pmac_i2c_find_bus(dev); | ||
1059 | |||
1060 | if (bus == NULL) | ||
1061 | return 0; | ||
1062 | return (bus->adapter == adapter); | ||
1063 | } | ||
1064 | EXPORT_SYMBOL_GPL(pmac_i2c_match_adapter); | ||
443 | 1065 | ||
444 | int pmac_low_i2c_lock(struct device_node *np) | 1066 | int pmac_low_i2c_lock(struct device_node *np) |
445 | { | 1067 | { |
446 | struct low_i2c_host *host = find_low_i2c_host(np); | 1068 | struct pmac_i2c_bus *bus, *found = NULL; |
447 | 1069 | ||
448 | if (!host) | 1070 | list_for_each_entry(bus, &pmac_i2c_busses, link) { |
1071 | if (np == bus->controller) { | ||
1072 | found = bus; | ||
1073 | break; | ||
1074 | } | ||
1075 | } | ||
1076 | if (!found) | ||
449 | return -ENODEV; | 1077 | return -ENODEV; |
450 | down(&host->mutex); | 1078 | return pmac_i2c_open(bus, 0); |
451 | return 0; | ||
452 | } | 1079 | } |
453 | EXPORT_SYMBOL(pmac_low_i2c_lock); | 1080 | EXPORT_SYMBOL_GPL(pmac_low_i2c_lock); |
454 | 1081 | ||
455 | int pmac_low_i2c_unlock(struct device_node *np) | 1082 | int pmac_low_i2c_unlock(struct device_node *np) |
456 | { | 1083 | { |
457 | struct low_i2c_host *host = find_low_i2c_host(np); | 1084 | struct pmac_i2c_bus *bus, *found = NULL; |
458 | 1085 | ||
459 | if (!host) | 1086 | list_for_each_entry(bus, &pmac_i2c_busses, link) { |
1087 | if (np == bus->controller) { | ||
1088 | found = bus; | ||
1089 | break; | ||
1090 | } | ||
1091 | } | ||
1092 | if (!found) | ||
460 | return -ENODEV; | 1093 | return -ENODEV; |
461 | up(&host->mutex); | 1094 | pmac_i2c_close(bus); |
462 | return 0; | 1095 | return 0; |
463 | } | 1096 | } |
464 | EXPORT_SYMBOL(pmac_low_i2c_unlock); | 1097 | EXPORT_SYMBOL_GPL(pmac_low_i2c_unlock); |
465 | 1098 | ||
466 | 1099 | ||
467 | int pmac_low_i2c_open(struct device_node *np, int channel) | 1100 | int pmac_i2c_open(struct pmac_i2c_bus *bus, int polled) |
468 | { | 1101 | { |
469 | struct low_i2c_host *host = find_low_i2c_host(np); | 1102 | int rc; |
1103 | |||
1104 | down(&bus->sem); | ||
1105 | bus->polled = polled || pmac_i2c_force_poll; | ||
1106 | bus->opened = 1; | ||
1107 | bus->mode = pmac_i2c_mode_std; | ||
1108 | if (bus->open && (rc = bus->open(bus)) != 0) { | ||
1109 | bus->opened = 0; | ||
1110 | up(&bus->sem); | ||
1111 | return rc; | ||
1112 | } | ||
1113 | return 0; | ||
1114 | } | ||
1115 | EXPORT_SYMBOL_GPL(pmac_i2c_open); | ||
470 | 1116 | ||
471 | if (!host) | 1117 | void pmac_i2c_close(struct pmac_i2c_bus *bus) |
472 | return -ENODEV; | 1118 | { |
1119 | WARN_ON(!bus->opened); | ||
1120 | if (bus->close) | ||
1121 | bus->close(bus); | ||
1122 | bus->opened = 0; | ||
1123 | up(&bus->sem); | ||
1124 | } | ||
1125 | EXPORT_SYMBOL_GPL(pmac_i2c_close); | ||
473 | 1126 | ||
474 | if (channel >= host->num_channels) | 1127 | int pmac_i2c_setmode(struct pmac_i2c_bus *bus, int mode) |
1128 | { | ||
1129 | WARN_ON(!bus->opened); | ||
1130 | |||
1131 | /* Report me if you see the error below as there might be a new | ||
1132 | * "combined4" mode that I need to implement for the SMU bus | ||
1133 | */ | ||
1134 | if (mode < pmac_i2c_mode_dumb || mode > pmac_i2c_mode_combined) { | ||
1135 | printk(KERN_ERR "low_i2c: Invalid mode %d requested on" | ||
1136 | " bus %s !\n", mode, bus->busnode->full_name); | ||
475 | return -EINVAL; | 1137 | return -EINVAL; |
476 | 1138 | } | |
477 | down(&host->mutex); | 1139 | bus->mode = mode; |
478 | host->is_open = 1; | ||
479 | host->channel = channel; | ||
480 | 1140 | ||
481 | return 0; | 1141 | return 0; |
482 | } | 1142 | } |
483 | EXPORT_SYMBOL(pmac_low_i2c_open); | 1143 | EXPORT_SYMBOL_GPL(pmac_i2c_setmode); |
484 | 1144 | ||
485 | int pmac_low_i2c_close(struct device_node *np) | 1145 | int pmac_i2c_xfer(struct pmac_i2c_bus *bus, u8 addrdir, int subsize, |
1146 | u32 subaddr, u8 *data, int len) | ||
486 | { | 1147 | { |
487 | struct low_i2c_host *host = find_low_i2c_host(np); | 1148 | int rc; |
488 | 1149 | ||
489 | if (!host) | 1150 | WARN_ON(!bus->opened); |
490 | return -ENODEV; | ||
491 | 1151 | ||
492 | host->is_open = 0; | 1152 | DBG("xfer() chan=%d, addrdir=0x%x, mode=%d, subsize=%d, subaddr=0x%x," |
493 | up(&host->mutex); | 1153 | " %d bytes, bus %s\n", bus->channel, addrdir, bus->mode, subsize, |
1154 | subaddr, len, bus->busnode->full_name); | ||
494 | 1155 | ||
495 | return 0; | 1156 | rc = bus->xfer(bus, addrdir, subsize, subaddr, data, len); |
1157 | |||
1158 | #ifdef DEBUG | ||
1159 | if (rc) | ||
1160 | DBG("xfer error %d\n", rc); | ||
1161 | #endif | ||
1162 | return rc; | ||
496 | } | 1163 | } |
497 | EXPORT_SYMBOL(pmac_low_i2c_close); | 1164 | EXPORT_SYMBOL_GPL(pmac_i2c_xfer); |
1165 | |||
1166 | /* some quirks for platform function decoding */ | ||
1167 | enum { | ||
1168 | pmac_i2c_quirk_invmask = 0x00000001u, | ||
1169 | }; | ||
498 | 1170 | ||
499 | int pmac_low_i2c_setmode(struct device_node *np, int mode) | 1171 | static void pmac_i2c_devscan(void (*callback)(struct device_node *dev, |
1172 | int quirks)) | ||
500 | { | 1173 | { |
501 | struct low_i2c_host *host = find_low_i2c_host(np); | 1174 | struct pmac_i2c_bus *bus; |
1175 | struct device_node *np; | ||
1176 | static struct whitelist_ent { | ||
1177 | char *name; | ||
1178 | char *compatible; | ||
1179 | int quirks; | ||
1180 | } whitelist[] = { | ||
1181 | /* XXX Study device-trees & Apple drivers to get the quirks | ||
1182 | * right ! | ||
1183 | */ | ||
1184 | { "i2c-hwclock", NULL, pmac_i2c_quirk_invmask }, | ||
1185 | { "i2c-cpu-voltage", NULL, 0}, | ||
1186 | { "temp-monitor", NULL, 0 }, | ||
1187 | { "supply-monitor", NULL, 0 }, | ||
1188 | { NULL, NULL, 0 }, | ||
1189 | }; | ||
1190 | |||
1191 | /* Only some devices need to have platform functions instantiated | ||
1192 | * here. For now, we have a table. Others, like 9554 i2c GPIOs used | ||
1193 | * on Xserve, if we ever do a driver for them, will use their own | ||
1194 | * platform function instance | ||
1195 | */ | ||
1196 | list_for_each_entry(bus, &pmac_i2c_busses, link) { | ||
1197 | for (np = NULL; | ||
1198 | (np = of_get_next_child(bus->busnode, np)) != NULL;) { | ||
1199 | struct whitelist_ent *p; | ||
1200 | /* If multibus, check if device is on that bus */ | ||
1201 | if (bus->flags & pmac_i2c_multibus) | ||
1202 | if (bus != pmac_i2c_find_bus(np)) | ||
1203 | continue; | ||
1204 | for (p = whitelist; p->name != NULL; p++) { | ||
1205 | if (strcmp(np->name, p->name)) | ||
1206 | continue; | ||
1207 | if (p->compatible && | ||
1208 | !device_is_compatible(np, p->compatible)) | ||
1209 | continue; | ||
1210 | callback(np, p->quirks); | ||
1211 | break; | ||
1212 | } | ||
1213 | } | ||
1214 | } | ||
1215 | } | ||
502 | 1216 | ||
503 | if (!host) | 1217 | #define MAX_I2C_DATA 64 |
504 | return -ENODEV; | 1218 | |
505 | WARN_ON(!host->is_open); | 1219 | struct pmac_i2c_pf_inst |
506 | host->mode = mode; | 1220 | { |
1221 | struct pmac_i2c_bus *bus; | ||
1222 | u8 addr; | ||
1223 | u8 buffer[MAX_I2C_DATA]; | ||
1224 | u8 scratch[MAX_I2C_DATA]; | ||
1225 | int bytes; | ||
1226 | int quirks; | ||
1227 | }; | ||
1228 | |||
1229 | static void* pmac_i2c_do_begin(struct pmf_function *func, struct pmf_args *args) | ||
1230 | { | ||
1231 | struct pmac_i2c_pf_inst *inst; | ||
1232 | struct pmac_i2c_bus *bus; | ||
1233 | |||
1234 | bus = pmac_i2c_find_bus(func->node); | ||
1235 | if (bus == NULL) { | ||
1236 | printk(KERN_ERR "low_i2c: Can't find bus for %s (pfunc)\n", | ||
1237 | func->node->full_name); | ||
1238 | return NULL; | ||
1239 | } | ||
1240 | if (pmac_i2c_open(bus, 0)) { | ||
1241 | printk(KERN_ERR "low_i2c: Can't open i2c bus for %s (pfunc)\n", | ||
1242 | func->node->full_name); | ||
1243 | return NULL; | ||
1244 | } | ||
1245 | |||
1246 | /* XXX might need GFP_ATOMIC when called during the suspend process, | ||
1247 | * but then, there are already lots of issues with suspending when | ||
1248 | * near OOM that need to be resolved, the allocator itself should | ||
1249 | * probably make GFP_NOIO implicit during suspend | ||
1250 | */ | ||
1251 | inst = kzalloc(sizeof(struct pmac_i2c_pf_inst), GFP_KERNEL); | ||
1252 | if (inst == NULL) { | ||
1253 | pmac_i2c_close(bus); | ||
1254 | return NULL; | ||
1255 | } | ||
1256 | inst->bus = bus; | ||
1257 | inst->addr = pmac_i2c_get_dev_addr(func->node); | ||
1258 | inst->quirks = (int)(long)func->driver_data; | ||
1259 | return inst; | ||
1260 | } | ||
1261 | |||
1262 | static void pmac_i2c_do_end(struct pmf_function *func, void *instdata) | ||
1263 | { | ||
1264 | struct pmac_i2c_pf_inst *inst = instdata; | ||
1265 | |||
1266 | if (inst == NULL) | ||
1267 | return; | ||
1268 | pmac_i2c_close(inst->bus); | ||
1269 | if (inst) | ||
1270 | kfree(inst); | ||
1271 | } | ||
1272 | |||
1273 | static int pmac_i2c_do_read(PMF_STD_ARGS, u32 len) | ||
1274 | { | ||
1275 | struct pmac_i2c_pf_inst *inst = instdata; | ||
1276 | |||
1277 | inst->bytes = len; | ||
1278 | return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 0, 0, | ||
1279 | inst->buffer, len); | ||
1280 | } | ||
1281 | |||
1282 | static int pmac_i2c_do_write(PMF_STD_ARGS, u32 len, const u8 *data) | ||
1283 | { | ||
1284 | struct pmac_i2c_pf_inst *inst = instdata; | ||
1285 | |||
1286 | return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0, | ||
1287 | (u8 *)data, len); | ||
1288 | } | ||
1289 | |||
1290 | /* This function is used to do the masking & OR'ing for the "rmw" type | ||
1291 | * callbacks. We should apply the mask and OR in the values in the | ||
1292 | * buffer before writing back. The problem is that it seems that | ||
1293 | * various darwin drivers implement the mask/or differently, thus | ||
1294 | * we need to check the quirks first | ||
1295 | */ | ||
1296 | static void pmac_i2c_do_apply_rmw(struct pmac_i2c_pf_inst *inst, | ||
1297 | u32 len, const u8 *mask, const u8 *val) | ||
1298 | { | ||
1299 | int i; | ||
1300 | |||
1301 | if (inst->quirks & pmac_i2c_quirk_invmask) { | ||
1302 | for (i = 0; i < len; i ++) | ||
1303 | inst->scratch[i] = (inst->buffer[i] & mask[i]) | val[i]; | ||
1304 | } else { | ||
1305 | for (i = 0; i < len; i ++) | ||
1306 | inst->scratch[i] = (inst->buffer[i] & ~mask[i]) | ||
1307 | | (val[i] & mask[i]); | ||
1308 | } | ||
1309 | } | ||
1310 | |||
1311 | static int pmac_i2c_do_rmw(PMF_STD_ARGS, u32 masklen, u32 valuelen, | ||
1312 | u32 totallen, const u8 *maskdata, | ||
1313 | const u8 *valuedata) | ||
1314 | { | ||
1315 | struct pmac_i2c_pf_inst *inst = instdata; | ||
1316 | |||
1317 | if (masklen > inst->bytes || valuelen > inst->bytes || | ||
1318 | totallen > inst->bytes || valuelen > masklen) | ||
1319 | return -EINVAL; | ||
1320 | |||
1321 | pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata); | ||
1322 | |||
1323 | return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 0, 0, | ||
1324 | inst->scratch, totallen); | ||
1325 | } | ||
1326 | |||
1327 | static int pmac_i2c_do_read_sub(PMF_STD_ARGS, u8 subaddr, u32 len) | ||
1328 | { | ||
1329 | struct pmac_i2c_pf_inst *inst = instdata; | ||
1330 | |||
1331 | inst->bytes = len; | ||
1332 | return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_read, 1, subaddr, | ||
1333 | inst->buffer, len); | ||
1334 | } | ||
1335 | |||
1336 | static int pmac_i2c_do_write_sub(PMF_STD_ARGS, u8 subaddr, u32 len, | ||
1337 | const u8 *data) | ||
1338 | { | ||
1339 | struct pmac_i2c_pf_inst *inst = instdata; | ||
1340 | |||
1341 | return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1, | ||
1342 | subaddr, (u8 *)data, len); | ||
1343 | } | ||
507 | 1344 | ||
1345 | static int pmac_i2c_do_set_mode(PMF_STD_ARGS, int mode) | ||
1346 | { | ||
1347 | struct pmac_i2c_pf_inst *inst = instdata; | ||
1348 | |||
1349 | return pmac_i2c_setmode(inst->bus, mode); | ||
1350 | } | ||
1351 | |||
1352 | static int pmac_i2c_do_rmw_sub(PMF_STD_ARGS, u8 subaddr, u32 masklen, | ||
1353 | u32 valuelen, u32 totallen, const u8 *maskdata, | ||
1354 | const u8 *valuedata) | ||
1355 | { | ||
1356 | struct pmac_i2c_pf_inst *inst = instdata; | ||
1357 | |||
1358 | if (masklen > inst->bytes || valuelen > inst->bytes || | ||
1359 | totallen > inst->bytes || valuelen > masklen) | ||
1360 | return -EINVAL; | ||
1361 | |||
1362 | pmac_i2c_do_apply_rmw(inst, masklen, maskdata, valuedata); | ||
1363 | |||
1364 | return pmac_i2c_xfer(inst->bus, inst->addr | pmac_i2c_write, 1, | ||
1365 | subaddr, inst->scratch, totallen); | ||
1366 | } | ||
1367 | |||
1368 | static int pmac_i2c_do_mask_and_comp(PMF_STD_ARGS, u32 len, | ||
1369 | const u8 *maskdata, | ||
1370 | const u8 *valuedata) | ||
1371 | { | ||
1372 | struct pmac_i2c_pf_inst *inst = instdata; | ||
1373 | int i, match; | ||
1374 | |||
1375 | /* Get return value pointer, it's assumed to be a u32 */ | ||
1376 | if (!args || !args->count || !args->u[0].p) | ||
1377 | return -EINVAL; | ||
1378 | |||
1379 | /* Check buffer */ | ||
1380 | if (len > inst->bytes) | ||
1381 | return -EINVAL; | ||
1382 | |||
1383 | for (i = 0, match = 1; match && i < len; i ++) | ||
1384 | if ((inst->buffer[i] & maskdata[i]) != valuedata[i]) | ||
1385 | match = 0; | ||
1386 | *args->u[0].p = match; | ||
508 | return 0; | 1387 | return 0; |
509 | } | 1388 | } |
510 | EXPORT_SYMBOL(pmac_low_i2c_setmode); | ||
511 | 1389 | ||
512 | int pmac_low_i2c_xfer(struct device_node *np, u8 addrdir, u8 subaddr, u8 *data, int len) | 1390 | static int pmac_i2c_do_delay(PMF_STD_ARGS, u32 duration) |
513 | { | 1391 | { |
514 | struct low_i2c_host *host = find_low_i2c_host(np); | 1392 | msleep((duration + 999) / 1000); |
1393 | return 0; | ||
1394 | } | ||
515 | 1395 | ||
516 | if (!host) | ||
517 | return -ENODEV; | ||
518 | WARN_ON(!host->is_open); | ||
519 | 1396 | ||
520 | return host->func(host, addrdir, subaddr, data, len); | 1397 | static struct pmf_handlers pmac_i2c_pfunc_handlers = { |
1398 | .begin = pmac_i2c_do_begin, | ||
1399 | .end = pmac_i2c_do_end, | ||
1400 | .read_i2c = pmac_i2c_do_read, | ||
1401 | .write_i2c = pmac_i2c_do_write, | ||
1402 | .rmw_i2c = pmac_i2c_do_rmw, | ||
1403 | .read_i2c_sub = pmac_i2c_do_read_sub, | ||
1404 | .write_i2c_sub = pmac_i2c_do_write_sub, | ||
1405 | .rmw_i2c_sub = pmac_i2c_do_rmw_sub, | ||
1406 | .set_i2c_mode = pmac_i2c_do_set_mode, | ||
1407 | .mask_and_compare = pmac_i2c_do_mask_and_comp, | ||
1408 | .delay = pmac_i2c_do_delay, | ||
1409 | }; | ||
1410 | |||
1411 | static void __init pmac_i2c_dev_create(struct device_node *np, int quirks) | ||
1412 | { | ||
1413 | DBG("dev_create(%s)\n", np->full_name); | ||
1414 | |||
1415 | pmf_register_driver(np, &pmac_i2c_pfunc_handlers, | ||
1416 | (void *)(long)quirks); | ||
1417 | } | ||
1418 | |||
1419 | static void __init pmac_i2c_dev_init(struct device_node *np, int quirks) | ||
1420 | { | ||
1421 | DBG("dev_create(%s)\n", np->full_name); | ||
1422 | |||
1423 | pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_INIT, NULL); | ||
1424 | } | ||
1425 | |||
1426 | static void pmac_i2c_dev_suspend(struct device_node *np, int quirks) | ||
1427 | { | ||
1428 | DBG("dev_suspend(%s)\n", np->full_name); | ||
1429 | pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_SLEEP, NULL); | ||
1430 | } | ||
1431 | |||
1432 | static void pmac_i2c_dev_resume(struct device_node *np, int quirks) | ||
1433 | { | ||
1434 | DBG("dev_resume(%s)\n", np->full_name); | ||
1435 | pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_WAKE, NULL); | ||
1436 | } | ||
1437 | |||
1438 | void pmac_pfunc_i2c_suspend(void) | ||
1439 | { | ||
1440 | pmac_i2c_devscan(pmac_i2c_dev_suspend); | ||
1441 | } | ||
1442 | |||
1443 | void pmac_pfunc_i2c_resume(void) | ||
1444 | { | ||
1445 | pmac_i2c_devscan(pmac_i2c_dev_resume); | ||
1446 | } | ||
1447 | |||
1448 | /* | ||
1449 | * Initialize us: probe all i2c busses on the machine, instantiate | ||
1450 | * busses and platform functions as needed. | ||
1451 | */ | ||
1452 | /* This is non-static as it might be called early by smp code */ | ||
1453 | int __init pmac_i2c_init(void) | ||
1454 | { | ||
1455 | static int i2c_inited; | ||
1456 | |||
1457 | if (i2c_inited) | ||
1458 | return 0; | ||
1459 | i2c_inited = 1; | ||
1460 | |||
1461 | /* Probe keywest-i2c busses */ | ||
1462 | kw_i2c_probe(); | ||
1463 | |||
1464 | #ifdef CONFIG_ADB_PMU | ||
1465 | /* Probe PMU i2c busses */ | ||
1466 | pmu_i2c_probe(); | ||
1467 | #endif | ||
1468 | |||
1469 | #ifdef CONFIG_PMAC_SMU | ||
1470 | /* Probe SMU i2c busses */ | ||
1471 | smu_i2c_probe(); | ||
1472 | #endif | ||
1473 | |||
1474 | /* Now add platform functions for some known devices */ | ||
1475 | pmac_i2c_devscan(pmac_i2c_dev_create); | ||
1476 | |||
1477 | return 0; | ||
521 | } | 1478 | } |
522 | EXPORT_SYMBOL(pmac_low_i2c_xfer); | 1479 | arch_initcall(pmac_i2c_init); |
1480 | |||
1481 | /* Since pmac_i2c_init can be called too early for the platform device | ||
1482 | * registration, we need to do it at a later time. In our case, subsys | ||
1483 | * happens to fit well, though I agree it's a bit of a hack... | ||
1484 | */ | ||
1485 | static int __init pmac_i2c_create_platform_devices(void) | ||
1486 | { | ||
1487 | struct pmac_i2c_bus *bus; | ||
1488 | int i = 0; | ||
1489 | |||
1490 | /* In the case where we are initialized from smp_init(), we must | ||
1491 | * not use the timer (and thus the irq). It's safe from now on | ||
1492 | * though | ||
1493 | */ | ||
1494 | pmac_i2c_force_poll = 0; | ||
1495 | |||
1496 | /* Create platform devices */ | ||
1497 | list_for_each_entry(bus, &pmac_i2c_busses, link) { | ||
1498 | bus->platform_dev = | ||
1499 | platform_device_alloc("i2c-powermac", i++); | ||
1500 | if (bus->platform_dev == NULL) | ||
1501 | return -ENOMEM; | ||
1502 | bus->platform_dev->dev.platform_data = bus; | ||
1503 | platform_device_add(bus->platform_dev); | ||
1504 | } | ||
1505 | |||
1506 | /* Now call platform "init" functions */ | ||
1507 | pmac_i2c_devscan(pmac_i2c_dev_init); | ||
523 | 1508 | ||
1509 | return 0; | ||
1510 | } | ||
1511 | subsys_initcall(pmac_i2c_create_platform_devices); | ||
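For reference, here is a minimal sketch (not part of the patch) of how a client driver might consume the pmac_i2c_* API introduced above. The helper name, node argument and register value are illustrative assumptions; only the pmac_i2c_* calls and constants come from the code above.

/* Hypothetical consumer of the new pmac_i2c API -- not in this patch.
 * Reads one byte from a 1-byte subaddress of the device described by np.
 */
static int example_i2c_read_byte(struct device_node *np, u8 subreg, u8 *val)
{
	struct pmac_i2c_bus *bus = pmac_i2c_find_bus(np);
	u8 addr;
	int rc;

	if (bus == NULL)
		return -ENODEV;
	addr = pmac_i2c_get_dev_addr(np);	/* low bits of the "reg" property */

	rc = pmac_i2c_open(bus, 0);		/* 0 = allow interrupt-driven mode */
	if (rc)
		return rc;
	rc = pmac_i2c_setmode(bus, pmac_i2c_mode_combined);
	if (rc == 0)
		rc = pmac_i2c_xfer(bus, addr | pmac_i2c_read, 1, subreg, val, 1);
	pmac_i2c_close(bus);
	return rc;
}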
diff --git a/arch/powerpc/platforms/powermac/nvram.c b/arch/powerpc/platforms/powermac/nvram.c index 4042e2f06ee0..3ebd045a3350 100644 --- a/arch/powerpc/platforms/powermac/nvram.c +++ b/arch/powerpc/platforms/powermac/nvram.c | |||
@@ -514,7 +514,7 @@ static void core99_nvram_sync(void) | |||
514 | #endif | 514 | #endif |
515 | } | 515 | } |
516 | 516 | ||
517 | static int __init core99_nvram_setup(struct device_node *dp) | 517 | static int __init core99_nvram_setup(struct device_node *dp, unsigned long addr) |
518 | { | 518 | { |
519 | int i; | 519 | int i; |
520 | u32 gen_bank0, gen_bank1; | 520 | u32 gen_bank0, gen_bank1; |
@@ -528,7 +528,7 @@ static int __init core99_nvram_setup(struct device_node *dp) | |||
528 | printk(KERN_ERR "nvram: can't allocate ram image\n"); | 528 | printk(KERN_ERR "nvram: can't allocate ram image\n"); |
529 | return -ENOMEM; | 529 | return -ENOMEM; |
530 | } | 530 | } |
531 | nvram_data = ioremap(dp->addrs[0].address, NVRAM_SIZE*2); | 531 | nvram_data = ioremap(addr, NVRAM_SIZE*2); |
532 | nvram_naddrs = 1; /* Make sure we get the correct case */ | 532 | nvram_naddrs = 1; /* Make sure we get the correct case */ |
533 | 533 | ||
534 | DBG("nvram: Checking bank 0...\n"); | 534 | DBG("nvram: Checking bank 0...\n"); |
@@ -549,6 +549,7 @@ static int __init core99_nvram_setup(struct device_node *dp) | |||
549 | ppc_md.nvram_write = core99_nvram_write; | 549 | ppc_md.nvram_write = core99_nvram_write; |
550 | ppc_md.nvram_size = core99_nvram_size; | 550 | ppc_md.nvram_size = core99_nvram_size; |
551 | ppc_md.nvram_sync = core99_nvram_sync; | 551 | ppc_md.nvram_sync = core99_nvram_sync; |
552 | ppc_md.machine_shutdown = core99_nvram_sync; | ||
552 | /* | 553 | /* |
553 | * Maybe we could be smarter here though making an exclusive list | 554 | * Maybe we could be smarter here though making an exclusive list |
554 | * of known flash chips is a bit nasty as older OF didn't provide us | 555 | * of known flash chips is a bit nasty as older OF didn't provide us |
@@ -569,34 +570,48 @@ static int __init core99_nvram_setup(struct device_node *dp) | |||
569 | int __init pmac_nvram_init(void) | 570 | int __init pmac_nvram_init(void) |
570 | { | 571 | { |
571 | struct device_node *dp; | 572 | struct device_node *dp; |
573 | struct resource r1, r2; | ||
574 | unsigned int s1 = 0, s2 = 0; | ||
572 | int err = 0; | 575 | int err = 0; |
573 | 576 | ||
574 | nvram_naddrs = 0; | 577 | nvram_naddrs = 0; |
575 | 578 | ||
576 | dp = find_devices("nvram"); | 579 | dp = of_find_node_by_name(NULL, "nvram"); |
577 | if (dp == NULL) { | 580 | if (dp == NULL) { |
578 | printk(KERN_ERR "Can't find NVRAM device\n"); | 581 | printk(KERN_ERR "Can't find NVRAM device\n"); |
579 | return -ENODEV; | 582 | return -ENODEV; |
580 | } | 583 | } |
581 | nvram_naddrs = dp->n_addrs; | 584 | |
585 | /* Try to obtain an address */ | ||
586 | if (of_address_to_resource(dp, 0, &r1) == 0) { | ||
587 | nvram_naddrs = 1; | ||
588 | s1 = (r1.end - r1.start) + 1; | ||
589 | if (of_address_to_resource(dp, 1, &r2) == 0) { | ||
590 | nvram_naddrs = 2; | ||
591 | s2 = (r2.end - r2.start) + 1; | ||
592 | } | ||
593 | } | ||
594 | |||
582 | is_core_99 = device_is_compatible(dp, "nvram,flash"); | 595 | is_core_99 = device_is_compatible(dp, "nvram,flash"); |
583 | if (is_core_99) | 596 | if (is_core_99) { |
584 | err = core99_nvram_setup(dp); | 597 | err = core99_nvram_setup(dp, r1.start); |
598 | goto bail; | ||
599 | } | ||
600 | |||
585 | #ifdef CONFIG_PPC32 | 601 | #ifdef CONFIG_PPC32 |
586 | else if (_machine == _MACH_chrp && nvram_naddrs == 1) { | 602 | if (_machine == _MACH_chrp && nvram_naddrs == 1) { |
587 | nvram_data = ioremap(dp->addrs[0].address + isa_mem_base, | 603 | nvram_data = ioremap(r1.start, s1); |
588 | dp->addrs[0].size); | ||
589 | nvram_mult = 1; | 604 | nvram_mult = 1; |
590 | ppc_md.nvram_read_val = direct_nvram_read_byte; | 605 | ppc_md.nvram_read_val = direct_nvram_read_byte; |
591 | ppc_md.nvram_write_val = direct_nvram_write_byte; | 606 | ppc_md.nvram_write_val = direct_nvram_write_byte; |
592 | } else if (nvram_naddrs == 1) { | 607 | } else if (nvram_naddrs == 1) { |
593 | nvram_data = ioremap(dp->addrs[0].address, dp->addrs[0].size); | 608 | nvram_data = ioremap(r1.start, s1); |
594 | nvram_mult = (dp->addrs[0].size + NVRAM_SIZE - 1) / NVRAM_SIZE; | 609 | nvram_mult = (s1 + NVRAM_SIZE - 1) / NVRAM_SIZE; |
595 | ppc_md.nvram_read_val = direct_nvram_read_byte; | 610 | ppc_md.nvram_read_val = direct_nvram_read_byte; |
596 | ppc_md.nvram_write_val = direct_nvram_write_byte; | 611 | ppc_md.nvram_write_val = direct_nvram_write_byte; |
597 | } else if (nvram_naddrs == 2) { | 612 | } else if (nvram_naddrs == 2) { |
598 | nvram_addr = ioremap(dp->addrs[0].address, dp->addrs[0].size); | 613 | nvram_addr = ioremap(r1.start, s1); |
599 | nvram_data = ioremap(dp->addrs[1].address, dp->addrs[1].size); | 614 | nvram_data = ioremap(r2.start, s2); |
600 | ppc_md.nvram_read_val = indirect_nvram_read_byte; | 615 | ppc_md.nvram_read_val = indirect_nvram_read_byte; |
601 | ppc_md.nvram_write_val = indirect_nvram_write_byte; | 616 | ppc_md.nvram_write_val = indirect_nvram_write_byte; |
602 | } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) { | 617 | } else if (nvram_naddrs == 0 && sys_ctrler == SYS_CTRLER_PMU) { |
@@ -605,13 +620,15 @@ int __init pmac_nvram_init(void) | |||
605 | ppc_md.nvram_read_val = pmu_nvram_read_byte; | 620 | ppc_md.nvram_read_val = pmu_nvram_read_byte; |
606 | ppc_md.nvram_write_val = pmu_nvram_write_byte; | 621 | ppc_md.nvram_write_val = pmu_nvram_write_byte; |
607 | #endif /* CONFIG_ADB_PMU */ | 622 | #endif /* CONFIG_ADB_PMU */ |
608 | } | 623 | } else { |
609 | #endif | ||
610 | else { | ||
611 | printk(KERN_ERR "Incompatible type of NVRAM\n"); | 624 | printk(KERN_ERR "Incompatible type of NVRAM\n"); |
612 | return -ENXIO; | 625 | err = -ENXIO; |
613 | } | 626 | } |
614 | lookup_partitions(); | 627 | #endif /* CONFIG_PPC32 */ |
628 | bail: | ||
629 | of_node_put(dp); | ||
630 | if (err == 0) | ||
631 | lookup_partitions(); | ||
615 | return err; | 632 | return err; |
616 | } | 633 | } |
617 | 634 | ||
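
For context, the nvram.c hunks above replace the old dp->addrs[] accesses with of_address_to_resource(), which translates a node's "reg" entries into a struct resource. A minimal sketch of that idiom, assuming a single "reg" entry (the helper name is invented):

	#include <asm/prom.h>
	#include <asm/io.h>

	/* Map the first "reg" entry of a named node; returns NULL on failure */
	static void __iomem *map_first_reg(const char *name, unsigned long *size)
	{
		struct device_node *np = of_find_node_by_name(NULL, name);
		struct resource r;
		void __iomem *base = NULL;

		if (np == NULL)
			return NULL;
		if (of_address_to_resource(np, 0, &r) == 0) {
			*size = r.end - r.start + 1;	/* bounds are inclusive */
			base = ioremap(r.start, *size);
		}
		of_node_put(np);	/* balance of_find_node_by_name() */
		return base;
	}
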
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c index 443be526cde7..f671ed253901 100644 --- a/arch/powerpc/platforms/powermac/pci.c +++ b/arch/powerpc/platforms/powermac/pci.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Support for PCI bridges found on Power Macintoshes. | 2 | * Support for PCI bridges found on Power Macintoshes. |
3 | * | 3 | * |
4 | * Copyright (C) 2003 Benjamin Herrenschmuidt (benh@kernel.crashing.org) | 4 | * Copyright (C) 2003-2005 Benjamin Herrenschmuidt (benh@kernel.crashing.org) |
5 | * Copyright (C) 1997 Paul Mackerras (paulus@samba.org) | 5 | * Copyright (C) 1997 Paul Mackerras (paulus@samba.org) |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
@@ -25,7 +25,7 @@ | |||
25 | #include <asm/pmac_feature.h> | 25 | #include <asm/pmac_feature.h> |
26 | #include <asm/grackle.h> | 26 | #include <asm/grackle.h> |
27 | #ifdef CONFIG_PPC64 | 27 | #ifdef CONFIG_PPC64 |
28 | #include <asm/iommu.h> | 28 | //#include <asm/iommu.h> |
29 | #include <asm/ppc-pci.h> | 29 | #include <asm/ppc-pci.h> |
30 | #endif | 30 | #endif |
31 | 31 | ||
@@ -44,6 +44,7 @@ static int add_bridge(struct device_node *dev); | |||
44 | static int has_uninorth; | 44 | static int has_uninorth; |
45 | #ifdef CONFIG_PPC64 | 45 | #ifdef CONFIG_PPC64 |
46 | static struct pci_controller *u3_agp; | 46 | static struct pci_controller *u3_agp; |
47 | static struct pci_controller *u4_pcie; | ||
47 | static struct pci_controller *u3_ht; | 48 | static struct pci_controller *u3_ht; |
48 | #endif /* CONFIG_PPC64 */ | 49 | #endif /* CONFIG_PPC64 */ |
49 | 50 | ||
@@ -97,11 +98,8 @@ static void __init fixup_bus_range(struct device_node *bridge) | |||
97 | 98 | ||
98 | /* Lookup the "bus-range" property for the hose */ | 99 | /* Lookup the "bus-range" property for the hose */ |
99 | bus_range = (int *) get_property(bridge, "bus-range", &len); | 100 | bus_range = (int *) get_property(bridge, "bus-range", &len); |
100 | if (bus_range == NULL || len < 2 * sizeof(int)) { | 101 | if (bus_range == NULL || len < 2 * sizeof(int)) |
101 | printk(KERN_WARNING "Can't get bus-range for %s\n", | ||
102 | bridge->full_name); | ||
103 | return; | 102 | return; |
104 | } | ||
105 | bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]); | 103 | bus_range[1] = fixup_one_level_bus_range(bridge->child, bus_range[1]); |
106 | } | 104 | } |
107 | 105 | ||
@@ -128,14 +126,14 @@ static void __init fixup_bus_range(struct device_node *bridge) | |||
128 | */ | 126 | */ |
129 | 127 | ||
130 | #define MACRISC_CFA0(devfn, off) \ | 128 | #define MACRISC_CFA0(devfn, off) \ |
131 | ((1 << (unsigned long)PCI_SLOT(dev_fn)) \ | 129 | ((1 << (unsigned int)PCI_SLOT(dev_fn)) \ |
132 | | (((unsigned long)PCI_FUNC(dev_fn)) << 8) \ | 130 | | (((unsigned int)PCI_FUNC(dev_fn)) << 8) \ |
133 | | (((unsigned long)(off)) & 0xFCUL)) | 131 | | (((unsigned int)(off)) & 0xFCUL)) |
134 | 132 | ||
135 | #define MACRISC_CFA1(bus, devfn, off) \ | 133 | #define MACRISC_CFA1(bus, devfn, off) \ |
136 | ((((unsigned long)(bus)) << 16) \ | 134 | ((((unsigned int)(bus)) << 16) \ |
137 | |(((unsigned long)(devfn)) << 8) \ | 135 | |(((unsigned int)(devfn)) << 8) \ |
138 | |(((unsigned long)(off)) & 0xFCUL) \ | 136 | |(((unsigned int)(off)) & 0xFCUL) \ |
139 | |1UL) | 137 | |1UL) |
140 | 138 | ||
141 | static unsigned long macrisc_cfg_access(struct pci_controller* hose, | 139 | static unsigned long macrisc_cfg_access(struct pci_controller* hose, |
@@ -168,7 +166,8 @@ static int macrisc_read_config(struct pci_bus *bus, unsigned int devfn, | |||
168 | hose = pci_bus_to_host(bus); | 166 | hose = pci_bus_to_host(bus); |
169 | if (hose == NULL) | 167 | if (hose == NULL) |
170 | return PCIBIOS_DEVICE_NOT_FOUND; | 168 | return PCIBIOS_DEVICE_NOT_FOUND; |
171 | 169 | if (offset >= 0x100) | |
170 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
172 | addr = macrisc_cfg_access(hose, bus->number, devfn, offset); | 171 | addr = macrisc_cfg_access(hose, bus->number, devfn, offset); |
173 | if (!addr) | 172 | if (!addr) |
174 | return PCIBIOS_DEVICE_NOT_FOUND; | 173 | return PCIBIOS_DEVICE_NOT_FOUND; |
@@ -199,7 +198,8 @@ static int macrisc_write_config(struct pci_bus *bus, unsigned int devfn, | |||
199 | hose = pci_bus_to_host(bus); | 198 | hose = pci_bus_to_host(bus); |
200 | if (hose == NULL) | 199 | if (hose == NULL) |
201 | return PCIBIOS_DEVICE_NOT_FOUND; | 200 | return PCIBIOS_DEVICE_NOT_FOUND; |
202 | 201 | if (offset >= 0x100) | |
202 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
203 | addr = macrisc_cfg_access(hose, bus->number, devfn, offset); | 203 | addr = macrisc_cfg_access(hose, bus->number, devfn, offset); |
204 | if (!addr) | 204 | if (!addr) |
205 | return PCIBIOS_DEVICE_NOT_FOUND; | 205 | return PCIBIOS_DEVICE_NOT_FOUND; |
@@ -234,12 +234,13 @@ static struct pci_ops macrisc_pci_ops = | |||
234 | /* | 234 | /* |
235 | * Verify that a specific (bus, dev_fn) exists on chaos | 235 | * Verify that a specific (bus, dev_fn) exists on chaos |
236 | */ | 236 | */ |
237 | static int | 237 | static int chaos_validate_dev(struct pci_bus *bus, int devfn, int offset) |
238 | chaos_validate_dev(struct pci_bus *bus, int devfn, int offset) | ||
239 | { | 238 | { |
240 | struct device_node *np; | 239 | struct device_node *np; |
241 | u32 *vendor, *device; | 240 | u32 *vendor, *device; |
242 | 241 | ||
242 | if (offset >= 0x100) | ||
243 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
243 | np = pci_busdev_to_OF_node(bus, devfn); | 244 | np = pci_busdev_to_OF_node(bus, devfn); |
244 | if (np == NULL) | 245 | if (np == NULL) |
245 | return PCIBIOS_DEVICE_NOT_FOUND; | 246 | return PCIBIOS_DEVICE_NOT_FOUND; |
@@ -285,15 +286,13 @@ static struct pci_ops chaos_pci_ops = | |||
285 | }; | 286 | }; |
286 | 287 | ||
287 | static void __init setup_chaos(struct pci_controller *hose, | 288 | static void __init setup_chaos(struct pci_controller *hose, |
288 | struct reg_property *addr) | 289 | struct resource *addr) |
289 | { | 290 | { |
290 | /* assume a `chaos' bridge */ | 291 | /* assume a `chaos' bridge */ |
291 | hose->ops = &chaos_pci_ops; | 292 | hose->ops = &chaos_pci_ops; |
292 | hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000); | 293 | hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000); |
293 | hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000); | 294 | hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000); |
294 | } | 295 | } |
295 | #else | ||
296 | #define setup_chaos(hose, addr) | ||
297 | #endif /* CONFIG_PPC32 */ | 296 | #endif /* CONFIG_PPC32 */ |
298 | 297 | ||
299 | #ifdef CONFIG_PPC64 | 298 | #ifdef CONFIG_PPC64 |
@@ -326,7 +325,7 @@ static int u3_ht_skip_device(struct pci_controller *hose, | |||
326 | else | 325 | else |
327 | busdn = hose->arch_data; | 326 | busdn = hose->arch_data; |
328 | for (dn = busdn->child; dn; dn = dn->sibling) | 327 | for (dn = busdn->child; dn; dn = dn->sibling) |
329 | if (dn->data && PCI_DN(dn)->devfn == devfn) | 328 | if (PCI_DN(dn) && PCI_DN(dn)->devfn == devfn) |
330 | break; | 329 | break; |
331 | if (dn == NULL) | 330 | if (dn == NULL) |
332 | return -1; | 331 | return -1; |
@@ -343,10 +342,10 @@ static int u3_ht_skip_device(struct pci_controller *hose, | |||
343 | } | 342 | } |
344 | 343 | ||
345 | #define U3_HT_CFA0(devfn, off) \ | 344 | #define U3_HT_CFA0(devfn, off) \ |
346 | ((((unsigned long)devfn) << 8) | offset) | 345 | ((((unsigned int)devfn) << 8) | offset) |
347 | #define U3_HT_CFA1(bus, devfn, off) \ | 346 | #define U3_HT_CFA1(bus, devfn, off) \ |
348 | (U3_HT_CFA0(devfn, off) \ | 347 | (U3_HT_CFA0(devfn, off) \ |
349 | + (((unsigned long)bus) << 16) \ | 348 | + (((unsigned int)bus) << 16) \ |
350 | + 0x01000000UL) | 349 | + 0x01000000UL) |
351 | 350 | ||
352 | static unsigned long u3_ht_cfg_access(struct pci_controller* hose, | 351 | static unsigned long u3_ht_cfg_access(struct pci_controller* hose, |
@@ -356,9 +355,11 @@ static unsigned long u3_ht_cfg_access(struct pci_controller* hose, | |||
356 | /* For now, we don't self probe U3 HT bridge */ | 355 | /* For now, we don't self probe U3 HT bridge */ |
357 | if (PCI_SLOT(devfn) == 0) | 356 | if (PCI_SLOT(devfn) == 0) |
358 | return 0; | 357 | return 0; |
359 | return ((unsigned long)hose->cfg_data) + U3_HT_CFA0(devfn, offset); | 358 | return ((unsigned long)hose->cfg_data) + |
359 | U3_HT_CFA0(devfn, offset); | ||
360 | } else | 360 | } else |
361 | return ((unsigned long)hose->cfg_data) + U3_HT_CFA1(bus, devfn, offset); | 361 | return ((unsigned long)hose->cfg_data) + |
362 | U3_HT_CFA1(bus, devfn, offset); | ||
362 | } | 363 | } |
363 | 364 | ||
364 | static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, | 365 | static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, |
@@ -370,7 +371,8 @@ static int u3_ht_read_config(struct pci_bus *bus, unsigned int devfn, | |||
370 | hose = pci_bus_to_host(bus); | 371 | hose = pci_bus_to_host(bus); |
371 | if (hose == NULL) | 372 | if (hose == NULL) |
372 | return PCIBIOS_DEVICE_NOT_FOUND; | 373 | return PCIBIOS_DEVICE_NOT_FOUND; |
373 | 374 | if (offset >= 0x100) | |
375 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
374 | addr = u3_ht_cfg_access(hose, bus->number, devfn, offset); | 376 | addr = u3_ht_cfg_access(hose, bus->number, devfn, offset); |
375 | if (!addr) | 377 | if (!addr) |
376 | return PCIBIOS_DEVICE_NOT_FOUND; | 378 | return PCIBIOS_DEVICE_NOT_FOUND; |
@@ -419,7 +421,8 @@ static int u3_ht_write_config(struct pci_bus *bus, unsigned int devfn, | |||
419 | hose = pci_bus_to_host(bus); | 421 | hose = pci_bus_to_host(bus); |
420 | if (hose == NULL) | 422 | if (hose == NULL) |
421 | return PCIBIOS_DEVICE_NOT_FOUND; | 423 | return PCIBIOS_DEVICE_NOT_FOUND; |
422 | 424 | if (offset >= 0x100) | |
425 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
423 | addr = u3_ht_cfg_access(hose, bus->number, devfn, offset); | 426 | addr = u3_ht_cfg_access(hose, bus->number, devfn, offset); |
424 | if (!addr) | 427 | if (!addr) |
425 | return PCIBIOS_DEVICE_NOT_FOUND; | 428 | return PCIBIOS_DEVICE_NOT_FOUND; |
@@ -459,6 +462,112 @@ static struct pci_ops u3_ht_pci_ops = | |||
459 | u3_ht_read_config, | 462 | u3_ht_read_config, |
460 | u3_ht_write_config | 463 | u3_ht_write_config |
461 | }; | 464 | }; |
465 | |||
466 | #define U4_PCIE_CFA0(devfn, off) \ | ||
467 | ((1 << ((unsigned int)PCI_SLOT(dev_fn))) \ | ||
468 | | (((unsigned int)PCI_FUNC(dev_fn)) << 8) \ | ||
469 | | ((((unsigned int)(off)) >> 8) << 28) \ | ||
470 | | (((unsigned int)(off)) & 0xfcU)) | ||
471 | |||
472 | #define U4_PCIE_CFA1(bus, devfn, off) \ | ||
473 | ((((unsigned int)(bus)) << 16) \ | ||
474 | |(((unsigned int)(devfn)) << 8) \ | ||
475 | | ((((unsigned int)(off)) >> 8) << 28) \ | ||
476 | |(((unsigned int)(off)) & 0xfcU) \ | ||
477 | |1UL) | ||
478 | |||
479 | static unsigned long u4_pcie_cfg_access(struct pci_controller* hose, | ||
480 | u8 bus, u8 dev_fn, int offset) | ||
481 | { | ||
482 | unsigned int caddr; | ||
483 | |||
484 | if (bus == hose->first_busno) { | ||
485 | caddr = U4_PCIE_CFA0(dev_fn, offset); | ||
486 | } else | ||
487 | caddr = U4_PCIE_CFA1(bus, dev_fn, offset); | ||
488 | |||
489 | /* Uninorth will return garbage if we don't read back the value ! */ | ||
490 | do { | ||
491 | out_le32(hose->cfg_addr, caddr); | ||
492 | } while (in_le32(hose->cfg_addr) != caddr); | ||
493 | |||
494 | offset &= 0x03; | ||
495 | return ((unsigned long)hose->cfg_data) + offset; | ||
496 | } | ||
497 | |||
498 | static int u4_pcie_read_config(struct pci_bus *bus, unsigned int devfn, | ||
499 | int offset, int len, u32 *val) | ||
500 | { | ||
501 | struct pci_controller *hose; | ||
502 | unsigned long addr; | ||
503 | |||
504 | hose = pci_bus_to_host(bus); | ||
505 | if (hose == NULL) | ||
506 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
507 | if (offset >= 0x1000) | ||
508 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
509 | addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset); | ||
510 | if (!addr) | ||
511 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
512 | /* | ||
513 | * Note: the caller has already checked that offset is | ||
514 | * suitably aligned and that len is 1, 2 or 4. | ||
515 | */ | ||
516 | switch (len) { | ||
517 | case 1: | ||
518 | *val = in_8((u8 *)addr); | ||
519 | break; | ||
520 | case 2: | ||
521 | *val = in_le16((u16 *)addr); | ||
522 | break; | ||
523 | default: | ||
524 | *val = in_le32((u32 *)addr); | ||
525 | break; | ||
526 | } | ||
527 | return PCIBIOS_SUCCESSFUL; | ||
528 | } | ||
529 | |||
530 | static int u4_pcie_write_config(struct pci_bus *bus, unsigned int devfn, | ||
531 | int offset, int len, u32 val) | ||
532 | { | ||
533 | struct pci_controller *hose; | ||
534 | unsigned long addr; | ||
535 | |||
536 | hose = pci_bus_to_host(bus); | ||
537 | if (hose == NULL) | ||
538 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
539 | if (offset >= 0x1000) | ||
540 | return PCIBIOS_BAD_REGISTER_NUMBER; | ||
541 | addr = u4_pcie_cfg_access(hose, bus->number, devfn, offset); | ||
542 | if (!addr) | ||
543 | return PCIBIOS_DEVICE_NOT_FOUND; | ||
544 | /* | ||
545 | * Note: the caller has already checked that offset is | ||
546 | * suitably aligned and that len is 1, 2 or 4. | ||
547 | */ | ||
548 | switch (len) { | ||
549 | case 1: | ||
550 | out_8((u8 *)addr, val); | ||
551 | (void) in_8((u8 *)addr); | ||
552 | break; | ||
553 | case 2: | ||
554 | out_le16((u16 *)addr, val); | ||
555 | (void) in_le16((u16 *)addr); | ||
556 | break; | ||
557 | default: | ||
558 | out_le32((u32 *)addr, val); | ||
559 | (void) in_le32((u32 *)addr); | ||
560 | break; | ||
561 | } | ||
562 | return PCIBIOS_SUCCESSFUL; | ||
563 | } | ||
564 | |||
565 | static struct pci_ops u4_pcie_pci_ops = | ||
566 | { | ||
567 | u4_pcie_read_config, | ||
568 | u4_pcie_write_config | ||
569 | }; | ||
570 | |||
462 | #endif /* CONFIG_PPC64 */ | 571 | #endif /* CONFIG_PPC64 */ |
463 | 572 | ||
464 | #ifdef CONFIG_PPC32 | 573 | #ifdef CONFIG_PPC32 |
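
The U4_PCIE_CFA0/CFA1 macros in the hunk above pack the extra offset bits needed for the 4KB extended configuration space into the top nibble of the address word. A re-expression with explicit parameter names, plus one worked value, may make the encoding easier to read; the helper itself is illustrative and not part of the patch.

	#include <linux/types.h>
	#include <linux/pci.h>

	/* Same encoding as U4_PCIE_CFA0, spelled out for a type 0 cycle */
	static inline u32 u4_pcie_type0_addr(unsigned int devfn, unsigned int off)
	{
		return (1u << PCI_SLOT(devfn))		/* one-hot IDSEL for the slot */
			| (PCI_FUNC(devfn) << 8)	/* function number */
			| ((off >> 8) << 28)		/* extended offset bits 11:8 */
			| (off & 0xfcU);		/* dword-aligned low offset */
	}

	/* e.g. devfn 0, offset 0x140 encodes as 0x10000041 */
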
@@ -532,7 +641,8 @@ static void __init init_p2pbridge(void) | |||
532 | } | 641 | } |
533 | if (early_read_config_word(hose, bus, devfn, | 642 | if (early_read_config_word(hose, bus, devfn, |
534 | PCI_BRIDGE_CONTROL, &val) < 0) { | 643 | PCI_BRIDGE_CONTROL, &val) < 0) { |
535 | printk(KERN_ERR "init_p2pbridge: couldn't read bridge control\n"); | 644 | printk(KERN_ERR "init_p2pbridge: couldn't read bridge" |
645 | " control\n"); | ||
536 | return; | 646 | return; |
537 | } | 647 | } |
538 | val &= ~PCI_BRIDGE_CTL_MASTER_ABORT; | 648 | val &= ~PCI_BRIDGE_CTL_MASTER_ABORT; |
@@ -576,36 +686,38 @@ static void __init fixup_nec_usb2(void) | |||
576 | continue; | 686 | continue; |
577 | early_read_config_dword(hose, bus, devfn, 0xe4, &data); | 687 | early_read_config_dword(hose, bus, devfn, 0xe4, &data); |
578 | if (data & 1UL) { | 688 | if (data & 1UL) { |
579 | printk("Found NEC PD720100A USB2 chip with disabled EHCI, fixing up...\n"); | 689 | printk("Found NEC PD720100A USB2 chip with disabled" |
690 | " EHCI, fixing up...\n"); | ||
580 | data &= ~1UL; | 691 | data &= ~1UL; |
581 | early_write_config_dword(hose, bus, devfn, 0xe4, data); | 692 | early_write_config_dword(hose, bus, devfn, 0xe4, data); |
582 | early_write_config_byte(hose, bus, devfn | 2, PCI_INTERRUPT_LINE, | 693 | early_write_config_byte(hose, bus, |
694 | devfn | 2, PCI_INTERRUPT_LINE, | ||
583 | nec->intrs[0].line); | 695 | nec->intrs[0].line); |
584 | } | 696 | } |
585 | } | 697 | } |
586 | } | 698 | } |
587 | 699 | ||
588 | static void __init setup_bandit(struct pci_controller *hose, | 700 | static void __init setup_bandit(struct pci_controller *hose, |
589 | struct reg_property *addr) | 701 | struct resource *addr) |
590 | { | 702 | { |
591 | hose->ops = ¯isc_pci_ops; | 703 | hose->ops = ¯isc_pci_ops; |
592 | hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000); | 704 | hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000); |
593 | hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000); | 705 | hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000); |
594 | init_bandit(hose); | 706 | init_bandit(hose); |
595 | } | 707 | } |
596 | 708 | ||
597 | static int __init setup_uninorth(struct pci_controller *hose, | 709 | static int __init setup_uninorth(struct pci_controller *hose, |
598 | struct reg_property *addr) | 710 | struct resource *addr) |
599 | { | 711 | { |
600 | pci_assign_all_buses = 1; | 712 | pci_assign_all_buses = 1; |
601 | has_uninorth = 1; | 713 | has_uninorth = 1; |
602 | hose->ops = ¯isc_pci_ops; | 714 | hose->ops = ¯isc_pci_ops; |
603 | hose->cfg_addr = ioremap(addr->address + 0x800000, 0x1000); | 715 | hose->cfg_addr = ioremap(addr->start + 0x800000, 0x1000); |
604 | hose->cfg_data = ioremap(addr->address + 0xc00000, 0x1000); | 716 | hose->cfg_data = ioremap(addr->start + 0xc00000, 0x1000); |
605 | /* We "know" that the bridge at f2000000 has the PCI slots. */ | 717 | /* We "know" that the bridge at f2000000 has the PCI slots. */ |
606 | return addr->address == 0xf2000000; | 718 | return addr->start == 0xf2000000; |
607 | } | 719 | } |
608 | #endif | 720 | #endif /* CONFIG_PPC32 */ |
609 | 721 | ||
610 | #ifdef CONFIG_PPC64 | 722 | #ifdef CONFIG_PPC64 |
611 | static void __init setup_u3_agp(struct pci_controller* hose) | 723 | static void __init setup_u3_agp(struct pci_controller* hose) |
@@ -625,15 +737,36 @@ static void __init setup_u3_agp(struct pci_controller* hose) | |||
625 | hose->ops = ¯isc_pci_ops; | 737 | hose->ops = ¯isc_pci_ops; |
626 | hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000); | 738 | hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000); |
627 | hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000); | 739 | hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000); |
628 | |||
629 | u3_agp = hose; | 740 | u3_agp = hose; |
630 | } | 741 | } |
631 | 742 | ||
743 | static void __init setup_u4_pcie(struct pci_controller* hose) | ||
744 | { | ||
745 | /* We currently only implement the "non-atomic" config space, to | ||
746 | * be optimised later. | ||
747 | */ | ||
748 | hose->ops = &u4_pcie_pci_ops; | ||
749 | hose->cfg_addr = ioremap(0xf0000000 + 0x800000, 0x1000); | ||
750 | hose->cfg_data = ioremap(0xf0000000 + 0xc00000, 0x1000); | ||
751 | |||
752 | /* The bus contains a bridge from root -> device, we need to | ||
753 | * make it visible on bus 0 so that we pick the right type | ||
754 | * of config cycles. If we didn't, we would have to force all | ||
755 | * config cycles to be type 1. So we override the "bus-range" | ||
756 | * property here | ||
757 | */ | ||
758 | hose->first_busno = 0x00; | ||
759 | hose->last_busno = 0xff; | ||
760 | u4_pcie = hose; | ||
761 | } | ||
762 | |||
632 | static void __init setup_u3_ht(struct pci_controller* hose) | 763 | static void __init setup_u3_ht(struct pci_controller* hose) |
633 | { | 764 | { |
634 | struct device_node *np = (struct device_node *)hose->arch_data; | 765 | struct device_node *np = (struct device_node *)hose->arch_data; |
766 | struct pci_controller *other = NULL; | ||
635 | int i, cur; | 767 | int i, cur; |
636 | 768 | ||
769 | |||
637 | hose->ops = &u3_ht_pci_ops; | 770 | hose->ops = &u3_ht_pci_ops; |
638 | 771 | ||
639 | /* We hard code the address because of the different size of | 772 | /* We hard code the address because of the different size of |
@@ -667,11 +800,20 @@ static void __init setup_u3_ht(struct pci_controller* hose) | |||
667 | 800 | ||
668 | u3_ht = hose; | 801 | u3_ht = hose; |
669 | 802 | ||
670 | if (u3_agp == NULL) { | 803 | if (u3_agp != NULL) |
671 | DBG("U3 has no AGP, using full resource range\n"); | 804 | other = u3_agp; |
805 | else if (u4_pcie != NULL) | ||
806 | other = u4_pcie; | ||
807 | |||
808 | if (other == NULL) { | ||
809 | DBG("U3/4 has no AGP/PCIE, using full resource range\n"); | ||
672 | return; | 810 | return; |
673 | } | 811 | } |
674 | 812 | ||
813 | /* Fixup bus range vs. PCIE */ | ||
814 | if (u4_pcie) | ||
815 | hose->last_busno = u4_pcie->first_busno - 1; | ||
816 | |||
675 | /* We "remove" the AGP resources from the resources allocated to HT, | 817 | /* We "remove" the AGP resources from the resources allocated to HT, |
676 | * that is we create "holes". However, that code does assumptions | 818 | * that is we create "holes". However, that code does assumptions |
677 | * that so far happen to be true (cross fingers...), typically that | 819 | * that so far happen to be true (cross fingers...), typically that |
@@ -679,7 +821,7 @@ static void __init setup_u3_ht(struct pci_controller* hose) | |||
679 | */ | 821 | */ |
680 | cur = 0; | 822 | cur = 0; |
681 | for (i=0; i<3; i++) { | 823 | for (i=0; i<3; i++) { |
682 | struct resource *res = &u3_agp->mem_resources[i]; | 824 | struct resource *res = &other->mem_resources[i]; |
683 | if (res->flags != IORESOURCE_MEM) | 825 | if (res->flags != IORESOURCE_MEM) |
684 | continue; | 826 | continue; |
685 | /* We don't care about "fine" resources */ | 827 | /* We don't care about "fine" resources */ |
@@ -722,7 +864,7 @@ static void __init setup_u3_ht(struct pci_controller* hose) | |||
722 | hose->mem_resources[cur-1].end = res->start - 1; | 864 | hose->mem_resources[cur-1].end = res->start - 1; |
723 | } | 865 | } |
724 | } | 866 | } |
725 | #endif | 867 | #endif /* CONFIG_PPC64 */ |
726 | 868 | ||
727 | /* | 869 | /* |
728 | * We assume that if we have a G3 powermac, we have one bridge called | 870 | * We assume that if we have a G3 powermac, we have one bridge called |
@@ -733,24 +875,17 @@ static int __init add_bridge(struct device_node *dev) | |||
733 | { | 875 | { |
734 | int len; | 876 | int len; |
735 | struct pci_controller *hose; | 877 | struct pci_controller *hose; |
736 | #ifdef CONFIG_PPC32 | 878 | struct resource rsrc; |
737 | struct reg_property *addr; | ||
738 | #endif | ||
739 | char *disp_name; | 879 | char *disp_name; |
740 | int *bus_range; | 880 | int *bus_range; |
741 | int primary = 1; | 881 | int primary = 1, has_address = 0; |
742 | 882 | ||
743 | DBG("Adding PCI host bridge %s\n", dev->full_name); | 883 | DBG("Adding PCI host bridge %s\n", dev->full_name); |
744 | 884 | ||
745 | #ifdef CONFIG_PPC32 | 885 | /* Fetch host bridge registers address */ |
746 | /* XXX fix this */ | 886 | has_address = (of_address_to_resource(dev, 0, &rsrc) == 0); |
747 | addr = (struct reg_property *) get_property(dev, "reg", &len); | 887 | |
748 | if (addr == NULL || len < sizeof(*addr)) { | 888 | /* Get bus range if any */ |
749 | printk(KERN_WARNING "Can't use %s: no address\n", | ||
750 | dev->full_name); | ||
751 | return -ENODEV; | ||
752 | } | ||
753 | #endif | ||
754 | bus_range = (int *) get_property(dev, "bus-range", &len); | 889 | bus_range = (int *) get_property(dev, "bus-range", &len); |
755 | if (bus_range == NULL || len < 2 * sizeof(int)) { | 890 | if (bus_range == NULL || len < 2 * sizeof(int)) { |
756 | printk(KERN_WARNING "Can't get bus-range for %s, assume" | 891 | printk(KERN_WARNING "Can't get bus-range for %s, assume" |
@@ -770,6 +905,8 @@ static int __init add_bridge(struct device_node *dev) | |||
770 | hose->last_busno = bus_range ? bus_range[1] : 0xff; | 905 | hose->last_busno = bus_range ? bus_range[1] : 0xff; |
771 | 906 | ||
772 | disp_name = NULL; | 907 | disp_name = NULL; |
908 | |||
909 | /* 64 bits only bridges */ | ||
773 | #ifdef CONFIG_PPC64 | 910 | #ifdef CONFIG_PPC64 |
774 | if (device_is_compatible(dev, "u3-agp")) { | 911 | if (device_is_compatible(dev, "u3-agp")) { |
775 | setup_u3_agp(hose); | 912 | setup_u3_agp(hose); |
@@ -779,28 +916,37 @@ static int __init add_bridge(struct device_node *dev) | |||
779 | setup_u3_ht(hose); | 916 | setup_u3_ht(hose); |
780 | disp_name = "U3-HT"; | 917 | disp_name = "U3-HT"; |
781 | primary = 1; | 918 | primary = 1; |
919 | } else if (device_is_compatible(dev, "u4-pcie")) { | ||
920 | setup_u4_pcie(hose); | ||
921 | disp_name = "U4-PCIE"; | ||
922 | primary = 0; | ||
782 | } | 923 | } |
783 | printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number: %d->%d\n", | 924 | printk(KERN_INFO "Found %s PCI host bridge. Firmware bus number:" |
784 | disp_name, hose->first_busno, hose->last_busno); | 925 | " %d->%d\n", disp_name, hose->first_busno, hose->last_busno); |
785 | #else | 926 | #endif /* CONFIG_PPC64 */ |
927 | |||
928 | /* 32 bits only bridges */ | ||
929 | #ifdef CONFIG_PPC32 | ||
786 | if (device_is_compatible(dev, "uni-north")) { | 930 | if (device_is_compatible(dev, "uni-north")) { |
787 | primary = setup_uninorth(hose, addr); | 931 | primary = setup_uninorth(hose, &rsrc); |
788 | disp_name = "UniNorth"; | 932 | disp_name = "UniNorth"; |
789 | } else if (strcmp(dev->name, "pci") == 0) { | 933 | } else if (strcmp(dev->name, "pci") == 0) { |
790 | /* XXX assume this is a mpc106 (grackle) */ | 934 | /* XXX assume this is a mpc106 (grackle) */ |
791 | setup_grackle(hose); | 935 | setup_grackle(hose); |
792 | disp_name = "Grackle (MPC106)"; | 936 | disp_name = "Grackle (MPC106)"; |
793 | } else if (strcmp(dev->name, "bandit") == 0) { | 937 | } else if (strcmp(dev->name, "bandit") == 0) { |
794 | setup_bandit(hose, addr); | 938 | setup_bandit(hose, &rsrc); |
795 | disp_name = "Bandit"; | 939 | disp_name = "Bandit"; |
796 | } else if (strcmp(dev->name, "chaos") == 0) { | 940 | } else if (strcmp(dev->name, "chaos") == 0) { |
797 | setup_chaos(hose, addr); | 941 | setup_chaos(hose, &rsrc); |
798 | disp_name = "Chaos"; | 942 | disp_name = "Chaos"; |
799 | primary = 0; | 943 | primary = 0; |
800 | } | 944 | } |
801 | printk(KERN_INFO "Found %s PCI host bridge at 0x%08lx. Firmware bus number: %d->%d\n", | 945 | printk(KERN_INFO "Found %s PCI host bridge at 0x%08lx. " |
802 | disp_name, addr->address, hose->first_busno, hose->last_busno); | 946 | "Firmware bus number: %d->%d\n", |
803 | #endif | 947 | disp_name, rsrc.start, hose->first_busno, hose->last_busno); |
948 | #endif /* CONFIG_PPC32 */ | ||
949 | |||
804 | DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n", | 950 | DBG(" ->Hose at 0x%p, cfg_addr=0x%p,cfg_data=0x%p\n", |
805 | hose, hose->cfg_addr, hose->cfg_data); | 951 | hose, hose->cfg_addr, hose->cfg_data); |
806 | 952 | ||
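
The "bus-range" handling in the hunk above falls back to a full 0..0xff range when the property is missing or too short. The property itself is just two cells, the first and last bus number behind the bridge, so reading it boils down to the sketch below (helper name invented, logic mirrored from add_bridge() above):

	#include <asm/prom.h>

	static void get_bus_range(struct device_node *dev, int *first, int *last)
	{
		int len;
		int *bus_range = (int *) get_property(dev, "bus-range", &len);

		if (bus_range == NULL || len < 2 * sizeof(int)) {
			*first = 0;	/* same fallback as add_bridge() */
			*last = 0xff;
			return;
		}
		*first = bus_range[0];
		*last = bus_range[1];
	}
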
@@ -814,8 +960,7 @@ static int __init add_bridge(struct device_node *dev) | |||
814 | return 0; | 960 | return 0; |
815 | } | 961 | } |
816 | 962 | ||
817 | static void __init | 963 | static void __init pcibios_fixup_OF_interrupts(void) |
818 | pcibios_fixup_OF_interrupts(void) | ||
819 | { | 964 | { |
820 | struct pci_dev* dev = NULL; | 965 | struct pci_dev* dev = NULL; |
821 | 966 | ||
@@ -835,8 +980,7 @@ pcibios_fixup_OF_interrupts(void) | |||
835 | } | 980 | } |
836 | } | 981 | } |
837 | 982 | ||
838 | void __init | 983 | void __init pmac_pcibios_fixup(void) |
839 | pmac_pcibios_fixup(void) | ||
840 | { | 984 | { |
841 | /* Fixup interrupts according to OF tree */ | 985 | /* Fixup interrupts according to OF tree */ |
842 | pcibios_fixup_OF_interrupts(); | 986 | pcibios_fixup_OF_interrupts(); |
@@ -899,6 +1043,8 @@ void __init pmac_pci_init(void) | |||
899 | pci_setup_phb_io(u3_ht, 1); | 1043 | pci_setup_phb_io(u3_ht, 1); |
900 | if (u3_agp) | 1044 | if (u3_agp) |
901 | pci_setup_phb_io(u3_agp, 0); | 1045 | pci_setup_phb_io(u3_agp, 0); |
1046 | if (u4_pcie) | ||
1047 | pci_setup_phb_io(u4_pcie, 0); | ||
902 | 1048 | ||
903 | /* | 1049 | /* |
904 | * On ppc64, fixup the IO resources on our host bridges as | 1050 | * On ppc64, fixup the IO resources on our host bridges as |
@@ -911,7 +1057,8 @@ void __init pmac_pci_init(void) | |||
911 | 1057 | ||
912 | /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We | 1058 | /* Fixup the PCI<->OF mapping for U3 AGP due to bus renumbering. We |
913 | * assume there is no P2P bridge on the AGP bus, which should be a | 1059 | * assume there is no P2P bridge on the AGP bus, which should be a |
914 | * safe assumptions hopefully. | 1060 | * safe assumption for now. We should do something better in the |
1061 | * future though | ||
915 | */ | 1062 | */ |
916 | if (u3_agp) { | 1063 | if (u3_agp) { |
917 | struct device_node *np = u3_agp->arch_data; | 1064 | struct device_node *np = u3_agp->arch_data; |
@@ -919,7 +1066,6 @@ void __init pmac_pci_init(void) | |||
919 | for (np = np->child; np; np = np->sibling) | 1066 | for (np = np->child; np; np = np->sibling) |
920 | PCI_DN(np)->busno = 0xf0; | 1067 | PCI_DN(np)->busno = 0xf0; |
921 | } | 1068 | } |
922 | |||
923 | /* pmac_check_ht_link(); */ | 1069 | /* pmac_check_ht_link(); */ |
924 | 1070 | ||
925 | /* Tell pci.c to not use the common resource allocation mechanism */ | 1071 | /* Tell pci.c to not use the common resource allocation mechanism */ |
@@ -1126,7 +1272,8 @@ void pmac_pci_fixup_pciata(struct pci_dev* dev) | |||
1126 | good: | 1272 | good: |
1127 | pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); | 1273 | pci_read_config_byte(dev, PCI_CLASS_PROG, &progif); |
1128 | if ((progif & 5) != 5) { | 1274 | if ((progif & 5) != 5) { |
1129 | printk(KERN_INFO "Forcing PCI IDE into native mode: %s\n", pci_name(dev)); | 1275 | printk(KERN_INFO "Forcing PCI IDE into native mode: %s\n", |
1276 | pci_name(dev)); | ||
1130 | (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5); | 1277 | (void) pci_write_config_byte(dev, PCI_CLASS_PROG, progif|5); |
1131 | if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) || | 1278 | if (pci_read_config_byte(dev, PCI_CLASS_PROG, &progif) || |
1132 | (progif & 5) != 5) | 1279 | (progif & 5) != 5) |
@@ -1152,7 +1299,8 @@ static void fixup_k2_sata(struct pci_dev* dev) | |||
1152 | for (i = 0; i < 6; i++) { | 1299 | for (i = 0; i < 6; i++) { |
1153 | dev->resource[i].start = dev->resource[i].end = 0; | 1300 | dev->resource[i].start = dev->resource[i].end = 0; |
1154 | dev->resource[i].flags = 0; | 1301 | dev->resource[i].flags = 0; |
1155 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0); | 1302 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, |
1303 | 0); | ||
1156 | } | 1304 | } |
1157 | } else { | 1305 | } else { |
1158 | pci_read_config_word(dev, PCI_COMMAND, &cmd); | 1306 | pci_read_config_word(dev, PCI_COMMAND, &cmd); |
@@ -1161,7 +1309,8 @@ static void fixup_k2_sata(struct pci_dev* dev) | |||
1161 | for (i = 0; i < 5; i++) { | 1309 | for (i = 0; i < 5; i++) { |
1162 | dev->resource[i].start = dev->resource[i].end = 0; | 1310 | dev->resource[i].start = dev->resource[i].end = 0; |
1163 | dev->resource[i].flags = 0; | 1311 | dev->resource[i].flags = 0; |
1164 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, 0); | 1312 | pci_write_config_dword(dev, PCI_BASE_ADDRESS_0 + 4 * i, |
1313 | 0); | ||
1165 | } | 1314 | } |
1166 | } | 1315 | } |
1167 | } | 1316 | } |
diff --git a/arch/powerpc/platforms/powermac/pfunc_base.c b/arch/powerpc/platforms/powermac/pfunc_base.c new file mode 100644 index 000000000000..4ffd2a9832a0 --- /dev/null +++ b/arch/powerpc/platforms/powermac/pfunc_base.c | |||
@@ -0,0 +1,405 @@ | |||
1 | #include <linux/config.h> | ||
2 | #include <linux/types.h> | ||
3 | #include <linux/init.h> | ||
4 | #include <linux/delay.h> | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/interrupt.h> | ||
7 | #include <linux/spinlock.h> | ||
8 | |||
9 | #include <asm/pmac_feature.h> | ||
10 | #include <asm/pmac_pfunc.h> | ||
11 | |||
12 | #define DBG(fmt...) printk(fmt) | ||
13 | |||
14 | static irqreturn_t macio_gpio_irq(int irq, void *data, struct pt_regs *regs) | ||
15 | { | ||
16 | pmf_do_irq(data); | ||
17 | |||
18 | return IRQ_HANDLED; | ||
19 | } | ||
20 | |||
21 | static int macio_do_gpio_irq_enable(struct pmf_function *func) | ||
22 | { | ||
23 | if (func->node->n_intrs < 1) | ||
24 | return -EINVAL; | ||
25 | |||
26 | return request_irq(func->node->intrs[0].line, macio_gpio_irq, 0, | ||
27 | func->node->name, func); | ||
28 | } | ||
29 | |||
30 | static int macio_do_gpio_irq_disable(struct pmf_function *func) | ||
31 | { | ||
32 | if (func->node->n_intrs < 1) | ||
33 | return -EINVAL; | ||
34 | |||
35 | free_irq(func->node->intrs[0].line, func); | ||
36 | return 0; | ||
37 | } | ||
38 | |||
39 | static int macio_do_gpio_write(PMF_STD_ARGS, u8 value, u8 mask) | ||
40 | { | ||
41 | u8 __iomem *addr = (u8 __iomem *)func->driver_data; | ||
42 | unsigned long flags; | ||
43 | u8 tmp; | ||
44 | |||
45 | /* Check polarity */ | ||
46 | if (args && args->count && !args->u[0].v) | ||
47 | value = ~value; | ||
48 | |||
49 | /* Toggle the GPIO */ | ||
50 | spin_lock_irqsave(&feature_lock, flags); | ||
51 | tmp = readb(addr); | ||
52 | tmp = (tmp & ~mask) | (value & mask); | ||
53 | DBG("Do write 0x%02x to GPIO %s (%p)\n", | ||
54 | tmp, func->node->full_name, addr); | ||
55 | writeb(tmp, addr); | ||
56 | spin_unlock_irqrestore(&feature_lock, flags); | ||
57 | |||
58 | return 0; | ||
59 | } | ||
60 | |||
61 | static int macio_do_gpio_read(PMF_STD_ARGS, u8 mask, int rshift, u8 xor) | ||
62 | { | ||
63 | u8 __iomem *addr = (u8 __iomem *)func->driver_data; | ||
64 | u32 value; | ||
65 | |||
66 | /* Check if we have room for reply */ | ||
67 | if (args == NULL || args->count == 0 || args->u[0].p == NULL) | ||
68 | return -EINVAL; | ||
69 | |||
70 | value = readb(addr); | ||
71 | *args->u[0].p = ((value & mask) >> rshift) ^ xor; | ||
72 | |||
73 | return 0; | ||
74 | } | ||
75 | |||
76 | static int macio_do_delay(PMF_STD_ARGS, u32 duration) | ||
77 | { | ||
78 | /* assume we can sleep ! */ | ||
79 | msleep((duration + 999) / 1000); | ||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | static struct pmf_handlers macio_gpio_handlers = { | ||
84 | .irq_enable = macio_do_gpio_irq_enable, | ||
85 | .irq_disable = macio_do_gpio_irq_disable, | ||
86 | .write_gpio = macio_do_gpio_write, | ||
87 | .read_gpio = macio_do_gpio_read, | ||
88 | .delay = macio_do_delay, | ||
89 | }; | ||
90 | |||
91 | static void macio_gpio_init_one(struct macio_chip *macio) | ||
92 | { | ||
93 | struct device_node *gparent, *gp; | ||
94 | |||
95 | /* | ||
96 | * Find the "gpio" parent node | ||
97 | */ | ||
98 | |||
99 | for (gparent = NULL; | ||
100 | (gparent = of_get_next_child(macio->of_node, gparent)) != NULL;) | ||
101 | if (strcmp(gparent->name, "gpio") == 0) | ||
102 | break; | ||
103 | if (gparent == NULL) | ||
104 | return; | ||
105 | |||
106 | DBG("Installing GPIO functions for macio %s\n", | ||
107 | macio->of_node->full_name); | ||
108 | |||
109 | /* | ||
110 | * Ok, got one, we don't need anything special to track them down, so | ||
111 | * we just create them all | ||
112 | */ | ||
113 | for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;) { | ||
114 | u32 *reg = (u32 *)get_property(gp, "reg", NULL); | ||
115 | unsigned long offset; | ||
116 | if (reg == NULL) | ||
117 | continue; | ||
118 | offset = *reg; | ||
119 | /* Deal with old style device-tree. We can safely hard code the | ||
120 | * offset for now, even if it's a bit gross ... | ||
121 | */ | ||
122 | if (offset < 0x50) | ||
123 | offset += 0x50; | ||
124 | offset += (unsigned long)macio->base; | ||
125 | pmf_register_driver(gp, &macio_gpio_handlers, (void *)offset); | ||
126 | } | ||
127 | |||
128 | DBG("Calling initial GPIO functions for macio %s\n", | ||
129 | macio->of_node->full_name); | ||
130 | |||
131 | /* And now we run all the init ones */ | ||
132 | for (gp = NULL; (gp = of_get_next_child(gparent, gp)) != NULL;) | ||
133 | pmf_do_functions(gp, NULL, 0, PMF_FLAGS_ON_INIT, NULL); | ||
134 | |||
135 | /* Note: We do not at this point implement the "at sleep" or "at wake" | ||
136 | * functions. I have yet to find any for GPIOs anyway | ||
137 | */ | ||
138 | } | ||
139 | |||
140 | static int macio_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask) | ||
141 | { | ||
142 | struct macio_chip *macio = func->driver_data; | ||
143 | unsigned long flags; | ||
144 | |||
145 | spin_lock_irqsave(&feature_lock, flags); | ||
146 | MACIO_OUT32(offset, (MACIO_IN32(offset) & ~mask) | (value & mask)); | ||
147 | spin_unlock_irqrestore(&feature_lock, flags); | ||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static int macio_do_read_reg32(PMF_STD_ARGS, u32 offset) | ||
152 | { | ||
153 | struct macio_chip *macio = func->driver_data; | ||
154 | |||
155 | /* Check if we have room for reply */ | ||
156 | if (args == NULL || args->count == 0 || args->u[0].p == NULL) | ||
157 | return -EINVAL; | ||
158 | |||
159 | *args->u[0].p = MACIO_IN32(offset); | ||
160 | return 0; | ||
161 | } | ||
162 | |||
163 | static int macio_do_write_reg8(PMF_STD_ARGS, u32 offset, u8 value, u8 mask) | ||
164 | { | ||
165 | struct macio_chip *macio = func->driver_data; | ||
166 | unsigned long flags; | ||
167 | |||
168 | spin_lock_irqsave(&feature_lock, flags); | ||
169 | MACIO_OUT8(offset, (MACIO_IN8(offset) & ~mask) | (value & mask)); | ||
170 | spin_unlock_irqrestore(&feature_lock, flags); | ||
171 | return 0; | ||
172 | } | ||
173 | |||
174 | static int macio_do_read_reg8(PMF_STD_ARGS, u32 offset) | ||
175 | { | ||
176 | struct macio_chip *macio = func->driver_data; | ||
177 | |||
178 | /* Check if we have room for reply */ | ||
179 | if (args == NULL || args->count == 0 || args->u[0].p == NULL) | ||
180 | return -EINVAL; | ||
181 | |||
182 | *((u8 *)(args->u[0].p)) = MACIO_IN8(offset); | ||
183 | return 0; | ||
184 | } | ||
185 | |||
186 | static int macio_do_read_reg32_msrx(PMF_STD_ARGS, u32 offset, u32 mask, | ||
187 | u32 shift, u32 xor) | ||
188 | { | ||
189 | struct macio_chip *macio = func->driver_data; | ||
190 | |||
191 | /* Check if we have room for reply */ | ||
192 | if (args == NULL || args->count == 0 || args->u[0].p == NULL) | ||
193 | return -EINVAL; | ||
194 | |||
195 | *args->u[0].p = ((MACIO_IN32(offset) & mask) >> shift) ^ xor; | ||
196 | return 0; | ||
197 | } | ||
198 | |||
199 | static int macio_do_read_reg8_msrx(PMF_STD_ARGS, u32 offset, u32 mask, | ||
200 | u32 shift, u32 xor) | ||
201 | { | ||
202 | struct macio_chip *macio = func->driver_data; | ||
203 | |||
204 | /* Check if we have room for reply */ | ||
205 | if (args == NULL || args->count == 0 || args->u[0].p == NULL) | ||
206 | return -EINVAL; | ||
207 | |||
208 | *((u8 *)(args->u[0].p)) = ((MACIO_IN8(offset) & mask) >> shift) ^ xor; | ||
209 | return 0; | ||
210 | } | ||
211 | |||
212 | static int macio_do_write_reg32_slm(PMF_STD_ARGS, u32 offset, u32 shift, | ||
213 | u32 mask) | ||
214 | { | ||
215 | struct macio_chip *macio = func->driver_data; | ||
216 | unsigned long flags; | ||
217 | u32 tmp, val; | ||
218 | |||
219 | /* Check args */ | ||
220 | if (args == NULL || args->count == 0) | ||
221 | return -EINVAL; | ||
222 | |||
223 | spin_lock_irqsave(&feature_lock, flags); | ||
224 | tmp = MACIO_IN32(offset); | ||
225 | val = args->u[0].v << shift; | ||
226 | tmp = (tmp & ~mask) | (val & mask); | ||
227 | MACIO_OUT32(offset, tmp); | ||
228 | spin_unlock_irqrestore(&feature_lock, flags); | ||
229 | return 0; | ||
230 | } | ||
231 | |||
232 | static int macio_do_write_reg8_slm(PMF_STD_ARGS, u32 offset, u32 shift, | ||
233 | u32 mask) | ||
234 | { | ||
235 | struct macio_chip *macio = func->driver_data; | ||
236 | unsigned long flags; | ||
237 | u32 tmp, val; | ||
238 | |||
239 | /* Check args */ | ||
240 | if (args == NULL || args->count == 0) | ||
241 | return -EINVAL; | ||
242 | |||
243 | spin_lock_irqsave(&feature_lock, flags); | ||
244 | tmp = MACIO_IN8(offset); | ||
245 | val = args->u[0].v << shift; | ||
246 | tmp = (tmp & ~mask) | (val & mask); | ||
247 | MACIO_OUT8(offset, tmp); | ||
248 | spin_unlock_irqrestore(&feature_lock, flags); | ||
249 | return 0; | ||
250 | } | ||
251 | |||
252 | static struct pmf_handlers macio_mmio_handlers = { | ||
253 | .write_reg32 = macio_do_write_reg32, | ||
254 | .read_reg32 = macio_do_read_reg32, | ||
255 | .write_reg8 = macio_do_write_reg8, | ||
256 | .read_reg8 = macio_do_read_reg8, | ||
257 | .read_reg32_msrx = macio_do_read_reg32_msrx, | ||
258 | .read_reg8_msrx = macio_do_read_reg8_msrx, | ||
259 | .write_reg32_slm = macio_do_write_reg32_slm, | ||
260 | .write_reg8_slm = macio_do_write_reg8_slm, | ||
261 | .delay = macio_do_delay, | ||
262 | }; | ||
263 | |||
264 | static void macio_mmio_init_one(struct macio_chip *macio) | ||
265 | { | ||
266 | DBG("Installing MMIO functions for macio %s\n", | ||
267 | macio->of_node->full_name); | ||
268 | |||
269 | pmf_register_driver(macio->of_node, &macio_mmio_handlers, macio); | ||
270 | } | ||
271 | |||
272 | static struct device_node *unin_hwclock; | ||
273 | |||
274 | static int unin_do_write_reg32(PMF_STD_ARGS, u32 offset, u32 value, u32 mask) | ||
275 | { | ||
276 | unsigned long flags; | ||
277 | |||
278 | spin_lock_irqsave(&feature_lock, flags); | ||
279 | /* This is fairly bogus in darwin, but it should work for our needs | ||
280 | * implemented that way: | ||
281 | */ | ||
282 | UN_OUT(offset, (UN_IN(offset) & ~mask) | (value & mask)); | ||
283 | spin_unlock_irqrestore(&feature_lock, flags); | ||
284 | return 0; | ||
285 | } | ||
286 | |||
287 | |||
288 | static struct pmf_handlers unin_mmio_handlers = { | ||
289 | .write_reg32 = unin_do_write_reg32, | ||
290 | .delay = macio_do_delay, | ||
291 | }; | ||
292 | |||
293 | static void uninorth_install_pfunc(void) | ||
294 | { | ||
295 | struct device_node *np; | ||
296 | |||
297 | DBG("Installing functions for UniN %s\n", | ||
298 | uninorth_node->full_name); | ||
299 | |||
300 | /* | ||
301 | * Install handlers for the bridge itself | ||
302 | */ | ||
303 | pmf_register_driver(uninorth_node, &unin_mmio_handlers, NULL); | ||
304 | pmf_do_functions(uninorth_node, NULL, 0, PMF_FLAGS_ON_INIT, NULL); | ||
305 | |||
306 | |||
307 | /* | ||
308 | * Install handlers for the hwclock child if any | ||
309 | */ | ||
310 | for (np = NULL; (np = of_get_next_child(uninorth_node, np)) != NULL;) | ||
311 | if (strcmp(np->name, "hw-clock") == 0) { | ||
312 | unin_hwclock = np; | ||
313 | break; | ||
314 | } | ||
315 | if (unin_hwclock) { | ||
316 | DBG("Installing functions for UniN clock %s\n", | ||
317 | unin_hwclock->full_name); | ||
318 | pmf_register_driver(unin_hwclock, &unin_mmio_handlers, NULL); | ||
319 | pmf_do_functions(unin_hwclock, NULL, 0, PMF_FLAGS_ON_INIT, | ||
320 | NULL); | ||
321 | } | ||
322 | } | ||
323 | |||
324 | /* We export this as the SMP code might init us early */ | ||
325 | int __init pmac_pfunc_base_install(void) | ||
326 | { | ||
327 | static int pfbase_inited; | ||
328 | int i; | ||
329 | |||
330 | if (pfbase_inited) | ||
331 | return 0; | ||
332 | pfbase_inited = 1; | ||
333 | |||
334 | |||
335 | DBG("Installing base platform functions...\n"); | ||
336 | |||
337 | /* | ||
338 | * Locate mac-io chips and install handlers | ||
339 | */ | ||
340 | for (i = 0 ; i < MAX_MACIO_CHIPS; i++) { | ||
341 | if (macio_chips[i].of_node) { | ||
342 | macio_mmio_init_one(&macio_chips[i]); | ||
343 | macio_gpio_init_one(&macio_chips[i]); | ||
344 | } | ||
345 | } | ||
346 | |||
347 | /* | ||
348 | * Install handlers for northbridge and direct mapped hwclock | ||
349 | * if any. We do not implement the config space access callback | ||
350 | * which is only ever used for functions that we do not call in | ||
351 | * the current driver (enabling/disabling cells in U2, mostly used | ||
352 | * to restore the PCI settings, we do that differently) | ||
353 | */ | ||
354 | if (uninorth_node && uninorth_base) | ||
355 | uninorth_install_pfunc(); | ||
356 | |||
357 | DBG("All base functions installed\n"); | ||
358 | |||
359 | return 0; | ||
360 | } | ||
361 | |||
362 | arch_initcall(pmac_pfunc_base_install); | ||
363 | |||
364 | #ifdef CONFIG_PM | ||
365 | |||
366 | /* Those can be called by pmac_feature. Ultimately, I should use a sysdev | ||
367 | * or a device, but for now, that's good enough until I sort out some | ||
368 | * ordering issues. Also, we do not bother with GPIOs, as so far I yet have | ||
369 | * to see a case where a GPIO function has the on-suspend or on-resume bit | ||
370 | */ | ||
371 | void pmac_pfunc_base_suspend(void) | ||
372 | { | ||
373 | int i; | ||
374 | |||
375 | for (i = 0 ; i < MAX_MACIO_CHIPS; i++) { | ||
376 | if (macio_chips[i].of_node) | ||
377 | pmf_do_functions(macio_chips[i].of_node, NULL, 0, | ||
378 | PMF_FLAGS_ON_SLEEP, NULL); | ||
379 | } | ||
380 | if (uninorth_node) | ||
381 | pmf_do_functions(uninorth_node, NULL, 0, | ||
382 | PMF_FLAGS_ON_SLEEP, NULL); | ||
383 | if (unin_hwclock) | ||
384 | pmf_do_functions(unin_hwclock, NULL, 0, | ||
385 | PMF_FLAGS_ON_SLEEP, NULL); | ||
386 | } | ||
387 | |||
388 | void pmac_pfunc_base_resume(void) | ||
389 | { | ||
390 | int i; | ||
391 | |||
392 | if (unin_hwclock) | ||
393 | pmf_do_functions(unin_hwclock, NULL, 0, | ||
394 | PMF_FLAGS_ON_WAKE, NULL); | ||
395 | if (uninorth_node) | ||
396 | pmf_do_functions(uninorth_node, NULL, 0, | ||
397 | PMF_FLAGS_ON_WAKE, NULL); | ||
398 | for (i = 0 ; i < MAX_MACIO_CHIPS; i++) { | ||
399 | if (macio_chips[i].of_node) | ||
400 | pmf_do_functions(macio_chips[i].of_node, NULL, 0, | ||
401 | PMF_FLAGS_ON_WAKE, NULL); | ||
402 | } | ||
403 | } | ||
404 | |||
405 | #endif /* CONFIG_PM */ | ||
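
pfunc_base.c above registers pmf_handlers tables for the mac-io GPIOs, the mac-io MMIO registers and the UniNorth bridge, then runs their "on init" functions. A provider for some other register block would follow the same shape; the sketch below mirrors the handler signatures and calls used in this file, while the node, register layout and base address are hypothetical.

	#include <linux/types.h>
	#include <linux/init.h>
	#include <linux/delay.h>
	#include <asm/io.h>
	#include <asm/prom.h>
	#include <asm/pmac_pfunc.h>

	/* Hypothetical write handler, same shape as macio_do_gpio_write() */
	static int demo_write_gpio(PMF_STD_ARGS, u8 value, u8 mask)
	{
		u8 __iomem *addr = (u8 __iomem *)func->driver_data;

		writeb((readb(addr) & ~mask) | (value & mask), addr);
		return 0;
	}

	static int demo_delay(PMF_STD_ARGS, u32 duration)
	{
		msleep((duration + 999) / 1000);	/* duration is in us */
		return 0;
	}

	static struct pmf_handlers demo_handlers = {
		.write_gpio	= demo_write_gpio,
		.delay		= demo_delay,
	};

	/* Register against a node and run its "on init" platform functions */
	static void __init demo_pfunc_install(struct device_node *np,
					      unsigned long base)
	{
		pmf_register_driver(np, &demo_handlers, (void *)base);
		pmf_do_functions(np, NULL, 0, PMF_FLAGS_ON_INIT, NULL);
	}
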
diff --git a/arch/powerpc/platforms/powermac/pfunc_core.c b/arch/powerpc/platforms/powermac/pfunc_core.c new file mode 100644 index 000000000000..c32c623001dc --- /dev/null +++ b/arch/powerpc/platforms/powermac/pfunc_core.c | |||
@@ -0,0 +1,989 @@ | |||
1 | /* | ||
2 | * | ||
3 | * FIXME: Properly make this race free with refcounting etc... | ||
4 | * | ||
5 | * FIXME: LOCKING !!! | ||
6 | */ | ||
7 | |||
8 | #include <linux/config.h> | ||
9 | #include <linux/init.h> | ||
10 | #include <linux/delay.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/spinlock.h> | ||
13 | #include <linux/module.h> | ||
14 | |||
15 | #include <asm/semaphore.h> | ||
16 | #include <asm/prom.h> | ||
17 | #include <asm/pmac_pfunc.h> | ||
18 | |||
19 | /* Debug */ | ||
20 | #define LOG_PARSE(fmt...) | ||
21 | #define LOG_ERROR(fmt...) printk(fmt) | ||
22 | #define LOG_BLOB(t,b,c) | ||
23 | #define DBG(fmt...) printk(fmt) | ||
24 | |||
25 | /* Command numbers */ | ||
26 | #define PMF_CMD_LIST 0 | ||
27 | #define PMF_CMD_WRITE_GPIO 1 | ||
28 | #define PMF_CMD_READ_GPIO 2 | ||
29 | #define PMF_CMD_WRITE_REG32 3 | ||
30 | #define PMF_CMD_READ_REG32 4 | ||
31 | #define PMF_CMD_WRITE_REG16 5 | ||
32 | #define PMF_CMD_READ_REG16 6 | ||
33 | #define PMF_CMD_WRITE_REG8 7 | ||
34 | #define PMF_CMD_READ_REG8 8 | ||
35 | #define PMF_CMD_DELAY 9 | ||
36 | #define PMF_CMD_WAIT_REG32 10 | ||
37 | #define PMF_CMD_WAIT_REG16 11 | ||
38 | #define PMF_CMD_WAIT_REG8 12 | ||
39 | #define PMF_CMD_READ_I2C 13 | ||
40 | #define PMF_CMD_WRITE_I2C 14 | ||
41 | #define PMF_CMD_RMW_I2C 15 | ||
42 | #define PMF_CMD_GEN_I2C 16 | ||
43 | #define PMF_CMD_SHIFT_BYTES_RIGHT 17 | ||
44 | #define PMF_CMD_SHIFT_BYTES_LEFT 18 | ||
45 | #define PMF_CMD_READ_CFG 19 | ||
46 | #define PMF_CMD_WRITE_CFG 20 | ||
47 | #define PMF_CMD_RMW_CFG 21 | ||
48 | #define PMF_CMD_READ_I2C_SUBADDR 22 | ||
49 | #define PMF_CMD_WRITE_I2C_SUBADDR 23 | ||
50 | #define PMF_CMD_SET_I2C_MODE 24 | ||
51 | #define PMF_CMD_RMW_I2C_SUBADDR 25 | ||
52 | #define PMF_CMD_READ_REG32_MASK_SHR_XOR 26 | ||
53 | #define PMF_CMD_READ_REG16_MASK_SHR_XOR 27 | ||
54 | #define PMF_CMD_READ_REG8_MASK_SHR_XOR 28 | ||
55 | #define PMF_CMD_WRITE_REG32_SHL_MASK 29 | ||
56 | #define PMF_CMD_WRITE_REG16_SHL_MASK 30 | ||
57 | #define PMF_CMD_WRITE_REG8_SHL_MASK 31 | ||
58 | #define PMF_CMD_MASK_AND_COMPARE 32 | ||
59 | #define PMF_CMD_COUNT 33 | ||
60 | |||
61 | /* This structure holds the state of the parser while walking through | ||
62 | * a function definition | ||
63 | */ | ||
64 | struct pmf_cmd { | ||
65 | const void *cmdptr; | ||
66 | const void *cmdend; | ||
67 | struct pmf_function *func; | ||
68 | void *instdata; | ||
69 | struct pmf_args *args; | ||
70 | int error; | ||
71 | }; | ||
72 | |||
73 | #if 0 | ||
74 | /* Debug output */ | ||
75 | static void print_blob(const char *title, const void *blob, int bytes) | ||
76 | { | ||
77 | printk("%s", title); | ||
78 | while(bytes--) { | ||
79 | printk("%02x ", *((u8 *)blob)); | ||
80 | blob += 1; | ||
81 | } | ||
82 | printk("\n"); | ||
83 | } | ||
84 | #endif | ||
85 | |||
86 | /* | ||
87 | * Parser helpers | ||
88 | */ | ||
89 | |||
90 | static u32 pmf_next32(struct pmf_cmd *cmd) | ||
91 | { | ||
92 | u32 value; | ||
93 | if ((cmd->cmdend - cmd->cmdptr) < 4) { | ||
94 | cmd->error = 1; | ||
95 | return 0; | ||
96 | } | ||
97 | value = *((u32 *)cmd->cmdptr); | ||
98 | cmd->cmdptr += 4; | ||
99 | return value; | ||
100 | } | ||
101 | |||
102 | static const void* pmf_next_blob(struct pmf_cmd *cmd, int count) | ||
103 | { | ||
104 | const void *value; | ||
105 | if ((cmd->cmdend - cmd->cmdptr) < count) { | ||
106 | cmd->error = 1; | ||
107 | return NULL; | ||
108 | } | ||
109 | value = cmd->cmdptr; | ||
110 | cmd->cmdptr += count; | ||
111 | return value; | ||
112 | } | ||
113 | |||
114 | /* | ||
115 | * Individual command parsers | ||
116 | */ | ||
117 | |||
118 | #define PMF_PARSE_CALL(name, cmd, handlers, p...) \ | ||
119 | do { \ | ||
120 | if (cmd->error) \ | ||
121 | return -ENXIO; \ | ||
122 | if (handlers == NULL) \ | ||
123 | return 0; \ | ||
124 | if (handlers->name) \ | ||
125 | return handlers->name(cmd->func, cmd->instdata, \ | ||
126 | cmd->args, p); \ | ||
127 | return -1; \ | ||
128 | } while(0) \ | ||
129 | |||
130 | |||
131 | static int pmf_parser_write_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
132 | { | ||
133 | u8 value = (u8)pmf_next32(cmd); | ||
134 | u8 mask = (u8)pmf_next32(cmd); | ||
135 | |||
136 | LOG_PARSE("pmf: write_gpio(value: %02x, mask: %02x)\n", value, mask); | ||
137 | |||
138 | PMF_PARSE_CALL(write_gpio, cmd, h, value, mask); | ||
139 | } | ||
140 | |||
141 | static int pmf_parser_read_gpio(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
142 | { | ||
143 | u8 mask = (u8)pmf_next32(cmd); | ||
144 | int rshift = (int)pmf_next32(cmd); | ||
145 | u8 xor = (u8)pmf_next32(cmd); | ||
146 | |||
147 | LOG_PARSE("pmf: read_gpio(mask: %02x, rshift: %d, xor: %02x)\n", | ||
148 | mask, rshift, xor); | ||
149 | |||
150 | PMF_PARSE_CALL(read_gpio, cmd, h, mask, rshift, xor); | ||
151 | } | ||
152 | |||
153 | static int pmf_parser_write_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
154 | { | ||
155 | u32 offset = pmf_next32(cmd); | ||
156 | u32 value = pmf_next32(cmd); | ||
157 | u32 mask = pmf_next32(cmd); | ||
158 | |||
159 | LOG_PARSE("pmf: write_reg32(offset: %08x, value: %08x, mask: %08x)\n", | ||
160 | offset, value, mask); | ||
161 | |||
162 | PMF_PARSE_CALL(write_reg32, cmd, h, offset, value, mask); | ||
163 | } | ||
164 | |||
165 | static int pmf_parser_read_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
166 | { | ||
167 | u32 offset = pmf_next32(cmd); | ||
168 | |||
169 | LOG_PARSE("pmf: read_reg32(offset: %08x)\n", offset); | ||
170 | |||
171 | PMF_PARSE_CALL(read_reg32, cmd, h, offset); | ||
172 | } | ||
173 | |||
174 | |||
175 | static int pmf_parser_write_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
176 | { | ||
177 | u32 offset = pmf_next32(cmd); | ||
178 | u16 value = (u16)pmf_next32(cmd); | ||
179 | u16 mask = (u16)pmf_next32(cmd); | ||
180 | |||
181 | LOG_PARSE("pmf: write_reg16(offset: %08x, value: %04x, mask: %04x)\n", | ||
182 | offset, value, mask); | ||
183 | |||
184 | PMF_PARSE_CALL(write_reg16, cmd, h, offset, value, mask); | ||
185 | } | ||
186 | |||
187 | static int pmf_parser_read_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
188 | { | ||
189 | u32 offset = pmf_next32(cmd); | ||
190 | |||
191 | LOG_PARSE("pmf: read_reg16(offset: %08x)\n", offset); | ||
192 | |||
193 | PMF_PARSE_CALL(read_reg16, cmd, h, offset); | ||
194 | } | ||
195 | |||
196 | |||
197 | static int pmf_parser_write_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
198 | { | ||
199 | u32 offset = pmf_next32(cmd); | ||
200 | u8 value = (u8)pmf_next32(cmd); | ||
201 | u8 mask = (u8)pmf_next32(cmd); | ||
202 | |||
203 | LOG_PARSE("pmf: write_reg8(offset: %08x, value: %02x, mask: %02x)\n", | ||
204 | offset, value, mask); | ||
205 | |||
206 | PMF_PARSE_CALL(write_reg8, cmd, h, offset, value, mask); | ||
207 | } | ||
208 | |||
209 | static int pmf_parser_read_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
210 | { | ||
211 | u32 offset = pmf_next32(cmd); | ||
212 | |||
213 | LOG_PARSE("pmf: read_reg8(offset: %08x)\n", offset); | ||
214 | |||
215 | PMF_PARSE_CALL(read_reg8, cmd, h, offset); | ||
216 | } | ||
217 | |||
218 | static int pmf_parser_delay(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
219 | { | ||
220 | u32 duration = pmf_next32(cmd); | ||
221 | |||
222 | LOG_PARSE("pmf: delay(duration: %d us)\n", duration); | ||
223 | |||
224 | PMF_PARSE_CALL(delay, cmd, h, duration); | ||
225 | } | ||
226 | |||
227 | static int pmf_parser_wait_reg32(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
228 | { | ||
229 | u32 offset = pmf_next32(cmd); | ||
230 | u32 value = pmf_next32(cmd); | ||
231 | u32 mask = pmf_next32(cmd); | ||
232 | |||
233 | LOG_PARSE("pmf: wait_reg32(offset: %08x, comp_value: %08x,mask: %08x)\n", | ||
234 | offset, value, mask); | ||
235 | |||
236 | PMF_PARSE_CALL(wait_reg32, cmd, h, offset, value, mask); | ||
237 | } | ||
238 | |||
239 | static int pmf_parser_wait_reg16(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
240 | { | ||
241 | u32 offset = pmf_next32(cmd); | ||
242 | u16 value = (u16)pmf_next32(cmd); | ||
243 | u16 mask = (u16)pmf_next32(cmd); | ||
244 | |||
245 | LOG_PARSE("pmf: wait_reg16(offset: %08x, comp_value: %04x,mask: %04x)\n", | ||
246 | offset, value, mask); | ||
247 | |||
248 | PMF_PARSE_CALL(wait_reg16, cmd, h, offset, value, mask); | ||
249 | } | ||
250 | |||
251 | static int pmf_parser_wait_reg8(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
252 | { | ||
253 | u32 offset = pmf_next32(cmd); | ||
254 | u8 value = (u8)pmf_next32(cmd); | ||
255 | u8 mask = (u8)pmf_next32(cmd); | ||
256 | |||
257 | LOG_PARSE("pmf: wait_reg8(offset: %08x, comp_value: %02x, mask: %02x)\n", | ||
258 | offset, value, mask); | ||
259 | |||
260 | PMF_PARSE_CALL(wait_reg8, cmd, h, offset, value, mask); | ||
261 | } | ||
262 | |||
263 | static int pmf_parser_read_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
264 | { | ||
265 | u32 bytes = pmf_next32(cmd); | ||
266 | |||
267 | LOG_PARSE("pmf: read_i2c(bytes: %u)\n", bytes); | ||
268 | |||
269 | PMF_PARSE_CALL(read_i2c, cmd, h, bytes); | ||
270 | } | ||
271 | |||
272 | static int pmf_parser_write_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
273 | { | ||
274 | u32 bytes = pmf_next32(cmd); | ||
275 | const void *blob = pmf_next_blob(cmd, bytes); | ||
276 | |||
277 | LOG_PARSE("pmf: write_i2c(bytes: %u) ...\n", bytes); | ||
278 | LOG_BLOB("pmf: data: \n", blob, bytes); | ||
279 | |||
280 | PMF_PARSE_CALL(write_i2c, cmd, h, bytes, blob); | ||
281 | } | ||
282 | |||
283 | |||
284 | static int pmf_parser_rmw_i2c(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
285 | { | ||
286 | u32 maskbytes = pmf_next32(cmd); | ||
287 | u32 valuesbytes = pmf_next32(cmd); | ||
288 | u32 totalbytes = pmf_next32(cmd); | ||
289 | const void *maskblob = pmf_next_blob(cmd, maskbytes); | ||
290 | const void *valuesblob = pmf_next_blob(cmd, valuesbytes); | ||
291 | |||
292 | LOG_PARSE("pmf: rmw_i2c(maskbytes: %u, valuebytes: %u, " | ||
293 | "totalbytes: %d) ...\n", | ||
294 | maskbytes, valuesbytes, totalbytes); | ||
295 | LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes); | ||
296 | LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes); | ||
297 | |||
298 | PMF_PARSE_CALL(rmw_i2c, cmd, h, maskbytes, valuesbytes, totalbytes, | ||
299 | maskblob, valuesblob); | ||
300 | } | ||
301 | |||
302 | static int pmf_parser_read_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
303 | { | ||
304 | u32 offset = pmf_next32(cmd); | ||
305 | u32 bytes = pmf_next32(cmd); | ||
306 | |||
307 | LOG_PARSE("pmf: read_cfg(offset: %x, bytes: %u)\n", offset, bytes); | ||
308 | |||
309 | PMF_PARSE_CALL(read_cfg, cmd, h, offset, bytes); | ||
310 | } | ||
311 | |||
312 | |||
313 | static int pmf_parser_write_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
314 | { | ||
315 | u32 offset = pmf_next32(cmd); | ||
316 | u32 bytes = pmf_next32(cmd); | ||
317 | const void *blob = pmf_next_blob(cmd, bytes); | ||
318 | |||
319 | LOG_PARSE("pmf: write_cfg(offset: %x, bytes: %u)\n", offset, bytes); | ||
320 | LOG_BLOB("pmf: data: \n", blob, bytes); | ||
321 | |||
322 | PMF_PARSE_CALL(write_cfg, cmd, h, offset, bytes, blob); | ||
323 | } | ||
324 | |||
325 | static int pmf_parser_rmw_cfg(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
326 | { | ||
327 | u32 offset = pmf_next32(cmd); | ||
328 | u32 maskbytes = pmf_next32(cmd); | ||
329 | u32 valuesbytes = pmf_next32(cmd); | ||
330 | u32 totalbytes = pmf_next32(cmd); | ||
331 | const void *maskblob = pmf_next_blob(cmd, maskbytes); | ||
332 | const void *valuesblob = pmf_next_blob(cmd, valuesbytes); | ||
333 | |||
334 | LOG_PARSE("pmf: rmw_cfg(maskbytes: %u, valuebytes: %u," | ||
335 | " totalbytes: %d) ...\n", | ||
336 | maskbytes, valuesbytes, totalbytes); | ||
337 | LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes); | ||
338 | LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes); | ||
339 | |||
340 | PMF_PARSE_CALL(rmw_cfg, cmd, h, offset, maskbytes, valuesbytes, | ||
341 | totalbytes, maskblob, valuesblob); | ||
342 | } | ||
343 | |||
344 | |||
345 | static int pmf_parser_read_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
346 | { | ||
347 | u8 subaddr = (u8)pmf_next32(cmd); | ||
348 | u32 bytes = pmf_next32(cmd); | ||
349 | |||
350 | LOG_PARSE("pmf: read_i2c_sub(subaddr: %x, bytes: %u)\n", | ||
351 | subaddr, bytes); | ||
352 | |||
353 | PMF_PARSE_CALL(read_i2c_sub, cmd, h, subaddr, bytes); | ||
354 | } | ||
355 | |||
356 | static int pmf_parser_write_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
357 | { | ||
358 | u8 subaddr = (u8)pmf_next32(cmd); | ||
359 | u32 bytes = pmf_next32(cmd); | ||
360 | const void *blob = pmf_next_blob(cmd, bytes); | ||
361 | |||
362 | LOG_PARSE("pmf: write_i2c_sub(subaddr: %x, bytes: %u) ...\n", | ||
363 | subaddr, bytes); | ||
364 | LOG_BLOB("pmf: data: \n", blob, bytes); | ||
365 | |||
366 | PMF_PARSE_CALL(write_i2c_sub, cmd, h, subaddr, bytes, blob); | ||
367 | } | ||
368 | |||
369 | static int pmf_parser_set_i2c_mode(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
370 | { | ||
371 | u32 mode = pmf_next32(cmd); | ||
372 | |||
373 | LOG_PARSE("pmf: set_i2c_mode(mode: %d)\n", mode); | ||
374 | |||
375 | PMF_PARSE_CALL(set_i2c_mode, cmd, h, mode); | ||
376 | } | ||
377 | |||
378 | |||
379 | static int pmf_parser_rmw_i2c_sub(struct pmf_cmd *cmd, struct pmf_handlers *h) | ||
380 | { | ||
381 | u8 subaddr = (u8)pmf_next32(cmd); | ||
382 | u32 maskbytes = pmf_next32(cmd); | ||
383 | u32 valuesbytes = pmf_next32(cmd); | ||
384 | u32 totalbytes = pmf_next32(cmd); | ||
385 | const void *maskblob = pmf_next_blob(cmd, maskbytes); | ||
386 | const void *valuesblob = pmf_next_blob(cmd, valuesbytes); | ||
387 | |||
388 | LOG_PARSE("pmf: rmw_i2c_sub(subaddr: %x, maskbytes: %u, valuebytes: %u" | ||
389 | ", totalbytes: %d) ...\n", | ||
390 | subaddr, maskbytes, valuesbytes, totalbytes); | ||
391 | LOG_BLOB("pmf: mask data: \n", maskblob, maskbytes); | ||
392 | LOG_BLOB("pmf: values data: \n", valuesblob, valuesbytes); | ||
393 | |||
394 | PMF_PARSE_CALL(rmw_i2c_sub, cmd, h, subaddr, maskbytes, valuesbytes, | ||
395 | totalbytes, maskblob, valuesblob); | ||
396 | } | ||
397 | |||
398 | static int pmf_parser_read_reg32_msrx(struct pmf_cmd *cmd, | ||
399 | struct pmf_handlers *h) | ||
400 | { | ||
401 | u32 offset = pmf_next32(cmd); | ||
402 | u32 mask = pmf_next32(cmd); | ||
403 | u32 shift = pmf_next32(cmd); | ||
404 | u32 xor = pmf_next32(cmd); | ||
405 | |||
406 | LOG_PARSE("pmf: read_reg32_msrx(offset: %x, mask: %x, shift: %x," | ||
407 | " xor: %x\n", offset, mask, shift, xor); | ||
408 | |||
409 | PMF_PARSE_CALL(read_reg32_msrx, cmd, h, offset, mask, shift, xor); | ||
410 | } | ||
411 | |||
412 | static int pmf_parser_read_reg16_msrx(struct pmf_cmd *cmd, | ||
413 | struct pmf_handlers *h) | ||
414 | { | ||
415 | u32 offset = pmf_next32(cmd); | ||
416 | u32 mask = pmf_next32(cmd); | ||
417 | u32 shift = pmf_next32(cmd); | ||
418 | u32 xor = pmf_next32(cmd); | ||
419 | |||
420 | LOG_PARSE("pmf: read_reg16_msrx(offset: %x, mask: %x, shift: %x," | ||
421 | " xor: %x\n", offset, mask, shift, xor); | ||
422 | |||
423 | PMF_PARSE_CALL(read_reg16_msrx, cmd, h, offset, mask, shift, xor); | ||
424 | } | ||
425 | static int pmf_parser_read_reg8_msrx(struct pmf_cmd *cmd, | ||
426 | struct pmf_handlers *h) | ||
427 | { | ||
428 | u32 offset = pmf_next32(cmd); | ||
429 | u32 mask = pmf_next32(cmd); | ||
430 | u32 shift = pmf_next32(cmd); | ||
431 | u32 xor = pmf_next32(cmd); | ||
432 | |||
433 | LOG_PARSE("pmf: read_reg8_msrx(offset: %x, mask: %x, shift: %x," | ||
434 | " xor: %x\n", offset, mask, shift, xor); | ||
435 | |||
436 | PMF_PARSE_CALL(read_reg8_msrx, cmd, h, offset, mask, shift, xor); | ||
437 | } | ||
438 | |||
439 | static int pmf_parser_write_reg32_slm(struct pmf_cmd *cmd, | ||
440 | struct pmf_handlers *h) | ||
441 | { | ||
442 | u32 offset = pmf_next32(cmd); | ||
443 | u32 shift = pmf_next32(cmd); | ||
444 | u32 mask = pmf_next32(cmd); | ||
445 | |||
446 | LOG_PARSE("pmf: write_reg32_slm(offset: %x, shift: %x, mask: %x)\n", | ||
447 | offset, shift, mask); | ||
448 | |||
449 | PMF_PARSE_CALL(write_reg32_slm, cmd, h, offset, shift, mask); | ||
450 | } | ||
451 | |||
452 | static int pmf_parser_write_reg16_slm(struct pmf_cmd *cmd, | ||
453 | struct pmf_handlers *h) | ||
454 | { | ||
455 | u32 offset = pmf_next32(cmd); | ||
456 | u32 shift = pmf_next32(cmd); | ||
457 | u32 mask = pmf_next32(cmd); | ||
458 | |||
459 | LOG_PARSE("pmf: write_reg16_slm(offset: %x, shift: %x, mask: %x)\n", | ||
460 | offset, shift, mask); | ||
461 | |||
462 | PMF_PARSE_CALL(write_reg16_slm, cmd, h, offset, shift, mask); | ||
463 | } | ||
464 | |||
465 | static int pmf_parser_write_reg8_slm(struct pmf_cmd *cmd, | ||
466 | struct pmf_handlers *h) | ||
467 | { | ||
468 | u32 offset = pmf_next32(cmd); | ||
469 | u32 shift = pmf_next32(cmd); | ||
470 | u32 mask = pmf_next32(cmd); | ||
471 | |||
472 | LOG_PARSE("pmf: write_reg8_slm(offset: %x, shift: %x, mask: %x)\n", | ||
473 | offset, shift, mask); | ||
474 | |||
475 | PMF_PARSE_CALL(write_reg8_slm, cmd, h, offset, shift, mask); | ||
476 | } | ||
477 | |||
478 | static int pmf_parser_mask_and_compare(struct pmf_cmd *cmd, | ||
479 | struct pmf_handlers *h) | ||
480 | { | ||
481 | u32 bytes = pmf_next32(cmd); | ||
482 | const void *maskblob = pmf_next_blob(cmd, bytes); | ||
483 | const void *valuesblob = pmf_next_blob(cmd, bytes); | ||
484 | |||
485 | LOG_PARSE("pmf: mask_and_compare(length: %u) ...\n", bytes); | ||
486 | LOG_BLOB("pmf: mask data: \n", maskblob, bytes); | ||
487 | LOG_BLOB("pmf: values data: \n", valuesblob, bytes); | ||
488 | |||
489 | PMF_PARSE_CALL(mask_and_compare, cmd, h, | ||
490 | bytes, maskblob, valuesblob); | ||
491 | } | ||
492 | |||
493 | |||
494 | typedef int (*pmf_cmd_parser_t)(struct pmf_cmd *cmd, struct pmf_handlers *h); | ||
495 | |||
496 | static pmf_cmd_parser_t pmf_parsers[PMF_CMD_COUNT] = | ||
497 | { | ||
498 | NULL, | ||
499 | pmf_parser_write_gpio, | ||
500 | pmf_parser_read_gpio, | ||
501 | pmf_parser_write_reg32, | ||
502 | pmf_parser_read_reg32, | ||
503 | pmf_parser_write_reg16, | ||
504 | pmf_parser_read_reg16, | ||
505 | pmf_parser_write_reg8, | ||
506 | pmf_parser_read_reg8, | ||
507 | pmf_parser_delay, | ||
508 | pmf_parser_wait_reg32, | ||
509 | pmf_parser_wait_reg16, | ||
510 | pmf_parser_wait_reg8, | ||
511 | pmf_parser_read_i2c, | ||
512 | pmf_parser_write_i2c, | ||
513 | pmf_parser_rmw_i2c, | ||
514 | NULL, /* Bogus command */ | ||
515 | NULL, /* Shift bytes right: NYI */ | ||
516 | NULL, /* Shift bytes left: NYI */ | ||
517 | pmf_parser_read_cfg, | ||
518 | pmf_parser_write_cfg, | ||
519 | pmf_parser_rmw_cfg, | ||
520 | pmf_parser_read_i2c_sub, | ||
521 | pmf_parser_write_i2c_sub, | ||
522 | pmf_parser_set_i2c_mode, | ||
523 | pmf_parser_rmw_i2c_sub, | ||
524 | pmf_parser_read_reg32_msrx, | ||
525 | pmf_parser_read_reg16_msrx, | ||
526 | pmf_parser_read_reg8_msrx, | ||
527 | pmf_parser_write_reg32_slm, | ||
528 | pmf_parser_write_reg16_slm, | ||
529 | pmf_parser_write_reg8_slm, | ||
530 | pmf_parser_mask_and_compare, | ||
531 | }; | ||
532 | |||
533 | struct pmf_device { | ||
534 | struct list_head link; | ||
535 | struct device_node *node; | ||
536 | struct pmf_handlers *handlers; | ||
537 | struct list_head functions; | ||
538 | struct kref ref; | ||
539 | }; | ||
540 | |||
541 | static LIST_HEAD(pmf_devices); | ||
542 | static spinlock_t pmf_lock = SPIN_LOCK_UNLOCKED; | ||
543 | |||
544 | static void pmf_release_device(struct kref *kref) | ||
545 | { | ||
546 | struct pmf_device *dev = container_of(kref, struct pmf_device, ref); | ||
547 | kfree(dev); | ||
548 | } | ||
549 | |||
550 | static inline void pmf_put_device(struct pmf_device *dev) | ||
551 | { | ||
552 | kref_put(&dev->ref, pmf_release_device); | ||
553 | } | ||
554 | |||
555 | static inline struct pmf_device *pmf_get_device(struct pmf_device *dev) | ||
556 | { | ||
557 | kref_get(&dev->ref); | ||
558 | return dev; | ||
559 | } | ||
560 | |||
561 | static inline struct pmf_device *pmf_find_device(struct device_node *np) | ||
562 | { | ||
563 | struct pmf_device *dev; | ||
564 | |||
565 | list_for_each_entry(dev, &pmf_devices, link) { | ||
566 | if (dev->node == np) | ||
567 | return pmf_get_device(dev); | ||
568 | } | ||
569 | return NULL; | ||
570 | } | ||
571 | |||
572 | static int pmf_parse_one(struct pmf_function *func, | ||
573 | struct pmf_handlers *handlers, | ||
574 | void *instdata, struct pmf_args *args) | ||
575 | { | ||
576 | struct pmf_cmd cmd; | ||
577 | u32 ccode; | ||
578 | int count, rc; | ||
579 | |||
580 | cmd.cmdptr = func->data; | ||
581 | cmd.cmdend = func->data + func->length; | ||
582 | cmd.func = func; | ||
583 | cmd.instdata = instdata; | ||
584 | cmd.args = args; | ||
585 | cmd.error = 0; | ||
586 | |||
587 | LOG_PARSE("pmf: func %s, %d bytes, %s...\n", | ||
588 | func->name, func->length, | ||
589 | handlers ? "executing" : "parsing"); | ||
590 | |||
591 | /* One subcommand to parse for now */ | ||
592 | count = 1; | ||
593 | |||
594 | while(count-- && cmd.cmdptr < cmd.cmdend) { | ||
595 | /* Get opcode */ | ||
596 | ccode = pmf_next32(&cmd); | ||
597 | /* Check if we are hitting a command list, fetch new count */ | ||
598 | if (ccode == 0) { | ||
599 | count = pmf_next32(&cmd) - 1; | ||
600 | ccode = pmf_next32(&cmd); | ||
601 | } | ||
602 | if (cmd.error) { | ||
603 | LOG_ERROR("pmf: parse error, not enough data\n"); | ||
604 | return -ENXIO; | ||
605 | } | ||
606 | if (ccode >= PMF_CMD_COUNT) { | ||
607 | LOG_ERROR("pmf: command code %d unknown !\n", ccode); | ||
608 | return -ENXIO; | ||
609 | } | ||
610 | if (pmf_parsers[ccode] == NULL) { | ||
611 | LOG_ERROR("pmf: no parser for command %d !\n", ccode); | ||
612 | return -ENXIO; | ||
613 | } | ||
614 | rc = pmf_parsers[ccode](&cmd, handlers); | ||
615 | if (rc != 0) { | ||
616 | LOG_ERROR("pmf: parser for command %d returned" | ||
617 | " error %d\n", ccode, rc); | ||
618 | return rc; | ||
619 | } | ||
620 | } | ||
621 | |||
622 | /* We are doing an initial parse pass, we need to adjust the size */ | ||
623 | if (handlers == NULL) | ||
624 | func->length = cmd.cmdptr - func->data; | ||
625 | |||
626 | return 0; | ||
627 | } | ||
628 | |||
629 | static int pmf_add_function_prop(struct pmf_device *dev, void *driverdata, | ||
630 | const char *name, u32 *data, | ||
631 | unsigned int length) | ||
632 | { | ||
633 | int count = 0; | ||
634 | struct pmf_function *func = NULL; | ||
635 | |||
636 | DBG("pmf: Adding functions for platform-do-%s\n", name); | ||
637 | |||
638 | while (length >= 12) { | ||
639 | /* Allocate a structure */ | ||
640 | func = kzalloc(sizeof(struct pmf_function), GFP_KERNEL); | ||
641 | if (func == NULL) | ||
642 | goto bail; | ||
643 | kref_init(&func->ref); | ||
644 | INIT_LIST_HEAD(&func->irq_clients); | ||
645 | func->node = dev->node; | ||
646 | func->driver_data = driverdata; | ||
647 | func->name = name; | ||
648 | func->phandle = data[0]; | ||
649 | func->flags = data[1]; | ||
650 | data += 2; | ||
651 | length -= 8; | ||
652 | func->data = data; | ||
653 | func->length = length; | ||
654 | func->dev = dev; | ||
655 | DBG("pmf: idx %d: flags=%08x, phandle=%08x " | ||
656 | " %d bytes remaining, parsing...\n", | ||
657 | count+1, func->flags, func->phandle, length); | ||
658 | if (pmf_parse_one(func, NULL, NULL, NULL)) { | ||
659 | kfree(func); | ||
660 | goto bail; | ||
661 | } | ||
662 | length -= func->length; | ||
663 | data = (u32 *)(((u8 *)data) + func->length); | ||
664 | list_add(&func->link, &dev->functions); | ||
665 | pmf_get_device(dev); | ||
666 | count++; | ||
667 | } | ||
668 | bail: | ||
669 | DBG("pmf: Added %d functions\n", count); | ||
670 | |||
671 | return count; | ||
672 | } | ||
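The loop above consumes the raw "platform-do-<name>" property as a series of function records, each at least 12 bytes long. A rough sketch of the layout it assumes (field names below are illustrative, not taken from firmware documentation):

    /*
     * platform-do-<name> property, as consumed by pmf_add_function_prop():
     *
     *   u32 phandle;      - target node the function applies to (data[0])
     *   u32 flags;        - PMF_FLAGS_* bits (data[1])
     *   u32 commands[];   - command stream: an opcode, or a 0 marker
     *                       followed by a sub-command count and opcodes
     *
     * The record repeats while at least 12 bytes of the property remain;
     * the initial pmf_parse_one() pass (handlers == NULL) only measures
     * each command stream's length.
     */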
673 | |||
674 | static int pmf_add_functions(struct pmf_device *dev, void *driverdata) | ||
675 | { | ||
676 | struct property *pp; | ||
677 | #define PP_PREFIX "platform-do-" | ||
678 | const int plen = strlen(PP_PREFIX); | ||
679 | int count = 0; | ||
680 | |||
681 | for (pp = dev->node->properties; pp != 0; pp = pp->next) { | ||
682 | char *name; | ||
683 | if (strncmp(pp->name, PP_PREFIX, plen) != 0) | ||
684 | continue; | ||
685 | name = pp->name + plen; | ||
686 | if (strlen(name) && pp->length >= 12) | ||
687 | count += pmf_add_function_prop(dev, driverdata, name, | ||
688 | (u32 *)pp->value, | ||
689 | pp->length); | ||
690 | } | ||
691 | return count; | ||
692 | } | ||
693 | |||
694 | |||
695 | int pmf_register_driver(struct device_node *np, | ||
696 | struct pmf_handlers *handlers, | ||
697 | void *driverdata) | ||
698 | { | ||
699 | struct pmf_device *dev; | ||
700 | unsigned long flags; | ||
701 | int rc = 0; | ||
702 | |||
703 | if (handlers == NULL) | ||
704 | return -EINVAL; | ||
705 | |||
706 | DBG("pmf: registering driver for node %s\n", np->full_name); | ||
707 | |||
708 | spin_lock_irqsave(&pmf_lock, flags); | ||
709 | dev = pmf_find_device(np); | ||
710 | spin_unlock_irqrestore(&pmf_lock, flags); | ||
711 | if (dev != NULL) { | ||
712 | DBG("pmf: already there !\n"); | ||
713 | pmf_put_device(dev); | ||
714 | return -EBUSY; | ||
715 | } | ||
716 | |||
717 | dev = kzalloc(sizeof(struct pmf_device), GFP_KERNEL); | ||
718 | if (dev == NULL) { | ||
719 | DBG("pmf: no memory !\n"); | ||
720 | return -ENOMEM; | ||
721 | } | ||
722 | kref_init(&dev->ref); | ||
723 | dev->node = of_node_get(np); | ||
724 | dev->handlers = handlers; | ||
725 | INIT_LIST_HEAD(&dev->functions); | ||
726 | |||
727 | rc = pmf_add_functions(dev, driverdata); | ||
728 | if (rc == 0) { | ||
729 | DBG("pmf: no functions, disposing...\n"); | ||
730 | of_node_put(np); | ||
731 | kfree(dev); | ||
732 | return -ENODEV; | ||
733 | } | ||
734 | |||
735 | spin_lock_irqsave(&pmf_lock, flags); | ||
736 | list_add(&dev->link, &pmf_devices); | ||
737 | spin_unlock_irqrestore(&pmf_lock, flags); | ||
738 | |||
739 | return 0; | ||
740 | } | ||
741 | EXPORT_SYMBOL_GPL(pmf_register_driver); | ||
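A minimal sketch of a backend registering with this interface, under the assumption of a hypothetical node and driver data; only the .begin/.end/.owner members actually referenced in this file are shown, the per-primitive callbacks (write_gpio, write_reg32, ...) are omitted:

    static void *my_pmf_begin(struct pmf_function *func, struct pmf_args *args)
    {
            /* acquire whatever hardware context the command stream needs */
            return NULL;
    }

    static void my_pmf_end(struct pmf_function *func, void *instdata)
    {
            /* release the context returned by my_pmf_begin() */
    }

    static struct pmf_handlers my_pmf_handlers = {
            .begin = my_pmf_begin,
            .end   = my_pmf_end,
            .owner = THIS_MODULE,
    };

    /* hypothetical probe path of the backend driver */
    static int my_backend_attach(struct device_node *np, void *driverdata)
    {
            return pmf_register_driver(np, &my_pmf_handlers, driverdata);
    }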
742 | |||
743 | struct pmf_function *pmf_get_function(struct pmf_function *func) | ||
744 | { | ||
745 | if (!try_module_get(func->dev->handlers->owner)) | ||
746 | return NULL; | ||
747 | kref_get(&func->ref); | ||
748 | return func; | ||
749 | } | ||
750 | EXPORT_SYMBOL_GPL(pmf_get_function); | ||
751 | |||
752 | static void pmf_release_function(struct kref *kref) | ||
753 | { | ||
754 | struct pmf_function *func = | ||
755 | container_of(kref, struct pmf_function, ref); | ||
756 | pmf_put_device(func->dev); | ||
757 | kfree(func); | ||
758 | } | ||
759 | |||
760 | static inline void __pmf_put_function(struct pmf_function *func) | ||
761 | { | ||
762 | kref_put(&func->ref, pmf_release_function); | ||
763 | } | ||
764 | |||
765 | void pmf_put_function(struct pmf_function *func) | ||
766 | { | ||
767 | if (func == NULL) | ||
768 | return; | ||
769 | module_put(func->dev->handlers->owner); | ||
770 | __pmf_put_function(func); | ||
771 | } | ||
772 | EXPORT_SYMBOL_GPL(pmf_put_function); | ||
773 | |||
774 | void pmf_unregister_driver(struct device_node *np) | ||
775 | { | ||
776 | struct pmf_device *dev; | ||
777 | unsigned long flags; | ||
778 | |||
779 | DBG("pmf: unregistering driver for node %s\n", np->full_name); | ||
780 | |||
781 | spin_lock_irqsave(&pmf_lock, flags); | ||
782 | dev = pmf_find_device(np); | ||
783 | if (dev == NULL) { | ||
784 | DBG("pmf: no such driver !\n"); | ||
785 | spin_unlock_irqrestore(&pmf_lock, flags); | ||
786 | return; | ||
787 | } | ||
788 | list_del(&dev->link); | ||
789 | |||
790 | while(!list_empty(&dev->functions)) { | ||
791 | struct pmf_function *func = | ||
792 | list_entry(dev->functions.next, typeof(*func), link); | ||
793 | list_del(&func->link); | ||
794 | __pmf_put_function(func); | ||
795 | } | ||
796 | |||
797 | pmf_put_device(dev); | ||
798 | spin_unlock_irqrestore(&pmf_lock, flags); | ||
799 | } | ||
800 | EXPORT_SYMBOL_GPL(pmf_unregister_driver); | ||
801 | |||
802 | struct pmf_function *__pmf_find_function(struct device_node *target, | ||
803 | const char *name, u32 flags) | ||
804 | { | ||
805 | struct device_node *actor = of_node_get(target); | ||
806 | struct pmf_device *dev; | ||
807 | struct pmf_function *func, *result = NULL; | ||
808 | char fname[64]; | ||
809 | u32 *prop, ph; | ||
810 | |||
811 | /* | ||
812 | * Look for a "platform-*" function reference. If we can't find | ||
813 | * one, then we fallback to a direct call attempt | ||
814 | */ | ||
815 | snprintf(fname, 63, "platform-%s", name); | ||
816 | prop = (u32 *)get_property(target, fname, NULL); | ||
817 | if (prop == NULL) | ||
818 | goto find_it; | ||
819 | ph = *prop; | ||
820 | if (ph == 0) | ||
821 | goto find_it; | ||
822 | |||
823 | /* | ||
824 | * Ok, now try to find the actor. If we can't find it, we fail, | ||
825 | * there is no point in falling back there | ||
826 | */ | ||
827 | of_node_put(actor); | ||
828 | actor = of_find_node_by_phandle(ph); | ||
829 | if (actor == NULL) | ||
830 | return NULL; | ||
831 | find_it: | ||
832 | dev = pmf_find_device(actor); | ||
833 | if (dev == NULL) | ||
834 | return NULL; | ||
835 | |||
836 | list_for_each_entry(func, &dev->functions, link) { | ||
837 | if (name && strcmp(name, func->name)) | ||
838 | continue; | ||
839 | if (func->phandle && target->node != func->phandle) | ||
840 | continue; | ||
841 | if ((func->flags & flags) == 0) | ||
842 | continue; | ||
843 | result = func; | ||
844 | break; | ||
845 | } | ||
846 | of_node_put(actor); | ||
847 | pmf_put_device(dev); | ||
848 | return result; | ||
849 | } | ||
850 | |||
851 | |||
852 | int pmf_register_irq_client(struct device_node *target, | ||
853 | const char *name, | ||
854 | struct pmf_irq_client *client) | ||
855 | { | ||
856 | struct pmf_function *func; | ||
857 | unsigned long flags; | ||
858 | |||
859 | spin_lock_irqsave(&pmf_lock, flags); | ||
860 | func = __pmf_find_function(target, name, PMF_FLAGS_INT_GEN); | ||
861 | if (func == NULL) { | ||
862 | spin_unlock_irqrestore(&pmf_lock, flags); | ||
863 | return -ENODEV; | ||
864 | } | ||
865 | list_add(&client->link, &func->irq_clients); | ||
866 | spin_unlock_irqrestore(&pmf_lock, flags); | ||
867 | |||
868 | return 0; | ||
869 | } | ||
870 | EXPORT_SYMBOL_GPL(pmf_register_irq_client); | ||
871 | |||
872 | void pmf_unregister_irq_client(struct device_node *np, | ||
873 | const char *name, | ||
874 | struct pmf_irq_client *client) | ||
875 | { | ||
876 | unsigned long flags; | ||
877 | |||
878 | spin_lock_irqsave(&pmf_lock, flags); | ||
879 | list_del(&client->link); | ||
880 | spin_unlock_irqrestore(&pmf_lock, flags); | ||
881 | } | ||
882 | EXPORT_SYMBOL_GPL(pmf_unregister_irq_client); | ||
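A hedged example of an interrupt client, using only the pmf_irq_client members referenced in this file (.owner, .handler, .data); the function name "slot-power" and the handler body are hypothetical:

    static void my_pmf_event(void *data)
    {
            /* react to the event signalled via pmf_do_irq() */
    }

    static struct pmf_irq_client my_pmf_client = {
            .owner   = THIS_MODULE,
            .handler = my_pmf_event,
            .data    = NULL,
    };

    static int my_hook_event(struct device_node *np)
    {
            return pmf_register_irq_client(np, "slot-power", &my_pmf_client);
    }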
883 | |||
884 | |||
885 | void pmf_do_irq(struct pmf_function *func) | ||
886 | { | ||
887 | unsigned long flags; | ||
888 | struct pmf_irq_client *client; | ||
889 | |||
890 | /* For now, using a spinlock over the whole function. Can be made | ||
891 | * to drop the lock using 2 lists if necessary | ||
892 | */ | ||
893 | spin_lock_irqsave(&pmf_lock, flags); | ||
894 | list_for_each_entry(client, &func->irq_clients, link) { | ||
895 | if (!try_module_get(client->owner)) | ||
896 | continue; | ||
897 | client->handler(client->data); | ||
898 | module_put(client->owner); | ||
899 | } | ||
900 | spin_unlock_irqrestore(&pmf_lock, flags); | ||
901 | } | ||
902 | EXPORT_SYMBOL_GPL(pmf_do_irq); | ||
903 | |||
904 | |||
905 | int pmf_call_one(struct pmf_function *func, struct pmf_args *args) | ||
906 | { | ||
907 | struct pmf_device *dev = func->dev; | ||
908 | void *instdata = NULL; | ||
909 | int rc = 0; | ||
910 | |||
911 | DBG(" ** pmf_call_one(%s/%s) **\n", dev->node->full_name, func->name); | ||
912 | |||
913 | if (dev->handlers->begin) | ||
914 | instdata = dev->handlers->begin(func, args); | ||
915 | rc = pmf_parse_one(func, dev->handlers, instdata, args); | ||
916 | if (dev->handlers->end) | ||
917 | dev->handlers->end(func, instdata); | ||
918 | |||
919 | return rc; | ||
920 | } | ||
921 | EXPORT_SYMBOL_GPL(pmf_call_one); | ||
922 | |||
923 | int pmf_do_functions(struct device_node *np, const char *name, | ||
924 | u32 phandle, u32 fflags, struct pmf_args *args) | ||
925 | { | ||
926 | struct pmf_device *dev; | ||
927 | struct pmf_function *func, *tmp; | ||
928 | unsigned long flags; | ||
929 | int rc = -ENODEV; | ||
930 | |||
931 | spin_lock_irqsave(&pmf_lock, flags); | ||
932 | |||
933 | dev = pmf_find_device(np); | ||
934 | if (dev == NULL) { | ||
935 | spin_unlock_irqrestore(&pmf_lock, flags); | ||
936 | return -ENODEV; | ||
937 | } | ||
938 | list_for_each_entry_safe(func, tmp, &dev->functions, link) { | ||
939 | if (name && strcmp(name, func->name)) | ||
940 | continue; | ||
941 | if (phandle && func->phandle && phandle != func->phandle) | ||
942 | continue; | ||
943 | if ((func->flags & fflags) == 0) | ||
944 | continue; | ||
945 | if (pmf_get_function(func) == NULL) | ||
946 | continue; | ||
947 | spin_unlock_irqrestore(&pmf_lock, flags); | ||
948 | rc = pmf_call_one(func, args); | ||
949 | pmf_put_function(func); | ||
950 | spin_lock_irqsave(&pmf_lock, flags); | ||
951 | } | ||
952 | pmf_put_device(dev); | ||
953 | spin_unlock_irqrestore(&pmf_lock, flags); | ||
954 | |||
955 | return rc; | ||
956 | } | ||
957 | EXPORT_SYMBOL_GPL(pmf_do_functions); | ||
958 | |||
959 | |||
960 | struct pmf_function *pmf_find_function(struct device_node *target, | ||
961 | const char *name) | ||
962 | { | ||
963 | struct pmf_function *func; | ||
964 | unsigned long flags; | ||
965 | |||
966 | spin_lock_irqsave(&pmf_lock, flags); | ||
967 | func = __pmf_find_function(target, name, PMF_FLAGS_ON_DEMAND); | ||
968 | if (func) | ||
969 | func = pmf_get_function(func); | ||
970 | spin_unlock_irqrestore(&pmf_lock, flags); | ||
971 | return func; | ||
972 | } | ||
973 | EXPORT_SYMBOL_GPL(pmf_find_function); | ||
974 | |||
975 | int pmf_call_function(struct device_node *target, const char *name, | ||
976 | struct pmf_args *args) | ||
977 | { | ||
978 | struct pmf_function *func = pmf_find_function(target, name); | ||
979 | int rc; | ||
980 | |||
981 | if (func == NULL) | ||
982 | return -ENODEV; | ||
983 | |||
984 | rc = pmf_call_one(func, args); | ||
985 | pmf_put_function(func); | ||
986 | return rc; | ||
987 | } | ||
988 | EXPORT_SYMBOL_GPL(pmf_call_function); | ||
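And a sketch of the consumer side, invoking a named platform function on a device node; the name "cpu-vcore-select" is made up for illustration, and passing NULL for the arguments assumes the function takes none:

    static int my_call_platform_func(struct device_node *np)
    {
            int rc = pmf_call_function(np, "cpu-vcore-select", NULL);

            if (rc)
                    printk(KERN_WARNING "pmf: call failed (%d)\n", rc);
            return rc;
    }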
989 | |||
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c index 90040c49494d..18bf3011d1e3 100644 --- a/arch/powerpc/platforms/powermac/pic.c +++ b/arch/powerpc/platforms/powermac/pic.c | |||
@@ -5,8 +5,8 @@ | |||
5 | * in a separate file | 5 | * in a separate file |
6 | * | 6 | * |
7 | * Copyright (C) 1997 Paul Mackerras (paulus@samba.org) | 7 | * Copyright (C) 1997 Paul Mackerras (paulus@samba.org) |
8 | * | 8 | * Copyright (C) 2005 Benjamin Herrenschmidt (benh@kernel.crashing.org) |
9 | * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org) | 9 | * IBM, Corp. |
10 | * | 10 | * |
11 | * This program is free software; you can redistribute it and/or | 11 | * This program is free software; you can redistribute it and/or |
12 | * modify it under the terms of the GNU General Public License | 12 | * modify it under the terms of the GNU General Public License |
@@ -54,12 +54,7 @@ struct pmac_irq_hw { | |||
54 | }; | 54 | }; |
55 | 55 | ||
56 | /* Default addresses */ | 56 | /* Default addresses */ |
57 | static volatile struct pmac_irq_hw *pmac_irq_hw[4] = { | 57 | static volatile struct pmac_irq_hw __iomem *pmac_irq_hw[4]; |
58 | (struct pmac_irq_hw *) 0xf3000020, | ||
59 | (struct pmac_irq_hw *) 0xf3000010, | ||
60 | (struct pmac_irq_hw *) 0xf4000020, | ||
61 | (struct pmac_irq_hw *) 0xf4000010, | ||
62 | }; | ||
63 | 58 | ||
64 | #define GC_LEVEL_MASK 0x3ff00000 | 59 | #define GC_LEVEL_MASK 0x3ff00000 |
65 | #define OHARE_LEVEL_MASK 0x1ff00000 | 60 | #define OHARE_LEVEL_MASK 0x1ff00000 |
@@ -82,8 +77,7 @@ static unsigned long ppc_lost_interrupts[NR_MASK_WORDS]; | |||
82 | * since it can lose interrupts (see pmac_set_irq_mask). | 77 | * since it can lose interrupts (see pmac_set_irq_mask). |
83 | * -- Cort | 78 | * -- Cort |
84 | */ | 79 | */ |
85 | void | 80 | void __set_lost(unsigned long irq_nr, int nokick) |
86 | __set_lost(unsigned long irq_nr, int nokick) | ||
87 | { | 81 | { |
88 | if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) { | 82 | if (!test_and_set_bit(irq_nr, ppc_lost_interrupts)) { |
89 | atomic_inc(&ppc_n_lost_interrupts); | 83 | atomic_inc(&ppc_n_lost_interrupts); |
@@ -92,8 +86,7 @@ __set_lost(unsigned long irq_nr, int nokick) | |||
92 | } | 86 | } |
93 | } | 87 | } |
94 | 88 | ||
95 | static void | 89 | static void pmac_mask_and_ack_irq(unsigned int irq_nr) |
96 | pmac_mask_and_ack_irq(unsigned int irq_nr) | ||
97 | { | 90 | { |
98 | unsigned long bit = 1UL << (irq_nr & 0x1f); | 91 | unsigned long bit = 1UL << (irq_nr & 0x1f); |
99 | int i = irq_nr >> 5; | 92 | int i = irq_nr >> 5; |
@@ -224,8 +217,7 @@ static irqreturn_t gatwick_action(int cpl, void *dev_id, struct pt_regs *regs) | |||
224 | return IRQ_NONE; | 217 | return IRQ_NONE; |
225 | } | 218 | } |
226 | 219 | ||
227 | int | 220 | static int pmac_get_irq(struct pt_regs *regs) |
228 | pmac_get_irq(struct pt_regs *regs) | ||
229 | { | 221 | { |
230 | int irq; | 222 | int irq; |
231 | unsigned long bits = 0; | 223 | unsigned long bits = 0; |
@@ -256,34 +248,40 @@ pmac_get_irq(struct pt_regs *regs) | |||
256 | 248 | ||
257 | /* This routine will fix some missing interrupt values in the device tree | 249 | /* This routine will fix some missing interrupt values in the device tree |
258 | * on the gatwick mac-io controller used by some PowerBooks | 250 | * on the gatwick mac-io controller used by some PowerBooks |
251 | * | ||
252 | * Walking of OF nodes could use a bit more fixing up here, but it's not | ||
253 | * very important as this is all boot time code on static portions of the | ||
254 | * device-tree. | ||
255 | * | ||
256 | * However, the modifications done to "intrs" will have to be removed and | ||
257 | * replaced with proper updates of the "interrupts" properties or | ||
258 | * AAPL,interrupts, yet to be decided, once the dynamic parsing is there. | ||
259 | */ | 259 | */ |
260 | static void __init | 260 | static void __init pmac_fix_gatwick_interrupts(struct device_node *gw, |
261 | pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base) | 261 | int irq_base) |
262 | { | 262 | { |
263 | struct device_node *node; | 263 | struct device_node *node; |
264 | int count; | 264 | int count; |
265 | 265 | ||
266 | memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool)); | 266 | memset(gatwick_int_pool, 0, sizeof(gatwick_int_pool)); |
267 | node = gw->child; | ||
268 | count = 0; | 267 | count = 0; |
269 | while(node) | 268 | for (node = NULL; (node = of_get_next_child(gw, node)) != NULL;) { |
270 | { | ||
271 | /* Fix SCC */ | 269 | /* Fix SCC */ |
272 | if (strcasecmp(node->name, "escc") == 0) | 270 | if ((strcasecmp(node->name, "escc") == 0) && node->child) { |
273 | if (node->child) { | 271 | if (node->child->n_intrs < 3) { |
274 | if (node->child->n_intrs < 3) { | 272 | node->child->intrs = &gatwick_int_pool[count]; |
275 | node->child->intrs = &gatwick_int_pool[count]; | 273 | count += 3; |
276 | count += 3; | ||
277 | } | ||
278 | node->child->n_intrs = 3; | ||
279 | node->child->intrs[0].line = 15+irq_base; | ||
280 | node->child->intrs[1].line = 4+irq_base; | ||
281 | node->child->intrs[2].line = 5+irq_base; | ||
282 | printk(KERN_INFO "irq: fixed SCC on second controller (%d,%d,%d)\n", | ||
283 | node->child->intrs[0].line, | ||
284 | node->child->intrs[1].line, | ||
285 | node->child->intrs[2].line); | ||
286 | } | 274 | } |
275 | node->child->n_intrs = 3; | ||
276 | node->child->intrs[0].line = 15+irq_base; | ||
277 | node->child->intrs[1].line = 4+irq_base; | ||
278 | node->child->intrs[2].line = 5+irq_base; | ||
279 | printk(KERN_INFO "irq: fixed SCC on gatwick" | ||
280 | " (%d,%d,%d)\n", | ||
281 | node->child->intrs[0].line, | ||
282 | node->child->intrs[1].line, | ||
283 | node->child->intrs[2].line); | ||
284 | } | ||
287 | /* Fix media-bay & left SWIM */ | 285 | /* Fix media-bay & left SWIM */ |
288 | if (strcasecmp(node->name, "media-bay") == 0) { | 286 | if (strcasecmp(node->name, "media-bay") == 0) { |
289 | struct device_node* ya_node; | 287 | struct device_node* ya_node; |
@@ -292,12 +290,11 @@ pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base) | |||
292 | node->intrs = &gatwick_int_pool[count++]; | 290 | node->intrs = &gatwick_int_pool[count++]; |
293 | node->n_intrs = 1; | 291 | node->n_intrs = 1; |
294 | node->intrs[0].line = 29+irq_base; | 292 | node->intrs[0].line = 29+irq_base; |
295 | printk(KERN_INFO "irq: fixed media-bay on second controller (%d)\n", | 293 | printk(KERN_INFO "irq: fixed media-bay on gatwick" |
296 | node->intrs[0].line); | 294 | " (%d)\n", node->intrs[0].line); |
297 | 295 | ||
298 | ya_node = node->child; | 296 | ya_node = node->child; |
299 | while(ya_node) | 297 | while(ya_node) { |
300 | { | ||
301 | if (strcasecmp(ya_node->name, "floppy") == 0) { | 298 | if (strcasecmp(ya_node->name, "floppy") == 0) { |
302 | if (ya_node->n_intrs < 2) { | 299 | if (ya_node->n_intrs < 2) { |
303 | ya_node->intrs = &gatwick_int_pool[count]; | 300 | ya_node->intrs = &gatwick_int_pool[count]; |
@@ -323,7 +320,6 @@ pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base) | |||
323 | ya_node = ya_node->sibling; | 320 | ya_node = ya_node->sibling; |
324 | } | 321 | } |
325 | } | 322 | } |
326 | node = node->sibling; | ||
327 | } | 323 | } |
328 | if (count > 10) { | 324 | if (count > 10) { |
329 | printk("WARNING !! Gatwick interrupt pool overflow\n"); | 325 | printk("WARNING !! Gatwick interrupt pool overflow\n"); |
@@ -338,45 +334,41 @@ pmac_fix_gatwick_interrupts(struct device_node *gw, int irq_base) | |||
338 | * controller. If we find this second ohare, set it up and fix the | 334 | * controller. If we find this second ohare, set it up and fix the |
339 | * interrupt value in the device tree for the ethernet chip. | 335 | * interrupt value in the device tree for the ethernet chip. |
340 | */ | 336 | */ |
341 | static int __init enable_second_ohare(void) | 337 | static void __init enable_second_ohare(struct device_node *np) |
342 | { | 338 | { |
343 | unsigned char bus, devfn; | 339 | unsigned char bus, devfn; |
344 | unsigned short cmd; | 340 | unsigned short cmd; |
345 | unsigned long addr; | ||
346 | struct device_node *irqctrler = find_devices("pci106b,7"); | ||
347 | struct device_node *ether; | 341 | struct device_node *ether; |
348 | 342 | ||
349 | if (irqctrler == NULL || irqctrler->n_addrs <= 0) | 343 | /* This code doesn't strictly belong here, it could be part of |
350 | return -1; | 344 | * either the PCI initialisation or the feature code. It's kept |
351 | addr = (unsigned long) ioremap(irqctrler->addrs[0].address, 0x40); | 345 | * here for historical reasons. |
352 | pmac_irq_hw[1] = (volatile struct pmac_irq_hw *)(addr + 0x20); | 346 | */ |
353 | max_irqs = 64; | 347 | if (pci_device_from_OF_node(np, &bus, &devfn) == 0) { |
354 | if (pci_device_from_OF_node(irqctrler, &bus, &devfn) == 0) { | 348 | struct pci_controller* hose = |
355 | struct pci_controller* hose = pci_find_hose_for_OF_device(irqctrler); | 349 | pci_find_hose_for_OF_device(np); |
356 | if (!hose) | 350 | if (!hose) { |
357 | printk(KERN_ERR "Can't find PCI hose for OHare2 !\n"); | 351 | printk(KERN_ERR "Can't find PCI hose for OHare2 !\n"); |
358 | else { | 352 | return; |
359 | early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd); | ||
360 | cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; | ||
361 | cmd &= ~PCI_COMMAND_IO; | ||
362 | early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd); | ||
363 | } | 353 | } |
354 | early_read_config_word(hose, bus, devfn, PCI_COMMAND, &cmd); | ||
355 | cmd |= PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER; | ||
356 | cmd &= ~PCI_COMMAND_IO; | ||
357 | early_write_config_word(hose, bus, devfn, PCI_COMMAND, cmd); | ||
364 | } | 358 | } |
365 | 359 | ||
366 | /* Fix interrupt for the modem/ethernet combo controller. The number | 360 | /* Fix interrupt for the modem/ethernet combo controller. The number |
367 | in the device tree (27) is bogus (correct for the ethernet-only | 361 | * in the device tree (27) is bogus (correct for the ethernet-only |
368 | board but not the combo ethernet/modem board). | 362 | * board but not the combo ethernet/modem board). |
369 | The real interrupt is 28 on the second controller -> 28+32 = 60. | 363 | * The real interrupt is 28 on the second controller -> 28+32 = 60. |
370 | */ | 364 | */ |
371 | ether = find_devices("pci1011,14"); | 365 | ether = of_find_node_by_name(NULL, "pci1011,14"); |
372 | if (ether && ether->n_intrs > 0) { | 366 | if (ether && ether->n_intrs > 0) { |
373 | ether->intrs[0].line = 60; | 367 | ether->intrs[0].line = 60; |
374 | printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n", | 368 | printk(KERN_INFO "irq: Fixed ethernet IRQ to %d\n", |
375 | ether->intrs[0].line); | 369 | ether->intrs[0].line); |
376 | } | 370 | } |
377 | 371 | of_node_put(ether); | |
378 | /* Return the interrupt number of the cascade */ | ||
379 | return irqctrler->intrs[0].line; | ||
380 | } | 372 | } |
381 | 373 | ||
382 | #ifdef CONFIG_XMON | 374 | #ifdef CONFIG_XMON |
@@ -394,189 +386,251 @@ static struct irqaction gatwick_cascade_action = { | |||
394 | .mask = CPU_MASK_NONE, | 386 | .mask = CPU_MASK_NONE, |
395 | .name = "cascade", | 387 | .name = "cascade", |
396 | }; | 388 | }; |
397 | #endif /* CONFIG_PPC32 */ | ||
398 | 389 | ||
399 | static int pmac_u3_cascade(struct pt_regs *regs, void *data) | 390 | static void __init pmac_pic_probe_oldstyle(void) |
400 | { | 391 | { |
401 | return mpic_get_one_irq((struct mpic *)data, regs); | ||
402 | } | ||
403 | |||
404 | void __init pmac_pic_init(void) | ||
405 | { | ||
406 | struct device_node *irqctrler = NULL; | ||
407 | struct device_node *irqctrler2 = NULL; | ||
408 | struct device_node *np; | ||
409 | #ifdef CONFIG_PPC32 | ||
410 | int i; | 392 | int i; |
411 | unsigned long addr; | ||
412 | int irq_cascade = -1; | 393 | int irq_cascade = -1; |
413 | #endif | 394 | struct device_node *master = NULL; |
414 | struct mpic *mpic1, *mpic2; | 395 | struct device_node *slave = NULL; |
396 | u8 __iomem *addr; | ||
397 | struct resource r; | ||
415 | 398 | ||
416 | /* We first try to detect Apple's new Core99 chipset, since mac-io | 399 | /* Set our get_irq function */ |
417 | * is quite different on those machines and contains an IBM MPIC2. | 400 | ppc_md.get_irq = pmac_get_irq; |
418 | */ | ||
419 | np = find_type_devices("open-pic"); | ||
420 | while (np) { | ||
421 | if (np->parent && !strcmp(np->parent->name, "u3")) | ||
422 | irqctrler2 = np; | ||
423 | else | ||
424 | irqctrler = np; | ||
425 | np = np->next; | ||
426 | } | ||
427 | if (irqctrler != NULL && irqctrler->n_addrs > 0) { | ||
428 | unsigned char senses[128]; | ||
429 | |||
430 | printk(KERN_INFO "PowerMac using OpenPIC irq controller at 0x%08x\n", | ||
431 | (unsigned int)irqctrler->addrs[0].address); | ||
432 | pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler, 0, 0); | ||
433 | |||
434 | prom_get_irq_senses(senses, 0, 128); | ||
435 | mpic1 = mpic_alloc(irqctrler->addrs[0].address, | ||
436 | MPIC_PRIMARY | MPIC_WANTS_RESET, | ||
437 | 0, 0, 128, 252, senses, 128, " OpenPIC "); | ||
438 | BUG_ON(mpic1 == NULL); | ||
439 | mpic_init(mpic1); | ||
440 | |||
441 | if (irqctrler2 != NULL && irqctrler2->n_intrs > 0 && | ||
442 | irqctrler2->n_addrs > 0) { | ||
443 | printk(KERN_INFO "Slave OpenPIC at 0x%08x hooked on IRQ %d\n", | ||
444 | (u32)irqctrler2->addrs[0].address, | ||
445 | irqctrler2->intrs[0].line); | ||
446 | |||
447 | pmac_call_feature(PMAC_FTR_ENABLE_MPIC, irqctrler2, 0, 0); | ||
448 | prom_get_irq_senses(senses, 128, 128 + 124); | ||
449 | |||
450 | /* We don't need to set MPIC_BROKEN_U3 here since we don't have | ||
451 | * hypertransport interrupts routed to it | ||
452 | */ | ||
453 | mpic2 = mpic_alloc(irqctrler2->addrs[0].address, | ||
454 | MPIC_BIG_ENDIAN | MPIC_WANTS_RESET, | ||
455 | 0, 128, 124, 0, senses, 124, | ||
456 | " U3-MPIC "); | ||
457 | BUG_ON(mpic2 == NULL); | ||
458 | mpic_init(mpic2); | ||
459 | mpic_setup_cascade(irqctrler2->intrs[0].line, | ||
460 | pmac_u3_cascade, mpic2); | ||
461 | } | ||
462 | #if defined(CONFIG_XMON) && defined(CONFIG_PPC32) | ||
463 | { | ||
464 | struct device_node* pswitch; | ||
465 | int nmi_irq; | ||
466 | |||
467 | pswitch = find_devices("programmer-switch"); | ||
468 | if (pswitch && pswitch->n_intrs) { | ||
469 | nmi_irq = pswitch->intrs[0].line; | ||
470 | mpic_irq_set_priority(nmi_irq, 9); | ||
471 | setup_irq(nmi_irq, &xmon_action); | ||
472 | } | ||
473 | } | ||
474 | #endif /* CONFIG_XMON */ | ||
475 | return; | ||
476 | } | ||
477 | irqctrler = NULL; | ||
478 | 401 | ||
479 | #ifdef CONFIG_PPC32 | 402 | /* |
480 | /* Get the level/edge settings, assume if it's not | 403 | * Find the interrupt controller type & node |
481 | * a Grand Central nor an OHare, then it's an Heathrow | ||
482 | * (or Paddington). | ||
483 | */ | 404 | */ |
484 | ppc_md.get_irq = pmac_get_irq; | 405 | |
485 | if (find_devices("gc")) | 406 | if ((master = of_find_node_by_name(NULL, "gc")) != NULL) { |
407 | max_irqs = max_real_irqs = 32; | ||
486 | level_mask[0] = GC_LEVEL_MASK; | 408 | level_mask[0] = GC_LEVEL_MASK; |
487 | else if (find_devices("ohare")) { | 409 | } else if ((master = of_find_node_by_name(NULL, "ohare")) != NULL) { |
410 | max_irqs = max_real_irqs = 32; | ||
488 | level_mask[0] = OHARE_LEVEL_MASK; | 411 | level_mask[0] = OHARE_LEVEL_MASK; |
412 | |||
489 | /* We might have a second cascaded ohare */ | 413 | /* We might have a second cascaded ohare */ |
490 | level_mask[1] = OHARE_LEVEL_MASK; | 414 | slave = of_find_node_by_name(NULL, "pci106b,7"); |
491 | } else { | 415 | if (slave) { |
416 | max_irqs = 64; | ||
417 | level_mask[1] = OHARE_LEVEL_MASK; | ||
418 | enable_second_ohare(slave); | ||
419 | } | ||
420 | } else if ((master = of_find_node_by_name(NULL, "mac-io")) != NULL) { | ||
421 | max_irqs = max_real_irqs = 64; | ||
492 | level_mask[0] = HEATHROW_LEVEL_MASK; | 422 | level_mask[0] = HEATHROW_LEVEL_MASK; |
493 | level_mask[1] = 0; | 423 | level_mask[1] = 0; |
424 | |||
494 | /* We might have a second cascaded heathrow */ | 425 | /* We might have a second cascaded heathrow */ |
495 | level_mask[2] = HEATHROW_LEVEL_MASK; | 426 | slave = of_find_node_by_name(master, "mac-io"); |
496 | level_mask[3] = 0; | 427 | |
497 | } | 428 | /* Check ordering of master & slave */ |
429 | if (device_is_compatible(master, "gatwick")) { | ||
430 | struct device_node *tmp; | ||
431 | BUG_ON(slave == NULL); | ||
432 | tmp = master; | ||
433 | master = slave; | ||
434 | slave = tmp; | ||
435 | } | ||
498 | 436 | ||
499 | /* | 437 | /* We found a slave */ |
500 | * G3 powermacs and 1999 G3 PowerBooks have 64 interrupts, | 438 | if (slave) { |
501 | * 1998 G3 Series PowerBooks have 128, | ||
502 | * other powermacs have 32. | ||
503 | * The combo ethernet/modem card for the Powerstar powerbooks | ||
504 | * (2400/3400/3500, ohare based) has a second ohare chip | ||
505 | * effectively making a total of 64. | ||
506 | */ | ||
507 | max_irqs = max_real_irqs = 32; | ||
508 | irqctrler = find_devices("mac-io"); | ||
509 | if (irqctrler) | ||
510 | { | ||
511 | max_real_irqs = 64; | ||
512 | if (irqctrler->next) | ||
513 | max_irqs = 128; | 439 | max_irqs = 128; |
514 | else | 440 | level_mask[2] = HEATHROW_LEVEL_MASK; |
515 | max_irqs = 64; | 441 | level_mask[3] = 0; |
442 | pmac_fix_gatwick_interrupts(slave, max_real_irqs); | ||
443 | } | ||
516 | } | 444 | } |
445 | BUG_ON(master == NULL); | ||
446 | |||
447 | /* Set the handler for the main PIC */ | ||
517 | for ( i = 0; i < max_real_irqs ; i++ ) | 448 | for ( i = 0; i < max_real_irqs ; i++ ) |
518 | irq_desc[i].handler = &pmac_pic; | 449 | irq_desc[i].handler = &pmac_pic; |
519 | 450 | ||
520 | /* get addresses of first controller */ | 451 | /* Get addresses of first controller if we have a node for it */ |
521 | if (irqctrler) { | 452 | BUG_ON(of_address_to_resource(master, 0, &r)); |
522 | if (irqctrler->n_addrs > 0) { | ||
523 | addr = (unsigned long) | ||
524 | ioremap(irqctrler->addrs[0].address, 0x40); | ||
525 | for (i = 0; i < 2; ++i) | ||
526 | pmac_irq_hw[i] = (volatile struct pmac_irq_hw*) | ||
527 | (addr + (2 - i) * 0x10); | ||
528 | } | ||
529 | 453 | ||
530 | /* get addresses of second controller */ | 454 | /* Map interrupts of primary controller */ |
531 | irqctrler = irqctrler->next; | 455 | addr = (u8 __iomem *) ioremap(r.start, 0x40); |
532 | if (irqctrler && irqctrler->n_addrs > 0) { | 456 | i = 0; |
533 | addr = (unsigned long) | 457 | pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *) |
534 | ioremap(irqctrler->addrs[0].address, 0x40); | 458 | (addr + 0x20); |
535 | for (i = 2; i < 4; ++i) | 459 | if (max_real_irqs > 32) |
536 | pmac_irq_hw[i] = (volatile struct pmac_irq_hw*) | 460 | pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *) |
537 | (addr + (4 - i) * 0x10); | 461 | (addr + 0x10); |
538 | irq_cascade = irqctrler->intrs[0].line; | 462 | of_node_put(master); |
539 | if (device_is_compatible(irqctrler, "gatwick")) | 463 | |
540 | pmac_fix_gatwick_interrupts(irqctrler, max_real_irqs); | 464 | printk(KERN_INFO "irq: Found primary Apple PIC %s for %d irqs\n", |
541 | } | 465 | master->full_name, max_real_irqs); |
542 | } else { | 466 | |
543 | /* older powermacs have a GC (grand central) or ohare at | 467 | /* Map interrupts of cascaded controller */ |
544 | f3000000, with interrupt control registers at f3000020. */ | 468 | if (slave && !of_address_to_resource(slave, 0, &r)) { |
545 | addr = (unsigned long) ioremap(0xf3000000, 0x40); | 469 | addr = (u8 __iomem *)ioremap(r.start, 0x40); |
546 | pmac_irq_hw[0] = (volatile struct pmac_irq_hw *) (addr + 0x20); | 470 | pmac_irq_hw[i++] = (volatile struct pmac_irq_hw __iomem *) |
471 | (addr + 0x20); | ||
472 | if (max_irqs > 64) | ||
473 | pmac_irq_hw[i++] = | ||
474 | (volatile struct pmac_irq_hw __iomem *) | ||
475 | (addr + 0x10); | ||
476 | irq_cascade = slave->intrs[0].line; | ||
477 | |||
478 | printk(KERN_INFO "irq: Found slave Apple PIC %s for %d irqs" | ||
479 | " cascade: %d\n", slave->full_name, | ||
480 | max_irqs - max_real_irqs, irq_cascade); | ||
547 | } | 481 | } |
548 | 482 | of_node_put(slave); | |
549 | /* PowerBooks 3400 and 3500 can have a second controller in a second | ||
550 | ohare chip, on the combo ethernet/modem card */ | ||
551 | if (machine_is_compatible("AAPL,3400/2400") | ||
552 | || machine_is_compatible("AAPL,3500")) | ||
553 | irq_cascade = enable_second_ohare(); | ||
554 | 483 | ||
555 | /* disable all interrupts in all controllers */ | 484 | /* disable all interrupts in all controllers */ |
556 | for (i = 0; i * 32 < max_irqs; ++i) | 485 | for (i = 0; i * 32 < max_irqs; ++i) |
557 | out_le32(&pmac_irq_hw[i]->enable, 0); | 486 | out_le32(&pmac_irq_hw[i]->enable, 0); |
487 | |||
558 | /* mark level interrupts */ | 488 | /* mark level interrupts */ |
559 | for (i = 0; i < max_irqs; i++) | 489 | for (i = 0; i < max_irqs; i++) |
560 | if (level_mask[i >> 5] & (1UL << (i & 0x1f))) | 490 | if (level_mask[i >> 5] & (1UL << (i & 0x1f))) |
561 | irq_desc[i].status = IRQ_LEVEL; | 491 | irq_desc[i].status = IRQ_LEVEL; |
562 | 492 | ||
563 | /* get interrupt line of secondary interrupt controller */ | 493 | /* Setup handlers for secondary controller and hook cascade irq*/ |
564 | if (irq_cascade >= 0) { | 494 | if (slave) { |
565 | printk(KERN_INFO "irq: secondary controller on irq %d\n", | ||
566 | (int)irq_cascade); | ||
567 | for ( i = max_real_irqs ; i < max_irqs ; i++ ) | 495 | for ( i = max_real_irqs ; i < max_irqs ; i++ ) |
568 | irq_desc[i].handler = &gatwick_pic; | 496 | irq_desc[i].handler = &gatwick_pic; |
569 | setup_irq(irq_cascade, &gatwick_cascade_action); | 497 | setup_irq(irq_cascade, &gatwick_cascade_action); |
570 | } | 498 | } |
571 | printk("System has %d possible interrupts\n", max_irqs); | 499 | printk(KERN_INFO "irq: System has %d possible interrupts\n", max_irqs); |
572 | if (max_irqs != max_real_irqs) | ||
573 | printk(KERN_DEBUG "%d interrupts on main controller\n", | ||
574 | max_real_irqs); | ||
575 | |||
576 | #ifdef CONFIG_XMON | 500 | #ifdef CONFIG_XMON |
577 | setup_irq(20, &xmon_action); | 501 | setup_irq(20, &xmon_action); |
578 | #endif /* CONFIG_XMON */ | 502 | #endif |
579 | #endif /* CONFIG_PPC32 */ | 503 | } |
504 | #endif /* CONFIG_PPC32 */ | ||
505 | |||
506 | static int pmac_u3_cascade(struct pt_regs *regs, void *data) | ||
507 | { | ||
508 | return mpic_get_one_irq((struct mpic *)data, regs); | ||
509 | } | ||
510 | |||
511 | static void __init pmac_pic_setup_mpic_nmi(struct mpic *mpic) | ||
512 | { | ||
513 | #if defined(CONFIG_XMON) && defined(CONFIG_PPC32) | ||
514 | struct device_node* pswitch; | ||
515 | int nmi_irq; | ||
516 | |||
517 | pswitch = of_find_node_by_name(NULL, "programmer-switch"); | ||
518 | if (pswitch && pswitch->n_intrs) { | ||
519 | nmi_irq = pswitch->intrs[0].line; | ||
520 | mpic_irq_set_priority(nmi_irq, 9); | ||
521 | setup_irq(nmi_irq, &xmon_action); | ||
522 | } | ||
523 | of_node_put(pswitch); | ||
524 | #endif /* defined(CONFIG_XMON) && defined(CONFIG_PPC32) */ | ||
525 | } | ||
526 | |||
527 | static struct mpic * __init pmac_setup_one_mpic(struct device_node *np, | ||
528 | int master) | ||
529 | { | ||
530 | unsigned char senses[128]; | ||
531 | int offset = master ? 0 : 128; | ||
532 | int count = master ? 128 : 124; | ||
533 | const char *name = master ? " MPIC 1 " : " MPIC 2 "; | ||
534 | struct resource r; | ||
535 | struct mpic *mpic; | ||
536 | unsigned int flags = master ? MPIC_PRIMARY : 0; | ||
537 | int rc; | ||
538 | |||
539 | rc = of_address_to_resource(np, 0, &r); | ||
540 | if (rc) | ||
541 | return NULL; | ||
542 | |||
543 | pmac_call_feature(PMAC_FTR_ENABLE_MPIC, np, 0, 0); | ||
544 | |||
545 | prom_get_irq_senses(senses, offset, offset + count); | ||
546 | |||
547 | flags |= MPIC_WANTS_RESET; | ||
548 | if (get_property(np, "big-endian", NULL)) | ||
549 | flags |= MPIC_BIG_ENDIAN; | ||
550 | |||
551 | /* Primary Big Endian means HT interrupts. This is quite dodgy | ||
552 | * but works until I find a better way | ||
553 | */ | ||
554 | if (master && (flags & MPIC_BIG_ENDIAN)) | ||
555 | flags |= MPIC_BROKEN_U3; | ||
556 | |||
557 | mpic = mpic_alloc(r.start, flags, 0, offset, count, master ? 252 : 0, | ||
558 | senses, count, name); | ||
559 | if (mpic == NULL) | ||
560 | return NULL; | ||
561 | |||
562 | mpic_init(mpic); | ||
563 | |||
564 | return mpic; | ||
565 | } | ||
566 | |||
567 | static int __init pmac_pic_probe_mpic(void) | ||
568 | { | ||
569 | struct mpic *mpic1, *mpic2; | ||
570 | struct device_node *np, *master = NULL, *slave = NULL; | ||
571 | |||
572 | /* We can have up to 2 MPICs cascaded */ | ||
573 | for (np = NULL; (np = of_find_node_by_type(np, "open-pic")) | ||
574 | != NULL;) { | ||
575 | if (master == NULL && | ||
576 | get_property(np, "interrupts", NULL) == NULL) | ||
577 | master = of_node_get(np); | ||
578 | else if (slave == NULL) | ||
579 | slave = of_node_get(np); | ||
580 | if (master && slave) | ||
581 | break; | ||
582 | } | ||
583 | |||
584 | /* Check for bogus setups */ | ||
585 | if (master == NULL && slave != NULL) { | ||
586 | master = slave; | ||
587 | slave = NULL; | ||
588 | } | ||
589 | |||
590 | /* Not found, default to good old pmac pic */ | ||
591 | if (master == NULL) | ||
592 | return -ENODEV; | ||
593 | |||
594 | /* Set master handler */ | ||
595 | ppc_md.get_irq = mpic_get_irq; | ||
596 | |||
597 | /* Setup master */ | ||
598 | mpic1 = pmac_setup_one_mpic(master, 1); | ||
599 | BUG_ON(mpic1 == NULL); | ||
600 | |||
601 | /* Install NMI if any */ | ||
602 | pmac_pic_setup_mpic_nmi(mpic1); | ||
603 | |||
604 | of_node_put(master); | ||
605 | |||
606 | /* No slave, let's go out */ | ||
607 | if (slave == NULL || slave->n_intrs < 1) | ||
608 | return 0; | ||
609 | |||
610 | mpic2 = pmac_setup_one_mpic(slave, 0); | ||
611 | if (mpic2 == NULL) { | ||
612 | printk(KERN_ERR "Failed to setup slave MPIC\n"); | ||
613 | of_node_put(slave); | ||
614 | return 0; | ||
615 | } | ||
616 | mpic_setup_cascade(slave->intrs[0].line, pmac_u3_cascade, mpic2); | ||
617 | |||
618 | of_node_put(slave); | ||
619 | return 0; | ||
620 | } | ||
621 | |||
622 | |||
623 | void __init pmac_pic_init(void) | ||
624 | { | ||
625 | /* We first try to detect Apple's new Core99 chipset, since mac-io | ||
626 | * is quite different on those machines and contains an IBM MPIC2. | ||
627 | */ | ||
628 | if (pmac_pic_probe_mpic() == 0) | ||
629 | return; | ||
630 | |||
631 | #ifdef CONFIG_PPC32 | ||
632 | pmac_pic_probe_oldstyle(); | ||
633 | #endif | ||
580 | } | 634 | } |
581 | 635 | ||
582 | #if defined(CONFIG_PM) && defined(CONFIG_PPC32) | 636 | #if defined(CONFIG_PM) && defined(CONFIG_PPC32) |
diff --git a/arch/powerpc/platforms/powermac/pmac.h b/arch/powerpc/platforms/powermac/pmac.h index 2ad25e13423e..21c7b0f8f329 100644 --- a/arch/powerpc/platforms/powermac/pmac.h +++ b/arch/powerpc/platforms/powermac/pmac.h | |||
@@ -42,10 +42,6 @@ extern void pmac_ide_init_hwif_ports(hw_regs_t *hw, | |||
42 | unsigned long data_port, unsigned long ctrl_port, int *irq); | 42 | unsigned long data_port, unsigned long ctrl_port, int *irq); |
43 | 43 | ||
44 | extern int pmac_nvram_init(void); | 44 | extern int pmac_nvram_init(void); |
45 | 45 | extern void pmac_pic_init(void); | |
46 | extern struct hw_interrupt_type pmac_pic; | ||
47 | |||
48 | void pmac_pic_init(void); | ||
49 | int pmac_get_irq(struct pt_regs *regs); | ||
50 | 46 | ||
51 | #endif /* __PMAC_H__ */ | 47 | #endif /* __PMAC_H__ */ |
diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 7acb0546671f..3b1a9d4fcbc6 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c | |||
@@ -60,6 +60,7 @@ | |||
60 | #include <asm/system.h> | 60 | #include <asm/system.h> |
61 | #include <asm/pgtable.h> | 61 | #include <asm/pgtable.h> |
62 | #include <asm/io.h> | 62 | #include <asm/io.h> |
63 | #include <asm/kexec.h> | ||
63 | #include <asm/pci-bridge.h> | 64 | #include <asm/pci-bridge.h> |
64 | #include <asm/ohare.h> | 65 | #include <asm/ohare.h> |
65 | #include <asm/mediabay.h> | 66 | #include <asm/mediabay.h> |
@@ -74,8 +75,8 @@ | |||
74 | #include <asm/iommu.h> | 75 | #include <asm/iommu.h> |
75 | #include <asm/smu.h> | 76 | #include <asm/smu.h> |
76 | #include <asm/pmc.h> | 77 | #include <asm/pmc.h> |
77 | #include <asm/mpic.h> | ||
78 | #include <asm/lmb.h> | 78 | #include <asm/lmb.h> |
79 | #include <asm/udbg.h> | ||
79 | 80 | ||
80 | #include "pmac.h" | 81 | #include "pmac.h" |
81 | 82 | ||
@@ -321,16 +322,6 @@ void __init pmac_setup_arch(void) | |||
321 | l2cr_init(); | 322 | l2cr_init(); |
322 | #endif /* CONFIG_PPC32 */ | 323 | #endif /* CONFIG_PPC32 */ |
323 | 324 | ||
324 | #ifdef CONFIG_PPC64 | ||
325 | /* Probe motherboard chipset */ | ||
326 | /* this is done earlier in setup_arch for 32-bit */ | ||
327 | pmac_feature_init(); | ||
328 | |||
329 | /* We can NAP */ | ||
330 | powersave_nap = 1; | ||
331 | printk(KERN_INFO "Using native/NAP idle loop\n"); | ||
332 | #endif | ||
333 | |||
334 | #ifdef CONFIG_KGDB | 325 | #ifdef CONFIG_KGDB |
335 | zs_kgdb_hook(0); | 326 | zs_kgdb_hook(0); |
336 | #endif | 327 | #endif |
@@ -354,7 +345,7 @@ void __init pmac_setup_arch(void) | |||
354 | 345 | ||
355 | #ifdef CONFIG_SMP | 346 | #ifdef CONFIG_SMP |
356 | /* Check for Core99 */ | 347 | /* Check for Core99 */ |
357 | if (find_devices("uni-n") || find_devices("u3")) | 348 | if (find_devices("uni-n") || find_devices("u3") || find_devices("u4")) |
358 | smp_ops = &core99_smp_ops; | 349 | smp_ops = &core99_smp_ops; |
359 | #ifdef CONFIG_PPC32 | 350 | #ifdef CONFIG_PPC32 |
360 | else | 351 | else |
@@ -621,35 +612,31 @@ static void __init pmac_init_early(void) | |||
621 | * and call ioremap | 612 | * and call ioremap |
622 | */ | 613 | */ |
623 | hpte_init_native(); | 614 | hpte_init_native(); |
615 | #endif | ||
624 | 616 | ||
625 | /* Init SCC */ | 617 | /* Enable early btext debug if requested */ |
626 | if (strstr(cmd_line, "sccdbg")) { | 618 | if (strstr(cmd_line, "btextdbg")) { |
627 | sccdbg = 1; | 619 | udbg_adb_init_early(); |
628 | udbg_init_scc(NULL); | 620 | register_early_udbg_console(); |
629 | } | 621 | } |
630 | 622 | ||
631 | /* Setup interrupt mapping options */ | 623 | /* Probe motherboard chipset */ |
632 | ppc64_interrupt_controller = IC_OPEN_PIC; | 624 | pmac_feature_init(); |
633 | 625 | ||
634 | iommu_init_early_u3(); | 626 | /* We can NAP */ |
635 | #endif | 627 | powersave_nap = 1; |
636 | } | 628 | printk(KERN_INFO "Using native/NAP idle loop\n"); |
629 | |||
630 | /* Initialize debug stuff */ | ||
631 | udbg_scc_init(!!strstr(cmd_line, "sccdbg")); | ||
632 | udbg_adb_init(!!strstr(cmd_line, "btextdbg")); | ||
637 | 633 | ||
638 | static void __init pmac_progress(char *s, unsigned short hex) | ||
639 | { | ||
640 | #ifdef CONFIG_PPC64 | 634 | #ifdef CONFIG_PPC64 |
641 | if (sccdbg) { | 635 | /* Setup interrupt mapping options */ |
642 | udbg_puts(s); | 636 | ppc64_interrupt_controller = IC_OPEN_PIC; |
643 | udbg_puts("\n"); | 637 | |
644 | return; | 638 | iommu_init_early_dart(); |
645 | } | ||
646 | #endif | 639 | #endif |
647 | #ifdef CONFIG_BOOTX_TEXT | ||
648 | if (boot_text_mapped) { | ||
649 | btext_drawstring(s); | ||
650 | btext_drawchar('\n'); | ||
651 | } | ||
652 | #endif /* CONFIG_BOOTX_TEXT */ | ||
653 | } | 640 | } |
654 | 641 | ||
655 | /* | 642 | /* |
@@ -663,35 +650,14 @@ static int pmac_check_legacy_ioport(unsigned int baseport) | |||
663 | 650 | ||
664 | static int __init pmac_declare_of_platform_devices(void) | 651 | static int __init pmac_declare_of_platform_devices(void) |
665 | { | 652 | { |
666 | struct device_node *np, *npp; | 653 | struct device_node *np; |
667 | 654 | ||
668 | np = find_devices("uni-n"); | 655 | np = of_find_node_by_name(NULL, "valkyrie"); |
669 | if (np) { | ||
670 | for (np = np->child; np != NULL; np = np->sibling) | ||
671 | if (strncmp(np->name, "i2c", 3) == 0) { | ||
672 | of_platform_device_create(np, "uni-n-i2c", | ||
673 | NULL); | ||
674 | break; | ||
675 | } | ||
676 | } | ||
677 | np = find_devices("valkyrie"); | ||
678 | if (np) | 656 | if (np) |
679 | of_platform_device_create(np, "valkyrie", NULL); | 657 | of_platform_device_create(np, "valkyrie", NULL); |
680 | np = find_devices("platinum"); | 658 | np = of_find_node_by_name(NULL, "platinum"); |
681 | if (np) | 659 | if (np) |
682 | of_platform_device_create(np, "platinum", NULL); | 660 | of_platform_device_create(np, "platinum", NULL); |
683 | |||
684 | npp = of_find_node_by_name(NULL, "u3"); | ||
685 | if (npp) { | ||
686 | for (np = NULL; (np = of_get_next_child(npp, np)) != NULL;) { | ||
687 | if (strncmp(np->name, "i2c", 3) == 0) { | ||
688 | of_platform_device_create(np, "u3-i2c", NULL); | ||
689 | of_node_put(np); | ||
690 | break; | ||
691 | } | ||
692 | } | ||
693 | of_node_put(npp); | ||
694 | } | ||
695 | np = of_find_node_by_type(NULL, "smu"); | 661 | np = of_find_node_by_type(NULL, "smu"); |
696 | if (np) { | 662 | if (np) { |
697 | of_platform_device_create(np, "smu", NULL); | 663 | of_platform_device_create(np, "smu", NULL); |
@@ -718,7 +684,7 @@ static int __init pmac_probe(int platform) | |||
718 | * occupies having to be broken up so the DART itself is not | 684 | * occupies having to be broken up so the DART itself is not |
719 | * part of the cacheable linear mapping | 685 | * part of the cacheable linear mapping |
720 | */ | 686 | */ |
721 | alloc_u3_dart_table(); | 687 | alloc_dart_table(); |
722 | #endif | 688 | #endif |
723 | 689 | ||
724 | #ifdef CONFIG_PMAC_SMU | 690 | #ifdef CONFIG_PMAC_SMU |
@@ -734,15 +700,17 @@ static int __init pmac_probe(int platform) | |||
734 | } | 700 | } |
735 | 701 | ||
736 | #ifdef CONFIG_PPC64 | 702 | #ifdef CONFIG_PPC64 |
737 | static int pmac_probe_mode(struct pci_bus *bus) | 703 | /* Move that to pci.c */ |
704 | static int pmac_pci_probe_mode(struct pci_bus *bus) | ||
738 | { | 705 | { |
739 | struct device_node *node = bus->sysdata; | 706 | struct device_node *node = bus->sysdata; |
740 | 707 | ||
741 | /* We need to use normal PCI probing for the AGP bus, | 708 | /* We need to use normal PCI probing for the AGP bus, |
742 | since the device for the AGP bridge isn't in the tree. */ | 709 | * since the device for the AGP bridge isn't in the tree. |
743 | if (bus->self == NULL && device_is_compatible(node, "u3-agp")) | 710 | */ |
711 | if (bus->self == NULL && (device_is_compatible(node, "u3-agp") || | ||
712 | device_is_compatible(node, "u4-pcie"))) | ||
744 | return PCI_PROBE_NORMAL; | 713 | return PCI_PROBE_NORMAL; |
745 | |||
746 | return PCI_PROBE_DEVTREE; | 714 | return PCI_PROBE_DEVTREE; |
747 | } | 715 | } |
748 | #endif | 716 | #endif |
@@ -756,7 +724,7 @@ struct machdep_calls __initdata pmac_md = { | |||
756 | .init_early = pmac_init_early, | 724 | .init_early = pmac_init_early, |
757 | .show_cpuinfo = pmac_show_cpuinfo, | 725 | .show_cpuinfo = pmac_show_cpuinfo, |
758 | .init_IRQ = pmac_pic_init, | 726 | .init_IRQ = pmac_pic_init, |
759 | .get_irq = mpic_get_irq, /* changed later */ | 727 | .get_irq = NULL, /* changed later */ |
760 | .pcibios_fixup = pmac_pcibios_fixup, | 728 | .pcibios_fixup = pmac_pcibios_fixup, |
761 | .restart = pmac_restart, | 729 | .restart = pmac_restart, |
762 | .power_off = pmac_power_off, | 730 | .power_off = pmac_power_off, |
@@ -768,12 +736,17 @@ struct machdep_calls __initdata pmac_md = { | |||
768 | .calibrate_decr = pmac_calibrate_decr, | 736 | .calibrate_decr = pmac_calibrate_decr, |
769 | .feature_call = pmac_do_feature_call, | 737 | .feature_call = pmac_do_feature_call, |
770 | .check_legacy_ioport = pmac_check_legacy_ioport, | 738 | .check_legacy_ioport = pmac_check_legacy_ioport, |
771 | .progress = pmac_progress, | 739 | .progress = udbg_progress, |
772 | #ifdef CONFIG_PPC64 | 740 | #ifdef CONFIG_PPC64 |
773 | .pci_probe_mode = pmac_probe_mode, | 741 | .pci_probe_mode = pmac_pci_probe_mode, |
774 | .idle_loop = native_idle, | 742 | .idle_loop = native_idle, |
775 | .enable_pmcs = power4_enable_pmcs, | 743 | .enable_pmcs = power4_enable_pmcs, |
744 | #ifdef CONFIG_KEXEC | ||
745 | .machine_kexec = default_machine_kexec, | ||
746 | .machine_kexec_prepare = default_machine_kexec_prepare, | ||
747 | .machine_crash_shutdown = default_machine_crash_shutdown, | ||
776 | #endif | 748 | #endif |
749 | #endif /* CONFIG_PPC64 */ | ||
777 | #ifdef CONFIG_PPC32 | 750 | #ifdef CONFIG_PPC32 |
778 | .pcibios_enable_device_hook = pmac_pci_enable_device_hook, | 751 | .pcibios_enable_device_hook = pmac_pci_enable_device_hook, |
779 | .pcibios_after_init = pmac_pcibios_after_init, | 752 | .pcibios_after_init = pmac_pcibios_after_init, |
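The setup.c hunks above drop the board-local pmac_progress()/sccdbg plumbing in favour of the generic udbg layer (.progress = udbg_progress) and, on 64-bit, point the machine description at the default kexec handlers. A minimal sketch of the calling convention these ppc_md hooks rely on follows; the guard-before-call pattern is taken from the kick_cpu path further down, while the udbg-backed progress body is an assumption about how such a hook is typically written, not a copy of the real udbg_progress().

/* Sketch only: how a machdep progress hook is invoked and what a
 * udbg-backed implementation roughly looks like. */
#include <asm/machdep.h>
#include <asm/udbg.h>

static void example_progress(char *s, unsigned short hex)
{
	/* hex is a checkpoint code; ignored by this simple backend */
	udbg_puts(s);		/* goes to whatever backend udbg_putc points at */
	udbg_puts("\n");
}

static void example_report_stage(void)
{
	/* callers always test the pointer, not every platform installs one */
	if (ppc_md.progress)
		ppc_md.progress("pmac_setup_arch: exit", 0x0);
}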
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c index fb2a7c798e82..0df2cdcd805c 100644 --- a/arch/powerpc/platforms/powermac/smp.c +++ b/arch/powerpc/platforms/powermac/smp.c | |||
@@ -52,8 +52,9 @@ | |||
52 | #include <asm/cacheflush.h> | 52 | #include <asm/cacheflush.h> |
53 | #include <asm/keylargo.h> | 53 | #include <asm/keylargo.h> |
54 | #include <asm/pmac_low_i2c.h> | 54 | #include <asm/pmac_low_i2c.h> |
55 | #include <asm/pmac_pfunc.h> | ||
55 | 56 | ||
56 | #undef DEBUG | 57 | #define DEBUG |
57 | 58 | ||
58 | #ifdef DEBUG | 59 | #ifdef DEBUG |
59 | #define DBG(fmt...) udbg_printf(fmt) | 60 | #define DBG(fmt...) udbg_printf(fmt) |
@@ -62,6 +63,7 @@ | |||
62 | #endif | 63 | #endif |
63 | 64 | ||
64 | extern void __secondary_start_pmac_0(void); | 65 | extern void __secondary_start_pmac_0(void); |
66 | extern int pmac_pfunc_base_install(void); | ||
65 | 67 | ||
66 | #ifdef CONFIG_PPC32 | 68 | #ifdef CONFIG_PPC32 |
67 | 69 | ||
@@ -361,7 +363,6 @@ static void __init psurge_dual_sync_tb(int cpu_nr) | |||
361 | set_dec(tb_ticks_per_jiffy); | 363 | set_dec(tb_ticks_per_jiffy); |
362 | /* XXX fixme */ | 364 | /* XXX fixme */ |
363 | set_tb(0, 0); | 365 | set_tb(0, 0); |
364 | last_jiffy_stamp(cpu_nr) = 0; | ||
365 | 366 | ||
366 | if (cpu_nr > 0) { | 367 | if (cpu_nr > 0) { |
367 | mb(); | 368 | mb(); |
@@ -429,15 +430,62 @@ struct smp_ops_t psurge_smp_ops = { | |||
429 | }; | 430 | }; |
430 | #endif /* CONFIG_PPC32 - actually powersurge support */ | 431 | #endif /* CONFIG_PPC32 - actually powersurge support */ |
431 | 432 | ||
433 | /* | ||
434 | * Core 99 and later support | ||
435 | */ | ||
436 | |||
437 | static void (*pmac_tb_freeze)(int freeze); | ||
438 | static unsigned long timebase; | ||
439 | static int tb_req; | ||
440 | |||
441 | static void smp_core99_give_timebase(void) | ||
442 | { | ||
443 | unsigned long flags; | ||
444 | |||
445 | local_irq_save(flags); | ||
446 | |||
447 | while(!tb_req) | ||
448 | barrier(); | ||
449 | tb_req = 0; | ||
450 | (*pmac_tb_freeze)(1); | ||
451 | mb(); | ||
452 | timebase = get_tb(); | ||
453 | mb(); | ||
454 | while (timebase) | ||
455 | barrier(); | ||
456 | mb(); | ||
457 | (*pmac_tb_freeze)(0); | ||
458 | mb(); | ||
459 | |||
460 | local_irq_restore(flags); | ||
461 | } | ||
462 | |||
463 | |||
464 | static void __devinit smp_core99_take_timebase(void) | ||
465 | { | ||
466 | unsigned long flags; | ||
467 | |||
468 | local_irq_save(flags); | ||
469 | |||
470 | tb_req = 1; | ||
471 | mb(); | ||
472 | while (!timebase) | ||
473 | barrier(); | ||
474 | mb(); | ||
475 | set_tb(timebase >> 32, timebase & 0xffffffff); | ||
476 | timebase = 0; | ||
477 | mb(); | ||
478 | set_dec(tb_ticks_per_jiffy/2); | ||
479 | |||
480 | local_irq_restore(flags); | ||
481 | } | ||
482 | |||
432 | #ifdef CONFIG_PPC64 | 483 | #ifdef CONFIG_PPC64 |
433 | /* | 484 | /* |
434 | * G5s enable/disable the timebase via an i2c-connected clock chip. | 485 | * G5s enable/disable the timebase via an i2c-connected clock chip. |
435 | */ | 486 | */ |
436 | static struct device_node *pmac_tb_clock_chip_host; | 487 | static struct pmac_i2c_bus *pmac_tb_clock_chip_host; |
437 | static u8 pmac_tb_pulsar_addr; | 488 | static u8 pmac_tb_pulsar_addr; |
438 | static void (*pmac_tb_freeze)(int freeze); | ||
439 | static DEFINE_SPINLOCK(timebase_lock); | ||
440 | static unsigned long timebase; | ||
441 | 489 | ||
442 | static void smp_core99_cypress_tb_freeze(int freeze) | 490 | static void smp_core99_cypress_tb_freeze(int freeze) |
443 | { | 491 | { |
@@ -447,19 +495,20 @@ static void smp_core99_cypress_tb_freeze(int freeze) | |||
447 | /* Strangely, the device-tree says address is 0xd2, but darwin | 495 | /* Strangely, the device-tree says address is 0xd2, but darwin |
448 | * accesses 0xd0 ... | 496 | * accesses 0xd0 ... |
449 | */ | 497 | */ |
450 | pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined); | 498 | pmac_i2c_setmode(pmac_tb_clock_chip_host, |
451 | rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host, | 499 | pmac_i2c_mode_combined); |
452 | 0xd0 | pmac_low_i2c_read, | 500 | rc = pmac_i2c_xfer(pmac_tb_clock_chip_host, |
453 | 0x81, &data, 1); | 501 | 0xd0 | pmac_i2c_read, |
502 | 1, 0x81, &data, 1); | ||
454 | if (rc != 0) | 503 | if (rc != 0) |
455 | goto bail; | 504 | goto bail; |
456 | 505 | ||
457 | data = (data & 0xf3) | (freeze ? 0x00 : 0x0c); | 506 | data = (data & 0xf3) | (freeze ? 0x00 : 0x0c); |
458 | 507 | ||
459 | pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub); | 508 | pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub); |
460 | rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host, | 509 | rc = pmac_i2c_xfer(pmac_tb_clock_chip_host, |
461 | 0xd0 | pmac_low_i2c_write, | 510 | 0xd0 | pmac_i2c_write, |
462 | 0x81, &data, 1); | 511 | 1, 0x81, &data, 1); |
463 | 512 | ||
464 | bail: | 513 | bail: |
465 | if (rc != 0) { | 514 | if (rc != 0) { |
@@ -475,19 +524,20 @@ static void smp_core99_pulsar_tb_freeze(int freeze) | |||
475 | u8 data; | 524 | u8 data; |
476 | int rc; | 525 | int rc; |
477 | 526 | ||
478 | pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_combined); | 527 | pmac_i2c_setmode(pmac_tb_clock_chip_host, |
479 | rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host, | 528 | pmac_i2c_mode_combined); |
480 | pmac_tb_pulsar_addr | pmac_low_i2c_read, | 529 | rc = pmac_i2c_xfer(pmac_tb_clock_chip_host, |
481 | 0x2e, &data, 1); | 530 | pmac_tb_pulsar_addr | pmac_i2c_read, |
531 | 1, 0x2e, &data, 1); | ||
482 | if (rc != 0) | 532 | if (rc != 0) |
483 | goto bail; | 533 | goto bail; |
484 | 534 | ||
485 | data = (data & 0x88) | (freeze ? 0x11 : 0x22); | 535 | data = (data & 0x88) | (freeze ? 0x11 : 0x22); |
486 | 536 | ||
487 | pmac_low_i2c_setmode(pmac_tb_clock_chip_host, pmac_low_i2c_mode_stdsub); | 537 | pmac_i2c_setmode(pmac_tb_clock_chip_host, pmac_i2c_mode_stdsub); |
488 | rc = pmac_low_i2c_xfer(pmac_tb_clock_chip_host, | 538 | rc = pmac_i2c_xfer(pmac_tb_clock_chip_host, |
489 | pmac_tb_pulsar_addr | pmac_low_i2c_write, | 539 | pmac_tb_pulsar_addr | pmac_i2c_write, |
490 | 0x2e, &data, 1); | 540 | 1, 0x2e, &data, 1); |
491 | bail: | 541 | bail: |
492 | if (rc != 0) { | 542 | if (rc != 0) { |
493 | printk(KERN_ERR "Pulsar Timebase %s rc: %d\n", | 543 | printk(KERN_ERR "Pulsar Timebase %s rc: %d\n", |
@@ -496,54 +546,14 @@ static void smp_core99_pulsar_tb_freeze(int freeze) | |||
496 | } | 546 | } |
497 | } | 547 | } |
498 | 548 | ||
499 | 549 | static void __init smp_core99_setup_i2c_hwsync(int ncpus) | |
500 | static void smp_core99_give_timebase(void) | ||
501 | { | ||
502 | /* Open i2c bus for synchronous access */ | ||
503 | if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0)) | ||
504 | panic("Can't open i2c for TB sync !\n"); | ||
505 | |||
506 | spin_lock(&timebase_lock); | ||
507 | (*pmac_tb_freeze)(1); | ||
508 | mb(); | ||
509 | timebase = get_tb(); | ||
510 | spin_unlock(&timebase_lock); | ||
511 | |||
512 | while (timebase) | ||
513 | barrier(); | ||
514 | |||
515 | spin_lock(&timebase_lock); | ||
516 | (*pmac_tb_freeze)(0); | ||
517 | spin_unlock(&timebase_lock); | ||
518 | |||
519 | /* Close i2c bus */ | ||
520 | pmac_low_i2c_close(pmac_tb_clock_chip_host); | ||
521 | } | ||
522 | |||
523 | |||
524 | static void __devinit smp_core99_take_timebase(void) | ||
525 | { | ||
526 | while (!timebase) | ||
527 | barrier(); | ||
528 | spin_lock(&timebase_lock); | ||
529 | set_tb(timebase >> 32, timebase & 0xffffffff); | ||
530 | timebase = 0; | ||
531 | spin_unlock(&timebase_lock); | ||
532 | } | ||
533 | |||
534 | static void __init smp_core99_setup(int ncpus) | ||
535 | { | 550 | { |
536 | struct device_node *cc = NULL; | 551 | struct device_node *cc = NULL; |
537 | struct device_node *p; | 552 | struct device_node *p; |
553 | const char *name = NULL; | ||
538 | u32 *reg; | 554 | u32 *reg; |
539 | int ok; | 555 | int ok; |
540 | 556 | ||
541 | /* HW sync only on these platforms */ | ||
542 | if (!machine_is_compatible("PowerMac7,2") && | ||
543 | !machine_is_compatible("PowerMac7,3") && | ||
544 | !machine_is_compatible("RackMac3,1")) | ||
545 | return; | ||
546 | |||
547 | /* Look for the clock chip */ | 557 | /* Look for the clock chip */ |
548 | while ((cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL) { | 558 | while ((cc = of_find_node_by_name(cc, "i2c-hwclock")) != NULL) { |
549 | p = of_get_parent(cc); | 559 | p = of_get_parent(cc); |
@@ -552,124 +562,86 @@ static void __init smp_core99_setup(int ncpus) | |||
552 | if (!ok) | 562 | if (!ok) |
553 | continue; | 563 | continue; |
554 | 564 | ||
565 | pmac_tb_clock_chip_host = pmac_i2c_find_bus(cc); | ||
566 | if (pmac_tb_clock_chip_host == NULL) | ||
567 | continue; | ||
555 | reg = (u32 *)get_property(cc, "reg", NULL); | 568 | reg = (u32 *)get_property(cc, "reg", NULL); |
556 | if (reg == NULL) | 569 | if (reg == NULL) |
557 | continue; | 570 | continue; |
558 | |||
559 | switch (*reg) { | 571 | switch (*reg) { |
560 | case 0xd2: | 572 | case 0xd2: |
561 | if (device_is_compatible(cc, "pulsar-legacy-slewing")) { | 573 | if (device_is_compatible(cc,"pulsar-legacy-slewing")) { |
562 | pmac_tb_freeze = smp_core99_pulsar_tb_freeze; | 574 | pmac_tb_freeze = smp_core99_pulsar_tb_freeze; |
563 | pmac_tb_pulsar_addr = 0xd2; | 575 | pmac_tb_pulsar_addr = 0xd2; |
564 | printk(KERN_INFO "Timebase clock is Pulsar chip\n"); | 576 | name = "Pulsar"; |
565 | } else if (device_is_compatible(cc, "cy28508")) { | 577 | } else if (device_is_compatible(cc, "cy28508")) { |
566 | pmac_tb_freeze = smp_core99_cypress_tb_freeze; | 578 | pmac_tb_freeze = smp_core99_cypress_tb_freeze; |
567 | printk(KERN_INFO "Timebase clock is Cypress chip\n"); | 579 | name = "Cypress"; |
568 | } | 580 | } |
569 | break; | 581 | break; |
570 | case 0xd4: | 582 | case 0xd4: |
571 | pmac_tb_freeze = smp_core99_pulsar_tb_freeze; | 583 | pmac_tb_freeze = smp_core99_pulsar_tb_freeze; |
572 | pmac_tb_pulsar_addr = 0xd4; | 584 | pmac_tb_pulsar_addr = 0xd4; |
573 | printk(KERN_INFO "Timebase clock is Pulsar chip\n"); | 585 | name = "Pulsar"; |
574 | break; | 586 | break; |
575 | } | 587 | } |
576 | if (pmac_tb_freeze != NULL) { | 588 | if (pmac_tb_freeze != NULL) |
577 | pmac_tb_clock_chip_host = of_get_parent(cc); | ||
578 | of_node_put(cc); | ||
579 | break; | 589 | break; |
580 | } | ||
581 | } | 590 | } |
582 | if (pmac_tb_freeze == NULL) { | 591 | if (pmac_tb_freeze != NULL) { |
583 | smp_ops->give_timebase = smp_generic_give_timebase; | 592 | /* Open i2c bus for synchronous access */ |
584 | smp_ops->take_timebase = smp_generic_take_timebase; | 593 | if (pmac_i2c_open(pmac_tb_clock_chip_host, 1)) { |
594 | printk(KERN_ERR "Failed top open i2c bus for clock" | ||
595 | " sync, fallback to software sync !\n"); | ||
596 | goto no_i2c_sync; | ||
597 | } | ||
598 | printk(KERN_INFO "Processor timebase sync using %s i2c clock\n", | ||
599 | name); | ||
600 | return; | ||
585 | } | 601 | } |
602 | no_i2c_sync: | ||
603 | pmac_tb_freeze = NULL; | ||
604 | pmac_tb_clock_chip_host = NULL; | ||
586 | } | 605 | } |
587 | 606 | ||
588 | /* nothing to do here, caches are already set up by service processor */ | 607 | |
589 | static inline void __devinit core99_init_caches(int cpu) | 608 | |
609 | /* | ||
610 | * Newer G5s uses a platform function | ||
611 | */ | ||
612 | |||
613 | static void smp_core99_pfunc_tb_freeze(int freeze) | ||
590 | { | 614 | { |
615 | struct device_node *cpus; | ||
616 | struct pmf_args args; | ||
617 | |||
618 | cpus = of_find_node_by_path("/cpus"); | ||
619 | BUG_ON(cpus == NULL); | ||
620 | args.count = 1; | ||
621 | args.u[0].v = !freeze; | ||
622 | pmf_call_function(cpus, "cpu-timebase", &args); | ||
623 | of_node_put(cpus); | ||
591 | } | 624 | } |
592 | 625 | ||
593 | #else /* CONFIG_PPC64 */ | 626 | #else /* CONFIG_PPC64 */ |
594 | 627 | ||
595 | /* | 628 | /* |
596 | * SMP G4 powermacs use a GPIO to enable/disable the timebase. | 629 | * SMP G4s use a GPIO to enable/disable the timebase. |
597 | */ | 630 | */ |
598 | 631 | ||
599 | static unsigned int core99_tb_gpio; /* Timebase freeze GPIO */ | 632 | static unsigned int core99_tb_gpio; /* Timebase freeze GPIO */ |
600 | 633 | ||
601 | static unsigned int pri_tb_hi, pri_tb_lo; | 634 | static void smp_core99_gpio_tb_freeze(int freeze) |
602 | static unsigned int pri_tb_stamp; | ||
603 | |||
604 | /* not __init, called in sleep/wakeup code */ | ||
605 | void smp_core99_give_timebase(void) | ||
606 | { | 635 | { |
607 | unsigned long flags; | 636 | if (freeze) |
608 | unsigned int t; | 637 | pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4); |
609 | 638 | else | |
610 | /* wait for the secondary to be in take_timebase */ | 639 | pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0); |
611 | for (t = 100000; t > 0 && !sec_tb_reset; --t) | ||
612 | udelay(10); | ||
613 | if (!sec_tb_reset) { | ||
614 | printk(KERN_WARNING "Timeout waiting sync on second CPU\n"); | ||
615 | return; | ||
616 | } | ||
617 | |||
618 | /* freeze the timebase and read it */ | ||
619 | /* disable interrupts so the timebase is disabled for the | ||
620 | shortest possible time */ | ||
621 | local_irq_save(flags); | ||
622 | pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 4); | ||
623 | pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0); | 640 | pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0); |
624 | mb(); | ||
625 | pri_tb_hi = get_tbu(); | ||
626 | pri_tb_lo = get_tbl(); | ||
627 | pri_tb_stamp = last_jiffy_stamp(smp_processor_id()); | ||
628 | mb(); | ||
629 | |||
630 | /* tell the secondary we're ready */ | ||
631 | sec_tb_reset = 2; | ||
632 | mb(); | ||
633 | |||
634 | /* wait for the secondary to have taken it */ | ||
635 | /* note: can't use udelay here, since it needs the timebase running */ | ||
636 | for (t = 10000000; t > 0 && sec_tb_reset; --t) | ||
637 | barrier(); | ||
638 | if (sec_tb_reset) | ||
639 | /* XXX BUG_ON here? */ | ||
640 | printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n"); | ||
641 | |||
642 | /* Now, restart the timebase by leaving the GPIO to an open collector */ | ||
643 | pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0); | ||
644 | pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, core99_tb_gpio, 0); | ||
645 | local_irq_restore(flags); | ||
646 | } | 641 | } |
647 | 642 | ||
648 | /* not __init, called in sleep/wakeup code */ | ||
649 | void smp_core99_take_timebase(void) | ||
650 | { | ||
651 | unsigned long flags; | ||
652 | |||
653 | /* tell the primary we're here */ | ||
654 | sec_tb_reset = 1; | ||
655 | mb(); | ||
656 | |||
657 | /* wait for the primary to set pri_tb_hi/lo */ | ||
658 | while (sec_tb_reset < 2) | ||
659 | mb(); | ||
660 | |||
661 | /* set our stuff the same as the primary */ | ||
662 | local_irq_save(flags); | ||
663 | set_dec(1); | ||
664 | set_tb(pri_tb_hi, pri_tb_lo); | ||
665 | last_jiffy_stamp(smp_processor_id()) = pri_tb_stamp; | ||
666 | mb(); | ||
667 | 643 | ||
668 | /* tell the primary we're done */ | 644 | #endif /* !CONFIG_PPC64 */ |
669 | sec_tb_reset = 0; | ||
670 | mb(); | ||
671 | local_irq_restore(flags); | ||
672 | } | ||
673 | 645 | ||
674 | /* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */ | 646 | /* L2 and L3 cache settings to pass from CPU0 to CPU1 on G4 cpus */ |
675 | volatile static long int core99_l2_cache; | 647 | volatile static long int core99_l2_cache; |
@@ -677,6 +649,7 @@ volatile static long int core99_l3_cache; | |||
677 | 649 | ||
678 | static void __devinit core99_init_caches(int cpu) | 650 | static void __devinit core99_init_caches(int cpu) |
679 | { | 651 | { |
652 | #ifndef CONFIG_PPC64 | ||
680 | if (!cpu_has_feature(CPU_FTR_L2CR)) | 653 | if (!cpu_has_feature(CPU_FTR_L2CR)) |
681 | return; | 654 | return; |
682 | 655 | ||
@@ -702,30 +675,76 @@ static void __devinit core99_init_caches(int cpu) | |||
702 | _set_L3CR(core99_l3_cache); | 675 | _set_L3CR(core99_l3_cache); |
703 | printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache); | 676 | printk("CPU%d: L3CR set to %lx\n", cpu, core99_l3_cache); |
704 | } | 677 | } |
678 | #endif /* !CONFIG_PPC64 */ | ||
705 | } | 679 | } |
706 | 680 | ||
707 | static void __init smp_core99_setup(int ncpus) | 681 | static void __init smp_core99_setup(int ncpus) |
708 | { | 682 | { |
709 | struct device_node *cpu; | 683 | #ifdef CONFIG_PPC64 |
710 | u32 *tbprop = NULL; | 684 | |
711 | int i; | 685 | /* i2c based HW sync on some G5s */ |
686 | if (machine_is_compatible("PowerMac7,2") || | ||
687 | machine_is_compatible("PowerMac7,3") || | ||
688 | machine_is_compatible("RackMac3,1")) | ||
689 | smp_core99_setup_i2c_hwsync(ncpus); | ||
712 | 690 | ||
713 | core99_tb_gpio = KL_GPIO_TB_ENABLE; /* default value */ | 691 | /* pfunc based HW sync on recent G5s */ |
714 | cpu = of_find_node_by_type(NULL, "cpu"); | 692 | if (pmac_tb_freeze == NULL) { |
715 | if (cpu != NULL) { | 693 | struct device_node *cpus = |
716 | tbprop = (u32 *)get_property(cpu, "timebase-enable", NULL); | 694 | of_find_node_by_path("/cpus"); |
717 | if (tbprop) | 695 | if (cpus && |
718 | core99_tb_gpio = *tbprop; | 696 | get_property(cpus, "platform-cpu-timebase", NULL)) { |
719 | of_node_put(cpu); | 697 | pmac_tb_freeze = smp_core99_pfunc_tb_freeze; |
698 | printk(KERN_INFO "Processor timebase sync using" | ||
699 | " platform function\n"); | ||
700 | } | ||
720 | } | 701 | } |
721 | 702 | ||
722 | /* XXX should get this from reg properties */ | 703 | #else /* CONFIG_PPC64 */ |
723 | for (i = 1; i < ncpus; ++i) | 704 | |
724 | smp_hw_index[i] = i; | 705 | /* GPIO based HW sync on ppc32 Core99 */ |
725 | powersave_nap = 0; | 706 | if (pmac_tb_freeze == NULL && !machine_is_compatible("MacRISC4")) { |
726 | } | 707 | struct device_node *cpu; |
708 | u32 *tbprop = NULL; | ||
709 | |||
710 | core99_tb_gpio = KL_GPIO_TB_ENABLE; /* default value */ | ||
711 | cpu = of_find_node_by_type(NULL, "cpu"); | ||
712 | if (cpu != NULL) { | ||
713 | tbprop = (u32 *)get_property(cpu, "timebase-enable", | ||
714 | NULL); | ||
715 | if (tbprop) | ||
716 | core99_tb_gpio = *tbprop; | ||
717 | of_node_put(cpu); | ||
718 | } | ||
719 | pmac_tb_freeze = smp_core99_gpio_tb_freeze; | ||
720 | printk(KERN_INFO "Processor timebase sync using" | ||
721 | " GPIO 0x%02x\n", core99_tb_gpio); | ||
722 | } | ||
723 | |||
724 | #endif /* CONFIG_PPC64 */ | ||
725 | |||
726 | /* No timebase sync, fallback to software */ | ||
727 | if (pmac_tb_freeze == NULL) { | ||
728 | smp_ops->give_timebase = smp_generic_give_timebase; | ||
729 | smp_ops->take_timebase = smp_generic_take_timebase; | ||
730 | printk(KERN_INFO "Processor timebase sync using software\n"); | ||
731 | } | ||
732 | |||
733 | #ifndef CONFIG_PPC64 | ||
734 | { | ||
735 | int i; | ||
736 | |||
737 | /* XXX should get this from reg properties */ | ||
738 | for (i = 1; i < ncpus; ++i) | ||
739 | smp_hw_index[i] = i; | ||
740 | } | ||
727 | #endif | 741 | #endif |
728 | 742 | ||
743 | /* 32-bit SMP can't NAP */ ||
744 | if (!machine_is_compatible("MacRISC4")) | ||
745 | powersave_nap = 0; | ||
746 | } | ||
747 | |||
729 | static int __init smp_core99_probe(void) | 748 | static int __init smp_core99_probe(void) |
730 | { | 749 | { |
731 | struct device_node *cpus; | 750 | struct device_node *cpus; |
@@ -743,8 +762,19 @@ static int __init smp_core99_probe(void) | |||
743 | if (ncpus <= 1) | 762 | if (ncpus <= 1) |
744 | return 1; | 763 | return 1; |
745 | 764 | ||
765 | /* We need to perform some early initialisations before we can start | ||
766 | * setting up SMP as we are running before initcalls | ||
767 | */ | ||
768 | pmac_pfunc_base_install(); | ||
769 | pmac_i2c_init(); | ||
770 | |||
771 | /* Setup various bits like timebase sync method, ability to nap, ... */ | ||
746 | smp_core99_setup(ncpus); | 772 | smp_core99_setup(ncpus); |
773 | |||
774 | /* Install IPIs */ | ||
747 | mpic_request_ipis(); | 775 | mpic_request_ipis(); |
776 | |||
777 | /* Collect l2cr and l3cr values from CPU 0 */ | ||
748 | core99_init_caches(0); | 778 | core99_init_caches(0); |
749 | 779 | ||
750 | return ncpus; | 780 | return ncpus; |
@@ -753,14 +783,15 @@ static int __init smp_core99_probe(void) | |||
753 | static void __devinit smp_core99_kick_cpu(int nr) | 783 | static void __devinit smp_core99_kick_cpu(int nr) |
754 | { | 784 | { |
755 | unsigned int save_vector; | 785 | unsigned int save_vector; |
756 | unsigned long new_vector; | 786 | unsigned long target, flags; |
757 | unsigned long flags; | ||
758 | volatile unsigned int *vector | 787 | volatile unsigned int *vector |
759 | = ((volatile unsigned int *)(KERNELBASE+0x100)); | 788 | = ((volatile unsigned int *)(KERNELBASE+0x100)); |
760 | 789 | ||
761 | if (nr < 0 || nr > 3) | 790 | if (nr < 0 || nr > 3) |
762 | return; | 791 | return; |
763 | if (ppc_md.progress) ppc_md.progress("smp_core99_kick_cpu", 0x346); | 792 | |
793 | if (ppc_md.progress) | ||
794 | ppc_md.progress("smp_core99_kick_cpu", 0x346); | ||
764 | 795 | ||
765 | local_irq_save(flags); | 796 | local_irq_save(flags); |
766 | local_irq_disable(); | 797 | local_irq_disable(); |
@@ -768,14 +799,11 @@ static void __devinit smp_core99_kick_cpu(int nr) | |||
768 | /* Save reset vector */ | 799 | /* Save reset vector */ |
769 | save_vector = *vector; | 800 | save_vector = *vector; |
770 | 801 | ||
771 | /* Setup fake reset vector that does | 802 | /* Setup fake reset vector that does |
772 | * b __secondary_start_pmac_0 + nr*8 - KERNELBASE | 803 | * b __secondary_start_pmac_0 + nr*8 - KERNELBASE |
773 | */ | 804 | */ |
774 | new_vector = (unsigned long) __secondary_start_pmac_0 + nr * 8; | 805 | target = (unsigned long) __secondary_start_pmac_0 + nr * 8; |
775 | *vector = 0x48000002 + new_vector - KERNELBASE; | 806 | create_branch((unsigned long)vector, target, BRANCH_SET_LINK); |
776 | |||
777 | /* flush data cache and inval instruction cache */ | ||
778 | flush_icache_range((unsigned long) vector, (unsigned long) vector + 4); | ||
779 | 807 | ||
780 | /* Put some life in our friend */ | 808 | /* Put some life in our friend */ |
781 | pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0); | 809 | pmac_call_feature(PMAC_FTR_RESET_CPU, NULL, nr, 0); |
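In smp_core99_kick_cpu() the hand-assembled absolute branch (0x48000002 plus the physical target, followed by an explicit icache flush) is replaced by a create_branch() call that, judging by this call site, patches a branch from the reset vector to __secondary_start_pmac_0 and takes care of the cache maintenance itself. For reference, the I-form branch encoding such a helper has to produce is sketched below; the helper shown is illustrative, not the kernel's create_branch().

/* Illustrative encoding of a PowerPC I-form branch: primary opcode 18
 * in the top 6 bits, a 26-bit signed word-aligned displacement, the AA
 * (absolute) bit and the LK ("and link") bit in the low two bits. */
static unsigned int example_branch(unsigned long addr, unsigned long target,
				   int set_link)
{
	long offset = target - addr;	/* relative form, as used above */

	return 0x48000000 | (offset & 0x03fffffc) | (set_link ? 1 : 0);
}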
@@ -805,17 +833,25 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr) | |||
805 | mpic_setup_this_cpu(); | 833 | mpic_setup_this_cpu(); |
806 | 834 | ||
807 | if (cpu_nr == 0) { | 835 | if (cpu_nr == 0) { |
808 | #ifdef CONFIG_POWER4 | 836 | #ifdef CONFIG_PPC64 |
809 | extern void g5_phy_disable_cpu1(void); | 837 | extern void g5_phy_disable_cpu1(void); |
810 | 838 | ||
839 | /* Close i2c bus if it was used for tb sync */ | ||
840 | if (pmac_tb_clock_chip_host) { | ||
841 | pmac_i2c_close(pmac_tb_clock_chip_host); | ||
842 | pmac_tb_clock_chip_host = NULL; | ||
843 | } | ||
844 | |||
811 | /* If we didn't start the second CPU, we must take | 845 | /* If we didn't start the second CPU, we must take |
812 | * it off the bus | 846 | * it off the bus |
813 | */ | 847 | */ |
814 | if (machine_is_compatible("MacRISC4") && | 848 | if (machine_is_compatible("MacRISC4") && |
815 | num_online_cpus() < 2) | 849 | num_online_cpus() < 2) |
816 | g5_phy_disable_cpu1(); | 850 | g5_phy_disable_cpu1(); |
817 | #endif /* CONFIG_POWER4 */ | 851 | #endif /* CONFIG_PPC64 */ |
818 | if (ppc_md.progress) ppc_md.progress("core99_setup_cpu 0 done", 0x349); | 852 | |
853 | if (ppc_md.progress) | ||
854 | ppc_md.progress("core99_setup_cpu 0 done", 0x349); | ||
819 | } | 855 | } |
820 | } | 856 | } |
821 | 857 | ||
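The biggest functional change in smp.c is the shared give/take timebase pair at the top of the Core99 section: instead of one i2c-specific and one GPIO-specific handshake, every sync method now funnels through a single pmac_tb_freeze() hook and a two-word rendezvous between the boot CPU and the secondary. The protocol, stripped of the PowerMac specifics and with illustrative names, looks like this; both sides run with local interrupts disabled, as in the code above, so the window during which the timebase is frozen stays short.

/* Sketch of the tb_req/timebase rendezvous used above. */
static volatile int ex_tb_req;
static volatile u64 ex_timebase;

static void ex_give_timebase(void)		/* boot CPU */
{
	while (!ex_tb_req)			/* wait until the secondary asks */
		barrier();
	ex_tb_req = 0;
	/* freeze the timebase (i2c clock chip, GPIO or platform function) */
	ex_timebase = get_tb();
	mb();
	while (ex_timebase)			/* cleared by the secondary */
		barrier();
	/* unfreeze the timebase */
}

static void ex_take_timebase(void)		/* secondary CPU */
{
	ex_tb_req = 1;
	mb();
	while (!ex_timebase)
		barrier();
	set_tb(ex_timebase >> 32, ex_timebase & 0xffffffff);
	ex_timebase = 0;			/* signal the boot CPU we are done */
	mb();
}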
diff --git a/arch/powerpc/platforms/powermac/time.c b/arch/powerpc/platforms/powermac/time.c index feb0a94e7819..5d9afa1fa02d 100644 --- a/arch/powerpc/platforms/powermac/time.c +++ b/arch/powerpc/platforms/powermac/time.c | |||
@@ -258,15 +258,20 @@ int __init via_calibrate_decr(void) | |||
258 | volatile unsigned char __iomem *via; | 258 | volatile unsigned char __iomem *via; |
259 | int count = VIA_TIMER_FREQ_6 / 100; | 259 | int count = VIA_TIMER_FREQ_6 / 100; |
260 | unsigned int dstart, dend; | 260 | unsigned int dstart, dend; |
261 | struct resource rsrc; | ||
261 | 262 | ||
262 | vias = find_devices("via-cuda"); | 263 | vias = of_find_node_by_name(NULL, "via-cuda"); |
263 | if (vias == 0) | 264 | if (vias == 0) |
264 | vias = find_devices("via-pmu"); | 265 | vias = of_find_node_by_name(NULL, "via-pmu"); |
265 | if (vias == 0) | 266 | if (vias == 0) |
266 | vias = find_devices("via"); | 267 | vias = of_find_node_by_name(NULL, "via"); |
267 | if (vias == 0 || vias->n_addrs == 0) | 268 | if (vias == 0 || of_address_to_resource(vias, 0, &rsrc)) |
268 | return 0; | 269 | return 0; |
269 | via = ioremap(vias->addrs[0].address, vias->addrs[0].size); | 270 | via = ioremap(rsrc.start, rsrc.end - rsrc.start + 1); |
271 | if (via == NULL) { | ||
272 | printk(KERN_ERR "Failed to map VIA for timer calibration !\n"); | ||
273 | return 0; | ||
274 | } | ||
270 | 275 | ||
271 | /* set timer 1 for continuous interrupts */ | 276 | /* set timer 1 for continuous interrupts */ |
272 | out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT); | 277 | out_8(&via[ACR], (via[ACR] & ~T1MODE) | T1MODE_CONT); |
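The via_calibrate_decr() hunk is one instance of a conversion that recurs throughout this series: the old find_devices()/n_addrs way of reaching a node's registers gives way to of_find_node_by_name() plus of_address_to_resource(), with the mapping size taken from the returned resource and the ioremap() result checked. The pattern in isolation, with an invented node name:

/* Sketch of the lookup-and-map pattern; "example-dev" is made up. */
static void __iomem *example_map_regs(void)
{
	struct device_node *np;
	struct resource res;
	void __iomem *regs = NULL;

	np = of_find_node_by_name(NULL, "example-dev");
	if (np && of_address_to_resource(np, 0, &res) == 0)
		regs = ioremap(res.start, res.end - res.start + 1);
	of_node_put(np);	/* of_find_* returned a counted reference */
	return regs;		/* NULL means the node or mapping was missing */
}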
diff --git a/arch/powerpc/platforms/powermac/udbg_adb.c b/arch/powerpc/platforms/powermac/udbg_adb.c new file mode 100644 index 000000000000..06c8265c2baf --- /dev/null +++ b/arch/powerpc/platforms/powermac/udbg_adb.c | |||
@@ -0,0 +1,221 @@ | |||
1 | #include <linux/config.h> | ||
2 | #include <linux/string.h> | ||
3 | #include <linux/kernel.h> | ||
4 | #include <linux/errno.h> | ||
5 | #include <linux/bitops.h> | ||
6 | #include <linux/ptrace.h> | ||
7 | #include <linux/adb.h> | ||
8 | #include <linux/pmu.h> | ||
9 | #include <linux/cuda.h> | ||
10 | #include <asm/machdep.h> | ||
11 | #include <asm/io.h> | ||
12 | #include <asm/page.h> | ||
13 | #include <asm/xmon.h> | ||
14 | #include <asm/prom.h> | ||
15 | #include <asm/bootx.h> | ||
16 | #include <asm/machdep.h> | ||
17 | #include <asm/errno.h> | ||
18 | #include <asm/pmac_feature.h> | ||
19 | #include <asm/processor.h> | ||
20 | #include <asm/delay.h> | ||
21 | #include <asm/btext.h> | ||
22 | #include <asm/time.h> | ||
23 | #include <asm/udbg.h> | ||
24 | |||
25 | /* | ||
26 | * This implementation is "special", it can "patch" the current | ||
27 | * udbg implementation and work on top of it. It must thus be | ||
28 | * initialized last | ||
29 | */ | ||
30 | |||
31 | static void (*udbg_adb_old_putc)(char c); | ||
32 | static int (*udbg_adb_old_getc)(void); | ||
33 | static int (*udbg_adb_old_getc_poll)(void); | ||
34 | |||
35 | static enum { | ||
36 | input_adb_none, | ||
37 | input_adb_pmu, | ||
38 | input_adb_cuda, | ||
39 | } input_type = input_adb_none; | ||
40 | |||
41 | int xmon_wants_key, xmon_adb_keycode; | ||
42 | |||
43 | static inline void udbg_adb_poll(void) | ||
44 | { | ||
45 | #ifdef CONFIG_ADB_PMU | ||
46 | if (input_type == input_adb_pmu) | ||
47 | pmu_poll_adb(); | ||
48 | #endif /* CONFIG_ADB_PMU */ | ||
49 | #ifdef CONFIG_ADB_CUDA | ||
50 | if (input_type == input_adb_cuda) | ||
51 | cuda_poll(); | ||
52 | #endif /* CONFIG_ADB_CUDA */ | ||
53 | } | ||
54 | |||
55 | #ifdef CONFIG_BOOTX_TEXT | ||
56 | |||
57 | static int udbg_adb_use_btext; | ||
58 | static int xmon_adb_shiftstate; | ||
59 | |||
60 | static unsigned char xmon_keytab[128] = | ||
61 | "asdfhgzxcv\000bqwer" /* 0x00 - 0x0f */ | ||
62 | "yt123465=97-80]o" /* 0x10 - 0x1f */ | ||
63 | "u[ip\rlj'k;\\,/nm." /* 0x20 - 0x2f */ | ||
64 | "\t `\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */ | ||
65 | "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */ | ||
66 | "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */ | ||
67 | |||
68 | static unsigned char xmon_shift_keytab[128] = | ||
69 | "ASDFHGZXCV\000BQWER" /* 0x00 - 0x0f */ | ||
70 | "YT!@#$^%+(&_*)}O" /* 0x10 - 0x1f */ | ||
71 | "U{IP\rLJ\"K:|<?NM>" /* 0x20 - 0x2f */ | ||
72 | "\t ~\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */ | ||
73 | "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */ | ||
74 | "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */ | ||
75 | |||
76 | static int udbg_adb_local_getc(void) | ||
77 | { | ||
78 | int k, t, on; | ||
79 | |||
80 | xmon_wants_key = 1; | ||
81 | for (;;) { | ||
82 | xmon_adb_keycode = -1; | ||
83 | t = 0; | ||
84 | on = 0; | ||
85 | k = -1; | ||
86 | do { | ||
87 | if (--t < 0) { | ||
88 | on = 1 - on; | ||
89 | btext_drawchar(on? 0xdb: 0x20); | ||
90 | btext_drawchar('\b'); | ||
91 | t = 200000; | ||
92 | } | ||
93 | udbg_adb_poll(); | ||
94 | if (udbg_adb_old_getc_poll) | ||
95 | k = udbg_adb_old_getc_poll(); | ||
96 | } while (k == -1 && xmon_adb_keycode == -1); | ||
97 | if (on) | ||
98 | btext_drawstring(" \b"); | ||
99 | if (k != -1) | ||
100 | return k; | ||
101 | k = xmon_adb_keycode; | ||
102 | |||
103 | /* test for shift keys */ | ||
104 | if ((k & 0x7f) == 0x38 || (k & 0x7f) == 0x7b) { | ||
105 | xmon_adb_shiftstate = (k & 0x80) == 0; | ||
106 | continue; | ||
107 | } | ||
108 | if (k >= 0x80) | ||
109 | continue; /* ignore up transitions */ | ||
110 | k = (xmon_adb_shiftstate? xmon_shift_keytab: xmon_keytab)[k]; | ||
111 | if (k != 0) | ||
112 | break; | ||
113 | } | ||
114 | xmon_wants_key = 0; | ||
115 | return k; | ||
116 | } | ||
117 | #endif /* CONFIG_BOOTX_TEXT */ | ||
118 | |||
119 | static int udbg_adb_getc(void) | ||
120 | { | ||
121 | #ifdef CONFIG_BOOTX_TEXT | ||
122 | if (udbg_adb_use_btext && input_type != input_adb_none) | ||
123 | return udbg_adb_local_getc(); | ||
124 | #endif | ||
125 | if (udbg_adb_old_getc) | ||
126 | return udbg_adb_old_getc(); | ||
127 | return -1; | ||
128 | } | ||
129 | |||
130 | /* getc_poll() is not really used, unless you have the xmon-over-modem ||
131 | * hack that doesn't quite concern us here, thus we just poll the low level | ||
132 | * ADB driver to prevent it from timing out and call back the original poll | ||
133 | * routine. | ||
134 | */ | ||
135 | static int udbg_adb_getc_poll(void) | ||
136 | { | ||
137 | udbg_adb_poll(); | ||
138 | |||
139 | if (udbg_adb_old_getc_poll) | ||
140 | return udbg_adb_old_getc_poll(); | ||
141 | return -1; | ||
142 | } | ||
143 | |||
144 | static void udbg_adb_putc(char c) | ||
145 | { | ||
146 | #ifdef CONFIG_BOOTX_TEXT | ||
147 | if (udbg_adb_use_btext) | ||
148 | btext_drawchar(c); | ||
149 | #endif | ||
150 | if (udbg_adb_old_putc) | ||
151 | return udbg_adb_old_putc(c); | ||
152 | } | ||
153 | |||
154 | void udbg_adb_init_early(void) | ||
155 | { | ||
156 | #ifdef CONFIG_BOOTX_TEXT | ||
157 | if (btext_find_display(1) == 0) { | ||
158 | udbg_adb_use_btext = 1; | ||
159 | udbg_putc = udbg_adb_putc; | ||
160 | } | ||
161 | #endif | ||
162 | } | ||
163 | |||
164 | int udbg_adb_init(int force_btext) | ||
165 | { | ||
166 | struct device_node *np; | ||
167 | |||
168 | /* Capture existing callbacks */ | ||
169 | udbg_adb_old_putc = udbg_putc; | ||
170 | udbg_adb_old_getc = udbg_getc; | ||
171 | udbg_adb_old_getc_poll = udbg_getc_poll; | ||
172 | |||
173 | /* Check if our early init was already called */ | ||
174 | if (udbg_adb_old_putc == udbg_adb_putc) | ||
175 | udbg_adb_old_putc = NULL; | ||
176 | #ifdef CONFIG_BOOTX_TEXT | ||
177 | if (udbg_adb_old_putc == btext_drawchar) | ||
178 | udbg_adb_old_putc = NULL; | ||
179 | #endif | ||
180 | |||
181 | /* Set ours as output */ | ||
182 | udbg_putc = udbg_adb_putc; | ||
183 | udbg_getc = udbg_adb_getc; | ||
184 | udbg_getc_poll = udbg_adb_getc_poll; | ||
185 | |||
186 | #ifdef CONFIG_BOOTX_TEXT | ||
187 | /* Check if we should use btext output */ | ||
188 | if (btext_find_display(force_btext) == 0) | ||
189 | udbg_adb_use_btext = 1; | ||
190 | #endif | ||
191 | |||
192 | /* See if there is a keyboard in the device tree with a parent | ||
193 | * of type "adb". If not, we return a failure, but we keep the | ||
194 | * btext output set for now ||
195 | */ | ||
196 | for (np = NULL; (np = of_find_node_by_name(np, "keyboard")) != NULL;) { | ||
197 | struct device_node *parent = of_get_parent(np); | ||
198 | int found = (parent && strcmp(parent->type, "adb") == 0); | ||
199 | of_node_put(parent); | ||
200 | if (found) | ||
201 | break; | ||
202 | } | ||
203 | if (np == NULL) | ||
204 | return -ENODEV; | ||
205 | of_node_put(np); | ||
206 | |||
207 | #ifdef CONFIG_ADB_PMU | ||
208 | if (find_via_pmu()) | ||
209 | input_type = input_adb_pmu; | ||
210 | #endif | ||
211 | #ifdef CONFIG_ADB_CUDA | ||
212 | if (find_via_cuda()) | ||
213 | input_type = input_adb_cuda; | ||
214 | #endif | ||
215 | |||
216 | /* Same as above: nothing found, keep btext set for output */ | ||
217 | if (input_type == input_adb_none) | ||
218 | return -ENODEV; | ||
219 | |||
220 | return 0; | ||
221 | } | ||
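As its header comment says, the new udbg_adb backend stacks on top of whatever udbg backend is already installed: udbg_adb_init() saves the current function pointers, installs its own, and falls back to the saved ones when btext or ADB has nothing to offer. The chaining pattern on its own, with invented names:

/* Illustrative hook chaining, the same shape as udbg_adb_init() above. */
static void (*ex_old_putc)(char c);

static void ex_chained_putc(char c)
{
	/* this backend's own output first (btext_drawchar() in the real code) */
	if (ex_old_putc)
		ex_old_putc(c);		/* then the previously installed backend */
}

static void ex_install(void)
{
	ex_old_putc = udbg_putc;
	if (ex_old_putc == ex_chained_putc)	/* early init already ran */
		ex_old_putc = NULL;
	udbg_putc = ex_chained_putc;
}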
diff --git a/arch/powerpc/kernel/udbg_scc.c b/arch/powerpc/platforms/powermac/udbg_scc.c index 820c53551507..e87d53acfb61 100644 --- a/arch/powerpc/kernel/udbg_scc.c +++ b/arch/powerpc/platforms/powermac/udbg_scc.c | |||
@@ -25,7 +25,7 @@ extern void real_writeb(u8 data, volatile u8 __iomem *addr); | |||
25 | static volatile u8 __iomem *sccc; | 25 | static volatile u8 __iomem *sccc; |
26 | static volatile u8 __iomem *sccd; | 26 | static volatile u8 __iomem *sccd; |
27 | 27 | ||
28 | static void udbg_scc_putc(unsigned char c) | 28 | static void udbg_scc_putc(char c) |
29 | { | 29 | { |
30 | if (sccc) { | 30 | if (sccc) { |
31 | while ((in_8(sccc) & SCC_TXRDY) == 0) | 31 | while ((in_8(sccc) & SCC_TXRDY) == 0) |
@@ -47,14 +47,14 @@ static int udbg_scc_getc_poll(void) | |||
47 | return -1; | 47 | return -1; |
48 | } | 48 | } |
49 | 49 | ||
50 | static unsigned char udbg_scc_getc(void) | 50 | static int udbg_scc_getc(void) |
51 | { | 51 | { |
52 | if (sccc) { | 52 | if (sccc) { |
53 | while ((in_8(sccc) & SCC_RXRDY) == 0) | 53 | while ((in_8(sccc) & SCC_RXRDY) == 0) |
54 | ; | 54 | ; |
55 | return in_8(sccd); | 55 | return in_8(sccd); |
56 | } | 56 | } |
57 | return 0; | 57 | return -1; |
58 | } | 58 | } |
59 | 59 | ||
60 | static unsigned char scc_inittab[] = { | 60 | static unsigned char scc_inittab[] = { |
@@ -67,38 +67,59 @@ static unsigned char scc_inittab[] = { | |||
67 | 3, 0xc1, /* rx enable, 8 bits */ | 67 | 3, 0xc1, /* rx enable, 8 bits */ |
68 | }; | 68 | }; |
69 | 69 | ||
70 | void udbg_init_scc(struct device_node *np) | 70 | void udbg_scc_init(int force_scc) |
71 | { | 71 | { |
72 | u32 *reg; | 72 | u32 *reg; |
73 | unsigned long addr; | 73 | unsigned long addr; |
74 | struct device_node *stdout = NULL, *escc = NULL, *macio = NULL; | ||
75 | struct device_node *ch, *ch_def = NULL, *ch_a = NULL; | ||
76 | char *path; | ||
74 | int i, x; | 77 | int i, x; |
75 | 78 | ||
76 | if (np == NULL) | 79 | escc = of_find_node_by_name(NULL, "escc"); |
77 | np = of_find_node_by_name(NULL, "escc"); | 80 | if (escc == NULL) |
78 | if (np == NULL || np->parent == NULL) | 81 | goto bail; |
79 | return; | 82 | macio = of_get_parent(escc); |
83 | if (macio == NULL) | ||
84 | goto bail; | ||
85 | path = (char *)get_property(of_chosen, "linux,stdout-path", NULL); | ||
86 | if (path != NULL) | ||
87 | stdout = of_find_node_by_path(path); | ||
88 | for (ch = NULL; (ch = of_get_next_child(escc, ch)) != NULL;) { | ||
89 | if (ch == stdout) | ||
90 | ch_def = of_node_get(ch); | ||
91 | if (strcmp(ch->name, "ch-a") == 0) | ||
92 | ch_a = of_node_get(ch); | ||
93 | } | ||
94 | if (ch_def == NULL && !force_scc) | ||
95 | goto bail; | ||
96 | |||
97 | ch = ch_def ? ch_def : ch_a; | ||
80 | 98 | ||
81 | udbg_printf("found SCC...\n"); | ||
82 | /* Get address within mac-io ASIC */ | 99 | /* Get address within mac-io ASIC */ |
83 | reg = (u32 *)get_property(np, "reg", NULL); | 100 | reg = (u32 *)get_property(escc, "reg", NULL); |
84 | if (reg == NULL) | 101 | if (reg == NULL) |
85 | return; | 102 | goto bail; |
86 | addr = reg[0]; | 103 | addr = reg[0]; |
87 | udbg_printf("local addr: %lx\n", addr); | 104 | |
88 | /* Get address of mac-io PCI itself */ | 105 | /* Get address of mac-io PCI itself */ |
89 | reg = (u32 *)get_property(np->parent, "assigned-addresses", NULL); | 106 | reg = (u32 *)get_property(macio, "assigned-addresses", NULL); |
90 | if (reg == NULL) | 107 | if (reg == NULL) |
91 | return; | 108 | goto bail; |
92 | addr += reg[2]; | 109 | addr += reg[2]; |
93 | udbg_printf("final addr: %lx\n", addr); | 110 | |
111 | /* Lock the serial port */ | ||
112 | pmac_call_feature(PMAC_FTR_SCC_ENABLE, ch, | ||
113 | PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1); | ||
114 | |||
94 | 115 | ||
95 | /* Setup for 57600 8N1 */ | 116 | /* Setup for 57600 8N1 */ |
96 | addr += 0x20; | 117 | if (ch == ch_a) |
118 | addr += 0x20; | ||
97 | sccc = (volatile u8 * __iomem) ioremap(addr & PAGE_MASK, PAGE_SIZE) ; | 119 | sccc = (volatile u8 * __iomem) ioremap(addr & PAGE_MASK, PAGE_SIZE) ; |
98 | sccc += addr & ~PAGE_MASK; | 120 | sccc += addr & ~PAGE_MASK; |
99 | sccd = sccc + 0x10; | 121 | sccd = sccc + 0x10; |
100 | 122 | ||
101 | udbg_printf("ioremap result sccc: %p\n", sccc); | ||
102 | mb(); | 123 | mb(); |
103 | 124 | ||
104 | for (i = 20000; i != 0; --i) | 125 | for (i = 20000; i != 0; --i) |
@@ -113,9 +134,17 @@ void udbg_init_scc(struct device_node *np) | |||
113 | udbg_getc_poll = udbg_scc_getc_poll; | 134 | udbg_getc_poll = udbg_scc_getc_poll; |
114 | 135 | ||
115 | udbg_puts("Hello World !\n"); | 136 | udbg_puts("Hello World !\n"); |
137 | |||
138 | bail: | ||
139 | of_node_put(macio); | ||
140 | of_node_put(escc); | ||
141 | of_node_put(stdout); | ||
142 | of_node_put(ch_def); | ||
143 | of_node_put(ch_a); | ||
116 | } | 144 | } |
117 | 145 | ||
118 | static void udbg_real_scc_putc(unsigned char c) | 146 | #ifdef CONFIG_PPC64 |
147 | static void udbg_real_scc_putc(char c) | ||
119 | { | 148 | { |
120 | while ((real_readb(sccc) & SCC_TXRDY) == 0) | 149 | while ((real_readb(sccc) & SCC_TXRDY) == 0) |
121 | ; | 150 | ; |
@@ -133,3 +162,4 @@ void udbg_init_pmac_realmode(void) | |||
133 | udbg_getc = NULL; | 162 | udbg_getc = NULL; |
134 | udbg_getc_poll = NULL; | 163 | udbg_getc_poll = NULL; |
135 | } | 164 | } |
165 | #endif /* CONFIG_PPC64 */ | ||
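udbg_scc_init() above and find_udbg_vterm() in the pseries code further down share the same firmware handshake for locating the boot console: read linux,stdout-path from /chosen, resolve it to a device node, then decide which SCC channel or virtual terminal it names. Just that lookup, as a sketch:

/* Sketch of the /chosen stdout lookup used by both udbg setups. */
static struct device_node *example_find_stdout(void)
{
	struct device_node *stdout = NULL;
	char *path;

	if (of_chosen == NULL)
		return NULL;
	path = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
	if (path != NULL)
		stdout = of_find_node_by_path(path);
	return stdout;		/* caller does of_node_put() when finished */
}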
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile index 06d5ef501218..6accdd155505 100644 --- a/arch/powerpc/platforms/pseries/Makefile +++ b/arch/powerpc/platforms/pseries/Makefile | |||
@@ -1,5 +1,5 @@ | |||
1 | obj-y := pci.o lpar.o hvCall.o nvram.o reconfig.o \ | 1 | obj-y := pci.o lpar.o hvCall.o nvram.o reconfig.o \ |
2 | setup.o iommu.o ras.o rtasd.o | 2 | setup.o iommu.o ras.o rtasd.o pci_dlpar.o |
3 | obj-$(CONFIG_SMP) += smp.o | 3 | obj-$(CONFIG_SMP) += smp.o |
4 | obj-$(CONFIG_IBMVIO) += vio.o | 4 | obj-$(CONFIG_IBMVIO) += vio.o |
5 | obj-$(CONFIG_XICS) += xics.o | 5 | obj-$(CONFIG_XICS) += xics.o |
diff --git a/arch/powerpc/platforms/pseries/eeh.c b/arch/powerpc/platforms/pseries/eeh.c index c8d2a40dc5b4..7fbfd16d72b7 100644 --- a/arch/powerpc/platforms/pseries/eeh.c +++ b/arch/powerpc/platforms/pseries/eeh.c | |||
@@ -1093,6 +1093,15 @@ void eeh_add_device_early(struct device_node *dn) | |||
1093 | } | 1093 | } |
1094 | EXPORT_SYMBOL_GPL(eeh_add_device_early); | 1094 | EXPORT_SYMBOL_GPL(eeh_add_device_early); |
1095 | 1095 | ||
1096 | void eeh_add_device_tree_early(struct device_node *dn) | ||
1097 | { | ||
1098 | struct device_node *sib; | ||
1099 | for (sib = dn->child; sib; sib = sib->sibling) | ||
1100 | eeh_add_device_tree_early(sib); | ||
1101 | eeh_add_device_early(dn); | ||
1102 | } | ||
1103 | EXPORT_SYMBOL_GPL(eeh_add_device_tree_early); | ||
1104 | |||
1096 | /** | 1105 | /** |
1097 | * eeh_add_device_late - perform EEH initialization for the indicated pci device | 1106 | * eeh_add_device_late - perform EEH initialization for the indicated pci device |
1098 | * @dev: pci device for which to set up EEH | 1107 | * @dev: pci device for which to set up EEH |
@@ -1147,6 +1156,23 @@ void eeh_remove_device(struct pci_dev *dev) | |||
1147 | } | 1156 | } |
1148 | EXPORT_SYMBOL_GPL(eeh_remove_device); | 1157 | EXPORT_SYMBOL_GPL(eeh_remove_device); |
1149 | 1158 | ||
1159 | void eeh_remove_bus_device(struct pci_dev *dev) | ||
1160 | { | ||
1161 | eeh_remove_device(dev); | ||
1162 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) { | ||
1163 | struct pci_bus *bus = dev->subordinate; | ||
1164 | struct list_head *ln; | ||
1165 | if (!bus) | ||
1166 | return; | ||
1167 | for (ln = bus->devices.next; ln != &bus->devices; ln = ln->next) { | ||
1168 | struct pci_dev *pdev = pci_dev_b(ln); | ||
1169 | if (pdev) | ||
1170 | eeh_remove_bus_device(pdev); | ||
1171 | } | ||
1172 | } | ||
1173 | } | ||
1174 | EXPORT_SYMBOL_GPL(eeh_remove_bus_device); | ||
1175 | |||
1150 | static int proc_eeh_show(struct seq_file *m, void *v) | 1176 | static int proc_eeh_show(struct seq_file *m, void *v) |
1151 | { | 1177 | { |
1152 | unsigned int cpu; | 1178 | unsigned int cpu; |
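The two helpers added to eeh.c do nothing new per device; they apply the existing eeh_add_device_early()/eeh_remove_device() hooks recursively, one across a device-tree subtree and one across a bridge's subordinate bus, so a whole hot-plugged slot can be registered or torn down in one call from the DLPAR code added later in this series. The traversal shape on its own:

/* Illustrative depth-first walk over a device-tree subtree, the same
 * shape as eeh_add_device_tree_early() above. */
static void example_walk(struct device_node *dn,
			 void (*fn)(struct device_node *))
{
	struct device_node *sib;

	for (sib = dn->child; sib; sib = sib->sibling)
		example_walk(sib, fn);	/* recurse into children first */
	fn(dn);				/* then handle the node itself */
}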
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c index 2043659ea7b1..169f9148789c 100644 --- a/arch/powerpc/platforms/pseries/iommu.c +++ b/arch/powerpc/platforms/pseries/iommu.c | |||
@@ -436,7 +436,7 @@ static void iommu_bus_setup_pSeriesLP(struct pci_bus *bus) | |||
436 | return; | 436 | return; |
437 | } | 437 | } |
438 | 438 | ||
439 | ppci = pdn->data; | 439 | ppci = PCI_DN(pdn); |
440 | if (!ppci->iommu_table) { | 440 | if (!ppci->iommu_table) { |
441 | /* Bussubno hasn't been copied yet. | 441 | /* Bussubno hasn't been copied yet. |
442 | * Do it now because iommu_table_setparms_lpar needs it. | 442 | * Do it now because iommu_table_setparms_lpar needs it. |
@@ -483,10 +483,10 @@ static void iommu_dev_setup_pSeries(struct pci_dev *dev) | |||
483 | * an already allocated iommu table is found and use that. | 483 | * an already allocated iommu table is found and use that. |
484 | */ | 484 | */ |
485 | 485 | ||
486 | while (dn && dn->data && PCI_DN(dn)->iommu_table == NULL) | 486 | while (dn && PCI_DN(dn) && PCI_DN(dn)->iommu_table == NULL) |
487 | dn = dn->parent; | 487 | dn = dn->parent; |
488 | 488 | ||
489 | if (dn && dn->data) { | 489 | if (dn && PCI_DN(dn)) { |
490 | PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table; | 490 | PCI_DN(mydn)->iommu_table = PCI_DN(dn)->iommu_table; |
491 | } else { | 491 | } else { |
492 | DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, pci_name(dev)); | 492 | DBG("iommu_dev_setup_pSeries, dev %p (%s) has no iommu table\n", dev, pci_name(dev)); |
@@ -497,7 +497,7 @@ static int iommu_reconfig_notifier(struct notifier_block *nb, unsigned long acti | |||
497 | { | 497 | { |
498 | int err = NOTIFY_OK; | 498 | int err = NOTIFY_OK; |
499 | struct device_node *np = node; | 499 | struct device_node *np = node; |
500 | struct pci_dn *pci = np->data; | 500 | struct pci_dn *pci = PCI_DN(np); |
501 | 501 | ||
502 | switch (action) { | 502 | switch (action) { |
503 | case PSERIES_RECONFIG_REMOVE: | 503 | case PSERIES_RECONFIG_REMOVE: |
@@ -533,7 +533,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev) | |||
533 | */ | 533 | */ |
534 | dn = pci_device_to_OF_node(dev); | 534 | dn = pci_device_to_OF_node(dev); |
535 | 535 | ||
536 | for (pdn = dn; pdn && pdn->data && !PCI_DN(pdn)->iommu_table; | 536 | for (pdn = dn; pdn && PCI_DN(pdn) && !PCI_DN(pdn)->iommu_table; |
537 | pdn = pdn->parent) { | 537 | pdn = pdn->parent) { |
538 | dma_window = (unsigned int *) | 538 | dma_window = (unsigned int *) |
539 | get_property(pdn, "ibm,dma-window", NULL); | 539 | get_property(pdn, "ibm,dma-window", NULL); |
@@ -552,7 +552,7 @@ static void iommu_dev_setup_pSeriesLP(struct pci_dev *dev) | |||
552 | DBG("Found DMA window, allocating table\n"); | 552 | DBG("Found DMA window, allocating table\n"); |
553 | } | 553 | } |
554 | 554 | ||
555 | pci = pdn->data; | 555 | pci = PCI_DN(pdn); |
556 | if (!pci->iommu_table) { | 556 | if (!pci->iommu_table) { |
557 | /* iommu_table_setparms_lpar needs bussubno. */ | 557 | /* iommu_table_setparms_lpar needs bussubno. */ |
558 | pci->bussubno = pci->phb->bus->number; | 558 | pci->bussubno = pci->phb->bus->number; |
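The iommu.c hunks replace direct dn->data dereferences with the PCI_DN() accessor. For these call sites PCI_DN() is essentially a typed view of the node's data pointer, so the change is about going through one accessor rather than open-coding the cast; the macro sketched below is an assumption about its shape, the real definition lives in asm/pci-bridge.h.

/* Assumed shape of the accessor (see asm/pci-bridge.h for the real one). */
#define EXAMPLE_PCI_DN(dn)	((struct pci_dn *)(dn)->data)

/* Typical use after the conversion: walk up until a parent with an
 * iommu table is found, as iommu_dev_setup_pSeries() does above. */
static struct iommu_table *example_find_table(struct device_node *dn)
{
	while (dn && EXAMPLE_PCI_DN(dn) && EXAMPLE_PCI_DN(dn)->iommu_table == NULL)
		dn = dn->parent;
	return (dn && EXAMPLE_PCI_DN(dn)) ? EXAMPLE_PCI_DN(dn)->iommu_table : NULL;
}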
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index cf1bc11b3346..1fe445ab78a6 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/config.h> | 24 | #include <linux/config.h> |
25 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
26 | #include <linux/dma-mapping.h> | 26 | #include <linux/dma-mapping.h> |
27 | #include <linux/console.h> | ||
27 | #include <asm/processor.h> | 28 | #include <asm/processor.h> |
28 | #include <asm/mmu.h> | 29 | #include <asm/mmu.h> |
29 | #include <asm/page.h> | 30 | #include <asm/page.h> |
@@ -60,7 +61,7 @@ extern void pSeries_find_serial_port(void); | |||
60 | int vtermno; /* virtual terminal# for udbg */ | 61 | int vtermno; /* virtual terminal# for udbg */ |
61 | 62 | ||
62 | #define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) | 63 | #define __ALIGNED__ __attribute__((__aligned__(sizeof(long)))) |
63 | static void udbg_hvsi_putc(unsigned char c) | 64 | static void udbg_hvsi_putc(char c) |
64 | { | 65 | { |
65 | /* packet's seqno isn't used anyways */ | 66 | /* packet's seqno isn't used anyways */ |
66 | uint8_t packet[] __ALIGNED__ = { 0xff, 5, 0, 0, c }; | 67 | uint8_t packet[] __ALIGNED__ = { 0xff, 5, 0, 0, c }; |
@@ -111,7 +112,7 @@ static int udbg_hvsi_getc_poll(void) | |||
111 | return ch; | 112 | return ch; |
112 | } | 113 | } |
113 | 114 | ||
114 | static unsigned char udbg_hvsi_getc(void) | 115 | static int udbg_hvsi_getc(void) |
115 | { | 116 | { |
116 | int ch; | 117 | int ch; |
117 | for (;;) { | 118 | for (;;) { |
@@ -127,7 +128,7 @@ static unsigned char udbg_hvsi_getc(void) | |||
127 | } | 128 | } |
128 | } | 129 | } |
129 | 130 | ||
130 | static void udbg_putcLP(unsigned char c) | 131 | static void udbg_putcLP(char c) |
131 | { | 132 | { |
132 | char buf[16]; | 133 | char buf[16]; |
133 | unsigned long rc; | 134 | unsigned long rc; |
@@ -172,7 +173,7 @@ static int udbg_getc_pollLP(void) | |||
172 | return ch; | 173 | return ch; |
173 | } | 174 | } |
174 | 175 | ||
175 | static unsigned char udbg_getcLP(void) | 176 | static int udbg_getcLP(void) |
176 | { | 177 | { |
177 | int ch; | 178 | int ch; |
178 | for (;;) { | 179 | for (;;) { |
@@ -191,7 +192,7 @@ static unsigned char udbg_getcLP(void) | |||
191 | /* call this from early_init() for a working debug console on | 192 | /* call this from early_init() for a working debug console on |
192 | * vterm capable LPAR machines | 193 | * vterm capable LPAR machines |
193 | */ | 194 | */ |
194 | void udbg_init_debug_lpar(void) | 195 | void __init udbg_init_debug_lpar(void) |
195 | { | 196 | { |
196 | vtermno = 0; | 197 | vtermno = 0; |
197 | udbg_putc = udbg_putcLP; | 198 | udbg_putc = udbg_putcLP; |
@@ -200,63 +201,54 @@ void udbg_init_debug_lpar(void) | |||
200 | } | 201 | } |
201 | 202 | ||
202 | /* returns 0 if couldn't find or use /chosen/stdout as console */ | 203 | /* returns 0 if couldn't find or use /chosen/stdout as console */ |
203 | int find_udbg_vterm(void) | 204 | void __init find_udbg_vterm(void) |
204 | { | 205 | { |
205 | struct device_node *stdout_node; | 206 | struct device_node *stdout_node; |
206 | u32 *termno; | 207 | u32 *termno; |
207 | char *name; | 208 | char *name; |
208 | int found = 0; | 209 | int add_console; |
209 | 210 | ||
210 | /* find the boot console from /chosen/stdout */ | 211 | /* find the boot console from /chosen/stdout */ |
211 | if (!of_chosen) | 212 | if (!of_chosen) |
212 | return 0; | 213 | return; |
213 | name = (char *)get_property(of_chosen, "linux,stdout-path", NULL); | 214 | name = (char *)get_property(of_chosen, "linux,stdout-path", NULL); |
214 | if (name == NULL) | 215 | if (name == NULL) |
215 | return 0; | 216 | return; |
216 | stdout_node = of_find_node_by_path(name); | 217 | stdout_node = of_find_node_by_path(name); |
217 | if (!stdout_node) | 218 | if (!stdout_node) |
218 | return 0; | 219 | return; |
219 | |||
220 | /* now we have the stdout node; figure out what type of device it is. */ | ||
221 | name = (char *)get_property(stdout_node, "name", NULL); | 220 | name = (char *)get_property(stdout_node, "name", NULL); |
222 | if (!name) { | 221 | if (!name) { |
223 | printk(KERN_WARNING "stdout node missing 'name' property!\n"); | 222 | printk(KERN_WARNING "stdout node missing 'name' property!\n"); |
224 | goto out; | 223 | goto out; |
225 | } | 224 | } |
225 | /* The user has requested a console so this is already set up. */ | ||
226 | add_console = !strstr(cmd_line, "console="); | ||
226 | 227 | ||
227 | if (strncmp(name, "vty", 3) == 0) { | 228 | /* Check if it's a virtual terminal */ |
228 | if (device_is_compatible(stdout_node, "hvterm1")) { | 229 | if (strncmp(name, "vty", 3) != 0) |
229 | termno = (u32 *)get_property(stdout_node, "reg", NULL); | 230 | goto out; |
230 | if (termno) { | 231 | termno = (u32 *)get_property(stdout_node, "reg", NULL); |
231 | vtermno = termno[0]; | 232 | if (termno == NULL) |
232 | udbg_putc = udbg_putcLP; | 233 | goto out; |
233 | udbg_getc = udbg_getcLP; | 234 | vtermno = termno[0]; |
234 | udbg_getc_poll = udbg_getc_pollLP; | 235 | |
235 | found = 1; | 236 | if (device_is_compatible(stdout_node, "hvterm1")) { |
236 | } | 237 | udbg_putc = udbg_putcLP; |
237 | } else if (device_is_compatible(stdout_node, "hvterm-protocol")) { | 238 | udbg_getc = udbg_getcLP; |
238 | termno = (u32 *)get_property(stdout_node, "reg", NULL); | 239 | udbg_getc_poll = udbg_getc_pollLP; |
239 | if (termno) { | 240 | if (add_console) |
240 | vtermno = termno[0]; | 241 | add_preferred_console("hvc", termno[0] & 0xff, NULL); |
241 | udbg_putc = udbg_hvsi_putc; | 242 | } else if (device_is_compatible(stdout_node, "hvterm-protocol")) { |
242 | udbg_getc = udbg_hvsi_getc; | 243 | vtermno = termno[0]; |
243 | udbg_getc_poll = udbg_hvsi_getc_poll; | 244 | udbg_putc = udbg_hvsi_putc; |
244 | found = 1; | 245 | udbg_getc = udbg_hvsi_getc; |
245 | } | 246 | udbg_getc_poll = udbg_hvsi_getc_poll; |
246 | } | 247 | if (add_console) |
247 | } else if (strncmp(name, "serial", 6)) { | 248 | add_preferred_console("hvsi", termno[0] & 0xff, NULL); |
248 | /* XXX fix ISA serial console */ | ||
249 | printk(KERN_WARNING "serial stdout on LPAR ('%s')! " | ||
250 | "can't print udbg messages\n", | ||
251 | stdout_node->full_name); | ||
252 | } else { | ||
253 | printk(KERN_WARNING "don't know how to print to stdout '%s'\n", | ||
254 | stdout_node->full_name); | ||
255 | } | 249 | } |
256 | |||
257 | out: | 250 | out: |
258 | of_node_put(stdout_node); | 251 | of_node_put(stdout_node); |
259 | return found; | ||
260 | } | 252 | } |
261 | 253 | ||
262 | void vpa_init(int cpu) | 254 | void vpa_init(int cpu) |
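A detail running through lpar.c and the udbg files above: the low-level debug callbacks are being normalized so that putc takes a plain char and the getc variants return an int, with -1 meaning "no character pending", which is what lets callers poll without blocking. A backend conforming to that (assumed) contract looks roughly like the sketch below; the device I/O itself is left as comments.

/* Minimal udbg backend sketch. */
static void example_putc(char c)
{
	/* write c to the device, e.g. a hypervisor call or an MMIO register */
}

static int example_getc_poll(void)
{
	return -1;			/* nothing pending */
}

static int example_getc(void)
{
	int c;

	while ((c = example_getc_poll()) == -1)
		;			/* spin until a character arrives */
	return c;
}

static void __init example_udbg_init(void)
{
	udbg_putc = example_putc;
	udbg_getc = example_getc;
	udbg_getc_poll = example_getc_poll;
}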
diff --git a/arch/powerpc/platforms/pseries/pci_dlpar.c b/arch/powerpc/platforms/pseries/pci_dlpar.c new file mode 100644 index 000000000000..21934784f936 --- /dev/null +++ b/arch/powerpc/platforms/pseries/pci_dlpar.c | |||
@@ -0,0 +1,174 @@ | |||
1 | /* | ||
2 | * PCI Dynamic LPAR, PCI Hot Plug and PCI EEH recovery code | ||
3 | * for RPA-compliant PPC64 platform. | ||
4 | * Copyright (C) 2003 Linda Xie <lxie@us.ibm.com> | ||
5 | * Copyright (C) 2005 International Business Machines | ||
6 | * | ||
7 | * Updates, 2005, John Rose <johnrose@austin.ibm.com> | ||
8 | * Updates, 2005, Linas Vepstas <linas@austin.ibm.com> | ||
9 | * | ||
10 | * All rights reserved. | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or (at | ||
15 | * your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, but | ||
18 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or | ||
20 | * NON INFRINGEMENT. See the GNU General Public License for more | ||
21 | * details. | ||
22 | * | ||
23 | * You should have received a copy of the GNU General Public License | ||
24 | * along with this program; if not, write to the Free Software | ||
25 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
26 | */ | ||
27 | |||
28 | #include <linux/pci.h> | ||
29 | #include <asm/pci-bridge.h> | ||
30 | |||
31 | static struct pci_bus * | ||
32 | find_bus_among_children(struct pci_bus *bus, | ||
33 | struct device_node *dn) | ||
34 | { | ||
35 | struct pci_bus *child = NULL; | ||
36 | struct list_head *tmp; | ||
37 | struct device_node *busdn; | ||
38 | |||
39 | busdn = pci_bus_to_OF_node(bus); | ||
40 | if (busdn == dn) | ||
41 | return bus; | ||
42 | |||
43 | list_for_each(tmp, &bus->children) { | ||
44 | child = find_bus_among_children(pci_bus_b(tmp), dn); | ||
45 | if (child) | ||
46 | break; | ||
47 | }; | ||
48 | return child; | ||
49 | } | ||
50 | |||
51 | struct pci_bus * | ||
52 | pcibios_find_pci_bus(struct device_node *dn) | ||
53 | { | ||
54 | struct pci_dn *pdn = dn->data; | ||
55 | |||
56 | if (!pdn || !pdn->phb || !pdn->phb->bus) | ||
57 | return NULL; | ||
58 | |||
59 | return find_bus_among_children(pdn->phb->bus, dn); | ||
60 | } | ||
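A standalone sketch of the recursive lookup in find_bus_among_children(): a depth-first search of the bus tree for the bus whose firmware node matches the requested device node. The fake_bus type and its fixed child array are simplified stand-ins, not the kernel's pci_bus child list.

/* Standalone sketch of the depth-first bus lookup above. */
#include <stdio.h>
#include <stddef.h>

struct fake_bus {
    const void *of_node;          /* firmware node this bus corresponds to */
    struct fake_bus *children[4]; /* NULL-terminated child list */
};

static struct fake_bus *find_bus(struct fake_bus *bus, const void *dn)
{
    int i;

    if (bus->of_node == dn)
        return bus;               /* match at this level */

    for (i = 0; i < 4 && bus->children[i]; i++) {
        struct fake_bus *child = find_bus(bus->children[i], dn);
        if (child)
            return child;         /* first match in any subtree wins */
    }
    return NULL;
}

int main(void)
{
    int dn_a, dn_b;
    struct fake_bus leaf = { &dn_b, { NULL } };
    struct fake_bus root = { &dn_a, { &leaf, NULL } };

    printf("found %p, expected %p\n",
           (void *)find_bus(&root, &dn_b), (void *)&leaf);
    return 0;
}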
61 | |||
62 | /** | ||
63 | * pcibios_remove_pci_devices - remove all devices under this bus | ||
64 | * | ||
65 | * Remove all of the PCI devices under this bus both from the | ||
66 | * linux pci device tree, and from the powerpc EEH address cache. | ||
67 | */ | ||
68 | void | ||
69 | pcibios_remove_pci_devices(struct pci_bus *bus) | ||
70 | { | ||
71 | struct pci_dev *dev, *tmp; | ||
72 | |||
73 | list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { | ||
74 | eeh_remove_bus_device(dev); | ||
75 | pci_remove_bus_device(dev); | ||
76 | } | ||
77 | } | ||
78 | |||
79 | /* Must be called before pci_bus_add_devices */ | ||
80 | void | ||
81 | pcibios_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus) | ||
82 | { | ||
83 | struct pci_dev *dev; | ||
84 | |||
85 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
86 | /* | ||
87 | * Skip already-present devices (which are on the | ||
88 | * global device list.) | ||
89 | */ | ||
90 | if (list_empty(&dev->global_list)) { | ||
91 | int i; | ||
92 | |||
93 | /* Need to setup IOMMU tables */ | ||
94 | ppc_md.iommu_dev_setup(dev); | ||
95 | |||
96 | if(fix_bus) | ||
97 | pcibios_fixup_device_resources(dev, bus); | ||
98 | pci_read_irq_line(dev); | ||
99 | for (i = 0; i < PCI_NUM_RESOURCES; i++) { | ||
100 | struct resource *r = &dev->resource[i]; | ||
101 | |||
102 | if (r->parent || !r->start || !r->flags) | ||
103 | continue; | ||
104 | pci_claim_resource(dev, i); | ||
105 | } | ||
106 | } | ||
107 | } | ||
108 | } | ||
109 | |||
110 | static int | ||
111 | pcibios_pci_config_bridge(struct pci_dev *dev) | ||
112 | { | ||
113 | u8 sec_busno; | ||
114 | struct pci_bus *child_bus; | ||
115 | struct pci_dev *child_dev; | ||
116 | |||
117 | /* Get busno of downstream bus */ | ||
118 | pci_read_config_byte(dev, PCI_SECONDARY_BUS, &sec_busno); | ||
119 | |||
120 | /* Add to children of PCI bridge dev->bus */ | ||
121 | child_bus = pci_add_new_bus(dev->bus, dev, sec_busno); | ||
122 | if (!child_bus) { | ||
123 | printk (KERN_ERR "%s: could not add second bus\n", __FUNCTION__); | ||
124 | return -EIO; | ||
125 | } | ||
126 | sprintf(child_bus->name, "PCI Bus #%02x", child_bus->number); | ||
127 | |||
128 | pci_scan_child_bus(child_bus); | ||
129 | |||
130 | list_for_each_entry(child_dev, &child_bus->devices, bus_list) { | ||
131 | eeh_add_device_late(child_dev); | ||
132 | } | ||
133 | |||
134 | /* Fixup new pci devices without touching bus struct */ | ||
135 | pcibios_fixup_new_pci_devices(child_bus, 0); | ||
136 | |||
137 | /* Make the discovered devices available */ | ||
138 | pci_bus_add_devices(child_bus); | ||
139 | return 0; | ||
140 | } | ||
141 | |||
142 | /** | ||
143 | * pcibios_add_pci_devices - adds new pci devices to bus | ||
144 | * | ||
145 | * This routine will find and fixup new pci devices under | ||
146 | * the indicated bus. This routine presumes that there | ||
147 | * might already be some devices under this bridge, so | ||
148 | * it carefully tries to add only new devices. (And that | ||
149 | * is how this routine differs from other, similar pcibios | ||
150 | * routines.) | ||
151 | */ | ||
152 | void | ||
153 | pcibios_add_pci_devices(struct pci_bus * bus) | ||
154 | { | ||
155 | int slotno, num; | ||
156 | struct pci_dev *dev; | ||
157 | struct device_node *dn = pci_bus_to_OF_node(bus); | ||
158 | |||
159 | eeh_add_device_tree_early(dn); | ||
160 | |||
161 | /* pci_scan_slot should find all children */ | ||
162 | slotno = PCI_SLOT(PCI_DN(dn->child)->devfn); | ||
163 | num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0)); | ||
164 | if (num) { | ||
165 | pcibios_fixup_new_pci_devices(bus, 1); | ||
166 | pci_bus_add_devices(bus); | ||
167 | } | ||
168 | |||
169 | list_for_each_entry(dev, &bus->devices, bus_list) { | ||
170 | eeh_add_device_late (dev); | ||
171 | if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) | ||
172 | pcibios_pci_config_bridge(dev); | ||
173 | } | ||
174 | } | ||
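A standalone sketch of the "add only new devices" rule that pcibios_fixup_new_pci_devices() applies during a DLPAR add: devices already known to the PCI core (already on the global list) are skipped, while freshly discovered ones receive the per-device fixups. The fake_dev structure and the device names are illustrative stand-ins.

/* Standalone sketch of skipping already-present devices on hot add. */
#include <stdio.h>

struct fake_dev {
    const char *name;
    int on_global_list;   /* already known to the PCI core? */
};

static void fixup_new_devices(struct fake_dev *devs, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        if (devs[i].on_global_list)
            continue;                     /* hot add must not re-fix these */
        printf("fixing up new device %s\n", devs[i].name);
        devs[i].on_global_list = 1;       /* now known */
    }
}

int main(void)
{
    struct fake_dev devs[] = {
        { "0000:01:00.0", 1 },            /* was present before the add */
        { "0000:01:01.0", 0 },            /* newly discovered in the slot */
    };

    fixup_new_devices(devs, 2);
    return 0;
}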
diff --git a/arch/powerpc/platforms/pseries/ras.c b/arch/powerpc/platforms/pseries/ras.c index fbd214d68b07..b046bcf7443d 100644 --- a/arch/powerpc/platforms/pseries/ras.c +++ b/arch/powerpc/platforms/pseries/ras.c | |||
@@ -49,14 +49,14 @@ | |||
49 | #include <asm/machdep.h> | 49 | #include <asm/machdep.h> |
50 | #include <asm/rtas.h> | 50 | #include <asm/rtas.h> |
51 | #include <asm/udbg.h> | 51 | #include <asm/udbg.h> |
52 | #include <asm/firmware.h> | ||
53 | |||
54 | #include "ras.h" | ||
52 | 55 | ||
53 | static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX]; | 56 | static unsigned char ras_log_buf[RTAS_ERROR_LOG_MAX]; |
54 | static DEFINE_SPINLOCK(ras_log_buf_lock); | 57 | static DEFINE_SPINLOCK(ras_log_buf_lock); |
55 | 58 | ||
56 | char mce_data_buf[RTAS_ERROR_LOG_MAX] | 59 | char mce_data_buf[RTAS_ERROR_LOG_MAX]; |
57 | ; | ||
58 | /* This is true if we are using the firmware NMI handler (typically LPAR) */ | ||
59 | extern int fwnmi_active; | ||
60 | 60 | ||
61 | static int ras_get_sensor_state_token; | 61 | static int ras_get_sensor_state_token; |
62 | static int ras_check_exception_token; | 62 | static int ras_check_exception_token; |
@@ -280,7 +280,7 @@ static void fwnmi_release_errinfo(void) | |||
280 | printk("FWNMI: nmi-interlock failed: %d\n", ret); | 280 | printk("FWNMI: nmi-interlock failed: %d\n", ret); |
281 | } | 281 | } |
282 | 282 | ||
283 | void pSeries_system_reset_exception(struct pt_regs *regs) | 283 | int pSeries_system_reset_exception(struct pt_regs *regs) |
284 | { | 284 | { |
285 | if (fwnmi_active) { | 285 | if (fwnmi_active) { |
286 | struct rtas_error_log *errhdr = fwnmi_get_errinfo(regs); | 286 | struct rtas_error_log *errhdr = fwnmi_get_errinfo(regs); |
@@ -289,6 +289,7 @@ void pSeries_system_reset_exception(struct pt_regs *regs) | |||
289 | } | 289 | } |
290 | fwnmi_release_errinfo(); | 290 | fwnmi_release_errinfo(); |
291 | } | 291 | } |
292 | return 0; /* need to perform reset */ | ||
292 | } | 293 | } |
293 | 294 | ||
294 | /* | 295 | /* |
diff --git a/arch/powerpc/platforms/pseries/ras.h b/arch/powerpc/platforms/pseries/ras.h new file mode 100644 index 000000000000..0e66b0da55e2 --- /dev/null +++ b/arch/powerpc/platforms/pseries/ras.h | |||
@@ -0,0 +1,9 @@ | |||
1 | #ifndef _PSERIES_RAS_H | ||
2 | #define _PSERIES_RAS_H | ||
3 | |||
4 | struct pt_regs; | ||
5 | |||
6 | extern int pSeries_system_reset_exception(struct pt_regs *regs); | ||
7 | extern int pSeries_machine_check_exception(struct pt_regs *regs); | ||
8 | |||
9 | #endif /* _PSERIES_RAS_H */ | ||
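A hedged sketch of how an int-returning platform reset hook such as pSeries_system_reset_exception() can be consumed; the exact generic caller is not part of this diff, so the dispatch below is an assumption. The convention implied by the "need to perform reset" comment is that a return of 0 means the caller must still carry out the reset itself.

/* Assumed dispatch pattern for an int-returning reset hook. */
#include <stdio.h>

static int platform_system_reset(void)
{
    /* log firmware error information, as the pSeries handler does ... */
    return 0;                 /* reset still needs to be performed */
}

int main(void)
{
    if (platform_system_reset())
        printf("platform hook handled the reset completely\n");
    else
        printf("falling back to the generic reset path\n");
    return 0;
}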
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c index 4a465f067ede..8903cf63236a 100644 --- a/arch/powerpc/platforms/pseries/setup.c +++ b/arch/powerpc/platforms/pseries/setup.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #include <asm/dma.h> | 56 | #include <asm/dma.h> |
57 | #include <asm/machdep.h> | 57 | #include <asm/machdep.h> |
58 | #include <asm/irq.h> | 58 | #include <asm/irq.h> |
59 | #include <asm/kexec.h> | ||
59 | #include <asm/time.h> | 60 | #include <asm/time.h> |
60 | #include <asm/nvram.h> | 61 | #include <asm/nvram.h> |
61 | #include "xics.h" | 62 | #include "xics.h" |
@@ -68,6 +69,7 @@ | |||
68 | #include <asm/smp.h> | 69 | #include <asm/smp.h> |
69 | 70 | ||
70 | #include "plpar_wrappers.h" | 71 | #include "plpar_wrappers.h" |
72 | #include "ras.h" | ||
71 | 73 | ||
72 | #ifdef DEBUG | 74 | #ifdef DEBUG |
73 | #define DBG(fmt...) udbg_printf(fmt) | 75 | #define DBG(fmt...) udbg_printf(fmt) |
@@ -76,16 +78,9 @@ | |||
76 | #endif | 78 | #endif |
77 | 79 | ||
78 | extern void find_udbg_vterm(void); | 80 | extern void find_udbg_vterm(void); |
79 | extern void system_reset_fwnmi(void); /* from head.S */ | ||
80 | extern void machine_check_fwnmi(void); /* from head.S */ | ||
81 | extern void generic_find_legacy_serial_ports(u64 *physport, | ||
82 | unsigned int *default_speed); | ||
83 | 81 | ||
84 | int fwnmi_active; /* TRUE if an FWNMI handler is present */ | 82 | int fwnmi_active; /* TRUE if an FWNMI handler is present */ |
85 | 83 | ||
86 | extern void pSeries_system_reset_exception(struct pt_regs *regs); | ||
87 | extern int pSeries_machine_check_exception(struct pt_regs *regs); | ||
88 | |||
89 | static void pseries_shared_idle(void); | 84 | static void pseries_shared_idle(void); |
90 | static void pseries_dedicated_idle(void); | 85 | static void pseries_dedicated_idle(void); |
91 | 86 | ||
@@ -105,18 +100,22 @@ void pSeries_show_cpuinfo(struct seq_file *m) | |||
105 | 100 | ||
106 | /* Initialize firmware assisted non-maskable interrupts if | 101 | /* Initialize firmware assisted non-maskable interrupts if |
107 | * the firmware supports this feature. | 102 | * the firmware supports this feature. |
108 | * | ||
109 | */ | 103 | */ |
110 | static void __init fwnmi_init(void) | 104 | static void __init fwnmi_init(void) |
111 | { | 105 | { |
112 | int ret; | 106 | unsigned long system_reset_addr, machine_check_addr; |
107 | |||
113 | int ibm_nmi_register = rtas_token("ibm,nmi-register"); | 108 | int ibm_nmi_register = rtas_token("ibm,nmi-register"); |
114 | if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE) | 109 | if (ibm_nmi_register == RTAS_UNKNOWN_SERVICE) |
115 | return; | 110 | return; |
116 | ret = rtas_call(ibm_nmi_register, 2, 1, NULL, | 111 | |
117 | __pa((unsigned long)system_reset_fwnmi), | 112 | /* If the kernel's not linked at zero we point the firmware at low |
118 | __pa((unsigned long)machine_check_fwnmi)); | 113 | * addresses anyway, and use a trampoline to get to the real code. */ |
119 | if (ret == 0) | 114 | system_reset_addr = __pa(system_reset_fwnmi) - PHYSICAL_START; |
115 | machine_check_addr = __pa(machine_check_fwnmi) - PHYSICAL_START; | ||
116 | |||
117 | if (0 == rtas_call(ibm_nmi_register, 2, 1, NULL, system_reset_addr, | ||
118 | machine_check_addr)) | ||
120 | fwnmi_active = 1; | 119 | fwnmi_active = 1; |
121 | } | 120 | } |
122 | 121 | ||
@@ -323,15 +322,18 @@ static void __init pSeries_discover_pic(void) | |||
323 | ppc64_interrupt_controller = IC_INVALID; | 322 | ppc64_interrupt_controller = IC_INVALID; |
324 | for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) { | 323 | for (np = NULL; (np = of_find_node_by_name(np, "interrupt-controller"));) { |
325 | typep = (char *)get_property(np, "compatible", NULL); | 324 | typep = (char *)get_property(np, "compatible", NULL); |
326 | if (strstr(typep, "open-pic")) | 325 | if (strstr(typep, "open-pic")) { |
327 | ppc64_interrupt_controller = IC_OPEN_PIC; | 326 | ppc64_interrupt_controller = IC_OPEN_PIC; |
328 | else if (strstr(typep, "ppc-xicp")) | 327 | break; |
328 | } else if (strstr(typep, "ppc-xicp")) { | ||
329 | ppc64_interrupt_controller = IC_PPC_XIC; | 329 | ppc64_interrupt_controller = IC_PPC_XIC; |
330 | else | 330 | break; |
331 | printk("pSeries_discover_pic: failed to recognize" | 331 | } |
332 | " interrupt-controller\n"); | ||
333 | break; | ||
334 | } | 332 | } |
333 | if (ppc64_interrupt_controller == IC_INVALID) | ||
334 | printk("pSeries_discover_pic: failed to recognize" | ||
335 | " interrupt-controller\n"); | ||
336 | |||
335 | } | 337 | } |
336 | 338 | ||
337 | static void pSeries_mach_cpu_die(void) | 339 | static void pSeries_mach_cpu_die(void) |
@@ -365,10 +367,7 @@ static int pseries_set_xdabr(unsigned long dabr) | |||
365 | */ | 367 | */ |
366 | static void __init pSeries_init_early(void) | 368 | static void __init pSeries_init_early(void) |
367 | { | 369 | { |
368 | void *comport; | ||
369 | int iommu_off = 0; | 370 | int iommu_off = 0; |
370 | unsigned int default_speed; | ||
371 | u64 physport; | ||
372 | 371 | ||
373 | DBG(" -> pSeries_init_early()\n"); | 372 | DBG(" -> pSeries_init_early()\n"); |
374 | 373 | ||
@@ -382,17 +381,8 @@ static void __init pSeries_init_early(void) | |||
382 | get_property(of_chosen, "linux,iommu-off", NULL)); | 381 | get_property(of_chosen, "linux,iommu-off", NULL)); |
383 | } | 382 | } |
384 | 383 | ||
385 | generic_find_legacy_serial_ports(&physport, &default_speed); | ||
386 | |||
387 | if (platform_is_lpar()) | 384 | if (platform_is_lpar()) |
388 | find_udbg_vterm(); | 385 | find_udbg_vterm(); |
389 | else if (physport) { | ||
390 | /* Map the uart for udbg. */ | ||
391 | comport = (void *)ioremap(physport, 16); | ||
392 | udbg_init_uart(comport, default_speed); | ||
393 | |||
394 | DBG("Hello World !\n"); | ||
395 | } | ||
396 | 386 | ||
397 | if (firmware_has_feature(FW_FEATURE_DABR)) | 387 | if (firmware_has_feature(FW_FEATURE_DABR)) |
398 | ppc_md.set_dabr = pseries_set_dabr; | 388 | ppc_md.set_dabr = pseries_set_dabr; |
@@ -638,5 +628,8 @@ struct machdep_calls __initdata pSeries_md = { | |||
638 | .machine_check_exception = pSeries_machine_check_exception, | 628 | .machine_check_exception = pSeries_machine_check_exception, |
639 | #ifdef CONFIG_KEXEC | 629 | #ifdef CONFIG_KEXEC |
640 | .kexec_cpu_down = pseries_kexec_cpu_down, | 630 | .kexec_cpu_down = pseries_kexec_cpu_down, |
631 | .machine_kexec = default_machine_kexec, | ||
632 | .machine_kexec_prepare = default_machine_kexec_prepare, | ||
633 | .machine_crash_shutdown = default_machine_crash_shutdown, | ||
641 | #endif | 634 | #endif |
642 | }; | 635 | }; |
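A standalone sketch of the machdep wiring added above: the platform fills a table of function pointers and, where it needs nothing special, points the entries at common default_* implementations, which the generic kexec path then calls through. The struct and function names below are simplified stand-ins, not the real machdep_calls layout.

/* Standalone sketch of defaulted platform hooks in a machdep-style table. */
#include <stdio.h>

struct fake_machdep {
    void (*machine_kexec)(void);
    int  (*machine_kexec_prepare)(void);
};

static void default_machine_kexec(void)         { printf("default kexec\n"); }
static int  default_machine_kexec_prepare(void) { return 0; }

static struct fake_machdep md = {
    .machine_kexec         = default_machine_kexec,
    .machine_kexec_prepare = default_machine_kexec_prepare,
};

int main(void)
{
    if (md.machine_kexec_prepare() == 0)
        md.machine_kexec();
    return 0;
}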
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c index 0377decc0719..0c0cfa32eb58 100644 --- a/arch/powerpc/platforms/pseries/xics.c +++ b/arch/powerpc/platforms/pseries/xics.c | |||
@@ -407,7 +407,7 @@ irqreturn_t xics_ipi_action(int irq, void *dev_id, struct pt_regs *regs) | |||
407 | smp_message_recv(PPC_MSG_MIGRATE_TASK, regs); | 407 | smp_message_recv(PPC_MSG_MIGRATE_TASK, regs); |
408 | } | 408 | } |
409 | #endif | 409 | #endif |
410 | #ifdef CONFIG_DEBUGGER | 410 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) |
411 | if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, | 411 | if (test_and_clear_bit(PPC_MSG_DEBUGGER_BREAK, |
412 | &xics_ipi_message[cpu].value)) { | 412 | &xics_ipi_message[cpu].value)) { |
413 | mb(); | 413 | mb(); |
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index 6b7efcfc352a..14b9abde2d27 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile | |||
@@ -4,5 +4,6 @@ obj-$(CONFIG_PPC_I8259) += i8259.o | |||
4 | obj-$(CONFIG_PPC_MPC106) += grackle.o | 4 | obj-$(CONFIG_PPC_MPC106) += grackle.o |
5 | obj-$(CONFIG_BOOKE) += dcr.o | 5 | obj-$(CONFIG_BOOKE) += dcr.o |
6 | obj-$(CONFIG_40x) += dcr.o | 6 | obj-$(CONFIG_40x) += dcr.o |
7 | obj-$(CONFIG_U3_DART) += u3_iommu.o | 7 | obj-$(CONFIG_U3_DART) += dart_iommu.o |
8 | obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o | 8 | obj-$(CONFIG_MMIO_NVRAM) += mmio_nvram.o |
9 | obj-$(CONFIG_83xx) += ipic.o | ||
diff --git a/arch/powerpc/sysdev/dart.h b/arch/powerpc/sysdev/dart.h index 33ed9ed7fc1e..c2d05763ccbe 100644 --- a/arch/powerpc/sysdev/dart.h +++ b/arch/powerpc/sysdev/dart.h | |||
@@ -20,29 +20,44 @@ | |||
20 | #define _POWERPC_SYSDEV_DART_H | 20 | #define _POWERPC_SYSDEV_DART_H |
21 | 21 | ||
22 | 22 | ||
23 | /* physical base of DART registers */ | ||
24 | #define DART_BASE 0xf8033000UL | ||
25 | |||
26 | /* Offset from base to control register */ | 23 | /* Offset from base to control register */ |
27 | #define DARTCNTL 0 | 24 | #define DART_CNTL 0 |
25 | |||
28 | /* Offset from base to exception register */ | 26 | /* Offset from base to exception register */ |
29 | #define DARTEXCP 0x10 | 27 | #define DART_EXCP_U3 0x10 |
30 | /* Offset from base to TLB tag registers */ | 28 | /* Offset from base to TLB tag registers */ |
31 | #define DARTTAG 0x1000 | 29 | #define DART_TAGS_U3 0x1000 |
32 | 30 | ||
31 | /* U4 registers */ | ||
32 | #define DART_BASE_U4 0x10 | ||
33 | #define DART_SIZE_U4 0x20 | ||
34 | #define DART_EXCP_U4 0x30 | ||
35 | #define DART_TAGS_U4 0x1000 | ||
33 | 36 | ||
34 | /* Control Register fields */ | 37 | /* Control Register fields */ |
35 | 38 | ||
36 | /* base address of table (pfn) */ | 39 | /* U3 registers */ |
37 | #define DARTCNTL_BASE_MASK 0xfffff | 40 | #define DART_CNTL_U3_BASE_MASK 0xfffff |
38 | #define DARTCNTL_BASE_SHIFT 12 | 41 | #define DART_CNTL_U3_BASE_SHIFT 12 |
42 | #define DART_CNTL_U3_FLUSHTLB 0x400 | ||
43 | #define DART_CNTL_U3_ENABLE 0x200 | ||
44 | #define DART_CNTL_U3_SIZE_MASK 0x1ff | ||
45 | #define DART_CNTL_U3_SIZE_SHIFT 0 | ||
46 | |||
47 | /* U4 registers */ | ||
48 | #define DART_BASE_U4_BASE_MASK 0xffffff | ||
49 | #define DART_BASE_U4_BASE_SHIFT 0 | ||
50 | #define DART_CNTL_U4_FLUSHTLB 0x20000000 | ||
51 | #define DART_CNTL_U4_ENABLE 0x80000000 | ||
52 | #define DART_SIZE_U4_SIZE_MASK 0x1fff | ||
53 | #define DART_SIZE_U4_SIZE_SHIFT 0 | ||
54 | |||
55 | #define DART_REG(r) (dart + ((r) >> 2)) | ||
56 | #define DART_IN(r) (in_be32(DART_REG(r))) | ||
57 | #define DART_OUT(r,v) (out_be32(DART_REG(r), (v))) | ||
39 | 58 | ||
40 | #define DARTCNTL_FLUSHTLB 0x400 | ||
41 | #define DARTCNTL_ENABLE 0x200 | ||
42 | 59 | ||
43 | /* size of table in pages */ | 60 | /* size of table in pages */ |
44 | #define DARTCNTL_SIZE_MASK 0x1ff | ||
45 | #define DARTCNTL_SIZE_SHIFT 0 | ||
46 | 61 | ||
47 | 62 | ||
48 | /* DART table fields */ | 63 | /* DART table fields */ |
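A standalone sketch of the new DART_REG()/DART_IN()/DART_OUT() accessors: the register offsets are byte offsets into the mapped window, so shifting right by 2 turns them into an index into an array of 32-bit words. The fake register array below stands in for the ioremapped DART window.

/* Standalone sketch of byte-offset to word-index register addressing. */
#include <stdio.h>

#define FAKE_DART_CNTL    0x00
#define FAKE_DART_EXCP_U4 0x30

static unsigned int regs[0x40];               /* pretend MMIO window */
#define REG(r) (regs + ((r) >> 2))            /* byte offset -> word pointer */

int main(void)
{
    *REG(FAKE_DART_EXCP_U4) = 0x1234;
    printf("word index %d holds 0x%x\n",
           FAKE_DART_EXCP_U4 >> 2, *REG(FAKE_DART_EXCP_U4));
    return 0;
}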
diff --git a/arch/powerpc/sysdev/u3_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index 5c1a26a6d00c..e00b46b9514e 100644 --- a/arch/powerpc/sysdev/u3_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c | |||
@@ -1,25 +1,27 @@ | |||
1 | /* | 1 | /* |
2 | * arch/powerpc/sysdev/u3_iommu.c | 2 | * arch/powerpc/sysdev/dart_iommu.c |
3 | * | 3 | * |
4 | * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation | 4 | * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation |
5 | * Copyright (C) 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>, | ||
6 | * IBM Corporation | ||
5 | * | 7 | * |
6 | * Based on pSeries_iommu.c: | 8 | * Based on pSeries_iommu.c: |
7 | * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation | 9 | * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation |
8 | * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation | 10 | * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation |
9 | * | 11 | * |
10 | * Dynamic DMA mapping support, Apple U3 & IBM CPC925 "DART" iommu. | 12 | * Dynamic DMA mapping support, Apple U3, U4 & IBM CPC925 "DART" iommu. |
13 | * | ||
11 | * | 14 | * |
12 | * | ||
13 | * This program is free software; you can redistribute it and/or modify | 15 | * This program is free software; you can redistribute it and/or modify |
14 | * it under the terms of the GNU General Public License as published by | 16 | * it under the terms of the GNU General Public License as published by |
15 | * the Free Software Foundation; either version 2 of the License, or | 17 | * the Free Software Foundation; either version 2 of the License, or |
16 | * (at your option) any later version. | 18 | * (at your option) any later version. |
17 | * | 19 | * |
18 | * This program is distributed in the hope that it will be useful, | 20 | * This program is distributed in the hope that it will be useful, |
19 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | 21 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
20 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | 22 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
21 | * GNU General Public License for more details. | 23 | * GNU General Public License for more details. |
22 | * | 24 | * |
23 | * You should have received a copy of the GNU General Public License | 25 | * You should have received a copy of the GNU General Public License |
24 | * along with this program; if not, write to the Free Software | 26 | * along with this program; if not, write to the Free Software |
25 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | 27 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA |
@@ -57,21 +59,22 @@ static unsigned long dart_tablesize; | |||
57 | static u32 *dart_vbase; | 59 | static u32 *dart_vbase; |
58 | 60 | ||
59 | /* Mapped base address for the dart */ | 61 | /* Mapped base address for the dart */ |
60 | static unsigned int *dart; | 62 | static unsigned int *__iomem dart; |
61 | 63 | ||
62 | /* Dummy val that entries are set to when unused */ | 64 | /* Dummy val that entries are set to when unused */ |
63 | static unsigned int dart_emptyval; | 65 | static unsigned int dart_emptyval; |
64 | 66 | ||
65 | static struct iommu_table iommu_table_u3; | 67 | static struct iommu_table iommu_table_dart; |
66 | static int iommu_table_u3_inited; | 68 | static int iommu_table_dart_inited; |
67 | static int dart_dirty; | 69 | static int dart_dirty; |
70 | static int dart_is_u4; | ||
68 | 71 | ||
69 | #define DBG(...) | 72 | #define DBG(...) |
70 | 73 | ||
71 | static inline void dart_tlb_invalidate_all(void) | 74 | static inline void dart_tlb_invalidate_all(void) |
72 | { | 75 | { |
73 | unsigned long l = 0; | 76 | unsigned long l = 0; |
74 | unsigned int reg; | 77 | unsigned int reg, inv_bit; |
75 | unsigned long limit; | 78 | unsigned long limit; |
76 | 79 | ||
77 | DBG("dart: flush\n"); | 80 | DBG("dart: flush\n"); |
@@ -81,29 +84,28 @@ static inline void dart_tlb_invalidate_all(void) | |||
81 | * | 84 | * |
82 | * Gotcha: Sometimes, the DART won't detect that the bit gets | 85 | * Gotcha: Sometimes, the DART won't detect that the bit gets |
83 | * set. If so, clear it and set it again. | 86 | * set. If so, clear it and set it again. |
84 | */ | 87 | */ |
85 | 88 | ||
86 | limit = 0; | 89 | limit = 0; |
87 | 90 | ||
91 | inv_bit = dart_is_u4 ? DART_CNTL_U4_FLUSHTLB : DART_CNTL_U3_FLUSHTLB; | ||
88 | retry: | 92 | retry: |
89 | reg = in_be32((unsigned int *)dart+DARTCNTL); | ||
90 | reg |= DARTCNTL_FLUSHTLB; | ||
91 | out_be32((unsigned int *)dart+DARTCNTL, reg); | ||
92 | |||
93 | l = 0; | 93 | l = 0; |
94 | while ((in_be32((unsigned int *)dart+DARTCNTL) & DARTCNTL_FLUSHTLB) && | 94 | reg = DART_IN(DART_CNTL); |
95 | l < (1L<<limit)) { | 95 | reg |= inv_bit; |
96 | DART_OUT(DART_CNTL, reg); | ||
97 | |||
98 | while ((DART_IN(DART_CNTL) & inv_bit) && l < (1L << limit)) | ||
96 | l++; | 99 | l++; |
97 | } | 100 | if (l == (1L << limit)) { |
98 | if (l == (1L<<limit)) { | ||
99 | if (limit < 4) { | 101 | if (limit < 4) { |
100 | limit++; | 102 | limit++; |
101 | reg = in_be32((unsigned int *)dart+DARTCNTL); | 103 | reg = DART_IN(DART_CNTL); |
102 | reg &= ~DARTCNTL_FLUSHTLB; | 104 | reg &= ~inv_bit; |
103 | out_be32((unsigned int *)dart+DARTCNTL, reg); | 105 | DART_OUT(DART_CNTL, reg); |
104 | goto retry; | 106 | goto retry; |
105 | } else | 107 | } else |
106 | panic("U3-DART: TLB did not flush after waiting a long " | 108 | panic("DART: TLB did not flush after waiting a long " |
107 | "time. Buggy U3 ?"); | 109 | "time. Buggy U3 ?"); |
108 | } | 110 | } |
109 | } | 111 | } |
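A standalone model of the flush-retry logic above: poll for the invalidate bit to clear, and if the wait limit is exhausted, clear and re-set the bit with a doubled limit before finally giving up. The fake flush_done() "hardware" simply ignores the first attempt so the retry path gets exercised; the real code polls DART_CNTL.

/* Standalone model of the flush retry with an exponentially growing wait. */
#include <stdio.h>

static int attempts;

static int flush_done(void)
{
    return attempts > 1;       /* pretend the DART missed the first write */
}

int main(void)
{
    unsigned long l, limit = 0;

retry:
    attempts++;                /* stands in for re-setting the flush bit */
    for (l = 0; !flush_done() && l < (1UL << limit); l++)
        ;
    if (l == (1UL << limit)) {
        if (limit < 4) {
            limit++;           /* wait longer on the next attempt */
            goto retry;
        }
        printf("TLB did not flush, would panic here\n");
        return 1;
    }
    printf("flushed after %d attempt(s)\n", attempts);
    return 0;
}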
@@ -115,7 +117,7 @@ static void dart_flush(struct iommu_table *tbl) | |||
115 | dart_dirty = 0; | 117 | dart_dirty = 0; |
116 | } | 118 | } |
117 | 119 | ||
118 | static void dart_build(struct iommu_table *tbl, long index, | 120 | static void dart_build(struct iommu_table *tbl, long index, |
119 | long npages, unsigned long uaddr, | 121 | long npages, unsigned long uaddr, |
120 | enum dma_data_direction direction) | 122 | enum dma_data_direction direction) |
121 | { | 123 | { |
@@ -128,7 +130,7 @@ static void dart_build(struct iommu_table *tbl, long index, | |||
128 | npages <<= DART_PAGE_FACTOR; | 130 | npages <<= DART_PAGE_FACTOR; |
129 | 131 | ||
130 | dp = ((unsigned int*)tbl->it_base) + index; | 132 | dp = ((unsigned int*)tbl->it_base) + index; |
131 | 133 | ||
132 | /* On U3, all memory is contiguous, so we can move this | 134 | /* On U3, all memory is contiguous, so we can move this
133 | * out of the loop. | 135 | * out of the loop. |
134 | */ | 136 | */ |
@@ -148,7 +150,7 @@ static void dart_build(struct iommu_table *tbl, long index, | |||
148 | static void dart_free(struct iommu_table *tbl, long index, long npages) | 150 | static void dart_free(struct iommu_table *tbl, long index, long npages) |
149 | { | 151 | { |
150 | unsigned int *dp; | 152 | unsigned int *dp; |
151 | 153 | ||
152 | /* We don't worry about flushing the TLB cache. The only drawback of | 154 | /* We don't worry about flushing the TLB cache. The only drawback of |
153 | * not doing it is that we won't catch buggy device drivers doing | 155 | * not doing it is that we won't catch buggy device drivers doing |
154 | * bad DMAs, but then no 32-bit architecture ever does either. | 156 | * bad DMAs, but then no 32-bit architecture ever does either. |
@@ -160,7 +162,7 @@ static void dart_free(struct iommu_table *tbl, long index, long npages) | |||
160 | npages <<= DART_PAGE_FACTOR; | 162 | npages <<= DART_PAGE_FACTOR; |
161 | 163 | ||
162 | dp = ((unsigned int *)tbl->it_base) + index; | 164 | dp = ((unsigned int *)tbl->it_base) + index; |
163 | 165 | ||
164 | while (npages--) | 166 | while (npages--) |
165 | *(dp++) = dart_emptyval; | 167 | *(dp++) = dart_emptyval; |
166 | } | 168 | } |
@@ -168,20 +170,25 @@ static void dart_free(struct iommu_table *tbl, long index, long npages) | |||
168 | 170 | ||
169 | static int dart_init(struct device_node *dart_node) | 171 | static int dart_init(struct device_node *dart_node) |
170 | { | 172 | { |
171 | unsigned int regword; | ||
172 | unsigned int i; | 173 | unsigned int i; |
173 | unsigned long tmp; | 174 | unsigned long tmp, base, size; |
175 | struct resource r; | ||
174 | 176 | ||
175 | if (dart_tablebase == 0 || dart_tablesize == 0) { | 177 | if (dart_tablebase == 0 || dart_tablesize == 0) { |
176 | printk(KERN_INFO "U3-DART: table not allocated, using direct DMA\n"); | 178 | printk(KERN_INFO "DART: table not allocated, using " |
179 | "direct DMA\n"); | ||
177 | return -ENODEV; | 180 | return -ENODEV; |
178 | } | 181 | } |
179 | 182 | ||
183 | if (of_address_to_resource(dart_node, 0, &r)) | ||
184 | panic("DART: can't get register base ! "); | ||
185 | |||
180 | /* Make sure nothing from the DART range remains in the CPU cache | 186 | /* Make sure nothing from the DART range remains in the CPU cache |
181 | * from a previous mapping that existed before the kernel took | 187 | * from a previous mapping that existed before the kernel took |
182 | * over | 188 | * over |
183 | */ | 189 | */ |
184 | flush_dcache_phys_range(dart_tablebase, dart_tablebase + dart_tablesize); | 190 | flush_dcache_phys_range(dart_tablebase, |
191 | dart_tablebase + dart_tablesize); | ||
185 | 192 | ||
186 | /* Allocate a spare page to map all invalid DART pages. We need to do | 193 | /* Allocate a spare page to map all invalid DART pages. We need to do |
187 | * that to work around what looks like a problem with the HT bridge | 194 | * that to work around what looks like a problem with the HT bridge |
@@ -189,21 +196,16 @@ static int dart_init(struct device_node *dart_node) | |||
189 | */ | 196 | */ |
190 | tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE); | 197 | tmp = lmb_alloc(DART_PAGE_SIZE, DART_PAGE_SIZE); |
191 | if (!tmp) | 198 | if (!tmp) |
192 | panic("U3-DART: Cannot allocate spare page!"); | 199 | panic("DART: Cannot allocate spare page!"); |
193 | dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) & DARTMAP_RPNMASK); | 200 | dart_emptyval = DARTMAP_VALID | ((tmp >> DART_PAGE_SHIFT) & |
201 | DARTMAP_RPNMASK); | ||
194 | 202 | ||
195 | /* Map in DART registers. FIXME: Use device node to get base address */ | 203 | /* Map in DART registers */ |
196 | dart = ioremap(DART_BASE, 0x7000); | 204 | dart = ioremap(r.start, r.end - r.start + 1); |
197 | if (dart == NULL) | 205 | if (dart == NULL) |
198 | panic("U3-DART: Cannot map registers!"); | 206 | panic("DART: Cannot map registers!"); |
199 | 207 | ||
200 | /* Set initial control register contents: table base, | 208 | /* Map in DART table */ |
201 | * table size and enable bit | ||
202 | */ | ||
203 | regword = DARTCNTL_ENABLE | | ||
204 | ((dart_tablebase >> DART_PAGE_SHIFT) << DARTCNTL_BASE_SHIFT) | | ||
205 | (((dart_tablesize >> DART_PAGE_SHIFT) & DARTCNTL_SIZE_MASK) | ||
206 | << DARTCNTL_SIZE_SHIFT); | ||
207 | dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize); | 209 | dart_vbase = ioremap(virt_to_abs(dart_tablebase), dart_tablesize); |
208 | 210 | ||
209 | /* Fill initial table */ | 211 | /* Fill initial table */ |
@@ -211,36 +213,50 @@ static int dart_init(struct device_node *dart_node) | |||
211 | dart_vbase[i] = dart_emptyval; | 213 | dart_vbase[i] = dart_emptyval; |
212 | 214 | ||
213 | /* Initialize DART with table base and enable it. */ | 215 | /* Initialize DART with table base and enable it. */ |
214 | out_be32((unsigned int *)dart, regword); | 216 | base = dart_tablebase >> DART_PAGE_SHIFT; |
217 | size = dart_tablesize >> DART_PAGE_SHIFT; | ||
218 | if (dart_is_u4) { | ||
219 | size &= DART_SIZE_U4_SIZE_MASK; | ||
220 | DART_OUT(DART_BASE_U4, base); | ||
221 | DART_OUT(DART_SIZE_U4, size); | ||
222 | DART_OUT(DART_CNTL, DART_CNTL_U4_ENABLE); | ||
223 | } else { | ||
224 | size &= DART_CNTL_U3_SIZE_MASK; | ||
225 | DART_OUT(DART_CNTL, | ||
226 | DART_CNTL_U3_ENABLE | | ||
227 | (base << DART_CNTL_U3_BASE_SHIFT) | | ||
228 | (size << DART_CNTL_U3_SIZE_SHIFT)); | ||
229 | } | ||
215 | 230 | ||
216 | /* Invalidate DART to get rid of possible stale TLBs */ | 231 | /* Invalidate DART to get rid of possible stale TLBs */ |
217 | dart_tlb_invalidate_all(); | 232 | dart_tlb_invalidate_all(); |
218 | 233 | ||
219 | printk(KERN_INFO "U3/CPC925 DART IOMMU initialized\n"); | 234 | printk(KERN_INFO "DART IOMMU initialized for %s type chipset\n", |
235 | dart_is_u4 ? "U4" : "U3"); | ||
220 | 236 | ||
221 | return 0; | 237 | return 0; |
222 | } | 238 | } |
223 | 239 | ||
224 | static void iommu_table_u3_setup(void) | 240 | static void iommu_table_dart_setup(void) |
225 | { | 241 | { |
226 | iommu_table_u3.it_busno = 0; | 242 | iommu_table_dart.it_busno = 0; |
227 | iommu_table_u3.it_offset = 0; | 243 | iommu_table_dart.it_offset = 0; |
228 | /* it_size is in number of entries */ | 244 | /* it_size is in number of entries */ |
229 | iommu_table_u3.it_size = (dart_tablesize / sizeof(u32)) >> DART_PAGE_FACTOR; | 245 | iommu_table_dart.it_size = (dart_tablesize / sizeof(u32)) >> DART_PAGE_FACTOR; |
230 | 246 | ||
231 | /* Initialize the common IOMMU code */ | 247 | /* Initialize the common IOMMU code */ |
232 | iommu_table_u3.it_base = (unsigned long)dart_vbase; | 248 | iommu_table_dart.it_base = (unsigned long)dart_vbase; |
233 | iommu_table_u3.it_index = 0; | 249 | iommu_table_dart.it_index = 0; |
234 | iommu_table_u3.it_blocksize = 1; | 250 | iommu_table_dart.it_blocksize = 1; |
235 | iommu_init_table(&iommu_table_u3); | 251 | iommu_init_table(&iommu_table_dart); |
236 | 252 | ||
237 | /* Reserve the last page of the DART to avoid possible prefetch | 253 | /* Reserve the last page of the DART to avoid possible prefetch |
238 | * past the DART mapped area | 254 | * past the DART mapped area |
239 | */ | 255 | */ |
240 | set_bit(iommu_table_u3.it_size - 1, iommu_table_u3.it_map); | 256 | set_bit(iommu_table_dart.it_size - 1, iommu_table_dart.it_map); |
241 | } | 257 | } |
242 | 258 | ||
243 | static void iommu_dev_setup_u3(struct pci_dev *dev) | 259 | static void iommu_dev_setup_dart(struct pci_dev *dev) |
244 | { | 260 | { |
245 | struct device_node *dn; | 261 | struct device_node *dn; |
246 | 262 | ||
@@ -254,35 +270,39 @@ static void iommu_dev_setup_u3(struct pci_dev *dev) | |||
254 | dn = pci_device_to_OF_node(dev); | 270 | dn = pci_device_to_OF_node(dev); |
255 | 271 | ||
256 | if (dn) | 272 | if (dn) |
257 | PCI_DN(dn)->iommu_table = &iommu_table_u3; | 273 | PCI_DN(dn)->iommu_table = &iommu_table_dart; |
258 | } | 274 | } |
259 | 275 | ||
260 | static void iommu_bus_setup_u3(struct pci_bus *bus) | 276 | static void iommu_bus_setup_dart(struct pci_bus *bus) |
261 | { | 277 | { |
262 | struct device_node *dn; | 278 | struct device_node *dn; |
263 | 279 | ||
264 | if (!iommu_table_u3_inited) { | 280 | if (!iommu_table_dart_inited) { |
265 | iommu_table_u3_inited = 1; | 281 | iommu_table_dart_inited = 1; |
266 | iommu_table_u3_setup(); | 282 | iommu_table_dart_setup(); |
267 | } | 283 | } |
268 | 284 | ||
269 | dn = pci_bus_to_OF_node(bus); | 285 | dn = pci_bus_to_OF_node(bus); |
270 | 286 | ||
271 | if (dn) | 287 | if (dn) |
272 | PCI_DN(dn)->iommu_table = &iommu_table_u3; | 288 | PCI_DN(dn)->iommu_table = &iommu_table_dart; |
273 | } | 289 | } |
274 | 290 | ||
275 | static void iommu_dev_setup_null(struct pci_dev *dev) { } | 291 | static void iommu_dev_setup_null(struct pci_dev *dev) { } |
276 | static void iommu_bus_setup_null(struct pci_bus *bus) { } | 292 | static void iommu_bus_setup_null(struct pci_bus *bus) { } |
277 | 293 | ||
278 | void iommu_init_early_u3(void) | 294 | void iommu_init_early_dart(void) |
279 | { | 295 | { |
280 | struct device_node *dn; | 296 | struct device_node *dn; |
281 | 297 | ||
282 | /* Find the DART in the device-tree */ | 298 | /* Find the DART in the device-tree */ |
283 | dn = of_find_compatible_node(NULL, "dart", "u3-dart"); | 299 | dn = of_find_compatible_node(NULL, "dart", "u3-dart"); |
284 | if (dn == NULL) | 300 | if (dn == NULL) { |
285 | return; | 301 | dn = of_find_compatible_node(NULL, "dart", "u4-dart"); |
302 | if (dn == NULL) | ||
303 | goto bail; | ||
304 | dart_is_u4 = 1; | ||
305 | } | ||
286 | 306 | ||
287 | /* Setup low level TCE operations for the core IOMMU code */ | 307 | /* Setup low level TCE operations for the core IOMMU code */ |
288 | ppc_md.tce_build = dart_build; | 308 | ppc_md.tce_build = dart_build; |
@@ -290,24 +310,27 @@ void iommu_init_early_u3(void) | |||
290 | ppc_md.tce_flush = dart_flush; | 310 | ppc_md.tce_flush = dart_flush; |
291 | 311 | ||
292 | /* Initialize the DART HW */ | 312 | /* Initialize the DART HW */ |
293 | if (dart_init(dn)) { | 313 | if (dart_init(dn) == 0) { |
294 | /* If init failed, use direct iommu and null setup functions */ | 314 | ppc_md.iommu_dev_setup = iommu_dev_setup_dart; |
295 | ppc_md.iommu_dev_setup = iommu_dev_setup_null; | 315 | ppc_md.iommu_bus_setup = iommu_bus_setup_dart; |
296 | ppc_md.iommu_bus_setup = iommu_bus_setup_null; | ||
297 | |||
298 | /* Setup pci_dma ops */ | ||
299 | pci_direct_iommu_init(); | ||
300 | } else { | ||
301 | ppc_md.iommu_dev_setup = iommu_dev_setup_u3; | ||
302 | ppc_md.iommu_bus_setup = iommu_bus_setup_u3; | ||
303 | 316 | ||
304 | /* Setup pci_dma ops */ | 317 | /* Setup pci_dma ops */ |
305 | pci_iommu_init(); | 318 | pci_iommu_init(); |
319 | |||
320 | return; | ||
306 | } | 321 | } |
322 | |||
323 | bail: | ||
324 | /* If init failed, use direct iommu and null setup functions */ | ||
325 | ppc_md.iommu_dev_setup = iommu_dev_setup_null; | ||
326 | ppc_md.iommu_bus_setup = iommu_bus_setup_null; | ||
327 | |||
328 | /* Setup pci_dma ops */ | ||
329 | pci_direct_iommu_init(); | ||
307 | } | 330 | } |
308 | 331 | ||
309 | 332 | ||
310 | void __init alloc_u3_dart_table(void) | 333 | void __init alloc_dart_table(void) |
311 | { | 334 | { |
312 | /* Only reserve DART space if machine has more than 2GB of RAM | 335 | /* Only reserve DART space if machine has more than 2GB of RAM |
313 | * or if requested with iommu=on on cmdline. | 336 | * or if requested with iommu=on on cmdline. |
@@ -323,5 +346,5 @@ void __init alloc_u3_dart_table(void) | |||
323 | dart_tablebase = (unsigned long) | 346 | dart_tablebase = (unsigned long) |
324 | abs_to_virt(lmb_alloc_base(1UL<<24, 1UL<<24, 0x80000000L)); | 347 | abs_to_virt(lmb_alloc_base(1UL<<24, 1UL<<24, 0x80000000L)); |
325 | 348 | ||
326 | printk(KERN_INFO "U3-DART allocated at: %lx\n", dart_tablebase); | 349 | printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase); |
327 | } | 350 | } |
diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c new file mode 100644 index 000000000000..8f01e0f1d847 --- /dev/null +++ b/arch/powerpc/sysdev/ipic.c | |||
@@ -0,0 +1,646 @@ | |||
1 | /* | ||
2 | * include/asm-ppc/ipic.c | ||
3 | * | ||
4 | * IPIC routines implementations. | ||
5 | * | ||
6 | * Copyright 2005 Freescale Semiconductor, Inc. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | */ | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/reboot.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/stddef.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/signal.h> | ||
21 | #include <linux/sysdev.h> | ||
22 | #include <asm/irq.h> | ||
23 | #include <asm/io.h> | ||
24 | #include <asm/ipic.h> | ||
25 | #include <asm/mpc83xx.h> | ||
26 | |||
27 | #include "ipic.h" | ||
28 | |||
29 | static struct ipic p_ipic; | ||
30 | static struct ipic * primary_ipic; | ||
31 | |||
32 | static struct ipic_info ipic_info[] = { | ||
33 | [9] = { | ||
34 | .pend = IPIC_SIPNR_H, | ||
35 | .mask = IPIC_SIMSR_H, | ||
36 | .prio = IPIC_SIPRR_D, | ||
37 | .force = IPIC_SIFCR_H, | ||
38 | .bit = 24, | ||
39 | .prio_mask = 0, | ||
40 | }, | ||
41 | [10] = { | ||
42 | .pend = IPIC_SIPNR_H, | ||
43 | .mask = IPIC_SIMSR_H, | ||
44 | .prio = IPIC_SIPRR_D, | ||
45 | .force = IPIC_SIFCR_H, | ||
46 | .bit = 25, | ||
47 | .prio_mask = 1, | ||
48 | }, | ||
49 | [11] = { | ||
50 | .pend = IPIC_SIPNR_H, | ||
51 | .mask = IPIC_SIMSR_H, | ||
52 | .prio = IPIC_SIPRR_D, | ||
53 | .force = IPIC_SIFCR_H, | ||
54 | .bit = 26, | ||
55 | .prio_mask = 2, | ||
56 | }, | ||
57 | [14] = { | ||
58 | .pend = IPIC_SIPNR_H, | ||
59 | .mask = IPIC_SIMSR_H, | ||
60 | .prio = IPIC_SIPRR_D, | ||
61 | .force = IPIC_SIFCR_H, | ||
62 | .bit = 29, | ||
63 | .prio_mask = 5, | ||
64 | }, | ||
65 | [15] = { | ||
66 | .pend = IPIC_SIPNR_H, | ||
67 | .mask = IPIC_SIMSR_H, | ||
68 | .prio = IPIC_SIPRR_D, | ||
69 | .force = IPIC_SIFCR_H, | ||
70 | .bit = 30, | ||
71 | .prio_mask = 6, | ||
72 | }, | ||
73 | [16] = { | ||
74 | .pend = IPIC_SIPNR_H, | ||
75 | .mask = IPIC_SIMSR_H, | ||
76 | .prio = IPIC_SIPRR_D, | ||
77 | .force = IPIC_SIFCR_H, | ||
78 | .bit = 31, | ||
79 | .prio_mask = 7, | ||
80 | }, | ||
81 | [17] = { | ||
82 | .pend = IPIC_SEPNR, | ||
83 | .mask = IPIC_SEMSR, | ||
84 | .prio = IPIC_SMPRR_A, | ||
85 | .force = IPIC_SEFCR, | ||
86 | .bit = 1, | ||
87 | .prio_mask = 5, | ||
88 | }, | ||
89 | [18] = { | ||
90 | .pend = IPIC_SEPNR, | ||
91 | .mask = IPIC_SEMSR, | ||
92 | .prio = IPIC_SMPRR_A, | ||
93 | .force = IPIC_SEFCR, | ||
94 | .bit = 2, | ||
95 | .prio_mask = 6, | ||
96 | }, | ||
97 | [19] = { | ||
98 | .pend = IPIC_SEPNR, | ||
99 | .mask = IPIC_SEMSR, | ||
100 | .prio = IPIC_SMPRR_A, | ||
101 | .force = IPIC_SEFCR, | ||
102 | .bit = 3, | ||
103 | .prio_mask = 7, | ||
104 | }, | ||
105 | [20] = { | ||
106 | .pend = IPIC_SEPNR, | ||
107 | .mask = IPIC_SEMSR, | ||
108 | .prio = IPIC_SMPRR_B, | ||
109 | .force = IPIC_SEFCR, | ||
110 | .bit = 4, | ||
111 | .prio_mask = 4, | ||
112 | }, | ||
113 | [21] = { | ||
114 | .pend = IPIC_SEPNR, | ||
115 | .mask = IPIC_SEMSR, | ||
116 | .prio = IPIC_SMPRR_B, | ||
117 | .force = IPIC_SEFCR, | ||
118 | .bit = 5, | ||
119 | .prio_mask = 5, | ||
120 | }, | ||
121 | [22] = { | ||
122 | .pend = IPIC_SEPNR, | ||
123 | .mask = IPIC_SEMSR, | ||
124 | .prio = IPIC_SMPRR_B, | ||
125 | .force = IPIC_SEFCR, | ||
126 | .bit = 6, | ||
127 | .prio_mask = 6, | ||
128 | }, | ||
129 | [23] = { | ||
130 | .pend = IPIC_SEPNR, | ||
131 | .mask = IPIC_SEMSR, | ||
132 | .prio = IPIC_SMPRR_B, | ||
133 | .force = IPIC_SEFCR, | ||
134 | .bit = 7, | ||
135 | .prio_mask = 7, | ||
136 | }, | ||
137 | [32] = { | ||
138 | .pend = IPIC_SIPNR_H, | ||
139 | .mask = IPIC_SIMSR_H, | ||
140 | .prio = IPIC_SIPRR_A, | ||
141 | .force = IPIC_SIFCR_H, | ||
142 | .bit = 0, | ||
143 | .prio_mask = 0, | ||
144 | }, | ||
145 | [33] = { | ||
146 | .pend = IPIC_SIPNR_H, | ||
147 | .mask = IPIC_SIMSR_H, | ||
148 | .prio = IPIC_SIPRR_A, | ||
149 | .force = IPIC_SIFCR_H, | ||
150 | .bit = 1, | ||
151 | .prio_mask = 1, | ||
152 | }, | ||
153 | [34] = { | ||
154 | .pend = IPIC_SIPNR_H, | ||
155 | .mask = IPIC_SIMSR_H, | ||
156 | .prio = IPIC_SIPRR_A, | ||
157 | .force = IPIC_SIFCR_H, | ||
158 | .bit = 2, | ||
159 | .prio_mask = 2, | ||
160 | }, | ||
161 | [35] = { | ||
162 | .pend = IPIC_SIPNR_H, | ||
163 | .mask = IPIC_SIMSR_H, | ||
164 | .prio = IPIC_SIPRR_A, | ||
165 | .force = IPIC_SIFCR_H, | ||
166 | .bit = 3, | ||
167 | .prio_mask = 3, | ||
168 | }, | ||
169 | [36] = { | ||
170 | .pend = IPIC_SIPNR_H, | ||
171 | .mask = IPIC_SIMSR_H, | ||
172 | .prio = IPIC_SIPRR_A, | ||
173 | .force = IPIC_SIFCR_H, | ||
174 | .bit = 4, | ||
175 | .prio_mask = 4, | ||
176 | }, | ||
177 | [37] = { | ||
178 | .pend = IPIC_SIPNR_H, | ||
179 | .mask = IPIC_SIMSR_H, | ||
180 | .prio = IPIC_SIPRR_A, | ||
181 | .force = IPIC_SIFCR_H, | ||
182 | .bit = 5, | ||
183 | .prio_mask = 5, | ||
184 | }, | ||
185 | [38] = { | ||
186 | .pend = IPIC_SIPNR_H, | ||
187 | .mask = IPIC_SIMSR_H, | ||
188 | .prio = IPIC_SIPRR_A, | ||
189 | .force = IPIC_SIFCR_H, | ||
190 | .bit = 6, | ||
191 | .prio_mask = 6, | ||
192 | }, | ||
193 | [39] = { | ||
194 | .pend = IPIC_SIPNR_H, | ||
195 | .mask = IPIC_SIMSR_H, | ||
196 | .prio = IPIC_SIPRR_A, | ||
197 | .force = IPIC_SIFCR_H, | ||
198 | .bit = 7, | ||
199 | .prio_mask = 7, | ||
200 | }, | ||
201 | [48] = { | ||
202 | .pend = IPIC_SEPNR, | ||
203 | .mask = IPIC_SEMSR, | ||
204 | .prio = IPIC_SMPRR_A, | ||
205 | .force = IPIC_SEFCR, | ||
206 | .bit = 0, | ||
207 | .prio_mask = 4, | ||
208 | }, | ||
209 | [64] = { | ||
210 | .pend = IPIC_SIPNR_H, | ||
211 | .mask = IPIC_SIMSR_L, | ||
212 | .prio = IPIC_SMPRR_A, | ||
213 | .force = IPIC_SIFCR_L, | ||
214 | .bit = 0, | ||
215 | .prio_mask = 0, | ||
216 | }, | ||
217 | [65] = { | ||
218 | .pend = IPIC_SIPNR_H, | ||
219 | .mask = IPIC_SIMSR_L, | ||
220 | .prio = IPIC_SMPRR_A, | ||
221 | .force = IPIC_SIFCR_L, | ||
222 | .bit = 1, | ||
223 | .prio_mask = 1, | ||
224 | }, | ||
225 | [66] = { | ||
226 | .pend = IPIC_SIPNR_H, | ||
227 | .mask = IPIC_SIMSR_L, | ||
228 | .prio = IPIC_SMPRR_A, | ||
229 | .force = IPIC_SIFCR_L, | ||
230 | .bit = 2, | ||
231 | .prio_mask = 2, | ||
232 | }, | ||
233 | [67] = { | ||
234 | .pend = IPIC_SIPNR_H, | ||
235 | .mask = IPIC_SIMSR_L, | ||
236 | .prio = IPIC_SMPRR_A, | ||
237 | .force = IPIC_SIFCR_L, | ||
238 | .bit = 3, | ||
239 | .prio_mask = 3, | ||
240 | }, | ||
241 | [68] = { | ||
242 | .pend = IPIC_SIPNR_H, | ||
243 | .mask = IPIC_SIMSR_L, | ||
244 | .prio = IPIC_SMPRR_B, | ||
245 | .force = IPIC_SIFCR_L, | ||
246 | .bit = 4, | ||
247 | .prio_mask = 0, | ||
248 | }, | ||
249 | [69] = { | ||
250 | .pend = IPIC_SIPNR_H, | ||
251 | .mask = IPIC_SIMSR_L, | ||
252 | .prio = IPIC_SMPRR_B, | ||
253 | .force = IPIC_SIFCR_L, | ||
254 | .bit = 5, | ||
255 | .prio_mask = 1, | ||
256 | }, | ||
257 | [70] = { | ||
258 | .pend = IPIC_SIPNR_H, | ||
259 | .mask = IPIC_SIMSR_L, | ||
260 | .prio = IPIC_SMPRR_B, | ||
261 | .force = IPIC_SIFCR_L, | ||
262 | .bit = 6, | ||
263 | .prio_mask = 2, | ||
264 | }, | ||
265 | [71] = { | ||
266 | .pend = IPIC_SIPNR_H, | ||
267 | .mask = IPIC_SIMSR_L, | ||
268 | .prio = IPIC_SMPRR_B, | ||
269 | .force = IPIC_SIFCR_L, | ||
270 | .bit = 7, | ||
271 | .prio_mask = 3, | ||
272 | }, | ||
273 | [72] = { | ||
274 | .pend = IPIC_SIPNR_H, | ||
275 | .mask = IPIC_SIMSR_L, | ||
276 | .prio = 0, | ||
277 | .force = IPIC_SIFCR_L, | ||
278 | .bit = 8, | ||
279 | }, | ||
280 | [73] = { | ||
281 | .pend = IPIC_SIPNR_H, | ||
282 | .mask = IPIC_SIMSR_L, | ||
283 | .prio = 0, | ||
284 | .force = IPIC_SIFCR_L, | ||
285 | .bit = 9, | ||
286 | }, | ||
287 | [74] = { | ||
288 | .pend = IPIC_SIPNR_H, | ||
289 | .mask = IPIC_SIMSR_L, | ||
290 | .prio = 0, | ||
291 | .force = IPIC_SIFCR_L, | ||
292 | .bit = 10, | ||
293 | }, | ||
294 | [75] = { | ||
295 | .pend = IPIC_SIPNR_H, | ||
296 | .mask = IPIC_SIMSR_L, | ||
297 | .prio = 0, | ||
298 | .force = IPIC_SIFCR_L, | ||
299 | .bit = 11, | ||
300 | }, | ||
301 | [76] = { | ||
302 | .pend = IPIC_SIPNR_H, | ||
303 | .mask = IPIC_SIMSR_L, | ||
304 | .prio = 0, | ||
305 | .force = IPIC_SIFCR_L, | ||
306 | .bit = 12, | ||
307 | }, | ||
308 | [77] = { | ||
309 | .pend = IPIC_SIPNR_H, | ||
310 | .mask = IPIC_SIMSR_L, | ||
311 | .prio = 0, | ||
312 | .force = IPIC_SIFCR_L, | ||
313 | .bit = 13, | ||
314 | }, | ||
315 | [78] = { | ||
316 | .pend = IPIC_SIPNR_H, | ||
317 | .mask = IPIC_SIMSR_L, | ||
318 | .prio = 0, | ||
319 | .force = IPIC_SIFCR_L, | ||
320 | .bit = 14, | ||
321 | }, | ||
322 | [79] = { | ||
323 | .pend = IPIC_SIPNR_H, | ||
324 | .mask = IPIC_SIMSR_L, | ||
325 | .prio = 0, | ||
326 | .force = IPIC_SIFCR_L, | ||
327 | .bit = 15, | ||
328 | }, | ||
329 | [80] = { | ||
330 | .pend = IPIC_SIPNR_H, | ||
331 | .mask = IPIC_SIMSR_L, | ||
332 | .prio = 0, | ||
333 | .force = IPIC_SIFCR_L, | ||
334 | .bit = 16, | ||
335 | }, | ||
336 | [84] = { | ||
337 | .pend = IPIC_SIPNR_H, | ||
338 | .mask = IPIC_SIMSR_L, | ||
339 | .prio = 0, | ||
340 | .force = IPIC_SIFCR_L, | ||
341 | .bit = 20, | ||
342 | }, | ||
343 | [85] = { | ||
344 | .pend = IPIC_SIPNR_H, | ||
345 | .mask = IPIC_SIMSR_L, | ||
346 | .prio = 0, | ||
347 | .force = IPIC_SIFCR_L, | ||
348 | .bit = 21, | ||
349 | }, | ||
350 | [90] = { | ||
351 | .pend = IPIC_SIPNR_H, | ||
352 | .mask = IPIC_SIMSR_L, | ||
353 | .prio = 0, | ||
354 | .force = IPIC_SIFCR_L, | ||
355 | .bit = 26, | ||
356 | }, | ||
357 | [91] = { | ||
358 | .pend = IPIC_SIPNR_H, | ||
359 | .mask = IPIC_SIMSR_L, | ||
360 | .prio = 0, | ||
361 | .force = IPIC_SIFCR_L, | ||
362 | .bit = 27, | ||
363 | }, | ||
364 | }; | ||
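A standalone sketch of how the ipic_info[] table above is consumed by the enable/disable paths: the interrupt source number selects a register offset and a bit position counted from the most-significant bit (as in the Freescale manuals), and the handler does a read-modify-write of that single bit. The register file and the two example entries below are stand-ins.

/* Standalone sketch of table-driven mask-bit read-modify-write. */
#include <stdio.h>

struct info { unsigned char reg; unsigned char bit; };

static unsigned int regfile[8];                /* pretend IPIC registers */
static const struct info src_info[2] = {
    { 0, 24 },    /* e.g. an internal source masked by bit 24 of reg 0 */
    { 1,  1 },    /* e.g. an external IRQ masked by bit 1 of reg 1 */
};

static void enable_src(int src)
{
    regfile[src_info[src].reg] |= 1u << (31 - src_info[src].bit);
}

int main(void)
{
    enable_src(0);
    enable_src(1);
    printf("reg0=0x%08x reg1=0x%08x\n", regfile[0], regfile[1]);
    return 0;
}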
365 | |||
366 | static inline u32 ipic_read(volatile u32 __iomem *base, unsigned int reg) | ||
367 | { | ||
368 | return in_be32(base + (reg >> 2)); | ||
369 | } | ||
370 | |||
371 | static inline void ipic_write(volatile u32 __iomem *base, unsigned int reg, u32 value) | ||
372 | { | ||
373 | out_be32(base + (reg >> 2), value); | ||
374 | } | ||
375 | |||
376 | static inline struct ipic * ipic_from_irq(unsigned int irq) | ||
377 | { | ||
378 | return primary_ipic; | ||
379 | } | ||
380 | |||
381 | static void ipic_enable_irq(unsigned int irq) | ||
382 | { | ||
383 | struct ipic *ipic = ipic_from_irq(irq); | ||
384 | unsigned int src = irq - ipic->irq_offset; | ||
385 | u32 temp; | ||
386 | |||
387 | temp = ipic_read(ipic->regs, ipic_info[src].mask); | ||
388 | temp |= (1 << (31 - ipic_info[src].bit)); | ||
389 | ipic_write(ipic->regs, ipic_info[src].mask, temp); | ||
390 | } | ||
391 | |||
392 | static void ipic_disable_irq(unsigned int irq) | ||
393 | { | ||
394 | struct ipic *ipic = ipic_from_irq(irq); | ||
395 | unsigned int src = irq - ipic->irq_offset; | ||
396 | u32 temp; | ||
397 | |||
398 | temp = ipic_read(ipic->regs, ipic_info[src].mask); | ||
399 | temp &= ~(1 << (31 - ipic_info[src].bit)); | ||
400 | ipic_write(ipic->regs, ipic_info[src].mask, temp); | ||
401 | } | ||
402 | |||
403 | static void ipic_disable_irq_and_ack(unsigned int irq) | ||
404 | { | ||
405 | struct ipic *ipic = ipic_from_irq(irq); | ||
406 | unsigned int src = irq - ipic->irq_offset; | ||
407 | u32 temp; | ||
408 | |||
409 | ipic_disable_irq(irq); | ||
410 | |||
411 | temp = ipic_read(ipic->regs, ipic_info[src].pend); | ||
412 | temp |= (1 << (31 - ipic_info[src].bit)); | ||
413 | ipic_write(ipic->regs, ipic_info[src].pend, temp); | ||
414 | } | ||
415 | |||
416 | static void ipic_end_irq(unsigned int irq) | ||
417 | { | ||
418 | if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) | ||
419 | ipic_enable_irq(irq); | ||
420 | } | ||
421 | |||
422 | struct hw_interrupt_type ipic = { | ||
423 | .typename = " IPIC ", | ||
424 | .enable = ipic_enable_irq, | ||
425 | .disable = ipic_disable_irq, | ||
426 | .ack = ipic_disable_irq_and_ack, | ||
427 | .end = ipic_end_irq, | ||
428 | }; | ||
429 | |||
430 | void __init ipic_init(phys_addr_t phys_addr, | ||
431 | unsigned int flags, | ||
432 | unsigned int irq_offset, | ||
433 | unsigned char *senses, | ||
434 | unsigned int senses_count) | ||
435 | { | ||
436 | u32 i, temp = 0; | ||
437 | |||
438 | primary_ipic = &p_ipic; | ||
439 | primary_ipic->regs = ioremap(phys_addr, MPC83xx_IPIC_SIZE); | ||
440 | |||
441 | primary_ipic->irq_offset = irq_offset; | ||
442 | |||
443 | ipic_write(primary_ipic->regs, IPIC_SICNR, 0x0); | ||
444 | |||
445 | /* default priority scheme is grouped. If spread mode is required | ||
446 | * configure SICFR accordingly */ | ||
447 | if (flags & IPIC_SPREADMODE_GRP_A) | ||
448 | temp |= SICFR_IPSA; | ||
449 | if (flags & IPIC_SPREADMODE_GRP_D) | ||
450 | temp |= SICFR_IPSD; | ||
451 | if (flags & IPIC_SPREADMODE_MIX_A) | ||
452 | temp |= SICFR_MPSA; | ||
453 | if (flags & IPIC_SPREADMODE_MIX_B) | ||
454 | temp |= SICFR_MPSB; | ||
455 | |||
456 | ipic_write(primary_ipic->regs, IPIC_SICNR, temp); | ||
457 | |||
458 | /* handle MCP route */ | ||
459 | temp = 0; | ||
460 | if (flags & IPIC_DISABLE_MCP_OUT) | ||
461 | temp = SERCR_MCPR; | ||
462 | ipic_write(primary_ipic->regs, IPIC_SERCR, temp); | ||
463 | |||
464 | /* handle routing of IRQ0 to MCP */ | ||
465 | temp = ipic_read(primary_ipic->regs, IPIC_SEMSR); | ||
466 | |||
467 | if (flags & IPIC_IRQ0_MCP) | ||
468 | temp |= SEMSR_SIRQ0; | ||
469 | else | ||
470 | temp &= ~SEMSR_SIRQ0; | ||
471 | |||
472 | ipic_write(primary_ipic->regs, IPIC_SEMSR, temp); | ||
473 | |||
474 | for (i = 0 ; i < NR_IPIC_INTS ; i++) { | ||
475 | irq_desc[i+irq_offset].handler = &ipic; | ||
476 | irq_desc[i+irq_offset].status = IRQ_LEVEL; | ||
477 | } | ||
478 | |||
479 | temp = 0; | ||
480 | for (i = 0 ; i < senses_count ; i++) { | ||
481 | if ((senses[i] & IRQ_SENSE_MASK) == IRQ_SENSE_EDGE) { | ||
482 | temp |= 1 << (15 - i); | ||
483 | if (i != 0) | ||
484 | irq_desc[i + irq_offset + MPC83xx_IRQ_EXT1 - 1].status = 0; | ||
485 | else | ||
486 | irq_desc[irq_offset + MPC83xx_IRQ_EXT0].status = 0; | ||
487 | } | ||
488 | } | ||
489 | ipic_write(primary_ipic->regs, IPIC_SECNR, temp); | ||
490 | |||
491 | printk ("IPIC (%d IRQ sources, %d External IRQs) at %p\n", NR_IPIC_INTS, | ||
492 | senses_count, primary_ipic->regs); | ||
493 | } | ||
494 | |||
495 | int ipic_set_priority(unsigned int irq, unsigned int priority) | ||
496 | { | ||
497 | struct ipic *ipic = ipic_from_irq(irq); | ||
498 | unsigned int src = irq - ipic->irq_offset; | ||
499 | u32 temp; | ||
500 | |||
501 | if (priority > 7) | ||
502 | return -EINVAL; | ||
503 | if (src > 127) | ||
504 | return -EINVAL; | ||
505 | if (ipic_info[src].prio == 0) | ||
506 | return -EINVAL; | ||
507 | |||
508 | temp = ipic_read(ipic->regs, ipic_info[src].prio); | ||
509 | |||
510 | if (priority < 4) { | ||
511 | temp &= ~(0x7 << (20 + (3 - priority) * 3)); | ||
512 | temp |= ipic_info[src].prio_mask << (20 + (3 - priority) * 3); | ||
513 | } else { | ||
514 | temp &= ~(0x7 << (4 + (7 - priority) * 3)); | ||
515 | temp |= ipic_info[src].prio_mask << (4 + (7 - priority) * 3); | ||
516 | } | ||
517 | |||
518 | ipic_write(ipic->regs, ipic_info[src].prio, temp); | ||
519 | |||
520 | return 0; | ||
521 | } | ||
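A standalone sketch of the priority-field placement used by ipic_set_priority(): priorities 0-3 land in 3-bit fields starting at bit 20, priorities 4-7 in fields starting at bit 4, and the per-source prio_mask value is what gets written into the selected field. The prio_mask value of 5 below is just an example.

/* Standalone sketch of the priority bit-field arithmetic. */
#include <stdio.h>

static unsigned int place(unsigned int prio_mask, unsigned int priority)
{
    if (priority < 4)
        return prio_mask << (20 + (3 - priority) * 3);
    return prio_mask << (4 + (7 - priority) * 3);
}

int main(void)
{
    printf("prio 0: 0x%08x\n", place(5, 0));   /* highest field, bits 29-31 */
    printf("prio 7: 0x%08x\n", place(5, 7));   /* lowest field, bits 4-6 */
    return 0;
}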
522 | |||
523 | void ipic_set_highest_priority(unsigned int irq) | ||
524 | { | ||
525 | struct ipic *ipic = ipic_from_irq(irq); | ||
526 | unsigned int src = irq - ipic->irq_offset; | ||
527 | u32 temp; | ||
528 | |||
529 | temp = ipic_read(ipic->regs, IPIC_SICFR); | ||
530 | |||
531 | /* clear and set HPI */ | ||
532 | temp &= 0x7f000000; | ||
533 | temp |= (src & 0x7f) << 24; | ||
534 | |||
535 | ipic_write(ipic->regs, IPIC_SICFR, temp); | ||
536 | } | ||
537 | |||
538 | void ipic_set_default_priority(void) | ||
539 | { | ||
540 | ipic_set_priority(MPC83xx_IRQ_TSEC1_TX, 0); | ||
541 | ipic_set_priority(MPC83xx_IRQ_TSEC1_RX, 1); | ||
542 | ipic_set_priority(MPC83xx_IRQ_TSEC1_ERROR, 2); | ||
543 | ipic_set_priority(MPC83xx_IRQ_TSEC2_TX, 3); | ||
544 | ipic_set_priority(MPC83xx_IRQ_TSEC2_RX, 4); | ||
545 | ipic_set_priority(MPC83xx_IRQ_TSEC2_ERROR, 5); | ||
546 | ipic_set_priority(MPC83xx_IRQ_USB2_DR, 6); | ||
547 | ipic_set_priority(MPC83xx_IRQ_USB2_MPH, 7); | ||
548 | |||
549 | ipic_set_priority(MPC83xx_IRQ_UART1, 0); | ||
550 | ipic_set_priority(MPC83xx_IRQ_UART2, 1); | ||
551 | ipic_set_priority(MPC83xx_IRQ_SEC2, 2); | ||
552 | ipic_set_priority(MPC83xx_IRQ_IIC1, 5); | ||
553 | ipic_set_priority(MPC83xx_IRQ_IIC2, 6); | ||
554 | ipic_set_priority(MPC83xx_IRQ_SPI, 7); | ||
555 | ipic_set_priority(MPC83xx_IRQ_RTC_SEC, 0); | ||
556 | ipic_set_priority(MPC83xx_IRQ_PIT, 1); | ||
557 | ipic_set_priority(MPC83xx_IRQ_PCI1, 2); | ||
558 | ipic_set_priority(MPC83xx_IRQ_PCI2, 3); | ||
559 | ipic_set_priority(MPC83xx_IRQ_EXT0, 4); | ||
560 | ipic_set_priority(MPC83xx_IRQ_EXT1, 5); | ||
561 | ipic_set_priority(MPC83xx_IRQ_EXT2, 6); | ||
562 | ipic_set_priority(MPC83xx_IRQ_EXT3, 7); | ||
563 | ipic_set_priority(MPC83xx_IRQ_RTC_ALR, 0); | ||
564 | ipic_set_priority(MPC83xx_IRQ_MU, 1); | ||
565 | ipic_set_priority(MPC83xx_IRQ_SBA, 2); | ||
566 | ipic_set_priority(MPC83xx_IRQ_DMA, 3); | ||
567 | ipic_set_priority(MPC83xx_IRQ_EXT4, 4); | ||
568 | ipic_set_priority(MPC83xx_IRQ_EXT5, 5); | ||
569 | ipic_set_priority(MPC83xx_IRQ_EXT6, 6); | ||
570 | ipic_set_priority(MPC83xx_IRQ_EXT7, 7); | ||
571 | } | ||
572 | |||
573 | void ipic_enable_mcp(enum ipic_mcp_irq mcp_irq) | ||
574 | { | ||
575 | struct ipic *ipic = primary_ipic; | ||
576 | u32 temp; | ||
577 | |||
578 | temp = ipic_read(ipic->regs, IPIC_SERMR); | ||
579 | temp |= (1 << (31 - mcp_irq)); | ||
580 | ipic_write(ipic->regs, IPIC_SERMR, temp); | ||
581 | } | ||
582 | |||
583 | void ipic_disable_mcp(enum ipic_mcp_irq mcp_irq) | ||
584 | { | ||
585 | struct ipic *ipic = primary_ipic; | ||
586 | u32 temp; | ||
587 | |||
588 | temp = ipic_read(ipic->regs, IPIC_SERMR); | ||
589 | temp &= (1 << (31 - mcp_irq)); | ||
590 | ipic_write(ipic->regs, IPIC_SERMR, temp); | ||
591 | } | ||
592 | |||
593 | u32 ipic_get_mcp_status(void) | ||
594 | { | ||
595 | return ipic_read(primary_ipic->regs, IPIC_SERMR); | ||
596 | } | ||
597 | |||
598 | void ipic_clear_mcp_status(u32 mask) | ||
599 | { | ||
600 | ipic_write(primary_ipic->regs, IPIC_SERMR, mask); | ||
601 | } | ||
602 | |||
603 | /* Return an interrupt vector or -1 if no interrupt is pending. */ | ||
604 | int ipic_get_irq(struct pt_regs *regs) | ||
605 | { | ||
606 | int irq; | ||
607 | |||
608 | irq = ipic_read(primary_ipic->regs, IPIC_SIVCR) & 0x7f; | ||
609 | |||
610 | if (irq == 0) /* 0 --> no irq is pending */ | ||
611 | irq = -1; | ||
612 | |||
613 | return irq; | ||
614 | } | ||
615 | |||
616 | static struct sysdev_class ipic_sysclass = { | ||
617 | set_kset_name("ipic"), | ||
618 | }; | ||
619 | |||
620 | static struct sys_device device_ipic = { | ||
621 | .id = 0, | ||
622 | .cls = &ipic_sysclass, | ||
623 | }; | ||
624 | |||
625 | static int __init init_ipic_sysfs(void) | ||
626 | { | ||
627 | int rc; | ||
628 | |||
629 | if (!primary_ipic->regs) | ||
630 | return -ENODEV; | ||
631 | printk(KERN_DEBUG "Registering ipic with sysfs...\n"); | ||
632 | |||
633 | rc = sysdev_class_register(&ipic_sysclass); | ||
634 | if (rc) { | ||
635 | printk(KERN_ERR "Failed registering ipic sys class\n"); | ||
636 | return -ENODEV; | ||
637 | } | ||
638 | rc = sysdev_register(&device_ipic); | ||
639 | if (rc) { | ||
640 | printk(KERN_ERR "Failed registering ipic sys device\n"); | ||
641 | return -ENODEV; | ||
642 | } | ||
643 | return 0; | ||
644 | } | ||
645 | |||
646 | subsys_initcall(init_ipic_sysfs); | ||
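A hedged sketch (not part of the patch) of how a board port could hook up the IPIC routines above. The ppc_md.get_irq member and the IPIC_MCP_WDT enumerator are assumptions about the machdep and <asm/ipic.h> interfaces of this kernel generation, used here only for illustration.

#include <linux/init.h>
#include <linux/types.h>
#include <asm/ipic.h>
#include <asm/machdep.h>

/* Illustrative board hook-up; ppc_md.get_irq and IPIC_MCP_WDT are assumed. */
static void __init example_board_init_IRQ(void)
{
	/* route interrupt acknowledgement through ipic_get_irq() above */
	ppc_md.get_irq = ipic_get_irq;

	/* let the watchdog raise a machine check via the MCP path */
	ipic_enable_mcp(IPIC_MCP_WDT);
}

/* Illustrative machine-check hook, following the status routines above. */
static void example_board_machine_check(void)
{
	u32 pending = ipic_get_mcp_status();

	/* acknowledge whatever MCP sources are currently flagged */
	ipic_clear_mcp_status(pending);
}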
diff --git a/arch/powerpc/sysdev/ipic.h b/arch/powerpc/sysdev/ipic.h new file mode 100644 index 000000000000..a7ce7da8785c --- /dev/null +++ b/arch/powerpc/sysdev/ipic.h | |||
@@ -0,0 +1,49 @@ | |||
1 | /* | ||
2 | * arch/powerpc/sysdev/ipic.h | ||
3 | * | ||
4 | * IPIC private definitions and structure. | ||
5 | * | ||
6 | * Maintainer: Kumar Gala <galak@kernel.crashing.org> | ||
7 | * | ||
8 | * Copyright 2005 Freescale Semiconductor, Inc | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the | ||
12 | * Free Software Foundation; either version 2 of the License, or (at your | ||
13 | * option) any later version. | ||
14 | */ | ||
15 | #ifndef __IPIC_H__ | ||
16 | #define __IPIC_H__ | ||
17 | |||
18 | #include <asm/ipic.h> | ||
19 | |||
20 | #define MPC83xx_IPIC_SIZE (0x00100) | ||
21 | |||
22 | /* System Global Interrupt Configuration Register */ | ||
23 | #define SICFR_IPSA 0x00010000 | ||
24 | #define SICFR_IPSD 0x00080000 | ||
25 | #define SICFR_MPSA 0x00200000 | ||
26 | #define SICFR_MPSB 0x00400000 | ||
27 | |||
28 | /* System External Interrupt Mask Register */ | ||
29 | #define SEMSR_SIRQ0 0x00008000 | ||
30 | |||
31 | /* System Error Control Register */ | ||
32 | #define SERCR_MCPR 0x00000001 | ||
33 | |||
34 | struct ipic { | ||
35 | volatile u32 __iomem *regs; | ||
36 | unsigned int irq_offset; | ||
37 | }; | ||
38 | |||
39 | struct ipic_info { | ||
40 | u8 pend; /* pending register offset from base */ | ||
41 | u8 mask; /* mask register offset from base */ | ||
42 | u8 prio; /* priority register offset from base */ | ||
43 | u8 force; /* force register offset from base */ | ||
44 | u8 bit; /* register bit position (as per doc) | ||
45 | bit mask = 1 << (31 - bit) */ | ||
46 | u8 prio_mask; /* priority mask value */ | ||
47 | }; | ||
48 | |||
49 | #endif /* __IPIC_H__ */ | ||
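For reference, a minimal sketch of the bit/offset convention the ipic_info comment describes. The in_be32() accessor and the byte-offset reading of the mask field are assumptions; the real ipic_read()/ipic_write() helpers live in ipic.c, not in this header.

#include <linux/types.h>
#include <asm/io.h>
#include "ipic.h"

/* Illustration only: "bit" counts from the MSB, so bit 0 is 0x80000000. */
static inline u32 ipic_info_mask_bit(const struct ipic_info *info)
{
	return 1U << (31 - info->bit);
}

/* Illustration only: treat info->mask as a byte offset from the IPIC base. */
static inline u32 ipic_read_mask_reg(volatile u32 __iomem *base,
				     const struct ipic_info *info)
{
	return in_be32(base + (info->mask >> 2));
}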
diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 58d1cc2023c8..4f26304d0263 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c | |||
@@ -13,6 +13,9 @@ | |||
13 | */ | 13 | */ |
14 | 14 | ||
15 | #undef DEBUG | 15 | #undef DEBUG |
16 | #undef DEBUG_IPI | ||
17 | #undef DEBUG_IRQ | ||
18 | #undef DEBUG_LOW | ||
16 | 19 | ||
17 | #include <linux/config.h> | 20 | #include <linux/config.h> |
18 | #include <linux/types.h> | 21 | #include <linux/types.h> |
@@ -45,7 +48,11 @@ static struct mpic *mpic_primary; | |||
45 | static DEFINE_SPINLOCK(mpic_lock); | 48 | static DEFINE_SPINLOCK(mpic_lock); |
46 | 49 | ||
47 | #ifdef CONFIG_PPC32 /* XXX for now */ | 50 | #ifdef CONFIG_PPC32 /* XXX for now */ |
48 | #define distribute_irqs CONFIG_IRQ_ALL_CPUS | 51 | #ifdef CONFIG_IRQ_ALL_CPUS |
52 | #define distribute_irqs (1) | ||
53 | #else | ||
54 | #define distribute_irqs (0) | ||
55 | #endif | ||
49 | #endif | 56 | #endif |
50 | 57 | ||
51 | /* | 58 | /* |
@@ -164,70 +171,129 @@ static void __init mpic_test_broken_ipi(struct mpic *mpic) | |||
164 | /* Test if an interrupt is sourced from HyperTransport (used on broken U3s) | 171 | /* Test if an interrupt is sourced from HyperTransport (used on broken U3s) |
165 | * to force the edge setting on the MPIC and do the ack workaround. | 172 | * to force the edge setting on the MPIC and do the ack workaround. |
166 | */ | 173 | */ |
167 | static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source_no) | 174 | static inline int mpic_is_ht_interrupt(struct mpic *mpic, unsigned int source) |
168 | { | 175 | { |
169 | if (source_no >= 128 || !mpic->fixups) | 176 | if (source >= 128 || !mpic->fixups) |
170 | return 0; | 177 | return 0; |
171 | return mpic->fixups[source_no].base != NULL; | 178 | return mpic->fixups[source].base != NULL; |
172 | } | 179 | } |
173 | 180 | ||
174 | static inline void mpic_apic_end_irq(struct mpic *mpic, unsigned int source_no) | 181 | |
182 | static inline void mpic_ht_end_irq(struct mpic *mpic, unsigned int source) | ||
175 | { | 183 | { |
176 | struct mpic_irq_fixup *fixup = &mpic->fixups[source_no]; | 184 | struct mpic_irq_fixup *fixup = &mpic->fixups[source]; |
177 | u32 tmp; | ||
178 | 185 | ||
179 | spin_lock(&mpic->fixup_lock); | 186 | if (fixup->applebase) { |
180 | writeb(0x11 + 2 * fixup->irq, fixup->base); | 187 | unsigned int soff = (fixup->index >> 3) & ~3; |
181 | tmp = readl(fixup->base + 2); | 188 | unsigned int mask = 1U << (fixup->index & 0x1f); |
182 | writel(tmp | 0x80000000ul, fixup->base + 2); | 189 | writel(mask, fixup->applebase + soff); |
183 | /* config writes shouldn't be posted but let's be safe ... */ | 190 | } else { |
184 | (void)readl(fixup->base + 2); | 191 | spin_lock(&mpic->fixup_lock); |
185 | spin_unlock(&mpic->fixup_lock); | 192 | writeb(0x11 + 2 * fixup->index, fixup->base + 2); |
193 | writel(fixup->data, fixup->base + 4); | ||
194 | spin_unlock(&mpic->fixup_lock); | ||
195 | } | ||
186 | } | 196 | } |
187 | 197 | ||
198 | static void mpic_startup_ht_interrupt(struct mpic *mpic, unsigned int source, | ||
199 | unsigned int irqflags) | ||
200 | { | ||
201 | struct mpic_irq_fixup *fixup = &mpic->fixups[source]; | ||
202 | unsigned long flags; | ||
203 | u32 tmp; | ||
188 | 204 | ||
189 | static void __init mpic_amd8111_read_irq(struct mpic *mpic, u8 __iomem *devbase) | 205 | if (fixup->base == NULL) |
206 | return; | ||
207 | |||
208 | DBG("startup_ht_interrupt(%u, %u) index: %d\n", | ||
209 | source, irqflags, fixup->index); | ||
210 | spin_lock_irqsave(&mpic->fixup_lock, flags); | ||
211 | /* Enable and configure */ | ||
212 | writeb(0x10 + 2 * fixup->index, fixup->base + 2); | ||
213 | tmp = readl(fixup->base + 4); | ||
214 | tmp &= ~(0x23U); | ||
215 | if (irqflags & IRQ_LEVEL) | ||
216 | tmp |= 0x22; | ||
217 | writel(tmp, fixup->base + 4); | ||
218 | spin_unlock_irqrestore(&mpic->fixup_lock, flags); | ||
219 | } | ||
220 | |||
221 | static void mpic_shutdown_ht_interrupt(struct mpic *mpic, unsigned int source, | ||
222 | unsigned int irqflags) | ||
190 | { | 223 | { |
191 | int i, irq; | 224 | struct mpic_irq_fixup *fixup = &mpic->fixups[source]; |
225 | unsigned long flags; | ||
192 | u32 tmp; | 226 | u32 tmp; |
193 | 227 | ||
194 | printk(KERN_INFO "mpic: - Workarounds on AMD 8111 @ %p\n", devbase); | 228 | if (fixup->base == NULL) |
229 | return; | ||
195 | 230 | ||
196 | for (i=0; i < 24; i++) { | 231 | DBG("shutdown_ht_interrupt(%u, %u)\n", source, irqflags); |
197 | writeb(0x10 + 2*i, devbase + 0xf2); | 232 | |
198 | tmp = readl(devbase + 0xf4); | 233 | /* Disable */ |
199 | if ((tmp & 0x1) || !(tmp & 0x20)) | 234 | spin_lock_irqsave(&mpic->fixup_lock, flags); |
200 | continue; | 235 | writeb(0x10 + 2 * fixup->index, fixup->base + 2); |
201 | irq = (tmp >> 16) & 0xff; | 236 | tmp = readl(fixup->base + 4); |
202 | mpic->fixups[irq].irq = i; | 237 | tmp &= ~1U; |
203 | mpic->fixups[irq].base = devbase + 0xf2; | 238 | writel(tmp, fixup->base + 4); |
204 | } | 239 | spin_unlock_irqrestore(&mpic->fixup_lock, flags); |
205 | } | 240 | } |
206 | 241 | ||
207 | static void __init mpic_amd8131_read_irq(struct mpic *mpic, u8 __iomem *devbase) | 242 | static void __init mpic_scan_ht_pic(struct mpic *mpic, u8 __iomem *devbase, |
243 | unsigned int devfn, u32 vdid) | ||
208 | { | 244 | { |
209 | int i, irq; | 245 | int i, irq, n; |
246 | u8 __iomem *base; | ||
210 | u32 tmp; | 247 | u32 tmp; |
248 | u8 pos; | ||
249 | |||
250 | for (pos = readb(devbase + PCI_CAPABILITY_LIST); pos != 0; | ||
251 | pos = readb(devbase + pos + PCI_CAP_LIST_NEXT)) { | ||
252 | u8 id = readb(devbase + pos + PCI_CAP_LIST_ID); | ||
253 | if (id == PCI_CAP_ID_HT_IRQCONF) { | ||
254 | id = readb(devbase + pos + 3); | ||
255 | if (id == 0x80) | ||
256 | break; | ||
257 | } | ||
258 | } | ||
259 | if (pos == 0) | ||
260 | return; | ||
211 | 261 | ||
212 | printk(KERN_INFO "mpic: - Workarounds on AMD 8131 @ %p\n", devbase); | 262 | base = devbase + pos; |
263 | writeb(0x01, base + 2); | ||
264 | n = (readl(base + 4) >> 16) & 0xff; | ||
213 | 265 | ||
214 | for (i=0; i < 4; i++) { | 266 | printk(KERN_INFO "mpic: - HT:%02x.%x [0x%02x] vendor %04x device %04x" |
215 | writeb(0x10 + 2*i, devbase + 0xba); | 267 | " has %d irqs\n", |
216 | tmp = readl(devbase + 0xbc); | 268 | devfn >> 3, devfn & 0x7, pos, vdid & 0xffff, vdid >> 16, n + 1); |
217 | if ((tmp & 0x1) || !(tmp & 0x20)) | 269 | |
218 | continue; | 270 | for (i = 0; i <= n; i++) { |
271 | writeb(0x10 + 2 * i, base + 2); | ||
272 | tmp = readl(base + 4); | ||
219 | irq = (tmp >> 16) & 0xff; | 273 | irq = (tmp >> 16) & 0xff; |
220 | mpic->fixups[irq].irq = i; | 274 | DBG("HT PIC index 0x%x, irq 0x%x, tmp: %08x\n", i, irq, tmp); |
221 | mpic->fixups[irq].base = devbase + 0xba; | 275 | /* mask it , will be unmasked later */ |
276 | tmp |= 0x1; | ||
277 | writel(tmp, base + 4); | ||
278 | mpic->fixups[irq].index = i; | ||
279 | mpic->fixups[irq].base = base; | ||
280 | /* Apple HT PIC has a non-standard way of doing EOIs */ | ||
281 | if ((vdid & 0xffff) == 0x106b) | ||
282 | mpic->fixups[irq].applebase = devbase + 0x60; | ||
283 | else | ||
284 | mpic->fixups[irq].applebase = NULL; | ||
285 | writeb(0x11 + 2 * i, base + 2); | ||
286 | mpic->fixups[irq].data = readl(base + 4) | 0x80000000; | ||
222 | } | 287 | } |
223 | } | 288 | } |
224 | 289 | ||
225 | static void __init mpic_scan_ioapics(struct mpic *mpic) | 290 | |
291 | static void __init mpic_scan_ht_pics(struct mpic *mpic) | ||
226 | { | 292 | { |
227 | unsigned int devfn; | 293 | unsigned int devfn; |
228 | u8 __iomem *cfgspace; | 294 | u8 __iomem *cfgspace; |
229 | 295 | ||
230 | printk(KERN_INFO "mpic: Setting up IO-APICs workarounds for U3\n"); | 296 | printk(KERN_INFO "mpic: Setting up HT PICs workarounds for U3/U4\n"); |
231 | 297 | ||
232 | /* Allocate fixups array */ | 298 | /* Allocate fixups array */ |
233 | mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup)); | 299 | mpic->fixups = alloc_bootmem(128 * sizeof(struct mpic_irq_fixup)); |
@@ -237,21 +303,20 @@ static void __init mpic_scan_ioapics(struct mpic *mpic) | |||
237 | /* Init spinlock */ | 303 | /* Init spinlock */ |
238 | spin_lock_init(&mpic->fixup_lock); | 304 | spin_lock_init(&mpic->fixup_lock); |
239 | 305 | ||
240 | /* Map u3 config space. We assume all IO-APICs are on the primary bus | 306 | /* Map U3 config space. We assume all IO-APICs are on the primary bus |
241 | * and slot will never be above "0xf" so we only need to map 32k | 307 | * so we only need to map 64kB. |
242 | */ | 308 | */ |
243 | cfgspace = (unsigned char __iomem *)ioremap(0xf2000000, 0x8000); | 309 | cfgspace = ioremap(0xf2000000, 0x10000); |
244 | BUG_ON(cfgspace == NULL); | 310 | BUG_ON(cfgspace == NULL); |
245 | 311 | ||
246 | /* Now we scan all slots. We do a very quick scan, we read the header type, | 312 | /* Now we scan all slots. We do a very quick scan, we read the header |
247 | * vendor ID and device ID only, that's plenty enough | 313 | * type, vendor ID and device ID only, that's plenty enough |
248 | */ | 314 | */ |
249 | for (devfn = 0; devfn < PCI_DEVFN(0x10,0); devfn ++) { | 315 | for (devfn = 0; devfn < 0x100; devfn++) { |
250 | u8 __iomem *devbase = cfgspace + (devfn << 8); | 316 | u8 __iomem *devbase = cfgspace + (devfn << 8); |
251 | u8 hdr_type = readb(devbase + PCI_HEADER_TYPE); | 317 | u8 hdr_type = readb(devbase + PCI_HEADER_TYPE); |
252 | u32 l = readl(devbase + PCI_VENDOR_ID); | 318 | u32 l = readl(devbase + PCI_VENDOR_ID); |
253 | u16 vendor_id, device_id; | 319 | u16 s; |
254 | int multifunc = 0; | ||
255 | 320 | ||
256 | DBG("devfn %x, l: %x\n", devfn, l); | 321 | DBG("devfn %x, l: %x\n", devfn, l); |
257 | 322 | ||
@@ -259,22 +324,16 @@ static void __init mpic_scan_ioapics(struct mpic *mpic) | |||
259 | if (l == 0xffffffff || l == 0x00000000 || | 324 | if (l == 0xffffffff || l == 0x00000000 || |
260 | l == 0x0000ffff || l == 0xffff0000) | 325 | l == 0x0000ffff || l == 0xffff0000) |
261 | goto next; | 326 | goto next; |
327 | /* Check if it supports capability lists */ | ||
328 | s = readw(devbase + PCI_STATUS); | ||
329 | if (!(s & PCI_STATUS_CAP_LIST)) | ||
330 | goto next; | ||
331 | |||
332 | mpic_scan_ht_pic(mpic, devbase, devfn, l); | ||
262 | 333 | ||
263 | /* Check if it's a multifunction device (only really used | ||
264 | * to function 0 though | ||
265 | */ | ||
266 | multifunc = !!(hdr_type & 0x80); | ||
267 | vendor_id = l & 0xffff; | ||
268 | device_id = (l >> 16) & 0xffff; | ||
269 | |||
270 | /* If a known device, go to fixup setup code */ | ||
271 | if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7460) | ||
272 | mpic_amd8111_read_irq(mpic, devbase); | ||
273 | if (vendor_id == PCI_VENDOR_ID_AMD && device_id == 0x7450) | ||
274 | mpic_amd8131_read_irq(mpic, devbase); | ||
275 | next: | 334 | next: |
276 | /* next device, if function 0 */ | 335 | /* next device, if function 0 */ |
277 | if ((PCI_FUNC(devfn) == 0) && !multifunc) | 336 | if (PCI_FUNC(devfn) == 0 && (hdr_type & 0x80) == 0) |
278 | devfn += 7; | 337 | devfn += 7; |
279 | } | 338 | } |
280 | } | 339 | } |
@@ -371,6 +430,31 @@ static void mpic_enable_irq(unsigned int irq) | |||
371 | break; | 430 | break; |
372 | } | 431 | } |
373 | } while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK); | 432 | } while(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK); |
433 | |||
434 | #ifdef CONFIG_MPIC_BROKEN_U3 | ||
435 | if (mpic->flags & MPIC_BROKEN_U3) { | ||
436 | unsigned int src = irq - mpic->irq_offset; | ||
437 | if (mpic_is_ht_interrupt(mpic, src) && | ||
438 | (irq_desc[irq].status & IRQ_LEVEL)) | ||
439 | mpic_ht_end_irq(mpic, src); | ||
440 | } | ||
441 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | ||
442 | } | ||
443 | |||
444 | static unsigned int mpic_startup_irq(unsigned int irq) | ||
445 | { | ||
446 | #ifdef CONFIG_MPIC_BROKEN_U3 | ||
447 | struct mpic *mpic = mpic_from_irq(irq); | ||
448 | unsigned int src = irq - mpic->irq_offset; | ||
449 | |||
450 | if (mpic_is_ht_interrupt(mpic, src)) | ||
451 | mpic_startup_ht_interrupt(mpic, src, irq_desc[irq].status); | ||
452 | |||
453 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | ||
454 | |||
455 | mpic_enable_irq(irq); | ||
456 | |||
457 | return 0; | ||
374 | } | 458 | } |
375 | 459 | ||
376 | static void mpic_disable_irq(unsigned int irq) | 460 | static void mpic_disable_irq(unsigned int irq) |
@@ -394,12 +478,27 @@ static void mpic_disable_irq(unsigned int irq) | |||
394 | } while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK)); | 478 | } while(!(mpic_irq_read(src, MPIC_IRQ_VECTOR_PRI) & MPIC_VECPRI_MASK)); |
395 | } | 479 | } |
396 | 480 | ||
481 | static void mpic_shutdown_irq(unsigned int irq) | ||
482 | { | ||
483 | #ifdef CONFIG_MPIC_BROKEN_U3 | ||
484 | struct mpic *mpic = mpic_from_irq(irq); | ||
485 | unsigned int src = irq - mpic->irq_offset; | ||
486 | |||
487 | if (mpic_is_ht_interrupt(mpic, src)) | ||
488 | mpic_shutdown_ht_interrupt(mpic, src, irq_desc[irq].status); | ||
489 | |||
490 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | ||
491 | |||
492 | mpic_disable_irq(irq); | ||
493 | } | ||
494 | |||
397 | static void mpic_end_irq(unsigned int irq) | 495 | static void mpic_end_irq(unsigned int irq) |
398 | { | 496 | { |
399 | struct mpic *mpic = mpic_from_irq(irq); | 497 | struct mpic *mpic = mpic_from_irq(irq); |
400 | 498 | ||
499 | #ifdef DEBUG_IRQ | ||
401 | DBG("%s: end_irq: %d\n", mpic->name, irq); | 500 | DBG("%s: end_irq: %d\n", mpic->name, irq); |
402 | 501 | #endif | |
403 | /* We always EOI on end_irq() even for edge interrupts since that | 502 | /* We always EOI on end_irq() even for edge interrupts since that |
404 | * should only lower the priority, the MPIC should have properly | 503 | * should only lower the priority, the MPIC should have properly |
405 | * latched another edge interrupt coming in anyway | 504 | * latched another edge interrupt coming in anyway |
@@ -408,8 +507,9 @@ static void mpic_end_irq(unsigned int irq) | |||
408 | #ifdef CONFIG_MPIC_BROKEN_U3 | 507 | #ifdef CONFIG_MPIC_BROKEN_U3 |
409 | if (mpic->flags & MPIC_BROKEN_U3) { | 508 | if (mpic->flags & MPIC_BROKEN_U3) { |
410 | unsigned int src = irq - mpic->irq_offset; | 509 | unsigned int src = irq - mpic->irq_offset; |
411 | if (mpic_is_ht_interrupt(mpic, src)) | 510 | if (mpic_is_ht_interrupt(mpic, src) && |
412 | mpic_apic_end_irq(mpic, src); | 511 | (irq_desc[irq].status & IRQ_LEVEL)) |
512 | mpic_ht_end_irq(mpic, src); | ||
413 | } | 513 | } |
414 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | 514 | #endif /* CONFIG_MPIC_BROKEN_U3 */ |
415 | 515 | ||
@@ -490,6 +590,8 @@ struct mpic * __init mpic_alloc(unsigned long phys_addr, | |||
490 | mpic->name = name; | 590 | mpic->name = name; |
491 | 591 | ||
492 | mpic->hc_irq.typename = name; | 592 | mpic->hc_irq.typename = name; |
593 | mpic->hc_irq.startup = mpic_startup_irq; | ||
594 | mpic->hc_irq.shutdown = mpic_shutdown_irq; | ||
493 | mpic->hc_irq.enable = mpic_enable_irq; | 595 | mpic->hc_irq.enable = mpic_enable_irq; |
494 | mpic->hc_irq.disable = mpic_disable_irq; | 596 | mpic->hc_irq.disable = mpic_disable_irq; |
495 | mpic->hc_irq.end = mpic_end_irq; | 597 | mpic->hc_irq.end = mpic_end_irq; |
@@ -658,10 +760,10 @@ void __init mpic_init(struct mpic *mpic) | |||
658 | mpic->irq_count = mpic->num_sources; | 760 | mpic->irq_count = mpic->num_sources; |
659 | 761 | ||
660 | #ifdef CONFIG_MPIC_BROKEN_U3 | 762 | #ifdef CONFIG_MPIC_BROKEN_U3 |
661 | /* Do the ioapic fixups on U3 broken mpic */ | 763 | /* Do the HT PIC fixups on U3 broken mpic */ |
662 | DBG("MPIC flags: %x\n", mpic->flags); | 764 | DBG("MPIC flags: %x\n", mpic->flags); |
663 | if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY)) | 765 | if ((mpic->flags & MPIC_BROKEN_U3) && (mpic->flags & MPIC_PRIMARY)) |
664 | mpic_scan_ioapics(mpic); | 766 | mpic_scan_ht_pics(mpic); |
665 | #endif /* CONFIG_MPIC_BROKEN_U3 */ | 767 | #endif /* CONFIG_MPIC_BROKEN_U3 */ |
666 | 768 | ||
667 | for (i = 0; i < mpic->num_sources; i++) { | 769 | for (i = 0; i < mpic->num_sources; i++) { |
@@ -848,7 +950,9 @@ void mpic_send_ipi(unsigned int ipi_no, unsigned int cpu_mask) | |||
848 | 950 | ||
849 | BUG_ON(mpic == NULL); | 951 | BUG_ON(mpic == NULL); |
850 | 952 | ||
953 | #ifdef DEBUG_IPI | ||
851 | DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no); | 954 | DBG("%s: send_ipi(ipi_no: %d)\n", mpic->name, ipi_no); |
955 | #endif | ||
852 | 956 | ||
853 | mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10, | 957 | mpic_cpu_write(MPIC_CPU_IPI_DISPATCH_0 + ipi_no * 0x10, |
854 | mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0])); | 958 | mpic_physmask(cpu_mask & cpus_addr(cpu_online_map)[0])); |
@@ -859,19 +963,28 @@ int mpic_get_one_irq(struct mpic *mpic, struct pt_regs *regs) | |||
859 | u32 irq; | 963 | u32 irq; |
860 | 964 | ||
861 | irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK; | 965 | irq = mpic_cpu_read(MPIC_CPU_INTACK) & MPIC_VECPRI_VECTOR_MASK; |
966 | #ifdef DEBUG_LOW | ||
862 | DBG("%s: get_one_irq(): %d\n", mpic->name, irq); | 967 | DBG("%s: get_one_irq(): %d\n", mpic->name, irq); |
863 | 968 | #endif | |
864 | if (mpic->cascade && irq == mpic->cascade_vec) { | 969 | if (mpic->cascade && irq == mpic->cascade_vec) { |
970 | #ifdef DEBUG_LOW | ||
865 | DBG("%s: cascading ...\n", mpic->name); | 971 | DBG("%s: cascading ...\n", mpic->name); |
972 | #endif | ||
866 | irq = mpic->cascade(regs, mpic->cascade_data); | 973 | irq = mpic->cascade(regs, mpic->cascade_data); |
867 | mpic_eoi(mpic); | 974 | mpic_eoi(mpic); |
868 | return irq; | 975 | return irq; |
869 | } | 976 | } |
870 | if (unlikely(irq == MPIC_VEC_SPURRIOUS)) | 977 | if (unlikely(irq == MPIC_VEC_SPURRIOUS)) |
871 | return -1; | 978 | return -1; |
872 | if (irq < MPIC_VEC_IPI_0) | 979 | if (irq < MPIC_VEC_IPI_0) { |
980 | #ifdef DEBUG_IRQ | ||
981 | DBG("%s: irq %d\n", mpic->name, irq + mpic->irq_offset); | ||
982 | #endif | ||
873 | return irq + mpic->irq_offset; | 983 | return irq + mpic->irq_offset; |
984 | } | ||
985 | #ifdef DEBUG_IPI | ||
874 | DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0); | 986 | DBG("%s: ipi %d !\n", mpic->name, irq - MPIC_VEC_IPI_0); |
987 | #endif | ||
875 | return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset; | 988 | return irq - MPIC_VEC_IPI_0 + mpic->ipi_offset; |
876 | } | 989 | } |
877 | 990 | ||
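To show where the new HT fixup path gets exercised, here is a hedged sketch of a platform requesting it. The mpic_alloc() argument list is assumed from the mpic.c of this era, and the sizes, offsets and name string are placeholder values, not taken from any real board port.

#include <linux/init.h>
#include <asm/mpic.h>

/* Illustrative only: request the U3 HT workaround on the primary MPIC. */
static void __init example_setup_u3_mpic(unsigned long openpic_phys)
{
	struct mpic *mpic;

	/* assumed argument order: phys addr, flags, isu size, irq offset,
	 * irq count, ipi offset, senses array, senses count, name */
	mpic = mpic_alloc(openpic_phys,
			  MPIC_PRIMARY | MPIC_BROKEN_U3,
			  0, 0, 128, 0, NULL, 0, " U3-MPIC  ");
	if (mpic == NULL)
		return;

	/* mpic_init() calls mpic_scan_ht_pics() on the primary MPIC when
	 * MPIC_BROKEN_U3 is set, filling mpic->fixups[] so that
	 * mpic_startup_irq(), mpic_shutdown_irq() and mpic_end_irq() can
	 * apply the HT enable/disable and EOI workarounds above */
	mpic_init(mpic);
}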
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile index b20312e5ed27..109d874ecfbe 100644 --- a/arch/powerpc/xmon/Makefile +++ b/arch/powerpc/xmon/Makefile | |||
@@ -3,9 +3,5 @@ | |||
3 | ifdef CONFIG_PPC64 | 3 | ifdef CONFIG_PPC64 |
4 | EXTRA_CFLAGS += -mno-minimal-toc | 4 | EXTRA_CFLAGS += -mno-minimal-toc |
5 | endif | 5 | endif |
6 | 6 | obj-y += xmon.o ppc-dis.o ppc-opc.o setjmp.o start.o \ | |
7 | obj-$(CONFIG_8xx) += start_8xx.o | 7 | nonstdio.o |
8 | obj-$(CONFIG_6xx) += start_32.o | ||
9 | obj-$(CONFIG_4xx) += start_32.o | ||
10 | obj-$(CONFIG_PPC64) += start_64.o | ||
11 | obj-y += xmon.o ppc-dis.o ppc-opc.o setjmp.o nonstdio.o | ||
diff --git a/arch/powerpc/xmon/start_64.c b/arch/powerpc/xmon/start.c index 712552c4f242..712552c4f242 100644 --- a/arch/powerpc/xmon/start_64.c +++ b/arch/powerpc/xmon/start.c | |||
diff --git a/arch/powerpc/xmon/start_32.c b/arch/powerpc/xmon/start_32.c deleted file mode 100644 index c2464df4217e..000000000000 --- a/arch/powerpc/xmon/start_32.c +++ /dev/null | |||
@@ -1,441 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1996 Paul Mackerras. | ||
3 | */ | ||
4 | #include <linux/config.h> | ||
5 | #include <linux/string.h> | ||
6 | #include <asm/machdep.h> | ||
7 | #include <asm/io.h> | ||
8 | #include <asm/page.h> | ||
9 | #include <linux/adb.h> | ||
10 | #include <linux/pmu.h> | ||
11 | #include <linux/cuda.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/bitops.h> | ||
15 | #include <asm/xmon.h> | ||
16 | #include <asm/prom.h> | ||
17 | #include <asm/bootx.h> | ||
18 | #include <asm/machdep.h> | ||
19 | #include <asm/errno.h> | ||
20 | #include <asm/pmac_feature.h> | ||
21 | #include <asm/processor.h> | ||
22 | #include <asm/delay.h> | ||
23 | #include <asm/btext.h> | ||
24 | #include <asm/time.h> | ||
25 | #include "nonstdio.h" | ||
26 | |||
27 | static volatile unsigned char __iomem *sccc, *sccd; | ||
28 | unsigned int TXRDY, RXRDY, DLAB; | ||
29 | |||
30 | static int use_serial; | ||
31 | static int use_screen; | ||
32 | static int via_modem; | ||
33 | static int xmon_use_sccb; | ||
34 | static struct device_node *channel_node; | ||
35 | |||
36 | void buf_access(void) | ||
37 | { | ||
38 | if (DLAB) | ||
39 | sccd[3] &= ~DLAB; /* reset DLAB */ | ||
40 | } | ||
41 | |||
42 | extern int adb_init(void); | ||
43 | |||
44 | #ifdef CONFIG_PPC_CHRP | ||
45 | /* | ||
46 | * This looks in the "ranges" property for the primary PCI host bridge | ||
47 | * to find the physical address of the start of PCI/ISA I/O space. | ||
48 | * It is basically a cut-down version of pci_process_bridge_OF_ranges. | ||
49 | */ | ||
50 | static unsigned long chrp_find_phys_io_base(void) | ||
51 | { | ||
52 | struct device_node *node; | ||
53 | unsigned int *ranges; | ||
54 | unsigned long base = CHRP_ISA_IO_BASE; | ||
55 | int rlen = 0; | ||
56 | int np; | ||
57 | |||
58 | node = find_devices("isa"); | ||
59 | if (node != NULL) { | ||
60 | node = node->parent; | ||
61 | if (node == NULL || node->type == NULL | ||
62 | || strcmp(node->type, "pci") != 0) | ||
63 | node = NULL; | ||
64 | } | ||
65 | if (node == NULL) | ||
66 | node = find_devices("pci"); | ||
67 | if (node == NULL) | ||
68 | return base; | ||
69 | |||
70 | ranges = (unsigned int *) get_property(node, "ranges", &rlen); | ||
71 | np = prom_n_addr_cells(node) + 5; | ||
72 | while ((rlen -= np * sizeof(unsigned int)) >= 0) { | ||
73 | if ((ranges[0] >> 24) == 1 && ranges[2] == 0) { | ||
74 | /* I/O space starting at 0, grab the phys base */ | ||
75 | base = ranges[np - 3]; | ||
76 | break; | ||
77 | } | ||
78 | ranges += np; | ||
79 | } | ||
80 | return base; | ||
81 | } | ||
82 | #endif /* CONFIG_PPC_CHRP */ | ||
83 | |||
84 | void xmon_map_scc(void) | ||
85 | { | ||
86 | #ifdef CONFIG_PPC_MULTIPLATFORM | ||
87 | volatile unsigned char __iomem *base; | ||
88 | |||
89 | if (_machine == _MACH_Pmac) { | ||
90 | struct device_node *np; | ||
91 | unsigned long addr; | ||
92 | #ifdef CONFIG_BOOTX_TEXT | ||
93 | if (!use_screen && !use_serial | ||
94 | && !machine_is_compatible("iMac")) { | ||
95 | /* see if there is a keyboard in the device tree | ||
96 | with a parent of type "adb" */ | ||
97 | for (np = find_devices("keyboard"); np; np = np->next) | ||
98 | if (np->parent && np->parent->type | ||
99 | && strcmp(np->parent->type, "adb") == 0) | ||
100 | break; | ||
101 | |||
102 | /* needs to be hacked if xmon_printk is to be used | ||
103 | from within find_via_pmu() */ | ||
104 | #ifdef CONFIG_ADB_PMU | ||
105 | if (np != NULL && boot_text_mapped && find_via_pmu()) | ||
106 | use_screen = 1; | ||
107 | #endif | ||
108 | #ifdef CONFIG_ADB_CUDA | ||
109 | if (np != NULL && boot_text_mapped && find_via_cuda()) | ||
110 | use_screen = 1; | ||
111 | #endif | ||
112 | } | ||
113 | if (!use_screen && (np = find_devices("escc")) != NULL) { | ||
114 | /* | ||
115 | * look for the device node for the serial port | ||
116 | * we're using and see if it says it has a modem | ||
117 | */ | ||
118 | char *name = xmon_use_sccb? "ch-b": "ch-a"; | ||
119 | char *slots; | ||
120 | int l; | ||
121 | |||
122 | np = np->child; | ||
123 | while (np != NULL && strcmp(np->name, name) != 0) | ||
124 | np = np->sibling; | ||
125 | if (np != NULL) { | ||
126 | /* XXX should parse this properly */ | ||
127 | channel_node = np; | ||
128 | slots = get_property(np, "slot-names", &l); | ||
129 | if (slots != NULL && l >= 10 | ||
130 | && strcmp(slots+4, "Modem") == 0) | ||
131 | via_modem = 1; | ||
132 | } | ||
133 | } | ||
134 | btext_drawstring("xmon uses "); | ||
135 | if (use_screen) | ||
136 | btext_drawstring("screen and keyboard\n"); | ||
137 | else { | ||
138 | if (via_modem) | ||
139 | btext_drawstring("modem on "); | ||
140 | btext_drawstring(xmon_use_sccb? "printer": "modem"); | ||
141 | btext_drawstring(" port\n"); | ||
142 | } | ||
143 | |||
144 | #endif /* CONFIG_BOOTX_TEXT */ | ||
145 | |||
146 | #ifdef CHRP_ESCC | ||
147 | addr = 0xc1013020; | ||
148 | #else | ||
149 | addr = 0xf3013020; | ||
150 | #endif | ||
151 | TXRDY = 4; | ||
152 | RXRDY = 1; | ||
153 | |||
154 | np = find_devices("mac-io"); | ||
155 | if (np && np->n_addrs) | ||
156 | addr = np->addrs[0].address + 0x13020; | ||
157 | base = (volatile unsigned char *) ioremap(addr & PAGE_MASK, PAGE_SIZE); | ||
158 | sccc = base + (addr & ~PAGE_MASK); | ||
159 | sccd = sccc + 0x10; | ||
160 | |||
161 | } else { | ||
162 | base = (volatile unsigned char *) isa_io_base; | ||
163 | |||
164 | #ifdef CONFIG_PPC_CHRP | ||
165 | if (_machine == _MACH_chrp) | ||
166 | base = (volatile unsigned char __iomem *) | ||
167 | ioremap(chrp_find_phys_io_base(), 0x1000); | ||
168 | #endif | ||
169 | |||
170 | sccc = base + 0x3fd; | ||
171 | sccd = base + 0x3f8; | ||
172 | if (xmon_use_sccb) { | ||
173 | sccc -= 0x100; | ||
174 | sccd -= 0x100; | ||
175 | } | ||
176 | TXRDY = 0x20; | ||
177 | RXRDY = 1; | ||
178 | DLAB = 0x80; | ||
179 | } | ||
180 | #elif defined(CONFIG_GEMINI) | ||
181 | /* should already be mapped by the kernel boot */ | ||
182 | sccc = (volatile unsigned char __iomem *) 0xffeffb0d; | ||
183 | sccd = (volatile unsigned char __iomem *) 0xffeffb08; | ||
184 | TXRDY = 0x20; | ||
185 | RXRDY = 1; | ||
186 | DLAB = 0x80; | ||
187 | #elif defined(CONFIG_405GP) | ||
188 | sccc = (volatile unsigned char __iomem *)0xef600305; | ||
189 | sccd = (volatile unsigned char __iomem *)0xef600300; | ||
190 | TXRDY = 0x20; | ||
191 | RXRDY = 1; | ||
192 | DLAB = 0x80; | ||
193 | #endif /* platform */ | ||
194 | } | ||
195 | |||
196 | static int scc_initialized = 0; | ||
197 | |||
198 | void xmon_init_scc(void); | ||
199 | extern void cuda_poll(void); | ||
200 | |||
201 | static inline void do_poll_adb(void) | ||
202 | { | ||
203 | #ifdef CONFIG_ADB_PMU | ||
204 | if (sys_ctrler == SYS_CTRLER_PMU) | ||
205 | pmu_poll_adb(); | ||
206 | #endif /* CONFIG_ADB_PMU */ | ||
207 | #ifdef CONFIG_ADB_CUDA | ||
208 | if (sys_ctrler == SYS_CTRLER_CUDA) | ||
209 | cuda_poll(); | ||
210 | #endif /* CONFIG_ADB_CUDA */ | ||
211 | } | ||
212 | |||
213 | int xmon_write(void *ptr, int nb) | ||
214 | { | ||
215 | char *p = ptr; | ||
216 | int i, c, ct; | ||
217 | |||
218 | #ifdef CONFIG_SMP | ||
219 | static unsigned long xmon_write_lock; | ||
220 | int lock_wait = 1000000; | ||
221 | int locked; | ||
222 | |||
223 | while ((locked = test_and_set_bit(0, &xmon_write_lock)) != 0) | ||
224 | if (--lock_wait == 0) | ||
225 | break; | ||
226 | #endif | ||
227 | |||
228 | #ifdef CONFIG_BOOTX_TEXT | ||
229 | if (use_screen) { | ||
230 | /* write it on the screen */ | ||
231 | for (i = 0; i < nb; ++i) | ||
232 | btext_drawchar(*p++); | ||
233 | goto out; | ||
234 | } | ||
235 | #endif | ||
236 | if (!scc_initialized) | ||
237 | xmon_init_scc(); | ||
238 | ct = 0; | ||
239 | for (i = 0; i < nb; ++i) { | ||
240 | while ((*sccc & TXRDY) == 0) | ||
241 | do_poll_adb(); | ||
242 | c = p[i]; | ||
243 | if (c == '\n' && !ct) { | ||
244 | c = '\r'; | ||
245 | ct = 1; | ||
246 | --i; | ||
247 | } else { | ||
248 | ct = 0; | ||
249 | } | ||
250 | buf_access(); | ||
251 | *sccd = c; | ||
252 | eieio(); | ||
253 | } | ||
254 | |||
255 | out: | ||
256 | #ifdef CONFIG_SMP | ||
257 | if (!locked) | ||
258 | clear_bit(0, &xmon_write_lock); | ||
259 | #endif | ||
260 | return nb; | ||
261 | } | ||
262 | |||
263 | int xmon_wants_key; | ||
264 | int xmon_adb_keycode; | ||
265 | |||
266 | #ifdef CONFIG_BOOTX_TEXT | ||
267 | static int xmon_adb_shiftstate; | ||
268 | |||
269 | static unsigned char xmon_keytab[128] = | ||
270 | "asdfhgzxcv\000bqwer" /* 0x00 - 0x0f */ | ||
271 | "yt123465=97-80]o" /* 0x10 - 0x1f */ | ||
272 | "u[ip\rlj'k;\\,/nm." /* 0x20 - 0x2f */ | ||
273 | "\t `\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */ | ||
274 | "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */ | ||
275 | "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */ | ||
276 | |||
277 | static unsigned char xmon_shift_keytab[128] = | ||
278 | "ASDFHGZXCV\000BQWER" /* 0x00 - 0x0f */ | ||
279 | "YT!@#$^%+(&_*)}O" /* 0x10 - 0x1f */ | ||
280 | "U{IP\rLJ\"K:|<?NM>" /* 0x20 - 0x2f */ | ||
281 | "\t ~\177\0\033\0\0\0\0\0\0\0\0\0\0" /* 0x30 - 0x3f */ | ||
282 | "\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */ | ||
283 | "\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */ | ||
284 | |||
285 | static int xmon_get_adb_key(void) | ||
286 | { | ||
287 | int k, t, on; | ||
288 | |||
289 | xmon_wants_key = 1; | ||
290 | for (;;) { | ||
291 | xmon_adb_keycode = -1; | ||
292 | t = 0; | ||
293 | on = 0; | ||
294 | do { | ||
295 | if (--t < 0) { | ||
296 | on = 1 - on; | ||
297 | btext_drawchar(on? 0xdb: 0x20); | ||
298 | btext_drawchar('\b'); | ||
299 | t = 200000; | ||
300 | } | ||
301 | do_poll_adb(); | ||
302 | } while (xmon_adb_keycode == -1); | ||
303 | k = xmon_adb_keycode; | ||
304 | if (on) | ||
305 | btext_drawstring(" \b"); | ||
306 | |||
307 | /* test for shift keys */ | ||
308 | if ((k & 0x7f) == 0x38 || (k & 0x7f) == 0x7b) { | ||
309 | xmon_adb_shiftstate = (k & 0x80) == 0; | ||
310 | continue; | ||
311 | } | ||
312 | if (k >= 0x80) | ||
313 | continue; /* ignore up transitions */ | ||
314 | k = (xmon_adb_shiftstate? xmon_shift_keytab: xmon_keytab)[k]; | ||
315 | if (k != 0) | ||
316 | break; | ||
317 | } | ||
318 | xmon_wants_key = 0; | ||
319 | return k; | ||
320 | } | ||
321 | #endif /* CONFIG_BOOTX_TEXT */ | ||
322 | |||
323 | int xmon_readchar(void) | ||
324 | { | ||
325 | #ifdef CONFIG_BOOTX_TEXT | ||
326 | if (use_screen) | ||
327 | return xmon_get_adb_key(); | ||
328 | #endif | ||
329 | if (!scc_initialized) | ||
330 | xmon_init_scc(); | ||
331 | while ((*sccc & RXRDY) == 0) | ||
332 | do_poll_adb(); | ||
333 | buf_access(); | ||
334 | return *sccd; | ||
335 | } | ||
336 | |||
337 | int xmon_read_poll(void) | ||
338 | { | ||
339 | if ((*sccc & RXRDY) == 0) { | ||
340 | do_poll_adb(); | ||
341 | return -1; | ||
342 | } | ||
343 | buf_access(); | ||
344 | return *sccd; | ||
345 | } | ||
346 | |||
347 | static unsigned char scc_inittab[] = { | ||
348 | 13, 0, /* set baud rate divisor */ | ||
349 | 12, 1, | ||
350 | 14, 1, /* baud rate gen enable, src=rtxc */ | ||
351 | 11, 0x50, /* clocks = br gen */ | ||
352 | 5, 0xea, /* tx 8 bits, assert DTR & RTS */ | ||
353 | 4, 0x46, /* x16 clock, 1 stop */ | ||
354 | 3, 0xc1, /* rx enable, 8 bits */ | ||
355 | }; | ||
356 | |||
357 | void xmon_init_scc(void) | ||
358 | { | ||
359 | if ( _machine == _MACH_chrp ) | ||
360 | { | ||
361 | sccd[3] = 0x83; eieio(); /* LCR = 8N1 + DLAB */ | ||
362 | sccd[0] = 12; eieio(); /* DLL = 9600 baud */ | ||
363 | sccd[1] = 0; eieio(); | ||
364 | sccd[2] = 0; eieio(); /* FCR = 0 */ | ||
365 | sccd[3] = 3; eieio(); /* LCR = 8N1 */ | ||
366 | sccd[1] = 0; eieio(); /* IER = 0 */ | ||
367 | } | ||
368 | else if ( _machine == _MACH_Pmac ) | ||
369 | { | ||
370 | int i, x; | ||
371 | unsigned long timeout; | ||
372 | |||
373 | if (channel_node != 0) | ||
374 | pmac_call_feature( | ||
375 | PMAC_FTR_SCC_ENABLE, | ||
376 | channel_node, | ||
377 | PMAC_SCC_ASYNC | PMAC_SCC_FLAG_XMON, 1); | ||
378 | printk(KERN_INFO "Serial port locked ON by debugger !\n"); | ||
379 | if (via_modem && channel_node != 0) { | ||
380 | unsigned int t0; | ||
381 | |||
382 | pmac_call_feature( | ||
383 | PMAC_FTR_MODEM_ENABLE, | ||
384 | channel_node, 0, 1); | ||
385 | printk(KERN_INFO "Modem powered up by debugger !\n"); | ||
386 | t0 = get_tbl(); | ||
387 | timeout = 3 * tb_ticks_per_sec; | ||
388 | if (timeout == 0) | ||
389 | /* assume 25MHz if tb_ticks_per_sec not set */ | ||
390 | timeout = 75000000; | ||
391 | while (get_tbl() - t0 < timeout) | ||
392 | eieio(); | ||
393 | } | ||
394 | /* use the B channel if requested */ | ||
395 | if (xmon_use_sccb) { | ||
396 | sccc = (volatile unsigned char *) | ||
397 | ((unsigned long)sccc & ~0x20); | ||
398 | sccd = sccc + 0x10; | ||
399 | } | ||
400 | for (i = 20000; i != 0; --i) { | ||
401 | x = *sccc; eieio(); | ||
402 | } | ||
403 | *sccc = 9; eieio(); /* reset A or B side */ | ||
404 | *sccc = ((unsigned long)sccc & 0x20)? 0x80: 0x40; eieio(); | ||
405 | for (i = 0; i < sizeof(scc_inittab); ++i) { | ||
406 | *sccc = scc_inittab[i]; | ||
407 | eieio(); | ||
408 | } | ||
409 | } | ||
410 | scc_initialized = 1; | ||
411 | if (via_modem) { | ||
412 | for (;;) { | ||
413 | xmon_write("ATE1V1\r", 7); | ||
414 | if (xmon_expect("OK", 5)) { | ||
415 | xmon_write("ATA\r", 4); | ||
416 | if (xmon_expect("CONNECT", 40)) | ||
417 | break; | ||
418 | } | ||
419 | xmon_write("+++", 3); | ||
420 | xmon_expect("OK", 3); | ||
421 | } | ||
422 | } | ||
423 | } | ||
424 | |||
425 | void xmon_enter(void) | ||
426 | { | ||
427 | #ifdef CONFIG_ADB_PMU | ||
428 | if (_machine == _MACH_Pmac) { | ||
429 | pmu_suspend(); | ||
430 | } | ||
431 | #endif | ||
432 | } | ||
433 | |||
434 | void xmon_leave(void) | ||
435 | { | ||
436 | #ifdef CONFIG_ADB_PMU | ||
437 | if (_machine == _MACH_Pmac) { | ||
438 | pmu_resume(); | ||
439 | } | ||
440 | #endif | ||
441 | } | ||
diff --git a/arch/powerpc/xmon/start_8xx.c b/arch/powerpc/xmon/start_8xx.c deleted file mode 100644 index 4c17b0486ad5..000000000000 --- a/arch/powerpc/xmon/start_8xx.c +++ /dev/null | |||
@@ -1,44 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 1996 Paul Mackerras. | ||
3 | * Copyright (C) 2000 Dan Malek. | ||
4 | * Quick hack of Paul's code to make XMON work on 8xx processors. Lots | ||
5 | * of assumptions, like the SMC1 is used, it has been initialized by the | ||
6 | * loader at some point, and we can just stuff and suck bytes. | ||
7 | * We rely upon the 8xx uart driver to support us, as the interface | ||
8 | * changes between boot up and operational phases of the kernel. | ||
9 | */ | ||
10 | #include <linux/string.h> | ||
11 | #include <asm/machdep.h> | ||
12 | #include <asm/io.h> | ||
13 | #include <asm/page.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <asm/8xx_immap.h> | ||
16 | #include <asm/mpc8xx.h> | ||
17 | #include <asm/commproc.h> | ||
18 | #include "nonstdio.h" | ||
19 | |||
20 | extern int xmon_8xx_write(char *str, int nb); | ||
21 | extern int xmon_8xx_read_poll(void); | ||
22 | extern int xmon_8xx_read_char(void); | ||
23 | |||
24 | void xmon_map_scc(void) | ||
25 | { | ||
26 | cpmp = (cpm8xx_t *)&(((immap_t *)IMAP_ADDR)->im_cpm); | ||
27 | } | ||
28 | |||
29 | void xmon_init_scc(void); | ||
30 | |||
31 | int xmon_write(void *ptr, int nb) | ||
32 | { | ||
33 | return(xmon_8xx_write(ptr, nb)); | ||
34 | } | ||
35 | |||
36 | int xmon_readchar(void) | ||
37 | { | ||
38 | return xmon_8xx_read_char(); | ||
39 | } | ||
40 | |||
41 | int xmon_read_poll(void) | ||
42 | { | ||
43 | return(xmon_8xx_read_poll()); | ||
44 | } | ||
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c index c45a6ad5f3b7..22612ed5379c 100644 --- a/arch/powerpc/xmon/xmon.c +++ b/arch/powerpc/xmon/xmon.c | |||
@@ -450,7 +450,6 @@ int xmon_core(struct pt_regs *regs, int fromipi) | |||
450 | leave: | 450 | leave: |
451 | cpu_clear(cpu, cpus_in_xmon); | 451 | cpu_clear(cpu, cpus_in_xmon); |
452 | xmon_fault_jmp[cpu] = NULL; | 452 | xmon_fault_jmp[cpu] = NULL; |
453 | |||
454 | #else | 453 | #else |
455 | /* UP is simple... */ | 454 | /* UP is simple... */ |
456 | if (in_xmon) { | 455 | if (in_xmon) { |
@@ -805,7 +804,10 @@ cmds(struct pt_regs *excp) | |||
805 | break; | 804 | break; |
806 | case 'x': | 805 | case 'x': |
807 | case 'X': | 806 | case 'X': |
807 | return cmd; | ||
808 | case EOF: | 808 | case EOF: |
809 | printf(" <no input ...>\n"); | ||
810 | mdelay(2000); | ||
809 | return cmd; | 811 | return cmd; |
810 | case '?': | 812 | case '?': |
811 | printf(help_string); | 813 | printf(help_string); |
@@ -1011,7 +1013,7 @@ static long check_bp_loc(unsigned long addr) | |||
1011 | unsigned int instr; | 1013 | unsigned int instr; |
1012 | 1014 | ||
1013 | addr &= ~3; | 1015 | addr &= ~3; |
1014 | if (addr < KERNELBASE) { | 1016 | if (!is_kernel_addr(addr)) { |
1015 | printf("Breakpoints may only be placed at kernel addresses\n"); | 1017 | printf("Breakpoints may only be placed at kernel addresses\n"); |
1016 | return 0; | 1018 | return 0; |
1017 | } | 1019 | } |
@@ -1062,7 +1064,7 @@ bpt_cmds(void) | |||
1062 | dabr.address = 0; | 1064 | dabr.address = 0; |
1063 | dabr.enabled = 0; | 1065 | dabr.enabled = 0; |
1064 | if (scanhex(&dabr.address)) { | 1066 | if (scanhex(&dabr.address)) { |
1065 | if (dabr.address < KERNELBASE) { | 1067 | if (!is_kernel_addr(dabr.address)) { |
1066 | printf(badaddr); | 1068 | printf(badaddr); |
1067 | break; | 1069 | break; |
1068 | } | 1070 | } |
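The last two hunks replace the raw KERNELBASE comparison with is_kernel_addr(). As a rough mental model only (an approximation, not the exact <asm/page.h> definition, which may differ between 32-bit and 64-bit configurations), the helper behaves like:

/* Approximation only; the real is_kernel_addr() lives in <asm/page.h>. */
static inline int example_is_kernel_addr(unsigned long addr)
{
	return addr >= KERNELBASE;
}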