author	Michael Ellerman <mpe@ellerman.id.au>	2014-06-01 21:20:46 -0400
committer	Benjamin Herrenschmidt <benh@kernel.crashing.org>	2014-06-11 02:35:38 -0400
commit	fb5a515704d7e84c139140a83c5eff515adfc000 (patch)
tree	52ef9e21292cec6f8d1e936661f230b2db43978c /arch
parent	94314290ed719cf64619dfc42df8f84161a36892 (diff)
powerpc: Remove platforms/wsp and associated pieces
__attribute__ ((unused))

WSP is the last user of CONFIG_PPC_A2, so we remove that as well.

Although CONFIG_PPC_ICSWX still exists, it's no longer selectable for any
Book3E platform, so we can remove the code in mmu-book3e.h that depended
on it.

Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch')
-rw-r--r--  arch/powerpc/Kconfig.debug | 5
-rw-r--r--  arch/powerpc/configs/chroma_defconfig | 307
-rw-r--r--  arch/powerpc/include/asm/mmu-book3e.h | 4
-rw-r--r--  arch/powerpc/include/asm/reg_a2.h | 9
-rw-r--r--  arch/powerpc/include/asm/wsp.h | 14
-rw-r--r--  arch/powerpc/kernel/Makefile | 1
-rw-r--r--  arch/powerpc/kernel/cpu_setup_a2.S | 120
-rw-r--r--  arch/powerpc/kernel/cputable.c | 38
-rw-r--r--  arch/powerpc/kernel/exceptions-64e.S | 16
-rw-r--r--  arch/powerpc/kernel/udbg.c | 2
-rw-r--r--  arch/powerpc/kernel/udbg_16550.c | 11
-rw-r--r--  arch/powerpc/platforms/Kconfig | 1
-rw-r--r--  arch/powerpc/platforms/Kconfig.cputype | 6
-rw-r--r--  arch/powerpc/platforms/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/wsp/Kconfig | 30
-rw-r--r--  arch/powerpc/platforms/wsp/Makefile | 10
-rw-r--r--  arch/powerpc/platforms/wsp/chroma.c | 56
-rw-r--r--  arch/powerpc/platforms/wsp/h8.c | 135
-rw-r--r--  arch/powerpc/platforms/wsp/ics.c | 762
-rw-r--r--  arch/powerpc/platforms/wsp/ics.h | 25
-rw-r--r--  arch/powerpc/platforms/wsp/msi.c | 102
-rw-r--r--  arch/powerpc/platforms/wsp/msi.h | 19
-rw-r--r--  arch/powerpc/platforms/wsp/opb_pic.c | 321
-rw-r--r--  arch/powerpc/platforms/wsp/psr2.c | 67
-rw-r--r--  arch/powerpc/platforms/wsp/scom_smp.c | 435
-rw-r--r--  arch/powerpc/platforms/wsp/scom_wsp.c | 82
-rw-r--r--  arch/powerpc/platforms/wsp/setup.c | 36
-rw-r--r--  arch/powerpc/platforms/wsp/smp.c | 88
-rw-r--r--  arch/powerpc/platforms/wsp/wsp.c | 117
-rw-r--r--  arch/powerpc/platforms/wsp/wsp.h | 29
-rw-r--r--  arch/powerpc/platforms/wsp/wsp_pci.c | 1134
-rw-r--r--  arch/powerpc/platforms/wsp/wsp_pci.h | 268
32 files changed, 1 insertion(+), 4250 deletions(-)
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 21c9f304e96c..790352f93700 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -235,11 +235,6 @@ config PPC_EARLY_DEBUG_USBGECKO
 	  Select this to enable early debugging for Nintendo GameCube/Wii
 	  consoles via an external USB Gecko adapter.
 
-config PPC_EARLY_DEBUG_WSP
-	bool "Early debugging via WSP's internal UART"
-	depends on PPC_WSP
-	select PPC_UDBG_16550
-
 config PPC_EARLY_DEBUG_PS3GELIC
 	bool "Early debugging through the PS3 Ethernet port"
 	depends on PPC_PS3
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig
deleted file mode 100644
index 4f35fc462385..000000000000
--- a/arch/powerpc/configs/chroma_defconfig
+++ /dev/null
@@ -1,307 +0,0 @@
1CONFIG_PPC64=y
2CONFIG_PPC_BOOK3E_64=y
3# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
4CONFIG_SMP=y
5CONFIG_NR_CPUS=256
6CONFIG_EXPERIMENTAL=y
7CONFIG_SYSVIPC=y
8CONFIG_POSIX_MQUEUE=y
9CONFIG_BSD_PROCESS_ACCT=y
10CONFIG_TASKSTATS=y
11CONFIG_TASK_DELAY_ACCT=y
12CONFIG_TASK_XACCT=y
13CONFIG_TASK_IO_ACCOUNTING=y
14CONFIG_AUDIT=y
15CONFIG_AUDITSYSCALL=y
16CONFIG_IKCONFIG=y
17CONFIG_IKCONFIG_PROC=y
18CONFIG_LOG_BUF_SHIFT=19
19CONFIG_CGROUPS=y
20CONFIG_CGROUP_DEVICE=y
21CONFIG_CPUSETS=y
22CONFIG_CGROUP_CPUACCT=y
23CONFIG_RESOURCE_COUNTERS=y
24CONFIG_CGROUP_MEMCG=y
25CONFIG_CGROUP_MEMCG_SWAP=y
26CONFIG_NAMESPACES=y
27CONFIG_RELAY=y
28CONFIG_BLK_DEV_INITRD=y
29CONFIG_INITRAMFS_SOURCE=""
30CONFIG_RD_BZIP2=y
31CONFIG_RD_LZMA=y
32CONFIG_INITRAMFS_COMPRESSION_GZIP=y
33CONFIG_KALLSYMS_ALL=y
34CONFIG_EMBEDDED=y
35CONFIG_PERF_EVENTS=y
36CONFIG_PROFILING=y
37CONFIG_OPROFILE=y
38CONFIG_KPROBES=y
39CONFIG_MODULES=y
40CONFIG_MODULE_FORCE_LOAD=y
41CONFIG_MODULE_UNLOAD=y
42CONFIG_MODULE_FORCE_UNLOAD=y
43CONFIG_MODVERSIONS=y
44CONFIG_MODULE_SRCVERSION_ALL=y
45CONFIG_SCOM_DEBUGFS=y
46CONFIG_PPC_A2_DD2=y
47CONFIG_KVM_GUEST=y
48CONFIG_NO_HZ=y
49CONFIG_HIGH_RES_TIMERS=y
50CONFIG_HZ_100=y
51# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
52CONFIG_BINFMT_MISC=y
53CONFIG_NUMA=y
54# CONFIG_MIGRATION is not set
55CONFIG_PPC_64K_PAGES=y
56CONFIG_SCHED_SMT=y
57CONFIG_CMDLINE_BOOL=y
58CONFIG_CMDLINE=""
59# CONFIG_SECCOMP is not set
60CONFIG_PCIEPORTBUS=y
61# CONFIG_PCIEASPM is not set
62CONFIG_PCI_MSI=y
63CONFIG_PACKET=y
64CONFIG_UNIX=y
65CONFIG_XFRM_USER=m
66CONFIG_XFRM_SUB_POLICY=y
67CONFIG_XFRM_STATISTICS=y
68CONFIG_NET_KEY=m
69CONFIG_NET_KEY_MIGRATE=y
70CONFIG_INET=y
71CONFIG_IP_MULTICAST=y
72CONFIG_IP_ADVANCED_ROUTER=y
73CONFIG_IP_ROUTE_MULTIPATH=y
74CONFIG_IP_ROUTE_VERBOSE=y
75CONFIG_IP_PNP=y
76CONFIG_IP_PNP_DHCP=y
77CONFIG_IP_PNP_BOOTP=y
78CONFIG_NET_IPIP=y
79CONFIG_IP_MROUTE=y
80CONFIG_IP_PIMSM_V1=y
81CONFIG_IP_PIMSM_V2=y
82CONFIG_SYN_COOKIES=y
83CONFIG_INET_AH=m
84CONFIG_INET_ESP=m
85CONFIG_INET_IPCOMP=m
86CONFIG_IPV6=y
87CONFIG_IPV6_PRIVACY=y
88CONFIG_IPV6_ROUTER_PREF=y
89CONFIG_IPV6_ROUTE_INFO=y
90CONFIG_IPV6_OPTIMISTIC_DAD=y
91CONFIG_INET6_AH=y
92CONFIG_INET6_ESP=y
93CONFIG_INET6_IPCOMP=y
94CONFIG_IPV6_MIP6=y
95CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y
96CONFIG_IPV6_TUNNEL=y
97CONFIG_IPV6_MULTIPLE_TABLES=y
98CONFIG_IPV6_SUBTREES=y
99CONFIG_IPV6_MROUTE=y
100CONFIG_IPV6_PIMSM_V2=y
101CONFIG_NETFILTER=y
102CONFIG_NF_CONNTRACK=m
103CONFIG_NF_CONNTRACK_EVENTS=y
104CONFIG_NF_CT_PROTO_UDPLITE=m
105CONFIG_NF_CONNTRACK_FTP=m
106CONFIG_NF_CONNTRACK_IRC=m
107CONFIG_NF_CONNTRACK_TFTP=m
108CONFIG_NF_CT_NETLINK=m
109CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
110CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
111CONFIG_NETFILTER_XT_TARGET_MARK=m
112CONFIG_NETFILTER_XT_TARGET_NFLOG=m
113CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
114CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
115CONFIG_NETFILTER_XT_MATCH_COMMENT=m
116CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
117CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
118CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
119CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
120CONFIG_NETFILTER_XT_MATCH_DCCP=m
121CONFIG_NETFILTER_XT_MATCH_DSCP=m
122CONFIG_NETFILTER_XT_MATCH_ESP=m
123CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
124CONFIG_NETFILTER_XT_MATCH_HELPER=m
125CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
126CONFIG_NETFILTER_XT_MATCH_LENGTH=m
127CONFIG_NETFILTER_XT_MATCH_LIMIT=m
128CONFIG_NETFILTER_XT_MATCH_MAC=m
129CONFIG_NETFILTER_XT_MATCH_MARK=m
130CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
131CONFIG_NETFILTER_XT_MATCH_OWNER=m
132CONFIG_NETFILTER_XT_MATCH_POLICY=m
133CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
134CONFIG_NETFILTER_XT_MATCH_QUOTA=m
135CONFIG_NETFILTER_XT_MATCH_RATEEST=m
136CONFIG_NETFILTER_XT_MATCH_REALM=m
137CONFIG_NETFILTER_XT_MATCH_RECENT=m
138CONFIG_NETFILTER_XT_MATCH_SCTP=m
139CONFIG_NETFILTER_XT_MATCH_STATE=m
140CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
141CONFIG_NETFILTER_XT_MATCH_STRING=m
142CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
143CONFIG_NETFILTER_XT_MATCH_TIME=m
144CONFIG_NETFILTER_XT_MATCH_U32=m
145CONFIG_NF_CONNTRACK_IPV4=m
146CONFIG_IP_NF_QUEUE=m
147CONFIG_IP_NF_IPTABLES=m
148CONFIG_IP_NF_MATCH_AH=m
149CONFIG_IP_NF_MATCH_ECN=m
150CONFIG_IP_NF_MATCH_TTL=m
151CONFIG_IP_NF_FILTER=m
152CONFIG_IP_NF_TARGET_REJECT=m
153CONFIG_IP_NF_TARGET_LOG=m
154CONFIG_IP_NF_TARGET_ULOG=m
155CONFIG_NF_NAT=m
156CONFIG_IP_NF_TARGET_MASQUERADE=m
157CONFIG_IP_NF_TARGET_NETMAP=m
158CONFIG_IP_NF_TARGET_REDIRECT=m
159CONFIG_NET_TCPPROBE=y
160# CONFIG_WIRELESS is not set
161CONFIG_NET_9P=y
162CONFIG_NET_9P_DEBUG=y
163CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
164CONFIG_DEVTMPFS=y
165CONFIG_MTD=y
166CONFIG_MTD_CHAR=y
167CONFIG_MTD_BLOCK=y
168CONFIG_MTD_CFI=y
169CONFIG_MTD_CFI_ADV_OPTIONS=y
170CONFIG_MTD_CFI_LE_BYTE_SWAP=y
171CONFIG_MTD_CFI_INTELEXT=y
172CONFIG_MTD_CFI_AMDSTD=y
173CONFIG_MTD_CFI_STAA=y
174CONFIG_MTD_PHYSMAP_OF=y
175CONFIG_PROC_DEVICETREE=y
176CONFIG_BLK_DEV_LOOP=y
177CONFIG_BLK_DEV_CRYPTOLOOP=y
178CONFIG_BLK_DEV_NBD=m
179CONFIG_BLK_DEV_RAM=y
180CONFIG_BLK_DEV_RAM_SIZE=65536
181CONFIG_CDROM_PKTCDVD=y
182CONFIG_MISC_DEVICES=y
183CONFIG_BLK_DEV_SD=y
184CONFIG_BLK_DEV_SR=y
185CONFIG_BLK_DEV_SR_VENDOR=y
186CONFIG_CHR_DEV_SG=y
187CONFIG_SCSI_MULTI_LUN=y
188CONFIG_SCSI_CONSTANTS=y
189CONFIG_SCSI_SPI_ATTRS=y
190CONFIG_SCSI_FC_ATTRS=y
191CONFIG_SCSI_ISCSI_ATTRS=m
192CONFIG_SCSI_SAS_ATTRS=m
193CONFIG_SCSI_SRP_ATTRS=y
194CONFIG_ATA=y
195CONFIG_SATA_AHCI=y
196CONFIG_SATA_SIL24=y
197CONFIG_SATA_MV=y
198CONFIG_SATA_SIL=y
199CONFIG_PATA_CMD64X=y
200CONFIG_PATA_MARVELL=y
201CONFIG_PATA_SIL680=y
202CONFIG_MD=y
203CONFIG_BLK_DEV_MD=y
204CONFIG_MD_LINEAR=y
205CONFIG_BLK_DEV_DM=y
206CONFIG_DM_CRYPT=y
207CONFIG_DM_SNAPSHOT=y
208CONFIG_DM_MIRROR=y
209CONFIG_DM_ZERO=y
210CONFIG_DM_UEVENT=y
211CONFIG_NETDEVICES=y
212CONFIG_TUN=y
213CONFIG_E1000E=y
214CONFIG_TIGON3=y
215# CONFIG_WLAN is not set
216# CONFIG_INPUT is not set
217# CONFIG_SERIO is not set
218# CONFIG_VT is not set
219CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
220CONFIG_SERIAL_8250=y
221CONFIG_SERIAL_8250_CONSOLE=y
222CONFIG_HW_RANDOM=y
223CONFIG_RAW_DRIVER=y
224CONFIG_MAX_RAW_DEVS=1024
225# CONFIG_HWMON is not set
226# CONFIG_VGA_ARB is not set
227# CONFIG_USB_SUPPORT is not set
228CONFIG_EDAC=y
229CONFIG_EDAC_MM_EDAC=y
230CONFIG_RTC_CLASS=y
231CONFIG_RTC_DRV_DS1511=y
232CONFIG_RTC_DRV_DS1553=y
233CONFIG_EXT2_FS=y
234CONFIG_EXT2_FS_XATTR=y
235CONFIG_EXT2_FS_POSIX_ACL=y
236CONFIG_EXT2_FS_SECURITY=y
237CONFIG_EXT2_FS_XIP=y
238CONFIG_EXT3_FS=y
239# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
240CONFIG_EXT3_FS_POSIX_ACL=y
241CONFIG_EXT3_FS_SECURITY=y
242CONFIG_EXT4_FS=y
243# CONFIG_DNOTIFY is not set
244CONFIG_FUSE_FS=y
245CONFIG_ISO9660_FS=y
246CONFIG_JOLIET=y
247CONFIG_ZISOFS=y
248CONFIG_UDF_FS=m
249CONFIG_MSDOS_FS=y
250CONFIG_VFAT_FS=y
251CONFIG_PROC_KCORE=y
252CONFIG_TMPFS=y
253CONFIG_TMPFS_POSIX_ACL=y
254CONFIG_CONFIGFS_FS=m
255CONFIG_CRAMFS=y
256CONFIG_NFS_FS=y
257CONFIG_NFS_V3=y
258CONFIG_NFS_V3_ACL=y
259CONFIG_NFS_V4=y
260CONFIG_NFS_V4_1=y
261CONFIG_ROOT_NFS=y
262CONFIG_CIFS=y
263CONFIG_CIFS_WEAK_PW_HASH=y
264CONFIG_CIFS_XATTR=y
265CONFIG_CIFS_POSIX=y
266CONFIG_NLS_CODEPAGE_437=y
267CONFIG_NLS_ASCII=y
268CONFIG_NLS_ISO8859_1=y
269CONFIG_CRC_CCITT=m
270CONFIG_CRC_T10DIF=y
271CONFIG_LIBCRC32C=m
272CONFIG_PRINTK_TIME=y
273CONFIG_MAGIC_SYSRQ=y
274CONFIG_STRIP_ASM_SYMS=y
275CONFIG_DETECT_HUNG_TASK=y
276# CONFIG_SCHED_DEBUG is not set
277CONFIG_DEBUG_INFO=y
278CONFIG_FTRACE_SYSCALLS=y
279CONFIG_PPC_EMULATED_STATS=y
280CONFIG_XMON=y
281CONFIG_XMON_DEFAULT=y
282CONFIG_IRQ_DOMAIN_DEBUG=y
283CONFIG_PPC_EARLY_DEBUG=y
284CONFIG_KEYS_DEBUG_PROC_KEYS=y
285CONFIG_CRYPTO_NULL=m
286CONFIG_CRYPTO_TEST=m
287CONFIG_CRYPTO_CCM=m
288CONFIG_CRYPTO_GCM=m
289CONFIG_CRYPTO_PCBC=m
290CONFIG_CRYPTO_MICHAEL_MIC=m
291CONFIG_CRYPTO_SHA256=m
292CONFIG_CRYPTO_SHA512=m
293CONFIG_CRYPTO_TGR192=m
294CONFIG_CRYPTO_WP512=m
295CONFIG_CRYPTO_AES=m
296CONFIG_CRYPTO_ANUBIS=m
297CONFIG_CRYPTO_BLOWFISH=m
298CONFIG_CRYPTO_CAST5=m
299CONFIG_CRYPTO_CAST6=m
300CONFIG_CRYPTO_KHAZAD=m
301CONFIG_CRYPTO_SALSA20=m
302CONFIG_CRYPTO_SERPENT=m
303CONFIG_CRYPTO_TEA=m
304CONFIG_CRYPTO_TWOFISH=m
305CONFIG_CRYPTO_LZO=m
306# CONFIG_CRYPTO_ANSI_CPRNG is not set
307CONFIG_VIRTUALIZATION=y
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 901dac6b6cb7..d0918e09557f 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -223,10 +223,6 @@ typedef struct {
 	unsigned int	id;
 	unsigned int	active;
 	unsigned long	vdso_base;
-#ifdef CONFIG_PPC_ICSWX
-	struct spinlock *cop_lockp;	/* guard cop related stuff */
-	unsigned long acop;		/* mask of enabled coprocessor types */
-#endif /* CONFIG_PPC_ICSWX */
 #ifdef CONFIG_PPC_MM_SLICES
 	u64 low_slices_psize;	/* SLB page size encodings */
 	u64 high_slices_psize;	/* 4 bits per slice for now */
diff --git a/arch/powerpc/include/asm/reg_a2.h b/arch/powerpc/include/asm/reg_a2.h
index 3d52a1132f3d..3ba9c6f096fc 100644
--- a/arch/powerpc/include/asm/reg_a2.h
+++ b/arch/powerpc/include/asm/reg_a2.h
@@ -110,15 +110,6 @@
 #define TLB1_UR			ASM_CONST(0x0000000000000002)
 #define TLB1_SR			ASM_CONST(0x0000000000000001)
 
-#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
-#define WSP_UART_PHYS	0xffc000c000
-/* This needs to be careful chosen to hit a !0 congruence class
- * in the TLB since we bolt it in way 3, which is already occupied
- * by our linear mapping primary bolted entry in CC 0.
- */
-#define WSP_UART_VIRT	0xf000000000001000
-#endif
-
 /* A2 erativax attributes definitions */
 #define ERATIVAX_RS_IS_ALL		0x000
 #define ERATIVAX_RS_IS_TID		0x040
diff --git a/arch/powerpc/include/asm/wsp.h b/arch/powerpc/include/asm/wsp.h
deleted file mode 100644
index c7dc83088a33..000000000000
--- a/arch/powerpc/include/asm/wsp.h
+++ /dev/null
@@ -1,14 +0,0 @@
1/*
2 * Copyright 2011 Michael Ellerman, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#ifndef __ASM_POWERPC_WSP_H
10#define __ASM_POWERPC_WSP_H
11
12extern int wsp_get_chip_id(struct device_node *dn);
13
14#endif /* __ASM_POWERPC_WSP_H */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index fab19ec25597..670c312d914e 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -43,7 +43,6 @@ obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
 obj-$(CONFIG_PPC_BOOK3S_64)	+= mce.o mce_power.o
 obj64-$(CONFIG_RELOCATABLE)	+= reloc_64.o
 obj-$(CONFIG_PPC_BOOK3E_64)	+= exceptions-64e.o idle_book3e.o
-obj-$(CONFIG_PPC_A2)		+= cpu_setup_a2.o
 obj-$(CONFIG_PPC64)		+= vdso64/
 obj-$(CONFIG_ALTIVEC)		+= vecemu.o
 obj-$(CONFIG_PPC_970_NAP)	+= idle_power4.o
diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S
deleted file mode 100644
index 61f079e05b61..000000000000
--- a/arch/powerpc/kernel/cpu_setup_a2.S
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * A2 specific assembly support code
3 *
4 * Copyright 2009 Ben Herrenschmidt, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <asm/asm-offsets.h>
13#include <asm/ppc_asm.h>
14#include <asm/ppc-opcode.h>
15#include <asm/processor.h>
16#include <asm/reg_a2.h>
17#include <asm/reg.h>
18#include <asm/thread_info.h>
19
20/*
21 * Disable thdid and class fields in ERATs to bump PID to full 14 bits capacity.
22 * This also prevents external LPID accesses but that isn't a problem when not a
23 * guest. Under PV, this setting will be ignored and MMUCR will return the right
24 * number of PID bits we can use.
25 */
26#define MMUCR1_EXTEND_PID \
27 (MMUCR1_ICTID | MMUCR1_ITTID | MMUCR1_DCTID | \
28 MMUCR1_DTTID | MMUCR1_DCCD)
29
30/*
31 * Use extended PIDs if enabled.
32 * Don't clear the ERATs on context sync events and enable I & D LRU.
33 * Enable ERAT back invalidate when tlbwe overwrites an entry.
34 */
35#define INITIAL_MMUCR1 \
36 (MMUCR1_EXTEND_PID | MMUCR1_CSINV_NEVER | MMUCR1_IRRE | \
37 MMUCR1_DRRE | MMUCR1_TLBWE_BINV)
38
39_GLOBAL(__setup_cpu_a2)
40 /* Some of these are actually thread local and some are
41 * core local but doing it always won't hurt
42 */
43
44#ifdef CONFIG_PPC_ICSWX
45 /* Make sure ACOP starts out as zero */
46 li r3,0
47 mtspr SPRN_ACOP,r3
48
49 /* Skip the following if we are in Guest mode */
50 mfmsr r3
51 andis. r0,r3,MSR_GS@h
52 bne _icswx_skip_guest
53
54 /* Enable icswx instruction */
55 mfspr r3,SPRN_A2_CCR2
56 ori r3,r3,A2_CCR2_ENABLE_ICSWX
57 mtspr SPRN_A2_CCR2,r3
58
59 /* Unmask all CTs in HACOP */
60 li r3,-1
61 mtspr SPRN_HACOP,r3
62_icswx_skip_guest:
63#endif /* CONFIG_PPC_ICSWX */
64
65 /* Enable doorbell */
66 mfspr r3,SPRN_A2_CCR2
67 oris r3,r3,A2_CCR2_ENABLE_PC@h
68 mtspr SPRN_A2_CCR2,r3
69 isync
70
71 /* Setup CCR0 to disable power saving for now as it's busted
72 * in the current implementations. Setup CCR1 to wake on
73 * interrupts normally (we write the default value but who
74 * knows what FW may have clobbered...)
75 */
76 li r3,0
77 mtspr SPRN_A2_CCR0, r3
78 LOAD_REG_IMMEDIATE(r3,0x0f0f0f0f)
79 mtspr SPRN_A2_CCR1, r3
80
81 /* Initialise MMUCR1 */
82 lis r3,INITIAL_MMUCR1@h
83 ori r3,r3,INITIAL_MMUCR1@l
84 mtspr SPRN_MMUCR1,r3
85
86 /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
87 LOAD_REG_IMMEDIATE(r3, 0x000a7531)
88 mtspr SPRN_MMUCR2,r3
89
90 /* Set MMUCR3 to write all thids bit to the TLB */
91 LOAD_REG_IMMEDIATE(r3, 0x0000000f)
92 mtspr SPRN_MMUCR3,r3
93
94 /* Don't do ERAT stuff if running guest mode */
95 mfmsr r3
96 andis. r0,r3,MSR_GS@h
97 bne 1f
98
99 /* Now set the I-ERAT watermark to 15 */
100 lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h
101 mtspr SPRN_MMUCR0, r4
102 li r4,A2_IERAT_SIZE-1
103 PPC_ERATWE(R4,R4,3)
104
105 /* Now set the D-ERAT watermark to 31 */
106 lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h
107 mtspr SPRN_MMUCR0, r4
108 li r4,A2_DERAT_SIZE-1
109 PPC_ERATWE(R4,R4,3)
110
111 /* And invalidate the beast just in case. That won't get rid of
112 * a bolted entry though it will be in LRU and so will go away eventually
113 * but let's not bother for now
114 */
115 PPC_ERATILX(0,0,R0)
1161:
117 blr
118
119_GLOBAL(__restore_cpu_a2)
120 b __setup_cpu_a2
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index 11da04a4625a..965291b4c2fa 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2149,44 +2149,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
 	}
 #endif /* CONFIG_PPC32 */
 #endif /* CONFIG_E500 */
-
-#ifdef CONFIG_PPC_A2
-	{	/* Standard A2 (>= DD2) + FPU core */
-		.pvr_mask		= 0xffff0000,
-		.pvr_value		= 0x00480000,
-		.cpu_name		= "A2 (>= DD2)",
-		.cpu_features		= CPU_FTRS_A2,
-		.cpu_user_features	= COMMON_USER_PPC64,
-		.mmu_features		= MMU_FTRS_A2,
-		.icache_bsize		= 64,
-		.dcache_bsize		= 64,
-		.num_pmcs		= 0,
-		.cpu_setup		= __setup_cpu_a2,
-		.cpu_restore		= __restore_cpu_a2,
-		.machine_check		= machine_check_generic,
-		.platform		= "ppca2",
-	},
-	{	/* This is a default entry to get going, to be replaced by
-		 * a real one at some stage
-		 */
-#define CPU_FTRS_BASE_BOOK3E (CPU_FTR_USE_TB | \
-	    CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_SMT | \
-	    CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
-		.pvr_mask		= 0x00000000,
-		.pvr_value		= 0x00000000,
-		.cpu_name		= "Book3E",
-		.cpu_features		= CPU_FTRS_BASE_BOOK3E,
-		.cpu_user_features	= COMMON_USER_PPC64,
-		.mmu_features		= MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX |
-					  MMU_FTR_USE_TLBIVAX_BCAST |
-					  MMU_FTR_LOCK_BCAST_INVAL,
-		.icache_bsize		= 64,
-		.dcache_bsize		= 64,
-		.num_pmcs		= 0,
-		.machine_check		= machine_check_generic,
-		.platform		= "power6",
-	},
-#endif /* CONFIG_PPC_A2 */
 };
 
 static struct cpu_spec the_cpu_spec;
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 771b4e92e5d9..bb9cac6c8051 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1467,22 +1467,6 @@ a2_tlbinit_after_linear_map:
 	.globl  a2_tlbinit_after_iprot_flush
 a2_tlbinit_after_iprot_flush:
 
-#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
-	/* Now establish early debug mappings if applicable */
-	/* Restore the MAS0 we used for linear mapping load */
-	mtspr	SPRN_MAS0,r11
-
-	lis	r3,(MAS1_VALID | MAS1_IPROT)@h
-	ori	r3,r3,(BOOK3E_PAGESZ_4K << MAS1_TSIZE_SHIFT)
-	mtspr	SPRN_MAS1,r3
-	LOAD_REG_IMMEDIATE(r3, WSP_UART_VIRT | MAS2_I | MAS2_G)
-	mtspr	SPRN_MAS2,r3
-	LOAD_REG_IMMEDIATE(r3, WSP_UART_PHYS | MAS3_SR | MAS3_SW)
-	mtspr	SPRN_MAS7_MAS3,r3
-	/* re-use the MAS8 value from the linear mapping */
-	tlbwe
-#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
-
 	PPC_TLBILX(0,0,R0)
 	sync
 	isync
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index a15837519dca..b7aa07279a63 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -62,8 +62,6 @@ void __init udbg_early_init(void)
 	udbg_init_cpm();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO)
 	udbg_init_usbgecko();
-#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
-	udbg_init_wsp();
 #elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS)
 	/* In memory console */
 	udbg_init_memcons();
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 75702e207b29..6e7c4923b5ea 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -296,14 +296,3 @@ void __init udbg_init_40x_realmode(void)
 }
 
 #endif /* CONFIG_PPC_EARLY_DEBUG_40x */
-
-
-#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
-
-void __init udbg_init_wsp(void)
-{
-	udbg_uart_init_mmio((void *)WSP_UART_VIRT, 1);
-	udbg_uart_setup(57600, 50000000);
-}
-
-#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index bf9c6d4cd26c..391b3f6b54a3 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -19,7 +19,6 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig"
19source "arch/powerpc/platforms/44x/Kconfig" 19source "arch/powerpc/platforms/44x/Kconfig"
20source "arch/powerpc/platforms/40x/Kconfig" 20source "arch/powerpc/platforms/40x/Kconfig"
21source "arch/powerpc/platforms/amigaone/Kconfig" 21source "arch/powerpc/platforms/amigaone/Kconfig"
22source "arch/powerpc/platforms/wsp/Kconfig"
23 22
24config KVM_GUEST 23config KVM_GUEST
25 bool "KVM Guest support" 24 bool "KVM Guest support"
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 43b65ad1970a..a41bd023647a 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -148,10 +148,6 @@ config POWER4
 	depends on PPC64 && PPC_BOOK3S
 	def_bool y
 
-config PPC_A2
-	bool
-	depends on PPC_BOOK3E_64
-
 config TUNE_CELL
 	bool "Optimize for Cell Broadband Engine"
 	depends on PPC64 && PPC_BOOK3S
@@ -280,7 +276,7 @@ config VSX
 
 config PPC_ICSWX
 	bool "Support for PowerPC icswx coprocessor instruction"
-	depends on POWER4 || PPC_A2
+	depends on POWER4
 	default n
 	---help---
 
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 879b4a448498..469ef170d218 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -22,4 +22,3 @@ obj-$(CONFIG_PPC_CELL) += cell/
 obj-$(CONFIG_PPC_PS3)		+= ps3/
 obj-$(CONFIG_EMBEDDED6xx)	+= embedded6xx/
 obj-$(CONFIG_AMIGAONE)		+= amigaone/
-obj-$(CONFIG_PPC_WSP)		+= wsp/
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig
deleted file mode 100644
index 422a175b10ee..000000000000
--- a/arch/powerpc/platforms/wsp/Kconfig
+++ /dev/null
@@ -1,30 +0,0 @@
1config PPC_WSP
2 bool
3 select PPC_A2
4 select GENERIC_TBSYNC
5 select PPC_ICSWX
6 select PPC_SCOM
7 select PPC_XICS
8 select PPC_ICP_NATIVE
9 select PCI
10 select PPC_IO_WORKAROUNDS if PCI
11 select PPC_INDIRECT_PIO if PCI
12 default n
13
14menu "WSP platform selection"
15 depends on PPC_BOOK3E_64
16
17config PPC_PSR2
18 bool "PowerEN System Reference Platform 2"
19 select EPAPR_BOOT
20 select PPC_WSP
21 default y
22
23config PPC_CHROMA
24 bool "PowerEN PCIe Chroma Card"
25 select EPAPR_BOOT
26 select PPC_WSP
27 select OF_DYNAMIC
28 default y
29
30endmenu
diff --git a/arch/powerpc/platforms/wsp/Makefile b/arch/powerpc/platforms/wsp/Makefile
deleted file mode 100644
index 162fc60125a2..000000000000
--- a/arch/powerpc/platforms/wsp/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
1ccflags-y += $(NO_MINIMAL_TOC)
2
3obj-y += setup.o ics.o wsp.o
4obj-$(CONFIG_PPC_PSR2) += psr2.o
5obj-$(CONFIG_PPC_CHROMA) += chroma.o h8.o
6obj-$(CONFIG_PPC_WSP) += opb_pic.o
7obj-$(CONFIG_PPC_WSP) += scom_wsp.o
8obj-$(CONFIG_SMP) += smp.o scom_smp.o
9obj-$(CONFIG_PCI) += wsp_pci.o
10obj-$(CONFIG_PCI_MSI) += msi.o
diff --git a/arch/powerpc/platforms/wsp/chroma.c b/arch/powerpc/platforms/wsp/chroma.c
deleted file mode 100644
index aaa46b353715..000000000000
--- a/arch/powerpc/platforms/wsp/chroma.c
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * Copyright 2008-2011, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/delay.h>
11#include <linux/init.h>
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/mm.h>
15#include <linux/of.h>
16#include <linux/smp.h>
17#include <linux/time.h>
18#include <linux/of_fdt.h>
19
20#include <asm/machdep.h>
21#include <asm/udbg.h>
22
23#include "ics.h"
24#include "wsp.h"
25
26void __init chroma_setup_arch(void)
27{
28 wsp_setup_arch();
29 wsp_setup_h8();
30
31}
32
33static int __init chroma_probe(void)
34{
35 unsigned long root = of_get_flat_dt_root();
36
37 if (!of_flat_dt_is_compatible(root, "ibm,wsp-chroma"))
38 return 0;
39
40 return 1;
41}
42
43define_machine(chroma_md) {
44 .name = "Chroma PCIe",
45 .probe = chroma_probe,
46 .setup_arch = chroma_setup_arch,
47 .restart = wsp_h8_restart,
48 .power_off = wsp_h8_power_off,
49 .halt = wsp_halt,
50 .calibrate_decr = generic_calibrate_decr,
51 .init_IRQ = wsp_setup_irq,
52 .progress = udbg_progress,
53 .power_save = book3e_idle,
54};
55
56machine_arch_initcall(chroma_md, wsp_probe_devices);
diff --git a/arch/powerpc/platforms/wsp/h8.c b/arch/powerpc/platforms/wsp/h8.c
deleted file mode 100644
index a3c87f395750..000000000000
--- a/arch/powerpc/platforms/wsp/h8.c
+++ /dev/null
@@ -1,135 +0,0 @@
1/*
2 * Copyright 2008-2011, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/of.h>
12#include <linux/io.h>
13#include <linux/of_address.h>
14
15#include "wsp.h"
16
17/*
18 * The UART connection to the H8 is over ttyS1 which is just a 16550.
19 * We assume that FW has it setup right and no one messes with it.
20 */
21
22
23static u8 __iomem *h8;
24
25#define RBR 0 /* Receiver Buffer Register */
26#define THR 0 /* Transmitter Holding Register */
27#define LSR 5 /* Line Status Register */
28#define LSR_DR 0x01 /* LSR value for Data-Ready */
29#define LSR_THRE 0x20 /* LSR value for Transmitter-Holding-Register-Empty */
30static void wsp_h8_putc(int c)
31{
32 u8 lsr;
33
34 do {
35 lsr = readb(h8 + LSR);
36 } while ((lsr & LSR_THRE) != LSR_THRE);
37 writeb(c, h8 + THR);
38}
39
40static int wsp_h8_getc(void)
41{
42 u8 lsr;
43
44 do {
45 lsr = readb(h8 + LSR);
46 } while ((lsr & LSR_DR) != LSR_DR);
47
48 return readb(h8 + RBR);
49}
50
51static void wsp_h8_puts(const char *s, int sz)
52{
53 int i;
54
55 for (i = 0; i < sz; i++) {
56 wsp_h8_putc(s[i]);
57
58 /* no flow control so wait for echo */
59 wsp_h8_getc();
60 }
61 wsp_h8_putc('\r');
62 wsp_h8_putc('\n');
63}
64
65static void wsp_h8_terminal_cmd(const char *cmd, int sz)
66{
67 hard_irq_disable();
68 wsp_h8_puts(cmd, sz);
69 /* should never return, but just in case */
70 for (;;)
71 continue;
72}
73
74
75void wsp_h8_restart(char *cmd)
76{
77 static const char restart[] = "warm-reset";
78
79 (void)cmd;
80 wsp_h8_terminal_cmd(restart, sizeof(restart) - 1);
81}
82
83void wsp_h8_power_off(void)
84{
85 static const char off[] = "power-off";
86
87 wsp_h8_terminal_cmd(off, sizeof(off) - 1);
88}
89
90static void __iomem *wsp_h8_getaddr(void)
91{
92 struct device_node *aliases;
93 struct device_node *uart;
94 struct property *path;
95 void __iomem *va = NULL;
96
97 /*
98 * there is nothing in the devtree to tell us which is mapped
99 * to the H8, but se know it is the second serial port.
100 */
101
102 aliases = of_find_node_by_path("/aliases");
103 if (aliases == NULL)
104 return NULL;
105
106 path = of_find_property(aliases, "serial1", NULL);
107 if (path == NULL)
108 goto out;
109
110 uart = of_find_node_by_path(path->value);
111 if (uart == NULL)
112 goto out;
113
114 va = of_iomap(uart, 0);
115
116 /* remove it so no one messes with it */
117 of_detach_node(uart);
118 of_node_put(uart);
119
120out:
121 of_node_put(aliases);
122
123 return va;
124}
125
126void __init wsp_setup_h8(void)
127{
128 h8 = wsp_h8_getaddr();
129
130 /* Devtree change? lets hard map it anyway */
131 if (h8 == NULL) {
132 pr_warn("UART to H8 could not be found");
133 h8 = ioremap(0xffc0008000ULL, 0x100);
134 }
135}
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
deleted file mode 100644
index 9cd92e645028..000000000000
--- a/arch/powerpc/platforms/wsp/ics.c
+++ /dev/null
@@ -1,762 +0,0 @@
1/*
2 * Copyright 2008-2011 IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/cpu.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/irq.h>
14#include <linux/kernel.h>
15#include <linux/msi.h>
16#include <linux/of.h>
17#include <linux/slab.h>
18#include <linux/smp.h>
19#include <linux/spinlock.h>
20#include <linux/types.h>
21#include <linux/of_address.h>
22#include <linux/of_irq.h>
23
24#include <asm/io.h>
25#include <asm/irq.h>
26#include <asm/xics.h>
27
28#include "wsp.h"
29#include "ics.h"
30
31
32/* WSP ICS */
33
34struct wsp_ics {
35 struct ics ics;
36 struct device_node *dn;
37 void __iomem *regs;
38 spinlock_t lock;
39 unsigned long *bitmap;
40 u32 chip_id;
41 u32 lsi_base;
42 u32 lsi_count;
43 u64 hwirq_start;
44 u64 count;
45#ifdef CONFIG_SMP
46 int *hwirq_cpu_map;
47#endif
48};
49
50#define to_wsp_ics(ics) container_of(ics, struct wsp_ics, ics)
51
52#define INT_SRC_LAYER_BUID_REG(base) ((base) + 0x00)
53#define IODA_TBL_ADDR_REG(base) ((base) + 0x18)
54#define IODA_TBL_DATA_REG(base) ((base) + 0x20)
55#define XIVE_UPDATE_REG(base) ((base) + 0x28)
56#define ICS_INT_CAPS_REG(base) ((base) + 0x30)
57
58#define TBL_AUTO_INCREMENT ((1UL << 63) | (1UL << 15))
59#define TBL_SELECT_XIST (1UL << 48)
60#define TBL_SELECT_XIVT (1UL << 49)
61
62#define IODA_IRQ(irq) ((irq) & (0x7FFULL)) /* HRM 5.1.3.4 */
63
64#define XIST_REQUIRED 0x8
65#define XIST_REJECTED 0x4
66#define XIST_PRESENTED 0x2
67#define XIST_PENDING 0x1
68
69#define XIVE_SERVER_SHIFT 42
70#define XIVE_SERVER_MASK 0xFFFFULL
71#define XIVE_PRIORITY_MASK 0xFFULL
72#define XIVE_PRIORITY_SHIFT 32
73#define XIVE_WRITE_ENABLE (1ULL << 63)
74
75/*
76 * The docs refer to a 6 bit field called ChipID, which consists of a
77 * 3 bit NodeID and a 3 bit ChipID. On WSP the ChipID is always zero
78 * so we ignore it, and every where we use "chip id" in this code we
79 * mean the NodeID.
80 */
81#define WSP_ICS_CHIP_SHIFT 17
82
83
84static struct wsp_ics *ics_list;
85static int num_ics;
86
87/* ICS Source controller accessors */
88
89static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq)
90{
91 unsigned long flags;
92 u64 xive;
93
94 spin_lock_irqsave(&ics->lock, flags);
95 out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq));
96 xive = in_be64(IODA_TBL_DATA_REG(ics->regs));
97 spin_unlock_irqrestore(&ics->lock, flags);
98
99 return xive;
100}
101
102static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive)
103{
104 xive &= ~XIVE_ADDR_MASK;
105 xive |= (irq & XIVE_ADDR_MASK);
106 xive |= XIVE_WRITE_ENABLE;
107
108 out_be64(XIVE_UPDATE_REG(ics->regs), xive);
109}
110
111static u64 xive_set_server(u64 xive, unsigned int server)
112{
113 u64 mask = ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT);
114
115 xive &= mask;
116 xive |= (server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT;
117
118 return xive;
119}
120
121static u64 xive_set_priority(u64 xive, unsigned int priority)
122{
123 u64 mask = ~(XIVE_PRIORITY_MASK << XIVE_PRIORITY_SHIFT);
124
125 xive &= mask;
126 xive |= (priority & XIVE_PRIORITY_MASK) << XIVE_PRIORITY_SHIFT;
127
128 return xive;
129}
130
131
132#ifdef CONFIG_SMP
133/* Find logical CPUs within mask on a given chip and store result in ret */
134void cpus_on_chip(int chip_id, cpumask_t *mask, cpumask_t *ret)
135{
136 int cpu, chip;
137 struct device_node *cpu_dn, *dn;
138 const u32 *prop;
139
140 cpumask_clear(ret);
141 for_each_cpu(cpu, mask) {
142 cpu_dn = of_get_cpu_node(cpu, NULL);
143 if (!cpu_dn)
144 continue;
145
146 prop = of_get_property(cpu_dn, "at-node", NULL);
147 if (!prop) {
148 of_node_put(cpu_dn);
149 continue;
150 }
151
152 dn = of_find_node_by_phandle(*prop);
153 of_node_put(cpu_dn);
154
155 chip = wsp_get_chip_id(dn);
156 if (chip == chip_id)
157 cpumask_set_cpu(cpu, ret);
158
159 of_node_put(dn);
160 }
161}
162
163/* Store a suitable CPU to handle a hwirq in the ics->hwirq_cpu_map cache */
164static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
165 const cpumask_t *affinity)
166{
167 cpumask_var_t avail, newmask;
168 int ret = -ENOMEM, cpu, cpu_rover = 0, target;
169 int index = hwirq - ics->hwirq_start;
170 unsigned int nodeid;
171
172 BUG_ON(index < 0 || index >= ics->count);
173
174 if (!ics->hwirq_cpu_map)
175 return -ENOMEM;
176
177 if (!distribute_irqs) {
178 ics->hwirq_cpu_map[hwirq - ics->hwirq_start] = xics_default_server;
179 return 0;
180 }
181
182 /* Allocate needed CPU masks */
183 if (!alloc_cpumask_var(&avail, GFP_KERNEL))
184 goto ret;
185 if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
186 goto freeavail;
187
188 /* Find PBus attached to the source of this IRQ */
189 nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x3; /* 12:14 */
190
191 /* Find CPUs that could handle this IRQ */
192 if (affinity)
193 cpumask_and(avail, cpu_online_mask, affinity);
194 else
195 cpumask_copy(avail, cpu_online_mask);
196
197 /* Narrow selection down to logical CPUs on the same chip */
198 cpus_on_chip(nodeid, avail, newmask);
199
200 /* Ensure we haven't narrowed it down to 0 */
201 if (unlikely(cpumask_empty(newmask))) {
202 if (unlikely(cpumask_empty(avail))) {
203 ret = -1;
204 goto out;
205 }
206 cpumask_copy(newmask, avail);
207 }
208
209 /* Choose a CPU out of those we narrowed it down to in round robin */
210 target = hwirq % cpumask_weight(newmask);
211 for_each_cpu(cpu, newmask) {
212 if (cpu_rover++ >= target) {
213 ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu);
214 ret = 0;
215 goto out;
216 }
217 }
218
219 /* Shouldn't happen */
220 WARN_ON(1);
221
222out:
223 free_cpumask_var(newmask);
224freeavail:
225 free_cpumask_var(avail);
226ret:
227 if (ret < 0) {
228 ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask);
229 pr_warning("Error, falling hwirq 0x%x routing back to CPU %i\n",
230 hwirq, ics->hwirq_cpu_map[index]);
231 }
232 return ret;
233}
234
235static void alloc_irq_map(struct wsp_ics *ics)
236{
237 int i;
238
239 ics->hwirq_cpu_map = kmalloc(sizeof(int) * ics->count, GFP_KERNEL);
240 if (!ics->hwirq_cpu_map) {
241 pr_warning("Allocate hwirq_cpu_map failed, "
242 "IRQ balancing disabled\n");
243 return;
244 }
245
246 for (i=0; i < ics->count; i++)
247 ics->hwirq_cpu_map[i] = xics_default_server;
248}
249
250static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
251{
252 int index = hwirq - ics->hwirq_start;
253
254 BUG_ON(index < 0 || index >= ics->count);
255
256 if (!ics->hwirq_cpu_map)
257 return xics_default_server;
258
259 return ics->hwirq_cpu_map[index];
260}
261#else /* !CONFIG_SMP */
262static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
263 const cpumask_t *affinity)
264{
265 return 0;
266}
267
268static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
269{
270 return xics_default_server;
271}
272
273static void alloc_irq_map(struct wsp_ics *ics) { }
274#endif
275
276static void wsp_chip_unmask_irq(struct irq_data *d)
277{
278 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
279 struct wsp_ics *ics;
280 int server;
281 u64 xive;
282
283 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
284 return;
285
286 ics = d->chip_data;
287 if (WARN_ON(!ics))
288 return;
289
290 server = get_irq_server(ics, hw_irq);
291
292 xive = wsp_ics_get_xive(ics, hw_irq);
293 xive = xive_set_server(xive, server);
294 xive = xive_set_priority(xive, DEFAULT_PRIORITY);
295 wsp_ics_set_xive(ics, hw_irq, xive);
296}
297
298static unsigned int wsp_chip_startup(struct irq_data *d)
299{
300 /* unmask it */
301 wsp_chip_unmask_irq(d);
302 return 0;
303}
304
305static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics)
306{
307 u64 xive;
308
309 if (hw_irq == XICS_IPI)
310 return;
311
312 if (WARN_ON(!ics))
313 return;
314 xive = wsp_ics_get_xive(ics, hw_irq);
315 xive = xive_set_server(xive, xics_default_server);
316 xive = xive_set_priority(xive, LOWEST_PRIORITY);
317 wsp_ics_set_xive(ics, hw_irq, xive);
318}
319
320static void wsp_chip_mask_irq(struct irq_data *d)
321{
322 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
323 struct wsp_ics *ics = d->chip_data;
324
325 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
326 return;
327
328 wsp_mask_real_irq(hw_irq, ics);
329}
330
331static int wsp_chip_set_affinity(struct irq_data *d,
332 const struct cpumask *cpumask, bool force)
333{
334 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
335 struct wsp_ics *ics;
336 int ret;
337 u64 xive;
338
339 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
340 return -1;
341
342 ics = d->chip_data;
343 if (WARN_ON(!ics))
344 return -1;
345 xive = wsp_ics_get_xive(ics, hw_irq);
346
347 /*
348 * For the moment only implement delivery to all cpus or one cpu.
349 * Get current irq_server for the given irq
350 */
351 ret = cache_hwirq_map(ics, hw_irq, cpumask);
352 if (ret == -1) {
353 char cpulist[128];
354 cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
355 pr_warning("%s: No online cpus in the mask %s for irq %d\n",
356 __func__, cpulist, d->irq);
357 return -1;
358 } else if (ret == -ENOMEM) {
359 pr_warning("%s: Out of memory\n", __func__);
360 return -1;
361 }
362
363 xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
364 wsp_ics_set_xive(ics, hw_irq, xive);
365
366 return IRQ_SET_MASK_OK;
367}
368
369static struct irq_chip wsp_irq_chip = {
370 .name = "WSP ICS",
371 .irq_startup = wsp_chip_startup,
372 .irq_mask = wsp_chip_mask_irq,
373 .irq_unmask = wsp_chip_unmask_irq,
374 .irq_set_affinity = wsp_chip_set_affinity
375};
376
377static int wsp_ics_host_match(struct ics *ics, struct device_node *dn)
378{
379 /* All ICSs in the system implement a global irq number space,
380 * so match against them all. */
381 return of_device_is_compatible(dn, "ibm,ppc-xics");
382}
383
384static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq)
385{
386 if (hwirq >= wsp_ics->hwirq_start &&
387 hwirq < wsp_ics->hwirq_start + wsp_ics->count)
388 return 1;
389
390 return 0;
391}
392
393static int wsp_ics_map(struct ics *ics, unsigned int virq)
394{
395 struct wsp_ics *wsp_ics = to_wsp_ics(ics);
396 unsigned int hw_irq = virq_to_hw(virq);
397 unsigned long flags;
398
399 if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
400 return -ENOENT;
401
402 irq_set_chip_and_handler(virq, &wsp_irq_chip, handle_fasteoi_irq);
403
404 irq_set_chip_data(virq, wsp_ics);
405
406 spin_lock_irqsave(&wsp_ics->lock, flags);
407 bitmap_allocate_region(wsp_ics->bitmap, hw_irq - wsp_ics->hwirq_start, 0);
408 spin_unlock_irqrestore(&wsp_ics->lock, flags);
409
410 return 0;
411}
412
413static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq)
414{
415 struct wsp_ics *wsp_ics = to_wsp_ics(ics);
416
417 if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
418 return;
419
420 pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq);
421 wsp_mask_real_irq(hw_irq, wsp_ics);
422}
423
424static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq)
425{
426 struct wsp_ics *wsp_ics = to_wsp_ics(ics);
427
428 if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
429 return -ENOENT;
430
431 return get_irq_server(wsp_ics, hw_irq);
432}
433
434/* HW Number allocation API */
435
436static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn)
437{
438 struct device_node *iparent;
439 int i;
440
441 iparent = of_irq_find_parent(dn);
442 if (!iparent) {
443 pr_err("wsp_ics: Failed to find interrupt parent!\n");
444 return NULL;
445 }
446
447 for(i = 0; i < num_ics; i++) {
448 if(ics_list[i].dn == iparent)
449 break;
450 }
451
452 if (i >= num_ics) {
453 pr_err("wsp_ics: Unable to find parent bitmap!\n");
454 return NULL;
455 }
456
457 return &ics_list[i];
458}
459
460int wsp_ics_alloc_irq(struct device_node *dn, int num)
461{
462 struct wsp_ics *ics;
463 int order, offset;
464
465 ics = wsp_ics_find_dn_ics(dn);
466 if (!ics)
467 return -ENODEV;
468
469 /* Fast, but overly strict if num isn't a power of two */
470 order = get_count_order(num);
471
472 spin_lock_irq(&ics->lock);
473 offset = bitmap_find_free_region(ics->bitmap, ics->count, order);
474 spin_unlock_irq(&ics->lock);
475
476 if (offset < 0)
477 return offset;
478
479 return offset + ics->hwirq_start;
480}
481
482void wsp_ics_free_irq(struct device_node *dn, unsigned int irq)
483{
484 struct wsp_ics *ics;
485
486 ics = wsp_ics_find_dn_ics(dn);
487 if (WARN_ON(!ics))
488 return;
489
490 spin_lock_irq(&ics->lock);
491 bitmap_release_region(ics->bitmap, irq, 0);
492 spin_unlock_irq(&ics->lock);
493}
494
495/* Initialisation */
496
497static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics,
498 struct device_node *dn)
499{
500 int len, i, j, size;
501 u32 start, count;
502 const u32 *p;
503
504 size = BITS_TO_LONGS(ics->count) * sizeof(long);
505 ics->bitmap = kzalloc(size, GFP_KERNEL);
506 if (!ics->bitmap) {
507 pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n");
508 return -ENOMEM;
509 }
510
511 spin_lock_init(&ics->lock);
512
513 p = of_get_property(dn, "available-ranges", &len);
514 if (!p || !len) {
515 /* FIXME this should be a WARN() once mambo is updated */
516 pr_err("wsp_ics: No available-ranges defined for %s\n",
517 dn->full_name);
518 return 0;
519 }
520
521 if (len % (2 * sizeof(u32)) != 0) {
522 /* FIXME this should be a WARN() once mambo is updated */
523 pr_err("wsp_ics: Invalid available-ranges for %s\n",
524 dn->full_name);
525 return 0;
526 }
527
528 bitmap_fill(ics->bitmap, ics->count);
529
530 for (i = 0; i < len / sizeof(u32); i += 2) {
531 start = of_read_number(p + i, 1);
532 count = of_read_number(p + i + 1, 1);
533
534 pr_devel("%s: start: %d count: %d\n", __func__, start, count);
535
536 if ((start + count) > (ics->hwirq_start + ics->count) ||
537 start < ics->hwirq_start) {
538 pr_err("wsp_ics: Invalid range! -> %d to %d\n",
539 start, start + count);
540 break;
541 }
542
543 for (j = 0; j < count; j++)
544 bitmap_release_region(ics->bitmap,
545 (start + j) - ics->hwirq_start, 0);
546 }
547
548 /* Ensure LSIs are not available for allocation */
549 bitmap_allocate_region(ics->bitmap, ics->lsi_base,
550 get_count_order(ics->lsi_count));
551
552 return 0;
553}
554
555static int __init wsp_ics_setup(struct wsp_ics *ics, struct device_node *dn)
556{
557 u32 lsi_buid, msi_buid, msi_base, msi_count;
558 void __iomem *regs;
559 const u32 *p;
560 int rc, len, i;
561 u64 caps, buid;
562
563 p = of_get_property(dn, "interrupt-ranges", &len);
564 if (!p || len < (2 * sizeof(u32))) {
565 pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n",
566 dn->full_name);
567 return -ENOENT;
568 }
569
570 if (len > (2 * sizeof(u32))) {
571 pr_err("wsp_ics: Multiple ics ranges not supported.\n");
572 return -EINVAL;
573 }
574
575 regs = of_iomap(dn, 0);
576 if (!regs) {
577 pr_err("wsp_ics: of_iomap(%s) failed\n", dn->full_name);
578 return -ENXIO;
579 }
580
581 ics->hwirq_start = of_read_number(p, 1);
582 ics->count = of_read_number(p + 1, 1);
583 ics->regs = regs;
584
585 ics->chip_id = wsp_get_chip_id(dn);
586 if (WARN_ON(ics->chip_id < 0))
587 ics->chip_id = 0;
588
589 /* Get some informations about the critter */
590 caps = in_be64(ICS_INT_CAPS_REG(ics->regs));
591 buid = in_be64(INT_SRC_LAYER_BUID_REG(ics->regs));
592 ics->lsi_count = caps >> 56;
593 msi_count = (caps >> 44) & 0x7ff;
594
595 /* Note: LSI BUID is 9 bits, but really only 3 are BUID and the
596 * rest is mixed in the interrupt number. We store the whole
597 * thing though
598 */
599 lsi_buid = (buid >> 48) & 0x1ff;
600 ics->lsi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | lsi_buid << 5;
601 msi_buid = (buid >> 37) & 0x7;
602 msi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | msi_buid << 11;
603
604 pr_info("wsp_ics: Found %s\n", dn->full_name);
605 pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n",
606 ics->hwirq_start, ics->hwirq_start + ics->count - 1);
607 pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n",
608 ics->lsi_count, ics->lsi_base,
609 ics->lsi_base + ics->lsi_count - 1);
610 pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n",
611 msi_count, msi_base,
612 msi_base + msi_count - 1);
613
614 /* Let's check the HW config is sane */
615 if (ics->lsi_base < ics->hwirq_start ||
616 (ics->lsi_base + ics->lsi_count) > (ics->hwirq_start + ics->count))
617 pr_warning("wsp_ics: WARNING ! LSIs out of interrupt-ranges !\n");
618 if (msi_base < ics->hwirq_start ||
619 (msi_base + msi_count) > (ics->hwirq_start + ics->count))
620 pr_warning("wsp_ics: WARNING ! MSIs out of interrupt-ranges !\n");
621
622 /* We don't check for overlap between LSI and MSI, which will happen
623 * if we use the same BUID, I'm not sure yet how legit that is.
624 */
625
626 rc = wsp_ics_bitmap_setup(ics, dn);
627 if (rc) {
628 iounmap(regs);
629 return rc;
630 }
631
632 ics->dn = of_node_get(dn);
633 alloc_irq_map(ics);
634
635 for(i = 0; i < ics->count; i++)
636 wsp_mask_real_irq(ics->hwirq_start + i, ics);
637
638 ics->ics.map = wsp_ics_map;
639 ics->ics.mask_unknown = wsp_ics_mask_unknown;
640 ics->ics.get_server = wsp_ics_get_server;
641 ics->ics.host_match = wsp_ics_host_match;
642
643 xics_register_ics(&ics->ics);
644
645 return 0;
646}
647
648static void __init wsp_ics_set_default_server(void)
649{
650 struct device_node *np;
651 u32 hwid;
652
653 /* Find the server number for the boot cpu. */
654 np = of_get_cpu_node(boot_cpuid, NULL);
655 BUG_ON(!np);
656
657 hwid = get_hard_smp_processor_id(boot_cpuid);
658
659 pr_info("wsp_ics: default server is %#x, CPU %s\n", hwid, np->full_name);
660 xics_default_server = hwid;
661
662 of_node_put(np);
663}
664
665static int __init wsp_ics_init(void)
666{
667 struct device_node *dn;
668 struct wsp_ics *ics;
669 int rc, found;
670
671 wsp_ics_set_default_server();
672
673 found = 0;
674 for_each_compatible_node(dn, NULL, "ibm,ppc-xics")
675 found++;
676
677 if (found == 0) {
678 pr_err("wsp_ics: No ICS's found!\n");
679 return -ENODEV;
680 }
681
682 ics_list = kmalloc(sizeof(*ics) * found, GFP_KERNEL);
683 if (!ics_list) {
684 pr_err("wsp_ics: No memory for structs.\n");
685 return -ENOMEM;
686 }
687
688 num_ics = 0;
689 ics = ics_list;
690 for_each_compatible_node(dn, NULL, "ibm,wsp-xics") {
691 rc = wsp_ics_setup(ics, dn);
692 if (rc == 0) {
693 ics++;
694 num_ics++;
695 }
696 }
697
698 if (found != num_ics) {
699 pr_err("wsp_ics: Failed setting up %d ICS's\n",
700 found - num_ics);
701 return -1;
702 }
703
704 return 0;
705}
706
707void __init wsp_init_irq(void)
708{
709 wsp_ics_init();
710 xics_init();
711
712 /* We need to patch our irq chip's EOI to point to the right ICP */
713 wsp_irq_chip.irq_eoi = icp_ops->eoi;
714}
715
716#ifdef CONFIG_PCI_MSI
717static void wsp_ics_msi_unmask_irq(struct irq_data *d)
718{
719 wsp_chip_unmask_irq(d);
720 unmask_msi_irq(d);
721}
722
723static unsigned int wsp_ics_msi_startup(struct irq_data *d)
724{
725 wsp_ics_msi_unmask_irq(d);
726 return 0;
727}
728
729static void wsp_ics_msi_mask_irq(struct irq_data *d)
730{
731 mask_msi_irq(d);
732 wsp_chip_mask_irq(d);
733}
734
735/*
736 * we do it this way because we reassinge default EOI handling in
737 * irq_init() above
738 */
739static void wsp_ics_eoi(struct irq_data *data)
740{
741 wsp_irq_chip.irq_eoi(data);
742}
743
744static struct irq_chip wsp_ics_msi = {
745 .name = "WSP ICS MSI",
746 .irq_startup = wsp_ics_msi_startup,
747 .irq_mask = wsp_ics_msi_mask_irq,
748 .irq_unmask = wsp_ics_msi_unmask_irq,
749 .irq_eoi = wsp_ics_eoi,
750 .irq_set_affinity = wsp_chip_set_affinity
751};
752
753void wsp_ics_set_msi_chip(unsigned int irq)
754{
755 irq_set_chip(irq, &wsp_ics_msi);
756}
757
758void wsp_ics_set_std_chip(unsigned int irq)
759{
760 irq_set_chip(irq, &wsp_irq_chip);
761}
762#endif /* CONFIG_PCI_MSI */
diff --git a/arch/powerpc/platforms/wsp/ics.h b/arch/powerpc/platforms/wsp/ics.h
deleted file mode 100644
index 07b644e0cf97..000000000000
--- a/arch/powerpc/platforms/wsp/ics.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * Copyright 2009 IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#ifndef __ICS_H
11#define __ICS_H
12
13#define XIVE_ADDR_MASK 0x7FFULL
14
15extern void wsp_init_irq(void);
16
17extern int wsp_ics_alloc_irq(struct device_node *dn, int num);
18extern void wsp_ics_free_irq(struct device_node *dn, unsigned int irq);
19
20#ifdef CONFIG_PCI_MSI
21extern void wsp_ics_set_msi_chip(unsigned int irq);
22extern void wsp_ics_set_std_chip(unsigned int irq);
23#endif /* CONFIG_PCI_MSI */
24
25#endif /* __ICS_H */
diff --git a/arch/powerpc/platforms/wsp/msi.c b/arch/powerpc/platforms/wsp/msi.c
deleted file mode 100644
index 380882f27add..000000000000
--- a/arch/powerpc/platforms/wsp/msi.c
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 * Copyright 2011 Michael Ellerman, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/pci.h>
12#include <linux/msi.h>
13#include <linux/irq.h>
14#include <linux/interrupt.h>
15
16#include "msi.h"
17#include "ics.h"
18#include "wsp_pci.h"
19
20/* Magic addresses for 32 & 64-bit MSIs with hardcoded MVE 0 */
21#define MSI_ADDR_32 0xFFFF0000ul
22#define MSI_ADDR_64 0x1000000000000000ul
23
24int wsp_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
25{
26 struct pci_controller *phb;
27 struct msi_desc *entry;
28 struct msi_msg msg;
29 unsigned int virq;
30 int hwirq;
31
32 phb = pci_bus_to_host(dev->bus);
33 if (!phb)
34 return -ENOENT;
35
36 entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
37 if (entry->msi_attrib.is_64) {
38 msg.address_lo = 0;
39 msg.address_hi = MSI_ADDR_64 >> 32;
40 } else {
41 msg.address_lo = MSI_ADDR_32;
42 msg.address_hi = 0;
43 }
44
45 list_for_each_entry(entry, &dev->msi_list, list) {
46 hwirq = wsp_ics_alloc_irq(phb->dn, 1);
47 if (hwirq < 0) {
48 dev_warn(&dev->dev, "wsp_msi: hwirq alloc failed!\n");
49 return hwirq;
50 }
51
52 virq = irq_create_mapping(NULL, hwirq);
53 if (virq == NO_IRQ) {
54 dev_warn(&dev->dev, "wsp_msi: virq alloc failed!\n");
55 return -1;
56 }
57
58 dev_dbg(&dev->dev, "wsp_msi: allocated irq %#x/%#x\n",
59 hwirq, virq);
60
61 wsp_ics_set_msi_chip(virq);
62 irq_set_msi_desc(virq, entry);
63 msg.data = hwirq & XIVE_ADDR_MASK;
64 write_msi_msg(virq, &msg);
65 }
66
67 return 0;
68}
69
70void wsp_teardown_msi_irqs(struct pci_dev *dev)
71{
72 struct pci_controller *phb;
73 struct msi_desc *entry;
74 int hwirq;
75
76 phb = pci_bus_to_host(dev->bus);
77
78 dev_dbg(&dev->dev, "wsp_msi: tearing down msi irqs\n");
79
80 list_for_each_entry(entry, &dev->msi_list, list) {
81 if (entry->irq == NO_IRQ)
82 continue;
83
84 irq_set_msi_desc(entry->irq, NULL);
85 wsp_ics_set_std_chip(entry->irq);
86
87 hwirq = virq_to_hw(entry->irq);
88 /* In this order to avoid racing with irq_create_mapping() */
89 irq_dispose_mapping(entry->irq);
90 wsp_ics_free_irq(phb->dn, hwirq);
91 }
92}
93
94void wsp_setup_phb_msi(struct pci_controller *phb)
95{
96 /* Create a single MVE at offset 0 that matches everything */
97 out_be64(phb->cfg_data + PCIE_REG_IODA_ADDR, PCIE_REG_IODA_AD_TBL_MVT);
98 out_be64(phb->cfg_data + PCIE_REG_IODA_DATA0, 1ull << 63);
99
100 ppc_md.setup_msi_irqs = wsp_setup_msi_irqs;
101 ppc_md.teardown_msi_irqs = wsp_teardown_msi_irqs;
102}
diff --git a/arch/powerpc/platforms/wsp/msi.h b/arch/powerpc/platforms/wsp/msi.h
deleted file mode 100644
index 0ab27b71b24d..000000000000
--- a/arch/powerpc/platforms/wsp/msi.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/*
2 * Copyright 2011 Michael Ellerman, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#ifndef __WSP_MSI_H
11#define __WSP_MSI_H
12
13#ifdef CONFIG_PCI_MSI
14extern void wsp_setup_phb_msi(struct pci_controller *phb);
15#else
16static inline void wsp_setup_phb_msi(struct pci_controller *phb) { }
17#endif
18
19#endif /* __WSP_MSI_H */
diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c
deleted file mode 100644
index 3f6729807938..000000000000
--- a/arch/powerpc/platforms/wsp/opb_pic.c
+++ /dev/null
@@ -1,321 +0,0 @@
1/*
2 * IBM Onboard Peripheral Bus Interrupt Controller
3 *
4 * Copyright 2010 Jack Miller, IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/irq.h>
15#include <linux/of.h>
16#include <linux/slab.h>
17#include <linux/time.h>
18#include <linux/of_address.h>
19#include <linux/of_irq.h>
20
21#include <asm/reg_a2.h>
22#include <asm/irq.h>
23
24#define OPB_NR_IRQS 32
25
26#define OPB_MLSASIER 0x04 /* MLS Accumulated Status IER */
27#define OPB_MLSIR 0x50 /* MLS Interrupt Register */
28#define OPB_MLSIER 0x54 /* MLS Interrupt Enable Register */
29#define OPB_MLSIPR 0x58 /* MLS Interrupt Polarity Register */
30#define OPB_MLSIIR 0x5c /* MLS Interrupt Inputs Register */
31
32static int opb_index = 0;
33
34struct opb_pic {
35 struct irq_domain *host;
36 void *regs;
37 int index;
38 spinlock_t lock;
39};
40
41static u32 opb_in(struct opb_pic *opb, int offset)
42{
43 return in_be32(opb->regs + offset);
44}
45
46static void opb_out(struct opb_pic *opb, int offset, u32 val)
47{
48 out_be32(opb->regs + offset, val);
49}
50
51static void opb_unmask_irq(struct irq_data *d)
52{
53 struct opb_pic *opb;
54 unsigned long flags;
55 u32 ier, bitset;
56
57 opb = d->chip_data;
58 bitset = (1 << (31 - irqd_to_hwirq(d)));
59
60 spin_lock_irqsave(&opb->lock, flags);
61
62 ier = opb_in(opb, OPB_MLSIER);
63 opb_out(opb, OPB_MLSIER, ier | bitset);
64 ier = opb_in(opb, OPB_MLSIER);
65
66 spin_unlock_irqrestore(&opb->lock, flags);
67}
68
69static void opb_mask_irq(struct irq_data *d)
70{
71 struct opb_pic *opb;
72 unsigned long flags;
73 u32 ier, mask;
74
75 opb = d->chip_data;
76 mask = ~(1 << (31 - irqd_to_hwirq(d)));
77
78 spin_lock_irqsave(&opb->lock, flags);
79
80 ier = opb_in(opb, OPB_MLSIER);
81 opb_out(opb, OPB_MLSIER, ier & mask);
82 ier = opb_in(opb, OPB_MLSIER); // Flush posted writes
83
84 spin_unlock_irqrestore(&opb->lock, flags);
85}
86
87static void opb_ack_irq(struct irq_data *d)
88{
89 struct opb_pic *opb;
90 unsigned long flags;
91 u32 bitset;
92
93 opb = d->chip_data;
94 bitset = (1 << (31 - irqd_to_hwirq(d)));
95
96 spin_lock_irqsave(&opb->lock, flags);
97
98 opb_out(opb, OPB_MLSIR, bitset);
99 opb_in(opb, OPB_MLSIR); // Flush posted writes
100
101 spin_unlock_irqrestore(&opb->lock, flags);
102}
103
104static void opb_mask_ack_irq(struct irq_data *d)
105{
106 struct opb_pic *opb;
107 unsigned long flags;
108 u32 bitset;
109 u32 ier, ir;
110
111 opb = d->chip_data;
112 bitset = (1 << (31 - irqd_to_hwirq(d)));
113
114 spin_lock_irqsave(&opb->lock, flags);
115
116 ier = opb_in(opb, OPB_MLSIER);
117 opb_out(opb, OPB_MLSIER, ier & ~bitset);
118 ier = opb_in(opb, OPB_MLSIER); // Flush posted writes
119
120 opb_out(opb, OPB_MLSIR, bitset);
121 ir = opb_in(opb, OPB_MLSIR); // Flush posted writes
122
123 spin_unlock_irqrestore(&opb->lock, flags);
124}
125
126static int opb_set_irq_type(struct irq_data *d, unsigned int flow)
127{
128 struct opb_pic *opb;
129 unsigned long flags;
130 int invert, ipr, mask, bit;
131
132 opb = d->chip_data;
133
134 	/* The only information we're interested in from the type is whether it's
135 	 * a high or low trigger. For high-triggered interrupts, the polarity
136 	 * set for it in the MLS Interrupt Polarity Register is 0; for low
137 	 * interrupts it's 1, so that the proper input in the MLS Interrupt Inputs
138 	 * Register is interpreted as asserting the interrupt. */
139
140 switch (flow) {
141 case IRQ_TYPE_NONE:
142 opb_mask_irq(d);
143 return 0;
144
145 case IRQ_TYPE_LEVEL_HIGH:
146 invert = 0;
147 break;
148
149 case IRQ_TYPE_LEVEL_LOW:
150 invert = 1;
151 break;
152
153 default:
154 return -EINVAL;
155 }
156
157 bit = (1 << (31 - irqd_to_hwirq(d)));
158 mask = ~bit;
159
160 spin_lock_irqsave(&opb->lock, flags);
161
162 ipr = opb_in(opb, OPB_MLSIPR);
163 ipr = (ipr & mask) | (invert ? bit : 0);
164 opb_out(opb, OPB_MLSIPR, ipr);
165 ipr = opb_in(opb, OPB_MLSIPR); // Flush posted writes
166
167 spin_unlock_irqrestore(&opb->lock, flags);
168
169 /* Record the type in the interrupt descriptor */
170 irqd_set_trigger_type(d, flow);
171
172 return 0;
173}
174
175static struct irq_chip opb_irq_chip = {
176 .name = "OPB",
177 .irq_mask = opb_mask_irq,
178 .irq_unmask = opb_unmask_irq,
179 .irq_mask_ack = opb_mask_ack_irq,
180 .irq_ack = opb_ack_irq,
181 .irq_set_type = opb_set_irq_type
182};
183
184static int opb_host_map(struct irq_domain *host, unsigned int virq,
185 irq_hw_number_t hwirq)
186{
187 struct opb_pic *opb;
188
189 opb = host->host_data;
190
191 /* Most of the important stuff is handled by the generic host code, like
192 * the lookup, so just attach some info to the virtual irq */
193
194 irq_set_chip_data(virq, opb);
195 irq_set_chip_and_handler(virq, &opb_irq_chip, handle_level_irq);
196 irq_set_irq_type(virq, IRQ_TYPE_NONE);
197
198 return 0;
199}
200
201static const struct irq_domain_ops opb_host_ops = {
202 .map = opb_host_map,
203 .xlate = irq_domain_xlate_twocell,
204};
205
206irqreturn_t opb_irq_handler(int irq, void *private)
207{
208 struct opb_pic *opb;
209 u32 ir, src, subvirq;
210
211 opb = (struct opb_pic *) private;
212
213 /* Read the OPB MLS Interrupt Register for
214 * asserted interrupts */
215 ir = opb_in(opb, OPB_MLSIR);
216 if (!ir)
217 return IRQ_NONE;
218
219 do {
220 		/* Get the source number (0..31), *NOT* the bit mask */
221 src = 32 - ffs(ir);
222
223 /* Translate from the OPB's conception of interrupt number to
224 * Linux's virtual IRQ */
225
226 subvirq = irq_linear_revmap(opb->host, src);
227
228 generic_handle_irq(subvirq);
229 } while ((ir = opb_in(opb, OPB_MLSIR)));
230
231 return IRQ_HANDLED;
232}
233
234struct opb_pic *opb_pic_init_one(struct device_node *dn)
235{
236 struct opb_pic *opb;
237 struct resource res;
238
239 if (of_address_to_resource(dn, 0, &res)) {
240 printk(KERN_ERR "opb: Couldn't translate resource\n");
241 return NULL;
242 }
243
244 opb = kzalloc(sizeof(struct opb_pic), GFP_KERNEL);
245 if (!opb) {
246 printk(KERN_ERR "opb: Failed to allocate opb struct!\n");
247 return NULL;
248 }
249
250 /* Get access to the OPB MMIO registers */
251 opb->regs = ioremap(res.start + 0x10000, 0x1000);
252 if (!opb->regs) {
253 printk(KERN_ERR "opb: Failed to allocate register space!\n");
254 goto free_opb;
255 }
256
257 /* Allocate an irq domain so that Linux knows that despite only
258 * having one interrupt to issue, we're the controller for multiple
259 	 * hardware IRQs, so later we can look up their virtual IRQs. */
260
261 opb->host = irq_domain_add_linear(dn, OPB_NR_IRQS, &opb_host_ops, opb);
262 if (!opb->host) {
263 printk(KERN_ERR "opb: Failed to allocate IRQ host!\n");
264 goto free_regs;
265 }
266
267 opb->index = opb_index++;
268 spin_lock_init(&opb->lock);
269
270 /* Disable all interrupts by default */
271 opb_out(opb, OPB_MLSASIER, 0);
272 opb_out(opb, OPB_MLSIER, 0);
273
274 /* ACK any interrupts left by FW */
275 opb_out(opb, OPB_MLSIR, 0xFFFFFFFF);
276
277 return opb;
278
279free_regs:
280 iounmap(opb->regs);
281free_opb:
282 kfree(opb);
283 return NULL;
284}
285
286void __init opb_pic_init(void)
287{
288 struct device_node *dn;
289 struct opb_pic *opb;
290 int virq;
291 int rc;
292
293 /* Call init_one for each OPB device */
294 for_each_compatible_node(dn, NULL, "ibm,opb") {
295
296 /* Fill in an OPB struct */
297 opb = opb_pic_init_one(dn);
298 if (!opb) {
299 printk(KERN_WARNING "opb: Failed to init node, skipped!\n");
300 continue;
301 }
302
303 /* Map / get opb's hardware virtual irq */
304 virq = irq_of_parse_and_map(dn, 0);
305 if (virq <= 0) {
306 			printk("opb: irq_of_parse_and_map failed!\n");
307 continue;
308 }
309
310 /* Attach opb interrupt handler to new virtual IRQ */
311 rc = request_irq(virq, opb_irq_handler, IRQF_NO_THREAD,
312 "OPB LS Cascade", opb);
313 if (rc) {
314 printk("opb: request_irq failed: %d\n", rc);
315 continue;
316 }
317
318 printk("OPB%d init with %d IRQs at %p\n", opb->index,
319 OPB_NR_IRQS, opb->regs);
320 }
321}
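
The removed OPB code above uses IBM big-endian bit numbering: hwirq 0 corresponds to the most-significant bit of the 32-bit MLS registers, which is why the mask/ack helpers compute 1 << (31 - hwirq) and the cascade handler recovers the source as 32 - ffs(ir). A minimal user-space sketch (illustrative only, not part of the removed driver) showing that the two conversions are inverses:

#include <stdio.h>
#include <stdint.h>
#include <strings.h>	/* ffs() */

/* hwirq -> register mask, same math as opb_unmask_irq()/opb_mask_irq() */
static uint32_t opb_hwirq_to_mask(unsigned int hwirq)
{
	return 1u << (31 - hwirq);
}

/* single-bit register value -> hwirq, same math as opb_irq_handler() */
static unsigned int opb_mask_to_hwirq(uint32_t ir)
{
	return 32 - ffs((int)ir);
}

int main(void)
{
	unsigned int hwirq;

	for (hwirq = 0; hwirq < 32; hwirq++) {
		uint32_t mask = opb_hwirq_to_mask(hwirq);

		printf("hwirq %2u -> mask 0x%08x -> hwirq %2u\n",
		       hwirq, mask, opb_mask_to_hwirq(mask));
	}
	return 0;
}
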
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c
deleted file mode 100644
index a87b414c766a..000000000000
--- a/arch/powerpc/platforms/wsp/psr2.c
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * Copyright 2008-2011, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/delay.h>
11#include <linux/init.h>
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/mm.h>
15#include <linux/of.h>
16#include <linux/smp.h>
17#include <linux/time.h>
18#include <linux/of_fdt.h>
19
20#include <asm/machdep.h>
21#include <asm/udbg.h>
22
23#include "ics.h"
24#include "wsp.h"
25
26
27static void psr2_spin(void)
28{
29 hard_irq_disable();
30 for (;;)
31 continue;
32}
33
34static void psr2_restart(char *cmd)
35{
36 psr2_spin();
37}
38
39static int __init psr2_probe(void)
40{
41 unsigned long root = of_get_flat_dt_root();
42
43 if (of_flat_dt_is_compatible(root, "ibm,wsp-chroma")) {
44 /* chroma systems also claim they are psr2s */
45 return 0;
46 }
47
48 if (!of_flat_dt_is_compatible(root, "ibm,psr2"))
49 return 0;
50
51 return 1;
52}
53
54define_machine(psr2_md) {
55 .name = "PSR2 A2",
56 .probe = psr2_probe,
57 .setup_arch = wsp_setup_arch,
58 .restart = psr2_restart,
59 .power_off = psr2_spin,
60 .halt = psr2_spin,
61 .calibrate_decr = generic_calibrate_decr,
62 .init_IRQ = wsp_setup_irq,
63 .progress = udbg_progress,
64 .power_save = book3e_idle,
65};
66
67machine_arch_initcall(psr2_md, wsp_probe_devices);
diff --git a/arch/powerpc/platforms/wsp/scom_smp.c b/arch/powerpc/platforms/wsp/scom_smp.c
deleted file mode 100644
index 8c79ce016cf1..000000000000
--- a/arch/powerpc/platforms/wsp/scom_smp.c
+++ /dev/null
@@ -1,435 +0,0 @@
1/*
2 * SCOM support for A2 platforms
3 *
4 * Copyright 2007-2011 Benjamin Herrenschmidt, David Gibson,
5 * Michael Ellerman, IBM Corp.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/cpumask.h>
14#include <linux/io.h>
15#include <linux/of.h>
16#include <linux/spinlock.h>
17#include <linux/types.h>
18
19#include <asm/cputhreads.h>
20#include <asm/reg_a2.h>
21#include <asm/scom.h>
22#include <asm/udbg.h>
23#include <asm/code-patching.h>
24
25#include "wsp.h"
26
27#define SCOM_RAMC 0x2a /* Ram Command */
28#define SCOM_RAMC_TGT1_EXT 0x80000000
29#define SCOM_RAMC_SRC1_EXT 0x40000000
30#define SCOM_RAMC_SRC2_EXT 0x20000000
31#define SCOM_RAMC_SRC3_EXT 0x10000000
32#define SCOM_RAMC_ENABLE 0x00080000
33#define SCOM_RAMC_THREADSEL 0x00060000
34#define SCOM_RAMC_EXECUTE 0x00010000
35#define SCOM_RAMC_MSR_OVERRIDE 0x00008000
36#define SCOM_RAMC_MSR_PR 0x00004000
37#define SCOM_RAMC_MSR_GS 0x00002000
38#define SCOM_RAMC_FORCE 0x00001000
39#define SCOM_RAMC_FLUSH 0x00000800
40#define SCOM_RAMC_INTERRUPT 0x00000004
41#define SCOM_RAMC_ERROR 0x00000002
42#define SCOM_RAMC_DONE 0x00000001
43#define SCOM_RAMI 0x29 /* Ram Instruction */
44#define SCOM_RAMIC 0x28 /* Ram Instruction and Command */
45#define SCOM_RAMIC_INSN 0xffffffff00000000
46#define SCOM_RAMD 0x2d /* Ram Data */
47#define SCOM_RAMDH 0x2e /* Ram Data High */
48#define SCOM_RAMDL 0x2f /* Ram Data Low */
49#define SCOM_PCCR0 0x33 /* PC Configuration Register 0 */
50#define SCOM_PCCR0_ENABLE_DEBUG 0x80000000
51#define SCOM_PCCR0_ENABLE_RAM 0x40000000
52#define SCOM_THRCTL 0x30 /* Thread Control and Status */
53#define SCOM_THRCTL_T0_STOP 0x80000000
54#define SCOM_THRCTL_T1_STOP 0x40000000
55#define SCOM_THRCTL_T2_STOP 0x20000000
56#define SCOM_THRCTL_T3_STOP 0x10000000
57#define SCOM_THRCTL_T0_STEP 0x08000000
58#define SCOM_THRCTL_T1_STEP 0x04000000
59#define SCOM_THRCTL_T2_STEP 0x02000000
60#define SCOM_THRCTL_T3_STEP 0x01000000
61#define SCOM_THRCTL_T0_RUN 0x00800000
62#define SCOM_THRCTL_T1_RUN 0x00400000
63#define SCOM_THRCTL_T2_RUN 0x00200000
64#define SCOM_THRCTL_T3_RUN 0x00100000
65#define SCOM_THRCTL_T0_PM 0x00080000
66#define SCOM_THRCTL_T1_PM 0x00040000
67#define SCOM_THRCTL_T2_PM 0x00020000
68#define SCOM_THRCTL_T3_PM 0x00010000
69#define SCOM_THRCTL_T0_UDE 0x00008000
70#define SCOM_THRCTL_T1_UDE 0x00004000
71#define SCOM_THRCTL_T2_UDE 0x00002000
72#define SCOM_THRCTL_T3_UDE 0x00001000
73#define SCOM_THRCTL_ASYNC_DIS 0x00000800
74#define SCOM_THRCTL_TB_DIS 0x00000400
75#define SCOM_THRCTL_DEC_DIS 0x00000200
76#define SCOM_THRCTL_AND 0x31 /* Thread Control and Status */
77#define SCOM_THRCTL_OR 0x32 /* Thread Control and Status */
78
79
80static DEFINE_PER_CPU(scom_map_t, scom_ptrs);
81
82static scom_map_t get_scom(int cpu, struct device_node *np, int *first_thread)
83{
84 scom_map_t scom = per_cpu(scom_ptrs, cpu);
85 int tcpu;
86
87 if (scom_map_ok(scom)) {
88 *first_thread = 0;
89 return scom;
90 }
91
92 *first_thread = 1;
93
94 scom = scom_map_device(np, 0);
95
96 for (tcpu = cpu_first_thread_sibling(cpu);
97 tcpu <= cpu_last_thread_sibling(cpu); tcpu++)
98 per_cpu(scom_ptrs, tcpu) = scom;
99
100 /* Hack: for the boot core, this will actually get called on
101 * the second thread up, not the first so our test above will
102 * set first_thread incorrectly. */
103 if (cpu_first_thread_sibling(cpu) == 0)
104 *first_thread = 0;
105
106 return scom;
107}
108
109static int a2_scom_ram(scom_map_t scom, int thread, u32 insn, int extmask)
110{
111 u64 cmd, mask, val;
112 int n = 0;
113
114 cmd = ((u64)insn << 32) | (((u64)extmask & 0xf) << 28)
115 | ((u64)thread << 17) | SCOM_RAMC_ENABLE | SCOM_RAMC_EXECUTE;
116 mask = SCOM_RAMC_DONE | SCOM_RAMC_INTERRUPT | SCOM_RAMC_ERROR;
117
118 scom_write(scom, SCOM_RAMIC, cmd);
119
120 for (;;) {
121 if (scom_read(scom, SCOM_RAMC, &val) != 0) {
122 pr_err("SCOM error on instruction 0x%08x, thread %d\n",
123 insn, thread);
124 return -1;
125 }
126 if (val & mask)
127 break;
128 pr_devel("Waiting on RAMC = 0x%llx\n", val);
129 if (++n == 3) {
130 pr_err("RAMC timeout on instruction 0x%08x, thread %d\n",
131 insn, thread);
132 return -1;
133 }
134 }
135
136 if (val & SCOM_RAMC_INTERRUPT) {
137 pr_err("RAMC interrupt on instruction 0x%08x, thread %d\n",
138 insn, thread);
139 return -SCOM_RAMC_INTERRUPT;
140 }
141
142 if (val & SCOM_RAMC_ERROR) {
143 pr_err("RAMC error on instruction 0x%08x, thread %d\n",
144 insn, thread);
145 return -SCOM_RAMC_ERROR;
146 }
147
148 return 0;
149}
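
a2_scom_ram() above packs the RAM command word with the instruction in the top 32 bits, the extension mask at bit 28 and the thread select at bit 17, before ORing in the enable/execute bits. A stand-alone sketch of just that packing (illustrative only; the example instruction 0x7c210b78 is "or r1,r1,r1", as built by a2_scom_getgpr() for gpr 1):

#include <stdio.h>
#include <stdint.h>

#define SCOM_RAMC_ENABLE	0x00080000
#define SCOM_RAMC_EXECUTE	0x00010000

/* Same packing as a2_scom_ram() */
static uint64_t ramc_cmd(uint32_t insn, int thread, int extmask)
{
	return ((uint64_t)insn << 32) | (((uint64_t)extmask & 0xf) << 28)
		| ((uint64_t)thread << 17) | SCOM_RAMC_ENABLE | SCOM_RAMC_EXECUTE;
}

int main(void)
{
	/* "or r1,r1,r1" RAMmed on thread 2 with all extension bits set */
	printf("cmd = 0x%016llx\n",
	       (unsigned long long)ramc_cmd(0x7c210b78, 2, 0xf));
	return 0;
}
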
150
151static int a2_scom_getgpr(scom_map_t scom, int thread, int gpr, int alt,
152 u64 *out_gpr)
153{
154 int rc;
155
156 /* or rN, rN, rN */
157 u32 insn = 0x7c000378 | (gpr << 21) | (gpr << 16) | (gpr << 11);
158 rc = a2_scom_ram(scom, thread, insn, alt ? 0xf : 0x0);
159 if (rc)
160 return rc;
161
162 return scom_read(scom, SCOM_RAMD, out_gpr);
163}
164
165static int a2_scom_getspr(scom_map_t scom, int thread, int spr, u64 *out_spr)
166{
167 int rc, sprhi, sprlo;
168 u32 insn;
169
170 sprhi = spr >> 5;
171 sprlo = spr & 0x1f;
172 insn = 0x7c2002a6 | (sprlo << 16) | (sprhi << 11); /* mfspr r1,spr */
173
174 if (spr == 0x0ff0)
175 insn = 0x7c2000a6; /* mfmsr r1 */
176
177 rc = a2_scom_ram(scom, thread, insn, 0xf);
178 if (rc)
179 return rc;
180 return a2_scom_getgpr(scom, thread, 1, 1, out_spr);
181}
182
183static int a2_scom_setgpr(scom_map_t scom, int thread, int gpr,
184 int alt, u64 val)
185{
186 u32 lis = 0x3c000000 | (gpr << 21);
187 u32 li = 0x38000000 | (gpr << 21);
188 u32 oris = 0x64000000 | (gpr << 21) | (gpr << 16);
189 u32 ori = 0x60000000 | (gpr << 21) | (gpr << 16);
190 u32 rldicr32 = 0x780007c6 | (gpr << 21) | (gpr << 16);
191 u32 highest = val >> 48;
192 u32 higher = (val >> 32) & 0xffff;
193 u32 high = (val >> 16) & 0xffff;
194 u32 low = val & 0xffff;
195 int lext = alt ? 0x8 : 0x0;
196 int oext = alt ? 0xf : 0x0;
197 int rc = 0;
198
199 if (highest)
200 rc |= a2_scom_ram(scom, thread, lis | highest, lext);
201
202 if (higher) {
203 if (highest)
204 rc |= a2_scom_ram(scom, thread, oris | higher, oext);
205 else
206 rc |= a2_scom_ram(scom, thread, li | higher, lext);
207 }
208
209 if (highest || higher)
210 rc |= a2_scom_ram(scom, thread, rldicr32, oext);
211
212 if (high) {
213 if (highest || higher)
214 rc |= a2_scom_ram(scom, thread, oris | high, oext);
215 else
216 rc |= a2_scom_ram(scom, thread, lis | high, lext);
217 }
218
219 if (highest || higher || high)
220 rc |= a2_scom_ram(scom, thread, ori | low, oext);
221 else
222 rc |= a2_scom_ram(scom, thread, li | low, lext);
223
224 return rc;
225}
226
227static int a2_scom_setspr(scom_map_t scom, int thread, int spr, u64 val)
228{
229 int sprhi = spr >> 5;
230 int sprlo = spr & 0x1f;
231 /* mtspr spr, r1 */
232 u32 insn = 0x7c2003a6 | (sprlo << 16) | (sprhi << 11);
233
234 if (spr == 0x0ff0)
235 insn = 0x7c200124; /* mtmsr r1 */
236
237 if (a2_scom_setgpr(scom, thread, 1, 1, val))
238 return -1;
239
240 return a2_scom_ram(scom, thread, insn, 0xf);
241}
242
243static int a2_scom_initial_tlb(scom_map_t scom, int thread)
244{
245 extern u32 a2_tlbinit_code_start[], a2_tlbinit_code_end[];
246 extern u32 a2_tlbinit_after_iprot_flush[];
247 extern u32 a2_tlbinit_after_linear_map[];
248 u32 assoc, entries, i;
249 u64 epn, tlbcfg;
250 u32 *p;
251 int rc;
252
253 /* Invalidate all entries (including iprot) */
254
255 rc = a2_scom_getspr(scom, thread, SPRN_TLB0CFG, &tlbcfg);
256 if (rc)
257 goto scom_fail;
258 entries = tlbcfg & TLBnCFG_N_ENTRY;
259 assoc = (tlbcfg & TLBnCFG_ASSOC) >> 24;
260 epn = 0;
261
262 /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
263 a2_scom_setspr(scom, thread, SPRN_MMUCR2, 0x000a7531);
264 	/* Set MMUCR3 to write all thread ID bits to the TLB */
265 a2_scom_setspr(scom, thread, SPRN_MMUCR3, 0x0000000f);
266
267 /* Set MAS1 for 1G page size, and MAS2 to our initial EPN */
268 a2_scom_setspr(scom, thread, SPRN_MAS1, MAS1_TSIZE(BOOK3E_PAGESZ_1GB));
269 a2_scom_setspr(scom, thread, SPRN_MAS2, epn);
270 for (i = 0; i < entries; i++) {
271
272 a2_scom_setspr(scom, thread, SPRN_MAS0, MAS0_ESEL(i % assoc));
273
274 /* tlbwe */
275 rc = a2_scom_ram(scom, thread, 0x7c0007a4, 0);
276 if (rc)
277 goto scom_fail;
278
279 /* Next entry is new address? */
280 if((i + 1) % assoc == 0) {
281 epn += (1 << 30);
282 a2_scom_setspr(scom, thread, SPRN_MAS2, epn);
283 }
284 }
285
286 /* Setup args for linear mapping */
287 rc = a2_scom_setgpr(scom, thread, 3, 0, MAS0_TLBSEL(0));
288 if (rc)
289 goto scom_fail;
290
291 /* Linear mapping */
292 for (p = a2_tlbinit_code_start; p < a2_tlbinit_after_linear_map; p++) {
293 rc = a2_scom_ram(scom, thread, *p, 0);
294 if (rc)
295 goto scom_fail;
296 }
297
298 /*
299 * For the boot thread, between the linear mapping and the debug
300 * mappings there is a loop to flush iprot mappings. Ramming doesn't do
301 * branches, but the secondary threads don't need to be nearly as smart
302 * (i.e. we don't need to worry about invalidating the mapping we're
303 * standing on).
304 */
305
306 /* Debug mappings. Expects r11 = MAS0 from linear map (set above) */
307 for (p = a2_tlbinit_after_iprot_flush; p < a2_tlbinit_code_end; p++) {
308 rc = a2_scom_ram(scom, thread, *p, 0);
309 if (rc)
310 goto scom_fail;
311 }
312
313scom_fail:
314 if (rc)
315 pr_err("Setting up initial TLB failed, err %d\n", rc);
316
317 if (rc == -SCOM_RAMC_INTERRUPT) {
318 /* Interrupt, dump some status */
319 int rc[10];
320 u64 iar, srr0, srr1, esr, mas0, mas1, mas2, mas7_3, mas8, ccr2;
321 rc[0] = a2_scom_getspr(scom, thread, SPRN_IAR, &iar);
322 rc[1] = a2_scom_getspr(scom, thread, SPRN_SRR0, &srr0);
323 rc[2] = a2_scom_getspr(scom, thread, SPRN_SRR1, &srr1);
324 rc[3] = a2_scom_getspr(scom, thread, SPRN_ESR, &esr);
325 rc[4] = a2_scom_getspr(scom, thread, SPRN_MAS0, &mas0);
326 rc[5] = a2_scom_getspr(scom, thread, SPRN_MAS1, &mas1);
327 rc[6] = a2_scom_getspr(scom, thread, SPRN_MAS2, &mas2);
328 rc[7] = a2_scom_getspr(scom, thread, SPRN_MAS7_MAS3, &mas7_3);
329 rc[8] = a2_scom_getspr(scom, thread, SPRN_MAS8, &mas8);
330 rc[9] = a2_scom_getspr(scom, thread, SPRN_A2_CCR2, &ccr2);
331 		pr_err(" -> retrieved IAR =0x%llx (err %d)\n", iar, rc[0]);
332 		pr_err("    retrieved SRR0=0x%llx (err %d)\n", srr0, rc[1]);
333 		pr_err("    retrieved SRR1=0x%llx (err %d)\n", srr1, rc[2]);
334 		pr_err("    retrieved ESR =0x%llx (err %d)\n", esr, rc[3]);
335 		pr_err("    retrieved MAS0=0x%llx (err %d)\n", mas0, rc[4]);
336 		pr_err("    retrieved MAS1=0x%llx (err %d)\n", mas1, rc[5]);
337 		pr_err("    retrieved MAS2=0x%llx (err %d)\n", mas2, rc[6]);
338 		pr_err("    retrieved MS73=0x%llx (err %d)\n", mas7_3, rc[7]);
339 		pr_err("    retrieved MAS8=0x%llx (err %d)\n", mas8, rc[8]);
340 		pr_err("    retrieved CCR2=0x%llx (err %d)\n", ccr2, rc[9]);
341 }
342
343 return rc;
344}
345
346int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np)
347{
348 u64 init_iar, init_msr, init_ccr2;
349 unsigned long start_here;
350 int rc, core_setup;
351 scom_map_t scom;
352 u64 pccr0;
353
354 scom = get_scom(lcpu, np, &core_setup);
355 if (!scom) {
356 printk(KERN_ERR "Couldn't map SCOM for CPU%d\n", lcpu);
357 return -1;
358 }
359
360 pr_devel("Bringing up CPU%d using SCOM...\n", lcpu);
361
362 if (scom_read(scom, SCOM_PCCR0, &pccr0) != 0) {
363 		printk(KERN_ERR "XSCOM failure reading PCCR0 on CPU%d\n", lcpu);
364 return -1;
365 }
366 scom_write(scom, SCOM_PCCR0, pccr0 | SCOM_PCCR0_ENABLE_DEBUG |
367 SCOM_PCCR0_ENABLE_RAM);
368
369 	/* Stop the thread with THRCTL. If we are setting up the TLB, we stop all
370 * threads. We also disable asynchronous interrupts while RAMing.
371 */
372 if (core_setup)
373 scom_write(scom, SCOM_THRCTL_OR,
374 SCOM_THRCTL_T0_STOP |
375 SCOM_THRCTL_T1_STOP |
376 SCOM_THRCTL_T2_STOP |
377 SCOM_THRCTL_T3_STOP |
378 SCOM_THRCTL_ASYNC_DIS);
379 else
380 scom_write(scom, SCOM_THRCTL_OR, SCOM_THRCTL_T0_STOP >> thr_idx);
381
382 /* Flush its pipeline just in case */
383 scom_write(scom, SCOM_RAMC, ((u64)thr_idx << 17) |
384 SCOM_RAMC_FLUSH | SCOM_RAMC_ENABLE);
385
386 a2_scom_getspr(scom, thr_idx, SPRN_IAR, &init_iar);
387 a2_scom_getspr(scom, thr_idx, 0x0ff0, &init_msr);
388 a2_scom_getspr(scom, thr_idx, SPRN_A2_CCR2, &init_ccr2);
389
390 	/* Set MSR to MSR_CM (0x0ff0 is the magic SPR value for the MSR) */
391 rc = a2_scom_setspr(scom, thr_idx, 0x0ff0, MSR_CM);
392 if (rc) {
393 pr_err("Failed to set MSR ! err %d\n", rc);
394 return rc;
395 }
396
397 	/* RAM in a sync/isync for the sake of it */
398 a2_scom_ram(scom, thr_idx, 0x7c0004ac, 0);
399 a2_scom_ram(scom, thr_idx, 0x4c00012c, 0);
400
401 if (core_setup) {
402 pr_devel("CPU%d is first thread in core, initializing TLB...\n",
403 lcpu);
404 rc = a2_scom_initial_tlb(scom, thr_idx);
405 if (rc)
406 goto fail;
407 }
408
409 start_here = ppc_function_entry(core_setup ? generic_secondary_smp_init
410 : generic_secondary_thread_init);
411 pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here);
412
413 rc |= a2_scom_setspr(scom, thr_idx, SPRN_IAR, start_here);
414 rc |= a2_scom_setgpr(scom, thr_idx, 3, 0,
415 get_hard_smp_processor_id(lcpu));
416 /*
417 * Tell book3e_secondary_core_init not to set up the TLB, we've
418 * already done that.
419 */
420 rc |= a2_scom_setgpr(scom, thr_idx, 4, 0, 1);
421
422 rc |= a2_scom_setspr(scom, thr_idx, SPRN_TENS, 0x1 << thr_idx);
423
424 scom_write(scom, SCOM_RAMC, 0);
425 scom_write(scom, SCOM_THRCTL_AND, ~(SCOM_THRCTL_T0_STOP >> thr_idx));
426 scom_write(scom, SCOM_PCCR0, pccr0);
427fail:
428 pr_devel(" SCOM initialization %s\n", rc ? "failed" : "succeeded");
429 if (rc) {
430 pr_err("Old IAR=0x%08llx MSR=0x%08llx CCR2=0x%08llx\n",
431 init_iar, init_msr, init_ccr2);
432 }
433
434 return rc;
435}
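
a2_scom_getspr() and a2_scom_setspr() above build mfspr/mtspr opcodes by splitting the 10-bit SPR number into two 5-bit halves that appear swapped in the instruction encoding (low half at bit 16, high half at bit 11). A stand-alone user-space sketch of the same arithmetic, for illustration only:

#include <stdio.h>
#include <stdint.h>

/* Same split as a2_scom_getspr(): mfspr r1,spr */
static uint32_t mfspr_r1(unsigned int spr)
{
	unsigned int sprhi = spr >> 5;
	unsigned int sprlo = spr & 0x1f;

	return 0x7c2002a6 | (sprlo << 16) | (sprhi << 11);
}

/* Same split as a2_scom_setspr(): mtspr spr,r1 */
static uint32_t mtspr_r1(unsigned int spr)
{
	unsigned int sprhi = spr >> 5;
	unsigned int sprlo = spr & 0x1f;

	return 0x7c2003a6 | (sprlo << 16) | (sprhi << 11);
}

int main(void)
{
	/* SPR 1 is XER; mfspr r1,1 encodes as 0x7c2102a6 */
	printf("mfspr r1,1 = 0x%08x\n", mfspr_r1(1));
	printf("mtspr 1,r1 = 0x%08x\n", mtspr_r1(1));
	return 0;
}
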
diff --git a/arch/powerpc/platforms/wsp/scom_wsp.c b/arch/powerpc/platforms/wsp/scom_wsp.c
deleted file mode 100644
index 6538b4de34fc..000000000000
--- a/arch/powerpc/platforms/wsp/scom_wsp.c
+++ /dev/null
@@ -1,82 +0,0 @@
1/*
2 * SCOM backend for WSP
3 *
4 * Copyright 2010 Benjamin Herrenschmidt, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/cpumask.h>
13#include <linux/io.h>
14#include <linux/of.h>
15#include <linux/spinlock.h>
16#include <linux/types.h>
17#include <linux/of_address.h>
18
19#include <asm/cputhreads.h>
20#include <asm/reg_a2.h>
21#include <asm/scom.h>
22#include <asm/udbg.h>
23
24#include "wsp.h"
25
26
27static scom_map_t wsp_scom_map(struct device_node *dev, u64 reg, u64 count)
28{
29 struct resource r;
30 u64 xscom_addr;
31
32 if (!of_get_property(dev, "scom-controller", NULL)) {
33 pr_err("%s: device %s is not a SCOM controller\n",
34 __func__, dev->full_name);
35 return SCOM_MAP_INVALID;
36 }
37
38 if (of_address_to_resource(dev, 0, &r)) {
39 pr_debug("Failed to find SCOM controller address\n");
40 return 0;
41 }
42
43 /* Transform the SCOM address into an XSCOM offset */
44 xscom_addr = ((reg & 0x7f000000) >> 1) | ((reg & 0xfffff) << 3);
45
46 return (scom_map_t)ioremap(r.start + xscom_addr, count << 3);
47}
48
49static void wsp_scom_unmap(scom_map_t map)
50{
51 iounmap((void *)map);
52}
53
54static int wsp_scom_read(scom_map_t map, u64 reg, u64 *value)
55{
56 u64 __iomem *addr = (u64 __iomem *)map;
57
58 *value = in_be64(addr + reg);
59
60 return 0;
61}
62
63static int wsp_scom_write(scom_map_t map, u64 reg, u64 value)
64{
65 u64 __iomem *addr = (u64 __iomem *)map;
66
67 out_be64(addr + reg, value);
68
69 return 0;
70}
71
72static const struct scom_controller wsp_scom_controller = {
73 .map = wsp_scom_map,
74 .unmap = wsp_scom_unmap,
75 .read = wsp_scom_read,
76 .write = wsp_scom_write
77};
78
79void scom_init_wsp(void)
80{
81 scom_init(&wsp_scom_controller);
82}
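
wsp_scom_map() above turns a SCOM register address into a byte offset within the XSCOM MMIO region: the low 20 bits select a 64-bit register (hence the shift left by 3), while the bits under 0x7f000000 are shifted down by one. A user-space sketch of just that transform (illustrative only; the example inputs are the SCOM_RAMC and SCOM_PCCR0 values from scom_smp.c above):

#include <stdio.h>
#include <stdint.h>

/* Same transform as wsp_scom_map(): SCOM address -> XSCOM byte offset */
static uint64_t wsp_xscom_offset(uint64_t reg)
{
	return ((reg & 0x7f000000) >> 1) | ((reg & 0xfffff) << 3);
}

int main(void)
{
	printf("SCOM 0x2a -> offset 0x%llx\n",
	       (unsigned long long)wsp_xscom_offset(0x2a));
	printf("SCOM 0x33 -> offset 0x%llx\n",
	       (unsigned long long)wsp_xscom_offset(0x33));
	return 0;
}
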
diff --git a/arch/powerpc/platforms/wsp/setup.c b/arch/powerpc/platforms/wsp/setup.c
deleted file mode 100644
index 11ac2f05e01c..000000000000
--- a/arch/powerpc/platforms/wsp/setup.c
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright 2010 Michael Ellerman, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/of_platform.h>
12
13#include "wsp.h"
14
15/*
16 * Find the chip-id by walking up the device tree looking for the ibm,wsp-chip-id property.
17 * Won't work for nodes that are not descendants of a wsp node.
18 */
19int wsp_get_chip_id(struct device_node *dn)
20{
21 const u32 *p;
22 int rc;
23
24 /* Start looking at the specified node, not its parent */
25 dn = of_node_get(dn);
26 while (dn && !(p = of_get_property(dn, "ibm,wsp-chip-id", NULL)))
27 dn = of_get_next_parent(dn);
28
29 if (!dn)
30 return -1;
31
32 rc = *p;
33 of_node_put(dn);
34
35 return rc;
36}
diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c
deleted file mode 100644
index 332a18b81403..000000000000
--- a/arch/powerpc/platforms/wsp/smp.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * SMP Support for A2 platforms
3 *
4 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/cpumask.h>
14#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/of.h>
17#include <linux/smp.h>
18
19#include <asm/dbell.h>
20#include <asm/machdep.h>
21#include <asm/xics.h>
22
23#include "ics.h"
24#include "wsp.h"
25
26static void smp_a2_setup_cpu(int cpu)
27{
28 doorbell_setup_this_cpu();
29
30 if (cpu != boot_cpuid)
31 xics_setup_cpu();
32}
33
34int smp_a2_kick_cpu(int nr)
35{
36 const char *enable_method;
37 struct device_node *np;
38 int thr_idx;
39
40 if (nr < 0 || nr >= NR_CPUS)
41 return -ENOENT;
42
43 np = of_get_cpu_node(nr, &thr_idx);
44 if (!np)
45 return -ENODEV;
46
47 enable_method = of_get_property(np, "enable-method", NULL);
48 pr_devel("CPU%d has enable-method: \"%s\"\n", nr, enable_method);
49
50 if (!enable_method) {
51 printk(KERN_ERR "CPU%d has no enable-method\n", nr);
52 return -ENOENT;
53 } else if (strcmp(enable_method, "ibm,a2-scom") == 0) {
54 if (a2_scom_startup_cpu(nr, thr_idx, np))
55 return -1;
56 } else {
57 printk(KERN_ERR "CPU%d: Don't understand enable-method \"%s\"\n",
58 nr, enable_method);
59 return -EINVAL;
60 }
61
62 /*
63 * The processor is currently spinning, waiting for the
64 	 * cpu_start field to become non-zero. After we set cpu_start,
65 * the processor will continue on to secondary_start
66 */
67 paca[nr].cpu_start = 1;
68
69 return 0;
70}
71
72static int __init smp_a2_probe(void)
73{
74 return num_possible_cpus();
75}
76
77static struct smp_ops_t a2_smp_ops = {
78 .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
79 .cause_ipi = doorbell_cause_ipi,
80 .probe = smp_a2_probe,
81 .kick_cpu = smp_a2_kick_cpu,
82 .setup_cpu = smp_a2_setup_cpu,
83};
84
85void __init a2_setup_smp(void)
86{
87 smp_ops = &a2_smp_ops;
88}
diff --git a/arch/powerpc/platforms/wsp/wsp.c b/arch/powerpc/platforms/wsp/wsp.c
deleted file mode 100644
index 58cd1f00e1ef..000000000000
--- a/arch/powerpc/platforms/wsp/wsp.c
+++ /dev/null
@@ -1,117 +0,0 @@
1/*
2 * Copyright 2008-2011, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/of.h>
12#include <linux/of_device.h>
13#include <linux/smp.h>
14#include <linux/delay.h>
15#include <linux/time.h>
16#include <linux/of_address.h>
17
18#include <asm/scom.h>
19
20#include "wsp.h"
21#include "ics.h"
22
23#define WSP_SOC_COMPATIBLE "ibm,wsp-soc"
24#define PBIC_COMPATIBLE "ibm,wsp-pbic"
25#define COPRO_COMPATIBLE "ibm,wsp-coprocessor"
26
27static int __init wsp_probe_buses(void)
28{
29 static __initdata struct of_device_id bus_ids[] = {
30 /*
31 * every node in between needs to be here or you won't
32 * find it
33 */
34 { .compatible = WSP_SOC_COMPATIBLE, },
35 { .compatible = PBIC_COMPATIBLE, },
36 { .compatible = COPRO_COMPATIBLE, },
37 {},
38 };
39 of_platform_bus_probe(NULL, bus_ids, NULL);
40
41 return 0;
42}
43
44void __init wsp_setup_arch(void)
45{
46 /* init to some ~sane value until calibrate_delay() runs */
47 loops_per_jiffy = 50000000;
48
49 scom_init_wsp();
50
51 /* Setup SMP callback */
52#ifdef CONFIG_SMP
53 a2_setup_smp();
54#endif
55#ifdef CONFIG_PCI
56 wsp_setup_pci();
57#endif
58}
59
60void __init wsp_setup_irq(void)
61{
62 wsp_init_irq();
63 opb_pic_init();
64}
65
66
67int __init wsp_probe_devices(void)
68{
69 struct device_node *np;
70
71 	/* Our RTC is a ds1500. It seems to be programmatically compatible
72 	 * with the ds1511, for which we have a driver, so let's use that
73 */
74 np = of_find_compatible_node(NULL, NULL, "dallas,ds1500");
75 if (np != NULL) {
76 struct resource res;
77 if (of_address_to_resource(np, 0, &res) == 0)
78 platform_device_register_simple("ds1511", 0, &res, 1);
79 }
80
81 wsp_probe_buses();
82
83 return 0;
84}
85
86void wsp_halt(void)
87{
88 u64 val;
89 scom_map_t m;
90 struct device_node *dn;
91 struct device_node *mine;
92 struct device_node *me;
93 int rc;
94
95 me = of_get_cpu_node(smp_processor_id(), NULL);
96 mine = scom_find_parent(me);
97
98 /* This will halt all the A2s but not power off the chip */
99 for_each_node_with_property(dn, "scom-controller") {
100 if (dn == mine)
101 continue;
102 m = scom_map(dn, 0, 1);
103
104 /* read-modify-write it so the HW probe does not get
105 * confused */
106 rc = scom_read(m, 0, &val);
107 if (rc == 0)
108 scom_write(m, 0, val | 1);
109 scom_unmap(m);
110 }
111 m = scom_map(mine, 0, 1);
112 rc = scom_read(m, 0, &val);
113 if (rc == 0)
114 scom_write(m, 0, val | 1);
115 /* should never return */
116 scom_unmap(m);
117}
diff --git a/arch/powerpc/platforms/wsp/wsp.h b/arch/powerpc/platforms/wsp/wsp.h
deleted file mode 100644
index a563a8aaf812..000000000000
--- a/arch/powerpc/platforms/wsp/wsp.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __WSP_H
2#define __WSP_H
3
4#include <asm/wsp.h>
5
6/* Devtree compatible strings for major devices */
7#define PCIE_COMPATIBLE "ibm,wsp-pciex"
8
9extern void wsp_setup_arch(void);
10extern void wsp_setup_irq(void);
11extern int wsp_probe_devices(void);
12extern void wsp_halt(void);
13
14extern void wsp_setup_pci(void);
15extern void scom_init_wsp(void);
16
17extern void a2_setup_smp(void);
18extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
19 struct device_node *np);
20extern int smp_a2_kick_cpu(int nr);
21
22extern void opb_pic_init(void);
23
24/* chroma specific management */
25extern void wsp_h8_restart(char *cmd);
26extern void wsp_h8_power_off(void);
27extern void __init wsp_setup_h8(void);
28
29#endif /* __WSP_H */
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
deleted file mode 100644
index 9a15e5b39bb8..000000000000
--- a/arch/powerpc/platforms/wsp/wsp_pci.c
+++ /dev/null
@@ -1,1134 +0,0 @@
1/*
2 * Copyright 2010 Ben Herrenschmidt, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#define DEBUG
11
12#include <linux/kernel.h>
13#include <linux/pci.h>
14#include <linux/delay.h>
15#include <linux/string.h>
16#include <linux/init.h>
17#include <linux/bootmem.h>
18#include <linux/irq.h>
19#include <linux/interrupt.h>
20#include <linux/debugfs.h>
21
22#include <asm/sections.h>
23#include <asm/io.h>
24#include <asm/prom.h>
25#include <asm/pci-bridge.h>
26#include <asm/machdep.h>
27#include <asm/ppc-pci.h>
28#include <asm/iommu.h>
29#include <asm/io-workarounds.h>
30#include <asm/debug.h>
31
32#include "wsp.h"
33#include "wsp_pci.h"
34#include "msi.h"
35
36
37/* Max number of TVTs for one table. Only 32-bit tables can use
38 * multiple TVTs and so the max currently supported is thus 8
39 * since only 2G of DMA space is supported
40 */
41#define MAX_TABLE_TVT_COUNT 8
42
43struct wsp_dma_table {
44 struct list_head link;
45 struct iommu_table table;
46 struct wsp_phb *phb;
47 struct page *tces[MAX_TABLE_TVT_COUNT];
48};
49
50/* We support DMA regions from 0...2G in 32bit space (no support for
51 * 64-bit DMA just yet). Each device gets a separate TCE table (TVT
52 * entry) with validation enabled (though not supported by Simics
53 * just yet).
54 *
55 * To simplify things, we divide this 2G space into N regions based
56 * on the constant below which could be turned into a tunable eventually
57 *
58 * We then assign dynamically those regions to devices as they show up.
59 *
60 * We use a bitmap as an allocator for these.
61 *
62 * Tables are allocated/created dynamically as devices are discovered,
63 * multiple TVT entries are used if needed
64 *
65 * When 64-bit DMA support is added we should simply use a separate set
66 * of larger regions (the HW supports 64 TVT entries). We can
67 * additionally create a bypass region in 64-bit space for performance,
68 * though that would have a cost in terms of security.
69 *
70 * If you set NUM_DMA32_REGIONS to 1, then a single table is shared
71 * for all devices and bus/dev/fn validation is disabled
72 *
73 * Note that a DMA32 region cannot be smaller than 256M so the max
74 * supported here for now is 8. We don't yet support sharing regions
75 * between multiple devices so the max number of devices supported
76 * is MAX_TABLE_TVT_COUNT.
77 */
78#define NUM_DMA32_REGIONS 1
79
80struct wsp_phb {
81 struct pci_controller *hose;
82
83 /* Lock controlling access to the list of dma tables.
84 * It does -not- protect against dma_* operations on
85 * those tables, those should be stopped before an entry
86 * is removed from the list.
87 *
88 * The lock is also used for error handling operations
89 */
90 spinlock_t lock;
91 struct list_head dma_tables;
92 unsigned long dma32_map;
93 unsigned long dma32_base;
94 unsigned int dma32_num_regions;
95 unsigned long dma32_region_size;
96
97 /* Debugfs stuff */
98 struct dentry *ddir;
99
100 struct list_head all;
101};
102static LIST_HEAD(wsp_phbs);
103
104//#define cfg_debug(fmt...) pr_debug(fmt)
105#define cfg_debug(fmt...)
106
107
108static int wsp_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
109 int offset, int len, u32 *val)
110{
111 struct pci_controller *hose;
112 int suboff;
113 u64 addr;
114
115 hose = pci_bus_to_host(bus);
116 if (hose == NULL)
117 return PCIBIOS_DEVICE_NOT_FOUND;
118 if (offset >= 0x1000)
119 return PCIBIOS_BAD_REGISTER_NUMBER;
120 addr = PCIE_REG_CA_ENABLE |
121 ((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
122 ((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
123 ((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
124 suboff = offset & 3;
125
126 /*
127 * Note: the caller has already checked that offset is
128 * suitably aligned and that len is 1, 2 or 4.
129 */
130
131 switch (len) {
132 case 1:
133 addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
134 out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
135 *val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
136 >> (suboff << 3)) & 0xff;
137 cfg_debug("read 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
138 bus->number, devfn >> 3, devfn & 7,
139 offset, suboff, addr, *val);
140 break;
141 case 2:
142 addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
143 out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
144 *val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
145 >> (suboff << 3)) & 0xffff;
146 cfg_debug("read 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
147 bus->number, devfn >> 3, devfn & 7,
148 offset, suboff, addr, *val);
149 break;
150 default:
151 addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
152 out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
153 *val = in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA);
154 cfg_debug("read 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
155 bus->number, devfn >> 3, devfn & 7,
156 offset, suboff, addr, *val);
157 break;
158 }
159 return PCIBIOS_SUCCESSFUL;
160}
161
162static int wsp_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
163 int offset, int len, u32 val)
164{
165 struct pci_controller *hose;
166 int suboff;
167 u64 addr;
168
169 hose = pci_bus_to_host(bus);
170 if (hose == NULL)
171 return PCIBIOS_DEVICE_NOT_FOUND;
172 if (offset >= 0x1000)
173 return PCIBIOS_BAD_REGISTER_NUMBER;
174 addr = PCIE_REG_CA_ENABLE |
175 ((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
176 ((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
177 ((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
178 suboff = offset & 3;
179
180 /*
181 * Note: the caller has already checked that offset is
182 * suitably aligned and that len is 1, 2 or 4.
183 */
184 switch (len) {
185 case 1:
186 addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
187 val <<= suboff << 3;
188 out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
189 out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
190 cfg_debug("write 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
191 bus->number, devfn >> 3, devfn & 7,
192 offset, suboff, addr, val);
193 break;
194 case 2:
195 addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
196 val <<= suboff << 3;
197 out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
198 out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
199 cfg_debug("write 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
200 bus->number, devfn >> 3, devfn & 7,
201 offset, suboff, addr, val);
202 break;
203 default:
204 addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
205 out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
206 out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
207 cfg_debug("write 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
208 bus->number, devfn >> 3, devfn & 7,
209 offset, suboff, addr, val);
210 break;
211 }
212 return PCIBIOS_SUCCESSFUL;
213}
214
215static struct pci_ops wsp_pcie_pci_ops =
216{
217 .read = wsp_pcie_read_config,
218 .write = wsp_pcie_write_config,
219};
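
The two config accessors above encode sub-word accesses with a 4-bit byte-enable field ((0x8 >> suboff) for 1-byte, (0xc >> suboff) for 2-byte, 0xf for 4-byte) and shift the data lane by suboff * 8 bits. The PCIE_REG_CA_* field positions live in the removed wsp_pci.h and are not repeated here; the user-space sketch below (illustrative only, with made-up helper names) covers just the byte-enable and lane math:

#include <stdio.h>
#include <stdint.h>

/* Byte enables for a 'len'-byte access at byte offset 'suboff' in a word,
 * as computed in wsp_pcie_read_config()/wsp_pcie_write_config() */
static unsigned int wsp_cfg_byte_enables(int len, int suboff)
{
	switch (len) {
	case 1:
		return 0x8u >> suboff;
	case 2:
		return 0xcu >> suboff;
	default:
		return 0xfu;
	}
}

/* Extract a 'len'-byte value from the 32-bit config data word */
static uint32_t wsp_cfg_extract(uint32_t data, int len, int suboff)
{
	uint32_t mask = (len == 1) ? 0xff : (len == 2) ? 0xffff : 0xffffffff;

	return (data >> (suboff << 3)) & mask;
}

int main(void)
{
	/* A 2-byte read at config offset 0x2e: suboff = 2 */
	printf("BE  = 0x%x\n", wsp_cfg_byte_enables(2, 2));
	printf("val = 0x%x\n", wsp_cfg_extract(0x12345678, 2, 2));
	return 0;
}
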
220
221#define TCE_SHIFT 12
222#define TCE_PAGE_SIZE (1 << TCE_SHIFT)
223#define TCE_PCI_WRITE 0x2 /* write from PCI allowed */
224#define TCE_PCI_READ 0x1 /* read from PCI allowed */
225#define TCE_RPN_MASK 0x3fffffffffful /* 42-bit RPN (4K pages) */
226#define TCE_RPN_SHIFT 12
227
228//#define dma_debug(fmt...) pr_debug(fmt)
229#define dma_debug(fmt...)
230
231static int tce_build_wsp(struct iommu_table *tbl, long index, long npages,
232 unsigned long uaddr, enum dma_data_direction direction,
233 struct dma_attrs *attrs)
234{
235 struct wsp_dma_table *ptbl = container_of(tbl,
236 struct wsp_dma_table,
237 table);
238 u64 proto_tce;
239 u64 *tcep;
240 u64 rpn;
241
242 proto_tce = TCE_PCI_READ;
243#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
244 proto_tce |= TCE_PCI_WRITE;
245#else
246 if (direction != DMA_TO_DEVICE)
247 proto_tce |= TCE_PCI_WRITE;
248#endif
249
250 /* XXX Make this faster by factoring out the page address for
251 * within a TCE table
252 */
253 while (npages--) {
254 /* We don't use it->base as the table can be scattered */
255 tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
256 tcep += (index & 0xffff);
257
258 /* can't move this out since we might cross LMB boundary */
259 rpn = __pa(uaddr) >> TCE_SHIFT;
260 *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
261
262 dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n",
263 tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT_4K);
264
265 uaddr += TCE_PAGE_SIZE;
266 index++;
267 }
268 return 0;
269}
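
tce_build_wsp() above packs each 64-bit TCE as permission bits in the low bits plus the real page number (physical address >> 12) shifted up by TCE_RPN_SHIFT. A stand-alone sketch of the packing (illustrative only; it takes a physical address directly, whereas the kernel code derives it via __pa(uaddr)):

#include <stdio.h>
#include <stdint.h>

#define TCE_SHIFT	12
#define TCE_PCI_WRITE	0x2
#define TCE_PCI_READ	0x1
#define TCE_RPN_MASK	0x3fffffffffful
#define TCE_RPN_SHIFT	12

/* Same layout as the TCE word written by tce_build_wsp() */
static uint64_t wsp_make_tce(uint64_t phys_addr, int writable)
{
	uint64_t tce = TCE_PCI_READ;
	uint64_t rpn = phys_addr >> TCE_SHIFT;

	if (writable)
		tce |= TCE_PCI_WRITE;
	return tce | ((rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT);
}

int main(void)
{
	printf("TCE for 0x10003000 (rw) = 0x%016llx\n",
	       (unsigned long long)wsp_make_tce(0x10003000ull, 1));
	return 0;
}
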
270
271static void tce_free_wsp(struct iommu_table *tbl, long index, long npages)
272{
273 struct wsp_dma_table *ptbl = container_of(tbl,
274 struct wsp_dma_table,
275 table);
276#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
277 struct pci_controller *hose = ptbl->phb->hose;
278#endif
279 u64 *tcep;
280
281 /* XXX Make this faster by factoring out the page address for
282 * within a TCE table. Also use line-kill option to kill multiple
283 * TCEs at once
284 */
285 while (npages--) {
286 /* We don't use it->base as the table can be scattered */
287 tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
288 tcep += (index & 0xffff);
289 dma_debug("[DMA] TCE %p cleared\n", tcep);
290 *tcep = 0;
291#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
292 /* Don't write there since it would pollute other MMIO accesses */
293 out_be64(hose->cfg_data + PCIE_REG_TCE_KILL,
294 PCIE_REG_TCEKILL_SINGLE | PCIE_REG_TCEKILL_PS_4K |
295 (__pa(tcep) & PCIE_REG_TCEKILL_ADDR_MASK));
296#endif
297 index++;
298 }
299}
300
301static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb,
302 unsigned int region,
303 struct pci_dev *validate)
304{
305 struct pci_controller *hose = phb->hose;
306 unsigned long size = phb->dma32_region_size;
307 unsigned long addr = phb->dma32_region_size * region + phb->dma32_base;
308 struct wsp_dma_table *tbl;
309 int tvts_per_table, i, tvt, nid;
310 unsigned long flags;
311
312 nid = of_node_to_nid(phb->hose->dn);
313
314 /* Calculate how many TVTs are needed */
315 tvts_per_table = size / 0x10000000;
316 if (tvts_per_table == 0)
317 tvts_per_table = 1;
318
319 /* Calculate the base TVT index. We know all tables have the same
320 * size so we just do a simple multiply here
321 */
322 tvt = region * tvts_per_table;
323
324 pr_debug(" Region : %d\n", region);
325 pr_debug(" DMA range : 0x%08lx..0x%08lx\n", addr, addr + size - 1);
326 pr_debug(" Number of TVTs : %d\n", tvts_per_table);
327 pr_debug(" Base TVT : %d\n", tvt);
328 pr_debug(" Node : %d\n", nid);
329
330 tbl = kzalloc_node(sizeof(struct wsp_dma_table), GFP_KERNEL, nid);
331 if (!tbl)
332 return ERR_PTR(-ENOMEM);
333 tbl->phb = phb;
334
335 /* Create as many TVTs as needed, each represents 256M at most */
336 for (i = 0; i < tvts_per_table; i++) {
337 u64 tvt_data1, tvt_data0;
338
339 		/* Allocate table. We always use a 4K TCE size for now, so
340 		 * one table is always 8 * (256M / 4K) == 512K
341 */
342 tbl->tces[i] = alloc_pages_node(nid, GFP_KERNEL, get_order(0x80000));
343 if (tbl->tces[i] == NULL)
344 goto fail;
345 memset(page_address(tbl->tces[i]), 0, 0x80000);
346
347 pr_debug(" TCE table %d at : %p\n", i, page_address(tbl->tces[i]));
348
349 /* Table size. We currently set it to be the whole 256M region */
350 tvt_data0 = 2ull << IODA_TVT0_TCE_TABLE_SIZE_SHIFT;
351 /* IO page size set to 4K */
352 tvt_data1 = 1ull << IODA_TVT1_IO_PAGE_SIZE_SHIFT;
353 /* Shift in the address */
354 tvt_data0 |= __pa(page_address(tbl->tces[i])) << IODA_TVT0_TTA_SHIFT;
355
356 		/* Validation stuff. We only validate fully bus/dev/fn for now;
357 		 * one day maybe we can group devices, but that isn't the case
358 		 * at the moment
359 */
360 if (validate) {
361 tvt_data0 |= IODA_TVT0_BUSNUM_VALID_MASK;
362 tvt_data0 |= validate->bus->number;
363 tvt_data1 |= IODA_TVT1_DEVNUM_VALID;
364 tvt_data1 |= ((u64)PCI_SLOT(validate->devfn))
365 << IODA_TVT1_DEVNUM_VALUE_SHIFT;
366 tvt_data1 |= IODA_TVT1_FUNCNUM_VALID;
367 tvt_data1 |= ((u64)PCI_FUNC(validate->devfn))
368 << IODA_TVT1_FUNCNUM_VALUE_SHIFT;
369 }
370
371 /* XX PE number is always 0 for now */
372
373 /* Program the values using the PHB lock */
374 spin_lock_irqsave(&phb->lock, flags);
375 out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
376 (tvt + i) | PCIE_REG_IODA_AD_TBL_TVT);
377 out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, tvt_data1);
378 out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, tvt_data0);
379 spin_unlock_irqrestore(&phb->lock, flags);
380 }
381
382 /* Init bits and pieces */
383 tbl->table.it_blocksize = 16;
384 tbl->table.it_page_shift = IOMMU_PAGE_SHIFT_4K;
385 tbl->table.it_offset = addr >> tbl->table.it_page_shift;
386 tbl->table.it_size = size >> tbl->table.it_page_shift;
387
388 /*
389 * It's already blank but we clear it anyway.
390 	 * Consider an additional interface that makes clearing optional
391 */
392 iommu_init_table(&tbl->table, nid);
393
394 list_add(&tbl->link, &phb->dma_tables);
395 return tbl;
396
397 fail:
398 pr_debug(" Failed to allocate a 256M TCE table !\n");
399 for (i = 0; i < tvts_per_table; i++)
400 if (tbl->tces[i])
401 __free_pages(tbl->tces[i], get_order(0x80000));
402 kfree(tbl);
403 return ERR_PTR(-ENOMEM);
404}
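
wsp_pci_create_dma32_table() above sizes each region as 2G / NUM_DMA32_REGIONS and uses one TVT per 256M of region, with the base TVT index being region * tvts_per_table. A small sketch of that arithmetic (illustrative only; 8 regions is chosen as an example because the earlier comment names it as the current maximum, and dma32_base is 0 as in wsp_setup_one_phb()):

#include <stdio.h>

int main(void)
{
	unsigned long num_regions = 8;		/* example value */
	unsigned long region_size = 0x80000000ul / num_regions;
	unsigned long tvts_per_table = region_size / 0x10000000ul;
	unsigned int region;

	if (tvts_per_table == 0)
		tvts_per_table = 1;

	for (region = 0; region < num_regions; region++)
		printf("region %u: base TVT %lu, DMA range 0x%08lx..0x%08lx\n",
		       region, region * tvts_per_table,
		       region * region_size,
		       region * region_size + region_size - 1);
	return 0;
}
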
405
406static void wsp_pci_dma_dev_setup(struct pci_dev *pdev)
407{
408 struct dev_archdata *archdata = &pdev->dev.archdata;
409 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
410 struct wsp_phb *phb = hose->private_data;
411 struct wsp_dma_table *table = NULL;
412 unsigned long flags;
413 int i;
414
415 /* Don't assign an iommu table to a bridge */
416 if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
417 return;
418
419 pr_debug("%s: Setting up DMA...\n", pci_name(pdev));
420
421 spin_lock_irqsave(&phb->lock, flags);
422
423 	/* If only one region, check if it already exists */
424 if (phb->dma32_num_regions == 1) {
425 spin_unlock_irqrestore(&phb->lock, flags);
426 if (list_empty(&phb->dma_tables))
427 table = wsp_pci_create_dma32_table(phb, 0, NULL);
428 else
429 table = list_first_entry(&phb->dma_tables,
430 struct wsp_dma_table,
431 link);
432 } else {
433 /* else find a free region */
434 for (i = 0; i < phb->dma32_num_regions && !table; i++) {
435 if (__test_and_set_bit(i, &phb->dma32_map))
436 continue;
437 spin_unlock_irqrestore(&phb->lock, flags);
438 table = wsp_pci_create_dma32_table(phb, i, pdev);
439 }
440 }
441
442 /* Check if we got an error */
443 if (IS_ERR(table)) {
444 pr_err("%s: Failed to create DMA table, err %ld !\n",
445 pci_name(pdev), PTR_ERR(table));
446 return;
447 }
448
449 /* Or a valid table */
450 if (table) {
451 pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n",
452 pci_name(pdev),
453 table->table.it_offset << IOMMU_PAGE_SHIFT_4K,
454 (table->table.it_offset << IOMMU_PAGE_SHIFT_4K)
455 + phb->dma32_region_size - 1);
456 archdata->dma_data.iommu_table_base = &table->table;
457 return;
458 }
459
460 /* Or no room */
461 spin_unlock_irqrestore(&phb->lock, flags);
462 pr_err("%s: Out of DMA space !\n", pci_name(pdev));
463}
464
465static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
466{
467 u64 val;
468 int i;
469
470#define DUMP_REG(x) \
471 pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))
472
473 /*
474 	 * Some WSP variants have a bogus class code by default in the PCI-E
475 * root complex's built-in P2P bridge
476 */
477 val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
478 pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
479 out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
480 (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
481 pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));
482
483#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
484 /* XXX Disable TCE caching, it doesn't work on DD1 */
485 out_be64(hose->cfg_data + 0xe50,
486 in_be64(hose->cfg_data + 0xe50) | (3ull << 62));
487 printk("PCI-E DEBUG CONTROL 5 = 0x%llx\n", in_be64(hose->cfg_data + 0xe50));
488#endif
489
490 /* Configure M32A and IO. IO is hard wired to be 1M for now */
491 out_be64(hose->cfg_data + PCIE_REG_IO_BASE_ADDR, hose->io_base_phys);
492 out_be64(hose->cfg_data + PCIE_REG_IO_BASE_MASK,
493 (~(hose->io_resource.end - hose->io_resource.start)) &
494 0x3fffffff000ul);
495 out_be64(hose->cfg_data + PCIE_REG_IO_START_ADDR, 0 | 1);
496
497 out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_ADDR,
498 hose->mem_resources[0].start);
499 printk("Want to write to M32A_BASE_MASK : 0x%llx\n",
500 (~(hose->mem_resources[0].end -
501 hose->mem_resources[0].start)) & 0x3ffffff0000ul);
502 out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_MASK,
503 (~(hose->mem_resources[0].end -
504 hose->mem_resources[0].start)) & 0x3ffffff0000ul);
505 out_be64(hose->cfg_data + PCIE_REG_M32A_START_ADDR,
506 (hose->mem_resources[0].start - hose->mem_offset[0]) | 1);
507
508 /* Clear all TVT entries
509 *
510 * XX Might get TVT count from device-tree
511 */
512 for (i = 0; i < IODA_TVT_COUNT; i++) {
513 out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
514 PCIE_REG_IODA_AD_TBL_TVT | i);
515 out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, 0);
516 out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, 0);
517 }
518
519 /* Kill the TCE cache */
520 out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG,
521 in_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG) |
522 PCIE_REG_PHBC_64B_TCE_EN);
523
524 /* Enable 32 & 64-bit MSIs, IO space and M32A */
525 val = PCIE_REG_PHBC_32BIT_MSI_EN |
526 PCIE_REG_PHBC_IO_EN |
527 PCIE_REG_PHBC_64BIT_MSI_EN |
528 PCIE_REG_PHBC_M32A_EN;
529 if (iommu_is_off)
530 val |= PCIE_REG_PHBC_DMA_XLATE_BYPASS;
531 pr_debug("Will write config: 0x%llx\n", val);
532 out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG, val);
533
534 /* Enable error reporting */
535 out_be64(hose->cfg_data + 0xe00,
536 in_be64(hose->cfg_data + 0xe00) | 0x0008000000000000ull);
537
538 	/* Mask an error that's generated when doing config space probes
539 *
540 * XXX Maybe we should only mask it around config space cycles... that or
541 	 * ignore it when we know we had a config space cycle recently?
542 */
543 out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS_MASK, 0x8000000000000000ull);
544 out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS_MASK, 0x8000000000000000ull);
545
546 	/* Enable UTL errors; for now, all of them go to UTL irq 1
547 	 *
548 	 * We similarly mask one UTL error apparently caused during normal
549 	 * probing. We also mask the link-up error
550 */
551 out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_ERR_SEV, 0);
552 out_be64(hose->cfg_data + PCIE_UTL_RC_ERR_SEVERITY, 0);
553 out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_ERROR_SEV, 0);
554 out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_IRQ_EN, 0xffffffff00000000ull);
555 out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_IRQ_EN, 0xff5fffff00000000ull);
556 out_be64(hose->cfg_data + PCIE_UTL_EP_ERR_IRQ_EN, 0xffffffff00000000ull);
557
558 DUMP_REG(PCIE_REG_IO_BASE_ADDR);
559 DUMP_REG(PCIE_REG_IO_BASE_MASK);
560 DUMP_REG(PCIE_REG_IO_START_ADDR);
561 DUMP_REG(PCIE_REG_M32A_BASE_ADDR);
562 DUMP_REG(PCIE_REG_M32A_BASE_MASK);
563 DUMP_REG(PCIE_REG_M32A_START_ADDR);
564 DUMP_REG(PCIE_REG_M32B_BASE_ADDR);
565 DUMP_REG(PCIE_REG_M32B_BASE_MASK);
566 DUMP_REG(PCIE_REG_M32B_START_ADDR);
567 DUMP_REG(PCIE_REG_M64_BASE_ADDR);
568 DUMP_REG(PCIE_REG_M64_BASE_MASK);
569 DUMP_REG(PCIE_REG_M64_START_ADDR);
570 DUMP_REG(PCIE_REG_PHB_CONFIG);
571}
572
573static void wsp_pci_wait_io_idle(struct wsp_phb *phb, unsigned long port)
574{
575 u64 val;
576 int i;
577
578 for (i = 0; i < 10000; i++) {
579 val = in_be64(phb->hose->cfg_data + 0xe08);
580 if ((val & 0x1900000000000000ull) == 0x0100000000000000ull)
581 return;
582 udelay(1);
583 }
584 pr_warning("PCI IO timeout on domain %d port 0x%lx\n",
585 phb->hose->global_number, port);
586}
587
588#define DEF_PCI_AC_RET_pio(name, ret, at, al, aa) \
589static ret wsp_pci_##name at \
590{ \
591 struct iowa_bus *bus; \
592 struct wsp_phb *phb; \
593 unsigned long flags; \
594 ret rval; \
595 bus = iowa_pio_find_bus(aa); \
596 WARN_ON(!bus); \
597 phb = bus->private; \
598 spin_lock_irqsave(&phb->lock, flags); \
599 wsp_pci_wait_io_idle(phb, aa); \
600 rval = __do_##name al; \
601 spin_unlock_irqrestore(&phb->lock, flags); \
602 return rval; \
603}
604
605#define DEF_PCI_AC_NORET_pio(name, at, al, aa) \
606static void wsp_pci_##name at \
607{ \
608 struct iowa_bus *bus; \
609 struct wsp_phb *phb; \
610 unsigned long flags; \
611 bus = iowa_pio_find_bus(aa); \
612 WARN_ON(!bus); \
613 phb = bus->private; \
614 spin_lock_irqsave(&phb->lock, flags); \
615 wsp_pci_wait_io_idle(phb, aa); \
616 __do_##name al; \
617 spin_unlock_irqrestore(&phb->lock, flags); \
618}
619
620#define DEF_PCI_AC_RET_mem(name, ret, at, al, aa)
621#define DEF_PCI_AC_NORET_mem(name, at, al, aa)
622
623#define DEF_PCI_AC_RET(name, ret, at, al, space, aa) \
624 DEF_PCI_AC_RET_##space(name, ret, at, al, aa)
625
626#define DEF_PCI_AC_NORET(name, at, al, space, aa) \
627 DEF_PCI_AC_NORET_##space(name, at, al, aa) \
628
629
630#include <asm/io-defs.h>
631
632#undef DEF_PCI_AC_RET
633#undef DEF_PCI_AC_NORET
634
635static struct ppc_pci_io wsp_pci_iops = {
636 .inb = wsp_pci_inb,
637 .inw = wsp_pci_inw,
638 .inl = wsp_pci_inl,
639 .outb = wsp_pci_outb,
640 .outw = wsp_pci_outw,
641 .outl = wsp_pci_outl,
642 .insb = wsp_pci_insb,
643 .insw = wsp_pci_insw,
644 .insl = wsp_pci_insl,
645 .outsb = wsp_pci_outsb,
646 .outsw = wsp_pci_outsw,
647 .outsl = wsp_pci_outsl,
648};
649
650static int __init wsp_setup_one_phb(struct device_node *np)
651{
652 struct pci_controller *hose;
653 struct wsp_phb *phb;
654
655 	pr_info("PCI: Setting up PCIe host bridge %s\n", np->full_name);
656
657 phb = zalloc_maybe_bootmem(sizeof(struct wsp_phb), GFP_KERNEL);
658 if (!phb)
659 return -ENOMEM;
660 hose = pcibios_alloc_controller(np);
661 if (!hose) {
662 /* Can't really free the phb */
663 return -ENOMEM;
664 }
665 hose->private_data = phb;
666 phb->hose = hose;
667
668 INIT_LIST_HEAD(&phb->dma_tables);
669 spin_lock_init(&phb->lock);
670
671 /* XXX Use bus-range property ? */
672 hose->first_busno = 0;
673 hose->last_busno = 0xff;
674
675 /* We use cfg_data as the address for the whole bridge MMIO space
676 */
677 hose->cfg_data = of_iomap(hose->dn, 0);
678
679 pr_debug("PCIe registers mapped at 0x%p\n", hose->cfg_data);
680
681 /* Get the ranges of the device-tree */
682 pci_process_bridge_OF_ranges(hose, np, 0);
683
684 /* XXX Force re-assigning of everything for now */
685 pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC |
686 PCI_ENABLE_PROC_DOMAINS);
687
688 /* Calculate how the TCE space is divided */
689 phb->dma32_base = 0;
690 phb->dma32_num_regions = NUM_DMA32_REGIONS;
691 if (phb->dma32_num_regions > MAX_TABLE_TVT_COUNT) {
692 pr_warning("IOMMU: Clamped to %d DMA32 regions\n",
693 MAX_TABLE_TVT_COUNT);
694 phb->dma32_num_regions = MAX_TABLE_TVT_COUNT;
695 }
696 phb->dma32_region_size = 0x80000000 / phb->dma32_num_regions;
697
698 BUG_ON(!is_power_of_2(phb->dma32_region_size));
699
700 /* Setup config ops */
701 hose->ops = &wsp_pcie_pci_ops;
702
703 /* Configure the HW */
704 wsp_pcie_configure_hw(hose);
705
706 	/* Instantiate IO workarounds */
707 iowa_register_bus(hose, &wsp_pci_iops, NULL, phb);
708#ifdef CONFIG_PCI_MSI
709 wsp_setup_phb_msi(hose);
710#endif
711
712 /* Add to global list */
713 list_add(&phb->all, &wsp_phbs);
714
715 return 0;
716}
717
718void __init wsp_setup_pci(void)
719{
720 struct device_node *np;
721 int rc;
722
723 /* Find host bridges */
724 for_each_compatible_node(np, "pciex", PCIE_COMPATIBLE) {
725 rc = wsp_setup_one_phb(np);
726 if (rc)
727 pr_err("Failed to setup PCIe bridge %s, rc=%d\n",
728 np->full_name, rc);
729 }
730
731 /* Establish device-tree linkage */
732 pci_devs_phb_init();
733
734 /* Set DMA ops to use TCEs */
735 if (iommu_is_off) {
736 pr_info("PCI-E: Disabled TCEs, using direct DMA\n");
737 set_pci_dma_ops(&dma_direct_ops);
738 } else {
739 ppc_md.pci_dma_dev_setup = wsp_pci_dma_dev_setup;
740 ppc_md.tce_build = tce_build_wsp;
741 ppc_md.tce_free = tce_free_wsp;
742 set_pci_dma_ops(&dma_iommu_ops);
743 }
744}
745
746#define err_debug(fmt...) pr_debug(fmt)
747//#define err_debug(fmt...)
748
749static int __init wsp_pci_get_err_irq_no_dt(struct device_node *np)
750{
751 const u32 *prop;
752 int hw_irq;
753
754 /* Ok, no interrupts property, let's try to find our child P2P */
755 np = of_get_next_child(np, NULL);
756 if (np == NULL)
757 return 0;
758
759 	/* Grab its interrupt map */
760 prop = of_get_property(np, "interrupt-map", NULL);
761 if (prop == NULL)
762 return 0;
763
764 /* Grab one of the interrupts in there, keep the low 4 bits */
765 hw_irq = prop[5] & 0xf;
766
767 /* 0..4 for PHB 0 and 5..9 for PHB 1 */
768 if (hw_irq < 5)
769 hw_irq = 4;
770 else
771 hw_irq = 9;
772 hw_irq |= prop[5] & ~0xf;
773
774 err_debug("PCI: Using 0x%x as error IRQ for %s\n",
775 hw_irq, np->parent->full_name);
776 return irq_create_mapping(NULL, hw_irq);
777}
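
The fallback above pulls one interrupt specifier out of the child P2P bridge's interrupt-map, keeps the low nibble only to decide which PHB's 0..4 / 5..9 range it falls in, substitutes that PHB's error interrupt (4 or 9), and preserves the remaining specifier bits. A standalone sketch of just that remapping (the sample specifier values are made up):

#include <stdio.h>

static unsigned int err_irq_from_map_entry(unsigned int spec)
{
	unsigned int hw_irq = spec & 0xf;	/* low nibble: 0..4 = PHB 0, 5..9 = PHB 1 */

	hw_irq = (hw_irq < 5) ? 4 : 9;		/* error IRQ of the matching PHB */
	return hw_irq | (spec & ~0xfu);		/* keep the remaining specifier bits */
}

int main(void)
{
	printf("0x%x -> 0x%x\n", 0x32u, err_irq_from_map_entry(0x32));
	printf("0x%x -> 0x%x\n", 0x37u, err_irq_from_map_entry(0x37));
	return 0;
}
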
778
779static const struct {
780 u32 offset;
781 const char *name;
782} wsp_pci_regs[] = {
783#define DREG(x) { PCIE_REG_##x, #x }
784#define DUTL(x) { PCIE_UTL_##x, "UTL_" #x }
785	/* Architected registers, excluding the CONFIG_ and IODA
786	 * ones, since reading those can have side effects
787	 */
788 DREG(DMA_CHAN_STATUS),
789 DREG(CPU_LOADSTORE_STATUS),
790 DREG(LOCK0),
791 DREG(LOCK1),
792 DREG(PHB_CONFIG),
793 DREG(IO_BASE_ADDR),
794 DREG(IO_BASE_MASK),
795 DREG(IO_START_ADDR),
796 DREG(M32A_BASE_ADDR),
797 DREG(M32A_BASE_MASK),
798 DREG(M32A_START_ADDR),
799 DREG(M32B_BASE_ADDR),
800 DREG(M32B_BASE_MASK),
801 DREG(M32B_START_ADDR),
802 DREG(M64_BASE_ADDR),
803 DREG(M64_BASE_MASK),
804 DREG(M64_START_ADDR),
805 DREG(TCE_KILL),
806 DREG(LOCK2),
807 DREG(PHB_GEN_CAP),
808 DREG(PHB_TCE_CAP),
809 DREG(PHB_IRQ_CAP),
810 DREG(PHB_EEH_CAP),
811 DREG(PAPR_ERR_INJ_CONTROL),
812 DREG(PAPR_ERR_INJ_ADDR),
813 DREG(PAPR_ERR_INJ_MASK),
814
815 /* UTL core regs */
816 DUTL(SYS_BUS_CONTROL),
817 DUTL(STATUS),
818 DUTL(SYS_BUS_AGENT_STATUS),
819 DUTL(SYS_BUS_AGENT_ERR_SEV),
820 DUTL(SYS_BUS_AGENT_IRQ_EN),
821 DUTL(SYS_BUS_BURST_SZ_CONF),
822 DUTL(REVISION_ID),
823 DUTL(OUT_POST_HDR_BUF_ALLOC),
824 DUTL(OUT_POST_DAT_BUF_ALLOC),
825 DUTL(IN_POST_HDR_BUF_ALLOC),
826 DUTL(IN_POST_DAT_BUF_ALLOC),
827 DUTL(OUT_NP_BUF_ALLOC),
828 DUTL(IN_NP_BUF_ALLOC),
829 DUTL(PCIE_TAGS_ALLOC),
830 DUTL(GBIF_READ_TAGS_ALLOC),
831
832 DUTL(PCIE_PORT_CONTROL),
833 DUTL(PCIE_PORT_STATUS),
834 DUTL(PCIE_PORT_ERROR_SEV),
835 DUTL(PCIE_PORT_IRQ_EN),
836 DUTL(RC_STATUS),
837 DUTL(RC_ERR_SEVERITY),
838 DUTL(RC_IRQ_EN),
839 DUTL(EP_STATUS),
840 DUTL(EP_ERR_SEVERITY),
841 DUTL(EP_ERR_IRQ_EN),
842 DUTL(PCI_PM_CTRL1),
843 DUTL(PCI_PM_CTRL2),
844
845 /* PCIe stack regs */
846 DREG(SYSTEM_CONFIG1),
847 DREG(SYSTEM_CONFIG2),
848 DREG(EP_SYSTEM_CONFIG),
849 DREG(EP_FLR),
850 DREG(EP_BAR_CONFIG),
851 DREG(LINK_CONFIG),
852 DREG(PM_CONFIG),
853 DREG(DLP_CONTROL),
854 DREG(DLP_STATUS),
855 DREG(ERR_REPORT_CONTROL),
856 DREG(SLOT_CONTROL1),
857 DREG(SLOT_CONTROL2),
858 DREG(UTL_CONFIG),
859 DREG(BUFFERS_CONFIG),
860 DREG(ERROR_INJECT),
861 DREG(SRIOV_CONFIG),
862 DREG(PF0_SRIOV_STATUS),
863 DREG(PF1_SRIOV_STATUS),
864 DREG(PORT_NUMBER),
865 DREG(POR_SYSTEM_CONFIG),
866
867 /* Internal logic regs */
868 DREG(PHB_VERSION),
869 DREG(RESET),
870 DREG(PHB_CONTROL),
871 DREG(PHB_TIMEOUT_CONTROL1),
872 DREG(PHB_QUIESCE_DMA),
873 DREG(PHB_DMA_READ_TAG_ACTV),
874 DREG(PHB_TCE_READ_TAG_ACTV),
875
876 /* FIR registers */
877 DREG(LEM_FIR_ACCUM),
878 DREG(LEM_FIR_AND_MASK),
879 DREG(LEM_FIR_OR_MASK),
880 DREG(LEM_ACTION0),
881 DREG(LEM_ACTION1),
882 DREG(LEM_ERROR_MASK),
883 DREG(LEM_ERROR_AND_MASK),
884 DREG(LEM_ERROR_OR_MASK),
885
886 /* Error traps registers */
887 DREG(PHB_ERR_STATUS),
888 DREG(PHB_ERR_STATUS),
889 DREG(PHB_ERR1_STATUS),
890 DREG(PHB_ERR_INJECT),
891 DREG(PHB_ERR_LEM_ENABLE),
892 DREG(PHB_ERR_IRQ_ENABLE),
893 DREG(PHB_ERR_FREEZE_ENABLE),
894 DREG(PHB_ERR_SIDE_ENABLE),
895 DREG(PHB_ERR_LOG_0),
896 DREG(PHB_ERR_LOG_1),
897 DREG(PHB_ERR_STATUS_MASK),
898 DREG(PHB_ERR1_STATUS_MASK),
899 DREG(MMIO_ERR_STATUS),
900 DREG(MMIO_ERR1_STATUS),
901 DREG(MMIO_ERR_INJECT),
902 DREG(MMIO_ERR_LEM_ENABLE),
903 DREG(MMIO_ERR_IRQ_ENABLE),
904 DREG(MMIO_ERR_FREEZE_ENABLE),
905 DREG(MMIO_ERR_SIDE_ENABLE),
906 DREG(MMIO_ERR_LOG_0),
907 DREG(MMIO_ERR_LOG_1),
908 DREG(MMIO_ERR_STATUS_MASK),
909 DREG(MMIO_ERR1_STATUS_MASK),
910 DREG(DMA_ERR_STATUS),
911 DREG(DMA_ERR1_STATUS),
912 DREG(DMA_ERR_INJECT),
913 DREG(DMA_ERR_LEM_ENABLE),
914 DREG(DMA_ERR_IRQ_ENABLE),
915 DREG(DMA_ERR_FREEZE_ENABLE),
916 DREG(DMA_ERR_SIDE_ENABLE),
917 DREG(DMA_ERR_LOG_0),
918 DREG(DMA_ERR_LOG_1),
919 DREG(DMA_ERR_STATUS_MASK),
920 DREG(DMA_ERR1_STATUS_MASK),
921
922 /* Debug and Trace registers */
923 DREG(PHB_DEBUG_CONTROL0),
924 DREG(PHB_DEBUG_STATUS0),
925 DREG(PHB_DEBUG_CONTROL1),
926 DREG(PHB_DEBUG_STATUS1),
927 DREG(PHB_DEBUG_CONTROL2),
928 DREG(PHB_DEBUG_STATUS2),
929 DREG(PHB_DEBUG_CONTROL3),
930 DREG(PHB_DEBUG_STATUS3),
931 DREG(PHB_DEBUG_CONTROL4),
932 DREG(PHB_DEBUG_STATUS4),
933 DREG(PHB_DEBUG_CONTROL5),
934 DREG(PHB_DEBUG_STATUS5),
935
936 /* Don't seem to exist ...
937 DREG(PHB_DEBUG_CONTROL6),
938 DREG(PHB_DEBUG_STATUS6),
939 */
940};
941
942static int wsp_pci_regs_show(struct seq_file *m, void *private)
943{
944 struct wsp_phb *phb = m->private;
945 struct pci_controller *hose = phb->hose;
946 int i;
947
948 for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
949 /* Skip write-only regs */
950 if (wsp_pci_regs[i].offset == 0xc08 ||
951 wsp_pci_regs[i].offset == 0xc10 ||
952 wsp_pci_regs[i].offset == 0xc38 ||
953 wsp_pci_regs[i].offset == 0xc40)
954 continue;
955 seq_printf(m, "0x%03x: 0x%016llx %s\n",
956 wsp_pci_regs[i].offset,
957 in_be64(hose->cfg_data + wsp_pci_regs[i].offset),
958 wsp_pci_regs[i].name);
959 }
960 return 0;
961}
962
963static int wsp_pci_regs_open(struct inode *inode, struct file *file)
964{
965 return single_open(file, wsp_pci_regs_show, inode->i_private);
966}
967
968static const struct file_operations wsp_pci_regs_fops = {
969 .open = wsp_pci_regs_open,
970 .read = seq_read,
971 .llseek = seq_lseek,
972 .release = single_release,
973};
974
975static int wsp_pci_reg_set(void *data, u64 val)
976{
977 out_be64((void __iomem *)data, val);
978 return 0;
979}
980
981static int wsp_pci_reg_get(void *data, u64 *val)
982{
983 *val = in_be64((void __iomem *)data);
984 return 0;
985}
986
987DEFINE_SIMPLE_ATTRIBUTE(wsp_pci_reg_fops, wsp_pci_reg_get, wsp_pci_reg_set, "0x%llx\n");
988
989static irqreturn_t wsp_pci_err_irq(int irq, void *dev_id)
990{
991 struct wsp_phb *phb = dev_id;
992 struct pci_controller *hose = phb->hose;
993 irqreturn_t handled = IRQ_NONE;
994 struct wsp_pcie_err_log_data ed;
995
996 pr_err("PCI: Error interrupt on %s (PHB %d)\n",
997 hose->dn->full_name, hose->global_number);
998 again:
999 memset(&ed, 0, sizeof(ed));
1000
1001 /* Read and clear UTL errors */
1002 ed.utl_sys_err = in_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS);
1003 if (ed.utl_sys_err)
1004 out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS, ed.utl_sys_err);
1005 ed.utl_port_err = in_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS);
1006 if (ed.utl_port_err)
1007 out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS, ed.utl_port_err);
1008 ed.utl_rc_err = in_be64(hose->cfg_data + PCIE_UTL_RC_STATUS);
1009 if (ed.utl_rc_err)
1010 out_be64(hose->cfg_data + PCIE_UTL_RC_STATUS, ed.utl_rc_err);
1011
1012 /* Read and clear main trap errors */
1013 ed.phb_err = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS);
1014 if (ed.phb_err) {
1015 ed.phb_err1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS);
1016 ed.phb_log0 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_0);
1017 ed.phb_log1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_1);
1018 out_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS, 0);
1019 out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS, 0);
1020 }
1021 ed.mmio_err = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS);
1022 if (ed.mmio_err) {
1023 ed.mmio_err1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS);
1024 ed.mmio_log0 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_0);
1025 ed.mmio_log1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_1);
1026 out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS, 0);
1027 out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS, 0);
1028 }
1029 ed.dma_err = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS);
1030 if (ed.dma_err) {
1031 ed.dma_err1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS);
1032 ed.dma_log0 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_0);
1033 ed.dma_log1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_1);
1034 out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS, 0);
1035 out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS, 0);
1036 }
1037
1038 /* Now print things out */
1039 if (ed.phb_err) {
1040 pr_err(" PHB Error Status : 0x%016llx\n", ed.phb_err);
1041 pr_err(" PHB First Error Status: 0x%016llx\n", ed.phb_err1);
1042 pr_err(" PHB Error Log 0 : 0x%016llx\n", ed.phb_log0);
1043 pr_err(" PHB Error Log 1 : 0x%016llx\n", ed.phb_log1);
1044 }
1045 if (ed.mmio_err) {
1046 pr_err(" MMIO Error Status : 0x%016llx\n", ed.mmio_err);
1047 pr_err(" MMIO First Error Status: 0x%016llx\n", ed.mmio_err1);
1048 pr_err(" MMIO Error Log 0 : 0x%016llx\n", ed.mmio_log0);
1049 pr_err(" MMIO Error Log 1 : 0x%016llx\n", ed.mmio_log1);
1050 }
1051 if (ed.dma_err) {
1052 pr_err(" DMA Error Status : 0x%016llx\n", ed.dma_err);
1053 pr_err(" DMA First Error Status: 0x%016llx\n", ed.dma_err1);
1054 pr_err(" DMA Error Log 0 : 0x%016llx\n", ed.dma_log0);
1055 pr_err(" DMA Error Log 1 : 0x%016llx\n", ed.dma_log1);
1056 }
1057 if (ed.utl_sys_err)
1058 pr_err(" UTL Sys Error Status : 0x%016llx\n", ed.utl_sys_err);
1059 if (ed.utl_port_err)
1060 pr_err(" UTL Port Error Status : 0x%016llx\n", ed.utl_port_err);
1061 if (ed.utl_rc_err)
1062 pr_err(" UTL RC Error Status : 0x%016llx\n", ed.utl_rc_err);
1063
1064	/* The interrupt is raised by the error traps. If any trap reported
1065	 * an error, loop again in case the UTL latched new errors while we
1066	 * were reading and clearing the traps.
1067	 */
1068 if (ed.dma_err || ed.mmio_err || ed.phb_err) {
1069 handled = IRQ_HANDLED;
1070 goto again;
1071 }
1072 return handled;
1073}
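
The handler above uses a read-and-clear-then-re-check loop: it only returns once a full pass over the trap registers finds nothing latched, so errors the UTL buffered while the previous batch was being logged are not lost. A standalone model of that pattern (the fake clear-on-read register and the sample error sequence are purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* fake clear-on-read trap register: errors that arrive over time */
static uint64_t trap_sequence[] = { 0x40, 0x02, 0 };
static int trap_tick;

static uint64_t read_and_clear_trap(void)
{
	return trap_sequence[trap_tick++];
}

int main(void)
{
	int handled = 0;
	uint64_t err;

	do {
		err = read_and_clear_trap();
		if (err) {
			printf("error status 0x%llx\n", (unsigned long long)err);
			handled = 1;
		}
	} while (err);	/* only stop after a clean pass over the trap */

	return handled ? 0 : 1;
}
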
1074
1075static void __init wsp_setup_pci_err_reporting(struct wsp_phb *phb)
1076{
1077 struct pci_controller *hose = phb->hose;
1078 int err_irq, i, rc;
1079 char fname[16];
1080
1081 /* Create a debugfs file for that PHB */
1082 sprintf(fname, "phb%d", phb->hose->global_number);
1083 phb->ddir = debugfs_create_dir(fname, powerpc_debugfs_root);
1084
1085 /* Some useful debug output */
1086 if (phb->ddir) {
1087 struct dentry *d = debugfs_create_dir("regs", phb->ddir);
1088 char tmp[64];
1089
1090 for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
1091 sprintf(tmp, "%03x_%s", wsp_pci_regs[i].offset,
1092 wsp_pci_regs[i].name);
1093 debugfs_create_file(tmp, 0600, d,
1094 hose->cfg_data + wsp_pci_regs[i].offset,
1095 &wsp_pci_reg_fops);
1096 }
1097 debugfs_create_file("all_regs", 0600, phb->ddir, phb, &wsp_pci_regs_fops);
1098 }
1099
1100 /* Find the IRQ number for that PHB */
1101 err_irq = irq_of_parse_and_map(hose->dn, 0);
1102 if (err_irq == 0)
1103		/* XXX Error IRQ missing from the device-tree */
1104 err_irq = wsp_pci_get_err_irq_no_dt(hose->dn);
1105 if (err_irq == 0) {
1106 pr_err("PCI: Failed to fetch error interrupt for %s\n",
1107 hose->dn->full_name);
1108 return;
1109 }
1110 /* Request it */
1111 rc = request_irq(err_irq, wsp_pci_err_irq, 0, "wsp_pci error", phb);
1112 if (rc) {
1113 pr_err("PCI: Failed to request interrupt for %s\n",
1114 hose->dn->full_name);
1115 }
1116 /* Enable interrupts for all errors for now */
1117 out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
1118 out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
1119 out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
1120}
1121
1122/*
1123 * This is called later to hookup with the error interrupt
1124 */
1125static int __init wsp_setup_pci_late(void)
1126{
1127 struct wsp_phb *phb;
1128
1129 list_for_each_entry(phb, &wsp_phbs, all)
1130 wsp_setup_pci_err_reporting(phb);
1131
1132 return 0;
1133}
1134arch_initcall(wsp_setup_pci_late);
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.h b/arch/powerpc/platforms/wsp/wsp_pci.h
deleted file mode 100644
index 52e9bd95250d..000000000000
--- a/arch/powerpc/platforms/wsp/wsp_pci.h
+++ /dev/null
@@ -1,268 +0,0 @@
1/*
2 * Copyright 2010 Ben Herrenschmidt, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#ifndef __WSP_PCI_H
11#define __WSP_PCI_H
12
13/* Architected registers */
14#define PCIE_REG_DMA_CHAN_STATUS 0x110
15#define PCIE_REG_CPU_LOADSTORE_STATUS 0x120
16
17#define PCIE_REG_CONFIG_DATA 0x130
18#define PCIE_REG_LOCK0 0x138
19#define PCIE_REG_CONFIG_ADDRESS 0x140
20#define PCIE_REG_CA_ENABLE 0x8000000000000000ull
21#define PCIE_REG_CA_BUS_MASK 0x0ff0000000000000ull
22#define PCIE_REG_CA_BUS_SHIFT (20+32)
23#define PCIE_REG_CA_DEV_MASK 0x000f800000000000ull
24#define PCIE_REG_CA_DEV_SHIFT (15+32)
25#define PCIE_REG_CA_FUNC_MASK 0x0000700000000000ull
26#define PCIE_REG_CA_FUNC_SHIFT (12+32)
27#define PCIE_REG_CA_REG_MASK 0x00000fff00000000ull
28#define PCIE_REG_CA_REG_SHIFT ( 0+32)
29#define PCIE_REG_CA_BE_MASK 0x00000000f0000000ull
30#define PCIE_REG_CA_BE_SHIFT ( 28)
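
These fields describe how a value written to PCIE_REG_CONFIG_ADDRESS selects a config-space register: bus, device, function and register number are shifted into the upper word and PCIE_REG_CA_ENABLE validates the cycle. The helper below is an illustrative reconstruction from the masks and shifts alone (constants copied from the definitions above), not a copy of the driver's config accessor:

#include <stdint.h>
#include <stdio.h>

#define CA_ENABLE	0x8000000000000000ull
#define CA_BUS_MASK	0x0ff0000000000000ull
#define CA_BUS_SHIFT	(20 + 32)
#define CA_DEV_MASK	0x000f800000000000ull
#define CA_DEV_SHIFT	(15 + 32)
#define CA_FUNC_MASK	0x0000700000000000ull
#define CA_FUNC_SHIFT	(12 + 32)
#define CA_REG_MASK	0x00000fff00000000ull
#define CA_REG_SHIFT	(0 + 32)

static uint64_t ca_compose(uint8_t bus, uint8_t dev, uint8_t fn, uint16_t reg)
{
	return CA_ENABLE |
	       (((uint64_t)bus << CA_BUS_SHIFT)  & CA_BUS_MASK)  |
	       (((uint64_t)dev << CA_DEV_SHIFT)  & CA_DEV_MASK)  |
	       (((uint64_t)fn  << CA_FUNC_SHIFT) & CA_FUNC_MASK) |
	       (((uint64_t)reg << CA_REG_SHIFT)  & CA_REG_MASK);
}

int main(void)
{
	/* bus 1, device 2, function 0, register 0x10 */
	printf("CONFIG_ADDRESS = 0x%016llx\n",
	       (unsigned long long)ca_compose(1, 2, 0, 0x10));
	return 0;
}
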
31#define PCIE_REG_LOCK1 0x148
32
33#define PCIE_REG_PHB_CONFIG 0x160
34#define PCIE_REG_PHBC_64B_TCE_EN 0x2000000000000000ull
35#define PCIE_REG_PHBC_MMIO_DMA_FREEZE_EN 0x1000000000000000ull
36#define PCIE_REG_PHBC_32BIT_MSI_EN 0x0080000000000000ull
37#define PCIE_REG_PHBC_M64_EN 0x0040000000000000ull
38#define PCIE_REG_PHBC_IO_EN 0x0008000000000000ull
39#define PCIE_REG_PHBC_64BIT_MSI_EN 0x0002000000000000ull
40#define PCIE_REG_PHBC_M32A_EN 0x0000800000000000ull
41#define PCIE_REG_PHBC_M32B_EN 0x0000400000000000ull
42#define PCIE_REG_PHBC_MSI_PE_VALIDATE 0x0000200000000000ull
43#define PCIE_REG_PHBC_DMA_XLATE_BYPASS 0x0000100000000000ull
44
45#define PCIE_REG_IO_BASE_ADDR 0x170
46#define PCIE_REG_IO_BASE_MASK 0x178
47#define PCIE_REG_IO_START_ADDR 0x180
48
49#define PCIE_REG_M32A_BASE_ADDR 0x190
50#define PCIE_REG_M32A_BASE_MASK 0x198
51#define PCIE_REG_M32A_START_ADDR 0x1a0
52
53#define PCIE_REG_M32B_BASE_ADDR 0x1b0
54#define PCIE_REG_M32B_BASE_MASK 0x1b8
55#define PCIE_REG_M32B_START_ADDR 0x1c0
56
57#define PCIE_REG_M64_BASE_ADDR 0x1e0
58#define PCIE_REG_M64_BASE_MASK 0x1e8
59#define PCIE_REG_M64_START_ADDR 0x1f0
60
61#define PCIE_REG_TCE_KILL 0x210
62#define PCIE_REG_TCEKILL_SINGLE 0x8000000000000000ull
63#define PCIE_REG_TCEKILL_ADDR_MASK 0x000003fffffffff8ull
64#define PCIE_REG_TCEKILL_PS_4K 0
65#define PCIE_REG_TCEKILL_PS_64K 1
66#define PCIE_REG_TCEKILL_PS_16M 2
67#define PCIE_REG_TCEKILL_PS_16G 3
68
69#define PCIE_REG_IODA_ADDR 0x220
70#define PCIE_REG_IODA_AD_AUTOINC 0x8000000000000000ull
71#define PCIE_REG_IODA_AD_TBL_MVT 0x0005000000000000ull
72#define PCIE_REG_IODA_AD_TBL_PELT 0x0006000000000000ull
73#define PCIE_REG_IODA_AD_TBL_PESTA 0x0007000000000000ull
74#define PCIE_REG_IODA_AD_TBL_PESTB 0x0008000000000000ull
75#define PCIE_REG_IODA_AD_TBL_TVT 0x0009000000000000ull
76#define PCIE_REG_IODA_AD_TBL_TCE 0x000a000000000000ull
77#define PCIE_REG_IODA_DATA0 0x228
78#define PCIE_REG_IODA_DATA1 0x230
79
80#define PCIE_REG_LOCK2 0x240
81
82#define PCIE_REG_PHB_GEN_CAP 0x250
83#define PCIE_REG_PHB_TCE_CAP 0x258
84#define PCIE_REG_PHB_IRQ_CAP 0x260
85#define PCIE_REG_PHB_EEH_CAP 0x268
86
87#define PCIE_REG_PAPR_ERR_INJ_CONTROL 0x2b0
88#define PCIE_REG_PAPR_ERR_INJ_ADDR 0x2b8
89#define PCIE_REG_PAPR_ERR_INJ_MASK 0x2c0
90
91
92#define PCIE_REG_SYS_CFG1 0x600
93#define PCIE_REG_SYS_CFG1_CLASS_CODE 0x0000000000ffffffull
94
95#define IODA_TVT0_TTA_MASK 0x000fffffffff0000ull
96#define IODA_TVT0_TTA_SHIFT 4
97#define IODA_TVT0_BUSNUM_VALID_MASK 0x000000000000e000ull
98#define IODA_TVT0_TCE_TABLE_SIZE_MASK 0x0000000000001f00ull
99#define IODA_TVT0_TCE_TABLE_SIZE_SHIFT 8
100#define IODA_TVT0_BUSNUM_VALUE_MASK 0x00000000000000ffull
101#define IODA_TVT0_BUSNUM_VALID_SHIFT 0
102#define IODA_TVT1_DEVNUM_VALID 0x2000000000000000ull
103#define IODA_TVT1_DEVNUM_VALUE_MASK 0x1f00000000000000ull
104#define IODA_TVT1_DEVNUM_VALUE_SHIFT 56
105#define IODA_TVT1_FUNCNUM_VALID 0x0008000000000000ull
106#define IODA_TVT1_FUNCNUM_VALUE_MASK 0x0007000000000000ull
107#define IODA_TVT1_FUNCNUM_VALUE_SHIFT 48
108#define IODA_TVT1_IO_PAGE_SIZE_MASK 0x00001f0000000000ull
109#define IODA_TVT1_IO_PAGE_SIZE_SHIFT 40
110#define IODA_TVT1_PE_NUMBER_MASK 0x000000000000003full
111#define IODA_TVT1_PE_NUMBER_SHIFT 0
112
113#define IODA_TVT_COUNT 64
114
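
The IODA_TVT0/TVT1 definitions above describe a TVT entry as packed bit-fields; generic shift-into-mask helpers are all that is needed to build or decode one. The sketch below only demonstrates the mask/shift arithmetic with two of those fields and makes no claim about their hardware semantics:

#include <stdint.h>
#include <stdio.h>

#define TVT1_FUNCNUM_VALUE_MASK		0x0007000000000000ull
#define TVT1_FUNCNUM_VALUE_SHIFT	48
#define TVT1_PE_NUMBER_MASK		0x000000000000003full
#define TVT1_PE_NUMBER_SHIFT		0

static uint64_t set_field(uint64_t v, uint64_t mask, int shift, uint64_t x)
{
	return (v & ~mask) | ((x << shift) & mask);
}

static uint64_t get_field(uint64_t v, uint64_t mask, int shift)
{
	return (v & mask) >> shift;
}

int main(void)
{
	uint64_t tvt1 = 0;

	tvt1 = set_field(tvt1, TVT1_FUNCNUM_VALUE_MASK,
			 TVT1_FUNCNUM_VALUE_SHIFT, 0x3);
	tvt1 = set_field(tvt1, TVT1_PE_NUMBER_MASK,
			 TVT1_PE_NUMBER_SHIFT, 17);

	printf("tvt1 = 0x%016llx, PE = %llu\n",
	       (unsigned long long)tvt1,
	       (unsigned long long)get_field(tvt1, TVT1_PE_NUMBER_MASK,
					     TVT1_PE_NUMBER_SHIFT));
	return 0;
}
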
115/* UTL Core registers */
116#define PCIE_UTL_SYS_BUS_CONTROL 0x400
117#define PCIE_UTL_STATUS 0x408
118#define PCIE_UTL_SYS_BUS_AGENT_STATUS 0x410
119#define PCIE_UTL_SYS_BUS_AGENT_ERR_SEV 0x418
120#define PCIE_UTL_SYS_BUS_AGENT_IRQ_EN 0x420
121#define PCIE_UTL_SYS_BUS_BURST_SZ_CONF 0x440
122#define PCIE_UTL_REVISION_ID 0x448
123
124#define PCIE_UTL_OUT_POST_HDR_BUF_ALLOC 0x4c0
125#define PCIE_UTL_OUT_POST_DAT_BUF_ALLOC 0x4d0
126#define PCIE_UTL_IN_POST_HDR_BUF_ALLOC 0x4e0
127#define PCIE_UTL_IN_POST_DAT_BUF_ALLOC 0x4f0
128#define PCIE_UTL_OUT_NP_BUF_ALLOC 0x500
129#define PCIE_UTL_IN_NP_BUF_ALLOC 0x510
130#define PCIE_UTL_PCIE_TAGS_ALLOC 0x520
131#define PCIE_UTL_GBIF_READ_TAGS_ALLOC 0x530
132
133#define PCIE_UTL_PCIE_PORT_CONTROL 0x540
134#define PCIE_UTL_PCIE_PORT_STATUS 0x548
135#define PCIE_UTL_PCIE_PORT_ERROR_SEV 0x550
136#define PCIE_UTL_PCIE_PORT_IRQ_EN 0x558
137#define PCIE_UTL_RC_STATUS 0x560
138#define PCIE_UTL_RC_ERR_SEVERITY 0x568
139#define PCIE_UTL_RC_IRQ_EN 0x570
140#define PCIE_UTL_EP_STATUS 0x578
141#define PCIE_UTL_EP_ERR_SEVERITY 0x580
142#define PCIE_UTL_EP_ERR_IRQ_EN 0x588
143
144#define PCIE_UTL_PCI_PM_CTRL1 0x590
145#define PCIE_UTL_PCI_PM_CTRL2 0x598
146
147/* PCIe stack registers */
148#define PCIE_REG_SYSTEM_CONFIG1 0x600
149#define PCIE_REG_SYSTEM_CONFIG2 0x608
150#define PCIE_REG_EP_SYSTEM_CONFIG 0x618
151#define PCIE_REG_EP_FLR 0x620
152#define PCIE_REG_EP_BAR_CONFIG 0x628
153#define PCIE_REG_LINK_CONFIG 0x630
154#define PCIE_REG_PM_CONFIG 0x640
155#define PCIE_REG_DLP_CONTROL 0x650
156#define PCIE_REG_DLP_STATUS 0x658
157#define PCIE_REG_ERR_REPORT_CONTROL 0x660
158#define PCIE_REG_SLOT_CONTROL1 0x670
159#define PCIE_REG_SLOT_CONTROL2 0x678
160#define PCIE_REG_UTL_CONFIG 0x680
161#define PCIE_REG_BUFFERS_CONFIG 0x690
162#define PCIE_REG_ERROR_INJECT 0x698
163#define PCIE_REG_SRIOV_CONFIG 0x6a0
164#define PCIE_REG_PF0_SRIOV_STATUS 0x6a8
165#define PCIE_REG_PF1_SRIOV_STATUS 0x6b0
166#define PCIE_REG_PORT_NUMBER 0x700
167#define PCIE_REG_POR_SYSTEM_CONFIG 0x708
168
169/* PHB internal logic registers */
170#define PCIE_REG_PHB_VERSION 0x800
171#define PCIE_REG_RESET 0x808
172#define PCIE_REG_PHB_CONTROL 0x810
173#define PCIE_REG_PHB_TIMEOUT_CONTROL1 0x878
174#define PCIE_REG_PHB_QUIESCE_DMA 0x888
175#define PCIE_REG_PHB_DMA_READ_TAG_ACTV 0x900
176#define PCIE_REG_PHB_TCE_READ_TAG_ACTV 0x908
177
178/* FIR registers */
179#define PCIE_REG_LEM_FIR_ACCUM 0xc00
180#define PCIE_REG_LEM_FIR_AND_MASK 0xc08
181#define PCIE_REG_LEM_FIR_OR_MASK 0xc10
182#define PCIE_REG_LEM_ACTION0 0xc18
183#define PCIE_REG_LEM_ACTION1 0xc20
184#define PCIE_REG_LEM_ERROR_MASK 0xc30
185#define PCIE_REG_LEM_ERROR_AND_MASK 0xc38
186#define PCIE_REG_LEM_ERROR_OR_MASK 0xc40
187
188/* PHB Error registers */
189#define PCIE_REG_PHB_ERR_STATUS 0xc80
190#define PCIE_REG_PHB_ERR1_STATUS 0xc88
191#define PCIE_REG_PHB_ERR_INJECT 0xc90
192#define PCIE_REG_PHB_ERR_LEM_ENABLE 0xc98
193#define PCIE_REG_PHB_ERR_IRQ_ENABLE 0xca0
194#define PCIE_REG_PHB_ERR_FREEZE_ENABLE 0xca8
195#define PCIE_REG_PHB_ERR_SIDE_ENABLE 0xcb8
196#define PCIE_REG_PHB_ERR_LOG_0 0xcc0
197#define PCIE_REG_PHB_ERR_LOG_1 0xcc8
198#define PCIE_REG_PHB_ERR_STATUS_MASK 0xcd0
199#define PCIE_REG_PHB_ERR1_STATUS_MASK 0xcd8
200
201#define PCIE_REG_MMIO_ERR_STATUS 0xd00
202#define PCIE_REG_MMIO_ERR1_STATUS 0xd08
203#define PCIE_REG_MMIO_ERR_INJECT 0xd10
204#define PCIE_REG_MMIO_ERR_LEM_ENABLE 0xd18
205#define PCIE_REG_MMIO_ERR_IRQ_ENABLE 0xd20
206#define PCIE_REG_MMIO_ERR_FREEZE_ENABLE 0xd28
207#define PCIE_REG_MMIO_ERR_SIDE_ENABLE 0xd38
208#define PCIE_REG_MMIO_ERR_LOG_0 0xd40
209#define PCIE_REG_MMIO_ERR_LOG_1 0xd48
210#define PCIE_REG_MMIO_ERR_STATUS_MASK 0xd50
211#define PCIE_REG_MMIO_ERR1_STATUS_MASK 0xd58
212
213#define PCIE_REG_DMA_ERR_STATUS 0xd80
214#define PCIE_REG_DMA_ERR1_STATUS 0xd88
215#define PCIE_REG_DMA_ERR_INJECT 0xd90
216#define PCIE_REG_DMA_ERR_LEM_ENABLE 0xd98
217#define PCIE_REG_DMA_ERR_IRQ_ENABLE 0xda0
218#define PCIE_REG_DMA_ERR_FREEZE_ENABLE 0xda8
219#define PCIE_REG_DMA_ERR_SIDE_ENABLE 0xdb8
220#define PCIE_REG_DMA_ERR_LOG_0 0xdc0
221#define PCIE_REG_DMA_ERR_LOG_1 0xdc8
222#define PCIE_REG_DMA_ERR_STATUS_MASK 0xdd0
223#define PCIE_REG_DMA_ERR1_STATUS_MASK 0xdd8
224
225/* Shortcuts for access to the above using the PHB definitions
226 * with an offset
227 */
228#define PCIE_REG_ERR_PHB_OFFSET 0x0
229#define PCIE_REG_ERR_MMIO_OFFSET 0x80
230#define PCIE_REG_ERR_DMA_OFFSET 0x100
231
232/* Debug and Trace registers */
233#define PCIE_REG_PHB_DEBUG_CONTROL0 0xe00
234#define PCIE_REG_PHB_DEBUG_STATUS0 0xe08
235#define PCIE_REG_PHB_DEBUG_CONTROL1 0xe10
236#define PCIE_REG_PHB_DEBUG_STATUS1 0xe18
237#define PCIE_REG_PHB_DEBUG_CONTROL2 0xe20
238#define PCIE_REG_PHB_DEBUG_STATUS2 0xe28
239#define PCIE_REG_PHB_DEBUG_CONTROL3 0xe30
240#define PCIE_REG_PHB_DEBUG_STATUS3 0xe38
241#define PCIE_REG_PHB_DEBUG_CONTROL4 0xe40
242#define PCIE_REG_PHB_DEBUG_STATUS4 0xe48
243#define PCIE_REG_PHB_DEBUG_CONTROL5 0xe50
244#define PCIE_REG_PHB_DEBUG_STATUS5 0xe58
245#define PCIE_REG_PHB_DEBUG_CONTROL6 0xe60
246#define PCIE_REG_PHB_DEBUG_STATUS6 0xe68
247
248/* Definition for PCIe errors */
249struct wsp_pcie_err_log_data {
250 __u64 phb_err;
251 __u64 phb_err1;
252 __u64 phb_log0;
253 __u64 phb_log1;
254 __u64 mmio_err;
255 __u64 mmio_err1;
256 __u64 mmio_log0;
257 __u64 mmio_log1;
258 __u64 dma_err;
259 __u64 dma_err1;
260 __u64 dma_log0;
261 __u64 dma_log1;
262 __u64 utl_sys_err;
263 __u64 utl_port_err;
264 __u64 utl_rc_err;
265 __u64 unused;
266};
267
268#endif /* __WSP_PCI_H */