aboutsummaryrefslogtreecommitdiffstats
path: root/arch
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-06-12 23:11:38 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2014-06-12 23:11:38 -0400
commitb7c8c1945cfbcfb9d60f5be957b4339c6eee4201 (patch)
treeef27f4b91fc98fcea70b8ef2b7d917c9814a0fbb /arch
parent88bbfb4a6267ff90a466ade9762d9a8fff2bb1bb (diff)
parentad718622ab6d500c870772b1b8dda46fa2195e6d (diff)
Merge branch 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc
Pull more powerpc updates from Ben Herrenschmidt: "Here are the remaining bits I was mentioning earlier. Mostly bug fixes and new selftests from Michael (yay !). He also removed the WSP platform and A2 core support which were dead before release, so less clutter. One little "feature" I snuck in is the doorbell IPI support for non-virtualized P8 which speeds up IPIs significantly between threads of a core" * 'next' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc: (34 commits) powerpc/book3s: Fix some ABIv2 issues in machine check code powerpc/book3s: Fix guest MC delivery mechanism to avoid soft lockups in guest. powerpc/book3s: Increment the mce counter during machine_check_early call. powerpc/book3s: Add stack overflow check in machine check handler. powerpc/book3s: Fix machine check handling for unhandled errors powerpc/eeh: Dump PE location code powerpc/powernv: Enable POWER8 doorbell IPIs powerpc/cpuidle: Only clear LPCR decrementer wakeup bit on fast sleep entry powerpc/powernv: Fix killed EEH event powerpc: fix typo 'CONFIG_PMAC' powerpc: fix typo 'CONFIG_PPC_CPU' powerpc/powernv: Don't escalate non-existing frozen PE powerpc/eeh: Report frozen parent PE prior to child PE powerpc/eeh: Clear frozen state for child PE powerpc/powernv: Reduce panic timeout from 180s to 10s powerpc/xmon: avoid format string leaking to printk selftests/powerpc: Add tests of PMU EBBs selftests/powerpc: Add support for skipping tests selftests/powerpc: Put the test in a separate process group selftests/powerpc: Fix instruction loop for ABIv2 (LE) ...
Diffstat (limited to 'arch')
-rw-r--r--arch/powerpc/Kconfig.debug5
-rw-r--r--arch/powerpc/configs/chroma_defconfig307
-rw-r--r--arch/powerpc/include/asm/cpm2.h1
-rw-r--r--arch/powerpc/include/asm/eeh.h1
-rw-r--r--arch/powerpc/include/asm/eeh_event.h2
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h4
-rw-r--r--arch/powerpc/include/asm/opal.h102
-rw-r--r--arch/powerpc/include/asm/reg_a2.h9
-rw-r--r--arch/powerpc/include/asm/switch_to.h8
-rw-r--r--arch/powerpc/include/asm/wsp.h14
-rw-r--r--arch/powerpc/include/uapi/asm/cputable.h1
-rw-r--r--arch/powerpc/kernel/Makefile1
-rw-r--r--arch/powerpc/kernel/cpu_setup_a2.S120
-rw-r--r--arch/powerpc/kernel/cpu_setup_power.S2
-rw-r--r--arch/powerpc/kernel/cputable.c41
-rw-r--r--arch/powerpc/kernel/eeh.c38
-rw-r--r--arch/powerpc/kernel/eeh_driver.c24
-rw-r--r--arch/powerpc/kernel/eeh_event.c21
-rw-r--r--arch/powerpc/kernel/eeh_pe.c60
-rw-r--r--arch/powerpc/kernel/entry_64.S6
-rw-r--r--arch/powerpc/kernel/exceptions-64e.S16
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S64
-rw-r--r--arch/powerpc/kernel/head_40x.S19
-rw-r--r--arch/powerpc/kernel/process.c8
-rw-r--r--arch/powerpc/kernel/setup-common.c2
-rw-r--r--arch/powerpc/kernel/time.c2
-rw-r--r--arch/powerpc/kernel/traps.c2
-rw-r--r--arch/powerpc/kernel/udbg.c2
-rw-r--r--arch/powerpc/kernel/udbg_16550.c11
-rw-r--r--arch/powerpc/kvm/book3s_hv_ras.c15
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S19
-rw-r--r--arch/powerpc/lib/sstep.c2
-rw-r--r--arch/powerpc/platforms/Kconfig1
-rw-r--r--arch/powerpc/platforms/Kconfig.cputype6
-rw-r--r--arch/powerpc/platforms/Makefile1
-rw-r--r--arch/powerpc/platforms/cell/spufs/spufs.h1
-rw-r--r--arch/powerpc/platforms/powernv/Kconfig1
-rw-r--r--arch/powerpc/platforms/powernv/Makefile4
-rw-r--r--arch/powerpc/platforms/powernv/eeh-ioda.c109
-rw-r--r--arch/powerpc/platforms/powernv/opal-msglog.c6
-rw-r--r--arch/powerpc/platforms/powernv/opal-sysparam.c4
-rw-r--r--arch/powerpc/platforms/powernv/pci.c81
-rw-r--r--arch/powerpc/platforms/powernv/setup.c3
-rw-r--r--arch/powerpc/platforms/powernv/smp.c6
-rw-r--r--arch/powerpc/platforms/pseries/Kconfig1
-rw-r--r--arch/powerpc/platforms/wsp/Kconfig30
-rw-r--r--arch/powerpc/platforms/wsp/Makefile10
-rw-r--r--arch/powerpc/platforms/wsp/chroma.c56
-rw-r--r--arch/powerpc/platforms/wsp/h8.c135
-rw-r--r--arch/powerpc/platforms/wsp/ics.c762
-rw-r--r--arch/powerpc/platforms/wsp/ics.h25
-rw-r--r--arch/powerpc/platforms/wsp/msi.c102
-rw-r--r--arch/powerpc/platforms/wsp/msi.h19
-rw-r--r--arch/powerpc/platforms/wsp/opb_pic.c321
-rw-r--r--arch/powerpc/platforms/wsp/psr2.c67
-rw-r--r--arch/powerpc/platforms/wsp/scom_smp.c435
-rw-r--r--arch/powerpc/platforms/wsp/scom_wsp.c82
-rw-r--r--arch/powerpc/platforms/wsp/setup.c36
-rw-r--r--arch/powerpc/platforms/wsp/smp.c88
-rw-r--r--arch/powerpc/platforms/wsp/wsp.c117
-rw-r--r--arch/powerpc/platforms/wsp/wsp.h29
-rw-r--r--arch/powerpc/platforms/wsp/wsp_pci.c1134
-rw-r--r--arch/powerpc/platforms/wsp/wsp_pci.h268
-rw-r--r--arch/powerpc/sysdev/xics/icp-native.c9
-rw-r--r--arch/powerpc/xmon/nonstdio.c2
65 files changed, 427 insertions, 4453 deletions
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 21c9f304e96c..790352f93700 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -235,11 +235,6 @@ config PPC_EARLY_DEBUG_USBGECKO
235 Select this to enable early debugging for Nintendo GameCube/Wii 235 Select this to enable early debugging for Nintendo GameCube/Wii
236 consoles via an external USB Gecko adapter. 236 consoles via an external USB Gecko adapter.
237 237
238config PPC_EARLY_DEBUG_WSP
239 bool "Early debugging via WSP's internal UART"
240 depends on PPC_WSP
241 select PPC_UDBG_16550
242
243config PPC_EARLY_DEBUG_PS3GELIC 238config PPC_EARLY_DEBUG_PS3GELIC
244 bool "Early debugging through the PS3 Ethernet port" 239 bool "Early debugging through the PS3 Ethernet port"
245 depends on PPC_PS3 240 depends on PPC_PS3
diff --git a/arch/powerpc/configs/chroma_defconfig b/arch/powerpc/configs/chroma_defconfig
deleted file mode 100644
index 4f35fc462385..000000000000
--- a/arch/powerpc/configs/chroma_defconfig
+++ /dev/null
@@ -1,307 +0,0 @@
1CONFIG_PPC64=y
2CONFIG_PPC_BOOK3E_64=y
3# CONFIG_VIRT_CPU_ACCOUNTING_NATIVE is not set
4CONFIG_SMP=y
5CONFIG_NR_CPUS=256
6CONFIG_EXPERIMENTAL=y
7CONFIG_SYSVIPC=y
8CONFIG_POSIX_MQUEUE=y
9CONFIG_BSD_PROCESS_ACCT=y
10CONFIG_TASKSTATS=y
11CONFIG_TASK_DELAY_ACCT=y
12CONFIG_TASK_XACCT=y
13CONFIG_TASK_IO_ACCOUNTING=y
14CONFIG_AUDIT=y
15CONFIG_AUDITSYSCALL=y
16CONFIG_IKCONFIG=y
17CONFIG_IKCONFIG_PROC=y
18CONFIG_LOG_BUF_SHIFT=19
19CONFIG_CGROUPS=y
20CONFIG_CGROUP_DEVICE=y
21CONFIG_CPUSETS=y
22CONFIG_CGROUP_CPUACCT=y
23CONFIG_RESOURCE_COUNTERS=y
24CONFIG_CGROUP_MEMCG=y
25CONFIG_CGROUP_MEMCG_SWAP=y
26CONFIG_NAMESPACES=y
27CONFIG_RELAY=y
28CONFIG_BLK_DEV_INITRD=y
29CONFIG_INITRAMFS_SOURCE=""
30CONFIG_RD_BZIP2=y
31CONFIG_RD_LZMA=y
32CONFIG_INITRAMFS_COMPRESSION_GZIP=y
33CONFIG_KALLSYMS_ALL=y
34CONFIG_EMBEDDED=y
35CONFIG_PERF_EVENTS=y
36CONFIG_PROFILING=y
37CONFIG_OPROFILE=y
38CONFIG_KPROBES=y
39CONFIG_MODULES=y
40CONFIG_MODULE_FORCE_LOAD=y
41CONFIG_MODULE_UNLOAD=y
42CONFIG_MODULE_FORCE_UNLOAD=y
43CONFIG_MODVERSIONS=y
44CONFIG_MODULE_SRCVERSION_ALL=y
45CONFIG_SCOM_DEBUGFS=y
46CONFIG_PPC_A2_DD2=y
47CONFIG_KVM_GUEST=y
48CONFIG_NO_HZ=y
49CONFIG_HIGH_RES_TIMERS=y
50CONFIG_HZ_100=y
51# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
52CONFIG_BINFMT_MISC=y
53CONFIG_NUMA=y
54# CONFIG_MIGRATION is not set
55CONFIG_PPC_64K_PAGES=y
56CONFIG_SCHED_SMT=y
57CONFIG_CMDLINE_BOOL=y
58CONFIG_CMDLINE=""
59# CONFIG_SECCOMP is not set
60CONFIG_PCIEPORTBUS=y
61# CONFIG_PCIEASPM is not set
62CONFIG_PCI_MSI=y
63CONFIG_PACKET=y
64CONFIG_UNIX=y
65CONFIG_XFRM_USER=m
66CONFIG_XFRM_SUB_POLICY=y
67CONFIG_XFRM_STATISTICS=y
68CONFIG_NET_KEY=m
69CONFIG_NET_KEY_MIGRATE=y
70CONFIG_INET=y
71CONFIG_IP_MULTICAST=y
72CONFIG_IP_ADVANCED_ROUTER=y
73CONFIG_IP_ROUTE_MULTIPATH=y
74CONFIG_IP_ROUTE_VERBOSE=y
75CONFIG_IP_PNP=y
76CONFIG_IP_PNP_DHCP=y
77CONFIG_IP_PNP_BOOTP=y
78CONFIG_NET_IPIP=y
79CONFIG_IP_MROUTE=y
80CONFIG_IP_PIMSM_V1=y
81CONFIG_IP_PIMSM_V2=y
82CONFIG_SYN_COOKIES=y
83CONFIG_INET_AH=m
84CONFIG_INET_ESP=m
85CONFIG_INET_IPCOMP=m
86CONFIG_IPV6=y
87CONFIG_IPV6_PRIVACY=y
88CONFIG_IPV6_ROUTER_PREF=y
89CONFIG_IPV6_ROUTE_INFO=y
90CONFIG_IPV6_OPTIMISTIC_DAD=y
91CONFIG_INET6_AH=y
92CONFIG_INET6_ESP=y
93CONFIG_INET6_IPCOMP=y
94CONFIG_IPV6_MIP6=y
95CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y
96CONFIG_IPV6_TUNNEL=y
97CONFIG_IPV6_MULTIPLE_TABLES=y
98CONFIG_IPV6_SUBTREES=y
99CONFIG_IPV6_MROUTE=y
100CONFIG_IPV6_PIMSM_V2=y
101CONFIG_NETFILTER=y
102CONFIG_NF_CONNTRACK=m
103CONFIG_NF_CONNTRACK_EVENTS=y
104CONFIG_NF_CT_PROTO_UDPLITE=m
105CONFIG_NF_CONNTRACK_FTP=m
106CONFIG_NF_CONNTRACK_IRC=m
107CONFIG_NF_CONNTRACK_TFTP=m
108CONFIG_NF_CT_NETLINK=m
109CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
110CONFIG_NETFILTER_XT_TARGET_CONNMARK=m
111CONFIG_NETFILTER_XT_TARGET_MARK=m
112CONFIG_NETFILTER_XT_TARGET_NFLOG=m
113CONFIG_NETFILTER_XT_TARGET_NFQUEUE=m
114CONFIG_NETFILTER_XT_TARGET_TCPMSS=m
115CONFIG_NETFILTER_XT_MATCH_COMMENT=m
116CONFIG_NETFILTER_XT_MATCH_CONNBYTES=m
117CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=m
118CONFIG_NETFILTER_XT_MATCH_CONNMARK=m
119CONFIG_NETFILTER_XT_MATCH_CONNTRACK=m
120CONFIG_NETFILTER_XT_MATCH_DCCP=m
121CONFIG_NETFILTER_XT_MATCH_DSCP=m
122CONFIG_NETFILTER_XT_MATCH_ESP=m
123CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=m
124CONFIG_NETFILTER_XT_MATCH_HELPER=m
125CONFIG_NETFILTER_XT_MATCH_IPRANGE=m
126CONFIG_NETFILTER_XT_MATCH_LENGTH=m
127CONFIG_NETFILTER_XT_MATCH_LIMIT=m
128CONFIG_NETFILTER_XT_MATCH_MAC=m
129CONFIG_NETFILTER_XT_MATCH_MARK=m
130CONFIG_NETFILTER_XT_MATCH_MULTIPORT=m
131CONFIG_NETFILTER_XT_MATCH_OWNER=m
132CONFIG_NETFILTER_XT_MATCH_POLICY=m
133CONFIG_NETFILTER_XT_MATCH_PKTTYPE=m
134CONFIG_NETFILTER_XT_MATCH_QUOTA=m
135CONFIG_NETFILTER_XT_MATCH_RATEEST=m
136CONFIG_NETFILTER_XT_MATCH_REALM=m
137CONFIG_NETFILTER_XT_MATCH_RECENT=m
138CONFIG_NETFILTER_XT_MATCH_SCTP=m
139CONFIG_NETFILTER_XT_MATCH_STATE=m
140CONFIG_NETFILTER_XT_MATCH_STATISTIC=m
141CONFIG_NETFILTER_XT_MATCH_STRING=m
142CONFIG_NETFILTER_XT_MATCH_TCPMSS=m
143CONFIG_NETFILTER_XT_MATCH_TIME=m
144CONFIG_NETFILTER_XT_MATCH_U32=m
145CONFIG_NF_CONNTRACK_IPV4=m
146CONFIG_IP_NF_QUEUE=m
147CONFIG_IP_NF_IPTABLES=m
148CONFIG_IP_NF_MATCH_AH=m
149CONFIG_IP_NF_MATCH_ECN=m
150CONFIG_IP_NF_MATCH_TTL=m
151CONFIG_IP_NF_FILTER=m
152CONFIG_IP_NF_TARGET_REJECT=m
153CONFIG_IP_NF_TARGET_LOG=m
154CONFIG_IP_NF_TARGET_ULOG=m
155CONFIG_NF_NAT=m
156CONFIG_IP_NF_TARGET_MASQUERADE=m
157CONFIG_IP_NF_TARGET_NETMAP=m
158CONFIG_IP_NF_TARGET_REDIRECT=m
159CONFIG_NET_TCPPROBE=y
160# CONFIG_WIRELESS is not set
161CONFIG_NET_9P=y
162CONFIG_NET_9P_DEBUG=y
163CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
164CONFIG_DEVTMPFS=y
165CONFIG_MTD=y
166CONFIG_MTD_CHAR=y
167CONFIG_MTD_BLOCK=y
168CONFIG_MTD_CFI=y
169CONFIG_MTD_CFI_ADV_OPTIONS=y
170CONFIG_MTD_CFI_LE_BYTE_SWAP=y
171CONFIG_MTD_CFI_INTELEXT=y
172CONFIG_MTD_CFI_AMDSTD=y
173CONFIG_MTD_CFI_STAA=y
174CONFIG_MTD_PHYSMAP_OF=y
175CONFIG_PROC_DEVICETREE=y
176CONFIG_BLK_DEV_LOOP=y
177CONFIG_BLK_DEV_CRYPTOLOOP=y
178CONFIG_BLK_DEV_NBD=m
179CONFIG_BLK_DEV_RAM=y
180CONFIG_BLK_DEV_RAM_SIZE=65536
181CONFIG_CDROM_PKTCDVD=y
182CONFIG_MISC_DEVICES=y
183CONFIG_BLK_DEV_SD=y
184CONFIG_BLK_DEV_SR=y
185CONFIG_BLK_DEV_SR_VENDOR=y
186CONFIG_CHR_DEV_SG=y
187CONFIG_SCSI_MULTI_LUN=y
188CONFIG_SCSI_CONSTANTS=y
189CONFIG_SCSI_SPI_ATTRS=y
190CONFIG_SCSI_FC_ATTRS=y
191CONFIG_SCSI_ISCSI_ATTRS=m
192CONFIG_SCSI_SAS_ATTRS=m
193CONFIG_SCSI_SRP_ATTRS=y
194CONFIG_ATA=y
195CONFIG_SATA_AHCI=y
196CONFIG_SATA_SIL24=y
197CONFIG_SATA_MV=y
198CONFIG_SATA_SIL=y
199CONFIG_PATA_CMD64X=y
200CONFIG_PATA_MARVELL=y
201CONFIG_PATA_SIL680=y
202CONFIG_MD=y
203CONFIG_BLK_DEV_MD=y
204CONFIG_MD_LINEAR=y
205CONFIG_BLK_DEV_DM=y
206CONFIG_DM_CRYPT=y
207CONFIG_DM_SNAPSHOT=y
208CONFIG_DM_MIRROR=y
209CONFIG_DM_ZERO=y
210CONFIG_DM_UEVENT=y
211CONFIG_NETDEVICES=y
212CONFIG_TUN=y
213CONFIG_E1000E=y
214CONFIG_TIGON3=y
215# CONFIG_WLAN is not set
216# CONFIG_INPUT is not set
217# CONFIG_SERIO is not set
218# CONFIG_VT is not set
219CONFIG_DEVPTS_MULTIPLE_INSTANCES=y
220CONFIG_SERIAL_8250=y
221CONFIG_SERIAL_8250_CONSOLE=y
222CONFIG_HW_RANDOM=y
223CONFIG_RAW_DRIVER=y
224CONFIG_MAX_RAW_DEVS=1024
225# CONFIG_HWMON is not set
226# CONFIG_VGA_ARB is not set
227# CONFIG_USB_SUPPORT is not set
228CONFIG_EDAC=y
229CONFIG_EDAC_MM_EDAC=y
230CONFIG_RTC_CLASS=y
231CONFIG_RTC_DRV_DS1511=y
232CONFIG_RTC_DRV_DS1553=y
233CONFIG_EXT2_FS=y
234CONFIG_EXT2_FS_XATTR=y
235CONFIG_EXT2_FS_POSIX_ACL=y
236CONFIG_EXT2_FS_SECURITY=y
237CONFIG_EXT2_FS_XIP=y
238CONFIG_EXT3_FS=y
239# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
240CONFIG_EXT3_FS_POSIX_ACL=y
241CONFIG_EXT3_FS_SECURITY=y
242CONFIG_EXT4_FS=y
243# CONFIG_DNOTIFY is not set
244CONFIG_FUSE_FS=y
245CONFIG_ISO9660_FS=y
246CONFIG_JOLIET=y
247CONFIG_ZISOFS=y
248CONFIG_UDF_FS=m
249CONFIG_MSDOS_FS=y
250CONFIG_VFAT_FS=y
251CONFIG_PROC_KCORE=y
252CONFIG_TMPFS=y
253CONFIG_TMPFS_POSIX_ACL=y
254CONFIG_CONFIGFS_FS=m
255CONFIG_CRAMFS=y
256CONFIG_NFS_FS=y
257CONFIG_NFS_V3=y
258CONFIG_NFS_V3_ACL=y
259CONFIG_NFS_V4=y
260CONFIG_NFS_V4_1=y
261CONFIG_ROOT_NFS=y
262CONFIG_CIFS=y
263CONFIG_CIFS_WEAK_PW_HASH=y
264CONFIG_CIFS_XATTR=y
265CONFIG_CIFS_POSIX=y
266CONFIG_NLS_CODEPAGE_437=y
267CONFIG_NLS_ASCII=y
268CONFIG_NLS_ISO8859_1=y
269CONFIG_CRC_CCITT=m
270CONFIG_CRC_T10DIF=y
271CONFIG_LIBCRC32C=m
272CONFIG_PRINTK_TIME=y
273CONFIG_MAGIC_SYSRQ=y
274CONFIG_STRIP_ASM_SYMS=y
275CONFIG_DETECT_HUNG_TASK=y
276# CONFIG_SCHED_DEBUG is not set
277CONFIG_DEBUG_INFO=y
278CONFIG_FTRACE_SYSCALLS=y
279CONFIG_PPC_EMULATED_STATS=y
280CONFIG_XMON=y
281CONFIG_XMON_DEFAULT=y
282CONFIG_IRQ_DOMAIN_DEBUG=y
283CONFIG_PPC_EARLY_DEBUG=y
284CONFIG_KEYS_DEBUG_PROC_KEYS=y
285CONFIG_CRYPTO_NULL=m
286CONFIG_CRYPTO_TEST=m
287CONFIG_CRYPTO_CCM=m
288CONFIG_CRYPTO_GCM=m
289CONFIG_CRYPTO_PCBC=m
290CONFIG_CRYPTO_MICHAEL_MIC=m
291CONFIG_CRYPTO_SHA256=m
292CONFIG_CRYPTO_SHA512=m
293CONFIG_CRYPTO_TGR192=m
294CONFIG_CRYPTO_WP512=m
295CONFIG_CRYPTO_AES=m
296CONFIG_CRYPTO_ANUBIS=m
297CONFIG_CRYPTO_BLOWFISH=m
298CONFIG_CRYPTO_CAST5=m
299CONFIG_CRYPTO_CAST6=m
300CONFIG_CRYPTO_KHAZAD=m
301CONFIG_CRYPTO_SALSA20=m
302CONFIG_CRYPTO_SERPENT=m
303CONFIG_CRYPTO_TEA=m
304CONFIG_CRYPTO_TWOFISH=m
305CONFIG_CRYPTO_LZO=m
306# CONFIG_CRYPTO_ANSI_CPRNG is not set
307CONFIG_VIRTUALIZATION=y
diff --git a/arch/powerpc/include/asm/cpm2.h b/arch/powerpc/include/asm/cpm2.h
index f42e9baf3a4e..7c8608b09694 100644
--- a/arch/powerpc/include/asm/cpm2.h
+++ b/arch/powerpc/include/asm/cpm2.h
@@ -489,7 +489,6 @@ typedef struct scc_trans {
489#define FCC_GFMR_TCI ((uint)0x20000000) 489#define FCC_GFMR_TCI ((uint)0x20000000)
490#define FCC_GFMR_TRX ((uint)0x10000000) 490#define FCC_GFMR_TRX ((uint)0x10000000)
491#define FCC_GFMR_TTX ((uint)0x08000000) 491#define FCC_GFMR_TTX ((uint)0x08000000)
492#define FCC_GFMR_TTX ((uint)0x08000000)
493#define FCC_GFMR_CDP ((uint)0x04000000) 492#define FCC_GFMR_CDP ((uint)0x04000000)
494#define FCC_GFMR_CTSP ((uint)0x02000000) 493#define FCC_GFMR_CTSP ((uint)0x02000000)
495#define FCC_GFMR_CDS ((uint)0x01000000) 494#define FCC_GFMR_CDS ((uint)0x01000000)
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index b76f58c124ca..fab7743c2640 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -254,6 +254,7 @@ void *eeh_pe_traverse(struct eeh_pe *root,
254void *eeh_pe_dev_traverse(struct eeh_pe *root, 254void *eeh_pe_dev_traverse(struct eeh_pe *root,
255 eeh_traverse_func fn, void *flag); 255 eeh_traverse_func fn, void *flag);
256void eeh_pe_restore_bars(struct eeh_pe *pe); 256void eeh_pe_restore_bars(struct eeh_pe *pe);
257const char *eeh_pe_loc_get(struct eeh_pe *pe);
257struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe); 258struct pci_bus *eeh_pe_bus_get(struct eeh_pe *pe);
258 259
259void *eeh_dev_init(struct device_node *dn, void *data); 260void *eeh_dev_init(struct device_node *dn, void *data);
diff --git a/arch/powerpc/include/asm/eeh_event.h b/arch/powerpc/include/asm/eeh_event.h
index 89d5670b2eeb..1e551a2d6f82 100644
--- a/arch/powerpc/include/asm/eeh_event.h
+++ b/arch/powerpc/include/asm/eeh_event.h
@@ -33,7 +33,7 @@ struct eeh_event {
33 33
34int eeh_event_init(void); 34int eeh_event_init(void);
35int eeh_send_failure_event(struct eeh_pe *pe); 35int eeh_send_failure_event(struct eeh_pe *pe);
36void eeh_remove_event(struct eeh_pe *pe); 36void eeh_remove_event(struct eeh_pe *pe, bool force);
37void eeh_handle_event(struct eeh_pe *pe); 37void eeh_handle_event(struct eeh_pe *pe);
38 38
39#endif /* __KERNEL__ */ 39#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index 901dac6b6cb7..d0918e09557f 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -223,10 +223,6 @@ typedef struct {
223 unsigned int id; 223 unsigned int id;
224 unsigned int active; 224 unsigned int active;
225 unsigned long vdso_base; 225 unsigned long vdso_base;
226#ifdef CONFIG_PPC_ICSWX
227 struct spinlock *cop_lockp; /* guard cop related stuff */
228 unsigned long acop; /* mask of enabled coprocessor types */
229#endif /* CONFIG_PPC_ICSWX */
230#ifdef CONFIG_PPC_MM_SLICES 226#ifdef CONFIG_PPC_MM_SLICES
231 u64 low_slices_psize; /* SLB page size encodings */ 227 u64 low_slices_psize; /* SLB page size encodings */
232 u64 high_slices_psize; /* 4 bits per slice for now */ 228 u64 high_slices_psize; /* 4 bits per slice for now */
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index cb15cbb51600..460018889ba9 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -599,9 +599,9 @@ enum {
599}; 599};
600 600
601struct OpalIoPhbErrorCommon { 601struct OpalIoPhbErrorCommon {
602 uint32_t version; 602 __be32 version;
603 uint32_t ioType; 603 __be32 ioType;
604 uint32_t len; 604 __be32 len;
605}; 605};
606 606
607struct OpalIoP7IOCPhbErrorData { 607struct OpalIoP7IOCPhbErrorData {
@@ -666,64 +666,64 @@ struct OpalIoP7IOCPhbErrorData {
666struct OpalIoPhb3ErrorData { 666struct OpalIoPhb3ErrorData {
667 struct OpalIoPhbErrorCommon common; 667 struct OpalIoPhbErrorCommon common;
668 668
669 uint32_t brdgCtl; 669 __be32 brdgCtl;
670 670
671 /* PHB3 UTL regs */ 671 /* PHB3 UTL regs */
672 uint32_t portStatusReg; 672 __be32 portStatusReg;
673 uint32_t rootCmplxStatus; 673 __be32 rootCmplxStatus;
674 uint32_t busAgentStatus; 674 __be32 busAgentStatus;
675 675
676 /* PHB3 cfg regs */ 676 /* PHB3 cfg regs */
677 uint32_t deviceStatus; 677 __be32 deviceStatus;
678 uint32_t slotStatus; 678 __be32 slotStatus;
679 uint32_t linkStatus; 679 __be32 linkStatus;
680 uint32_t devCmdStatus; 680 __be32 devCmdStatus;
681 uint32_t devSecStatus; 681 __be32 devSecStatus;
682 682
683 /* cfg AER regs */ 683 /* cfg AER regs */
684 uint32_t rootErrorStatus; 684 __be32 rootErrorStatus;
685 uint32_t uncorrErrorStatus; 685 __be32 uncorrErrorStatus;
686 uint32_t corrErrorStatus; 686 __be32 corrErrorStatus;
687 uint32_t tlpHdr1; 687 __be32 tlpHdr1;
688 uint32_t tlpHdr2; 688 __be32 tlpHdr2;
689 uint32_t tlpHdr3; 689 __be32 tlpHdr3;
690 uint32_t tlpHdr4; 690 __be32 tlpHdr4;
691 uint32_t sourceId; 691 __be32 sourceId;
692 692
693 uint32_t rsv3; 693 __be32 rsv3;
694 694
695 /* Record data about the call to allocate a buffer */ 695 /* Record data about the call to allocate a buffer */
696 uint64_t errorClass; 696 __be64 errorClass;
697 uint64_t correlator; 697 __be64 correlator;
698 698
699 uint64_t nFir; /* 000 */ 699 __be64 nFir; /* 000 */
700 uint64_t nFirMask; /* 003 */ 700 __be64 nFirMask; /* 003 */
701 uint64_t nFirWOF; /* 008 */ 701 __be64 nFirWOF; /* 008 */
702 702
703 /* PHB3 MMIO Error Regs */ 703 /* PHB3 MMIO Error Regs */
704 uint64_t phbPlssr; /* 120 */ 704 __be64 phbPlssr; /* 120 */
705 uint64_t phbCsr; /* 110 */ 705 __be64 phbCsr; /* 110 */
706 uint64_t lemFir; /* C00 */ 706 __be64 lemFir; /* C00 */
707 uint64_t lemErrorMask; /* C18 */ 707 __be64 lemErrorMask; /* C18 */
708 uint64_t lemWOF; /* C40 */ 708 __be64 lemWOF; /* C40 */
709 uint64_t phbErrorStatus; /* C80 */ 709 __be64 phbErrorStatus; /* C80 */
710 uint64_t phbFirstErrorStatus; /* C88 */ 710 __be64 phbFirstErrorStatus; /* C88 */
711 uint64_t phbErrorLog0; /* CC0 */ 711 __be64 phbErrorLog0; /* CC0 */
712 uint64_t phbErrorLog1; /* CC8 */ 712 __be64 phbErrorLog1; /* CC8 */
713 uint64_t mmioErrorStatus; /* D00 */ 713 __be64 mmioErrorStatus; /* D00 */
714 uint64_t mmioFirstErrorStatus; /* D08 */ 714 __be64 mmioFirstErrorStatus; /* D08 */
715 uint64_t mmioErrorLog0; /* D40 */ 715 __be64 mmioErrorLog0; /* D40 */
716 uint64_t mmioErrorLog1; /* D48 */ 716 __be64 mmioErrorLog1; /* D48 */
717 uint64_t dma0ErrorStatus; /* D80 */ 717 __be64 dma0ErrorStatus; /* D80 */
718 uint64_t dma0FirstErrorStatus; /* D88 */ 718 __be64 dma0FirstErrorStatus; /* D88 */
719 uint64_t dma0ErrorLog0; /* DC0 */ 719 __be64 dma0ErrorLog0; /* DC0 */
720 uint64_t dma0ErrorLog1; /* DC8 */ 720 __be64 dma0ErrorLog1; /* DC8 */
721 uint64_t dma1ErrorStatus; /* E00 */ 721 __be64 dma1ErrorStatus; /* E00 */
722 uint64_t dma1FirstErrorStatus; /* E08 */ 722 __be64 dma1FirstErrorStatus; /* E08 */
723 uint64_t dma1ErrorLog0; /* E40 */ 723 __be64 dma1ErrorLog0; /* E40 */
724 uint64_t dma1ErrorLog1; /* E48 */ 724 __be64 dma1ErrorLog1; /* E48 */
725 uint64_t pestA[OPAL_PHB3_NUM_PEST_REGS]; 725 __be64 pestA[OPAL_PHB3_NUM_PEST_REGS];
726 uint64_t pestB[OPAL_PHB3_NUM_PEST_REGS]; 726 __be64 pestB[OPAL_PHB3_NUM_PEST_REGS];
727}; 727};
728 728
729enum { 729enum {
@@ -851,8 +851,8 @@ int64_t opal_pci_mask_pe_error(uint64_t phb_id, uint16_t pe_number, uint8_t erro
851int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action); 851int64_t opal_set_slot_led_status(uint64_t phb_id, uint64_t slot_id, uint8_t led_type, uint8_t led_action);
852int64_t opal_get_epow_status(__be64 *status); 852int64_t opal_get_epow_status(__be64 *status);
853int64_t opal_set_system_attention_led(uint8_t led_action); 853int64_t opal_set_system_attention_led(uint8_t led_action);
854int64_t opal_pci_next_error(uint64_t phb_id, uint64_t *first_frozen_pe, 854int64_t opal_pci_next_error(uint64_t phb_id, __be64 *first_frozen_pe,
855 uint16_t *pci_error_type, uint16_t *severity); 855 __be16 *pci_error_type, __be16 *severity);
856int64_t opal_pci_poll(uint64_t phb_id); 856int64_t opal_pci_poll(uint64_t phb_id);
857int64_t opal_return_cpu(void); 857int64_t opal_return_cpu(void);
858int64_t opal_reinit_cpus(uint64_t flags); 858int64_t opal_reinit_cpus(uint64_t flags);
diff --git a/arch/powerpc/include/asm/reg_a2.h b/arch/powerpc/include/asm/reg_a2.h
index 3d52a1132f3d..3ba9c6f096fc 100644
--- a/arch/powerpc/include/asm/reg_a2.h
+++ b/arch/powerpc/include/asm/reg_a2.h
@@ -110,15 +110,6 @@
110#define TLB1_UR ASM_CONST(0x0000000000000002) 110#define TLB1_UR ASM_CONST(0x0000000000000002)
111#define TLB1_SR ASM_CONST(0x0000000000000001) 111#define TLB1_SR ASM_CONST(0x0000000000000001)
112 112
113#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
114#define WSP_UART_PHYS 0xffc000c000
115/* This needs to be careful chosen to hit a !0 congruence class
116 * in the TLB since we bolt it in way 3, which is already occupied
117 * by our linear mapping primary bolted entry in CC 0.
118 */
119#define WSP_UART_VIRT 0xf000000000001000
120#endif
121
122/* A2 erativax attributes definitions */ 113/* A2 erativax attributes definitions */
123#define ERATIVAX_RS_IS_ALL 0x000 114#define ERATIVAX_RS_IS_ALL 0x000
124#define ERATIVAX_RS_IS_TID 0x040 115#define ERATIVAX_RS_IS_TID 0x040
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 0e83e7d8c73f..58abeda64cb7 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -16,13 +16,15 @@ struct thread_struct;
16extern struct task_struct *_switch(struct thread_struct *prev, 16extern struct task_struct *_switch(struct thread_struct *prev,
17 struct thread_struct *next); 17 struct thread_struct *next);
18#ifdef CONFIG_PPC_BOOK3S_64 18#ifdef CONFIG_PPC_BOOK3S_64
19static inline void save_tar(struct thread_struct *prev) 19static inline void save_early_sprs(struct thread_struct *prev)
20{ 20{
21 if (cpu_has_feature(CPU_FTR_ARCH_207S)) 21 if (cpu_has_feature(CPU_FTR_ARCH_207S))
22 prev->tar = mfspr(SPRN_TAR); 22 prev->tar = mfspr(SPRN_TAR);
23 if (cpu_has_feature(CPU_FTR_DSCR))
24 prev->dscr = mfspr(SPRN_DSCR);
23} 25}
24#else 26#else
25static inline void save_tar(struct thread_struct *prev) {} 27static inline void save_early_sprs(struct thread_struct *prev) {}
26#endif 28#endif
27 29
28extern void enable_kernel_fp(void); 30extern void enable_kernel_fp(void);
@@ -84,6 +86,8 @@ static inline void clear_task_ebb(struct task_struct *t)
84{ 86{
85#ifdef CONFIG_PPC_BOOK3S_64 87#ifdef CONFIG_PPC_BOOK3S_64
86 /* EBB perf events are not inherited, so clear all EBB state. */ 88 /* EBB perf events are not inherited, so clear all EBB state. */
89 t->thread.ebbrr = 0;
90 t->thread.ebbhr = 0;
87 t->thread.bescr = 0; 91 t->thread.bescr = 0;
88 t->thread.mmcr2 = 0; 92 t->thread.mmcr2 = 0;
89 t->thread.mmcr0 = 0; 93 t->thread.mmcr0 = 0;
diff --git a/arch/powerpc/include/asm/wsp.h b/arch/powerpc/include/asm/wsp.h
deleted file mode 100644
index c7dc83088a33..000000000000
--- a/arch/powerpc/include/asm/wsp.h
+++ /dev/null
@@ -1,14 +0,0 @@
1/*
2 * Copyright 2011 Michael Ellerman, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#ifndef __ASM_POWERPC_WSP_H
10#define __ASM_POWERPC_WSP_H
11
12extern int wsp_get_chip_id(struct device_node *dn);
13
14#endif /* __ASM_POWERPC_WSP_H */
diff --git a/arch/powerpc/include/uapi/asm/cputable.h b/arch/powerpc/include/uapi/asm/cputable.h
index 5b7657959faa..de2c0e4ee1aa 100644
--- a/arch/powerpc/include/uapi/asm/cputable.h
+++ b/arch/powerpc/include/uapi/asm/cputable.h
@@ -41,5 +41,6 @@
41#define PPC_FEATURE2_EBB 0x10000000 41#define PPC_FEATURE2_EBB 0x10000000
42#define PPC_FEATURE2_ISEL 0x08000000 42#define PPC_FEATURE2_ISEL 0x08000000
43#define PPC_FEATURE2_TAR 0x04000000 43#define PPC_FEATURE2_TAR 0x04000000
44#define PPC_FEATURE2_VEC_CRYPTO 0x02000000
44 45
45#endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */ 46#endif /* _UAPI__ASM_POWERPC_CPUTABLE_H */
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index fab19ec25597..670c312d914e 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -43,7 +43,6 @@ obj-$(CONFIG_PPC_BOOK3S_64) += cpu_setup_power.o
43obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o 43obj-$(CONFIG_PPC_BOOK3S_64) += mce.o mce_power.o
44obj64-$(CONFIG_RELOCATABLE) += reloc_64.o 44obj64-$(CONFIG_RELOCATABLE) += reloc_64.o
45obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o 45obj-$(CONFIG_PPC_BOOK3E_64) += exceptions-64e.o idle_book3e.o
46obj-$(CONFIG_PPC_A2) += cpu_setup_a2.o
47obj-$(CONFIG_PPC64) += vdso64/ 46obj-$(CONFIG_PPC64) += vdso64/
48obj-$(CONFIG_ALTIVEC) += vecemu.o 47obj-$(CONFIG_ALTIVEC) += vecemu.o
49obj-$(CONFIG_PPC_970_NAP) += idle_power4.o 48obj-$(CONFIG_PPC_970_NAP) += idle_power4.o
diff --git a/arch/powerpc/kernel/cpu_setup_a2.S b/arch/powerpc/kernel/cpu_setup_a2.S
deleted file mode 100644
index 61f079e05b61..000000000000
--- a/arch/powerpc/kernel/cpu_setup_a2.S
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * A2 specific assembly support code
3 *
4 * Copyright 2009 Ben Herrenschmidt, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <asm/asm-offsets.h>
13#include <asm/ppc_asm.h>
14#include <asm/ppc-opcode.h>
15#include <asm/processor.h>
16#include <asm/reg_a2.h>
17#include <asm/reg.h>
18#include <asm/thread_info.h>
19
20/*
21 * Disable thdid and class fields in ERATs to bump PID to full 14 bits capacity.
22 * This also prevents external LPID accesses but that isn't a problem when not a
23 * guest. Under PV, this setting will be ignored and MMUCR will return the right
24 * number of PID bits we can use.
25 */
26#define MMUCR1_EXTEND_PID \
27 (MMUCR1_ICTID | MMUCR1_ITTID | MMUCR1_DCTID | \
28 MMUCR1_DTTID | MMUCR1_DCCD)
29
30/*
31 * Use extended PIDs if enabled.
32 * Don't clear the ERATs on context sync events and enable I & D LRU.
33 * Enable ERAT back invalidate when tlbwe overwrites an entry.
34 */
35#define INITIAL_MMUCR1 \
36 (MMUCR1_EXTEND_PID | MMUCR1_CSINV_NEVER | MMUCR1_IRRE | \
37 MMUCR1_DRRE | MMUCR1_TLBWE_BINV)
38
39_GLOBAL(__setup_cpu_a2)
40 /* Some of these are actually thread local and some are
41 * core local but doing it always won't hurt
42 */
43
44#ifdef CONFIG_PPC_ICSWX
45 /* Make sure ACOP starts out as zero */
46 li r3,0
47 mtspr SPRN_ACOP,r3
48
49 /* Skip the following if we are in Guest mode */
50 mfmsr r3
51 andis. r0,r3,MSR_GS@h
52 bne _icswx_skip_guest
53
54 /* Enable icswx instruction */
55 mfspr r3,SPRN_A2_CCR2
56 ori r3,r3,A2_CCR2_ENABLE_ICSWX
57 mtspr SPRN_A2_CCR2,r3
58
59 /* Unmask all CTs in HACOP */
60 li r3,-1
61 mtspr SPRN_HACOP,r3
62_icswx_skip_guest:
63#endif /* CONFIG_PPC_ICSWX */
64
65 /* Enable doorbell */
66 mfspr r3,SPRN_A2_CCR2
67 oris r3,r3,A2_CCR2_ENABLE_PC@h
68 mtspr SPRN_A2_CCR2,r3
69 isync
70
71 /* Setup CCR0 to disable power saving for now as it's busted
72 * in the current implementations. Setup CCR1 to wake on
73 * interrupts normally (we write the default value but who
74 * knows what FW may have clobbered...)
75 */
76 li r3,0
77 mtspr SPRN_A2_CCR0, r3
78 LOAD_REG_IMMEDIATE(r3,0x0f0f0f0f)
79 mtspr SPRN_A2_CCR1, r3
80
81 /* Initialise MMUCR1 */
82 lis r3,INITIAL_MMUCR1@h
83 ori r3,r3,INITIAL_MMUCR1@l
84 mtspr SPRN_MMUCR1,r3
85
86 /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
87 LOAD_REG_IMMEDIATE(r3, 0x000a7531)
88 mtspr SPRN_MMUCR2,r3
89
90 /* Set MMUCR3 to write all thids bit to the TLB */
91 LOAD_REG_IMMEDIATE(r3, 0x0000000f)
92 mtspr SPRN_MMUCR3,r3
93
94 /* Don't do ERAT stuff if running guest mode */
95 mfmsr r3
96 andis. r0,r3,MSR_GS@h
97 bne 1f
98
99 /* Now set the I-ERAT watermark to 15 */
100 lis r4,(MMUCR0_TLBSEL_I|MMUCR0_ECL)@h
101 mtspr SPRN_MMUCR0, r4
102 li r4,A2_IERAT_SIZE-1
103 PPC_ERATWE(R4,R4,3)
104
105 /* Now set the D-ERAT watermark to 31 */
106 lis r4,(MMUCR0_TLBSEL_D|MMUCR0_ECL)@h
107 mtspr SPRN_MMUCR0, r4
108 li r4,A2_DERAT_SIZE-1
109 PPC_ERATWE(R4,R4,3)
110
111 /* And invalidate the beast just in case. That won't get rid of
112 * a bolted entry though it will be in LRU and so will go away eventually
113 * but let's not bother for now
114 */
115 PPC_ERATILX(0,0,R0)
1161:
117 blr
118
119_GLOBAL(__restore_cpu_a2)
120 b __setup_cpu_a2
diff --git a/arch/powerpc/kernel/cpu_setup_power.S b/arch/powerpc/kernel/cpu_setup_power.S
index 1557e7c2c7e1..46733535cc0b 100644
--- a/arch/powerpc/kernel/cpu_setup_power.S
+++ b/arch/powerpc/kernel/cpu_setup_power.S
@@ -56,6 +56,7 @@ _GLOBAL(__setup_cpu_power8)
56 li r0,0 56 li r0,0
57 mtspr SPRN_LPID,r0 57 mtspr SPRN_LPID,r0
58 mfspr r3,SPRN_LPCR 58 mfspr r3,SPRN_LPCR
59 ori r3, r3, LPCR_PECEDH
59 bl __init_LPCR 60 bl __init_LPCR
60 bl __init_HFSCR 61 bl __init_HFSCR
61 bl __init_tlb_power8 62 bl __init_tlb_power8
@@ -74,6 +75,7 @@ _GLOBAL(__restore_cpu_power8)
74 li r0,0 75 li r0,0
75 mtspr SPRN_LPID,r0 76 mtspr SPRN_LPID,r0
76 mfspr r3,SPRN_LPCR 77 mfspr r3,SPRN_LPCR
78 ori r3, r3, LPCR_PECEDH
77 bl __init_LPCR 79 bl __init_LPCR
78 bl __init_HFSCR 80 bl __init_HFSCR
79 bl __init_tlb_power8 81 bl __init_tlb_power8
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index c1faade6506d..965291b4c2fa 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -109,7 +109,8 @@ extern void __restore_cpu_e6500(void);
109 PPC_FEATURE_PSERIES_PERFMON_COMPAT) 109 PPC_FEATURE_PSERIES_PERFMON_COMPAT)
110#define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \ 110#define COMMON_USER2_POWER8 (PPC_FEATURE2_ARCH_2_07 | \
111 PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_DSCR | \ 111 PPC_FEATURE2_HTM_COMP | PPC_FEATURE2_DSCR | \
112 PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR) 112 PPC_FEATURE2_ISEL | PPC_FEATURE2_TAR | \
113 PPC_FEATURE2_VEC_CRYPTO)
113#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\ 114#define COMMON_USER_PA6T (COMMON_USER_PPC64 | PPC_FEATURE_PA6T |\
114 PPC_FEATURE_TRUE_LE | \ 115 PPC_FEATURE_TRUE_LE | \
115 PPC_FEATURE_HAS_ALTIVEC_COMP) 116 PPC_FEATURE_HAS_ALTIVEC_COMP)
@@ -2148,44 +2149,6 @@ static struct cpu_spec __initdata cpu_specs[] = {
2148 } 2149 }
2149#endif /* CONFIG_PPC32 */ 2150#endif /* CONFIG_PPC32 */
2150#endif /* CONFIG_E500 */ 2151#endif /* CONFIG_E500 */
2151
2152#ifdef CONFIG_PPC_A2
2153 { /* Standard A2 (>= DD2) + FPU core */
2154 .pvr_mask = 0xffff0000,
2155 .pvr_value = 0x00480000,
2156 .cpu_name = "A2 (>= DD2)",
2157 .cpu_features = CPU_FTRS_A2,
2158 .cpu_user_features = COMMON_USER_PPC64,
2159 .mmu_features = MMU_FTRS_A2,
2160 .icache_bsize = 64,
2161 .dcache_bsize = 64,
2162 .num_pmcs = 0,
2163 .cpu_setup = __setup_cpu_a2,
2164 .cpu_restore = __restore_cpu_a2,
2165 .machine_check = machine_check_generic,
2166 .platform = "ppca2",
2167 },
2168 { /* This is a default entry to get going, to be replaced by
2169 * a real one at some stage
2170 */
2171#define CPU_FTRS_BASE_BOOK3E (CPU_FTR_USE_TB | \
2172 CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_SMT | \
2173 CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
2174 .pvr_mask = 0x00000000,
2175 .pvr_value = 0x00000000,
2176 .cpu_name = "Book3E",
2177 .cpu_features = CPU_FTRS_BASE_BOOK3E,
2178 .cpu_user_features = COMMON_USER_PPC64,
2179 .mmu_features = MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX |
2180 MMU_FTR_USE_TLBIVAX_BCAST |
2181 MMU_FTR_LOCK_BCAST_INVAL,
2182 .icache_bsize = 64,
2183 .dcache_bsize = 64,
2184 .num_pmcs = 0,
2185 .machine_check = machine_check_generic,
2186 .platform = "power6",
2187 },
2188#endif /* CONFIG_PPC_A2 */
2189}; 2152};
2190 2153
2191static struct cpu_spec the_cpu_spec; 2154static struct cpu_spec the_cpu_spec;
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index 7051ea3101b9..86e25702aaca 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -330,8 +330,8 @@ static int eeh_phb_check_failure(struct eeh_pe *pe)
330 eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED); 330 eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
331 eeh_serialize_unlock(flags); 331 eeh_serialize_unlock(flags);
332 332
333 pr_err("EEH: PHB#%x failure detected\n", 333 pr_err("EEH: PHB#%x failure detected, location: %s\n",
334 phb_pe->phb->global_number); 334 phb_pe->phb->global_number, eeh_pe_loc_get(phb_pe));
335 dump_stack(); 335 dump_stack();
336 eeh_send_failure_event(phb_pe); 336 eeh_send_failure_event(phb_pe);
337 337
@@ -358,10 +358,11 @@ out:
358int eeh_dev_check_failure(struct eeh_dev *edev) 358int eeh_dev_check_failure(struct eeh_dev *edev)
359{ 359{
360 int ret; 360 int ret;
361 int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
361 unsigned long flags; 362 unsigned long flags;
362 struct device_node *dn; 363 struct device_node *dn;
363 struct pci_dev *dev; 364 struct pci_dev *dev;
364 struct eeh_pe *pe; 365 struct eeh_pe *pe, *parent_pe, *phb_pe;
365 int rc = 0; 366 int rc = 0;
366 const char *location; 367 const char *location;
367 368
@@ -439,14 +440,34 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
439 */ 440 */
440 if ((ret < 0) || 441 if ((ret < 0) ||
441 (ret == EEH_STATE_NOT_SUPPORT) || 442 (ret == EEH_STATE_NOT_SUPPORT) ||
442 (ret & (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) == 443 ((ret & active_flags) == active_flags)) {
443 (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE)) {
444 eeh_stats.false_positives++; 444 eeh_stats.false_positives++;
445 pe->false_positives++; 445 pe->false_positives++;
446 rc = 0; 446 rc = 0;
447 goto dn_unlock; 447 goto dn_unlock;
448 } 448 }
449 449
450 /*
451 * It should be corner case that the parent PE has been
452 * put into frozen state as well. We should take care
453 * that at first.
454 */
455 parent_pe = pe->parent;
456 while (parent_pe) {
457 /* Hit the ceiling ? */
458 if (parent_pe->type & EEH_PE_PHB)
459 break;
460
461 /* Frozen parent PE ? */
462 ret = eeh_ops->get_state(parent_pe, NULL);
463 if (ret > 0 &&
464 (ret & active_flags) != active_flags)
465 pe = parent_pe;
466
467 /* Next parent level */
468 parent_pe = parent_pe->parent;
469 }
470
450 eeh_stats.slot_resets++; 471 eeh_stats.slot_resets++;
451 472
452 /* Avoid repeated reports of this failure, including problems 473 /* Avoid repeated reports of this failure, including problems
@@ -460,8 +481,11 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
460 * a stack trace will help the device-driver authors figure 481 * a stack trace will help the device-driver authors figure
461 * out what happened. So print that out. 482 * out what happened. So print that out.
462 */ 483 */
463 pr_err("EEH: Frozen PE#%x detected on PHB#%x\n", 484 phb_pe = eeh_phb_pe_get(pe->phb);
464 pe->addr, pe->phb->global_number); 485 pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
486 pe->phb->global_number, pe->addr);
487 pr_err("EEH: PE location: %s, PHB location: %s\n",
488 eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
465 dump_stack(); 489 dump_stack();
466 490
467 eeh_send_failure_event(pe); 491 eeh_send_failure_event(pe);
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 7100a5b96e70..420da61d4ce0 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -447,8 +447,9 @@ static void *eeh_pe_detach_dev(void *data, void *userdata)
447 * PE reset (for 3 times), we try to clear the frozen state 447 * PE reset (for 3 times), we try to clear the frozen state
448 * for 3 times as well. 448 * for 3 times as well.
449 */ 449 */
450static int eeh_clear_pe_frozen_state(struct eeh_pe *pe) 450static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
451{ 451{
452 struct eeh_pe *pe = (struct eeh_pe *)data;
452 int i, rc; 453 int i, rc;
453 454
454 for (i = 0; i < 3; i++) { 455 for (i = 0; i < 3; i++) {
@@ -461,13 +462,24 @@ static int eeh_clear_pe_frozen_state(struct eeh_pe *pe)
461 } 462 }
462 463
463 /* The PE has been isolated, clear it */ 464 /* The PE has been isolated, clear it */
464 if (rc) 465 if (rc) {
465 pr_warn("%s: Can't clear frozen PHB#%x-PE#%x (%d)\n", 466 pr_warn("%s: Can't clear frozen PHB#%x-PE#%x (%d)\n",
466 __func__, pe->phb->global_number, pe->addr, rc); 467 __func__, pe->phb->global_number, pe->addr, rc);
467 else 468 return (void *)pe;
469 }
470
471 return NULL;
472}
473
474static int eeh_clear_pe_frozen_state(struct eeh_pe *pe)
475{
476 void *rc;
477
478 rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, NULL);
479 if (!rc)
468 eeh_pe_state_clear(pe, EEH_PE_ISOLATED); 480 eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
469 481
470 return rc; 482 return rc ? -EIO : 0;
471} 483}
472 484
473/** 485/**
@@ -758,7 +770,7 @@ static void eeh_handle_special_event(void)
758 eeh_serialize_lock(&flags); 770 eeh_serialize_lock(&flags);
759 771
760 /* Purge all events */ 772 /* Purge all events */
761 eeh_remove_event(NULL); 773 eeh_remove_event(NULL, true);
762 774
763 list_for_each_entry(hose, &hose_list, list_node) { 775 list_for_each_entry(hose, &hose_list, list_node) {
764 phb_pe = eeh_phb_pe_get(hose); 776 phb_pe = eeh_phb_pe_get(hose);
@@ -777,7 +789,7 @@ static void eeh_handle_special_event(void)
777 eeh_serialize_lock(&flags); 789 eeh_serialize_lock(&flags);
778 790
779 /* Purge all events of the PHB */ 791 /* Purge all events of the PHB */
780 eeh_remove_event(pe); 792 eeh_remove_event(pe, true);
781 793
782 if (rc == EEH_NEXT_ERR_DEAD_PHB) 794 if (rc == EEH_NEXT_ERR_DEAD_PHB)
783 eeh_pe_state_mark(pe, EEH_PE_ISOLATED); 795 eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
diff --git a/arch/powerpc/kernel/eeh_event.c b/arch/powerpc/kernel/eeh_event.c
index 72d748b56c86..4eefb6e34dbb 100644
--- a/arch/powerpc/kernel/eeh_event.c
+++ b/arch/powerpc/kernel/eeh_event.c
@@ -152,24 +152,33 @@ int eeh_send_failure_event(struct eeh_pe *pe)
152/** 152/**
153 * eeh_remove_event - Remove EEH event from the queue 153 * eeh_remove_event - Remove EEH event from the queue
154 * @pe: Event binding to the PE 154 * @pe: Event binding to the PE
155 * @force: Event will be removed unconditionally
155 * 156 *
156 * On PowerNV platform, we might have subsequent coming events 157 * On PowerNV platform, we might have subsequent coming events
157 * is part of the former one. For that case, those subsequent 158 * is part of the former one. For that case, those subsequent
158 * coming events are totally duplicated and unnecessary, thus 159 * coming events are totally duplicated and unnecessary, thus
159 * they should be removed. 160 * they should be removed.
160 */ 161 */
161void eeh_remove_event(struct eeh_pe *pe) 162void eeh_remove_event(struct eeh_pe *pe, bool force)
162{ 163{
163 unsigned long flags; 164 unsigned long flags;
164 struct eeh_event *event, *tmp; 165 struct eeh_event *event, *tmp;
165 166
167 /*
168 * If we have NULL PE passed in, we have dead IOC
169 * or we're sure we can report all existing errors
170 * by the caller.
171 *
172 * With "force", the event with associated PE that
173 * have been isolated, the event won't be removed
174 * to avoid event lost.
175 */
166 spin_lock_irqsave(&eeh_eventlist_lock, flags); 176 spin_lock_irqsave(&eeh_eventlist_lock, flags);
167 list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) { 177 list_for_each_entry_safe(event, tmp, &eeh_eventlist, list) {
168 /* 178 if (!force && event->pe &&
169 * If we don't have valid PE passed in, that means 179 (event->pe->state & EEH_PE_ISOLATED))
170 * we already have event corresponding to dead IOC 180 continue;
171 * and all events should be purged. 181
172 */
173 if (!pe) { 182 if (!pe) {
174 list_del(&event->list); 183 list_del(&event->list);
175 kfree(event); 184 kfree(event);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 995c2a284630..fbd01eba4473 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -792,6 +792,66 @@ void eeh_pe_restore_bars(struct eeh_pe *pe)
792} 792}
793 793
794/** 794/**
795 * eeh_pe_loc_get - Retrieve location code binding to the given PE
796 * @pe: EEH PE
797 *
798 * Retrieve the location code of the given PE. If the primary PE bus
799 * is root bus, we will grab location code from PHB device tree node
800 * or root port. Otherwise, the upstream bridge's device tree node
801 * of the primary PE bus will be checked for the location code.
802 */
803const char *eeh_pe_loc_get(struct eeh_pe *pe)
804{
805 struct pci_controller *hose;
806 struct pci_bus *bus = eeh_pe_bus_get(pe);
807 struct pci_dev *pdev;
808 struct device_node *dn;
809 const char *loc;
810
811 if (!bus)
812 return "N/A";
813
814 /* PHB PE or root PE ? */
815 if (pci_is_root_bus(bus)) {
816 hose = pci_bus_to_host(bus);
817 loc = of_get_property(hose->dn,
818 "ibm,loc-code", NULL);
819 if (loc)
820 return loc;
821 loc = of_get_property(hose->dn,
822 "ibm,io-base-loc-code", NULL);
823 if (loc)
824 return loc;
825
826 pdev = pci_get_slot(bus, 0x0);
827 } else {
828 pdev = bus->self;
829 }
830
831 if (!pdev) {
832 loc = "N/A";
833 goto out;
834 }
835
836 dn = pci_device_to_OF_node(pdev);
837 if (!dn) {
838 loc = "N/A";
839 goto out;
840 }
841
842 loc = of_get_property(dn, "ibm,loc-code", NULL);
843 if (!loc)
844 loc = of_get_property(dn, "ibm,slot-location-code", NULL);
845 if (!loc)
846 loc = "N/A";
847
848out:
849 if (pci_is_root_bus(bus) && pdev)
850 pci_dev_put(pdev);
851 return loc;
852}
853
854/**
795 * eeh_pe_bus_get - Retrieve PCI bus according to the given PE 855 * eeh_pe_bus_get - Retrieve PCI bus according to the given PE
796 * @pe: EEH PE 856 * @pe: EEH PE
797 * 857 *
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 911d45366f59..6528c5e2cc44 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -428,12 +428,6 @@ BEGIN_FTR_SECTION
428 std r24,THREAD_VRSAVE(r3) 428 std r24,THREAD_VRSAVE(r3)
429END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC) 429END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
430#endif /* CONFIG_ALTIVEC */ 430#endif /* CONFIG_ALTIVEC */
431#ifdef CONFIG_PPC64
432BEGIN_FTR_SECTION
433 mfspr r25,SPRN_DSCR
434 std r25,THREAD_DSCR(r3)
435END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
436#endif
437 and. r0,r0,r22 431 and. r0,r0,r22
438 beq+ 1f 432 beq+ 1f
439 andc r22,r22,r0 433 andc r22,r22,r0
diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
index 771b4e92e5d9..bb9cac6c8051 100644
--- a/arch/powerpc/kernel/exceptions-64e.S
+++ b/arch/powerpc/kernel/exceptions-64e.S
@@ -1467,22 +1467,6 @@ a2_tlbinit_after_linear_map:
1467 .globl a2_tlbinit_after_iprot_flush 1467 .globl a2_tlbinit_after_iprot_flush
1468a2_tlbinit_after_iprot_flush: 1468a2_tlbinit_after_iprot_flush:
1469 1469
1470#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
1471 /* Now establish early debug mappings if applicable */
1472 /* Restore the MAS0 we used for linear mapping load */
1473 mtspr SPRN_MAS0,r11
1474
1475 lis r3,(MAS1_VALID | MAS1_IPROT)@h
1476 ori r3,r3,(BOOK3E_PAGESZ_4K << MAS1_TSIZE_SHIFT)
1477 mtspr SPRN_MAS1,r3
1478 LOAD_REG_IMMEDIATE(r3, WSP_UART_VIRT | MAS2_I | MAS2_G)
1479 mtspr SPRN_MAS2,r3
1480 LOAD_REG_IMMEDIATE(r3, WSP_UART_PHYS | MAS3_SR | MAS3_SW)
1481 mtspr SPRN_MAS7_MAS3,r3
1482 /* re-use the MAS8 value from the linear mapping */
1483 tlbwe
1484#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
1485
1486 PPC_TLBILX(0,0,R0) 1470 PPC_TLBILX(0,0,R0)
1487 sync 1471 sync
1488 isync 1472 isync
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 20f11eb4dff7..a7d36b19221d 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -439,9 +439,9 @@ BEGIN_FTR_SECTION
439 * R9 = CR 439 * R9 = CR
440 * Original R9 to R13 is saved on PACA_EXMC 440 * Original R9 to R13 is saved on PACA_EXMC
441 * 441 *
442 * Switch to mc_emergency stack and handle re-entrancy (though we 442 * Switch to mc_emergency stack and handle re-entrancy (we limit
443 * currently don't test for overflow). Save MCE registers srr1, 443 * the nested MCE upto level 4 to avoid stack overflow).
444 * srr0, dar and dsisr and then set ME=1 444 * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
445 * 445 *
446 * We use paca->in_mce to check whether this is the first entry or 446 * We use paca->in_mce to check whether this is the first entry or
447 * nested machine check. We increment paca->in_mce to track nested 447 * nested machine check. We increment paca->in_mce to track nested
@@ -464,6 +464,9 @@ BEGIN_FTR_SECTION
4640: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */ 4640: subi r1,r1,INT_FRAME_SIZE /* alloc stack frame */
465 addi r10,r10,1 /* increment paca->in_mce */ 465 addi r10,r10,1 /* increment paca->in_mce */
466 sth r10,PACA_IN_MCE(r13) 466 sth r10,PACA_IN_MCE(r13)
467 /* Limit nested MCE to level 4 to avoid stack overflow */
468 cmpwi r10,4
469 bgt 2f /* Check if we hit limit of 4 */
467 std r11,GPR1(r1) /* Save r1 on the stack. */ 470 std r11,GPR1(r1) /* Save r1 on the stack. */
468 std r11,0(r1) /* make stack chain pointer */ 471 std r11,0(r1) /* make stack chain pointer */
469 mfspr r11,SPRN_SRR0 /* Save SRR0 */ 472 mfspr r11,SPRN_SRR0 /* Save SRR0 */
@@ -482,10 +485,23 @@ BEGIN_FTR_SECTION
482 ori r11,r11,MSR_RI /* turn on RI bit */ 485 ori r11,r11,MSR_RI /* turn on RI bit */
483 ld r12,PACAKBASE(r13) /* get high part of &label */ 486 ld r12,PACAKBASE(r13) /* get high part of &label */
484 LOAD_HANDLER(r12, machine_check_handle_early) 487 LOAD_HANDLER(r12, machine_check_handle_early)
485 mtspr SPRN_SRR0,r12 4881: mtspr SPRN_SRR0,r12
486 mtspr SPRN_SRR1,r11 489 mtspr SPRN_SRR1,r11
487 rfid 490 rfid
488 b . /* prevent speculative execution */ 491 b . /* prevent speculative execution */
4922:
493 /* Stack overflow. Stay on emergency stack and panic.
494 * Keep the ME bit off while panic-ing, so that if we hit
495 * another machine check we checkstop.
496 */
497 addi r1,r1,INT_FRAME_SIZE /* go back to previous stack frame */
498 ld r11,PACAKMSR(r13)
499 ld r12,PACAKBASE(r13)
500 LOAD_HANDLER(r12, unrecover_mce)
501 li r10,MSR_ME
502 andc r11,r11,r10 /* Turn off MSR_ME */
503 b 1b
504 b . /* prevent speculative execution */
489END_FTR_SECTION_IFSET(CPU_FTR_HVMODE) 505END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
490 506
491machine_check_pSeries: 507machine_check_pSeries:
@@ -1389,6 +1405,7 @@ machine_check_handle_early:
1389 bl save_nvgprs 1405 bl save_nvgprs
1390 addi r3,r1,STACK_FRAME_OVERHEAD 1406 addi r3,r1,STACK_FRAME_OVERHEAD
1391 bl machine_check_early 1407 bl machine_check_early
1408 std r3,RESULT(r1) /* Save result */
1392 ld r12,_MSR(r1) 1409 ld r12,_MSR(r1)
1393#ifdef CONFIG_PPC_P7_NAP 1410#ifdef CONFIG_PPC_P7_NAP
1394 /* 1411 /*
@@ -1443,11 +1460,33 @@ machine_check_handle_early:
1443 */ 1460 */
1444 andi. r11,r12,MSR_RI 1461 andi. r11,r12,MSR_RI
1445 bne 2f 1462 bne 2f
14461: addi r3,r1,STACK_FRAME_OVERHEAD 14631: mfspr r11,SPRN_SRR0
1447 bl unrecoverable_exception 1464 ld r10,PACAKBASE(r13)
1448 b 1b 1465 LOAD_HANDLER(r10,unrecover_mce)
1466 mtspr SPRN_SRR0,r10
1467 ld r10,PACAKMSR(r13)
1468 /*
1469 * We are going down. But there are chances that we might get hit by
1470 * another MCE during panic path and we may run into unstable state
1471 * with no way out. Hence, turn ME bit off while going down, so that
1472 * when another MCE is hit during panic path, system will checkstop
1473 * and hypervisor will get restarted cleanly by SP.
1474 */
1475 li r3,MSR_ME
1476 andc r10,r10,r3 /* Turn off MSR_ME */
1477 mtspr SPRN_SRR1,r10
1478 rfid
1479 b .
14492: 14802:
1450 /* 1481 /*
1482 * Check if we have successfully handled/recovered from error, if not
1483 * then stay on emergency stack and panic.
1484 */
1485 ld r3,RESULT(r1) /* Load result */
1486 cmpdi r3,0 /* see if we handled MCE successfully */
1487
1488 beq 1b /* if !handled then panic */
1489 /*
1451 * Return from MC interrupt. 1490 * Return from MC interrupt.
1452 * Queue up the MCE event so that we can log it later, while 1491 * Queue up the MCE event so that we can log it later, while
1453 * returning from kernel or opal call. 1492 * returning from kernel or opal call.
@@ -1460,6 +1499,17 @@ machine_check_handle_early:
1460 MACHINE_CHECK_HANDLER_WINDUP 1499 MACHINE_CHECK_HANDLER_WINDUP
1461 b machine_check_pSeries 1500 b machine_check_pSeries
1462 1501
1502unrecover_mce:
1503 /* Invoke machine_check_exception to print MCE event and panic. */
1504 addi r3,r1,STACK_FRAME_OVERHEAD
1505 bl machine_check_exception
1506 /*
1507 * We will not reach here. Even if we did, there is no way out. Call
1508 * unrecoverable_exception and die.
1509 */
15101: addi r3,r1,STACK_FRAME_OVERHEAD
1511 bl unrecoverable_exception
1512 b 1b
1463/* 1513/*
1464 * r13 points to the PACA, r9 contains the saved CR, 1514 * r13 points to the PACA, r9 contains the saved CR,
1465 * r12 contain the saved SRR1, SRR0 is still ready for return 1515 * r12 contain the saved SRR1, SRR0 is still ready for return
diff --git a/arch/powerpc/kernel/head_40x.S b/arch/powerpc/kernel/head_40x.S
index 67ee0d6c1070..7d7d8635227a 100644
--- a/arch/powerpc/kernel/head_40x.S
+++ b/arch/powerpc/kernel/head_40x.S
@@ -930,25 +930,6 @@ initial_mmu:
930 tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */ 930 tlbwe r4,r0,TLB_DATA /* Load the data portion of the entry */
931 tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */ 931 tlbwe r3,r0,TLB_TAG /* Load the tag portion of the entry */
932 932
933#if defined(CONFIG_SERIAL_TEXT_DEBUG) && defined(SERIAL_DEBUG_IO_BASE)
934
935 /* Load a TLB entry for the UART, so that ppc4xx_progress() can use
936 * the UARTs nice and early. We use a 4k real==virtual mapping. */
937
938 lis r3,SERIAL_DEBUG_IO_BASE@h
939 ori r3,r3,SERIAL_DEBUG_IO_BASE@l
940 mr r4,r3
941 clrrwi r4,r4,12
942 ori r4,r4,(TLB_WR|TLB_I|TLB_M|TLB_G)
943
944 clrrwi r3,r3,12
945 ori r3,r3,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
946
947 li r0,0 /* TLB slot 0 */
948 tlbwe r4,r0,TLB_DATA
949 tlbwe r3,r0,TLB_TAG
950#endif /* CONFIG_SERIAL_DEBUG_TEXT && SERIAL_DEBUG_IO_BASE */
951
952 isync 933 isync
953 934
954 /* Establish the exception vector base 935 /* Establish the exception vector base
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index 8a1edbe26b8f..be99774d3f44 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -755,15 +755,15 @@ struct task_struct *__switch_to(struct task_struct *prev,
755 755
756 WARN_ON(!irqs_disabled()); 756 WARN_ON(!irqs_disabled());
757 757
758 /* Back up the TAR across context switches. 758 /* Back up the TAR and DSCR across context switches.
759 * Note that the TAR is not available for use in the kernel. (To 759 * Note that the TAR is not available for use in the kernel. (To
760 * provide this, the TAR should be backed up/restored on exception 760 * provide this, the TAR should be backed up/restored on exception
761 * entry/exit instead, and be in pt_regs. FIXME, this should be in 761 * entry/exit instead, and be in pt_regs. FIXME, this should be in
762 * pt_regs anyway (for debug).) 762 * pt_regs anyway (for debug).)
763 * Save the TAR here before we do treclaim/trecheckpoint as these 763 * Save the TAR and DSCR here before we do treclaim/trecheckpoint as
764 * will change the TAR. 764 * these will change them.
765 */ 765 */
766 save_tar(&prev->thread); 766 save_early_sprs(&prev->thread);
767 767
768 __switch_to_tm(prev); 768 __switch_to_tm(prev);
769 769
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index d4d418376f99..e239df3768ac 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -471,7 +471,7 @@ void __init smp_setup_cpu_maps(void)
471 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { 471 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
472 DBG(" thread %d -> cpu %d (hard id %d)\n", 472 DBG(" thread %d -> cpu %d (hard id %d)\n",
473 j, cpu, be32_to_cpu(intserv[j])); 473 j, cpu, be32_to_cpu(intserv[j]));
474 set_cpu_present(cpu, true); 474 set_cpu_present(cpu, of_device_is_available(dn));
475 set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j])); 475 set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
476 set_cpu_possible(cpu, true); 476 set_cpu_possible(cpu, true);
477 cpu++; 477 cpu++;
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 7e711bdcc6da..9fff9cdcc519 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -551,7 +551,7 @@ void timer_interrupt(struct pt_regs * regs)
551 may_hard_irq_enable(); 551 may_hard_irq_enable();
552 552
553 553
554#if defined(CONFIG_PPC32) && defined(CONFIG_PMAC) 554#if defined(CONFIG_PPC32) && defined(CONFIG_PPC_PMAC)
555 if (atomic_read(&ppc_n_lost_interrupts) != 0) 555 if (atomic_read(&ppc_n_lost_interrupts) != 0)
556 do_IRQ(regs); 556 do_IRQ(regs);
557#endif 557#endif
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 1bd7ca298fa1..239f1cde3fff 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -295,6 +295,8 @@ long machine_check_early(struct pt_regs *regs)
295{ 295{
296 long handled = 0; 296 long handled = 0;
297 297
298 __get_cpu_var(irq_stat).mce_exceptions++;
299
298 if (cur_cpu_spec && cur_cpu_spec->machine_check_early) 300 if (cur_cpu_spec && cur_cpu_spec->machine_check_early)
299 handled = cur_cpu_spec->machine_check_early(regs); 301 handled = cur_cpu_spec->machine_check_early(regs);
300 return handled; 302 return handled;
diff --git a/arch/powerpc/kernel/udbg.c b/arch/powerpc/kernel/udbg.c
index a15837519dca..b7aa07279a63 100644
--- a/arch/powerpc/kernel/udbg.c
+++ b/arch/powerpc/kernel/udbg.c
@@ -62,8 +62,6 @@ void __init udbg_early_init(void)
62 udbg_init_cpm(); 62 udbg_init_cpm();
63#elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO) 63#elif defined(CONFIG_PPC_EARLY_DEBUG_USBGECKO)
64 udbg_init_usbgecko(); 64 udbg_init_usbgecko();
65#elif defined(CONFIG_PPC_EARLY_DEBUG_WSP)
66 udbg_init_wsp();
67#elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS) 65#elif defined(CONFIG_PPC_EARLY_DEBUG_MEMCONS)
68 /* In memory console */ 66 /* In memory console */
69 udbg_init_memcons(); 67 udbg_init_memcons();
diff --git a/arch/powerpc/kernel/udbg_16550.c b/arch/powerpc/kernel/udbg_16550.c
index 75702e207b29..6e7c4923b5ea 100644
--- a/arch/powerpc/kernel/udbg_16550.c
+++ b/arch/powerpc/kernel/udbg_16550.c
@@ -296,14 +296,3 @@ void __init udbg_init_40x_realmode(void)
296} 296}
297 297
298#endif /* CONFIG_PPC_EARLY_DEBUG_40x */ 298#endif /* CONFIG_PPC_EARLY_DEBUG_40x */
299
300
301#ifdef CONFIG_PPC_EARLY_DEBUG_WSP
302
303void __init udbg_init_wsp(void)
304{
305 udbg_uart_init_mmio((void *)WSP_UART_VIRT, 1);
306 udbg_uart_setup(57600, 50000000);
307}
308
309#endif /* CONFIG_PPC_EARLY_DEBUG_WSP */
diff --git a/arch/powerpc/kvm/book3s_hv_ras.c b/arch/powerpc/kvm/book3s_hv_ras.c
index 768a9f977c00..3a5c568b1e89 100644
--- a/arch/powerpc/kvm/book3s_hv_ras.c
+++ b/arch/powerpc/kvm/book3s_hv_ras.c
@@ -113,10 +113,8 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
113 * We assume that if the condition is recovered then linux host 113 * We assume that if the condition is recovered then linux host
114 * will have generated an error log event that we will pick 114 * will have generated an error log event that we will pick
115 * up and log later. 115 * up and log later.
116 * Don't release mce event now. In case if condition is not 116 * Don't release mce event now. We will queue up the event so that
117 * recovered we do guest exit and go back to linux host machine 117 * we can log the MCE event info on host console.
118 * check handler. Hence we need make sure that current mce event
119 * is available for linux host to consume.
120 */ 118 */
121 if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE)) 119 if (!get_mce_event(&mce_evt, MCE_EVENT_DONTRELEASE))
122 goto out; 120 goto out;
@@ -128,11 +126,12 @@ static long kvmppc_realmode_mc_power7(struct kvm_vcpu *vcpu)
128 126
129out: 127out:
130 /* 128 /*
131 * If we have handled the error, then release the mce event because 129 * We are now going enter guest either through machine check
132 * we will be delivering machine check to guest. 130 * interrupt (for unhandled errors) or will continue from
131 * current HSRR0 (for handled errors) in guest. Hence
132 * queue up the event so that we can log it from host console later.
133 */ 133 */
134 if (handled) 134 machine_check_queue_event();
135 release_mce_event();
136 135
137 return handled; 136 return handled;
138} 137}
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 77356fd25ccc..868347ef09fd 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2257,15 +2257,28 @@ machine_check_realmode:
2257 mr r3, r9 /* get vcpu pointer */ 2257 mr r3, r9 /* get vcpu pointer */
2258 bl kvmppc_realmode_machine_check 2258 bl kvmppc_realmode_machine_check
2259 nop 2259 nop
2260 cmpdi r3, 0 /* continue exiting from guest? */ 2260 cmpdi r3, 0 /* Did we handle MCE ? */
2261 ld r9, HSTATE_KVM_VCPU(r13) 2261 ld r9, HSTATE_KVM_VCPU(r13)
2262 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK 2262 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
2263 beq mc_cont 2263 /*
2264 * Deliver unhandled/fatal (e.g. UE) MCE errors to guest through
2265 * machine check interrupt (set HSRR0 to 0x200). And for handled
2266 * errors (no-fatal), just go back to guest execution with current
2267 * HSRR0 instead of exiting guest. This new approach will inject
2268 * machine check to guest for fatal error causing guest to crash.
2269 *
2270 * The old code used to return to host for unhandled errors which
2271 * was causing guest to hang with soft lockups inside guest and
2272 * makes it difficult to recover guest instance.
2273 */
2274 ld r10, VCPU_PC(r9)
2275 ld r11, VCPU_MSR(r9)
2276 bne 2f /* Continue guest execution. */
2264 /* If not, deliver a machine check. SRR0/1 are already set */ 2277 /* If not, deliver a machine check. SRR0/1 are already set */
2265 li r10, BOOK3S_INTERRUPT_MACHINE_CHECK 2278 li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
2266 ld r11, VCPU_MSR(r9) 2279 ld r11, VCPU_MSR(r9)
2267 bl kvmppc_msr_interrupt 2280 bl kvmppc_msr_interrupt
2268 b fast_interrupt_c_return 22812: b fast_interrupt_c_return
2269 2282
2270/* 2283/*
2271 * Check the reason we woke from nap, and take appropriate action. 2284 * Check the reason we woke from nap, and take appropriate action.
diff --git a/arch/powerpc/lib/sstep.c b/arch/powerpc/lib/sstep.c
index c0511c27a733..412dd46dd0b7 100644
--- a/arch/powerpc/lib/sstep.c
+++ b/arch/powerpc/lib/sstep.c
@@ -1470,7 +1470,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
1470 regs->gpr[rd] = byterev_4(val); 1470 regs->gpr[rd] = byterev_4(val);
1471 goto ldst_done; 1471 goto ldst_done;
1472 1472
1473#ifdef CONFIG_PPC_CPU 1473#ifdef CONFIG_PPC_FPU
1474 case 535: /* lfsx */ 1474 case 535: /* lfsx */
1475 case 567: /* lfsux */ 1475 case 567: /* lfsux */
1476 if (!(regs->msr & MSR_FP)) 1476 if (!(regs->msr & MSR_FP))
diff --git a/arch/powerpc/platforms/Kconfig b/arch/powerpc/platforms/Kconfig
index bf9c6d4cd26c..391b3f6b54a3 100644
--- a/arch/powerpc/platforms/Kconfig
+++ b/arch/powerpc/platforms/Kconfig
@@ -19,7 +19,6 @@ source "arch/powerpc/platforms/embedded6xx/Kconfig"
19source "arch/powerpc/platforms/44x/Kconfig" 19source "arch/powerpc/platforms/44x/Kconfig"
20source "arch/powerpc/platforms/40x/Kconfig" 20source "arch/powerpc/platforms/40x/Kconfig"
21source "arch/powerpc/platforms/amigaone/Kconfig" 21source "arch/powerpc/platforms/amigaone/Kconfig"
22source "arch/powerpc/platforms/wsp/Kconfig"
23 22
24config KVM_GUEST 23config KVM_GUEST
25 bool "KVM Guest support" 24 bool "KVM Guest support"
diff --git a/arch/powerpc/platforms/Kconfig.cputype b/arch/powerpc/platforms/Kconfig.cputype
index 43b65ad1970a..a41bd023647a 100644
--- a/arch/powerpc/platforms/Kconfig.cputype
+++ b/arch/powerpc/platforms/Kconfig.cputype
@@ -148,10 +148,6 @@ config POWER4
148 depends on PPC64 && PPC_BOOK3S 148 depends on PPC64 && PPC_BOOK3S
149 def_bool y 149 def_bool y
150 150
151config PPC_A2
152 bool
153 depends on PPC_BOOK3E_64
154
155config TUNE_CELL 151config TUNE_CELL
156 bool "Optimize for Cell Broadband Engine" 152 bool "Optimize for Cell Broadband Engine"
157 depends on PPC64 && PPC_BOOK3S 153 depends on PPC64 && PPC_BOOK3S
@@ -280,7 +276,7 @@ config VSX
280 276
281config PPC_ICSWX 277config PPC_ICSWX
282 bool "Support for PowerPC icswx coprocessor instruction" 278 bool "Support for PowerPC icswx coprocessor instruction"
283 depends on POWER4 || PPC_A2 279 depends on POWER4
284 default n 280 default n
285 ---help--- 281 ---help---
286 282
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 879b4a448498..469ef170d218 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -22,4 +22,3 @@ obj-$(CONFIG_PPC_CELL) += cell/
22obj-$(CONFIG_PPC_PS3) += ps3/ 22obj-$(CONFIG_PPC_PS3) += ps3/
23obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/ 23obj-$(CONFIG_EMBEDDED6xx) += embedded6xx/
24obj-$(CONFIG_AMIGAONE) += amigaone/ 24obj-$(CONFIG_AMIGAONE) += amigaone/
25obj-$(CONFIG_PPC_WSP) += wsp/
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 0ba3c9598358..bcfd6f063efa 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -35,7 +35,6 @@
35#define SPUFS_PS_MAP_SIZE 0x20000 35#define SPUFS_PS_MAP_SIZE 0x20000
36#define SPUFS_MFC_MAP_SIZE 0x1000 36#define SPUFS_MFC_MAP_SIZE 0x1000
37#define SPUFS_CNTL_MAP_SIZE 0x1000 37#define SPUFS_CNTL_MAP_SIZE 0x1000
38#define SPUFS_CNTL_MAP_SIZE 0x1000
39#define SPUFS_SIGNAL_MAP_SIZE PAGE_SIZE 38#define SPUFS_SIGNAL_MAP_SIZE PAGE_SIZE
40#define SPUFS_MSS_MAP_SIZE 0x1000 39#define SPUFS_MSS_MAP_SIZE 0x1000
41 40
diff --git a/arch/powerpc/platforms/powernv/Kconfig b/arch/powerpc/platforms/powernv/Kconfig
index c252ee95bddf..45a8ed0585cd 100644
--- a/arch/powerpc/platforms/powernv/Kconfig
+++ b/arch/powerpc/platforms/powernv/Kconfig
@@ -17,6 +17,7 @@ config PPC_POWERNV
17 select CPU_FREQ_GOV_USERSPACE 17 select CPU_FREQ_GOV_USERSPACE
18 select CPU_FREQ_GOV_ONDEMAND 18 select CPU_FREQ_GOV_ONDEMAND
19 select CPU_FREQ_GOV_CONSERVATIVE 19 select CPU_FREQ_GOV_CONSERVATIVE
20 select PPC_DOORBELL
20 default y 21 default y
21 22
22config PPC_POWERNV_RTAS 23config PPC_POWERNV_RTAS
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index 4ad0d345bc96..d55891f89a2c 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -1,9 +1,9 @@
1obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o opal-async.o 1obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o opal-async.o
2obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o 2obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
3obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o 3obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
4obj-y += opal-msglog.o subcore.o subcore-asm.o 4obj-y += opal-msglog.o
5 5
6obj-$(CONFIG_SMP) += smp.o 6obj-$(CONFIG_SMP) += smp.o subcore.o subcore-asm.o
7obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o 7obj-$(CONFIG_PCI) += pci.o pci-p5ioc2.o pci-ioda.o
8obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o 8obj-$(CONFIG_EEH) += eeh-ioda.o eeh-powernv.o
9obj-$(CONFIG_PPC_SCOM) += opal-xscom.o 9obj-$(CONFIG_PPC_SCOM) += opal-xscom.o
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index 753f08e36dfa..8ad0c5b891f4 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -267,7 +267,7 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
267{ 267{
268 s64 ret = 0; 268 s64 ret = 0;
269 u8 fstate; 269 u8 fstate;
270 u16 pcierr; 270 __be16 pcierr;
271 u32 pe_no; 271 u32 pe_no;
272 int result; 272 int result;
273 struct pci_controller *hose = pe->phb; 273 struct pci_controller *hose = pe->phb;
@@ -316,7 +316,7 @@ static int ioda_eeh_get_state(struct eeh_pe *pe)
316 result = 0; 316 result = 0;
317 result &= ~EEH_STATE_RESET_ACTIVE; 317 result &= ~EEH_STATE_RESET_ACTIVE;
318 318
319 if (pcierr != OPAL_EEH_PHB_ERROR) { 319 if (be16_to_cpu(pcierr) != OPAL_EEH_PHB_ERROR) {
320 result |= EEH_STATE_MMIO_ACTIVE; 320 result |= EEH_STATE_MMIO_ACTIVE;
321 result |= EEH_STATE_DMA_ACTIVE; 321 result |= EEH_STATE_DMA_ACTIVE;
322 result |= EEH_STATE_MMIO_ENABLED; 322 result |= EEH_STATE_MMIO_ENABLED;
@@ -705,18 +705,19 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
705{ 705{
706 struct pci_controller *hose; 706 struct pci_controller *hose;
707 struct pnv_phb *phb; 707 struct pnv_phb *phb;
708 struct eeh_pe *phb_pe; 708 struct eeh_pe *phb_pe, *parent_pe;
709 u64 frozen_pe_no; 709 __be64 frozen_pe_no;
710 u16 err_type, severity; 710 __be16 err_type, severity;
711 int active_flags = (EEH_STATE_MMIO_ACTIVE | EEH_STATE_DMA_ACTIVE);
711 long rc; 712 long rc;
712 int ret = EEH_NEXT_ERR_NONE; 713 int state, ret = EEH_NEXT_ERR_NONE;
713 714
714 /* 715 /*
715 * While running here, it's safe to purge the event queue. 716 * While running here, it's safe to purge the event queue.
716 * And we should keep the cached OPAL notifier event sychronized 717 * And we should keep the cached OPAL notifier event sychronized
717 * between the kernel and firmware. 718 * between the kernel and firmware.
718 */ 719 */
719 eeh_remove_event(NULL); 720 eeh_remove_event(NULL, false);
720 opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul); 721 opal_notifier_update_evt(OPAL_EVENT_PCI_ERROR, 0x0ul);
721 722
722 list_for_each_entry(hose, &hose_list, list_node) { 723 list_for_each_entry(hose, &hose_list, list_node) {
@@ -742,8 +743,8 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
742 } 743 }
743 744
744 /* If the PHB doesn't have error, stop processing */ 745 /* If the PHB doesn't have error, stop processing */
745 if (err_type == OPAL_EEH_NO_ERROR || 746 if (be16_to_cpu(err_type) == OPAL_EEH_NO_ERROR ||
746 severity == OPAL_EEH_SEV_NO_ERROR) { 747 be16_to_cpu(severity) == OPAL_EEH_SEV_NO_ERROR) {
747 pr_devel("%s: No error found on PHB#%x\n", 748 pr_devel("%s: No error found on PHB#%x\n",
748 __func__, hose->global_number); 749 __func__, hose->global_number);
749 continue; 750 continue;
@@ -755,14 +756,14 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
755 * specific PHB. 756 * specific PHB.
756 */ 757 */
757 pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n", 758 pr_devel("%s: Error (%d, %d, %llu) on PHB#%x\n",
758 __func__, err_type, severity, 759 __func__, be16_to_cpu(err_type), be16_to_cpu(severity),
759 frozen_pe_no, hose->global_number); 760 be64_to_cpu(frozen_pe_no), hose->global_number);
760 switch (err_type) { 761 switch (be16_to_cpu(err_type)) {
761 case OPAL_EEH_IOC_ERROR: 762 case OPAL_EEH_IOC_ERROR:
762 if (severity == OPAL_EEH_SEV_IOC_DEAD) { 763 if (be16_to_cpu(severity) == OPAL_EEH_SEV_IOC_DEAD) {
763 pr_err("EEH: dead IOC detected\n"); 764 pr_err("EEH: dead IOC detected\n");
764 ret = EEH_NEXT_ERR_DEAD_IOC; 765 ret = EEH_NEXT_ERR_DEAD_IOC;
765 } else if (severity == OPAL_EEH_SEV_INF) { 766 } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
766 pr_info("EEH: IOC informative error " 767 pr_info("EEH: IOC informative error "
767 "detected\n"); 768 "detected\n");
768 ioda_eeh_hub_diag(hose); 769 ioda_eeh_hub_diag(hose);
@@ -771,20 +772,26 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
771 772
772 break; 773 break;
773 case OPAL_EEH_PHB_ERROR: 774 case OPAL_EEH_PHB_ERROR:
774 if (severity == OPAL_EEH_SEV_PHB_DEAD) { 775 if (be16_to_cpu(severity) == OPAL_EEH_SEV_PHB_DEAD) {
775 *pe = phb_pe; 776 *pe = phb_pe;
776 pr_err("EEH: dead PHB#%x detected\n", 777 pr_err("EEH: dead PHB#%x detected, "
777 hose->global_number); 778 "location: %s\n",
779 hose->global_number,
780 eeh_pe_loc_get(phb_pe));
778 ret = EEH_NEXT_ERR_DEAD_PHB; 781 ret = EEH_NEXT_ERR_DEAD_PHB;
779 } else if (severity == OPAL_EEH_SEV_PHB_FENCED) { 782 } else if (be16_to_cpu(severity) ==
783 OPAL_EEH_SEV_PHB_FENCED) {
780 *pe = phb_pe; 784 *pe = phb_pe;
781 pr_err("EEH: fenced PHB#%x detected\n", 785 pr_err("EEH: Fenced PHB#%x detected, "
782 hose->global_number); 786 "location: %s\n",
787 hose->global_number,
788 eeh_pe_loc_get(phb_pe));
783 ret = EEH_NEXT_ERR_FENCED_PHB; 789 ret = EEH_NEXT_ERR_FENCED_PHB;
784 } else if (severity == OPAL_EEH_SEV_INF) { 790 } else if (be16_to_cpu(severity) == OPAL_EEH_SEV_INF) {
785 pr_info("EEH: PHB#%x informative error " 791 pr_info("EEH: PHB#%x informative error "
786 "detected\n", 792 "detected, location: %s\n",
787 hose->global_number); 793 hose->global_number,
794 eeh_pe_loc_get(phb_pe));
788 ioda_eeh_phb_diag(hose); 795 ioda_eeh_phb_diag(hose);
789 ret = EEH_NEXT_ERR_NONE; 796 ret = EEH_NEXT_ERR_NONE;
790 } 797 }
@@ -792,34 +799,33 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
792 break; 799 break;
793 case OPAL_EEH_PE_ERROR: 800 case OPAL_EEH_PE_ERROR:
794 /* 801 /*
795 * If we can't find the corresponding PE, the 802 * If we can't find the corresponding PE, we
796 * PEEV / PEST would be messy. So we force an 803 * just try to unfreeze.
797 * fenced PHB so that it can be recovered.
798 *
799 * If the PE has been marked as isolated, that
800 * should have been removed permanently or in
801 * progress with recovery. We needn't report
802 * it again.
803 */ 804 */
804 if (ioda_eeh_get_pe(hose, frozen_pe_no, pe)) { 805 if (ioda_eeh_get_pe(hose,
805 *pe = phb_pe; 806 be64_to_cpu(frozen_pe_no), pe)) {
806 pr_err("EEH: Escalated fenced PHB#%x " 807 /* Try best to clear it */
807 "detected for PE#%llx\n", 808 pr_info("EEH: Clear non-existing PHB#%x-PE#%llx\n",
808 hose->global_number, 809 hose->global_number, frozen_pe_no);
809 frozen_pe_no); 810 pr_info("EEH: PHB location: %s\n",
810 ret = EEH_NEXT_ERR_FENCED_PHB; 811 eeh_pe_loc_get(phb_pe));
812 opal_pci_eeh_freeze_clear(phb->opal_id, frozen_pe_no,
813 OPAL_EEH_ACTION_CLEAR_FREEZE_ALL);
814 ret = EEH_NEXT_ERR_NONE;
811 } else if ((*pe)->state & EEH_PE_ISOLATED) { 815 } else if ((*pe)->state & EEH_PE_ISOLATED) {
812 ret = EEH_NEXT_ERR_NONE; 816 ret = EEH_NEXT_ERR_NONE;
813 } else { 817 } else {
814 pr_err("EEH: Frozen PE#%x on PHB#%x detected\n", 818 pr_err("EEH: Frozen PE#%x on PHB#%x detected\n",
815 (*pe)->addr, (*pe)->phb->global_number); 819 (*pe)->addr, (*pe)->phb->global_number);
820 pr_err("EEH: PE location: %s, PHB location: %s\n",
821 eeh_pe_loc_get(*pe), eeh_pe_loc_get(phb_pe));
816 ret = EEH_NEXT_ERR_FROZEN_PE; 822 ret = EEH_NEXT_ERR_FROZEN_PE;
817 } 823 }
818 824
819 break; 825 break;
820 default: 826 default:
821 pr_warn("%s: Unexpected error type %d\n", 827 pr_warn("%s: Unexpected error type %d\n",
822 __func__, err_type); 828 __func__, be16_to_cpu(err_type));
823 } 829 }
824 830
825 /* 831 /*
@@ -837,6 +843,31 @@ static int ioda_eeh_next_error(struct eeh_pe **pe)
837 } 843 }
838 844
839 /* 845 /*
846 * We probably have the frozen parent PE out there and
847 * we need have to handle frozen parent PE firstly.
848 */
849 if (ret == EEH_NEXT_ERR_FROZEN_PE) {
850 parent_pe = (*pe)->parent;
851 while (parent_pe) {
852 /* Hit the ceiling ? */
853 if (parent_pe->type & EEH_PE_PHB)
854 break;
855
856 /* Frozen parent PE ? */
857 state = ioda_eeh_get_state(parent_pe);
858 if (state > 0 &&
859 (state & active_flags) != active_flags)
860 *pe = parent_pe;
861
862 /* Next parent level */
863 parent_pe = parent_pe->parent;
864 }
865
866 /* We possibly migrate to another PE */
867 eeh_pe_state_mark(*pe, EEH_PE_ISOLATED);
868 }
869
870 /*
840 * If we have no errors on the specific PHB or only 871 * If we have no errors on the specific PHB or only
841 * informative error there, we continue poking it. 872 * informative error there, we continue poking it.
842 * Otherwise, we need actions to be taken by upper 873 * Otherwise, we need actions to be taken by upper
diff --git a/arch/powerpc/platforms/powernv/opal-msglog.c b/arch/powerpc/platforms/powernv/opal-msglog.c
index 1bb25b952504..44ed78af1a0d 100644
--- a/arch/powerpc/platforms/powernv/opal-msglog.c
+++ b/arch/powerpc/platforms/powernv/opal-msglog.c
@@ -37,7 +37,8 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
37{ 37{
38 struct memcons *mc = bin_attr->private; 38 struct memcons *mc = bin_attr->private;
39 const char *conbuf; 39 const char *conbuf;
40 size_t ret, first_read = 0; 40 ssize_t ret;
41 size_t first_read = 0;
41 uint32_t out_pos, avail; 42 uint32_t out_pos, avail;
42 43
43 if (!mc) 44 if (!mc)
@@ -69,6 +70,9 @@ static ssize_t opal_msglog_read(struct file *file, struct kobject *kobj,
69 to += first_read; 70 to += first_read;
70 count -= first_read; 71 count -= first_read;
71 pos -= avail; 72 pos -= avail;
73
74 if (count <= 0)
75 goto out;
72 } 76 }
73 77
74 /* Sanity check. The firmware should not do this to us. */ 78 /* Sanity check. The firmware should not do this to us. */
diff --git a/arch/powerpc/platforms/powernv/opal-sysparam.c b/arch/powerpc/platforms/powernv/opal-sysparam.c
index d202f9bc3683..9d1acf22a099 100644
--- a/arch/powerpc/platforms/powernv/opal-sysparam.c
+++ b/arch/powerpc/platforms/powernv/opal-sysparam.c
@@ -260,10 +260,10 @@ void __init opal_sys_param_init(void)
260 attr[i].kobj_attr.attr.mode = S_IRUGO; 260 attr[i].kobj_attr.attr.mode = S_IRUGO;
261 break; 261 break;
262 case OPAL_SYSPARAM_WRITE: 262 case OPAL_SYSPARAM_WRITE:
263 attr[i].kobj_attr.attr.mode = S_IWUGO; 263 attr[i].kobj_attr.attr.mode = S_IWUSR;
264 break; 264 break;
265 case OPAL_SYSPARAM_RW: 265 case OPAL_SYSPARAM_RW:
266 attr[i].kobj_attr.attr.mode = S_IRUGO | S_IWUGO; 266 attr[i].kobj_attr.attr.mode = S_IRUGO | S_IWUSR;
267 break; 267 break;
268 default: 268 default:
269 break; 269 break;
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index eefbfcc3fd8c..f91a4e5d872e 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -206,72 +206,91 @@ static void pnv_pci_dump_phb3_diag_data(struct pci_controller *hose,
206 206
207 data = (struct OpalIoPhb3ErrorData*)common; 207 data = (struct OpalIoPhb3ErrorData*)common;
208 pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n", 208 pr_info("PHB3 PHB#%d Diag-data (Version: %d)\n",
209 hose->global_number, common->version); 209 hose->global_number, be32_to_cpu(common->version));
210 if (data->brdgCtl) 210 if (data->brdgCtl)
211 pr_info("brdgCtl: %08x\n", 211 pr_info("brdgCtl: %08x\n",
212 data->brdgCtl); 212 be32_to_cpu(data->brdgCtl));
213 if (data->portStatusReg || data->rootCmplxStatus || 213 if (data->portStatusReg || data->rootCmplxStatus ||
214 data->busAgentStatus) 214 data->busAgentStatus)
215 pr_info("UtlSts: %08x %08x %08x\n", 215 pr_info("UtlSts: %08x %08x %08x\n",
216 data->portStatusReg, data->rootCmplxStatus, 216 be32_to_cpu(data->portStatusReg),
217 data->busAgentStatus); 217 be32_to_cpu(data->rootCmplxStatus),
218 be32_to_cpu(data->busAgentStatus));
218 if (data->deviceStatus || data->slotStatus || 219 if (data->deviceStatus || data->slotStatus ||
219 data->linkStatus || data->devCmdStatus || 220 data->linkStatus || data->devCmdStatus ||
220 data->devSecStatus) 221 data->devSecStatus)
221 pr_info("RootSts: %08x %08x %08x %08x %08x\n", 222 pr_info("RootSts: %08x %08x %08x %08x %08x\n",
222 data->deviceStatus, data->slotStatus, 223 be32_to_cpu(data->deviceStatus),
223 data->linkStatus, data->devCmdStatus, 224 be32_to_cpu(data->slotStatus),
224 data->devSecStatus); 225 be32_to_cpu(data->linkStatus),
226 be32_to_cpu(data->devCmdStatus),
227 be32_to_cpu(data->devSecStatus));
225 if (data->rootErrorStatus || data->uncorrErrorStatus || 228 if (data->rootErrorStatus || data->uncorrErrorStatus ||
226 data->corrErrorStatus) 229 data->corrErrorStatus)
227 pr_info("RootErrSts: %08x %08x %08x\n", 230 pr_info("RootErrSts: %08x %08x %08x\n",
228 data->rootErrorStatus, data->uncorrErrorStatus, 231 be32_to_cpu(data->rootErrorStatus),
229 data->corrErrorStatus); 232 be32_to_cpu(data->uncorrErrorStatus),
233 be32_to_cpu(data->corrErrorStatus));
230 if (data->tlpHdr1 || data->tlpHdr2 || 234 if (data->tlpHdr1 || data->tlpHdr2 ||
231 data->tlpHdr3 || data->tlpHdr4) 235 data->tlpHdr3 || data->tlpHdr4)
232 pr_info("RootErrLog: %08x %08x %08x %08x\n", 236 pr_info("RootErrLog: %08x %08x %08x %08x\n",
233 data->tlpHdr1, data->tlpHdr2, 237 be32_to_cpu(data->tlpHdr1),
234 data->tlpHdr3, data->tlpHdr4); 238 be32_to_cpu(data->tlpHdr2),
239 be32_to_cpu(data->tlpHdr3),
240 be32_to_cpu(data->tlpHdr4));
235 if (data->sourceId || data->errorClass || 241 if (data->sourceId || data->errorClass ||
236 data->correlator) 242 data->correlator)
237 pr_info("RootErrLog1: %08x %016llx %016llx\n", 243 pr_info("RootErrLog1: %08x %016llx %016llx\n",
238 data->sourceId, data->errorClass, 244 be32_to_cpu(data->sourceId),
239 data->correlator); 245 be64_to_cpu(data->errorClass),
246 be64_to_cpu(data->correlator));
240 if (data->nFir) 247 if (data->nFir)
241 pr_info("nFir: %016llx %016llx %016llx\n", 248 pr_info("nFir: %016llx %016llx %016llx\n",
242 data->nFir, data->nFirMask, 249 be64_to_cpu(data->nFir),
243 data->nFirWOF); 250 be64_to_cpu(data->nFirMask),
251 be64_to_cpu(data->nFirWOF));
244 if (data->phbPlssr || data->phbCsr) 252 if (data->phbPlssr || data->phbCsr)
245 pr_info("PhbSts: %016llx %016llx\n", 253 pr_info("PhbSts: %016llx %016llx\n",
246 data->phbPlssr, data->phbCsr); 254 be64_to_cpu(data->phbPlssr),
255 be64_to_cpu(data->phbCsr));
247 if (data->lemFir) 256 if (data->lemFir)
248 pr_info("Lem: %016llx %016llx %016llx\n", 257 pr_info("Lem: %016llx %016llx %016llx\n",
249 data->lemFir, data->lemErrorMask, 258 be64_to_cpu(data->lemFir),
250 data->lemWOF); 259 be64_to_cpu(data->lemErrorMask),
260 be64_to_cpu(data->lemWOF));
251 if (data->phbErrorStatus) 261 if (data->phbErrorStatus)
252 pr_info("PhbErr: %016llx %016llx %016llx %016llx\n", 262 pr_info("PhbErr: %016llx %016llx %016llx %016llx\n",
253 data->phbErrorStatus, data->phbFirstErrorStatus, 263 be64_to_cpu(data->phbErrorStatus),
254 data->phbErrorLog0, data->phbErrorLog1); 264 be64_to_cpu(data->phbFirstErrorStatus),
265 be64_to_cpu(data->phbErrorLog0),
266 be64_to_cpu(data->phbErrorLog1));
255 if (data->mmioErrorStatus) 267 if (data->mmioErrorStatus)
256 pr_info("OutErr: %016llx %016llx %016llx %016llx\n", 268 pr_info("OutErr: %016llx %016llx %016llx %016llx\n",
257 data->mmioErrorStatus, data->mmioFirstErrorStatus, 269 be64_to_cpu(data->mmioErrorStatus),
258 data->mmioErrorLog0, data->mmioErrorLog1); 270 be64_to_cpu(data->mmioFirstErrorStatus),
271 be64_to_cpu(data->mmioErrorLog0),
272 be64_to_cpu(data->mmioErrorLog1));
259 if (data->dma0ErrorStatus) 273 if (data->dma0ErrorStatus)
260 pr_info("InAErr: %016llx %016llx %016llx %016llx\n", 274 pr_info("InAErr: %016llx %016llx %016llx %016llx\n",
261 data->dma0ErrorStatus, data->dma0FirstErrorStatus, 275 be64_to_cpu(data->dma0ErrorStatus),
262 data->dma0ErrorLog0, data->dma0ErrorLog1); 276 be64_to_cpu(data->dma0FirstErrorStatus),
277 be64_to_cpu(data->dma0ErrorLog0),
278 be64_to_cpu(data->dma0ErrorLog1));
263 if (data->dma1ErrorStatus) 279 if (data->dma1ErrorStatus)
264 pr_info("InBErr: %016llx %016llx %016llx %016llx\n", 280 pr_info("InBErr: %016llx %016llx %016llx %016llx\n",
265 data->dma1ErrorStatus, data->dma1FirstErrorStatus, 281 be64_to_cpu(data->dma1ErrorStatus),
266 data->dma1ErrorLog0, data->dma1ErrorLog1); 282 be64_to_cpu(data->dma1FirstErrorStatus),
283 be64_to_cpu(data->dma1ErrorLog0),
284 be64_to_cpu(data->dma1ErrorLog1));
267 285
268 for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) { 286 for (i = 0; i < OPAL_PHB3_NUM_PEST_REGS; i++) {
269 if ((data->pestA[i] >> 63) == 0 && 287 if ((be64_to_cpu(data->pestA[i]) >> 63) == 0 &&
270 (data->pestB[i] >> 63) == 0) 288 (be64_to_cpu(data->pestB[i]) >> 63) == 0)
271 continue; 289 continue;
272 290
273 pr_info("PE[%3d] A/B: %016llx %016llx\n", 291 pr_info("PE[%3d] A/B: %016llx %016llx\n",
274 i, data->pestA[i], data->pestB[i]); 292 i, be64_to_cpu(data->pestA[i]),
293 be64_to_cpu(data->pestB[i]));
275 } 294 }
276} 295}
277 296
@@ -284,7 +303,7 @@ void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
284 return; 303 return;
285 304
286 common = (struct OpalIoPhbErrorCommon *)log_buff; 305 common = (struct OpalIoPhbErrorCommon *)log_buff;
287 switch (common->ioType) { 306 switch (be32_to_cpu(common->ioType)) {
288 case OPAL_PHB_ERROR_DATA_TYPE_P7IOC: 307 case OPAL_PHB_ERROR_DATA_TYPE_P7IOC:
289 pnv_pci_dump_p7ioc_diag_data(hose, common); 308 pnv_pci_dump_p7ioc_diag_data(hose, common);
290 break; 309 break;
@@ -293,7 +312,7 @@ void pnv_pci_dump_phb_diag_data(struct pci_controller *hose,
293 break; 312 break;
294 default: 313 default:
295 pr_warn("%s: Unrecognized ioType %d\n", 314 pr_warn("%s: Unrecognized ioType %d\n",
296 __func__, common->ioType); 315 __func__, be32_to_cpu(common->ioType));
297 } 316 }
298} 317}
299 318
diff --git a/arch/powerpc/platforms/powernv/setup.c b/arch/powerpc/platforms/powernv/setup.c
index 8c16a5f96728..d9b88fa7c5a3 100644
--- a/arch/powerpc/platforms/powernv/setup.c
+++ b/arch/powerpc/platforms/powernv/setup.c
@@ -35,11 +35,14 @@
35#include <asm/rtas.h> 35#include <asm/rtas.h>
36#include <asm/opal.h> 36#include <asm/opal.h>
37#include <asm/kexec.h> 37#include <asm/kexec.h>
38#include <asm/smp.h>
38 39
39#include "powernv.h" 40#include "powernv.h"
40 41
41static void __init pnv_setup_arch(void) 42static void __init pnv_setup_arch(void)
42{ 43{
44 set_arch_panic_timeout(10, ARCH_PANIC_TIMEOUT);
45
43 /* Initialize SMP */ 46 /* Initialize SMP */
44 pnv_smp_init(); 47 pnv_smp_init();
45 48
diff --git a/arch/powerpc/platforms/powernv/smp.c b/arch/powerpc/platforms/powernv/smp.c
index 0062a43a2e0d..5fcfcf44e3a9 100644
--- a/arch/powerpc/platforms/powernv/smp.c
+++ b/arch/powerpc/platforms/powernv/smp.c
@@ -32,6 +32,7 @@
32#include <asm/opal.h> 32#include <asm/opal.h>
33#include <asm/runlatch.h> 33#include <asm/runlatch.h>
34#include <asm/code-patching.h> 34#include <asm/code-patching.h>
35#include <asm/dbell.h>
35 36
36#include "powernv.h" 37#include "powernv.h"
37 38
@@ -46,6 +47,11 @@ static void pnv_smp_setup_cpu(int cpu)
46{ 47{
47 if (cpu != boot_cpuid) 48 if (cpu != boot_cpuid)
48 xics_setup_cpu(); 49 xics_setup_cpu();
50
51#ifdef CONFIG_PPC_DOORBELL
52 if (cpu_has_feature(CPU_FTR_DBELL))
53 doorbell_setup_this_cpu();
54#endif
49} 55}
50 56
51int pnv_smp_kick_cpu(int nr) 57int pnv_smp_kick_cpu(int nr)
diff --git a/arch/powerpc/platforms/pseries/Kconfig b/arch/powerpc/platforms/pseries/Kconfig
index 2cb8b776c84a..756b482f819a 100644
--- a/arch/powerpc/platforms/pseries/Kconfig
+++ b/arch/powerpc/platforms/pseries/Kconfig
@@ -21,6 +21,7 @@ config PPC_PSERIES
21 select HAVE_CONTEXT_TRACKING 21 select HAVE_CONTEXT_TRACKING
22 select HOTPLUG_CPU if SMP 22 select HOTPLUG_CPU if SMP
23 select ARCH_RANDOM 23 select ARCH_RANDOM
24 select PPC_DOORBELL
24 default y 25 default y
25 26
26config PPC_SPLPAR 27config PPC_SPLPAR
diff --git a/arch/powerpc/platforms/wsp/Kconfig b/arch/powerpc/platforms/wsp/Kconfig
deleted file mode 100644
index 422a175b10ee..000000000000
--- a/arch/powerpc/platforms/wsp/Kconfig
+++ /dev/null
@@ -1,30 +0,0 @@
1config PPC_WSP
2 bool
3 select PPC_A2
4 select GENERIC_TBSYNC
5 select PPC_ICSWX
6 select PPC_SCOM
7 select PPC_XICS
8 select PPC_ICP_NATIVE
9 select PCI
10 select PPC_IO_WORKAROUNDS if PCI
11 select PPC_INDIRECT_PIO if PCI
12 default n
13
14menu "WSP platform selection"
15 depends on PPC_BOOK3E_64
16
17config PPC_PSR2
18 bool "PowerEN System Reference Platform 2"
19 select EPAPR_BOOT
20 select PPC_WSP
21 default y
22
23config PPC_CHROMA
24 bool "PowerEN PCIe Chroma Card"
25 select EPAPR_BOOT
26 select PPC_WSP
27 select OF_DYNAMIC
28 default y
29
30endmenu
diff --git a/arch/powerpc/platforms/wsp/Makefile b/arch/powerpc/platforms/wsp/Makefile
deleted file mode 100644
index 162fc60125a2..000000000000
--- a/arch/powerpc/platforms/wsp/Makefile
+++ /dev/null
@@ -1,10 +0,0 @@
1ccflags-y += $(NO_MINIMAL_TOC)
2
3obj-y += setup.o ics.o wsp.o
4obj-$(CONFIG_PPC_PSR2) += psr2.o
5obj-$(CONFIG_PPC_CHROMA) += chroma.o h8.o
6obj-$(CONFIG_PPC_WSP) += opb_pic.o
7obj-$(CONFIG_PPC_WSP) += scom_wsp.o
8obj-$(CONFIG_SMP) += smp.o scom_smp.o
9obj-$(CONFIG_PCI) += wsp_pci.o
10obj-$(CONFIG_PCI_MSI) += msi.o
diff --git a/arch/powerpc/platforms/wsp/chroma.c b/arch/powerpc/platforms/wsp/chroma.c
deleted file mode 100644
index aaa46b353715..000000000000
--- a/arch/powerpc/platforms/wsp/chroma.c
+++ /dev/null
@@ -1,56 +0,0 @@
1/*
2 * Copyright 2008-2011, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/delay.h>
11#include <linux/init.h>
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/mm.h>
15#include <linux/of.h>
16#include <linux/smp.h>
17#include <linux/time.h>
18#include <linux/of_fdt.h>
19
20#include <asm/machdep.h>
21#include <asm/udbg.h>
22
23#include "ics.h"
24#include "wsp.h"
25
26void __init chroma_setup_arch(void)
27{
28 wsp_setup_arch();
29 wsp_setup_h8();
30
31}
32
33static int __init chroma_probe(void)
34{
35 unsigned long root = of_get_flat_dt_root();
36
37 if (!of_flat_dt_is_compatible(root, "ibm,wsp-chroma"))
38 return 0;
39
40 return 1;
41}
42
43define_machine(chroma_md) {
44 .name = "Chroma PCIe",
45 .probe = chroma_probe,
46 .setup_arch = chroma_setup_arch,
47 .restart = wsp_h8_restart,
48 .power_off = wsp_h8_power_off,
49 .halt = wsp_halt,
50 .calibrate_decr = generic_calibrate_decr,
51 .init_IRQ = wsp_setup_irq,
52 .progress = udbg_progress,
53 .power_save = book3e_idle,
54};
55
56machine_arch_initcall(chroma_md, wsp_probe_devices);
diff --git a/arch/powerpc/platforms/wsp/h8.c b/arch/powerpc/platforms/wsp/h8.c
deleted file mode 100644
index a3c87f395750..000000000000
--- a/arch/powerpc/platforms/wsp/h8.c
+++ /dev/null
@@ -1,135 +0,0 @@
1/*
2 * Copyright 2008-2011, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/of.h>
12#include <linux/io.h>
13#include <linux/of_address.h>
14
15#include "wsp.h"
16
17/*
18 * The UART connection to the H8 is over ttyS1 which is just a 16550.
19 * We assume that FW has it setup right and no one messes with it.
20 */
21
22
23static u8 __iomem *h8;
24
25#define RBR 0 /* Receiver Buffer Register */
26#define THR 0 /* Transmitter Holding Register */
27#define LSR 5 /* Line Status Register */
28#define LSR_DR 0x01 /* LSR value for Data-Ready */
29#define LSR_THRE 0x20 /* LSR value for Transmitter-Holding-Register-Empty */
30static void wsp_h8_putc(int c)
31{
32 u8 lsr;
33
34 do {
35 lsr = readb(h8 + LSR);
36 } while ((lsr & LSR_THRE) != LSR_THRE);
37 writeb(c, h8 + THR);
38}
39
40static int wsp_h8_getc(void)
41{
42 u8 lsr;
43
44 do {
45 lsr = readb(h8 + LSR);
46 } while ((lsr & LSR_DR) != LSR_DR);
47
48 return readb(h8 + RBR);
49}
50
51static void wsp_h8_puts(const char *s, int sz)
52{
53 int i;
54
55 for (i = 0; i < sz; i++) {
56 wsp_h8_putc(s[i]);
57
58 /* no flow control so wait for echo */
59 wsp_h8_getc();
60 }
61 wsp_h8_putc('\r');
62 wsp_h8_putc('\n');
63}
64
65static void wsp_h8_terminal_cmd(const char *cmd, int sz)
66{
67 hard_irq_disable();
68 wsp_h8_puts(cmd, sz);
69 /* should never return, but just in case */
70 for (;;)
71 continue;
72}
73
74
75void wsp_h8_restart(char *cmd)
76{
77 static const char restart[] = "warm-reset";
78
79 (void)cmd;
80 wsp_h8_terminal_cmd(restart, sizeof(restart) - 1);
81}
82
83void wsp_h8_power_off(void)
84{
85 static const char off[] = "power-off";
86
87 wsp_h8_terminal_cmd(off, sizeof(off) - 1);
88}
89
90static void __iomem *wsp_h8_getaddr(void)
91{
92 struct device_node *aliases;
93 struct device_node *uart;
94 struct property *path;
95 void __iomem *va = NULL;
96
97 /*
98 * there is nothing in the devtree to tell us which is mapped
99 * to the H8, but se know it is the second serial port.
100 */
101
102 aliases = of_find_node_by_path("/aliases");
103 if (aliases == NULL)
104 return NULL;
105
106 path = of_find_property(aliases, "serial1", NULL);
107 if (path == NULL)
108 goto out;
109
110 uart = of_find_node_by_path(path->value);
111 if (uart == NULL)
112 goto out;
113
114 va = of_iomap(uart, 0);
115
116 /* remove it so no one messes with it */
117 of_detach_node(uart);
118 of_node_put(uart);
119
120out:
121 of_node_put(aliases);
122
123 return va;
124}
125
126void __init wsp_setup_h8(void)
127{
128 h8 = wsp_h8_getaddr();
129
130 /* Devtree change? lets hard map it anyway */
131 if (h8 == NULL) {
132 pr_warn("UART to H8 could not be found");
133 h8 = ioremap(0xffc0008000ULL, 0x100);
134 }
135}
diff --git a/arch/powerpc/platforms/wsp/ics.c b/arch/powerpc/platforms/wsp/ics.c
deleted file mode 100644
index 9cd92e645028..000000000000
--- a/arch/powerpc/platforms/wsp/ics.c
+++ /dev/null
@@ -1,762 +0,0 @@
1/*
2 * Copyright 2008-2011 IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/cpu.h>
11#include <linux/init.h>
12#include <linux/interrupt.h>
13#include <linux/irq.h>
14#include <linux/kernel.h>
15#include <linux/msi.h>
16#include <linux/of.h>
17#include <linux/slab.h>
18#include <linux/smp.h>
19#include <linux/spinlock.h>
20#include <linux/types.h>
21#include <linux/of_address.h>
22#include <linux/of_irq.h>
23
24#include <asm/io.h>
25#include <asm/irq.h>
26#include <asm/xics.h>
27
28#include "wsp.h"
29#include "ics.h"
30
31
32/* WSP ICS */
33
34struct wsp_ics {
35 struct ics ics;
36 struct device_node *dn;
37 void __iomem *regs;
38 spinlock_t lock;
39 unsigned long *bitmap;
40 u32 chip_id;
41 u32 lsi_base;
42 u32 lsi_count;
43 u64 hwirq_start;
44 u64 count;
45#ifdef CONFIG_SMP
46 int *hwirq_cpu_map;
47#endif
48};
49
50#define to_wsp_ics(ics) container_of(ics, struct wsp_ics, ics)
51
52#define INT_SRC_LAYER_BUID_REG(base) ((base) + 0x00)
53#define IODA_TBL_ADDR_REG(base) ((base) + 0x18)
54#define IODA_TBL_DATA_REG(base) ((base) + 0x20)
55#define XIVE_UPDATE_REG(base) ((base) + 0x28)
56#define ICS_INT_CAPS_REG(base) ((base) + 0x30)
57
58#define TBL_AUTO_INCREMENT ((1UL << 63) | (1UL << 15))
59#define TBL_SELECT_XIST (1UL << 48)
60#define TBL_SELECT_XIVT (1UL << 49)
61
62#define IODA_IRQ(irq) ((irq) & (0x7FFULL)) /* HRM 5.1.3.4 */
63
64#define XIST_REQUIRED 0x8
65#define XIST_REJECTED 0x4
66#define XIST_PRESENTED 0x2
67#define XIST_PENDING 0x1
68
69#define XIVE_SERVER_SHIFT 42
70#define XIVE_SERVER_MASK 0xFFFFULL
71#define XIVE_PRIORITY_MASK 0xFFULL
72#define XIVE_PRIORITY_SHIFT 32
73#define XIVE_WRITE_ENABLE (1ULL << 63)
74
/*
 * The docs refer to a 6 bit field called ChipID, which consists of a
 * 3 bit NodeID and a 3 bit ChipID. On WSP the ChipID is always zero
 * so we ignore it, and everywhere we use "chip id" in this code we
 * mean the NodeID.
 */
81#define WSP_ICS_CHIP_SHIFT 17
82
83
84static struct wsp_ics *ics_list;
85static int num_ics;
86
87/* ICS Source controller accessors */
88
/*
 * Read the XIVE (eXternal Interrupt Vector Entry) for @irq from the
 * IODA table.  The address/data register pair is shared hardware
 * state, so the select + read must be done under ics->lock.
 */
static u64 wsp_ics_get_xive(struct wsp_ics *ics, unsigned int irq)
{
	unsigned long flags;
	u64 xive;

	spin_lock_irqsave(&ics->lock, flags);
	/* Select the XIVT entry for this irq, then read it back */
	out_be64(IODA_TBL_ADDR_REG(ics->regs), TBL_SELECT_XIVT | IODA_IRQ(irq));
	xive = in_be64(IODA_TBL_DATA_REG(ics->regs));
	spin_unlock_irqrestore(&ics->lock, flags);

	return xive;
}
101
/*
 * Write @xive back for @irq via the XIVE update register.  The irq
 * number is folded into the low bits and the write-enable bit is
 * set; a single register write, so no locking is needed here.
 */
static void wsp_ics_set_xive(struct wsp_ics *ics, unsigned int irq, u64 xive)
{
	xive &= ~XIVE_ADDR_MASK;
	xive |= (irq & XIVE_ADDR_MASK);
	xive |= XIVE_WRITE_ENABLE;

	out_be64(XIVE_UPDATE_REG(ics->regs), xive);
}
110
111static u64 xive_set_server(u64 xive, unsigned int server)
112{
113 u64 mask = ~(XIVE_SERVER_MASK << XIVE_SERVER_SHIFT);
114
115 xive &= mask;
116 xive |= (server & XIVE_SERVER_MASK) << XIVE_SERVER_SHIFT;
117
118 return xive;
119}
120
121static u64 xive_set_priority(u64 xive, unsigned int priority)
122{
123 u64 mask = ~(XIVE_PRIORITY_MASK << XIVE_PRIORITY_SHIFT);
124
125 xive &= mask;
126 xive |= (priority & XIVE_PRIORITY_MASK) << XIVE_PRIORITY_SHIFT;
127
128 return xive;
129}
130
131
132#ifdef CONFIG_SMP
/* Find logical CPUs within mask on a given chip and store result in ret */
void cpus_on_chip(int chip_id, cpumask_t *mask, cpumask_t *ret)
{
	int cpu, chip;
	struct device_node *cpu_dn, *dn;
	const u32 *prop;

	cpumask_clear(ret);
	for_each_cpu(cpu, mask) {
		cpu_dn = of_get_cpu_node(cpu, NULL);
		if (!cpu_dn)
			continue;

		/* "at-node" is a phandle to the node this cpu sits on */
		prop = of_get_property(cpu_dn, "at-node", NULL);
		if (!prop) {
			of_node_put(cpu_dn);
			continue;
		}

		dn = of_find_node_by_phandle(*prop);
		of_node_put(cpu_dn);

		/* NOTE(review): dn may be NULL if the phandle is stale;
		 * wsp_get_chip_id() is presumed to cope -- confirm */
		chip = wsp_get_chip_id(dn);
		if (chip == chip_id)
			cpumask_set_cpu(cpu, ret);

		of_node_put(dn);
	}
}
162
/*
 * Store a suitable CPU to handle a hwirq in the ics->hwirq_cpu_map cache.
 *
 * Returns 0 on success, -ENOMEM if the map or temporary cpumasks
 * could not be allocated, -1 if @affinity contains no usable cpu.
 * On any failure the cached entry falls back to the first online cpu.
 */
static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
			   const cpumask_t *affinity)
{
	cpumask_var_t avail, newmask;
	int ret = -ENOMEM, cpu, cpu_rover = 0, target;
	int index = hwirq - ics->hwirq_start;
	unsigned int nodeid;

	BUG_ON(index < 0 || index >= ics->count);

	/* No map allocated: balancing is disabled (see alloc_irq_map) */
	if (!ics->hwirq_cpu_map)
		return -ENOMEM;

	if (!distribute_irqs) {
		ics->hwirq_cpu_map[hwirq - ics->hwirq_start] = xics_default_server;
		return 0;
	}

	/* Allocate needed CPU masks */
	if (!alloc_cpumask_var(&avail, GFP_KERNEL))
		goto ret;
	if (!alloc_cpumask_var(&newmask, GFP_KERNEL))
		goto freeavail;

	/* Find PBus attached to the source of this IRQ */
	nodeid = (hwirq >> WSP_ICS_CHIP_SHIFT) & 0x3; /* 12:14 */

	/* Find CPUs that could handle this IRQ */
	if (affinity)
		cpumask_and(avail, cpu_online_mask, affinity);
	else
		cpumask_copy(avail, cpu_online_mask);

	/* Narrow selection down to logical CPUs on the same chip */
	cpus_on_chip(nodeid, avail, newmask);

	/* Ensure we haven't narrowed it down to 0 */
	if (unlikely(cpumask_empty(newmask))) {
		if (unlikely(cpumask_empty(avail))) {
			ret = -1;
			goto out;
		}
		/* Nothing on this chip: fall back to any available cpu */
		cpumask_copy(newmask, avail);
	}

	/* Choose a CPU out of those we narrowed it down to in round robin */
	target = hwirq % cpumask_weight(newmask);
	for_each_cpu(cpu, newmask) {
		if (cpu_rover++ >= target) {
			ics->hwirq_cpu_map[index] = get_hard_smp_processor_id(cpu);
			ret = 0;
			goto out;
		}
	}

	/* Shouldn't happen */
	WARN_ON(1);

out:
	free_cpumask_var(newmask);
freeavail:
	free_cpumask_var(avail);
ret:
	if (ret < 0) {
		/* Fall back to the first online cpu and report it */
		ics->hwirq_cpu_map[index] = cpumask_first(cpu_online_mask);
		pr_warning("Error, falling hwirq 0x%x routing back to CPU %i\n",
			   hwirq, ics->hwirq_cpu_map[index]);
	}
	return ret;
}
234
235static void alloc_irq_map(struct wsp_ics *ics)
236{
237 int i;
238
239 ics->hwirq_cpu_map = kmalloc(sizeof(int) * ics->count, GFP_KERNEL);
240 if (!ics->hwirq_cpu_map) {
241 pr_warning("Allocate hwirq_cpu_map failed, "
242 "IRQ balancing disabled\n");
243 return;
244 }
245
246 for (i=0; i < ics->count; i++)
247 ics->hwirq_cpu_map[i] = xics_default_server;
248}
249
250static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
251{
252 int index = hwirq - ics->hwirq_start;
253
254 BUG_ON(index < 0 || index >= ics->count);
255
256 if (!ics->hwirq_cpu_map)
257 return xics_default_server;
258
259 return ics->hwirq_cpu_map[index];
260}
#else /* !CONFIG_SMP */
/* UP: nothing to cache, everything goes to the one server. */
static int cache_hwirq_map(struct wsp_ics *ics, unsigned int hwirq,
			   const cpumask_t *affinity)
{
	return 0;
}

/* UP: all interrupts are delivered to the default (only) server. */
static int get_irq_server(struct wsp_ics *ics, unsigned int hwirq)
{
	return xics_default_server;
}

/* UP: no irq -> cpu map to allocate. */
static void alloc_irq_map(struct wsp_ics *ics) { }
#endif
275
/*
 * irq_chip unmask hook: route the source to its cached server at the
 * normal (DEFAULT_PRIORITY) priority.  IPIs and the spurious vector
 * are not backed by this ICS and are left alone.
 */
static void wsp_chip_unmask_irq(struct irq_data *d)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct wsp_ics *ics;
	int server;
	u64 xive;

	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return;

	ics = d->chip_data;
	if (WARN_ON(!ics))
		return;

	server = get_irq_server(ics, hw_irq);

	/* Read-modify-write the XIVE with the new server and priority */
	xive = wsp_ics_get_xive(ics, hw_irq);
	xive = xive_set_server(xive, server);
	xive = xive_set_priority(xive, DEFAULT_PRIORITY);
	wsp_ics_set_xive(ics, hw_irq, xive);
}
297
/* irq_chip startup hook: unmasking is all that is needed to start. */
static unsigned int wsp_chip_startup(struct irq_data *d)
{
	/* unmask it */
	wsp_chip_unmask_irq(d);
	return 0;
}
304
/*
 * "Mask" a source by pointing its XIVE at the default server with
 * the lowest priority -- there is no true mask bit in the XIVT.
 *
 * NOTE(review): unlike the irq_chip hooks this only filters out
 * XICS_IPI, not XICS_IRQ_SPURIOUS; callers pass raw hw numbers, so
 * this looks intentional, but worth confirming.
 */
static void wsp_mask_real_irq(unsigned int hw_irq, struct wsp_ics *ics)
{
	u64 xive;

	if (hw_irq == XICS_IPI)
		return;

	if (WARN_ON(!ics))
		return;
	xive = wsp_ics_get_xive(ics, hw_irq);
	xive = xive_set_server(xive, xics_default_server);
	xive = xive_set_priority(xive, LOWEST_PRIORITY);
	wsp_ics_set_xive(ics, hw_irq, xive);
}
319
320static void wsp_chip_mask_irq(struct irq_data *d)
321{
322 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
323 struct wsp_ics *ics = d->chip_data;
324
325 if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
326 return;
327
328 wsp_mask_real_irq(hw_irq, ics);
329}
330
/*
 * irq_chip set_affinity hook.  Picks one cpu out of @cpumask via
 * cache_hwirq_map() and re-targets the XIVE at it.  Returns -1 on
 * any failure, IRQ_SET_MASK_OK on success.
 */
static int wsp_chip_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask, bool force)
{
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	struct wsp_ics *ics;
	int ret;
	u64 xive;

	if (hw_irq == XICS_IPI || hw_irq == XICS_IRQ_SPURIOUS)
		return -1;

	ics = d->chip_data;
	if (WARN_ON(!ics))
		return -1;
	xive = wsp_ics_get_xive(ics, hw_irq);

	/*
	 * For the moment only implement delivery to all cpus or one cpu.
	 * Get current irq_server for the given irq
	 */
	ret = cache_hwirq_map(ics, hw_irq, cpumask);
	if (ret == -1) {
		char cpulist[128];
		cpumask_scnprintf(cpulist, sizeof(cpulist), cpumask);
		pr_warning("%s: No online cpus in the mask %s for irq %d\n",
			   __func__, cpulist, d->irq);
		return -1;
	} else if (ret == -ENOMEM) {
		pr_warning("%s: Out of memory\n", __func__);
		return -1;
	}

	/* Push the newly cached server into the hardware */
	xive = xive_set_server(xive, get_irq_server(ics, hw_irq));
	wsp_ics_set_xive(ics, hw_irq, xive);

	return IRQ_SET_MASK_OK;
}
368
/*
 * Note: .irq_eoi is deliberately not set here; it is patched in at
 * runtime by wsp_init_irq() once the ICP backend is known.
 */
static struct irq_chip wsp_irq_chip = {
	.name = "WSP ICS",
	.irq_startup = wsp_chip_startup,
	.irq_mask = wsp_chip_mask_irq,
	.irq_unmask = wsp_chip_unmask_irq,
	.irq_set_affinity = wsp_chip_set_affinity
};
376
/*
 * ics hook: all ICSs in the system implement a global irq number
 * space, so match against them all.
 */
static int wsp_ics_host_match(struct ics *ics, struct device_node *dn)
{
	return of_device_is_compatible(dn, "ibm,ppc-xics");
}
383
384static int wsp_ics_match_hwirq(struct wsp_ics *wsp_ics, unsigned int hwirq)
385{
386 if (hwirq >= wsp_ics->hwirq_start &&
387 hwirq < wsp_ics->hwirq_start + wsp_ics->count)
388 return 1;
389
390 return 0;
391}
392
/*
 * ics hook: called when a virq is mapped onto one of our hw irqs.
 * Attaches the WSP irq_chip and marks the hw irq as in-use in the
 * allocation bitmap.  Returns -ENOENT if the hw irq is not ours.
 */
static int wsp_ics_map(struct ics *ics, unsigned int virq)
{
	struct wsp_ics *wsp_ics = to_wsp_ics(ics);
	unsigned int hw_irq = virq_to_hw(virq);
	unsigned long flags;

	if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
		return -ENOENT;

	irq_set_chip_and_handler(virq, &wsp_irq_chip, handle_fasteoi_irq);

	irq_set_chip_data(virq, wsp_ics);

	/* Reserve the single bit (order 0) for this hw irq; the bitmap
	 * is indexed relative to hwirq_start */
	spin_lock_irqsave(&wsp_ics->lock, flags);
	bitmap_allocate_region(wsp_ics->bitmap, hw_irq - wsp_ics->hwirq_start, 0);
	spin_unlock_irqrestore(&wsp_ics->lock, flags);

	return 0;
}
412
/* ics hook: mask a source that fired without a Linux mapping. */
static void wsp_ics_mask_unknown(struct ics *ics, unsigned long hw_irq)
{
	struct wsp_ics *wsp_ics = to_wsp_ics(ics);

	if (wsp_ics_match_hwirq(wsp_ics, hw_irq)) {
		pr_err("%s: IRQ %lu (real) is invalid, disabling it.\n", __func__, hw_irq);
		wsp_mask_real_irq(hw_irq, wsp_ics);
	}
}
423
424static long wsp_ics_get_server(struct ics *ics, unsigned long hw_irq)
425{
426 struct wsp_ics *wsp_ics = to_wsp_ics(ics);
427
428 if (!wsp_ics_match_hwirq(wsp_ics, hw_irq))
429 return -ENOENT;
430
431 return get_irq_server(wsp_ics, hw_irq);
432}
433
434/* HW Number allocation API */
435
436static struct wsp_ics *wsp_ics_find_dn_ics(struct device_node *dn)
437{
438 struct device_node *iparent;
439 int i;
440
441 iparent = of_irq_find_parent(dn);
442 if (!iparent) {
443 pr_err("wsp_ics: Failed to find interrupt parent!\n");
444 return NULL;
445 }
446
447 for(i = 0; i < num_ics; i++) {
448 if(ics_list[i].dn == iparent)
449 break;
450 }
451
452 if (i >= num_ics) {
453 pr_err("wsp_ics: Unable to find parent bitmap!\n");
454 return NULL;
455 }
456
457 return &ics_list[i];
458}
459
/*
 * Allocate @num contiguous hw irqs from the ICS that is the
 * interrupt parent of @dn.  Returns the first hw irq number
 * (bitmap offset + ics->hwirq_start) or a negative errno.
 */
int wsp_ics_alloc_irq(struct device_node *dn, int num)
{
	struct wsp_ics *ics;
	int order, offset;

	ics = wsp_ics_find_dn_ics(dn);
	if (!ics)
		return -ENODEV;

	/* Fast, but overly strict if num isn't a power of two */
	order = get_count_order(num);

	spin_lock_irq(&ics->lock);
	offset = bitmap_find_free_region(ics->bitmap, ics->count, order);
	spin_unlock_irq(&ics->lock);

	if (offset < 0)
		return offset;

	return offset + ics->hwirq_start;
}
481
482void wsp_ics_free_irq(struct device_node *dn, unsigned int irq)
483{
484 struct wsp_ics *ics;
485
486 ics = wsp_ics_find_dn_ics(dn);
487 if (WARN_ON(!ics))
488 return;
489
490 spin_lock_irq(&ics->lock);
491 bitmap_release_region(ics->bitmap, irq, 0);
492 spin_unlock_irq(&ics->lock);
493}
494
495/* Initialisation */
496
/*
 * Build the hw irq allocation bitmap for @ics from the node's
 * "available-ranges" property.  Bits are indexed relative to
 * ics->hwirq_start; set = reserved, clear = free to allocate.
 *
 * A missing or malformed property leaves the bitmap all-zero
 * (everything allocatable) and still returns 0 -- see the FIXMEs.
 */
static int __init wsp_ics_bitmap_setup(struct wsp_ics *ics,
				       struct device_node *dn)
{
	int len, i, j, size;
	u32 start, count;
	const u32 *p;

	size = BITS_TO_LONGS(ics->count) * sizeof(long);
	ics->bitmap = kzalloc(size, GFP_KERNEL);
	if (!ics->bitmap) {
		pr_err("wsp_ics: ENOMEM allocating IRQ bitmap!\n");
		return -ENOMEM;
	}

	spin_lock_init(&ics->lock);

	p = of_get_property(dn, "available-ranges", &len);
	if (!p || !len) {
		/* FIXME this should be a WARN() once mambo is updated */
		pr_err("wsp_ics: No available-ranges defined for %s\n",
			dn->full_name);
		return 0;
	}

	if (len % (2 * sizeof(u32)) != 0) {
		/* FIXME this should be a WARN() once mambo is updated */
		pr_err("wsp_ics: Invalid available-ranges for %s\n",
			dn->full_name);
		return 0;
	}

	/* Start fully reserved, then clear each advertised range */
	bitmap_fill(ics->bitmap, ics->count);

	/* Property is a list of (start, count) u32 pairs */
	for (i = 0; i < len / sizeof(u32); i += 2) {
		start = of_read_number(p + i, 1);
		count = of_read_number(p + i + 1, 1);

		pr_devel("%s: start: %d count: %d\n", __func__, start, count);

		if ((start + count) > (ics->hwirq_start + ics->count) ||
		     start < ics->hwirq_start) {
			pr_err("wsp_ics: Invalid range! -> %d to %d\n",
					start, start + count);
			break;
		}

		for (j = 0; j < count; j++)
			bitmap_release_region(ics->bitmap,
				(start + j) - ics->hwirq_start, 0);
	}

	/* Ensure LSIs are not available for allocation */
	/* NOTE(review): lsi_base is an absolute irq number while the
	 * bitmap is indexed relative to hwirq_start -- confirm this
	 * offset is intended */
	bitmap_allocate_region(ics->bitmap, ics->lsi_base,
			       get_count_order(ics->lsi_count));

	return 0;
}
554
/*
 * Probe one WSP ICS node: map its registers, read its irq range and
 * capabilities, build the allocation bitmap, mask every source and
 * register it with the common XICS layer.
 *
 * Returns 0 on success or a negative errno; the register mapping is
 * unwound on failure.
 */
static int __init wsp_ics_setup(struct wsp_ics *ics, struct device_node *dn)
{
	u32 lsi_buid, msi_buid, msi_base, msi_count;
	void __iomem *regs;
	const u32 *p;
	int rc, len, i;
	u64 caps, buid;

	/* "interrupt-ranges" is a single (start, count) pair */
	p = of_get_property(dn, "interrupt-ranges", &len);
	if (!p || len < (2 * sizeof(u32))) {
		pr_err("wsp_ics: No/bad interrupt-ranges found on %s\n",
			dn->full_name);
		return -ENOENT;
	}

	if (len > (2 * sizeof(u32))) {
		pr_err("wsp_ics: Multiple ics ranges not supported.\n");
		return -EINVAL;
	}

	regs = of_iomap(dn, 0);
	if (!regs) {
		pr_err("wsp_ics: of_iomap(%s) failed\n", dn->full_name);
		return -ENXIO;
	}

	ics->hwirq_start = of_read_number(p, 1);
	ics->count = of_read_number(p + 1, 1);
	ics->regs = regs;

	ics->chip_id = wsp_get_chip_id(dn);
	if (WARN_ON(ics->chip_id < 0))
		ics->chip_id = 0;

	/* Get some information about the critter */
	caps = in_be64(ICS_INT_CAPS_REG(ics->regs));
	buid = in_be64(INT_SRC_LAYER_BUID_REG(ics->regs));
	ics->lsi_count = caps >> 56;
	msi_count = (caps >> 44) & 0x7ff;

	/* Note: LSI BUID is 9 bits, but really only 3 are BUID and the
	 * rest is mixed in the interrupt number. We store the whole
	 * thing though
	 */
	lsi_buid = (buid >> 48) & 0x1ff;
	ics->lsi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | lsi_buid << 5;
	msi_buid = (buid >> 37) & 0x7;
	msi_base = (ics->chip_id << WSP_ICS_CHIP_SHIFT) | msi_buid << 11;

	pr_info("wsp_ics: Found %s\n", dn->full_name);
	pr_info("wsp_ics: irq range : 0x%06llx..0x%06llx\n",
		ics->hwirq_start, ics->hwirq_start + ics->count - 1);
	pr_info("wsp_ics: %4d LSIs : 0x%06x..0x%06x\n",
		ics->lsi_count, ics->lsi_base,
		ics->lsi_base + ics->lsi_count - 1);
	pr_info("wsp_ics: %4d MSIs : 0x%06x..0x%06x\n",
		msi_count, msi_base,
		msi_base + msi_count - 1);

	/* Let's check the HW config is sane */
	if (ics->lsi_base < ics->hwirq_start ||
	    (ics->lsi_base + ics->lsi_count) > (ics->hwirq_start + ics->count))
		pr_warning("wsp_ics: WARNING ! LSIs out of interrupt-ranges !\n");
	if (msi_base < ics->hwirq_start ||
	    (msi_base + msi_count) > (ics->hwirq_start + ics->count))
		pr_warning("wsp_ics: WARNING ! MSIs out of interrupt-ranges !\n");

	/* We don't check for overlap between LSI and MSI, which will happen
	 * if we use the same BUID, I'm not sure yet how legit that is.
	 */

	rc = wsp_ics_bitmap_setup(ics, dn);
	if (rc) {
		iounmap(regs);
		return rc;
	}

	ics->dn = of_node_get(dn);
	alloc_irq_map(ics);

	/* Park every source at the default server / lowest priority */
	for (i = 0; i < ics->count; i++)
		wsp_mask_real_irq(ics->hwirq_start + i, ics);

	ics->ics.map = wsp_ics_map;
	ics->ics.mask_unknown = wsp_ics_mask_unknown;
	ics->ics.get_server = wsp_ics_get_server;
	ics->ics.host_match = wsp_ics_host_match;

	xics_register_ics(&ics->ics);

	return 0;
}
647
648static void __init wsp_ics_set_default_server(void)
649{
650 struct device_node *np;
651 u32 hwid;
652
653 /* Find the server number for the boot cpu. */
654 np = of_get_cpu_node(boot_cpuid, NULL);
655 BUG_ON(!np);
656
657 hwid = get_hard_smp_processor_id(boot_cpuid);
658
659 pr_info("wsp_ics: default server is %#x, CPU %s\n", hwid, np->full_name);
660 xics_default_server = hwid;
661
662 of_node_put(np);
663}
664
/*
 * Find and initialise every WSP ICS in the system.
 *
 * NOTE(review): the counting loop matches "ibm,ppc-xics" while the
 * setup loop matches "ibm,wsp-xics" -- presumably the nodes carry
 * both compatibles on these boards; confirm, since a mismatch makes
 * this return -1.
 *
 * ics_list is not freed on partial failure: entries that did set up
 * have been registered with the XICS core by wsp_ics_setup() and
 * must stay live.
 */
static int __init wsp_ics_init(void)
{
	struct device_node *dn;
	struct wsp_ics *ics;
	int rc, found;

	wsp_ics_set_default_server();

	found = 0;
	for_each_compatible_node(dn, NULL, "ibm,ppc-xics")
		found++;

	if (found == 0) {
		pr_err("wsp_ics: No ICS's found!\n");
		return -ENODEV;
	}

	ics_list = kmalloc(sizeof(*ics) * found, GFP_KERNEL);
	if (!ics_list) {
		pr_err("wsp_ics: No memory for structs.\n");
		return -ENOMEM;
	}

	/* Fill ics_list in order; nodes that fail setup are skipped */
	num_ics = 0;
	ics = ics_list;
	for_each_compatible_node(dn, NULL, "ibm,wsp-xics") {
		rc = wsp_ics_setup(ics, dn);
		if (rc == 0) {
			ics++;
			num_ics++;
		}
	}

	if (found != num_ics) {
		pr_err("wsp_ics: Failed setting up %d ICS's\n",
			found - num_ics);
		return -1;
	}

	return 0;
}
706
/* Platform irq init: set up the WSP ICSs, then the common XICS layer. */
void __init wsp_init_irq(void)
{
	wsp_ics_init();
	xics_init();

	/* We need to patch our irq chip's EOI to point to the right ICP */
	wsp_irq_chip.irq_eoi = icp_ops->eoi;
}
715
716#ifdef CONFIG_PCI_MSI
/* Unmask at the ICS level first, then at the MSI capability. */
static void wsp_ics_msi_unmask_irq(struct irq_data *d)
{
	wsp_chip_unmask_irq(d);
	unmask_msi_irq(d);
}
722
/* MSI startup is just an unmask. */
static unsigned int wsp_ics_msi_startup(struct irq_data *d)
{
	wsp_ics_msi_unmask_irq(d);
	return 0;
}
728
/* Mask at the MSI capability first, then the ICS (reverse of unmask). */
static void wsp_ics_msi_mask_irq(struct irq_data *d)
{
	mask_msi_irq(d);
	wsp_chip_mask_irq(d);
}
734
/*
 * We do it this way because we reassign the default EOI handling in
 * wsp_init_irq() above: wsp_irq_chip.irq_eoi is only filled in at
 * runtime, so go through the chip rather than naming a function here.
 */
static void wsp_ics_eoi(struct irq_data *data)
{
	wsp_irq_chip.irq_eoi(data);
}
743
744static struct irq_chip wsp_ics_msi = {
745 .name = "WSP ICS MSI",
746 .irq_startup = wsp_ics_msi_startup,
747 .irq_mask = wsp_ics_msi_mask_irq,
748 .irq_unmask = wsp_ics_msi_unmask_irq,
749 .irq_eoi = wsp_ics_eoi,
750 .irq_set_affinity = wsp_chip_set_affinity
751};
752
/* Switch @irq over to the MSI-aware irq_chip. */
void wsp_ics_set_msi_chip(unsigned int irq)
{
	irq_set_chip(irq, &wsp_ics_msi);
}
757
/* Switch @irq back to the standard WSP irq_chip. */
void wsp_ics_set_std_chip(unsigned int irq)
{
	irq_set_chip(irq, &wsp_irq_chip);
}
762#endif /* CONFIG_PCI_MSI */
diff --git a/arch/powerpc/platforms/wsp/ics.h b/arch/powerpc/platforms/wsp/ics.h
deleted file mode 100644
index 07b644e0cf97..000000000000
--- a/arch/powerpc/platforms/wsp/ics.h
+++ /dev/null
@@ -1,25 +0,0 @@
1/*
2 * Copyright 2009 IBM Corporation.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#ifndef __ICS_H
11#define __ICS_H
12
13#define XIVE_ADDR_MASK 0x7FFULL
14
15extern void wsp_init_irq(void);
16
17extern int wsp_ics_alloc_irq(struct device_node *dn, int num);
18extern void wsp_ics_free_irq(struct device_node *dn, unsigned int irq);
19
20#ifdef CONFIG_PCI_MSI
21extern void wsp_ics_set_msi_chip(unsigned int irq);
22extern void wsp_ics_set_std_chip(unsigned int irq);
23#endif /* CONFIG_PCI_MSI */
24
25#endif /* __ICS_H */
diff --git a/arch/powerpc/platforms/wsp/msi.c b/arch/powerpc/platforms/wsp/msi.c
deleted file mode 100644
index 380882f27add..000000000000
--- a/arch/powerpc/platforms/wsp/msi.c
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 * Copyright 2011 Michael Ellerman, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/pci.h>
12#include <linux/msi.h>
13#include <linux/irq.h>
14#include <linux/interrupt.h>
15
16#include "msi.h"
17#include "ics.h"
18#include "wsp_pci.h"
19
20/* Magic addresses for 32 & 64-bit MSIs with hardcoded MVE 0 */
21#define MSI_ADDR_32 0xFFFF0000ul
22#define MSI_ADDR_64 0x1000000000000000ul
23
24int wsp_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
25{
26 struct pci_controller *phb;
27 struct msi_desc *entry;
28 struct msi_msg msg;
29 unsigned int virq;
30 int hwirq;
31
32 phb = pci_bus_to_host(dev->bus);
33 if (!phb)
34 return -ENOENT;
35
36 entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
37 if (entry->msi_attrib.is_64) {
38 msg.address_lo = 0;
39 msg.address_hi = MSI_ADDR_64 >> 32;
40 } else {
41 msg.address_lo = MSI_ADDR_32;
42 msg.address_hi = 0;
43 }
44
45 list_for_each_entry(entry, &dev->msi_list, list) {
46 hwirq = wsp_ics_alloc_irq(phb->dn, 1);
47 if (hwirq < 0) {
48 dev_warn(&dev->dev, "wsp_msi: hwirq alloc failed!\n");
49 return hwirq;
50 }
51
52 virq = irq_create_mapping(NULL, hwirq);
53 if (virq == NO_IRQ) {
54 dev_warn(&dev->dev, "wsp_msi: virq alloc failed!\n");
55 return -1;
56 }
57
58 dev_dbg(&dev->dev, "wsp_msi: allocated irq %#x/%#x\n",
59 hwirq, virq);
60
61 wsp_ics_set_msi_chip(virq);
62 irq_set_msi_desc(virq, entry);
63 msg.data = hwirq & XIVE_ADDR_MASK;
64 write_msi_msg(virq, &msg);
65 }
66
67 return 0;
68}
69
/*
 * Undo wsp_setup_msi_irqs(): detach the MSI descriptors, restore the
 * standard irq_chip, and return each hw irq to the ICS allocator.
 */
void wsp_teardown_msi_irqs(struct pci_dev *dev)
{
	struct pci_controller *phb;
	struct msi_desc *entry;
	int hwirq;

	phb = pci_bus_to_host(dev->bus);

	dev_dbg(&dev->dev, "wsp_msi: tearing down msi irqs\n");

	list_for_each_entry(entry, &dev->msi_list, list) {
		if (entry->irq == NO_IRQ)
			continue;

		irq_set_msi_desc(entry->irq, NULL);
		wsp_ics_set_std_chip(entry->irq);

		hwirq = virq_to_hw(entry->irq);
		/* In this order to avoid racing with irq_create_mapping() */
		irq_dispose_mapping(entry->irq);
		wsp_ics_free_irq(phb->dn, hwirq);
	}
}
93
/*
 * Per-PHB MSI setup: program a single catch-all MVE at offset 0 and
 * install the arch MSI hooks.
 */
void wsp_setup_phb_msi(struct pci_controller *phb)
{
	/* Create a single MVE at offset 0 that matches everything */
	out_be64(phb->cfg_data + PCIE_REG_IODA_ADDR, PCIE_REG_IODA_AD_TBL_MVT);
	out_be64(phb->cfg_data + PCIE_REG_IODA_DATA0, 1ull << 63);

	ppc_md.setup_msi_irqs = wsp_setup_msi_irqs;
	ppc_md.teardown_msi_irqs = wsp_teardown_msi_irqs;
}
diff --git a/arch/powerpc/platforms/wsp/msi.h b/arch/powerpc/platforms/wsp/msi.h
deleted file mode 100644
index 0ab27b71b24d..000000000000
--- a/arch/powerpc/platforms/wsp/msi.h
+++ /dev/null
@@ -1,19 +0,0 @@
1/*
2 * Copyright 2011 Michael Ellerman, IBM Corp.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#ifndef __WSP_MSI_H
11#define __WSP_MSI_H
12
13#ifdef CONFIG_PCI_MSI
14extern void wsp_setup_phb_msi(struct pci_controller *phb);
15#else
16static inline void wsp_setup_phb_msi(struct pci_controller *phb) { }
17#endif
18
19#endif /* __WSP_MSI_H */
diff --git a/arch/powerpc/platforms/wsp/opb_pic.c b/arch/powerpc/platforms/wsp/opb_pic.c
deleted file mode 100644
index 3f6729807938..000000000000
--- a/arch/powerpc/platforms/wsp/opb_pic.c
+++ /dev/null
@@ -1,321 +0,0 @@
1/*
2 * IBM Onboard Peripheral Bus Interrupt Controller
3 *
4 * Copyright 2010 Jack Miller, IBM Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License as published by the
8 * Free Software Foundation; either version 2 of the License, or (at your
9 * option) any later version.
10 */
11
12#include <linux/interrupt.h>
13#include <linux/io.h>
14#include <linux/irq.h>
15#include <linux/of.h>
16#include <linux/slab.h>
17#include <linux/time.h>
18#include <linux/of_address.h>
19#include <linux/of_irq.h>
20
21#include <asm/reg_a2.h>
22#include <asm/irq.h>
23
24#define OPB_NR_IRQS 32
25
26#define OPB_MLSASIER 0x04 /* MLS Accumulated Status IER */
27#define OPB_MLSIR 0x50 /* MLS Interrupt Register */
28#define OPB_MLSIER 0x54 /* MLS Interrupt Enable Register */
29#define OPB_MLSIPR 0x58 /* MLS Interrupt Polarity Register */
30#define OPB_MLSIIR 0x5c /* MLS Interrupt Inputs Register */
31
32static int opb_index = 0;
33
34struct opb_pic {
35 struct irq_domain *host;
36 void *regs;
37 int index;
38 spinlock_t lock;
39};
40
/* Big-endian MMIO read of an OPB PIC register. */
static u32 opb_in(struct opb_pic *opb, int offset)
{
	return in_be32(opb->regs + offset);
}
45
/* Big-endian MMIO write of an OPB PIC register. */
static void opb_out(struct opb_pic *opb, int offset, u32 val)
{
	out_be32(opb->regs + offset, val);
}
50
/* irq_chip unmask: set this source's enable bit in the MLS IER. */
static void opb_unmask_irq(struct irq_data *d)
{
	struct opb_pic *opb;
	unsigned long flags;
	u32 ier, bitset;

	opb = d->chip_data;
	/* Bit 0 is the MSB: hw irq n maps to bit (31 - n) */
	bitset = (1 << (31 - irqd_to_hwirq(d)));

	spin_lock_irqsave(&opb->lock, flags);

	ier = opb_in(opb, OPB_MLSIER);
	opb_out(opb, OPB_MLSIER, ier | bitset);
	ier = opb_in(opb, OPB_MLSIER); // Flush posted writes

	spin_unlock_irqrestore(&opb->lock, flags);
}
68
/* irq_chip mask: clear this source's enable bit in the MLS IER. */
static void opb_mask_irq(struct irq_data *d)
{
	struct opb_pic *opb;
	unsigned long flags;
	u32 ier, mask;

	opb = d->chip_data;
	/* Bit 0 is the MSB: hw irq n maps to bit (31 - n) */
	mask = ~(1 << (31 - irqd_to_hwirq(d)));

	spin_lock_irqsave(&opb->lock, flags);

	ier = opb_in(opb, OPB_MLSIER);
	opb_out(opb, OPB_MLSIER, ier & mask);
	ier = opb_in(opb, OPB_MLSIER); // Flush posted writes

	spin_unlock_irqrestore(&opb->lock, flags);
}
86
/* irq_chip ack: write the source's bit to the MLS IR to clear it. */
static void opb_ack_irq(struct irq_data *d)
{
	struct opb_pic *opb;
	unsigned long flags;
	u32 bitset;

	opb = d->chip_data;
	bitset = (1 << (31 - irqd_to_hwirq(d)));

	spin_lock_irqsave(&opb->lock, flags);

	opb_out(opb, OPB_MLSIR, bitset);
	opb_in(opb, OPB_MLSIR); // Flush posted writes

	spin_unlock_irqrestore(&opb->lock, flags);
}
103
/* irq_chip mask_ack: disable the source in the IER, then ack it. */
static void opb_mask_ack_irq(struct irq_data *d)
{
	struct opb_pic *opb;
	unsigned long flags;
	u32 bitset;
	u32 ier, ir;

	opb = d->chip_data;
	bitset = (1 << (31 - irqd_to_hwirq(d)));

	spin_lock_irqsave(&opb->lock, flags);

	ier = opb_in(opb, OPB_MLSIER);
	opb_out(opb, OPB_MLSIER, ier & ~bitset);
	ier = opb_in(opb, OPB_MLSIER); // Flush posted writes

	opb_out(opb, OPB_MLSIR, bitset);
	ir = opb_in(opb, OPB_MLSIR); // Flush posted writes

	spin_unlock_irqrestore(&opb->lock, flags);
}
125
/*
 * irq_chip set_type: only level triggers are supported; program the
 * polarity register accordingly.  IRQ_TYPE_NONE masks the source.
 */
static int opb_set_irq_type(struct irq_data *d, unsigned int flow)
{
	struct opb_pic *opb;
	unsigned long flags;
	int invert, ipr, mask, bit;

	opb = d->chip_data;

	/* The only information we're interested in in the type is whether it's
	 * a high or low trigger. For high triggered interrupts, the polarity
	 * set for it in the MLS Interrupt Polarity Register is 0, for low
	 * interrupts it's 1 so that the proper input in the MLS Interrupt Input
	 * Register is interpreted as asserting the interrupt. */

	switch (flow) {
	case IRQ_TYPE_NONE:
		opb_mask_irq(d);
		return 0;

	case IRQ_TYPE_LEVEL_HIGH:
		invert = 0;
		break;

	case IRQ_TYPE_LEVEL_LOW:
		invert = 1;
		break;

	default:
		return -EINVAL;
	}

	/* Bit 0 is the MSB: hw irq n maps to bit (31 - n) */
	bit = (1 << (31 - irqd_to_hwirq(d)));
	mask = ~bit;

	spin_lock_irqsave(&opb->lock, flags);

	ipr = opb_in(opb, OPB_MLSIPR);
	ipr = (ipr & mask) | (invert ? bit : 0);
	opb_out(opb, OPB_MLSIPR, ipr);
	ipr = opb_in(opb, OPB_MLSIPR); // Flush posted writes

	spin_unlock_irqrestore(&opb->lock, flags);

	/* Record the type in the interrupt descriptor */
	irqd_set_trigger_type(d, flow);

	return 0;
}
174
175static struct irq_chip opb_irq_chip = {
176 .name = "OPB",
177 .irq_mask = opb_mask_irq,
178 .irq_unmask = opb_unmask_irq,
179 .irq_mask_ack = opb_mask_ack_irq,
180 .irq_ack = opb_ack_irq,
181 .irq_set_type = opb_set_irq_type
182};
183
184static int opb_host_map(struct irq_domain *host, unsigned int virq,
185 irq_hw_number_t hwirq)
186{
187 struct opb_pic *opb;
188
189 opb = host->host_data;
190
191 /* Most of the important stuff is handled by the generic host code, like
192 * the lookup, so just attach some info to the virtual irq */
193
194 irq_set_chip_data(virq, opb);
195 irq_set_chip_and_handler(virq, &opb_irq_chip, handle_level_irq);
196 irq_set_irq_type(virq, IRQ_TYPE_NONE);
197
198 return 0;
199}
200
201static const struct irq_domain_ops opb_host_ops = {
202 .map = opb_host_map,
203 .xlate = irq_domain_xlate_twocell,
204};
205
/*
 * Cascade handler: drain every asserted OPB source, re-reading the
 * interrupt register until it is clear.
 */
irqreturn_t opb_irq_handler(int irq, void *private)
{
	struct opb_pic *opb;
	u32 ir, src, subvirq;

	opb = (struct opb_pic *) private;

	/* Read the OPB MLS Interrupt Register for
	 * asserted interrupts */
	ir = opb_in(opb, OPB_MLSIR);
	if (!ir)
		return IRQ_NONE;

	do {
		/* Get 1 - 32 source, *NOT* bit */
		src = 32 - ffs(ir);

		/* Translate from the OPB's conception of interrupt number to
		 * Linux's virtual IRQ */

		subvirq = irq_linear_revmap(opb->host, src);

		/* NOTE(review): subvirq is not checked for NO_IRQ before
		 * being handled -- confirm the revmap cannot fail here */
		generic_handle_irq(subvirq);
	} while ((ir = opb_in(opb, OPB_MLSIR)));

	return IRQ_HANDLED;
}
233
/*
 * Allocate and initialise one OPB PIC from its device node: map its
 * registers, create the irq domain, and quiesce the hardware (all
 * sources disabled and acked).  Returns NULL on any failure.
 */
struct opb_pic *opb_pic_init_one(struct device_node *dn)
{
	struct opb_pic *opb;
	struct resource res;

	if (of_address_to_resource(dn, 0, &res)) {
		printk(KERN_ERR "opb: Couldn't translate resource\n");
		return  NULL;
	}

	opb = kzalloc(sizeof(struct opb_pic), GFP_KERNEL);
	if (!opb) {
		printk(KERN_ERR "opb: Failed to allocate opb struct!\n");
		return NULL;
	}

	/* Get access to the OPB MMIO registers */
	/* NOTE(review): +0x10000 presumably selects the PIC's register
	 * window within the OPB range -- confirm against HW docs */
	opb->regs = ioremap(res.start + 0x10000, 0x1000);
	if (!opb->regs) {
		printk(KERN_ERR "opb: Failed to allocate register space!\n");
		goto free_opb;
	}

	/* Allocate an irq domain so that Linux knows that despite only
	 * having one interrupt to issue, we're the controller for multiple
	 * hardware IRQs, so later we can lookup their virtual IRQs. */

	opb->host = irq_domain_add_linear(dn, OPB_NR_IRQS, &opb_host_ops, opb);
	if (!opb->host) {
		printk(KERN_ERR "opb: Failed to allocate IRQ host!\n");
		goto free_regs;
	}

	opb->index = opb_index++;
	spin_lock_init(&opb->lock);

	/* Disable all interrupts by default */
	opb_out(opb, OPB_MLSASIER, 0);
	opb_out(opb, OPB_MLSIER, 0);

	/* ACK any interrupts left by FW */
	opb_out(opb, OPB_MLSIR, 0xFFFFFFFF);

	return opb;

free_regs:
	iounmap(opb->regs);
free_opb:
	kfree(opb);
	return NULL;
}
285
286void __init opb_pic_init(void)
287{
288 struct device_node *dn;
289 struct opb_pic *opb;
290 int virq;
291 int rc;
292
293 /* Call init_one for each OPB device */
294 for_each_compatible_node(dn, NULL, "ibm,opb") {
295
296 /* Fill in an OPB struct */
297 opb = opb_pic_init_one(dn);
298 if (!opb) {
299 printk(KERN_WARNING "opb: Failed to init node, skipped!\n");
300 continue;
301 }
302
303 /* Map / get opb's hardware virtual irq */
304 virq = irq_of_parse_and_map(dn, 0);
305 if (virq <= 0) {
306 printk("opb: irq_op_parse_and_map failed!\n");
307 continue;
308 }
309
310 /* Attach opb interrupt handler to new virtual IRQ */
311 rc = request_irq(virq, opb_irq_handler, IRQF_NO_THREAD,
312 "OPB LS Cascade", opb);
313 if (rc) {
314 printk("opb: request_irq failed: %d\n", rc);
315 continue;
316 }
317
318 printk("OPB%d init with %d IRQs at %p\n", opb->index,
319 OPB_NR_IRQS, opb->regs);
320 }
321}
diff --git a/arch/powerpc/platforms/wsp/psr2.c b/arch/powerpc/platforms/wsp/psr2.c
deleted file mode 100644
index a87b414c766a..000000000000
--- a/arch/powerpc/platforms/wsp/psr2.c
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * Copyright 2008-2011, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/delay.h>
11#include <linux/init.h>
12#include <linux/irq.h>
13#include <linux/kernel.h>
14#include <linux/mm.h>
15#include <linux/of.h>
16#include <linux/smp.h>
17#include <linux/time.h>
18#include <linux/of_fdt.h>
19
20#include <asm/machdep.h>
21#include <asm/udbg.h>
22
23#include "ics.h"
24#include "wsp.h"
25
26
27static void psr2_spin(void)
28{
29 hard_irq_disable();
30 for (;;)
31 continue;
32}
33
34static void psr2_restart(char *cmd)
35{
36 psr2_spin();
37}
38
39static int __init psr2_probe(void)
40{
41 unsigned long root = of_get_flat_dt_root();
42
43 if (of_flat_dt_is_compatible(root, "ibm,wsp-chroma")) {
44 /* chroma systems also claim they are psr2s */
45 return 0;
46 }
47
48 if (!of_flat_dt_is_compatible(root, "ibm,psr2"))
49 return 0;
50
51 return 1;
52}
53
54define_machine(psr2_md) {
55 .name = "PSR2 A2",
56 .probe = psr2_probe,
57 .setup_arch = wsp_setup_arch,
58 .restart = psr2_restart,
59 .power_off = psr2_spin,
60 .halt = psr2_spin,
61 .calibrate_decr = generic_calibrate_decr,
62 .init_IRQ = wsp_setup_irq,
63 .progress = udbg_progress,
64 .power_save = book3e_idle,
65};
66
67machine_arch_initcall(psr2_md, wsp_probe_devices);
diff --git a/arch/powerpc/platforms/wsp/scom_smp.c b/arch/powerpc/platforms/wsp/scom_smp.c
deleted file mode 100644
index 8c79ce016cf1..000000000000
--- a/arch/powerpc/platforms/wsp/scom_smp.c
+++ /dev/null
@@ -1,435 +0,0 @@
1/*
2 * SCOM support for A2 platforms
3 *
4 * Copyright 2007-2011 Benjamin Herrenschmidt, David Gibson,
5 * Michael Ellerman, IBM Corp.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <linux/cpumask.h>
14#include <linux/io.h>
15#include <linux/of.h>
16#include <linux/spinlock.h>
17#include <linux/types.h>
18
19#include <asm/cputhreads.h>
20#include <asm/reg_a2.h>
21#include <asm/scom.h>
22#include <asm/udbg.h>
23#include <asm/code-patching.h>
24
25#include "wsp.h"
26
27#define SCOM_RAMC 0x2a /* Ram Command */
28#define SCOM_RAMC_TGT1_EXT 0x80000000
29#define SCOM_RAMC_SRC1_EXT 0x40000000
30#define SCOM_RAMC_SRC2_EXT 0x20000000
31#define SCOM_RAMC_SRC3_EXT 0x10000000
32#define SCOM_RAMC_ENABLE 0x00080000
33#define SCOM_RAMC_THREADSEL 0x00060000
34#define SCOM_RAMC_EXECUTE 0x00010000
35#define SCOM_RAMC_MSR_OVERRIDE 0x00008000
36#define SCOM_RAMC_MSR_PR 0x00004000
37#define SCOM_RAMC_MSR_GS 0x00002000
38#define SCOM_RAMC_FORCE 0x00001000
39#define SCOM_RAMC_FLUSH 0x00000800
40#define SCOM_RAMC_INTERRUPT 0x00000004
41#define SCOM_RAMC_ERROR 0x00000002
42#define SCOM_RAMC_DONE 0x00000001
43#define SCOM_RAMI 0x29 /* Ram Instruction */
44#define SCOM_RAMIC 0x28 /* Ram Instruction and Command */
45#define SCOM_RAMIC_INSN 0xffffffff00000000
46#define SCOM_RAMD 0x2d /* Ram Data */
47#define SCOM_RAMDH 0x2e /* Ram Data High */
48#define SCOM_RAMDL 0x2f /* Ram Data Low */
49#define SCOM_PCCR0 0x33 /* PC Configuration Register 0 */
50#define SCOM_PCCR0_ENABLE_DEBUG 0x80000000
51#define SCOM_PCCR0_ENABLE_RAM 0x40000000
52#define SCOM_THRCTL 0x30 /* Thread Control and Status */
53#define SCOM_THRCTL_T0_STOP 0x80000000
54#define SCOM_THRCTL_T1_STOP 0x40000000
55#define SCOM_THRCTL_T2_STOP 0x20000000
56#define SCOM_THRCTL_T3_STOP 0x10000000
57#define SCOM_THRCTL_T0_STEP 0x08000000
58#define SCOM_THRCTL_T1_STEP 0x04000000
59#define SCOM_THRCTL_T2_STEP 0x02000000
60#define SCOM_THRCTL_T3_STEP 0x01000000
61#define SCOM_THRCTL_T0_RUN 0x00800000
62#define SCOM_THRCTL_T1_RUN 0x00400000
63#define SCOM_THRCTL_T2_RUN 0x00200000
64#define SCOM_THRCTL_T3_RUN 0x00100000
65#define SCOM_THRCTL_T0_PM 0x00080000
66#define SCOM_THRCTL_T1_PM 0x00040000
67#define SCOM_THRCTL_T2_PM 0x00020000
68#define SCOM_THRCTL_T3_PM 0x00010000
69#define SCOM_THRCTL_T0_UDE 0x00008000
70#define SCOM_THRCTL_T1_UDE 0x00004000
71#define SCOM_THRCTL_T2_UDE 0x00002000
72#define SCOM_THRCTL_T3_UDE 0x00001000
73#define SCOM_THRCTL_ASYNC_DIS 0x00000800
74#define SCOM_THRCTL_TB_DIS 0x00000400
75#define SCOM_THRCTL_DEC_DIS 0x00000200
76#define SCOM_THRCTL_AND 0x31 /* Thread Control and Status */
77#define SCOM_THRCTL_OR 0x32 /* Thread Control and Status */
78
79
80static DEFINE_PER_CPU(scom_map_t, scom_ptrs);
81
82static scom_map_t get_scom(int cpu, struct device_node *np, int *first_thread)
83{
84 scom_map_t scom = per_cpu(scom_ptrs, cpu);
85 int tcpu;
86
87 if (scom_map_ok(scom)) {
88 *first_thread = 0;
89 return scom;
90 }
91
92 *first_thread = 1;
93
94 scom = scom_map_device(np, 0);
95
96 for (tcpu = cpu_first_thread_sibling(cpu);
97 tcpu <= cpu_last_thread_sibling(cpu); tcpu++)
98 per_cpu(scom_ptrs, tcpu) = scom;
99
100 /* Hack: for the boot core, this will actually get called on
101 * the second thread up, not the first so our test above will
102 * set first_thread incorrectly. */
103 if (cpu_first_thread_sibling(cpu) == 0)
104 *first_thread = 0;
105
106 return scom;
107}
108
109static int a2_scom_ram(scom_map_t scom, int thread, u32 insn, int extmask)
110{
111 u64 cmd, mask, val;
112 int n = 0;
113
114 cmd = ((u64)insn << 32) | (((u64)extmask & 0xf) << 28)
115 | ((u64)thread << 17) | SCOM_RAMC_ENABLE | SCOM_RAMC_EXECUTE;
116 mask = SCOM_RAMC_DONE | SCOM_RAMC_INTERRUPT | SCOM_RAMC_ERROR;
117
118 scom_write(scom, SCOM_RAMIC, cmd);
119
120 for (;;) {
121 if (scom_read(scom, SCOM_RAMC, &val) != 0) {
122 pr_err("SCOM error on instruction 0x%08x, thread %d\n",
123 insn, thread);
124 return -1;
125 }
126 if (val & mask)
127 break;
128 pr_devel("Waiting on RAMC = 0x%llx\n", val);
129 if (++n == 3) {
130 pr_err("RAMC timeout on instruction 0x%08x, thread %d\n",
131 insn, thread);
132 return -1;
133 }
134 }
135
136 if (val & SCOM_RAMC_INTERRUPT) {
137 pr_err("RAMC interrupt on instruction 0x%08x, thread %d\n",
138 insn, thread);
139 return -SCOM_RAMC_INTERRUPT;
140 }
141
142 if (val & SCOM_RAMC_ERROR) {
143 pr_err("RAMC error on instruction 0x%08x, thread %d\n",
144 insn, thread);
145 return -SCOM_RAMC_ERROR;
146 }
147
148 return 0;
149}
150
151static int a2_scom_getgpr(scom_map_t scom, int thread, int gpr, int alt,
152 u64 *out_gpr)
153{
154 int rc;
155
156 /* or rN, rN, rN */
157 u32 insn = 0x7c000378 | (gpr << 21) | (gpr << 16) | (gpr << 11);
158 rc = a2_scom_ram(scom, thread, insn, alt ? 0xf : 0x0);
159 if (rc)
160 return rc;
161
162 return scom_read(scom, SCOM_RAMD, out_gpr);
163}
164
165static int a2_scom_getspr(scom_map_t scom, int thread, int spr, u64 *out_spr)
166{
167 int rc, sprhi, sprlo;
168 u32 insn;
169
170 sprhi = spr >> 5;
171 sprlo = spr & 0x1f;
172 insn = 0x7c2002a6 | (sprlo << 16) | (sprhi << 11); /* mfspr r1,spr */
173
174 if (spr == 0x0ff0)
175 insn = 0x7c2000a6; /* mfmsr r1 */
176
177 rc = a2_scom_ram(scom, thread, insn, 0xf);
178 if (rc)
179 return rc;
180 return a2_scom_getgpr(scom, thread, 1, 1, out_spr);
181}
182
183static int a2_scom_setgpr(scom_map_t scom, int thread, int gpr,
184 int alt, u64 val)
185{
186 u32 lis = 0x3c000000 | (gpr << 21);
187 u32 li = 0x38000000 | (gpr << 21);
188 u32 oris = 0x64000000 | (gpr << 21) | (gpr << 16);
189 u32 ori = 0x60000000 | (gpr << 21) | (gpr << 16);
190 u32 rldicr32 = 0x780007c6 | (gpr << 21) | (gpr << 16);
191 u32 highest = val >> 48;
192 u32 higher = (val >> 32) & 0xffff;
193 u32 high = (val >> 16) & 0xffff;
194 u32 low = val & 0xffff;
195 int lext = alt ? 0x8 : 0x0;
196 int oext = alt ? 0xf : 0x0;
197 int rc = 0;
198
199 if (highest)
200 rc |= a2_scom_ram(scom, thread, lis | highest, lext);
201
202 if (higher) {
203 if (highest)
204 rc |= a2_scom_ram(scom, thread, oris | higher, oext);
205 else
206 rc |= a2_scom_ram(scom, thread, li | higher, lext);
207 }
208
209 if (highest || higher)
210 rc |= a2_scom_ram(scom, thread, rldicr32, oext);
211
212 if (high) {
213 if (highest || higher)
214 rc |= a2_scom_ram(scom, thread, oris | high, oext);
215 else
216 rc |= a2_scom_ram(scom, thread, lis | high, lext);
217 }
218
219 if (highest || higher || high)
220 rc |= a2_scom_ram(scom, thread, ori | low, oext);
221 else
222 rc |= a2_scom_ram(scom, thread, li | low, lext);
223
224 return rc;
225}
226
227static int a2_scom_setspr(scom_map_t scom, int thread, int spr, u64 val)
228{
229 int sprhi = spr >> 5;
230 int sprlo = spr & 0x1f;
231 /* mtspr spr, r1 */
232 u32 insn = 0x7c2003a6 | (sprlo << 16) | (sprhi << 11);
233
234 if (spr == 0x0ff0)
235 insn = 0x7c200124; /* mtmsr r1 */
236
237 if (a2_scom_setgpr(scom, thread, 1, 1, val))
238 return -1;
239
240 return a2_scom_ram(scom, thread, insn, 0xf);
241}
242
243static int a2_scom_initial_tlb(scom_map_t scom, int thread)
244{
245 extern u32 a2_tlbinit_code_start[], a2_tlbinit_code_end[];
246 extern u32 a2_tlbinit_after_iprot_flush[];
247 extern u32 a2_tlbinit_after_linear_map[];
248 u32 assoc, entries, i;
249 u64 epn, tlbcfg;
250 u32 *p;
251 int rc;
252
253 /* Invalidate all entries (including iprot) */
254
255 rc = a2_scom_getspr(scom, thread, SPRN_TLB0CFG, &tlbcfg);
256 if (rc)
257 goto scom_fail;
258 entries = tlbcfg & TLBnCFG_N_ENTRY;
259 assoc = (tlbcfg & TLBnCFG_ASSOC) >> 24;
260 epn = 0;
261
262 /* Set MMUCR2 to enable 4K, 64K, 1M, 16M and 1G pages */
263 a2_scom_setspr(scom, thread, SPRN_MMUCR2, 0x000a7531);
264 /* Set MMUCR3 to write all thids bit to the TLB */
265 a2_scom_setspr(scom, thread, SPRN_MMUCR3, 0x0000000f);
266
267 /* Set MAS1 for 1G page size, and MAS2 to our initial EPN */
268 a2_scom_setspr(scom, thread, SPRN_MAS1, MAS1_TSIZE(BOOK3E_PAGESZ_1GB));
269 a2_scom_setspr(scom, thread, SPRN_MAS2, epn);
270 for (i = 0; i < entries; i++) {
271
272 a2_scom_setspr(scom, thread, SPRN_MAS0, MAS0_ESEL(i % assoc));
273
274 /* tlbwe */
275 rc = a2_scom_ram(scom, thread, 0x7c0007a4, 0);
276 if (rc)
277 goto scom_fail;
278
279 /* Next entry is new address? */
280 if((i + 1) % assoc == 0) {
281 epn += (1 << 30);
282 a2_scom_setspr(scom, thread, SPRN_MAS2, epn);
283 }
284 }
285
286 /* Setup args for linear mapping */
287 rc = a2_scom_setgpr(scom, thread, 3, 0, MAS0_TLBSEL(0));
288 if (rc)
289 goto scom_fail;
290
291 /* Linear mapping */
292 for (p = a2_tlbinit_code_start; p < a2_tlbinit_after_linear_map; p++) {
293 rc = a2_scom_ram(scom, thread, *p, 0);
294 if (rc)
295 goto scom_fail;
296 }
297
298 /*
299 * For the boot thread, between the linear mapping and the debug
300 * mappings there is a loop to flush iprot mappings. Ramming doesn't do
301 * branches, but the secondary threads don't need to be nearly as smart
302 * (i.e. we don't need to worry about invalidating the mapping we're
303 * standing on).
304 */
305
306 /* Debug mappings. Expects r11 = MAS0 from linear map (set above) */
307 for (p = a2_tlbinit_after_iprot_flush; p < a2_tlbinit_code_end; p++) {
308 rc = a2_scom_ram(scom, thread, *p, 0);
309 if (rc)
310 goto scom_fail;
311 }
312
313scom_fail:
314 if (rc)
315 pr_err("Setting up initial TLB failed, err %d\n", rc);
316
317 if (rc == -SCOM_RAMC_INTERRUPT) {
318 /* Interrupt, dump some status */
319 int rc[10];
320 u64 iar, srr0, srr1, esr, mas0, mas1, mas2, mas7_3, mas8, ccr2;
321 rc[0] = a2_scom_getspr(scom, thread, SPRN_IAR, &iar);
322 rc[1] = a2_scom_getspr(scom, thread, SPRN_SRR0, &srr0);
323 rc[2] = a2_scom_getspr(scom, thread, SPRN_SRR1, &srr1);
324 rc[3] = a2_scom_getspr(scom, thread, SPRN_ESR, &esr);
325 rc[4] = a2_scom_getspr(scom, thread, SPRN_MAS0, &mas0);
326 rc[5] = a2_scom_getspr(scom, thread, SPRN_MAS1, &mas1);
327 rc[6] = a2_scom_getspr(scom, thread, SPRN_MAS2, &mas2);
328 rc[7] = a2_scom_getspr(scom, thread, SPRN_MAS7_MAS3, &mas7_3);
329 rc[8] = a2_scom_getspr(scom, thread, SPRN_MAS8, &mas8);
330 rc[9] = a2_scom_getspr(scom, thread, SPRN_A2_CCR2, &ccr2);
331 pr_err(" -> retreived IAR =0x%llx (err %d)\n", iar, rc[0]);
332 pr_err(" retreived SRR0=0x%llx (err %d)\n", srr0, rc[1]);
333 pr_err(" retreived SRR1=0x%llx (err %d)\n", srr1, rc[2]);
334 pr_err(" retreived ESR =0x%llx (err %d)\n", esr, rc[3]);
335 pr_err(" retreived MAS0=0x%llx (err %d)\n", mas0, rc[4]);
336 pr_err(" retreived MAS1=0x%llx (err %d)\n", mas1, rc[5]);
337 pr_err(" retreived MAS2=0x%llx (err %d)\n", mas2, rc[6]);
338 pr_err(" retreived MS73=0x%llx (err %d)\n", mas7_3, rc[7]);
339 pr_err(" retreived MAS8=0x%llx (err %d)\n", mas8, rc[8]);
340 pr_err(" retreived CCR2=0x%llx (err %d)\n", ccr2, rc[9]);
341 }
342
343 return rc;
344}
345
346int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx, struct device_node *np)
347{
348 u64 init_iar, init_msr, init_ccr2;
349 unsigned long start_here;
350 int rc, core_setup;
351 scom_map_t scom;
352 u64 pccr0;
353
354 scom = get_scom(lcpu, np, &core_setup);
355 if (!scom) {
356 printk(KERN_ERR "Couldn't map SCOM for CPU%d\n", lcpu);
357 return -1;
358 }
359
360 pr_devel("Bringing up CPU%d using SCOM...\n", lcpu);
361
362 if (scom_read(scom, SCOM_PCCR0, &pccr0) != 0) {
363 printk(KERN_ERR "XSCOM failure readng PCCR0 on CPU%d\n", lcpu);
364 return -1;
365 }
366 scom_write(scom, SCOM_PCCR0, pccr0 | SCOM_PCCR0_ENABLE_DEBUG |
367 SCOM_PCCR0_ENABLE_RAM);
368
369 /* Stop the thead with THRCTL. If we are setting up the TLB we stop all
370 * threads. We also disable asynchronous interrupts while RAMing.
371 */
372 if (core_setup)
373 scom_write(scom, SCOM_THRCTL_OR,
374 SCOM_THRCTL_T0_STOP |
375 SCOM_THRCTL_T1_STOP |
376 SCOM_THRCTL_T2_STOP |
377 SCOM_THRCTL_T3_STOP |
378 SCOM_THRCTL_ASYNC_DIS);
379 else
380 scom_write(scom, SCOM_THRCTL_OR, SCOM_THRCTL_T0_STOP >> thr_idx);
381
382 /* Flush its pipeline just in case */
383 scom_write(scom, SCOM_RAMC, ((u64)thr_idx << 17) |
384 SCOM_RAMC_FLUSH | SCOM_RAMC_ENABLE);
385
386 a2_scom_getspr(scom, thr_idx, SPRN_IAR, &init_iar);
387 a2_scom_getspr(scom, thr_idx, 0x0ff0, &init_msr);
388 a2_scom_getspr(scom, thr_idx, SPRN_A2_CCR2, &init_ccr2);
389
390 /* Set MSR to MSR_CM (0x0ff0 is magic value for MSR_CM) */
391 rc = a2_scom_setspr(scom, thr_idx, 0x0ff0, MSR_CM);
392 if (rc) {
393 pr_err("Failed to set MSR ! err %d\n", rc);
394 return rc;
395 }
396
397 /* RAM in an sync/isync for the sake of it */
398 a2_scom_ram(scom, thr_idx, 0x7c0004ac, 0);
399 a2_scom_ram(scom, thr_idx, 0x4c00012c, 0);
400
401 if (core_setup) {
402 pr_devel("CPU%d is first thread in core, initializing TLB...\n",
403 lcpu);
404 rc = a2_scom_initial_tlb(scom, thr_idx);
405 if (rc)
406 goto fail;
407 }
408
409 start_here = ppc_function_entry(core_setup ? generic_secondary_smp_init
410 : generic_secondary_thread_init);
411 pr_devel("CPU%d entry point at 0x%lx...\n", lcpu, start_here);
412
413 rc |= a2_scom_setspr(scom, thr_idx, SPRN_IAR, start_here);
414 rc |= a2_scom_setgpr(scom, thr_idx, 3, 0,
415 get_hard_smp_processor_id(lcpu));
416 /*
417 * Tell book3e_secondary_core_init not to set up the TLB, we've
418 * already done that.
419 */
420 rc |= a2_scom_setgpr(scom, thr_idx, 4, 0, 1);
421
422 rc |= a2_scom_setspr(scom, thr_idx, SPRN_TENS, 0x1 << thr_idx);
423
424 scom_write(scom, SCOM_RAMC, 0);
425 scom_write(scom, SCOM_THRCTL_AND, ~(SCOM_THRCTL_T0_STOP >> thr_idx));
426 scom_write(scom, SCOM_PCCR0, pccr0);
427fail:
428 pr_devel(" SCOM initialization %s\n", rc ? "failed" : "succeeded");
429 if (rc) {
430 pr_err("Old IAR=0x%08llx MSR=0x%08llx CCR2=0x%08llx\n",
431 init_iar, init_msr, init_ccr2);
432 }
433
434 return rc;
435}
diff --git a/arch/powerpc/platforms/wsp/scom_wsp.c b/arch/powerpc/platforms/wsp/scom_wsp.c
deleted file mode 100644
index 6538b4de34fc..000000000000
--- a/arch/powerpc/platforms/wsp/scom_wsp.c
+++ /dev/null
@@ -1,82 +0,0 @@
1/*
2 * SCOM backend for WSP
3 *
4 * Copyright 2010 Benjamin Herrenschmidt, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/cpumask.h>
13#include <linux/io.h>
14#include <linux/of.h>
15#include <linux/spinlock.h>
16#include <linux/types.h>
17#include <linux/of_address.h>
18
19#include <asm/cputhreads.h>
20#include <asm/reg_a2.h>
21#include <asm/scom.h>
22#include <asm/udbg.h>
23
24#include "wsp.h"
25
26
27static scom_map_t wsp_scom_map(struct device_node *dev, u64 reg, u64 count)
28{
29 struct resource r;
30 u64 xscom_addr;
31
32 if (!of_get_property(dev, "scom-controller", NULL)) {
33 pr_err("%s: device %s is not a SCOM controller\n",
34 __func__, dev->full_name);
35 return SCOM_MAP_INVALID;
36 }
37
38 if (of_address_to_resource(dev, 0, &r)) {
39 pr_debug("Failed to find SCOM controller address\n");
40 return 0;
41 }
42
43 /* Transform the SCOM address into an XSCOM offset */
44 xscom_addr = ((reg & 0x7f000000) >> 1) | ((reg & 0xfffff) << 3);
45
46 return (scom_map_t)ioremap(r.start + xscom_addr, count << 3);
47}
48
49static void wsp_scom_unmap(scom_map_t map)
50{
51 iounmap((void *)map);
52}
53
54static int wsp_scom_read(scom_map_t map, u64 reg, u64 *value)
55{
56 u64 __iomem *addr = (u64 __iomem *)map;
57
58 *value = in_be64(addr + reg);
59
60 return 0;
61}
62
63static int wsp_scom_write(scom_map_t map, u64 reg, u64 value)
64{
65 u64 __iomem *addr = (u64 __iomem *)map;
66
67 out_be64(addr + reg, value);
68
69 return 0;
70}
71
72static const struct scom_controller wsp_scom_controller = {
73 .map = wsp_scom_map,
74 .unmap = wsp_scom_unmap,
75 .read = wsp_scom_read,
76 .write = wsp_scom_write
77};
78
79void scom_init_wsp(void)
80{
81 scom_init(&wsp_scom_controller);
82}
diff --git a/arch/powerpc/platforms/wsp/setup.c b/arch/powerpc/platforms/wsp/setup.c
deleted file mode 100644
index 11ac2f05e01c..000000000000
--- a/arch/powerpc/platforms/wsp/setup.c
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * Copyright 2010 Michael Ellerman, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/of_platform.h>
12
13#include "wsp.h"
14
15/*
16 * Find chip-id by walking up device tree looking for ibm,wsp-chip-id property.
17 * Won't work for nodes that are not a descendant of a wsp node.
18 */
19int wsp_get_chip_id(struct device_node *dn)
20{
21 const u32 *p;
22 int rc;
23
24 /* Start looking at the specified node, not its parent */
25 dn = of_node_get(dn);
26 while (dn && !(p = of_get_property(dn, "ibm,wsp-chip-id", NULL)))
27 dn = of_get_next_parent(dn);
28
29 if (!dn)
30 return -1;
31
32 rc = *p;
33 of_node_put(dn);
34
35 return rc;
36}
diff --git a/arch/powerpc/platforms/wsp/smp.c b/arch/powerpc/platforms/wsp/smp.c
deleted file mode 100644
index 332a18b81403..000000000000
--- a/arch/powerpc/platforms/wsp/smp.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * SMP Support for A2 platforms
3 *
4 * Copyright 2007 Benjamin Herrenschmidt, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 *
11 */
12
13#include <linux/cpumask.h>
14#include <linux/init.h>
15#include <linux/kernel.h>
16#include <linux/of.h>
17#include <linux/smp.h>
18
19#include <asm/dbell.h>
20#include <asm/machdep.h>
21#include <asm/xics.h>
22
23#include "ics.h"
24#include "wsp.h"
25
26static void smp_a2_setup_cpu(int cpu)
27{
28 doorbell_setup_this_cpu();
29
30 if (cpu != boot_cpuid)
31 xics_setup_cpu();
32}
33
34int smp_a2_kick_cpu(int nr)
35{
36 const char *enable_method;
37 struct device_node *np;
38 int thr_idx;
39
40 if (nr < 0 || nr >= NR_CPUS)
41 return -ENOENT;
42
43 np = of_get_cpu_node(nr, &thr_idx);
44 if (!np)
45 return -ENODEV;
46
47 enable_method = of_get_property(np, "enable-method", NULL);
48 pr_devel("CPU%d has enable-method: \"%s\"\n", nr, enable_method);
49
50 if (!enable_method) {
51 printk(KERN_ERR "CPU%d has no enable-method\n", nr);
52 return -ENOENT;
53 } else if (strcmp(enable_method, "ibm,a2-scom") == 0) {
54 if (a2_scom_startup_cpu(nr, thr_idx, np))
55 return -1;
56 } else {
57 printk(KERN_ERR "CPU%d: Don't understand enable-method \"%s\"\n",
58 nr, enable_method);
59 return -EINVAL;
60 }
61
62 /*
63 * The processor is currently spinning, waiting for the
64 * cpu_start field to become non-zero After we set cpu_start,
65 * the processor will continue on to secondary_start
66 */
67 paca[nr].cpu_start = 1;
68
69 return 0;
70}
71
72static int __init smp_a2_probe(void)
73{
74 return num_possible_cpus();
75}
76
77static struct smp_ops_t a2_smp_ops = {
78 .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
79 .cause_ipi = doorbell_cause_ipi,
80 .probe = smp_a2_probe,
81 .kick_cpu = smp_a2_kick_cpu,
82 .setup_cpu = smp_a2_setup_cpu,
83};
84
85void __init a2_setup_smp(void)
86{
87 smp_ops = &a2_smp_ops;
88}
diff --git a/arch/powerpc/platforms/wsp/wsp.c b/arch/powerpc/platforms/wsp/wsp.c
deleted file mode 100644
index 58cd1f00e1ef..000000000000
--- a/arch/powerpc/platforms/wsp/wsp.c
+++ /dev/null
@@ -1,117 +0,0 @@
1/*
2 * Copyright 2008-2011, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/kernel.h>
11#include <linux/of.h>
12#include <linux/of_device.h>
13#include <linux/smp.h>
14#include <linux/delay.h>
15#include <linux/time.h>
16#include <linux/of_address.h>
17
18#include <asm/scom.h>
19
20#include "wsp.h"
21#include "ics.h"
22
23#define WSP_SOC_COMPATIBLE "ibm,wsp-soc"
24#define PBIC_COMPATIBLE "ibm,wsp-pbic"
25#define COPRO_COMPATIBLE "ibm,wsp-coprocessor"
26
27static int __init wsp_probe_buses(void)
28{
29 static __initdata struct of_device_id bus_ids[] = {
30 /*
31 * every node in between needs to be here or you won't
32 * find it
33 */
34 { .compatible = WSP_SOC_COMPATIBLE, },
35 { .compatible = PBIC_COMPATIBLE, },
36 { .compatible = COPRO_COMPATIBLE, },
37 {},
38 };
39 of_platform_bus_probe(NULL, bus_ids, NULL);
40
41 return 0;
42}
43
44void __init wsp_setup_arch(void)
45{
46 /* init to some ~sane value until calibrate_delay() runs */
47 loops_per_jiffy = 50000000;
48
49 scom_init_wsp();
50
51 /* Setup SMP callback */
52#ifdef CONFIG_SMP
53 a2_setup_smp();
54#endif
55#ifdef CONFIG_PCI
56 wsp_setup_pci();
57#endif
58}
59
60void __init wsp_setup_irq(void)
61{
62 wsp_init_irq();
63 opb_pic_init();
64}
65
66
67int __init wsp_probe_devices(void)
68{
69 struct device_node *np;
70
71 /* Our RTC is a ds1500. It seems to be programatically compatible
72 * with the ds1511 for which we have a driver so let's use that
73 */
74 np = of_find_compatible_node(NULL, NULL, "dallas,ds1500");
75 if (np != NULL) {
76 struct resource res;
77 if (of_address_to_resource(np, 0, &res) == 0)
78 platform_device_register_simple("ds1511", 0, &res, 1);
79 }
80
81 wsp_probe_buses();
82
83 return 0;
84}
85
86void wsp_halt(void)
87{
88 u64 val;
89 scom_map_t m;
90 struct device_node *dn;
91 struct device_node *mine;
92 struct device_node *me;
93 int rc;
94
95 me = of_get_cpu_node(smp_processor_id(), NULL);
96 mine = scom_find_parent(me);
97
98 /* This will halt all the A2s but not power off the chip */
99 for_each_node_with_property(dn, "scom-controller") {
100 if (dn == mine)
101 continue;
102 m = scom_map(dn, 0, 1);
103
104 /* read-modify-write it so the HW probe does not get
105 * confused */
106 rc = scom_read(m, 0, &val);
107 if (rc == 0)
108 scom_write(m, 0, val | 1);
109 scom_unmap(m);
110 }
111 m = scom_map(mine, 0, 1);
112 rc = scom_read(m, 0, &val);
113 if (rc == 0)
114 scom_write(m, 0, val | 1);
115 /* should never return */
116 scom_unmap(m);
117}
diff --git a/arch/powerpc/platforms/wsp/wsp.h b/arch/powerpc/platforms/wsp/wsp.h
deleted file mode 100644
index a563a8aaf812..000000000000
--- a/arch/powerpc/platforms/wsp/wsp.h
+++ /dev/null
@@ -1,29 +0,0 @@
1#ifndef __WSP_H
2#define __WSP_H
3
4#include <asm/wsp.h>
5
6/* Devtree compatible strings for major devices */
7#define PCIE_COMPATIBLE "ibm,wsp-pciex"
8
9extern void wsp_setup_arch(void);
10extern void wsp_setup_irq(void);
11extern int wsp_probe_devices(void);
12extern void wsp_halt(void);
13
14extern void wsp_setup_pci(void);
15extern void scom_init_wsp(void);
16
17extern void a2_setup_smp(void);
18extern int a2_scom_startup_cpu(unsigned int lcpu, int thr_idx,
19 struct device_node *np);
20extern int smp_a2_kick_cpu(int nr);
21
22extern void opb_pic_init(void);
23
24/* chroma specific managment */
25extern void wsp_h8_restart(char *cmd);
26extern void wsp_h8_power_off(void);
27extern void __init wsp_setup_h8(void);
28
29#endif /* __WSP_H */
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.c b/arch/powerpc/platforms/wsp/wsp_pci.c
deleted file mode 100644
index 9a15e5b39bb8..000000000000
--- a/arch/powerpc/platforms/wsp/wsp_pci.c
+++ /dev/null
@@ -1,1134 +0,0 @@
1/*
2 * Copyright 2010 Ben Herrenschmidt, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#define DEBUG
11
12#include <linux/kernel.h>
13#include <linux/pci.h>
14#include <linux/delay.h>
15#include <linux/string.h>
16#include <linux/init.h>
17#include <linux/bootmem.h>
18#include <linux/irq.h>
19#include <linux/interrupt.h>
20#include <linux/debugfs.h>
21
22#include <asm/sections.h>
23#include <asm/io.h>
24#include <asm/prom.h>
25#include <asm/pci-bridge.h>
26#include <asm/machdep.h>
27#include <asm/ppc-pci.h>
28#include <asm/iommu.h>
29#include <asm/io-workarounds.h>
30#include <asm/debug.h>
31
32#include "wsp.h"
33#include "wsp_pci.h"
34#include "msi.h"
35
36
37/* Max number of TVTs for one table. Only 32-bit tables can use
38 * multiple TVTs and so the max currently supported is thus 8
39 * since only 2G of DMA space is supported
40 */
41#define MAX_TABLE_TVT_COUNT 8
42
43struct wsp_dma_table {
44 struct list_head link;
45 struct iommu_table table;
46 struct wsp_phb *phb;
47 struct page *tces[MAX_TABLE_TVT_COUNT];
48};
49
50/* We support DMA regions from 0...2G in 32bit space (no support for
51 * 64-bit DMA just yet). Each device gets a separate TCE table (TVT
52 * entry) with validation enabled (though not supported by SimiCS
53 * just yet).
54 *
55 * To simplify things, we divide this 2G space into N regions based
56 * on the constant below which could be turned into a tunable eventually
57 *
58 * We then assign dynamically those regions to devices as they show up.
59 *
60 * We use a bitmap as an allocator for these.
61 *
62 * Tables are allocated/created dynamically as devices are discovered,
63 * multiple TVT entries are used if needed
64 *
65 * When 64-bit DMA support is added we should simply use a separate set
66 * of larger regions (the HW supports 64 TVT entries). We can
67 * additionally create a bypass region in 64-bit space for performances
68 * though that would have a cost in term of security.
69 *
70 * If you set NUM_DMA32_REGIONS to 1, then a single table is shared
71 * for all devices and bus/dev/fn validation is disabled
72 *
73 * Note that a DMA32 region cannot be smaller than 256M so the max
74 * supported here for now is 8. We don't yet support sharing regions
75 * between multiple devices so the max number of devices supported
76 * is MAX_TABLE_TVT_COUNT.
77 */
78#define NUM_DMA32_REGIONS 1
79
80struct wsp_phb {
81 struct pci_controller *hose;
82
83 /* Lock controlling access to the list of dma tables.
84 * It does -not- protect against dma_* operations on
85 * those tables, those should be stopped before an entry
86 * is removed from the list.
87 *
88 * The lock is also used for error handling operations
89 */
90 spinlock_t lock;
91 struct list_head dma_tables;
92 unsigned long dma32_map;
93 unsigned long dma32_base;
94 unsigned int dma32_num_regions;
95 unsigned long dma32_region_size;
96
97 /* Debugfs stuff */
98 struct dentry *ddir;
99
100 struct list_head all;
101};
102static LIST_HEAD(wsp_phbs);
103
104//#define cfg_debug(fmt...) pr_debug(fmt)
105#define cfg_debug(fmt...)
106
107
108static int wsp_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
109 int offset, int len, u32 *val)
110{
111 struct pci_controller *hose;
112 int suboff;
113 u64 addr;
114
115 hose = pci_bus_to_host(bus);
116 if (hose == NULL)
117 return PCIBIOS_DEVICE_NOT_FOUND;
118 if (offset >= 0x1000)
119 return PCIBIOS_BAD_REGISTER_NUMBER;
120 addr = PCIE_REG_CA_ENABLE |
121 ((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
122 ((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
123 ((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
124 suboff = offset & 3;
125
126 /*
127 * Note: the caller has already checked that offset is
128 * suitably aligned and that len is 1, 2 or 4.
129 */
130
131 switch (len) {
132 case 1:
133 addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
134 out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
135 *val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
136 >> (suboff << 3)) & 0xff;
137 cfg_debug("read 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
138 bus->number, devfn >> 3, devfn & 7,
139 offset, suboff, addr, *val);
140 break;
141 case 2:
142 addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
143 out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
144 *val = (in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA)
145 >> (suboff << 3)) & 0xffff;
146 cfg_debug("read 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
147 bus->number, devfn >> 3, devfn & 7,
148 offset, suboff, addr, *val);
149 break;
150 default:
151 addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
152 out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
153 *val = in_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA);
154 cfg_debug("read 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
155 bus->number, devfn >> 3, devfn & 7,
156 offset, suboff, addr, *val);
157 break;
158 }
159 return PCIBIOS_SUCCESSFUL;
160}
161
/*
 * PCI config space write accessor for the WSP PHB (struct pci_ops .write).
 *
 * A config cycle is performed indirectly: a 64-bit command (enable bit,
 * bus/devfn/register fields and byte enables) is programmed into the
 * big-endian CONFIG_ADDRESS register, then the data is pushed through the
 * little-endian CONFIG_DATA window.  The address register must be written
 * before each data access, hence the fixed out_be64/out_le32 ordering in
 * every case below.
 *
 * Returns a PCIBIOS_* status code as required by the PCI core.
 */
static int wsp_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
				 int offset, int len, u32 val)
{
	struct pci_controller *hose;
	int suboff;	/* byte position of 'offset' within its 32-bit word */
	u64 addr;

	hose = pci_bus_to_host(bus);
	if (hose == NULL)
		return PCIBIOS_DEVICE_NOT_FOUND;
	/* Only the 4K of (extended) config space is addressable */
	if (offset >= 0x1000)
		return PCIBIOS_BAD_REGISTER_NUMBER;
	/* Build the indirect-access command word; register field is
	 * word-aligned, the low two bits are handled via byte enables.
	 */
	addr = PCIE_REG_CA_ENABLE |
		((u64)bus->number) << PCIE_REG_CA_BUS_SHIFT |
		((u64)devfn) << PCIE_REG_CA_FUNC_SHIFT |
		((u64)offset & ~3) << PCIE_REG_CA_REG_SHIFT;
	suboff = offset & 3;

	/*
	 * Note: the caller has already checked that offset is
	 * suitably aligned and that len is 1, 2 or 4.
	 */
	switch (len) {
	case 1:
		/* One byte enable; shift the value into its byte lane */
		addr |= (0x8ul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		val <<= suboff << 3;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 1 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%02x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	case 2:
		/* Two byte enables; shift the value into its half-word lane */
		addr |= (0xcul >> suboff) << PCIE_REG_CA_BE_SHIFT;
		val <<= suboff << 3;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 2 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%04x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	default:
		/* Full 32-bit write: all four byte enables set */
		addr |= 0xful << PCIE_REG_CA_BE_SHIFT;
		out_be64(hose->cfg_data + PCIE_REG_CONFIG_ADDRESS, addr);
		out_le32(hose->cfg_data + PCIE_REG_CONFIG_DATA, val);
		cfg_debug("write 4 %02x:%02x:%02x + %02x/%x addr=0x%llx val=%08x\n",
			  bus->number, devfn >> 3, devfn & 7,
			  offset, suboff, addr, val);
		break;
	}
	return PCIBIOS_SUCCESSFUL;
}
214
/* Config-space accessors hooked into the generic PCI core for this PHB */
static struct pci_ops wsp_pcie_pci_ops =
{
	.read = wsp_pcie_read_config,
	.write = wsp_pcie_write_config,
};
220
/* TCE (DMA translation entry) layout: 4K IOMMU pages, R/W permission
 * bits in the low bits, real page number shifted above them.
 */
#define TCE_SHIFT		12
#define TCE_PAGE_SIZE		(1 << TCE_SHIFT)
#define TCE_PCI_WRITE		0x2		 /* write from PCI allowed */
#define TCE_PCI_READ		0x1		 /* read from PCI allowed */
#define TCE_RPN_MASK		0x3fffffffffful  /* 42-bit RPN (4K pages) */
#define TCE_RPN_SHIFT		12

/* Flip the two lines below to enable verbose DMA/TCE tracing */
//#define dma_debug(fmt...)	pr_debug(fmt)
#define dma_debug(fmt...)
230
231static int tce_build_wsp(struct iommu_table *tbl, long index, long npages,
232 unsigned long uaddr, enum dma_data_direction direction,
233 struct dma_attrs *attrs)
234{
235 struct wsp_dma_table *ptbl = container_of(tbl,
236 struct wsp_dma_table,
237 table);
238 u64 proto_tce;
239 u64 *tcep;
240 u64 rpn;
241
242 proto_tce = TCE_PCI_READ;
243#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
244 proto_tce |= TCE_PCI_WRITE;
245#else
246 if (direction != DMA_TO_DEVICE)
247 proto_tce |= TCE_PCI_WRITE;
248#endif
249
250 /* XXX Make this faster by factoring out the page address for
251 * within a TCE table
252 */
253 while (npages--) {
254 /* We don't use it->base as the table can be scattered */
255 tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
256 tcep += (index & 0xffff);
257
258 /* can't move this out since we might cross LMB boundary */
259 rpn = __pa(uaddr) >> TCE_SHIFT;
260 *tcep = proto_tce | (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
261
262 dma_debug("[DMA] TCE %p set to 0x%016llx (dma addr: 0x%lx)\n",
263 tcep, *tcep, (tbl->it_offset + index) << IOMMU_PAGE_SHIFT_4K);
264
265 uaddr += TCE_PAGE_SIZE;
266 index++;
267 }
268 return 0;
269}
270
/*
 * Clear 'npages' TCE entries starting at 'index'.  On non-DD1 hardware
 * each cleared entry is also invalidated in the PHB's TCE cache through
 * the TCE_KILL register; the DD1 workaround build runs with TCE caching
 * disabled, so no invalidation is needed there.
 */
static void tce_free_wsp(struct iommu_table *tbl, long index, long npages)
{
	struct wsp_dma_table *ptbl = container_of(tbl,
						  struct wsp_dma_table,
						  table);
#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	struct pci_controller *hose = ptbl->phb->hose;
#endif
	u64 *tcep;

	/* XXX Make this faster by factoring out the page address for
	 * within a TCE table. Also use line-kill option to kill multiple
	 * TCEs at once
	 */
	while (npages--) {
		/* We don't use it->base as the table can be scattered */
		tcep = (u64 *)page_address(ptbl->tces[index >> 16]);
		tcep += (index & 0xffff);
		dma_debug("[DMA] TCE %p cleared\n", tcep);
		*tcep = 0;
#ifndef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
		/* Don't write there since it would pollute other MMIO accesses */
		out_be64(hose->cfg_data + PCIE_REG_TCE_KILL,
			 PCIE_REG_TCEKILL_SINGLE | PCIE_REG_TCEKILL_PS_4K |
			 (__pa(tcep) & PCIE_REG_TCEKILL_ADDR_MASK));
#endif
		index++;
	}
}
300
/*
 * Allocate and program a 32-bit DMA window (iommu table) for one of the
 * PHB's DMA regions.
 *
 * @phb:      owning PHB
 * @region:   which equally-sized dma32 region this table covers
 * @validate: if non-NULL, restrict the TVT entries to that exact
 *            bus/device/function (per-device validation)
 *
 * One TVT (translation validation table) entry covers at most 256M, so a
 * larger region gets several consecutive TVT entries.  The backing TCE
 * pages are allocated on the PHB's NUMA node.
 *
 * Returns the new table, or ERR_PTR(-ENOMEM) on allocation failure.
 * Caller is expected to hold no PHB lock; the TVT programming takes it
 * internally.
 */
static struct wsp_dma_table *wsp_pci_create_dma32_table(struct wsp_phb *phb,
							unsigned int region,
							struct pci_dev *validate)
{
	struct pci_controller *hose = phb->hose;
	unsigned long size = phb->dma32_region_size;
	unsigned long addr = phb->dma32_region_size * region + phb->dma32_base;
	struct wsp_dma_table *tbl;
	int tvts_per_table, i, tvt, nid;
	unsigned long flags;

	nid = of_node_to_nid(phb->hose->dn);

	/* Calculate how many TVTs are needed (one per 256M) */
	tvts_per_table = size / 0x10000000;
	if (tvts_per_table == 0)
		tvts_per_table = 1;

	/* Calculate the base TVT index. We know all tables have the same
	 * size so we just do a simple multiply here
	 */
	tvt = region * tvts_per_table;

	pr_debug("         Region : %d\n", region);
	pr_debug("      DMA range : 0x%08lx..0x%08lx\n", addr, addr + size - 1);
	pr_debug(" Number of TVTs : %d\n", tvts_per_table);
	pr_debug("       Base TVT : %d\n", tvt);
	pr_debug("         Node   : %d\n", nid);

	tbl = kzalloc_node(sizeof(struct wsp_dma_table), GFP_KERNEL, nid);
	if (!tbl)
		return ERR_PTR(-ENOMEM);
	tbl->phb = phb;

	/* Create as many TVTs as needed, each represents 256M at most */
	for (i = 0; i < tvts_per_table; i++) {
		u64 tvt_data1, tvt_data0;

		/* Allocate table. We use a 4K TCE size for now always so
		 * one table is always 8 * (256M / 4K) == 512K
		 */
		tbl->tces[i] = alloc_pages_node(nid, GFP_KERNEL, get_order(0x80000));
		if (tbl->tces[i] == NULL)
			goto fail;
		memset(page_address(tbl->tces[i]), 0, 0x80000);

		pr_debug(" TCE table %d at : %p\n", i, page_address(tbl->tces[i]));

		/* Table size. We currently set it to be the whole 256M region */
		tvt_data0 = 2ull << IODA_TVT0_TCE_TABLE_SIZE_SHIFT;
		/* IO page size set to 4K */
		tvt_data1 = 1ull << IODA_TVT1_IO_PAGE_SIZE_SHIFT;
		/* Shift in the address */
		tvt_data0 |= __pa(page_address(tbl->tces[i])) << IODA_TVT0_TTA_SHIFT;

		/* Validation stuff. We only validate fully bus/dev/fn for now
		 * one day maybe we can group devices but that isn't the case
		 * at the moment
		 */
		if (validate) {
			tvt_data0 |= IODA_TVT0_BUSNUM_VALID_MASK;
			tvt_data0 |= validate->bus->number;
			tvt_data1 |= IODA_TVT1_DEVNUM_VALID;
			tvt_data1 |= ((u64)PCI_SLOT(validate->devfn))
				<< IODA_TVT1_DEVNUM_VALUE_SHIFT;
			tvt_data1 |= IODA_TVT1_FUNCNUM_VALID;
			tvt_data1 |= ((u64)PCI_FUNC(validate->devfn))
				<< IODA_TVT1_FUNCNUM_VALUE_SHIFT;
		}

		/* XX PE number is always 0 for now */

		/* Program the values using the PHB lock */
		spin_lock_irqsave(&phb->lock, flags);
		out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
			 (tvt + i) | PCIE_REG_IODA_AD_TBL_TVT);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, tvt_data1);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, tvt_data0);
		spin_unlock_irqrestore(&phb->lock, flags);
	}

	/* Init bits and pieces */
	tbl->table.it_blocksize = 16;
	tbl->table.it_page_shift = IOMMU_PAGE_SHIFT_4K;
	tbl->table.it_offset = addr >> tbl->table.it_page_shift;
	tbl->table.it_size = size >> tbl->table.it_page_shift;

	/*
	 * It's already blank but we clear it anyway.
	 * Consider an additional interface that makes clearing optional
	 */
	iommu_init_table(&tbl->table, nid);

	list_add(&tbl->link, &phb->dma_tables);
	return tbl;

 fail:
	pr_debug(" Failed to allocate a 256M TCE table !\n");
	/* tbl was zero-allocated, so unset slots are NULL and skipped */
	for (i = 0; i < tvts_per_table; i++)
		if (tbl->tces[i])
			__free_pages(tbl->tces[i], get_order(0x80000));
	kfree(tbl);
	return ERR_PTR(-ENOMEM);
}
405
/*
 * Per-device DMA setup hook: assign an iommu table to 'pdev'.
 *
 * Locking is deliberately asymmetric, so read carefully:
 *  - single-region case: the lock is dropped immediately and the shared
 *    table is looked up / created unlocked;
 *  - multi-region case: the lock is held while scanning the region
 *    bitmap, dropped as soon as a free region is claimed (table creation
 *    takes the lock itself), and is therefore still held only on the
 *    "no free region" fall-through path at the bottom.
 */
static void wsp_pci_dma_dev_setup(struct pci_dev *pdev)
{
	struct dev_archdata *archdata = &pdev->dev.archdata;
	struct pci_controller *hose = pci_bus_to_host(pdev->bus);
	struct wsp_phb *phb = hose->private_data;
	struct wsp_dma_table *table = NULL;
	unsigned long flags;
	int i;

	/* Don't assign an iommu table to a bridge */
	if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
		return;

	pr_debug("%s: Setting up DMA...\n", pci_name(pdev));

	spin_lock_irqsave(&phb->lock, flags);

	/* If only one region, check if it already exist */
	if (phb->dma32_num_regions == 1) {
		spin_unlock_irqrestore(&phb->lock, flags);
		if (list_empty(&phb->dma_tables))
			table = wsp_pci_create_dma32_table(phb, 0, NULL);
		else
			table = list_first_entry(&phb->dma_tables,
						 struct wsp_dma_table,
						 link);
	} else {
		/* else find a free region */
		for (i = 0; i < phb->dma32_num_regions && !table; i++) {
			if (__test_and_set_bit(i, &phb->dma32_map))
				continue;
			/* Region claimed; creation re-takes the lock itself */
			spin_unlock_irqrestore(&phb->lock, flags);
			table = wsp_pci_create_dma32_table(phb, i, pdev);
		}
	}

	/* Check if we got an error */
	if (IS_ERR(table)) {
		pr_err("%s: Failed to create DMA table, err %ld !\n",
		       pci_name(pdev), PTR_ERR(table));
		return;
	}

	/* Or a valid table */
	if (table) {
		pr_info("%s: Setup iommu: 32-bit DMA region 0x%08lx..0x%08lx\n",
			pci_name(pdev),
			table->table.it_offset << IOMMU_PAGE_SHIFT_4K,
			(table->table.it_offset << IOMMU_PAGE_SHIFT_4K)
			+ phb->dma32_region_size - 1);
		archdata->dma_data.iommu_table_base = &table->table;
		return;
	}

	/* Or no room — only reachable with the lock still held */
	spin_unlock_irqrestore(&phb->lock, flags);
	pr_err("%s: Out of DMA space !\n", pci_name(pdev));
}
464
/*
 * One-time hardware bring-up of a WSP PHB: fix the root port class code,
 * program the IO and M32A outbound windows from the parsed resources,
 * wipe all TVT entries, enable MSIs/IO/M32A, and mask the error sources
 * known to fire spuriously during probing.
 */
static void __init wsp_pcie_configure_hw(struct pci_controller *hose)
{
	u64 val;
	int i;

#define DUMP_REG(x) \
	pr_debug("%-30s : 0x%016llx\n", #x, in_be64(hose->cfg_data + x))

	/*
	 * Some WSP variants has a bogus class code by default in the PCI-E
	 * root complex's built-in P2P bridge
	 */
	val = in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1);
	pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", val);
	out_be64(hose->cfg_data + PCIE_REG_SYS_CFG1,
		 (val & ~PCIE_REG_SYS_CFG1_CLASS_CODE) | (PCI_CLASS_BRIDGE_PCI << 8));
	pr_debug("PCI-E SYS_CFG1 : 0x%llx\n", in_be64(hose->cfg_data + PCIE_REG_SYS_CFG1));

#ifdef CONFIG_WSP_DD1_WORKAROUND_DD1_TCE_BUGS
	/* XXX Disable TCE caching, it doesn't work on DD1 */
	out_be64(hose->cfg_data + 0xe50,
		 in_be64(hose->cfg_data + 0xe50) | (3ull << 62));
	printk("PCI-E DEBUG CONTROL 5 = 0x%llx\n", in_be64(hose->cfg_data + 0xe50));
#endif

	/* Configure M32A and IO. IO is hard wired to be 1M for now */
	out_be64(hose->cfg_data + PCIE_REG_IO_BASE_ADDR, hose->io_base_phys);
	out_be64(hose->cfg_data + PCIE_REG_IO_BASE_MASK,
		 (~(hose->io_resource.end - hose->io_resource.start)) &
		 0x3fffffff000ul);
	out_be64(hose->cfg_data + PCIE_REG_IO_START_ADDR, 0 | 1);

	out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_ADDR,
		 hose->mem_resources[0].start);
	printk("Want to write to M32A_BASE_MASK : 0x%llx\n",
	       (~(hose->mem_resources[0].end -
		  hose->mem_resources[0].start)) & 0x3ffffff0000ul);
	out_be64(hose->cfg_data + PCIE_REG_M32A_BASE_MASK,
		 (~(hose->mem_resources[0].end -
		    hose->mem_resources[0].start)) & 0x3ffffff0000ul);
	out_be64(hose->cfg_data + PCIE_REG_M32A_START_ADDR,
		 (hose->mem_resources[0].start - hose->mem_offset[0]) | 1);

	/* Clear all TVT entries
	 *
	 * XX Might get TVT count from device-tree
	 */
	for (i = 0; i < IODA_TVT_COUNT; i++) {
		out_be64(hose->cfg_data + PCIE_REG_IODA_ADDR,
			 PCIE_REG_IODA_AD_TBL_TVT | i);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA1, 0);
		out_be64(hose->cfg_data + PCIE_REG_IODA_DATA0, 0);
	}

	/* Kill the TCE cache */
	out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG,
		 in_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG) |
		 PCIE_REG_PHBC_64B_TCE_EN);

	/* Enable 32 & 64-bit MSIs, IO space and M32A */
	val = PCIE_REG_PHBC_32BIT_MSI_EN |
		PCIE_REG_PHBC_IO_EN |
		PCIE_REG_PHBC_64BIT_MSI_EN |
		PCIE_REG_PHBC_M32A_EN;
	if (iommu_is_off)
		val |= PCIE_REG_PHBC_DMA_XLATE_BYPASS;
	pr_debug("Will write config: 0x%llx\n", val);
	out_be64(hose->cfg_data + PCIE_REG_PHB_CONFIG, val);

	/* Enable error reporting (magic bit per HW spec — not named in
	 * wsp_pci.h)
	 */
	out_be64(hose->cfg_data + 0xe00,
		 in_be64(hose->cfg_data + 0xe00) | 0x0008000000000000ull);

	/* Mask an error that's generated when doing config space probe
	 *
	 * XXX Maybe we should only mask it around config space cycles... that or
	 * ignore it when we know we had a config space cycle recently ?
	 */
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS_MASK, 0x8000000000000000ull);
	out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS_MASK, 0x8000000000000000ull);

	/* Enable UTL errors, for now, all of them got to UTL irq 1
	 *
	 * We similarily mask one UTL error caused apparently during normal
	 * probing. We also mask the link up error
	 */
	out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_ERR_SEV, 0);
	out_be64(hose->cfg_data + PCIE_UTL_RC_ERR_SEVERITY, 0);
	out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_ERROR_SEV, 0);
	out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_IRQ_EN, 0xffffffff00000000ull);
	out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_IRQ_EN, 0xff5fffff00000000ull);
	out_be64(hose->cfg_data + PCIE_UTL_EP_ERR_IRQ_EN, 0xffffffff00000000ull);

	/* Dump the resulting window/config setup for debugging */
	DUMP_REG(PCIE_REG_IO_BASE_ADDR);
	DUMP_REG(PCIE_REG_IO_BASE_MASK);
	DUMP_REG(PCIE_REG_IO_START_ADDR);
	DUMP_REG(PCIE_REG_M32A_BASE_ADDR);
	DUMP_REG(PCIE_REG_M32A_BASE_MASK);
	DUMP_REG(PCIE_REG_M32A_START_ADDR);
	DUMP_REG(PCIE_REG_M32B_BASE_ADDR);
	DUMP_REG(PCIE_REG_M32B_BASE_MASK);
	DUMP_REG(PCIE_REG_M32B_START_ADDR);
	DUMP_REG(PCIE_REG_M64_BASE_ADDR);
	DUMP_REG(PCIE_REG_M64_BASE_MASK);
	DUMP_REG(PCIE_REG_M64_START_ADDR);
	DUMP_REG(PCIE_REG_PHB_CONFIG);
}
572
573static void wsp_pci_wait_io_idle(struct wsp_phb *phb, unsigned long port)
574{
575 u64 val;
576 int i;
577
578 for (i = 0; i < 10000; i++) {
579 val = in_be64(phb->hose->cfg_data + 0xe08);
580 if ((val & 0x1900000000000000ull) == 0x0100000000000000ull)
581 return;
582 udelay(1);
583 }
584 pr_warning("PCI IO timeout on domain %d port 0x%lx\n",
585 phb->hose->global_number, port);
586}
587
/*
 * PIO accessor generation: including <asm/io-defs.h> below expands these
 * macros once per accessor (inb, outw, insl, ...), producing the
 * wsp_pci_* functions referenced by wsp_pci_iops.  Only the "pio" space
 * variants generate code; the "mem" variants expand to nothing since
 * memory-space accesses need no workaround here.
 *
 * Each generated wrapper looks up the iowa bus for the port, then
 * serializes the access under the PHB lock after waiting for the PHB's
 * IO path to go idle.
 */
#define DEF_PCI_AC_RET_pio(name, ret, at, al, aa)	\
static ret wsp_pci_##name at				\
{							\
	struct iowa_bus *bus;				\
	struct wsp_phb *phb;				\
	unsigned long flags;				\
	ret rval;					\
	bus = iowa_pio_find_bus(aa);			\
	WARN_ON(!bus);					\
	phb = bus->private;				\
	spin_lock_irqsave(&phb->lock, flags);		\
	wsp_pci_wait_io_idle(phb, aa);			\
	rval = __do_##name al;				\
	spin_unlock_irqrestore(&phb->lock, flags);	\
	return rval;					\
}

#define DEF_PCI_AC_NORET_pio(name, at, al, aa)		\
static void wsp_pci_##name at				\
{							\
	struct iowa_bus *bus;				\
	struct wsp_phb *phb;				\
	unsigned long flags;				\
	bus = iowa_pio_find_bus(aa);			\
	WARN_ON(!bus);					\
	phb = bus->private;				\
	spin_lock_irqsave(&phb->lock, flags);		\
	wsp_pci_wait_io_idle(phb, aa);			\
	__do_##name al;					\
	spin_unlock_irqrestore(&phb->lock, flags);	\
}

/* Memory-space accessors: no override needed, expand to nothing */
#define DEF_PCI_AC_RET_mem(name, ret, at, al, aa)
#define DEF_PCI_AC_NORET_mem(name, at, al, aa)

#define DEF_PCI_AC_RET(name, ret, at, al, space, aa)	\
	DEF_PCI_AC_RET_##space(name, ret, at, al, aa)

#define DEF_PCI_AC_NORET(name, at, al, space, aa)	\
	DEF_PCI_AC_NORET_##space(name, at, al, aa)	\


#include <asm/io-defs.h>

#undef DEF_PCI_AC_RET
#undef DEF_PCI_AC_NORET
634
/* PIO ops table registered with the IO-workaround layer; entries are the
 * functions generated above from <asm/io-defs.h>.
 */
static struct ppc_pci_io wsp_pci_iops = {
	.inb = wsp_pci_inb,
	.inw = wsp_pci_inw,
	.inl = wsp_pci_inl,
	.outb = wsp_pci_outb,
	.outw = wsp_pci_outw,
	.outl = wsp_pci_outl,
	.insb = wsp_pci_insb,
	.insw = wsp_pci_insw,
	.insl = wsp_pci_insl,
	.outsb = wsp_pci_outsb,
	.outsw = wsp_pci_outsw,
	.outsl = wsp_pci_outsl,
};
649
650static int __init wsp_setup_one_phb(struct device_node *np)
651{
652 struct pci_controller *hose;
653 struct wsp_phb *phb;
654
655 pr_info("PCI: Setting up PCIe host bridge 0x%s\n", np->full_name);
656
657 phb = zalloc_maybe_bootmem(sizeof(struct wsp_phb), GFP_KERNEL);
658 if (!phb)
659 return -ENOMEM;
660 hose = pcibios_alloc_controller(np);
661 if (!hose) {
662 /* Can't really free the phb */
663 return -ENOMEM;
664 }
665 hose->private_data = phb;
666 phb->hose = hose;
667
668 INIT_LIST_HEAD(&phb->dma_tables);
669 spin_lock_init(&phb->lock);
670
671 /* XXX Use bus-range property ? */
672 hose->first_busno = 0;
673 hose->last_busno = 0xff;
674
675 /* We use cfg_data as the address for the whole bridge MMIO space
676 */
677 hose->cfg_data = of_iomap(hose->dn, 0);
678
679 pr_debug("PCIe registers mapped at 0x%p\n", hose->cfg_data);
680
681 /* Get the ranges of the device-tree */
682 pci_process_bridge_OF_ranges(hose, np, 0);
683
684 /* XXX Force re-assigning of everything for now */
685 pci_add_flags(PCI_REASSIGN_ALL_BUS | PCI_REASSIGN_ALL_RSRC |
686 PCI_ENABLE_PROC_DOMAINS);
687
688 /* Calculate how the TCE space is divided */
689 phb->dma32_base = 0;
690 phb->dma32_num_regions = NUM_DMA32_REGIONS;
691 if (phb->dma32_num_regions > MAX_TABLE_TVT_COUNT) {
692 pr_warning("IOMMU: Clamped to %d DMA32 regions\n",
693 MAX_TABLE_TVT_COUNT);
694 phb->dma32_num_regions = MAX_TABLE_TVT_COUNT;
695 }
696 phb->dma32_region_size = 0x80000000 / phb->dma32_num_regions;
697
698 BUG_ON(!is_power_of_2(phb->dma32_region_size));
699
700 /* Setup config ops */
701 hose->ops = &wsp_pcie_pci_ops;
702
703 /* Configure the HW */
704 wsp_pcie_configure_hw(hose);
705
706 /* Instanciate IO workarounds */
707 iowa_register_bus(hose, &wsp_pci_iops, NULL, phb);
708#ifdef CONFIG_PCI_MSI
709 wsp_setup_phb_msi(hose);
710#endif
711
712 /* Add to global list */
713 list_add(&phb->all, &wsp_phbs);
714
715 return 0;
716}
717
/*
 * Platform entry point: set up every PCIe host bridge found in the
 * device-tree, then select the DMA strategy — direct DMA when the IOMMU
 * was disabled on the command line, TCE-based iommu ops otherwise.
 */
void __init wsp_setup_pci(void)
{
	struct device_node *np;
	int rc;

	/* Find host bridges */
	for_each_compatible_node(np, "pciex", PCIE_COMPATIBLE) {
		rc = wsp_setup_one_phb(np);
		if (rc)
			pr_err("Failed to setup PCIe bridge %s, rc=%d\n",
			       np->full_name, rc);
	}

	/* Establish device-tree linkage */
	pci_devs_phb_init();

	/* Set DMA ops to use TCEs */
	if (iommu_is_off) {
		pr_info("PCI-E: Disabled TCEs, using direct DMA\n");
		set_pci_dma_ops(&dma_direct_ops);
	} else {
		ppc_md.pci_dma_dev_setup = wsp_pci_dma_dev_setup;
		ppc_md.tce_build = tce_build_wsp;
		ppc_md.tce_free = tce_free_wsp;
		set_pci_dma_ops(&dma_iommu_ops);
	}
}
745
746#define err_debug(fmt...) pr_debug(fmt)
747//#define err_debug(fmt...)
748
749static int __init wsp_pci_get_err_irq_no_dt(struct device_node *np)
750{
751 const u32 *prop;
752 int hw_irq;
753
754 /* Ok, no interrupts property, let's try to find our child P2P */
755 np = of_get_next_child(np, NULL);
756 if (np == NULL)
757 return 0;
758
759 /* Grab it's interrupt map */
760 prop = of_get_property(np, "interrupt-map", NULL);
761 if (prop == NULL)
762 return 0;
763
764 /* Grab one of the interrupts in there, keep the low 4 bits */
765 hw_irq = prop[5] & 0xf;
766
767 /* 0..4 for PHB 0 and 5..9 for PHB 1 */
768 if (hw_irq < 5)
769 hw_irq = 4;
770 else
771 hw_irq = 9;
772 hw_irq |= prop[5] & ~0xf;
773
774 err_debug("PCI: Using 0x%x as error IRQ for %s\n",
775 hw_irq, np->parent->full_name);
776 return irq_create_mapping(NULL, hw_irq);
777}
778
779static const struct {
780 u32 offset;
781 const char *name;
782} wsp_pci_regs[] = {
783#define DREG(x) { PCIE_REG_##x, #x }
784#define DUTL(x) { PCIE_UTL_##x, "UTL_" #x }
785 /* Architected registers except CONFIG_ and IODA
786 * to avoid side effects
787 */
788 DREG(DMA_CHAN_STATUS),
789 DREG(CPU_LOADSTORE_STATUS),
790 DREG(LOCK0),
791 DREG(LOCK1),
792 DREG(PHB_CONFIG),
793 DREG(IO_BASE_ADDR),
794 DREG(IO_BASE_MASK),
795 DREG(IO_START_ADDR),
796 DREG(M32A_BASE_ADDR),
797 DREG(M32A_BASE_MASK),
798 DREG(M32A_START_ADDR),
799 DREG(M32B_BASE_ADDR),
800 DREG(M32B_BASE_MASK),
801 DREG(M32B_START_ADDR),
802 DREG(M64_BASE_ADDR),
803 DREG(M64_BASE_MASK),
804 DREG(M64_START_ADDR),
805 DREG(TCE_KILL),
806 DREG(LOCK2),
807 DREG(PHB_GEN_CAP),
808 DREG(PHB_TCE_CAP),
809 DREG(PHB_IRQ_CAP),
810 DREG(PHB_EEH_CAP),
811 DREG(PAPR_ERR_INJ_CONTROL),
812 DREG(PAPR_ERR_INJ_ADDR),
813 DREG(PAPR_ERR_INJ_MASK),
814
815 /* UTL core regs */
816 DUTL(SYS_BUS_CONTROL),
817 DUTL(STATUS),
818 DUTL(SYS_BUS_AGENT_STATUS),
819 DUTL(SYS_BUS_AGENT_ERR_SEV),
820 DUTL(SYS_BUS_AGENT_IRQ_EN),
821 DUTL(SYS_BUS_BURST_SZ_CONF),
822 DUTL(REVISION_ID),
823 DUTL(OUT_POST_HDR_BUF_ALLOC),
824 DUTL(OUT_POST_DAT_BUF_ALLOC),
825 DUTL(IN_POST_HDR_BUF_ALLOC),
826 DUTL(IN_POST_DAT_BUF_ALLOC),
827 DUTL(OUT_NP_BUF_ALLOC),
828 DUTL(IN_NP_BUF_ALLOC),
829 DUTL(PCIE_TAGS_ALLOC),
830 DUTL(GBIF_READ_TAGS_ALLOC),
831
832 DUTL(PCIE_PORT_CONTROL),
833 DUTL(PCIE_PORT_STATUS),
834 DUTL(PCIE_PORT_ERROR_SEV),
835 DUTL(PCIE_PORT_IRQ_EN),
836 DUTL(RC_STATUS),
837 DUTL(RC_ERR_SEVERITY),
838 DUTL(RC_IRQ_EN),
839 DUTL(EP_STATUS),
840 DUTL(EP_ERR_SEVERITY),
841 DUTL(EP_ERR_IRQ_EN),
842 DUTL(PCI_PM_CTRL1),
843 DUTL(PCI_PM_CTRL2),
844
845 /* PCIe stack regs */
846 DREG(SYSTEM_CONFIG1),
847 DREG(SYSTEM_CONFIG2),
848 DREG(EP_SYSTEM_CONFIG),
849 DREG(EP_FLR),
850 DREG(EP_BAR_CONFIG),
851 DREG(LINK_CONFIG),
852 DREG(PM_CONFIG),
853 DREG(DLP_CONTROL),
854 DREG(DLP_STATUS),
855 DREG(ERR_REPORT_CONTROL),
856 DREG(SLOT_CONTROL1),
857 DREG(SLOT_CONTROL2),
858 DREG(UTL_CONFIG),
859 DREG(BUFFERS_CONFIG),
860 DREG(ERROR_INJECT),
861 DREG(SRIOV_CONFIG),
862 DREG(PF0_SRIOV_STATUS),
863 DREG(PF1_SRIOV_STATUS),
864 DREG(PORT_NUMBER),
865 DREG(POR_SYSTEM_CONFIG),
866
867 /* Internal logic regs */
868 DREG(PHB_VERSION),
869 DREG(RESET),
870 DREG(PHB_CONTROL),
871 DREG(PHB_TIMEOUT_CONTROL1),
872 DREG(PHB_QUIESCE_DMA),
873 DREG(PHB_DMA_READ_TAG_ACTV),
874 DREG(PHB_TCE_READ_TAG_ACTV),
875
876 /* FIR registers */
877 DREG(LEM_FIR_ACCUM),
878 DREG(LEM_FIR_AND_MASK),
879 DREG(LEM_FIR_OR_MASK),
880 DREG(LEM_ACTION0),
881 DREG(LEM_ACTION1),
882 DREG(LEM_ERROR_MASK),
883 DREG(LEM_ERROR_AND_MASK),
884 DREG(LEM_ERROR_OR_MASK),
885
886 /* Error traps registers */
887 DREG(PHB_ERR_STATUS),
888 DREG(PHB_ERR_STATUS),
889 DREG(PHB_ERR1_STATUS),
890 DREG(PHB_ERR_INJECT),
891 DREG(PHB_ERR_LEM_ENABLE),
892 DREG(PHB_ERR_IRQ_ENABLE),
893 DREG(PHB_ERR_FREEZE_ENABLE),
894 DREG(PHB_ERR_SIDE_ENABLE),
895 DREG(PHB_ERR_LOG_0),
896 DREG(PHB_ERR_LOG_1),
897 DREG(PHB_ERR_STATUS_MASK),
898 DREG(PHB_ERR1_STATUS_MASK),
899 DREG(MMIO_ERR_STATUS),
900 DREG(MMIO_ERR1_STATUS),
901 DREG(MMIO_ERR_INJECT),
902 DREG(MMIO_ERR_LEM_ENABLE),
903 DREG(MMIO_ERR_IRQ_ENABLE),
904 DREG(MMIO_ERR_FREEZE_ENABLE),
905 DREG(MMIO_ERR_SIDE_ENABLE),
906 DREG(MMIO_ERR_LOG_0),
907 DREG(MMIO_ERR_LOG_1),
908 DREG(MMIO_ERR_STATUS_MASK),
909 DREG(MMIO_ERR1_STATUS_MASK),
910 DREG(DMA_ERR_STATUS),
911 DREG(DMA_ERR1_STATUS),
912 DREG(DMA_ERR_INJECT),
913 DREG(DMA_ERR_LEM_ENABLE),
914 DREG(DMA_ERR_IRQ_ENABLE),
915 DREG(DMA_ERR_FREEZE_ENABLE),
916 DREG(DMA_ERR_SIDE_ENABLE),
917 DREG(DMA_ERR_LOG_0),
918 DREG(DMA_ERR_LOG_1),
919 DREG(DMA_ERR_STATUS_MASK),
920 DREG(DMA_ERR1_STATUS_MASK),
921
922 /* Debug and Trace registers */
923 DREG(PHB_DEBUG_CONTROL0),
924 DREG(PHB_DEBUG_STATUS0),
925 DREG(PHB_DEBUG_CONTROL1),
926 DREG(PHB_DEBUG_STATUS1),
927 DREG(PHB_DEBUG_CONTROL2),
928 DREG(PHB_DEBUG_STATUS2),
929 DREG(PHB_DEBUG_CONTROL3),
930 DREG(PHB_DEBUG_STATUS3),
931 DREG(PHB_DEBUG_CONTROL4),
932 DREG(PHB_DEBUG_STATUS4),
933 DREG(PHB_DEBUG_CONTROL5),
934 DREG(PHB_DEBUG_STATUS5),
935
936 /* Don't seem to exist ...
937 DREG(PHB_DEBUG_CONTROL6),
938 DREG(PHB_DEBUG_STATUS6),
939 */
940};
941
942static int wsp_pci_regs_show(struct seq_file *m, void *private)
943{
944 struct wsp_phb *phb = m->private;
945 struct pci_controller *hose = phb->hose;
946 int i;
947
948 for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
949 /* Skip write-only regs */
950 if (wsp_pci_regs[i].offset == 0xc08 ||
951 wsp_pci_regs[i].offset == 0xc10 ||
952 wsp_pci_regs[i].offset == 0xc38 ||
953 wsp_pci_regs[i].offset == 0xc40)
954 continue;
955 seq_printf(m, "0x%03x: 0x%016llx %s\n",
956 wsp_pci_regs[i].offset,
957 in_be64(hose->cfg_data + wsp_pci_regs[i].offset),
958 wsp_pci_regs[i].name);
959 }
960 return 0;
961}
962
/* debugfs open: bind the seq_file to wsp_pci_regs_show with the phb
 * (stored in i_private) as its private data.
 */
static int wsp_pci_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, wsp_pci_regs_show, inode->i_private);
}
967
/* File operations for the "all_regs" debugfs file (single_open seq_file) */
static const struct file_operations wsp_pci_regs_fops = {
	.open = wsp_pci_regs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
974
/* debugfs attribute setter: 'data' is the ioremapped register address */
static int wsp_pci_reg_set(void *data, u64 val)
{
	out_be64((void __iomem *)data, val);
	return 0;
}
980
/* debugfs attribute getter: 'data' is the ioremapped register address */
static int wsp_pci_reg_get(void *data, u64 *val)
{
	*val = in_be64((void __iomem *)data);
	return 0;
}

/* Per-register debugfs file ops, hex formatted */
DEFINE_SIMPLE_ATTRIBUTE(wsp_pci_reg_fops, wsp_pci_reg_get, wsp_pci_reg_set, "0x%llx\n");
988
/*
 * PHB error interrupt handler.
 *
 * Harvests and clears the UTL status registers and the three error trap
 * groups (PHB, MMIO, DMA) — reading the "first error"/log registers
 * before clearing, since clearing rearms the traps — then prints
 * everything found.  If any trap group had an error, it loops to catch
 * errors the UTL may have buffered meanwhile; IRQ_HANDLED is returned
 * only if at least one trap error was seen.
 */
static irqreturn_t wsp_pci_err_irq(int irq, void *dev_id)
{
	struct wsp_phb *phb = dev_id;
	struct pci_controller *hose = phb->hose;
	irqreturn_t handled = IRQ_NONE;
	struct wsp_pcie_err_log_data ed;

	pr_err("PCI: Error interrupt on %s (PHB %d)\n",
	       hose->dn->full_name, hose->global_number);
 again:
	memset(&ed, 0, sizeof(ed));

	/* Read and clear UTL errors */
	ed.utl_sys_err = in_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS);
	if (ed.utl_sys_err)
		out_be64(hose->cfg_data + PCIE_UTL_SYS_BUS_AGENT_STATUS, ed.utl_sys_err);
	ed.utl_port_err = in_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS);
	if (ed.utl_port_err)
		out_be64(hose->cfg_data + PCIE_UTL_PCIE_PORT_STATUS, ed.utl_port_err);
	ed.utl_rc_err = in_be64(hose->cfg_data + PCIE_UTL_RC_STATUS);
	if (ed.utl_rc_err)
		out_be64(hose->cfg_data + PCIE_UTL_RC_STATUS, ed.utl_rc_err);

	/* Read and clear main trap errors */
	ed.phb_err = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS);
	if (ed.phb_err) {
		ed.phb_err1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS);
		ed.phb_log0 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_0);
		ed.phb_log1 = in_be64(hose->cfg_data + PCIE_REG_PHB_ERR_LOG_1);
		out_be64(hose->cfg_data + PCIE_REG_PHB_ERR1_STATUS, 0);
		out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_STATUS, 0);
	}
	ed.mmio_err = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS);
	if (ed.mmio_err) {
		ed.mmio_err1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS);
		ed.mmio_log0 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_0);
		ed.mmio_log1 = in_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_LOG_1);
		out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR1_STATUS, 0);
		out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_STATUS, 0);
	}
	ed.dma_err = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS);
	if (ed.dma_err) {
		ed.dma_err1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS);
		ed.dma_log0 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_0);
		ed.dma_log1 = in_be64(hose->cfg_data + PCIE_REG_DMA_ERR_LOG_1);
		out_be64(hose->cfg_data + PCIE_REG_DMA_ERR1_STATUS, 0);
		out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_STATUS, 0);
	}

	/* Now print things out */
	if (ed.phb_err) {
		pr_err("   PHB Error Status      : 0x%016llx\n", ed.phb_err);
		pr_err("   PHB First Error Status: 0x%016llx\n", ed.phb_err1);
		pr_err("   PHB Error Log 0       : 0x%016llx\n", ed.phb_log0);
		pr_err("   PHB Error Log 1       : 0x%016llx\n", ed.phb_log1);
	}
	if (ed.mmio_err) {
		pr_err("  MMIO Error Status      : 0x%016llx\n", ed.mmio_err);
		pr_err("  MMIO First Error Status: 0x%016llx\n", ed.mmio_err1);
		pr_err("  MMIO Error Log 0       : 0x%016llx\n", ed.mmio_log0);
		pr_err("  MMIO Error Log 1       : 0x%016llx\n", ed.mmio_log1);
	}
	if (ed.dma_err) {
		pr_err("   DMA Error Status      : 0x%016llx\n", ed.dma_err);
		pr_err("   DMA First Error Status: 0x%016llx\n", ed.dma_err1);
		pr_err("   DMA Error Log 0       : 0x%016llx\n", ed.dma_log0);
		pr_err("   DMA Error Log 1       : 0x%016llx\n", ed.dma_log1);
	}
	if (ed.utl_sys_err)
		pr_err("   UTL Sys Error Status  : 0x%016llx\n", ed.utl_sys_err);
	if (ed.utl_port_err)
		pr_err("   UTL Port Error Status : 0x%016llx\n", ed.utl_port_err);
	if (ed.utl_rc_err)
		pr_err("   UTL RC Error Status   : 0x%016llx\n", ed.utl_rc_err);

	/* Interrupts are caused by the error traps. If we had any error there
	 * we loop again in case the UTL buffered some new stuff between
	 * going there and going to the traps
	 */
	if (ed.dma_err || ed.mmio_err || ed.phb_err) {
		handled = IRQ_HANDLED;
		goto again;
	}
	return handled;
}
1074
1075static void __init wsp_setup_pci_err_reporting(struct wsp_phb *phb)
1076{
1077 struct pci_controller *hose = phb->hose;
1078 int err_irq, i, rc;
1079 char fname[16];
1080
1081 /* Create a debugfs file for that PHB */
1082 sprintf(fname, "phb%d", phb->hose->global_number);
1083 phb->ddir = debugfs_create_dir(fname, powerpc_debugfs_root);
1084
1085 /* Some useful debug output */
1086 if (phb->ddir) {
1087 struct dentry *d = debugfs_create_dir("regs", phb->ddir);
1088 char tmp[64];
1089
1090 for (i = 0; i < ARRAY_SIZE(wsp_pci_regs); i++) {
1091 sprintf(tmp, "%03x_%s", wsp_pci_regs[i].offset,
1092 wsp_pci_regs[i].name);
1093 debugfs_create_file(tmp, 0600, d,
1094 hose->cfg_data + wsp_pci_regs[i].offset,
1095 &wsp_pci_reg_fops);
1096 }
1097 debugfs_create_file("all_regs", 0600, phb->ddir, phb, &wsp_pci_regs_fops);
1098 }
1099
1100 /* Find the IRQ number for that PHB */
1101 err_irq = irq_of_parse_and_map(hose->dn, 0);
1102 if (err_irq == 0)
1103 /* XXX Error IRQ lacking from device-tree */
1104 err_irq = wsp_pci_get_err_irq_no_dt(hose->dn);
1105 if (err_irq == 0) {
1106 pr_err("PCI: Failed to fetch error interrupt for %s\n",
1107 hose->dn->full_name);
1108 return;
1109 }
1110 /* Request it */
1111 rc = request_irq(err_irq, wsp_pci_err_irq, 0, "wsp_pci error", phb);
1112 if (rc) {
1113 pr_err("PCI: Failed to request interrupt for %s\n",
1114 hose->dn->full_name);
1115 }
1116 /* Enable interrupts for all errors for now */
1117 out_be64(hose->cfg_data + PCIE_REG_PHB_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
1118 out_be64(hose->cfg_data + PCIE_REG_MMIO_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
1119 out_be64(hose->cfg_data + PCIE_REG_DMA_ERR_IRQ_ENABLE, 0xffffffffffffffffull);
1120}
1121
1122/*
1123 * This is called later to hookup with the error interrupt
1124 */
/*
 * arch_initcall: runs after IRQ and debugfs infrastructure is up, hooking
 * each registered PHB to its error interrupt and debugfs files.
 */
static int __init wsp_setup_pci_late(void)
{
	struct wsp_phb *phb;

	list_for_each_entry(phb, &wsp_phbs, all)
		wsp_setup_pci_err_reporting(phb);

	return 0;
}
arch_initcall(wsp_setup_pci_late);
diff --git a/arch/powerpc/platforms/wsp/wsp_pci.h b/arch/powerpc/platforms/wsp/wsp_pci.h
deleted file mode 100644
index 52e9bd95250d..000000000000
--- a/arch/powerpc/platforms/wsp/wsp_pci.h
+++ /dev/null
@@ -1,268 +0,0 @@
1/*
2 * Copyright 2010 Ben Herrenschmidt, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#ifndef __WSP_PCI_H
11#define __WSP_PCI_H
12
13/* Architected registers */
14#define PCIE_REG_DMA_CHAN_STATUS 0x110
15#define PCIE_REG_CPU_LOADSTORE_STATUS 0x120
16
17#define PCIE_REG_CONFIG_DATA 0x130
18#define PCIE_REG_LOCK0 0x138
19#define PCIE_REG_CONFIG_ADDRESS 0x140
20#define PCIE_REG_CA_ENABLE 0x8000000000000000ull
21#define PCIE_REG_CA_BUS_MASK 0x0ff0000000000000ull
22#define PCIE_REG_CA_BUS_SHIFT (20+32)
23#define PCIE_REG_CA_DEV_MASK 0x000f800000000000ull
24#define PCIE_REG_CA_DEV_SHIFT (15+32)
25#define PCIE_REG_CA_FUNC_MASK 0x0000700000000000ull
26#define PCIE_REG_CA_FUNC_SHIFT (12+32)
27#define PCIE_REG_CA_REG_MASK 0x00000fff00000000ull
28#define PCIE_REG_CA_REG_SHIFT ( 0+32)
29#define PCIE_REG_CA_BE_MASK 0x00000000f0000000ull
30#define PCIE_REG_CA_BE_SHIFT ( 28)
31#define PCIE_REG_LOCK1 0x148
32
33#define PCIE_REG_PHB_CONFIG 0x160
34#define PCIE_REG_PHBC_64B_TCE_EN 0x2000000000000000ull
35#define PCIE_REG_PHBC_MMIO_DMA_FREEZE_EN 0x1000000000000000ull
36#define PCIE_REG_PHBC_32BIT_MSI_EN 0x0080000000000000ull
37#define PCIE_REG_PHBC_M64_EN 0x0040000000000000ull
38#define PCIE_REG_PHBC_IO_EN 0x0008000000000000ull
39#define PCIE_REG_PHBC_64BIT_MSI_EN 0x0002000000000000ull
40#define PCIE_REG_PHBC_M32A_EN 0x0000800000000000ull
41#define PCIE_REG_PHBC_M32B_EN 0x0000400000000000ull
42#define PCIE_REG_PHBC_MSI_PE_VALIDATE 0x0000200000000000ull
43#define PCIE_REG_PHBC_DMA_XLATE_BYPASS 0x0000100000000000ull
44
45#define PCIE_REG_IO_BASE_ADDR 0x170
46#define PCIE_REG_IO_BASE_MASK 0x178
47#define PCIE_REG_IO_START_ADDR 0x180
48
49#define PCIE_REG_M32A_BASE_ADDR 0x190
50#define PCIE_REG_M32A_BASE_MASK 0x198
51#define PCIE_REG_M32A_START_ADDR 0x1a0
52
53#define PCIE_REG_M32B_BASE_ADDR 0x1b0
54#define PCIE_REG_M32B_BASE_MASK 0x1b8
55#define PCIE_REG_M32B_START_ADDR 0x1c0
56
57#define PCIE_REG_M64_BASE_ADDR 0x1e0
58#define PCIE_REG_M64_BASE_MASK 0x1e8
59#define PCIE_REG_M64_START_ADDR 0x1f0
60
61#define PCIE_REG_TCE_KILL 0x210
62#define PCIE_REG_TCEKILL_SINGLE 0x8000000000000000ull
63#define PCIE_REG_TCEKILL_ADDR_MASK 0x000003fffffffff8ull
64#define PCIE_REG_TCEKILL_PS_4K 0
65#define PCIE_REG_TCEKILL_PS_64K 1
66#define PCIE_REG_TCEKILL_PS_16M 2
67#define PCIE_REG_TCEKILL_PS_16G 3
68
69#define PCIE_REG_IODA_ADDR 0x220
70#define PCIE_REG_IODA_AD_AUTOINC 0x8000000000000000ull
71#define PCIE_REG_IODA_AD_TBL_MVT 0x0005000000000000ull
72#define PCIE_REG_IODA_AD_TBL_PELT 0x0006000000000000ull
73#define PCIE_REG_IODA_AD_TBL_PESTA 0x0007000000000000ull
74#define PCIE_REG_IODA_AD_TBL_PESTB 0x0008000000000000ull
75#define PCIE_REG_IODA_AD_TBL_TVT 0x0009000000000000ull
76#define PCIE_REG_IODA_AD_TBL_TCE 0x000a000000000000ull
77#define PCIE_REG_IODA_DATA0 0x228
78#define PCIE_REG_IODA_DATA1 0x230
79
80#define PCIE_REG_LOCK2 0x240
81
82#define PCIE_REG_PHB_GEN_CAP 0x250
83#define PCIE_REG_PHB_TCE_CAP 0x258
84#define PCIE_REG_PHB_IRQ_CAP 0x260
85#define PCIE_REG_PHB_EEH_CAP 0x268
86
87#define PCIE_REG_PAPR_ERR_INJ_CONTROL 0x2b0
88#define PCIE_REG_PAPR_ERR_INJ_ADDR 0x2b8
89#define PCIE_REG_PAPR_ERR_INJ_MASK 0x2c0
90
91
92#define PCIE_REG_SYS_CFG1 0x600
93#define PCIE_REG_SYS_CFG1_CLASS_CODE 0x0000000000ffffffull
94
95#define IODA_TVT0_TTA_MASK 0x000fffffffff0000ull
96#define IODA_TVT0_TTA_SHIFT 4
97#define IODA_TVT0_BUSNUM_VALID_MASK 0x000000000000e000ull
98#define IODA_TVT0_TCE_TABLE_SIZE_MASK 0x0000000000001f00ull
99#define IODA_TVT0_TCE_TABLE_SIZE_SHIFT 8
100#define IODA_TVT0_BUSNUM_VALUE_MASK 0x00000000000000ffull
101#define IODA_TVT0_BUSNUM_VALID_SHIFT 0
102#define IODA_TVT1_DEVNUM_VALID 0x2000000000000000ull
103#define IODA_TVT1_DEVNUM_VALUE_MASK 0x1f00000000000000ull
104#define IODA_TVT1_DEVNUM_VALUE_SHIFT 56
105#define IODA_TVT1_FUNCNUM_VALID 0x0008000000000000ull
106#define IODA_TVT1_FUNCNUM_VALUE_MASK 0x0007000000000000ull
107#define IODA_TVT1_FUNCNUM_VALUE_SHIFT 48
108#define IODA_TVT1_IO_PAGE_SIZE_MASK 0x00001f0000000000ull
109#define IODA_TVT1_IO_PAGE_SIZE_SHIFT 40
110#define IODA_TVT1_PE_NUMBER_MASK 0x000000000000003full
111#define IODA_TVT1_PE_NUMBER_SHIFT 0
112
113#define IODA_TVT_COUNT 64
114
115/* UTL Core registers */
116#define PCIE_UTL_SYS_BUS_CONTROL 0x400
117#define PCIE_UTL_STATUS 0x408
118#define PCIE_UTL_SYS_BUS_AGENT_STATUS 0x410
119#define PCIE_UTL_SYS_BUS_AGENT_ERR_SEV 0x418
120#define PCIE_UTL_SYS_BUS_AGENT_IRQ_EN 0x420
121#define PCIE_UTL_SYS_BUS_BURST_SZ_CONF 0x440
122#define PCIE_UTL_REVISION_ID 0x448
123
124#define PCIE_UTL_OUT_POST_HDR_BUF_ALLOC 0x4c0
125#define PCIE_UTL_OUT_POST_DAT_BUF_ALLOC 0x4d0
126#define PCIE_UTL_IN_POST_HDR_BUF_ALLOC 0x4e0
127#define PCIE_UTL_IN_POST_DAT_BUF_ALLOC 0x4f0
128#define PCIE_UTL_OUT_NP_BUF_ALLOC 0x500
129#define PCIE_UTL_IN_NP_BUF_ALLOC 0x510
130#define PCIE_UTL_PCIE_TAGS_ALLOC 0x520
131#define PCIE_UTL_GBIF_READ_TAGS_ALLOC 0x530
132
133#define PCIE_UTL_PCIE_PORT_CONTROL 0x540
134#define PCIE_UTL_PCIE_PORT_STATUS 0x548
135#define PCIE_UTL_PCIE_PORT_ERROR_SEV 0x550
136#define PCIE_UTL_PCIE_PORT_IRQ_EN 0x558
137#define PCIE_UTL_RC_STATUS 0x560
138#define PCIE_UTL_RC_ERR_SEVERITY 0x568
139#define PCIE_UTL_RC_IRQ_EN 0x570
140#define PCIE_UTL_EP_STATUS 0x578
141#define PCIE_UTL_EP_ERR_SEVERITY 0x580
142#define PCIE_UTL_EP_ERR_IRQ_EN 0x588
143
144#define PCIE_UTL_PCI_PM_CTRL1 0x590
145#define PCIE_UTL_PCI_PM_CTRL2 0x598
146
147/* PCIe stack registers */
148#define PCIE_REG_SYSTEM_CONFIG1 0x600
149#define PCIE_REG_SYSTEM_CONFIG2 0x608
150#define PCIE_REG_EP_SYSTEM_CONFIG 0x618
151#define PCIE_REG_EP_FLR 0x620
152#define PCIE_REG_EP_BAR_CONFIG 0x628
153#define PCIE_REG_LINK_CONFIG 0x630
154#define PCIE_REG_PM_CONFIG 0x640
155#define PCIE_REG_DLP_CONTROL 0x650
156#define PCIE_REG_DLP_STATUS 0x658
157#define PCIE_REG_ERR_REPORT_CONTROL 0x660
158#define PCIE_REG_SLOT_CONTROL1 0x670
159#define PCIE_REG_SLOT_CONTROL2 0x678
160#define PCIE_REG_UTL_CONFIG 0x680
161#define PCIE_REG_BUFFERS_CONFIG 0x690
162#define PCIE_REG_ERROR_INJECT 0x698
163#define PCIE_REG_SRIOV_CONFIG 0x6a0
164#define PCIE_REG_PF0_SRIOV_STATUS 0x6a8
165#define PCIE_REG_PF1_SRIOV_STATUS 0x6b0
166#define PCIE_REG_PORT_NUMBER 0x700
167#define PCIE_REG_POR_SYSTEM_CONFIG 0x708
168
169/* PHB internal logic registers */
170#define PCIE_REG_PHB_VERSION 0x800
171#define PCIE_REG_RESET 0x808
172#define PCIE_REG_PHB_CONTROL 0x810
173#define PCIE_REG_PHB_TIMEOUT_CONTROL1 0x878
174#define PCIE_REG_PHB_QUIESCE_DMA 0x888
175#define PCIE_REG_PHB_DMA_READ_TAG_ACTV 0x900
176#define PCIE_REG_PHB_TCE_READ_TAG_ACTV 0x908
177
178/* FIR registers */
179#define PCIE_REG_LEM_FIR_ACCUM 0xc00
180#define PCIE_REG_LEM_FIR_AND_MASK 0xc08
181#define PCIE_REG_LEM_FIR_OR_MASK 0xc10
182#define PCIE_REG_LEM_ACTION0 0xc18
183#define PCIE_REG_LEM_ACTION1 0xc20
184#define PCIE_REG_LEM_ERROR_MASK 0xc30
185#define PCIE_REG_LEM_ERROR_AND_MASK 0xc38
186#define PCIE_REG_LEM_ERROR_OR_MASK 0xc40
187
188/* PHB Error registers */
189#define PCIE_REG_PHB_ERR_STATUS 0xc80
190#define PCIE_REG_PHB_ERR1_STATUS 0xc88
191#define PCIE_REG_PHB_ERR_INJECT 0xc90
192#define PCIE_REG_PHB_ERR_LEM_ENABLE 0xc98
193#define PCIE_REG_PHB_ERR_IRQ_ENABLE 0xca0
194#define PCIE_REG_PHB_ERR_FREEZE_ENABLE 0xca8
195#define PCIE_REG_PHB_ERR_SIDE_ENABLE 0xcb8
196#define PCIE_REG_PHB_ERR_LOG_0 0xcc0
197#define PCIE_REG_PHB_ERR_LOG_1 0xcc8
198#define PCIE_REG_PHB_ERR_STATUS_MASK 0xcd0
199#define PCIE_REG_PHB_ERR1_STATUS_MASK 0xcd8
200
201#define PCIE_REG_MMIO_ERR_STATUS 0xd00
202#define PCIE_REG_MMIO_ERR1_STATUS 0xd08
203#define PCIE_REG_MMIO_ERR_INJECT 0xd10
204#define PCIE_REG_MMIO_ERR_LEM_ENABLE 0xd18
205#define PCIE_REG_MMIO_ERR_IRQ_ENABLE 0xd20
206#define PCIE_REG_MMIO_ERR_FREEZE_ENABLE 0xd28
207#define PCIE_REG_MMIO_ERR_SIDE_ENABLE 0xd38
208#define PCIE_REG_MMIO_ERR_LOG_0 0xd40
209#define PCIE_REG_MMIO_ERR_LOG_1 0xd48
210#define PCIE_REG_MMIO_ERR_STATUS_MASK 0xd50
211#define PCIE_REG_MMIO_ERR1_STATUS_MASK 0xd58
212
213#define PCIE_REG_DMA_ERR_STATUS 0xd80
214#define PCIE_REG_DMA_ERR1_STATUS 0xd88
215#define PCIE_REG_DMA_ERR_INJECT 0xd90
216#define PCIE_REG_DMA_ERR_LEM_ENABLE 0xd98
217#define PCIE_REG_DMA_ERR_IRQ_ENABLE 0xda0
218#define PCIE_REG_DMA_ERR_FREEZE_ENABLE 0xda8
219#define PCIE_REG_DMA_ERR_SIDE_ENABLE 0xdb8
220#define PCIE_REG_DMA_ERR_LOG_0 0xdc0
221#define PCIE_REG_DMA_ERR_LOG_1 0xdc8
222#define PCIE_REG_DMA_ERR_STATUS_MASK 0xdd0
223#define PCIE_REG_DMA_ERR1_STATUS_MASK 0xdd8
224
225/* Shortcuts for access to the above using the PHB definitions
226 * with an offset
227 */
228#define PCIE_REG_ERR_PHB_OFFSET 0x0
229#define PCIE_REG_ERR_MMIO_OFFSET 0x80
230#define PCIE_REG_ERR_DMA_OFFSET 0x100
231
232/* Debug and Trace registers */
233#define PCIE_REG_PHB_DEBUG_CONTROL0 0xe00
234#define PCIE_REG_PHB_DEBUG_STATUS0 0xe08
235#define PCIE_REG_PHB_DEBUG_CONTROL1 0xe10
236#define PCIE_REG_PHB_DEBUG_STATUS1 0xe18
237#define PCIE_REG_PHB_DEBUG_CONTROL2 0xe20
238#define PCIE_REG_PHB_DEBUG_STATUS2 0xe28
239#define PCIE_REG_PHB_DEBUG_CONTROL3 0xe30
240#define PCIE_REG_PHB_DEBUG_STATUS3 0xe38
241#define PCIE_REG_PHB_DEBUG_CONTROL4 0xe40
242#define PCIE_REG_PHB_DEBUG_STATUS4 0xe48
243#define PCIE_REG_PHB_DEBUG_CONTROL5 0xe50
244#define PCIE_REG_PHB_DEBUG_STATUS5 0xe58
245#define PCIE_REG_PHB_DEBUG_CONTROL6 0xe60
246#define PCIE_REG_PHB_DEBUG_STATUS6 0xe68
247
248/* Definition for PCIe errors */
249struct wsp_pcie_err_log_data {
250 __u64 phb_err;
251 __u64 phb_err1;
252 __u64 phb_log0;
253 __u64 phb_log1;
254 __u64 mmio_err;
255 __u64 mmio_err1;
256 __u64 mmio_log0;
257 __u64 mmio_log1;
258 __u64 dma_err;
259 __u64 dma_err1;
260 __u64 dma_log0;
261 __u64 dma_log1;
262 __u64 utl_sys_err;
263 __u64 utl_port_err;
264 __u64 utl_rc_err;
265 __u64 unused;
266};
267
268#endif /* __WSP_PCI_H */
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index 9dee47071af8..de8d9483bbe8 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -26,6 +26,7 @@
26#include <asm/errno.h> 26#include <asm/errno.h>
27#include <asm/xics.h> 27#include <asm/xics.h>
28#include <asm/kvm_ppc.h> 28#include <asm/kvm_ppc.h>
29#include <asm/dbell.h>
29 30
30struct icp_ipl { 31struct icp_ipl {
31 union { 32 union {
@@ -145,7 +146,13 @@ static unsigned int icp_native_get_irq(void)
145static void icp_native_cause_ipi(int cpu, unsigned long data) 146static void icp_native_cause_ipi(int cpu, unsigned long data)
146{ 147{
147 kvmppc_set_host_ipi(cpu, 1); 148 kvmppc_set_host_ipi(cpu, 1);
148 icp_native_set_qirr(cpu, IPI_PRIORITY); 149#ifdef CONFIG_PPC_DOORBELL
150 if (cpu_has_feature(CPU_FTR_DBELL) &&
151 (cpumask_test_cpu(cpu, cpu_sibling_mask(smp_processor_id()))))
152 doorbell_cause_ipi(cpu, data);
153 else
154#endif
155 icp_native_set_qirr(cpu, IPI_PRIORITY);
149} 156}
150 157
151void xics_wake_cpu(int cpu) 158void xics_wake_cpu(int cpu)
diff --git a/arch/powerpc/xmon/nonstdio.c b/arch/powerpc/xmon/nonstdio.c
index bce3dcfe5058..c98748617896 100644
--- a/arch/powerpc/xmon/nonstdio.c
+++ b/arch/powerpc/xmon/nonstdio.c
@@ -122,7 +122,7 @@ void xmon_printf(const char *format, ...)
122 122
123 if (n && rc == 0) { 123 if (n && rc == 0) {
124 /* No udbg hooks, fallback to printk() - dangerous */ 124 /* No udbg hooks, fallback to printk() - dangerous */
125 printk(xmon_outbuf); 125 printk("%s", xmon_outbuf);
126 } 126 }
127} 127}
128 128