author     Stephen Rothwell <sfr@canb.auug.org.au>             2012-03-07 12:02:07 -0500
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2012-03-08 18:35:11 -0500
commit     8ee3e0d69623cd74f626cbd2b9f237ad45519e3f (patch)
tree       9450b0c397afa441ed85c3331cf2e614d3bc729d
parent     2d4b971287cbce16585acda4b76308faa8da0950 (diff)

powerpc: Remove the main legacy iSeries platform code

Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--  arch/powerpc/platforms/Makefile | 1
-rw-r--r--  arch/powerpc/platforms/iseries/Makefile | 9
-rw-r--r--  arch/powerpc/platforms/iseries/call_hpt.h | 102
-rw-r--r--  arch/powerpc/platforms/iseries/call_pci.h | 309
-rw-r--r--  arch/powerpc/platforms/iseries/call_sm.h | 37
-rw-r--r--  arch/powerpc/platforms/iseries/dt.c | 643
-rw-r--r--  arch/powerpc/platforms/iseries/exception.S | 311
-rw-r--r--  arch/powerpc/platforms/iseries/exception.h | 58
-rw-r--r--  arch/powerpc/platforms/iseries/htab.c | 257
-rw-r--r--  arch/powerpc/platforms/iseries/hvcall.S | 94
-rw-r--r--  arch/powerpc/platforms/iseries/hvlog.c | 35
-rw-r--r--  arch/powerpc/platforms/iseries/hvlpconfig.c | 39
-rw-r--r--  arch/powerpc/platforms/iseries/iommu.c | 260
-rw-r--r--  arch/powerpc/platforms/iseries/ipl_parms.h | 68
-rw-r--r--  arch/powerpc/platforms/iseries/irq.c | 400
-rw-r--r--  arch/powerpc/platforms/iseries/irq.h | 13
-rw-r--r--  arch/powerpc/platforms/iseries/it_exp_vpd_panel.h | 51
-rw-r--r--  arch/powerpc/platforms/iseries/it_lp_naca.h | 80
-rw-r--r--  arch/powerpc/platforms/iseries/ksyms.c | 21
-rw-r--r--  arch/powerpc/platforms/iseries/lpardata.c | 318
-rw-r--r--  arch/powerpc/platforms/iseries/lpevents.c | 341
-rw-r--r--  arch/powerpc/platforms/iseries/main_store.h | 165
-rw-r--r--  arch/powerpc/platforms/iseries/mf.c | 1275
-rw-r--r--  arch/powerpc/platforms/iseries/misc.S | 26
-rw-r--r--  arch/powerpc/platforms/iseries/naca.h | 24
-rw-r--r--  arch/powerpc/platforms/iseries/pci.c | 919
-rw-r--r--  arch/powerpc/platforms/iseries/pci.h | 58
-rw-r--r--  arch/powerpc/platforms/iseries/proc.c | 120
-rw-r--r--  arch/powerpc/platforms/iseries/processor_vpd.h | 85
-rw-r--r--  arch/powerpc/platforms/iseries/release_data.h | 63
-rw-r--r--  arch/powerpc/platforms/iseries/setup.c | 722
-rw-r--r--  arch/powerpc/platforms/iseries/setup.h | 27
-rw-r--r--  arch/powerpc/platforms/iseries/smp.c | 88
-rw-r--r--  arch/powerpc/platforms/iseries/spcomm_area.h | 34
-rw-r--r--  arch/powerpc/platforms/iseries/vio.c | 556
-rw-r--r--  arch/powerpc/platforms/iseries/viopath.c | 677
-rw-r--r--  arch/powerpc/platforms/iseries/vpd_areas.h | 88
37 files changed, 0 insertions, 8374 deletions
diff --git a/arch/powerpc/platforms/Makefile b/arch/powerpc/platforms/Makefile
index 2635a22bade2..879b4a448498 100644
--- a/arch/powerpc/platforms/Makefile
+++ b/arch/powerpc/platforms/Makefile
@@ -16,7 +16,6 @@ obj-$(CONFIG_FSL_SOC_BOOKE) += 85xx/
 obj-$(CONFIG_PPC_86xx)      += 86xx/
 obj-$(CONFIG_PPC_POWERNV)   += powernv/
 obj-$(CONFIG_PPC_PSERIES)   += pseries/
-obj-$(CONFIG_PPC_ISERIES)   += iseries/
 obj-$(CONFIG_PPC_MAPLE)     += maple/
 obj-$(CONFIG_PPC_PASEMI)    += pasemi/
 obj-$(CONFIG_PPC_CELL)      += cell/
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
deleted file mode 100644
index a7602b11ed9d..000000000000
--- a/arch/powerpc/platforms/iseries/Makefile
+++ /dev/null
@@ -1,9 +0,0 @@
1ccflags-y := -mno-minimal-toc
2
3obj-y += exception.o
4obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt.o mf.o lpevents.o \
5 hvcall.o proc.o htab.o iommu.o misc.o irq.o
6obj-$(CONFIG_PCI) += pci.o
7obj-$(CONFIG_SMP) += smp.o
8obj-$(CONFIG_VIOPATH) += viopath.o vio.o
9obj-$(CONFIG_MODULES) += ksyms.o
diff --git a/arch/powerpc/platforms/iseries/call_hpt.h b/arch/powerpc/platforms/iseries/call_hpt.h
deleted file mode 100644
index 8d95fe4b554e..000000000000
--- a/arch/powerpc/platforms/iseries/call_hpt.h
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _PLATFORMS_ISERIES_CALL_HPT_H
19#define _PLATFORMS_ISERIES_CALL_HPT_H
20
21/*
22 * This file contains the "hypervisor call" interface which is used to
23 * drive the hypervisor from the OS.
24 */
25
26#include <asm/iseries/hv_call_sc.h>
27#include <asm/iseries/hv_types.h>
28#include <asm/mmu.h>
29
30#define HvCallHptGetHptAddress HvCallHpt + 0
31#define HvCallHptGetHptPages HvCallHpt + 1
32#define HvCallHptSetPp HvCallHpt + 5
33#define HvCallHptSetSwBits HvCallHpt + 6
34#define HvCallHptUpdate HvCallHpt + 7
35#define HvCallHptInvalidateNoSyncICache HvCallHpt + 8
36#define HvCallHptGet HvCallHpt + 11
37#define HvCallHptFindNextValid HvCallHpt + 12
38#define HvCallHptFindValid HvCallHpt + 13
39#define HvCallHptAddValidate HvCallHpt + 16
40#define HvCallHptInvalidateSetSwBitsGet HvCallHpt + 18
41
42
43static inline u64 HvCallHpt_getHptAddress(void)
44{
45 return HvCall0(HvCallHptGetHptAddress);
46}
47
48static inline u64 HvCallHpt_getHptPages(void)
49{
50 return HvCall0(HvCallHptGetHptPages);
51}
52
53static inline void HvCallHpt_setPp(u32 hpteIndex, u8 value)
54{
55 HvCall2(HvCallHptSetPp, hpteIndex, value);
56}
57
58static inline void HvCallHpt_setSwBits(u32 hpteIndex, u8 bitson, u8 bitsoff)
59{
60 HvCall3(HvCallHptSetSwBits, hpteIndex, bitson, bitsoff);
61}
62
63static inline void HvCallHpt_invalidateNoSyncICache(u32 hpteIndex)
64{
65 HvCall1(HvCallHptInvalidateNoSyncICache, hpteIndex);
66}
67
68static inline u64 HvCallHpt_invalidateSetSwBitsGet(u32 hpteIndex, u8 bitson,
69 u8 bitsoff)
70{
71 u64 compressedStatus;
72
73 compressedStatus = HvCall4(HvCallHptInvalidateSetSwBitsGet,
74 hpteIndex, bitson, bitsoff, 1);
75 HvCall1(HvCallHptInvalidateNoSyncICache, hpteIndex);
76 return compressedStatus;
77}
78
79static inline u64 HvCallHpt_findValid(struct hash_pte *hpte, u64 vpn)
80{
81 return HvCall3Ret16(HvCallHptFindValid, hpte, vpn, 0, 0);
82}
83
84static inline u64 HvCallHpt_findNextValid(struct hash_pte *hpte, u32 hpteIndex,
85 u8 bitson, u8 bitsoff)
86{
87 return HvCall3Ret16(HvCallHptFindNextValid, hpte, hpteIndex,
88 bitson, bitsoff);
89}
90
91static inline void HvCallHpt_get(struct hash_pte *hpte, u32 hpteIndex)
92{
93 HvCall2Ret16(HvCallHptGet, hpte, hpteIndex, 0);
94}
95
96static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit,
97 struct hash_pte *hpte)
98{
99 HvCall4(HvCallHptAddValidate, hpteIndex, hBit, hpte->v, hpte->r);
100}
101
102#endif /* _PLATFORMS_ISERIES_CALL_HPT_H */
diff --git a/arch/powerpc/platforms/iseries/call_pci.h b/arch/powerpc/platforms/iseries/call_pci.h
deleted file mode 100644
index dbdf69850ed9..000000000000
--- a/arch/powerpc/platforms/iseries/call_pci.h
+++ /dev/null
@@ -1,309 +0,0 @@
1/*
2 * Provides the Hypervisor PCI calls for iSeries Linux Parition.
3 * Copyright (C) 2001 <Wayne G Holm> <IBM Corporation>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the:
17 * Free Software Foundation, Inc.,
18 * 59 Temple Place, Suite 330,
19 * Boston, MA 02111-1307 USA
20 *
21 * Change Activity:
22 * Created, Jan 9, 2001
23 */
24
25#ifndef _PLATFORMS_ISERIES_CALL_PCI_H
26#define _PLATFORMS_ISERIES_CALL_PCI_H
27
28#include <asm/iseries/hv_call_sc.h>
29#include <asm/iseries/hv_types.h>
30
31/*
32 * DSA == Direct Select Address
33 * this struct must be 64 bits in total
34 */
35struct HvCallPci_DsaAddr {
36 u16 busNumber; /* PHB index? */
37 u8 subBusNumber; /* PCI bus number? */
38 u8 deviceId; /* device and function? */
39 u8 barNumber;
40 u8 reserved[3];
41};
42
43union HvDsaMap {
44 u64 DsaAddr;
45 struct HvCallPci_DsaAddr Dsa;
46};
47
48struct HvCallPci_LoadReturn {
49 u64 rc;
50 u64 value;
51};
52
53enum HvCallPci_DeviceType {
54 HvCallPci_NodeDevice = 1,
55 HvCallPci_SpDevice = 2,
56 HvCallPci_IopDevice = 3,
57 HvCallPci_BridgeDevice = 4,
58 HvCallPci_MultiFunctionDevice = 5,
59 HvCallPci_IoaDevice = 6
60};
61
62
63struct HvCallPci_DeviceInfo {
64 u32 deviceType; /* See DeviceType enum for values */
65};
66
67struct HvCallPci_BusUnitInfo {
68 u32 sizeReturned; /* length of data returned */
69 u32 deviceType; /* see DeviceType enum for values */
70};
71
72struct HvCallPci_BridgeInfo {
73 struct HvCallPci_BusUnitInfo busUnitInfo; /* Generic bus unit info */
74 u8 subBusNumber; /* Bus number of secondary bus */
75 u8 maxAgents; /* Max idsels on secondary bus */
76 u8 maxSubBusNumber; /* Max Sub Bus */
77 u8 logicalSlotNumber; /* Logical Slot Number for IOA */
78};
79
80
81/*
82 * Maximum BusUnitInfo buffer size. Provided for clients so
83 * they can allocate a buffer big enough for any type of bus
84 * unit. Increase as needed.
85 */
86enum {HvCallPci_MaxBusUnitInfoSize = 128};
87
88struct HvCallPci_BarParms {
89 u64 vaddr;
90 u64 raddr;
91 u64 size;
92 u64 protectStart;
93 u64 protectEnd;
94 u64 relocationOffset;
95 u64 pciAddress;
96 u64 reserved[3];
97};
98
99enum HvCallPci_VpdType {
100 HvCallPci_BusVpd = 1,
101 HvCallPci_BusAdapterVpd = 2
102};
103
104#define HvCallPciConfigLoad8 HvCallPci + 0
105#define HvCallPciConfigLoad16 HvCallPci + 1
106#define HvCallPciConfigLoad32 HvCallPci + 2
107#define HvCallPciConfigStore8 HvCallPci + 3
108#define HvCallPciConfigStore16 HvCallPci + 4
109#define HvCallPciConfigStore32 HvCallPci + 5
110#define HvCallPciEoi HvCallPci + 16
111#define HvCallPciGetBarParms HvCallPci + 18
112#define HvCallPciMaskFisr HvCallPci + 20
113#define HvCallPciUnmaskFisr HvCallPci + 21
114#define HvCallPciSetSlotReset HvCallPci + 25
115#define HvCallPciGetDeviceInfo HvCallPci + 27
116#define HvCallPciGetCardVpd HvCallPci + 28
117#define HvCallPciBarLoad8 HvCallPci + 40
118#define HvCallPciBarLoad16 HvCallPci + 41
119#define HvCallPciBarLoad32 HvCallPci + 42
120#define HvCallPciBarLoad64 HvCallPci + 43
121#define HvCallPciBarStore8 HvCallPci + 44
122#define HvCallPciBarStore16 HvCallPci + 45
123#define HvCallPciBarStore32 HvCallPci + 46
124#define HvCallPciBarStore64 HvCallPci + 47
125#define HvCallPciMaskInterrupts HvCallPci + 48
126#define HvCallPciUnmaskInterrupts HvCallPci + 49
127#define HvCallPciGetBusUnitInfo HvCallPci + 50
128
129static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber,
130 u8 deviceId, u32 offset, u16 *value)
131{
132 struct HvCallPci_DsaAddr dsa;
133 struct HvCallPci_LoadReturn retVal;
134
135 *((u64*)&dsa) = 0;
136
137 dsa.busNumber = busNumber;
138 dsa.subBusNumber = subBusNumber;
139 dsa.deviceId = deviceId;
140
141 HvCall3Ret16(HvCallPciConfigLoad16, &retVal, *(u64 *)&dsa, offset, 0);
142
143 *value = retVal.value;
144
145 return retVal.rc;
146}
147
148static inline u64 HvCallPci_configLoad32(u16 busNumber, u8 subBusNumber,
149 u8 deviceId, u32 offset, u32 *value)
150{
151 struct HvCallPci_DsaAddr dsa;
152 struct HvCallPci_LoadReturn retVal;
153
154 *((u64*)&dsa) = 0;
155
156 dsa.busNumber = busNumber;
157 dsa.subBusNumber = subBusNumber;
158 dsa.deviceId = deviceId;
159
160 HvCall3Ret16(HvCallPciConfigLoad32, &retVal, *(u64 *)&dsa, offset, 0);
161
162 *value = retVal.value;
163
164 return retVal.rc;
165}
166
167static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber,
168 u8 deviceId, u32 offset, u8 value)
169{
170 struct HvCallPci_DsaAddr dsa;
171
172 *((u64*)&dsa) = 0;
173
174 dsa.busNumber = busNumber;
175 dsa.subBusNumber = subBusNumber;
176 dsa.deviceId = deviceId;
177
178 return HvCall4(HvCallPciConfigStore8, *(u64 *)&dsa, offset, value, 0);
179}
180
181static inline u64 HvCallPci_eoi(u16 busNumberParm, u8 subBusParm,
182 u8 deviceIdParm)
183{
184 struct HvCallPci_DsaAddr dsa;
185 struct HvCallPci_LoadReturn retVal;
186
187 *((u64*)&dsa) = 0;
188
189 dsa.busNumber = busNumberParm;
190 dsa.subBusNumber = subBusParm;
191 dsa.deviceId = deviceIdParm;
192
193 HvCall1Ret16(HvCallPciEoi, &retVal, *(u64*)&dsa);
194
195 return retVal.rc;
196}
197
198static inline u64 HvCallPci_getBarParms(u16 busNumberParm, u8 subBusParm,
199 u8 deviceIdParm, u8 barNumberParm, u64 parms, u32 sizeofParms)
200{
201 struct HvCallPci_DsaAddr dsa;
202
203 *((u64*)&dsa) = 0;
204
205 dsa.busNumber = busNumberParm;
206 dsa.subBusNumber = subBusParm;
207 dsa.deviceId = deviceIdParm;
208 dsa.barNumber = barNumberParm;
209
210 return HvCall3(HvCallPciGetBarParms, *(u64*)&dsa, parms, sizeofParms);
211}
212
213static inline u64 HvCallPci_maskFisr(u16 busNumberParm, u8 subBusParm,
214 u8 deviceIdParm, u64 fisrMask)
215{
216 struct HvCallPci_DsaAddr dsa;
217
218 *((u64*)&dsa) = 0;
219
220 dsa.busNumber = busNumberParm;
221 dsa.subBusNumber = subBusParm;
222 dsa.deviceId = deviceIdParm;
223
224 return HvCall2(HvCallPciMaskFisr, *(u64*)&dsa, fisrMask);
225}
226
227static inline u64 HvCallPci_unmaskFisr(u16 busNumberParm, u8 subBusParm,
228 u8 deviceIdParm, u64 fisrMask)
229{
230 struct HvCallPci_DsaAddr dsa;
231
232 *((u64*)&dsa) = 0;
233
234 dsa.busNumber = busNumberParm;
235 dsa.subBusNumber = subBusParm;
236 dsa.deviceId = deviceIdParm;
237
238 return HvCall2(HvCallPciUnmaskFisr, *(u64*)&dsa, fisrMask);
239}
240
241static inline u64 HvCallPci_getDeviceInfo(u16 busNumberParm, u8 subBusParm,
242 u8 deviceNumberParm, u64 parms, u32 sizeofParms)
243{
244 struct HvCallPci_DsaAddr dsa;
245
246 *((u64*)&dsa) = 0;
247
248 dsa.busNumber = busNumberParm;
249 dsa.subBusNumber = subBusParm;
250 dsa.deviceId = deviceNumberParm << 4;
251
252 return HvCall3(HvCallPciGetDeviceInfo, *(u64*)&dsa, parms, sizeofParms);
253}
254
255static inline u64 HvCallPci_maskInterrupts(u16 busNumberParm, u8 subBusParm,
256 u8 deviceIdParm, u64 interruptMask)
257{
258 struct HvCallPci_DsaAddr dsa;
259
260 *((u64*)&dsa) = 0;
261
262 dsa.busNumber = busNumberParm;
263 dsa.subBusNumber = subBusParm;
264 dsa.deviceId = deviceIdParm;
265
266 return HvCall2(HvCallPciMaskInterrupts, *(u64*)&dsa, interruptMask);
267}
268
269static inline u64 HvCallPci_unmaskInterrupts(u16 busNumberParm, u8 subBusParm,
270 u8 deviceIdParm, u64 interruptMask)
271{
272 struct HvCallPci_DsaAddr dsa;
273
274 *((u64*)&dsa) = 0;
275
276 dsa.busNumber = busNumberParm;
277 dsa.subBusNumber = subBusParm;
278 dsa.deviceId = deviceIdParm;
279
280 return HvCall2(HvCallPciUnmaskInterrupts, *(u64*)&dsa, interruptMask);
281}
282
283static inline u64 HvCallPci_getBusUnitInfo(u16 busNumberParm, u8 subBusParm,
284 u8 deviceIdParm, u64 parms, u32 sizeofParms)
285{
286 struct HvCallPci_DsaAddr dsa;
287
288 *((u64*)&dsa) = 0;
289
290 dsa.busNumber = busNumberParm;
291 dsa.subBusNumber = subBusParm;
292 dsa.deviceId = deviceIdParm;
293
294 return HvCall3(HvCallPciGetBusUnitInfo, *(u64*)&dsa, parms,
295 sizeofParms);
296}
297
298static inline int HvCallPci_getBusVpd(u16 busNumParm, u64 destParm,
299 u16 sizeParm)
300{
301 u64 xRc = HvCall4(HvCallPciGetCardVpd, busNumParm, destParm,
302 sizeParm, HvCallPci_BusVpd);
303 if (xRc == -1)
304 return -1;
305 else
306 return xRc & 0xFFFF;
307}
308
309#endif /* _PLATFORMS_ISERIES_CALL_PCI_H */
diff --git a/arch/powerpc/platforms/iseries/call_sm.h b/arch/powerpc/platforms/iseries/call_sm.h
deleted file mode 100644
index c7e251619f48..000000000000
--- a/arch/powerpc/platforms/iseries/call_sm.h
+++ /dev/null
@@ -1,37 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_CALL_SM_H
19#define _ISERIES_CALL_SM_H
20
21/*
22 * This file contains the "hypervisor call" interface which is used to
23 * drive the hypervisor from the OS.
24 */
25
26#include <asm/iseries/hv_call_sc.h>
27#include <asm/iseries/hv_types.h>
28
29#define HvCallSmGet64BitsOfAccessMap HvCallSm + 11
30
31static inline u64 HvCallSm_get64BitsOfAccessMap(HvLpIndex lpIndex,
32 u64 indexIntoBitMap)
33{
34 return HvCall2(HvCallSmGet64BitsOfAccessMap, lpIndex, indexIntoBitMap);
35}
36
37#endif /* _ISERIES_CALL_SM_H */
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
deleted file mode 100644
index f0491cc28900..000000000000
--- a/arch/powerpc/platforms/iseries/dt.c
+++ /dev/null
@@ -1,643 +0,0 @@
1/*
2 * Copyright (C) 2005-2006 Michael Ellerman, IBM Corporation
3 * Copyright (C) 2000-2004, IBM Corporation
4 *
5 * Description:
6 * This file contains all the routines to build a flattened device
7 * tree for a legacy iSeries machine.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#undef DEBUG
16
17#include <linux/types.h>
18#include <linux/init.h>
19#include <linux/pci.h>
20#include <linux/pci_regs.h>
21#include <linux/pci_ids.h>
22#include <linux/threads.h>
23#include <linux/bitops.h>
24#include <linux/string.h>
25#include <linux/kernel.h>
26#include <linux/if_ether.h> /* ETH_ALEN */
27
28#include <asm/machdep.h>
29#include <asm/prom.h>
30#include <asm/lppaca.h>
31#include <asm/cputable.h>
32#include <asm/abs_addr.h>
33#include <asm/system.h>
34#include <asm/iseries/hv_types.h>
35#include <asm/iseries/hv_lp_config.h>
36#include <asm/iseries/hv_call_xm.h>
37#include <asm/udbg.h>
38
39#include "processor_vpd.h"
40#include "call_hpt.h"
41#include "call_pci.h"
42#include "pci.h"
43#include "it_exp_vpd_panel.h"
44#include "naca.h"
45
46#ifdef DEBUG
47#define DBG(fmt...) udbg_printf(fmt)
48#else
49#define DBG(fmt...)
50#endif
51
52/*
53 * These are created by the linker script at the start and end
54 * of the section containing all the strings marked with the DS macro.
55 */
56extern char __dt_strings_start[];
57extern char __dt_strings_end[];
58
59#define DS(s) ({ \
60 static const char __s[] __attribute__((section(".dt_strings"))) = s; \
61 __s; \
62})
63
64struct iseries_flat_dt {
65 struct boot_param_header header;
66 u64 reserve_map[2];
67};
68
69static void * __initdata dt_data;
70
71/*
72 * Putting these strings here keeps them out of the .dt_strings section
73 * that we capture for the strings blob of the flattened device tree.
74 */
75static char __initdata device_type_cpu[] = "cpu";
76static char __initdata device_type_memory[] = "memory";
77static char __initdata device_type_serial[] = "serial";
78static char __initdata device_type_network[] = "network";
79static char __initdata device_type_pci[] = "pci";
80static char __initdata device_type_vdevice[] = "vdevice";
81static char __initdata device_type_vscsi[] = "vscsi";
82
83
84/* EBCDIC to ASCII conversion routines */
85
86static unsigned char __init e2a(unsigned char x)
87{
88 switch (x) {
89 case 0x81 ... 0x89:
90 return x - 0x81 + 'a';
91 case 0x91 ... 0x99:
92 return x - 0x91 + 'j';
93 case 0xA2 ... 0xA9:
94 return x - 0xA2 + 's';
95 case 0xC1 ... 0xC9:
96 return x - 0xC1 + 'A';
97 case 0xD1 ... 0xD9:
98 return x - 0xD1 + 'J';
99 case 0xE2 ... 0xE9:
100 return x - 0xE2 + 'S';
101 case 0xF0 ... 0xF9:
102 return x - 0xF0 + '0';
103 }
104 return ' ';
105}
106
107static unsigned char * __init strne2a(unsigned char *dest,
108 const unsigned char *src, size_t n)
109{
110 int i;
111
112 n = strnlen(src, n);
113
114 for (i = 0; i < n; i++)
115 dest[i] = e2a(src[i]);
116
117 return dest;
118}
119
120static struct iseries_flat_dt * __init dt_init(void)
121{
122 struct iseries_flat_dt *dt;
123 unsigned long str_len;
124
125 str_len = __dt_strings_end - __dt_strings_start;
126 dt = (struct iseries_flat_dt *)ALIGN(klimit, 8);
127 dt->header.off_mem_rsvmap =
128 offsetof(struct iseries_flat_dt, reserve_map);
129 dt->header.off_dt_strings = ALIGN(sizeof(*dt), 8);
130 dt->header.off_dt_struct = dt->header.off_dt_strings
131 + ALIGN(str_len, 8);
132 dt_data = (void *)((unsigned long)dt + dt->header.off_dt_struct);
133 dt->header.dt_strings_size = str_len;
134
135 /* There is no notion of hardware cpu id on iSeries */
136 dt->header.boot_cpuid_phys = smp_processor_id();
137
138 memcpy((char *)dt + dt->header.off_dt_strings, __dt_strings_start,
139 str_len);
140
141 dt->header.magic = OF_DT_HEADER;
142 dt->header.version = 0x10;
143 dt->header.last_comp_version = 0x10;
144
145 dt->reserve_map[0] = 0;
146 dt->reserve_map[1] = 0;
147
148 return dt;
149}
150
151static void __init dt_push_u32(struct iseries_flat_dt *dt, u32 value)
152{
153 *((u32 *)dt_data) = value;
154 dt_data += sizeof(u32);
155}
156
157#ifdef notyet
158static void __init dt_push_u64(struct iseries_flat_dt *dt, u64 value)
159{
160 *((u64 *)dt_data) = value;
161 dt_data += sizeof(u64);
162}
163#endif
164
165static void __init dt_push_bytes(struct iseries_flat_dt *dt, const char *data,
166 int len)
167{
168 memcpy(dt_data, data, len);
169 dt_data += ALIGN(len, 4);
170}
171
172static void __init dt_start_node(struct iseries_flat_dt *dt, const char *name)
173{
174 dt_push_u32(dt, OF_DT_BEGIN_NODE);
175 dt_push_bytes(dt, name, strlen(name) + 1);
176}
177
178#define dt_end_node(dt) dt_push_u32(dt, OF_DT_END_NODE)
179
180static void __init __dt_prop(struct iseries_flat_dt *dt, const char *name,
181 const void *data, int len)
182{
183 unsigned long offset;
184
185 dt_push_u32(dt, OF_DT_PROP);
186
187 /* Length of the data */
188 dt_push_u32(dt, len);
189
190 offset = name - __dt_strings_start;
191
192 /* The offset of the properties name in the string blob. */
193 dt_push_u32(dt, (u32)offset);
194
195 /* The actual data. */
196 dt_push_bytes(dt, data, len);
197}
198#define dt_prop(dt, name, data, len) __dt_prop((dt), DS(name), (data), (len))
199
200#define dt_prop_str(dt, name, data) \
201 dt_prop((dt), name, (data), strlen((data)) + 1); /* + 1 for NULL */
202
203static void __init __dt_prop_u32(struct iseries_flat_dt *dt, const char *name,
204 u32 data)
205{
206 __dt_prop(dt, name, &data, sizeof(u32));
207}
208#define dt_prop_u32(dt, name, data) __dt_prop_u32((dt), DS(name), (data))
209
210static void __init __maybe_unused __dt_prop_u64(struct iseries_flat_dt *dt,
211 const char *name, u64 data)
212{
213 __dt_prop(dt, name, &data, sizeof(u64));
214}
215#define dt_prop_u64(dt, name, data) __dt_prop_u64((dt), DS(name), (data))
216
217#define dt_prop_u64_list(dt, name, data, n) \
218 dt_prop((dt), name, (data), sizeof(u64) * (n))
219
220#define dt_prop_u32_list(dt, name, data, n) \
221 dt_prop((dt), name, (data), sizeof(u32) * (n))
222
223#define dt_prop_empty(dt, name) dt_prop((dt), name, NULL, 0)
224
225static void __init dt_cpus(struct iseries_flat_dt *dt)
226{
227 unsigned char buf[32];
228 unsigned char *p;
229 unsigned int i, index;
230 struct IoHriProcessorVpd *d;
231 u32 pft_size[2];
232
233 /* yuck */
234 snprintf(buf, 32, "PowerPC,%s", cur_cpu_spec->cpu_name);
235 p = strchr(buf, ' ');
236 if (!p) p = buf + strlen(buf);
237
238 dt_start_node(dt, "cpus");
239 dt_prop_u32(dt, "#address-cells", 1);
240 dt_prop_u32(dt, "#size-cells", 0);
241
242 pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */
243 pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
244
245 for (i = 0; i < NR_LPPACAS; i++) {
246 if (lppaca[i].dyn_proc_status >= 2)
247 continue;
248
249 snprintf(p, 32 - (p - buf), "@%d", i);
250 dt_start_node(dt, buf);
251
252 dt_prop_str(dt, "device_type", device_type_cpu);
253
254 index = lppaca[i].dyn_hv_phys_proc_index;
255 d = &xIoHriProcessorVpd[index];
256
257 dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
258 dt_prop_u32(dt, "i-cache-line-size", d->xInstCacheOperandSize);
259
260 dt_prop_u32(dt, "d-cache-size", d->xDataL1CacheSizeKB * 1024);
261 dt_prop_u32(dt, "d-cache-line-size", d->xDataCacheOperandSize);
262
263 /* magic conversions to Hz copied from old code */
264 dt_prop_u32(dt, "clock-frequency",
265 ((1UL << 34) * 1000000) / d->xProcFreq);
266 dt_prop_u32(dt, "timebase-frequency",
267 ((1UL << 32) * 1000000) / d->xTimeBaseFreq);
268
269 dt_prop_u32(dt, "reg", i);
270
271 dt_prop_u32_list(dt, "ibm,pft-size", pft_size, 2);
272
273 dt_end_node(dt);
274 }
275
276 dt_end_node(dt);
277}
278
279static void __init dt_model(struct iseries_flat_dt *dt)
280{
281 char buf[16] = "IBM,";
282
283 /* N.B. lparcfg.c knows about the "IBM," prefixes ... */
284 /* "IBM," + mfgId[2:3] + systemSerial[1:5] */
285 strne2a(buf + 4, xItExtVpdPanel.mfgID + 2, 2);
286 strne2a(buf + 6, xItExtVpdPanel.systemSerial + 1, 5);
287 buf[11] = '\0';
288 dt_prop_str(dt, "system-id", buf);
289
290 /* "IBM," + machineType[0:4] */
291 strne2a(buf + 4, xItExtVpdPanel.machineType, 4);
292 buf[8] = '\0';
293 dt_prop_str(dt, "model", buf);
294
295 dt_prop_str(dt, "compatible", "IBM,iSeries");
296 dt_prop_u32(dt, "ibm,partition-no", HvLpConfig_getLpIndex());
297}
298
299static void __init dt_initrd(struct iseries_flat_dt *dt)
300{
301#ifdef CONFIG_BLK_DEV_INITRD
302 if (naca.xRamDisk) {
303 dt_prop_u64(dt, "linux,initrd-start", (u64)naca.xRamDisk);
304 dt_prop_u64(dt, "linux,initrd-end",
305 (u64)naca.xRamDisk + naca.xRamDiskSize * HW_PAGE_SIZE);
306 }
307#endif
308}
309
310static void __init dt_do_vdevice(struct iseries_flat_dt *dt,
311 const char *name, u32 reg, int unit,
312 const char *type, const char *compat, int end)
313{
314 char buf[32];
315
316 snprintf(buf, 32, "%s@%08x", name, reg + ((unit >= 0) ? unit : 0));
317 dt_start_node(dt, buf);
318 dt_prop_str(dt, "device_type", type);
319 if (compat)
320 dt_prop_str(dt, "compatible", compat);
321 dt_prop_u32(dt, "reg", reg + ((unit >= 0) ? unit : 0));
322 if (unit >= 0)
323 dt_prop_u32(dt, "linux,unit_address", unit);
324 if (end)
325 dt_end_node(dt);
326}
327
328static void __init dt_vdevices(struct iseries_flat_dt *dt)
329{
330 u32 reg = 0;
331 HvLpIndexMap vlan_map;
332 int i;
333
334 dt_start_node(dt, "vdevice");
335 dt_prop_str(dt, "device_type", device_type_vdevice);
336 dt_prop_str(dt, "compatible", "IBM,iSeries-vdevice");
337 dt_prop_u32(dt, "#address-cells", 1);
338 dt_prop_u32(dt, "#size-cells", 0);
339
340 dt_do_vdevice(dt, "vty", reg, -1, device_type_serial,
341 "IBM,iSeries-vty", 1);
342 reg++;
343
344 dt_do_vdevice(dt, "v-scsi", reg, -1, device_type_vscsi,
345 "IBM,v-scsi", 1);
346 reg++;
347
348 vlan_map = HvLpConfig_getVirtualLanIndexMap();
349 for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
350 unsigned char mac_addr[ETH_ALEN];
351
352 if ((vlan_map & (0x8000 >> i)) == 0)
353 continue;
354 dt_do_vdevice(dt, "l-lan", reg, i, device_type_network,
355 "IBM,iSeries-l-lan", 0);
356 mac_addr[0] = 0x02;
357 mac_addr[1] = 0x01;
358 mac_addr[2] = 0xff;
359 mac_addr[3] = i;
360 mac_addr[4] = 0xff;
361 mac_addr[5] = HvLpConfig_getLpIndex_outline();
362 dt_prop(dt, "local-mac-address", (char *)mac_addr, ETH_ALEN);
363 dt_prop(dt, "mac-address", (char *)mac_addr, ETH_ALEN);
364 dt_prop_u32(dt, "max-frame-size", 9000);
365 dt_prop_u32(dt, "address-bits", 48);
366
367 dt_end_node(dt);
368 }
369
370 dt_end_node(dt);
371}
372
373struct pci_class_name {
374 u16 code;
375 const char *name;
376 const char *type;
377};
378
379static struct pci_class_name __initdata pci_class_name[] = {
380 { PCI_CLASS_NETWORK_ETHERNET, "ethernet", device_type_network },
381};
382
383static struct pci_class_name * __init dt_find_pci_class_name(u16 class_code)
384{
385 struct pci_class_name *cp;
386
387 for (cp = pci_class_name;
388 cp < &pci_class_name[ARRAY_SIZE(pci_class_name)]; cp++)
389 if (cp->code == class_code)
390 return cp;
391 return NULL;
392}
393
394/*
395 * This assumes that the node slot is always on the primary bus!
396 */
397static void __init scan_bridge_slot(struct iseries_flat_dt *dt,
398 HvBusNumber bus, struct HvCallPci_BridgeInfo *bridge_info)
399{
400 HvSubBusNumber sub_bus = bridge_info->subBusNumber;
401 u16 vendor_id;
402 u16 device_id;
403 u32 class_id;
404 int err;
405 char buf[32];
406 u32 reg[5];
407 int id_sel = ISERIES_GET_DEVICE_FROM_SUBBUS(sub_bus);
408 int function = ISERIES_GET_FUNCTION_FROM_SUBBUS(sub_bus);
409 HvAgentId eads_id_sel = ISERIES_PCI_AGENTID(id_sel, function);
410 u8 devfn;
411 struct pci_class_name *cp;
412
413 /*
414 * Connect all functions of any device found.
415 */
416 for (id_sel = 1; id_sel <= bridge_info->maxAgents; id_sel++) {
417 for (function = 0; function < 8; function++) {
418 HvAgentId agent_id = ISERIES_PCI_AGENTID(id_sel,
419 function);
420 err = HvCallXm_connectBusUnit(bus, sub_bus,
421 agent_id, 0);
422 if (err) {
423 if (err != 0x302)
424 DBG("connectBusUnit(%x, %x, %x) %x\n",
425 bus, sub_bus, agent_id, err);
426 continue;
427 }
428
429 err = HvCallPci_configLoad16(bus, sub_bus, agent_id,
430 PCI_VENDOR_ID, &vendor_id);
431 if (err) {
432 DBG("ReadVendor(%x, %x, %x) %x\n",
433 bus, sub_bus, agent_id, err);
434 continue;
435 }
436 err = HvCallPci_configLoad16(bus, sub_bus, agent_id,
437 PCI_DEVICE_ID, &device_id);
438 if (err) {
439 DBG("ReadDevice(%x, %x, %x) %x\n",
440 bus, sub_bus, agent_id, err);
441 continue;
442 }
443 err = HvCallPci_configLoad32(bus, sub_bus, agent_id,
444 PCI_CLASS_REVISION , &class_id);
445 if (err) {
446 DBG("ReadClass(%x, %x, %x) %x\n",
447 bus, sub_bus, agent_id, err);
448 continue;
449 }
450
451 devfn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(eads_id_sel),
452 function);
453 cp = dt_find_pci_class_name(class_id >> 16);
454 if (cp && cp->name)
455 strncpy(buf, cp->name, sizeof(buf) - 1);
456 else
457 snprintf(buf, sizeof(buf), "pci%x,%x",
458 vendor_id, device_id);
459 buf[sizeof(buf) - 1] = '\0';
460 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
461 "@%x", PCI_SLOT(devfn));
462 buf[sizeof(buf) - 1] = '\0';
463 if (function != 0)
464 snprintf(buf + strlen(buf),
465 sizeof(buf) - strlen(buf),
466 ",%x", function);
467 dt_start_node(dt, buf);
468 reg[0] = (bus << 16) | (devfn << 8);
469 reg[1] = 0;
470 reg[2] = 0;
471 reg[3] = 0;
472 reg[4] = 0;
473 dt_prop_u32_list(dt, "reg", reg, 5);
474 if (cp && (cp->type || cp->name))
475 dt_prop_str(dt, "device_type",
476 cp->type ? cp->type : cp->name);
477 dt_prop_u32(dt, "vendor-id", vendor_id);
478 dt_prop_u32(dt, "device-id", device_id);
479 dt_prop_u32(dt, "class-code", class_id >> 8);
480 dt_prop_u32(dt, "revision-id", class_id & 0xff);
481 dt_prop_u32(dt, "linux,subbus", sub_bus);
482 dt_prop_u32(dt, "linux,agent-id", agent_id);
483 dt_prop_u32(dt, "linux,logical-slot-number",
484 bridge_info->logicalSlotNumber);
485 dt_end_node(dt);
486
487 }
488 }
489}
490
491static void __init scan_bridge(struct iseries_flat_dt *dt, HvBusNumber bus,
492 HvSubBusNumber sub_bus, int id_sel)
493{
494 struct HvCallPci_BridgeInfo bridge_info;
495 HvAgentId agent_id;
496 int function;
497 int ret;
498
499 /* Note: hvSubBus and irq is always be 0 at this level! */
500 for (function = 0; function < 8; ++function) {
501 agent_id = ISERIES_PCI_AGENTID(id_sel, function);
502 ret = HvCallXm_connectBusUnit(bus, sub_bus, agent_id, 0);
503 if (ret != 0) {
504 if (ret != 0xb)
505 DBG("connectBusUnit(%x, %x, %x) %x\n",
506 bus, sub_bus, agent_id, ret);
507 continue;
508 }
509 DBG("found device at bus %d idsel %d func %d (AgentId %x)\n",
510 bus, id_sel, function, agent_id);
511 ret = HvCallPci_getBusUnitInfo(bus, sub_bus, agent_id,
512 iseries_hv_addr(&bridge_info),
513 sizeof(struct HvCallPci_BridgeInfo));
514 if (ret != 0)
515 continue;
516 DBG("bridge info: type %x subbus %x "
517 "maxAgents %x maxsubbus %x logslot %x\n",
518 bridge_info.busUnitInfo.deviceType,
519 bridge_info.subBusNumber,
520 bridge_info.maxAgents,
521 bridge_info.maxSubBusNumber,
522 bridge_info.logicalSlotNumber);
523 if (bridge_info.busUnitInfo.deviceType ==
524 HvCallPci_BridgeDevice)
525 scan_bridge_slot(dt, bus, &bridge_info);
526 else
527 DBG("PCI: Invalid Bridge Configuration(0x%02X)",
528 bridge_info.busUnitInfo.deviceType);
529 }
530}
531
532static void __init scan_phb(struct iseries_flat_dt *dt, HvBusNumber bus)
533{
534 struct HvCallPci_DeviceInfo dev_info;
535 const HvSubBusNumber sub_bus = 0; /* EADs is always 0. */
536 int err;
537 int id_sel;
538 const int max_agents = 8;
539
540 /*
541 * Probe for EADs Bridges
542 */
543 for (id_sel = 1; id_sel < max_agents; ++id_sel) {
544 err = HvCallPci_getDeviceInfo(bus, sub_bus, id_sel,
545 iseries_hv_addr(&dev_info),
546 sizeof(struct HvCallPci_DeviceInfo));
547 if (err) {
548 if (err != 0x302)
549 DBG("getDeviceInfo(%x, %x, %x) %x\n",
550 bus, sub_bus, id_sel, err);
551 continue;
552 }
553 if (dev_info.deviceType != HvCallPci_NodeDevice) {
554 DBG("PCI: Invalid System Configuration"
555 "(0x%02X) for bus 0x%02x id 0x%02x.\n",
556 dev_info.deviceType, bus, id_sel);
557 continue;
558 }
559 scan_bridge(dt, bus, sub_bus, id_sel);
560 }
561}
562
563static void __init dt_pci_devices(struct iseries_flat_dt *dt)
564{
565 HvBusNumber bus;
566 char buf[32];
567 u32 buses[2];
568 int phb_num = 0;
569
570 /* Check all possible buses. */
571 for (bus = 0; bus < 256; bus++) {
572 int err = HvCallXm_testBus(bus);
573
574 if (err) {
575 /*
576 * Check for Unexpected Return code, a clue that
577 * something has gone wrong.
578 */
579 if (err != 0x0301)
580 DBG("Unexpected Return on Probe(0x%02X) "
581 "0x%04X\n", bus, err);
582 continue;
583 }
584 DBG("bus %d appears to exist\n", bus);
585 snprintf(buf, 32, "pci@%d", phb_num);
586 dt_start_node(dt, buf);
587 dt_prop_str(dt, "device_type", device_type_pci);
588 dt_prop_str(dt, "compatible", "IBM,iSeries-Logical-PHB");
589 dt_prop_u32(dt, "#address-cells", 3);
590 dt_prop_u32(dt, "#size-cells", 2);
591 buses[0] = buses[1] = bus;
592 dt_prop_u32_list(dt, "bus-range", buses, 2);
593 scan_phb(dt, bus);
594 dt_end_node(dt);
595 phb_num++;
596 }
597}
598
599static void dt_finish(struct iseries_flat_dt *dt)
600{
601 dt_push_u32(dt, OF_DT_END);
602 dt->header.totalsize = (unsigned long)dt_data - (unsigned long)dt;
603 klimit = ALIGN((unsigned long)dt_data, 8);
604}
605
606void * __init build_flat_dt(unsigned long phys_mem_size)
607{
608 struct iseries_flat_dt *iseries_dt;
609 u64 tmp[2];
610
611 iseries_dt = dt_init();
612
613 dt_start_node(iseries_dt, "");
614
615 dt_prop_u32(iseries_dt, "#address-cells", 2);
616 dt_prop_u32(iseries_dt, "#size-cells", 2);
617 dt_model(iseries_dt);
618
619 /* /memory */
620 dt_start_node(iseries_dt, "memory@0");
621 dt_prop_str(iseries_dt, "device_type", device_type_memory);
622 tmp[0] = 0;
623 tmp[1] = phys_mem_size;
624 dt_prop_u64_list(iseries_dt, "reg", tmp, 2);
625 dt_end_node(iseries_dt);
626
627 /* /chosen */
628 dt_start_node(iseries_dt, "chosen");
629 dt_prop_str(iseries_dt, "bootargs", cmd_line);
630 dt_initrd(iseries_dt);
631 dt_end_node(iseries_dt);
632
633 dt_cpus(iseries_dt);
634
635 dt_vdevices(iseries_dt);
636 dt_pci_devices(iseries_dt);
637
638 dt_end_node(iseries_dt);
639
640 dt_finish(iseries_dt);
641
642 return iseries_dt;
643}
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
deleted file mode 100644
index f519ee17ff7d..000000000000
--- a/arch/powerpc/platforms/iseries/exception.S
+++ /dev/null
@@ -1,311 +0,0 @@
1/*
2 * Low level routines for legacy iSeries support.
3 *
4 * Extracted from head_64.S
5 *
6 * PowerPC version
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 *
9 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
10 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
11 * Adapted for Power Macintosh by Paul Mackerras.
12 * Low-level exception handlers and MMU support
13 * rewritten by Paul Mackerras.
14 * Copyright (C) 1996 Paul Mackerras.
15 *
16 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
17 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
18 *
19 * This file contains the low-level support and setup for the
20 * PowerPC-64 platform, including trap and interrupt dispatch.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 */
27
28#include <asm/reg.h>
29#include <asm/ppc_asm.h>
30#include <asm/asm-offsets.h>
31#include <asm/thread_info.h>
32#include <asm/ptrace.h>
33#include <asm/cputable.h>
34#include <asm/mmu.h>
35
36#include "exception.h"
37
38 .text
39
40 .globl system_reset_iSeries
41system_reset_iSeries:
42 bl .relative_toc
43 mfspr r13,SPRN_SPRG3 /* Get alpaca address */
44 LOAD_REG_ADDR(r23, alpaca)
45 li r0,ALPACA_SIZE
46 sub r23,r13,r23
47 divdu r24,r23,r0 /* r24 has cpu number */
48 cmpwi 0,r24,0 /* Are we processor 0? */
49 bne 1f
50 LOAD_REG_ADDR(r13, boot_paca)
51 mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */
52 mfmsr r23
53 ori r23,r23,MSR_RI
54 mtmsrd r23 /* RI on */
55 b .__start_initialization_iSeries /* Start up the first processor */
561: mfspr r4,SPRN_CTRLF
57 li r5,CTRL_RUNLATCH /* Turn off the run light */
58 andc r4,r4,r5
59 mtspr SPRN_CTRLT,r4
60
61/* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */
62/* In the UP case we'll yield() later, and we will not access the paca anyway */
63#ifdef CONFIG_SMP
64iSeries_secondary_wait_paca:
65 HMT_LOW
66 LOAD_REG_ADDR(r23, __secondary_hold_spinloop)
67 ld r23,0(r23)
68
69 cmpdi 0,r23,0
70 bne 2f /* go on when the master is ready */
71
72 /* Keep poking the Hypervisor until we're released */
73 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
74 lis r3,0x8002
75 rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
76 li r0,-1 /* r0=-1 indicates a Hypervisor call */
77 sc /* Invoke the hypervisor via a system call */
78 b iSeries_secondary_wait_paca
79
802:
81 HMT_MEDIUM
82 sync
83
84 LOAD_REG_ADDR(r3, nr_cpu_ids) /* get number of pacas allocated */
85 lwz r3,0(r3) /* nr_cpus= or NR_CPUS can limit */
86 cmpld 0,r24,r3 /* is our cpu number allocated? */
87 bge iSeries_secondary_yield /* no, yield forever */
88
89 /* Load our paca now that it's been allocated */
90 LOAD_REG_ADDR(r13, paca)
91 ld r13,0(r13)
92 mulli r0,r24,PACA_SIZE
93 add r13,r13,r0
94 mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */
95 mfmsr r23
96 ori r23,r23,MSR_RI
97 mtmsrd r23 /* RI on */
98
99iSeries_secondary_smp_loop:
100 lbz r23,PACAPROCSTART(r13) /* Test if this processor
101 * should start */
102 cmpwi 0,r23,0
103 bne 3f /* go on when we are told */
104
105 HMT_LOW
106 /* Let the Hypervisor know we are alive */
107 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
108 lis r3,0x8002
109 rldicr r3,r3,32,15 /* r0 = (r3 << 32) & 0xffff000000000000 */
110 li r0,-1 /* r0=-1 indicates a Hypervisor call */
111 sc /* Invoke the hypervisor via a system call */
112 mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
113 b iSeries_secondary_smp_loop /* wait for signal to start */
114
1153:
116 HMT_MEDIUM
117 sync
118 LOAD_REG_ADDR(r3,current_set)
119 sldi r28,r24,3 /* get current_set[cpu#] */
120 ldx r3,r3,r28
121 addi r1,r3,THREAD_SIZE
122 subi r1,r1,STACK_FRAME_OVERHEAD
123
124 b __secondary_start /* Loop until told to go */
125#endif /* CONFIG_SMP */
126
127iSeries_secondary_yield:
128 /* Yield the processor. This is required for non-SMP kernels
129 which are running on multi-threaded machines. */
130 HMT_LOW
131 lis r3,0x8000
132 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
133 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
134 li r4,0 /* "yield timed" */
135 li r5,-1 /* "yield forever" */
136 li r0,-1 /* r0=-1 indicates a Hypervisor call */
137 sc /* Invoke the hypervisor via a system call */
138 mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
139 b iSeries_secondary_yield /* If SMP not configured, secondaries
140 * loop forever */
141
142/*** ISeries-LPAR interrupt handlers ***/
143
144 STD_EXCEPTION_ISERIES(machine_check, PACA_EXMC)
145
146 .globl data_access_iSeries
147data_access_iSeries:
148 mtspr SPRN_SPRG_SCRATCH0,r13
149BEGIN_FTR_SECTION
150 mfspr r13,SPRN_SPRG_PACA
151 std r9,PACA_EXSLB+EX_R9(r13)
152 std r10,PACA_EXSLB+EX_R10(r13)
153 mfspr r10,SPRN_DAR
154 mfspr r9,SPRN_DSISR
155 srdi r10,r10,60
156 rlwimi r10,r9,16,0x20
157 mfcr r9
158 cmpwi r10,0x2c
159 beq .do_stab_bolted_iSeries
160 ld r10,PACA_EXSLB+EX_R10(r13)
161 std r11,PACA_EXGEN+EX_R11(r13)
162 ld r11,PACA_EXSLB+EX_R9(r13)
163 std r12,PACA_EXGEN+EX_R12(r13)
164 mfspr r12,SPRN_SPRG_SCRATCH0
165 std r10,PACA_EXGEN+EX_R10(r13)
166 std r11,PACA_EXGEN+EX_R9(r13)
167 std r12,PACA_EXGEN+EX_R13(r13)
168 EXCEPTION_PROLOG_ISERIES_1
169FTR_SECTION_ELSE
170 EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0)
171 EXCEPTION_PROLOG_ISERIES_1
172ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
173 b data_access_common
174
175.do_stab_bolted_iSeries:
176 std r11,PACA_EXSLB+EX_R11(r13)
177 std r12,PACA_EXSLB+EX_R12(r13)
178 mfspr r10,SPRN_SPRG_SCRATCH0
179 std r10,PACA_EXSLB+EX_R13(r13)
180 EXCEPTION_PROLOG_ISERIES_1
181 b .do_stab_bolted
182
183 .globl data_access_slb_iSeries
184data_access_slb_iSeries:
185 mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
186 mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
187 std r3,PACA_EXSLB+EX_R3(r13)
188 mfspr r3,SPRN_DAR
189 std r9,PACA_EXSLB+EX_R9(r13)
190 mfcr r9
191#ifdef __DISABLED__
192 cmpdi r3,0
193 bge slb_miss_user_iseries
194#endif
195 std r10,PACA_EXSLB+EX_R10(r13)
196 std r11,PACA_EXSLB+EX_R11(r13)
197 std r12,PACA_EXSLB+EX_R12(r13)
198 mfspr r10,SPRN_SPRG_SCRATCH0
199 std r10,PACA_EXSLB+EX_R13(r13)
200 ld r12,PACALPPACAPTR(r13)
201 ld r12,LPPACASRR1(r12)
202 b .slb_miss_realmode
203
204 STD_EXCEPTION_ISERIES(instruction_access, PACA_EXGEN)
205
206 .globl instruction_access_slb_iSeries
207instruction_access_slb_iSeries:
208 mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
209 mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
210 std r3,PACA_EXSLB+EX_R3(r13)
211 ld r3,PACALPPACAPTR(r13)
212 ld r3,LPPACASRR0(r3) /* get SRR0 value */
213 std r9,PACA_EXSLB+EX_R9(r13)
214 mfcr r9
215#ifdef __DISABLED__
216 cmpdi r3,0
217 bge slb_miss_user_iseries
218#endif
219 std r10,PACA_EXSLB+EX_R10(r13)
220 std r11,PACA_EXSLB+EX_R11(r13)
221 std r12,PACA_EXSLB+EX_R12(r13)
222 mfspr r10,SPRN_SPRG_SCRATCH0
223 std r10,PACA_EXSLB+EX_R13(r13)
224 ld r12,PACALPPACAPTR(r13)
225 ld r12,LPPACASRR1(r12)
226 b .slb_miss_realmode
227
228#ifdef __DISABLED__
229slb_miss_user_iseries:
230 std r10,PACA_EXGEN+EX_R10(r13)
231 std r11,PACA_EXGEN+EX_R11(r13)
232 std r12,PACA_EXGEN+EX_R12(r13)
233 mfspr r10,SPRG_SCRATCH0
234 ld r11,PACA_EXSLB+EX_R9(r13)
235 ld r12,PACA_EXSLB+EX_R3(r13)
236 std r10,PACA_EXGEN+EX_R13(r13)
237 std r11,PACA_EXGEN+EX_R9(r13)
238 std r12,PACA_EXGEN+EX_R3(r13)
239 EXCEPTION_PROLOG_ISERIES_1
240 b slb_miss_user_common
241#endif
242
243 MASKABLE_EXCEPTION_ISERIES(hardware_interrupt)
244 STD_EXCEPTION_ISERIES(alignment, PACA_EXGEN)
245 STD_EXCEPTION_ISERIES(program_check, PACA_EXGEN)
246 STD_EXCEPTION_ISERIES(fp_unavailable, PACA_EXGEN)
247 MASKABLE_EXCEPTION_ISERIES(decrementer)
248 STD_EXCEPTION_ISERIES(trap_0a, PACA_EXGEN)
249 STD_EXCEPTION_ISERIES(trap_0b, PACA_EXGEN)
250
251 .globl system_call_iSeries
252system_call_iSeries:
253 mr r9,r13
254 mfspr r13,SPRN_SPRG_PACA
255 EXCEPTION_PROLOG_ISERIES_1
256 b system_call_common
257
258 STD_EXCEPTION_ISERIES(single_step, PACA_EXGEN)
259 STD_EXCEPTION_ISERIES(trap_0e, PACA_EXGEN)
260 STD_EXCEPTION_ISERIES(performance_monitor, PACA_EXGEN)
261
262decrementer_iSeries_masked:
263 /* We may not have a valid TOC pointer in here. */
264 li r11,1
265 ld r12,PACALPPACAPTR(r13)
266 stb r11,LPPACADECRINT(r12)
267 li r12,-1
268 clrldi r12,r12,33 /* set DEC to 0x7fffffff */
269 mtspr SPRN_DEC,r12
270 /* fall through */
271
272hardware_interrupt_iSeries_masked:
273 mtcrf 0x80,r9 /* Restore regs */
274 ld r12,PACALPPACAPTR(r13)
275 ld r11,LPPACASRR0(r12)
276 ld r12,LPPACASRR1(r12)
277 mtspr SPRN_SRR0,r11
278 mtspr SPRN_SRR1,r12
279 ld r9,PACA_EXGEN+EX_R9(r13)
280 ld r10,PACA_EXGEN+EX_R10(r13)
281 ld r11,PACA_EXGEN+EX_R11(r13)
282 ld r12,PACA_EXGEN+EX_R12(r13)
283 ld r13,PACA_EXGEN+EX_R13(r13)
284 rfid
285 b . /* prevent speculative execution */
286
287_INIT_STATIC(__start_initialization_iSeries)
288 /* Clear out the BSS */
289 LOAD_REG_ADDR(r11,__bss_stop)
290 LOAD_REG_ADDR(r8,__bss_start)
291 sub r11,r11,r8 /* bss size */
292 addi r11,r11,7 /* round up to an even double word */
293 rldicl. r11,r11,61,3 /* shift right by 3 */
294 beq 4f
295 addi r8,r8,-8
296 li r0,0
297 mtctr r11 /* zero this many doublewords */
2983: stdu r0,8(r8)
299 bdnz 3b
3004:
301 LOAD_REG_ADDR(r1,init_thread_union)
302 addi r1,r1,THREAD_SIZE
303 li r0,0
304 stdu r0,-STACK_FRAME_OVERHEAD(r1)
305
306 bl .iSeries_early_setup
307 bl .early_setup
308
309 /* relocation is on at this point */
310
311 b .start_here_common
diff --git a/arch/powerpc/platforms/iseries/exception.h b/arch/powerpc/platforms/iseries/exception.h
deleted file mode 100644
index 50271b550a99..000000000000
--- a/arch/powerpc/platforms/iseries/exception.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef _ASM_POWERPC_ISERIES_EXCEPTION_H
2#define _ASM_POWERPC_ISERIES_EXCEPTION_H
3/*
4 * Extracted from head_64.S
5 *
6 * PowerPC version
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 *
9 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
10 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
11 * Adapted for Power Macintosh by Paul Mackerras.
12 * Low-level exception handlers and MMU support
13 * rewritten by Paul Mackerras.
14 * Copyright (C) 1996 Paul Mackerras.
15 *
16 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
17 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
18 *
19 * This file contains the low-level support and setup for the
20 * PowerPC-64 platform, including trap and interrupt dispatch.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 */
27#include <asm/exception-64s.h>
28
29#define EXCEPTION_PROLOG_ISERIES_1 \
30 mfmsr r10; \
31 ld r12,PACALPPACAPTR(r13); \
32 ld r11,LPPACASRR0(r12); \
33 ld r12,LPPACASRR1(r12); \
34 ori r10,r10,MSR_RI; \
35 mtmsrd r10,1
36
37#define STD_EXCEPTION_ISERIES(label, area) \
38 .globl label##_iSeries; \
39label##_iSeries: \
40 HMT_MEDIUM; \
41 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
42 EXCEPTION_PROLOG_1(area, NOTEST, 0); \
43 EXCEPTION_PROLOG_ISERIES_1; \
44 b label##_common
45
46#define MASKABLE_EXCEPTION_ISERIES(label) \
47 .globl label##_iSeries; \
48label##_iSeries: \
49 HMT_MEDIUM; \
50 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
51 EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0); \
52 lbz r10,PACASOFTIRQEN(r13); \
53 cmpwi 0,r10,0; \
54 beq- label##_iSeries_masked; \
55 EXCEPTION_PROLOG_ISERIES_1; \
56 b label##_common; \
57
58#endif /* _ASM_POWERPC_ISERIES_EXCEPTION_H */
diff --git a/arch/powerpc/platforms/iseries/htab.c b/arch/powerpc/platforms/iseries/htab.c
deleted file mode 100644
index 3ae66ab9d5e7..000000000000
--- a/arch/powerpc/platforms/iseries/htab.c
+++ /dev/null
@@ -1,257 +0,0 @@
1/*
2 * iSeries hashtable management.
3 * Derived from pSeries_htab.c
4 *
5 * SMP scalability work:
6 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13#include <asm/machdep.h>
14#include <asm/pgtable.h>
15#include <asm/mmu.h>
16#include <asm/mmu_context.h>
17#include <asm/abs_addr.h>
18#include <linux/spinlock.h>
19
20#include "call_hpt.h"
21
22static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp;
23
24/*
25 * Very primitive algorithm for picking up a lock
26 */
27static inline void iSeries_hlock(unsigned long slot)
28{
29 if (slot & 0x8)
30 slot = ~slot;
31 spin_lock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
32}
33
34static inline void iSeries_hunlock(unsigned long slot)
35{
36 if (slot & 0x8)
37 slot = ~slot;
38 spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
39}
40
41static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
42 unsigned long pa, unsigned long rflags,
43 unsigned long vflags, int psize, int ssize)
44{
45 long slot;
46 struct hash_pte lhpte;
47 int secondary = 0;
48
49 BUG_ON(psize != MMU_PAGE_4K);
50
51 /*
52 * The hypervisor tries both primary and secondary.
53 * If we are being called to insert in the secondary,
54 * it means we have already tried both primary and secondary,
55 * so we return failure immediately.
56 */
57 if (vflags & HPTE_V_SECONDARY)
58 return -1;
59
60 iSeries_hlock(hpte_group);
61
62 slot = HvCallHpt_findValid(&lhpte, va >> HW_PAGE_SHIFT);
63 if (unlikely(lhpte.v & HPTE_V_VALID)) {
64 if (vflags & HPTE_V_BOLTED) {
65 HvCallHpt_setSwBits(slot, 0x10, 0);
66 HvCallHpt_setPp(slot, PP_RWXX);
67 iSeries_hunlock(hpte_group);
68 if (slot < 0)
69 return 0x8 | (slot & 7);
70 else
71 return slot & 7;
72 }
73 BUG();
74 }
75
76 if (slot == -1) { /* No available entry found in either group */
77 iSeries_hunlock(hpte_group);
78 return -1;
79 }
80
81 if (slot < 0) { /* MSB set means secondary group */
82 vflags |= HPTE_V_SECONDARY;
83 secondary = 1;
84 slot &= 0x7fffffffffffffff;
85 }
86
87
88 lhpte.v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M) |
89 vflags | HPTE_V_VALID;
90 lhpte.r = hpte_encode_r(phys_to_abs(pa), MMU_PAGE_4K) | rflags;
91
92 /* Now fill in the actual HPTE */
93 HvCallHpt_addValidate(slot, secondary, &lhpte);
94
95 iSeries_hunlock(hpte_group);
96
97 return (secondary << 3) | (slot & 7);
98}
99
100static unsigned long iSeries_hpte_getword0(unsigned long slot)
101{
102 struct hash_pte hpte;
103
104 HvCallHpt_get(&hpte, slot);
105 return hpte.v;
106}
107
108static long iSeries_hpte_remove(unsigned long hpte_group)
109{
110 unsigned long slot_offset;
111 int i;
112 unsigned long hpte_v;
113
114 /* Pick a random slot to start at */
115 slot_offset = mftb() & 0x7;
116
117 iSeries_hlock(hpte_group);
118
119 for (i = 0; i < HPTES_PER_GROUP; i++) {
120 hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);
121
122 if (! (hpte_v & HPTE_V_BOLTED)) {
123 HvCallHpt_invalidateSetSwBitsGet(hpte_group +
124 slot_offset, 0, 0);
125 iSeries_hunlock(hpte_group);
126 return i;
127 }
128
129 slot_offset++;
130 slot_offset &= 0x7;
131 }
132
133 iSeries_hunlock(hpte_group);
134
135 return -1;
136}
137
138/*
139 * The HyperVisor expects the "flags" argument in this form:
140 * bits 0..59 : reserved
141 * bit 60 : N
142 * bits 61..63 : PP2,PP1,PP0
143 */
144static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
145 unsigned long va, int psize, int ssize, int local)
146{
147 struct hash_pte hpte;
148 unsigned long want_v;
149
150 iSeries_hlock(slot);
151
152 HvCallHpt_get(&hpte, slot);
153 want_v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M);
154
155 if (HPTE_V_COMPARE(hpte.v, want_v) && (hpte.v & HPTE_V_VALID)) {
156 /*
157 * Hypervisor expects bits as NPPP, which is
158 * different from how they are mapped in our PP.
159 */
160 HvCallHpt_setPp(slot, (newpp & 0x3) | ((newpp & 0x4) << 1));
161 iSeries_hunlock(slot);
162 return 0;
163 }
164 iSeries_hunlock(slot);
165
166 return -1;
167}
168
169/*
170 * Functions used to find the PTE for a particular virtual address.
171 * Only used during boot when bolting pages.
172 *
173 * Input : vpn : virtual page number
174 * Output: PTE index within the page table of the entry
175 * -1 on failure
176 */
177static long iSeries_hpte_find(unsigned long vpn)
178{
179 struct hash_pte hpte;
180 long slot;
181
182 /*
183 * The HvCallHpt_findValid interface is as follows:
184 * 0xffffffffffffffff : No entry found.
185 * 0x00000000xxxxxxxx : Entry found in primary group, slot x
186 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
187 */
188 slot = HvCallHpt_findValid(&hpte, vpn);
189 if (hpte.v & HPTE_V_VALID) {
190 if (slot < 0) {
191 slot &= 0x7fffffffffffffff;
192 slot = -slot;
193 }
194 } else
195 slot = -1;
196 return slot;
197}
198
199/*
200 * Update the page protection bits. Intended to be used to create
201 * guard pages for kernel data structures on pages which are bolted
202 * in the HPT. Assumes pages being operated on will not be stolen.
203 * Does not work on large pages.
204 *
205 * No need to lock here because we should be the only user.
206 */
207static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
208 int psize, int ssize)
209{
210 unsigned long vsid,va,vpn;
211 long slot;
212
213 BUG_ON(psize != MMU_PAGE_4K);
214
215 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
216 va = (vsid << 28) | (ea & 0x0fffffff);
217 vpn = va >> HW_PAGE_SHIFT;
218 slot = iSeries_hpte_find(vpn);
219 if (slot == -1)
220 panic("updateboltedpp: Could not find page to bolt\n");
221 HvCallHpt_setPp(slot, newpp);
222}
223
224static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
225 int psize, int ssize, int local)
226{
227 unsigned long hpte_v;
228 unsigned long avpn = va >> 23;
229 unsigned long flags;
230
231 local_irq_save(flags);
232
233 iSeries_hlock(slot);
234
235 hpte_v = iSeries_hpte_getword0(slot);
236
237 if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
238 HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
239
240 iSeries_hunlock(slot);
241
242 local_irq_restore(flags);
243}
244
245void __init hpte_init_iSeries(void)
246{
247 int i;
248
249 for (i = 0; i < ARRAY_SIZE(iSeries_hlocks); i++)
250 spin_lock_init(&iSeries_hlocks[i]);
251
252 ppc_md.hpte_invalidate = iSeries_hpte_invalidate;
253 ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
254 ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
255 ppc_md.hpte_insert = iSeries_hpte_insert;
256 ppc_md.hpte_remove = iSeries_hpte_remove;
257}
diff --git a/arch/powerpc/platforms/iseries/hvcall.S b/arch/powerpc/platforms/iseries/hvcall.S
deleted file mode 100644
index 07ae6ad5f49f..000000000000
--- a/arch/powerpc/platforms/iseries/hvcall.S
+++ /dev/null
@@ -1,94 +0,0 @@
1/*
2 * This file contains the code to perform calls to the
3 * iSeries LPAR hypervisor
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#include <asm/ppc_asm.h>
12#include <asm/processor.h>
13#include <asm/ptrace.h> /* XXX for STACK_FRAME_OVERHEAD */
14
15 .text
16
17/*
18 * Hypervisor call
19 *
20 * Invoke the iSeries hypervisor via the System Call instruction
21 * Parameters are passed to this routine in registers r3 - r10
22 *
23 * r3 contains the HV function to be called
24 * r4-r10 contain the operands to the hypervisor function
25 *
26 */
27
28_GLOBAL(HvCall)
29_GLOBAL(HvCall0)
30_GLOBAL(HvCall1)
31_GLOBAL(HvCall2)
32_GLOBAL(HvCall3)
33_GLOBAL(HvCall4)
34_GLOBAL(HvCall5)
35_GLOBAL(HvCall6)
36_GLOBAL(HvCall7)
37
38
39 mfcr r0
40 std r0,-8(r1)
41 stdu r1,-(STACK_FRAME_OVERHEAD+16)(r1)
42
43 /* r0 = 0xffffffffffffffff indicates a hypervisor call */
44
45 li r0,-1
46
47 /* Invoke the hypervisor */
48
49 sc
50
51 ld r1,0(r1)
52 ld r0,-8(r1)
53 mtcrf 0xff,r0
54
55 /* return to caller, return value in r3 */
56
57 blr
58
59_GLOBAL(HvCall0Ret16)
60_GLOBAL(HvCall1Ret16)
61_GLOBAL(HvCall2Ret16)
62_GLOBAL(HvCall3Ret16)
63_GLOBAL(HvCall4Ret16)
64_GLOBAL(HvCall5Ret16)
65_GLOBAL(HvCall6Ret16)
66_GLOBAL(HvCall7Ret16)
67
68 mfcr r0
69 std r0,-8(r1)
70 std r31,-16(r1)
71 stdu r1,-(STACK_FRAME_OVERHEAD+32)(r1)
72
73 mr r31,r4
74 li r0,-1
75 mr r4,r5
76 mr r5,r6
77 mr r6,r7
78 mr r7,r8
79 mr r8,r9
80 mr r9,r10
81
82 sc
83
84 std r3,0(r31)
85 std r4,8(r31)
86
87 mr r3,r5
88
89 ld r1,0(r1)
90 ld r0,-8(r1)
91 mtcrf 0xff,r0
92 ld r31,-16(r1)
93
94 blr
diff --git a/arch/powerpc/platforms/iseries/hvlog.c b/arch/powerpc/platforms/iseries/hvlog.c
deleted file mode 100644
index f476d71194fa..000000000000
--- a/arch/powerpc/platforms/iseries/hvlog.c
+++ /dev/null
@@ -1,35 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <asm/page.h>
11#include <asm/abs_addr.h>
12#include <asm/iseries/hv_call.h>
13#include <asm/iseries/hv_call_sc.h>
14#include <asm/iseries/hv_types.h>
15
16
17void HvCall_writeLogBuffer(const void *buffer, u64 len)
18{
19 struct HvLpBufferList hv_buf;
20 u64 left_this_page;
21 u64 cur = virt_to_abs(buffer);
22
23 while (len) {
24 hv_buf.addr = cur;
25 left_this_page = ((cur & HW_PAGE_MASK) + HW_PAGE_SIZE) - cur;
26 if (left_this_page > len)
27 left_this_page = len;
28 hv_buf.len = left_this_page;
29 len -= left_this_page;
30 HvCall2(HvCallBaseWriteLogBuffer,
31 virt_to_abs(&hv_buf),
32 left_this_page);
33 cur = (cur & HW_PAGE_MASK) + HW_PAGE_SIZE;
34 }
35}
diff --git a/arch/powerpc/platforms/iseries/hvlpconfig.c b/arch/powerpc/platforms/iseries/hvlpconfig.c
deleted file mode 100644
index f62a0c5fa670..000000000000
--- a/arch/powerpc/platforms/iseries/hvlpconfig.c
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * Copyright (C) 2001 Kyle A. Lucke, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/export.h>
20#include <asm/iseries/hv_lp_config.h>
21#include "it_lp_naca.h"
22
23HvLpIndex HvLpConfig_getLpIndex_outline(void)
24{
25 return HvLpConfig_getLpIndex();
26}
27EXPORT_SYMBOL(HvLpConfig_getLpIndex_outline);
28
29HvLpIndex HvLpConfig_getLpIndex(void)
30{
31 return itLpNaca.xLpIndex;
32}
33EXPORT_SYMBOL(HvLpConfig_getLpIndex);
34
35HvLpIndex HvLpConfig_getPrimaryLpIndex(void)
36{
37 return itLpNaca.xPrimaryLpIndex;
38}
39EXPORT_SYMBOL_GPL(HvLpConfig_getPrimaryLpIndex);
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
deleted file mode 100644
index 2f3d9110248c..000000000000
--- a/arch/powerpc/platforms/iseries/iommu.c
+++ /dev/null
@@ -1,260 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
3 *
4 * Rewrite, cleanup:
5 *
6 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
7 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
8 *
9 * Dynamic DMA mapping support, iSeries-specific parts.
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/types.h>
28#include <linux/dma-mapping.h>
29#include <linux/list.h>
30#include <linux/pci.h>
31#include <linux/export.h>
32#include <linux/slab.h>
33
34#include <asm/iommu.h>
35#include <asm/vio.h>
36#include <asm/tce.h>
37#include <asm/machdep.h>
38#include <asm/abs_addr.h>
39#include <asm/prom.h>
40#include <asm/pci-bridge.h>
41#include <asm/iseries/hv_call_xm.h>
42#include <asm/iseries/hv_call_event.h>
43#include <asm/iseries/iommu.h>
44
45static int tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
46 unsigned long uaddr, enum dma_data_direction direction,
47 struct dma_attrs *attrs)
48{
49 u64 rc;
50 u64 tce, rpn;
51
52 while (npages--) {
53 rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
54 tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
55
56 if (tbl->it_type == TCE_VB) {
57 /* Virtual Bus */
58 tce |= TCE_VALID|TCE_ALLIO;
59 if (direction != DMA_TO_DEVICE)
60 tce |= TCE_VB_WRITE;
61 } else {
62 /* PCI Bus */
63 tce |= TCE_PCI_READ; /* Read allowed */
64 if (direction != DMA_TO_DEVICE)
65 tce |= TCE_PCI_WRITE;
66 }
67
68 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce);
69 if (rc)
70 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
71 rc);
72 index++;
73 uaddr += TCE_PAGE_SIZE;
74 }
75 return 0;
76}
77
78static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
79{
80 u64 rc;
81
82 while (npages--) {
83 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
84 if (rc)
85 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
86 rc);
87 index++;
88 }
89}
90
91/*
92 * Structure passed to HvCallXm_getTceTableParms
93 */
94struct iommu_table_cb {
95 unsigned long itc_busno; /* Bus number for this tce table */
96 unsigned long itc_start; /* Will be NULL for secondary */
97 unsigned long itc_totalsize; /* Size (in pages) of whole table */
98 unsigned long itc_offset; /* Index into real tce table of the
99 start of our section */
100 unsigned long itc_size; /* Size (in pages) of our section */
101 unsigned long itc_index; /* Index of this tce table */
102 unsigned short itc_maxtables; /* Max num of tables for partition */
103 unsigned char itc_virtbus; /* Flag to indicate virtual bus */
104 unsigned char itc_slotno; /* IOA Tce Slot Index */
105 unsigned char itc_rsvd[4];
106};
107
108/*
109 * Call Hv with the architected data structure to get TCE table
110 * info. Put the returned data into the Linux representation of the
111 * TCE table data.
112 * The Hardware Tce table comes in three flavors.
113 * 1. TCE table shared between Buses.
114 * 2. TCE table per Bus.
115 * 3. TCE Table per IOA.
116 */
117void iommu_table_getparms_iSeries(unsigned long busno,
118 unsigned char slotno,
119 unsigned char virtbus,
120 struct iommu_table* tbl)
121{
122 struct iommu_table_cb *parms;
123
124 parms = kzalloc(sizeof(*parms), GFP_KERNEL);
125 if (parms == NULL)
126 panic("PCI_DMA: TCE Table Allocation failed.");
127
128 parms->itc_busno = busno;
129 parms->itc_slotno = slotno;
130 parms->itc_virtbus = virtbus;
131
132 HvCallXm_getTceTableParms(iseries_hv_addr(parms));
133
134 if (parms->itc_size == 0)
135 panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
136
137 /* itc_size is in pages worth of table, it_size is in # of entries */
138 tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE;
139 tbl->it_busno = parms->itc_busno;
140 tbl->it_offset = parms->itc_offset;
141 tbl->it_index = parms->itc_index;
142 tbl->it_blocksize = 1;
143 tbl->it_type = virtbus ? TCE_VB : TCE_PCI;
144
145 kfree(parms);
146}
147
148
149#ifdef CONFIG_PCI
150/*
151 * This function compares the known tables to find an iommu_table
152 * that has already been built for hardware TCEs.
153 */
154static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
155{
156 struct device_node *node;
157
158 for (node = NULL; (node = of_find_all_nodes(node)); ) {
159 struct pci_dn *pdn = PCI_DN(node);
160 struct iommu_table *it;
161
162 if (pdn == NULL)
163 continue;
164 it = pdn->iommu_table;
165 if ((it != NULL) &&
166 (it->it_type == TCE_PCI) &&
167 (it->it_offset == tbl->it_offset) &&
168 (it->it_index == tbl->it_index) &&
169 (it->it_size == tbl->it_size)) {
170 of_node_put(node);
171 return it;
172 }
173 }
174 return NULL;
175}
176
177
178static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)
179{
180 struct iommu_table *tbl;
181 struct device_node *dn = pci_device_to_OF_node(pdev);
182 struct pci_dn *pdn = PCI_DN(dn);
183 const u32 *lsn = of_get_property(dn, "linux,logical-slot-number", NULL);
184
185 BUG_ON(lsn == NULL);
186
187 tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
188
189 iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl);
190
191 /* Look for existing tce table */
192 pdn->iommu_table = iommu_table_find(tbl);
193 if (pdn->iommu_table == NULL)
194 pdn->iommu_table = iommu_init_table(tbl, -1);
195 else
196 kfree(tbl);
197 set_iommu_table_base(&pdev->dev, pdn->iommu_table);
198}
199#else
200#define pci_dma_dev_setup_iseries NULL
201#endif
202
203static struct iommu_table veth_iommu_table;
204static struct iommu_table vio_iommu_table;
205
206void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag)
207{
208 return iommu_alloc_coherent(NULL, &vio_iommu_table, size, dma_handle,
209 DMA_BIT_MASK(32), flag, -1);
210}
211EXPORT_SYMBOL_GPL(iseries_hv_alloc);
212
213void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle)
214{
215 iommu_free_coherent(&vio_iommu_table, size, vaddr, dma_handle);
216}
217EXPORT_SYMBOL_GPL(iseries_hv_free);
218
219dma_addr_t iseries_hv_map(void *vaddr, size_t size,
220 enum dma_data_direction direction)
221{
222 return iommu_map_page(NULL, &vio_iommu_table, virt_to_page(vaddr),
223 (unsigned long)vaddr % PAGE_SIZE, size,
224 DMA_BIT_MASK(32), direction, NULL);
225}
226
227void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
228 enum dma_data_direction direction)
229{
230 iommu_unmap_page(&vio_iommu_table, dma_handle, size, direction, NULL);
231}
232
233void __init iommu_vio_init(void)
234{
235 iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
236 veth_iommu_table.it_size /= 2;
237 vio_iommu_table = veth_iommu_table;
238 vio_iommu_table.it_offset += veth_iommu_table.it_size;
239
240 if (!iommu_init_table(&veth_iommu_table, -1))
241 printk("Virtual Bus VETH TCE table failed.\n");
242 if (!iommu_init_table(&vio_iommu_table, -1))
243 printk("Virtual Bus VIO TCE table failed.\n");
244}
245
246struct iommu_table *vio_build_iommu_table_iseries(struct vio_dev *dev)
247{
248 if (strcmp(dev->type, "network") == 0)
249 return &veth_iommu_table;
250 return &vio_iommu_table;
251}
252
253void iommu_init_early_iSeries(void)
254{
255 ppc_md.tce_build = tce_build_iSeries;
256 ppc_md.tce_free = tce_free_iSeries;
257
258 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_iseries;
259 set_pci_dma_ops(&dma_iommu_ops);
260}
diff --git a/arch/powerpc/platforms/iseries/ipl_parms.h b/arch/powerpc/platforms/iseries/ipl_parms.h
deleted file mode 100644
index 83e4ca42fc57..000000000000
--- a/arch/powerpc/platforms/iseries/ipl_parms.h
+++ /dev/null
@@ -1,68 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_IPL_PARMS_H
19#define _ISERIES_IPL_PARMS_H
20
21/*
22 * This struct maps the IPL Parameters DMA'd from the SP.
23 *
24 * Warning:
25 * This data must map in exactly 64 bytes and match the architecture for
26 * the IPL parms
27 */
28
29#include <asm/types.h>
30
31struct ItIplParmsReal {
32 u8 xFormat; // Defines format of IplParms x00-x00
33 u8 xRsvd01:6; // Reserved x01-x01
34 u8 xAlternateSearch:1; // Alternate search indicator ...
35 u8 xUaSupplied:1; // UA Supplied on programmed IPL...
36 u8 xLsUaFormat; // Format byte for UA x02-x02
37 u8 xRsvd02; // Reserved x03-x03
38 u32 xLsUa; // LS UA x04-x07
39 u32 xUnusedLsLid; // First OS LID to load x08-x0B
40 u16 xLsBusNumber; // LS Bus Number x0C-x0D
41 u8 xLsCardAdr; // LS Card Address x0E-x0E
42 u8 xLsBoardAdr; // LS Board Address x0F-x0F
43 u32 xRsvd03; // Reserved x10-x13
44 u8 xSpcnPresent:1; // SPCN present x14-x14
45 u8 xCpmPresent:1; // CPM present ...
46 u8 xRsvd04:6; // Reserved ...
47 u8 xRsvd05:4; // Reserved x15-x15
48 u8 xKeyLock:4; // Keylock setting ...
49 u8 xRsvd06:6; // Reserved x16-x16
50 u8 xIplMode:2; // Ipl mode (A|B|C|D) ...
51 u8 xHwIplType; // Fast v slow v slow EC HW IPL x17-x17
52 u16 xCpmEnabledIpl:1; // CPM in effect when IPL initiated x18-x19
53 u16 xPowerOnResetIpl:1; // Indicate POR condition ...
54 u16 xMainStorePreserved:1; // Main Storage is preserved ...
55 u16 xRsvd07:13; // Reserved ...
56 u16 xIplSource:16; // Ipl source x1A-x1B
57 u8 xIplReason:8; // Reason for this IPL x1C-x1C
58 u8 xRsvd08; // Reserved x1D-x1D
59 u16 xRsvd09; // Reserved x1E-x1F
60 u16 xSysBoxType; // System Box Type x20-x21
61 u16 xSysProcType; // System Processor Type x22-x23
62 u32 xRsvd10; // Reserved x24-x27
63 u64 xRsvd11; // Reserved x28-x2F
64 u64 xRsvd12; // Reserved x30-x37
65 u64 xRsvd13; // Reserved x38-x3F
66};
67
68#endif /* _ISERIES_IPL_PARMS_H */
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
deleted file mode 100644
index b2103453eb01..000000000000
--- a/arch/powerpc/platforms/iseries/irq.c
+++ /dev/null
@@ -1,400 +0,0 @@
1/*
2 * This module supports the iSeries PCI bus interrupt handling
3 * Copyright (C) 20yy <Robert L Holtorf> <IBM Corp>
4 * Copyright (C) 2004-2005 IBM Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the:
18 * Free Software Foundation, Inc.,
19 * 59 Temple Place, Suite 330,
20 * Boston, MA 02111-1307 USA
21 *
22 * Change Activity:
23 * Created, December 13, 2000 by Wayne Holm
24 * End Change Activity
25 */
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/threads.h>
29#include <linux/smp.h>
30#include <linux/param.h>
31#include <linux/string.h>
32#include <linux/bootmem.h>
33#include <linux/irq.h>
34#include <linux/spinlock.h>
35
36#include <asm/paca.h>
37#include <asm/iseries/hv_types.h>
38#include <asm/iseries/hv_lp_event.h>
39#include <asm/iseries/hv_call_xm.h>
40#include <asm/iseries/it_lp_queue.h>
41
42#include "irq.h"
43#include "pci.h"
44#include "call_pci.h"
45
46#ifdef CONFIG_PCI
47
48enum pci_event_type {
49 pe_bus_created = 0, /* PHB has been created */
50 pe_bus_error = 1, /* PHB has failed */
51 pe_bus_failed = 2, /* Msg to Secondary, Primary failed bus */
52 pe_node_failed = 4, /* Multi-adapter bridge has failed */
53 pe_node_recovered = 5, /* Multi-adapter bridge has recovered */
54 pe_bus_recovered = 12, /* PHB has been recovered */
55 pe_unquiese_bus = 18, /* Secondary bus unquiescing */
56 pe_bridge_error = 21, /* Bridge Error */
57 pe_slot_interrupt = 22 /* Slot interrupt */
58};
59
60struct pci_event {
61 struct HvLpEvent event;
62 union {
63 u64 __align; /* Align on an 8-byte boundary */
64 struct {
65 u32 fisr;
66 HvBusNumber bus_number;
67 HvSubBusNumber sub_bus_number;
68 HvAgentId dev_id;
69 } slot;
70 struct {
71 HvBusNumber bus_number;
72 HvSubBusNumber sub_bus_number;
73 } bus;
74 struct {
75 HvBusNumber bus_number;
76 HvSubBusNumber sub_bus_number;
77 HvAgentId dev_id;
78 } node;
79 } data;
80};
81
82static DEFINE_SPINLOCK(pending_irqs_lock);
83static int num_pending_irqs;
84static int pending_irqs[NR_IRQS];
85
86static void int_received(struct pci_event *event)
87{
88 int irq;
89
90 switch (event->event.xSubtype) {
91 case pe_slot_interrupt:
92 irq = event->event.xCorrelationToken;
93 if (irq < NR_IRQS) {
94 spin_lock(&pending_irqs_lock);
95 pending_irqs[irq]++;
96 num_pending_irqs++;
97 spin_unlock(&pending_irqs_lock);
98 } else {
99 printk(KERN_WARNING "int_received: bad irq number %d\n",
100 irq);
101 HvCallPci_eoi(event->data.slot.bus_number,
102 event->data.slot.sub_bus_number,
103 event->data.slot.dev_id);
104 }
105 break;
106 /* Ignore error recovery events for now */
107 case pe_bus_created:
108 printk(KERN_INFO "int_received: system bus %d created\n",
109 event->data.bus.bus_number);
110 break;
111 case pe_bus_error:
112 case pe_bus_failed:
113 printk(KERN_INFO "int_received: system bus %d failed\n",
114 event->data.bus.bus_number);
115 break;
116 case pe_bus_recovered:
117 case pe_unquiese_bus:
118 printk(KERN_INFO "int_received: system bus %d recovered\n",
119 event->data.bus.bus_number);
120 break;
121 case pe_node_failed:
122 case pe_bridge_error:
123 printk(KERN_INFO
124 "int_received: multi-adapter bridge %d/%d/%d failed\n",
125 event->data.node.bus_number,
126 event->data.node.sub_bus_number,
127 event->data.node.dev_id);
128 break;
129 case pe_node_recovered:
130 printk(KERN_INFO
131 "int_received: multi-adapter bridge %d/%d/%d recovered\n",
132 event->data.node.bus_number,
133 event->data.node.sub_bus_number,
134 event->data.node.dev_id);
135 break;
136 default:
137 printk(KERN_ERR
138 "int_received: unrecognized event subtype 0x%x\n",
139 event->event.xSubtype);
140 break;
141 }
142}
143
144static void pci_event_handler(struct HvLpEvent *event)
145{
146 if (event && (event->xType == HvLpEvent_Type_PciIo)) {
147 if (hvlpevent_is_int(event))
148 int_received((struct pci_event *)event);
149 else
150 printk(KERN_ERR
151 "pci_event_handler: unexpected ack received\n");
152 } else if (event)
153 printk(KERN_ERR
154 "pci_event_handler: Unrecognized PCI event type 0x%x\n",
155 (int)event->xType);
156 else
157 printk(KERN_ERR "pci_event_handler: NULL event received\n");
158}
159
160#define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff)
161#define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1)
162#define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1)
163#define REAL_IRQ_TO_FUNC(irq) ((irq) & 7)
164
165/*
166 * This will be called by device drivers (via enable_IRQ)
167 * to enable INTA in the bridge interrupt status register.
168 */
169static void iseries_enable_IRQ(struct irq_data *d)
170{
171 u32 bus, dev_id, function, mask;
172 const u32 sub_bus = 0;
173 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
174
175 /* The IRQ has already been locked by the caller */
176 bus = REAL_IRQ_TO_BUS(rirq);
177 function = REAL_IRQ_TO_FUNC(rirq);
178 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
179
180 /* Unmask secondary INTA */
181 mask = 0x80000000;
182 HvCallPci_unmaskInterrupts(bus, sub_bus, dev_id, mask);
183}
184
185/* This is called by iseries_activate_IRQs */
186static unsigned int iseries_startup_IRQ(struct irq_data *d)
187{
188 u32 bus, dev_id, function, mask;
189 const u32 sub_bus = 0;
190 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
191
192 bus = REAL_IRQ_TO_BUS(rirq);
193 function = REAL_IRQ_TO_FUNC(rirq);
194 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
195
196 /* Link the IRQ number to the bridge */
197 HvCallXm_connectBusUnit(bus, sub_bus, dev_id, d->irq);
198
199 /* Unmask bridge interrupts in the FISR */
200 mask = 0x01010000 << function;
201 HvCallPci_unmaskFisr(bus, sub_bus, dev_id, mask);
202 iseries_enable_IRQ(d);
203 return 0;
204}
205
206/*
207 * This is called out of iSeries_fixup to activate interrupt
208 * generation for usable slots
209 */
210void __init iSeries_activate_IRQs()
211{
212 int irq;
213 unsigned long flags;
214
215 for_each_irq (irq) {
216 struct irq_desc *desc = irq_to_desc(irq);
217 struct irq_chip *chip;
218
219 if (!desc)
220 continue;
221
222 chip = irq_desc_get_chip(desc);
223 if (chip && chip->irq_startup) {
224 raw_spin_lock_irqsave(&desc->lock, flags);
225 chip->irq_startup(&desc->irq_data);
226 raw_spin_unlock_irqrestore(&desc->lock, flags);
227 }
228 }
229}
230
231/* this is not called anywhere currently */
232static void iseries_shutdown_IRQ(struct irq_data *d)
233{
234 u32 bus, dev_id, function, mask;
235 const u32 sub_bus = 0;
236 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
237
238 /* irq should be locked by the caller */
239 bus = REAL_IRQ_TO_BUS(rirq);
240 function = REAL_IRQ_TO_FUNC(rirq);
241 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
242
243 /* Invalidate the IRQ number in the bridge */
244 HvCallXm_connectBusUnit(bus, sub_bus, dev_id, 0);
245
246 /* Mask bridge interrupts in the FISR */
247 mask = 0x01010000 << function;
248 HvCallPci_maskFisr(bus, sub_bus, dev_id, mask);
249}
250
251/*
252 * This will be called by device drivers (via disable_IRQ)
253 * to disable INTA in the bridge interrupt status register.
254 */
255static void iseries_disable_IRQ(struct irq_data *d)
256{
257 u32 bus, dev_id, function, mask;
258 const u32 sub_bus = 0;
259 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
260
261 /* The IRQ has already been locked by the caller */
262 bus = REAL_IRQ_TO_BUS(rirq);
263 function = REAL_IRQ_TO_FUNC(rirq);
264 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
265
266 /* Mask secondary INTA */
267 mask = 0x80000000;
268 HvCallPci_maskInterrupts(bus, sub_bus, dev_id, mask);
269}
270
271static void iseries_end_IRQ(struct irq_data *d)
272{
273 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
274
275 HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq),
276 (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq));
277}
278
279static struct irq_chip iseries_pic = {
280 .name = "iSeries",
281 .irq_startup = iseries_startup_IRQ,
282 .irq_shutdown = iseries_shutdown_IRQ,
283 .irq_unmask = iseries_enable_IRQ,
284 .irq_mask = iseries_disable_IRQ,
285 .irq_eoi = iseries_end_IRQ
286};
287
288/*
289 * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot
290 * It calculates the irq value for the slot.
291 * Note that sub_bus is always 0 (at the moment at least).
292 */
293int __init iSeries_allocate_IRQ(HvBusNumber bus,
294 HvSubBusNumber sub_bus, u32 bsubbus)
295{
296 unsigned int realirq;
297 u8 idsel = ISERIES_GET_DEVICE_FROM_SUBBUS(bsubbus);
298 u8 function = ISERIES_GET_FUNCTION_FROM_SUBBUS(bsubbus);
299
300 realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3)
301 + function;
302
303 return irq_create_mapping(NULL, realirq);
304}
305
306#endif /* CONFIG_PCI */
307
308/*
309 * Get the next pending IRQ.
310 */
311unsigned int iSeries_get_irq(void)
312{
313 int irq = NO_IRQ_IGNORE;
314
315#ifdef CONFIG_SMP
316 if (get_lppaca()->int_dword.fields.ipi_cnt) {
317 get_lppaca()->int_dword.fields.ipi_cnt = 0;
318 smp_ipi_demux();
319 }
320#endif /* CONFIG_SMP */
321 if (hvlpevent_is_pending())
322 process_hvlpevents();
323
324#ifdef CONFIG_PCI
325 if (num_pending_irqs) {
326 spin_lock(&pending_irqs_lock);
327 for (irq = 0; irq < NR_IRQS; irq++) {
328 if (pending_irqs[irq]) {
329 pending_irqs[irq]--;
330 num_pending_irqs--;
331 break;
332 }
333 }
334 spin_unlock(&pending_irqs_lock);
335 if (irq >= NR_IRQS)
336 irq = NO_IRQ_IGNORE;
337 }
338#endif
339
340 return irq;
341}
342
343#ifdef CONFIG_PCI
344
345static int iseries_irq_host_map(struct irq_host *h, unsigned int virq,
346 irq_hw_number_t hw)
347{
348 irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq);
349
350 return 0;
351}
352
353static int iseries_irq_host_match(struct irq_host *h, struct device_node *np)
354{
355 /* Match all */
356 return 1;
357}
358
359static struct irq_host_ops iseries_irq_host_ops = {
360 .map = iseries_irq_host_map,
361 .match = iseries_irq_host_match,
362};
363
364/*
365 * This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c
366 * It must be called before the bus walk.
367 */
368void __init iSeries_init_IRQ(void)
369{
370 /* Register PCI event handler and open an event path */
371 struct irq_host *host;
372 int ret;
373
374 /*
375 * The Hypervisor only allows us up to 256 interrupt
376 * sources (the irq number is passed in a u8).
377 */
378 irq_set_virq_count(256);
379
380 /* Create irq host. No need for a revmap since HV will give us
381 * back our virtual irq number
382 */
383 host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
384 &iseries_irq_host_ops, 0);
385 BUG_ON(host == NULL);
386 irq_set_default_host(host);
387
388 ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
389 &pci_event_handler);
390 if (ret == 0) {
391 ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
392 if (ret != 0)
393 printk(KERN_ERR "iseries_init_IRQ: open event path "
394 "failed with rc 0x%x\n", ret);
395 } else
396 printk(KERN_ERR "iseries_init_IRQ: register handler "
397 "failed with rc 0x%x\n", ret);
398}
399
400#endif /* CONFIG_PCI */
diff --git a/arch/powerpc/platforms/iseries/irq.h b/arch/powerpc/platforms/iseries/irq.h
deleted file mode 100644
index a1c236074034..000000000000
--- a/arch/powerpc/platforms/iseries/irq.h
+++ /dev/null
@@ -1,13 +0,0 @@
1#ifndef _ISERIES_IRQ_H
2#define _ISERIES_IRQ_H
3
4#ifdef CONFIG_PCI
5extern void iSeries_init_IRQ(void);
6extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32);
7extern void iSeries_activate_IRQs(void);
8#else
9#define iSeries_init_IRQ NULL
10#endif
11extern unsigned int iSeries_get_irq(void);
12
13#endif /* _ISERIES_IRQ_H */
diff --git a/arch/powerpc/platforms/iseries/it_exp_vpd_panel.h b/arch/powerpc/platforms/iseries/it_exp_vpd_panel.h
deleted file mode 100644
index 6de9097b7f57..000000000000
--- a/arch/powerpc/platforms/iseries/it_exp_vpd_panel.h
+++ /dev/null
@@ -1,51 +0,0 @@
1/*
2 * Copyright (C) 2002 Dave Boutcher IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H
19#define _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H
20
21/*
22 * This struct maps the panel information
23 *
24 * Warning:
25 * This data must match the architecture for the panel information
26 */
27
28#include <asm/types.h>
29
30struct ItExtVpdPanel {
31 /* Definition of the Extended Vpd On Panel Data Area */
32 char systemSerial[8];
33 char mfgID[4];
34 char reserved1[24];
35 char machineType[4];
36 char systemID[6];
37 char somUniqueCnt[4];
38 char serialNumberCount;
39 char reserved2[7];
40 u16 bbu3;
41 u16 bbu2;
42 u16 bbu1;
43 char xLocationLabel[8];
44 u8 xRsvd1[6];
45 u16 xFrameId;
46 u8 xRsvd2[48];
47};
48
49extern struct ItExtVpdPanel xItExtVpdPanel;
50
51#endif /* _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H */
diff --git a/arch/powerpc/platforms/iseries/it_lp_naca.h b/arch/powerpc/platforms/iseries/it_lp_naca.h
deleted file mode 100644
index cf6dcf6ef07b..000000000000
--- a/arch/powerpc/platforms/iseries/it_lp_naca.h
+++ /dev/null
@@ -1,80 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _PLATFORMS_ISERIES_IT_LP_NACA_H
19#define _PLATFORMS_ISERIES_IT_LP_NACA_H
20
21#include <linux/types.h>
22
23/*
24 * This control block contains the data that is shared between the
25 * hypervisor (PLIC) and the OS.
26 */
27
28struct ItLpNaca {
29// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
30 u32 xDesc; // Eye catcher x00-x03
31 u16 xSize; // Size of this class x04-x05
32 u16 xIntHdlrOffset; // Offset to IntHdlr array x06-x07
33 u8 xMaxIntHdlrEntries; // Number of entries in array x08-x08
34 u8 xPrimaryLpIndex; // LP Index of Primary x09-x09
35 u8 xServiceLpIndex; // LP Ind of Service Focal Point x0A-x0A
36 u8 xLpIndex; // LP Index x0B-x0B
37 u16 xMaxLpQueues; // Number of allocated queues x0C-x0D
38 u16 xLpQueueOffset; // Offset to start of LP queues x0E-x0F
39 u8 xPirEnvironMode; // Piranha or hardware x10-x10
40 u8 xPirConsoleMode; // Piranha console indicator x11-x11
41 u8 xPirDasdMode; // Piranha dasd indicator x12-x12
42 u8 xRsvd1_0[5]; // Reserved for Piranha related x13-x17
43 u8 flags; // flags, see below x18-x1F
44 u8 xSpVpdFormat; // VPD areas are in CSP format ...
45 u8 xIntProcRatio; // Ratio of int procs to procs ...
46 u8 xRsvd1_2[5]; // Reserved ...
47 u16 xRsvd1_3; // Reserved x20-x21
48 u16 xPlicVrmIndex; // VRM index of PLIC x22-x23
49 u16 xMinSupportedSlicVrmInd;// Min supported OS VRM index x24-x25
50 u16 xMinCompatableSlicVrmInd;// Min compatible OS VRM index x26-x27
51 u64 xLoadAreaAddr; // ER address of load area x28-x2F
52 u32 xLoadAreaChunks; // Chunks for the load area x30-x33
53 u32 xPaseSysCallCRMask; // Mask used to test CR before x34-x37
54 // doing an ASR switch on PASE
55 // system call.
56 u64 xSlicSegmentTablePtr; // Pointer to Slic seg table. x38-x3f
57 u8 xRsvd1_4[64]; // x40-x7F
58
59// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
60 u8 xRsvd2_0[128]; // Reserved x00-x7F
61
62// CACHE_LINE_3-6 0x0100 - 0x02FF Contains LP Queue indicators
63// NB: Padding required to keep xInterruptHdlr at x300 which is required
64// for v4r4 PLIC.
65 u8 xOldLpQueue[128]; // LP Queue needed for v4r4 100-17F
66 u8 xRsvd3_0[384]; // Reserved 180-2FF
67
68// CACHE_LINE_7-8 0x0300 - 0x03FF Contains the address of the OS interrupt
69// handlers
70 u64 xInterruptHdlr[32]; // Interrupt handlers 300-x3FF
71};
72
73extern struct ItLpNaca itLpNaca;
74
75#define ITLPNACA_LPAR 0x80 /* Is LPAR installed on the system */
76#define ITLPNACA_PARTITIONED 0x40 /* Is the system partitioned */
77#define ITLPNACA_HWSYNCEDTBS 0x20 /* Hardware synced TBs */
78#define ITLPNACA_HMTINT 0x10 /* Utilize MHT for interrupts */
79
80#endif /* _PLATFORMS_ISERIES_IT_LP_NACA_H */
diff --git a/arch/powerpc/platforms/iseries/ksyms.c b/arch/powerpc/platforms/iseries/ksyms.c
deleted file mode 100644
index 997e234fb8b7..000000000000
--- a/arch/powerpc/platforms/iseries/ksyms.c
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * (C) 2001-2005 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/export.h>
10
11#include <asm/hw_irq.h>
12#include <asm/iseries/hv_call_sc.h>
13
14EXPORT_SYMBOL(HvCall0);
15EXPORT_SYMBOL(HvCall1);
16EXPORT_SYMBOL(HvCall2);
17EXPORT_SYMBOL(HvCall3);
18EXPORT_SYMBOL(HvCall4);
19EXPORT_SYMBOL(HvCall5);
20EXPORT_SYMBOL(HvCall6);
21EXPORT_SYMBOL(HvCall7);
diff --git a/arch/powerpc/platforms/iseries/lpardata.c b/arch/powerpc/platforms/iseries/lpardata.c
deleted file mode 100644
index 00e0ec813a1c..000000000000
--- a/arch/powerpc/platforms/iseries/lpardata.c
+++ /dev/null
@@ -1,318 +0,0 @@
1/*
2 * Copyright 2001 Mike Corrigan, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/types.h>
10#include <linux/threads.h>
11#include <linux/bitops.h>
12#include <asm/processor.h>
13#include <asm/ptrace.h>
14#include <asm/abs_addr.h>
15#include <asm/lppaca.h>
16#include <asm/paca.h>
17#include <asm/iseries/lpar_map.h>
18#include <asm/iseries/it_lp_queue.h>
19#include <asm/iseries/alpaca.h>
20
21#include "naca.h"
22#include "vpd_areas.h"
23#include "spcomm_area.h"
24#include "ipl_parms.h"
25#include "processor_vpd.h"
26#include "release_data.h"
27#include "it_exp_vpd_panel.h"
28#include "it_lp_naca.h"
29
30/* The HvReleaseData is the root of the information shared between
31 * the hypervisor and Linux.
32 */
33const struct HvReleaseData hvReleaseData = {
34 .xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */
35 .xSize = sizeof(struct HvReleaseData),
36 .xVpdAreasPtrOffset = offsetof(struct naca_struct, xItVpdAreas),
37 .xSlicNacaAddr = &naca, /* 64-bit Naca address */
38 .xMsNucDataOffset = LPARMAP_PHYS,
39 .xFlags = HVREL_TAGSINACTIVE /* tags inactive */
40 /* 64 bit */
41 /* shared processors */
42 /* HMT allowed */
43 | 6, /* TEMP: This allows non-GA driver */
44 .xVrmIndex = 4, /* We are v5r2m0 */
45 .xMinSupportedPlicVrmIndex = 3, /* v5r1m0 */
46 .xMinCompatablePlicVrmIndex = 3, /* v5r1m0 */
47 .xVrmName = { 0xd3, 0x89, 0x95, 0xa4, /* "Linux 2.4.64" ebcdic */
48 0xa7, 0x40, 0xf2, 0x4b,
49 0xf4, 0x4b, 0xf6, 0xf4 },
50};
51
52/*
53 * The NACA. The first dword of the naca is required by the iSeries
54 * hypervisor to point to itVpdAreas. The hypervisor finds the NACA
55 * through the pointer in hvReleaseData.
56 */
57struct naca_struct naca = {
58 .xItVpdAreas = &itVpdAreas,
59 .xRamDisk = 0,
60 .xRamDiskSize = 0,
61};
62
63struct ItLpRegSave {
64 u32 xDesc; // Eye catcher "LpRS" ebcdic 000-003
65 u16 xSize; // Size of this class 004-005
66 u8 xInUse; // Area is live 006-007
67 u8 xRsvd1[9]; // Reserved 007-00F
68
69 u8 xFixedRegSave[352]; // Fixed Register Save Area 010-16F
70 u32 xCTRL; // Control Register 170-173
71 u32 xDEC; // Decrementer 174-177
72 u32 xFPSCR; // FP Status and Control Reg 178-17B
73 u32 xPVR; // Processor Version Number 17C-17F
74
75 u64 xMMCR0; // Monitor Mode Control Reg 0 180-187
76 u32 xPMC1; // Perf Monitor Counter 1 188-18B
77 u32 xPMC2; // Perf Monitor Counter 2 18C-18F
78 u32 xPMC3; // Perf Monitor Counter 3 190-193
79 u32 xPMC4; // Perf Monitor Counter 4 194-197
80 u32 xPIR; // Processor ID Reg 198-19B
81
82 u32 xMMCR1; // Monitor Mode Control Reg 1 19C-19F
83 u32 xMMCRA; // Monitor Mode Control Reg A 1A0-1A3
84 u32 xPMC5; // Perf Monitor Counter 5 1A4-1A7
85 u32 xPMC6; // Perf Monitor Counter 6 1A8-1AB
86 u32 xPMC7; // Perf Monitor Counter 7 1AC-1AF
87 u32 xPMC8; // Perf Monitor Counter 8 1B0-1B3
88 u32 xTSC; // Thread Switch Control 1B4-1B7
89 u32 xTST; // Thread Switch Timeout 1B8-1BB
90 u32 xRsvd; // Reserved 1BC-1BF
91
92 u64 xACCR; // Address Compare Control Reg 1C0-1C7
93 u64 xIMR; // Instruction Match Register 1C8-1CF
94 u64 xSDR1; // Storage Description Reg 1 1D0-1D7
95 u64 xSPRG0; // Special Purpose Reg General0 1D8-1DF
96 u64 xSPRG1; // Special Purpose Reg General1 1E0-1E7
97 u64 xSPRG2; // Special Purpose Reg General2 1E8-1EF
98 u64 xSPRG3; // Special Purpose Reg General3 1F0-1F7
99 u64 xTB; // Time Base Register 1F8-1FF
100
101 u64 xFPR[32]; // Floating Point Registers 200-2FF
102
103 u64 xMSR; // Machine State Register 300-307
104 u64 xNIA; // Next Instruction Address 308-30F
105
106 u64 xDABR; // Data Address Breakpoint Reg 310-317
107 u64 xIABR; // Inst Address Breakpoint Reg 318-31F
108
109 u64 xHID0; // HW Implementation Dependent0 320-327
110
111 u64 xHID4; // HW Implementation Dependent4 328-32F
112 u64 xSCOMd; // SCON Data Reg (SPRG4) 330-337
113 u64 xSCOMc; // SCON Command Reg (SPRG5) 338-33F
114 u64 xSDAR; // Sample Data Address Register 340-347
115 u64 xSIAR; // Sample Inst Address Register 348-34F
116
117 u8 xRsvd3[176]; // Reserved 350-3FF
118};
119
120extern void system_reset_iSeries(void);
121extern void machine_check_iSeries(void);
122extern void data_access_iSeries(void);
123extern void instruction_access_iSeries(void);
124extern void hardware_interrupt_iSeries(void);
125extern void alignment_iSeries(void);
126extern void program_check_iSeries(void);
127extern void fp_unavailable_iSeries(void);
128extern void decrementer_iSeries(void);
129extern void trap_0a_iSeries(void);
130extern void trap_0b_iSeries(void);
131extern void system_call_iSeries(void);
132extern void single_step_iSeries(void);
133extern void trap_0e_iSeries(void);
134extern void performance_monitor_iSeries(void);
135extern void data_access_slb_iSeries(void);
136extern void instruction_access_slb_iSeries(void);
137
138struct ItLpNaca itLpNaca = {
139 .xDesc = 0xd397d581, /* "LpNa" ebcdic */
140 .xSize = 0x0400, /* size of ItLpNaca */
141 .xIntHdlrOffset = 0x0300, /* offset to int array */
142 .xMaxIntHdlrEntries = 19, /* # ents */
143 .xPrimaryLpIndex = 0, /* Part # of primary */
144 .xServiceLpIndex = 0, /* Part # of serv */
145 .xLpIndex = 0, /* Part # of me */
146 .xMaxLpQueues = 0, /* # of LP queues */
147 .xLpQueueOffset = 0x100, /* offset of start of LP queues */
148 .xPirEnvironMode = 0, /* Piranha stuff */
149 .xPirConsoleMode = 0,
150 .xPirDasdMode = 0,
151 .flags = 0,
152 .xSpVpdFormat = 0,
153 .xIntProcRatio = 0,
154 .xPlicVrmIndex = 0, /* VRM index of PLIC */
155 .xMinSupportedSlicVrmInd = 0, /* min supported SLIC */
156 .xMinCompatableSlicVrmInd = 0, /* min compat SLIC */
157 .xLoadAreaAddr = 0, /* 64-bit addr of load area */
158 .xLoadAreaChunks = 0, /* chunks for load area */
159 .xPaseSysCallCRMask = 0, /* PASE mask */
160 .xSlicSegmentTablePtr = 0, /* seg table */
161 .xOldLpQueue = { 0 }, /* Old LP Queue */
162 .xInterruptHdlr = {
163 (u64)system_reset_iSeries, /* 0x100 System Reset */
164 (u64)machine_check_iSeries, /* 0x200 Machine Check */
165 (u64)data_access_iSeries, /* 0x300 Data Access */
166 (u64)instruction_access_iSeries, /* 0x400 Instruction Access */
167 (u64)hardware_interrupt_iSeries, /* 0x500 External */
168 (u64)alignment_iSeries, /* 0x600 Alignment */
169 (u64)program_check_iSeries, /* 0x700 Program Check */
170 (u64)fp_unavailable_iSeries, /* 0x800 FP Unavailable */
171 (u64)decrementer_iSeries, /* 0x900 Decrementer */
172 (u64)trap_0a_iSeries, /* 0xa00 Trap 0A */
173 (u64)trap_0b_iSeries, /* 0xb00 Trap 0B */
174 (u64)system_call_iSeries, /* 0xc00 System Call */
175 (u64)single_step_iSeries, /* 0xd00 Single Step */
176 (u64)trap_0e_iSeries, /* 0xe00 Trap 0E */
177 (u64)performance_monitor_iSeries,/* 0xf00 Performance Monitor */
178 0, /* int 0x1000 */
179 0, /* int 0x1010 */
180 0, /* int 0x1020 CPU ctls */
181 (u64)hardware_interrupt_iSeries, /* SC Ret Hdlr */
182 (u64)data_access_slb_iSeries, /* 0x380 D-SLB */
183 (u64)instruction_access_slb_iSeries /* 0x480 I-SLB */
184 }
185};
186
187/* May be filled in by the hypervisor so cannot end up in the BSS */
188static struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data")));
189
190/* May be filled in by the hypervisor so cannot end up in the BSS */
191struct ItExtVpdPanel xItExtVpdPanel __attribute__((__section__(".data")));
192
193#define maxPhysicalProcessors 32
194
195struct IoHriProcessorVpd xIoHriProcessorVpd[maxPhysicalProcessors] = {
196 {
197 .xInstCacheOperandSize = 32,
198 .xDataCacheOperandSize = 32,
199 .xProcFreq = 50000000,
200 .xTimeBaseFreq = 50000000,
201 .xPVR = 0x3600
202 }
203};
204
205/* Space for Main Store Vpd 27,200 bytes */
206/* May be filled in by the hypervisor so cannot end up in the BSS */
207u64 xMsVpd[3400] __attribute__((__section__(".data")));
208
209/* Space for Recovery Log Buffer */
210/* May be filled in by the hypervisor so cannot end up in the BSS */
211static u64 xRecoveryLogBuffer[32] __attribute__((__section__(".data")));
212
213static const struct SpCommArea xSpCommArea = {
214 .xDesc = 0xE2D7C3C2,
215 .xFormat = 1,
216};
217
218static const struct ItLpRegSave iseries_reg_save[] = {
219 [0 ... (NR_CPUS-1)] = {
220 .xDesc = 0xd397d9e2, /* "LpRS" */
221 .xSize = sizeof(struct ItLpRegSave),
222 },
223};
224
225#define ALPACA_INIT(number) \
226{ \
227 .lppaca_ptr = &lppaca[number], \
228 .reg_save_ptr = &iseries_reg_save[number], \
229}
230
231const struct alpaca alpaca[] = {
232 ALPACA_INIT( 0),
233#if NR_CPUS > 1
234 ALPACA_INIT( 1), ALPACA_INIT( 2), ALPACA_INIT( 3),
235#if NR_CPUS > 4
236 ALPACA_INIT( 4), ALPACA_INIT( 5), ALPACA_INIT( 6), ALPACA_INIT( 7),
237#if NR_CPUS > 8
238 ALPACA_INIT( 8), ALPACA_INIT( 9), ALPACA_INIT(10), ALPACA_INIT(11),
239 ALPACA_INIT(12), ALPACA_INIT(13), ALPACA_INIT(14), ALPACA_INIT(15),
240 ALPACA_INIT(16), ALPACA_INIT(17), ALPACA_INIT(18), ALPACA_INIT(19),
241 ALPACA_INIT(20), ALPACA_INIT(21), ALPACA_INIT(22), ALPACA_INIT(23),
242 ALPACA_INIT(24), ALPACA_INIT(25), ALPACA_INIT(26), ALPACA_INIT(27),
243 ALPACA_INIT(28), ALPACA_INIT(29), ALPACA_INIT(30), ALPACA_INIT(31),
244#if NR_CPUS > 32
245 ALPACA_INIT(32), ALPACA_INIT(33), ALPACA_INIT(34), ALPACA_INIT(35),
246 ALPACA_INIT(36), ALPACA_INIT(37), ALPACA_INIT(38), ALPACA_INIT(39),
247 ALPACA_INIT(40), ALPACA_INIT(41), ALPACA_INIT(42), ALPACA_INIT(43),
248 ALPACA_INIT(44), ALPACA_INIT(45), ALPACA_INIT(46), ALPACA_INIT(47),
249 ALPACA_INIT(48), ALPACA_INIT(49), ALPACA_INIT(50), ALPACA_INIT(51),
250 ALPACA_INIT(52), ALPACA_INIT(53), ALPACA_INIT(54), ALPACA_INIT(55),
251 ALPACA_INIT(56), ALPACA_INIT(57), ALPACA_INIT(58), ALPACA_INIT(59),
252 ALPACA_INIT(60), ALPACA_INIT(61), ALPACA_INIT(62), ALPACA_INIT(63),
253#endif
254#endif
255#endif
256#endif
257};
258
259/* The LparMap data is now located at offset 0x6000 in head.S
260 * It was put there so that the HvReleaseData could address it
261 * with a 32-bit offset as required by the iSeries hypervisor
262 *
263 * The Naca has a pointer to the ItVpdAreas. The hypervisor finds
264 * the Naca via the HvReleaseData area. The HvReleaseData has the
265 * offset into the Naca of the pointer to the ItVpdAreas.
266 */
267const struct ItVpdAreas itVpdAreas = {
268 .xSlicDesc = 0xc9a3e5c1, /* "ItVA" */
269 .xSlicSize = sizeof(struct ItVpdAreas),
270 .xSlicVpdEntries = ItVpdMaxEntries, /* # VPD array entries */
271 .xSlicDmaEntries = ItDmaMaxEntries, /* # DMA array entries */
272 .xSlicMaxLogicalProcs = NR_CPUS * 2, /* Max logical procs */
273 .xSlicMaxPhysicalProcs = maxPhysicalProcessors, /* Max physical procs */
274 .xSlicDmaToksOffset = offsetof(struct ItVpdAreas, xPlicDmaToks),
275 .xSlicVpdAdrsOffset = offsetof(struct ItVpdAreas, xSlicVpdAdrs),
276 .xSlicDmaLensOffset = offsetof(struct ItVpdAreas, xPlicDmaLens),
277 .xSlicVpdLensOffset = offsetof(struct ItVpdAreas, xSlicVpdLens),
278 .xSlicMaxSlotLabels = 0, /* max slot labels */
279 .xSlicMaxLpQueues = 1, /* max LP queues */
280 .xPlicDmaLens = { 0 }, /* DMA lengths */
281 .xPlicDmaToks = { 0 }, /* DMA tokens */
282 .xSlicVpdLens = { /* VPD lengths */
283 0,0,0, /* 0 - 2 */
284 sizeof(xItExtVpdPanel), /* 3 Extended VPD */
285 sizeof(struct alpaca), /* 4 length of (fake) Paca */
286 0, /* 5 */
287 sizeof(struct ItIplParmsReal),/* 6 length of IPL parms */
288 26992, /* 7 length of MS VPD */
289 0, /* 8 */
290 sizeof(struct ItLpNaca),/* 9 length of LP Naca */
291 0, /* 10 */
292 256, /* 11 length of Recovery Log Buf */
293 sizeof(struct SpCommArea), /* 12 length of SP Comm Area */
294 0,0,0, /* 13 - 15 */
295 sizeof(struct IoHriProcessorVpd),/* 16 length of Proc Vpd */
296 0,0,0,0,0,0, /* 17 - 22 */
297 sizeof(struct hvlpevent_queue), /* 23 length of Lp Queue */
298 0,0 /* 24 - 25 */
299 },
300 .xSlicVpdAdrs = { /* VPD addresses */
301 0,0,0, /* 0 - 2 */
302 &xItExtVpdPanel, /* 3 Extended VPD */
303 &alpaca[0], /* 4 first (fake) Paca */
304 0, /* 5 */
305 &xItIplParmsReal, /* 6 IPL parms */
306 &xMsVpd, /* 7 MS Vpd */
307 0, /* 8 */
308 &itLpNaca, /* 9 LpNaca */
309 0, /* 10 */
310 &xRecoveryLogBuffer, /* 11 Recovery Log Buffer */
311 &xSpCommArea, /* 12 SP Comm Area */
312 0,0,0, /* 13 - 15 */
313 &xIoHriProcessorVpd, /* 16 Proc Vpd */
314 0,0,0,0,0,0, /* 17 - 22 */
315 &hvlpevent_queue, /* 23 Lp Queue */
316 0,0
317 }
318};
diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c
deleted file mode 100644
index 202e22798d30..000000000000
--- a/arch/powerpc/platforms/iseries/lpevents.c
+++ /dev/null
@@ -1,341 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/stddef.h>
11#include <linux/kernel.h>
12#include <linux/sched.h>
13#include <linux/bootmem.h>
14#include <linux/seq_file.h>
15#include <linux/proc_fs.h>
16#include <linux/export.h>
17
18#include <asm/system.h>
19#include <asm/paca.h>
20#include <asm/firmware.h>
21#include <asm/iseries/it_lp_queue.h>
22#include <asm/iseries/hv_lp_event.h>
23#include <asm/iseries/hv_call_event.h>
24#include "it_lp_naca.h"
25
26/*
27 * The LpQueue is used to pass event data from the hypervisor to
28 * the partition. This is where I/O interrupt events are communicated.
29 *
30 * It is written to by the hypervisor so cannot end up in the BSS.
31 */
32struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
33
34DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);
35
36static char *event_types[HvLpEvent_Type_NumTypes] = {
37 "Hypervisor",
38 "Machine Facilities",
39 "Session Manager",
40 "SPD I/O",
41 "Virtual Bus",
42 "PCI I/O",
43 "RIO I/O",
44 "Virtual Lan",
45 "Virtual I/O"
46};
47
48/* Array of LpEvent handler functions */
49static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
50static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
51
52static struct HvLpEvent * get_next_hvlpevent(void)
53{
54 struct HvLpEvent * event;
55 event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
56
57 if (hvlpevent_is_valid(event)) {
58 /* rmb() needed only for weakly consistent machines (regatta) */
59 rmb();
60 /* Set pointer to next potential event */
61 hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
62 IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
63 IT_LP_EVENT_ALIGN;
64
65 /* Wrap to beginning if no room at end */
66 if (hvlpevent_queue.hq_current_event >
67 hvlpevent_queue.hq_last_event) {
68 hvlpevent_queue.hq_current_event =
69 hvlpevent_queue.hq_event_stack;
70 }
71 } else {
72 event = NULL;
73 }
74
75 return event;
76}
77
78static unsigned long spread_lpevents = NR_CPUS;
79
80int hvlpevent_is_pending(void)
81{
82 struct HvLpEvent *next_event;
83
84 if (smp_processor_id() >= spread_lpevents)
85 return 0;
86
87 next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
88
89 return hvlpevent_is_valid(next_event) ||
90 hvlpevent_queue.hq_overflow_pending;
91}
92
93static void hvlpevent_clear_valid(struct HvLpEvent * event)
94{
95 /* Tell the Hypervisor that we're done with this event.
96 * Also clear bits within this event that might look like valid bits.
97 * ie. on 64-byte boundaries.
98 */
99 struct HvLpEvent *tmp;
100 unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /
101 IT_LP_EVENT_ALIGN) - 1;
102
103 switch (extra) {
104 case 3:
105 tmp = (struct HvLpEvent*)((char*)event + 3 * IT_LP_EVENT_ALIGN);
106 hvlpevent_invalidate(tmp);
107 case 2:
108 tmp = (struct HvLpEvent*)((char*)event + 2 * IT_LP_EVENT_ALIGN);
109 hvlpevent_invalidate(tmp);
110 case 1:
111 tmp = (struct HvLpEvent*)((char*)event + 1 * IT_LP_EVENT_ALIGN);
112 hvlpevent_invalidate(tmp);
113 }
114
115 mb();
116
117 hvlpevent_invalidate(event);
118}
119
120void process_hvlpevents(void)
121{
122 struct HvLpEvent * event;
123
124 restart:
125 /* If we have recursed, just return */
126 if (!spin_trylock(&hvlpevent_queue.hq_lock))
127 return;
128
129 for (;;) {
130 event = get_next_hvlpevent();
131 if (event) {
132 /* Call appropriate handler here, passing
133 * a pointer to the LpEvent. The handler
134 * must make a copy of the LpEvent if it
135 * needs it in a bottom half. (perhaps for
136 * an ACK)
137 *
138 * Handlers are responsible for ACK processing
139 *
140 * The Hypervisor guarantees that LpEvents will
141 * only be delivered with types that we have
142 * registered for, so no type check is necessary
143 * here!
144 */
145 if (event->xType < HvLpEvent_Type_NumTypes)
146 __get_cpu_var(hvlpevent_counts)[event->xType]++;
147 if (event->xType < HvLpEvent_Type_NumTypes &&
148 lpEventHandler[event->xType])
149 lpEventHandler[event->xType](event);
150 else {
151 u8 type = event->xType;
152
153 /*
154 * Don't printk in the spinlock as printk
155 * may require ack events from the HV to send
156 * any characters there.
157 */
158 hvlpevent_clear_valid(event);
159 spin_unlock(&hvlpevent_queue.hq_lock);
160 printk(KERN_INFO
161 "Unexpected Lp Event type=%d\n", type);
162 goto restart;
163 }
164
165 hvlpevent_clear_valid(event);
166 } else if (hvlpevent_queue.hq_overflow_pending)
167 /*
168 * No more valid events. If overflow events are
169 * pending process them
170 */
171 HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
172 else
173 break;
174 }
175
176 spin_unlock(&hvlpevent_queue.hq_lock);
177}
178
179static int set_spread_lpevents(char *str)
180{
181 unsigned long val = simple_strtoul(str, NULL, 0);
182
183 /*
184 * The parameter is the number of processors to share in processing
185 * lp events.
186 */
187 if (( val > 0) && (val <= NR_CPUS)) {
188 spread_lpevents = val;
189 printk("lpevent processing spread over %ld processors\n", val);
190 } else {
191 printk("invalid spread_lpevents %ld\n", val);
192 }
193
194 return 1;
195}
196__setup("spread_lpevents=", set_spread_lpevents);
197
198void __init setup_hvlpevent_queue(void)
199{
200 void *eventStack;
201
202 spin_lock_init(&hvlpevent_queue.hq_lock);
203
204 /* Allocate a page for the Event Stack. */
205 eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);
206 memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);
207
208 /* Invoke the hypervisor to initialize the event stack */
209 HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);
210
211 hvlpevent_queue.hq_event_stack = eventStack;
212 hvlpevent_queue.hq_current_event = eventStack;
213 hvlpevent_queue.hq_last_event = (char *)eventStack +
214 (IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);
215 hvlpevent_queue.hq_index = 0;
216}
217
218/* Register a handler for an LpEvent type */
219int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
220{
221 if (eventType < HvLpEvent_Type_NumTypes) {
222 lpEventHandler[eventType] = handler;
223 return 0;
224 }
225 return 1;
226}
227EXPORT_SYMBOL(HvLpEvent_registerHandler);
228
229int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
230{
231 might_sleep();
232
233 if (eventType < HvLpEvent_Type_NumTypes) {
234 if (!lpEventHandlerPaths[eventType]) {
235 lpEventHandler[eventType] = NULL;
236 /*
237 * We now sleep until all other CPUs have scheduled.
238 * This ensures that the deletion is seen by all
239 * other CPUs, and that the deleted handler isn't
240 * still running on another CPU when we return.
241 */
242 synchronize_sched();
243 return 0;
244 }
245 }
246 return 1;
247}
248EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
249
250/*
251 * lpIndex is the partition index of the target partition.
252 * needed only for VirtualIo, VirtualLan and SessionMgr. Zero
253 * indicates to use our partition index - for the other types.
254 */
255int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
256{
257 if ((eventType < HvLpEvent_Type_NumTypes) &&
258 lpEventHandler[eventType]) {
259 if (lpIndex == 0)
260 lpIndex = itLpNaca.xLpIndex;
261 HvCallEvent_openLpEventPath(lpIndex, eventType);
262 ++lpEventHandlerPaths[eventType];
263 return 0;
264 }
265 return 1;
266}
267
268int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
269{
270 if ((eventType < HvLpEvent_Type_NumTypes) &&
271 lpEventHandler[eventType] &&
272 lpEventHandlerPaths[eventType]) {
273 if (lpIndex == 0)
274 lpIndex = itLpNaca.xLpIndex;
275 HvCallEvent_closeLpEventPath(lpIndex, eventType);
276 --lpEventHandlerPaths[eventType];
277 return 0;
278 }
279 return 1;
280}
281
282static int proc_lpevents_show(struct seq_file *m, void *v)
283{
284 int cpu, i;
285 unsigned long sum;
286 static unsigned long cpu_totals[NR_CPUS];
287
288 /* FIXME: do we care that there's no locking here? */
289 sum = 0;
290 for_each_online_cpu(cpu) {
291 cpu_totals[cpu] = 0;
292 for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
293 cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
294 }
295 sum += cpu_totals[cpu];
296 }
297
298 seq_printf(m, "LpEventQueue 0\n");
299 seq_printf(m, " events processed:\t%lu\n", sum);
300
301 for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
302 sum = 0;
303 for_each_online_cpu(cpu) {
304 sum += per_cpu(hvlpevent_counts, cpu)[i];
305 }
306
307 seq_printf(m, " %-20s %10lu\n", event_types[i], sum);
308 }
309
310 seq_printf(m, "\n events processed by processor:\n");
311
312 for_each_online_cpu(cpu) {
313 seq_printf(m, " CPU%02d %10lu\n", cpu, cpu_totals[cpu]);
314 }
315
316 return 0;
317}
318
319static int proc_lpevents_open(struct inode *inode, struct file *file)
320{
321 return single_open(file, proc_lpevents_show, NULL);
322}
323
324static const struct file_operations proc_lpevents_operations = {
325 .open = proc_lpevents_open,
326 .read = seq_read,
327 .llseek = seq_lseek,
328 .release = single_release,
329};
330
331static int __init proc_lpevents_init(void)
332{
333 if (!firmware_has_feature(FW_FEATURE_ISERIES))
334 return 0;
335
336 proc_create("iSeries/lpevents", S_IFREG|S_IRUGO, NULL,
337 &proc_lpevents_operations);
338 return 0;
339}
340__initcall(proc_lpevents_init);
341
diff --git a/arch/powerpc/platforms/iseries/main_store.h b/arch/powerpc/platforms/iseries/main_store.h
deleted file mode 100644
index 1a7a3f50e40b..000000000000
--- a/arch/powerpc/platforms/iseries/main_store.h
+++ /dev/null
@@ -1,165 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ISERIES_MAIN_STORE_H
20#define _ISERIES_MAIN_STORE_H
21
22/* Main Store Vpd for Condor,iStar,sStar */
23struct IoHriMainStoreSegment4 {
24 u8 msArea0Exists:1;
25 u8 msArea1Exists:1;
26 u8 msArea2Exists:1;
27 u8 msArea3Exists:1;
28 u8 reserved1:4;
29 u8 reserved2;
30
31 u8 msArea0Functional:1;
32 u8 msArea1Functional:1;
33 u8 msArea2Functional:1;
34 u8 msArea3Functional:1;
35 u8 reserved3:4;
36 u8 reserved4;
37
38 u32 totalMainStore;
39
40 u64 msArea0Ptr;
41 u64 msArea1Ptr;
42 u64 msArea2Ptr;
43 u64 msArea3Ptr;
44
45 u32 cardProductionLevel;
46
47 u32 msAdrHole;
48
49 u8 msArea0HasRiserVpd:1;
50 u8 msArea1HasRiserVpd:1;
51 u8 msArea2HasRiserVpd:1;
52 u8 msArea3HasRiserVpd:1;
53 u8 reserved5:4;
54 u8 reserved6;
55 u16 reserved7;
56
57 u8 reserved8[28];
58
59 u64 nonInterleavedBlocksStartAdr;
60 u64 nonInterleavedBlocksEndAdr;
61};
62
63/* Main Store VPD for Power4 */
64struct __attribute((packed)) IoHriMainStoreChipInfo1 {
65 u32 chipMfgID;
66 char chipECLevel[4];
67};
68
69struct IoHriMainStoreVpdIdData {
70 char typeNumber[4];
71 char modelNumber[4];
72 char partNumber[12];
73 char serialNumber[12];
74};
75
76struct __attribute((packed)) IoHriMainStoreVpdFruData {
77 char fruLabel[8];
78 u8 numberOfSlots;
79 u8 pluggingType;
80 u16 slotMapIndex;
81};
82
83struct __attribute((packed)) IoHriMainStoreAdrRangeBlock {
84 void *blockStart;
85 void *blockEnd;
86 u32 blockProcChipId;
87};
88
89#define MaxAreaAdrRangeBlocks 4
90
91struct __attribute((packed)) IoHriMainStoreArea4 {
92 u32 msVpdFormat;
93 u8 containedVpdType;
94 u8 reserved1;
95 u16 reserved2;
96
97 u64 msExists;
98 u64 msFunctional;
99
100 u32 memorySize;
101 u32 procNodeId;
102
103 u32 numAdrRangeBlocks;
104 struct IoHriMainStoreAdrRangeBlock xAdrRangeBlock[MaxAreaAdrRangeBlocks];
105
106 struct IoHriMainStoreChipInfo1 chipInfo0;
107 struct IoHriMainStoreChipInfo1 chipInfo1;
108 struct IoHriMainStoreChipInfo1 chipInfo2;
109 struct IoHriMainStoreChipInfo1 chipInfo3;
110 struct IoHriMainStoreChipInfo1 chipInfo4;
111 struct IoHriMainStoreChipInfo1 chipInfo5;
112 struct IoHriMainStoreChipInfo1 chipInfo6;
113 struct IoHriMainStoreChipInfo1 chipInfo7;
114
115 void *msRamAreaArray;
116 u32 msRamAreaArrayNumEntries;
117 u32 msRamAreaArrayEntrySize;
118
119 u32 numaDimmExists;
120 u32 numaDimmFunctional;
121 void *numaDimmArray;
122 u32 numaDimmArrayNumEntries;
123 u32 numaDimmArrayEntrySize;
124
125 struct IoHriMainStoreVpdIdData idData;
126
127 u64 powerData;
128 u64 cardAssemblyPartNum;
129 u64 chipSerialNum;
130
131 u64 reserved3;
132 char reserved4[16];
133
134 struct IoHriMainStoreVpdFruData fruData;
135
136 u8 vpdPortNum;
137 u8 reserved5;
138 u8 frameId;
139 u8 rackUnit;
140 char asciiKeywordVpd[256];
141 u32 reserved6;
142};
143
144
145struct IoHriMainStoreSegment5 {
146 u16 reserved1;
147 u8 reserved2;
148 u8 msVpdFormat;
149
150 u32 totalMainStore;
151 u64 maxConfiguredMsAdr;
152
153 struct IoHriMainStoreArea4 *msAreaArray;
154 u32 msAreaArrayNumEntries;
155 u32 msAreaArrayEntrySize;
156
157 u32 msAreaExists;
158 u32 msAreaFunctional;
159
160 u64 reserved3;
161};
162
163extern u64 xMsVpd[];
164
165#endif /* _ISERIES_MAIN_STORE_H */
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
deleted file mode 100644
index 254c1fc3d8dd..000000000000
--- a/arch/powerpc/platforms/iseries/mf.c
+++ /dev/null
@@ -1,1275 +0,0 @@
1/*
2 * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
3 * Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation
4 *
5 * This module exists as an interface between a Linux secondary partition
6 * running on an iSeries and the primary partition's Virtual Service
7 * Processor (VSP) object. The VSP has final authority over powering on/off
8 * all partitions in the iSeries. It also provides miscellaneous low-level
9 * machine facility type operations.
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/types.h>
28#include <linux/errno.h>
29#include <linux/kernel.h>
30#include <linux/init.h>
31#include <linux/completion.h>
32#include <linux/delay.h>
33#include <linux/export.h>
34#include <linux/proc_fs.h>
35#include <linux/dma-mapping.h>
36#include <linux/bcd.h>
37#include <linux/rtc.h>
38#include <linux/slab.h>
39
40#include <asm/time.h>
41#include <asm/uaccess.h>
42#include <asm/paca.h>
43#include <asm/abs_addr.h>
44#include <asm/firmware.h>
45#include <asm/iseries/mf.h>
46#include <asm/iseries/hv_lp_config.h>
47#include <asm/iseries/hv_lp_event.h>
48#include <asm/iseries/it_lp_queue.h>
49
50#include "setup.h"
51
52static int mf_initialized;
53
54/*
55 * This is the structure layout for the Machine Facilities LPAR event
56 * flows.
57 */
58struct vsp_cmd_data {
59 u64 token;
60 u16 cmd;
61 HvLpIndex lp_index;
62 u8 result_code;
63 u32 reserved;
64 union {
65 u64 state; /* GetStateOut */
66 u64 ipl_type; /* GetIplTypeOut, Function02SelectIplTypeIn */
67 u64 ipl_mode; /* GetIplModeOut, Function02SelectIplModeIn */
68 u64 page[4]; /* GetSrcHistoryIn */
69 u64 flag; /* GetAutoIplWhenPrimaryIplsOut,
70 SetAutoIplWhenPrimaryIplsIn,
71 WhiteButtonPowerOffIn,
72 Function08FastPowerOffIn,
73 IsSpcnRackPowerIncompleteOut */
74 struct {
75 u64 token;
76 u64 address_type;
77 u64 side;
78 u32 length;
79 u32 offset;
80 } kern; /* SetKernelImageIn, GetKernelImageIn,
81 SetKernelCmdLineIn, GetKernelCmdLineIn */
82 u32 length_out; /* GetKernelImageOut, GetKernelCmdLineOut */
83 u8 reserved[80];
84 } sub_data;
85};
86
87struct vsp_rsp_data {
88 struct completion com;
89 struct vsp_cmd_data *response;
90};
91
92struct alloc_data {
93 u16 size;
94 u16 type;
95 u32 count;
96 u16 reserved1;
97 u8 reserved2;
98 HvLpIndex target_lp;
99};
100
101struct ce_msg_data;
102
103typedef void (*ce_msg_comp_hdlr)(void *token, struct ce_msg_data *vsp_cmd_rsp);
104
105struct ce_msg_comp_data {
106 ce_msg_comp_hdlr handler;
107 void *token;
108};
109
110struct ce_msg_data {
111 u8 ce_msg[12];
112 char reserved[4];
113 struct ce_msg_comp_data *completion;
114};
115
116struct io_mf_lp_event {
117 struct HvLpEvent hp_lp_event;
118 u16 subtype_result_code;
119 u16 reserved1;
120 u32 reserved2;
121 union {
122 struct alloc_data alloc;
123 struct ce_msg_data ce_msg;
124 struct vsp_cmd_data vsp_cmd;
125 } data;
126};
127
128#define subtype_data(a, b, c, d) \
129 (((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
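/*
 * For illustration only -- not part of the removed file: a standalone
 * userspace sketch of what subtype_data() above produces.  It packs four
 * ASCII identifier bytes, MSB first, into one 32-bit subtype word (e.g.
 * 'M','F','V','I' for the VSP-instruction flow used later in this file).
 */
#include <stdio.h>
#include <stdint.h>

#define subtype_data(a, b, c, d) \
	(((a) << 24) + ((b) << 16) + ((c) << 8) + (d))

int main(void)
{
	uint32_t vi = subtype_data('M', 'F', 'V', 'I');
	uint32_t ce = subtype_data('M', 'F', 'C', 'E');

	/* Prints 4d465649 and 4d464345. */
	printf("MFVI = %08x\nMFCE = %08x\n", (unsigned)vi, (unsigned)ce);
	return 0;
}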
130
131/*
132 * All outgoing event traffic is kept on a FIFO queue. The first
133 * pointer points to the one that is outstanding, and all new
134 * requests get stuck on the end. Also, we keep a certain number of
135 * preallocated pending events so that we can operate very early in
136 * the boot up sequence (before kmalloc is ready).
137 */
138struct pending_event {
139 struct pending_event *next;
140 struct io_mf_lp_event event;
141 MFCompleteHandler hdlr;
142 char dma_data[72];
143 unsigned dma_data_length;
144 unsigned remote_address;
145};
146static spinlock_t pending_event_spinlock;
147static struct pending_event *pending_event_head;
148static struct pending_event *pending_event_tail;
149static struct pending_event *pending_event_avail;
150#define PENDING_EVENT_PREALLOC_LEN 16
151static struct pending_event pending_event_prealloc[PENDING_EVENT_PREALLOC_LEN];
152
153/*
154 * Put a pending event onto the available queue, so it can get reused.
155 * Attention! You must have the pending_event_spinlock before calling!
156 */
157static void free_pending_event(struct pending_event *ev)
158{
159 if (ev != NULL) {
160 ev->next = pending_event_avail;
161 pending_event_avail = ev;
162 }
163}
164
165/*
166 * Enqueue the outbound event onto the stack. If the queue was
167 * empty to begin with, we must also issue it via the Hypervisor
168 * interface. There is a section of code below that will touch
169 * the first stack pointer without the protection of the pending_event_spinlock.
170 * This is OK, because we know that nobody else will be modifying
171 * the first pointer when we do this.
172 */
173static int signal_event(struct pending_event *ev)
174{
175 int rc = 0;
176 unsigned long flags;
177 int go = 1;
178 struct pending_event *ev1;
179 HvLpEvent_Rc hv_rc;
180
181 /* enqueue the event */
182 if (ev != NULL) {
183 ev->next = NULL;
184 spin_lock_irqsave(&pending_event_spinlock, flags);
185 if (pending_event_head == NULL)
186 pending_event_head = ev;
187 else {
188 go = 0;
189 pending_event_tail->next = ev;
190 }
191 pending_event_tail = ev;
192 spin_unlock_irqrestore(&pending_event_spinlock, flags);
193 }
194
195 /* send the event */
196 while (go) {
197 go = 0;
198
199 /* any DMA data to send beforehand? */
200 if (pending_event_head->dma_data_length > 0)
201 HvCallEvent_dmaToSp(pending_event_head->dma_data,
202 pending_event_head->remote_address,
203 pending_event_head->dma_data_length,
204 HvLpDma_Direction_LocalToRemote);
205
206 hv_rc = HvCallEvent_signalLpEvent(
207 &pending_event_head->event.hp_lp_event);
208 if (hv_rc != HvLpEvent_Rc_Good) {
209 printk(KERN_ERR "mf.c: HvCallEvent_signalLpEvent() "
210 "failed with %d\n", (int)hv_rc);
211
212 spin_lock_irqsave(&pending_event_spinlock, flags);
213 ev1 = pending_event_head;
214 pending_event_head = pending_event_head->next;
215 if (pending_event_head != NULL)
216 go = 1;
217 spin_unlock_irqrestore(&pending_event_spinlock, flags);
218
219 if (ev1 == ev)
220 rc = -EIO;
221 else if (ev1->hdlr != NULL)
222 (*ev1->hdlr)((void *)ev1->event.hp_lp_event.xCorrelationToken, -EIO);
223
224 spin_lock_irqsave(&pending_event_spinlock, flags);
225 free_pending_event(ev1);
226 spin_unlock_irqrestore(&pending_event_spinlock, flags);
227 }
228 }
229
230 return rc;
231}
232
233/*
234 * Allocate a new pending_event structure, and initialize it.
235 */
236static struct pending_event *new_pending_event(void)
237{
238 struct pending_event *ev = NULL;
239 HvLpIndex primary_lp = HvLpConfig_getPrimaryLpIndex();
240 unsigned long flags;
241 struct HvLpEvent *hev;
242
243 spin_lock_irqsave(&pending_event_spinlock, flags);
244 if (pending_event_avail != NULL) {
245 ev = pending_event_avail;
246 pending_event_avail = pending_event_avail->next;
247 }
248 spin_unlock_irqrestore(&pending_event_spinlock, flags);
249 if (ev == NULL) {
250 ev = kmalloc(sizeof(struct pending_event), GFP_ATOMIC);
251 if (ev == NULL) {
252 printk(KERN_ERR "mf.c: unable to kmalloc %ld bytes\n",
253 sizeof(struct pending_event));
254 return NULL;
255 }
256 }
257 memset(ev, 0, sizeof(struct pending_event));
258 hev = &ev->event.hp_lp_event;
259 hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK | HV_LP_EVENT_INT;
260 hev->xType = HvLpEvent_Type_MachineFac;
261 hev->xSourceLp = HvLpConfig_getLpIndex();
262 hev->xTargetLp = primary_lp;
263 hev->xSizeMinus1 = sizeof(ev->event) - 1;
264 hev->xRc = HvLpEvent_Rc_Good;
265 hev->xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primary_lp,
266 HvLpEvent_Type_MachineFac);
267 hev->xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primary_lp,
268 HvLpEvent_Type_MachineFac);
269
270 return ev;
271}
272
273static int __maybe_unused
274signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd)
275{
276 struct pending_event *ev = new_pending_event();
277 int rc;
278 struct vsp_rsp_data response;
279
280 if (ev == NULL)
281 return -ENOMEM;
282
283 init_completion(&response.com);
284 response.response = vsp_cmd;
285 ev->event.hp_lp_event.xSubtype = 6;
286 ev->event.hp_lp_event.x.xSubtypeData =
287 subtype_data('M', 'F', 'V', 'I');
288 ev->event.data.vsp_cmd.token = (u64)&response;
289 ev->event.data.vsp_cmd.cmd = vsp_cmd->cmd;
290 ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
291 ev->event.data.vsp_cmd.result_code = 0xFF;
292 ev->event.data.vsp_cmd.reserved = 0;
293 memcpy(&(ev->event.data.vsp_cmd.sub_data),
294 &(vsp_cmd->sub_data), sizeof(vsp_cmd->sub_data));
295 mb();
296
297 rc = signal_event(ev);
298 if (rc == 0)
299 wait_for_completion(&response.com);
300 return rc;
301}
302
303
304/*
305 * Send a 12-byte CE message to the primary partition VSP object
306 */
307static int signal_ce_msg(char *ce_msg, struct ce_msg_comp_data *completion)
308{
309 struct pending_event *ev = new_pending_event();
310
311 if (ev == NULL)
312 return -ENOMEM;
313
314 ev->event.hp_lp_event.xSubtype = 0;
315 ev->event.hp_lp_event.x.xSubtypeData =
316 subtype_data('M', 'F', 'C', 'E');
317 memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
318 ev->event.data.ce_msg.completion = completion;
319 return signal_event(ev);
320}
321
322/*
323 * Send a 12-byte CE message (with no data) to the primary partition VSP object
324 */
325static int signal_ce_msg_simple(u8 ce_op, struct ce_msg_comp_data *completion)
326{
327 u8 ce_msg[12];
328
329 memset(ce_msg, 0, sizeof(ce_msg));
330 ce_msg[3] = ce_op;
331 return signal_ce_msg(ce_msg, completion);
332}
333
334/*
335 * Send a 12-byte CE message and DMA data to the primary partition VSP object
336 */
337static int dma_and_signal_ce_msg(char *ce_msg,
338 struct ce_msg_comp_data *completion, void *dma_data,
339 unsigned dma_data_length, unsigned remote_address)
340{
341 struct pending_event *ev = new_pending_event();
342
343 if (ev == NULL)
344 return -ENOMEM;
345
346 ev->event.hp_lp_event.xSubtype = 0;
347 ev->event.hp_lp_event.x.xSubtypeData =
348 subtype_data('M', 'F', 'C', 'E');
349 memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
350 ev->event.data.ce_msg.completion = completion;
351 memcpy(ev->dma_data, dma_data, dma_data_length);
352 ev->dma_data_length = dma_data_length;
353 ev->remote_address = remote_address;
354 return signal_event(ev);
355}
356
357/*
358 * Initiate a nice (hopefully) shutdown of Linux. We simply are
359 * going to try and send the init process a SIGINT signal. If
360 * this fails (why?), we'll simply force it off in a not-so-nice
361 * manner.
362 */
363static int shutdown(void)
364{
365 int rc = kill_cad_pid(SIGINT, 1);
366
367 if (rc) {
368 printk(KERN_ALERT "mf.c: SIGINT to init failed (%d), "
369 "hard shutdown commencing\n", rc);
370 mf_power_off();
371 } else
372 printk(KERN_INFO "mf.c: init has been successfully notified "
373 "to proceed with shutdown\n");
374 return rc;
375}
376
377/*
378 * The primary partition VSP object is sending us a new
379 * event flow. Handle it...
380 */
381static void handle_int(struct io_mf_lp_event *event)
382{
383 struct ce_msg_data *ce_msg_data;
384 struct ce_msg_data *pce_msg_data;
385 unsigned long flags;
386 struct pending_event *pev;
387
388 /* ack the interrupt */
389 event->hp_lp_event.xRc = HvLpEvent_Rc_Good;
390 HvCallEvent_ackLpEvent(&event->hp_lp_event);
391
392 /* process interrupt */
393 switch (event->hp_lp_event.xSubtype) {
394 case 0: /* CE message */
395 ce_msg_data = &event->data.ce_msg;
396 switch (ce_msg_data->ce_msg[3]) {
397 case 0x5B: /* power control notification */
398 if ((ce_msg_data->ce_msg[5] & 0x20) != 0) {
399 printk(KERN_INFO "mf.c: Commencing partition shutdown\n");
400 if (shutdown() == 0)
401 signal_ce_msg_simple(0xDB, NULL);
402 }
403 break;
404 case 0xC0: /* get time */
405 spin_lock_irqsave(&pending_event_spinlock, flags);
406 pev = pending_event_head;
407 if (pev != NULL)
408 pending_event_head = pending_event_head->next;
409 spin_unlock_irqrestore(&pending_event_spinlock, flags);
410 if (pev == NULL)
411 break;
412 pce_msg_data = &pev->event.data.ce_msg;
413 if (pce_msg_data->ce_msg[3] != 0x40)
414 break;
415 if (pce_msg_data->completion != NULL) {
416 ce_msg_comp_hdlr handler =
417 pce_msg_data->completion->handler;
418 void *token = pce_msg_data->completion->token;
419
420 if (handler != NULL)
421 (*handler)(token, ce_msg_data);
422 }
423 spin_lock_irqsave(&pending_event_spinlock, flags);
424 free_pending_event(pev);
425 spin_unlock_irqrestore(&pending_event_spinlock, flags);
426 /* send next waiting event */
427 if (pending_event_head != NULL)
428 signal_event(NULL);
429 break;
430 }
431 break;
432 case 1: /* IT sys shutdown */
433 printk(KERN_INFO "mf.c: Commencing system shutdown\n");
434 shutdown();
435 break;
436 }
437}
438
439/*
440 * The primary partition VSP object is acknowledging the receipt
441 * of a flow we sent to them. If there are other flows queued
442 * up, we must send another one now...
443 */
444static void handle_ack(struct io_mf_lp_event *event)
445{
446 unsigned long flags;
447 struct pending_event *two = NULL;
448 unsigned long free_it = 0;
449 struct ce_msg_data *ce_msg_data;
450 struct ce_msg_data *pce_msg_data;
451 struct vsp_rsp_data *rsp;
452
453 /* handle current event */
454 if (pending_event_head == NULL) {
455 printk(KERN_ERR "mf.c: stack empty for receiving ack\n");
456 return;
457 }
458
459 switch (event->hp_lp_event.xSubtype) {
460 case 0: /* CE msg */
461 ce_msg_data = &event->data.ce_msg;
462 if (ce_msg_data->ce_msg[3] != 0x40) {
463 free_it = 1;
464 break;
465 }
466 if (ce_msg_data->ce_msg[2] == 0)
467 break;
468 free_it = 1;
469 pce_msg_data = &pending_event_head->event.data.ce_msg;
470 if (pce_msg_data->completion != NULL) {
471 ce_msg_comp_hdlr handler =
472 pce_msg_data->completion->handler;
473 void *token = pce_msg_data->completion->token;
474
475 if (handler != NULL)
476 (*handler)(token, ce_msg_data);
477 }
478 break;
479 case 4: /* allocate */
480 case 5: /* deallocate */
481 if (pending_event_head->hdlr != NULL)
482 (*pending_event_head->hdlr)((void *)event->hp_lp_event.xCorrelationToken, event->data.alloc.count);
483 free_it = 1;
484 break;
485 case 6:
486 free_it = 1;
487 rsp = (struct vsp_rsp_data *)event->data.vsp_cmd.token;
488 if (rsp == NULL) {
489 printk(KERN_ERR "mf.c: no rsp\n");
490 break;
491 }
492 if (rsp->response != NULL)
493 memcpy(rsp->response, &event->data.vsp_cmd,
494 sizeof(event->data.vsp_cmd));
495 complete(&rsp->com);
496 break;
497 }
498
499 /* remove from queue */
500 spin_lock_irqsave(&pending_event_spinlock, flags);
501 if ((pending_event_head != NULL) && (free_it == 1)) {
502 struct pending_event *oldHead = pending_event_head;
503
504 pending_event_head = pending_event_head->next;
505 two = pending_event_head;
506 free_pending_event(oldHead);
507 }
508 spin_unlock_irqrestore(&pending_event_spinlock, flags);
509
510 /* send next waiting event */
511 if (two != NULL)
512 signal_event(NULL);
513}
514
515/*
516 * This is the generic event handler we are registering with
517 * the Hypervisor. Ensure the flows are for us, and then
518 * parse it enough to know if it is an interrupt or an
519 * acknowledge.
520 */
521static void hv_handler(struct HvLpEvent *event)
522{
523 if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) {
524 if (hvlpevent_is_ack(event))
525 handle_ack((struct io_mf_lp_event *)event);
526 else
527 handle_int((struct io_mf_lp_event *)event);
528 } else
529 printk(KERN_ERR "mf.c: alien event received\n");
530}
531
532/*
533 * Global kernel interface to allocate and seed events into the
534 * Hypervisor.
535 */
536void mf_allocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
537 unsigned size, unsigned count, MFCompleteHandler hdlr,
538 void *user_token)
539{
540 struct pending_event *ev = new_pending_event();
541 int rc;
542
543 if (ev == NULL) {
544 rc = -ENOMEM;
545 } else {
546 ev->event.hp_lp_event.xSubtype = 4;
547 ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
548 ev->event.hp_lp_event.x.xSubtypeData =
549 subtype_data('M', 'F', 'M', 'A');
550 ev->event.data.alloc.target_lp = target_lp;
551 ev->event.data.alloc.type = type;
552 ev->event.data.alloc.size = size;
553 ev->event.data.alloc.count = count;
554 ev->hdlr = hdlr;
555 rc = signal_event(ev);
556 }
557 if ((rc != 0) && (hdlr != NULL))
558 (*hdlr)(user_token, rc);
559}
560EXPORT_SYMBOL(mf_allocate_lp_events);
561
562/*
563 * Global kernel interface to unseed and deallocate events already in
564 * Hypervisor.
565 */
566void mf_deallocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
567 unsigned count, MFCompleteHandler hdlr, void *user_token)
568{
569 struct pending_event *ev = new_pending_event();
570 int rc;
571
572 if (ev == NULL)
573 rc = -ENOMEM;
574 else {
575 ev->event.hp_lp_event.xSubtype = 5;
576 ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
577 ev->event.hp_lp_event.x.xSubtypeData =
578 subtype_data('M', 'F', 'M', 'D');
579 ev->event.data.alloc.target_lp = target_lp;
580 ev->event.data.alloc.type = type;
581 ev->event.data.alloc.count = count;
582 ev->hdlr = hdlr;
583 rc = signal_event(ev);
584 }
585 if ((rc != 0) && (hdlr != NULL))
586 (*hdlr)(user_token, rc);
587}
588EXPORT_SYMBOL(mf_deallocate_lp_events);
589
590/*
591 * Global kernel interface to tell the VSP object in the primary
592 * partition to power this partition off.
593 */
594void mf_power_off(void)
595{
596 printk(KERN_INFO "mf.c: Down it goes...\n");
597 signal_ce_msg_simple(0x4d, NULL);
598 for (;;)
599 ;
600}
601
602/*
603 * Global kernel interface to tell the VSP object in the primary
604 * partition to reboot this partition.
605 */
606void mf_reboot(char *cmd)
607{
608 printk(KERN_INFO "mf.c: Preparing to bounce...\n");
609 signal_ce_msg_simple(0x4e, NULL);
610 for (;;)
611 ;
612}
613
614/*
615 * Display a single word SRC onto the VSP control panel.
616 */
617void mf_display_src(u32 word)
618{
619 u8 ce[12];
620
621 memset(ce, 0, sizeof(ce));
622 ce[3] = 0x4a;
623 ce[7] = 0x01;
624 ce[8] = word >> 24;
625 ce[9] = word >> 16;
626 ce[10] = word >> 8;
627 ce[11] = word;
628 signal_ce_msg(ce, NULL);
629}
630
631/*
632 * Display a single word SRC of the form "PROGXXXX" on the VSP control panel.
633 */
634static __init void mf_display_progress_src(u16 value)
635{
636 u8 ce[12];
637 u8 src[72];
638
639 memcpy(ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12);
640 memcpy(src, "\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00"
641 "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
642 "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
643 "\x00\x00\x00\x00PROGxxxx ",
644 72);
645 src[6] = value >> 8;
646 src[7] = value & 255;
647 src[44] = "0123456789ABCDEF"[(value >> 12) & 15];
648 src[45] = "0123456789ABCDEF"[(value >> 8) & 15];
649 src[46] = "0123456789ABCDEF"[(value >> 4) & 15];
650 src[47] = "0123456789ABCDEF"[value & 15];
651 dma_and_signal_ce_msg(ce, NULL, src, sizeof(src), 9 * 64 * 1024);
652}
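/*
 * For illustration only -- not from the removed file: a standalone sketch
 * of the nibble-to-ASCII-hex conversion mf_display_progress_src() uses to
 * turn a 16-bit progress value into the "PROGxxxx" SRC text.  The sample
 * progress code is made up for the example.
 */
#include <stdio.h>
#include <stdint.h>

static void prog_hex(uint16_t value, char out[5])
{
	static const char hex[] = "0123456789ABCDEF";

	/* One table lookup per nibble, most significant first. */
	out[0] = hex[(value >> 12) & 15];
	out[1] = hex[(value >> 8) & 15];
	out[2] = hex[(value >> 4) & 15];
	out[3] = hex[value & 15];
	out[4] = '\0';
}

int main(void)
{
	char buf[5];

	prog_hex(0x3A7F, buf);		/* hypothetical progress code */
	printf("PROG%s\n", buf);	/* prints PROG3A7F */
	return 0;
}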
653
654/*
655 * Clear the VSP control panel. Used to "erase" an SRC that was
656 * previously displayed.
657 */
658static void mf_clear_src(void)
659{
660 signal_ce_msg_simple(0x4b, NULL);
661}
662
663void __init mf_display_progress(u16 value)
664{
665 if (!mf_initialized)
666 return;
667
668 if (0xFFFF == value)
669 mf_clear_src();
670 else
671 mf_display_progress_src(value);
672}
673
674/*
675 * Initialization code here.
676 */
677void __init mf_init(void)
678{
679 int i;
680
681 spin_lock_init(&pending_event_spinlock);
682
683 for (i = 0; i < PENDING_EVENT_PREALLOC_LEN; i++)
684 free_pending_event(&pending_event_prealloc[i]);
685
686 HvLpEvent_registerHandler(HvLpEvent_Type_MachineFac, &hv_handler);
687
688 /* virtual continue ack */
689 signal_ce_msg_simple(0x57, NULL);
690
691 mf_initialized = 1;
692 mb();
693
694 printk(KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities "
695 "initialized\n");
696}
697
698struct rtc_time_data {
699 struct completion com;
700 struct ce_msg_data ce_msg;
701 int rc;
702};
703
704static void get_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
705{
706 struct rtc_time_data *rtc = token;
707
708 memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
709 rtc->rc = 0;
710 complete(&rtc->com);
711}
712
713static int mf_set_rtc(struct rtc_time *tm)
714{
715 char ce_time[12];
716 u8 day, mon, hour, min, sec, y1, y2;
717 unsigned year;
718
719 year = 1900 + tm->tm_year;
720 y1 = year / 100;
721 y2 = year % 100;
722
723 sec = tm->tm_sec;
724 min = tm->tm_min;
725 hour = tm->tm_hour;
726 day = tm->tm_mday;
727 mon = tm->tm_mon + 1;
728
729 sec = bin2bcd(sec);
730 min = bin2bcd(min);
731 hour = bin2bcd(hour);
732 mon = bin2bcd(mon);
733 day = bin2bcd(day);
734 y1 = bin2bcd(y1);
735 y2 = bin2bcd(y2);
736
737 memset(ce_time, 0, sizeof(ce_time));
738 ce_time[3] = 0x41;
739 ce_time[4] = y1;
740 ce_time[5] = y2;
741 ce_time[6] = sec;
742 ce_time[7] = min;
743 ce_time[8] = hour;
744 ce_time[10] = day;
745 ce_time[11] = mon;
746
747 return signal_ce_msg(ce_time, NULL);
748}
749
750static int rtc_set_tm(int rc, u8 *ce_msg, struct rtc_time *tm)
751{
752 tm->tm_wday = 0;
753 tm->tm_yday = 0;
754 tm->tm_isdst = 0;
755 if (rc) {
756 tm->tm_sec = 0;
757 tm->tm_min = 0;
758 tm->tm_hour = 0;
759 tm->tm_mday = 15;
760 tm->tm_mon = 5;
761 tm->tm_year = 52;
762 return rc;
763 }
764
765 if ((ce_msg[2] == 0xa9) ||
766 (ce_msg[2] == 0xaf)) {
767 /* TOD clock is not set */
768 tm->tm_sec = 1;
769 tm->tm_min = 1;
770 tm->tm_hour = 1;
771 tm->tm_mday = 10;
772 tm->tm_mon = 8;
773 tm->tm_year = 71;
774 mf_set_rtc(tm);
775 }
776 {
777 u8 year = ce_msg[5];
778 u8 sec = ce_msg[6];
779 u8 min = ce_msg[7];
780 u8 hour = ce_msg[8];
781 u8 day = ce_msg[10];
782 u8 mon = ce_msg[11];
783
784 sec = bcd2bin(sec);
785 min = bcd2bin(min);
786 hour = bcd2bin(hour);
787 day = bcd2bin(day);
788 mon = bcd2bin(mon);
789 year = bcd2bin(year);
790
791 if (year <= 69)
792 year += 100;
793
794 tm->tm_sec = sec;
795 tm->tm_min = min;
796 tm->tm_hour = hour;
797 tm->tm_mday = day;
798 tm->tm_mon = mon;
799 tm->tm_year = year;
800 }
801
802 return 0;
803}
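/*
 * For illustration only -- not part of the removed file: a userspace
 * sketch of the BCD round trip used by mf_set_rtc()/rtc_set_tm() above.
 * bin2bcd()/bcd2bin() here are stand-ins for the kernel helpers from
 * <linux/bcd.h>, and the sample year byte is made up for the example.
 */
#include <stdio.h>
#include <stdint.h>

static uint8_t bin2bcd(uint8_t val)
{
	return ((val / 10) << 4) | (val % 10);
}

static uint8_t bcd2bin(uint8_t val)
{
	return (val & 0x0f) + (val >> 4) * 10;
}

int main(void)
{
	unsigned year = bcd2bin(0x05);	/* BCD year byte from the CE message */

	/* Same windowing as rtc_set_tm(): two-digit years <= 69 are taken
	 * to be 20xx, expressed as years since 1900 for struct rtc_time. */
	if (year <= 69)
		year += 100;

	printf("bin2bcd(59) = 0x%02x\n", (unsigned)bin2bcd(59));	/* 0x59 */
	printf("tm_year = %u (calendar year %u)\n", year, 1900 + year);
	return 0;
}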
804
805static int mf_get_rtc(struct rtc_time *tm)
806{
807 struct ce_msg_comp_data ce_complete;
808 struct rtc_time_data rtc_data;
809 int rc;
810
811 memset(&ce_complete, 0, sizeof(ce_complete));
812 memset(&rtc_data, 0, sizeof(rtc_data));
813 init_completion(&rtc_data.com);
814 ce_complete.handler = &get_rtc_time_complete;
815 ce_complete.token = &rtc_data;
816 rc = signal_ce_msg_simple(0x40, &ce_complete);
817 if (rc)
818 return rc;
819 wait_for_completion(&rtc_data.com);
820 return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
821}
822
823struct boot_rtc_time_data {
824 int busy;
825 struct ce_msg_data ce_msg;
826 int rc;
827};
828
829static void get_boot_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
830{
831 struct boot_rtc_time_data *rtc = token;
832
833 memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
834 rtc->rc = 0;
835 rtc->busy = 0;
836}
837
838static int mf_get_boot_rtc(struct rtc_time *tm)
839{
840 struct ce_msg_comp_data ce_complete;
841 struct boot_rtc_time_data rtc_data;
842 int rc;
843
844 memset(&ce_complete, 0, sizeof(ce_complete));
845 memset(&rtc_data, 0, sizeof(rtc_data));
846 rtc_data.busy = 1;
847 ce_complete.handler = &get_boot_rtc_time_complete;
848 ce_complete.token = &rtc_data;
849 rc = signal_ce_msg_simple(0x40, &ce_complete);
850 if (rc)
851 return rc;
852 /* We need to poll here as we are not yet taking interrupts */
853 while (rtc_data.busy) {
854 if (hvlpevent_is_pending())
855 process_hvlpevents();
856 }
857 return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
858}
859
860#ifdef CONFIG_PROC_FS
861static int mf_cmdline_proc_show(struct seq_file *m, void *v)
862{
863 char *page, *p;
864 struct vsp_cmd_data vsp_cmd;
865 int rc;
866 dma_addr_t dma_addr;
867
868 /* The HV appears to return no more than 256 bytes of command line */
869 page = kmalloc(256, GFP_KERNEL);
870 if (!page)
871 return -ENOMEM;
872
873 dma_addr = iseries_hv_map(page, 256, DMA_FROM_DEVICE);
874 if (dma_addr == DMA_ERROR_CODE) {
875 kfree(page);
876 return -ENOMEM;
877 }
878 memset(page, 0, 256);
879 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
880 vsp_cmd.cmd = 33;
881 vsp_cmd.sub_data.kern.token = dma_addr;
882 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
883 vsp_cmd.sub_data.kern.side = (u64)m->private;
884 vsp_cmd.sub_data.kern.length = 256;
885 mb();
886 rc = signal_vsp_instruction(&vsp_cmd);
887 iseries_hv_unmap(dma_addr, 256, DMA_FROM_DEVICE);
888 if (rc) {
889 kfree(page);
890 return rc;
891 }
892 if (vsp_cmd.result_code != 0) {
893 kfree(page);
894 return -ENOMEM;
895 }
896 p = page;
897 while (p - page < 256) {
898 if (*p == '\0' || *p == '\n') {
899 *p = '\n';
900 break;
901 }
902 p++;
903
904 }
905 seq_write(m, page, p - page);
906 kfree(page);
907 return 0;
908}
909
910static int mf_cmdline_proc_open(struct inode *inode, struct file *file)
911{
912 return single_open(file, mf_cmdline_proc_show, PDE(inode)->data);
913}
914
915#if 0
916static int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side)
917{
918 struct vsp_cmd_data vsp_cmd;
919 int rc;
920 int len = *size;
921 dma_addr_t dma_addr;
922
923 dma_addr = iseries_hv_map(buffer, len, DMA_FROM_DEVICE);
924 memset(buffer, 0, len);
925 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
926 vsp_cmd.cmd = 32;
927 vsp_cmd.sub_data.kern.token = dma_addr;
928 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
929 vsp_cmd.sub_data.kern.side = side;
930 vsp_cmd.sub_data.kern.offset = offset;
931 vsp_cmd.sub_data.kern.length = len;
932 mb();
933 rc = signal_vsp_instruction(&vsp_cmd);
934 if (rc == 0) {
935 if (vsp_cmd.result_code == 0)
936 *size = vsp_cmd.sub_data.length_out;
937 else
938 rc = -ENOMEM;
939 }
940
941 iseries_hv_unmap(dma_addr, len, DMA_FROM_DEVICE);
942
943 return rc;
944}
945
946static int proc_mf_dump_vmlinux(char *page, char **start, off_t off,
947 int count, int *eof, void *data)
948{
949 int sizeToGet = count;
950
951 if (!capable(CAP_SYS_ADMIN))
952 return -EACCES;
953
954 if (mf_getVmlinuxChunk(page, &sizeToGet, off, (u64)data) == 0) {
955 if (sizeToGet != 0) {
956 *start = page + off;
957 return sizeToGet;
958 }
959 *eof = 1;
960 return 0;
961 }
962 *eof = 1;
963 return 0;
964}
965#endif
966
967static int mf_side_proc_show(struct seq_file *m, void *v)
968{
969 char mf_current_side = ' ';
970 struct vsp_cmd_data vsp_cmd;
971
972 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
973 vsp_cmd.cmd = 2;
974 vsp_cmd.sub_data.ipl_type = 0;
975 mb();
976
977 if (signal_vsp_instruction(&vsp_cmd) == 0) {
978 if (vsp_cmd.result_code == 0) {
979 switch (vsp_cmd.sub_data.ipl_type) {
980 case 0: mf_current_side = 'A';
981 break;
982 case 1: mf_current_side = 'B';
983 break;
984 case 2: mf_current_side = 'C';
985 break;
986 default: mf_current_side = 'D';
987 break;
988 }
989 }
990 }
991
992 seq_printf(m, "%c\n", mf_current_side);
993 return 0;
994}
995
996static int mf_side_proc_open(struct inode *inode, struct file *file)
997{
998 return single_open(file, mf_side_proc_show, NULL);
999}
1000
1001static ssize_t mf_side_proc_write(struct file *file, const char __user *buffer,
1002 size_t count, loff_t *pos)
1003{
1004 char side;
1005 u64 newSide;
1006 struct vsp_cmd_data vsp_cmd;
1007
1008 if (!capable(CAP_SYS_ADMIN))
1009 return -EACCES;
1010
1011 if (count == 0)
1012 return 0;
1013
1014 if (get_user(side, buffer))
1015 return -EFAULT;
1016
1017 switch (side) {
1018 case 'A': newSide = 0;
1019 break;
1020 case 'B': newSide = 1;
1021 break;
1022 case 'C': newSide = 2;
1023 break;
1024 case 'D': newSide = 3;
1025 break;
1026 default:
1027 printk(KERN_ERR "mf_proc.c: proc_mf_change_side: invalid side\n");
1028 return -EINVAL;
1029 }
1030
1031 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1032 vsp_cmd.sub_data.ipl_type = newSide;
1033 vsp_cmd.cmd = 10;
1034
1035 (void)signal_vsp_instruction(&vsp_cmd);
1036
1037 return count;
1038}
1039
1040static const struct file_operations mf_side_proc_fops = {
1041 .owner = THIS_MODULE,
1042 .open = mf_side_proc_open,
1043 .read = seq_read,
1044 .llseek = seq_lseek,
1045 .release = single_release,
1046 .write = mf_side_proc_write,
1047};
1048
1049static int mf_src_proc_show(struct seq_file *m, void *v)
1050{
1051 return 0;
1052}
1053
1054static int mf_src_proc_open(struct inode *inode, struct file *file)
1055{
1056 return single_open(file, mf_src_proc_show, NULL);
1057}
1058
1059static ssize_t mf_src_proc_write(struct file *file, const char __user *buffer,
1060 size_t count, loff_t *pos)
1061{
1062 char stkbuf[10];
1063
1064 if (!capable(CAP_SYS_ADMIN))
1065 return -EACCES;
1066
1067 if ((count < 4) && (count != 1)) {
1068 printk(KERN_ERR "mf_proc: invalid src\n");
1069 return -EINVAL;
1070 }
1071
1072 if (count > (sizeof(stkbuf) - 1))
1073 count = sizeof(stkbuf) - 1;
1074 if (copy_from_user(stkbuf, buffer, count))
1075 return -EFAULT;
1076
1077 if ((count == 1) && (*stkbuf == '\0'))
1078 mf_clear_src();
1079 else
1080 mf_display_src(*(u32 *)stkbuf);
1081
1082 return count;
1083}
1084
1085static const struct file_operations mf_src_proc_fops = {
1086 .owner = THIS_MODULE,
1087 .open = mf_src_proc_open,
1088 .read = seq_read,
1089 .llseek = seq_lseek,
1090 .release = single_release,
1091 .write = mf_src_proc_write,
1092};
1093
1094static ssize_t mf_cmdline_proc_write(struct file *file, const char __user *buffer,
1095 size_t count, loff_t *pos)
1096{
1097 void *data = PDE(file->f_path.dentry->d_inode)->data;
1098 struct vsp_cmd_data vsp_cmd;
1099 dma_addr_t dma_addr;
1100 char *page;
1101 int ret = -EACCES;
1102
1103 if (!capable(CAP_SYS_ADMIN))
1104 goto out;
1105
1106 dma_addr = 0;
1107 page = iseries_hv_alloc(count, &dma_addr, GFP_ATOMIC);
1108 ret = -ENOMEM;
1109 if (page == NULL)
1110 goto out;
1111
1112 ret = -EFAULT;
1113 if (copy_from_user(page, buffer, count))
1114 goto out_free;
1115
1116 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1117 vsp_cmd.cmd = 31;
1118 vsp_cmd.sub_data.kern.token = dma_addr;
1119 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
1120 vsp_cmd.sub_data.kern.side = (u64)data;
1121 vsp_cmd.sub_data.kern.length = count;
1122 mb();
1123 (void)signal_vsp_instruction(&vsp_cmd);
1124 ret = count;
1125
1126out_free:
1127 iseries_hv_free(count, page, dma_addr);
1128out:
1129 return ret;
1130}
1131
1132static const struct file_operations mf_cmdline_proc_fops = {
1133 .owner = THIS_MODULE,
1134 .open = mf_cmdline_proc_open,
1135 .read = seq_read,
1136 .llseek = seq_lseek,
1137 .release = single_release,
1138 .write = mf_cmdline_proc_write,
1139};
1140
1141static ssize_t proc_mf_change_vmlinux(struct file *file,
1142 const char __user *buf,
1143 size_t count, loff_t *ppos)
1144{
1145 struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
1146 ssize_t rc;
1147 dma_addr_t dma_addr;
1148 char *page;
1149 struct vsp_cmd_data vsp_cmd;
1150
1151 rc = -EACCES;
1152 if (!capable(CAP_SYS_ADMIN))
1153 goto out;
1154
1155 dma_addr = 0;
1156 page = iseries_hv_alloc(count, &dma_addr, GFP_ATOMIC);
1157 rc = -ENOMEM;
1158 if (page == NULL) {
1159 printk(KERN_ERR "mf.c: couldn't allocate memory to set vmlinux chunk\n");
1160 goto out;
1161 }
1162 rc = -EFAULT;
1163 if (copy_from_user(page, buf, count))
1164 goto out_free;
1165
1166 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1167 vsp_cmd.cmd = 30;
1168 vsp_cmd.sub_data.kern.token = dma_addr;
1169 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
1170 vsp_cmd.sub_data.kern.side = (u64)dp->data;
1171 vsp_cmd.sub_data.kern.offset = *ppos;
1172 vsp_cmd.sub_data.kern.length = count;
1173 mb();
1174 rc = signal_vsp_instruction(&vsp_cmd);
1175 if (rc)
1176 goto out_free;
1177 rc = -ENOMEM;
1178 if (vsp_cmd.result_code != 0)
1179 goto out_free;
1180
1181 *ppos += count;
1182 rc = count;
1183out_free:
1184 iseries_hv_free(count, page, dma_addr);
1185out:
1186 return rc;
1187}
1188
1189static const struct file_operations proc_vmlinux_operations = {
1190 .write = proc_mf_change_vmlinux,
1191 .llseek = default_llseek,
1192};
1193
1194static int __init mf_proc_init(void)
1195{
1196 struct proc_dir_entry *mf_proc_root;
1197 struct proc_dir_entry *ent;
1198 struct proc_dir_entry *mf;
1199 char name[2];
1200 int i;
1201
1202 if (!firmware_has_feature(FW_FEATURE_ISERIES))
1203 return 0;
1204
1205 mf_proc_root = proc_mkdir("iSeries/mf", NULL);
1206 if (!mf_proc_root)
1207 return 1;
1208
1209 name[1] = '\0';
1210 for (i = 0; i < 4; i++) {
1211 name[0] = 'A' + i;
1212 mf = proc_mkdir(name, mf_proc_root);
1213 if (!mf)
1214 return 1;
1215
1216 ent = proc_create_data("cmdline", S_IRUSR|S_IWUSR, mf,
1217 &mf_cmdline_proc_fops, (void *)(long)i);
1218 if (!ent)
1219 return 1;
1220
1221 if (i == 3) /* no vmlinux entry for 'D' */
1222 continue;
1223
1224 ent = proc_create_data("vmlinux", S_IFREG|S_IWUSR, mf,
1225 &proc_vmlinux_operations,
1226 (void *)(long)i);
1227 if (!ent)
1228 return 1;
1229 }
1230
1231 ent = proc_create("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root,
1232 &mf_side_proc_fops);
1233 if (!ent)
1234 return 1;
1235
1236 ent = proc_create("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root,
1237 &mf_src_proc_fops);
1238 if (!ent)
1239 return 1;
1240
1241 return 0;
1242}
1243
1244__initcall(mf_proc_init);
1245
1246#endif /* CONFIG_PROC_FS */
1247
1248/*
1249 * Get the RTC from the virtual service processor
1250 * This requires flowing LpEvents to the primary partition
1251 */
1252void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
1253{
1254 mf_get_rtc(rtc_tm);
1255 rtc_tm->tm_mon--;
1256}
1257
1258/*
1259 * Set the RTC in the virtual service processor
1260 * This requires flowing LpEvents to the primary partition
1261 */
1262int iSeries_set_rtc_time(struct rtc_time *tm)
1263{
1264 mf_set_rtc(tm);
1265 return 0;
1266}
1267
1268unsigned long iSeries_get_boot_time(void)
1269{
1270 struct rtc_time tm;
1271
1272 mf_get_boot_rtc(&tm);
1273 return mktime(tm.tm_year + 1900, tm.tm_mon, tm.tm_mday,
1274 tm.tm_hour, tm.tm_min, tm.tm_sec);
1275}
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
deleted file mode 100644
index 2c6ff0fdac98..000000000000
--- a/arch/powerpc/platforms/iseries/misc.S
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-2005 IBM Corp
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
8 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <asm/processor.h>
17#include <asm/asm-offsets.h>
18#include <asm/ppc_asm.h>
19
20 .text
21
22/* Handle pending interrupts in interrupt context */
23_GLOBAL(iseries_handle_interrupts)
24 li r0,0x5555
25 sc
26 blr
diff --git a/arch/powerpc/platforms/iseries/naca.h b/arch/powerpc/platforms/iseries/naca.h
deleted file mode 100644
index f01708e12862..000000000000
--- a/arch/powerpc/platforms/iseries/naca.h
+++ /dev/null
@@ -1,24 +0,0 @@
1#ifndef _PLATFORMS_ISERIES_NACA_H
2#define _PLATFORMS_ISERIES_NACA_H
3
4/*
5 * c 2001 PPC 64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <asm/types.h>
14
15struct naca_struct {
16 /* Kernel only data - undefined for user space */
17 const void *xItVpdAreas; /* VPD Data 0x00 */
18 void *xRamDisk; /* iSeries ramdisk 0x08 */
19 u64 xRamDiskSize; /* In pages 0x10 */
20};
21
22extern struct naca_struct naca;
23
24#endif /* _PLATFORMS_ISERIES_NACA_H */
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
deleted file mode 100644
index c75412884625..000000000000
--- a/arch/powerpc/platforms/iseries/pci.c
+++ /dev/null
@@ -1,919 +0,0 @@
1/*
2 * Copyright (C) 2001 Allan Trautman, IBM Corporation
3 * Copyright (C) 2005,2007 Stephen Rothwell, IBM Corp
4 *
5 * iSeries specific routines for PCI.
6 *
7 * Based on code from pci.c and iSeries_pci.c 32bit
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#undef DEBUG
25
26#include <linux/jiffies.h>
27#include <linux/kernel.h>
28#include <linux/list.h>
29#include <linux/string.h>
30#include <linux/slab.h>
31#include <linux/init.h>
32#include <linux/pci.h>
33#include <linux/of.h>
34#include <linux/ratelimit.h>
35
36#include <asm/types.h>
37#include <asm/io.h>
38#include <asm/irq.h>
39#include <asm/prom.h>
40#include <asm/machdep.h>
41#include <asm/pci-bridge.h>
42#include <asm/iommu.h>
43#include <asm/abs_addr.h>
44#include <asm/firmware.h>
45
46#include <asm/iseries/hv_types.h>
47#include <asm/iseries/hv_call_xm.h>
48#include <asm/iseries/mf.h>
49#include <asm/iseries/iommu.h>
50
51#include <asm/ppc-pci.h>
52
53#include "irq.h"
54#include "pci.h"
55#include "call_pci.h"
56
57#define PCI_RETRY_MAX 3
58static int limit_pci_retries = 1; /* Set Retry Error on. */
59
60/*
61 * Table defines
62 * Each Entry size is 4 MB * 1024 Entries = 4GB I/O address space.
63 */
64#define IOMM_TABLE_MAX_ENTRIES 1024
65#define IOMM_TABLE_ENTRY_SIZE 0x0000000000400000UL
66#define BASE_IO_MEMORY 0xE000000000000000UL
67#define END_IO_MEMORY 0xEFFFFFFFFFFFFFFFUL
68
69static unsigned long max_io_memory = BASE_IO_MEMORY;
70static long current_iomm_table_entry;
71
72/*
73 * Lookup Tables.
74 */
75static struct device_node *iomm_table[IOMM_TABLE_MAX_ENTRIES];
76static u64 ds_addr_table[IOMM_TABLE_MAX_ENTRIES];
77
78static DEFINE_SPINLOCK(iomm_table_lock);
79
80/*
81 * Generate a Direct Select Address for the Hypervisor
82 */
83static inline u64 iseries_ds_addr(struct device_node *node)
84{
85 struct pci_dn *pdn = PCI_DN(node);
86 const u32 *sbp = of_get_property(node, "linux,subbus", NULL);
87
88 return ((u64)pdn->busno << 48) + ((u64)(sbp ? *sbp : 0) << 40)
89 + ((u64)0x10 << 32);
90}
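/*
 * For illustration only -- not from the removed file: the Direct Select
 * Address built by iseries_ds_addr() above is just bit packing: bus number
 * at bit 48, sub-bus at bit 40 and the constant 0x10 at bit 32.  The bus
 * and sub-bus values below are made up for the example.
 */
#include <stdio.h>
#include <stdint.h>

static uint64_t ds_addr(uint64_t busno, uint64_t subbus)
{
	return (busno << 48) + (subbus << 40) + ((uint64_t)0x10 << 32);
}

int main(void)
{
	/* Hypothetical bus 0x18, sub-bus 0x58. */
	printf("dsa = 0x%016llx\n",
	       (unsigned long long)ds_addr(0x18, 0x58));
	/* prints dsa = 0x0018581000000000 */
	return 0;
}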
91
92/*
93 * Size of Bus VPD data
94 */
95#define BUS_VPDSIZE 1024
96
97/*
98 * Bus Vpd Tags
99 */
100#define VPD_END_OF_AREA 0x79
101#define VPD_ID_STRING 0x82
102#define VPD_VENDOR_AREA 0x84
103
104/*
105 * Mfg Area Tags
106 */
107#define VPD_FRU_FRAME_ID 0x4649 /* "FI" */
108#define VPD_SLOT_MAP_FORMAT 0x4D46 /* "MF" */
109#define VPD_SLOT_MAP 0x534D /* "SM" */
110
111/*
112 * Structures of the areas
113 */
114struct mfg_vpd_area {
115 u16 tag;
116 u8 length;
117 u8 data1;
118 u8 data2;
119};
120#define MFG_ENTRY_SIZE 3
121
122struct slot_map {
123 u8 agent;
124 u8 secondary_agent;
125 u8 phb;
126 char card_location[3];
127 char parms[8];
128 char reserved[2];
129};
130#define SLOT_ENTRY_SIZE 16
131
132/*
133 * Parse the Slot Area
134 */
135static void __init iseries_parse_slot_area(struct slot_map *map, int len,
136 HvAgentId agent, u8 *phb, char card[4])
137{
138 /*
139 * Parse Slot label until we find the one requested
140 */
141 while (len > 0) {
142 if (map->agent == agent) {
143 /*
144 * If the PHB wasn't found yet, grab the first one found.
145 */
146 if (*phb == 0xff)
147 *phb = map->phb;
148 /* Found it, extract the data. */
149 if (map->phb == *phb) {
150 memcpy(card, &map->card_location, 3);
151 card[3] = 0;
152 break;
153 }
154 }
155 /* Point to the next Slot */
156 map = (struct slot_map *)((char *)map + SLOT_ENTRY_SIZE);
157 len -= SLOT_ENTRY_SIZE;
158 }
159}
160
161/*
162 * Parse the Mfg Area
163 */
164static void __init iseries_parse_mfg_area(struct mfg_vpd_area *area, int len,
165 HvAgentId agent, u8 *phb, u8 *frame, char card[4])
166{
167 u16 slot_map_fmt = 0;
168
169 /* Parse Mfg Data */
170 while (len > 0) {
171 int mfg_tag_len = area->length;
172 /* Frame ID (FI 4649020310 ) */
173 if (area->tag == VPD_FRU_FRAME_ID)
174 *frame = area->data1;
175 /* Slot Map Format (MF 4D46020004 ) */
176 else if (area->tag == VPD_SLOT_MAP_FORMAT)
177 slot_map_fmt = (area->data1 * 256)
178 + area->data2;
179 /* Slot Map (SM 534D90 */
180 else if (area->tag == VPD_SLOT_MAP) {
181 struct slot_map *slot_map;
182
183 if (slot_map_fmt == 0x1004)
184 slot_map = (struct slot_map *)((char *)area
185 + MFG_ENTRY_SIZE + 1);
186 else
187 slot_map = (struct slot_map *)((char *)area
188 + MFG_ENTRY_SIZE);
189 iseries_parse_slot_area(slot_map, mfg_tag_len,
190 agent, phb, card);
191 }
192 /*
193 * Point to the next Mfg Area
194 * Use the defined size; sizeof gives the wrong answer
195 */
196 area = (struct mfg_vpd_area *)((char *)area + mfg_tag_len
197 + MFG_ENTRY_SIZE);
198 len -= (mfg_tag_len + MFG_ENTRY_SIZE);
199 }
200}
201
202/*
203 * Look for "BUS".. Data is not Null terminated.
204 * PHBID of 0xFF indicates PHB was not found in VPD Data.
205 */
206static u8 __init iseries_parse_phbid(u8 *area, int len)
207{
208 while (len > 0) {
209 if ((*area == 'B') && (*(area + 1) == 'U')
210 && (*(area + 2) == 'S')) {
211 area += 3;
212 while (*area == ' ')
213 area++;
214 return *area & 0x0F;
215 }
216 area++;
217 len--;
218 }
219 return 0xff;
220}
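/*
 * For illustration only -- not part of the removed file: a standalone
 * sketch of the scan done by iseries_parse_phbid() above.  It looks for
 * the literal characters "BUS" in the bus VPD id string, skips any spaces
 * and takes the low nibble of the next character; 0xff means not found.
 * The sample id string is invented for the example.
 */
#include <stdio.h>
#include <stdint.h>

static uint8_t parse_phbid(const uint8_t *area, int len)
{
	while (len > 0) {
		if (area[0] == 'B' && area[1] == 'U' && area[2] == 'S') {
			area += 3;
			while (*area == ' ')
				area++;
			return *area & 0x0f;
		}
		area++;
		len--;
	}
	return 0xff;
}

int main(void)
{
	const uint8_t vpd[] = "sample id string BUS  3";

	/* prints phb = 3 */
	printf("phb = %u\n", (unsigned)parse_phbid(vpd, sizeof(vpd) - 1));
	return 0;
}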
221
222/*
223 * Parse out the VPD Areas
224 */
225static void __init iseries_parse_vpd(u8 *data, int data_len,
226 HvAgentId agent, u8 *frame, char card[4])
227{
228 u8 phb = 0xff;
229
230 while (data_len > 0) {
231 int len;
232 u8 tag = *data;
233
234 if (tag == VPD_END_OF_AREA)
235 break;
236 len = *(data + 1) + (*(data + 2) * 256);
237 data += 3;
238 data_len -= 3;
239 if (tag == VPD_ID_STRING)
240 phb = iseries_parse_phbid(data, len);
241 else if (tag == VPD_VENDOR_AREA)
242 iseries_parse_mfg_area((struct mfg_vpd_area *)data, len,
243 agent, &phb, frame, card);
244 /* Point to next Area. */
245 data += len;
246 data_len -= len;
247 }
248}
249
250static int __init iseries_get_location_code(u16 bus, HvAgentId agent,
251 u8 *frame, char card[4])
252{
253 int status = 0;
254 int bus_vpd_len = 0;
255 u8 *bus_vpd = kmalloc(BUS_VPDSIZE, GFP_KERNEL);
256
257 if (bus_vpd == NULL) {
258 printk("PCI: Bus VPD Buffer allocation failure.\n");
259 return 0;
260 }
261 bus_vpd_len = HvCallPci_getBusVpd(bus, iseries_hv_addr(bus_vpd),
262 BUS_VPDSIZE);
263 if (bus_vpd_len == 0) {
264 printk("PCI: Bus VPD Buffer zero length.\n");
265 goto out_free;
266 }
267 /* printk("PCI: bus_vpd: %p, %d\n",bus_vpd, bus_vpd_len); */
268 /* Make sure this is what I think it is */
269 if (*bus_vpd != VPD_ID_STRING) {
270 printk("PCI: Bus VPD Buffer missing starting tag.\n");
271 goto out_free;
272 }
273 iseries_parse_vpd(bus_vpd, bus_vpd_len, agent, frame, card);
274 status = 1;
275out_free:
276 kfree(bus_vpd);
277 return status;
278}
279
280/*
281 * Prints the device information.
282 * - Pass in pci_dev* pointer to the device.
283 * - Pass in the bus and sub-bus numbers.
284 *
285 * Format:
286 * PCI: Bus 0, Device 26, Vendor 0x12AE Frame 1, Card C10 Ethernet
287 * controller
288 */
289static void __init iseries_device_information(struct pci_dev *pdev,
290 u16 bus, HvSubBusNumber subbus)
291{
292 u8 frame = 0;
293 char card[4];
294 HvAgentId agent;
295
296 agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus),
297 ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus));
298
299 if (iseries_get_location_code(bus, agent, &frame, card)) {
300 printk(KERN_INFO "PCI: %s, Vendor %04X Frame%3d, "
301 "Card %4s 0x%04X\n", pci_name(pdev), pdev->vendor,
302 frame, card, (int)(pdev->class >> 8));
303 }
304}
305
306/*
307 * iomm_table_allocate_entry
308 *
309 * Adds pci_dev entry in address translation table
310 *
311 * - Allocates the number of entries required in the table based on the
312 *   BAR size.
313 * - Allocates starting at BASE_IO_MEMORY and increases.
314 * - The size is rounded up to be a multiple of the entry size.
315 * - CurrentIndex is incremented to keep track of the last entry.
316 * - Builds the resource entry for allocated BARs.
317 */
318static void __init iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
319{
320 struct resource *bar_res = &dev->resource[bar_num];
321 long bar_size = pci_resource_len(dev, bar_num);
322 struct device_node *dn = pci_device_to_OF_node(dev);
323
324 /*
325 * No space to allocate, quick exit, skip Allocation.
326 */
327 if (bar_size == 0)
328 return;
329 /*
330 * Set Resource values.
331 */
332 spin_lock(&iomm_table_lock);
333 bar_res->start = BASE_IO_MEMORY +
334 IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
335 bar_res->end = bar_res->start + bar_size - 1;
336 /*
337 * Allocate the number of table entries needed for BAR.
338 */
339 while (bar_size > 0 ) {
340 iomm_table[current_iomm_table_entry] = dn;
341 ds_addr_table[current_iomm_table_entry] =
342 iseries_ds_addr(dn) | (bar_num << 24);
343 bar_size -= IOMM_TABLE_ENTRY_SIZE;
344 ++current_iomm_table_entry;
345 }
346 max_io_memory = BASE_IO_MEMORY +
347 IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
348 spin_unlock(&iomm_table_lock);
349}
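/*
 * For illustration only -- not from the removed file: the allocation above
 * rounds each BAR up to whole 4 MB table entries starting at
 * BASE_IO_MEMORY.  A hypothetical 10 MB BAR placed at the start of an
 * empty table therefore consumes three entries.
 */
#include <stdio.h>
#include <stdint.h>

#define IOMM_TABLE_ENTRY_SIZE	0x0000000000400000ULL	/* 4 MB */
#define BASE_IO_MEMORY		0xE000000000000000ULL

int main(void)
{
	uint64_t bar_size = 10 * 1024 * 1024;	/* hypothetical BAR size */
	uint64_t entries = (bar_size + IOMM_TABLE_ENTRY_SIZE - 1) /
			   IOMM_TABLE_ENTRY_SIZE;
	uint64_t start = BASE_IO_MEMORY;	/* assumes an empty table */

	printf("entries used: %llu\n", (unsigned long long)entries);
	printf("resource: 0x%016llx - 0x%016llx\n",
	       (unsigned long long)start,
	       (unsigned long long)(start + bar_size - 1));
	return 0;
}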
350
351/*
352 * allocate_device_bars
353 *
354 * - Allocates ALL pci_dev BARs and updates the resources with the
355 *   BAR value.  BARs with zero length are skipped (no table entry
356 *   is allocated).  The HvCallPci_getBarParms is used to get the
357 *   size of the BAR space.  It calls iomm_table_allocate_entry to
358 *   allocate each entry.
359 * - Loops through the BAR resources (0 - 5) including the ROM,
360 *   which is resource 6.
361 */
362static void __init allocate_device_bars(struct pci_dev *dev)
363{
364 int bar_num;
365
366 for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num)
367 iomm_table_allocate_entry(dev, bar_num);
368}
369
370/*
371 * Log error information to system console.
372 * Filter out the device not there errors.
373 * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx
374 * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
375 * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
376 */
377static void pci_log_error(char *error, int bus, int subbus,
378 int agent, int hv_res)
379{
380 if (hv_res == 0x0302)
381 return;
382 printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X",
383 error, bus, subbus, agent, hv_res);
384}
385
386/*
387 * Look down the chain to find the matching device node
388 */
389static struct device_node *find_device_node(int bus, int devfn)
390{
391 struct device_node *node;
392
393 for (node = NULL; (node = of_find_all_nodes(node)); ) {
394 struct pci_dn *pdn = PCI_DN(node);
395
396 if (pdn && (bus == pdn->busno) && (devfn == pdn->devfn))
397 return node;
398 }
399 return NULL;
400}
401
402/*
403 * iSeries_pcibios_fixup_resources
404 *
405 * Fixes up all resources for devices
406 */
407void __init iSeries_pcibios_fixup_resources(struct pci_dev *pdev)
408{
409 const u32 *agent;
410 const u32 *sub_bus;
411 unsigned char bus = pdev->bus->number;
412 struct device_node *node;
413 int i;
414
415 node = pci_device_to_OF_node(pdev);
416 pr_debug("PCI: iSeries %s, pdev %p, node %p\n",
417 pci_name(pdev), pdev, node);
418 if (!node) {
419 printk("PCI: %s disabled, device tree entry not found !\n",
420 pci_name(pdev));
421 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
422 pdev->resource[i].flags = 0;
423 return;
424 }
425 sub_bus = of_get_property(node, "linux,subbus", NULL);
426 agent = of_get_property(node, "linux,agent-id", NULL);
427 if (agent && sub_bus) {
428 u8 irq = iSeries_allocate_IRQ(bus, 0, *sub_bus);
429 int err;
430
431 err = HvCallXm_connectBusUnit(bus, *sub_bus, *agent, irq);
432 if (err)
433 pci_log_error("Connect Bus Unit",
434 bus, *sub_bus, *agent, err);
435 else {
436 err = HvCallPci_configStore8(bus, *sub_bus,
437 *agent, PCI_INTERRUPT_LINE, irq);
438 if (err)
439 pci_log_error("PciCfgStore Irq Failed!",
440 bus, *sub_bus, *agent, err);
441 else
442 pdev->irq = irq;
443 }
444 }
445
446 allocate_device_bars(pdev);
447 if (likely(sub_bus))
448 iseries_device_information(pdev, bus, *sub_bus);
449 else
450 printk(KERN_ERR "PCI: Device node %s has missing or invalid "
451 "linux,subbus property\n", node->full_name);
452}
453
454/*
455 * iSeries_pci_final_fixup(void)
456 */
457void __init iSeries_pci_final_fixup(void)
458{
459 /* Fix up the device node and pci_dev relationship */
460 mf_display_src(0xC9000100);
461 iSeries_activate_IRQs();
462 mf_display_src(0xC9000200);
463}
464
465/*
466 * Config space read and write functions.
467 * For now at least, we look for the device node for the bus and devfn
468 * that we are asked to access. It may be possible to translate the devfn
469 * to a subbus and deviceid more directly.
470 */
471static u64 hv_cfg_read_func[4] = {
472 HvCallPciConfigLoad8, HvCallPciConfigLoad16,
473 HvCallPciConfigLoad32, HvCallPciConfigLoad32
474};
475
476static u64 hv_cfg_write_func[4] = {
477 HvCallPciConfigStore8, HvCallPciConfigStore16,
478 HvCallPciConfigStore32, HvCallPciConfigStore32
479};
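/*
 * For illustration only -- not part of the removed file: the config-space
 * accessors below index these tables with (size - 1) & 3, which maps
 * access sizes 1, 2 and 4 onto the 8-, 16- and 32-bit hypervisor calls.
 */
#include <stdio.h>

int main(void)
{
	static const char *fn[4] = {
		"ConfigLoad8", "ConfigLoad16", "ConfigLoad32", "ConfigLoad32"
	};
	static const int sizes[] = { 1, 2, 4 };
	int i;

	/* size 1 -> index 0, size 2 -> index 1, size 4 -> index 3 */
	for (i = 0; i < 3; i++)
		printf("size %d -> %s\n", sizes[i], fn[(sizes[i] - 1) & 3]);
	return 0;
}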
480
481/*
482 * Read PCI config space
483 */
484static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
485 int offset, int size, u32 *val)
486{
487 struct device_node *node = find_device_node(bus->number, devfn);
488 u64 fn;
489 struct HvCallPci_LoadReturn ret;
490
491 if (node == NULL)
492 return PCIBIOS_DEVICE_NOT_FOUND;
493 if (offset > 255) {
494 *val = ~0;
495 return PCIBIOS_BAD_REGISTER_NUMBER;
496 }
497
498 fn = hv_cfg_read_func[(size - 1) & 3];
499 HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);
500
501 if (ret.rc != 0) {
502 *val = ~0;
503 return PCIBIOS_DEVICE_NOT_FOUND; /* or something */
504 }
505
506 *val = ret.value;
507 return 0;
508}
509
510/*
511 * Write PCI config space
512 */
513
514static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
515 int offset, int size, u32 val)
516{
517 struct device_node *node = find_device_node(bus->number, devfn);
518 u64 fn;
519 u64 ret;
520
521 if (node == NULL)
522 return PCIBIOS_DEVICE_NOT_FOUND;
523 if (offset > 255)
524 return PCIBIOS_BAD_REGISTER_NUMBER;
525
526 fn = hv_cfg_write_func[(size - 1) & 3];
527 ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);
528
529 if (ret != 0)
530 return PCIBIOS_DEVICE_NOT_FOUND;
531
532 return 0;
533}
534
535static struct pci_ops iSeries_pci_ops = {
536 .read = iSeries_pci_read_config,
537 .write = iSeries_pci_write_config
538};
539
540/*
541 * Check Return Code
542 * -> On Failure, print and log information.
543 * Increment the retry count; if it exceeds the max, panic the partition.
544 *
545 * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
546 * PCI: Device 23.90 ReadL Retry( 1)
547 * PCI: Device 23.90 ReadL Retry Successful(1)
548 */
549static int check_return_code(char *type, struct device_node *dn,
550 int *retry, u64 ret)
551{
552 if (ret != 0) {
553 struct pci_dn *pdn = PCI_DN(dn);
554
555 (*retry)++;
556 printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
557 type, pdn->busno, pdn->devfn,
558 *retry, (int)ret);
559 /*
560 * Bump the retry and check for retry count exceeded.
561 * If, Exceeded, panic the system.
562 */
563 if (((*retry) > PCI_RETRY_MAX) &&
564 (limit_pci_retries > 0)) {
565 mf_display_src(0xB6000103);
566 panic_timeout = 0;
567 panic("PCI: Hardware I/O Error, SRC B6000103, "
568 "Automatic Reboot Disabled.\n");
569 }
570 return -1; /* Retry Try */
571 }
572 return 0;
573}
574
575/*
576 * Translate the I/O Address into a device node, bar, and bar offset.
577 * Note: Make sure the passed variables end up on the stack to avoid
578 * exposing them as device globals.
579 */
580static inline struct device_node *xlate_iomm_address(
581 const volatile void __iomem *addr,
582 u64 *dsaptr, u64 *bar_offset, const char *func)
583{
584 unsigned long orig_addr;
585 unsigned long base_addr;
586 unsigned long ind;
587 struct device_node *dn;
588
589 orig_addr = (unsigned long __force)addr;
590 if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory)) {
591 static DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 10);
592
593 if (__ratelimit(&ratelimit))
594 printk(KERN_ERR
595 "iSeries_%s: invalid access at IO address %p\n",
596 func, addr);
597 return NULL;
598 }
599 base_addr = orig_addr - BASE_IO_MEMORY;
600 ind = base_addr / IOMM_TABLE_ENTRY_SIZE;
601 dn = iomm_table[ind];
602
603 if (dn != NULL) {
604 *dsaptr = ds_addr_table[ind];
605 *bar_offset = base_addr % IOMM_TABLE_ENTRY_SIZE;
606 } else
607 panic("PCI: Invalid PCI IO address detected!\n");
608 return dn;
609}
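/*
 * For illustration only -- not from the removed file: the translation in
 * xlate_iomm_address() above is plain arithmetic -- subtract
 * BASE_IO_MEMORY, divide by the 4 MB entry size to get the table index,
 * and keep the remainder as the offset within the BAR.  The address below
 * is made up for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define IOMM_TABLE_ENTRY_SIZE	0x0000000000400000ULL	/* 4 MB */
#define BASE_IO_MEMORY		0xE000000000000000ULL

int main(void)
{
	uint64_t addr = BASE_IO_MEMORY + 5 * IOMM_TABLE_ENTRY_SIZE + 0x100;
	uint64_t base = addr - BASE_IO_MEMORY;

	printf("table index = %llu, bar offset = 0x%llx\n",
	       (unsigned long long)(base / IOMM_TABLE_ENTRY_SIZE),
	       (unsigned long long)(base % IOMM_TABLE_ENTRY_SIZE));
	/* prints: table index = 5, bar offset = 0x100 */
	return 0;
}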
610
611/*
612 * Read MM I/O Instructions for the iSeries
613 * On MM I/O error, all ones are returned and iSeries_pci_IoError is called;
614 * else, data is returned in Big Endian format.
615 */
616static u8 iseries_readb(const volatile void __iomem *addr)
617{
618 u64 bar_offset;
619 u64 dsa;
620 int retry = 0;
621 struct HvCallPci_LoadReturn ret;
622 struct device_node *dn =
623 xlate_iomm_address(addr, &dsa, &bar_offset, "read_byte");
624
625 if (dn == NULL)
626 return 0xff;
627 do {
628 HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, bar_offset, 0);
629 } while (check_return_code("RDB", dn, &retry, ret.rc) != 0);
630
631 return ret.value;
632}
633
634static u16 iseries_readw_be(const volatile void __iomem *addr)
635{
636 u64 bar_offset;
637 u64 dsa;
638 int retry = 0;
639 struct HvCallPci_LoadReturn ret;
640 struct device_node *dn =
641 xlate_iomm_address(addr, &dsa, &bar_offset, "read_word");
642
643 if (dn == NULL)
644 return 0xffff;
645 do {
646 HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
647 bar_offset, 0);
648 } while (check_return_code("RDW", dn, &retry, ret.rc) != 0);
649
650 return ret.value;
651}
652
653static u32 iseries_readl_be(const volatile void __iomem *addr)
654{
655 u64 bar_offset;
656 u64 dsa;
657 int retry = 0;
658 struct HvCallPci_LoadReturn ret;
659 struct device_node *dn =
660 xlate_iomm_address(addr, &dsa, &bar_offset, "read_long");
661
662 if (dn == NULL)
663 return 0xffffffff;
664 do {
665 HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa,
666 bar_offset, 0);
667 } while (check_return_code("RDL", dn, &retry, ret.rc) != 0);
668
669 return ret.value;
670}
671
672/*
673 * Write MM I/O Instructions for the iSeries
674 *
675 */
676static void iseries_writeb(u8 data, volatile void __iomem *addr)
677{
678 u64 bar_offset;
679 u64 dsa;
680 int retry = 0;
681 u64 rc;
682 struct device_node *dn =
683 xlate_iomm_address(addr, &dsa, &bar_offset, "write_byte");
684
685 if (dn == NULL)
686 return;
687 do {
688 rc = HvCall4(HvCallPciBarStore8, dsa, bar_offset, data, 0);
689 } while (check_return_code("WWB", dn, &retry, rc) != 0);
690}
691
692static void iseries_writew_be(u16 data, volatile void __iomem *addr)
693{
694 u64 bar_offset;
695 u64 dsa;
696 int retry = 0;
697 u64 rc;
698 struct device_node *dn =
699 xlate_iomm_address(addr, &dsa, &bar_offset, "write_word");
700
701 if (dn == NULL)
702 return;
703 do {
704 rc = HvCall4(HvCallPciBarStore16, dsa, bar_offset, data, 0);
705 } while (check_return_code("WWW", dn, &retry, rc) != 0);
706}
707
708static void iseries_writel_be(u32 data, volatile void __iomem *addr)
709{
710 u64 bar_offset;
711 u64 dsa;
712 int retry = 0;
713 u64 rc;
714 struct device_node *dn =
715 xlate_iomm_address(addr, &dsa, &bar_offset, "write_long");
716
717 if (dn == NULL)
718 return;
719 do {
720 rc = HvCall4(HvCallPciBarStore32, dsa, bar_offset, data, 0);
721 } while (check_return_code("WWL", dn, &retry, rc) != 0);
722}
723
724static u16 iseries_readw(const volatile void __iomem *addr)
725{
726 return le16_to_cpu(iseries_readw_be(addr));
727}
728
729static u32 iseries_readl(const volatile void __iomem *addr)
730{
731 return le32_to_cpu(iseries_readl_be(addr));
732}
733
734static void iseries_writew(u16 data, volatile void __iomem *addr)
735{
736 iseries_writew_be(cpu_to_le16(data), addr);
737}
738
739static void iseries_writel(u32 data, volatile void __iomem *addr)
740{
741	iseries_writel_be(cpu_to_le32(data), addr);
742}
743
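The *_be accessors above hand back data exactly as the hypervisor supplies it (big endian); iseries_readw()/iseries_writel() and friends convert to and from CPU order with le16_to_cpu()/cpu_to_le32(). On the big-endian iSeries CPU that conversion is a byte swap, which the stand-alone sketch below models with a plain swab instead of the kernel helpers.

#include <stdio.h>
#include <stdint.h>

/* Models what le16_to_cpu() does on a big-endian CPU: swap the bytes. */
static uint16_t swab16(uint16_t x)
{
	return (uint16_t)((x << 8) | (x >> 8));
}

int main(void)
{
	uint16_t raw_be = 0x1234;		/* what iseries_readw_be() would return */
	uint16_t cpu_val = swab16(raw_be);	/* what iseries_readw() returns on a BE CPU */

	printf("readw_be -> 0x%04x, readw -> 0x%04x\n", raw_be, cpu_val);
	return 0;
}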
744static void iseries_readsb(const volatile void __iomem *addr, void *buf,
745 unsigned long count)
746{
747 u8 *dst = buf;
748 while(count-- > 0)
749 *(dst++) = iseries_readb(addr);
750}
751
752static void iseries_readsw(const volatile void __iomem *addr, void *buf,
753 unsigned long count)
754{
755 u16 *dst = buf;
756 while(count-- > 0)
757 *(dst++) = iseries_readw_be(addr);
758}
759
760static void iseries_readsl(const volatile void __iomem *addr, void *buf,
761 unsigned long count)
762{
763 u32 *dst = buf;
764 while(count-- > 0)
765 *(dst++) = iseries_readl_be(addr);
766}
767
768static void iseries_writesb(volatile void __iomem *addr, const void *buf,
769 unsigned long count)
770{
771 const u8 *src = buf;
772 while(count-- > 0)
773 iseries_writeb(*(src++), addr);
774}
775
776static void iseries_writesw(volatile void __iomem *addr, const void *buf,
777 unsigned long count)
778{
779 const u16 *src = buf;
780 while(count-- > 0)
781 iseries_writew_be(*(src++), addr);
782}
783
784static void iseries_writesl(volatile void __iomem *addr, const void *buf,
785 unsigned long count)
786{
787 const u32 *src = buf;
788 while(count-- > 0)
789 iseries_writel_be(*(src++), addr);
790}
791
792static void iseries_memset_io(volatile void __iomem *addr, int c,
793 unsigned long n)
794{
795 volatile char __iomem *d = addr;
796
797 while (n-- > 0)
798 iseries_writeb(c, d++);
799}
800
801static void iseries_memcpy_fromio(void *dest, const volatile void __iomem *src,
802 unsigned long n)
803{
804 char *d = dest;
805 const volatile char __iomem *s = src;
806
807 while (n-- > 0)
808 *d++ = iseries_readb(s++);
809}
810
811static void iseries_memcpy_toio(volatile void __iomem *dest, const void *src,
812 unsigned long n)
813{
814 const char *s = src;
815 volatile char __iomem *d = dest;
816
817 while (n-- > 0)
818 iseries_writeb(*s++, d++);
819}
820
821/* We only set MMIO ops. The default PIO ops will default
822 * to the MMIO ops + pci_io_base, which is 0 on iSeries as
823 * expected, so both should work.
824 *
825 * Note that we don't implement the readq/writeq versions as
826 * I don't know of an HV call for doing so. Thus, the default
827 * operation will be used instead, which will fault as the value
828 * returned by iSeries for MMIO addresses always hits a non-mapped
829 * area. This is as good as the BUG() we used to have there.
830 */
831static struct ppc_pci_io __initdata iseries_pci_io = {
832 .readb = iseries_readb,
833 .readw = iseries_readw,
834 .readl = iseries_readl,
835 .readw_be = iseries_readw_be,
836 .readl_be = iseries_readl_be,
837 .writeb = iseries_writeb,
838 .writew = iseries_writew,
839 .writel = iseries_writel,
840 .writew_be = iseries_writew_be,
841 .writel_be = iseries_writel_be,
842 .readsb = iseries_readsb,
843 .readsw = iseries_readsw,
844 .readsl = iseries_readsl,
845 .writesb = iseries_writesb,
846 .writesw = iseries_writesw,
847 .writesl = iseries_writesl,
848 .memset_io = iseries_memset_io,
849 .memcpy_fromio = iseries_memcpy_fromio,
850 .memcpy_toio = iseries_memcpy_toio,
851};
852
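iseries_pci_io is a table of function pointers that gets copied into the global ppc_pci_io hook table in iSeries_pcibios_init() below; entries that are never filled in (readq/writeq here) fall through to the generic accessors. The sketch below is a simplified model of that "override what you can, default the rest" dispatch, not the exact ppc_pci_io machinery.

#include <stdio.h>

struct io_ops {
	unsigned char (*readb)(const void *addr);
	unsigned long long (*readq)(const void *addr);	/* left unset, like iSeries */
};

static struct io_ops ppc_io;	/* all NULL until a platform installs its hooks */

static unsigned char hooked_readb(const void *addr)
{
	(void)addr;
	return 0xab;			/* pretend this came from an HV call */
}

static unsigned char do_readb(const void *addr)
{
	if (ppc_io.readb)		/* platform hook takes precedence */
		return ppc_io.readb(addr);
	return *(const unsigned char *)addr;	/* generic direct access */
}

int main(void)
{
	unsigned char mem = 0x5a;

	printf("before hooks: 0x%02x\n", do_readb(&mem));
	ppc_io.readb = hooked_readb;	/* the "ppc_pci_io = iseries_pci_io" moment */
	printf("after hooks:  0x%02x\n", do_readb(&mem));
	return 0;
}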
853/*
854 * iSeries_pcibios_init
855 *
856 * Description:
857 * This function checks for all possible system PCI host bridges that connect
858 * PCI buses. The system hypervisor is queried as to the guest partition
859 * ownership status. A pci_controller is built for any bus which is partially
860 * owned or fully owned by this guest partition.
861 */
862void __init iSeries_pcibios_init(void)
863{
864 struct pci_controller *phb;
865 struct device_node *root = of_find_node_by_path("/");
866 struct device_node *node = NULL;
867
868 /* Install IO hooks */
869 ppc_pci_io = iseries_pci_io;
870
871 pci_probe_only = 1;
872
873 /* iSeries has no IO space in the common sense, it needs to set
874 * the IO base to 0
875 */
876 pci_io_base = 0;
877
878 if (root == NULL) {
879 printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
880 "of device tree\n");
881 return;
882 }
883 while ((node = of_get_next_child(root, node)) != NULL) {
884 HvBusNumber bus;
885 const u32 *busp;
886
887 if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
888 continue;
889
890 busp = of_get_property(node, "bus-range", NULL);
891 if (busp == NULL)
892 continue;
893 bus = *busp;
894 printk("bus %d appears to exist\n", bus);
895 phb = pcibios_alloc_controller(node);
896 if (phb == NULL)
897 continue;
898 /* All legacy iSeries PHBs are in domain zero */
899 phb->global_number = 0;
900
901 phb->first_busno = bus;
902 phb->last_busno = bus;
903 phb->ops = &iSeries_pci_ops;
904 phb->io_base_virt = (void __iomem *)_IO_BASE;
905 phb->io_resource.flags = IORESOURCE_IO;
906 phb->io_resource.start = BASE_IO_MEMORY;
907 phb->io_resource.end = END_IO_MEMORY;
908 phb->io_resource.name = "iSeries PCI IO";
909 phb->mem_resources[0].flags = IORESOURCE_MEM;
910 phb->mem_resources[0].start = BASE_IO_MEMORY;
911 phb->mem_resources[0].end = END_IO_MEMORY;
912		phb->mem_resources[0].name = "iSeries PCI MEM";
913 }
914
915 of_node_put(root);
916
917 pci_devs_phb_init();
918}
919
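iSeries_pcibios_init() above walks the children of the device-tree root, skips anything whose type is not "pci", and takes the first cell of the "bus-range" property as the bus number. The sketch below mirrors that filter over an invented array of fake nodes instead of a real device tree.

#include <stdio.h>
#include <string.h>
#include <stdint.h>

struct fake_node {
	const char *type;
	const uint32_t *bus_range;	/* NULL if the property is absent */
};

int main(void)
{
	static const uint32_t r0[2] = { 0x17, 0x17 };
	static const uint32_t r1[2] = { 0x23, 0x23 };
	struct fake_node children[] = {
		{ "pci",    r0   },
		{ "serial", NULL },
		{ "pci",    r1   },
	};

	for (size_t i = 0; i < sizeof(children) / sizeof(children[0]); i++) {
		struct fake_node *node = &children[i];

		if (node->type == NULL || strcmp(node->type, "pci") != 0)
			continue;	/* same filter as the loop above */
		if (node->bus_range == NULL)
			continue;	/* no "bus-range": skip it */

		printf("bus %u appears to exist\n",
		       (unsigned)node->bus_range[0]);
	}
	return 0;
}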
diff --git a/arch/powerpc/platforms/iseries/pci.h b/arch/powerpc/platforms/iseries/pci.h
deleted file mode 100644
index d9cf974c2718..000000000000
--- a/arch/powerpc/platforms/iseries/pci.h
+++ /dev/null
@@ -1,58 +0,0 @@
1#ifndef _PLATFORMS_ISERIES_PCI_H
2#define _PLATFORMS_ISERIES_PCI_H
3
4/*
5 * Created by Allan Trautman on Tue Feb 20, 2001.
6 *
7 * Define some useful macros for the iSeries pci routines.
8 * Copyright (C) 2001 Allan H Trautman, IBM Corporation
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the:
22 * Free Software Foundation, Inc.,
23 * 59 Temple Place, Suite 330,
24 * Boston, MA 02111-1307 USA
25 *
26 * Change Activity:
27 * Created Feb 20, 2001
28 * Added device reset, March 22, 2001
29 * Ported to ppc64, May 25, 2001
30 * End Change Activity
31 */
32
33/*
34 * Decodes Linux DevFn to iSeries DevFn, bridge device, or function.
35 * For Linux, see PCI_SLOT and PCI_FUNC in include/linux/pci.h
36 */
37
38#define ISERIES_PCI_AGENTID(idsel, func) \
39 (((idsel & 0x0F) << 4) | (func & 0x07))
40#define ISERIES_ENCODE_DEVICE(agentid) \
41 ((0x10) | ((agentid & 0x20) >> 2) | (agentid & 0x07))
42
43#define ISERIES_GET_DEVICE_FROM_SUBBUS(subbus) ((subbus >> 5) & 0x7)
44#define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus) ((subbus >> 2) & 0x7)
45
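The encode/decode macros above pack an IDSEL and function number into a single agent id and pull the device/function back out of a sub-bus value. They can be exercised directly in a small stand-alone program (the input values below are arbitrary examples):

#include <stdio.h>

#define ISERIES_PCI_AGENTID(idsel, func)	\
	(((idsel & 0x0F) << 4) | (func & 0x07))
#define ISERIES_ENCODE_DEVICE(agentid)		\
	((0x10) | ((agentid & 0x20) >> 2) | (agentid & 0x07))

#define ISERIES_GET_DEVICE_FROM_SUBBUS(subbus)		((subbus >> 5) & 0x7)
#define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus)	((subbus >> 2) & 0x7)

int main(void)
{
	int idsel = 3, func = 2;
	int agent = ISERIES_PCI_AGENTID(idsel, func);
	int subbus = 0x6c;	/* arbitrary sub-bus value for the decode side */

	printf("agent id    = 0x%02x\n", agent);
	printf("encoded dev = 0x%02x\n", ISERIES_ENCODE_DEVICE(agent));
	printf("subbus 0x%02x -> device %d, function %d\n", subbus,
	       ISERIES_GET_DEVICE_FROM_SUBBUS(subbus),
	       ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus));
	return 0;
}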
46struct pci_dev;
47
48#ifdef CONFIG_PCI
49extern void iSeries_pcibios_init(void);
50extern void iSeries_pci_final_fixup(void);
51extern void iSeries_pcibios_fixup_resources(struct pci_dev *dev);
52#else
53static inline void iSeries_pcibios_init(void) { }
54static inline void iSeries_pci_final_fixup(void) { }
55static inline void iSeries_pcibios_fixup_resources(struct pci_dev *dev) {}
56#endif
57
58#endif /* _PLATFORMS_ISERIES_PCI_H */
diff --git a/arch/powerpc/platforms/iseries/proc.c b/arch/powerpc/platforms/iseries/proc.c
deleted file mode 100644
index 06763682db47..000000000000
--- a/arch/powerpc/platforms/iseries/proc.c
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * Copyright (C) 2001 Kyle A. Lucke IBM Corporation
3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19#include <linux/init.h>
20#include <linux/proc_fs.h>
21#include <linux/seq_file.h>
22#include <linux/param.h> /* for HZ */
23#include <asm/paca.h>
24#include <asm/processor.h>
25#include <asm/time.h>
26#include <asm/lppaca.h>
27#include <asm/firmware.h>
28#include <asm/iseries/hv_call_xm.h>
29
30#include "processor_vpd.h"
31#include "main_store.h"
32
33static int __init iseries_proc_create(void)
34{
35 struct proc_dir_entry *e;
36
37 if (!firmware_has_feature(FW_FEATURE_ISERIES))
38 return 0;
39
40 e = proc_mkdir("iSeries", 0);
41 if (!e)
42 return 1;
43
44 return 0;
45}
46core_initcall(iseries_proc_create);
47
48static unsigned long startTitan = 0;
49static unsigned long startTb = 0;
50
51static int proc_titantod_show(struct seq_file *m, void *v)
52{
53 unsigned long tb0, titan_tod;
54
55 tb0 = get_tb();
56 titan_tod = HvCallXm_loadTod();
57
58 seq_printf(m, "Titan\n" );
59 seq_printf(m, " time base = %016lx\n", tb0);
60 seq_printf(m, " titan tod = %016lx\n", titan_tod);
61 seq_printf(m, " xProcFreq = %016x\n",
62 xIoHriProcessorVpd[0].xProcFreq);
63 seq_printf(m, " xTimeBaseFreq = %016x\n",
64 xIoHriProcessorVpd[0].xTimeBaseFreq);
65 seq_printf(m, " tb_ticks_per_jiffy = %lu\n", tb_ticks_per_jiffy);
66 seq_printf(m, " tb_ticks_per_usec = %lu\n", tb_ticks_per_usec);
67
68 if (!startTitan) {
69 startTitan = titan_tod;
70 startTb = tb0;
71 } else {
72 unsigned long titan_usec = (titan_tod - startTitan) >> 12;
73 unsigned long tb_ticks = (tb0 - startTb);
74 unsigned long titan_jiffies = titan_usec / (1000000/HZ);
75 unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ);
76 unsigned long titan_jiff_rem_usec =
77 titan_usec - titan_jiff_usec;
78 unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy;
79 unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy;
80 unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks;
81 unsigned long tb_jiff_rem_usec =
82 tb_jiff_rem_ticks / tb_ticks_per_usec;
83 unsigned long new_tb_ticks_per_jiffy =
84 (tb_ticks * (1000000/HZ))/titan_usec;
85
86 seq_printf(m, " titan elapsed = %lu uSec\n", titan_usec);
87 seq_printf(m, " tb elapsed = %lu ticks\n", tb_ticks);
88 seq_printf(m, " titan jiffies = %lu.%04lu\n", titan_jiffies,
89 titan_jiff_rem_usec);
90 seq_printf(m, " tb jiffies = %lu.%04lu\n", tb_jiffies,
91 tb_jiff_rem_usec);
92 seq_printf(m, " new tb_ticks_per_jiffy = %lu\n",
93 new_tb_ticks_per_jiffy);
94 }
95
96 return 0;
97}
98
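The /proc/iSeries/titanTod math above turns the Titan TOD delta into microseconds (the >> 12 implies each Titan unit is 1/4096 of a microsecond), converts both deltas to jiffies and derives a corrected tb_ticks_per_jiffy. A stand-alone sketch of the same arithmetic with made-up sample deltas and an assumed HZ of 100:

#include <stdio.h>

#define HZ 100	/* assumed tick rate for this sketch */

int main(void)
{
	/* Made-up deltas covering roughly two seconds of wall time. */
	unsigned long long titan_delta = 2000000ULL << 12;	/* Titan units */
	unsigned long long tb_ticks    = 1024000000ULL;		/* timebase ticks */

	unsigned long long titan_usec = titan_delta >> 12;
	unsigned long long titan_jiffies = titan_usec / (1000000 / HZ);
	unsigned long long new_tb_ticks_per_jiffy =
			(tb_ticks * (1000000 / HZ)) / titan_usec;

	printf("titan elapsed = %llu us (%llu jiffies)\n",
	       titan_usec, titan_jiffies);
	printf("new tb_ticks_per_jiffy = %llu\n", new_tb_ticks_per_jiffy);
	return 0;
}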
99static int proc_titantod_open(struct inode *inode, struct file *file)
100{
101 return single_open(file, proc_titantod_show, NULL);
102}
103
104static const struct file_operations proc_titantod_operations = {
105 .open = proc_titantod_open,
106 .read = seq_read,
107 .llseek = seq_lseek,
108 .release = single_release,
109};
110
111static int __init iseries_proc_init(void)
112{
113 if (!firmware_has_feature(FW_FEATURE_ISERIES))
114 return 0;
115
116 proc_create("iSeries/titanTod", S_IFREG|S_IRUGO, NULL,
117 &proc_titantod_operations);
118 return 0;
119}
120__initcall(iseries_proc_init);
diff --git a/arch/powerpc/platforms/iseries/processor_vpd.h b/arch/powerpc/platforms/iseries/processor_vpd.h
deleted file mode 100644
index 7ac5d0d0dbfa..000000000000
--- a/arch/powerpc/platforms/iseries/processor_vpd.h
+++ /dev/null
@@ -1,85 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_PROCESSOR_VPD_H
19#define _ISERIES_PROCESSOR_VPD_H
20
21#include <asm/types.h>
22
23/*
24 * This struct maps Processor Vpd that is DMAd to SLIC by CSP
25 */
26struct IoHriProcessorVpd {
27 u8 xFormat; // VPD format indicator x00-x00
28 u8 xProcStatus:8; // Processor State x01-x01
29 u8 xSecondaryThreadCount; // Secondary thread cnt x02-x02
30 u8 xSrcType:1; // Src Type x03-x03
31 u8 xSrcSoft:1; // Src stay soft ...
32 u8 xSrcParable:1; // Src parable ...
33 u8 xRsvd1:5; // Reserved ...
34	u16	xHvPhysicalProcIndex;	// Hypervisor physical proc index x04-x05
35 u16 xRsvd2; // Reserved x06-x07
36 u32 xHwNodeId; // Hardware node id x08-x0B
37 u32 xHwProcId; // Hardware processor id x0C-x0F
38
39 u32 xTypeNum; // Card Type/CCIN number x10-x13
40 u32 xModelNum; // Model/Feature number x14-x17
41 u64 xSerialNum; // Serial number x18-x1F
42 char xPartNum[12]; // Book Part or FPU number x20-x2B
43 char xMfgID[4]; // Manufacturing ID x2C-x2F
44
45 u32 xProcFreq; // Processor Frequency x30-x33
46 u32 xTimeBaseFreq; // Time Base Frequency x34-x37
47
48 u32 xChipEcLevel; // Chip EC Levels x38-x3B
49 u32 xProcIdReg; // PIR SPR value x3C-x3F
50 u32 xPVR; // PVR value x40-x43
51 u8 xRsvd3[12]; // Reserved x44-x4F
52
53 u32 xInstCacheSize; // Instruction cache size in KB x50-x53
54 u32 xInstBlockSize; // Instruction cache block size x54-x57
55 u32 xDataCacheOperandSize; // Data cache operand size x58-x5B
56 u32 xInstCacheOperandSize; // Inst cache operand size x5C-x5F
57
58 u32 xDataL1CacheSizeKB; // L1 data cache size in KB x60-x63
59 u32 xDataL1CacheLineSize; // L1 data cache block size x64-x67
60 u64 xRsvd4; // Reserved x68-x6F
61
62 u32 xDataL2CacheSizeKB; // L2 data cache size in KB x70-x73
63 u32 xDataL2CacheLineSize; // L2 data cache block size x74-x77
64 u64 xRsvd5; // Reserved x78-x7F
65
66 u32 xDataL3CacheSizeKB; // L3 data cache size in KB x80-x83
67 u32 xDataL3CacheLineSize; // L3 data cache block size x84-x87
68 u64 xRsvd6; // Reserved x88-x8F
69
70 u64 xFruLabel; // Card Location Label x90-x97
71 u8 xSlotsOnCard; // Slots on card (0=no slots) x98-x98
72 u8 xPartLocFlag; // Location flag (0-pluggable 1-imbedded) x99-x99
73 u16 xSlotMapIndex; // Index in slot map table x9A-x9B
74 u8 xSmartCardPortNo; // Smart card port number x9C-x9C
75 u8 xRsvd7; // Reserved x9D-x9D
76 u16 xFrameIdAndRackUnit; // Frame ID and rack unit adr x9E-x9F
77
78 u8 xRsvd8[24]; // Reserved xA0-xB7
79
80 char xProcSrc[72]; // CSP format SRC xB8-xFF
81};
82
83extern struct IoHriProcessorVpd xIoHriProcessorVpd[];
84
85#endif /* _ISERIES_PROCESSOR_VPD_H */
diff --git a/arch/powerpc/platforms/iseries/release_data.h b/arch/powerpc/platforms/iseries/release_data.h
deleted file mode 100644
index 6ad7d843e8fc..000000000000
--- a/arch/powerpc/platforms/iseries/release_data.h
+++ /dev/null
@@ -1,63 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_RELEASE_DATA_H
19#define _ISERIES_RELEASE_DATA_H
20
21/*
22 * This control block contains the critical information about the
23 * release so that it can be changed in the future (ie, the virtual
24 * address of the OS's NACA).
25 */
26#include <asm/types.h>
27#include "naca.h"
28
29/*
30 * When we IPL a secondary partition, we will check if the
31 * secondary xMinPlicVrmIndex > the primary xVrmIndex.
32 * If it is then this tells PLIC that this secondary is not
33 * supported running on this "old" of a level of PLIC.
34 *
35 * Likewise, we will compare the primary xMinSlicVrmIndex to
36 * the secondary xVrmIndex.
37 * If the primary xMinSlicVrmIndex > the secondary xVrmIndex then we
38 * know that this PLIC does not support running an OS "that old".
39 */
40
41#define HVREL_TAGSINACTIVE 0x8000
42#define HVREL_32BIT 0x4000
43#define HVREL_NOSHAREDPROCS 0x2000
44#define HVREL_NOHMT 0x1000
45
46struct HvReleaseData {
47 u32 xDesc; /* Descriptor "HvRD" ebcdic x00-x03 */
48 u16 xSize; /* Size of this control block x04-x05 */
49 u16 xVpdAreasPtrOffset; /* Offset in NACA of ItVpdAreas x06-x07 */
50 struct naca_struct *xSlicNacaAddr; /* Virt addr of SLIC NACA x08-x0F */
51 u32 xMsNucDataOffset; /* Offset of Linux Mapping Data x10-x13 */
52 u32 xRsvd1; /* Reserved x14-x17 */
53 u16 xFlags;
54 u16 xVrmIndex; /* VRM Index of OS image x1A-x1B */
55 u16 xMinSupportedPlicVrmIndex; /* Min PLIC level (soft) x1C-x1D */
56	u16	xMinCompatablePlicVrmIndex; /* Min PLIC level (hard)	x1E-x1F */
57 char xVrmName[12]; /* Displayable name x20-x2B */
58 char xRsvd3[20]; /* Reserved x2C-x3F */
59};
60
61extern const struct HvReleaseData hvReleaseData;
62
63#endif /* _ISERIES_RELEASE_DATA_H */
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
deleted file mode 100644
index 8fc62586a973..000000000000
--- a/arch/powerpc/platforms/iseries/setup.c
+++ /dev/null
@@ -1,722 +0,0 @@
1/*
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 *
5 * Description:
6 * Architecture- / platform-specific boot-time initialization code for
7 * the IBM iSeries LPAR. Adapted from original code by Grant Erickson and
8 * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
9 * <dan@net4x.com>.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#undef DEBUG
18
19#include <linux/init.h>
20#include <linux/threads.h>
21#include <linux/smp.h>
22#include <linux/param.h>
23#include <linux/string.h>
24#include <linux/export.h>
25#include <linux/seq_file.h>
26#include <linux/kdev_t.h>
27#include <linux/kexec.h>
28#include <linux/major.h>
29#include <linux/root_dev.h>
30#include <linux/kernel.h>
31#include <linux/hrtimer.h>
32#include <linux/tick.h>
33
34#include <asm/processor.h>
35#include <asm/machdep.h>
36#include <asm/page.h>
37#include <asm/mmu.h>
38#include <asm/pgtable.h>
39#include <asm/mmu_context.h>
40#include <asm/cputable.h>
41#include <asm/sections.h>
42#include <asm/iommu.h>
43#include <asm/firmware.h>
44#include <asm/system.h>
45#include <asm/time.h>
46#include <asm/paca.h>
47#include <asm/cache.h>
48#include <asm/abs_addr.h>
49#include <asm/iseries/hv_lp_config.h>
50#include <asm/iseries/hv_call_event.h>
51#include <asm/iseries/hv_call_xm.h>
52#include <asm/iseries/it_lp_queue.h>
53#include <asm/iseries/mf.h>
54#include <asm/iseries/hv_lp_event.h>
55#include <asm/iseries/lpar_map.h>
56#include <asm/udbg.h>
57#include <asm/irq.h>
58
59#include "naca.h"
60#include "setup.h"
61#include "irq.h"
62#include "vpd_areas.h"
63#include "processor_vpd.h"
64#include "it_lp_naca.h"
65#include "main_store.h"
66#include "call_sm.h"
67#include "call_hpt.h"
68#include "pci.h"
69
70#ifdef DEBUG
71#define DBG(fmt...) udbg_printf(fmt)
72#else
73#define DBG(fmt...)
74#endif
75
76/* Function Prototypes */
77static unsigned long build_iSeries_Memory_Map(void);
78static void iseries_shared_idle(void);
79static void iseries_dedicated_idle(void);
80
81
82struct MemoryBlock {
83 unsigned long absStart;
84 unsigned long absEnd;
85 unsigned long logicalStart;
86 unsigned long logicalEnd;
87};
88
89/*
90 * Process the main store vpd to determine where the holes in memory are
91 * and return the number of physical blocks and fill in the array of
92 * block data.
93 */
94static unsigned long iSeries_process_Condor_mainstore_vpd(
95 struct MemoryBlock *mb_array, unsigned long max_entries)
96{
97 unsigned long holeFirstChunk, holeSizeChunks;
98 unsigned long numMemoryBlocks = 1;
99 struct IoHriMainStoreSegment4 *msVpd =
100 (struct IoHriMainStoreSegment4 *)xMsVpd;
101 unsigned long holeStart = msVpd->nonInterleavedBlocksStartAdr;
102 unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr;
103 unsigned long holeSize = holeEnd - holeStart;
104
105 printk("Mainstore_VPD: Condor\n");
106 /*
107 * Determine if absolute memory has any
108 * holes so that we can interpret the
109 * access map we get back from the hypervisor
110 * correctly.
111 */
112 mb_array[0].logicalStart = 0;
113 mb_array[0].logicalEnd = 0x100000000UL;
114 mb_array[0].absStart = 0;
115 mb_array[0].absEnd = 0x100000000UL;
116
117 if (holeSize) {
118 numMemoryBlocks = 2;
119 holeStart = holeStart & 0x000fffffffffffffUL;
120 holeStart = addr_to_chunk(holeStart);
121 holeFirstChunk = holeStart;
122 holeSize = addr_to_chunk(holeSize);
123 holeSizeChunks = holeSize;
124 printk( "Main store hole: start chunk = %0lx, size = %0lx chunks\n",
125 holeFirstChunk, holeSizeChunks );
126 mb_array[0].logicalEnd = holeFirstChunk;
127 mb_array[0].absEnd = holeFirstChunk;
128 mb_array[1].logicalStart = holeFirstChunk;
129 mb_array[1].logicalEnd = 0x100000000UL - holeSizeChunks;
130 mb_array[1].absStart = holeFirstChunk + holeSizeChunks;
131 mb_array[1].absEnd = 0x100000000UL;
132 }
133 return numMemoryBlocks;
134}
135
136#define MaxSegmentAreas 32
137#define MaxSegmentAdrRangeBlocks 128
138#define MaxAreaRangeBlocks 4
139
140static unsigned long iSeries_process_Regatta_mainstore_vpd(
141 struct MemoryBlock *mb_array, unsigned long max_entries)
142{
143 struct IoHriMainStoreSegment5 *msVpdP =
144 (struct IoHriMainStoreSegment5 *)xMsVpd;
145 unsigned long numSegmentBlocks = 0;
146 u32 existsBits = msVpdP->msAreaExists;
147 unsigned long area_num;
148
149 printk("Mainstore_VPD: Regatta\n");
150
151 for (area_num = 0; area_num < MaxSegmentAreas; ++area_num ) {
152 unsigned long numAreaBlocks;
153 struct IoHriMainStoreArea4 *currentArea;
154
155 if (existsBits & 0x80000000) {
156 unsigned long block_num;
157
158 currentArea = &msVpdP->msAreaArray[area_num];
159 numAreaBlocks = currentArea->numAdrRangeBlocks;
160 printk("ms_vpd: processing area %2ld blocks=%ld",
161 area_num, numAreaBlocks);
162 for (block_num = 0; block_num < numAreaBlocks;
163 ++block_num ) {
164 /* Process an address range block */
165 struct MemoryBlock tempBlock;
166 unsigned long i;
167
168 tempBlock.absStart =
169 (unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart;
170 tempBlock.absEnd =
171 (unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd;
172 tempBlock.logicalStart = 0;
173 tempBlock.logicalEnd = 0;
174 printk("\n block %ld absStart=%016lx absEnd=%016lx",
175 block_num, tempBlock.absStart,
176 tempBlock.absEnd);
177
178 for (i = 0; i < numSegmentBlocks; ++i) {
179 if (mb_array[i].absStart ==
180 tempBlock.absStart)
181 break;
182 }
183 if (i == numSegmentBlocks) {
184 if (numSegmentBlocks == max_entries)
185 panic("iSeries_process_mainstore_vpd: too many memory blocks");
186 mb_array[numSegmentBlocks] = tempBlock;
187 ++numSegmentBlocks;
188 } else
189 printk(" (duplicate)");
190 }
191 printk("\n");
192 }
193 existsBits <<= 1;
194 }
195 /* Now sort the blocks found into ascending sequence */
196 if (numSegmentBlocks > 1) {
197 unsigned long m, n;
198
199 for (m = 0; m < numSegmentBlocks - 1; ++m) {
200 for (n = numSegmentBlocks - 1; m < n; --n) {
201 if (mb_array[n].absStart <
202 mb_array[n-1].absStart) {
203 struct MemoryBlock tempBlock;
204
205 tempBlock = mb_array[n];
206 mb_array[n] = mb_array[n-1];
207 mb_array[n-1] = tempBlock;
208 }
209 }
210 }
211 }
212 /*
213 * Assign "logical" addresses to each block. These
214 * addresses correspond to the hypervisor "bitmap" space.
215 * Convert all addresses into units of 256K chunks.
216 */
217 {
218 unsigned long i, nextBitmapAddress;
219
220 printk("ms_vpd: %ld sorted memory blocks\n", numSegmentBlocks);
221 nextBitmapAddress = 0;
222 for (i = 0; i < numSegmentBlocks; ++i) {
223 unsigned long length = mb_array[i].absEnd -
224 mb_array[i].absStart;
225
226 mb_array[i].logicalStart = nextBitmapAddress;
227 mb_array[i].logicalEnd = nextBitmapAddress + length;
228 nextBitmapAddress += length;
229 printk(" Bitmap range: %016lx - %016lx\n"
230 " Absolute range: %016lx - %016lx\n",
231 mb_array[i].logicalStart,
232 mb_array[i].logicalEnd,
233 mb_array[i].absStart, mb_array[i].absEnd);
234 mb_array[i].absStart = addr_to_chunk(mb_array[i].absStart &
235 0x000fffffffffffffUL);
236 mb_array[i].absEnd = addr_to_chunk(mb_array[i].absEnd &
237 0x000fffffffffffffUL);
238 mb_array[i].logicalStart =
239 addr_to_chunk(mb_array[i].logicalStart);
240 mb_array[i].logicalEnd = addr_to_chunk(mb_array[i].logicalEnd);
241 }
242 }
243
244 return numSegmentBlocks;
245}
246
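Both mainstore-VPD parsers above convert byte addresses into chunks (addr_to_chunk) and back (chunk_to_addr), masking the address down to the low 52 bits first. A sketch of those conversions under the 256K chunk size the comments state, which corresponds to a shift of 18 bits:

#include <stdio.h>
#include <stdint.h>

#define CHUNK_SHIFT 18				/* 256K chunks, per the comments */
#define ADDR_MASK   0x000fffffffffffffULL	/* keep the low 52 address bits */

static uint64_t addr_to_chunk(uint64_t addr)  { return addr >> CHUNK_SHIFT; }
static uint64_t chunk_to_addr(uint64_t chunk) { return chunk << CHUNK_SHIFT; }

int main(void)
{
	uint64_t abs_addr = 0x0001234500040000ULL;	/* arbitrary, chunk-aligned */
	uint64_t chunk = addr_to_chunk(abs_addr & ADDR_MASK);

	printf("absolute 0x%016llx -> chunk 0x%llx -> back to 0x%016llx\n",
	       (unsigned long long)abs_addr,
	       (unsigned long long)chunk,
	       (unsigned long long)chunk_to_addr(chunk));
	return 0;
}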
247static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
248 unsigned long max_entries)
249{
250 unsigned long i;
251 unsigned long mem_blocks = 0;
252
253 if (mmu_has_feature(MMU_FTR_SLB))
254 mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
255 max_entries);
256 else
257 mem_blocks = iSeries_process_Condor_mainstore_vpd(mb_array,
258 max_entries);
259
260 printk("Mainstore_VPD: numMemoryBlocks = %ld\n", mem_blocks);
261 for (i = 0; i < mem_blocks; ++i) {
262 printk("Mainstore_VPD: block %3ld logical chunks %016lx - %016lx\n"
263 " abs chunks %016lx - %016lx\n",
264 i, mb_array[i].logicalStart, mb_array[i].logicalEnd,
265 mb_array[i].absStart, mb_array[i].absEnd);
266 }
267 return mem_blocks;
268}
269
270static void __init iSeries_get_cmdline(void)
271{
272 char *p, *q;
273
274 /* copy the command line parameter from the primary VSP */
275 HvCallEvent_dmaToSp(cmd_line, 2 * 64* 1024, 256,
276 HvLpDma_Direction_RemoteToLocal);
277
278 p = cmd_line;
279 q = cmd_line + 255;
280 while(p < q) {
281 if (!*p || *p == '\n')
282 break;
283 ++p;
284 }
285 *p = 0;
286}
287
288static void __init iSeries_init_early(void)
289{
290 DBG(" -> iSeries_init_early()\n");
291
292 /* Snapshot the timebase, for use in later recalibration */
293 iSeries_time_init_early();
294
295 /*
296 * Initialize the DMA/TCE management
297 */
298 iommu_init_early_iSeries();
299
300 /* Initialize machine-dependency vectors */
301#ifdef CONFIG_SMP
302 smp_init_iSeries();
303#endif
304
305 /* Associate Lp Event Queue 0 with processor 0 */
306 HvCallEvent_setLpEventQueueInterruptProc(0, 0);
307
308 mf_init();
309
310 DBG(" <- iSeries_init_early()\n");
311}
312
313struct mschunks_map mschunks_map = {
314 /* XXX We don't use these, but Piranha might need them. */
315 .chunk_size = MSCHUNKS_CHUNK_SIZE,
316 .chunk_shift = MSCHUNKS_CHUNK_SHIFT,
317 .chunk_mask = MSCHUNKS_OFFSET_MASK,
318};
319EXPORT_SYMBOL(mschunks_map);
320
321static void mschunks_alloc(unsigned long num_chunks)
322{
323 klimit = _ALIGN(klimit, sizeof(u32));
324 mschunks_map.mapping = (u32 *)klimit;
325 klimit += num_chunks * sizeof(u32);
326 mschunks_map.num_chunks = num_chunks;
327}
328
329/*
330 * The iSeries may have very large memories ( > 128 GB ) and a partition
331 * may get memory in "chunks" that may be anywhere in the 2**52 real
332 * address space. The chunks are 256K in size. To map this to the
333 * memory model Linux expects, the AS/400 specific code builds a
334 * translation table to translate what Linux thinks are "physical"
335 * addresses to the actual real addresses. This allows us to make
336 * it appear to Linux that we have contiguous memory starting at
337 * physical address zero while in fact this could be far from the truth.
338 * To avoid confusion, I'll let the words physical and/or real address
339 * apply to the Linux addresses while I'll use "absolute address" to
340 * refer to the actual hardware real address.
341 *
342 * build_iSeries_Memory_Map gets information from the Hypervisor and
343 * looks at the Main Store VPD to determine the absolute addresses
344 * of the memory that has been assigned to our partition and builds
345 * a table used to translate Linux's physical addresses to these
346 * absolute addresses. Absolute addresses are needed when
347 * communicating with the hypervisor (e.g. to build HPT entries)
348 *
349 * Returns the physical memory size
350 */
351
352static unsigned long __init build_iSeries_Memory_Map(void)
353{
354 u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
355 u32 nextPhysChunk;
356 u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
357 u32 totalChunks,moreChunks;
358 u32 currChunk, thisChunk, absChunk;
359 u32 currDword;
360 u32 chunkBit;
361 u64 map;
362 struct MemoryBlock mb[32];
363 unsigned long numMemoryBlocks, curBlock;
364
365 /* Chunk size on iSeries is 256K bytes */
366 totalChunks = (u32)HvLpConfig_getMsChunks();
367 mschunks_alloc(totalChunks);
368
369 /*
370 * Get absolute address of our load area
371 * and map it to physical address 0
372 * This guarantees that the loadarea ends up at physical 0
373 * otherwise, it might not be returned by PLIC as the first
374 * chunks
375 */
376
377 loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
378 loadAreaSize = itLpNaca.xLoadAreaChunks;
379
380 /*
381 * Only add the pages already mapped here.
382 * Otherwise we might add the hpt pages
383 * The rest of the pages of the load area
384 * aren't in the HPT yet and can still
385 * be assigned an arbitrary physical address
386 */
387 if ((loadAreaSize * 64) > HvPagesToMap)
388 loadAreaSize = HvPagesToMap / 64;
389
390 loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;
391
392 /*
393 * TODO Do we need to do something if the HPT is in the 64MB load area?
394 * This would be required if the itLpNaca.xLoadAreaChunks includes
395 * the HPT size
396 */
397
398 printk("Mapping load area - physical addr = 0000000000000000\n"
399 " absolute addr = %016lx\n",
400 chunk_to_addr(loadAreaFirstChunk));
401 printk("Load area size %dK\n", loadAreaSize * 256);
402
403 for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk)
404 mschunks_map.mapping[nextPhysChunk] =
405 loadAreaFirstChunk + nextPhysChunk;
406
407 /*
408 * Get absolute address of our HPT and remember it so
409 * we won't map it to any physical address
410 */
411 hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
412 hptSizePages = (u32)HvCallHpt_getHptPages();
413 hptSizeChunks = hptSizePages >>
414 (MSCHUNKS_CHUNK_SHIFT - HW_PAGE_SHIFT);
415 hptLastChunk = hptFirstChunk + hptSizeChunks - 1;
416
417 printk("HPT absolute addr = %016lx, size = %dK\n",
418 chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
419
420 /*
421 * Determine if absolute memory has any
422 * holes so that we can interpret the
423 * access map we get back from the hypervisor
424 * correctly.
425 */
426 numMemoryBlocks = iSeries_process_mainstore_vpd(mb, 32);
427
428 /*
429 * Process the main store access map from the hypervisor
430 * to build up our physical -> absolute translation table
431 */
432 curBlock = 0;
433 currChunk = 0;
434 currDword = 0;
435 moreChunks = totalChunks;
436
437 while (moreChunks) {
438 map = HvCallSm_get64BitsOfAccessMap(itLpNaca.xLpIndex,
439 currDword);
440 thisChunk = currChunk;
441 while (map) {
442 chunkBit = map >> 63;
443 map <<= 1;
444 if (chunkBit) {
445 --moreChunks;
446 while (thisChunk >= mb[curBlock].logicalEnd) {
447 ++curBlock;
448 if (curBlock >= numMemoryBlocks)
449 panic("out of memory blocks");
450 }
451 if (thisChunk < mb[curBlock].logicalStart)
452 panic("memory block error");
453
454 absChunk = mb[curBlock].absStart +
455 (thisChunk - mb[curBlock].logicalStart);
456 if (((absChunk < hptFirstChunk) ||
457 (absChunk > hptLastChunk)) &&
458 ((absChunk < loadAreaFirstChunk) ||
459 (absChunk > loadAreaLastChunk))) {
460 mschunks_map.mapping[nextPhysChunk] =
461 absChunk;
462 ++nextPhysChunk;
463 }
464 }
465 ++thisChunk;
466 }
467 ++currDword;
468 currChunk += 64;
469 }
470
471 /*
472 * main store size (in chunks) is
473 * totalChunks - hptSizeChunks
474 * which should be equal to
475 * nextPhysChunk
476 */
477 return chunk_to_addr(nextPhysChunk);
478}
479
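build_iSeries_Memory_Map() fills mschunks_map.mapping[] so that Linux "physical" chunk N resolves to the absolute chunk the hypervisor actually assigned to the partition. The sketch below shows a lookup through such a table; the mapping contents are invented, and the real translation lives in the abs_addr helpers rather than in this file.

#include <stdio.h>
#include <stdint.h>

#define CHUNK_SHIFT 18	/* 256K chunks */

/* Invented mapping: physical chunks 0..3 scattered across absolute memory. */
static uint32_t mapping[] = { 0x100, 0x101, 0x240, 0x241 };

static uint64_t phys_to_abs(uint64_t pa)
{
	uint64_t chunk = pa >> CHUNK_SHIFT;
	uint64_t offset = pa & ((1ULL << CHUNK_SHIFT) - 1);

	return ((uint64_t)mapping[chunk] << CHUNK_SHIFT) + offset;
}

int main(void)
{
	uint64_t pa = (2ULL << CHUNK_SHIFT) + 0x1234;	/* offset into phys chunk 2 */

	printf("phys 0x%llx -> abs 0x%llx\n",
	       (unsigned long long)pa, (unsigned long long)phys_to_abs(pa));
	return 0;
}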
480/*
481 * Document me.
482 */
483static void __init iSeries_setup_arch(void)
484{
485 if (get_lppaca()->shared_proc) {
486 ppc_md.idle_loop = iseries_shared_idle;
487 printk(KERN_DEBUG "Using shared processor idle loop\n");
488 } else {
489 ppc_md.idle_loop = iseries_dedicated_idle;
490 printk(KERN_DEBUG "Using dedicated idle loop\n");
491 }
492
493 /* Setup the Lp Event Queue */
494 setup_hvlpevent_queue();
495
496 printk("Max logical processors = %d\n",
497 itVpdAreas.xSlicMaxLogicalProcs);
498 printk("Max physical processors = %d\n",
499 itVpdAreas.xSlicMaxPhysicalProcs);
500
501 iSeries_pcibios_init();
502}
503
504static void iSeries_show_cpuinfo(struct seq_file *m)
505{
506 seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
507}
508
509static void __init iSeries_progress(char * st, unsigned short code)
510{
511 printk("Progress: [%04x] - %s\n", (unsigned)code, st);
512 mf_display_progress(code);
513}
514
515static void __init iSeries_fixup_klimit(void)
516{
517 /*
518 * Change klimit to take into account any ram disk
519 * that may be included
520 */
521 if (naca.xRamDisk)
522 klimit = KERNELBASE + (u64)naca.xRamDisk +
523 (naca.xRamDiskSize * HW_PAGE_SIZE);
524}
525
526static int __init iSeries_src_init(void)
527{
528 /* clear the progress line */
529 if (firmware_has_feature(FW_FEATURE_ISERIES))
530 ppc_md.progress(" ", 0xffff);
531 return 0;
532}
533
534late_initcall(iSeries_src_init);
535
536static inline void process_iSeries_events(void)
537{
538 asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
539}
540
541static void yield_shared_processor(void)
542{
543 unsigned long tb;
544
545 HvCall_setEnabledInterrupts(HvCall_MaskIPI |
546 HvCall_MaskLpEvent |
547 HvCall_MaskLpProd |
548 HvCall_MaskTimeout);
549
550 tb = get_tb();
551 /* Compute future tb value when yield should expire */
552 HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);
553
554 /*
555 * The decrementer stops during the yield. Force a fake decrementer
556 * here and let the timer_interrupt code sort out the actual time.
557 */
558 get_lppaca()->int_dword.fields.decr_int = 1;
559 ppc64_runlatch_on();
560 process_iSeries_events();
561}
562
563static void iseries_shared_idle(void)
564{
565 while (1) {
566 tick_nohz_idle_enter();
567 rcu_idle_enter();
568 while (!need_resched() && !hvlpevent_is_pending()) {
569 local_irq_disable();
570 ppc64_runlatch_off();
571
572 /* Recheck with irqs off */
573 if (!need_resched() && !hvlpevent_is_pending())
574 yield_shared_processor();
575
576 HMT_medium();
577 local_irq_enable();
578 }
579
580 ppc64_runlatch_on();
581 rcu_idle_exit();
582 tick_nohz_idle_exit();
583
584 if (hvlpevent_is_pending())
585 process_iSeries_events();
586
587 preempt_enable_no_resched();
588 schedule();
589 preempt_disable();
590 }
591}
592
593static void iseries_dedicated_idle(void)
594{
595 set_thread_flag(TIF_POLLING_NRFLAG);
596
597 while (1) {
598 tick_nohz_idle_enter();
599 rcu_idle_enter();
600 if (!need_resched()) {
601 while (!need_resched()) {
602 ppc64_runlatch_off();
603 HMT_low();
604
605 if (hvlpevent_is_pending()) {
606 HMT_medium();
607 ppc64_runlatch_on();
608 process_iSeries_events();
609 }
610 }
611
612 HMT_medium();
613 }
614
615 ppc64_runlatch_on();
616 rcu_idle_exit();
617 tick_nohz_idle_exit();
618 preempt_enable_no_resched();
619 schedule();
620 preempt_disable();
621 }
622}
623
624static void __iomem *iseries_ioremap(phys_addr_t address, unsigned long size,
625 unsigned long flags, void *caller)
626{
627 return (void __iomem *)address;
628}
629
630static void iseries_iounmap(volatile void __iomem *token)
631{
632}
633
634static int __init iseries_probe(void)
635{
636 unsigned long root = of_get_flat_dt_root();
637 if (!of_flat_dt_is_compatible(root, "IBM,iSeries"))
638 return 0;
639
640 hpte_init_iSeries();
641 /* iSeries does not support 16M pages */
642 cur_cpu_spec->mmu_features &= ~MMU_FTR_16M_PAGE;
643
644 return 1;
645}
646
647#ifdef CONFIG_KEXEC
648static int iseries_kexec_prepare(struct kimage *image)
649{
650 return -ENOSYS;
651}
652#endif
653
654define_machine(iseries) {
655 .name = "iSeries",
656 .setup_arch = iSeries_setup_arch,
657 .show_cpuinfo = iSeries_show_cpuinfo,
658 .init_IRQ = iSeries_init_IRQ,
659 .get_irq = iSeries_get_irq,
660 .init_early = iSeries_init_early,
661 .pcibios_fixup = iSeries_pci_final_fixup,
662 .pcibios_fixup_resources= iSeries_pcibios_fixup_resources,
663 .restart = mf_reboot,
664 .power_off = mf_power_off,
665 .halt = mf_power_off,
666 .get_boot_time = iSeries_get_boot_time,
667 .set_rtc_time = iSeries_set_rtc_time,
668 .get_rtc_time = iSeries_get_rtc_time,
669 .calibrate_decr = generic_calibrate_decr,
670 .progress = iSeries_progress,
671 .probe = iseries_probe,
672 .ioremap = iseries_ioremap,
673 .iounmap = iseries_iounmap,
674#ifdef CONFIG_KEXEC
675 .machine_kexec_prepare = iseries_kexec_prepare,
676#endif
677 /* XXX Implement enable_pmcs for iSeries */
678};
679
680void * __init iSeries_early_setup(void)
681{
682 unsigned long phys_mem_size;
683
684 /* Identify CPU type. This is done again by the common code later
685 * on but calling this function multiple times is fine.
686 */
687 identify_cpu(0, mfspr(SPRN_PVR));
688 initialise_paca(&boot_paca, 0);
689
690 powerpc_firmware_features |= FW_FEATURE_ISERIES;
691 powerpc_firmware_features |= FW_FEATURE_LPAR;
692
693#ifdef CONFIG_SMP
694 /* On iSeries we know we can never have more than 64 cpus */
695	nr_cpu_ids = min(nr_cpu_ids, 64);
696#endif
697
698 iSeries_fixup_klimit();
699
700 /*
701 * Initialize the table which translate Linux physical addresses to
702 * AS/400 absolute addresses
703 */
704 phys_mem_size = build_iSeries_Memory_Map();
705
706 iSeries_get_cmdline();
707
708 return (void *) __pa(build_flat_dt(phys_mem_size));
709}
710
711static void hvputc(char c)
712{
713 if (c == '\n')
714 hvputc('\r');
715
716 HvCall_writeLogBuffer(&c, 1);
717}
718
719void __init udbg_init_iseries(void)
720{
721 udbg_putc = hvputc;
722}
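hvputc() above injects a carriage return before every newline so the hypervisor log reads correctly on terminal-style consoles. The same pattern, with HvCall_writeLogBuffer() replaced by a stub that writes to stdout:

#include <stdio.h>

/* Stub standing in for HvCall_writeLogBuffer(&c, 1). */
static void write_log_buffer(const char *buf, unsigned int len)
{
	fwrite(buf, 1, len, stdout);
}

static void hvputc(char c)
{
	if (c == '\n')
		hvputc('\r');	/* emit CR before LF, exactly like the code above */

	write_log_buffer(&c, 1);
}

int main(void)
{
	const char *msg = "booting\n";

	for (const char *p = msg; *p; p++)
		hvputc(*p);
	return 0;
}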
diff --git a/arch/powerpc/platforms/iseries/setup.h b/arch/powerpc/platforms/iseries/setup.h
deleted file mode 100644
index 729754bbb018..000000000000
--- a/arch/powerpc/platforms/iseries/setup.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 *
5 * Description:
6 * Architecture- / platform-specific boot-time initialization code for
7 * the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and
8 * code by Gary Thomas, Cort Dougan <cort@cs.nmt.edu>, and Dan Malek
9 * <dan@netx4.com>.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#ifndef __ISERIES_SETUP_H__
18#define __ISERIES_SETUP_H__
19
20extern void *iSeries_early_setup(void);
21extern unsigned long iSeries_get_boot_time(void);
22extern int iSeries_set_rtc_time(struct rtc_time *tm);
23extern void iSeries_get_rtc_time(struct rtc_time *tm);
24
25extern void *build_flat_dt(unsigned long phys_mem_size);
26
27#endif /* __ISERIES_SETUP_H__ */
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
deleted file mode 100644
index 02df49fb59f0..000000000000
--- a/arch/powerpc/platforms/iseries/smp.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * SMP support for iSeries machines.
3 *
4 * Dave Engebretsen, Peter Bergner, and
5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
6 *
7 * Plus various changes from other IBM teams...
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#undef DEBUG
16
17#include <linux/kernel.h>
18#include <linux/sched.h>
19#include <linux/smp.h>
20#include <linux/interrupt.h>
21#include <linux/kernel_stat.h>
22#include <linux/delay.h>
23#include <linux/init.h>
24#include <linux/spinlock.h>
25#include <linux/cache.h>
26#include <linux/err.h>
27#include <linux/device.h>
28#include <linux/cpu.h>
29
30#include <asm/ptrace.h>
31#include <linux/atomic.h>
32#include <asm/irq.h>
33#include <asm/page.h>
34#include <asm/pgtable.h>
35#include <asm/io.h>
36#include <asm/smp.h>
37#include <asm/paca.h>
38#include <asm/iseries/hv_call.h>
39#include <asm/time.h>
40#include <asm/machdep.h>
41#include <asm/cputable.h>
42#include <asm/system.h>
43
44static void smp_iSeries_cause_ipi(int cpu, unsigned long data)
45{
46 HvCall_sendIPI(&(paca[cpu]));
47}
48
49static int smp_iSeries_probe(void)
50{
51 return cpumask_weight(cpu_possible_mask);
52}
53
54static int smp_iSeries_kick_cpu(int nr)
55{
56 BUG_ON((nr < 0) || (nr >= NR_CPUS));
57
58 /* Verify that our partition has a processor nr */
59 if (lppaca_of(nr).dyn_proc_status >= 2)
60 return -ENOENT;
61
62 /* The processor is currently spinning, waiting
63 * for the cpu_start field to become non-zero
64 * After we set cpu_start, the processor will
65 * continue on to secondary_start in iSeries_head.S
66 */
67 paca[nr].cpu_start = 1;
68
69 return 0;
70}
71
72static void __devinit smp_iSeries_setup_cpu(int nr)
73{
74}
75
76static struct smp_ops_t iSeries_smp_ops = {
77 .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
78 .cause_ipi = smp_iSeries_cause_ipi,
79 .probe = smp_iSeries_probe,
80 .kick_cpu = smp_iSeries_kick_cpu,
81 .setup_cpu = smp_iSeries_setup_cpu,
82};
83
84/* This is called very early. */
85void __init smp_init_iSeries(void)
86{
87 smp_ops = &iSeries_smp_ops;
88}
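smp_iSeries_kick_cpu() releases a secondary processor by flipping paca[nr].cpu_start from 0 to 1 while the secondary spins waiting for that field to become non-zero. A user-space sketch of the same release pattern using C11 atomics and pthreads (build with -pthread); the kernel relies on the paca and its own memory barriers, not on <stdatomic.h>.

#include <stdio.h>
#include <stdatomic.h>
#include <pthread.h>
#include <unistd.h>

static atomic_int cpu_start;	/* plays the role of paca[nr].cpu_start */

static void *secondary(void *arg)
{
	(void)arg;
	while (!atomic_load(&cpu_start))
		;	/* spin until the boot CPU kicks us */
	printf("secondary released, continuing to start-up code\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary, NULL);
	usleep(10000);			/* let the "secondary" reach its spin loop */
	atomic_store(&cpu_start, 1);	/* the kick_cpu() moment */
	pthread_join(t, NULL);
	return 0;
}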
diff --git a/arch/powerpc/platforms/iseries/spcomm_area.h b/arch/powerpc/platforms/iseries/spcomm_area.h
deleted file mode 100644
index 598b7c14573a..000000000000
--- a/arch/powerpc/platforms/iseries/spcomm_area.h
+++ /dev/null
@@ -1,34 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ISERIES_SPCOMM_AREA_H
20#define _ISERIES_SPCOMM_AREA_H
21
22
23struct SpCommArea {
24 u32 xDesc; // Descriptor (only in new formats) 000-003
25 u8 xFormat; // Format (only in new formats) 004-004
26 u8 xRsvd1[11]; // Reserved 005-00F
27 u64 xRawTbAtIplStart; // Raw HW TB value when IPL is started 010-017
28 u64 xRawTodAtIplStart; // Raw HW TOD value when IPL is started 018-01F
29 u64 xBcdTimeAtIplStart; // BCD time when IPL is started 020-027
30 u64 xBcdTimeAtOsStart; // BCD time when OS passed control 028-02F
31 u8 xRsvd2[80]; // Reserved 030-07F
32};
33
34#endif /* _ISERIES_SPCOMM_AREA_H */
diff --git a/arch/powerpc/platforms/iseries/vio.c b/arch/powerpc/platforms/iseries/vio.c
deleted file mode 100644
index 04be62d368a6..000000000000
--- a/arch/powerpc/platforms/iseries/vio.c
+++ /dev/null
@@ -1,556 +0,0 @@
1/*
2 * Legacy iSeries specific vio initialisation
3 * that needs to be built in (not a module).
4 *
5 * © Copyright 2007 IBM Corporation
6 * Author: Stephen Rothwell
7 * Some parts collected from various other files
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software Foundation,
21 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23#include <linux/of.h>
24#include <linux/init.h>
25#include <linux/slab.h>
26#include <linux/completion.h>
27#include <linux/proc_fs.h>
28#include <linux/export.h>
29
30#include <asm/firmware.h>
31#include <asm/vio.h>
32#include <asm/iseries/vio.h>
33#include <asm/iseries/iommu.h>
34#include <asm/iseries/hv_types.h>
35#include <asm/iseries/hv_lp_event.h>
36
37#define FIRST_VTY 0
38#define NUM_VTYS 1
39#define FIRST_VSCSI (FIRST_VTY + NUM_VTYS)
40#define NUM_VSCSIS 1
41#define FIRST_VLAN (FIRST_VSCSI + NUM_VSCSIS)
42#define NUM_VLANS HVMAXARCHITECTEDVIRTUALLANS
43#define FIRST_VIODASD (FIRST_VLAN + NUM_VLANS)
44#define NUM_VIODASDS HVMAXARCHITECTEDVIRTUALDISKS
45#define FIRST_VIOCD (FIRST_VIODASD + NUM_VIODASDS)
46#define NUM_VIOCDS HVMAXARCHITECTEDVIRTUALCDROMS
47#define FIRST_VIOTAPE (FIRST_VIOCD + NUM_VIOCDS)
48#define NUM_VIOTAPES HVMAXARCHITECTEDVIRTUALTAPES
49
50struct vio_waitevent {
51 struct completion com;
52 int rc;
53 u16 sub_result;
54};
55
56struct vio_resource {
57 char rsrcname[10];
58 char type[4];
59 char model[3];
60};
61
62static struct property *new_property(const char *name, int length,
63 const void *value)
64{
65 struct property *np = kzalloc(sizeof(*np) + strlen(name) + 1 + length,
66 GFP_KERNEL);
67
68 if (!np)
69 return NULL;
70 np->name = (char *)(np + 1);
71 np->value = np->name + strlen(name) + 1;
72 strcpy(np->name, name);
73 memcpy(np->value, value, length);
74 np->length = length;
75 return np;
76}
77
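new_property() above makes a single allocation big enough for the struct, the name string and the value, then points name and value into the tail of that block, which is why free_property() can release everything with one kfree(). A user-space sketch of the same packing with calloc()/free():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct property {
	char *name;
	void *value;
	int length;
};

static struct property *new_property(const char *name, int length,
				     const void *value)
{
	/* One block: the struct, then name + NUL, then the value bytes. */
	struct property *np = calloc(1, sizeof(*np) + strlen(name) + 1 + length);

	if (!np)
		return NULL;
	np->name = (char *)(np + 1);
	np->value = np->name + strlen(name) + 1;
	strcpy(np->name, name);
	memcpy(np->value, value, length);
	np->length = length;
	return np;
}

int main(void)
{
	const char compat[] = "IBM,iSeries-viodasd";
	struct property *p = new_property("compatible", sizeof(compat), compat);

	if (p) {
		printf("%s = %s (%d bytes, one allocation)\n",
		       p->name, (char *)p->value, p->length);
		free(p);	/* releases struct, name and value together */
	}
	return 0;
}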
78static void free_property(struct property *np)
79{
80 kfree(np);
81}
82
83static struct device_node *new_node(const char *path,
84 struct device_node *parent)
85{
86 struct device_node *np = kzalloc(sizeof(*np), GFP_KERNEL);
87
88 if (!np)
89 return NULL;
90 np->full_name = kstrdup(path, GFP_KERNEL);
91 if (!np->full_name) {
92 kfree(np);
93 return NULL;
94 }
95 of_node_set_flag(np, OF_DYNAMIC);
96 kref_init(&np->kref);
97 np->parent = of_node_get(parent);
98 return np;
99}
100
101static void free_node(struct device_node *np)
102{
103 struct property *next;
104 struct property *prop;
105
106 next = np->properties;
107 while (next) {
108 prop = next;
109 next = prop->next;
110 free_property(prop);
111 }
112 of_node_put(np->parent);
113 kfree(np->full_name);
114 kfree(np);
115}
116
117static int add_string_property(struct device_node *np, const char *name,
118 const char *value)
119{
120 struct property *nprop = new_property(name, strlen(value) + 1, value);
121
122 if (!nprop)
123 return 0;
124 prom_add_property(np, nprop);
125 return 1;
126}
127
128static int add_raw_property(struct device_node *np, const char *name,
129 int length, const void *value)
130{
131 struct property *nprop = new_property(name, length, value);
132
133 if (!nprop)
134 return 0;
135 prom_add_property(np, nprop);
136 return 1;
137}
138
139static struct device_node *do_device_node(struct device_node *parent,
140 const char *name, u32 reg, u32 unit, const char *type,
141 const char *compat, struct vio_resource *res)
142{
143 struct device_node *np;
144 char path[32];
145
146 snprintf(path, sizeof(path), "/vdevice/%s@%08x", name, reg);
147 np = new_node(path, parent);
148 if (!np)
149 return NULL;
150 if (!add_string_property(np, "name", name) ||
151 !add_string_property(np, "device_type", type) ||
152 !add_string_property(np, "compatible", compat) ||
153 !add_raw_property(np, "reg", sizeof(reg), &reg) ||
154 !add_raw_property(np, "linux,unit_address",
155 sizeof(unit), &unit)) {
156 goto node_free;
157 }
158 if (res) {
159 if (!add_raw_property(np, "linux,vio_rsrcname",
160 sizeof(res->rsrcname), res->rsrcname) ||
161 !add_raw_property(np, "linux,vio_type",
162 sizeof(res->type), res->type) ||
163 !add_raw_property(np, "linux,vio_model",
164 sizeof(res->model), res->model))
165 goto node_free;
166 }
167 np->name = of_get_property(np, "name", NULL);
168 np->type = of_get_property(np, "device_type", NULL);
169 of_attach_node(np);
170#ifdef CONFIG_PROC_DEVICETREE
171 if (parent->pde) {
172 struct proc_dir_entry *ent;
173
174 ent = proc_mkdir(strrchr(np->full_name, '/') + 1, parent->pde);
175 if (ent)
176 proc_device_tree_add_node(np, ent);
177 }
178#endif
179 return np;
180
181 node_free:
182 free_node(np);
183 return NULL;
184}
185
186/*
187 * This is here so that we can dynamically add viodasd
188 * devices without exposing all the above infrastructure.
189 */
190struct vio_dev *vio_create_viodasd(u32 unit)
191{
192 struct device_node *vio_root;
193 struct device_node *np;
194 struct vio_dev *vdev = NULL;
195
196 vio_root = of_find_node_by_path("/vdevice");
197 if (!vio_root)
198 return NULL;
199 np = do_device_node(vio_root, "viodasd", FIRST_VIODASD + unit, unit,
200 "block", "IBM,iSeries-viodasd", NULL);
201 of_node_put(vio_root);
202 if (np) {
203 vdev = vio_register_device_node(np);
204 if (!vdev)
205 free_node(np);
206 }
207 return vdev;
208}
209EXPORT_SYMBOL_GPL(vio_create_viodasd);
210
211static void __init handle_block_event(struct HvLpEvent *event)
212{
213 struct vioblocklpevent *bevent = (struct vioblocklpevent *)event;
214 struct vio_waitevent *pwe;
215
216 if (event == NULL)
217 /* Notification that a partition went away! */
218 return;
219 /* First, we should NEVER get an int here...only acks */
220 if (hvlpevent_is_int(event)) {
221 printk(KERN_WARNING "handle_viod_request: "
222 "Yikes! got an int in viodasd event handler!\n");
223 if (hvlpevent_need_ack(event)) {
224 event->xRc = HvLpEvent_Rc_InvalidSubtype;
225 HvCallEvent_ackLpEvent(event);
226 }
227 return;
228 }
229
230 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
231 case vioblockopen:
232 /*
233 * Handle a response to an open request. We get all the
234 * disk information in the response, so update it. The
235 * correlation token contains a pointer to a waitevent
236	 * structure that has a completion in it. Update the
237	 * return code in the waitevent structure and post the
238	 * completion to wake up the guy who sent the request.
239 */
240 pwe = (struct vio_waitevent *)event->xCorrelationToken;
241 pwe->rc = event->xRc;
242 pwe->sub_result = bevent->sub_result;
243 complete(&pwe->com);
244 break;
245 case vioblockclose:
246 break;
247 default:
248		printk(KERN_WARNING "handle_viod_request: unexpected subtype!\n");
249 if (hvlpevent_need_ack(event)) {
250 event->xRc = HvLpEvent_Rc_InvalidSubtype;
251 HvCallEvent_ackLpEvent(event);
252 }
253 }
254}
255
256static void __init probe_disk(struct device_node *vio_root, u32 unit)
257{
258 HvLpEvent_Rc hvrc;
259 struct vio_waitevent we;
260 u16 flags = 0;
261
262retry:
263 init_completion(&we.com);
264
265 /* Send the open event to OS/400 */
266 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
267 HvLpEvent_Type_VirtualIo,
268 viomajorsubtype_blockio | vioblockopen,
269 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
270 viopath_sourceinst(viopath_hostLp),
271 viopath_targetinst(viopath_hostLp),
272 (u64)(unsigned long)&we, VIOVERSION << 16,
273 ((u64)unit << 48) | ((u64)flags<< 32),
274 0, 0, 0);
275 if (hvrc != 0) {
276 printk(KERN_WARNING "probe_disk: bad rc on HV open %d\n",
277 (int)hvrc);
278 return;
279 }
280
281 wait_for_completion(&we.com);
282
283 if (we.rc != 0) {
284 if (flags != 0)
285 return;
286 /* try again with read only flag set */
287 flags = vioblockflags_ro;
288 goto retry;
289 }
290
291 /* Send the close event to OS/400. We DON'T expect a response */
292 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
293 HvLpEvent_Type_VirtualIo,
294 viomajorsubtype_blockio | vioblockclose,
295 HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
296 viopath_sourceinst(viopath_hostLp),
297 viopath_targetinst(viopath_hostLp),
298 0, VIOVERSION << 16,
299 ((u64)unit << 48) | ((u64)flags << 32),
300 0, 0, 0);
301 if (hvrc != 0) {
302 printk(KERN_WARNING "probe_disk: "
303 "bad rc sending event to OS/400 %d\n", (int)hvrc);
304 return;
305 }
306
307 do_device_node(vio_root, "viodasd", FIRST_VIODASD + unit, unit,
308 "block", "IBM,iSeries-viodasd", NULL);
309}
310
311static void __init get_viodasd_info(struct device_node *vio_root)
312{
313 int rc;
314 u32 unit;
315
316 rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio, 2);
317 if (rc) {
318 printk(KERN_WARNING "get_viodasd_info: "
319 "error opening path to host partition %d\n",
320 viopath_hostLp);
321 return;
322 }
323
324 /* Initialize our request handler */
325 vio_setHandler(viomajorsubtype_blockio, handle_block_event);
326
327 for (unit = 0; unit < HVMAXARCHITECTEDVIRTUALDISKS; unit++)
328 probe_disk(vio_root, unit);
329
330 vio_clearHandler(viomajorsubtype_blockio);
331 viopath_close(viopath_hostLp, viomajorsubtype_blockio, 2);
332}
333
334static void __init handle_cd_event(struct HvLpEvent *event)
335{
336 struct viocdlpevent *bevent;
337 struct vio_waitevent *pwe;
338
339 if (!event)
340 /* Notification that a partition went away! */
341 return;
342
343 /* First, we should NEVER get an int here...only acks */
344 if (hvlpevent_is_int(event)) {
345 printk(KERN_WARNING "handle_cd_event: got an unexpected int\n");
346 if (hvlpevent_need_ack(event)) {
347 event->xRc = HvLpEvent_Rc_InvalidSubtype;
348 HvCallEvent_ackLpEvent(event);
349 }
350 return;
351 }
352
353 bevent = (struct viocdlpevent *)event;
354
355 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
356 case viocdgetinfo:
357 pwe = (struct vio_waitevent *)event->xCorrelationToken;
358 pwe->rc = event->xRc;
359 pwe->sub_result = bevent->sub_result;
360 complete(&pwe->com);
361 break;
362
363 default:
364 printk(KERN_WARNING "handle_cd_event: "
365			"message with unexpected subtype 0x%04X!\n",
366 event->xSubtype & VIOMINOR_SUBTYPE_MASK);
367 if (hvlpevent_need_ack(event)) {
368 event->xRc = HvLpEvent_Rc_InvalidSubtype;
369 HvCallEvent_ackLpEvent(event);
370 }
371 }
372}
373
374static void __init get_viocd_info(struct device_node *vio_root)
375{
376 HvLpEvent_Rc hvrc;
377 u32 unit;
378 struct vio_waitevent we;
379 struct vio_resource *unitinfo;
380 dma_addr_t unitinfo_dmaaddr;
381 int ret;
382
383 ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio, 2);
384 if (ret) {
385 printk(KERN_WARNING
386 "get_viocd_info: error opening path to host partition %d\n",
387 viopath_hostLp);
388 return;
389 }
390
391 /* Initialize our request handler */
392 vio_setHandler(viomajorsubtype_cdio, handle_cd_event);
393
394 unitinfo = iseries_hv_alloc(
395 sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS,
396 &unitinfo_dmaaddr, GFP_ATOMIC);
397 if (!unitinfo) {
398 printk(KERN_WARNING
399 "get_viocd_info: error allocating unitinfo\n");
400 goto clear_handler;
401 }
402
403 memset(unitinfo, 0, sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS);
404
405 init_completion(&we.com);
406
407 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
408 HvLpEvent_Type_VirtualIo,
409 viomajorsubtype_cdio | viocdgetinfo,
410 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
411 viopath_sourceinst(viopath_hostLp),
412 viopath_targetinst(viopath_hostLp),
413 (u64)&we, VIOVERSION << 16, unitinfo_dmaaddr, 0,
414 sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS, 0);
415 if (hvrc != HvLpEvent_Rc_Good) {
416 printk(KERN_WARNING
417 "get_viocd_info: cdrom error sending event. rc %d\n",
418 (int)hvrc);
419 goto hv_free;
420 }
421
422 wait_for_completion(&we.com);
423
424 if (we.rc) {
425 printk(KERN_WARNING "get_viocd_info: bad rc %d:0x%04X\n",
426 we.rc, we.sub_result);
427 goto hv_free;
428 }
429
430 for (unit = 0; (unit < HVMAXARCHITECTEDVIRTUALCDROMS) &&
431 unitinfo[unit].rsrcname[0]; unit++) {
432 if (!do_device_node(vio_root, "viocd", FIRST_VIOCD + unit, unit,
433 "block", "IBM,iSeries-viocd", &unitinfo[unit]))
434 break;
435 }
436
437 hv_free:
438 iseries_hv_free(sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS,
439 unitinfo, unitinfo_dmaaddr);
440 clear_handler:
441 vio_clearHandler(viomajorsubtype_cdio);
442 viopath_close(viopath_hostLp, viomajorsubtype_cdio, 2);
443}
444
445/* Handle ack events for tape */
446static void __init handle_tape_event(struct HvLpEvent *event)
447{
448 struct vio_waitevent *we;
449 struct viotapelpevent *tevent = (struct viotapelpevent *)event;
450
451 if (event == NULL)
452 /* Notification that a partition went away! */
453 return;
454
455 we = (struct vio_waitevent *)event->xCorrelationToken;
456 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
457 case viotapegetinfo:
458 we->rc = tevent->sub_type_result;
459 complete(&we->com);
460 break;
461 default:
462		printk(KERN_WARNING "handle_tape_event: unexpected ack subtype\n");
463 }
464}
465
466static void __init get_viotape_info(struct device_node *vio_root)
467{
468 HvLpEvent_Rc hvrc;
469 u32 unit;
470 struct vio_resource *unitinfo;
471 dma_addr_t unitinfo_dmaaddr;
472 size_t len = sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALTAPES;
473 struct vio_waitevent we;
474 int ret;
475
476 init_completion(&we.com);
477
478 ret = viopath_open(viopath_hostLp, viomajorsubtype_tape, 2);
479 if (ret) {
480 printk(KERN_WARNING "get_viotape_info: "
481			"viopath_open to host lp failed with rc %d\n", ret);
482 return;
483 }
484
485 vio_setHandler(viomajorsubtype_tape, handle_tape_event);
486
487 unitinfo = iseries_hv_alloc(len, &unitinfo_dmaaddr, GFP_ATOMIC);
488 if (!unitinfo)
489 goto clear_handler;
490
491 memset(unitinfo, 0, len);
492
493 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
494 HvLpEvent_Type_VirtualIo,
495 viomajorsubtype_tape | viotapegetinfo,
496 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
497 viopath_sourceinst(viopath_hostLp),
498 viopath_targetinst(viopath_hostLp),
499 (u64)(unsigned long)&we, VIOVERSION << 16,
500 unitinfo_dmaaddr, len, 0, 0);
501 if (hvrc != HvLpEvent_Rc_Good) {
502 printk(KERN_WARNING "get_viotape_info: hv error on op %d\n",
503 (int)hvrc);
504 goto hv_free;
505 }
506
507 wait_for_completion(&we.com);
508
509 for (unit = 0; (unit < HVMAXARCHITECTEDVIRTUALTAPES) &&
510 unitinfo[unit].rsrcname[0]; unit++) {
511 if (!do_device_node(vio_root, "viotape", FIRST_VIOTAPE + unit,
512 unit, "byte", "IBM,iSeries-viotape",
513 &unitinfo[unit]))
514 break;
515 }
516
517 hv_free:
518 iseries_hv_free(len, unitinfo, unitinfo_dmaaddr);
519 clear_handler:
520 vio_clearHandler(viomajorsubtype_tape);
521 viopath_close(viopath_hostLp, viomajorsubtype_tape, 2);
522}
523
524static int __init iseries_vio_init(void)
525{
526 struct device_node *vio_root;
527 int ret = -ENODEV;
528
529 if (!firmware_has_feature(FW_FEATURE_ISERIES))
530 goto out;
531
532 iommu_vio_init();
533
534 vio_root = of_find_node_by_path("/vdevice");
535 if (!vio_root)
536 goto out;
537
538 if (viopath_hostLp == HvLpIndexInvalid) {
539 vio_set_hostlp();
540 /* If we don't have a host, bail out */
541 if (viopath_hostLp == HvLpIndexInvalid)
542 goto put_node;
543 }
544
545 get_viodasd_info(vio_root);
546 get_viocd_info(vio_root);
547 get_viotape_info(vio_root);
548
549 ret = 0;
550
551 put_node:
552 of_node_put(vio_root);
553 out:
554 return ret;
555}
556arch_initcall(iseries_vio_init);
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
deleted file mode 100644
index 40dad0840eb3..000000000000
--- a/arch/powerpc/platforms/iseries/viopath.c
+++ /dev/null
@@ -1,677 +0,0 @@
1/* -*- linux-c -*-
2 *
3 * iSeries Virtual I/O Message Path code
4 *
5 * Authors: Dave Boutcher <boutcher@us.ibm.com>
6 * Ryan Arnold <ryanarn@us.ibm.com>
7 * Colin Devilbiss <devilbis@us.ibm.com>
8 *
9 * (C) Copyright 2000-2005 IBM Corporation
10 *
11 * This code is used by the iSeries virtual disk, cd,
12 * tape, and console to communicate with OS/400 in another
13 * partition.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of the
18 * License, or (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful, but
21 * WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software Foundation,
27 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 *
29 */
30#include <linux/export.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/errno.h>
34#include <linux/vmalloc.h>
35#include <linux/string.h>
36#include <linux/proc_fs.h>
37#include <linux/dma-mapping.h>
38#include <linux/wait.h>
39#include <linux/seq_file.h>
40#include <linux/interrupt.h>
41#include <linux/completion.h>
42
43#include <asm/system.h>
44#include <asm/uaccess.h>
45#include <asm/prom.h>
46#include <asm/firmware.h>
47#include <asm/iseries/hv_types.h>
48#include <asm/iseries/hv_lp_event.h>
49#include <asm/iseries/hv_lp_config.h>
50#include <asm/iseries/mf.h>
51#include <asm/iseries/vio.h>
52
53/* Status of the path to each other partition in the system.
54 * This is overkill, since we will only ever establish connections
55 * to our hosting partition and the primary partition on the system.
56 * But this allows for other support in the future.
57 */
58static struct viopathStatus {
59 int isOpen; /* Did we open the path? */
60 int isActive; /* Do we have a mon msg outstanding */
61 int users[VIO_MAX_SUBTYPES];
62 HvLpInstanceId mSourceInst;
63 HvLpInstanceId mTargetInst;
64 int numberAllocated;
65} viopathStatus[HVMAXARCHITECTEDLPS];
66
67static DEFINE_SPINLOCK(statuslock);
68
69/*
70 * For each kind of event we allocate a buffer that is
71 * guaranteed not to cross a page boundary
72 */
73static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256]
74 __attribute__((__aligned__(4096)));
75static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
76static int event_buffer_initialised;
77
78static void handleMonitorEvent(struct HvLpEvent *event);
79
80/*
81 * We use this structure to handle asynchronous responses. The caller
82 * blocks on the completion and the handler posts it. However,
83 * if system_state is not SYSTEM_RUNNING, then wait_atomic is used ...
84 */
85struct alloc_parms {
86 struct completion done;
87 int number;
88 atomic_t wait_atomic;
89 int used_wait_atomic;
90};
91
92/* Put a sequence number in each mon msg. The value is not
93 * important. Start at something other than 0 just for
94 * readability. Wrapping this is OK.
95 */
96static u8 viomonseq = 22;
97
98/* Our hosting logical partition. We get this at startup
99 * time, and different modules access this variable directly.
100 */
101HvLpIndex viopath_hostLp = HvLpIndexInvalid;
102EXPORT_SYMBOL(viopath_hostLp);
103HvLpIndex viopath_ourLp = HvLpIndexInvalid;
104EXPORT_SYMBOL(viopath_ourLp);
105
106/* For each kind of incoming event we set a pointer to a
107 * routine to call.
108 */
109static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];
110
111#define VIOPATH_KERN_WARN KERN_WARNING "viopath: "
112#define VIOPATH_KERN_INFO KERN_INFO "viopath: "
113
114static int proc_viopath_show(struct seq_file *m, void *v)
115{
116 char *buf;
117 u16 vlanMap;
118 dma_addr_t handle;
119 HvLpEvent_Rc hvrc;
120 DECLARE_COMPLETION_ONSTACK(done);
121 struct device_node *node;
122 const char *sysid;
123
124 buf = kzalloc(HW_PAGE_SIZE, GFP_KERNEL);
125 if (!buf)
126 return 0;
127
128 handle = iseries_hv_map(buf, HW_PAGE_SIZE, DMA_FROM_DEVICE);
129
130 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
131 HvLpEvent_Type_VirtualIo,
132 viomajorsubtype_config | vioconfigget,
133 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
134 viopath_sourceinst(viopath_hostLp),
135 viopath_targetinst(viopath_hostLp),
136 (u64)(unsigned long)&done, VIOVERSION << 16,
137 ((u64)handle) << 32, HW_PAGE_SIZE, 0, 0);
138
139 if (hvrc != HvLpEvent_Rc_Good)
140 printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);
141
142 wait_for_completion(&done);
143
144 vlanMap = HvLpConfig_getVirtualLanIndexMap();
145
146 buf[HW_PAGE_SIZE-1] = '\0';
147 seq_printf(m, "%s", buf);
148
149 iseries_hv_unmap(handle, HW_PAGE_SIZE, DMA_FROM_DEVICE);
150 kfree(buf);
151
152 seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);
153
154 node = of_find_node_by_path("/");
155 sysid = NULL;
156 if (node != NULL)
157 sysid = of_get_property(node, "system-id", NULL);
158
159 if (sysid == NULL)
160 seq_printf(m, "SRLNBR=<UNKNOWN>\n");
161 else
162 /* Skip "IBM," on front of serial number, see dt.c */
163 seq_printf(m, "SRLNBR=%s\n", sysid + 4);
164
165 of_node_put(node);
166
167 return 0;
168}
169
170static int proc_viopath_open(struct inode *inode, struct file *file)
171{
172 return single_open(file, proc_viopath_show, NULL);
173}
174
175static const struct file_operations proc_viopath_operations = {
176 .open = proc_viopath_open,
177 .read = seq_read,
178 .llseek = seq_lseek,
179 .release = single_release,
180};
181
182static int __init vio_proc_init(void)
183{
184 if (!firmware_has_feature(FW_FEATURE_ISERIES))
185 return 0;
186
187 proc_create("iSeries/config", 0, NULL, &proc_viopath_operations);
188 return 0;
189}
190__initcall(vio_proc_init);
191
192/* See if a given LP is active. Allow for invalid LPs to be passed in
193 * and just report them as inactive.
194 */
195int viopath_isactive(HvLpIndex lp)
196{
197 if (lp == HvLpIndexInvalid)
198 return 0;
199 if (lp < HVMAXARCHITECTEDLPS)
200 return viopathStatus[lp].isActive;
201 else
202 return 0;
203}
204EXPORT_SYMBOL(viopath_isactive);
205
206/*
207 * We cache the source and target instance ids for each
208 * partition.
209 */
210HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
211{
212 return viopathStatus[lp].mSourceInst;
213}
214EXPORT_SYMBOL(viopath_sourceinst);
215
216HvLpInstanceId viopath_targetinst(HvLpIndex lp)
217{
218 return viopathStatus[lp].mTargetInst;
219}
220EXPORT_SYMBOL(viopath_targetinst);
221
222/*
223 * Send a monitor message. This is a message with the acknowledge
224 * bit on that the other side will NOT explicitly acknowledge. When
225 * the other side goes down, the hypervisor will acknowledge any
226 * outstanding messages, so we will know when the other side dies.
227 */
228static void sendMonMsg(HvLpIndex remoteLp)
229{
230 HvLpEvent_Rc hvrc;
231
232 viopathStatus[remoteLp].mSourceInst =
233 HvCallEvent_getSourceLpInstanceId(remoteLp,
234 HvLpEvent_Type_VirtualIo);
235 viopathStatus[remoteLp].mTargetInst =
236 HvCallEvent_getTargetLpInstanceId(remoteLp,
237 HvLpEvent_Type_VirtualIo);
238
239 /*
240	 * Deliberately ignore the return code here. If we call this
241 * more than once, we don't care.
242 */
243 vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);
244
245 hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
246 viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
247 HvLpEvent_AckType_DeferredAck,
248 viopathStatus[remoteLp].mSourceInst,
249 viopathStatus[remoteLp].mTargetInst,
250 viomonseq++, 0, 0, 0, 0, 0);
251
252 if (hvrc == HvLpEvent_Rc_Good)
253 viopathStatus[remoteLp].isActive = 1;
254 else {
255 printk(VIOPATH_KERN_WARN "could not connect to partition %d\n",
256 remoteLp);
257 viopathStatus[remoteLp].isActive = 0;
258 }
259}
260
261static void handleMonitorEvent(struct HvLpEvent *event)
262{
263 HvLpIndex remoteLp;
264 int i;
265
266 /*
267 * This handler is _also_ called as part of the loop
268 * at the end of this routine, so it must be able to
269 * ignore NULL events...
270 */
271 if (!event)
272 return;
273
274 /*
275 * First see if this is just a normal monitor message from the
276 * other partition
277 */
278 if (hvlpevent_is_int(event)) {
279 remoteLp = event->xSourceLp;
280 if (!viopathStatus[remoteLp].isActive)
281 sendMonMsg(remoteLp);
282 return;
283 }
284
285 /*
286 * This path is for an acknowledgement; the other partition
287 * died
288 */
289 remoteLp = event->xTargetLp;
290 if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
291 (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
292		printk(VIOPATH_KERN_WARN "ignoring ack: mismatched instances\n");
293 return;
294 }
295
296 printk(VIOPATH_KERN_WARN "partition %d ended\n", remoteLp);
297
298 viopathStatus[remoteLp].isActive = 0;
299
300 /*
301 * For each active handler, pass them a NULL
302 * message to indicate that the other partition
303 * died
304 */
305 for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
306 if (vio_handler[i] != NULL)
307 (*vio_handler[i])(NULL);
308 }
309}
310
311int vio_setHandler(int subtype, vio_event_handler_t *beh)
312{
313 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
314 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
315 return -EINVAL;
316 if (vio_handler[subtype] != NULL)
317 return -EBUSY;
318 vio_handler[subtype] = beh;
319 return 0;
320}
321EXPORT_SYMBOL(vio_setHandler);
322
323int vio_clearHandler(int subtype)
324{
325 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
326 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
327 return -EINVAL;
328 if (vio_handler[subtype] == NULL)
329 return -EAGAIN;
330 vio_handler[subtype] = NULL;
331 return 0;
332}
333EXPORT_SYMBOL(vio_clearHandler);
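/*
 * Editor's note: a hedged sketch, not in the original source, of the order
 * in which clients such as the viodasd/viocd/viotape probe code use this
 * API: open the path, register a handler, exchange events, then tear the
 * path down again.  example_client_init() and example_handler() are
 * illustrative names; a real client would use its own subtype.
 */
static void example_handler(struct HvLpEvent *event)
{
	if (!event)
		return;		/* NULL means the other partition went away */
	if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
		event->xRc = HvLpEvent_Rc_InvalidSubtype;
		HvCallEvent_ackLpEvent(event);
	}
}

static int example_client_init(void)
{
	int rc;

	rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio, 2);
	if (rc)
		return rc;
	rc = vio_setHandler(viomajorsubtype_blockio, example_handler);
	if (rc) {
		viopath_close(viopath_hostLp, viomajorsubtype_blockio, 2);
		return rc;
	}

	/* ... signal LP events; example_handler sees the acks ... */

	vio_clearHandler(viomajorsubtype_blockio);
	viopath_close(viopath_hostLp, viomajorsubtype_blockio, 2);
	return 0;
}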
334
335static void handleConfig(struct HvLpEvent *event)
336{
337 if (!event)
338 return;
339 if (hvlpevent_is_int(event)) {
340 printk(VIOPATH_KERN_WARN
341			"unexpected config request from partition %d\n",
342 event->xSourceLp);
343
344 if (hvlpevent_need_ack(event)) {
345 event->xRc = HvLpEvent_Rc_InvalidSubtype;
346 HvCallEvent_ackLpEvent(event);
347 }
348 return;
349 }
350
351 complete((struct completion *)event->xCorrelationToken);
352}
353
354/*
355 * Initialization of the hosting partition
356 */
357void vio_set_hostlp(void)
358{
359 /*
360 * If this has already been set then we DON'T want to either change
361 * it or re-register the proc file system
362 */
363 if (viopath_hostLp != HvLpIndexInvalid)
364 return;
365
366 /*
367 * Figure out our hosting partition. This isn't allowed to change
368 * while we're active
369 */
370 viopath_ourLp = HvLpConfig_getLpIndex();
371 viopath_hostLp = HvLpConfig_getHostingLpIndex(viopath_ourLp);
372
373 if (viopath_hostLp != HvLpIndexInvalid)
374 vio_setHandler(viomajorsubtype_config, handleConfig);
375}
376EXPORT_SYMBOL(vio_set_hostlp);
377
378static void vio_handleEvent(struct HvLpEvent *event)
379{
380 HvLpIndex remoteLp;
381 int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
382 >> VIOMAJOR_SUBTYPE_SHIFT;
383
384 if (hvlpevent_is_int(event)) {
385 remoteLp = event->xSourceLp;
386 /*
387 * The isActive is checked because if the hosting partition
388 * went down and came back up it would not be active but it
389 * would have different source and target instances, in which
390 * case we'd want to reset them. This case really protects
391 * against an unauthorized active partition sending interrupts
392 * or acks to this linux partition.
393 */
394 if (viopathStatus[remoteLp].isActive
395 && (event->xSourceInstanceId !=
396 viopathStatus[remoteLp].mTargetInst)) {
397 printk(VIOPATH_KERN_WARN
398 "message from invalid partition. "
399 "int msg rcvd, source inst (%d) doesn't match (%d)\n",
400 viopathStatus[remoteLp].mTargetInst,
401 event->xSourceInstanceId);
402 return;
403 }
404
405 if (viopathStatus[remoteLp].isActive
406 && (event->xTargetInstanceId !=
407 viopathStatus[remoteLp].mSourceInst)) {
408 printk(VIOPATH_KERN_WARN
409 "message from invalid partition. "
410 "int msg rcvd, target inst (%d) doesn't match (%d)\n",
411 viopathStatus[remoteLp].mSourceInst,
412 event->xTargetInstanceId);
413 return;
414 }
415 } else {
416 remoteLp = event->xTargetLp;
417 if (event->xSourceInstanceId !=
418 viopathStatus[remoteLp].mSourceInst) {
419 printk(VIOPATH_KERN_WARN
420 "message from invalid partition. "
421 "ack msg rcvd, source inst (%d) doesn't match (%d)\n",
422 viopathStatus[remoteLp].mSourceInst,
423 event->xSourceInstanceId);
424 return;
425 }
426
427 if (event->xTargetInstanceId !=
428 viopathStatus[remoteLp].mTargetInst) {
429 printk(VIOPATH_KERN_WARN
430 "message from invalid partition. "
431				"ack msg rcvd, target inst (%d) doesn't match (%d)\n",
432 viopathStatus[remoteLp].mTargetInst,
433 event->xTargetInstanceId);
434 return;
435 }
436 }
437
438 if (vio_handler[subtype] == NULL) {
439 printk(VIOPATH_KERN_WARN
440 "unexpected virtual io event subtype %d from partition %d\n",
441 event->xSubtype, remoteLp);
442 /* No handler. Ack if necessary */
443 if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
444 event->xRc = HvLpEvent_Rc_InvalidSubtype;
445 HvCallEvent_ackLpEvent(event);
446 }
447 return;
448 }
449
450 /* This innocuous little line is where all the real work happens */
451 (*vio_handler[subtype])(event);
452}
453
454static void viopath_donealloc(void *parm, int number)
455{
456 struct alloc_parms *parmsp = parm;
457
458 parmsp->number = number;
459 if (parmsp->used_wait_atomic)
460 atomic_set(&parmsp->wait_atomic, 0);
461 else
462 complete(&parmsp->done);
463}
464
465static int allocateEvents(HvLpIndex remoteLp, int numEvents)
466{
467 struct alloc_parms parms;
468
469 if (system_state != SYSTEM_RUNNING) {
470 parms.used_wait_atomic = 1;
471 atomic_set(&parms.wait_atomic, 1);
472 } else {
473 parms.used_wait_atomic = 0;
474 init_completion(&parms.done);
475 }
476 mf_allocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo, 250, /* It would be nice to put a real number here! */
477 numEvents, &viopath_donealloc, &parms);
478 if (system_state != SYSTEM_RUNNING) {
479 while (atomic_read(&parms.wait_atomic))
480 mb();
481 } else
482 wait_for_completion(&parms.done);
483 return parms.number;
484}
485
486int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
487{
488 int i;
489 unsigned long flags;
490 int tempNumAllocated;
491
492 if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
493 return -EINVAL;
494
495 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
496 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
497 return -EINVAL;
498
499 spin_lock_irqsave(&statuslock, flags);
500
501 if (!event_buffer_initialised) {
502 for (i = 0; i < VIO_MAX_SUBTYPES; i++)
503 atomic_set(&event_buffer_available[i], 1);
504 event_buffer_initialised = 1;
505 }
506
507 viopathStatus[remoteLp].users[subtype]++;
508
509 if (!viopathStatus[remoteLp].isOpen) {
510 viopathStatus[remoteLp].isOpen = 1;
511 HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);
512
513 /*
514 * Don't hold the spinlock during an operation that
515 * can sleep.
516 */
517 spin_unlock_irqrestore(&statuslock, flags);
518 tempNumAllocated = allocateEvents(remoteLp, 1);
519 spin_lock_irqsave(&statuslock, flags);
520
521 viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
522
523 if (viopathStatus[remoteLp].numberAllocated == 0) {
524 HvCallEvent_closeLpEventPath(remoteLp,
525 HvLpEvent_Type_VirtualIo);
526
527 spin_unlock_irqrestore(&statuslock, flags);
528 return -ENOMEM;
529 }
530
531 viopathStatus[remoteLp].mSourceInst =
532 HvCallEvent_getSourceLpInstanceId(remoteLp,
533 HvLpEvent_Type_VirtualIo);
534 viopathStatus[remoteLp].mTargetInst =
535 HvCallEvent_getTargetLpInstanceId(remoteLp,
536 HvLpEvent_Type_VirtualIo);
537 HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
538 &vio_handleEvent);
539 sendMonMsg(remoteLp);
540 printk(VIOPATH_KERN_INFO "opening connection to partition %d, "
541 "setting sinst %d, tinst %d\n",
542 remoteLp, viopathStatus[remoteLp].mSourceInst,
543 viopathStatus[remoteLp].mTargetInst);
544 }
545
546 spin_unlock_irqrestore(&statuslock, flags);
547 tempNumAllocated = allocateEvents(remoteLp, numReq);
548 spin_lock_irqsave(&statuslock, flags);
549 viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
550 spin_unlock_irqrestore(&statuslock, flags);
551
552 return 0;
553}
554EXPORT_SYMBOL(viopath_open);
555
556int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
557{
558 unsigned long flags;
559 int i;
560 int numOpen;
561 struct alloc_parms parms;
562
563 if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
564 return -EINVAL;
565
566 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
567 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
568 return -EINVAL;
569
570 spin_lock_irqsave(&statuslock, flags);
571 /*
572	 * If viopath_close somehow gets called before viopath_open,
573	 * the user count could decrement to -1, which is a
574	 * non-recoverable state, so prevent that from
575	 * happening.
576 */
577 if (viopathStatus[remoteLp].users[subtype] > 0)
578 viopathStatus[remoteLp].users[subtype]--;
579
580 spin_unlock_irqrestore(&statuslock, flags);
581
582 parms.used_wait_atomic = 0;
583 init_completion(&parms.done);
584 mf_deallocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo,
585 numReq, &viopath_donealloc, &parms);
586 wait_for_completion(&parms.done);
587
588 spin_lock_irqsave(&statuslock, flags);
589 for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
590 numOpen += viopathStatus[remoteLp].users[i];
591
592 if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
593 printk(VIOPATH_KERN_INFO "closing connection to partition %d\n",
594 remoteLp);
595
596 HvCallEvent_closeLpEventPath(remoteLp,
597 HvLpEvent_Type_VirtualIo);
598 viopathStatus[remoteLp].isOpen = 0;
599 viopathStatus[remoteLp].isActive = 0;
600
601 for (i = 0; i < VIO_MAX_SUBTYPES; i++)
602 atomic_set(&event_buffer_available[i], 0);
603 event_buffer_initialised = 0;
604 }
605 spin_unlock_irqrestore(&statuslock, flags);
606 return 0;
607}
608EXPORT_SYMBOL(viopath_close);
609
610void *vio_get_event_buffer(int subtype)
611{
612 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
613 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
614 return NULL;
615
616 if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
617 return &event_buffer[subtype * 256];
618 else
619 return NULL;
620}
621EXPORT_SYMBOL(vio_get_event_buffer);
622
623void vio_free_event_buffer(int subtype, void *buffer)
624{
625 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
626 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
627 printk(VIOPATH_KERN_WARN
628 "unexpected subtype %d freeing event buffer\n", subtype);
629 return;
630 }
631
632 if (atomic_read(&event_buffer_available[subtype]) != 0) {
633 printk(VIOPATH_KERN_WARN
634 "freeing unallocated event buffer, subtype %d\n",
635 subtype);
636 return;
637 }
638
639 if (buffer != &event_buffer[subtype * 256]) {
640 printk(VIOPATH_KERN_WARN
641 "freeing invalid event buffer, subtype %d\n", subtype);
642 }
643
644 atomic_set(&event_buffer_available[subtype], 1);
645}
646EXPORT_SYMBOL(vio_free_event_buffer);
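/*
 * Editor's note: a minimal usage sketch, not part of the original file.
 * Each subtype owns exactly one 256-byte slot of event_buffer, so callers
 * take the slot, use it for a single in-flight event, and hand it back.
 * example_use_event_buffer() is an illustrative name only.
 */
static int example_use_event_buffer(void)
{
	void *buf = vio_get_event_buffer(viomajorsubtype_config);

	if (!buf)
		return -EBUSY;	/* the subtype's single slot is in use */

	/* ... copy or build one LP event in the 256-byte buffer ... */

	vio_free_event_buffer(viomajorsubtype_config, buf);
	return 0;
}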
647
648static const struct vio_error_entry vio_no_error =
649 { 0, 0, "Non-VIO Error" };
650static const struct vio_error_entry vio_unknown_error =
651 { 0, EIO, "Unknown Error" };
652
653static const struct vio_error_entry vio_default_errors[] = {
654 {0x0001, EIO, "No Connection"},
655 {0x0002, EIO, "No Receiver"},
656 {0x0003, EIO, "No Buffer Available"},
657 {0x0004, EBADRQC, "Invalid Message Type"},
658 {0x0000, 0, NULL},
659};
660
661const struct vio_error_entry *vio_lookup_rc(
662 const struct vio_error_entry *local_table, u16 rc)
663{
664 const struct vio_error_entry *cur;
665
666 if (!rc)
667 return &vio_no_error;
668 if (local_table)
669 for (cur = local_table; cur->rc; ++cur)
670 if (cur->rc == rc)
671 return cur;
672 for (cur = vio_default_errors; cur->rc; ++cur)
673 if (cur->rc == rc)
674 return cur;
675 return &vio_unknown_error;
676}
677EXPORT_SYMBOL(vio_lookup_rc);
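/*
 * Editor's note: a hedged sketch, not in the original file, of how a client
 * maps a sub_result from a failed request through vio_lookup_rc().  The
 * device-specific table and example_map_sub_result() are illustrative, and
 * only the ->rc member is touched here because the remaining members of
 * struct vio_error_entry are defined in <asm/iseries/vio.h>.
 */
static const struct vio_error_entry example_errors[] = {
	{0x0101, EIO, "Device not available"},	/* device-specific code */
	{0x0000, 0, NULL},
};

static int example_map_sub_result(u16 sub_result)
{
	const struct vio_error_entry *err;

	if (!sub_result)
		return 0;
	err = vio_lookup_rc(example_errors, sub_result);
	(void)err;	/* a real driver would log the entry's message text */
	return -EIO;
}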
diff --git a/arch/powerpc/platforms/iseries/vpd_areas.h b/arch/powerpc/platforms/iseries/vpd_areas.h
deleted file mode 100644
index feb001f3a5fe..000000000000
--- a/arch/powerpc/platforms/iseries/vpd_areas.h
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_VPD_AREAS_H
19#define _ISERIES_VPD_AREAS_H
20
21/*
22 * This file defines the address and length of all of the VPD area passed to
23 * the OS from PLIC (most of which start from the SP).
24 */
25
26#include <asm/types.h>
27
28/* VPD Entry index is carved in stone - cannot be changed (easily). */
29#define ItVpdCecVpd 0
30#define ItVpdDynamicSpace 1
31#define ItVpdExtVpd 2
32#define ItVpdExtVpdOnPanel 3
33#define ItVpdFirstPaca 4
34#define ItVpdIoVpd 5
35#define ItVpdIplParms 6
36#define ItVpdMsVpd 7
37#define ItVpdPanelVpd 8
38#define ItVpdLpNaca 9
39#define ItVpdBackplaneAndMaybeClockCardVpd 10
40#define ItVpdRecoveryLogBuffer 11
41#define ItVpdSpCommArea 12
42#define ItVpdSpLogBuffer 13
43#define ItVpdSpLogBufferSave 14
44#define ItVpdSpCardVpd 15
45#define ItVpdFirstProcVpd 16
46#define ItVpdApModelVpd 17
47#define ItVpdClockCardVpd 18
48#define ItVpdBusExtCardVpd 19
49#define ItVpdProcCapacityVpd 20
50#define ItVpdInteractiveCapacityVpd 21
51#define ItVpdFirstSlotLabel 22
52#define ItVpdFirstLpQueue 23
53#define ItVpdFirstL3CacheVpd 24
54#define ItVpdFirstProcFruVpd 25
55
56#define ItVpdMaxEntries 26
57
58#define ItDmaMaxEntries 10
59
60#define ItVpdAreasMaxSlotLabels 192
61
62
63struct ItVpdAreas {
64 u32 xSlicDesc; // Descriptor 000-003
65 u16 xSlicSize; // Size of this control block 004-005
66	u16	xPlicAdjustVpdLens:1;	// Flag to indicate new interface 006-007
67 u16 xRsvd1:15; // Reserved bits ...
68 u16 xSlicVpdEntries; // Number of VPD entries 008-009
69 u16 xSlicDmaEntries; // Number of DMA entries 00A-00B
70 u16 xSlicMaxLogicalProcs; // Maximum logical processors 00C-00D
71 u16 xSlicMaxPhysicalProcs; // Maximum physical processors 00E-00F
72 u16 xSlicDmaToksOffset; // Offset into this of array 010-011
73 u16 xSlicVpdAdrsOffset; // Offset into this of array 012-013
74 u16 xSlicDmaLensOffset; // Offset into this of array 014-015
75 u16 xSlicVpdLensOffset; // Offset into this of array 016-017
76	u16	xSlicMaxSlotLabels;	// Maximum number of slot labels 018-019
77 u16 xSlicMaxLpQueues; // Maximum number of LP Queues 01A-01B
78 u8 xRsvd2[4]; // Reserved 01C-01F
79 u64 xRsvd3[12]; // Reserved 020-07F
80 u32 xPlicDmaLens[ItDmaMaxEntries];// Array of DMA lengths 080-0A7
81 u32 xPlicDmaToks[ItDmaMaxEntries];// Array of DMA tokens 0A8-0CF
82 u32 xSlicVpdLens[ItVpdMaxEntries];// Array of VPD lengths 0D0-12F
83 const void *xSlicVpdAdrs[ItVpdMaxEntries];// Array of VPD buffers 130-1EF
84};
85
86extern const struct ItVpdAreas itVpdAreas;
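/*
 * Editor's note: a hedged sketch, not in the original header, showing how
 * the parallel xSlicVpdLens/xSlicVpdAdrs arrays are indexed by the ItVpd*
 * constants above.  get_vpd_area() is an illustrative name only.
 */
static inline const void *get_vpd_area(int index, u32 *len)
{
	if (index < 0 || index >= itVpdAreas.xSlicVpdEntries)
		return NULL;
	*len = itVpdAreas.xSlicVpdLens[index];
	return itVpdAreas.xSlicVpdAdrs[index];
}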
87
88#endif /* _ISERIES_VPD_AREAS_H */