path: root/arch/powerpc/platforms/iseries
author    Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-22 10:38:37 -0500
committer Jonathan Herman <hermanjl@cs.unc.edu>  2013-01-22 10:38:37 -0500
commit    fcc9d2e5a6c89d22b8b773a64fb4ad21ac318446 (patch)
tree      a57612d1888735a2ec7972891b68c1ac5ec8faea /arch/powerpc/platforms/iseries
parent    8dea78da5cee153b8af9c07a2745f6c55057fe12 (diff)
Added missing tegra files.
Diffstat (limited to 'arch/powerpc/platforms/iseries')
-rw-r--r-- arch/powerpc/platforms/iseries/Kconfig              |   38
-rw-r--r-- arch/powerpc/platforms/iseries/Makefile             |    9
-rw-r--r-- arch/powerpc/platforms/iseries/call_hpt.h           |  102
-rw-r--r-- arch/powerpc/platforms/iseries/call_pci.h           |  309
-rw-r--r-- arch/powerpc/platforms/iseries/call_sm.h            |   37
-rw-r--r-- arch/powerpc/platforms/iseries/dt.c                 |  643
-rw-r--r-- arch/powerpc/platforms/iseries/exception.S          |  311
-rw-r--r-- arch/powerpc/platforms/iseries/exception.h          |   58
-rw-r--r-- arch/powerpc/platforms/iseries/htab.c               |  257
-rw-r--r-- arch/powerpc/platforms/iseries/hvcall.S             |   94
-rw-r--r-- arch/powerpc/platforms/iseries/hvlog.c              |   35
-rw-r--r-- arch/powerpc/platforms/iseries/hvlpconfig.c         |   39
-rw-r--r-- arch/powerpc/platforms/iseries/iommu.c              |  260
-rw-r--r-- arch/powerpc/platforms/iseries/ipl_parms.h          |   68
-rw-r--r-- arch/powerpc/platforms/iseries/irq.c                |  400
-rw-r--r-- arch/powerpc/platforms/iseries/irq.h                |   13
-rw-r--r-- arch/powerpc/platforms/iseries/it_exp_vpd_panel.h   |   51
-rw-r--r-- arch/powerpc/platforms/iseries/it_lp_naca.h         |   80
-rw-r--r-- arch/powerpc/platforms/iseries/ksyms.c              |   21
-rw-r--r-- arch/powerpc/platforms/iseries/lpardata.c           |  319
-rw-r--r-- arch/powerpc/platforms/iseries/lpevents.c           |  341
-rw-r--r-- arch/powerpc/platforms/iseries/main_store.h         |  165
-rw-r--r-- arch/powerpc/platforms/iseries/mf.c                 | 1274
-rw-r--r-- arch/powerpc/platforms/iseries/misc.S               |   26
-rw-r--r-- arch/powerpc/platforms/iseries/naca.h               |   24
-rw-r--r-- arch/powerpc/platforms/iseries/pci.c                |  920
-rw-r--r-- arch/powerpc/platforms/iseries/pci.h                |   58
-rw-r--r-- arch/powerpc/platforms/iseries/proc.c               |  120
-rw-r--r-- arch/powerpc/platforms/iseries/processor_vpd.h      |   85
-rw-r--r-- arch/powerpc/platforms/iseries/release_data.h       |   63
-rw-r--r-- arch/powerpc/platforms/iseries/setup.c              |  717
-rw-r--r-- arch/powerpc/platforms/iseries/setup.h              |   27
-rw-r--r-- arch/powerpc/platforms/iseries/smp.c                |   89
-rw-r--r-- arch/powerpc/platforms/iseries/spcomm_area.h        |   34
-rw-r--r-- arch/powerpc/platforms/iseries/vio.c                |  556
-rw-r--r-- arch/powerpc/platforms/iseries/viopath.c            |  677
-rw-r--r-- arch/powerpc/platforms/iseries/vpd_areas.h          |   88
37 files changed, 8408 insertions, 0 deletions
diff --git a/arch/powerpc/platforms/iseries/Kconfig b/arch/powerpc/platforms/iseries/Kconfig
new file mode 100644
index 00000000000..b57cda3a081
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/Kconfig
@@ -0,0 +1,38 @@
1config PPC_ISERIES
2 bool "IBM Legacy iSeries"
3 depends on PPC64 && PPC_BOOK3S
4 select PPC_SMP_MUXED_IPI
5 select PPC_INDIRECT_PIO
6 select PPC_INDIRECT_MMIO
7 select PPC_PCI_CHOICE if EXPERT
8
9menu "iSeries device drivers"
10 depends on PPC_ISERIES
11
12config VIODASD
13 tristate "iSeries Virtual I/O disk support"
14 depends on BLOCK
15 select VIOPATH
16 help
17 If you are running on an iSeries system and you want to use
18 virtual disks created and managed by OS/400, say Y.
19
20config VIOCD
21 tristate "iSeries Virtual I/O CD support"
22 depends on BLOCK
23 select VIOPATH
24 help
25 If you are running Linux on an IBM iSeries system and you want to
26 read a CD drive owned by OS/400, say Y here.
27
28config VIOTAPE
29 tristate "iSeries Virtual Tape Support"
30 select VIOPATH
31 help
32 If you are running Linux on an iSeries system and you want Linux
33 to read and/or write a tape drive owned by OS/400, say Y here.
34
35endmenu
36
37config VIOPATH
38 bool
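
[Editor's note: the select lines above mean that enabling any of the three virtual I/O drivers implicitly turns on VIOPATH. A hypothetical .config fragment for an iSeries build with all of them enabled (illustrative only, not part of this patch) would therefore end up as:

    CONFIG_PPC_ISERIES=y
    CONFIG_VIODASD=m
    CONFIG_VIOCD=m
    CONFIG_VIOTAPE=m
    CONFIG_VIOPATH=y
]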
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
new file mode 100644
index 00000000000..a7602b11ed9
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/Makefile
@@ -0,0 +1,9 @@
1ccflags-y := -mno-minimal-toc
2
3obj-y += exception.o
4obj-y += hvlog.o hvlpconfig.o lpardata.o setup.o dt.o mf.o lpevents.o \
5 hvcall.o proc.o htab.o iommu.o misc.o irq.o
6obj-$(CONFIG_PCI) += pci.o
7obj-$(CONFIG_SMP) += smp.o
8obj-$(CONFIG_VIOPATH) += viopath.o vio.o
9obj-$(CONFIG_MODULES) += ksyms.o
diff --git a/arch/powerpc/platforms/iseries/call_hpt.h b/arch/powerpc/platforms/iseries/call_hpt.h
new file mode 100644
index 00000000000..8d95fe4b554
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/call_hpt.h
@@ -0,0 +1,102 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _PLATFORMS_ISERIES_CALL_HPT_H
19#define _PLATFORMS_ISERIES_CALL_HPT_H
20
21/*
22 * This file contains the "hypervisor call" interface which is used to
23 * drive the hypervisor from the OS.
24 */
25
26#include <asm/iseries/hv_call_sc.h>
27#include <asm/iseries/hv_types.h>
28#include <asm/mmu.h>
29
30#define HvCallHptGetHptAddress HvCallHpt + 0
31#define HvCallHptGetHptPages HvCallHpt + 1
32#define HvCallHptSetPp HvCallHpt + 5
33#define HvCallHptSetSwBits HvCallHpt + 6
34#define HvCallHptUpdate HvCallHpt + 7
35#define HvCallHptInvalidateNoSyncICache HvCallHpt + 8
36#define HvCallHptGet HvCallHpt + 11
37#define HvCallHptFindNextValid HvCallHpt + 12
38#define HvCallHptFindValid HvCallHpt + 13
39#define HvCallHptAddValidate HvCallHpt + 16
40#define HvCallHptInvalidateSetSwBitsGet HvCallHpt + 18
41
42
43static inline u64 HvCallHpt_getHptAddress(void)
44{
45 return HvCall0(HvCallHptGetHptAddress);
46}
47
48static inline u64 HvCallHpt_getHptPages(void)
49{
50 return HvCall0(HvCallHptGetHptPages);
51}
52
53static inline void HvCallHpt_setPp(u32 hpteIndex, u8 value)
54{
55 HvCall2(HvCallHptSetPp, hpteIndex, value);
56}
57
58static inline void HvCallHpt_setSwBits(u32 hpteIndex, u8 bitson, u8 bitsoff)
59{
60 HvCall3(HvCallHptSetSwBits, hpteIndex, bitson, bitsoff);
61}
62
63static inline void HvCallHpt_invalidateNoSyncICache(u32 hpteIndex)
64{
65 HvCall1(HvCallHptInvalidateNoSyncICache, hpteIndex);
66}
67
68static inline u64 HvCallHpt_invalidateSetSwBitsGet(u32 hpteIndex, u8 bitson,
69 u8 bitsoff)
70{
71 u64 compressedStatus;
72
73 compressedStatus = HvCall4(HvCallHptInvalidateSetSwBitsGet,
74 hpteIndex, bitson, bitsoff, 1);
75 HvCall1(HvCallHptInvalidateNoSyncICache, hpteIndex);
76 return compressedStatus;
77}
78
79static inline u64 HvCallHpt_findValid(struct hash_pte *hpte, u64 vpn)
80{
81 return HvCall3Ret16(HvCallHptFindValid, hpte, vpn, 0, 0);
82}
83
84static inline u64 HvCallHpt_findNextValid(struct hash_pte *hpte, u32 hpteIndex,
85 u8 bitson, u8 bitsoff)
86{
87 return HvCall3Ret16(HvCallHptFindNextValid, hpte, hpteIndex,
88 bitson, bitsoff);
89}
90
91static inline void HvCallHpt_get(struct hash_pte *hpte, u32 hpteIndex)
92{
93 HvCall2Ret16(HvCallHptGet, hpte, hpteIndex, 0);
94}
95
96static inline void HvCallHpt_addValidate(u32 hpteIndex, u32 hBit,
97 struct hash_pte *hpte)
98{
99 HvCall4(HvCallHptAddValidate, hpteIndex, hBit, hpte->v, hpte->r);
100}
101
102#endif /* _PLATFORMS_ISERIES_CALL_HPT_H */
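
[Editor's note: dt.c later in this patch uses these wrappers when it reports the hash table size to the kernel. A minimal sketch of that pattern, assuming a helper name that is illustrative rather than taken from the patch:

    #include "call_hpt.h"

    /* Size of the hashed page table in bytes: HvCallHpt_getHptPages()
     * returns the number of hardware pages backing the HPT. */
    static unsigned long hpt_size_bytes(void)
    {
            return HvCallHpt_getHptPages() * HW_PAGE_SIZE;
    }
]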
diff --git a/arch/powerpc/platforms/iseries/call_pci.h b/arch/powerpc/platforms/iseries/call_pci.h
new file mode 100644
index 00000000000..dbdf69850ed
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/call_pci.h
@@ -0,0 +1,309 @@
1/*
2 * Provides the Hypervisor PCI calls for iSeries Linux Partition.
3 * Copyright (C) 2001 <Wayne G Holm> <IBM Corporation>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the:
17 * Free Software Foundation, Inc.,
18 * 59 Temple Place, Suite 330,
19 * Boston, MA 02111-1307 USA
20 *
21 * Change Activity:
22 * Created, Jan 9, 2001
23 */
24
25#ifndef _PLATFORMS_ISERIES_CALL_PCI_H
26#define _PLATFORMS_ISERIES_CALL_PCI_H
27
28#include <asm/iseries/hv_call_sc.h>
29#include <asm/iseries/hv_types.h>
30
31/*
32 * DSA == Direct Select Address
33 * this struct must be 64 bits in total
34 */
35struct HvCallPci_DsaAddr {
36 u16 busNumber; /* PHB index? */
37 u8 subBusNumber; /* PCI bus number? */
38 u8 deviceId; /* device and function? */
39 u8 barNumber;
40 u8 reserved[3];
41};
42
43union HvDsaMap {
44 u64 DsaAddr;
45 struct HvCallPci_DsaAddr Dsa;
46};
47
48struct HvCallPci_LoadReturn {
49 u64 rc;
50 u64 value;
51};
52
53enum HvCallPci_DeviceType {
54 HvCallPci_NodeDevice = 1,
55 HvCallPci_SpDevice = 2,
56 HvCallPci_IopDevice = 3,
57 HvCallPci_BridgeDevice = 4,
58 HvCallPci_MultiFunctionDevice = 5,
59 HvCallPci_IoaDevice = 6
60};
61
62
63struct HvCallPci_DeviceInfo {
64 u32 deviceType; /* See DeviceType enum for values */
65};
66
67struct HvCallPci_BusUnitInfo {
68 u32 sizeReturned; /* length of data returned */
69 u32 deviceType; /* see DeviceType enum for values */
70};
71
72struct HvCallPci_BridgeInfo {
73 struct HvCallPci_BusUnitInfo busUnitInfo; /* Generic bus unit info */
74 u8 subBusNumber; /* Bus number of secondary bus */
75 u8 maxAgents; /* Max idsels on secondary bus */
76 u8 maxSubBusNumber; /* Max Sub Bus */
77 u8 logicalSlotNumber; /* Logical Slot Number for IOA */
78};
79
80
81/*
82 * Maximum BusUnitInfo buffer size. Provided for clients so
83 * they can allocate a buffer big enough for any type of bus
84 * unit. Increase as needed.
85 */
86enum {HvCallPci_MaxBusUnitInfoSize = 128};
87
88struct HvCallPci_BarParms {
89 u64 vaddr;
90 u64 raddr;
91 u64 size;
92 u64 protectStart;
93 u64 protectEnd;
94 u64 relocationOffset;
95 u64 pciAddress;
96 u64 reserved[3];
97};
98
99enum HvCallPci_VpdType {
100 HvCallPci_BusVpd = 1,
101 HvCallPci_BusAdapterVpd = 2
102};
103
104#define HvCallPciConfigLoad8 HvCallPci + 0
105#define HvCallPciConfigLoad16 HvCallPci + 1
106#define HvCallPciConfigLoad32 HvCallPci + 2
107#define HvCallPciConfigStore8 HvCallPci + 3
108#define HvCallPciConfigStore16 HvCallPci + 4
109#define HvCallPciConfigStore32 HvCallPci + 5
110#define HvCallPciEoi HvCallPci + 16
111#define HvCallPciGetBarParms HvCallPci + 18
112#define HvCallPciMaskFisr HvCallPci + 20
113#define HvCallPciUnmaskFisr HvCallPci + 21
114#define HvCallPciSetSlotReset HvCallPci + 25
115#define HvCallPciGetDeviceInfo HvCallPci + 27
116#define HvCallPciGetCardVpd HvCallPci + 28
117#define HvCallPciBarLoad8 HvCallPci + 40
118#define HvCallPciBarLoad16 HvCallPci + 41
119#define HvCallPciBarLoad32 HvCallPci + 42
120#define HvCallPciBarLoad64 HvCallPci + 43
121#define HvCallPciBarStore8 HvCallPci + 44
122#define HvCallPciBarStore16 HvCallPci + 45
123#define HvCallPciBarStore32 HvCallPci + 46
124#define HvCallPciBarStore64 HvCallPci + 47
125#define HvCallPciMaskInterrupts HvCallPci + 48
126#define HvCallPciUnmaskInterrupts HvCallPci + 49
127#define HvCallPciGetBusUnitInfo HvCallPci + 50
128
129static inline u64 HvCallPci_configLoad16(u16 busNumber, u8 subBusNumber,
130 u8 deviceId, u32 offset, u16 *value)
131{
132 struct HvCallPci_DsaAddr dsa;
133 struct HvCallPci_LoadReturn retVal;
134
135 *((u64*)&dsa) = 0;
136
137 dsa.busNumber = busNumber;
138 dsa.subBusNumber = subBusNumber;
139 dsa.deviceId = deviceId;
140
141 HvCall3Ret16(HvCallPciConfigLoad16, &retVal, *(u64 *)&dsa, offset, 0);
142
143 *value = retVal.value;
144
145 return retVal.rc;
146}
147
148static inline u64 HvCallPci_configLoad32(u16 busNumber, u8 subBusNumber,
149 u8 deviceId, u32 offset, u32 *value)
150{
151 struct HvCallPci_DsaAddr dsa;
152 struct HvCallPci_LoadReturn retVal;
153
154 *((u64*)&dsa) = 0;
155
156 dsa.busNumber = busNumber;
157 dsa.subBusNumber = subBusNumber;
158 dsa.deviceId = deviceId;
159
160 HvCall3Ret16(HvCallPciConfigLoad32, &retVal, *(u64 *)&dsa, offset, 0);
161
162 *value = retVal.value;
163
164 return retVal.rc;
165}
166
167static inline u64 HvCallPci_configStore8(u16 busNumber, u8 subBusNumber,
168 u8 deviceId, u32 offset, u8 value)
169{
170 struct HvCallPci_DsaAddr dsa;
171
172 *((u64*)&dsa) = 0;
173
174 dsa.busNumber = busNumber;
175 dsa.subBusNumber = subBusNumber;
176 dsa.deviceId = deviceId;
177
178 return HvCall4(HvCallPciConfigStore8, *(u64 *)&dsa, offset, value, 0);
179}
180
181static inline u64 HvCallPci_eoi(u16 busNumberParm, u8 subBusParm,
182 u8 deviceIdParm)
183{
184 struct HvCallPci_DsaAddr dsa;
185 struct HvCallPci_LoadReturn retVal;
186
187 *((u64*)&dsa) = 0;
188
189 dsa.busNumber = busNumberParm;
190 dsa.subBusNumber = subBusParm;
191 dsa.deviceId = deviceIdParm;
192
193 HvCall1Ret16(HvCallPciEoi, &retVal, *(u64*)&dsa);
194
195 return retVal.rc;
196}
197
198static inline u64 HvCallPci_getBarParms(u16 busNumberParm, u8 subBusParm,
199 u8 deviceIdParm, u8 barNumberParm, u64 parms, u32 sizeofParms)
200{
201 struct HvCallPci_DsaAddr dsa;
202
203 *((u64*)&dsa) = 0;
204
205 dsa.busNumber = busNumberParm;
206 dsa.subBusNumber = subBusParm;
207 dsa.deviceId = deviceIdParm;
208 dsa.barNumber = barNumberParm;
209
210 return HvCall3(HvCallPciGetBarParms, *(u64*)&dsa, parms, sizeofParms);
211}
212
213static inline u64 HvCallPci_maskFisr(u16 busNumberParm, u8 subBusParm,
214 u8 deviceIdParm, u64 fisrMask)
215{
216 struct HvCallPci_DsaAddr dsa;
217
218 *((u64*)&dsa) = 0;
219
220 dsa.busNumber = busNumberParm;
221 dsa.subBusNumber = subBusParm;
222 dsa.deviceId = deviceIdParm;
223
224 return HvCall2(HvCallPciMaskFisr, *(u64*)&dsa, fisrMask);
225}
226
227static inline u64 HvCallPci_unmaskFisr(u16 busNumberParm, u8 subBusParm,
228 u8 deviceIdParm, u64 fisrMask)
229{
230 struct HvCallPci_DsaAddr dsa;
231
232 *((u64*)&dsa) = 0;
233
234 dsa.busNumber = busNumberParm;
235 dsa.subBusNumber = subBusParm;
236 dsa.deviceId = deviceIdParm;
237
238 return HvCall2(HvCallPciUnmaskFisr, *(u64*)&dsa, fisrMask);
239}
240
241static inline u64 HvCallPci_getDeviceInfo(u16 busNumberParm, u8 subBusParm,
242 u8 deviceNumberParm, u64 parms, u32 sizeofParms)
243{
244 struct HvCallPci_DsaAddr dsa;
245
246 *((u64*)&dsa) = 0;
247
248 dsa.busNumber = busNumberParm;
249 dsa.subBusNumber = subBusParm;
250 dsa.deviceId = deviceNumberParm << 4;
251
252 return HvCall3(HvCallPciGetDeviceInfo, *(u64*)&dsa, parms, sizeofParms);
253}
254
255static inline u64 HvCallPci_maskInterrupts(u16 busNumberParm, u8 subBusParm,
256 u8 deviceIdParm, u64 interruptMask)
257{
258 struct HvCallPci_DsaAddr dsa;
259
260 *((u64*)&dsa) = 0;
261
262 dsa.busNumber = busNumberParm;
263 dsa.subBusNumber = subBusParm;
264 dsa.deviceId = deviceIdParm;
265
266 return HvCall2(HvCallPciMaskInterrupts, *(u64*)&dsa, interruptMask);
267}
268
269static inline u64 HvCallPci_unmaskInterrupts(u16 busNumberParm, u8 subBusParm,
270 u8 deviceIdParm, u64 interruptMask)
271{
272 struct HvCallPci_DsaAddr dsa;
273
274 *((u64*)&dsa) = 0;
275
276 dsa.busNumber = busNumberParm;
277 dsa.subBusNumber = subBusParm;
278 dsa.deviceId = deviceIdParm;
279
280 return HvCall2(HvCallPciUnmaskInterrupts, *(u64*)&dsa, interruptMask);
281}
282
283static inline u64 HvCallPci_getBusUnitInfo(u16 busNumberParm, u8 subBusParm,
284 u8 deviceIdParm, u64 parms, u32 sizeofParms)
285{
286 struct HvCallPci_DsaAddr dsa;
287
288 *((u64*)&dsa) = 0;
289
290 dsa.busNumber = busNumberParm;
291 dsa.subBusNumber = subBusParm;
292 dsa.deviceId = deviceIdParm;
293
294 return HvCall3(HvCallPciGetBusUnitInfo, *(u64*)&dsa, parms,
295 sizeofParms);
296}
297
298static inline int HvCallPci_getBusVpd(u16 busNumParm, u64 destParm,
299 u16 sizeParm)
300{
301 u64 xRc = HvCall4(HvCallPciGetCardVpd, busNumParm, destParm,
302 sizeParm, HvCallPci_BusVpd);
303 if (xRc == -1)
304 return -1;
305 else
306 return xRc & 0xFFFF;
307}
308
309#endif /* _PLATFORMS_ISERIES_CALL_PCI_H */
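
[Editor's note: the load/store helpers all follow the same shape — zero a DSA, fill in bus/sub-bus/agent, make the call, and check the returned rc. dt.c later in this patch reads the PCI vendor ID exactly this way; a trimmed sketch of that call site:

    u16 vendor_id;
    int err;

    err = HvCallPci_configLoad16(bus, sub_bus, agent_id,
                                 PCI_VENDOR_ID, &vendor_id);
    if (err)
            DBG("ReadVendor(%x, %x, %x) %x\n", bus, sub_bus, agent_id, err);
]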
diff --git a/arch/powerpc/platforms/iseries/call_sm.h b/arch/powerpc/platforms/iseries/call_sm.h
new file mode 100644
index 00000000000..c7e251619f4
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/call_sm.h
@@ -0,0 +1,37 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_CALL_SM_H
19#define _ISERIES_CALL_SM_H
20
21/*
22 * This file contains the "hypervisor call" interface which is used to
23 * drive the hypervisor from the OS.
24 */
25
26#include <asm/iseries/hv_call_sc.h>
27#include <asm/iseries/hv_types.h>
28
29#define HvCallSmGet64BitsOfAccessMap HvCallSm + 11
30
31static inline u64 HvCallSm_get64BitsOfAccessMap(HvLpIndex lpIndex,
32 u64 indexIntoBitMap)
33{
34 return HvCall2(HvCallSmGet64BitsOfAccessMap, lpIndex, indexIntoBitMap);
35}
36
37#endif /* _ISERIES_CALL_SM_H */
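
[Editor's note: the single wrapper above fetches the main-store access bitmap 64 bits at a time. A hedged sketch of a caller walking the whole map — the loop bound map_size_in_u64s is an assumption for illustration, not taken from this patch:

    u64 idx, bits;

    for (idx = 0; idx < map_size_in_u64s; idx++) {
            bits = HvCallSm_get64BitsOfAccessMap(HvLpConfig_getLpIndex(), idx);
            /* each set bit marks a chunk of main store this partition may access */
    }
]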
diff --git a/arch/powerpc/platforms/iseries/dt.c b/arch/powerpc/platforms/iseries/dt.c
new file mode 100644
index 00000000000..f0491cc2890
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/dt.c
@@ -0,0 +1,643 @@
1/*
2 * Copyright (C) 2005-2006 Michael Ellerman, IBM Corporation
3 * Copyright (C) 2000-2004, IBM Corporation
4 *
5 * Description:
6 * This file contains all the routines to build a flattened device
7 * tree for a legacy iSeries machine.
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#undef DEBUG
16
17#include <linux/types.h>
18#include <linux/init.h>
19#include <linux/pci.h>
20#include <linux/pci_regs.h>
21#include <linux/pci_ids.h>
22#include <linux/threads.h>
23#include <linux/bitops.h>
24#include <linux/string.h>
25#include <linux/kernel.h>
26#include <linux/if_ether.h> /* ETH_ALEN */
27
28#include <asm/machdep.h>
29#include <asm/prom.h>
30#include <asm/lppaca.h>
31#include <asm/cputable.h>
32#include <asm/abs_addr.h>
33#include <asm/system.h>
34#include <asm/iseries/hv_types.h>
35#include <asm/iseries/hv_lp_config.h>
36#include <asm/iseries/hv_call_xm.h>
37#include <asm/udbg.h>
38
39#include "processor_vpd.h"
40#include "call_hpt.h"
41#include "call_pci.h"
42#include "pci.h"
43#include "it_exp_vpd_panel.h"
44#include "naca.h"
45
46#ifdef DEBUG
47#define DBG(fmt...) udbg_printf(fmt)
48#else
49#define DBG(fmt...)
50#endif
51
52/*
53 * These are created by the linker script at the start and end
54 * of the section containing all the strings marked with the DS macro.
55 */
56extern char __dt_strings_start[];
57extern char __dt_strings_end[];
58
59#define DS(s) ({ \
60 static const char __s[] __attribute__((section(".dt_strings"))) = s; \
61 __s; \
62})
63
64struct iseries_flat_dt {
65 struct boot_param_header header;
66 u64 reserve_map[2];
67};
68
69static void * __initdata dt_data;
70
71/*
72 * Putting these strings here keeps them out of the .dt_strings section
73 * that we capture for the strings blob of the flattened device tree.
74 */
75static char __initdata device_type_cpu[] = "cpu";
76static char __initdata device_type_memory[] = "memory";
77static char __initdata device_type_serial[] = "serial";
78static char __initdata device_type_network[] = "network";
79static char __initdata device_type_pci[] = "pci";
80static char __initdata device_type_vdevice[] = "vdevice";
81static char __initdata device_type_vscsi[] = "vscsi";
82
83
84/* EBCDIC to ASCII conversion routines */
85
86static unsigned char __init e2a(unsigned char x)
87{
88 switch (x) {
89 case 0x81 ... 0x89:
90 return x - 0x81 + 'a';
91 case 0x91 ... 0x99:
92 return x - 0x91 + 'j';
93 case 0xA2 ... 0xA9:
94 return x - 0xA2 + 's';
95 case 0xC1 ... 0xC9:
96 return x - 0xC1 + 'A';
97 case 0xD1 ... 0xD9:
98 return x - 0xD1 + 'J';
99 case 0xE2 ... 0xE9:
100 return x - 0xE2 + 'S';
101 case 0xF0 ... 0xF9:
102 return x - 0xF0 + '0';
103 }
104 return ' ';
105}
106
107static unsigned char * __init strne2a(unsigned char *dest,
108 const unsigned char *src, size_t n)
109{
110 int i;
111
112 n = strnlen(src, n);
113
114 for (i = 0; i < n; i++)
115 dest[i] = e2a(src[i]);
116
117 return dest;
118}
119
120static struct iseries_flat_dt * __init dt_init(void)
121{
122 struct iseries_flat_dt *dt;
123 unsigned long str_len;
124
125 str_len = __dt_strings_end - __dt_strings_start;
126 dt = (struct iseries_flat_dt *)ALIGN(klimit, 8);
127 dt->header.off_mem_rsvmap =
128 offsetof(struct iseries_flat_dt, reserve_map);
129 dt->header.off_dt_strings = ALIGN(sizeof(*dt), 8);
130 dt->header.off_dt_struct = dt->header.off_dt_strings
131 + ALIGN(str_len, 8);
132 dt_data = (void *)((unsigned long)dt + dt->header.off_dt_struct);
133 dt->header.dt_strings_size = str_len;
134
135 /* There is no notion of hardware cpu id on iSeries */
136 dt->header.boot_cpuid_phys = smp_processor_id();
137
138 memcpy((char *)dt + dt->header.off_dt_strings, __dt_strings_start,
139 str_len);
140
141 dt->header.magic = OF_DT_HEADER;
142 dt->header.version = 0x10;
143 dt->header.last_comp_version = 0x10;
144
145 dt->reserve_map[0] = 0;
146 dt->reserve_map[1] = 0;
147
148 return dt;
149}
150
151static void __init dt_push_u32(struct iseries_flat_dt *dt, u32 value)
152{
153 *((u32 *)dt_data) = value;
154 dt_data += sizeof(u32);
155}
156
157#ifdef notyet
158static void __init dt_push_u64(struct iseries_flat_dt *dt, u64 value)
159{
160 *((u64 *)dt_data) = value;
161 dt_data += sizeof(u64);
162}
163#endif
164
165static void __init dt_push_bytes(struct iseries_flat_dt *dt, const char *data,
166 int len)
167{
168 memcpy(dt_data, data, len);
169 dt_data += ALIGN(len, 4);
170}
171
172static void __init dt_start_node(struct iseries_flat_dt *dt, const char *name)
173{
174 dt_push_u32(dt, OF_DT_BEGIN_NODE);
175 dt_push_bytes(dt, name, strlen(name) + 1);
176}
177
178#define dt_end_node(dt) dt_push_u32(dt, OF_DT_END_NODE)
179
180static void __init __dt_prop(struct iseries_flat_dt *dt, const char *name,
181 const void *data, int len)
182{
183 unsigned long offset;
184
185 dt_push_u32(dt, OF_DT_PROP);
186
187 /* Length of the data */
188 dt_push_u32(dt, len);
189
190 offset = name - __dt_strings_start;
191
192 /* The offset of the properties name in the string blob. */
193 dt_push_u32(dt, (u32)offset);
194
195 /* The actual data. */
196 dt_push_bytes(dt, data, len);
197}
198#define dt_prop(dt, name, data, len) __dt_prop((dt), DS(name), (data), (len))
199
200#define dt_prop_str(dt, name, data) \
201 dt_prop((dt), name, (data), strlen((data)) + 1); /* + 1 for NULL */
202
203static void __init __dt_prop_u32(struct iseries_flat_dt *dt, const char *name,
204 u32 data)
205{
206 __dt_prop(dt, name, &data, sizeof(u32));
207}
208#define dt_prop_u32(dt, name, data) __dt_prop_u32((dt), DS(name), (data))
209
210static void __init __maybe_unused __dt_prop_u64(struct iseries_flat_dt *dt,
211 const char *name, u64 data)
212{
213 __dt_prop(dt, name, &data, sizeof(u64));
214}
215#define dt_prop_u64(dt, name, data) __dt_prop_u64((dt), DS(name), (data))
216
217#define dt_prop_u64_list(dt, name, data, n) \
218 dt_prop((dt), name, (data), sizeof(u64) * (n))
219
220#define dt_prop_u32_list(dt, name, data, n) \
221 dt_prop((dt), name, (data), sizeof(u32) * (n))
222
223#define dt_prop_empty(dt, name) dt_prop((dt), name, NULL, 0)
224
225static void __init dt_cpus(struct iseries_flat_dt *dt)
226{
227 unsigned char buf[32];
228 unsigned char *p;
229 unsigned int i, index;
230 struct IoHriProcessorVpd *d;
231 u32 pft_size[2];
232
233 /* yuck */
234 snprintf(buf, 32, "PowerPC,%s", cur_cpu_spec->cpu_name);
235 p = strchr(buf, ' ');
236 if (!p) p = buf + strlen(buf);
237
238 dt_start_node(dt, "cpus");
239 dt_prop_u32(dt, "#address-cells", 1);
240 dt_prop_u32(dt, "#size-cells", 0);
241
242 pft_size[0] = 0; /* NUMA CEC cookie, 0 for non NUMA */
243 pft_size[1] = __ilog2(HvCallHpt_getHptPages() * HW_PAGE_SIZE);
244
245 for (i = 0; i < NR_LPPACAS; i++) {
246 if (lppaca[i].dyn_proc_status >= 2)
247 continue;
248
249 snprintf(p, 32 - (p - buf), "@%d", i);
250 dt_start_node(dt, buf);
251
252 dt_prop_str(dt, "device_type", device_type_cpu);
253
254 index = lppaca[i].dyn_hv_phys_proc_index;
255 d = &xIoHriProcessorVpd[index];
256
257 dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
258 dt_prop_u32(dt, "i-cache-line-size", d->xInstCacheOperandSize);
259
260 dt_prop_u32(dt, "d-cache-size", d->xDataL1CacheSizeKB * 1024);
261 dt_prop_u32(dt, "d-cache-line-size", d->xDataCacheOperandSize);
262
263 /* magic conversions to Hz copied from old code */
264 dt_prop_u32(dt, "clock-frequency",
265 ((1UL << 34) * 1000000) / d->xProcFreq);
266 dt_prop_u32(dt, "timebase-frequency",
267 ((1UL << 32) * 1000000) / d->xTimeBaseFreq);
268
269 dt_prop_u32(dt, "reg", i);
270
271 dt_prop_u32_list(dt, "ibm,pft-size", pft_size, 2);
272
273 dt_end_node(dt);
274 }
275
276 dt_end_node(dt);
277}
278
279static void __init dt_model(struct iseries_flat_dt *dt)
280{
281 char buf[16] = "IBM,";
282
283 /* N.B. lparcfg.c knows about the "IBM," prefixes ... */
284 /* "IBM," + mfgId[2:3] + systemSerial[1:5] */
285 strne2a(buf + 4, xItExtVpdPanel.mfgID + 2, 2);
286 strne2a(buf + 6, xItExtVpdPanel.systemSerial + 1, 5);
287 buf[11] = '\0';
288 dt_prop_str(dt, "system-id", buf);
289
290 /* "IBM," + machineType[0:4] */
291 strne2a(buf + 4, xItExtVpdPanel.machineType, 4);
292 buf[8] = '\0';
293 dt_prop_str(dt, "model", buf);
294
295 dt_prop_str(dt, "compatible", "IBM,iSeries");
296 dt_prop_u32(dt, "ibm,partition-no", HvLpConfig_getLpIndex());
297}
298
299static void __init dt_initrd(struct iseries_flat_dt *dt)
300{
301#ifdef CONFIG_BLK_DEV_INITRD
302 if (naca.xRamDisk) {
303 dt_prop_u64(dt, "linux,initrd-start", (u64)naca.xRamDisk);
304 dt_prop_u64(dt, "linux,initrd-end",
305 (u64)naca.xRamDisk + naca.xRamDiskSize * HW_PAGE_SIZE);
306 }
307#endif
308}
309
310static void __init dt_do_vdevice(struct iseries_flat_dt *dt,
311 const char *name, u32 reg, int unit,
312 const char *type, const char *compat, int end)
313{
314 char buf[32];
315
316 snprintf(buf, 32, "%s@%08x", name, reg + ((unit >= 0) ? unit : 0));
317 dt_start_node(dt, buf);
318 dt_prop_str(dt, "device_type", type);
319 if (compat)
320 dt_prop_str(dt, "compatible", compat);
321 dt_prop_u32(dt, "reg", reg + ((unit >= 0) ? unit : 0));
322 if (unit >= 0)
323 dt_prop_u32(dt, "linux,unit_address", unit);
324 if (end)
325 dt_end_node(dt);
326}
327
328static void __init dt_vdevices(struct iseries_flat_dt *dt)
329{
330 u32 reg = 0;
331 HvLpIndexMap vlan_map;
332 int i;
333
334 dt_start_node(dt, "vdevice");
335 dt_prop_str(dt, "device_type", device_type_vdevice);
336 dt_prop_str(dt, "compatible", "IBM,iSeries-vdevice");
337 dt_prop_u32(dt, "#address-cells", 1);
338 dt_prop_u32(dt, "#size-cells", 0);
339
340 dt_do_vdevice(dt, "vty", reg, -1, device_type_serial,
341 "IBM,iSeries-vty", 1);
342 reg++;
343
344 dt_do_vdevice(dt, "v-scsi", reg, -1, device_type_vscsi,
345 "IBM,v-scsi", 1);
346 reg++;
347
348 vlan_map = HvLpConfig_getVirtualLanIndexMap();
349 for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
350 unsigned char mac_addr[ETH_ALEN];
351
352 if ((vlan_map & (0x8000 >> i)) == 0)
353 continue;
354 dt_do_vdevice(dt, "l-lan", reg, i, device_type_network,
355 "IBM,iSeries-l-lan", 0);
356 mac_addr[0] = 0x02;
357 mac_addr[1] = 0x01;
358 mac_addr[2] = 0xff;
359 mac_addr[3] = i;
360 mac_addr[4] = 0xff;
361 mac_addr[5] = HvLpConfig_getLpIndex_outline();
362 dt_prop(dt, "local-mac-address", (char *)mac_addr, ETH_ALEN);
363 dt_prop(dt, "mac-address", (char *)mac_addr, ETH_ALEN);
364 dt_prop_u32(dt, "max-frame-size", 9000);
365 dt_prop_u32(dt, "address-bits", 48);
366
367 dt_end_node(dt);
368 }
369
370 dt_end_node(dt);
371}
372
373struct pci_class_name {
374 u16 code;
375 const char *name;
376 const char *type;
377};
378
379static struct pci_class_name __initdata pci_class_name[] = {
380 { PCI_CLASS_NETWORK_ETHERNET, "ethernet", device_type_network },
381};
382
383static struct pci_class_name * __init dt_find_pci_class_name(u16 class_code)
384{
385 struct pci_class_name *cp;
386
387 for (cp = pci_class_name;
388 cp < &pci_class_name[ARRAY_SIZE(pci_class_name)]; cp++)
389 if (cp->code == class_code)
390 return cp;
391 return NULL;
392}
393
394/*
395 * This assumes that the node slot is always on the primary bus!
396 */
397static void __init scan_bridge_slot(struct iseries_flat_dt *dt,
398 HvBusNumber bus, struct HvCallPci_BridgeInfo *bridge_info)
399{
400 HvSubBusNumber sub_bus = bridge_info->subBusNumber;
401 u16 vendor_id;
402 u16 device_id;
403 u32 class_id;
404 int err;
405 char buf[32];
406 u32 reg[5];
407 int id_sel = ISERIES_GET_DEVICE_FROM_SUBBUS(sub_bus);
408 int function = ISERIES_GET_FUNCTION_FROM_SUBBUS(sub_bus);
409 HvAgentId eads_id_sel = ISERIES_PCI_AGENTID(id_sel, function);
410 u8 devfn;
411 struct pci_class_name *cp;
412
413 /*
414 * Connect all functions of any device found.
415 */
416 for (id_sel = 1; id_sel <= bridge_info->maxAgents; id_sel++) {
417 for (function = 0; function < 8; function++) {
418 HvAgentId agent_id = ISERIES_PCI_AGENTID(id_sel,
419 function);
420 err = HvCallXm_connectBusUnit(bus, sub_bus,
421 agent_id, 0);
422 if (err) {
423 if (err != 0x302)
424 DBG("connectBusUnit(%x, %x, %x) %x\n",
425 bus, sub_bus, agent_id, err);
426 continue;
427 }
428
429 err = HvCallPci_configLoad16(bus, sub_bus, agent_id,
430 PCI_VENDOR_ID, &vendor_id);
431 if (err) {
432 DBG("ReadVendor(%x, %x, %x) %x\n",
433 bus, sub_bus, agent_id, err);
434 continue;
435 }
436 err = HvCallPci_configLoad16(bus, sub_bus, agent_id,
437 PCI_DEVICE_ID, &device_id);
438 if (err) {
439 DBG("ReadDevice(%x, %x, %x) %x\n",
440 bus, sub_bus, agent_id, err);
441 continue;
442 }
443 err = HvCallPci_configLoad32(bus, sub_bus, agent_id,
444 PCI_CLASS_REVISION , &class_id);
445 if (err) {
446 DBG("ReadClass(%x, %x, %x) %x\n",
447 bus, sub_bus, agent_id, err);
448 continue;
449 }
450
451 devfn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(eads_id_sel),
452 function);
453 cp = dt_find_pci_class_name(class_id >> 16);
454 if (cp && cp->name)
455 strncpy(buf, cp->name, sizeof(buf) - 1);
456 else
457 snprintf(buf, sizeof(buf), "pci%x,%x",
458 vendor_id, device_id);
459 buf[sizeof(buf) - 1] = '\0';
460 snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf),
461 "@%x", PCI_SLOT(devfn));
462 buf[sizeof(buf) - 1] = '\0';
463 if (function != 0)
464 snprintf(buf + strlen(buf),
465 sizeof(buf) - strlen(buf),
466 ",%x", function);
467 dt_start_node(dt, buf);
468 reg[0] = (bus << 16) | (devfn << 8);
469 reg[1] = 0;
470 reg[2] = 0;
471 reg[3] = 0;
472 reg[4] = 0;
473 dt_prop_u32_list(dt, "reg", reg, 5);
474 if (cp && (cp->type || cp->name))
475 dt_prop_str(dt, "device_type",
476 cp->type ? cp->type : cp->name);
477 dt_prop_u32(dt, "vendor-id", vendor_id);
478 dt_prop_u32(dt, "device-id", device_id);
479 dt_prop_u32(dt, "class-code", class_id >> 8);
480 dt_prop_u32(dt, "revision-id", class_id & 0xff);
481 dt_prop_u32(dt, "linux,subbus", sub_bus);
482 dt_prop_u32(dt, "linux,agent-id", agent_id);
483 dt_prop_u32(dt, "linux,logical-slot-number",
484 bridge_info->logicalSlotNumber);
485 dt_end_node(dt);
486
487 }
488 }
489}
490
491static void __init scan_bridge(struct iseries_flat_dt *dt, HvBusNumber bus,
492 HvSubBusNumber sub_bus, int id_sel)
493{
494 struct HvCallPci_BridgeInfo bridge_info;
495 HvAgentId agent_id;
496 int function;
497 int ret;
498
499 /* Note: hvSubBus and irq are always 0 at this level! */
500 for (function = 0; function < 8; ++function) {
501 agent_id = ISERIES_PCI_AGENTID(id_sel, function);
502 ret = HvCallXm_connectBusUnit(bus, sub_bus, agent_id, 0);
503 if (ret != 0) {
504 if (ret != 0xb)
505 DBG("connectBusUnit(%x, %x, %x) %x\n",
506 bus, sub_bus, agent_id, ret);
507 continue;
508 }
509 DBG("found device at bus %d idsel %d func %d (AgentId %x)\n",
510 bus, id_sel, function, agent_id);
511 ret = HvCallPci_getBusUnitInfo(bus, sub_bus, agent_id,
512 iseries_hv_addr(&bridge_info),
513 sizeof(struct HvCallPci_BridgeInfo));
514 if (ret != 0)
515 continue;
516 DBG("bridge info: type %x subbus %x "
517 "maxAgents %x maxsubbus %x logslot %x\n",
518 bridge_info.busUnitInfo.deviceType,
519 bridge_info.subBusNumber,
520 bridge_info.maxAgents,
521 bridge_info.maxSubBusNumber,
522 bridge_info.logicalSlotNumber);
523 if (bridge_info.busUnitInfo.deviceType ==
524 HvCallPci_BridgeDevice)
525 scan_bridge_slot(dt, bus, &bridge_info);
526 else
527 DBG("PCI: Invalid Bridge Configuration(0x%02X)",
528 bridge_info.busUnitInfo.deviceType);
529 }
530}
531
532static void __init scan_phb(struct iseries_flat_dt *dt, HvBusNumber bus)
533{
534 struct HvCallPci_DeviceInfo dev_info;
535 const HvSubBusNumber sub_bus = 0; /* EADs is always 0. */
536 int err;
537 int id_sel;
538 const int max_agents = 8;
539
540 /*
541 * Probe for EADs Bridges
542 */
543 for (id_sel = 1; id_sel < max_agents; ++id_sel) {
544 err = HvCallPci_getDeviceInfo(bus, sub_bus, id_sel,
545 iseries_hv_addr(&dev_info),
546 sizeof(struct HvCallPci_DeviceInfo));
547 if (err) {
548 if (err != 0x302)
549 DBG("getDeviceInfo(%x, %x, %x) %x\n",
550 bus, sub_bus, id_sel, err);
551 continue;
552 }
553 if (dev_info.deviceType != HvCallPci_NodeDevice) {
554 DBG("PCI: Invalid System Configuration"
555 "(0x%02X) for bus 0x%02x id 0x%02x.\n",
556 dev_info.deviceType, bus, id_sel);
557 continue;
558 }
559 scan_bridge(dt, bus, sub_bus, id_sel);
560 }
561}
562
563static void __init dt_pci_devices(struct iseries_flat_dt *dt)
564{
565 HvBusNumber bus;
566 char buf[32];
567 u32 buses[2];
568 int phb_num = 0;
569
570 /* Check all possible buses. */
571 for (bus = 0; bus < 256; bus++) {
572 int err = HvCallXm_testBus(bus);
573
574 if (err) {
575 /*
576 * Check for Unexpected Return code, a clue that
577 * something has gone wrong.
578 */
579 if (err != 0x0301)
580 DBG("Unexpected Return on Probe(0x%02X) "
581 "0x%04X\n", bus, err);
582 continue;
583 }
584 DBG("bus %d appears to exist\n", bus);
585 snprintf(buf, 32, "pci@%d", phb_num);
586 dt_start_node(dt, buf);
587 dt_prop_str(dt, "device_type", device_type_pci);
588 dt_prop_str(dt, "compatible", "IBM,iSeries-Logical-PHB");
589 dt_prop_u32(dt, "#address-cells", 3);
590 dt_prop_u32(dt, "#size-cells", 2);
591 buses[0] = buses[1] = bus;
592 dt_prop_u32_list(dt, "bus-range", buses, 2);
593 scan_phb(dt, bus);
594 dt_end_node(dt);
595 phb_num++;
596 }
597}
598
599static void dt_finish(struct iseries_flat_dt *dt)
600{
601 dt_push_u32(dt, OF_DT_END);
602 dt->header.totalsize = (unsigned long)dt_data - (unsigned long)dt;
603 klimit = ALIGN((unsigned long)dt_data, 8);
604}
605
606void * __init build_flat_dt(unsigned long phys_mem_size)
607{
608 struct iseries_flat_dt *iseries_dt;
609 u64 tmp[2];
610
611 iseries_dt = dt_init();
612
613 dt_start_node(iseries_dt, "");
614
615 dt_prop_u32(iseries_dt, "#address-cells", 2);
616 dt_prop_u32(iseries_dt, "#size-cells", 2);
617 dt_model(iseries_dt);
618
619 /* /memory */
620 dt_start_node(iseries_dt, "memory@0");
621 dt_prop_str(iseries_dt, "device_type", device_type_memory);
622 tmp[0] = 0;
623 tmp[1] = phys_mem_size;
624 dt_prop_u64_list(iseries_dt, "reg", tmp, 2);
625 dt_end_node(iseries_dt);
626
627 /* /chosen */
628 dt_start_node(iseries_dt, "chosen");
629 dt_prop_str(iseries_dt, "bootargs", cmd_line);
630 dt_initrd(iseries_dt);
631 dt_end_node(iseries_dt);
632
633 dt_cpus(iseries_dt);
634
635 dt_vdevices(iseries_dt);
636 dt_pci_devices(iseries_dt);
637
638 dt_end_node(iseries_dt);
639
640 dt_finish(iseries_dt);
641
642 return iseries_dt;
643}
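
[Editor's note: the node/property helpers above always compose in the same order — begin node, emit properties, end node. A minimal sketch using the same macros, with a made-up node name and values for illustration:

    dt_start_node(dt, "example@0");
    dt_prop_str(dt, "device_type", "example");  /* string property */
    dt_prop_u32(dt, "reg", 0);                  /* single 32-bit cell */
    dt_end_node(dt);
]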
diff --git a/arch/powerpc/platforms/iseries/exception.S b/arch/powerpc/platforms/iseries/exception.S
new file mode 100644
index 00000000000..f519ee17ff7
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/exception.S
@@ -0,0 +1,311 @@
1/*
2 * Low level routines for legacy iSeries support.
3 *
4 * Extracted from head_64.S
5 *
6 * PowerPC version
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 *
9 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
10 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
11 * Adapted for Power Macintosh by Paul Mackerras.
12 * Low-level exception handlers and MMU support
13 * rewritten by Paul Mackerras.
14 * Copyright (C) 1996 Paul Mackerras.
15 *
16 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
17 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
18 *
19 * This file contains the low-level support and setup for the
20 * PowerPC-64 platform, including trap and interrupt dispatch.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 */
27
28#include <asm/reg.h>
29#include <asm/ppc_asm.h>
30#include <asm/asm-offsets.h>
31#include <asm/thread_info.h>
32#include <asm/ptrace.h>
33#include <asm/cputable.h>
34#include <asm/mmu.h>
35
36#include "exception.h"
37
38 .text
39
40 .globl system_reset_iSeries
41system_reset_iSeries:
42 bl .relative_toc
43 mfspr r13,SPRN_SPRG3 /* Get alpaca address */
44 LOAD_REG_ADDR(r23, alpaca)
45 li r0,ALPACA_SIZE
46 sub r23,r13,r23
47 divdu r24,r23,r0 /* r24 has cpu number */
48 cmpwi 0,r24,0 /* Are we processor 0? */
49 bne 1f
50 LOAD_REG_ADDR(r13, boot_paca)
51 mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */
52 mfmsr r23
53 ori r23,r23,MSR_RI
54 mtmsrd r23 /* RI on */
55 b .__start_initialization_iSeries /* Start up the first processor */
561: mfspr r4,SPRN_CTRLF
57 li r5,CTRL_RUNLATCH /* Turn off the run light */
58 andc r4,r4,r5
59 mtspr SPRN_CTRLT,r4
60
61/* Spin on __secondary_hold_spinloop until it is updated by the boot cpu. */
62/* In the UP case we'll yield() later, and we will not access the paca anyway */
63#ifdef CONFIG_SMP
64iSeries_secondary_wait_paca:
65 HMT_LOW
66 LOAD_REG_ADDR(r23, __secondary_hold_spinloop)
67 ld r23,0(r23)
68
69 cmpdi 0,r23,0
70 bne 2f /* go on when the master is ready */
71
72 /* Keep poking the Hypervisor until we're released */
73 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
74 lis r3,0x8002
75 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
76 li r0,-1 /* r0=-1 indicates a Hypervisor call */
77 sc /* Invoke the hypervisor via a system call */
78 b iSeries_secondary_wait_paca
79
802:
81 HMT_MEDIUM
82 sync
83
84 LOAD_REG_ADDR(r3, nr_cpu_ids) /* get number of pacas allocated */
85 lwz r3,0(r3) /* nr_cpus= or NR_CPUS can limit */
86 cmpld 0,r24,r3 /* is our cpu number allocated? */
87 bge iSeries_secondary_yield /* no, yield forever */
88
89 /* Load our paca now that it's been allocated */
90 LOAD_REG_ADDR(r13, paca)
91 ld r13,0(r13)
92 mulli r0,r24,PACA_SIZE
93 add r13,r13,r0
94 mtspr SPRN_SPRG_PACA,r13 /* Save it away for the future */
95 mfmsr r23
96 ori r23,r23,MSR_RI
97 mtmsrd r23 /* RI on */
98
99iSeries_secondary_smp_loop:
100 lbz r23,PACAPROCSTART(r13) /* Test if this processor
101 * should start */
102 cmpwi 0,r23,0
103 bne 3f /* go on when we are told */
104
105 HMT_LOW
106 /* Let the Hypervisor know we are alive */
107 /* 8002 is a call to HvCallCfg::getLps, a harmless Hypervisor function */
108 lis r3,0x8002
109 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
110 li r0,-1 /* r0=-1 indicates a Hypervisor call */
111 sc /* Invoke the hypervisor via a system call */
112 mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
113 b iSeries_secondary_smp_loop /* wait for signal to start */
114
1153:
116 HMT_MEDIUM
117 sync
118 LOAD_REG_ADDR(r3,current_set)
119 sldi r28,r24,3 /* get current_set[cpu#] */
120 ldx r3,r3,r28
121 addi r1,r3,THREAD_SIZE
122 subi r1,r1,STACK_FRAME_OVERHEAD
123
124 b __secondary_start /* Loop until told to go */
125#endif /* CONFIG_SMP */
126
127iSeries_secondary_yield:
128 /* Yield the processor. This is required for non-SMP kernels
129 which are running on multi-threaded machines. */
130 HMT_LOW
131 lis r3,0x8000
132 rldicr r3,r3,32,15 /* r3 = (r3 << 32) & 0xffff000000000000 */
133 addi r3,r3,18 /* r3 = 0x8000000000000012 which is "yield" */
134 li r4,0 /* "yield timed" */
135 li r5,-1 /* "yield forever" */
136 li r0,-1 /* r0=-1 indicates a Hypervisor call */
137 sc /* Invoke the hypervisor via a system call */
138 mfspr r13,SPRN_SPRG_PACA /* Put r13 back ???? */
139 b iSeries_secondary_yield /* If SMP not configured, secondaries
140 * loop forever */
141
142/*** ISeries-LPAR interrupt handlers ***/
143
144 STD_EXCEPTION_ISERIES(machine_check, PACA_EXMC)
145
146 .globl data_access_iSeries
147data_access_iSeries:
148 mtspr SPRN_SPRG_SCRATCH0,r13
149BEGIN_FTR_SECTION
150 mfspr r13,SPRN_SPRG_PACA
151 std r9,PACA_EXSLB+EX_R9(r13)
152 std r10,PACA_EXSLB+EX_R10(r13)
153 mfspr r10,SPRN_DAR
154 mfspr r9,SPRN_DSISR
155 srdi r10,r10,60
156 rlwimi r10,r9,16,0x20
157 mfcr r9
158 cmpwi r10,0x2c
159 beq .do_stab_bolted_iSeries
160 ld r10,PACA_EXSLB+EX_R10(r13)
161 std r11,PACA_EXGEN+EX_R11(r13)
162 ld r11,PACA_EXSLB+EX_R9(r13)
163 std r12,PACA_EXGEN+EX_R12(r13)
164 mfspr r12,SPRN_SPRG_SCRATCH0
165 std r10,PACA_EXGEN+EX_R10(r13)
166 std r11,PACA_EXGEN+EX_R9(r13)
167 std r12,PACA_EXGEN+EX_R13(r13)
168 EXCEPTION_PROLOG_ISERIES_1
169FTR_SECTION_ELSE
170 EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0)
171 EXCEPTION_PROLOG_ISERIES_1
172ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_SLB)
173 b data_access_common
174
175.do_stab_bolted_iSeries:
176 std r11,PACA_EXSLB+EX_R11(r13)
177 std r12,PACA_EXSLB+EX_R12(r13)
178 mfspr r10,SPRN_SPRG_SCRATCH0
179 std r10,PACA_EXSLB+EX_R13(r13)
180 EXCEPTION_PROLOG_ISERIES_1
181 b .do_stab_bolted
182
183 .globl data_access_slb_iSeries
184data_access_slb_iSeries:
185 mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
186 mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
187 std r3,PACA_EXSLB+EX_R3(r13)
188 mfspr r3,SPRN_DAR
189 std r9,PACA_EXSLB+EX_R9(r13)
190 mfcr r9
191#ifdef __DISABLED__
192 cmpdi r3,0
193 bge slb_miss_user_iseries
194#endif
195 std r10,PACA_EXSLB+EX_R10(r13)
196 std r11,PACA_EXSLB+EX_R11(r13)
197 std r12,PACA_EXSLB+EX_R12(r13)
198 mfspr r10,SPRN_SPRG_SCRATCH0
199 std r10,PACA_EXSLB+EX_R13(r13)
200 ld r12,PACALPPACAPTR(r13)
201 ld r12,LPPACASRR1(r12)
202 b .slb_miss_realmode
203
204 STD_EXCEPTION_ISERIES(instruction_access, PACA_EXGEN)
205
206 .globl instruction_access_slb_iSeries
207instruction_access_slb_iSeries:
208 mtspr SPRN_SPRG_SCRATCH0,r13 /* save r13 */
209 mfspr r13,SPRN_SPRG_PACA /* get paca address into r13 */
210 std r3,PACA_EXSLB+EX_R3(r13)
211 ld r3,PACALPPACAPTR(r13)
212 ld r3,LPPACASRR0(r3) /* get SRR0 value */
213 std r9,PACA_EXSLB+EX_R9(r13)
214 mfcr r9
215#ifdef __DISABLED__
216 cmpdi r3,0
217 bge slb_miss_user_iseries
218#endif
219 std r10,PACA_EXSLB+EX_R10(r13)
220 std r11,PACA_EXSLB+EX_R11(r13)
221 std r12,PACA_EXSLB+EX_R12(r13)
222 mfspr r10,SPRN_SPRG_SCRATCH0
223 std r10,PACA_EXSLB+EX_R13(r13)
224 ld r12,PACALPPACAPTR(r13)
225 ld r12,LPPACASRR1(r12)
226 b .slb_miss_realmode
227
228#ifdef __DISABLED__
229slb_miss_user_iseries:
230 std r10,PACA_EXGEN+EX_R10(r13)
231 std r11,PACA_EXGEN+EX_R11(r13)
232 std r12,PACA_EXGEN+EX_R12(r13)
233 mfspr r10,SPRG_SCRATCH0
234 ld r11,PACA_EXSLB+EX_R9(r13)
235 ld r12,PACA_EXSLB+EX_R3(r13)
236 std r10,PACA_EXGEN+EX_R13(r13)
237 std r11,PACA_EXGEN+EX_R9(r13)
238 std r12,PACA_EXGEN+EX_R3(r13)
239 EXCEPTION_PROLOG_ISERIES_1
240 b slb_miss_user_common
241#endif
242
243 MASKABLE_EXCEPTION_ISERIES(hardware_interrupt)
244 STD_EXCEPTION_ISERIES(alignment, PACA_EXGEN)
245 STD_EXCEPTION_ISERIES(program_check, PACA_EXGEN)
246 STD_EXCEPTION_ISERIES(fp_unavailable, PACA_EXGEN)
247 MASKABLE_EXCEPTION_ISERIES(decrementer)
248 STD_EXCEPTION_ISERIES(trap_0a, PACA_EXGEN)
249 STD_EXCEPTION_ISERIES(trap_0b, PACA_EXGEN)
250
251 .globl system_call_iSeries
252system_call_iSeries:
253 mr r9,r13
254 mfspr r13,SPRN_SPRG_PACA
255 EXCEPTION_PROLOG_ISERIES_1
256 b system_call_common
257
258 STD_EXCEPTION_ISERIES(single_step, PACA_EXGEN)
259 STD_EXCEPTION_ISERIES(trap_0e, PACA_EXGEN)
260 STD_EXCEPTION_ISERIES(performance_monitor, PACA_EXGEN)
261
262decrementer_iSeries_masked:
263 /* We may not have a valid TOC pointer in here. */
264 li r11,1
265 ld r12,PACALPPACAPTR(r13)
266 stb r11,LPPACADECRINT(r12)
267 li r12,-1
268 clrldi r12,r12,33 /* set DEC to 0x7fffffff */
269 mtspr SPRN_DEC,r12
270 /* fall through */
271
272hardware_interrupt_iSeries_masked:
273 mtcrf 0x80,r9 /* Restore regs */
274 ld r12,PACALPPACAPTR(r13)
275 ld r11,LPPACASRR0(r12)
276 ld r12,LPPACASRR1(r12)
277 mtspr SPRN_SRR0,r11
278 mtspr SPRN_SRR1,r12
279 ld r9,PACA_EXGEN+EX_R9(r13)
280 ld r10,PACA_EXGEN+EX_R10(r13)
281 ld r11,PACA_EXGEN+EX_R11(r13)
282 ld r12,PACA_EXGEN+EX_R12(r13)
283 ld r13,PACA_EXGEN+EX_R13(r13)
284 rfid
285 b . /* prevent speculative execution */
286
287_INIT_STATIC(__start_initialization_iSeries)
288 /* Clear out the BSS */
289 LOAD_REG_ADDR(r11,__bss_stop)
290 LOAD_REG_ADDR(r8,__bss_start)
291 sub r11,r11,r8 /* bss size */
292 addi r11,r11,7 /* round up to an even double word */
293 rldicl. r11,r11,61,3 /* shift right by 3 */
294 beq 4f
295 addi r8,r8,-8
296 li r0,0
297 mtctr r11 /* zero this many doublewords */
2983: stdu r0,8(r8)
299 bdnz 3b
3004:
301 LOAD_REG_ADDR(r1,init_thread_union)
302 addi r1,r1,THREAD_SIZE
303 li r0,0
304 stdu r0,-STACK_FRAME_OVERHEAD(r1)
305
306 bl .iSeries_early_setup
307 bl .early_setup
308
309 /* relocation is on at this point */
310
311 b .start_here_common
diff --git a/arch/powerpc/platforms/iseries/exception.h b/arch/powerpc/platforms/iseries/exception.h
new file mode 100644
index 00000000000..50271b550a9
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/exception.h
@@ -0,0 +1,58 @@
1#ifndef _ASM_POWERPC_ISERIES_EXCEPTION_H
2#define _ASM_POWERPC_ISERIES_EXCEPTION_H
3/*
4 * Extracted from head_64.S
5 *
6 * PowerPC version
7 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
8 *
9 * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
10 * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
11 * Adapted for Power Macintosh by Paul Mackerras.
12 * Low-level exception handlers and MMU support
13 * rewritten by Paul Mackerras.
14 * Copyright (C) 1996 Paul Mackerras.
15 *
16 * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and
17 * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
18 *
19 * This file contains the low-level support and setup for the
20 * PowerPC-64 platform, including trap and interrupt dispatch.
21 *
22 * This program is free software; you can redistribute it and/or
23 * modify it under the terms of the GNU General Public License
24 * as published by the Free Software Foundation; either version
25 * 2 of the License, or (at your option) any later version.
26 */
27#include <asm/exception-64s.h>
28
29#define EXCEPTION_PROLOG_ISERIES_1 \
30 mfmsr r10; \
31 ld r12,PACALPPACAPTR(r13); \
32 ld r11,LPPACASRR0(r12); \
33 ld r12,LPPACASRR1(r12); \
34 ori r10,r10,MSR_RI; \
35 mtmsrd r10,1
36
37#define STD_EXCEPTION_ISERIES(label, area) \
38 .globl label##_iSeries; \
39label##_iSeries: \
40 HMT_MEDIUM; \
41 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
42 EXCEPTION_PROLOG_1(area, NOTEST, 0); \
43 EXCEPTION_PROLOG_ISERIES_1; \
44 b label##_common
45
46#define MASKABLE_EXCEPTION_ISERIES(label) \
47 .globl label##_iSeries; \
48label##_iSeries: \
49 HMT_MEDIUM; \
50 mtspr SPRN_SPRG_SCRATCH0,r13; /* save r13 */ \
51 EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0); \
52 lbz r10,PACASOFTIRQEN(r13); \
53 cmpwi 0,r10,0; \
54 beq- label##_iSeries_masked; \
55 EXCEPTION_PROLOG_ISERIES_1; \
56 b label##_common; \
57
58#endif /* _ASM_POWERPC_ISERIES_EXCEPTION_H */
diff --git a/arch/powerpc/platforms/iseries/htab.c b/arch/powerpc/platforms/iseries/htab.c
new file mode 100644
index 00000000000..3ae66ab9d5e
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/htab.c
@@ -0,0 +1,257 @@
1/*
2 * iSeries hashtable management.
3 * Derived from pSeries_htab.c
4 *
5 * SMP scalability work:
6 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13#include <asm/machdep.h>
14#include <asm/pgtable.h>
15#include <asm/mmu.h>
16#include <asm/mmu_context.h>
17#include <asm/abs_addr.h>
18#include <linux/spinlock.h>
19
20#include "call_hpt.h"
21
22static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp;
23
24/*
25 * Very primitive algorithm for picking up a lock
26 */
27static inline void iSeries_hlock(unsigned long slot)
28{
29 if (slot & 0x8)
30 slot = ~slot;
31 spin_lock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
32}
33
34static inline void iSeries_hunlock(unsigned long slot)
35{
36 if (slot & 0x8)
37 slot = ~slot;
38 spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
39}
40
41static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
42 unsigned long pa, unsigned long rflags,
43 unsigned long vflags, int psize, int ssize)
44{
45 long slot;
46 struct hash_pte lhpte;
47 int secondary = 0;
48
49 BUG_ON(psize != MMU_PAGE_4K);
50
51 /*
52 * The hypervisor tries both primary and secondary.
53 * If we are being called to insert in the secondary,
54 * it means we have already tried both primary and secondary,
55 * so we return failure immediately.
56 */
57 if (vflags & HPTE_V_SECONDARY)
58 return -1;
59
60 iSeries_hlock(hpte_group);
61
62 slot = HvCallHpt_findValid(&lhpte, va >> HW_PAGE_SHIFT);
63 if (unlikely(lhpte.v & HPTE_V_VALID)) {
64 if (vflags & HPTE_V_BOLTED) {
65 HvCallHpt_setSwBits(slot, 0x10, 0);
66 HvCallHpt_setPp(slot, PP_RWXX);
67 iSeries_hunlock(hpte_group);
68 if (slot < 0)
69 return 0x8 | (slot & 7);
70 else
71 return slot & 7;
72 }
73 BUG();
74 }
75
76 if (slot == -1) { /* No available entry found in either group */
77 iSeries_hunlock(hpte_group);
78 return -1;
79 }
80
81 if (slot < 0) { /* MSB set means secondary group */
82 vflags |= HPTE_V_SECONDARY;
83 secondary = 1;
84 slot &= 0x7fffffffffffffff;
85 }
86
87
88 lhpte.v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M) |
89 vflags | HPTE_V_VALID;
90 lhpte.r = hpte_encode_r(phys_to_abs(pa), MMU_PAGE_4K) | rflags;
91
92 /* Now fill in the actual HPTE */
93 HvCallHpt_addValidate(slot, secondary, &lhpte);
94
95 iSeries_hunlock(hpte_group);
96
97 return (secondary << 3) | (slot & 7);
98}
99
100static unsigned long iSeries_hpte_getword0(unsigned long slot)
101{
102 struct hash_pte hpte;
103
104 HvCallHpt_get(&hpte, slot);
105 return hpte.v;
106}
107
108static long iSeries_hpte_remove(unsigned long hpte_group)
109{
110 unsigned long slot_offset;
111 int i;
112 unsigned long hpte_v;
113
114 /* Pick a random slot to start at */
115 slot_offset = mftb() & 0x7;
116
117 iSeries_hlock(hpte_group);
118
119 for (i = 0; i < HPTES_PER_GROUP; i++) {
120 hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);
121
122 if (! (hpte_v & HPTE_V_BOLTED)) {
123 HvCallHpt_invalidateSetSwBitsGet(hpte_group +
124 slot_offset, 0, 0);
125 iSeries_hunlock(hpte_group);
126 return i;
127 }
128
129 slot_offset++;
130 slot_offset &= 0x7;
131 }
132
133 iSeries_hunlock(hpte_group);
134
135 return -1;
136}
137
138/*
139 * The HyperVisor expects the "flags" argument in this form:
140 * bits 0..59 : reserved
141 * bit 60 : N
142 * bits 61..63 : PP2,PP1,PP0
143 */
144static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
145 unsigned long va, int psize, int ssize, int local)
146{
147 struct hash_pte hpte;
148 unsigned long want_v;
149
150 iSeries_hlock(slot);
151
152 HvCallHpt_get(&hpte, slot);
153 want_v = hpte_encode_v(va, MMU_PAGE_4K, MMU_SEGSIZE_256M);
154
155 if (HPTE_V_COMPARE(hpte.v, want_v) && (hpte.v & HPTE_V_VALID)) {
156 /*
157 * Hypervisor expects bits as NPPP, which is
158 * different from how they are mapped in our PP.
159 */
160 HvCallHpt_setPp(slot, (newpp & 0x3) | ((newpp & 0x4) << 1));
161 iSeries_hunlock(slot);
162 return 0;
163 }
164 iSeries_hunlock(slot);
165
166 return -1;
167}
168
169/*
170 * Functions used to find the PTE for a particular virtual address.
171 * Only used during boot when bolting pages.
172 *
173 * Input : vpn : virtual page number
174 * Output: PTE index within the page table of the entry
175 * -1 on failure
176 */
177static long iSeries_hpte_find(unsigned long vpn)
178{
179 struct hash_pte hpte;
180 long slot;
181
182 /*
183 * The HvCallHpt_findValid interface is as follows:
184 * 0xffffffffffffffff : No entry found.
185 * 0x00000000xxxxxxxx : Entry found in primary group, slot x
186 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
187 */
188 slot = HvCallHpt_findValid(&hpte, vpn);
189 if (hpte.v & HPTE_V_VALID) {
190 if (slot < 0) {
191 slot &= 0x7fffffffffffffff;
192 slot = -slot;
193 }
194 } else
195 slot = -1;
196 return slot;
197}
198
199/*
200 * Update the page protection bits. Intended to be used to create
201 * guard pages for kernel data structures on pages which are bolted
202 * in the HPT. Assumes pages being operated on will not be stolen.
203 * Does not work on large pages.
204 *
205 * No need to lock here because we should be the only user.
206 */
207static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea,
208 int psize, int ssize)
209{
210 unsigned long vsid,va,vpn;
211 long slot;
212
213 BUG_ON(psize != MMU_PAGE_4K);
214
215 vsid = get_kernel_vsid(ea, MMU_SEGSIZE_256M);
216 va = (vsid << 28) | (ea & 0x0fffffff);
217 vpn = va >> HW_PAGE_SHIFT;
218 slot = iSeries_hpte_find(vpn);
219 if (slot == -1)
220 panic("updateboltedpp: Could not find page to bolt\n");
221 HvCallHpt_setPp(slot, newpp);
222}
223
224static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
225 int psize, int ssize, int local)
226{
227 unsigned long hpte_v;
228 unsigned long avpn = va >> 23;
229 unsigned long flags;
230
231 local_irq_save(flags);
232
233 iSeries_hlock(slot);
234
235 hpte_v = iSeries_hpte_getword0(slot);
236
237 if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
238 HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
239
240 iSeries_hunlock(slot);
241
242 local_irq_restore(flags);
243}
244
245void __init hpte_init_iSeries(void)
246{
247 int i;
248
249 for (i = 0; i < ARRAY_SIZE(iSeries_hlocks); i++)
250 spin_lock_init(&iSeries_hlocks[i]);
251
252 ppc_md.hpte_invalidate = iSeries_hpte_invalidate;
253 ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
254 ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
255 ppc_md.hpte_insert = iSeries_hpte_insert;
256 ppc_md.hpte_remove = iSeries_hpte_remove;
257}
diff --git a/arch/powerpc/platforms/iseries/hvcall.S b/arch/powerpc/platforms/iseries/hvcall.S
new file mode 100644
index 00000000000..07ae6ad5f49
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/hvcall.S
@@ -0,0 +1,94 @@
1/*
2 * This file contains the code to perform calls to the
3 * iSeries LPAR hypervisor
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version
8 * 2 of the License, or (at your option) any later version.
9 */
10
11#include <asm/ppc_asm.h>
12#include <asm/processor.h>
13#include <asm/ptrace.h> /* XXX for STACK_FRAME_OVERHEAD */
14
15 .text
16
17/*
18 * Hypervisor call
19 *
20 * Invoke the iSeries hypervisor via the System Call instruction
21 * Parameters are passed to this routine in registers r3 - r10
22 *
23 * r3 contains the HV function to be called
24 * r4-r10 contain the operands to the hypervisor function
25 *
26 */
27
28_GLOBAL(HvCall)
29_GLOBAL(HvCall0)
30_GLOBAL(HvCall1)
31_GLOBAL(HvCall2)
32_GLOBAL(HvCall3)
33_GLOBAL(HvCall4)
34_GLOBAL(HvCall5)
35_GLOBAL(HvCall6)
36_GLOBAL(HvCall7)
37
38
39 mfcr r0
40 std r0,-8(r1)
41 stdu r1,-(STACK_FRAME_OVERHEAD+16)(r1)
42
43 /* r0 = 0xffffffffffffffff indicates a hypervisor call */
44
45 li r0,-1
46
47 /* Invoke the hypervisor */
48
49 sc
50
51 ld r1,0(r1)
52 ld r0,-8(r1)
53 mtcrf 0xff,r0
54
55 /* return to caller, return value in r3 */
56
57 blr
58
59_GLOBAL(HvCall0Ret16)
60_GLOBAL(HvCall1Ret16)
61_GLOBAL(HvCall2Ret16)
62_GLOBAL(HvCall3Ret16)
63_GLOBAL(HvCall4Ret16)
64_GLOBAL(HvCall5Ret16)
65_GLOBAL(HvCall6Ret16)
66_GLOBAL(HvCall7Ret16)
67
68 mfcr r0
69 std r0,-8(r1)
70 std r31,-16(r1)
71 stdu r1,-(STACK_FRAME_OVERHEAD+32)(r1)
72
73 mr r31,r4
74 li r0,-1
75 mr r4,r5
76 mr r5,r6
77 mr r6,r7
78 mr r7,r8
79 mr r8,r9
80 mr r9,r10
81
82 sc
83
84 std r3,0(r31)
85 std r4,8(r31)
86
87 mr r3,r5
88
89 ld r1,0(r1)
90 ld r0,-8(r1)
91 mtcrf 0xff,r0
92 ld r31,-16(r1)
93
94 blr
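
The calling convention documented at the top of this file (hypervisor function code in r3, operands in r4-r10, r0 set to -1 before the sc instruction) is what the C prototypes in asm/iseries/hv_call_sc.h rely on. Below is a minimal sketch, assuming only what this patch already shows (HvCall2 from hv_call_sc.h, HvCallBaseWriteLogBuffer from hv_call.h and virt_to_abs from abs_addr.h, all used the same way in hvlog.c later in this patch), of how a C caller reaches these entry points; the wrapper name is illustrative only.

/* Sketch: invoking the hypervisor through the HvCallN entry points.
 * HvCall2() places the function code in r3 and its two operands in
 * r4/r5 before executing "sc", exactly as the assembly above shows.
 */
#include <asm/abs_addr.h>
#include <asm/iseries/hv_call.h>
#include <asm/iseries/hv_call_sc.h>
#include <asm/iseries/hv_types.h>

static void example_write_log_chunk(const struct HvLpBufferList *list, u64 len)
{
	HvCall2(HvCallBaseWriteLogBuffer, virt_to_abs(list), len);
}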
diff --git a/arch/powerpc/platforms/iseries/hvlog.c b/arch/powerpc/platforms/iseries/hvlog.c
new file mode 100644
index 00000000000..f476d71194f
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/hvlog.c
@@ -0,0 +1,35 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <asm/page.h>
11#include <asm/abs_addr.h>
12#include <asm/iseries/hv_call.h>
13#include <asm/iseries/hv_call_sc.h>
14#include <asm/iseries/hv_types.h>
15
16
17void HvCall_writeLogBuffer(const void *buffer, u64 len)
18{
19 struct HvLpBufferList hv_buf;
20 u64 left_this_page;
21 u64 cur = virt_to_abs(buffer);
22
23 while (len) {
24 hv_buf.addr = cur;
25 left_this_page = ((cur & HW_PAGE_MASK) + HW_PAGE_SIZE) - cur;
26 if (left_this_page > len)
27 left_this_page = len;
28 hv_buf.len = left_this_page;
29 len -= left_this_page;
30 HvCall2(HvCallBaseWriteLogBuffer,
31 virt_to_abs(&hv_buf),
32 left_this_page);
33 cur = (cur & HW_PAGE_MASK) + HW_PAGE_SIZE;
34 }
35}
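
The loop above hands the hypervisor at most one hardware page per HvCall2, so it has to work out how many bytes remain on the page containing the current absolute address. A minimal restatement of that arithmetic, assuming only the HW_PAGE_SIZE/HW_PAGE_MASK definitions from asm/page.h that the function already uses; the helper name is illustrative.

/* Sketch: bytes remaining on the current hardware page.
 * (cur & HW_PAGE_MASK) is the start of the page containing cur,
 * so adding HW_PAGE_SIZE gives the start of the next page and the
 * difference from cur is the number of bytes left on this page.
 */
#include <linux/types.h>
#include <asm/page.h>

static inline u64 bytes_left_on_hw_page(u64 cur)
{
	return ((cur & HW_PAGE_MASK) + HW_PAGE_SIZE) - cur;
}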
diff --git a/arch/powerpc/platforms/iseries/hvlpconfig.c b/arch/powerpc/platforms/iseries/hvlpconfig.c
new file mode 100644
index 00000000000..f0475f0b185
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/hvlpconfig.c
@@ -0,0 +1,39 @@
1/*
2 * Copyright (C) 2001 Kyle A. Lucke, IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#include <linux/module.h>
20#include <asm/iseries/hv_lp_config.h>
21#include "it_lp_naca.h"
22
23HvLpIndex HvLpConfig_getLpIndex_outline(void)
24{
25 return HvLpConfig_getLpIndex();
26}
27EXPORT_SYMBOL(HvLpConfig_getLpIndex_outline);
28
29HvLpIndex HvLpConfig_getLpIndex(void)
30{
31 return itLpNaca.xLpIndex;
32}
33EXPORT_SYMBOL(HvLpConfig_getLpIndex);
34
35HvLpIndex HvLpConfig_getPrimaryLpIndex(void)
36{
37 return itLpNaca.xPrimaryLpIndex;
38}
39EXPORT_SYMBOL_GPL(HvLpConfig_getPrimaryLpIndex);
diff --git a/arch/powerpc/platforms/iseries/iommu.c b/arch/powerpc/platforms/iseries/iommu.c
new file mode 100644
index 00000000000..d8b76335bd1
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/iommu.c
@@ -0,0 +1,260 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
3 *
4 * Rewrite, cleanup:
5 *
6 * Copyright (C) 2004 Olof Johansson <olof@lixom.net>, IBM Corporation
7 * Copyright (C) 2006 Olof Johansson <olof@lixom.net>
8 *
9 * Dynamic DMA mapping support, iSeries-specific parts.
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/types.h>
28#include <linux/dma-mapping.h>
29#include <linux/list.h>
30#include <linux/pci.h>
31#include <linux/module.h>
32#include <linux/slab.h>
33
34#include <asm/iommu.h>
35#include <asm/vio.h>
36#include <asm/tce.h>
37#include <asm/machdep.h>
38#include <asm/abs_addr.h>
39#include <asm/prom.h>
40#include <asm/pci-bridge.h>
41#include <asm/iseries/hv_call_xm.h>
42#include <asm/iseries/hv_call_event.h>
43#include <asm/iseries/iommu.h>
44
45static int tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
46 unsigned long uaddr, enum dma_data_direction direction,
47 struct dma_attrs *attrs)
48{
49 u64 rc;
50 u64 tce, rpn;
51
52 while (npages--) {
53 rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
54 tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;
55
56 if (tbl->it_type == TCE_VB) {
57 /* Virtual Bus */
58 tce |= TCE_VALID|TCE_ALLIO;
59 if (direction != DMA_TO_DEVICE)
60 tce |= TCE_VB_WRITE;
61 } else {
62 /* PCI Bus */
63 tce |= TCE_PCI_READ; /* Read allowed */
64 if (direction != DMA_TO_DEVICE)
65 tce |= TCE_PCI_WRITE;
66 }
67
68 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, tce);
69 if (rc)
70 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
71 rc);
72 index++;
73 uaddr += TCE_PAGE_SIZE;
74 }
75 return 0;
76}
77
78static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
79{
80 u64 rc;
81
82 while (npages--) {
83 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
84 if (rc)
85 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%llx\n",
86 rc);
87 index++;
88 }
89}
90
91/*
92 * Structure passed to HvCallXm_getTceTableParms
93 */
94struct iommu_table_cb {
95 unsigned long itc_busno; /* Bus number for this tce table */
96 unsigned long itc_start; /* Will be NULL for secondary */
97 unsigned long itc_totalsize; /* Size (in pages) of whole table */
98 unsigned long itc_offset; /* Index into real tce table of the
99 start of our section */
100 unsigned long itc_size; /* Size (in pages) of our section */
101 unsigned long itc_index; /* Index of this tce table */
102 unsigned short itc_maxtables; /* Max num of tables for partition */
103 unsigned char itc_virtbus; /* Flag to indicate virtual bus */
104 unsigned char itc_slotno; /* IOA Tce Slot Index */
105 unsigned char itc_rsvd[4];
106};
107
108/*
109 * Call Hv with the architected data structure to get TCE table info.
110 * Put the returned data into the Linux representation of the
111 * TCE table data.
112 * The Hardware Tce table comes in three flavors.
113 * 1. TCE table shared between Buses.
114 * 2. TCE table per Bus.
115 * 3. TCE Table per IOA.
116 */
117void iommu_table_getparms_iSeries(unsigned long busno,
118 unsigned char slotno,
119 unsigned char virtbus,
120 struct iommu_table* tbl)
121{
122 struct iommu_table_cb *parms;
123
124 parms = kzalloc(sizeof(*parms), GFP_KERNEL);
125 if (parms == NULL)
126 panic("PCI_DMA: TCE Table Allocation failed.");
127
128 parms->itc_busno = busno;
129 parms->itc_slotno = slotno;
130 parms->itc_virtbus = virtbus;
131
132 HvCallXm_getTceTableParms(iseries_hv_addr(parms));
133
134 if (parms->itc_size == 0)
135 panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
136
137 /* itc_size is in pages worth of table, it_size is in # of entries */
138 tbl->it_size = (parms->itc_size * TCE_PAGE_SIZE) / TCE_ENTRY_SIZE;
139 tbl->it_busno = parms->itc_busno;
140 tbl->it_offset = parms->itc_offset;
141 tbl->it_index = parms->itc_index;
142 tbl->it_blocksize = 1;
143 tbl->it_type = virtbus ? TCE_VB : TCE_PCI;
144
145 kfree(parms);
146}
147
148
149#ifdef CONFIG_PCI
150/*
151 * This function compares the known tables to find an iommu_table
152 * that has already been built for hardware TCEs.
153 */
154static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
155{
156 struct device_node *node;
157
158 for (node = NULL; (node = of_find_all_nodes(node)); ) {
159 struct pci_dn *pdn = PCI_DN(node);
160 struct iommu_table *it;
161
162 if (pdn == NULL)
163 continue;
164 it = pdn->iommu_table;
165 if ((it != NULL) &&
166 (it->it_type == TCE_PCI) &&
167 (it->it_offset == tbl->it_offset) &&
168 (it->it_index == tbl->it_index) &&
169 (it->it_size == tbl->it_size)) {
170 of_node_put(node);
171 return it;
172 }
173 }
174 return NULL;
175}
176
177
178static void pci_dma_dev_setup_iseries(struct pci_dev *pdev)
179{
180 struct iommu_table *tbl;
181 struct device_node *dn = pci_device_to_OF_node(pdev);
182 struct pci_dn *pdn = PCI_DN(dn);
183 const u32 *lsn = of_get_property(dn, "linux,logical-slot-number", NULL);
184
185 BUG_ON(lsn == NULL);
186
187 tbl = kzalloc(sizeof(struct iommu_table), GFP_KERNEL);
188
189 iommu_table_getparms_iSeries(pdn->busno, *lsn, 0, tbl);
190
191 /* Look for existing tce table */
192 pdn->iommu_table = iommu_table_find(tbl);
193 if (pdn->iommu_table == NULL)
194 pdn->iommu_table = iommu_init_table(tbl, -1);
195 else
196 kfree(tbl);
197 set_iommu_table_base(&pdev->dev, pdn->iommu_table);
198}
199#else
200#define pci_dma_dev_setup_iseries NULL
201#endif
202
203static struct iommu_table veth_iommu_table;
204static struct iommu_table vio_iommu_table;
205
206void *iseries_hv_alloc(size_t size, dma_addr_t *dma_handle, gfp_t flag)
207{
208 return iommu_alloc_coherent(NULL, &vio_iommu_table, size, dma_handle,
209 DMA_BIT_MASK(32), flag, -1);
210}
211EXPORT_SYMBOL_GPL(iseries_hv_alloc);
212
213void iseries_hv_free(size_t size, void *vaddr, dma_addr_t dma_handle)
214{
215 iommu_free_coherent(&vio_iommu_table, size, vaddr, dma_handle);
216}
217EXPORT_SYMBOL_GPL(iseries_hv_free);
218
219dma_addr_t iseries_hv_map(void *vaddr, size_t size,
220 enum dma_data_direction direction)
221{
222 return iommu_map_page(NULL, &vio_iommu_table, virt_to_page(vaddr),
223 (unsigned long)vaddr % PAGE_SIZE, size,
224 DMA_BIT_MASK(32), direction, NULL);
225}
226
227void iseries_hv_unmap(dma_addr_t dma_handle, size_t size,
228 enum dma_data_direction direction)
229{
230 iommu_unmap_page(&vio_iommu_table, dma_handle, size, direction, NULL);
231}
232
233void __init iommu_vio_init(void)
234{
235 iommu_table_getparms_iSeries(255, 0, 0xff, &veth_iommu_table);
236 veth_iommu_table.it_size /= 2;
237 vio_iommu_table = veth_iommu_table;
238 vio_iommu_table.it_offset += veth_iommu_table.it_size;
239
240 if (!iommu_init_table(&veth_iommu_table, -1))
241 printk("Virtual Bus VETH TCE table failed.\n");
242 if (!iommu_init_table(&vio_iommu_table, -1))
243 printk("Virtual Bus VIO TCE table failed.\n");
244}
245
246struct iommu_table *vio_build_iommu_table_iseries(struct vio_dev *dev)
247{
248 if (strcmp(dev->type, "network") == 0)
249 return &veth_iommu_table;
250 return &vio_iommu_table;
251}
252
253void iommu_init_early_iSeries(void)
254{
255 ppc_md.tce_build = tce_build_iSeries;
256 ppc_md.tce_free = tce_free_iSeries;
257
258 ppc_md.pci_dma_dev_setup = pci_dma_dev_setup_iseries;
259 set_pci_dma_ops(&dma_iommu_ops);
260}
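
tce_build_iSeries() above assembles each TCE from the real page number of the mapped address plus permission bits that differ between the virtual bus (TCE_VB) and PCI. A minimal sketch of just that entry construction, using the same TCE_* constants from asm/tce.h and virt_to_abs() that the function uses; the helper name is illustrative and not part of this patch.

/* Sketch: building one TCE word the way tce_build_iSeries() does.
 * The real page number goes in the RPN field; virtual-bus tables get
 * TCE_VALID/TCE_ALLIO (plus TCE_VB_WRITE unless the mapping is
 * DMA_TO_DEVICE), while PCI tables always allow reads and optionally
 * get TCE_PCI_WRITE.
 */
#include <linux/dma-mapping.h>
#include <asm/abs_addr.h>
#include <asm/tce.h>

static u64 build_iseries_tce(unsigned long uaddr, int is_virtual_bus,
			     enum dma_data_direction direction)
{
	u64 rpn = virt_to_abs(uaddr) >> TCE_SHIFT;
	u64 tce = (rpn & TCE_RPN_MASK) << TCE_RPN_SHIFT;

	if (is_virtual_bus) {
		tce |= TCE_VALID | TCE_ALLIO;
		if (direction != DMA_TO_DEVICE)
			tce |= TCE_VB_WRITE;
	} else {
		tce |= TCE_PCI_READ;
		if (direction != DMA_TO_DEVICE)
			tce |= TCE_PCI_WRITE;
	}
	return tce;
}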
diff --git a/arch/powerpc/platforms/iseries/ipl_parms.h b/arch/powerpc/platforms/iseries/ipl_parms.h
new file mode 100644
index 00000000000..83e4ca42fc5
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/ipl_parms.h
@@ -0,0 +1,68 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_IPL_PARMS_H
19#define _ISERIES_IPL_PARMS_H
20
21/*
22 * This struct maps the IPL Parameters DMA'd from the SP.
23 *
24 * Warning:
25 * This data must map in exactly 64 bytes and match the architecture for
26 * the IPL parms
27 */
28
29#include <asm/types.h>
30
31struct ItIplParmsReal {
32 u8 xFormat; // Defines format of IplParms x00-x00
33 u8 xRsvd01:6; // Reserved x01-x01
34 u8 xAlternateSearch:1; // Alternate search indicator ...
35 u8 xUaSupplied:1; // UA Supplied on programmed IPL...
36 u8 xLsUaFormat; // Format byte for UA x02-x02
37 u8 xRsvd02; // Reserved x03-x03
38 u32 xLsUa; // LS UA x04-x07
39 u32 xUnusedLsLid; // First OS LID to load x08-x0B
40 u16 xLsBusNumber; // LS Bus Number x0C-x0D
41 u8 xLsCardAdr; // LS Card Address x0E-x0E
42 u8 xLsBoardAdr; // LS Board Address x0F-x0F
43 u32 xRsvd03; // Reserved x10-x13
44 u8 xSpcnPresent:1; // SPCN present x14-x14
45 u8 xCpmPresent:1; // CPM present ...
46 u8 xRsvd04:6; // Reserved ...
47 u8 xRsvd05:4; // Reserved x15-x15
48 u8 xKeyLock:4; // Keylock setting ...
49 u8 xRsvd06:6; // Reserved x16-x16
50 u8 xIplMode:2; // Ipl mode (A|B|C|D) ...
51 u8 xHwIplType; // Fast v slow v slow EC HW IPL x17-x17
52 u16 xCpmEnabledIpl:1; // CPM in effect when IPL initiated x18-x19
53 u16 xPowerOnResetIpl:1; // Indicate POR condition ...
54 u16 xMainStorePreserved:1; // Main Storage is preserved ...
55 u16 xRsvd07:13; // Reserved ...
56 u16 xIplSource:16; // Ipl source x1A-x1B
57 u8 xIplReason:8; // Reason for this IPL x1C-x1C
58 u8 xRsvd08; // Reserved x1D-x1D
59 u16 xRsvd09; // Reserved x1E-x1F
60 u16 xSysBoxType; // System Box Type x20-x21
61 u16 xSysProcType; // System Processor Type x22-x23
62 u32 xRsvd10; // Reserved x24-x27
63 u64 xRsvd11; // Reserved x28-x2F
64 u64 xRsvd12; // Reserved x30-x37
65 u64 xRsvd13; // Reserved x38-x3F
66};
67
68#endif /* _ISERIES_IPL_PARMS_H */
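
The warning in the header comment says the structure must map to exactly 64 bytes (offsets x00-x3F). A hedged sketch of how that requirement could be checked at compile time; BUILD_BUG_ON is the standard kernel mechanism, and the check would live in a .c file that includes this header (for example lpardata.c, which instantiates xItIplParmsReal). This check is not part of the patch itself.

/* Sketch: compile-time check of the "exactly 64 bytes" requirement
 * stated in ipl_parms.h.
 */
#include <linux/kernel.h>
#include "ipl_parms.h"

static inline void check_ipl_parms_layout(void)
{
	BUILD_BUG_ON(sizeof(struct ItIplParmsReal) != 64);
}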
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
new file mode 100644
index 00000000000..b2103453eb0
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -0,0 +1,400 @@
1/*
2 * This module supports the iSeries PCI bus interrupt handling
3 * Copyright (C) 20yy <Robert L Holtorf> <IBM Corp>
4 * Copyright (C) 2004-2005 IBM Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the:
18 * Free Software Foundation, Inc.,
19 * 59 Temple Place, Suite 330,
20 * Boston, MA 02111-1307 USA
21 *
22 * Change Activity:
23 * Created, December 13, 2000 by Wayne Holm
24 * End Change Activity
25 */
26#include <linux/pci.h>
27#include <linux/init.h>
28#include <linux/threads.h>
29#include <linux/smp.h>
30#include <linux/param.h>
31#include <linux/string.h>
32#include <linux/bootmem.h>
33#include <linux/irq.h>
34#include <linux/spinlock.h>
35
36#include <asm/paca.h>
37#include <asm/iseries/hv_types.h>
38#include <asm/iseries/hv_lp_event.h>
39#include <asm/iseries/hv_call_xm.h>
40#include <asm/iseries/it_lp_queue.h>
41
42#include "irq.h"
43#include "pci.h"
44#include "call_pci.h"
45
46#ifdef CONFIG_PCI
47
48enum pci_event_type {
49 pe_bus_created = 0, /* PHB has been created */
50 pe_bus_error = 1, /* PHB has failed */
51 pe_bus_failed = 2, /* Msg to Secondary, Primary failed bus */
52 pe_node_failed = 4, /* Multi-adapter bridge has failed */
53 pe_node_recovered = 5, /* Multi-adapter bridge has recovered */
54 pe_bus_recovered = 12, /* PHB has been recovered */
55 pe_unquiese_bus = 18, /* Secondary bus unquiescing */
56 pe_bridge_error = 21, /* Bridge Error */
57 pe_slot_interrupt = 22 /* Slot interrupt */
58};
59
60struct pci_event {
61 struct HvLpEvent event;
62 union {
63 u64 __align; /* Align on an 8-byte boundary */
64 struct {
65 u32 fisr;
66 HvBusNumber bus_number;
67 HvSubBusNumber sub_bus_number;
68 HvAgentId dev_id;
69 } slot;
70 struct {
71 HvBusNumber bus_number;
72 HvSubBusNumber sub_bus_number;
73 } bus;
74 struct {
75 HvBusNumber bus_number;
76 HvSubBusNumber sub_bus_number;
77 HvAgentId dev_id;
78 } node;
79 } data;
80};
81
82static DEFINE_SPINLOCK(pending_irqs_lock);
83static int num_pending_irqs;
84static int pending_irqs[NR_IRQS];
85
86static void int_received(struct pci_event *event)
87{
88 int irq;
89
90 switch (event->event.xSubtype) {
91 case pe_slot_interrupt:
92 irq = event->event.xCorrelationToken;
93 if (irq < NR_IRQS) {
94 spin_lock(&pending_irqs_lock);
95 pending_irqs[irq]++;
96 num_pending_irqs++;
97 spin_unlock(&pending_irqs_lock);
98 } else {
99 printk(KERN_WARNING "int_received: bad irq number %d\n",
100 irq);
101 HvCallPci_eoi(event->data.slot.bus_number,
102 event->data.slot.sub_bus_number,
103 event->data.slot.dev_id);
104 }
105 break;
106 /* Ignore error recovery events for now */
107 case pe_bus_created:
108 printk(KERN_INFO "int_received: system bus %d created\n",
109 event->data.bus.bus_number);
110 break;
111 case pe_bus_error:
112 case pe_bus_failed:
113 printk(KERN_INFO "int_received: system bus %d failed\n",
114 event->data.bus.bus_number);
115 break;
116 case pe_bus_recovered:
117 case pe_unquiese_bus:
118 printk(KERN_INFO "int_received: system bus %d recovered\n",
119 event->data.bus.bus_number);
120 break;
121 case pe_node_failed:
122 case pe_bridge_error:
123 printk(KERN_INFO
124 "int_received: multi-adapter bridge %d/%d/%d failed\n",
125 event->data.node.bus_number,
126 event->data.node.sub_bus_number,
127 event->data.node.dev_id);
128 break;
129 case pe_node_recovered:
130 printk(KERN_INFO
131 "int_received: multi-adapter bridge %d/%d/%d recovered\n",
132 event->data.node.bus_number,
133 event->data.node.sub_bus_number,
134 event->data.node.dev_id);
135 break;
136 default:
137 printk(KERN_ERR
138 "int_received: unrecognized event subtype 0x%x\n",
139 event->event.xSubtype);
140 break;
141 }
142}
143
144static void pci_event_handler(struct HvLpEvent *event)
145{
146 if (event && (event->xType == HvLpEvent_Type_PciIo)) {
147 if (hvlpevent_is_int(event))
148 int_received((struct pci_event *)event);
149 else
150 printk(KERN_ERR
151 "pci_event_handler: unexpected ack received\n");
152 } else if (event)
153 printk(KERN_ERR
154 "pci_event_handler: Unrecognized PCI event type 0x%x\n",
155 (int)event->xType);
156 else
157 printk(KERN_ERR "pci_event_handler: NULL event received\n");
158}
159
160#define REAL_IRQ_TO_SUBBUS(irq) (((irq) >> 14) & 0xff)
161#define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1)
162#define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1)
163#define REAL_IRQ_TO_FUNC(irq) ((irq) & 7)
164
165/*
166 * This will be called by device drivers (via enable_IRQ)
167 * to enable INTA in the bridge interrupt status register.
168 */
169static void iseries_enable_IRQ(struct irq_data *d)
170{
171 u32 bus, dev_id, function, mask;
172 const u32 sub_bus = 0;
173 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
174
175 /* The IRQ has already been locked by the caller */
176 bus = REAL_IRQ_TO_BUS(rirq);
177 function = REAL_IRQ_TO_FUNC(rirq);
178 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
179
180 /* Unmask secondary INTA */
181 mask = 0x80000000;
182 HvCallPci_unmaskInterrupts(bus, sub_bus, dev_id, mask);
183}
184
185/* This is called by iseries_activate_IRQs */
186static unsigned int iseries_startup_IRQ(struct irq_data *d)
187{
188 u32 bus, dev_id, function, mask;
189 const u32 sub_bus = 0;
190 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
191
192 bus = REAL_IRQ_TO_BUS(rirq);
193 function = REAL_IRQ_TO_FUNC(rirq);
194 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
195
196 /* Link the IRQ number to the bridge */
197 HvCallXm_connectBusUnit(bus, sub_bus, dev_id, d->irq);
198
199 /* Unmask bridge interrupts in the FISR */
200 mask = 0x01010000 << function;
201 HvCallPci_unmaskFisr(bus, sub_bus, dev_id, mask);
202 iseries_enable_IRQ(d);
203 return 0;
204}
205
206/*
207 * This is called out of iSeries_fixup to activate interrupt
208 * generation for usable slots
209 */
210void __init iSeries_activate_IRQs()
211{
212 int irq;
213 unsigned long flags;
214
215 for_each_irq (irq) {
216 struct irq_desc *desc = irq_to_desc(irq);
217 struct irq_chip *chip;
218
219 if (!desc)
220 continue;
221
222 chip = irq_desc_get_chip(desc);
223 if (chip && chip->irq_startup) {
224 raw_spin_lock_irqsave(&desc->lock, flags);
225 chip->irq_startup(&desc->irq_data);
226 raw_spin_unlock_irqrestore(&desc->lock, flags);
227 }
228 }
229}
230
231/* this is not called anywhere currently */
232static void iseries_shutdown_IRQ(struct irq_data *d)
233{
234 u32 bus, dev_id, function, mask;
235 const u32 sub_bus = 0;
236 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
237
238 /* irq should be locked by the caller */
239 bus = REAL_IRQ_TO_BUS(rirq);
240 function = REAL_IRQ_TO_FUNC(rirq);
241 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
242
243 /* Invalidate the IRQ number in the bridge */
244 HvCallXm_connectBusUnit(bus, sub_bus, dev_id, 0);
245
246 /* Mask bridge interrupts in the FISR */
247 mask = 0x01010000 << function;
248 HvCallPci_maskFisr(bus, sub_bus, dev_id, mask);
249}
250
251/*
252 * This will be called by device drivers (via disable_IRQ)
253 * to disable INTA in the bridge interrupt status register.
254 */
255static void iseries_disable_IRQ(struct irq_data *d)
256{
257 u32 bus, dev_id, function, mask;
258 const u32 sub_bus = 0;
259 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
260
261 /* The IRQ has already been locked by the caller */
262 bus = REAL_IRQ_TO_BUS(rirq);
263 function = REAL_IRQ_TO_FUNC(rirq);
264 dev_id = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
265
266 /* Mask secondary INTA */
267 mask = 0x80000000;
268 HvCallPci_maskInterrupts(bus, sub_bus, dev_id, mask);
269}
270
271static void iseries_end_IRQ(struct irq_data *d)
272{
273 unsigned int rirq = (unsigned int)irqd_to_hwirq(d);
274
275 HvCallPci_eoi(REAL_IRQ_TO_BUS(rirq), REAL_IRQ_TO_SUBBUS(rirq),
276 (REAL_IRQ_TO_IDSEL(rirq) << 4) + REAL_IRQ_TO_FUNC(rirq));
277}
278
279static struct irq_chip iseries_pic = {
280 .name = "iSeries",
281 .irq_startup = iseries_startup_IRQ,
282 .irq_shutdown = iseries_shutdown_IRQ,
283 .irq_unmask = iseries_enable_IRQ,
284 .irq_mask = iseries_disable_IRQ,
285 .irq_eoi = iseries_end_IRQ
286};
287
288/*
289 * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot
290 * It calculates the irq value for the slot.
291 * Note that sub_bus is always 0 (at the moment at least).
292 */
293int __init iSeries_allocate_IRQ(HvBusNumber bus,
294 HvSubBusNumber sub_bus, u32 bsubbus)
295{
296 unsigned int realirq;
297 u8 idsel = ISERIES_GET_DEVICE_FROM_SUBBUS(bsubbus);
298 u8 function = ISERIES_GET_FUNCTION_FROM_SUBBUS(bsubbus);
299
300 realirq = (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3)
301 + function;
302
303 return irq_create_mapping(NULL, realirq);
304}
305
306#endif /* CONFIG_PCI */
307
308/*
309 * Get the next pending IRQ.
310 */
311unsigned int iSeries_get_irq(void)
312{
313 int irq = NO_IRQ_IGNORE;
314
315#ifdef CONFIG_SMP
316 if (get_lppaca()->int_dword.fields.ipi_cnt) {
317 get_lppaca()->int_dword.fields.ipi_cnt = 0;
318 smp_ipi_demux();
319 }
320#endif /* CONFIG_SMP */
321 if (hvlpevent_is_pending())
322 process_hvlpevents();
323
324#ifdef CONFIG_PCI
325 if (num_pending_irqs) {
326 spin_lock(&pending_irqs_lock);
327 for (irq = 0; irq < NR_IRQS; irq++) {
328 if (pending_irqs[irq]) {
329 pending_irqs[irq]--;
330 num_pending_irqs--;
331 break;
332 }
333 }
334 spin_unlock(&pending_irqs_lock);
335 if (irq >= NR_IRQS)
336 irq = NO_IRQ_IGNORE;
337 }
338#endif
339
340 return irq;
341}
342
343#ifdef CONFIG_PCI
344
345static int iseries_irq_host_map(struct irq_host *h, unsigned int virq,
346 irq_hw_number_t hw)
347{
348 irq_set_chip_and_handler(virq, &iseries_pic, handle_fasteoi_irq);
349
350 return 0;
351}
352
353static int iseries_irq_host_match(struct irq_host *h, struct device_node *np)
354{
355 /* Match all */
356 return 1;
357}
358
359static struct irq_host_ops iseries_irq_host_ops = {
360 .map = iseries_irq_host_map,
361 .match = iseries_irq_host_match,
362};
363
364/*
365 * This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c
366 * It must be called before the bus walk.
367 */
368void __init iSeries_init_IRQ(void)
369{
370 /* Register PCI event handler and open an event path */
371 struct irq_host *host;
372 int ret;
373
374 /*
375 * The Hypervisor only allows us up to 256 interrupt
376 * sources (the irq number is passed in a u8).
377 */
378 irq_set_virq_count(256);
379
380 /* Create irq host. No need for a revmap since HV will give us
381 * back our virtual irq number
382 */
383 host = irq_alloc_host(NULL, IRQ_HOST_MAP_NOMAP, 0,
384 &iseries_irq_host_ops, 0);
385 BUG_ON(host == NULL);
386 irq_set_default_host(host);
387
388 ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
389 &pci_event_handler);
390 if (ret == 0) {
391 ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
392 if (ret != 0)
393 printk(KERN_ERR "iseries_init_IRQ: open event path "
394 "failed with rc 0x%x\n", ret);
395 } else
396 printk(KERN_ERR "iseries_init_IRQ: register handler "
397 "failed with rc 0x%x\n", ret);
398}
399
400#endif /* CONFIG_PCI */
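
iSeries_allocate_IRQ() above packs sub-bus, bus, IDSEL and function into one "real" IRQ number, and the REAL_IRQ_TO_* macros near the top of the file unpack it again. A minimal sketch, using only values implied by the code above, showing that the packing amounts to sub_bus << 14 | (bus - 1) << 6 | (idsel - 1) << 3 | function and that the macros invert it; the helper name is local to this sketch.

/* Sketch: the encoding built by iSeries_allocate_IRQ(). */
static unsigned int pack_real_irq(unsigned int sub_bus, unsigned int bus,
				  unsigned int idsel, unsigned int function)
{
	return (((((sub_bus << 8) + (bus - 1)) << 3) + (idsel - 1)) << 3)
		+ function;
}

/*
 * Example: pack_real_irq(0, 1, 2, 0) == 8, and decoding it gives
 * REAL_IRQ_TO_BUS(8) == 1, REAL_IRQ_TO_IDSEL(8) == 2 and
 * REAL_IRQ_TO_FUNC(8) == 0, matching the original slot.
 */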
diff --git a/arch/powerpc/platforms/iseries/irq.h b/arch/powerpc/platforms/iseries/irq.h
new file mode 100644
index 00000000000..a1c23607403
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/irq.h
@@ -0,0 +1,13 @@
1#ifndef _ISERIES_IRQ_H
2#define _ISERIES_IRQ_H
3
4#ifdef CONFIG_PCI
5extern void iSeries_init_IRQ(void);
6extern int iSeries_allocate_IRQ(HvBusNumber, HvSubBusNumber, u32);
7extern void iSeries_activate_IRQs(void);
8#else
9#define iSeries_init_IRQ NULL
10#endif
11extern unsigned int iSeries_get_irq(void);
12
13#endif /* _ISERIES_IRQ_H */
diff --git a/arch/powerpc/platforms/iseries/it_exp_vpd_panel.h b/arch/powerpc/platforms/iseries/it_exp_vpd_panel.h
new file mode 100644
index 00000000000..6de9097b7f5
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/it_exp_vpd_panel.h
@@ -0,0 +1,51 @@
1/*
2 * Copyright (C) 2002 Dave Boutcher IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H
19#define _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H
20
21/*
22 * This struct maps the panel information
23 *
24 * Warning:
25 * This data must match the architecture for the panel information
26 */
27
28#include <asm/types.h>
29
30struct ItExtVpdPanel {
31 /* Definition of the Extended Vpd On Panel Data Area */
32 char systemSerial[8];
33 char mfgID[4];
34 char reserved1[24];
35 char machineType[4];
36 char systemID[6];
37 char somUniqueCnt[4];
38 char serialNumberCount;
39 char reserved2[7];
40 u16 bbu3;
41 u16 bbu2;
42 u16 bbu1;
43 char xLocationLabel[8];
44 u8 xRsvd1[6];
45 u16 xFrameId;
46 u8 xRsvd2[48];
47};
48
49extern struct ItExtVpdPanel xItExtVpdPanel;
50
51#endif /* _PLATFORMS_ISERIES_IT_EXT_VPD_PANEL_H */
diff --git a/arch/powerpc/platforms/iseries/it_lp_naca.h b/arch/powerpc/platforms/iseries/it_lp_naca.h
new file mode 100644
index 00000000000..cf6dcf6ef07
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/it_lp_naca.h
@@ -0,0 +1,80 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _PLATFORMS_ISERIES_IT_LP_NACA_H
19#define _PLATFORMS_ISERIES_IT_LP_NACA_H
20
21#include <linux/types.h>
22
23/*
24 * This control block contains the data that is shared between the
25 * hypervisor (PLIC) and the OS.
26 */
27
28struct ItLpNaca {
29// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
30 u32 xDesc; // Eye catcher x00-x03
31 u16 xSize; // Size of this class x04-x05
32 u16 xIntHdlrOffset; // Offset to IntHdlr array x06-x07
33 u8 xMaxIntHdlrEntries; // Number of entries in array x08-x08
34 u8 xPrimaryLpIndex; // LP Index of Primary x09-x09
35 u8 xServiceLpIndex; // LP Ind of Service Focal Point x0A-x0A
36 u8 xLpIndex; // LP Index x0B-x0B
37 u16 xMaxLpQueues; // Number of allocated queues x0C-x0D
38 u16 xLpQueueOffset; // Offset to start of LP queues x0E-x0F
39 u8 xPirEnvironMode; // Piranha or hardware x10-x10
40 u8 xPirConsoleMode; // Piranha console indicator x11-x11
41 u8 xPirDasdMode; // Piranha dasd indicator x12-x12
42 u8 xRsvd1_0[5]; // Reserved for Piranha related x13-x17
43 u8 flags; // flags, see below x18-x1F
44 u8 xSpVpdFormat; // VPD areas are in CSP format ...
45 u8 xIntProcRatio; // Ratio of int procs to procs ...
46 u8 xRsvd1_2[5]; // Reserved ...
47 u16 xRsvd1_3; // Reserved x20-x21
48 u16 xPlicVrmIndex; // VRM index of PLIC x22-x23
49 u16 xMinSupportedSlicVrmInd;// Min supported OS VRM index x24-x25
50 u16 xMinCompatableSlicVrmInd;// Min compatible OS VRM index x26-x27
51 u64 xLoadAreaAddr; // ER address of load area x28-x2F
52 u32 xLoadAreaChunks; // Chunks for the load area x30-x33
53 u32 xPaseSysCallCRMask; // Mask used to test CR before x34-x37
54 // doing an ASR switch on PASE
55 // system call.
56 u64 xSlicSegmentTablePtr; // Pointer to Slic seg table. x38-x3f
57 u8 xRsvd1_4[64]; // x40-x7F
58
59// CACHE_LINE_2 0x0080 - 0x00FF Contains local read-write data
60 u8 xRsvd2_0[128]; // Reserved x00-x7F
61
62// CACHE_LINE_3-6 0x0100 - 0x02FF Contains LP Queue indicators
63// NB: Padding required to keep xInterruptHdlr at x300 which is required
64// for v4r4 PLIC.
65 u8 xOldLpQueue[128]; // LP Queue needed for v4r4 100-17F
66 u8 xRsvd3_0[384]; // Reserved 180-2FF
67
68// CACHE_LINE_7-8 0x0300 - 0x03FF Contains the address of the OS interrupt
69// handlers
70 u64 xInterruptHdlr[32]; // Interrupt handlers 300-x3FF
71};
72
73extern struct ItLpNaca itLpNaca;
74
75#define ITLPNACA_LPAR 0x80 /* Is LPAR installed on the system */
76#define ITLPNACA_PARTITIONED 0x40 /* Is the system partitioned */
77#define ITLPNACA_HWSYNCEDTBS 0x20 /* Hardware synced TBs */
78#define ITLPNACA_HMTINT 0x10 /* Utilize MHT for interrupts */
79
80#endif /* _PLATFORMS_ISERIES_IT_LP_NACA_H */
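
The flags byte at offset x18 of this shared control block carries the ITLPNACA_* bits defined just above. A small illustrative helper (not part of this patch) showing how a caller might test one of them against the global itLpNaca exported by this header.

/* Sketch: testing an ITLPNACA_* bit in the shared ItLpNaca flags byte. */
#include "it_lp_naca.h"

static inline int iseries_lpar_installed(void)
{
	return (itLpNaca.flags & ITLPNACA_LPAR) != 0;
}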
diff --git a/arch/powerpc/platforms/iseries/ksyms.c b/arch/powerpc/platforms/iseries/ksyms.c
new file mode 100644
index 00000000000..2430848b98e
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/ksyms.c
@@ -0,0 +1,21 @@
1/*
2 * (C) 2001-2005 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/module.h>
10
11#include <asm/hw_irq.h>
12#include <asm/iseries/hv_call_sc.h>
13
14EXPORT_SYMBOL(HvCall0);
15EXPORT_SYMBOL(HvCall1);
16EXPORT_SYMBOL(HvCall2);
17EXPORT_SYMBOL(HvCall3);
18EXPORT_SYMBOL(HvCall4);
19EXPORT_SYMBOL(HvCall5);
20EXPORT_SYMBOL(HvCall6);
21EXPORT_SYMBOL(HvCall7);
diff --git a/arch/powerpc/platforms/iseries/lpardata.c b/arch/powerpc/platforms/iseries/lpardata.c
new file mode 100644
index 00000000000..98bd2d37038
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/lpardata.c
@@ -0,0 +1,319 @@
1/*
2 * Copyright 2001 Mike Corrigan, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/types.h>
10#include <linux/threads.h>
11#include <linux/module.h>
12#include <linux/bitops.h>
13#include <asm/processor.h>
14#include <asm/ptrace.h>
15#include <asm/abs_addr.h>
16#include <asm/lppaca.h>
17#include <asm/paca.h>
18#include <asm/iseries/lpar_map.h>
19#include <asm/iseries/it_lp_queue.h>
20#include <asm/iseries/alpaca.h>
21
22#include "naca.h"
23#include "vpd_areas.h"
24#include "spcomm_area.h"
25#include "ipl_parms.h"
26#include "processor_vpd.h"
27#include "release_data.h"
28#include "it_exp_vpd_panel.h"
29#include "it_lp_naca.h"
30
31/* The HvReleaseData is the root of the information shared between
32 * the hypervisor and Linux.
33 */
34const struct HvReleaseData hvReleaseData = {
35 .xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */
36 .xSize = sizeof(struct HvReleaseData),
37 .xVpdAreasPtrOffset = offsetof(struct naca_struct, xItVpdAreas),
38 .xSlicNacaAddr = &naca, /* 64-bit Naca address */
39 .xMsNucDataOffset = LPARMAP_PHYS,
40 .xFlags = HVREL_TAGSINACTIVE /* tags inactive */
41 /* 64 bit */
42 /* shared processors */
43 /* HMT allowed */
44 | 6, /* TEMP: This allows non-GA driver */
45 .xVrmIndex = 4, /* We are v5r2m0 */
46 .xMinSupportedPlicVrmIndex = 3, /* v5r1m0 */
47 .xMinCompatablePlicVrmIndex = 3, /* v5r1m0 */
48 .xVrmName = { 0xd3, 0x89, 0x95, 0xa4, /* "Linux 2.4.64" ebcdic */
49 0xa7, 0x40, 0xf2, 0x4b,
50 0xf4, 0x4b, 0xf6, 0xf4 },
51};
52
53/*
54 * The NACA. The first dword of the naca is required by the iSeries
55 * hypervisor to point to itVpdAreas. The hypervisor finds the NACA
56 * through the pointer in hvReleaseData.
57 */
58struct naca_struct naca = {
59 .xItVpdAreas = &itVpdAreas,
60 .xRamDisk = 0,
61 .xRamDiskSize = 0,
62};
63
64struct ItLpRegSave {
65 u32 xDesc; // Eye catcher "LpRS" ebcdic 000-003
66 u16 xSize; // Size of this class 004-005
67 u8 xInUse; // Area is live 006-007
68 u8 xRsvd1[9]; // Reserved 007-00F
69
70 u8 xFixedRegSave[352]; // Fixed Register Save Area 010-16F
71 u32 xCTRL; // Control Register 170-173
72 u32 xDEC; // Decrementer 174-177
73 u32 xFPSCR; // FP Status and Control Reg 178-17B
74 u32 xPVR; // Processor Version Number 17C-17F
75
76 u64 xMMCR0; // Monitor Mode Control Reg 0 180-187
77 u32 xPMC1; // Perf Monitor Counter 1 188-18B
78 u32 xPMC2; // Perf Monitor Counter 2 18C-18F
79 u32 xPMC3; // Perf Monitor Counter 3 190-193
80 u32 xPMC4; // Perf Monitor Counter 4 194-197
81 u32 xPIR; // Processor ID Reg 198-19B
82
83 u32 xMMCR1; // Monitor Mode Control Reg 1 19C-19F
84 u32 xMMCRA; // Monitor Mode Control Reg A 1A0-1A3
85 u32 xPMC5; // Perf Monitor Counter 5 1A4-1A7
86 u32 xPMC6; // Perf Monitor Counter 6 1A8-1AB
87 u32 xPMC7; // Perf Monitor Counter 7 1AC-1AF
88 u32 xPMC8; // Perf Monitor Counter 8 1B0-1B3
89 u32 xTSC; // Thread Switch Control 1B4-1B7
90 u32 xTST; // Thread Switch Timeout 1B8-1BB
91 u32 xRsvd; // Reserved 1BC-1BF
92
93 u64 xACCR; // Address Compare Control Reg 1C0-1C7
94 u64 xIMR; // Instruction Match Register 1C8-1CF
95 u64 xSDR1; // Storage Description Reg 1 1D0-1D7
96 u64 xSPRG0; // Special Purpose Reg General0 1D8-1DF
97 u64 xSPRG1; // Special Purpose Reg General1 1E0-1E7
98 u64 xSPRG2; // Special Purpose Reg General2 1E8-1EF
99 u64 xSPRG3; // Special Purpose Reg General3 1F0-1F7
100 u64 xTB; // Time Base Register 1F8-1FF
101
102 u64 xFPR[32]; // Floating Point Registers 200-2FF
103
104 u64 xMSR; // Machine State Register 300-307
105 u64 xNIA; // Next Instruction Address 308-30F
106
107 u64 xDABR; // Data Address Breakpoint Reg 310-317
108 u64 xIABR; // Inst Address Breakpoint Reg 318-31F
109
110 u64 xHID0; // HW Implementation Dependent0 320-327
111
112 u64 xHID4; // HW Implementation Dependent4 328-32F
113 u64 xSCOMd; // SCOM Data Reg (SPRG4) 330-337
114 u64 xSCOMc; // SCOM Command Reg (SPRG5) 338-33F
115 u64 xSDAR; // Sample Data Address Register 340-347
116 u64 xSIAR; // Sample Inst Address Register 348-34F
117
118 u8 xRsvd3[176]; // Reserved 350-3FF
119};
120
121extern void system_reset_iSeries(void);
122extern void machine_check_iSeries(void);
123extern void data_access_iSeries(void);
124extern void instruction_access_iSeries(void);
125extern void hardware_interrupt_iSeries(void);
126extern void alignment_iSeries(void);
127extern void program_check_iSeries(void);
128extern void fp_unavailable_iSeries(void);
129extern void decrementer_iSeries(void);
130extern void trap_0a_iSeries(void);
131extern void trap_0b_iSeries(void);
132extern void system_call_iSeries(void);
133extern void single_step_iSeries(void);
134extern void trap_0e_iSeries(void);
135extern void performance_monitor_iSeries(void);
136extern void data_access_slb_iSeries(void);
137extern void instruction_access_slb_iSeries(void);
138
139struct ItLpNaca itLpNaca = {
140 .xDesc = 0xd397d581, /* "LpNa" ebcdic */
141 .xSize = 0x0400, /* size of ItLpNaca */
142 .xIntHdlrOffset = 0x0300, /* offset to int array */
143 .xMaxIntHdlrEntries = 19, /* # ents */
144 .xPrimaryLpIndex = 0, /* Part # of primary */
145 .xServiceLpIndex = 0, /* Part # of serv */
146 .xLpIndex = 0, /* Part # of me */
147 .xMaxLpQueues = 0, /* # of LP queues */
148 .xLpQueueOffset = 0x100, /* offset of start of LP queues */
149 .xPirEnvironMode = 0, /* Piranha stuff */
150 .xPirConsoleMode = 0,
151 .xPirDasdMode = 0,
152 .flags = 0,
153 .xSpVpdFormat = 0,
154 .xIntProcRatio = 0,
155 .xPlicVrmIndex = 0, /* VRM index of PLIC */
156 .xMinSupportedSlicVrmInd = 0, /* min supported SLIC */
157 .xMinCompatableSlicVrmInd = 0, /* min compat SLIC */
158 .xLoadAreaAddr = 0, /* 64-bit addr of load area */
159 .xLoadAreaChunks = 0, /* chunks for load area */
160 .xPaseSysCallCRMask = 0, /* PASE mask */
161 .xSlicSegmentTablePtr = 0, /* seg table */
162 .xOldLpQueue = { 0 }, /* Old LP Queue */
163 .xInterruptHdlr = {
164 (u64)system_reset_iSeries, /* 0x100 System Reset */
165 (u64)machine_check_iSeries, /* 0x200 Machine Check */
166 (u64)data_access_iSeries, /* 0x300 Data Access */
167 (u64)instruction_access_iSeries, /* 0x400 Instruction Access */
168 (u64)hardware_interrupt_iSeries, /* 0x500 External */
169 (u64)alignment_iSeries, /* 0x600 Alignment */
170 (u64)program_check_iSeries, /* 0x700 Program Check */
171 (u64)fp_unavailable_iSeries, /* 0x800 FP Unavailable */
172 (u64)decrementer_iSeries, /* 0x900 Decrementer */
173 (u64)trap_0a_iSeries, /* 0xa00 Trap 0A */
174 (u64)trap_0b_iSeries, /* 0xb00 Trap 0B */
175 (u64)system_call_iSeries, /* 0xc00 System Call */
176 (u64)single_step_iSeries, /* 0xd00 Single Step */
177 (u64)trap_0e_iSeries, /* 0xe00 Trap 0E */
178 (u64)performance_monitor_iSeries,/* 0xf00 Performance Monitor */
179 0, /* int 0x1000 */
180 0, /* int 0x1010 */
181 0, /* int 0x1020 CPU ctls */
182 (u64)hardware_interrupt_iSeries, /* SC Ret Hdlr */
183 (u64)data_access_slb_iSeries, /* 0x380 D-SLB */
184 (u64)instruction_access_slb_iSeries /* 0x480 I-SLB */
185 }
186};
187
188/* May be filled in by the hypervisor so cannot end up in the BSS */
189static struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data")));
190
191/* May be filled in by the hypervisor so cannot end up in the BSS */
192struct ItExtVpdPanel xItExtVpdPanel __attribute__((__section__(".data")));
193
194#define maxPhysicalProcessors 32
195
196struct IoHriProcessorVpd xIoHriProcessorVpd[maxPhysicalProcessors] = {
197 {
198 .xInstCacheOperandSize = 32,
199 .xDataCacheOperandSize = 32,
200 .xProcFreq = 50000000,
201 .xTimeBaseFreq = 50000000,
202 .xPVR = 0x3600
203 }
204};
205
206/* Space for Main Store Vpd 27,200 bytes */
207/* May be filled in by the hypervisor so cannot end up in the BSS */
208u64 xMsVpd[3400] __attribute__((__section__(".data")));
209
210/* Space for Recovery Log Buffer */
211/* May be filled in by the hypervisor so cannot end up in the BSS */
212static u64 xRecoveryLogBuffer[32] __attribute__((__section__(".data")));
213
214static const struct SpCommArea xSpCommArea = {
215 .xDesc = 0xE2D7C3C2,
216 .xFormat = 1,
217};
218
219static const struct ItLpRegSave iseries_reg_save[] = {
220 [0 ... (NR_CPUS-1)] = {
221 .xDesc = 0xd397d9e2, /* "LpRS" */
222 .xSize = sizeof(struct ItLpRegSave),
223 },
224};
225
226#define ALPACA_INIT(number) \
227{ \
228 .lppaca_ptr = &lppaca[number], \
229 .reg_save_ptr = &iseries_reg_save[number], \
230}
231
232const struct alpaca alpaca[] = {
233 ALPACA_INIT( 0),
234#if NR_CPUS > 1
235 ALPACA_INIT( 1), ALPACA_INIT( 2), ALPACA_INIT( 3),
236#if NR_CPUS > 4
237 ALPACA_INIT( 4), ALPACA_INIT( 5), ALPACA_INIT( 6), ALPACA_INIT( 7),
238#if NR_CPUS > 8
239 ALPACA_INIT( 8), ALPACA_INIT( 9), ALPACA_INIT(10), ALPACA_INIT(11),
240 ALPACA_INIT(12), ALPACA_INIT(13), ALPACA_INIT(14), ALPACA_INIT(15),
241 ALPACA_INIT(16), ALPACA_INIT(17), ALPACA_INIT(18), ALPACA_INIT(19),
242 ALPACA_INIT(20), ALPACA_INIT(21), ALPACA_INIT(22), ALPACA_INIT(23),
243 ALPACA_INIT(24), ALPACA_INIT(25), ALPACA_INIT(26), ALPACA_INIT(27),
244 ALPACA_INIT(28), ALPACA_INIT(29), ALPACA_INIT(30), ALPACA_INIT(31),
245#if NR_CPUS > 32
246 ALPACA_INIT(32), ALPACA_INIT(33), ALPACA_INIT(34), ALPACA_INIT(35),
247 ALPACA_INIT(36), ALPACA_INIT(37), ALPACA_INIT(38), ALPACA_INIT(39),
248 ALPACA_INIT(40), ALPACA_INIT(41), ALPACA_INIT(42), ALPACA_INIT(43),
249 ALPACA_INIT(44), ALPACA_INIT(45), ALPACA_INIT(46), ALPACA_INIT(47),
250 ALPACA_INIT(48), ALPACA_INIT(49), ALPACA_INIT(50), ALPACA_INIT(51),
251 ALPACA_INIT(52), ALPACA_INIT(53), ALPACA_INIT(54), ALPACA_INIT(55),
252 ALPACA_INIT(56), ALPACA_INIT(57), ALPACA_INIT(58), ALPACA_INIT(59),
253 ALPACA_INIT(60), ALPACA_INIT(61), ALPACA_INIT(62), ALPACA_INIT(63),
254#endif
255#endif
256#endif
257#endif
258};
259
260/* The LparMap data is now located at offset 0x6000 in head.S
261 * It was put there so that the HvReleaseData could address it
262 * with a 32-bit offset as required by the iSeries hypervisor
263 *
264 * The Naca has a pointer to the ItVpdAreas. The hypervisor finds
265 * the Naca via the HvReleaseData area. The HvReleaseData has the
266 * offset into the Naca of the pointer to the ItVpdAreas.
267 */
268const struct ItVpdAreas itVpdAreas = {
269 .xSlicDesc = 0xc9a3e5c1, /* "ItVA" */
270 .xSlicSize = sizeof(struct ItVpdAreas),
271 .xSlicVpdEntries = ItVpdMaxEntries, /* # VPD array entries */
272 .xSlicDmaEntries = ItDmaMaxEntries, /* # DMA array entries */
273 .xSlicMaxLogicalProcs = NR_CPUS * 2, /* Max logical procs */
274 .xSlicMaxPhysicalProcs = maxPhysicalProcessors, /* Max physical procs */
275 .xSlicDmaToksOffset = offsetof(struct ItVpdAreas, xPlicDmaToks),
276 .xSlicVpdAdrsOffset = offsetof(struct ItVpdAreas, xSlicVpdAdrs),
277 .xSlicDmaLensOffset = offsetof(struct ItVpdAreas, xPlicDmaLens),
278 .xSlicVpdLensOffset = offsetof(struct ItVpdAreas, xSlicVpdLens),
279 .xSlicMaxSlotLabels = 0, /* max slot labels */
280 .xSlicMaxLpQueues = 1, /* max LP queues */
281 .xPlicDmaLens = { 0 }, /* DMA lengths */
282 .xPlicDmaToks = { 0 }, /* DMA tokens */
283 .xSlicVpdLens = { /* VPD lengths */
284 0,0,0, /* 0 - 2 */
285 sizeof(xItExtVpdPanel), /* 3 Extended VPD */
286 sizeof(struct alpaca), /* 4 length of (fake) Paca */
287 0, /* 5 */
288 sizeof(struct ItIplParmsReal),/* 6 length of IPL parms */
289 26992, /* 7 length of MS VPD */
290 0, /* 8 */
291 sizeof(struct ItLpNaca),/* 9 length of LP Naca */
292 0, /* 10 */
293 256, /* 11 length of Recovery Log Buf */
294 sizeof(struct SpCommArea), /* 12 length of SP Comm Area */
295 0,0,0, /* 13 - 15 */
296 sizeof(struct IoHriProcessorVpd),/* 16 length of Proc Vpd */
297 0,0,0,0,0,0, /* 17 - 22 */
298 sizeof(struct hvlpevent_queue), /* 23 length of Lp Queue */
299 0,0 /* 24 - 25 */
300 },
301 .xSlicVpdAdrs = { /* VPD addresses */
302 0,0,0, /* 0 - 2 */
303 &xItExtVpdPanel, /* 3 Extended VPD */
304 &alpaca[0], /* 4 first (fake) Paca */
305 0, /* 5 */
306 &xItIplParmsReal, /* 6 IPL parms */
307 &xMsVpd, /* 7 MS Vpd */
308 0, /* 8 */
309 &itLpNaca, /* 9 LpNaca */
310 0, /* 10 */
311 &xRecoveryLogBuffer, /* 11 Recovery Log Buffer */
312 &xSpCommArea, /* 12 SP Comm Area */
313 0,0,0, /* 13 - 15 */
314 &xIoHriProcessorVpd, /* 16 Proc Vpd */
315 0,0,0,0,0,0, /* 17 - 22 */
316 &hvlpevent_queue, /* 23 Lp Queue */
317 0,0
318 }
319};
diff --git a/arch/powerpc/platforms/iseries/lpevents.c b/arch/powerpc/platforms/iseries/lpevents.c
new file mode 100644
index 00000000000..b0f8a857ec0
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/lpevents.c
@@ -0,0 +1,341 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/stddef.h>
11#include <linux/kernel.h>
12#include <linux/sched.h>
13#include <linux/bootmem.h>
14#include <linux/seq_file.h>
15#include <linux/proc_fs.h>
16#include <linux/module.h>
17
18#include <asm/system.h>
19#include <asm/paca.h>
20#include <asm/firmware.h>
21#include <asm/iseries/it_lp_queue.h>
22#include <asm/iseries/hv_lp_event.h>
23#include <asm/iseries/hv_call_event.h>
24#include "it_lp_naca.h"
25
26/*
27 * The LpQueue is used to pass event data from the hypervisor to
28 * the partition. This is where I/O interrupt events are communicated.
29 *
30 * It is written to by the hypervisor so cannot end up in the BSS.
31 */
32struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
33
34DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);
35
36static char *event_types[HvLpEvent_Type_NumTypes] = {
37 "Hypervisor",
38 "Machine Facilities",
39 "Session Manager",
40 "SPD I/O",
41 "Virtual Bus",
42 "PCI I/O",
43 "RIO I/O",
44 "Virtual Lan",
45 "Virtual I/O"
46};
47
48/* Array of LpEvent handler functions */
49static LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
50static unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
51
52static struct HvLpEvent * get_next_hvlpevent(void)
53{
54 struct HvLpEvent * event;
55 event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
56
57 if (hvlpevent_is_valid(event)) {
58 /* rmb() needed only for weakly consistent machines (regatta) */
59 rmb();
60 /* Set pointer to next potential event */
61 hvlpevent_queue.hq_current_event += ((event->xSizeMinus1 +
62 IT_LP_EVENT_ALIGN) / IT_LP_EVENT_ALIGN) *
63 IT_LP_EVENT_ALIGN;
64
65 /* Wrap to beginning if no room at end */
66 if (hvlpevent_queue.hq_current_event >
67 hvlpevent_queue.hq_last_event) {
68 hvlpevent_queue.hq_current_event =
69 hvlpevent_queue.hq_event_stack;
70 }
71 } else {
72 event = NULL;
73 }
74
75 return event;
76}
77
78static unsigned long spread_lpevents = NR_CPUS;
79
80int hvlpevent_is_pending(void)
81{
82 struct HvLpEvent *next_event;
83
84 if (smp_processor_id() >= spread_lpevents)
85 return 0;
86
87 next_event = (struct HvLpEvent *)hvlpevent_queue.hq_current_event;
88
89 return hvlpevent_is_valid(next_event) ||
90 hvlpevent_queue.hq_overflow_pending;
91}
92
93static void hvlpevent_clear_valid(struct HvLpEvent * event)
94{
95 /* Tell the Hypervisor that we're done with this event.
96 * Also clear bits within this event that might look like valid bits.
97 * ie. on 64-byte boundaries.
98 */
99 struct HvLpEvent *tmp;
100 unsigned extra = ((event->xSizeMinus1 + IT_LP_EVENT_ALIGN) /
101 IT_LP_EVENT_ALIGN) - 1;
102
103 switch (extra) {
104 case 3:
105 tmp = (struct HvLpEvent*)((char*)event + 3 * IT_LP_EVENT_ALIGN);
106 hvlpevent_invalidate(tmp);
107 case 2:
108 tmp = (struct HvLpEvent*)((char*)event + 2 * IT_LP_EVENT_ALIGN);
109 hvlpevent_invalidate(tmp);
110 case 1:
111 tmp = (struct HvLpEvent*)((char*)event + 1 * IT_LP_EVENT_ALIGN);
112 hvlpevent_invalidate(tmp);
113 }
114
115 mb();
116
117 hvlpevent_invalidate(event);
118}
119
120void process_hvlpevents(void)
121{
122 struct HvLpEvent * event;
123
124 restart:
125 /* If we have recursed, just return */
126 if (!spin_trylock(&hvlpevent_queue.hq_lock))
127 return;
128
129 for (;;) {
130 event = get_next_hvlpevent();
131 if (event) {
132 /* Call appropriate handler here, passing
133 * a pointer to the LpEvent. The handler
134 * must make a copy of the LpEvent if it
135 * needs it in a bottom half. (perhaps for
136 * an ACK)
137 *
138 * Handlers are responsible for ACK processing
139 *
140 * The Hypervisor guarantees that LpEvents will
141 * only be delivered with types that we have
142 * registered for, so no type check is necessary
143 * here!
144 */
145 if (event->xType < HvLpEvent_Type_NumTypes)
146 __get_cpu_var(hvlpevent_counts)[event->xType]++;
147 if (event->xType < HvLpEvent_Type_NumTypes &&
148 lpEventHandler[event->xType])
149 lpEventHandler[event->xType](event);
150 else {
151 u8 type = event->xType;
152
153 /*
154 * Don't printk in the spinlock as printk
155 * may require ack events from the HV to send
156 * any characters there.
157 */
158 hvlpevent_clear_valid(event);
159 spin_unlock(&hvlpevent_queue.hq_lock);
160 printk(KERN_INFO
161 "Unexpected Lp Event type=%d\n", type);
162 goto restart;
163 }
164
165 hvlpevent_clear_valid(event);
166 } else if (hvlpevent_queue.hq_overflow_pending)
167 /*
168 * No more valid events. If overflow events are
169 * pending, process them
170 */
171 HvCallEvent_getOverflowLpEvents(hvlpevent_queue.hq_index);
172 else
173 break;
174 }
175
176 spin_unlock(&hvlpevent_queue.hq_lock);
177}
178
179static int set_spread_lpevents(char *str)
180{
181 unsigned long val = simple_strtoul(str, NULL, 0);
182
183 /*
184 * The parameter is the number of processors to share in processing
185 * lp events.
186 */
187 if (( val > 0) && (val <= NR_CPUS)) {
188 spread_lpevents = val;
189 printk("lpevent processing spread over %ld processors\n", val);
190 } else {
191 printk("invalid spread_lpevents %ld\n", val);
192 }
193
194 return 1;
195}
196__setup("spread_lpevents=", set_spread_lpevents);
197
198void __init setup_hvlpevent_queue(void)
199{
200 void *eventStack;
201
202 spin_lock_init(&hvlpevent_queue.hq_lock);
203
204 /* Allocate a page for the Event Stack. */
205 eventStack = alloc_bootmem_pages(IT_LP_EVENT_STACK_SIZE);
206 memset(eventStack, 0, IT_LP_EVENT_STACK_SIZE);
207
208 /* Invoke the hypervisor to initialize the event stack */
209 HvCallEvent_setLpEventStack(0, eventStack, IT_LP_EVENT_STACK_SIZE);
210
211 hvlpevent_queue.hq_event_stack = eventStack;
212 hvlpevent_queue.hq_current_event = eventStack;
213 hvlpevent_queue.hq_last_event = (char *)eventStack +
214 (IT_LP_EVENT_STACK_SIZE - IT_LP_EVENT_MAX_SIZE);
215 hvlpevent_queue.hq_index = 0;
216}
217
218/* Register a handler for an LpEvent type */
219int HvLpEvent_registerHandler(HvLpEvent_Type eventType, LpEventHandler handler)
220{
221 if (eventType < HvLpEvent_Type_NumTypes) {
222 lpEventHandler[eventType] = handler;
223 return 0;
224 }
225 return 1;
226}
227EXPORT_SYMBOL(HvLpEvent_registerHandler);
228
229int HvLpEvent_unregisterHandler(HvLpEvent_Type eventType)
230{
231 might_sleep();
232
233 if (eventType < HvLpEvent_Type_NumTypes) {
234 if (!lpEventHandlerPaths[eventType]) {
235 lpEventHandler[eventType] = NULL;
236 /*
237 * We now sleep until all other CPUs have scheduled.
238 * This ensures that the deletion is seen by all
239 * other CPUs, and that the deleted handler isn't
240 * still running on another CPU when we return.
241 */
242 synchronize_sched();
243 return 0;
244 }
245 }
246 return 1;
247}
248EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
249
250/*
251 * lpIndex is the partition index of the target partition. It is
252 * needed only for VirtualIo, VirtualLan and SessionMgr. Zero
253 * indicates to use our own partition index (as for the other types).
254 */
255int HvLpEvent_openPath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
256{
257 if ((eventType < HvLpEvent_Type_NumTypes) &&
258 lpEventHandler[eventType]) {
259 if (lpIndex == 0)
260 lpIndex = itLpNaca.xLpIndex;
261 HvCallEvent_openLpEventPath(lpIndex, eventType);
262 ++lpEventHandlerPaths[eventType];
263 return 0;
264 }
265 return 1;
266}
267
268int HvLpEvent_closePath(HvLpEvent_Type eventType, HvLpIndex lpIndex)
269{
270 if ((eventType < HvLpEvent_Type_NumTypes) &&
271 lpEventHandler[eventType] &&
272 lpEventHandlerPaths[eventType]) {
273 if (lpIndex == 0)
274 lpIndex = itLpNaca.xLpIndex;
275 HvCallEvent_closeLpEventPath(lpIndex, eventType);
276 --lpEventHandlerPaths[eventType];
277 return 0;
278 }
279 return 1;
280}
281
282static int proc_lpevents_show(struct seq_file *m, void *v)
283{
284 int cpu, i;
285 unsigned long sum;
286 static unsigned long cpu_totals[NR_CPUS];
287
288 /* FIXME: do we care that there's no locking here? */
289 sum = 0;
290 for_each_online_cpu(cpu) {
291 cpu_totals[cpu] = 0;
292 for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
293 cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
294 }
295 sum += cpu_totals[cpu];
296 }
297
298 seq_printf(m, "LpEventQueue 0\n");
299 seq_printf(m, " events processed:\t%lu\n", sum);
300
301 for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
302 sum = 0;
303 for_each_online_cpu(cpu) {
304 sum += per_cpu(hvlpevent_counts, cpu)[i];
305 }
306
307 seq_printf(m, " %-20s %10lu\n", event_types[i], sum);
308 }
309
310 seq_printf(m, "\n events processed by processor:\n");
311
312 for_each_online_cpu(cpu) {
313 seq_printf(m, " CPU%02d %10lu\n", cpu, cpu_totals[cpu]);
314 }
315
316 return 0;
317}
318
319static int proc_lpevents_open(struct inode *inode, struct file *file)
320{
321 return single_open(file, proc_lpevents_show, NULL);
322}
323
324static const struct file_operations proc_lpevents_operations = {
325 .open = proc_lpevents_open,
326 .read = seq_read,
327 .llseek = seq_lseek,
328 .release = single_release,
329};
330
331static int __init proc_lpevents_init(void)
332{
333 if (!firmware_has_feature(FW_FEATURE_ISERIES))
334 return 0;
335
336 proc_create("iSeries/lpevents", S_IFREG|S_IRUGO, NULL,
337 &proc_lpevents_operations);
338 return 0;
339}
340__initcall(proc_lpevents_init);
341
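
The registry above is used in a fixed sequence: register a handler for an event type, then open a path so the hypervisor starts delivering events of that type, exactly as iSeries_init_IRQ() does for HvLpEvent_Type_PciIo earlier in this patch. A minimal sketch of that sequence; the handler and init function names are illustrative only.

/* Sketch: typical use of the LpEvent handler registry above,
 * mirroring the register-then-open sequence in irq.c.
 */
#include <linux/init.h>
#include <asm/iseries/hv_lp_event.h>

static void example_event_handler(struct HvLpEvent *event)
{
	/* handlers are responsible for any ACK processing */
}

static int __init example_open_event_path(void)
{
	int ret = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
					    &example_event_handler);
	if (ret == 0)
		ret = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
	return ret;
}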
diff --git a/arch/powerpc/platforms/iseries/main_store.h b/arch/powerpc/platforms/iseries/main_store.h
new file mode 100644
index 00000000000..1a7a3f50e40
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/main_store.h
@@ -0,0 +1,165 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ISERIES_MAIN_STORE_H
20#define _ISERIES_MAIN_STORE_H
21
22/* Main Store Vpd for Condor,iStar,sStar */
23struct IoHriMainStoreSegment4 {
24 u8 msArea0Exists:1;
25 u8 msArea1Exists:1;
26 u8 msArea2Exists:1;
27 u8 msArea3Exists:1;
28 u8 reserved1:4;
29 u8 reserved2;
30
31 u8 msArea0Functional:1;
32 u8 msArea1Functional:1;
33 u8 msArea2Functional:1;
34 u8 msArea3Functional:1;
35 u8 reserved3:4;
36 u8 reserved4;
37
38 u32 totalMainStore;
39
40 u64 msArea0Ptr;
41 u64 msArea1Ptr;
42 u64 msArea2Ptr;
43 u64 msArea3Ptr;
44
45 u32 cardProductionLevel;
46
47 u32 msAdrHole;
48
49 u8 msArea0HasRiserVpd:1;
50 u8 msArea1HasRiserVpd:1;
51 u8 msArea2HasRiserVpd:1;
52 u8 msArea3HasRiserVpd:1;
53 u8 reserved5:4;
54 u8 reserved6;
55 u16 reserved7;
56
57 u8 reserved8[28];
58
59 u64 nonInterleavedBlocksStartAdr;
60 u64 nonInterleavedBlocksEndAdr;
61};
62
63/* Main Store VPD for Power4 */
64struct __attribute((packed)) IoHriMainStoreChipInfo1 {
65 u32 chipMfgID;
66 char chipECLevel[4];
67};
68
69struct IoHriMainStoreVpdIdData {
70 char typeNumber[4];
71 char modelNumber[4];
72 char partNumber[12];
73 char serialNumber[12];
74};
75
76struct __attribute((packed)) IoHriMainStoreVpdFruData {
77 char fruLabel[8];
78 u8 numberOfSlots;
79 u8 pluggingType;
80 u16 slotMapIndex;
81};
82
83struct __attribute((packed)) IoHriMainStoreAdrRangeBlock {
84 void *blockStart;
85 void *blockEnd;
86 u32 blockProcChipId;
87};
88
89#define MaxAreaAdrRangeBlocks 4
90
91struct __attribute((packed)) IoHriMainStoreArea4 {
92 u32 msVpdFormat;
93 u8 containedVpdType;
94 u8 reserved1;
95 u16 reserved2;
96
97 u64 msExists;
98 u64 msFunctional;
99
100 u32 memorySize;
101 u32 procNodeId;
102
103 u32 numAdrRangeBlocks;
104 struct IoHriMainStoreAdrRangeBlock xAdrRangeBlock[MaxAreaAdrRangeBlocks];
105
106 struct IoHriMainStoreChipInfo1 chipInfo0;
107 struct IoHriMainStoreChipInfo1 chipInfo1;
108 struct IoHriMainStoreChipInfo1 chipInfo2;
109 struct IoHriMainStoreChipInfo1 chipInfo3;
110 struct IoHriMainStoreChipInfo1 chipInfo4;
111 struct IoHriMainStoreChipInfo1 chipInfo5;
112 struct IoHriMainStoreChipInfo1 chipInfo6;
113 struct IoHriMainStoreChipInfo1 chipInfo7;
114
115 void *msRamAreaArray;
116 u32 msRamAreaArrayNumEntries;
117 u32 msRamAreaArrayEntrySize;
118
119 u32 numaDimmExists;
120 u32 numaDimmFunctional;
121 void *numaDimmArray;
122 u32 numaDimmArrayNumEntries;
123 u32 numaDimmArrayEntrySize;
124
125 struct IoHriMainStoreVpdIdData idData;
126
127 u64 powerData;
128 u64 cardAssemblyPartNum;
129 u64 chipSerialNum;
130
131 u64 reserved3;
132 char reserved4[16];
133
134 struct IoHriMainStoreVpdFruData fruData;
135
136 u8 vpdPortNum;
137 u8 reserved5;
138 u8 frameId;
139 u8 rackUnit;
140 char asciiKeywordVpd[256];
141 u32 reserved6;
142};
143
144
145struct IoHriMainStoreSegment5 {
146 u16 reserved1;
147 u8 reserved2;
148 u8 msVpdFormat;
149
150 u32 totalMainStore;
151 u64 maxConfiguredMsAdr;
152
153 struct IoHriMainStoreArea4 *msAreaArray;
154 u32 msAreaArrayNumEntries;
155 u32 msAreaArrayEntrySize;
156
157 u32 msAreaExists;
158 u32 msAreaFunctional;
159
160 u64 reserved3;
161};
162
163extern u64 xMsVpd[];
164
165#endif /* _ISERIES_MAIN_STORE_H */
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
new file mode 100644
index 00000000000..62dabe3c2bf
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -0,0 +1,1274 @@
1/*
2 * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
3 * Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation
4 *
5 * This module exists as an interface between a Linux secondary partition
6 * running on an iSeries and the primary partition's Virtual Service
7 * Processor (VSP) object. The VSP has final authority over powering on/off
8 * all partitions in the iSeries. It also provides miscellaneous low-level
9 * machine facility type operations.
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2 of the License, or
15 * (at your option) any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
25 */
26
27#include <linux/types.h>
28#include <linux/errno.h>
29#include <linux/kernel.h>
30#include <linux/init.h>
31#include <linux/completion.h>
32#include <linux/delay.h>
33#include <linux/proc_fs.h>
34#include <linux/dma-mapping.h>
35#include <linux/bcd.h>
36#include <linux/rtc.h>
37#include <linux/slab.h>
38
39#include <asm/time.h>
40#include <asm/uaccess.h>
41#include <asm/paca.h>
42#include <asm/abs_addr.h>
43#include <asm/firmware.h>
44#include <asm/iseries/mf.h>
45#include <asm/iseries/hv_lp_config.h>
46#include <asm/iseries/hv_lp_event.h>
47#include <asm/iseries/it_lp_queue.h>
48
49#include "setup.h"
50
51static int mf_initialized;
52
53/*
54 * This is the structure layout for the Machine Facilities LPAR event
55 * flows.
56 */
57struct vsp_cmd_data {
58 u64 token;
59 u16 cmd;
60 HvLpIndex lp_index;
61 u8 result_code;
62 u32 reserved;
63 union {
64 u64 state; /* GetStateOut */
65 u64 ipl_type; /* GetIplTypeOut, Function02SelectIplTypeIn */
66 u64 ipl_mode; /* GetIplModeOut, Function02SelectIplModeIn */
67 u64 page[4]; /* GetSrcHistoryIn */
68 u64 flag; /* GetAutoIplWhenPrimaryIplsOut,
69 SetAutoIplWhenPrimaryIplsIn,
70 WhiteButtonPowerOffIn,
71 Function08FastPowerOffIn,
72 IsSpcnRackPowerIncompleteOut */
73 struct {
74 u64 token;
75 u64 address_type;
76 u64 side;
77 u32 length;
78 u32 offset;
79 } kern; /* SetKernelImageIn, GetKernelImageIn,
80 SetKernelCmdLineIn, GetKernelCmdLineIn */
81 u32 length_out; /* GetKernelImageOut, GetKernelCmdLineOut */
82 u8 reserved[80];
83 } sub_data;
84};
85
86struct vsp_rsp_data {
87 struct completion com;
88 struct vsp_cmd_data *response;
89};
90
91struct alloc_data {
92 u16 size;
93 u16 type;
94 u32 count;
95 u16 reserved1;
96 u8 reserved2;
97 HvLpIndex target_lp;
98};
99
100struct ce_msg_data;
101
102typedef void (*ce_msg_comp_hdlr)(void *token, struct ce_msg_data *vsp_cmd_rsp);
103
104struct ce_msg_comp_data {
105 ce_msg_comp_hdlr handler;
106 void *token;
107};
108
109struct ce_msg_data {
110 u8 ce_msg[12];
111 char reserved[4];
112 struct ce_msg_comp_data *completion;
113};
114
115struct io_mf_lp_event {
116 struct HvLpEvent hp_lp_event;
117 u16 subtype_result_code;
118 u16 reserved1;
119 u32 reserved2;
120 union {
121 struct alloc_data alloc;
122 struct ce_msg_data ce_msg;
123 struct vsp_cmd_data vsp_cmd;
124 } data;
125};
126
127#define subtype_data(a, b, c, d) \
128 (((a) << 24) + ((b) << 16) + ((c) << 8) + (d))
129
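The subtype_data() macro above packs four ASCII characters into one 32-bit subtype word, high byte first; for instance the 'M','F','V','I' tag used later for VSP instructions packs to 0x4D465649. A standalone sketch of the same arithmetic (plain user-space C, not part of the patch):

#include <stdio.h>

#define subtype_data(a, b, c, d) \
	(((a) << 24) + ((b) << 16) + ((c) << 8) + (d))

int main(void)
{
	/* 'M' = 0x4D, 'F' = 0x46, 'V' = 0x56, 'I' = 0x49 */
	printf("MFVI subtype = 0x%08X\n", subtype_data('M', 'F', 'V', 'I'));
	return 0;
}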
130/*
131 * All outgoing event traffic is kept on a FIFO queue. The first
132 * pointer points to the one that is outstanding, and all new
133 * requests get stuck on the end. Also, we keep a certain number of
134 * preallocated pending events so that we can operate very early in
135 * the boot up sequence (before kmalloc is ready).
136 */
137struct pending_event {
138 struct pending_event *next;
139 struct io_mf_lp_event event;
140 MFCompleteHandler hdlr;
141 char dma_data[72];
142 unsigned dma_data_length;
143 unsigned remote_address;
144};
145static spinlock_t pending_event_spinlock;
146static struct pending_event *pending_event_head;
147static struct pending_event *pending_event_tail;
148static struct pending_event *pending_event_avail;
149#define PENDING_EVENT_PREALLOC_LEN 16
150static struct pending_event pending_event_prealloc[PENDING_EVENT_PREALLOC_LEN];
151
152/*
153 * Put a pending event onto the available queue, so it can get reused.
154 * Attention! You must have the pending_event_spinlock before calling!
155 */
156static void free_pending_event(struct pending_event *ev)
157{
158 if (ev != NULL) {
159 ev->next = pending_event_avail;
160 pending_event_avail = ev;
161 }
162}
163
164/*
165 * Enqueue the outbound event onto the queue. If the queue was
166 * empty to begin with, we must also issue it via the Hypervisor
167 * interface. There is a section of code below that will touch
168 * the head pointer without the protection of the pending_event_spinlock.
169 * This is OK, because we know that nobody else will be modifying
170 * the head pointer when we do this.
171 */
172static int signal_event(struct pending_event *ev)
173{
174 int rc = 0;
175 unsigned long flags;
176 int go = 1;
177 struct pending_event *ev1;
178 HvLpEvent_Rc hv_rc;
179
180 /* enqueue the event */
181 if (ev != NULL) {
182 ev->next = NULL;
183 spin_lock_irqsave(&pending_event_spinlock, flags);
184 if (pending_event_head == NULL)
185 pending_event_head = ev;
186 else {
187 go = 0;
188 pending_event_tail->next = ev;
189 }
190 pending_event_tail = ev;
191 spin_unlock_irqrestore(&pending_event_spinlock, flags);
192 }
193
194 /* send the event */
195 while (go) {
196 go = 0;
197
198 /* any DMA data to send beforehand? */
199 if (pending_event_head->dma_data_length > 0)
200 HvCallEvent_dmaToSp(pending_event_head->dma_data,
201 pending_event_head->remote_address,
202 pending_event_head->dma_data_length,
203 HvLpDma_Direction_LocalToRemote);
204
205 hv_rc = HvCallEvent_signalLpEvent(
206 &pending_event_head->event.hp_lp_event);
207 if (hv_rc != HvLpEvent_Rc_Good) {
208 printk(KERN_ERR "mf.c: HvCallEvent_signalLpEvent() "
209 "failed with %d\n", (int)hv_rc);
210
211 spin_lock_irqsave(&pending_event_spinlock, flags);
212 ev1 = pending_event_head;
213 pending_event_head = pending_event_head->next;
214 if (pending_event_head != NULL)
215 go = 1;
216 spin_unlock_irqrestore(&pending_event_spinlock, flags);
217
218 if (ev1 == ev)
219 rc = -EIO;
220 else if (ev1->hdlr != NULL)
221 (*ev1->hdlr)((void *)ev1->event.hp_lp_event.xCorrelationToken, -EIO);
222
223 spin_lock_irqsave(&pending_event_spinlock, flags);
224 free_pending_event(ev1);
225 spin_unlock_irqrestore(&pending_event_spinlock, flags);
226 }
227 }
228
229 return rc;
230}
231
232/*
233 * Allocate a new pending_event structure, and initialize it.
234 */
235static struct pending_event *new_pending_event(void)
236{
237 struct pending_event *ev = NULL;
238 HvLpIndex primary_lp = HvLpConfig_getPrimaryLpIndex();
239 unsigned long flags;
240 struct HvLpEvent *hev;
241
242 spin_lock_irqsave(&pending_event_spinlock, flags);
243 if (pending_event_avail != NULL) {
244 ev = pending_event_avail;
245 pending_event_avail = pending_event_avail->next;
246 }
247 spin_unlock_irqrestore(&pending_event_spinlock, flags);
248 if (ev == NULL) {
249 ev = kmalloc(sizeof(struct pending_event), GFP_ATOMIC);
250 if (ev == NULL) {
251 printk(KERN_ERR "mf.c: unable to kmalloc %ld bytes\n",
252 sizeof(struct pending_event));
253 return NULL;
254 }
255 }
256 memset(ev, 0, sizeof(struct pending_event));
257 hev = &ev->event.hp_lp_event;
258 hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK | HV_LP_EVENT_INT;
259 hev->xType = HvLpEvent_Type_MachineFac;
260 hev->xSourceLp = HvLpConfig_getLpIndex();
261 hev->xTargetLp = primary_lp;
262 hev->xSizeMinus1 = sizeof(ev->event) - 1;
263 hev->xRc = HvLpEvent_Rc_Good;
264 hev->xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primary_lp,
265 HvLpEvent_Type_MachineFac);
266 hev->xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primary_lp,
267 HvLpEvent_Type_MachineFac);
268
269 return ev;
270}
271
272static int __maybe_unused
273signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd)
274{
275 struct pending_event *ev = new_pending_event();
276 int rc;
277 struct vsp_rsp_data response;
278
279 if (ev == NULL)
280 return -ENOMEM;
281
282 init_completion(&response.com);
283 response.response = vsp_cmd;
284 ev->event.hp_lp_event.xSubtype = 6;
285 ev->event.hp_lp_event.x.xSubtypeData =
286 subtype_data('M', 'F', 'V', 'I');
287 ev->event.data.vsp_cmd.token = (u64)&response;
288 ev->event.data.vsp_cmd.cmd = vsp_cmd->cmd;
289 ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
290 ev->event.data.vsp_cmd.result_code = 0xFF;
291 ev->event.data.vsp_cmd.reserved = 0;
292 memcpy(&(ev->event.data.vsp_cmd.sub_data),
293 &(vsp_cmd->sub_data), sizeof(vsp_cmd->sub_data));
294 mb();
295
296 rc = signal_event(ev);
297 if (rc == 0)
298 wait_for_completion(&response.com);
299 return rc;
300}
301
302
303/*
304 * Send a 12-byte CE message to the primary partition VSP object
305 */
306static int signal_ce_msg(char *ce_msg, struct ce_msg_comp_data *completion)
307{
308 struct pending_event *ev = new_pending_event();
309
310 if (ev == NULL)
311 return -ENOMEM;
312
313 ev->event.hp_lp_event.xSubtype = 0;
314 ev->event.hp_lp_event.x.xSubtypeData =
315 subtype_data('M', 'F', 'C', 'E');
316 memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
317 ev->event.data.ce_msg.completion = completion;
318 return signal_event(ev);
319}
320
321/*
322 * Send a 12-byte CE message (with no data) to the primary partition VSP object
323 */
324static int signal_ce_msg_simple(u8 ce_op, struct ce_msg_comp_data *completion)
325{
326 u8 ce_msg[12];
327
328 memset(ce_msg, 0, sizeof(ce_msg));
329 ce_msg[3] = ce_op;
330 return signal_ce_msg(ce_msg, completion);
331}
332
333/*
334 * Send a 12-byte CE message and DMA data to the primary partition VSP object
335 */
336static int dma_and_signal_ce_msg(char *ce_msg,
337 struct ce_msg_comp_data *completion, void *dma_data,
338 unsigned dma_data_length, unsigned remote_address)
339{
340 struct pending_event *ev = new_pending_event();
341
342 if (ev == NULL)
343 return -ENOMEM;
344
345 ev->event.hp_lp_event.xSubtype = 0;
346 ev->event.hp_lp_event.x.xSubtypeData =
347 subtype_data('M', 'F', 'C', 'E');
348 memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
349 ev->event.data.ce_msg.completion = completion;
350 memcpy(ev->dma_data, dma_data, dma_data_length);
351 ev->dma_data_length = dma_data_length;
352 ev->remote_address = remote_address;
353 return signal_event(ev);
354}
355
356/*
357 * Initiate a nice (hopefully) shutdown of Linux. We simply are
358 * going to try and send the init process a SIGINT signal. If
359 * this fails (why?), we'll simply force it off in a not-so-nice
360 * manner.
361 */
362static int shutdown(void)
363{
364 int rc = kill_cad_pid(SIGINT, 1);
365
366 if (rc) {
367 printk(KERN_ALERT "mf.c: SIGINT to init failed (%d), "
368 "hard shutdown commencing\n", rc);
369 mf_power_off();
370 } else
371 printk(KERN_INFO "mf.c: init has been successfully notified "
372 "to proceed with shutdown\n");
373 return rc;
374}
375
376/*
377 * The primary partition VSP object is sending us a new
378 * event flow. Handle it...
379 */
380static void handle_int(struct io_mf_lp_event *event)
381{
382 struct ce_msg_data *ce_msg_data;
383 struct ce_msg_data *pce_msg_data;
384 unsigned long flags;
385 struct pending_event *pev;
386
387 /* ack the interrupt */
388 event->hp_lp_event.xRc = HvLpEvent_Rc_Good;
389 HvCallEvent_ackLpEvent(&event->hp_lp_event);
390
391 /* process interrupt */
392 switch (event->hp_lp_event.xSubtype) {
393 case 0: /* CE message */
394 ce_msg_data = &event->data.ce_msg;
395 switch (ce_msg_data->ce_msg[3]) {
396 case 0x5B: /* power control notification */
397 if ((ce_msg_data->ce_msg[5] & 0x20) != 0) {
398 printk(KERN_INFO "mf.c: Commencing partition shutdown\n");
399 if (shutdown() == 0)
400 signal_ce_msg_simple(0xDB, NULL);
401 }
402 break;
403 case 0xC0: /* get time */
404 spin_lock_irqsave(&pending_event_spinlock, flags);
405 pev = pending_event_head;
406 if (pev != NULL)
407 pending_event_head = pending_event_head->next;
408 spin_unlock_irqrestore(&pending_event_spinlock, flags);
409 if (pev == NULL)
410 break;
411 pce_msg_data = &pev->event.data.ce_msg;
412 if (pce_msg_data->ce_msg[3] != 0x40)
413 break;
414 if (pce_msg_data->completion != NULL) {
415 ce_msg_comp_hdlr handler =
416 pce_msg_data->completion->handler;
417 void *token = pce_msg_data->completion->token;
418
419 if (handler != NULL)
420 (*handler)(token, ce_msg_data);
421 }
422 spin_lock_irqsave(&pending_event_spinlock, flags);
423 free_pending_event(pev);
424 spin_unlock_irqrestore(&pending_event_spinlock, flags);
425 /* send next waiting event */
426 if (pending_event_head != NULL)
427 signal_event(NULL);
428 break;
429 }
430 break;
431 case 1: /* IT sys shutdown */
432 printk(KERN_INFO "mf.c: Commencing system shutdown\n");
433 shutdown();
434 break;
435 }
436}
437
438/*
439 * The primary partition VSP object is acknowledging the receipt
440 * of a flow we sent to them. If there are other flows queued
441 * up, we must send another one now...
442 */
443static void handle_ack(struct io_mf_lp_event *event)
444{
445 unsigned long flags;
446 struct pending_event *two = NULL;
447 unsigned long free_it = 0;
448 struct ce_msg_data *ce_msg_data;
449 struct ce_msg_data *pce_msg_data;
450 struct vsp_rsp_data *rsp;
451
452 /* handle current event */
453 if (pending_event_head == NULL) {
454 printk(KERN_ERR "mf.c: stack empty for receiving ack\n");
455 return;
456 }
457
458 switch (event->hp_lp_event.xSubtype) {
459 case 0: /* CE msg */
460 ce_msg_data = &event->data.ce_msg;
461 if (ce_msg_data->ce_msg[3] != 0x40) {
462 free_it = 1;
463 break;
464 }
465 if (ce_msg_data->ce_msg[2] == 0)
466 break;
467 free_it = 1;
468 pce_msg_data = &pending_event_head->event.data.ce_msg;
469 if (pce_msg_data->completion != NULL) {
470 ce_msg_comp_hdlr handler =
471 pce_msg_data->completion->handler;
472 void *token = pce_msg_data->completion->token;
473
474 if (handler != NULL)
475 (*handler)(token, ce_msg_data);
476 }
477 break;
478 case 4: /* allocate */
479 case 5: /* deallocate */
480 if (pending_event_head->hdlr != NULL)
481 (*pending_event_head->hdlr)((void *)event->hp_lp_event.xCorrelationToken, event->data.alloc.count);
482 free_it = 1;
483 break;
484 case 6:
485 free_it = 1;
486 rsp = (struct vsp_rsp_data *)event->data.vsp_cmd.token;
487 if (rsp == NULL) {
488 printk(KERN_ERR "mf.c: no rsp\n");
489 break;
490 }
491 if (rsp->response != NULL)
492 memcpy(rsp->response, &event->data.vsp_cmd,
493 sizeof(event->data.vsp_cmd));
494 complete(&rsp->com);
495 break;
496 }
497
498 /* remove from queue */
499 spin_lock_irqsave(&pending_event_spinlock, flags);
500 if ((pending_event_head != NULL) && (free_it == 1)) {
501 struct pending_event *oldHead = pending_event_head;
502
503 pending_event_head = pending_event_head->next;
504 two = pending_event_head;
505 free_pending_event(oldHead);
506 }
507 spin_unlock_irqrestore(&pending_event_spinlock, flags);
508
509 /* send next waiting event */
510 if (two != NULL)
511 signal_event(NULL);
512}
513
514/*
515 * This is the generic event handler we are registering with
516 * the Hypervisor. Ensure the flows are for us, and then
517 * parse it enough to know if it is an interrupt or an
518 * acknowledge.
519 */
520static void hv_handler(struct HvLpEvent *event)
521{
522 if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) {
523 if (hvlpevent_is_ack(event))
524 handle_ack((struct io_mf_lp_event *)event);
525 else
526 handle_int((struct io_mf_lp_event *)event);
527 } else
528 printk(KERN_ERR "mf.c: alien event received\n");
529}
530
531/*
532 * Global kernel interface to allocate and seed events into the
533 * Hypervisor.
534 */
535void mf_allocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
536 unsigned size, unsigned count, MFCompleteHandler hdlr,
537 void *user_token)
538{
539 struct pending_event *ev = new_pending_event();
540 int rc;
541
542 if (ev == NULL) {
543 rc = -ENOMEM;
544 } else {
545 ev->event.hp_lp_event.xSubtype = 4;
546 ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
547 ev->event.hp_lp_event.x.xSubtypeData =
548 subtype_data('M', 'F', 'M', 'A');
549 ev->event.data.alloc.target_lp = target_lp;
550 ev->event.data.alloc.type = type;
551 ev->event.data.alloc.size = size;
552 ev->event.data.alloc.count = count;
553 ev->hdlr = hdlr;
554 rc = signal_event(ev);
555 }
556 if ((rc != 0) && (hdlr != NULL))
557 (*hdlr)(user_token, rc);
558}
559EXPORT_SYMBOL(mf_allocate_lp_events);
560
561/*
562 * Global kernel interface to unseed and deallocate events already in
563 * Hypervisor.
564 */
565void mf_deallocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
566 unsigned count, MFCompleteHandler hdlr, void *user_token)
567{
568 struct pending_event *ev = new_pending_event();
569 int rc;
570
571 if (ev == NULL)
572 rc = -ENOMEM;
573 else {
574 ev->event.hp_lp_event.xSubtype = 5;
575 ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
576 ev->event.hp_lp_event.x.xSubtypeData =
577 subtype_data('M', 'F', 'M', 'D');
578 ev->event.data.alloc.target_lp = target_lp;
579 ev->event.data.alloc.type = type;
580 ev->event.data.alloc.count = count;
581 ev->hdlr = hdlr;
582 rc = signal_event(ev);
583 }
584 if ((rc != 0) && (hdlr != NULL))
585 (*hdlr)(user_token, rc);
586}
587EXPORT_SYMBOL(mf_deallocate_lp_events);
588
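mf_allocate_lp_events() and mf_deallocate_lp_events() above are completion-driven: the handler is invoked with the caller's token and either the negative error code (if signalling the event failed) or the count reported back in the acknowledgement. A hedged usage sketch follows; the handler name, token and counts are hypothetical, only the two exported functions and the way the handler is invoked come from this file:

#include <linux/kernel.h>
#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/iseries/mf.h>

/* Hypothetical completion handler: log what the acknowledgement reported. */
static void example_alloc_done(void *token, int count_or_rc)
{
	printk(KERN_INFO "example: LP event allocation (%s) returned %d\n",
	       (char *)token, count_or_rc);
}

/* Hypothetical caller: seed 10 events of 256 bytes at the target partition. */
static void example_seed_events(HvLpIndex target_lp)
{
	mf_allocate_lp_events(target_lp, HvLpEvent_Type_VirtualIo,
			      256, 10, example_alloc_done, "virtual I/O");
}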
589/*
590 * Global kernel interface to tell the VSP object in the primary
591 * partition to power this partition off.
592 */
593void mf_power_off(void)
594{
595 printk(KERN_INFO "mf.c: Down it goes...\n");
596 signal_ce_msg_simple(0x4d, NULL);
597 for (;;)
598 ;
599}
600
601/*
602 * Global kernel interface to tell the VSP object in the primary
603 * partition to reboot this partition.
604 */
605void mf_reboot(char *cmd)
606{
607 printk(KERN_INFO "mf.c: Preparing to bounce...\n");
608 signal_ce_msg_simple(0x4e, NULL);
609 for (;;)
610 ;
611}
612
613/*
614 * Display a single word SRC onto the VSP control panel.
615 */
616void mf_display_src(u32 word)
617{
618 u8 ce[12];
619
620 memset(ce, 0, sizeof(ce));
621 ce[3] = 0x4a;
622 ce[7] = 0x01;
623 ce[8] = word >> 24;
624 ce[9] = word >> 16;
625 ce[10] = word >> 8;
626 ce[11] = word;
627 signal_ce_msg(ce, NULL);
628}
629
630/*
631 * Display a single word SRC of the form "PROGXXXX" on the VSP control panel.
632 */
633static __init void mf_display_progress_src(u16 value)
634{
635 u8 ce[12];
636 u8 src[72];
637
638 memcpy(ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12);
639 memcpy(src, "\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00"
640 "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
641 "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
642 "\x00\x00\x00\x00PROGxxxx ",
643 72);
644 src[6] = value >> 8;
645 src[7] = value & 255;
646 src[44] = "0123456789ABCDEF"[(value >> 12) & 15];
647 src[45] = "0123456789ABCDEF"[(value >> 8) & 15];
648 src[46] = "0123456789ABCDEF"[(value >> 4) & 15];
649 src[47] = "0123456789ABCDEF"[value & 15];
650 dma_and_signal_ce_msg(ce, NULL, src, sizeof(src), 9 * 64 * 1024);
651}
652
653/*
654 * Clear the VSP control panel. Used to "erase" an SRC that was
655 * previously displayed.
656 */
657static void mf_clear_src(void)
658{
659 signal_ce_msg_simple(0x4b, NULL);
660}
661
662void __init mf_display_progress(u16 value)
663{
664 if (!mf_initialized)
665 return;
666
667 if (0xFFFF == value)
668 mf_clear_src();
669 else
670 mf_display_progress_src(value);
671}
672
673/*
674 * Initialization code here.
675 */
676void __init mf_init(void)
677{
678 int i;
679
680 spin_lock_init(&pending_event_spinlock);
681
682 for (i = 0; i < PENDING_EVENT_PREALLOC_LEN; i++)
683 free_pending_event(&pending_event_prealloc[i]);
684
685 HvLpEvent_registerHandler(HvLpEvent_Type_MachineFac, &hv_handler);
686
687 /* virtual continue ack */
688 signal_ce_msg_simple(0x57, NULL);
689
690 mf_initialized = 1;
691 mb();
692
693 printk(KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities "
694 "initialized\n");
695}
696
697struct rtc_time_data {
698 struct completion com;
699 struct ce_msg_data ce_msg;
700 int rc;
701};
702
703static void get_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
704{
705 struct rtc_time_data *rtc = token;
706
707 memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
708 rtc->rc = 0;
709 complete(&rtc->com);
710}
711
712static int mf_set_rtc(struct rtc_time *tm)
713{
714 char ce_time[12];
715 u8 day, mon, hour, min, sec, y1, y2;
716 unsigned year;
717
718 year = 1900 + tm->tm_year;
719 y1 = year / 100;
720 y2 = year % 100;
721
722 sec = tm->tm_sec;
723 min = tm->tm_min;
724 hour = tm->tm_hour;
725 day = tm->tm_mday;
726 mon = tm->tm_mon + 1;
727
728 sec = bin2bcd(sec);
729 min = bin2bcd(min);
730 hour = bin2bcd(hour);
731 mon = bin2bcd(mon);
732 day = bin2bcd(day);
733 y1 = bin2bcd(y1);
734 y2 = bin2bcd(y2);
735
736 memset(ce_time, 0, sizeof(ce_time));
737 ce_time[3] = 0x41;
738 ce_time[4] = y1;
739 ce_time[5] = y2;
740 ce_time[6] = sec;
741 ce_time[7] = min;
742 ce_time[8] = hour;
743 ce_time[10] = day;
744 ce_time[11] = mon;
745
746 return signal_ce_msg(ce_time, NULL);
747}
748
749static int rtc_set_tm(int rc, u8 *ce_msg, struct rtc_time *tm)
750{
751 tm->tm_wday = 0;
752 tm->tm_yday = 0;
753 tm->tm_isdst = 0;
754 if (rc) {
755 tm->tm_sec = 0;
756 tm->tm_min = 0;
757 tm->tm_hour = 0;
758 tm->tm_mday = 15;
759 tm->tm_mon = 5;
760 tm->tm_year = 52;
761 return rc;
762 }
763
764 if ((ce_msg[2] == 0xa9) ||
765 (ce_msg[2] == 0xaf)) {
766 /* TOD clock is not set */
767 tm->tm_sec = 1;
768 tm->tm_min = 1;
769 tm->tm_hour = 1;
770 tm->tm_mday = 10;
771 tm->tm_mon = 8;
772 tm->tm_year = 71;
773 mf_set_rtc(tm);
774 }
775 {
776 u8 year = ce_msg[5];
777 u8 sec = ce_msg[6];
778 u8 min = ce_msg[7];
779 u8 hour = ce_msg[8];
780 u8 day = ce_msg[10];
781 u8 mon = ce_msg[11];
782
783 sec = bcd2bin(sec);
784 min = bcd2bin(min);
785 hour = bcd2bin(hour);
786 day = bcd2bin(day);
787 mon = bcd2bin(mon);
788 year = bcd2bin(year);
789
790 if (year <= 69)
791 year += 100;
792
793 tm->tm_sec = sec;
794 tm->tm_min = min;
795 tm->tm_hour = hour;
796 tm->tm_mday = day;
797 tm->tm_mon = mon;
798 tm->tm_year = year;
799 }
800
801 return 0;
802}
803
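The RTC flows above exchange every time field with the service processor in packed BCD, via bin2bcd()/bcd2bin() from <linux/bcd.h> (included at the top of this file). A minimal standalone illustration of that encoding, using local helpers rather than the kernel ones:

#include <stdio.h>

/* Same packed-BCD encoding the kernel helpers implement. */
static unsigned char bin2bcd(unsigned char val)
{
	return (unsigned char)(((val / 10) << 4) | (val % 10));
}

static unsigned char bcd2bin(unsigned char val)
{
	return (unsigned char)((val >> 4) * 10 + (val & 0x0f));
}

int main(void)
{
	/* 59 seconds goes on the wire as 0x59 and decodes back to 59. */
	printf("bin2bcd(59)   = 0x%02X\n", bin2bcd(59));
	printf("bcd2bin(0x23) = %u\n", bcd2bin(0x23));
	return 0;
}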
804static int mf_get_rtc(struct rtc_time *tm)
805{
806 struct ce_msg_comp_data ce_complete;
807 struct rtc_time_data rtc_data;
808 int rc;
809
810 memset(&ce_complete, 0, sizeof(ce_complete));
811 memset(&rtc_data, 0, sizeof(rtc_data));
812 init_completion(&rtc_data.com);
813 ce_complete.handler = &get_rtc_time_complete;
814 ce_complete.token = &rtc_data;
815 rc = signal_ce_msg_simple(0x40, &ce_complete);
816 if (rc)
817 return rc;
818 wait_for_completion(&rtc_data.com);
819 return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
820}
821
822struct boot_rtc_time_data {
823 int busy;
824 struct ce_msg_data ce_msg;
825 int rc;
826};
827
828static void get_boot_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
829{
830 struct boot_rtc_time_data *rtc = token;
831
832 memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
833 rtc->rc = 0;
834 rtc->busy = 0;
835}
836
837static int mf_get_boot_rtc(struct rtc_time *tm)
838{
839 struct ce_msg_comp_data ce_complete;
840 struct boot_rtc_time_data rtc_data;
841 int rc;
842
843 memset(&ce_complete, 0, sizeof(ce_complete));
844 memset(&rtc_data, 0, sizeof(rtc_data));
845 rtc_data.busy = 1;
846 ce_complete.handler = &get_boot_rtc_time_complete;
847 ce_complete.token = &rtc_data;
848 rc = signal_ce_msg_simple(0x40, &ce_complete);
849 if (rc)
850 return rc;
851 /* We need to poll here as we are not yet taking interrupts */
852 while (rtc_data.busy) {
853 if (hvlpevent_is_pending())
854 process_hvlpevents();
855 }
856 return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
857}
858
859#ifdef CONFIG_PROC_FS
860static int mf_cmdline_proc_show(struct seq_file *m, void *v)
861{
862 char *page, *p;
863 struct vsp_cmd_data vsp_cmd;
864 int rc;
865 dma_addr_t dma_addr;
866
867 /* The HV appears to return no more than 256 bytes of command line */
868 page = kmalloc(256, GFP_KERNEL);
869 if (!page)
870 return -ENOMEM;
871
872 dma_addr = iseries_hv_map(page, 256, DMA_FROM_DEVICE);
873 if (dma_addr == DMA_ERROR_CODE) {
874 kfree(page);
875 return -ENOMEM;
876 }
877 memset(page, 0, 256);
878 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
879 vsp_cmd.cmd = 33;
880 vsp_cmd.sub_data.kern.token = dma_addr;
881 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
882 vsp_cmd.sub_data.kern.side = (u64)m->private;
883 vsp_cmd.sub_data.kern.length = 256;
884 mb();
885 rc = signal_vsp_instruction(&vsp_cmd);
886 iseries_hv_unmap(dma_addr, 256, DMA_FROM_DEVICE);
887 if (rc) {
888 kfree(page);
889 return rc;
890 }
891 if (vsp_cmd.result_code != 0) {
892 kfree(page);
893 return -ENOMEM;
894 }
895 p = page;
896 while (p - page < 256) {
897 if (*p == '\0' || *p == '\n') {
898 *p = '\n';
899 break;
900 }
901 p++;
902
903 }
904 seq_write(m, page, p - page);
905 kfree(page);
906 return 0;
907}
908
909static int mf_cmdline_proc_open(struct inode *inode, struct file *file)
910{
911 return single_open(file, mf_cmdline_proc_show, PDE(inode)->data);
912}
913
914#if 0
915static int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side)
916{
917 struct vsp_cmd_data vsp_cmd;
918 int rc;
919 int len = *size;
920 dma_addr_t dma_addr;
921
922 dma_addr = iseries_hv_map(buffer, len, DMA_FROM_DEVICE);
923 memset(buffer, 0, len);
924 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
925 vsp_cmd.cmd = 32;
926 vsp_cmd.sub_data.kern.token = dma_addr;
927 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
928 vsp_cmd.sub_data.kern.side = side;
929 vsp_cmd.sub_data.kern.offset = offset;
930 vsp_cmd.sub_data.kern.length = len;
931 mb();
932 rc = signal_vsp_instruction(&vsp_cmd);
933 if (rc == 0) {
934 if (vsp_cmd.result_code == 0)
935 *size = vsp_cmd.sub_data.length_out;
936 else
937 rc = -ENOMEM;
938 }
939
940 iseries_hv_unmap(dma_addr, len, DMA_FROM_DEVICE);
941
942 return rc;
943}
944
945static int proc_mf_dump_vmlinux(char *page, char **start, off_t off,
946 int count, int *eof, void *data)
947{
948 int sizeToGet = count;
949
950 if (!capable(CAP_SYS_ADMIN))
951 return -EACCES;
952
953 if (mf_getVmlinuxChunk(page, &sizeToGet, off, (u64)data) == 0) {
954 if (sizeToGet != 0) {
955 *start = page + off;
956 return sizeToGet;
957 }
958 *eof = 1;
959 return 0;
960 }
961 *eof = 1;
962 return 0;
963}
964#endif
965
966static int mf_side_proc_show(struct seq_file *m, void *v)
967{
968 char mf_current_side = ' ';
969 struct vsp_cmd_data vsp_cmd;
970
971 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
972 vsp_cmd.cmd = 2;
973 vsp_cmd.sub_data.ipl_type = 0;
974 mb();
975
976 if (signal_vsp_instruction(&vsp_cmd) == 0) {
977 if (vsp_cmd.result_code == 0) {
978 switch (vsp_cmd.sub_data.ipl_type) {
979 case 0: mf_current_side = 'A';
980 break;
981 case 1: mf_current_side = 'B';
982 break;
983 case 2: mf_current_side = 'C';
984 break;
985 default: mf_current_side = 'D';
986 break;
987 }
988 }
989 }
990
991 seq_printf(m, "%c\n", mf_current_side);
992 return 0;
993}
994
995static int mf_side_proc_open(struct inode *inode, struct file *file)
996{
997 return single_open(file, mf_side_proc_show, NULL);
998}
999
1000static ssize_t mf_side_proc_write(struct file *file, const char __user *buffer,
1001 size_t count, loff_t *pos)
1002{
1003 char side;
1004 u64 newSide;
1005 struct vsp_cmd_data vsp_cmd;
1006
1007 if (!capable(CAP_SYS_ADMIN))
1008 return -EACCES;
1009
1010 if (count == 0)
1011 return 0;
1012
1013 if (get_user(side, buffer))
1014 return -EFAULT;
1015
1016 switch (side) {
1017 case 'A': newSide = 0;
1018 break;
1019 case 'B': newSide = 1;
1020 break;
1021 case 'C': newSide = 2;
1022 break;
1023 case 'D': newSide = 3;
1024 break;
1025 default:
1026 printk(KERN_ERR "mf_proc.c: proc_mf_change_side: invalid side\n");
1027 return -EINVAL;
1028 }
1029
1030 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1031 vsp_cmd.sub_data.ipl_type = newSide;
1032 vsp_cmd.cmd = 10;
1033
1034 (void)signal_vsp_instruction(&vsp_cmd);
1035
1036 return count;
1037}
1038
1039static const struct file_operations mf_side_proc_fops = {
1040 .owner = THIS_MODULE,
1041 .open = mf_side_proc_open,
1042 .read = seq_read,
1043 .llseek = seq_lseek,
1044 .release = single_release,
1045 .write = mf_side_proc_write,
1046};
1047
1048static int mf_src_proc_show(struct seq_file *m, void *v)
1049{
1050 return 0;
1051}
1052
1053static int mf_src_proc_open(struct inode *inode, struct file *file)
1054{
1055 return single_open(file, mf_src_proc_show, NULL);
1056}
1057
1058static ssize_t mf_src_proc_write(struct file *file, const char __user *buffer,
1059 size_t count, loff_t *pos)
1060{
1061 char stkbuf[10];
1062
1063 if (!capable(CAP_SYS_ADMIN))
1064 return -EACCES;
1065
1066 if ((count < 4) && (count != 1)) {
1067 printk(KERN_ERR "mf_proc: invalid src\n");
1068 return -EINVAL;
1069 }
1070
1071 if (count > (sizeof(stkbuf) - 1))
1072 count = sizeof(stkbuf) - 1;
1073 if (copy_from_user(stkbuf, buffer, count))
1074 return -EFAULT;
1075
1076 if ((count == 1) && (*stkbuf == '\0'))
1077 mf_clear_src();
1078 else
1079 mf_display_src(*(u32 *)stkbuf);
1080
1081 return count;
1082}
1083
1084static const struct file_operations mf_src_proc_fops = {
1085 .owner = THIS_MODULE,
1086 .open = mf_src_proc_open,
1087 .read = seq_read,
1088 .llseek = seq_lseek,
1089 .release = single_release,
1090 .write = mf_src_proc_write,
1091};
1092
1093static ssize_t mf_cmdline_proc_write(struct file *file, const char __user *buffer,
1094 size_t count, loff_t *pos)
1095{
1096 void *data = PDE(file->f_path.dentry->d_inode)->data;
1097 struct vsp_cmd_data vsp_cmd;
1098 dma_addr_t dma_addr;
1099 char *page;
1100 int ret = -EACCES;
1101
1102 if (!capable(CAP_SYS_ADMIN))
1103 goto out;
1104
1105 dma_addr = 0;
1106 page = iseries_hv_alloc(count, &dma_addr, GFP_ATOMIC);
1107 ret = -ENOMEM;
1108 if (page == NULL)
1109 goto out;
1110
1111 ret = -EFAULT;
1112 if (copy_from_user(page, buffer, count))
1113 goto out_free;
1114
1115 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1116 vsp_cmd.cmd = 31;
1117 vsp_cmd.sub_data.kern.token = dma_addr;
1118 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
1119 vsp_cmd.sub_data.kern.side = (u64)data;
1120 vsp_cmd.sub_data.kern.length = count;
1121 mb();
1122 (void)signal_vsp_instruction(&vsp_cmd);
1123 ret = count;
1124
1125out_free:
1126 iseries_hv_free(count, page, dma_addr);
1127out:
1128 return ret;
1129}
1130
1131static const struct file_operations mf_cmdline_proc_fops = {
1132 .owner = THIS_MODULE,
1133 .open = mf_cmdline_proc_open,
1134 .read = seq_read,
1135 .llseek = seq_lseek,
1136 .release = single_release,
1137 .write = mf_cmdline_proc_write,
1138};
1139
1140static ssize_t proc_mf_change_vmlinux(struct file *file,
1141 const char __user *buf,
1142 size_t count, loff_t *ppos)
1143{
1144 struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode);
1145 ssize_t rc;
1146 dma_addr_t dma_addr;
1147 char *page;
1148 struct vsp_cmd_data vsp_cmd;
1149
1150 rc = -EACCES;
1151 if (!capable(CAP_SYS_ADMIN))
1152 goto out;
1153
1154 dma_addr = 0;
1155 page = iseries_hv_alloc(count, &dma_addr, GFP_ATOMIC);
1156 rc = -ENOMEM;
1157 if (page == NULL) {
1158 printk(KERN_ERR "mf.c: couldn't allocate memory to set vmlinux chunk\n");
1159 goto out;
1160 }
1161 rc = -EFAULT;
1162 if (copy_from_user(page, buf, count))
1163 goto out_free;
1164
1165 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1166 vsp_cmd.cmd = 30;
1167 vsp_cmd.sub_data.kern.token = dma_addr;
1168 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
1169 vsp_cmd.sub_data.kern.side = (u64)dp->data;
1170 vsp_cmd.sub_data.kern.offset = *ppos;
1171 vsp_cmd.sub_data.kern.length = count;
1172 mb();
1173 rc = signal_vsp_instruction(&vsp_cmd);
1174 if (rc)
1175 goto out_free;
1176 rc = -ENOMEM;
1177 if (vsp_cmd.result_code != 0)
1178 goto out_free;
1179
1180 *ppos += count;
1181 rc = count;
1182out_free:
1183 iseries_hv_free(count, page, dma_addr);
1184out:
1185 return rc;
1186}
1187
1188static const struct file_operations proc_vmlinux_operations = {
1189 .write = proc_mf_change_vmlinux,
1190 .llseek = default_llseek,
1191};
1192
1193static int __init mf_proc_init(void)
1194{
1195 struct proc_dir_entry *mf_proc_root;
1196 struct proc_dir_entry *ent;
1197 struct proc_dir_entry *mf;
1198 char name[2];
1199 int i;
1200
1201 if (!firmware_has_feature(FW_FEATURE_ISERIES))
1202 return 0;
1203
1204 mf_proc_root = proc_mkdir("iSeries/mf", NULL);
1205 if (!mf_proc_root)
1206 return 1;
1207
1208 name[1] = '\0';
1209 for (i = 0; i < 4; i++) {
1210 name[0] = 'A' + i;
1211 mf = proc_mkdir(name, mf_proc_root);
1212 if (!mf)
1213 return 1;
1214
1215 ent = proc_create_data("cmdline", S_IRUSR|S_IWUSR, mf,
1216 &mf_cmdline_proc_fops, (void *)(long)i);
1217 if (!ent)
1218 return 1;
1219
1220 if (i == 3) /* no vmlinux entry for 'D' */
1221 continue;
1222
1223 ent = proc_create_data("vmlinux", S_IFREG|S_IWUSR, mf,
1224 &proc_vmlinux_operations,
1225 (void *)(long)i);
1226 if (!ent)
1227 return 1;
1228 }
1229
1230 ent = proc_create("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root,
1231 &mf_side_proc_fops);
1232 if (!ent)
1233 return 1;
1234
1235 ent = proc_create("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root,
1236 &mf_src_proc_fops);
1237 if (!ent)
1238 return 1;
1239
1240 return 0;
1241}
1242
1243__initcall(mf_proc_init);
1244
1245#endif /* CONFIG_PROC_FS */
1246
1247/*
1248 * Get the RTC from the virtual service processor
1249 * This requires flowing LpEvents to the primary partition
1250 */
1251void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
1252{
1253 mf_get_rtc(rtc_tm);
1254 rtc_tm->tm_mon--;
1255}
1256
1257/*
1258 * Set the RTC in the virtual service processor
1259 * This requires flowing LpEvents to the primary partition
1260 */
1261int iSeries_set_rtc_time(struct rtc_time *tm)
1262{
1263 mf_set_rtc(tm);
1264 return 0;
1265}
1266
1267unsigned long iSeries_get_boot_time(void)
1268{
1269 struct rtc_time tm;
1270
1271 mf_get_boot_rtc(&tm);
1272 return mktime(tm.tm_year + 1900, tm.tm_mon, tm.tm_mday,
1273 tm.tm_hour, tm.tm_min, tm.tm_sec);
1274}
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
new file mode 100644
index 00000000000..2c6ff0fdac9
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/misc.S
@@ -0,0 +1,26 @@
1/*
2 * This file contains miscellaneous low-level functions.
3 * Copyright (C) 1995-2005 IBM Corp
4 *
5 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
6 * and Paul Mackerras.
7 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
8 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version
13 * 2 of the License, or (at your option) any later version.
14 */
15
16#include <asm/processor.h>
17#include <asm/asm-offsets.h>
18#include <asm/ppc_asm.h>
19
20 .text
21
22/* Handle pending interrupts in interrupt context */
23_GLOBAL(iseries_handle_interrupts)
24 li r0,0x5555
25 sc
26 blr
diff --git a/arch/powerpc/platforms/iseries/naca.h b/arch/powerpc/platforms/iseries/naca.h
new file mode 100644
index 00000000000..f01708e1286
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/naca.h
@@ -0,0 +1,24 @@
1#ifndef _PLATFORMS_ISERIES_NACA_H
2#define _PLATFORMS_ISERIES_NACA_H
3
4/*
5 * c 2001 PPC 64 Team, IBM Corp
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License
9 * as published by the Free Software Foundation; either version
10 * 2 of the License, or (at your option) any later version.
11 */
12
13#include <asm/types.h>
14
15struct naca_struct {
16 /* Kernel only data - undefined for user space */
17 const void *xItVpdAreas; /* VPD Data 0x00 */
18 void *xRamDisk; /* iSeries ramdisk 0x08 */
19 u64 xRamDiskSize; /* In pages 0x10 */
20};
21
22extern struct naca_struct naca;
23
24#endif /* _PLATFORMS_ISERIES_NACA_H */
diff --git a/arch/powerpc/platforms/iseries/pci.c b/arch/powerpc/platforms/iseries/pci.c
new file mode 100644
index 00000000000..ab3962b0d24
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/pci.c
@@ -0,0 +1,920 @@
1/*
2 * Copyright (C) 2001 Allan Trautman, IBM Corporation
3 * Copyright (C) 2005,2007 Stephen Rothwell, IBM Corp
4 *
5 * iSeries specific routines for PCI.
6 *
7 * Based on code from pci.c and iSeries_pci.c 32bit
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#undef DEBUG
25
26#include <linux/jiffies.h>
27#include <linux/kernel.h>
28#include <linux/list.h>
29#include <linux/string.h>
30#include <linux/slab.h>
31#include <linux/init.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/of.h>
35#include <linux/ratelimit.h>
36
37#include <asm/types.h>
38#include <asm/io.h>
39#include <asm/irq.h>
40#include <asm/prom.h>
41#include <asm/machdep.h>
42#include <asm/pci-bridge.h>
43#include <asm/iommu.h>
44#include <asm/abs_addr.h>
45#include <asm/firmware.h>
46
47#include <asm/iseries/hv_types.h>
48#include <asm/iseries/hv_call_xm.h>
49#include <asm/iseries/mf.h>
50#include <asm/iseries/iommu.h>
51
52#include <asm/ppc-pci.h>
53
54#include "irq.h"
55#include "pci.h"
56#include "call_pci.h"
57
58#define PCI_RETRY_MAX 3
59static int limit_pci_retries = 1; /* Set Retry Error on. */
60
61/*
62 * Table defines
63 * Each Entry size is 4 MB * 1024 Entries = 4GB I/O address space.
64 */
65#define IOMM_TABLE_MAX_ENTRIES 1024
66#define IOMM_TABLE_ENTRY_SIZE 0x0000000000400000UL
67#define BASE_IO_MEMORY 0xE000000000000000UL
68#define END_IO_MEMORY 0xEFFFFFFFFFFFFFFFUL
69
70static unsigned long max_io_memory = BASE_IO_MEMORY;
71static long current_iomm_table_entry;
72
73/*
74 * Lookup Tables.
75 */
76static struct device_node *iomm_table[IOMM_TABLE_MAX_ENTRIES];
77static u64 ds_addr_table[IOMM_TABLE_MAX_ENTRIES];
78
79static DEFINE_SPINLOCK(iomm_table_lock);
80
81/*
82 * Generate a Direct Select Address for the Hypervisor
83 */
84static inline u64 iseries_ds_addr(struct device_node *node)
85{
86 struct pci_dn *pdn = PCI_DN(node);
87 const u32 *sbp = of_get_property(node, "linux,subbus", NULL);
88
89 return ((u64)pdn->busno << 48) + ((u64)(sbp ? *sbp : 0) << 40)
90 + ((u64)0x10 << 32);
91}
92
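To make the shifts in iseries_ds_addr() concrete: the PCI bus number lands at bit 48, the sub-bus at bit 40 and the fixed 0x10 at bit 32, so a bus of 0x18 with sub-bus 0x58 yields the Direct Select Address 0x0018581000000000. A standalone sketch of the same packing (the bus/sub-bus values are made up):

#include <stdio.h>
#include <stdint.h>

/* Same packing as iseries_ds_addr(): bus << 48 | subbus << 40 | 0x10 << 32 */
static uint64_t example_ds_addr(unsigned int bus, unsigned int subbus)
{
	return ((uint64_t)bus << 48) + ((uint64_t)subbus << 40)
		+ ((uint64_t)0x10 << 32);
}

int main(void)
{
	printf("DSA(0x18, 0x58) = 0x%016llX\n",
	       (unsigned long long)example_ds_addr(0x18, 0x58));
	return 0;
}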
93/*
94 * Size of Bus VPD data
95 */
96#define BUS_VPDSIZE 1024
97
98/*
99 * Bus Vpd Tags
100 */
101#define VPD_END_OF_AREA 0x79
102#define VPD_ID_STRING 0x82
103#define VPD_VENDOR_AREA 0x84
104
105/*
106 * Mfg Area Tags
107 */
108#define VPD_FRU_FRAME_ID 0x4649 /* "FI" */
109#define VPD_SLOT_MAP_FORMAT 0x4D46 /* "MF" */
110#define VPD_SLOT_MAP 0x534D /* "SM" */
111
112/*
113 * Structures of the areas
114 */
115struct mfg_vpd_area {
116 u16 tag;
117 u8 length;
118 u8 data1;
119 u8 data2;
120};
121#define MFG_ENTRY_SIZE 3
122
123struct slot_map {
124 u8 agent;
125 u8 secondary_agent;
126 u8 phb;
127 char card_location[3];
128 char parms[8];
129 char reserved[2];
130};
131#define SLOT_ENTRY_SIZE 16
132
133/*
134 * Parse the Slot Area
135 */
136static void __init iseries_parse_slot_area(struct slot_map *map, int len,
137 HvAgentId agent, u8 *phb, char card[4])
138{
139 /*
140 * Parse Slot label until we find the one requested
141 */
142 while (len > 0) {
143 if (map->agent == agent) {
144 /*
145 * If Phb wasn't found, grab the first one found.
146 */
147 if (*phb == 0xff)
148 *phb = map->phb;
149 /* Found it, extract the data. */
150 if (map->phb == *phb) {
151 memcpy(card, &map->card_location, 3);
152 card[3] = 0;
153 break;
154 }
155 }
156 /* Point to the next Slot */
157 map = (struct slot_map *)((char *)map + SLOT_ENTRY_SIZE);
158 len -= SLOT_ENTRY_SIZE;
159 }
160}
161
162/*
163 * Parse the Mfg Area
164 */
165static void __init iseries_parse_mfg_area(struct mfg_vpd_area *area, int len,
166 HvAgentId agent, u8 *phb, u8 *frame, char card[4])
167{
168 u16 slot_map_fmt = 0;
169
170 /* Parse Mfg Data */
171 while (len > 0) {
172 int mfg_tag_len = area->length;
173 /* Frame ID (FI 4649020310 ) */
174 if (area->tag == VPD_FRU_FRAME_ID)
175 *frame = area->data1;
176 /* Slot Map Format (MF 4D46020004 ) */
177 else if (area->tag == VPD_SLOT_MAP_FORMAT)
178 slot_map_fmt = (area->data1 * 256)
179 + area->data2;
180 /* Slot Map (SM 534D90 ) */
181 else if (area->tag == VPD_SLOT_MAP) {
182 struct slot_map *slot_map;
183
184 if (slot_map_fmt == 0x1004)
185 slot_map = (struct slot_map *)((char *)area
186 + MFG_ENTRY_SIZE + 1);
187 else
188 slot_map = (struct slot_map *)((char *)area
189 + MFG_ENTRY_SIZE);
190 iseries_parse_slot_area(slot_map, mfg_tag_len,
191 agent, phb, card);
192 }
193 /*
194 * Point to the next Mfg Area
195 * Use the defined size; sizeof gives the wrong answer
196 */
197 area = (struct mfg_vpd_area *)((char *)area + mfg_tag_len
198 + MFG_ENTRY_SIZE);
199 len -= (mfg_tag_len + MFG_ENTRY_SIZE);
200 }
201}
202
203/*
204 * Look for "BUS".. Data is not Null terminated.
205 * PHBID of 0xFF indicates PHB was not found in VPD Data.
206 */
207static u8 __init iseries_parse_phbid(u8 *area, int len)
208{
209 while (len > 0) {
210 if ((*area == 'B') && (*(area + 1) == 'U')
211 && (*(area + 2) == 'S')) {
212 area += 3;
213 while (*area == ' ')
214 area++;
215 return *area & 0x0F;
216 }
217 area++;
218 len--;
219 }
220 return 0xff;
221}
222
223/*
224 * Parse out the VPD Areas
225 */
226static void __init iseries_parse_vpd(u8 *data, int data_len,
227 HvAgentId agent, u8 *frame, char card[4])
228{
229 u8 phb = 0xff;
230
231 while (data_len > 0) {
232 int len;
233 u8 tag = *data;
234
235 if (tag == VPD_END_OF_AREA)
236 break;
237 len = *(data + 1) + (*(data + 2) * 256);
238 data += 3;
239 data_len -= 3;
240 if (tag == VPD_ID_STRING)
241 phb = iseries_parse_phbid(data, len);
242 else if (tag == VPD_VENDOR_AREA)
243 iseries_parse_mfg_area((struct mfg_vpd_area *)data, len,
244 agent, &phb, frame, card);
245 /* Point to next Area. */
246 data += len;
247 data_len -= len;
248 }
249}
250
251static int __init iseries_get_location_code(u16 bus, HvAgentId agent,
252 u8 *frame, char card[4])
253{
254 int status = 0;
255 int bus_vpd_len = 0;
256 u8 *bus_vpd = kmalloc(BUS_VPDSIZE, GFP_KERNEL);
257
258 if (bus_vpd == NULL) {
259 printk("PCI: Bus VPD Buffer allocation failure.\n");
260 return 0;
261 }
262 bus_vpd_len = HvCallPci_getBusVpd(bus, iseries_hv_addr(bus_vpd),
263 BUS_VPDSIZE);
264 if (bus_vpd_len == 0) {
265 printk("PCI: Bus VPD Buffer zero length.\n");
266 goto out_free;
267 }
268 /* printk("PCI: bus_vpd: %p, %d\n",bus_vpd, bus_vpd_len); */
269 /* Make sure this is what I think it is */
270 if (*bus_vpd != VPD_ID_STRING) {
271 printk("PCI: Bus VPD Buffer missing starting tag.\n");
272 goto out_free;
273 }
274 iseries_parse_vpd(bus_vpd, bus_vpd_len, agent, frame, card);
275 status = 1;
276out_free:
277 kfree(bus_vpd);
278 return status;
279}
280
281/*
282 * Prints the device information.
283 * - Pass in pci_dev* pointer to the device.
284 * - Pass in the device count
285 *
286 * Format:
287 * PCI: Bus 0, Device 26, Vendor 0x12AE Frame 1, Card C10 Ethernet
288 * controller
289 */
290static void __init iseries_device_information(struct pci_dev *pdev,
291 u16 bus, HvSubBusNumber subbus)
292{
293 u8 frame = 0;
294 char card[4];
295 HvAgentId agent;
296
297 agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus),
298 ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus));
299
300 if (iseries_get_location_code(bus, agent, &frame, card)) {
301 printk(KERN_INFO "PCI: %s, Vendor %04X Frame%3d, "
302 "Card %4s 0x%04X\n", pci_name(pdev), pdev->vendor,
303 frame, card, (int)(pdev->class >> 8));
304 }
305}
306
307/*
308 * iomm_table_allocate_entry
309 *
310 * Adds pci_dev entry in address translation table
311 *
312 * - Allocates the number of entries required in the table based on the BAR
313 * size.
314 * - Allocates starting at BASE_IO_MEMORY and increases.
315 * - The size is rounded up to a multiple of the entry size.
316 * - CurrentIndex is incremented to keep track of the last entry.
317 * - Builds the resource entry for allocated BARs.
318 */
319static void __init iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
320{
321 struct resource *bar_res = &dev->resource[bar_num];
322 long bar_size = pci_resource_len(dev, bar_num);
323 struct device_node *dn = pci_device_to_OF_node(dev);
324
325 /*
326 * No space to allocate, quick exit, skip Allocation.
327 */
328 if (bar_size == 0)
329 return;
330 /*
331 * Set Resource values.
332 */
333 spin_lock(&iomm_table_lock);
334 bar_res->start = BASE_IO_MEMORY +
335 IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
336 bar_res->end = bar_res->start + bar_size - 1;
337 /*
338 * Allocate the number of table entries needed for BAR.
339 */
340 while (bar_size > 0 ) {
341 iomm_table[current_iomm_table_entry] = dn;
342 ds_addr_table[current_iomm_table_entry] =
343 iseries_ds_addr(dn) | (bar_num << 24);
344 bar_size -= IOMM_TABLE_ENTRY_SIZE;
345 ++current_iomm_table_entry;
346 }
347 max_io_memory = BASE_IO_MEMORY +
348 IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
349 spin_unlock(&iomm_table_lock);
350}
351
352/*
353 * allocate_device_bars
354 *
355 * - Allocates all pci_dev BARs and updates the resources with the
356 * BAR values. BARs with zero length are skipped (no table entry
357 * is allocated). The HvCallPci_getBarParms is used to get the size
358 * of the BAR space. It calls iomm_table_allocate_entry to allocate
359 * each entry.
360 * - Loops through the BAR resources (0 - 5) including the ROM,
361 * which is resource 6.
362 */
363static void __init allocate_device_bars(struct pci_dev *dev)
364{
365 int bar_num;
366
367 for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num)
368 iomm_table_allocate_entry(dev, bar_num);
369}
370
371/*
372 * Log error information to system console.
373 * Filter out the "device not there" errors.
374 * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx
375 * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
376 * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
377 */
378static void pci_log_error(char *error, int bus, int subbus,
379 int agent, int hv_res)
380{
381 if (hv_res == 0x0302)
382 return;
383 printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X",
384 error, bus, subbus, agent, hv_res);
385}
386
387/*
388 * Look down the chain to find the matching device node
389 */
390static struct device_node *find_device_node(int bus, int devfn)
391{
392 struct device_node *node;
393
394 for (node = NULL; (node = of_find_all_nodes(node)); ) {
395 struct pci_dn *pdn = PCI_DN(node);
396
397 if (pdn && (bus == pdn->busno) && (devfn == pdn->devfn))
398 return node;
399 }
400 return NULL;
401}
402
403/*
404 * iSeries_pcibios_fixup_resources
405 *
406 * Fixes up all resources for devices
407 */
408void __init iSeries_pcibios_fixup_resources(struct pci_dev *pdev)
409{
410 const u32 *agent;
411 const u32 *sub_bus;
412 unsigned char bus = pdev->bus->number;
413 struct device_node *node;
414 int i;
415
416 node = pci_device_to_OF_node(pdev);
417 pr_debug("PCI: iSeries %s, pdev %p, node %p\n",
418 pci_name(pdev), pdev, node);
419 if (!node) {
420 printk("PCI: %s disabled, device tree entry not found !\n",
421 pci_name(pdev));
422 for (i = 0; i <= PCI_ROM_RESOURCE; i++)
423 pdev->resource[i].flags = 0;
424 return;
425 }
426 sub_bus = of_get_property(node, "linux,subbus", NULL);
427 agent = of_get_property(node, "linux,agent-id", NULL);
428 if (agent && sub_bus) {
429 u8 irq = iSeries_allocate_IRQ(bus, 0, *sub_bus);
430 int err;
431
432 err = HvCallXm_connectBusUnit(bus, *sub_bus, *agent, irq);
433 if (err)
434 pci_log_error("Connect Bus Unit",
435 bus, *sub_bus, *agent, err);
436 else {
437 err = HvCallPci_configStore8(bus, *sub_bus,
438 *agent, PCI_INTERRUPT_LINE, irq);
439 if (err)
440 pci_log_error("PciCfgStore Irq Failed!",
441 bus, *sub_bus, *agent, err);
442 else
443 pdev->irq = irq;
444 }
445 }
446
447 allocate_device_bars(pdev);
448 if (likely(sub_bus))
449 iseries_device_information(pdev, bus, *sub_bus);
450 else
451 printk(KERN_ERR "PCI: Device node %s has missing or invalid "
452 "linux,subbus property\n", node->full_name);
453}
454
455/*
456 * iSeries_pci_final_fixup(void)
457 */
458void __init iSeries_pci_final_fixup(void)
459{
460 /* Fix up at the device node and pci_dev relationship */
461 mf_display_src(0xC9000100);
462 iSeries_activate_IRQs();
463 mf_display_src(0xC9000200);
464}
465
466/*
467 * Config space read and write functions.
468 * For now at least, we look for the device node for the bus and devfn
469 * that we are asked to access. It may be possible to translate the devfn
470 * to a subbus and deviceid more directly.
471 */
472static u64 hv_cfg_read_func[4] = {
473 HvCallPciConfigLoad8, HvCallPciConfigLoad16,
474 HvCallPciConfigLoad32, HvCallPciConfigLoad32
475};
476
477static u64 hv_cfg_write_func[4] = {
478 HvCallPciConfigStore8, HvCallPciConfigStore16,
479 HvCallPciConfigStore32, HvCallPciConfigStore32
480};
481
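The two tables above let the config-space accessors pick a hypervisor call by access size without branching: (size - 1) & 3 maps a size of 1 to slot 0 (the 8-bit call), 2 to slot 1 (16-bit) and 4 to slot 3, which is why the 32-bit call also fills slots 2 and 3. A quick standalone check of that index arithmetic:

#include <stdio.h>

int main(void)
{
	static const char *slot[4] = { "Load8", "Load16", "Load32", "Load32" };
	int sizes[] = { 1, 2, 4 };
	int i;

	for (i = 0; i < 3; i++)
		printf("size %d -> slot %d (%s)\n", sizes[i],
		       (sizes[i] - 1) & 3, slot[(sizes[i] - 1) & 3]);
	return 0;
}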
482/*
483 * Read PCI config space
484 */
485static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
486 int offset, int size, u32 *val)
487{
488 struct device_node *node = find_device_node(bus->number, devfn);
489 u64 fn;
490 struct HvCallPci_LoadReturn ret;
491
492 if (node == NULL)
493 return PCIBIOS_DEVICE_NOT_FOUND;
494 if (offset > 255) {
495 *val = ~0;
496 return PCIBIOS_BAD_REGISTER_NUMBER;
497 }
498
499 fn = hv_cfg_read_func[(size - 1) & 3];
500 HvCall3Ret16(fn, &ret, iseries_ds_addr(node), offset, 0);
501
502 if (ret.rc != 0) {
503 *val = ~0;
504 return PCIBIOS_DEVICE_NOT_FOUND; /* or something */
505 }
506
507 *val = ret.value;
508 return 0;
509}
510
511/*
512 * Write PCI config space
513 */
514
515static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
516 int offset, int size, u32 val)
517{
518 struct device_node *node = find_device_node(bus->number, devfn);
519 u64 fn;
520 u64 ret;
521
522 if (node == NULL)
523 return PCIBIOS_DEVICE_NOT_FOUND;
524 if (offset > 255)
525 return PCIBIOS_BAD_REGISTER_NUMBER;
526
527 fn = hv_cfg_write_func[(size - 1) & 3];
528 ret = HvCall4(fn, iseries_ds_addr(node), offset, val, 0);
529
530 if (ret != 0)
531 return PCIBIOS_DEVICE_NOT_FOUND;
532
533 return 0;
534}
535
536static struct pci_ops iSeries_pci_ops = {
537 .read = iSeries_pci_read_config,
538 .write = iSeries_pci_write_config
539};
540
541/*
542 * Check Return Code
543 * -> On Failure, print and log information.
544 * Increment Retry Count, if exceeds max, panic partition.
545 *
546 * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
547 * PCI: Device 23.90 ReadL Retry( 1)
548 * PCI: Device 23.90 ReadL Retry Successful(1)
549 */
550static int check_return_code(char *type, struct device_node *dn,
551 int *retry, u64 ret)
552{
553 if (ret != 0) {
554 struct pci_dn *pdn = PCI_DN(dn);
555
556 (*retry)++;
557 printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
558 type, pdn->busno, pdn->devfn,
559 *retry, (int)ret);
560 /*
561 * Bump the retry and check for retry count exceeded.
562 * If exceeded, panic the system.
563 */
564 if (((*retry) > PCI_RETRY_MAX) &&
565 (limit_pci_retries > 0)) {
566 mf_display_src(0xB6000103);
567 panic_timeout = 0;
568 panic("PCI: Hardware I/O Error, SRC B6000103, "
569 "Automatic Reboot Disabled.\n");
570 }
571 return -1; /* Retry Try */
572 }
573 return 0;
574}
575
576/*
577 * Translate the I/O Address into a device node, bar, and bar offset.
578 * Note: Make sure the passed variables end up on the stack to avoid
579 * exposing them as device-global state.
580 */
581static inline struct device_node *xlate_iomm_address(
582 const volatile void __iomem *addr,
583 u64 *dsaptr, u64 *bar_offset, const char *func)
584{
585 unsigned long orig_addr;
586 unsigned long base_addr;
587 unsigned long ind;
588 struct device_node *dn;
589
590 orig_addr = (unsigned long __force)addr;
591 if ((orig_addr < BASE_IO_MEMORY) || (orig_addr >= max_io_memory)) {
592 static DEFINE_RATELIMIT_STATE(ratelimit, 60 * HZ, 10);
593
594 if (__ratelimit(&ratelimit))
595 printk(KERN_ERR
596 "iSeries_%s: invalid access at IO address %p\n",
597 func, addr);
598 return NULL;
599 }
600 base_addr = orig_addr - BASE_IO_MEMORY;
601 ind = base_addr / IOMM_TABLE_ENTRY_SIZE;
602 dn = iomm_table[ind];
603
604 if (dn != NULL) {
605 *dsaptr = ds_addr_table[ind];
606 *bar_offset = base_addr % IOMM_TABLE_ENTRY_SIZE;
607 } else
608 panic("PCI: Invalid PCI IO address detected!\n");
609 return dn;
610}
611
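With the constants defined earlier in this file (4 MB table entries starting at 0xE000000000000000), the translation above is a plain divide and modulo: an I/O token of 0xE000000000C01234, for example, selects iomm_table entry 3 with a BAR offset of 0x1234. A standalone sketch of that arithmetic; the constants are copied from this file, the sample address is made up:

#include <stdio.h>
#include <stdint.h>

#define IOMM_TABLE_ENTRY_SIZE	0x0000000000400000ULL	/* 4 MB per entry */
#define BASE_IO_MEMORY		0xE000000000000000ULL

int main(void)
{
	uint64_t addr = 0xE000000000C01234ULL;	/* hypothetical ioremap token */
	uint64_t base = addr - BASE_IO_MEMORY;

	/* Same split as xlate_iomm_address(): table index plus BAR offset. */
	printf("entry %llu, bar offset 0x%llX\n",
	       (unsigned long long)(base / IOMM_TABLE_ENTRY_SIZE),
	       (unsigned long long)(base % IOMM_TABLE_ENTRY_SIZE));
	return 0;
}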
612/*
613 * Read MM I/O Instructions for the iSeries
614 * On MM I/O error, all ones are returned and iSeries_pci_IoError is called;
615 * else, data is returned in Big Endian format.
616 */
617static u8 iseries_readb(const volatile void __iomem *addr)
618{
619 u64 bar_offset;
620 u64 dsa;
621 int retry = 0;
622 struct HvCallPci_LoadReturn ret;
623 struct device_node *dn =
624 xlate_iomm_address(addr, &dsa, &bar_offset, "read_byte");
625
626 if (dn == NULL)
627 return 0xff;
628 do {
629 HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, bar_offset, 0);
630 } while (check_return_code("RDB", dn, &retry, ret.rc) != 0);
631
632 return ret.value;
633}
634
635static u16 iseries_readw_be(const volatile void __iomem *addr)
636{
637 u64 bar_offset;
638 u64 dsa;
639 int retry = 0;
640 struct HvCallPci_LoadReturn ret;
641 struct device_node *dn =
642 xlate_iomm_address(addr, &dsa, &bar_offset, "read_word");
643
644 if (dn == NULL)
645 return 0xffff;
646 do {
647 HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
648 bar_offset, 0);
649 } while (check_return_code("RDW", dn, &retry, ret.rc) != 0);
650
651 return ret.value;
652}
653
654static u32 iseries_readl_be(const volatile void __iomem *addr)
655{
656 u64 bar_offset;
657 u64 dsa;
658 int retry = 0;
659 struct HvCallPci_LoadReturn ret;
660 struct device_node *dn =
661 xlate_iomm_address(addr, &dsa, &bar_offset, "read_long");
662
663 if (dn == NULL)
664 return 0xffffffff;
665 do {
666 HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa,
667 bar_offset, 0);
668 } while (check_return_code("RDL", dn, &retry, ret.rc) != 0);
669
670 return ret.value;
671}
672
673/*
674 * Write MM I/O Instructions for the iSeries
675 *
676 */
677static void iseries_writeb(u8 data, volatile void __iomem *addr)
678{
679 u64 bar_offset;
680 u64 dsa;
681 int retry = 0;
682 u64 rc;
683 struct device_node *dn =
684 xlate_iomm_address(addr, &dsa, &bar_offset, "write_byte");
685
686 if (dn == NULL)
687 return;
688 do {
689 rc = HvCall4(HvCallPciBarStore8, dsa, bar_offset, data, 0);
690 } while (check_return_code("WWB", dn, &retry, rc) != 0);
691}
692
693static void iseries_writew_be(u16 data, volatile void __iomem *addr)
694{
695 u64 bar_offset;
696 u64 dsa;
697 int retry = 0;
698 u64 rc;
699 struct device_node *dn =
700 xlate_iomm_address(addr, &dsa, &bar_offset, "write_word");
701
702 if (dn == NULL)
703 return;
704 do {
705 rc = HvCall4(HvCallPciBarStore16, dsa, bar_offset, data, 0);
706 } while (check_return_code("WWW", dn, &retry, rc) != 0);
707}
708
709static void iseries_writel_be(u32 data, volatile void __iomem *addr)
710{
711 u64 bar_offset;
712 u64 dsa;
713 int retry = 0;
714 u64 rc;
715 struct device_node *dn =
716 xlate_iomm_address(addr, &dsa, &bar_offset, "write_long");
717
718 if (dn == NULL)
719 return;
720 do {
721 rc = HvCall4(HvCallPciBarStore32, dsa, bar_offset, data, 0);
722 } while (check_return_code("WWL", dn, &retry, rc) != 0);
723}
724
725static u16 iseries_readw(const volatile void __iomem *addr)
726{
727 return le16_to_cpu(iseries_readw_be(addr));
728}
729
730static u32 iseries_readl(const volatile void __iomem *addr)
731{
732 return le32_to_cpu(iseries_readl_be(addr));
733}
734
735static void iseries_writew(u16 data, volatile void __iomem *addr)
736{
737 iseries_writew_be(cpu_to_le16(data), addr);
738}
739
740static void iseries_writel(u32 data, volatile void __iomem *addr)
741{
742	iseries_writel_be(cpu_to_le32(data), addr);
743}
744
745static void iseries_readsb(const volatile void __iomem *addr, void *buf,
746 unsigned long count)
747{
748 u8 *dst = buf;
749 while(count-- > 0)
750 *(dst++) = iseries_readb(addr);
751}
752
753static void iseries_readsw(const volatile void __iomem *addr, void *buf,
754 unsigned long count)
755{
756 u16 *dst = buf;
757 while(count-- > 0)
758 *(dst++) = iseries_readw_be(addr);
759}
760
761static void iseries_readsl(const volatile void __iomem *addr, void *buf,
762 unsigned long count)
763{
764 u32 *dst = buf;
765 while(count-- > 0)
766 *(dst++) = iseries_readl_be(addr);
767}
768
769static void iseries_writesb(volatile void __iomem *addr, const void *buf,
770 unsigned long count)
771{
772 const u8 *src = buf;
773 while(count-- > 0)
774 iseries_writeb(*(src++), addr);
775}
776
777static void iseries_writesw(volatile void __iomem *addr, const void *buf,
778 unsigned long count)
779{
780 const u16 *src = buf;
781 while(count-- > 0)
782 iseries_writew_be(*(src++), addr);
783}
784
785static void iseries_writesl(volatile void __iomem *addr, const void *buf,
786 unsigned long count)
787{
788 const u32 *src = buf;
789 while(count-- > 0)
790 iseries_writel_be(*(src++), addr);
791}
792
793static void iseries_memset_io(volatile void __iomem *addr, int c,
794 unsigned long n)
795{
796 volatile char __iomem *d = addr;
797
798 while (n-- > 0)
799 iseries_writeb(c, d++);
800}
801
802static void iseries_memcpy_fromio(void *dest, const volatile void __iomem *src,
803 unsigned long n)
804{
805 char *d = dest;
806 const volatile char __iomem *s = src;
807
808 while (n-- > 0)
809 *d++ = iseries_readb(s++);
810}
811
812static void iseries_memcpy_toio(volatile void __iomem *dest, const void *src,
813 unsigned long n)
814{
815 const char *s = src;
816 volatile char __iomem *d = dest;
817
818 while (n-- > 0)
819 iseries_writeb(*s++, d++);
820}
821
822/* We only set MMIO ops. The default PIO ops will default
823 * to the MMIO ops + pci_io_base which is 0 on iSeries as
824 * expected so both should work.
825 *
826 * Note that we don't implement the readq/writeq versions as
827 * I don't know of an HV call for doing so. Thus, the default
828 * operation will be used instead, which will fault, as the value
829 * returned by iSeries for MMIO addresses always hits a non-mapped
830 * area. This is as good as the BUG() we used to have there.
831 */
832static struct ppc_pci_io __initdata iseries_pci_io = {
833 .readb = iseries_readb,
834 .readw = iseries_readw,
835 .readl = iseries_readl,
836 .readw_be = iseries_readw_be,
837 .readl_be = iseries_readl_be,
838 .writeb = iseries_writeb,
839 .writew = iseries_writew,
840 .writel = iseries_writel,
841 .writew_be = iseries_writew_be,
842 .writel_be = iseries_writel_be,
843 .readsb = iseries_readsb,
844 .readsw = iseries_readsw,
845 .readsl = iseries_readsl,
846 .writesb = iseries_writesb,
847 .writesw = iseries_writesw,
848 .writesl = iseries_writesl,
849 .memset_io = iseries_memset_io,
850 .memcpy_fromio = iseries_memcpy_fromio,
851 .memcpy_toio = iseries_memcpy_toio,
852};
853
854/*
855 * iSeries_pcibios_init
856 *
857 * Description:
858 * This function checks for all possible system PCI host bridges that connect
859 * PCI buses. The system hypervisor is queried as to the guest partition
860 * ownership status. A pci_controller is built for any bus which is partially
861 * owned or fully owned by this guest partition.
862 */
863void __init iSeries_pcibios_init(void)
864{
865 struct pci_controller *phb;
866 struct device_node *root = of_find_node_by_path("/");
867 struct device_node *node = NULL;
868
869 /* Install IO hooks */
870 ppc_pci_io = iseries_pci_io;
871
872 pci_probe_only = 1;
873
874 /* iSeries has no IO space in the common sense, it needs to set
875 * the IO base to 0
876 */
877 pci_io_base = 0;
878
879 if (root == NULL) {
880 printk(KERN_CRIT "iSeries_pcibios_init: can't find root "
881 "of device tree\n");
882 return;
883 }
884 while ((node = of_get_next_child(root, node)) != NULL) {
885 HvBusNumber bus;
886 const u32 *busp;
887
888 if ((node->type == NULL) || (strcmp(node->type, "pci") != 0))
889 continue;
890
891 busp = of_get_property(node, "bus-range", NULL);
892 if (busp == NULL)
893 continue;
894 bus = *busp;
895 printk("bus %d appears to exist\n", bus);
896 phb = pcibios_alloc_controller(node);
897 if (phb == NULL)
898 continue;
899 /* All legacy iSeries PHBs are in domain zero */
900 phb->global_number = 0;
901
902 phb->first_busno = bus;
903 phb->last_busno = bus;
904 phb->ops = &iSeries_pci_ops;
905 phb->io_base_virt = (void __iomem *)_IO_BASE;
906 phb->io_resource.flags = IORESOURCE_IO;
907 phb->io_resource.start = BASE_IO_MEMORY;
908 phb->io_resource.end = END_IO_MEMORY;
909 phb->io_resource.name = "iSeries PCI IO";
910 phb->mem_resources[0].flags = IORESOURCE_MEM;
911 phb->mem_resources[0].start = BASE_IO_MEMORY;
912 phb->mem_resources[0].end = END_IO_MEMORY;
913		phb->mem_resources[0].name = "iSeries PCI MEM";
914 }
915
916 of_node_put(root);
917
918 pci_devs_phb_init();
919}
920
diff --git a/arch/powerpc/platforms/iseries/pci.h b/arch/powerpc/platforms/iseries/pci.h
new file mode 100644
index 00000000000..d9cf974c271
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/pci.h
@@ -0,0 +1,58 @@
1#ifndef _PLATFORMS_ISERIES_PCI_H
2#define _PLATFORMS_ISERIES_PCI_H
3
4/*
5 * Created by Allan Trautman on Tue Feb 20, 2001.
6 *
7 * Define some useful macros for the iSeries pci routines.
8 * Copyright (C) 2001 Allan H Trautman, IBM Corporation
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the:
22 * Free Software Foundation, Inc.,
23 * 59 Temple Place, Suite 330,
24 * Boston, MA 02111-1307 USA
25 *
26 * Change Activity:
27 * Created Feb 20, 2001
28 * Added device reset, March 22, 2001
29 * Ported to ppc64, May 25, 2001
30 * End Change Activity
31 */
32
33/*
34 * Decodes Linux DevFn to iSeries DevFn, bridge device, or function.
35 * For Linux, see PCI_SLOT and PCI_FUNC in include/linux/pci.h
36 */
37
38#define ISERIES_PCI_AGENTID(idsel, func) \
39 (((idsel & 0x0F) << 4) | (func & 0x07))
40#define ISERIES_ENCODE_DEVICE(agentid) \
41 ((0x10) | ((agentid & 0x20) >> 2) | (agentid & 0x07))
42
43#define ISERIES_GET_DEVICE_FROM_SUBBUS(subbus) ((subbus >> 5) & 0x7)
44#define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus) ((subbus >> 2) & 0x7)
45
46struct pci_dev;
47
48#ifdef CONFIG_PCI
49extern void iSeries_pcibios_init(void);
50extern void iSeries_pci_final_fixup(void);
51extern void iSeries_pcibios_fixup_resources(struct pci_dev *dev);
52#else
53static inline void iSeries_pcibios_init(void) { }
54static inline void iSeries_pci_final_fixup(void) { }
55static inline void iSeries_pcibios_fixup_resources(struct pci_dev *dev) {}
56#endif
57
58#endif /* _PLATFORMS_ISERIES_PCI_H */
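For illustration, the decode macros above expanded in a small standalone program; the IdSel, function and sub-bus values are arbitrary examples.

/*
 * Standalone sketch (not kernel code): the macros from pci.h applied to
 * sample values, showing the packed agent id and the sub-bus decode.
 */
#include <stdio.h>

#define ISERIES_PCI_AGENTID(idsel, func)	\
	(((idsel & 0x0F) << 4) | (func & 0x07))
#define ISERIES_GET_DEVICE_FROM_SUBBUS(subbus)		((subbus >> 5) & 0x7)
#define ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus)	((subbus >> 2) & 0x7)

int main(void)
{
	unsigned int agentid = ISERIES_PCI_AGENTID(9, 2);	/* 0x92 */
	unsigned int subbus = 0x4c;				/* example sub-bus byte */

	printf("AgentId(idsel 9, func 2) = 0x%02X\n", agentid);
	printf("subbus 0x%02X -> device %u, function %u\n",
	       subbus, ISERIES_GET_DEVICE_FROM_SUBBUS(subbus),
	       ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus));
	return 0;
}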
diff --git a/arch/powerpc/platforms/iseries/proc.c b/arch/powerpc/platforms/iseries/proc.c
new file mode 100644
index 00000000000..06763682db4
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/proc.c
@@ -0,0 +1,120 @@
1/*
2 * Copyright (C) 2001 Kyle A. Lucke IBM Corporation
3 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19#include <linux/init.h>
20#include <linux/proc_fs.h>
21#include <linux/seq_file.h>
22#include <linux/param.h> /* for HZ */
23#include <asm/paca.h>
24#include <asm/processor.h>
25#include <asm/time.h>
26#include <asm/lppaca.h>
27#include <asm/firmware.h>
28#include <asm/iseries/hv_call_xm.h>
29
30#include "processor_vpd.h"
31#include "main_store.h"
32
33static int __init iseries_proc_create(void)
34{
35 struct proc_dir_entry *e;
36
37 if (!firmware_has_feature(FW_FEATURE_ISERIES))
38 return 0;
39
40 e = proc_mkdir("iSeries", 0);
41 if (!e)
42 return 1;
43
44 return 0;
45}
46core_initcall(iseries_proc_create);
47
48static unsigned long startTitan = 0;
49static unsigned long startTb = 0;
50
51static int proc_titantod_show(struct seq_file *m, void *v)
52{
53 unsigned long tb0, titan_tod;
54
55 tb0 = get_tb();
56 titan_tod = HvCallXm_loadTod();
57
58 seq_printf(m, "Titan\n" );
59 seq_printf(m, " time base = %016lx\n", tb0);
60 seq_printf(m, " titan tod = %016lx\n", titan_tod);
61 seq_printf(m, " xProcFreq = %016x\n",
62 xIoHriProcessorVpd[0].xProcFreq);
63 seq_printf(m, " xTimeBaseFreq = %016x\n",
64 xIoHriProcessorVpd[0].xTimeBaseFreq);
65 seq_printf(m, " tb_ticks_per_jiffy = %lu\n", tb_ticks_per_jiffy);
66 seq_printf(m, " tb_ticks_per_usec = %lu\n", tb_ticks_per_usec);
67
68 if (!startTitan) {
69 startTitan = titan_tod;
70 startTb = tb0;
71 } else {
72 unsigned long titan_usec = (titan_tod - startTitan) >> 12;
73 unsigned long tb_ticks = (tb0 - startTb);
74 unsigned long titan_jiffies = titan_usec / (1000000/HZ);
75 unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ);
76 unsigned long titan_jiff_rem_usec =
77 titan_usec - titan_jiff_usec;
78 unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy;
79 unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy;
80 unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks;
81 unsigned long tb_jiff_rem_usec =
82 tb_jiff_rem_ticks / tb_ticks_per_usec;
83 unsigned long new_tb_ticks_per_jiffy =
84 (tb_ticks * (1000000/HZ))/titan_usec;
85
86 seq_printf(m, " titan elapsed = %lu uSec\n", titan_usec);
87 seq_printf(m, " tb elapsed = %lu ticks\n", tb_ticks);
88 seq_printf(m, " titan jiffies = %lu.%04lu\n", titan_jiffies,
89 titan_jiff_rem_usec);
90 seq_printf(m, " tb jiffies = %lu.%04lu\n", tb_jiffies,
91 tb_jiff_rem_usec);
92 seq_printf(m, " new tb_ticks_per_jiffy = %lu\n",
93 new_tb_ticks_per_jiffy);
94 }
95
96 return 0;
97}
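The jiffy accounting above converts a Titan TOD delta to microseconds with a right shift by 12 and derives a recalibrated tb_ticks_per_jiffy from the two clocks. Here is a standalone sketch of the same arithmetic with made-up deltas and HZ assumed to be 100.

/*
 * Standalone sketch (not kernel code) of the titanTod arithmetic: shift the
 * Titan delta right by 12 to get microseconds, split into whole jiffies,
 * and derive a new tb_ticks_per_jiffy.  The deltas are invented.
 */
#include <stdio.h>

#define HZ 100

int main(void)
{
	unsigned long titan_delta = 40960000UL;	/* raw Titan TOD ticks (made up) */
	unsigned long tb_ticks = 1880000UL;	/* timebase ticks over the same span */

	unsigned long titan_usec = titan_delta >> 12;			/* 10000 us */
	unsigned long titan_jiffies = titan_usec / (1000000 / HZ);	/* 1 jiffy */
	unsigned long new_tb_per_jiffy = (tb_ticks * (1000000 / HZ)) / titan_usec;

	printf("titan elapsed = %lu us, jiffies = %lu\n", titan_usec, titan_jiffies);
	printf("recalibrated tb_ticks_per_jiffy = %lu\n", new_tb_per_jiffy);
	return 0;
}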
98
99static int proc_titantod_open(struct inode *inode, struct file *file)
100{
101 return single_open(file, proc_titantod_show, NULL);
102}
103
104static const struct file_operations proc_titantod_operations = {
105 .open = proc_titantod_open,
106 .read = seq_read,
107 .llseek = seq_lseek,
108 .release = single_release,
109};
110
111static int __init iseries_proc_init(void)
112{
113 if (!firmware_has_feature(FW_FEATURE_ISERIES))
114 return 0;
115
116 proc_create("iSeries/titanTod", S_IFREG|S_IRUGO, NULL,
117 &proc_titantod_operations);
118 return 0;
119}
120__initcall(iseries_proc_init);
diff --git a/arch/powerpc/platforms/iseries/processor_vpd.h b/arch/powerpc/platforms/iseries/processor_vpd.h
new file mode 100644
index 00000000000..7ac5d0d0dbf
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/processor_vpd.h
@@ -0,0 +1,85 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_PROCESSOR_VPD_H
19#define _ISERIES_PROCESSOR_VPD_H
20
21#include <asm/types.h>
22
23/*
24 * This struct maps Processor Vpd that is DMAd to SLIC by CSP
25 */
26struct IoHriProcessorVpd {
27 u8 xFormat; // VPD format indicator x00-x00
28 u8 xProcStatus:8; // Processor State x01-x01
29 u8 xSecondaryThreadCount; // Secondary thread cnt x02-x02
30 u8 xSrcType:1; // Src Type x03-x03
31 u8 xSrcSoft:1; // Src stay soft ...
32 u8 xSrcParable:1; // Src parable ...
33 u8 xRsvd1:5; // Reserved ...
34	u16	xHvPhysicalProcIndex;	// Hypervisor physical proc index x04-x05
35 u16 xRsvd2; // Reserved x06-x07
36 u32 xHwNodeId; // Hardware node id x08-x0B
37 u32 xHwProcId; // Hardware processor id x0C-x0F
38
39 u32 xTypeNum; // Card Type/CCIN number x10-x13
40 u32 xModelNum; // Model/Feature number x14-x17
41 u64 xSerialNum; // Serial number x18-x1F
42 char xPartNum[12]; // Book Part or FPU number x20-x2B
43 char xMfgID[4]; // Manufacturing ID x2C-x2F
44
45 u32 xProcFreq; // Processor Frequency x30-x33
46 u32 xTimeBaseFreq; // Time Base Frequency x34-x37
47
48 u32 xChipEcLevel; // Chip EC Levels x38-x3B
49 u32 xProcIdReg; // PIR SPR value x3C-x3F
50 u32 xPVR; // PVR value x40-x43
51 u8 xRsvd3[12]; // Reserved x44-x4F
52
53 u32 xInstCacheSize; // Instruction cache size in KB x50-x53
54 u32 xInstBlockSize; // Instruction cache block size x54-x57
55 u32 xDataCacheOperandSize; // Data cache operand size x58-x5B
56 u32 xInstCacheOperandSize; // Inst cache operand size x5C-x5F
57
58 u32 xDataL1CacheSizeKB; // L1 data cache size in KB x60-x63
59 u32 xDataL1CacheLineSize; // L1 data cache block size x64-x67
60 u64 xRsvd4; // Reserved x68-x6F
61
62 u32 xDataL2CacheSizeKB; // L2 data cache size in KB x70-x73
63 u32 xDataL2CacheLineSize; // L2 data cache block size x74-x77
64 u64 xRsvd5; // Reserved x78-x7F
65
66 u32 xDataL3CacheSizeKB; // L3 data cache size in KB x80-x83
67 u32 xDataL3CacheLineSize; // L3 data cache block size x84-x87
68 u64 xRsvd6; // Reserved x88-x8F
69
70 u64 xFruLabel; // Card Location Label x90-x97
71 u8 xSlotsOnCard; // Slots on card (0=no slots) x98-x98
72 u8 xPartLocFlag; // Location flag (0-pluggable 1-imbedded) x99-x99
73 u16 xSlotMapIndex; // Index in slot map table x9A-x9B
74 u8 xSmartCardPortNo; // Smart card port number x9C-x9C
75 u8 xRsvd7; // Reserved x9D-x9D
76 u16 xFrameIdAndRackUnit; // Frame ID and rack unit adr x9E-x9F
77
78 u8 xRsvd8[24]; // Reserved xA0-xB7
79
80 char xProcSrc[72]; // CSP format SRC xB8-xFF
81};
82
83extern struct IoHriProcessorVpd xIoHriProcessorVpd[];
84
85#endif /* _ISERIES_PROCESSOR_VPD_H */
diff --git a/arch/powerpc/platforms/iseries/release_data.h b/arch/powerpc/platforms/iseries/release_data.h
new file mode 100644
index 00000000000..6ad7d843e8f
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/release_data.h
@@ -0,0 +1,63 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_RELEASE_DATA_H
19#define _ISERIES_RELEASE_DATA_H
20
21/*
22 * This control block contains the critical information about the
23 * release so that it can be changed in the future (ie, the virtual
24 * address of the OS's NACA).
25 */
26#include <asm/types.h>
27#include "naca.h"
28
29/*
30 * When we IPL a secondary partition, we will check if the
31 * secondary xMinPlicVrmIndex > the primary xVrmIndex.
32 * If it is then this tells PLIC that this secondary is not
33 * supported running on this "old" of a level of PLIC.
34 *
35 * Likewise, we will compare the primary xMinSlicVrmIndex to
36 * the secondary xVrmIndex.
37 * If the primary xMinSlicVrmDelta > secondary xVrmDelta then we
38 * know that this PLIC does not support running an OS "that old".
39 */
40
41#define HVREL_TAGSINACTIVE 0x8000
42#define HVREL_32BIT 0x4000
43#define HVREL_NOSHAREDPROCS 0x2000
44#define HVREL_NOHMT 0x1000
45
46struct HvReleaseData {
47 u32 xDesc; /* Descriptor "HvRD" ebcdic x00-x03 */
48 u16 xSize; /* Size of this control block x04-x05 */
49 u16 xVpdAreasPtrOffset; /* Offset in NACA of ItVpdAreas x06-x07 */
50 struct naca_struct *xSlicNacaAddr; /* Virt addr of SLIC NACA x08-x0F */
51 u32 xMsNucDataOffset; /* Offset of Linux Mapping Data x10-x13 */
52 u32 xRsvd1; /* Reserved x14-x17 */
53 u16 xFlags;
54 u16 xVrmIndex; /* VRM Index of OS image x1A-x1B */
55 u16 xMinSupportedPlicVrmIndex; /* Min PLIC level (soft) x1C-x1D */
56	u16	xMinCompatablePlicVrmIndex;	/* Min PLIC level (hard)  x1E-x1F */
57 char xVrmName[12]; /* Displayable name x20-x2B */
58 char xRsvd3[20]; /* Reserved x2C-x3F */
59};
60
61extern const struct HvReleaseData hvReleaseData;
62
63#endif /* _ISERIES_RELEASE_DATA_H */
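A standalone sketch of the first compatibility rule described in the comment above (a secondary OS image is refused when it requires a newer PLIC than the one running); the field names and version numbers here are illustrative and not taken from hvReleaseData.

/*
 * Standalone sketch (not kernel code): the VRM-index comparison from the
 * comment above.  Values are invented for illustration.
 */
#include <stdio.h>

struct vrm_info {
	unsigned short vrm_index;		/* level this image is */
	unsigned short min_plic_vrm_index;	/* oldest PLIC it will run on */
};

static int secondary_supported(struct vrm_info primary, struct vrm_info secondary)
{
	if (secondary.min_plic_vrm_index > primary.vrm_index)
		return 0;	/* secondary needs a newer PLIC than we have */
	return 1;
}

int main(void)
{
	struct vrm_info plic = { .vrm_index = 530, .min_plic_vrm_index = 510 };
	struct vrm_info os   = { .vrm_index = 540, .min_plic_vrm_index = 535 };

	printf("secondary %s\n",
	       secondary_supported(plic, os) ? "supported" : "not supported");
	return 0;
}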
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
new file mode 100644
index 00000000000..c25a0815c26
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -0,0 +1,717 @@
1/*
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 *
5 * Description:
6 * Architecture- / platform-specific boot-time initialization code for
7 * the IBM iSeries LPAR. Adapted from original code by Grant Erickson and
8 * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
9 * <dan@net4x.com>.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#undef DEBUG
18
19#include <linux/init.h>
20#include <linux/threads.h>
21#include <linux/smp.h>
22#include <linux/param.h>
23#include <linux/string.h>
24#include <linux/seq_file.h>
25#include <linux/kdev_t.h>
26#include <linux/kexec.h>
27#include <linux/major.h>
28#include <linux/root_dev.h>
29#include <linux/kernel.h>
30#include <linux/hrtimer.h>
31#include <linux/tick.h>
32
33#include <asm/processor.h>
34#include <asm/machdep.h>
35#include <asm/page.h>
36#include <asm/mmu.h>
37#include <asm/pgtable.h>
38#include <asm/mmu_context.h>
39#include <asm/cputable.h>
40#include <asm/sections.h>
41#include <asm/iommu.h>
42#include <asm/firmware.h>
43#include <asm/system.h>
44#include <asm/time.h>
45#include <asm/paca.h>
46#include <asm/cache.h>
47#include <asm/abs_addr.h>
48#include <asm/iseries/hv_lp_config.h>
49#include <asm/iseries/hv_call_event.h>
50#include <asm/iseries/hv_call_xm.h>
51#include <asm/iseries/it_lp_queue.h>
52#include <asm/iseries/mf.h>
53#include <asm/iseries/hv_lp_event.h>
54#include <asm/iseries/lpar_map.h>
55#include <asm/udbg.h>
56#include <asm/irq.h>
57
58#include "naca.h"
59#include "setup.h"
60#include "irq.h"
61#include "vpd_areas.h"
62#include "processor_vpd.h"
63#include "it_lp_naca.h"
64#include "main_store.h"
65#include "call_sm.h"
66#include "call_hpt.h"
67#include "pci.h"
68
69#ifdef DEBUG
70#define DBG(fmt...) udbg_printf(fmt)
71#else
72#define DBG(fmt...)
73#endif
74
75/* Function Prototypes */
76static unsigned long build_iSeries_Memory_Map(void);
77static void iseries_shared_idle(void);
78static void iseries_dedicated_idle(void);
79
80
81struct MemoryBlock {
82 unsigned long absStart;
83 unsigned long absEnd;
84 unsigned long logicalStart;
85 unsigned long logicalEnd;
86};
87
88/*
89 * Process the main store vpd to determine where the holes in memory are
90 * and return the number of physical blocks and fill in the array of
91 * block data.
92 */
93static unsigned long iSeries_process_Condor_mainstore_vpd(
94 struct MemoryBlock *mb_array, unsigned long max_entries)
95{
96 unsigned long holeFirstChunk, holeSizeChunks;
97 unsigned long numMemoryBlocks = 1;
98 struct IoHriMainStoreSegment4 *msVpd =
99 (struct IoHriMainStoreSegment4 *)xMsVpd;
100 unsigned long holeStart = msVpd->nonInterleavedBlocksStartAdr;
101 unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr;
102 unsigned long holeSize = holeEnd - holeStart;
103
104 printk("Mainstore_VPD: Condor\n");
105 /*
106 * Determine if absolute memory has any
107 * holes so that we can interpret the
108 * access map we get back from the hypervisor
109 * correctly.
110 */
111 mb_array[0].logicalStart = 0;
112 mb_array[0].logicalEnd = 0x100000000UL;
113 mb_array[0].absStart = 0;
114 mb_array[0].absEnd = 0x100000000UL;
115
116 if (holeSize) {
117 numMemoryBlocks = 2;
118 holeStart = holeStart & 0x000fffffffffffffUL;
119 holeStart = addr_to_chunk(holeStart);
120 holeFirstChunk = holeStart;
121 holeSize = addr_to_chunk(holeSize);
122 holeSizeChunks = holeSize;
123 printk( "Main store hole: start chunk = %0lx, size = %0lx chunks\n",
124 holeFirstChunk, holeSizeChunks );
125 mb_array[0].logicalEnd = holeFirstChunk;
126 mb_array[0].absEnd = holeFirstChunk;
127 mb_array[1].logicalStart = holeFirstChunk;
128 mb_array[1].logicalEnd = 0x100000000UL - holeSizeChunks;
129 mb_array[1].absStart = holeFirstChunk + holeSizeChunks;
130 mb_array[1].absEnd = 0x100000000UL;
131 }
132 return numMemoryBlocks;
133}
134
135#define MaxSegmentAreas 32
136#define MaxSegmentAdrRangeBlocks 128
137#define MaxAreaRangeBlocks 4
138
139static unsigned long iSeries_process_Regatta_mainstore_vpd(
140 struct MemoryBlock *mb_array, unsigned long max_entries)
141{
142 struct IoHriMainStoreSegment5 *msVpdP =
143 (struct IoHriMainStoreSegment5 *)xMsVpd;
144 unsigned long numSegmentBlocks = 0;
145 u32 existsBits = msVpdP->msAreaExists;
146 unsigned long area_num;
147
148 printk("Mainstore_VPD: Regatta\n");
149
150 for (area_num = 0; area_num < MaxSegmentAreas; ++area_num ) {
151 unsigned long numAreaBlocks;
152 struct IoHriMainStoreArea4 *currentArea;
153
154 if (existsBits & 0x80000000) {
155 unsigned long block_num;
156
157 currentArea = &msVpdP->msAreaArray[area_num];
158 numAreaBlocks = currentArea->numAdrRangeBlocks;
159 printk("ms_vpd: processing area %2ld blocks=%ld",
160 area_num, numAreaBlocks);
161 for (block_num = 0; block_num < numAreaBlocks;
162 ++block_num ) {
163 /* Process an address range block */
164 struct MemoryBlock tempBlock;
165 unsigned long i;
166
167 tempBlock.absStart =
168 (unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart;
169 tempBlock.absEnd =
170 (unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd;
171 tempBlock.logicalStart = 0;
172 tempBlock.logicalEnd = 0;
173 printk("\n block %ld absStart=%016lx absEnd=%016lx",
174 block_num, tempBlock.absStart,
175 tempBlock.absEnd);
176
177 for (i = 0; i < numSegmentBlocks; ++i) {
178 if (mb_array[i].absStart ==
179 tempBlock.absStart)
180 break;
181 }
182 if (i == numSegmentBlocks) {
183 if (numSegmentBlocks == max_entries)
184 panic("iSeries_process_mainstore_vpd: too many memory blocks");
185 mb_array[numSegmentBlocks] = tempBlock;
186 ++numSegmentBlocks;
187 } else
188 printk(" (duplicate)");
189 }
190 printk("\n");
191 }
192 existsBits <<= 1;
193 }
194 /* Now sort the blocks found into ascending sequence */
195 if (numSegmentBlocks > 1) {
196 unsigned long m, n;
197
198 for (m = 0; m < numSegmentBlocks - 1; ++m) {
199 for (n = numSegmentBlocks - 1; m < n; --n) {
200 if (mb_array[n].absStart <
201 mb_array[n-1].absStart) {
202 struct MemoryBlock tempBlock;
203
204 tempBlock = mb_array[n];
205 mb_array[n] = mb_array[n-1];
206 mb_array[n-1] = tempBlock;
207 }
208 }
209 }
210 }
211 /*
212 * Assign "logical" addresses to each block. These
213 * addresses correspond to the hypervisor "bitmap" space.
214 * Convert all addresses into units of 256K chunks.
215 */
216 {
217 unsigned long i, nextBitmapAddress;
218
219 printk("ms_vpd: %ld sorted memory blocks\n", numSegmentBlocks);
220 nextBitmapAddress = 0;
221 for (i = 0; i < numSegmentBlocks; ++i) {
222 unsigned long length = mb_array[i].absEnd -
223 mb_array[i].absStart;
224
225 mb_array[i].logicalStart = nextBitmapAddress;
226 mb_array[i].logicalEnd = nextBitmapAddress + length;
227 nextBitmapAddress += length;
228 printk(" Bitmap range: %016lx - %016lx\n"
229 " Absolute range: %016lx - %016lx\n",
230 mb_array[i].logicalStart,
231 mb_array[i].logicalEnd,
232 mb_array[i].absStart, mb_array[i].absEnd);
233 mb_array[i].absStart = addr_to_chunk(mb_array[i].absStart &
234 0x000fffffffffffffUL);
235 mb_array[i].absEnd = addr_to_chunk(mb_array[i].absEnd &
236 0x000fffffffffffffUL);
237 mb_array[i].logicalStart =
238 addr_to_chunk(mb_array[i].logicalStart);
239 mb_array[i].logicalEnd = addr_to_chunk(mb_array[i].logicalEnd);
240 }
241 }
242
243 return numSegmentBlocks;
244}
245
246static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
247 unsigned long max_entries)
248{
249 unsigned long i;
250 unsigned long mem_blocks = 0;
251
252 if (mmu_has_feature(MMU_FTR_SLB))
253 mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
254 max_entries);
255 else
256 mem_blocks = iSeries_process_Condor_mainstore_vpd(mb_array,
257 max_entries);
258
259 printk("Mainstore_VPD: numMemoryBlocks = %ld\n", mem_blocks);
260 for (i = 0; i < mem_blocks; ++i) {
261 printk("Mainstore_VPD: block %3ld logical chunks %016lx - %016lx\n"
262 " abs chunks %016lx - %016lx\n",
263 i, mb_array[i].logicalStart, mb_array[i].logicalEnd,
264 mb_array[i].absStart, mb_array[i].absEnd);
265 }
266 return mem_blocks;
267}
268
269static void __init iSeries_get_cmdline(void)
270{
271 char *p, *q;
272
273 /* copy the command line parameter from the primary VSP */
274 HvCallEvent_dmaToSp(cmd_line, 2 * 64* 1024, 256,
275 HvLpDma_Direction_RemoteToLocal);
276
277 p = cmd_line;
278 q = cmd_line + 255;
279 while(p < q) {
280 if (!*p || *p == '\n')
281 break;
282 ++p;
283 }
284 *p = 0;
285}
286
287static void __init iSeries_init_early(void)
288{
289 DBG(" -> iSeries_init_early()\n");
290
291 /* Snapshot the timebase, for use in later recalibration */
292 iSeries_time_init_early();
293
294 /*
295 * Initialize the DMA/TCE management
296 */
297 iommu_init_early_iSeries();
298
299 /* Initialize machine-dependency vectors */
300#ifdef CONFIG_SMP
301 smp_init_iSeries();
302#endif
303
304 /* Associate Lp Event Queue 0 with processor 0 */
305 HvCallEvent_setLpEventQueueInterruptProc(0, 0);
306
307 mf_init();
308
309 DBG(" <- iSeries_init_early()\n");
310}
311
312struct mschunks_map mschunks_map = {
313 /* XXX We don't use these, but Piranha might need them. */
314 .chunk_size = MSCHUNKS_CHUNK_SIZE,
315 .chunk_shift = MSCHUNKS_CHUNK_SHIFT,
316 .chunk_mask = MSCHUNKS_OFFSET_MASK,
317};
318EXPORT_SYMBOL(mschunks_map);
319
320static void mschunks_alloc(unsigned long num_chunks)
321{
322 klimit = _ALIGN(klimit, sizeof(u32));
323 mschunks_map.mapping = (u32 *)klimit;
324 klimit += num_chunks * sizeof(u32);
325 mschunks_map.num_chunks = num_chunks;
326}
327
328/*
329 * The iSeries may have very large memories ( > 128 GB ) and a partition
330 * may get memory in "chunks" that may be anywhere in the 2**52 real
331 * address space. The chunks are 256K in size. To map this to the
332 * memory model Linux expects, the AS/400 specific code builds a
333 * translation table to translate what Linux thinks are "physical"
334 * addresses to the actual real addresses. This allows us to make
335 * it appear to Linux that we have contiguous memory starting at
336 * physical address zero while in fact this could be far from the truth.
337 * To avoid confusion, I'll let the words physical and/or real address
338 * apply to the Linux addresses while I'll use "absolute address" to
339 * refer to the actual hardware real address.
340 *
341 * build_iSeries_Memory_Map gets information from the Hypervisor and
342 * looks at the Main Store VPD to determine the absolute addresses
343 * of the memory that has been assigned to our partition and builds
344 * a table used to translate Linux's physical addresses to these
345 * absolute addresses. Absolute addresses are needed when
346 * communicating with the hypervisor (e.g. to build HPT entries)
347 *
348 * Returns the physical memory size
349 */
350
351static unsigned long __init build_iSeries_Memory_Map(void)
352{
353 u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
354 u32 nextPhysChunk;
355 u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
356 u32 totalChunks,moreChunks;
357 u32 currChunk, thisChunk, absChunk;
358 u32 currDword;
359 u32 chunkBit;
360 u64 map;
361 struct MemoryBlock mb[32];
362 unsigned long numMemoryBlocks, curBlock;
363
364 /* Chunk size on iSeries is 256K bytes */
365 totalChunks = (u32)HvLpConfig_getMsChunks();
366 mschunks_alloc(totalChunks);
367
368 /*
369 * Get absolute address of our load area
370 * and map it to physical address 0
371 * This guarantees that the loadarea ends up at physical 0
372 * otherwise, it might not be returned by PLIC as the first
373 * chunks
374 */
375
376 loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
377 loadAreaSize = itLpNaca.xLoadAreaChunks;
378
379 /*
380 * Only add the pages already mapped here.
381 * Otherwise we might add the hpt pages
382 * The rest of the pages of the load area
383 * aren't in the HPT yet and can still
384 * be assigned an arbitrary physical address
385 */
386 if ((loadAreaSize * 64) > HvPagesToMap)
387 loadAreaSize = HvPagesToMap / 64;
388
389 loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;
390
391 /*
392 * TODO Do we need to do something if the HPT is in the 64MB load area?
393 * This would be required if the itLpNaca.xLoadAreaChunks includes
394 * the HPT size
395 */
396
397 printk("Mapping load area - physical addr = 0000000000000000\n"
398 " absolute addr = %016lx\n",
399 chunk_to_addr(loadAreaFirstChunk));
400 printk("Load area size %dK\n", loadAreaSize * 256);
401
402 for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk)
403 mschunks_map.mapping[nextPhysChunk] =
404 loadAreaFirstChunk + nextPhysChunk;
405
406 /*
407 * Get absolute address of our HPT and remember it so
408 * we won't map it to any physical address
409 */
410 hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
411 hptSizePages = (u32)HvCallHpt_getHptPages();
412 hptSizeChunks = hptSizePages >>
413 (MSCHUNKS_CHUNK_SHIFT - HW_PAGE_SHIFT);
414 hptLastChunk = hptFirstChunk + hptSizeChunks - 1;
415
416 printk("HPT absolute addr = %016lx, size = %dK\n",
417 chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
418
419 /*
420 * Determine if absolute memory has any
421 * holes so that we can interpret the
422 * access map we get back from the hypervisor
423 * correctly.
424 */
425 numMemoryBlocks = iSeries_process_mainstore_vpd(mb, 32);
426
427 /*
428 * Process the main store access map from the hypervisor
429 * to build up our physical -> absolute translation table
430 */
431 curBlock = 0;
432 currChunk = 0;
433 currDword = 0;
434 moreChunks = totalChunks;
435
436 while (moreChunks) {
437 map = HvCallSm_get64BitsOfAccessMap(itLpNaca.xLpIndex,
438 currDword);
439 thisChunk = currChunk;
440 while (map) {
441 chunkBit = map >> 63;
442 map <<= 1;
443 if (chunkBit) {
444 --moreChunks;
445 while (thisChunk >= mb[curBlock].logicalEnd) {
446 ++curBlock;
447 if (curBlock >= numMemoryBlocks)
448 panic("out of memory blocks");
449 }
450 if (thisChunk < mb[curBlock].logicalStart)
451 panic("memory block error");
452
453 absChunk = mb[curBlock].absStart +
454 (thisChunk - mb[curBlock].logicalStart);
455 if (((absChunk < hptFirstChunk) ||
456 (absChunk > hptLastChunk)) &&
457 ((absChunk < loadAreaFirstChunk) ||
458 (absChunk > loadAreaLastChunk))) {
459 mschunks_map.mapping[nextPhysChunk] =
460 absChunk;
461 ++nextPhysChunk;
462 }
463 }
464 ++thisChunk;
465 }
466 ++currDword;
467 currChunk += 64;
468 }
469
470 /*
471 * main store size (in chunks) is
472 * totalChunks - hptSizeChunks
473 * which should be equal to
474 * nextPhysChunk
475 */
476 return chunk_to_addr(nextPhysChunk);
477}
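Once built, the mapping table is consumed by the address-translation helpers included from asm/abs_addr.h: a Linux physical address is split into a 256K chunk number and an offset, and the chunk number is replaced by its absolute counterpart. Below is a standalone sketch of that lookup; the helper name and the sample table are illustrative only, not the kernel's implementation.

/*
 * Standalone sketch (not kernel code) of how the chunk mapping translates a
 * Linux "physical" address into an absolute address for the hypervisor.
 */
#include <stdio.h>

#define CHUNK_SHIFT	18			/* 256K chunks */
#define CHUNK_MASK	((1UL << CHUNK_SHIFT) - 1)

static unsigned long mapping[4] = { 0x40, 0x41, 0x90, 0x91 };	/* made-up table */

static unsigned long phys_to_abs_sketch(unsigned long pa)
{
	unsigned long chunk = pa >> CHUNK_SHIFT;	/* which 256K chunk */

	return (mapping[chunk] << CHUNK_SHIFT) | (pa & CHUNK_MASK);
}

int main(void)
{
	unsigned long pa = (2UL << CHUNK_SHIFT) + 0x1234;	/* chunk 2, offset 0x1234 */

	printf("phys %#lx -> abs %#lx\n", pa, phys_to_abs_sketch(pa));
	return 0;
}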
478
479/*
480 * Document me.
481 */
482static void __init iSeries_setup_arch(void)
483{
484 if (get_lppaca()->shared_proc) {
485 ppc_md.idle_loop = iseries_shared_idle;
486 printk(KERN_DEBUG "Using shared processor idle loop\n");
487 } else {
488 ppc_md.idle_loop = iseries_dedicated_idle;
489 printk(KERN_DEBUG "Using dedicated idle loop\n");
490 }
491
492 /* Setup the Lp Event Queue */
493 setup_hvlpevent_queue();
494
495 printk("Max logical processors = %d\n",
496 itVpdAreas.xSlicMaxLogicalProcs);
497 printk("Max physical processors = %d\n",
498 itVpdAreas.xSlicMaxPhysicalProcs);
499
500 iSeries_pcibios_init();
501}
502
503static void iSeries_show_cpuinfo(struct seq_file *m)
504{
505 seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
506}
507
508static void __init iSeries_progress(char * st, unsigned short code)
509{
510 printk("Progress: [%04x] - %s\n", (unsigned)code, st);
511 mf_display_progress(code);
512}
513
514static void __init iSeries_fixup_klimit(void)
515{
516 /*
517 * Change klimit to take into account any ram disk
518 * that may be included
519 */
520 if (naca.xRamDisk)
521 klimit = KERNELBASE + (u64)naca.xRamDisk +
522 (naca.xRamDiskSize * HW_PAGE_SIZE);
523}
524
525static int __init iSeries_src_init(void)
526{
527 /* clear the progress line */
528 if (firmware_has_feature(FW_FEATURE_ISERIES))
529 ppc_md.progress(" ", 0xffff);
530 return 0;
531}
532
533late_initcall(iSeries_src_init);
534
535static inline void process_iSeries_events(void)
536{
537 asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
538}
539
540static void yield_shared_processor(void)
541{
542 unsigned long tb;
543
544 HvCall_setEnabledInterrupts(HvCall_MaskIPI |
545 HvCall_MaskLpEvent |
546 HvCall_MaskLpProd |
547 HvCall_MaskTimeout);
548
549 tb = get_tb();
550 /* Compute future tb value when yield should expire */
551 HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);
552
553 /*
554 * The decrementer stops during the yield. Force a fake decrementer
555 * here and let the timer_interrupt code sort out the actual time.
556 */
557 get_lppaca()->int_dword.fields.decr_int = 1;
558 ppc64_runlatch_on();
559 process_iSeries_events();
560}
561
562static void iseries_shared_idle(void)
563{
564 while (1) {
565 tick_nohz_stop_sched_tick(1);
566 while (!need_resched() && !hvlpevent_is_pending()) {
567 local_irq_disable();
568 ppc64_runlatch_off();
569
570 /* Recheck with irqs off */
571 if (!need_resched() && !hvlpevent_is_pending())
572 yield_shared_processor();
573
574 HMT_medium();
575 local_irq_enable();
576 }
577
578 ppc64_runlatch_on();
579 tick_nohz_restart_sched_tick();
580
581 if (hvlpevent_is_pending())
582 process_iSeries_events();
583
584 preempt_enable_no_resched();
585 schedule();
586 preempt_disable();
587 }
588}
589
590static void iseries_dedicated_idle(void)
591{
592 set_thread_flag(TIF_POLLING_NRFLAG);
593
594 while (1) {
595 tick_nohz_stop_sched_tick(1);
596 if (!need_resched()) {
597 while (!need_resched()) {
598 ppc64_runlatch_off();
599 HMT_low();
600
601 if (hvlpevent_is_pending()) {
602 HMT_medium();
603 ppc64_runlatch_on();
604 process_iSeries_events();
605 }
606 }
607
608 HMT_medium();
609 }
610
611 ppc64_runlatch_on();
612 tick_nohz_restart_sched_tick();
613 preempt_enable_no_resched();
614 schedule();
615 preempt_disable();
616 }
617}
618
619static void __iomem *iseries_ioremap(phys_addr_t address, unsigned long size,
620 unsigned long flags, void *caller)
621{
622 return (void __iomem *)address;
623}
624
625static void iseries_iounmap(volatile void __iomem *token)
626{
627}
628
629static int __init iseries_probe(void)
630{
631 unsigned long root = of_get_flat_dt_root();
632 if (!of_flat_dt_is_compatible(root, "IBM,iSeries"))
633 return 0;
634
635 hpte_init_iSeries();
636 /* iSeries does not support 16M pages */
637 cur_cpu_spec->mmu_features &= ~MMU_FTR_16M_PAGE;
638
639 return 1;
640}
641
642#ifdef CONFIG_KEXEC
643static int iseries_kexec_prepare(struct kimage *image)
644{
645 return -ENOSYS;
646}
647#endif
648
649define_machine(iseries) {
650 .name = "iSeries",
651 .setup_arch = iSeries_setup_arch,
652 .show_cpuinfo = iSeries_show_cpuinfo,
653 .init_IRQ = iSeries_init_IRQ,
654 .get_irq = iSeries_get_irq,
655 .init_early = iSeries_init_early,
656 .pcibios_fixup = iSeries_pci_final_fixup,
657 .pcibios_fixup_resources= iSeries_pcibios_fixup_resources,
658 .restart = mf_reboot,
659 .power_off = mf_power_off,
660 .halt = mf_power_off,
661 .get_boot_time = iSeries_get_boot_time,
662 .set_rtc_time = iSeries_set_rtc_time,
663 .get_rtc_time = iSeries_get_rtc_time,
664 .calibrate_decr = generic_calibrate_decr,
665 .progress = iSeries_progress,
666 .probe = iseries_probe,
667 .ioremap = iseries_ioremap,
668 .iounmap = iseries_iounmap,
669#ifdef CONFIG_KEXEC
670 .machine_kexec_prepare = iseries_kexec_prepare,
671#endif
672 /* XXX Implement enable_pmcs for iSeries */
673};
674
675void * __init iSeries_early_setup(void)
676{
677 unsigned long phys_mem_size;
678
679 /* Identify CPU type. This is done again by the common code later
680 * on but calling this function multiple times is fine.
681 */
682 identify_cpu(0, mfspr(SPRN_PVR));
683 initialise_paca(&boot_paca, 0);
684
685 powerpc_firmware_features |= FW_FEATURE_ISERIES;
686 powerpc_firmware_features |= FW_FEATURE_LPAR;
687
688#ifdef CONFIG_SMP
689 /* On iSeries we know we can never have more than 64 cpus */
690	nr_cpu_ids = min(nr_cpu_ids, 64);
691#endif
692
693 iSeries_fixup_klimit();
694
695 /*
696 * Initialize the table which translate Linux physical addresses to
697 * AS/400 absolute addresses
698 */
699 phys_mem_size = build_iSeries_Memory_Map();
700
701 iSeries_get_cmdline();
702
703 return (void *) __pa(build_flat_dt(phys_mem_size));
704}
705
706static void hvputc(char c)
707{
708 if (c == '\n')
709 hvputc('\r');
710
711 HvCall_writeLogBuffer(&c, 1);
712}
713
714void __init udbg_init_iseries(void)
715{
716 udbg_putc = hvputc;
717}
diff --git a/arch/powerpc/platforms/iseries/setup.h b/arch/powerpc/platforms/iseries/setup.h
new file mode 100644
index 00000000000..729754bbb01
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/setup.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 *
5 * Description:
6 * Architecture- / platform-specific boot-time initialization code for
7 * the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and
8 * code by Gary Thomas, Cort Dougan <cort@cs.nmt.edu>, and Dan Malek
9 * <dan@netx4.com>.
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License
13 * as published by the Free Software Foundation; either version
14 * 2 of the License, or (at your option) any later version.
15 */
16
17#ifndef __ISERIES_SETUP_H__
18#define __ISERIES_SETUP_H__
19
20extern void *iSeries_early_setup(void);
21extern unsigned long iSeries_get_boot_time(void);
22extern int iSeries_set_rtc_time(struct rtc_time *tm);
23extern void iSeries_get_rtc_time(struct rtc_time *tm);
24
25extern void *build_flat_dt(unsigned long phys_mem_size);
26
27#endif /* __ISERIES_SETUP_H__ */
diff --git a/arch/powerpc/platforms/iseries/smp.c b/arch/powerpc/platforms/iseries/smp.c
new file mode 100644
index 00000000000..8bda9be06fa
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/smp.c
@@ -0,0 +1,89 @@
1/*
2 * SMP support for iSeries machines.
3 *
4 * Dave Engebretsen, Peter Bergner, and
5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
6 *
7 * Plus various changes from other IBM teams...
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#undef DEBUG
16
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/sched.h>
20#include <linux/smp.h>
21#include <linux/interrupt.h>
22#include <linux/kernel_stat.h>
23#include <linux/delay.h>
24#include <linux/init.h>
25#include <linux/spinlock.h>
26#include <linux/cache.h>
27#include <linux/err.h>
28#include <linux/sysdev.h>
29#include <linux/cpu.h>
30
31#include <asm/ptrace.h>
32#include <linux/atomic.h>
33#include <asm/irq.h>
34#include <asm/page.h>
35#include <asm/pgtable.h>
36#include <asm/io.h>
37#include <asm/smp.h>
38#include <asm/paca.h>
39#include <asm/iseries/hv_call.h>
40#include <asm/time.h>
41#include <asm/machdep.h>
42#include <asm/cputable.h>
43#include <asm/system.h>
44
45static void smp_iSeries_cause_ipi(int cpu, unsigned long data)
46{
47 HvCall_sendIPI(&(paca[cpu]));
48}
49
50static int smp_iSeries_probe(void)
51{
52 return cpumask_weight(cpu_possible_mask);
53}
54
55static int smp_iSeries_kick_cpu(int nr)
56{
57 BUG_ON((nr < 0) || (nr >= NR_CPUS));
58
59 /* Verify that our partition has a processor nr */
60 if (lppaca_of(nr).dyn_proc_status >= 2)
61 return -ENOENT;
62
63 /* The processor is currently spinning, waiting
64 * for the cpu_start field to become non-zero
65 * After we set cpu_start, the processor will
66 * continue on to secondary_start in iSeries_head.S
67 */
68 paca[nr].cpu_start = 1;
69
70 return 0;
71}
72
73static void __devinit smp_iSeries_setup_cpu(int nr)
74{
75}
76
77static struct smp_ops_t iSeries_smp_ops = {
78 .message_pass = NULL, /* Use smp_muxed_ipi_message_pass */
79 .cause_ipi = smp_iSeries_cause_ipi,
80 .probe = smp_iSeries_probe,
81 .kick_cpu = smp_iSeries_kick_cpu,
82 .setup_cpu = smp_iSeries_setup_cpu,
83};
84
85/* This is called very early. */
86void __init smp_init_iSeries(void)
87{
88 smp_ops = &iSeries_smp_ops;
89}
diff --git a/arch/powerpc/platforms/iseries/spcomm_area.h b/arch/powerpc/platforms/iseries/spcomm_area.h
new file mode 100644
index 00000000000..598b7c14573
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/spcomm_area.h
@@ -0,0 +1,34 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18
19#ifndef _ISERIES_SPCOMM_AREA_H
20#define _ISERIES_SPCOMM_AREA_H
21
22
23struct SpCommArea {
24 u32 xDesc; // Descriptor (only in new formats) 000-003
25 u8 xFormat; // Format (only in new formats) 004-004
26 u8 xRsvd1[11]; // Reserved 005-00F
27 u64 xRawTbAtIplStart; // Raw HW TB value when IPL is started 010-017
28 u64 xRawTodAtIplStart; // Raw HW TOD value when IPL is started 018-01F
29 u64 xBcdTimeAtIplStart; // BCD time when IPL is started 020-027
30 u64 xBcdTimeAtOsStart; // BCD time when OS passed control 028-02F
31 u8 xRsvd2[80]; // Reserved 030-07F
32};
33
34#endif /* _ISERIES_SPCOMM_AREA_H */
diff --git a/arch/powerpc/platforms/iseries/vio.c b/arch/powerpc/platforms/iseries/vio.c
new file mode 100644
index 00000000000..b6db7cef83b
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/vio.c
@@ -0,0 +1,556 @@
1/*
2 * Legacy iSeries specific vio initialisation
3 * that needs to be built in (not a module).
4 *
5 * © Copyright 2007 IBM Corporation
6 * Author: Stephen Rothwell
7 * Some parts collected from various other files
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation; either version 2 of the
12 * License, or (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software Foundation,
21 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23#include <linux/of.h>
24#include <linux/init.h>
25#include <linux/slab.h>
26#include <linux/completion.h>
27#include <linux/proc_fs.h>
28#include <linux/module.h>
29
30#include <asm/firmware.h>
31#include <asm/vio.h>
32#include <asm/iseries/vio.h>
33#include <asm/iseries/iommu.h>
34#include <asm/iseries/hv_types.h>
35#include <asm/iseries/hv_lp_event.h>
36
37#define FIRST_VTY 0
38#define NUM_VTYS 1
39#define FIRST_VSCSI (FIRST_VTY + NUM_VTYS)
40#define NUM_VSCSIS 1
41#define FIRST_VLAN (FIRST_VSCSI + NUM_VSCSIS)
42#define NUM_VLANS HVMAXARCHITECTEDVIRTUALLANS
43#define FIRST_VIODASD (FIRST_VLAN + NUM_VLANS)
44#define NUM_VIODASDS HVMAXARCHITECTEDVIRTUALDISKS
45#define FIRST_VIOCD (FIRST_VIODASD + NUM_VIODASDS)
46#define NUM_VIOCDS HVMAXARCHITECTEDVIRTUALCDROMS
47#define FIRST_VIOTAPE (FIRST_VIOCD + NUM_VIOCDS)
48#define NUM_VIOTAPES HVMAXARCHITECTEDVIRTUALTAPES
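These constants carve the /vdevice "reg" space into one contiguous range per device class, so each virtual device's unit number maps to a unique address. A standalone sketch of the resulting unit addresses follows; the HVMAXARCHITECTED counts used here are assumptions, not the values from hv_types.h.

/*
 * Standalone sketch (not kernel code) of the unit-address layout built by
 * the FIRST_ and NUM_ constants above.  The per-class counts are assumed.
 */
#include <stdio.h>

#define NUM_VTYS	1
#define NUM_VSCSIS	1
#define NUM_VLANS	16	/* assumed */
#define NUM_VIODASDS	32	/* assumed */

#define FIRST_VTY	0
#define FIRST_VSCSI	(FIRST_VTY + NUM_VTYS)
#define FIRST_VLAN	(FIRST_VSCSI + NUM_VSCSIS)
#define FIRST_VIODASD	(FIRST_VLAN + NUM_VLANS)
#define FIRST_VIOCD	(FIRST_VIODASD + NUM_VIODASDS)

int main(void)
{
	printf("viodasd unit 3 -> reg 0x%02x\n", (unsigned int)(FIRST_VIODASD + 3));
	printf("viocd   unit 0 -> reg 0x%02x\n", (unsigned int)(FIRST_VIOCD + 0));
	return 0;
}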
49
50struct vio_waitevent {
51 struct completion com;
52 int rc;
53 u16 sub_result;
54};
55
56struct vio_resource {
57 char rsrcname[10];
58 char type[4];
59 char model[3];
60};
61
62static struct property *new_property(const char *name, int length,
63 const void *value)
64{
65 struct property *np = kzalloc(sizeof(*np) + strlen(name) + 1 + length,
66 GFP_KERNEL);
67
68 if (!np)
69 return NULL;
70 np->name = (char *)(np + 1);
71 np->value = np->name + strlen(name) + 1;
72 strcpy(np->name, name);
73 memcpy(np->value, value, length);
74 np->length = length;
75 return np;
76}
77
78static void free_property(struct property *np)
79{
80 kfree(np);
81}
82
83static struct device_node *new_node(const char *path,
84 struct device_node *parent)
85{
86 struct device_node *np = kzalloc(sizeof(*np), GFP_KERNEL);
87
88 if (!np)
89 return NULL;
90 np->full_name = kstrdup(path, GFP_KERNEL);
91 if (!np->full_name) {
92 kfree(np);
93 return NULL;
94 }
95 of_node_set_flag(np, OF_DYNAMIC);
96 kref_init(&np->kref);
97 np->parent = of_node_get(parent);
98 return np;
99}
100
101static void free_node(struct device_node *np)
102{
103 struct property *next;
104 struct property *prop;
105
106 next = np->properties;
107 while (next) {
108 prop = next;
109 next = prop->next;
110 free_property(prop);
111 }
112 of_node_put(np->parent);
113 kfree(np->full_name);
114 kfree(np);
115}
116
117static int add_string_property(struct device_node *np, const char *name,
118 const char *value)
119{
120 struct property *nprop = new_property(name, strlen(value) + 1, value);
121
122 if (!nprop)
123 return 0;
124 prom_add_property(np, nprop);
125 return 1;
126}
127
128static int add_raw_property(struct device_node *np, const char *name,
129 int length, const void *value)
130{
131 struct property *nprop = new_property(name, length, value);
132
133 if (!nprop)
134 return 0;
135 prom_add_property(np, nprop);
136 return 1;
137}
138
139static struct device_node *do_device_node(struct device_node *parent,
140 const char *name, u32 reg, u32 unit, const char *type,
141 const char *compat, struct vio_resource *res)
142{
143 struct device_node *np;
144 char path[32];
145
146 snprintf(path, sizeof(path), "/vdevice/%s@%08x", name, reg);
147 np = new_node(path, parent);
148 if (!np)
149 return NULL;
150 if (!add_string_property(np, "name", name) ||
151 !add_string_property(np, "device_type", type) ||
152 !add_string_property(np, "compatible", compat) ||
153 !add_raw_property(np, "reg", sizeof(reg), &reg) ||
154 !add_raw_property(np, "linux,unit_address",
155 sizeof(unit), &unit)) {
156 goto node_free;
157 }
158 if (res) {
159 if (!add_raw_property(np, "linux,vio_rsrcname",
160 sizeof(res->rsrcname), res->rsrcname) ||
161 !add_raw_property(np, "linux,vio_type",
162 sizeof(res->type), res->type) ||
163 !add_raw_property(np, "linux,vio_model",
164 sizeof(res->model), res->model))
165 goto node_free;
166 }
167 np->name = of_get_property(np, "name", NULL);
168 np->type = of_get_property(np, "device_type", NULL);
169 of_attach_node(np);
170#ifdef CONFIG_PROC_DEVICETREE
171 if (parent->pde) {
172 struct proc_dir_entry *ent;
173
174 ent = proc_mkdir(strrchr(np->full_name, '/') + 1, parent->pde);
175 if (ent)
176 proc_device_tree_add_node(np, ent);
177 }
178#endif
179 return np;
180
181 node_free:
182 free_node(np);
183 return NULL;
184}
185
186/*
187 * This is here so that we can dynamically add viodasd
188 * devices without exposing all the above infrastructure.
189 */
190struct vio_dev *vio_create_viodasd(u32 unit)
191{
192 struct device_node *vio_root;
193 struct device_node *np;
194 struct vio_dev *vdev = NULL;
195
196 vio_root = of_find_node_by_path("/vdevice");
197 if (!vio_root)
198 return NULL;
199 np = do_device_node(vio_root, "viodasd", FIRST_VIODASD + unit, unit,
200 "block", "IBM,iSeries-viodasd", NULL);
201 of_node_put(vio_root);
202 if (np) {
203 vdev = vio_register_device_node(np);
204 if (!vdev)
205 free_node(np);
206 }
207 return vdev;
208}
209EXPORT_SYMBOL_GPL(vio_create_viodasd);
210
211static void __init handle_block_event(struct HvLpEvent *event)
212{
213 struct vioblocklpevent *bevent = (struct vioblocklpevent *)event;
214 struct vio_waitevent *pwe;
215
216 if (event == NULL)
217 /* Notification that a partition went away! */
218 return;
219 /* First, we should NEVER get an int here...only acks */
220 if (hvlpevent_is_int(event)) {
221 printk(KERN_WARNING "handle_viod_request: "
222 "Yikes! got an int in viodasd event handler!\n");
223 if (hvlpevent_need_ack(event)) {
224 event->xRc = HvLpEvent_Rc_InvalidSubtype;
225 HvCallEvent_ackLpEvent(event);
226 }
227 return;
228 }
229
230 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
231 case vioblockopen:
232 /*
233 * Handle a response to an open request. We get all the
234 * disk information in the response, so update it. The
235 * correlation token contains a pointer to a waitevent
236	 * structure that has a completion in it. Update the
237 * return code in the waitevent structure and post the
238 * completion to wake up the guy who sent the request
239 */
240 pwe = (struct vio_waitevent *)event->xCorrelationToken;
241 pwe->rc = event->xRc;
242 pwe->sub_result = bevent->sub_result;
243 complete(&pwe->com);
244 break;
245 case vioblockclose:
246 break;
247 default:
248 printk(KERN_WARNING "handle_viod_request: unexpected subtype!");
249 if (hvlpevent_need_ack(event)) {
250 event->xRc = HvLpEvent_Rc_InvalidSubtype;
251 HvCallEvent_ackLpEvent(event);
252 }
253 }
254}
255
256static void __init probe_disk(struct device_node *vio_root, u32 unit)
257{
258 HvLpEvent_Rc hvrc;
259 struct vio_waitevent we;
260 u16 flags = 0;
261
262retry:
263 init_completion(&we.com);
264
265 /* Send the open event to OS/400 */
266 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
267 HvLpEvent_Type_VirtualIo,
268 viomajorsubtype_blockio | vioblockopen,
269 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
270 viopath_sourceinst(viopath_hostLp),
271 viopath_targetinst(viopath_hostLp),
272 (u64)(unsigned long)&we, VIOVERSION << 16,
273 ((u64)unit << 48) | ((u64)flags<< 32),
274 0, 0, 0);
275 if (hvrc != 0) {
276 printk(KERN_WARNING "probe_disk: bad rc on HV open %d\n",
277 (int)hvrc);
278 return;
279 }
280
281 wait_for_completion(&we.com);
282
283 if (we.rc != 0) {
284 if (flags != 0)
285 return;
286 /* try again with read only flag set */
287 flags = vioblockflags_ro;
288 goto retry;
289 }
290
291 /* Send the close event to OS/400. We DON'T expect a response */
292 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
293 HvLpEvent_Type_VirtualIo,
294 viomajorsubtype_blockio | vioblockclose,
295 HvLpEvent_AckInd_NoAck, HvLpEvent_AckType_ImmediateAck,
296 viopath_sourceinst(viopath_hostLp),
297 viopath_targetinst(viopath_hostLp),
298 0, VIOVERSION << 16,
299 ((u64)unit << 48) | ((u64)flags << 32),
300 0, 0, 0);
301 if (hvrc != 0) {
302 printk(KERN_WARNING "probe_disk: "
303 "bad rc sending event to OS/400 %d\n", (int)hvrc);
304 return;
305 }
306
307 do_device_node(vio_root, "viodasd", FIRST_VIODASD + unit, unit,
308 "block", "IBM,iSeries-viodasd", NULL);
309}
310
311static void __init get_viodasd_info(struct device_node *vio_root)
312{
313 int rc;
314 u32 unit;
315
316 rc = viopath_open(viopath_hostLp, viomajorsubtype_blockio, 2);
317 if (rc) {
318 printk(KERN_WARNING "get_viodasd_info: "
319 "error opening path to host partition %d\n",
320 viopath_hostLp);
321 return;
322 }
323
324 /* Initialize our request handler */
325 vio_setHandler(viomajorsubtype_blockio, handle_block_event);
326
327 for (unit = 0; unit < HVMAXARCHITECTEDVIRTUALDISKS; unit++)
328 probe_disk(vio_root, unit);
329
330 vio_clearHandler(viomajorsubtype_blockio);
331 viopath_close(viopath_hostLp, viomajorsubtype_blockio, 2);
332}
333
334static void __init handle_cd_event(struct HvLpEvent *event)
335{
336 struct viocdlpevent *bevent;
337 struct vio_waitevent *pwe;
338
339 if (!event)
340 /* Notification that a partition went away! */
341 return;
342
343 /* First, we should NEVER get an int here...only acks */
344 if (hvlpevent_is_int(event)) {
345 printk(KERN_WARNING "handle_cd_event: got an unexpected int\n");
346 if (hvlpevent_need_ack(event)) {
347 event->xRc = HvLpEvent_Rc_InvalidSubtype;
348 HvCallEvent_ackLpEvent(event);
349 }
350 return;
351 }
352
353 bevent = (struct viocdlpevent *)event;
354
355 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
356 case viocdgetinfo:
357 pwe = (struct vio_waitevent *)event->xCorrelationToken;
358 pwe->rc = event->xRc;
359 pwe->sub_result = bevent->sub_result;
360 complete(&pwe->com);
361 break;
362
363 default:
364 printk(KERN_WARNING "handle_cd_event: "
365			"message with unexpected subtype 0x%04X!\n",
366 event->xSubtype & VIOMINOR_SUBTYPE_MASK);
367 if (hvlpevent_need_ack(event)) {
368 event->xRc = HvLpEvent_Rc_InvalidSubtype;
369 HvCallEvent_ackLpEvent(event);
370 }
371 }
372}
373
374static void __init get_viocd_info(struct device_node *vio_root)
375{
376 HvLpEvent_Rc hvrc;
377 u32 unit;
378 struct vio_waitevent we;
379 struct vio_resource *unitinfo;
380 dma_addr_t unitinfo_dmaaddr;
381 int ret;
382
383 ret = viopath_open(viopath_hostLp, viomajorsubtype_cdio, 2);
384 if (ret) {
385 printk(KERN_WARNING
386 "get_viocd_info: error opening path to host partition %d\n",
387 viopath_hostLp);
388 return;
389 }
390
391 /* Initialize our request handler */
392 vio_setHandler(viomajorsubtype_cdio, handle_cd_event);
393
394 unitinfo = iseries_hv_alloc(
395 sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS,
396 &unitinfo_dmaaddr, GFP_ATOMIC);
397 if (!unitinfo) {
398 printk(KERN_WARNING
399 "get_viocd_info: error allocating unitinfo\n");
400 goto clear_handler;
401 }
402
403 memset(unitinfo, 0, sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS);
404
405 init_completion(&we.com);
406
407 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
408 HvLpEvent_Type_VirtualIo,
409 viomajorsubtype_cdio | viocdgetinfo,
410 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
411 viopath_sourceinst(viopath_hostLp),
412 viopath_targetinst(viopath_hostLp),
413 (u64)&we, VIOVERSION << 16, unitinfo_dmaaddr, 0,
414 sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS, 0);
415 if (hvrc != HvLpEvent_Rc_Good) {
416 printk(KERN_WARNING
417 "get_viocd_info: cdrom error sending event. rc %d\n",
418 (int)hvrc);
419 goto hv_free;
420 }
421
422 wait_for_completion(&we.com);
423
424 if (we.rc) {
425 printk(KERN_WARNING "get_viocd_info: bad rc %d:0x%04X\n",
426 we.rc, we.sub_result);
427 goto hv_free;
428 }
429
430 for (unit = 0; (unit < HVMAXARCHITECTEDVIRTUALCDROMS) &&
431 unitinfo[unit].rsrcname[0]; unit++) {
432 if (!do_device_node(vio_root, "viocd", FIRST_VIOCD + unit, unit,
433 "block", "IBM,iSeries-viocd", &unitinfo[unit]))
434 break;
435 }
436
437 hv_free:
438 iseries_hv_free(sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALCDROMS,
439 unitinfo, unitinfo_dmaaddr);
440 clear_handler:
441 vio_clearHandler(viomajorsubtype_cdio);
442 viopath_close(viopath_hostLp, viomajorsubtype_cdio, 2);
443}
444
445/* Handle interrupt events for tape */
446static void __init handle_tape_event(struct HvLpEvent *event)
447{
448 struct vio_waitevent *we;
449 struct viotapelpevent *tevent = (struct viotapelpevent *)event;
450
451 if (event == NULL)
452 /* Notification that a partition went away! */
453 return;
454
455 we = (struct vio_waitevent *)event->xCorrelationToken;
456 switch (event->xSubtype & VIOMINOR_SUBTYPE_MASK) {
457 case viotapegetinfo:
458 we->rc = tevent->sub_type_result;
459 complete(&we->com);
460 break;
461 default:
462 printk(KERN_WARNING "handle_tape_event: weird ack\n");
463 }
464}
465
466static void __init get_viotape_info(struct device_node *vio_root)
467{
468 HvLpEvent_Rc hvrc;
469 u32 unit;
470 struct vio_resource *unitinfo;
471 dma_addr_t unitinfo_dmaaddr;
472 size_t len = sizeof(*unitinfo) * HVMAXARCHITECTEDVIRTUALTAPES;
473 struct vio_waitevent we;
474 int ret;
475
476 init_completion(&we.com);
477
478 ret = viopath_open(viopath_hostLp, viomajorsubtype_tape, 2);
479 if (ret) {
480 printk(KERN_WARNING "get_viotape_info: "
481 "error on viopath_open to hostlp %d\n", ret);
482 return;
483 }
484
485 vio_setHandler(viomajorsubtype_tape, handle_tape_event);
486
487 unitinfo = iseries_hv_alloc(len, &unitinfo_dmaaddr, GFP_ATOMIC);
488 if (!unitinfo)
489 goto clear_handler;
490
491 memset(unitinfo, 0, len);
492
493 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
494 HvLpEvent_Type_VirtualIo,
495 viomajorsubtype_tape | viotapegetinfo,
496 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
497 viopath_sourceinst(viopath_hostLp),
498 viopath_targetinst(viopath_hostLp),
499 (u64)(unsigned long)&we, VIOVERSION << 16,
500 unitinfo_dmaaddr, len, 0, 0);
501 if (hvrc != HvLpEvent_Rc_Good) {
502 printk(KERN_WARNING "get_viotape_info: hv error on op %d\n",
503 (int)hvrc);
504 goto hv_free;
505 }
506
507 wait_for_completion(&we.com);
508
509 for (unit = 0; (unit < HVMAXARCHITECTEDVIRTUALTAPES) &&
510 unitinfo[unit].rsrcname[0]; unit++) {
511 if (!do_device_node(vio_root, "viotape", FIRST_VIOTAPE + unit,
512 unit, "byte", "IBM,iSeries-viotape",
513 &unitinfo[unit]))
514 break;
515 }
516
517 hv_free:
518 iseries_hv_free(len, unitinfo, unitinfo_dmaaddr);
519 clear_handler:
520 vio_clearHandler(viomajorsubtype_tape);
521 viopath_close(viopath_hostLp, viomajorsubtype_tape, 2);
522}
523
524static int __init iseries_vio_init(void)
525{
526 struct device_node *vio_root;
527 int ret = -ENODEV;
528
529 if (!firmware_has_feature(FW_FEATURE_ISERIES))
530 goto out;
531
532 iommu_vio_init();
533
534 vio_root = of_find_node_by_path("/vdevice");
535 if (!vio_root)
536 goto out;
537
538 if (viopath_hostLp == HvLpIndexInvalid) {
539 vio_set_hostlp();
540 /* If we don't have a host, bail out */
541 if (viopath_hostLp == HvLpIndexInvalid)
542 goto put_node;
543 }
544
545 get_viodasd_info(vio_root);
546 get_viocd_info(vio_root);
547 get_viotape_info(vio_root);
548
549 ret = 0;
550
551 put_node:
552 of_node_put(vio_root);
553 out:
554 return ret;
555}
556arch_initcall(iseries_vio_init);
diff --git a/arch/powerpc/platforms/iseries/viopath.c b/arch/powerpc/platforms/iseries/viopath.c
new file mode 100644
index 00000000000..2376069cdc1
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/viopath.c
@@ -0,0 +1,677 @@
1/* -*- linux-c -*-
2 *
3 * iSeries Virtual I/O Message Path code
4 *
5 * Authors: Dave Boutcher <boutcher@us.ibm.com>
6 * Ryan Arnold <ryanarn@us.ibm.com>
7 * Colin Devilbiss <devilbis@us.ibm.com>
8 *
9 * (C) Copyright 2000-2005 IBM Corporation
10 *
11 * This code is used by the iSeries virtual disk, cd,
12 * tape, and console to communicate with OS/400 in another
13 * partition.
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License as
17 * published by the Free Software Foundation; either version 2 of the
18 * License, or (at your option) any later version.
19 *
20 * This program is distributed in the hope that it will be useful, but
21 * WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
23 * General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software Foundation,
27 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
28 *
29 */
30#include <linux/module.h>
31#include <linux/kernel.h>
32#include <linux/slab.h>
33#include <linux/errno.h>
34#include <linux/vmalloc.h>
35#include <linux/string.h>
36#include <linux/proc_fs.h>
37#include <linux/dma-mapping.h>
38#include <linux/wait.h>
39#include <linux/seq_file.h>
40#include <linux/interrupt.h>
41#include <linux/completion.h>
42
43#include <asm/system.h>
44#include <asm/uaccess.h>
45#include <asm/prom.h>
46#include <asm/firmware.h>
47#include <asm/iseries/hv_types.h>
48#include <asm/iseries/hv_lp_event.h>
49#include <asm/iseries/hv_lp_config.h>
50#include <asm/iseries/mf.h>
51#include <asm/iseries/vio.h>
52
53/* Status of the path to each other partition in the system.
54 * This is overkill, since we will only ever establish connections
55 * to our hosting partition and the primary partition on the system.
56 * But this allows for other support in the future.
57 */
58static struct viopathStatus {
59 int isOpen; /* Did we open the path? */
60 int isActive; /* Do we have a mon msg outstanding */
61 int users[VIO_MAX_SUBTYPES];
62 HvLpInstanceId mSourceInst;
63 HvLpInstanceId mTargetInst;
64 int numberAllocated;
65} viopathStatus[HVMAXARCHITECTEDLPS];
66
67static DEFINE_SPINLOCK(statuslock);
68
69/*
70 * For each kind of event we allocate a 256-byte buffer from a page-
71 * aligned array, so the buffer is guaranteed not to cross a page boundary
72 */
73static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256]
74 __attribute__((__aligned__(4096)));
75static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
76static int event_buffer_initialised;
77
78static void handleMonitorEvent(struct HvLpEvent *event);
79
80/*
81 * We use this structure to handle asynchronous responses. The caller
82 * blocks on the completion and the handler completes it. However,
83 * if system_state is not SYSTEM_RUNNING, then wait_atomic is used ...
84 */
85struct alloc_parms {
86 struct completion done;
87 int number;
88 atomic_t wait_atomic;
89 int used_wait_atomic;
90};
91
92/* Put a sequence number in each mon msg. The value is not
93 * important. Start at something other than 0 just for
94 * readability. Wrapping this is OK.
95 */
96static u8 viomonseq = 22;
97
98/* Our hosting logical partition. We get this at startup
99 * time, and different modules access this variable directly.
100 */
101HvLpIndex viopath_hostLp = HvLpIndexInvalid;
102EXPORT_SYMBOL(viopath_hostLp);
103HvLpIndex viopath_ourLp = HvLpIndexInvalid;
104EXPORT_SYMBOL(viopath_ourLp);
105
106/* For each kind of incoming event we set a pointer to a
107 * routine to call.
108 */
109static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];
110
111#define VIOPATH_KERN_WARN KERN_WARNING "viopath: "
112#define VIOPATH_KERN_INFO KERN_INFO "viopath: "
113
114static int proc_viopath_show(struct seq_file *m, void *v)
115{
116 char *buf;
117 u16 vlanMap;
118 dma_addr_t handle;
119 HvLpEvent_Rc hvrc;
120 DECLARE_COMPLETION_ONSTACK(done);
121 struct device_node *node;
122 const char *sysid;
123
124 buf = kzalloc(HW_PAGE_SIZE, GFP_KERNEL);
125 if (!buf)
126 return 0;
127
128 handle = iseries_hv_map(buf, HW_PAGE_SIZE, DMA_FROM_DEVICE);
129
130 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
131 HvLpEvent_Type_VirtualIo,
132 viomajorsubtype_config | vioconfigget,
133 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
134 viopath_sourceinst(viopath_hostLp),
135 viopath_targetinst(viopath_hostLp),
136 (u64)(unsigned long)&done, VIOVERSION << 16,
137 ((u64)handle) << 32, HW_PAGE_SIZE, 0, 0);
138
139 if (hvrc != HvLpEvent_Rc_Good)
140 printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);
141
142 wait_for_completion(&done);
143
144 vlanMap = HvLpConfig_getVirtualLanIndexMap();
145
146 buf[HW_PAGE_SIZE-1] = '\0';
147 seq_printf(m, "%s", buf);
148
149 iseries_hv_unmap(handle, HW_PAGE_SIZE, DMA_FROM_DEVICE);
150 kfree(buf);
151
152 seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);
153
154 node = of_find_node_by_path("/");
155 sysid = NULL;
156 if (node != NULL)
157 sysid = of_get_property(node, "system-id", NULL);
158
159 if (sysid == NULL)
160 seq_printf(m, "SRLNBR=<UNKNOWN>\n");
161 else
162 /* Skip "IBM," on front of serial number, see dt.c */
163 seq_printf(m, "SRLNBR=%s\n", sysid + 4);
164
165 of_node_put(node);
166
167 return 0;
168}
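/*
 * Reading /proc/iSeries/config thus returns the configuration text
 * supplied by the hosting partition, followed by an AVAILABLE_VETH=
 * line (the virtual LAN index map) and an SRLNBR= line (the system
 * serial number from the device tree).
 */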
169
170static int proc_viopath_open(struct inode *inode, struct file *file)
171{
172 return single_open(file, proc_viopath_show, NULL);
173}
174
175static const struct file_operations proc_viopath_operations = {
176 .open = proc_viopath_open,
177 .read = seq_read,
178 .llseek = seq_lseek,
179 .release = single_release,
180};
181
182static int __init vio_proc_init(void)
183{
184 if (!firmware_has_feature(FW_FEATURE_ISERIES))
185 return 0;
186
187 proc_create("iSeries/config", 0, NULL, &proc_viopath_operations);
188 return 0;
189}
190__initcall(vio_proc_init);
191
192/* See if a given LP is active. Allow for invalid lps to be passed in
193 * and just return invalid
194 */
195int viopath_isactive(HvLpIndex lp)
196{
197 if (lp == HvLpIndexInvalid)
198 return 0;
199 if (lp < HVMAXARCHITECTEDLPS)
200 return viopathStatus[lp].isActive;
201 else
202 return 0;
203}
204EXPORT_SYMBOL(viopath_isactive);
205
206/*
207 * We cache the source and target instance ids for each
208 * partition.
209 */
210HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
211{
212 return viopathStatus[lp].mSourceInst;
213}
214EXPORT_SYMBOL(viopath_sourceinst);
215
216HvLpInstanceId viopath_targetinst(HvLpIndex lp)
217{
218 return viopathStatus[lp].mTargetInst;
219}
220EXPORT_SYMBOL(viopath_targetinst);
221
222/*
223 * Send a monitor message. This is a message with the acknowledge
224 * bit on that the other side will NOT explicitly acknowledge. When
225 * the other side goes down, the hypervisor will acknowledge any
226 * outstanding messages....so we will know when the other side dies.
227 */
228static void sendMonMsg(HvLpIndex remoteLp)
229{
230 HvLpEvent_Rc hvrc;
231
232 viopathStatus[remoteLp].mSourceInst =
233 HvCallEvent_getSourceLpInstanceId(remoteLp,
234 HvLpEvent_Type_VirtualIo);
235 viopathStatus[remoteLp].mTargetInst =
236 HvCallEvent_getTargetLpInstanceId(remoteLp,
237 HvLpEvent_Type_VirtualIo);
238
239 /*
240	 * Deliberately ignore the return code here. If we call this
241 * more than once, we don't care.
242 */
243 vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);
244
245 hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
246 viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
247 HvLpEvent_AckType_DeferredAck,
248 viopathStatus[remoteLp].mSourceInst,
249 viopathStatus[remoteLp].mTargetInst,
250 viomonseq++, 0, 0, 0, 0, 0);
251
252 if (hvrc == HvLpEvent_Rc_Good)
253 viopathStatus[remoteLp].isActive = 1;
254 else {
255 printk(VIOPATH_KERN_WARN "could not connect to partition %d\n",
256 remoteLp);
257 viopathStatus[remoteLp].isActive = 0;
258 }
259}
260
261static void handleMonitorEvent(struct HvLpEvent *event)
262{
263 HvLpIndex remoteLp;
264 int i;
265
266 /*
267 * This handler is _also_ called as part of the loop
268 * at the end of this routine, so it must be able to
269 * ignore NULL events...
270 */
271 if (!event)
272 return;
273
274 /*
275 * First see if this is just a normal monitor message from the
276 * other partition
277 */
278 if (hvlpevent_is_int(event)) {
279 remoteLp = event->xSourceLp;
280 if (!viopathStatus[remoteLp].isActive)
281 sendMonMsg(remoteLp);
282 return;
283 }
284
285 /*
286 * This path is for an acknowledgement; the other partition
287 * died
288 */
289 remoteLp = event->xTargetLp;
290 if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
291 (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
292 printk(VIOPATH_KERN_WARN "ignoring ack....mismatched instances\n");
293 return;
294 }
295
296 printk(VIOPATH_KERN_WARN "partition %d ended\n", remoteLp);
297
298 viopathStatus[remoteLp].isActive = 0;
299
300 /*
301 * For each active handler, pass them a NULL
302 * message to indicate that the other partition
303 * died
304 */
305 for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
306 if (vio_handler[i] != NULL)
307 (*vio_handler[i])(NULL);
308 }
309}
310
311int vio_setHandler(int subtype, vio_event_handler_t *beh)
312{
313 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
314 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
315 return -EINVAL;
316 if (vio_handler[subtype] != NULL)
317 return -EBUSY;
318 vio_handler[subtype] = beh;
319 return 0;
320}
321EXPORT_SYMBOL(vio_setHandler);
322
323int vio_clearHandler(int subtype)
324{
325 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
326 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
327 return -EINVAL;
328 if (vio_handler[subtype] == NULL)
329 return -EAGAIN;
330 vio_handler[subtype] = NULL;
331 return 0;
332}
333EXPORT_SYMBOL(vio_clearHandler);
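/*
 * A sketch of how a client registers for a subtype (modelled on the
 * callers in vio.c above; "my_handler" is an illustrative name).  A
 * handler must tolerate a NULL event, which is how viopath reports
 * that the partner partition went away:
 *
 *	static void my_handler(struct HvLpEvent *event)
 *	{
 *		if (!event)
 *			return;	(partner partition went away)
 *		(handle interrupts and acks for this subtype)
 *	}
 *
 *	vio_setHandler(viomajorsubtype_blockio, my_handler);
 *	...
 *	vio_clearHandler(viomajorsubtype_blockio);
 */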
334
335static void handleConfig(struct HvLpEvent *event)
336{
337 if (!event)
338 return;
339 if (hvlpevent_is_int(event)) {
340 printk(VIOPATH_KERN_WARN
341 "unexpected config request from partition %d",
342 event->xSourceLp);
343
344 if (hvlpevent_need_ack(event)) {
345 event->xRc = HvLpEvent_Rc_InvalidSubtype;
346 HvCallEvent_ackLpEvent(event);
347 }
348 return;
349 }
350
351 complete((struct completion *)event->xCorrelationToken);
352}
353
354/*
355 * Initialization of the hosting partition
356 */
357void vio_set_hostlp(void)
358{
359 /*
360 * If this has already been set then we DON'T want to either change
361 * it or re-register the proc file system
362 */
363 if (viopath_hostLp != HvLpIndexInvalid)
364 return;
365
366 /*
367 * Figure out our hosting partition. This isn't allowed to change
368 * while we're active
369 */
370 viopath_ourLp = HvLpConfig_getLpIndex();
371 viopath_hostLp = HvLpConfig_getHostingLpIndex(viopath_ourLp);
372
373 if (viopath_hostLp != HvLpIndexInvalid)
374 vio_setHandler(viomajorsubtype_config, handleConfig);
375}
376EXPORT_SYMBOL(vio_set_hostlp);
377
378static void vio_handleEvent(struct HvLpEvent *event)
379{
380 HvLpIndex remoteLp;
381 int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
382 >> VIOMAJOR_SUBTYPE_SHIFT;
383
384 if (hvlpevent_is_int(event)) {
385 remoteLp = event->xSourceLp;
386 /*
387 * The isActive is checked because if the hosting partition
388 * went down and came back up it would not be active but it
389 * would have different source and target instances, in which
390 * case we'd want to reset them. This case really protects
391 * against an unauthorized active partition sending interrupts
392 * or acks to this linux partition.
393 */
394 if (viopathStatus[remoteLp].isActive
395 && (event->xSourceInstanceId !=
396 viopathStatus[remoteLp].mTargetInst)) {
397 printk(VIOPATH_KERN_WARN
398 "message from invalid partition. "
399 "int msg rcvd, source inst (%d) doesn't match (%d)\n",
400 viopathStatus[remoteLp].mTargetInst,
401 event->xSourceInstanceId);
402 return;
403 }
404
405 if (viopathStatus[remoteLp].isActive
406 && (event->xTargetInstanceId !=
407 viopathStatus[remoteLp].mSourceInst)) {
408 printk(VIOPATH_KERN_WARN
409 "message from invalid partition. "
410 "int msg rcvd, target inst (%d) doesn't match (%d)\n",
411 viopathStatus[remoteLp].mSourceInst,
412 event->xTargetInstanceId);
413 return;
414 }
415 } else {
416 remoteLp = event->xTargetLp;
417 if (event->xSourceInstanceId !=
418 viopathStatus[remoteLp].mSourceInst) {
419 printk(VIOPATH_KERN_WARN
420 "message from invalid partition. "
421 "ack msg rcvd, source inst (%d) doesn't match (%d)\n",
422 viopathStatus[remoteLp].mSourceInst,
423 event->xSourceInstanceId);
424 return;
425 }
426
427 if (event->xTargetInstanceId !=
428 viopathStatus[remoteLp].mTargetInst) {
429 printk(VIOPATH_KERN_WARN
430 "message from invalid partition. "
431				"ack msg rcvd, target inst (%d) doesn't match (%d)\n",
432 viopathStatus[remoteLp].mTargetInst,
433 event->xTargetInstanceId);
434 return;
435 }
436 }
437
438 if (vio_handler[subtype] == NULL) {
439 printk(VIOPATH_KERN_WARN
440 "unexpected virtual io event subtype %d from partition %d\n",
441 event->xSubtype, remoteLp);
442 /* No handler. Ack if necessary */
443 if (hvlpevent_is_int(event) && hvlpevent_need_ack(event)) {
444 event->xRc = HvLpEvent_Rc_InvalidSubtype;
445 HvCallEvent_ackLpEvent(event);
446 }
447 return;
448 }
449
450 /* This innocuous little line is where all the real work happens */
451 (*vio_handler[subtype])(event);
452}
453
454static void viopath_donealloc(void *parm, int number)
455{
456 struct alloc_parms *parmsp = parm;
457
458 parmsp->number = number;
459 if (parmsp->used_wait_atomic)
460 atomic_set(&parmsp->wait_atomic, 0);
461 else
462 complete(&parmsp->done);
463}
464
465static int allocateEvents(HvLpIndex remoteLp, int numEvents)
466{
467 struct alloc_parms parms;
468
469 if (system_state != SYSTEM_RUNNING) {
470 parms.used_wait_atomic = 1;
471 atomic_set(&parms.wait_atomic, 1);
472 } else {
473 parms.used_wait_atomic = 0;
474 init_completion(&parms.done);
475 }
476 mf_allocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo, 250, /* It would be nice to put a real number here! */
477 numEvents, &viopath_donealloc, &parms);
478 if (system_state != SYSTEM_RUNNING) {
479 while (atomic_read(&parms.wait_atomic))
480 mb();
481 } else
482 wait_for_completion(&parms.done);
483 return parms.number;
484}
485
486int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
487{
488 int i;
489 unsigned long flags;
490 int tempNumAllocated;
491
492 if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
493 return -EINVAL;
494
495 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
496 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
497 return -EINVAL;
498
499 spin_lock_irqsave(&statuslock, flags);
500
501 if (!event_buffer_initialised) {
502 for (i = 0; i < VIO_MAX_SUBTYPES; i++)
503 atomic_set(&event_buffer_available[i], 1);
504 event_buffer_initialised = 1;
505 }
506
507 viopathStatus[remoteLp].users[subtype]++;
508
509 if (!viopathStatus[remoteLp].isOpen) {
510 viopathStatus[remoteLp].isOpen = 1;
511 HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);
512
513 /*
514 * Don't hold the spinlock during an operation that
515 * can sleep.
516 */
517 spin_unlock_irqrestore(&statuslock, flags);
518 tempNumAllocated = allocateEvents(remoteLp, 1);
519 spin_lock_irqsave(&statuslock, flags);
520
521 viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
522
523 if (viopathStatus[remoteLp].numberAllocated == 0) {
524 HvCallEvent_closeLpEventPath(remoteLp,
525 HvLpEvent_Type_VirtualIo);
526
527 spin_unlock_irqrestore(&statuslock, flags);
528 return -ENOMEM;
529 }
530
531 viopathStatus[remoteLp].mSourceInst =
532 HvCallEvent_getSourceLpInstanceId(remoteLp,
533 HvLpEvent_Type_VirtualIo);
534 viopathStatus[remoteLp].mTargetInst =
535 HvCallEvent_getTargetLpInstanceId(remoteLp,
536 HvLpEvent_Type_VirtualIo);
537 HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
538 &vio_handleEvent);
539 sendMonMsg(remoteLp);
540 printk(VIOPATH_KERN_INFO "opening connection to partition %d, "
541 "setting sinst %d, tinst %d\n",
542 remoteLp, viopathStatus[remoteLp].mSourceInst,
543 viopathStatus[remoteLp].mTargetInst);
544 }
545
546 spin_unlock_irqrestore(&statuslock, flags);
547 tempNumAllocated = allocateEvents(remoteLp, numReq);
548 spin_lock_irqsave(&statuslock, flags);
549 viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
550 spin_unlock_irqrestore(&statuslock, flags);
551
552 return 0;
553}
554EXPORT_SYMBOL(viopath_open);
555
556int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
557{
558 unsigned long flags;
559 int i;
560 int numOpen;
561 struct alloc_parms parms;
562
563 if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
564 return -EINVAL;
565
566 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
567 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
568 return -EINVAL;
569
570 spin_lock_irqsave(&statuslock, flags);
571 /*
572 * If the viopath_close somehow gets called before a
573	 * viopath_open, it could decrement to -1, which is a
574	 * non-recoverable state, so we prevent this from
575	 * happening.
576 */
577 if (viopathStatus[remoteLp].users[subtype] > 0)
578 viopathStatus[remoteLp].users[subtype]--;
579
580 spin_unlock_irqrestore(&statuslock, flags);
581
582 parms.used_wait_atomic = 0;
583 init_completion(&parms.done);
584 mf_deallocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo,
585 numReq, &viopath_donealloc, &parms);
586 wait_for_completion(&parms.done);
587
588 spin_lock_irqsave(&statuslock, flags);
589 for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
590 numOpen += viopathStatus[remoteLp].users[i];
591
592 if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
593 printk(VIOPATH_KERN_INFO "closing connection to partition %d\n",
594 remoteLp);
595
596 HvCallEvent_closeLpEventPath(remoteLp,
597 HvLpEvent_Type_VirtualIo);
598 viopathStatus[remoteLp].isOpen = 0;
599 viopathStatus[remoteLp].isActive = 0;
600
601 for (i = 0; i < VIO_MAX_SUBTYPES; i++)
602 atomic_set(&event_buffer_available[i], 0);
603 event_buffer_initialised = 0;
604 }
605 spin_unlock_irqrestore(&statuslock, flags);
606 return 0;
607}
608EXPORT_SYMBOL(viopath_close);
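/*
 * The usual lifetime of a path, as seen in the vio.c probe code above
 * (a sketch, not a fixed contract); the numReq passed to viopath_close
 * normally matches the one passed to viopath_open:
 *
 *	viopath_open(viopath_hostLp, viomajorsubtype_blockio, 2);
 *	vio_setHandler(viomajorsubtype_blockio, handler);
 *	... exchange events via HvCallEvent_signalLpEventFast() ...
 *	vio_clearHandler(viomajorsubtype_blockio);
 *	viopath_close(viopath_hostLp, viomajorsubtype_blockio, 2);
 */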
609
610void *vio_get_event_buffer(int subtype)
611{
612 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
613 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
614 return NULL;
615
616 if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
617 return &event_buffer[subtype * 256];
618 else
619 return NULL;
620}
621EXPORT_SYMBOL(vio_get_event_buffer);
622
623void vio_free_event_buffer(int subtype, void *buffer)
624{
625 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
626 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
627 printk(VIOPATH_KERN_WARN
628 "unexpected subtype %d freeing event buffer\n", subtype);
629 return;
630 }
631
632 if (atomic_read(&event_buffer_available[subtype]) != 0) {
633 printk(VIOPATH_KERN_WARN
634 "freeing unallocated event buffer, subtype %d\n",
635 subtype);
636 return;
637 }
638
639 if (buffer != &event_buffer[subtype * 256]) {
640 printk(VIOPATH_KERN_WARN
641 "freeing invalid event buffer, subtype %d\n", subtype);
642 }
643
644 atomic_set(&event_buffer_available[subtype], 1);
645}
646EXPORT_SYMBOL(vio_free_event_buffer);
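/*
 * Event buffers are handed out one per subtype from the page-aligned
 * array above.  A sketch of the expected get/use/free pattern (the
 * surrounding driver code is illustrative):
 *
 *	buf = vio_get_event_buffer(viomajorsubtype_blockio);
 *	if (buf) {
 *		... build and send an event using buf ...
 *		vio_free_event_buffer(viomajorsubtype_blockio, buf);
 *	}
 */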
647
648static const struct vio_error_entry vio_no_error =
649 { 0, 0, "Non-VIO Error" };
650static const struct vio_error_entry vio_unknown_error =
651 { 0, EIO, "Unknown Error" };
652
653static const struct vio_error_entry vio_default_errors[] = {
654 {0x0001, EIO, "No Connection"},
655 {0x0002, EIO, "No Receiver"},
656 {0x0003, EIO, "No Buffer Available"},
657 {0x0004, EBADRQC, "Invalid Message Type"},
658 {0x0000, 0, NULL},
659};
660
661const struct vio_error_entry *vio_lookup_rc(
662 const struct vio_error_entry *local_table, u16 rc)
663{
664 const struct vio_error_entry *cur;
665
666 if (!rc)
667 return &vio_no_error;
668 if (local_table)
669 for (cur = local_table; cur->rc; ++cur)
670 if (cur->rc == rc)
671 return cur;
672 for (cur = vio_default_errors; cur->rc; ++cur)
673 if (cur->rc == rc)
674 return cur;
675 return &vio_unknown_error;
676}
677EXPORT_SYMBOL(vio_lookup_rc);
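/*
 * A sketch of how a driver might translate a sub_result code into an
 * errno and a message; "viodasd_err_table" is an illustrative name for
 * a driver-local table in the same { rc, errno, msg } layout as
 * vio_default_errors (the errno and msg member names are assumed from
 * those initialisers), and NULL may be passed to use only the defaults:
 *
 *	const struct vio_error_entry *err =
 *		vio_lookup_rc(viodasd_err_table, bevent->sub_result);
 *	printk(KERN_WARNING "request failed: %s (errno %d)\n",
 *		err->msg, err->errno);
 */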
diff --git a/arch/powerpc/platforms/iseries/vpd_areas.h b/arch/powerpc/platforms/iseries/vpd_areas.h
new file mode 100644
index 00000000000..feb001f3a5f
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/vpd_areas.h
@@ -0,0 +1,88 @@
1/*
2 * Copyright (C) 2001 Mike Corrigan IBM Corporation
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 */
18#ifndef _ISERIES_VPD_AREAS_H
19#define _ISERIES_VPD_AREAS_H
20
21/*
22 * This file defines the address and length of all of the VPD area passed to
23 * the OS from PLIC (most of which start from the SP).
24 */
25
26#include <asm/types.h>
27
28/* VPD Entry index is carved in stone - cannot be changed (easily). */
29#define ItVpdCecVpd 0
30#define ItVpdDynamicSpace 1
31#define ItVpdExtVpd 2
32#define ItVpdExtVpdOnPanel 3
33#define ItVpdFirstPaca 4
34#define ItVpdIoVpd 5
35#define ItVpdIplParms 6
36#define ItVpdMsVpd 7
37#define ItVpdPanelVpd 8
38#define ItVpdLpNaca 9
39#define ItVpdBackplaneAndMaybeClockCardVpd 10
40#define ItVpdRecoveryLogBuffer 11
41#define ItVpdSpCommArea 12
42#define ItVpdSpLogBuffer 13
43#define ItVpdSpLogBufferSave 14
44#define ItVpdSpCardVpd 15
45#define ItVpdFirstProcVpd 16
46#define ItVpdApModelVpd 17
47#define ItVpdClockCardVpd 18
48#define ItVpdBusExtCardVpd 19
49#define ItVpdProcCapacityVpd 20
50#define ItVpdInteractiveCapacityVpd 21
51#define ItVpdFirstSlotLabel 22
52#define ItVpdFirstLpQueue 23
53#define ItVpdFirstL3CacheVpd 24
54#define ItVpdFirstProcFruVpd 25
55
56#define ItVpdMaxEntries 26
57
58#define ItDmaMaxEntries 10
59
60#define ItVpdAreasMaxSlotLabels 192
61
62
63struct ItVpdAreas {
64 u32 xSlicDesc; // Descriptor 000-003
65 u16 xSlicSize; // Size of this control block 004-005
66	u16	xPlicAdjustVpdLens:1;	// Flag to indicate new interface 006-007
67 u16 xRsvd1:15; // Reserved bits ...
68 u16 xSlicVpdEntries; // Number of VPD entries 008-009
69 u16 xSlicDmaEntries; // Number of DMA entries 00A-00B
70 u16 xSlicMaxLogicalProcs; // Maximum logical processors 00C-00D
71 u16 xSlicMaxPhysicalProcs; // Maximum physical processors 00E-00F
72 u16 xSlicDmaToksOffset; // Offset into this of array 010-011
73 u16 xSlicVpdAdrsOffset; // Offset into this of array 012-013
74 u16 xSlicDmaLensOffset; // Offset into this of array 014-015
75 u16 xSlicVpdLensOffset; // Offset into this of array 016-017
76	u16	xSlicMaxSlotLabels;	// Maximum number of slot labels 018-019
77 u16 xSlicMaxLpQueues; // Maximum number of LP Queues 01A-01B
78 u8 xRsvd2[4]; // Reserved 01C-01F
79 u64 xRsvd3[12]; // Reserved 020-07F
80 u32 xPlicDmaLens[ItDmaMaxEntries];// Array of DMA lengths 080-0A7
81 u32 xPlicDmaToks[ItDmaMaxEntries];// Array of DMA tokens 0A8-0CF
82 u32 xSlicVpdLens[ItVpdMaxEntries];// Array of VPD lengths 0D0-12F
83 const void *xSlicVpdAdrs[ItVpdMaxEntries];// Array of VPD buffers 130-1EF
84};
85
86extern const struct ItVpdAreas itVpdAreas;
87
88#endif /* _ISERIES_VPD_AREAS_H */