aboutsummaryrefslogtreecommitdiffstats
path: root/arch/ppc64/kernel
diff options
context:
space:
mode:
Diffstat (limited to 'arch/ppc64/kernel')
-rw-r--r--arch/ppc64/kernel/HvCall.c36
-rw-r--r--arch/ppc64/kernel/HvLpConfig.c27
-rw-r--r--arch/ppc64/kernel/HvLpEvent.c88
-rw-r--r--arch/ppc64/kernel/ItLpQueue.c262
-rw-r--r--arch/ppc64/kernel/LparData.c227
-rw-r--r--arch/ppc64/kernel/Makefile10
-rw-r--r--arch/ppc64/kernel/bpa_iommu.c2
-rw-r--r--arch/ppc64/kernel/bpa_setup.c2
-rw-r--r--arch/ppc64/kernel/eeh.c2
-rw-r--r--arch/ppc64/kernel/head.S2
-rw-r--r--arch/ppc64/kernel/hvCall.S98
-rw-r--r--arch/ppc64/kernel/iSeries_VpdInfo.c268
-rw-r--r--arch/ppc64/kernel/iSeries_htab.c255
-rw-r--r--arch/ppc64/kernel/iSeries_iommu.c177
-rw-r--r--arch/ppc64/kernel/iSeries_irq.c366
-rw-r--r--arch/ppc64/kernel/iSeries_pci.c905
-rw-r--r--arch/ppc64/kernel/iSeries_proc.c113
-rw-r--r--arch/ppc64/kernel/iSeries_setup.c1007
-rw-r--r--arch/ppc64/kernel/iSeries_setup.h26
-rw-r--r--arch/ppc64/kernel/iSeries_smp.c121
-rw-r--r--arch/ppc64/kernel/iSeries_vio.c156
-rw-r--r--arch/ppc64/kernel/maple_pci.c3
-rw-r--r--arch/ppc64/kernel/mf.c1281
-rw-r--r--arch/ppc64/kernel/misc.S38
-rw-r--r--arch/ppc64/kernel/pSeries_iommu.c2
-rw-r--r--arch/ppc64/kernel/pSeries_pci.c3
-rw-r--r--arch/ppc64/kernel/pSeries_setup.c2
-rw-r--r--arch/ppc64/kernel/pci.c3
-rw-r--r--arch/ppc64/kernel/pci.h54
-rw-r--r--arch/ppc64/kernel/pci_direct_iommu.c3
-rw-r--r--arch/ppc64/kernel/pci_dn.c3
-rw-r--r--arch/ppc64/kernel/pci_iommu.c8
-rw-r--r--arch/ppc64/kernel/pmac_pci.c2
-rw-r--r--arch/ppc64/kernel/ppc_ksyms.c20
-rw-r--r--arch/ppc64/kernel/rtas_pci.c3
-rw-r--r--arch/ppc64/kernel/rtc.c37
-rw-r--r--arch/ppc64/kernel/sys_ppc32.c3
-rw-r--r--arch/ppc64/kernel/u3_iommu.c3
-rw-r--r--arch/ppc64/kernel/viopath.c673
39 files changed, 16 insertions, 6275 deletions
diff --git a/arch/ppc64/kernel/HvCall.c b/arch/ppc64/kernel/HvCall.c
deleted file mode 100644
index b772e65b57a2..000000000000
--- a/arch/ppc64/kernel/HvCall.c
+++ /dev/null
@@ -1,36 +0,0 @@
1/*
2 * HvCall.c
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#include <asm/page.h>
12#include <asm/abs_addr.h>
13#include <asm/iSeries/HvCall.h>
14#include <asm/iSeries/HvCallSc.h>
15#include <asm/iSeries/HvTypes.h>
16
17
18void HvCall_writeLogBuffer(const void *buffer, u64 len)
19{
20 struct HvLpBufferList hv_buf;
21 u64 left_this_page;
22 u64 cur = virt_to_abs(buffer);
23
24 while (len) {
25 hv_buf.addr = cur;
26 left_this_page = ((cur & PAGE_MASK) + PAGE_SIZE) - cur;
27 if (left_this_page > len)
28 left_this_page = len;
29 hv_buf.len = left_this_page;
30 len -= left_this_page;
31 HvCall2(HvCallBaseWriteLogBuffer,
32 virt_to_abs(&hv_buf),
33 left_this_page);
34 cur = (cur & PAGE_MASK) + PAGE_SIZE;
35 }
36}
diff --git a/arch/ppc64/kernel/HvLpConfig.c b/arch/ppc64/kernel/HvLpConfig.c
deleted file mode 100644
index cb1d6473203c..000000000000
--- a/arch/ppc64/kernel/HvLpConfig.c
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * HvLpConfig.c
3 * Copyright (C) 2001 Kyle A. Lucke, IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/module.h>
21#include <asm/iSeries/HvLpConfig.h>
22
23HvLpIndex HvLpConfig_getLpIndex_outline(void)
24{
25 return HvLpConfig_getLpIndex();
26}
27EXPORT_SYMBOL(HvLpConfig_getLpIndex_outline);
diff --git a/arch/ppc64/kernel/HvLpEvent.c b/arch/ppc64/kernel/HvLpEvent.c
deleted file mode 100644
index 90032b138902..000000000000
--- a/arch/ppc64/kernel/HvLpEvent.c
+++ /dev/null
@@ -1,88 +0,0 @@
1/*
2 * Copyright 2001 Mike Corrigan IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/stddef.h>
10#include <linux/kernel.h>
11#include <linux/module.h>
12#include <asm/system.h>
13#include <asm/iSeries/HvLpEvent.h>
14#include <asm/iSeries/HvCallEvent.h>
15#include <asm/iSeries/ItLpNaca.h>
16
17/* Array of LpEvent handler functions */
18LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
19unsigned lpEventHandlerPaths[HvLpEvent_Type_NumTypes];
20
21/* Register a handler for an LpEvent type */
22
23int HvLpEvent_registerHandler( HvLpEvent_Type eventType, LpEventHandler handler )
24{
25 int rc = 1;
26 if ( eventType < HvLpEvent_Type_NumTypes ) {
27 lpEventHandler[eventType] = handler;
28 rc = 0;
29 }
30 return rc;
31
32}
33
34int HvLpEvent_unregisterHandler( HvLpEvent_Type eventType )
35{
36 int rc = 1;
37
38 might_sleep();
39
40 if ( eventType < HvLpEvent_Type_NumTypes ) {
41 if ( !lpEventHandlerPaths[eventType] ) {
42 lpEventHandler[eventType] = NULL;
43 rc = 0;
44
45 /* We now sleep until all other CPUs have scheduled. This ensures that
46 * the deletion is seen by all other CPUs, and that the deleted handler
47 * isn't still running on another CPU when we return. */
48 synchronize_rcu();
49 }
50 }
51 return rc;
52}
53EXPORT_SYMBOL(HvLpEvent_registerHandler);
54EXPORT_SYMBOL(HvLpEvent_unregisterHandler);
55
56/* (lpIndex is the partition index of the target partition.
57 * needed only for VirtualIo, VirtualLan and SessionMgr. Zero
58 * indicates to use our partition index - for the other types)
59 */
60int HvLpEvent_openPath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
61{
62 int rc = 1;
63 if ( eventType < HvLpEvent_Type_NumTypes &&
64 lpEventHandler[eventType] ) {
65 if ( lpIndex == 0 )
66 lpIndex = itLpNaca.xLpIndex;
67 HvCallEvent_openLpEventPath( lpIndex, eventType );
68 ++lpEventHandlerPaths[eventType];
69 rc = 0;
70 }
71 return rc;
72}
73
74int HvLpEvent_closePath( HvLpEvent_Type eventType, HvLpIndex lpIndex )
75{
76 int rc = 1;
77 if ( eventType < HvLpEvent_Type_NumTypes &&
78 lpEventHandler[eventType] &&
79 lpEventHandlerPaths[eventType] ) {
80 if ( lpIndex == 0 )
81 lpIndex = itLpNaca.xLpIndex;
82 HvCallEvent_closeLpEventPath( lpIndex, eventType );
83 --lpEventHandlerPaths[eventType];
84 rc = 0;
85 }
86 return rc;
87}
88
diff --git a/arch/ppc64/kernel/ItLpQueue.c b/arch/ppc64/kernel/ItLpQueue.c
deleted file mode 100644
index 4231861288a3..000000000000
--- a/arch/ppc64/kernel/ItLpQueue.c
+++ /dev/null
@@ -1,262 +0,0 @@
1/*
2 * ItLpQueue.c
3 * Copyright (C) 2001 Mike Corrigan IBM Corporation
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 */
10
11#include <linux/stddef.h>
12#include <linux/kernel.h>
13#include <linux/sched.h>
14#include <linux/bootmem.h>
15#include <linux/seq_file.h>
16#include <linux/proc_fs.h>
17#include <asm/system.h>
18#include <asm/paca.h>
19#include <asm/iSeries/ItLpQueue.h>
20#include <asm/iSeries/HvLpEvent.h>
21#include <asm/iSeries/HvCallEvent.h>
22
23/*
24 * The LpQueue is used to pass event data from the hypervisor to
25 * the partition. This is where I/O interrupt events are communicated.
26 *
27 * It is written to by the hypervisor so cannot end up in the BSS.
28 */
29struct hvlpevent_queue hvlpevent_queue __attribute__((__section__(".data")));
30
31DEFINE_PER_CPU(unsigned long[HvLpEvent_Type_NumTypes], hvlpevent_counts);
32
33static char *event_types[HvLpEvent_Type_NumTypes] = {
34 "Hypervisor",
35 "Machine Facilities",
36 "Session Manager",
37 "SPD I/O",
38 "Virtual Bus",
39 "PCI I/O",
40 "RIO I/O",
41 "Virtual Lan",
42 "Virtual I/O"
43};
44
45/* Array of LpEvent handler functions */
46extern LpEventHandler lpEventHandler[HvLpEvent_Type_NumTypes];
47
48static struct HvLpEvent * get_next_hvlpevent(void)
49{
50 struct HvLpEvent * event;
51 event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
52
53 if (event->xFlags.xValid) {
54 /* rmb() needed only for weakly consistent machines (regatta) */
55 rmb();
56 /* Set pointer to next potential event */
57 hvlpevent_queue.xSlicCurEventPtr += ((event->xSizeMinus1 +
58 LpEventAlign) / LpEventAlign) * LpEventAlign;
59
60 /* Wrap to beginning if no room at end */
61 if (hvlpevent_queue.xSlicCurEventPtr >
62 hvlpevent_queue.xSlicLastValidEventPtr) {
63 hvlpevent_queue.xSlicCurEventPtr =
64 hvlpevent_queue.xSlicEventStackPtr;
65 }
66 } else {
67 event = NULL;
68 }
69
70 return event;
71}
72
73static unsigned long spread_lpevents = NR_CPUS;
74
75int hvlpevent_is_pending(void)
76{
77 struct HvLpEvent *next_event;
78
79 if (smp_processor_id() >= spread_lpevents)
80 return 0;
81
82 next_event = (struct HvLpEvent *)hvlpevent_queue.xSlicCurEventPtr;
83
84 return next_event->xFlags.xValid |
85 hvlpevent_queue.xPlicOverflowIntPending;
86}
87
88static void hvlpevent_clear_valid(struct HvLpEvent * event)
89{
90 /* Tell the Hypervisor that we're done with this event.
91 * Also clear bits within this event that might look like valid bits.
92 * ie. on 64-byte boundaries.
93 */
94 struct HvLpEvent *tmp;
95 unsigned extra = ((event->xSizeMinus1 + LpEventAlign) /
96 LpEventAlign) - 1;
97
98 switch (extra) {
99 case 3:
100 tmp = (struct HvLpEvent*)((char*)event + 3 * LpEventAlign);
101 tmp->xFlags.xValid = 0;
102 case 2:
103 tmp = (struct HvLpEvent*)((char*)event + 2 * LpEventAlign);
104 tmp->xFlags.xValid = 0;
105 case 1:
106 tmp = (struct HvLpEvent*)((char*)event + 1 * LpEventAlign);
107 tmp->xFlags.xValid = 0;
108 }
109
110 mb();
111
112 event->xFlags.xValid = 0;
113}
114
115void process_hvlpevents(struct pt_regs *regs)
116{
117 struct HvLpEvent * event;
118
119 /* If we have recursed, just return */
120 if (!spin_trylock(&hvlpevent_queue.lock))
121 return;
122
123 for (;;) {
124 event = get_next_hvlpevent();
125 if (event) {
126 /* Call appropriate handler here, passing
127 * a pointer to the LpEvent. The handler
128 * must make a copy of the LpEvent if it
129 * needs it in a bottom half. (perhaps for
130 * an ACK)
131 *
132 * Handlers are responsible for ACK processing
133 *
134 * The Hypervisor guarantees that LpEvents will
135 * only be delivered with types that we have
136 * registered for, so no type check is necessary
137 * here!
138 */
139 if (event->xType < HvLpEvent_Type_NumTypes)
140 __get_cpu_var(hvlpevent_counts)[event->xType]++;
141 if (event->xType < HvLpEvent_Type_NumTypes &&
142 lpEventHandler[event->xType])
143 lpEventHandler[event->xType](event, regs);
144 else
145 printk(KERN_INFO "Unexpected Lp Event type=%d\n", event->xType );
146
147 hvlpevent_clear_valid(event);
148 } else if (hvlpevent_queue.xPlicOverflowIntPending)
149 /*
150 * No more valid events. If overflow events are
151 * pending process them
152 */
153 HvCallEvent_getOverflowLpEvents(hvlpevent_queue.xIndex);
154 else
155 break;
156 }
157
158 spin_unlock(&hvlpevent_queue.lock);
159}
160
161static int set_spread_lpevents(char *str)
162{
163 unsigned long val = simple_strtoul(str, NULL, 0);
164
165 /*
166 * The parameter is the number of processors to share in processing
167 * lp events.
168 */
169 if (( val > 0) && (val <= NR_CPUS)) {
170 spread_lpevents = val;
171 printk("lpevent processing spread over %ld processors\n", val);
172 } else {
173 printk("invalid spread_lpevents %ld\n", val);
174 }
175
176 return 1;
177}
178__setup("spread_lpevents=", set_spread_lpevents);
179
180void setup_hvlpevent_queue(void)
181{
182 void *eventStack;
183
184 /*
185 * Allocate a page for the Event Stack. The Hypervisor needs the
186 * absolute real address, so we subtract out the KERNELBASE and add
187 * in the absolute real address of the kernel load area.
188 */
189 eventStack = alloc_bootmem_pages(LpEventStackSize);
190 memset(eventStack, 0, LpEventStackSize);
191
192 /* Invoke the hypervisor to initialize the event stack */
193 HvCallEvent_setLpEventStack(0, eventStack, LpEventStackSize);
194
195 hvlpevent_queue.xSlicEventStackPtr = (char *)eventStack;
196 hvlpevent_queue.xSlicCurEventPtr = (char *)eventStack;
197 hvlpevent_queue.xSlicLastValidEventPtr = (char *)eventStack +
198 (LpEventStackSize - LpEventMaxSize);
199 hvlpevent_queue.xIndex = 0;
200}
201
202static int proc_lpevents_show(struct seq_file *m, void *v)
203{
204 int cpu, i;
205 unsigned long sum;
206 static unsigned long cpu_totals[NR_CPUS];
207
208 /* FIXME: do we care that there's no locking here? */
209 sum = 0;
210 for_each_online_cpu(cpu) {
211 cpu_totals[cpu] = 0;
212 for (i = 0; i < HvLpEvent_Type_NumTypes; i++) {
213 cpu_totals[cpu] += per_cpu(hvlpevent_counts, cpu)[i];
214 }
215 sum += cpu_totals[cpu];
216 }
217
218 seq_printf(m, "LpEventQueue 0\n");
219 seq_printf(m, " events processed:\t%lu\n", sum);
220
221 for (i = 0; i < HvLpEvent_Type_NumTypes; ++i) {
222 sum = 0;
223 for_each_online_cpu(cpu) {
224 sum += per_cpu(hvlpevent_counts, cpu)[i];
225 }
226
227 seq_printf(m, " %-20s %10lu\n", event_types[i], sum);
228 }
229
230 seq_printf(m, "\n events processed by processor:\n");
231
232 for_each_online_cpu(cpu) {
233 seq_printf(m, " CPU%02d %10lu\n", cpu, cpu_totals[cpu]);
234 }
235
236 return 0;
237}
238
239static int proc_lpevents_open(struct inode *inode, struct file *file)
240{
241 return single_open(file, proc_lpevents_show, NULL);
242}
243
244static struct file_operations proc_lpevents_operations = {
245 .open = proc_lpevents_open,
246 .read = seq_read,
247 .llseek = seq_lseek,
248 .release = single_release,
249};
250
251static int __init proc_lpevents_init(void)
252{
253 struct proc_dir_entry *e;
254
255 e = create_proc_entry("iSeries/lpevents", S_IFREG|S_IRUGO, NULL);
256 if (e)
257 e->proc_fops = &proc_lpevents_operations;
258
259 return 0;
260}
261__initcall(proc_lpevents_init);
262
diff --git a/arch/ppc64/kernel/LparData.c b/arch/ppc64/kernel/LparData.c
deleted file mode 100644
index 0a9c23ca2f0c..000000000000
--- a/arch/ppc64/kernel/LparData.c
+++ /dev/null
@@ -1,227 +0,0 @@
1/*
2 * Copyright 2001 Mike Corrigan, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#include <linux/config.h>
10#include <linux/types.h>
11#include <linux/threads.h>
12#include <linux/module.h>
13#include <linux/bitops.h>
14#include <asm/processor.h>
15#include <asm/ptrace.h>
16#include <asm/naca.h>
17#include <asm/abs_addr.h>
18#include <asm/iSeries/ItLpNaca.h>
19#include <asm/lppaca.h>
20#include <asm/iSeries/ItLpRegSave.h>
21#include <asm/paca.h>
22#include <asm/iSeries/HvReleaseData.h>
23#include <asm/iSeries/LparMap.h>
24#include <asm/iSeries/ItVpdAreas.h>
25#include <asm/iSeries/ItIplParmsReal.h>
26#include <asm/iSeries/ItExtVpdPanel.h>
27#include <asm/iSeries/ItLpQueue.h>
28#include <asm/iSeries/IoHriProcessorVpd.h>
29#include <asm/iSeries/ItSpCommArea.h>
30
31
32/* The HvReleaseData is the root of the information shared between
33 * the hypervisor and Linux.
34 */
35struct HvReleaseData hvReleaseData = {
36 .xDesc = 0xc8a5d9c4, /* "HvRD" ebcdic */
37 .xSize = sizeof(struct HvReleaseData),
38 .xVpdAreasPtrOffset = offsetof(struct naca_struct, xItVpdAreas),
39 .xSlicNacaAddr = &naca, /* 64-bit Naca address */
40 .xMsNucDataOffset = LPARMAP_PHYS,
41 .xFlags = HVREL_TAGSINACTIVE /* tags inactive */
42 /* 64 bit */
43 /* shared processors */
44 /* HMT allowed */
45 | 6, /* TEMP: This allows non-GA driver */
46 .xVrmIndex = 4, /* We are v5r2m0 */
47 .xMinSupportedPlicVrmIndex = 3, /* v5r1m0 */
48 .xMinCompatablePlicVrmIndex = 3, /* v5r1m0 */
49 .xVrmName = { 0xd3, 0x89, 0x95, 0xa4, /* "Linux 2.4.64" ebcdic */
50 0xa7, 0x40, 0xf2, 0x4b,
51 0xf4, 0x4b, 0xf6, 0xf4 },
52};
53
54/*
55 * The NACA. The first dword of the naca is required by the iSeries
56 * hypervisor to point to itVpdAreas. The hypervisor finds the NACA
57 * through the pointer in hvReleaseData.
58 */
59struct naca_struct naca = {
60 .xItVpdAreas = &itVpdAreas,
61 .xRamDisk = 0,
62 .xRamDiskSize = 0,
63};
64
65extern void system_reset_iSeries(void);
66extern void machine_check_iSeries(void);
67extern void data_access_iSeries(void);
68extern void instruction_access_iSeries(void);
69extern void hardware_interrupt_iSeries(void);
70extern void alignment_iSeries(void);
71extern void program_check_iSeries(void);
72extern void fp_unavailable_iSeries(void);
73extern void decrementer_iSeries(void);
74extern void trap_0a_iSeries(void);
75extern void trap_0b_iSeries(void);
76extern void system_call_iSeries(void);
77extern void single_step_iSeries(void);
78extern void trap_0e_iSeries(void);
79extern void performance_monitor_iSeries(void);
80extern void data_access_slb_iSeries(void);
81extern void instruction_access_slb_iSeries(void);
82
83struct ItLpNaca itLpNaca = {
84 .xDesc = 0xd397d581, /* "LpNa" ebcdic */
85 .xSize = 0x0400, /* size of ItLpNaca */
86 .xIntHdlrOffset = 0x0300, /* offset to int array */
87 .xMaxIntHdlrEntries = 19, /* # ents */
88 .xPrimaryLpIndex = 0, /* Part # of primary */
89 .xServiceLpIndex = 0, /* Part # of serv */
90 .xLpIndex = 0, /* Part # of me */
91 .xMaxLpQueues = 0, /* # of LP queues */
92 .xLpQueueOffset = 0x100, /* offset of start of LP queues */
93 .xPirEnvironMode = 0, /* Piranha stuff */
94 .xPirConsoleMode = 0,
95 .xPirDasdMode = 0,
96 .xLparInstalled = 0,
97 .xSysPartitioned = 0,
98 .xHwSyncedTBs = 0,
99 .xIntProcUtilHmt = 0,
100 .xSpVpdFormat = 0,
101 .xIntProcRatio = 0,
102 .xPlicVrmIndex = 0, /* VRM index of PLIC */
103 .xMinSupportedSlicVrmInd = 0, /* min supported SLIC */
104 .xMinCompatableSlicVrmInd = 0, /* min compat SLIC */
105 .xLoadAreaAddr = 0, /* 64-bit addr of load area */
106 .xLoadAreaChunks = 0, /* chunks for load area */
107 .xPaseSysCallCRMask = 0, /* PASE mask */
108 .xSlicSegmentTablePtr = 0, /* seg table */
109 .xOldLpQueue = { 0 }, /* Old LP Queue */
110 .xInterruptHdlr = {
111 (u64)system_reset_iSeries, /* 0x100 System Reset */
112 (u64)machine_check_iSeries, /* 0x200 Machine Check */
113 (u64)data_access_iSeries, /* 0x300 Data Access */
114 (u64)instruction_access_iSeries, /* 0x400 Instruction Access */
115 (u64)hardware_interrupt_iSeries, /* 0x500 External */
116 (u64)alignment_iSeries, /* 0x600 Alignment */
117 (u64)program_check_iSeries, /* 0x700 Program Check */
118 (u64)fp_unavailable_iSeries, /* 0x800 FP Unavailable */
119 (u64)decrementer_iSeries, /* 0x900 Decrementer */
120 (u64)trap_0a_iSeries, /* 0xa00 Trap 0A */
121 (u64)trap_0b_iSeries, /* 0xb00 Trap 0B */
122 (u64)system_call_iSeries, /* 0xc00 System Call */
123 (u64)single_step_iSeries, /* 0xd00 Single Step */
124 (u64)trap_0e_iSeries, /* 0xe00 Trap 0E */
125 (u64)performance_monitor_iSeries,/* 0xf00 Performance Monitor */
126 0, /* int 0x1000 */
127 0, /* int 0x1010 */
128 0, /* int 0x1020 CPU ctls */
129 (u64)hardware_interrupt_iSeries, /* SC Ret Hdlr */
130 (u64)data_access_slb_iSeries, /* 0x380 D-SLB */
131 (u64)instruction_access_slb_iSeries /* 0x480 I-SLB */
132 }
133};
134EXPORT_SYMBOL(itLpNaca);
135
136/* May be filled in by the hypervisor so cannot end up in the BSS */
137struct ItIplParmsReal xItIplParmsReal __attribute__((__section__(".data")));
138
139/* May be filled in by the hypervisor so cannot end up in the BSS */
140struct ItExtVpdPanel xItExtVpdPanel __attribute__((__section__(".data")));
141EXPORT_SYMBOL(xItExtVpdPanel);
142
143#define maxPhysicalProcessors 32
144
145struct IoHriProcessorVpd xIoHriProcessorVpd[maxPhysicalProcessors] = {
146 {
147 .xInstCacheOperandSize = 32,
148 .xDataCacheOperandSize = 32,
149 .xProcFreq = 50000000,
150 .xTimeBaseFreq = 50000000,
151 .xPVR = 0x3600
152 }
153};
154
155/* Space for Main Store Vpd 27,200 bytes */
156/* May be filled in by the hypervisor so cannot end up in the BSS */
157u64 xMsVpd[3400] __attribute__((__section__(".data")));
158
159/* Space for Recovery Log Buffer */
160/* May be filled in by the hypervisor so cannot end up in the BSS */
161u64 xRecoveryLogBuffer[32] __attribute__((__section__(".data")));
162
163struct SpCommArea xSpCommArea = {
164 .xDesc = 0xE2D7C3C2,
165 .xFormat = 1,
166};
167
168/* The LparMap data is now located at offset 0x6000 in head.S
169 * It was put there so that the HvReleaseData could address it
170 * with a 32-bit offset as required by the iSeries hypervisor
171 *
172 * The Naca has a pointer to the ItVpdAreas. The hypervisor finds
173 * the Naca via the HvReleaseData area. The HvReleaseData has the
174 * offset into the Naca of the pointer to the ItVpdAreas.
175 */
176struct ItVpdAreas itVpdAreas = {
177 .xSlicDesc = 0xc9a3e5c1, /* "ItVA" */
178 .xSlicSize = sizeof(struct ItVpdAreas),
179 .xSlicVpdEntries = ItVpdMaxEntries, /* # VPD array entries */
180 .xSlicDmaEntries = ItDmaMaxEntries, /* # DMA array entries */
181 .xSlicMaxLogicalProcs = NR_CPUS * 2, /* Max logical procs */
182 .xSlicMaxPhysicalProcs = maxPhysicalProcessors, /* Max physical procs */
183 .xSlicDmaToksOffset = offsetof(struct ItVpdAreas, xPlicDmaToks),
184 .xSlicVpdAdrsOffset = offsetof(struct ItVpdAreas, xSlicVpdAdrs),
185 .xSlicDmaLensOffset = offsetof(struct ItVpdAreas, xPlicDmaLens),
186 .xSlicVpdLensOffset = offsetof(struct ItVpdAreas, xSlicVpdLens),
187 .xSlicMaxSlotLabels = 0, /* max slot labels */
188 .xSlicMaxLpQueues = 1, /* max LP queues */
189 .xPlicDmaLens = { 0 }, /* DMA lengths */
190 .xPlicDmaToks = { 0 }, /* DMA tokens */
191 .xSlicVpdLens = { /* VPD lengths */
192 0,0,0, /* 0 - 2 */
193 sizeof(xItExtVpdPanel), /* 3 Extended VPD */
194 sizeof(struct paca_struct), /* 4 length of Paca */
195 0, /* 5 */
196 sizeof(struct ItIplParmsReal),/* 6 length of IPL parms */
197 26992, /* 7 length of MS VPD */
198 0, /* 8 */
199 sizeof(struct ItLpNaca),/* 9 length of LP Naca */
200 0, /* 10 */
201 256, /* 11 length of Recovery Log Buf */
202 sizeof(struct SpCommArea), /* 12 length of SP Comm Area */
203 0,0,0, /* 13 - 15 */
204 sizeof(struct IoHriProcessorVpd),/* 16 length of Proc Vpd */
205 0,0,0,0,0,0, /* 17 - 22 */
206 sizeof(struct hvlpevent_queue), /* 23 length of Lp Queue */
207 0,0 /* 24 - 25 */
208 },
209 .xSlicVpdAdrs = { /* VPD addresses */
210 0,0,0, /* 0 - 2 */
211 &xItExtVpdPanel, /* 3 Extended VPD */
212 &paca[0], /* 4 first Paca */
213 0, /* 5 */
214 &xItIplParmsReal, /* 6 IPL parms */
215 &xMsVpd, /* 7 MS Vpd */
216 0, /* 8 */
217 &itLpNaca, /* 9 LpNaca */
218 0, /* 10 */
219 &xRecoveryLogBuffer, /* 11 Recovery Log Buffer */
220 &xSpCommArea, /* 12 SP Comm Area */
221 0,0,0, /* 13 - 15 */
222 &xIoHriProcessorVpd, /* 16 Proc Vpd */
223 0,0,0,0,0,0, /* 17 - 22 */
224 &hvlpevent_queue, /* 23 Lp Queue */
225 0,0
226 }
227};
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index afadb6e4a6dc..bb5946b88b8b 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -16,17 +16,10 @@ obj-y += vdso32/ vdso64/
16 16
17obj-$(CONFIG_PPC_OF) += of_device.o 17obj-$(CONFIG_PPC_OF) += of_device.o
18 18
19pci-obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_irq.o \
20 iSeries_VpdInfo.o
21pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o 19pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o
22 20
23obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y) 21obj-$(CONFIG_PCI) += pci.o pci_iommu.o iomap.o $(pci-obj-y)
24 22
25obj-$(CONFIG_PPC_ISERIES) += HvCall.o HvLpConfig.o LparData.o \
26 iSeries_setup.o ItLpQueue.o hvCall.o \
27 mf.o HvLpEvent.o iSeries_proc.o iSeries_htab.o \
28 iSeries_iommu.o
29
30obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o 23obj-$(CONFIG_PPC_MULTIPLATFORM) += nvram.o i8259.o prom_init.o
31 24
32obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \ 25obj-$(CONFIG_PPC_PSERIES) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o \
@@ -45,14 +38,12 @@ obj-$(CONFIG_MODULES) += module.o ppc_ksyms.o
45obj-$(CONFIG_PPC_RTAS) += rtas.o rtas_pci.o 38obj-$(CONFIG_PPC_RTAS) += rtas.o rtas_pci.o
46obj-$(CONFIG_RTAS_PROC) += rtas-proc.o 39obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
47obj-$(CONFIG_SCANLOG) += scanlog.o 40obj-$(CONFIG_SCANLOG) += scanlog.o
48obj-$(CONFIG_VIOPATH) += viopath.o
49obj-$(CONFIG_LPARCFG) += lparcfg.o 41obj-$(CONFIG_LPARCFG) += lparcfg.o
50obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o 42obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
51obj-$(CONFIG_BOOTX_TEXT) += btext.o 43obj-$(CONFIG_BOOTX_TEXT) += btext.o
52obj-$(CONFIG_HVCS) += hvcserver.o 44obj-$(CONFIG_HVCS) += hvcserver.o
53 45
54vio-obj-$(CONFIG_PPC_PSERIES) += pSeries_vio.o 46vio-obj-$(CONFIG_PPC_PSERIES) += pSeries_vio.o
55vio-obj-$(CONFIG_PPC_ISERIES) += iSeries_vio.o
56obj-$(CONFIG_IBMVIO) += vio.o $(vio-obj-y) 47obj-$(CONFIG_IBMVIO) += vio.o $(vio-obj-y)
57obj-$(CONFIG_XICS) += xics.o 48obj-$(CONFIG_XICS) += xics.o
58obj-$(CONFIG_MPIC) += mpic.o 49obj-$(CONFIG_MPIC) += mpic.o
@@ -68,7 +59,6 @@ obj-$(CONFIG_U3_DART) += u3_iommu.o
68 59
69ifdef CONFIG_SMP 60ifdef CONFIG_SMP
70obj-$(CONFIG_PPC_PMAC) += pmac_smp.o smp-tbsync.o 61obj-$(CONFIG_PPC_PMAC) += pmac_smp.o smp-tbsync.o
71obj-$(CONFIG_PPC_ISERIES) += iSeries_smp.o
72obj-$(CONFIG_PPC_PSERIES) += pSeries_smp.o 62obj-$(CONFIG_PPC_PSERIES) += pSeries_smp.o
73obj-$(CONFIG_PPC_BPA) += pSeries_smp.o 63obj-$(CONFIG_PPC_BPA) += pSeries_smp.o
74obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o 64obj-$(CONFIG_PPC_MAPLE) += smp-tbsync.o
diff --git a/arch/ppc64/kernel/bpa_iommu.c b/arch/ppc64/kernel/bpa_iommu.c
index f33a7bccb0d7..0cc463f24539 100644
--- a/arch/ppc64/kernel/bpa_iommu.c
+++ b/arch/ppc64/kernel/bpa_iommu.c
@@ -39,8 +39,8 @@
39#include <asm/pmac_feature.h> 39#include <asm/pmac_feature.h>
40#include <asm/abs_addr.h> 40#include <asm/abs_addr.h>
41#include <asm/system.h> 41#include <asm/system.h>
42#include <asm/ppc-pci.h>
42 43
43#include "pci.h"
44#include "bpa_iommu.h" 44#include "bpa_iommu.h"
45 45
46static inline unsigned long 46static inline unsigned long
diff --git a/arch/ppc64/kernel/bpa_setup.c b/arch/ppc64/kernel/bpa_setup.c
index 57b3db66f458..9f915f4222b1 100644
--- a/arch/ppc64/kernel/bpa_setup.c
+++ b/arch/ppc64/kernel/bpa_setup.c
@@ -43,8 +43,8 @@
43#include <asm/time.h> 43#include <asm/time.h>
44#include <asm/nvram.h> 44#include <asm/nvram.h>
45#include <asm/cputable.h> 45#include <asm/cputable.h>
46#include <asm/ppc-pci.h>
46 47
47#include "pci.h"
48#include "bpa_iic.h" 48#include "bpa_iic.h"
49#include "bpa_iommu.h" 49#include "bpa_iommu.h"
50 50
diff --git a/arch/ppc64/kernel/eeh.c b/arch/ppc64/kernel/eeh.c
index ba93fd731222..035d1b14a207 100644
--- a/arch/ppc64/kernel/eeh.c
+++ b/arch/ppc64/kernel/eeh.c
@@ -33,7 +33,7 @@
33#include <asm/rtas.h> 33#include <asm/rtas.h>
34#include <asm/atomic.h> 34#include <asm/atomic.h>
35#include <asm/systemcfg.h> 35#include <asm/systemcfg.h>
36#include "pci.h" 36#include <asm/ppc-pci.h>
37 37
38#undef DEBUG 38#undef DEBUG
39 39
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index eb526c480b6c..db0cd3587627 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -1253,7 +1253,7 @@ unrecov_slb:
1253 * 1253 *
1254 * On iSeries, the hypervisor must fill in at least one entry before 1254 * On iSeries, the hypervisor must fill in at least one entry before
1255 * we get control (with relocate on). The address is give to the hv 1255 * we get control (with relocate on). The address is give to the hv
1256 * as a page number (see xLparMap in LparData.c), so this must be at a 1256 * as a page number (see xLparMap in lpardata.c), so this must be at a
1257 * fixed address (the linker can't compute (u64)&initial_stab >> 1257 * fixed address (the linker can't compute (u64)&initial_stab >>
1258 * PAGE_SHIFT). 1258 * PAGE_SHIFT).
1259 */ 1259 */
diff --git a/arch/ppc64/kernel/hvCall.S b/arch/ppc64/kernel/hvCall.S
deleted file mode 100644
index 4c699eab1b95..000000000000
--- a/arch/ppc64/kernel/hvCall.S
+++ /dev/null
@@ -1,98 +0,0 @@
1/*
2 * arch/ppc64/kernel/hvCall.S
3 *
4 *
5 * This file contains the code to perform calls to the
6 * iSeries LPAR hypervisor
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13
14#include <asm/ppc_asm.h>
15#include <asm/processor.h>
16
17 .text
18
19/*
20 * Hypervisor call
21 *
22 * Invoke the iSeries hypervisor via the System Call instruction
23 * Parameters are passed to this routine in registers r3 - r10
24 *
25 * r3 contains the HV function to be called
26 * r4-r10 contain the operands to the hypervisor function
27 *
28 */
29
30_GLOBAL(HvCall)
31_GLOBAL(HvCall0)
32_GLOBAL(HvCall1)
33_GLOBAL(HvCall2)
34_GLOBAL(HvCall3)
35_GLOBAL(HvCall4)
36_GLOBAL(HvCall5)
37_GLOBAL(HvCall6)
38_GLOBAL(HvCall7)
39
40
41 mfcr r0
42 std r0,-8(r1)
43 stdu r1,-(STACK_FRAME_OVERHEAD+16)(r1)
44
45 /* r0 = 0xffffffffffffffff indicates a hypervisor call */
46
47 li r0,-1
48
49 /* Invoke the hypervisor */
50
51 sc
52
53 ld r1,0(r1)
54 ld r0,-8(r1)
55 mtcrf 0xff,r0
56
57 /* return to caller, return value in r3 */
58
59 blr
60
61_GLOBAL(HvCall0Ret16)
62_GLOBAL(HvCall1Ret16)
63_GLOBAL(HvCall2Ret16)
64_GLOBAL(HvCall3Ret16)
65_GLOBAL(HvCall4Ret16)
66_GLOBAL(HvCall5Ret16)
67_GLOBAL(HvCall6Ret16)
68_GLOBAL(HvCall7Ret16)
69
70 mfcr r0
71 std r0,-8(r1)
72 std r31,-16(r1)
73 stdu r1,-(STACK_FRAME_OVERHEAD+32)(r1)
74
75 mr r31,r4
76 li r0,-1
77 mr r4,r5
78 mr r5,r6
79 mr r6,r7
80 mr r7,r8
81 mr r8,r9
82 mr r9,r10
83
84 sc
85
86 std r3,0(r31)
87 std r4,8(r31)
88
89 mr r3,r5
90
91 ld r1,0(r1)
92 ld r0,-8(r1)
93 mtcrf 0xff,r0
94 ld r31,-16(r1)
95
96 blr
97
98
diff --git a/arch/ppc64/kernel/iSeries_VpdInfo.c b/arch/ppc64/kernel/iSeries_VpdInfo.c
deleted file mode 100644
index 5d921792571f..000000000000
--- a/arch/ppc64/kernel/iSeries_VpdInfo.c
+++ /dev/null
@@ -1,268 +0,0 @@
1/*
2 * File iSeries_vpdInfo.c created by Allan Trautman on Fri Feb 2 2001.
3 *
4 * This code gets the card location of the hardware
5 * Copyright (C) 2001 <Allan H Trautman> <IBM Corp>
6 * Copyright (C) 2005 Stephen Rothwel, IBM Corp
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the:
20 * Free Software Foundation, Inc.,
21 * 59 Temple Place, Suite 330,
22 * Boston, MA 02111-1307 USA
23 *
24 * Change Activity:
25 * Created, Feb 2, 2001
26 * Ported to ppc64, August 20, 2001
27 * End Change Activity
28 */
29#include <linux/init.h>
30#include <linux/module.h>
31#include <linux/pci.h>
32#include <asm/types.h>
33#include <asm/resource.h>
34
35#include <asm/iSeries/HvCallPci.h>
36#include <asm/iSeries/HvTypes.h>
37#include <asm/iSeries/iSeries_pci.h>
38
39/*
40 * Size of Bus VPD data
41 */
42#define BUS_VPDSIZE 1024
43
44/*
45 * Bus Vpd Tags
46 */
47#define VpdEndOfAreaTag 0x79
48#define VpdIdStringTag 0x82
49#define VpdVendorAreaTag 0x84
50
51/*
52 * Mfg Area Tags
53 */
54#define VpdFruFrameId 0x4649 // "FI"
55#define VpdSlotMapFormat 0x4D46 // "MF"
56#define VpdSlotMap 0x534D // "SM"
57
58/*
59 * Structures of the areas
60 */
61struct MfgVpdAreaStruct {
62 u16 Tag;
63 u8 TagLength;
64 u8 AreaData1;
65 u8 AreaData2;
66};
67typedef struct MfgVpdAreaStruct MfgArea;
68#define MFG_ENTRY_SIZE 3
69
70struct SlotMapStruct {
71 u8 AgentId;
72 u8 SecondaryAgentId;
73 u8 PhbId;
74 char CardLocation[3];
75 char Parms[8];
76 char Reserved[2];
77};
78typedef struct SlotMapStruct SlotMap;
79#define SLOT_ENTRY_SIZE 16
80
81/*
82 * Parse the Slot Area
83 */
84static void __init iSeries_Parse_SlotArea(SlotMap *MapPtr, int MapLen,
85 HvAgentId agent, u8 *PhbId, char card[4])
86{
87 int SlotMapLen = MapLen;
88 SlotMap *SlotMapPtr = MapPtr;
89
90 /*
91 * Parse Slot label until we find the one requested
92 */
93 while (SlotMapLen > 0) {
94 if (SlotMapPtr->AgentId == agent) {
95 /*
96 * If Phb wasn't found, grab the entry first one found.
97 */
98 if (*PhbId == 0xff)
99 *PhbId = SlotMapPtr->PhbId;
100 /* Found it, extract the data. */
101 if (SlotMapPtr->PhbId == *PhbId) {
102 memcpy(card, &SlotMapPtr->CardLocation, 3);
103 card[3] = 0;
104 break;
105 }
106 }
107 /* Point to the next Slot */
108 SlotMapPtr = (SlotMap *)((char *)SlotMapPtr + SLOT_ENTRY_SIZE);
109 SlotMapLen -= SLOT_ENTRY_SIZE;
110 }
111}
112
113/*
114 * Parse the Mfg Area
115 */
116static void __init iSeries_Parse_MfgArea(u8 *AreaData, int AreaLen,
117 HvAgentId agent, u8 *PhbId,
118 u8 *frame, char card[4])
119{
120 MfgArea *MfgAreaPtr = (MfgArea *)AreaData;
121 int MfgAreaLen = AreaLen;
122 u16 SlotMapFmt = 0;
123
124 /* Parse Mfg Data */
125 while (MfgAreaLen > 0) {
126 int MfgTagLen = MfgAreaPtr->TagLength;
127 /* Frame ID (FI 4649020310 ) */
128 if (MfgAreaPtr->Tag == VpdFruFrameId) /* FI */
129 *frame = MfgAreaPtr->AreaData1;
130 /* Slot Map Format (MF 4D46020004 ) */
131 else if (MfgAreaPtr->Tag == VpdSlotMapFormat) /* MF */
132 SlotMapFmt = (MfgAreaPtr->AreaData1 * 256)
133 + MfgAreaPtr->AreaData2;
134 /* Slot Map (SM 534D90 */
135 else if (MfgAreaPtr->Tag == VpdSlotMap) { /* SM */
136 SlotMap *SlotMapPtr;
137
138 if (SlotMapFmt == 0x1004)
139 SlotMapPtr = (SlotMap *)((char *)MfgAreaPtr
140 + MFG_ENTRY_SIZE + 1);
141 else
142 SlotMapPtr = (SlotMap *)((char *)MfgAreaPtr
143 + MFG_ENTRY_SIZE);
144 iSeries_Parse_SlotArea(SlotMapPtr, MfgTagLen,
145 agent, PhbId, card);
146 }
147 /*
148 * Point to the next Mfg Area
149 * Use defined size, sizeof give wrong answer
150 */
151 MfgAreaPtr = (MfgArea *)((char *)MfgAreaPtr + MfgTagLen
152 + MFG_ENTRY_SIZE);
153 MfgAreaLen -= (MfgTagLen + MFG_ENTRY_SIZE);
154 }
155}
156
157/*
158 * Look for "BUS".. Data is not Null terminated.
159 * PHBID of 0xFF indicates PHB was not found in VPD Data.
160 */
161static int __init iSeries_Parse_PhbId(u8 *AreaPtr, int AreaLength)
162{
163 u8 *PhbPtr = AreaPtr;
164 int DataLen = AreaLength;
165 char PhbId = 0xFF;
166
167 while (DataLen > 0) {
168 if ((*PhbPtr == 'B') && (*(PhbPtr + 1) == 'U')
169 && (*(PhbPtr + 2) == 'S')) {
170 PhbPtr += 3;
171 while (*PhbPtr == ' ')
172 ++PhbPtr;
173 PhbId = (*PhbPtr & 0x0F);
174 break;
175 }
176 ++PhbPtr;
177 --DataLen;
178 }
179 return PhbId;
180}
181
182/*
183 * Parse out the VPD Areas
184 */
185static void __init iSeries_Parse_Vpd(u8 *VpdData, int VpdDataLen,
186 HvAgentId agent, u8 *frame, char card[4])
187{
188 u8 *TagPtr = VpdData;
189 int DataLen = VpdDataLen - 3;
190 u8 PhbId;
191
192 while ((*TagPtr != VpdEndOfAreaTag) && (DataLen > 0)) {
193 int AreaLen = *(TagPtr + 1) + (*(TagPtr + 2) * 256);
194 u8 *AreaData = TagPtr + 3;
195
196 if (*TagPtr == VpdIdStringTag)
197 PhbId = iSeries_Parse_PhbId(AreaData, AreaLen);
198 else if (*TagPtr == VpdVendorAreaTag)
199 iSeries_Parse_MfgArea(AreaData, AreaLen,
200 agent, &PhbId, frame, card);
201 /* Point to next Area. */
202 TagPtr = AreaData + AreaLen;
203 DataLen -= AreaLen;
204 }
205}
206
207static void __init iSeries_Get_Location_Code(u16 bus, HvAgentId agent,
208 u8 *frame, char card[4])
209{
210 int BusVpdLen = 0;
211 u8 *BusVpdPtr = kmalloc(BUS_VPDSIZE, GFP_KERNEL);
212
213 if (BusVpdPtr == NULL) {
214 printk("PCI: Bus VPD Buffer allocation failure.\n");
215 return;
216 }
217 BusVpdLen = HvCallPci_getBusVpd(bus, ISERIES_HV_ADDR(BusVpdPtr),
218 BUS_VPDSIZE);
219 if (BusVpdLen == 0) {
220 printk("PCI: Bus VPD Buffer zero length.\n");
221 goto out_free;
222 }
223 /* printk("PCI: BusVpdPtr: %p, %d\n",BusVpdPtr, BusVpdLen); */
224 /* Make sure this is what I think it is */
225 if (*BusVpdPtr != VpdIdStringTag) { /* 0x82 */
226 printk("PCI: Bus VPD Buffer missing starting tag.\n");
227 goto out_free;
228 }
229 iSeries_Parse_Vpd(BusVpdPtr, BusVpdLen, agent, frame, card);
230out_free:
231 kfree(BusVpdPtr);
232}
233
234/*
235 * Prints the device information.
236 * - Pass in pci_dev* pointer to the device.
237 * - Pass in the device count
238 *
239 * Format:
240 * PCI: Bus 0, Device 26, Vendor 0x12AE Frame 1, Card C10 Ethernet
241 * controller
242 */
243void __init iSeries_Device_Information(struct pci_dev *PciDev, int count)
244{
245 struct iSeries_Device_Node *DevNode = PciDev->sysdata;
246 u16 bus;
247 u8 frame;
248 char card[4];
249 HvSubBusNumber subbus;
250 HvAgentId agent;
251
252 if (DevNode == NULL) {
253 printk("%d. PCI: iSeries_Device_Information DevNode is NULL\n",
254 count);
255 return;
256 }
257
258 bus = ISERIES_BUS(DevNode);
259 subbus = ISERIES_SUBBUS(DevNode);
260 agent = ISERIES_PCI_AGENTID(ISERIES_GET_DEVICE_FROM_SUBBUS(subbus),
261 ISERIES_GET_FUNCTION_FROM_SUBBUS(subbus));
262 iSeries_Get_Location_Code(bus, agent, &frame, card);
263
264 printk("%d. PCI: Bus%3d, Device%3d, Vendor %04X Frame%3d, Card %4s ",
265 count, bus, PCI_SLOT(PciDev->devfn), PciDev->vendor,
266 frame, card);
267 printk("0x%04X\n", (int)(PciDev->class >> 8));
268}
diff --git a/arch/ppc64/kernel/iSeries_htab.c b/arch/ppc64/kernel/iSeries_htab.c
deleted file mode 100644
index 9a2be3abf349..000000000000
--- a/arch/ppc64/kernel/iSeries_htab.c
+++ /dev/null
@@ -1,255 +0,0 @@
1/*
2 * iSeries hashtable management.
3 * Derived from pSeries_htab.c
4 *
5 * SMP scalability work:
6 * Copyright (C) 2001 Anton Blanchard <anton@au.ibm.com>, IBM
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
12 */
13#include <asm/machdep.h>
14#include <asm/pgtable.h>
15#include <asm/mmu.h>
16#include <asm/mmu_context.h>
17#include <asm/iSeries/HvCallHpt.h>
18#include <asm/abs_addr.h>
19#include <linux/spinlock.h>
20
21static spinlock_t iSeries_hlocks[64] __cacheline_aligned_in_smp = { [0 ... 63] = SPIN_LOCK_UNLOCKED};
22
23/*
24 * Very primitive algorithm for picking up a lock
25 */
26static inline void iSeries_hlock(unsigned long slot)
27{
28 if (slot & 0x8)
29 slot = ~slot;
30 spin_lock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
31}
32
33static inline void iSeries_hunlock(unsigned long slot)
34{
35 if (slot & 0x8)
36 slot = ~slot;
37 spin_unlock(&iSeries_hlocks[(slot >> 4) & 0x3f]);
38}
39
40static long iSeries_hpte_insert(unsigned long hpte_group, unsigned long va,
41 unsigned long prpn, unsigned long vflags,
42 unsigned long rflags)
43{
44 unsigned long arpn;
45 long slot;
46 hpte_t lhpte;
47 int secondary = 0;
48
49 /*
50 * The hypervisor tries both primary and secondary.
51 * If we are being called to insert in the secondary,
52 * it means we have already tried both primary and secondary,
53 * so we return failure immediately.
54 */
55 if (vflags & HPTE_V_SECONDARY)
56 return -1;
57
58 iSeries_hlock(hpte_group);
59
60 slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
61 BUG_ON(lhpte.v & HPTE_V_VALID);
62
63 if (slot == -1) { /* No available entry found in either group */
64 iSeries_hunlock(hpte_group);
65 return -1;
66 }
67
68 if (slot < 0) { /* MSB set means secondary group */
69 vflags |= HPTE_V_VALID;
70 secondary = 1;
71 slot &= 0x7fffffffffffffff;
72 }
73
74 arpn = phys_to_abs(prpn << PAGE_SHIFT) >> PAGE_SHIFT;
75
76 lhpte.v = (va >> 23) << HPTE_V_AVPN_SHIFT | vflags | HPTE_V_VALID;
77 lhpte.r = (arpn << HPTE_R_RPN_SHIFT) | rflags;
78
79 /* Now fill in the actual HPTE */
80 HvCallHpt_addValidate(slot, secondary, &lhpte);
81
82 iSeries_hunlock(hpte_group);
83
84 return (secondary << 3) | (slot & 7);
85}
86
87long iSeries_hpte_bolt_or_insert(unsigned long hpte_group,
88 unsigned long va, unsigned long prpn, unsigned long vflags,
89 unsigned long rflags)
90{
91 long slot;
92 hpte_t lhpte;
93
94 slot = HvCallHpt_findValid(&lhpte, va >> PAGE_SHIFT);
95
96 if (lhpte.v & HPTE_V_VALID) {
97 /* Bolt the existing HPTE */
98 HvCallHpt_setSwBits(slot, 0x10, 0);
99 HvCallHpt_setPp(slot, PP_RWXX);
100 return 0;
101 }
102
103 return iSeries_hpte_insert(hpte_group, va, prpn, vflags, rflags);
104}
105
106static unsigned long iSeries_hpte_getword0(unsigned long slot)
107{
108 hpte_t hpte;
109
110 HvCallHpt_get(&hpte, slot);
111 return hpte.v;
112}
113
114static long iSeries_hpte_remove(unsigned long hpte_group)
115{
116 unsigned long slot_offset;
117 int i;
118 unsigned long hpte_v;
119
120 /* Pick a random slot to start at */
121 slot_offset = mftb() & 0x7;
122
123 iSeries_hlock(hpte_group);
124
125 for (i = 0; i < HPTES_PER_GROUP; i++) {
126 hpte_v = iSeries_hpte_getword0(hpte_group + slot_offset);
127
128 if (! (hpte_v & HPTE_V_BOLTED)) {
129 HvCallHpt_invalidateSetSwBitsGet(hpte_group +
130 slot_offset, 0, 0);
131 iSeries_hunlock(hpte_group);
132 return i;
133 }
134
135 slot_offset++;
136 slot_offset &= 0x7;
137 }
138
139 iSeries_hunlock(hpte_group);
140
141 return -1;
142}
143
144/*
145 * The HyperVisor expects the "flags" argument in this form:
146 * bits 0..59 : reserved
147 * bit 60 : N
148 * bits 61..63 : PP2,PP1,PP0
149 */
150static long iSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
151 unsigned long va, int large, int local)
152{
153 hpte_t hpte;
154 unsigned long avpn = va >> 23;
155
156 iSeries_hlock(slot);
157
158 HvCallHpt_get(&hpte, slot);
159 if ((HPTE_V_AVPN_VAL(hpte.v) == avpn) && (hpte.v & HPTE_V_VALID)) {
160 /*
161 * Hypervisor expects bits as NPPP, which is
162 * different from how they are mapped in our PP.
163 */
164 HvCallHpt_setPp(slot, (newpp & 0x3) | ((newpp & 0x4) << 1));
165 iSeries_hunlock(slot);
166 return 0;
167 }
168 iSeries_hunlock(slot);
169
170 return -1;
171}
172
173/*
174 * Functions used to find the PTE for a particular virtual address.
175 * Only used during boot when bolting pages.
176 *
177 * Input : vpn : virtual page number
178 * Output: PTE index within the page table of the entry
179 * -1 on failure
180 */
181static long iSeries_hpte_find(unsigned long vpn)
182{
183 hpte_t hpte;
184 long slot;
185
186 /*
187 * The HvCallHpt_findValid interface is as follows:
188 * 0xffffffffffffffff : No entry found.
189 * 0x00000000xxxxxxxx : Entry found in primary group, slot x
190 * 0x80000000xxxxxxxx : Entry found in secondary group, slot x
191 */
192 slot = HvCallHpt_findValid(&hpte, vpn);
193 if (hpte.v & HPTE_V_VALID) {
194 if (slot < 0) {
195 slot &= 0x7fffffffffffffff;
196 slot = -slot;
197 }
198 } else
199 slot = -1;
200 return slot;
201}
202
203/*
204 * Update the page protection bits. Intended to be used to create
205 * guard pages for kernel data structures on pages which are bolted
206 * in the HPT. Assumes pages being operated on will not be stolen.
207 * Does not work on large pages.
208 *
209 * No need to lock here because we should be the only user.
210 */
211static void iSeries_hpte_updateboltedpp(unsigned long newpp, unsigned long ea)
212{
213 unsigned long vsid,va,vpn;
214 long slot;
215
216 vsid = get_kernel_vsid(ea);
217 va = (vsid << 28) | (ea & 0x0fffffff);
218 vpn = va >> PAGE_SHIFT;
219 slot = iSeries_hpte_find(vpn);
220 if (slot == -1)
221 panic("updateboltedpp: Could not find page to bolt\n");
222 HvCallHpt_setPp(slot, newpp);
223}
224
225static void iSeries_hpte_invalidate(unsigned long slot, unsigned long va,
226 int large, int local)
227{
228 unsigned long hpte_v;
229 unsigned long avpn = va >> 23;
230 unsigned long flags;
231
232 local_irq_save(flags);
233
234 iSeries_hlock(slot);
235
236 hpte_v = iSeries_hpte_getword0(slot);
237
238 if ((HPTE_V_AVPN_VAL(hpte_v) == avpn) && (hpte_v & HPTE_V_VALID))
239 HvCallHpt_invalidateSetSwBitsGet(slot, 0, 0);
240
241 iSeries_hunlock(slot);
242
243 local_irq_restore(flags);
244}
245
246void hpte_init_iSeries(void)
247{
248 ppc_md.hpte_invalidate = iSeries_hpte_invalidate;
249 ppc_md.hpte_updatepp = iSeries_hpte_updatepp;
250 ppc_md.hpte_updateboltedpp = iSeries_hpte_updateboltedpp;
251 ppc_md.hpte_insert = iSeries_hpte_insert;
252 ppc_md.hpte_remove = iSeries_hpte_remove;
253
254 htab_finish_init();
255}
diff --git a/arch/ppc64/kernel/iSeries_iommu.c b/arch/ppc64/kernel/iSeries_iommu.c
deleted file mode 100644
index 287db32d9867..000000000000
--- a/arch/ppc64/kernel/iSeries_iommu.c
+++ /dev/null
@@ -1,177 +0,0 @@
1/*
2 * arch/ppc64/kernel/iSeries_iommu.c
3 *
4 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen, IBM Corporation
5 *
6 * Rewrite, cleanup:
7 *
8 * Copyright (C) 2004 Olof Johansson <olof@austin.ibm.com>, IBM Corporation
9 *
10 * Dynamic DMA mapping support, iSeries-specific parts.
11 *
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/types.h>
29#include <linux/dma-mapping.h>
30#include <linux/list.h>
31
32#include <asm/iommu.h>
33#include <asm/tce.h>
34#include <asm/machdep.h>
35#include <asm/iSeries/HvCallXm.h>
36#include <asm/iSeries/iSeries_pci.h>
37
38extern struct list_head iSeries_Global_Device_List;
39
40
41static void tce_build_iSeries(struct iommu_table *tbl, long index, long npages,
42 unsigned long uaddr, enum dma_data_direction direction)
43{
44 u64 rc;
45 union tce_entry tce;
46
47 while (npages--) {
48 tce.te_word = 0;
49 tce.te_bits.tb_rpn = virt_to_abs(uaddr) >> PAGE_SHIFT;
50
51 if (tbl->it_type == TCE_VB) {
52 /* Virtual Bus */
53 tce.te_bits.tb_valid = 1;
54 tce.te_bits.tb_allio = 1;
55 if (direction != DMA_TO_DEVICE)
56 tce.te_bits.tb_rdwr = 1;
57 } else {
58 /* PCI Bus */
59 tce.te_bits.tb_rdwr = 1; /* Read allowed */
60 if (direction != DMA_TO_DEVICE)
61 tce.te_bits.tb_pciwr = 1;
62 }
63
64 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index,
65 tce.te_word);
66 if (rc)
67 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
68 rc);
69 index++;
70 uaddr += PAGE_SIZE;
71 }
72}
73
74static void tce_free_iSeries(struct iommu_table *tbl, long index, long npages)
75{
76 u64 rc;
77
78 while (npages--) {
79 rc = HvCallXm_setTce((u64)tbl->it_index, (u64)index, 0);
80 if (rc)
81 panic("PCI_DMA: HvCallXm_setTce failed, Rc: 0x%lx\n",
82 rc);
83 index++;
84 }
85}
86
87#ifdef CONFIG_PCI
88/*
89 * This function compares the known tables to find an iommu_table
90 * that has already been built for hardware TCEs.
91 */
92static struct iommu_table *iommu_table_find(struct iommu_table * tbl)
93{
94 struct iSeries_Device_Node *dp;
95
96 list_for_each_entry(dp, &iSeries_Global_Device_List, Device_List) {
97 if ((dp->iommu_table != NULL) &&
98 (dp->iommu_table->it_type == TCE_PCI) &&
99 (dp->iommu_table->it_offset == tbl->it_offset) &&
100 (dp->iommu_table->it_index == tbl->it_index) &&
101 (dp->iommu_table->it_size == tbl->it_size))
102 return dp->iommu_table;
103 }
104 return NULL;
105}
106
107/*
108 * Call Hv with the architected data structure to get TCE table info.
109 * info. Put the returned data into the Linux representation of the
110 * TCE table data.
111 * The Hardware Tce table comes in three flavors.
112 * 1. TCE table shared between Buses.
113 * 2. TCE table per Bus.
114 * 3. TCE Table per IOA.
115 */
116static void iommu_table_getparms(struct iSeries_Device_Node* dn,
117 struct iommu_table* tbl)
118{
119 struct iommu_table_cb *parms;
120
121 parms = kmalloc(sizeof(*parms), GFP_KERNEL);
122 if (parms == NULL)
123 panic("PCI_DMA: TCE Table Allocation failed.");
124
125 memset(parms, 0, sizeof(*parms));
126
127 parms->itc_busno = ISERIES_BUS(dn);
128 parms->itc_slotno = dn->LogicalSlot;
129 parms->itc_virtbus = 0;
130
131 HvCallXm_getTceTableParms(ISERIES_HV_ADDR(parms));
132
133 if (parms->itc_size == 0)
134 panic("PCI_DMA: parms->size is zero, parms is 0x%p", parms);
135
136 /* itc_size is in pages worth of table, it_size is in # of entries */
137 tbl->it_size = (parms->itc_size * PAGE_SIZE) / sizeof(union tce_entry);
138 tbl->it_busno = parms->itc_busno;
139 tbl->it_offset = parms->itc_offset;
140 tbl->it_index = parms->itc_index;
141 tbl->it_blocksize = 1;
142 tbl->it_type = TCE_PCI;
143
144 kfree(parms);
145}
146
147
148void iommu_devnode_init_iSeries(struct iSeries_Device_Node *dn)
149{
150 struct iommu_table *tbl;
151
152 tbl = kmalloc(sizeof(struct iommu_table), GFP_KERNEL);
153
154 iommu_table_getparms(dn, tbl);
155
156 /* Look for existing tce table */
157 dn->iommu_table = iommu_table_find(tbl);
158 if (dn->iommu_table == NULL)
159 dn->iommu_table = iommu_init_table(tbl);
160 else
161 kfree(tbl);
162}
163#endif
164
165static void iommu_dev_setup_iSeries(struct pci_dev *dev) { }
166static void iommu_bus_setup_iSeries(struct pci_bus *bus) { }
167
168void iommu_init_early_iSeries(void)
169{
170 ppc_md.tce_build = tce_build_iSeries;
171 ppc_md.tce_free = tce_free_iSeries;
172
173 ppc_md.iommu_dev_setup = iommu_dev_setup_iSeries;
174 ppc_md.iommu_bus_setup = iommu_bus_setup_iSeries;
175
176 pci_iommu_init();
177}
diff --git a/arch/ppc64/kernel/iSeries_irq.c b/arch/ppc64/kernel/iSeries_irq.c
deleted file mode 100644
index 0170682a8ca5..000000000000
--- a/arch/ppc64/kernel/iSeries_irq.c
+++ /dev/null
@@ -1,366 +0,0 @@
1/*
2 * This module supports the iSeries PCI bus interrupt handling
3 * Copyright (C) 20yy <Robert L Holtorf> <IBM Corp>
4 * Copyright (C) 2004-2005 IBM Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the:
18 * Free Software Foundation, Inc.,
19 * 59 Temple Place, Suite 330,
20 * Boston, MA 02111-1307 USA
21 *
22 * Change Activity:
23 * Created, December 13, 2000 by Wayne Holm
24 * End Change Activity
25 */
26#include <linux/config.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/threads.h>
30#include <linux/smp.h>
31#include <linux/param.h>
32#include <linux/string.h>
33#include <linux/bootmem.h>
34#include <linux/ide.h>
35#include <linux/irq.h>
36#include <linux/spinlock.h>
37
38#include <asm/ppcdebug.h>
39#include <asm/iSeries/HvTypes.h>
40#include <asm/iSeries/HvLpEvent.h>
41#include <asm/iSeries/HvCallPci.h>
42#include <asm/iSeries/HvCallXm.h>
43#include <asm/iSeries/iSeries_irq.h>
44
45/* This maps virtual irq numbers to real irqs */
46unsigned int virt_irq_to_real_map[NR_IRQS];
47
48/* The next available virtual irq number */
49/* Note: the pcnet32 driver assumes irq numbers < 2 aren't valid. :( */
50static int next_virtual_irq = 2;
51
52static long Pci_Interrupt_Count;
53static long Pci_Event_Count;
54
55enum XmPciLpEvent_Subtype {
56 XmPciLpEvent_BusCreated = 0, // PHB has been created
57 XmPciLpEvent_BusError = 1, // PHB has failed
58 XmPciLpEvent_BusFailed = 2, // Msg to Secondary, Primary failed bus
59 XmPciLpEvent_NodeFailed = 4, // Multi-adapter bridge has failed
60 XmPciLpEvent_NodeRecovered = 5, // Multi-adapter bridge has recovered
61 XmPciLpEvent_BusRecovered = 12, // PHB has been recovered
62 XmPciLpEvent_UnQuiesceBus = 18, // Secondary bus unqiescing
63 XmPciLpEvent_BridgeError = 21, // Bridge Error
64 XmPciLpEvent_SlotInterrupt = 22 // Slot interrupt
65};
66
67struct XmPciLpEvent_BusInterrupt {
68 HvBusNumber busNumber;
69 HvSubBusNumber subBusNumber;
70};
71
72struct XmPciLpEvent_NodeInterrupt {
73 HvBusNumber busNumber;
74 HvSubBusNumber subBusNumber;
75 HvAgentId deviceId;
76};
77
78struct XmPciLpEvent {
79 struct HvLpEvent hvLpEvent;
80
81 union {
82 u64 alignData; // Align on an 8-byte boundary
83
84 struct {
85 u32 fisr;
86 HvBusNumber busNumber;
87 HvSubBusNumber subBusNumber;
88 HvAgentId deviceId;
89 } slotInterrupt;
90
91 struct XmPciLpEvent_BusInterrupt busFailed;
92 struct XmPciLpEvent_BusInterrupt busRecovered;
93 struct XmPciLpEvent_BusInterrupt busCreated;
94
95 struct XmPciLpEvent_NodeInterrupt nodeFailed;
96 struct XmPciLpEvent_NodeInterrupt nodeRecovered;
97
98 } eventData;
99
100};
101
102static void intReceived(struct XmPciLpEvent *eventParm,
103 struct pt_regs *regsParm)
104{
105 int irq;
106
107 ++Pci_Interrupt_Count;
108
109 switch (eventParm->hvLpEvent.xSubtype) {
110 case XmPciLpEvent_SlotInterrupt:
111 irq = eventParm->hvLpEvent.xCorrelationToken;
112 /* Dispatch the interrupt handlers for this irq */
113 ppc_irq_dispatch_handler(regsParm, irq);
114 HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber,
115 eventParm->eventData.slotInterrupt.subBusNumber,
116 eventParm->eventData.slotInterrupt.deviceId);
117 break;
118 /* Ignore error recovery events for now */
119 case XmPciLpEvent_BusCreated:
120 printk(KERN_INFO "intReceived: system bus %d created\n",
121 eventParm->eventData.busCreated.busNumber);
122 break;
123 case XmPciLpEvent_BusError:
124 case XmPciLpEvent_BusFailed:
125 printk(KERN_INFO "intReceived: system bus %d failed\n",
126 eventParm->eventData.busFailed.busNumber);
127 break;
128 case XmPciLpEvent_BusRecovered:
129 case XmPciLpEvent_UnQuiesceBus:
130 printk(KERN_INFO "intReceived: system bus %d recovered\n",
131 eventParm->eventData.busRecovered.busNumber);
132 break;
133 case XmPciLpEvent_NodeFailed:
134 case XmPciLpEvent_BridgeError:
135 printk(KERN_INFO
136 "intReceived: multi-adapter bridge %d/%d/%d failed\n",
137 eventParm->eventData.nodeFailed.busNumber,
138 eventParm->eventData.nodeFailed.subBusNumber,
139 eventParm->eventData.nodeFailed.deviceId);
140 break;
141 case XmPciLpEvent_NodeRecovered:
142 printk(KERN_INFO
143 "intReceived: multi-adapter bridge %d/%d/%d recovered\n",
144 eventParm->eventData.nodeRecovered.busNumber,
145 eventParm->eventData.nodeRecovered.subBusNumber,
146 eventParm->eventData.nodeRecovered.deviceId);
147 break;
148 default:
149 printk(KERN_ERR
150 "intReceived: unrecognized event subtype 0x%x\n",
151 eventParm->hvLpEvent.xSubtype);
152 break;
153 }
154}
155
156static void XmPciLpEvent_handler(struct HvLpEvent *eventParm,
157 struct pt_regs *regsParm)
158{
159#ifdef CONFIG_PCI
160 ++Pci_Event_Count;
161
162 if (eventParm && (eventParm->xType == HvLpEvent_Type_PciIo)) {
163 switch (eventParm->xFlags.xFunction) {
164 case HvLpEvent_Function_Int:
165 intReceived((struct XmPciLpEvent *)eventParm, regsParm);
166 break;
167 case HvLpEvent_Function_Ack:
168 printk(KERN_ERR
169 "XmPciLpEvent_handler: unexpected ack received\n");
170 break;
171 default:
172 printk(KERN_ERR
173 "XmPciLpEvent_handler: unexpected event function %d\n",
174 (int)eventParm->xFlags.xFunction);
175 break;
176 }
177 } else if (eventParm)
178 printk(KERN_ERR
179 "XmPciLpEvent_handler: Unrecognized PCI event type 0x%x\n",
180 (int)eventParm->xType);
181 else
182 printk(KERN_ERR "XmPciLpEvent_handler: NULL event received\n");
183#endif
184}
185
186/*
187 * This is called by init_IRQ. set in ppc_md.init_IRQ by iSeries_setup.c
188 * It must be called before the bus walk.
189 */
190void __init iSeries_init_IRQ(void)
191{
192 /* Register PCI event handler and open an event path */
193 int xRc;
194
195 xRc = HvLpEvent_registerHandler(HvLpEvent_Type_PciIo,
196 &XmPciLpEvent_handler);
197 if (xRc == 0) {
198 xRc = HvLpEvent_openPath(HvLpEvent_Type_PciIo, 0);
199 if (xRc != 0)
200 printk(KERN_ERR "iSeries_init_IRQ: open event path "
201 "failed with rc 0x%x\n", xRc);
202 } else
203 printk(KERN_ERR "iSeries_init_IRQ: register handler "
204 "failed with rc 0x%x\n", xRc);
205}
206
207#define REAL_IRQ_TO_BUS(irq) ((((irq) >> 6) & 0xff) + 1)
208#define REAL_IRQ_TO_IDSEL(irq) ((((irq) >> 3) & 7) + 1)
209#define REAL_IRQ_TO_FUNC(irq) ((irq) & 7)
210
211/*
212 * This will be called by device drivers (via enable_IRQ)
213 * to enable INTA in the bridge interrupt status register.
214 */
215static void iSeries_enable_IRQ(unsigned int irq)
216{
217 u32 bus, deviceId, function, mask;
218 const u32 subBus = 0;
219 unsigned int rirq = virt_irq_to_real_map[irq];
220
221 /* The IRQ has already been locked by the caller */
222 bus = REAL_IRQ_TO_BUS(rirq);
223 function = REAL_IRQ_TO_FUNC(rirq);
224 deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
225
226 /* Unmask secondary INTA */
227 mask = 0x80000000;
228 HvCallPci_unmaskInterrupts(bus, subBus, deviceId, mask);
229 PPCDBG(PPCDBG_BUSWALK, "iSeries_enable_IRQ 0x%02X.%02X.%02X 0x%04X\n",
230 bus, subBus, deviceId, irq);
231}
232
233/* This is called by iSeries_activate_IRQs */
234static unsigned int iSeries_startup_IRQ(unsigned int irq)
235{
236 u32 bus, deviceId, function, mask;
237 const u32 subBus = 0;
238 unsigned int rirq = virt_irq_to_real_map[irq];
239
240 bus = REAL_IRQ_TO_BUS(rirq);
241 function = REAL_IRQ_TO_FUNC(rirq);
242 deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
243
244 /* Link the IRQ number to the bridge */
245 HvCallXm_connectBusUnit(bus, subBus, deviceId, irq);
246
247 /* Unmask bridge interrupts in the FISR */
248 mask = 0x01010000 << function;
249 HvCallPci_unmaskFisr(bus, subBus, deviceId, mask);
250 iSeries_enable_IRQ(irq);
251 return 0;
252}
253
254/*
255 * This is called out of iSeries_fixup to activate interrupt
256 * generation for usable slots
257 */
258void __init iSeries_activate_IRQs()
259{
260 int irq;
261 unsigned long flags;
262
263 for_each_irq (irq) {
264 irq_desc_t *desc = get_irq_desc(irq);
265
266 if (desc && desc->handler && desc->handler->startup) {
267 spin_lock_irqsave(&desc->lock, flags);
268 desc->handler->startup(irq);
269 spin_unlock_irqrestore(&desc->lock, flags);
270 }
271 }
272}
273
274/* this is not called anywhere currently */
275static void iSeries_shutdown_IRQ(unsigned int irq)
276{
277 u32 bus, deviceId, function, mask;
278 const u32 subBus = 0;
279 unsigned int rirq = virt_irq_to_real_map[irq];
280
281 /* irq should be locked by the caller */
282 bus = REAL_IRQ_TO_BUS(rirq);
283 function = REAL_IRQ_TO_FUNC(rirq);
284 deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
285
286 /* Invalidate the IRQ number in the bridge */
287 HvCallXm_connectBusUnit(bus, subBus, deviceId, 0);
288
289 /* Mask bridge interrupts in the FISR */
290 mask = 0x01010000 << function;
291 HvCallPci_maskFisr(bus, subBus, deviceId, mask);
292}
293
294/*
295 * This will be called by device drivers (via disable_IRQ)
296 * to disable INTA in the bridge interrupt status register.
297 */
298static void iSeries_disable_IRQ(unsigned int irq)
299{
300 u32 bus, deviceId, function, mask;
301 const u32 subBus = 0;
302 unsigned int rirq = virt_irq_to_real_map[irq];
303
304 /* The IRQ has already been locked by the caller */
305 bus = REAL_IRQ_TO_BUS(rirq);
306 function = REAL_IRQ_TO_FUNC(rirq);
307 deviceId = (REAL_IRQ_TO_IDSEL(rirq) << 4) + function;
308
309 /* Mask secondary INTA */
310 mask = 0x80000000;
311 HvCallPci_maskInterrupts(bus, subBus, deviceId, mask);
312 PPCDBG(PPCDBG_BUSWALK, "iSeries_disable_IRQ 0x%02X.%02X.%02X 0x%04X\n",
313 bus, subBus, deviceId, irq);
314}
315
316/*
317 * Need to define this so ppc_irq_dispatch_handler will NOT call
318 * enable_IRQ at the end of interrupt handling. However, this does
319 * nothing because there is not enough information provided to do
320 * the EOI HvCall. This is done by XmPciLpEvent.c
321 */
322static void iSeries_end_IRQ(unsigned int irq)
323{
324}
325
326static hw_irq_controller iSeries_IRQ_handler = {
327 .typename = "iSeries irq controller",
328 .startup = iSeries_startup_IRQ,
329 .shutdown = iSeries_shutdown_IRQ,
330 .enable = iSeries_enable_IRQ,
331 .disable = iSeries_disable_IRQ,
332 .end = iSeries_end_IRQ
333};
334
335/*
336 * This is called out of iSeries_scan_slot to allocate an IRQ for an EADS slot
337 * It calculates the irq value for the slot.
338 * Note that subBusNumber is always 0 (at the moment at least).
339 */
340int __init iSeries_allocate_IRQ(HvBusNumber busNumber,
341 HvSubBusNumber subBusNumber, HvAgentId deviceId)
342{
343 unsigned int realirq, virtirq;
344 u8 idsel = (deviceId >> 4);
345 u8 function = deviceId & 7;
346
347 virtirq = next_virtual_irq++;
348 realirq = ((busNumber - 1) << 6) + ((idsel - 1) << 3) + function;
349 virt_irq_to_real_map[virtirq] = realirq;
350
351 irq_desc[virtirq].handler = &iSeries_IRQ_handler;
352 return virtirq;
353}
354
355int virt_irq_create_mapping(unsigned int real_irq)
356{
357 BUG(); /* Don't call this on iSeries, yet */
358
359 return 0;
360}
361
362void virt_irq_init(void)
363{
364 return;
365}
366
diff --git a/arch/ppc64/kernel/iSeries_pci.c b/arch/ppc64/kernel/iSeries_pci.c
deleted file mode 100644
index fbc273c32bcc..000000000000
--- a/arch/ppc64/kernel/iSeries_pci.c
+++ /dev/null
@@ -1,905 +0,0 @@
1/*
2 * iSeries_pci.c
3 *
4 * Copyright (C) 2001 Allan Trautman, IBM Corporation
5 *
6 * iSeries specific routines for PCI.
7 *
8 * Based on code from pci.c and iSeries_pci.c 32bit
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License
21 * along with this program; if not, write to the Free Software
22 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
23 */
24#include <linux/kernel.h>
25#include <linux/list.h>
26#include <linux/string.h>
27#include <linux/init.h>
28#include <linux/module.h>
29#include <linux/ide.h>
30#include <linux/pci.h>
31
32#include <asm/io.h>
33#include <asm/irq.h>
34#include <asm/prom.h>
35#include <asm/machdep.h>
36#include <asm/pci-bridge.h>
37#include <asm/ppcdebug.h>
38#include <asm/iommu.h>
39
40#include <asm/iSeries/HvCallPci.h>
41#include <asm/iSeries/HvCallXm.h>
42#include <asm/iSeries/iSeries_irq.h>
43#include <asm/iSeries/iSeries_pci.h>
44#include <asm/iSeries/mf.h>
45
46#include "pci.h"
47
48extern unsigned long io_page_mask;
49
50/*
51 * Forward declares of prototypes.
52 */
53static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn);
54static void scan_PHB_slots(struct pci_controller *Phb);
55static void scan_EADS_bridge(HvBusNumber Bus, HvSubBusNumber SubBus, int IdSel);
56static int scan_bridge_slot(HvBusNumber Bus, struct HvCallPci_BridgeInfo *Info);
57
58LIST_HEAD(iSeries_Global_Device_List);
59
60static int DeviceCount;
61
62/* Counters and control flags. */
63static long Pci_Io_Read_Count;
64static long Pci_Io_Write_Count;
65#if 0
66static long Pci_Cfg_Read_Count;
67static long Pci_Cfg_Write_Count;
68#endif
69static long Pci_Error_Count;
70
71static int Pci_Retry_Max = 3; /* Only retry 3 times */
72static int Pci_Error_Flag = 1; /* Set Retry Error on. */
73
74static struct pci_ops iSeries_pci_ops;
75
76/*
77 * Table defines
78 * Each Entry size is 4 MB * 1024 Entries = 4GB I/O address space.
79 */
80#define IOMM_TABLE_MAX_ENTRIES 1024
81#define IOMM_TABLE_ENTRY_SIZE 0x0000000000400000UL
82#define BASE_IO_MEMORY 0xE000000000000000UL
83
84static unsigned long max_io_memory = 0xE000000000000000UL;
85static long current_iomm_table_entry;
86
87/*
88 * Lookup Tables.
89 */
90static struct iSeries_Device_Node **iomm_table;
91static u8 *iobar_table;
92
93/*
94 * Static and Global variables
95 */
96static char *pci_io_text = "iSeries PCI I/O";
97static DEFINE_SPINLOCK(iomm_table_lock);
98
99/*
100 * iomm_table_initialize
101 *
102 * Allocates and initalizes the Address Translation Table and Bar
103 * Tables to get them ready for use. Must be called before any
104 * I/O space is handed out to the device BARs.
105 */
106static void iomm_table_initialize(void)
107{
108 spin_lock(&iomm_table_lock);
109 iomm_table = kmalloc(sizeof(*iomm_table) * IOMM_TABLE_MAX_ENTRIES,
110 GFP_KERNEL);
111 iobar_table = kmalloc(sizeof(*iobar_table) * IOMM_TABLE_MAX_ENTRIES,
112 GFP_KERNEL);
113 spin_unlock(&iomm_table_lock);
114 if ((iomm_table == NULL) || (iobar_table == NULL))
115 panic("PCI: I/O tables allocation failed.\n");
116}
117
118/*
119 * iomm_table_allocate_entry
120 *
121 * Adds pci_dev entry in address translation table
122 *
123 * - Allocates the number of entries required in table base on BAR
124 * size.
125 * - Allocates starting at BASE_IO_MEMORY and increases.
126 * - The size is round up to be a multiple of entry size.
127 * - CurrentIndex is incremented to keep track of the last entry.
128 * - Builds the resource entry for allocated BARs.
129 */
130static void iomm_table_allocate_entry(struct pci_dev *dev, int bar_num)
131{
132 struct resource *bar_res = &dev->resource[bar_num];
133 long bar_size = pci_resource_len(dev, bar_num);
134
135 /*
136 * No space to allocate, quick exit, skip Allocation.
137 */
138 if (bar_size == 0)
139 return;
140 /*
141 * Set Resource values.
142 */
143 spin_lock(&iomm_table_lock);
144 bar_res->name = pci_io_text;
145 bar_res->start =
146 IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry;
147 bar_res->start += BASE_IO_MEMORY;
148 bar_res->end = bar_res->start + bar_size - 1;
149 /*
150 * Allocate the number of table entries needed for BAR.
151 */
152 while (bar_size > 0 ) {
153 iomm_table[current_iomm_table_entry] = dev->sysdata;
154 iobar_table[current_iomm_table_entry] = bar_num;
155 bar_size -= IOMM_TABLE_ENTRY_SIZE;
156 ++current_iomm_table_entry;
157 }
158 max_io_memory = BASE_IO_MEMORY +
159 (IOMM_TABLE_ENTRY_SIZE * current_iomm_table_entry);
160 spin_unlock(&iomm_table_lock);
161}
162
163/*
164 * allocate_device_bars
165 *
166 * - Allocates ALL pci_dev BAR's and updates the resources with the
167 * BAR value. BARS with zero length will have the resources
168 * The HvCallPci_getBarParms is used to get the size of the BAR
169 * space. It calls iomm_table_allocate_entry to allocate
170 * each entry.
171 * - Loops through The Bar resources(0 - 5) including the ROM
172 * is resource(6).
173 */
174static void allocate_device_bars(struct pci_dev *dev)
175{
176 struct resource *bar_res;
177 int bar_num;
178
179 for (bar_num = 0; bar_num <= PCI_ROM_RESOURCE; ++bar_num) {
180 bar_res = &dev->resource[bar_num];
181 iomm_table_allocate_entry(dev, bar_num);
182 }
183}
184
185/*
186 * Log error information to system console.
187 * Filter out the device not there errors.
188 * PCI: EADs Connect Failed 0x18.58.10 Rc: 0x00xx
189 * PCI: Read Vendor Failed 0x18.58.10 Rc: 0x00xx
190 * PCI: Connect Bus Unit Failed 0x18.58.10 Rc: 0x00xx
191 */
192static void pci_Log_Error(char *Error_Text, int Bus, int SubBus,
193 int AgentId, int HvRc)
194{
195 if (HvRc == 0x0302)
196 return;
197 printk(KERN_ERR "PCI: %s Failed: 0x%02X.%02X.%02X Rc: 0x%04X",
198 Error_Text, Bus, SubBus, AgentId, HvRc);
199}
200
201/*
202 * build_device_node(u16 Bus, int SubBus, u8 DevFn)
203 */
204static struct iSeries_Device_Node *build_device_node(HvBusNumber Bus,
205 HvSubBusNumber SubBus, int AgentId, int Function)
206{
207 struct iSeries_Device_Node *node;
208
209 PPCDBG(PPCDBG_BUSWALK,
210 "-build_device_node 0x%02X.%02X.%02X Function: %02X\n",
211 Bus, SubBus, AgentId, Function);
212
213 node = kmalloc(sizeof(struct iSeries_Device_Node), GFP_KERNEL);
214 if (node == NULL)
215 return NULL;
216
217 memset(node, 0, sizeof(struct iSeries_Device_Node));
218 list_add_tail(&node->Device_List, &iSeries_Global_Device_List);
219#if 0
220 node->DsaAddr = ((u64)Bus << 48) + ((u64)SubBus << 40) + ((u64)0x10 << 32);
221#endif
222 node->DsaAddr.DsaAddr = 0;
223 node->DsaAddr.Dsa.busNumber = Bus;
224 node->DsaAddr.Dsa.subBusNumber = SubBus;
225 node->DsaAddr.Dsa.deviceId = 0x10;
226 node->DevFn = PCI_DEVFN(ISERIES_ENCODE_DEVICE(AgentId), Function);
227 return node;
228}
229
230/*
231 * unsigned long __init find_and_init_phbs(void)
232 *
233 * Description:
234 * This function checks for all possible system PCI host bridges that connect
235 * PCI buses. The system hypervisor is queried as to the guest partition
236 * ownership status. A pci_controller is built for any bus which is partially
237 * owned or fully owned by this guest partition.
238 */
239unsigned long __init find_and_init_phbs(void)
240{
241 struct pci_controller *phb;
242 HvBusNumber bus;
243
244 PPCDBG(PPCDBG_BUSWALK, "find_and_init_phbs Entry\n");
245
246 /* Check all possible buses. */
247 for (bus = 0; bus < 256; bus++) {
248 int ret = HvCallXm_testBus(bus);
249 if (ret == 0) {
250 printk("bus %d appears to exist\n", bus);
251
252 phb = (struct pci_controller *)kmalloc(sizeof(struct pci_controller), GFP_KERNEL);
253 if (phb == NULL)
254 return -ENOMEM;
255 pci_setup_pci_controller(phb);
256
257 phb->pci_mem_offset = phb->local_number = bus;
258 phb->first_busno = bus;
259 phb->last_busno = bus;
260 phb->ops = &iSeries_pci_ops;
261
262 PPCDBG(PPCDBG_BUSWALK, "PCI:Create iSeries pci_controller(%p), Bus: %04X\n",
263 phb, bus);
264
265 /* Find and connect the devices. */
266 scan_PHB_slots(phb);
267 }
268 /*
269 * Check for Unexpected Return code, a clue that something
270 * has gone wrong.
271 */
272 else if (ret != 0x0301)
273 printk(KERN_ERR "Unexpected Return on Probe(0x%04X): 0x%04X",
274 bus, ret);
275 }
276 return 0;
277}
278
279/*
280 * iSeries_pcibios_init
281 *
282 * Chance to initialize and structures or variable before PCI Bus walk.
283 */
284void iSeries_pcibios_init(void)
285{
286 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Entry.\n");
287 iomm_table_initialize();
288 find_and_init_phbs();
289 io_page_mask = -1;
290 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_init Exit.\n");
291}
292
293/*
294 * iSeries_pci_final_fixup(void)
295 */
296void __init iSeries_pci_final_fixup(void)
297{
298 struct pci_dev *pdev = NULL;
299 struct iSeries_Device_Node *node;
300 int DeviceCount = 0;
301
302 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup Entry.\n");
303
304 /* Fix up at the device node and pci_dev relationship */
305 mf_display_src(0xC9000100);
306
307 printk("pcibios_final_fixup\n");
308 for_each_pci_dev(pdev) {
309 node = find_Device_Node(pdev->bus->number, pdev->devfn);
310 printk("pci dev %p (%x.%x), node %p\n", pdev,
311 pdev->bus->number, pdev->devfn, node);
312
313 if (node != NULL) {
314 ++DeviceCount;
315 pdev->sysdata = (void *)node;
316 node->PciDev = pdev;
317 PPCDBG(PPCDBG_BUSWALK,
318 "pdev 0x%p <==> DevNode 0x%p\n",
319 pdev, node);
320 allocate_device_bars(pdev);
321 iSeries_Device_Information(pdev, DeviceCount);
322 iommu_devnode_init_iSeries(node);
323 } else
324 printk("PCI: Device Tree not found for 0x%016lX\n",
325 (unsigned long)pdev);
326 pdev->irq = node->Irq;
327 }
328 iSeries_activate_IRQs();
329 mf_display_src(0xC9000200);
330}
331
332void pcibios_fixup_bus(struct pci_bus *PciBus)
333{
334 PPCDBG(PPCDBG_BUSWALK, "iSeries_pcibios_fixup_bus(0x%04X) Entry.\n",
335 PciBus->number);
336}
337
338void pcibios_fixup_resources(struct pci_dev *pdev)
339{
340 PPCDBG(PPCDBG_BUSWALK, "fixup_resources pdev %p\n", pdev);
341}
342
343/*
344 * Loop through each node function to find usable EADs bridges.
345 */
346static void scan_PHB_slots(struct pci_controller *Phb)
347{
348 struct HvCallPci_DeviceInfo *DevInfo;
349 HvBusNumber bus = Phb->local_number; /* System Bus */
350 const HvSubBusNumber SubBus = 0; /* EADs is always 0. */
351 int HvRc = 0;
352 int IdSel;
353 const int MaxAgents = 8;
354
355 DevInfo = (struct HvCallPci_DeviceInfo*)
356 kmalloc(sizeof(struct HvCallPci_DeviceInfo), GFP_KERNEL);
357 if (DevInfo == NULL)
358 return;
359
360 /*
361 * Probe for EADs Bridges
362 */
363 for (IdSel = 1; IdSel < MaxAgents; ++IdSel) {
364 HvRc = HvCallPci_getDeviceInfo(bus, SubBus, IdSel,
365 ISERIES_HV_ADDR(DevInfo),
366 sizeof(struct HvCallPci_DeviceInfo));
367 if (HvRc == 0) {
368 if (DevInfo->deviceType == HvCallPci_NodeDevice)
369 scan_EADS_bridge(bus, SubBus, IdSel);
370 else
371 printk("PCI: Invalid System Configuration(0x%02X)"
372 " for bus 0x%02x id 0x%02x.\n",
373 DevInfo->deviceType, bus, IdSel);
374 }
375 else
376 pci_Log_Error("getDeviceInfo", bus, SubBus, IdSel, HvRc);
377 }
378 kfree(DevInfo);
379}
380
381static void scan_EADS_bridge(HvBusNumber bus, HvSubBusNumber SubBus,
382 int IdSel)
383{
384 struct HvCallPci_BridgeInfo *BridgeInfo;
385 HvAgentId AgentId;
386 int Function;
387 int HvRc;
388
389 BridgeInfo = (struct HvCallPci_BridgeInfo *)
390 kmalloc(sizeof(struct HvCallPci_BridgeInfo), GFP_KERNEL);
391 if (BridgeInfo == NULL)
392 return;
393
394 /* Note: hvSubBus and irq is always be 0 at this level! */
395 for (Function = 0; Function < 8; ++Function) {
396 AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
397 HvRc = HvCallXm_connectBusUnit(bus, SubBus, AgentId, 0);
398 if (HvRc == 0) {
399 printk("found device at bus %d idsel %d func %d (AgentId %x)\n",
400 bus, IdSel, Function, AgentId);
401 /* Connect EADs: 0x18.00.12 = 0x00 */
402 PPCDBG(PPCDBG_BUSWALK,
403 "PCI:Connect EADs: 0x%02X.%02X.%02X\n",
404 bus, SubBus, AgentId);
405 HvRc = HvCallPci_getBusUnitInfo(bus, SubBus, AgentId,
406 ISERIES_HV_ADDR(BridgeInfo),
407 sizeof(struct HvCallPci_BridgeInfo));
408 if (HvRc == 0) {
409 printk("bridge info: type %x subbus %x maxAgents %x maxsubbus %x logslot %x\n",
410 BridgeInfo->busUnitInfo.deviceType,
411 BridgeInfo->subBusNumber,
412 BridgeInfo->maxAgents,
413 BridgeInfo->maxSubBusNumber,
414 BridgeInfo->logicalSlotNumber);
415 PPCDBG(PPCDBG_BUSWALK,
416 "PCI: BridgeInfo, Type:0x%02X, SubBus:0x%02X, MaxAgents:0x%02X, MaxSubBus: 0x%02X, LSlot: 0x%02X\n",
417 BridgeInfo->busUnitInfo.deviceType,
418 BridgeInfo->subBusNumber,
419 BridgeInfo->maxAgents,
420 BridgeInfo->maxSubBusNumber,
421 BridgeInfo->logicalSlotNumber);
422
423 if (BridgeInfo->busUnitInfo.deviceType ==
424 HvCallPci_BridgeDevice) {
425 /* Scan_Bridge_Slot...: 0x18.00.12 */
426 scan_bridge_slot(bus, BridgeInfo);
427 } else
428 printk("PCI: Invalid Bridge Configuration(0x%02X)",
429 BridgeInfo->busUnitInfo.deviceType);
430 }
431 } else if (HvRc != 0x000B)
432 pci_Log_Error("EADs Connect",
433 bus, SubBus, AgentId, HvRc);
434 }
435 kfree(BridgeInfo);
436}
437
438/*
439 * This assumes that the node slot is always on the primary bus!
440 */
441static int scan_bridge_slot(HvBusNumber Bus,
442 struct HvCallPci_BridgeInfo *BridgeInfo)
443{
444 struct iSeries_Device_Node *node;
445 HvSubBusNumber SubBus = BridgeInfo->subBusNumber;
446 u16 VendorId = 0;
447 int HvRc = 0;
448 u8 Irq = 0;
449 int IdSel = ISERIES_GET_DEVICE_FROM_SUBBUS(SubBus);
450 int Function = ISERIES_GET_FUNCTION_FROM_SUBBUS(SubBus);
451 HvAgentId EADsIdSel = ISERIES_PCI_AGENTID(IdSel, Function);
452
453 /* iSeries_allocate_IRQ.: 0x18.00.12(0xA3) */
454 Irq = iSeries_allocate_IRQ(Bus, 0, EADsIdSel);
455 PPCDBG(PPCDBG_BUSWALK,
456 "PCI:- allocate and assign IRQ 0x%02X.%02X.%02X = 0x%02X\n",
457 Bus, 0, EADsIdSel, Irq);
458
459 /*
460 * Connect all functions of any device found.
461 */
462 for (IdSel = 1; IdSel <= BridgeInfo->maxAgents; ++IdSel) {
463 for (Function = 0; Function < 8; ++Function) {
464 HvAgentId AgentId = ISERIES_PCI_AGENTID(IdSel, Function);
465 HvRc = HvCallXm_connectBusUnit(Bus, SubBus,
466 AgentId, Irq);
467 if (HvRc != 0) {
468 pci_Log_Error("Connect Bus Unit",
469 Bus, SubBus, AgentId, HvRc);
470 continue;
471 }
472
473 HvRc = HvCallPci_configLoad16(Bus, SubBus, AgentId,
474 PCI_VENDOR_ID, &VendorId);
475 if (HvRc != 0) {
476 pci_Log_Error("Read Vendor",
477 Bus, SubBus, AgentId, HvRc);
478 continue;
479 }
480 printk("read vendor ID: %x\n", VendorId);
481
482 /* FoundDevice: 0x18.28.10 = 0x12AE */
483 PPCDBG(PPCDBG_BUSWALK,
484 "PCI:- FoundDevice: 0x%02X.%02X.%02X = 0x%04X, irq %d\n",
485 Bus, SubBus, AgentId, VendorId, Irq);
486 HvRc = HvCallPci_configStore8(Bus, SubBus, AgentId,
487 PCI_INTERRUPT_LINE, Irq);
488 if (HvRc != 0)
489 pci_Log_Error("PciCfgStore Irq Failed!",
490 Bus, SubBus, AgentId, HvRc);
491
492 ++DeviceCount;
493 node = build_device_node(Bus, SubBus, EADsIdSel, Function);
494 node->Irq = Irq;
495 node->LogicalSlot = BridgeInfo->logicalSlotNumber;
496
497 } /* for (Function = 0; Function < 8; ++Function) */
498 } /* for (IdSel = 1; IdSel <= MaxAgents; ++IdSel) */
499 return HvRc;
500}
501
502/*
503 * I/0 Memory copy MUST use mmio commands on iSeries
504 * To do; For performance, include the hv call directly
505 */
506void iSeries_memset_io(volatile void __iomem *dest, char c, size_t Count)
507{
508 u8 ByteValue = c;
509 long NumberOfBytes = Count;
510
511 while (NumberOfBytes > 0) {
512 iSeries_Write_Byte(ByteValue, dest++);
513 -- NumberOfBytes;
514 }
515}
516EXPORT_SYMBOL(iSeries_memset_io);
517
518void iSeries_memcpy_toio(volatile void __iomem *dest, void *source, size_t count)
519{
520 char *src = source;
521 long NumberOfBytes = count;
522
523 while (NumberOfBytes > 0) {
524 iSeries_Write_Byte(*src++, dest++);
525 -- NumberOfBytes;
526 }
527}
528EXPORT_SYMBOL(iSeries_memcpy_toio);
529
530void iSeries_memcpy_fromio(void *dest, const volatile void __iomem *src, size_t count)
531{
532 char *dst = dest;
533 long NumberOfBytes = count;
534
535 while (NumberOfBytes > 0) {
536 *dst++ = iSeries_Read_Byte(src++);
537 -- NumberOfBytes;
538 }
539}
540EXPORT_SYMBOL(iSeries_memcpy_fromio);
541
542/*
543 * Look down the chain to find the matching Device Device
544 */
545static struct iSeries_Device_Node *find_Device_Node(int bus, int devfn)
546{
547 struct list_head *pos;
548
549 list_for_each(pos, &iSeries_Global_Device_List) {
550 struct iSeries_Device_Node *node =
551 list_entry(pos, struct iSeries_Device_Node, Device_List);
552
553 if ((bus == ISERIES_BUS(node)) && (devfn == node->DevFn))
554 return node;
555 }
556 return NULL;
557}
558
559#if 0
560/*
561 * Returns the device node for the passed pci_dev
562 * Sanity Check Node PciDev to passed pci_dev
563 * If none is found, returns a NULL which the client must handle.
564 */
565static struct iSeries_Device_Node *get_Device_Node(struct pci_dev *pdev)
566{
567 struct iSeries_Device_Node *node;
568
569 node = pdev->sysdata;
570 if (node == NULL || node->PciDev != pdev)
571 node = find_Device_Node(pdev->bus->number, pdev->devfn);
572 return node;
573}
574#endif
575
576/*
577 * Config space read and write functions.
578 * For now at least, we look for the device node for the bus and devfn
579 * that we are asked to access. It may be possible to translate the devfn
580 * to a subbus and deviceid more directly.
581 */
582static u64 hv_cfg_read_func[4] = {
583 HvCallPciConfigLoad8, HvCallPciConfigLoad16,
584 HvCallPciConfigLoad32, HvCallPciConfigLoad32
585};
586
587static u64 hv_cfg_write_func[4] = {
588 HvCallPciConfigStore8, HvCallPciConfigStore16,
589 HvCallPciConfigStore32, HvCallPciConfigStore32
590};
591
592/*
593 * Read PCI config space
594 */
595static int iSeries_pci_read_config(struct pci_bus *bus, unsigned int devfn,
596 int offset, int size, u32 *val)
597{
598 struct iSeries_Device_Node *node = find_Device_Node(bus->number, devfn);
599 u64 fn;
600 struct HvCallPci_LoadReturn ret;
601
602 if (node == NULL)
603 return PCIBIOS_DEVICE_NOT_FOUND;
604 if (offset > 255) {
605 *val = ~0;
606 return PCIBIOS_BAD_REGISTER_NUMBER;
607 }
608
609 fn = hv_cfg_read_func[(size - 1) & 3];
610 HvCall3Ret16(fn, &ret, node->DsaAddr.DsaAddr, offset, 0);
611
612 if (ret.rc != 0) {
613 *val = ~0;
614 return PCIBIOS_DEVICE_NOT_FOUND; /* or something */
615 }
616
617 *val = ret.value;
618 return 0;
619}
620
621/*
622 * Write PCI config space
623 */
624
625static int iSeries_pci_write_config(struct pci_bus *bus, unsigned int devfn,
626 int offset, int size, u32 val)
627{
628 struct iSeries_Device_Node *node = find_Device_Node(bus->number, devfn);
629 u64 fn;
630 u64 ret;
631
632 if (node == NULL)
633 return PCIBIOS_DEVICE_NOT_FOUND;
634 if (offset > 255)
635 return PCIBIOS_BAD_REGISTER_NUMBER;
636
637 fn = hv_cfg_write_func[(size - 1) & 3];
638 ret = HvCall4(fn, node->DsaAddr.DsaAddr, offset, val, 0);
639
640 if (ret != 0)
641 return PCIBIOS_DEVICE_NOT_FOUND;
642
643 return 0;
644}
645
646static struct pci_ops iSeries_pci_ops = {
647 .read = iSeries_pci_read_config,
648 .write = iSeries_pci_write_config
649};
650
651/*
652 * Check Return Code
653 * -> On Failure, print and log information.
654 * Increment Retry Count, if exceeds max, panic partition.
655 *
656 * PCI: Device 23.90 ReadL I/O Error( 0): 0x1234
657 * PCI: Device 23.90 ReadL Retry( 1)
658 * PCI: Device 23.90 ReadL Retry Successful(1)
659 */
660static int CheckReturnCode(char *TextHdr, struct iSeries_Device_Node *DevNode,
661 int *retry, u64 ret)
662{
663 if (ret != 0) {
664 ++Pci_Error_Count;
665 (*retry)++;
666 printk("PCI: %s: Device 0x%04X:%02X I/O Error(%2d): 0x%04X\n",
667 TextHdr, DevNode->DsaAddr.Dsa.busNumber, DevNode->DevFn,
668 *retry, (int)ret);
669 /*
670 * Bump the retry and check for retry count exceeded.
671 * If, Exceeded, panic the system.
672 */
673 if (((*retry) > Pci_Retry_Max) &&
674 (Pci_Error_Flag > 0)) {
675 mf_display_src(0xB6000103);
676 panic_timeout = 0;
677 panic("PCI: Hardware I/O Error, SRC B6000103, "
678 "Automatic Reboot Disabled.\n");
679 }
680 return -1; /* Retry Try */
681 }
682 return 0;
683}
684
685/*
686 * Translate the I/O Address into a device node, bar, and bar offset.
687 * Note: Make sure the passed variable end up on the stack to avoid
688 * the exposure of being device global.
689 */
690static inline struct iSeries_Device_Node *xlate_iomm_address(
691 const volatile void __iomem *IoAddress,
692 u64 *dsaptr, u64 *BarOffsetPtr)
693{
694 unsigned long OrigIoAddr;
695 unsigned long BaseIoAddr;
696 unsigned long TableIndex;
697 struct iSeries_Device_Node *DevNode;
698
699 OrigIoAddr = (unsigned long __force)IoAddress;
700 if ((OrigIoAddr < BASE_IO_MEMORY) || (OrigIoAddr >= max_io_memory))
701 return NULL;
702 BaseIoAddr = OrigIoAddr - BASE_IO_MEMORY;
703 TableIndex = BaseIoAddr / IOMM_TABLE_ENTRY_SIZE;
704 DevNode = iomm_table[TableIndex];
705
706 if (DevNode != NULL) {
707 int barnum = iobar_table[TableIndex];
708 *dsaptr = DevNode->DsaAddr.DsaAddr | (barnum << 24);
709 *BarOffsetPtr = BaseIoAddr % IOMM_TABLE_ENTRY_SIZE;
710 } else
711 panic("PCI: Invalid PCI IoAddress detected!\n");
712 return DevNode;
713}
714
715/*
716 * Read MM I/O Instructions for the iSeries
717 * On MM I/O error, all ones are returned and iSeries_pci_IoError is cal
718 * else, data is returned in big Endian format.
719 *
720 * iSeries_Read_Byte = Read Byte ( 8 bit)
721 * iSeries_Read_Word = Read Word (16 bit)
722 * iSeries_Read_Long = Read Long (32 bit)
723 */
724u8 iSeries_Read_Byte(const volatile void __iomem *IoAddress)
725{
726 u64 BarOffset;
727 u64 dsa;
728 int retry = 0;
729 struct HvCallPci_LoadReturn ret;
730 struct iSeries_Device_Node *DevNode =
731 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
732
733 if (DevNode == NULL) {
734 static unsigned long last_jiffies;
735 static int num_printed;
736
737 if ((jiffies - last_jiffies) > 60 * HZ) {
738 last_jiffies = jiffies;
739 num_printed = 0;
740 }
741 if (num_printed++ < 10)
742 printk(KERN_ERR "iSeries_Read_Byte: invalid access at IO address %p\n", IoAddress);
743 return 0xff;
744 }
745 do {
746 ++Pci_Io_Read_Count;
747 HvCall3Ret16(HvCallPciBarLoad8, &ret, dsa, BarOffset, 0);
748 } while (CheckReturnCode("RDB", DevNode, &retry, ret.rc) != 0);
749
750 return (u8)ret.value;
751}
752EXPORT_SYMBOL(iSeries_Read_Byte);
753
754u16 iSeries_Read_Word(const volatile void __iomem *IoAddress)
755{
756 u64 BarOffset;
757 u64 dsa;
758 int retry = 0;
759 struct HvCallPci_LoadReturn ret;
760 struct iSeries_Device_Node *DevNode =
761 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
762
763 if (DevNode == NULL) {
764 static unsigned long last_jiffies;
765 static int num_printed;
766
767 if ((jiffies - last_jiffies) > 60 * HZ) {
768 last_jiffies = jiffies;
769 num_printed = 0;
770 }
771 if (num_printed++ < 10)
772 printk(KERN_ERR "iSeries_Read_Word: invalid access at IO address %p\n", IoAddress);
773 return 0xffff;
774 }
775 do {
776 ++Pci_Io_Read_Count;
777 HvCall3Ret16(HvCallPciBarLoad16, &ret, dsa,
778 BarOffset, 0);
779 } while (CheckReturnCode("RDW", DevNode, &retry, ret.rc) != 0);
780
781 return swab16((u16)ret.value);
782}
783EXPORT_SYMBOL(iSeries_Read_Word);
784
785u32 iSeries_Read_Long(const volatile void __iomem *IoAddress)
786{
787 u64 BarOffset;
788 u64 dsa;
789 int retry = 0;
790 struct HvCallPci_LoadReturn ret;
791 struct iSeries_Device_Node *DevNode =
792 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
793
794 if (DevNode == NULL) {
795 static unsigned long last_jiffies;
796 static int num_printed;
797
798 if ((jiffies - last_jiffies) > 60 * HZ) {
799 last_jiffies = jiffies;
800 num_printed = 0;
801 }
802 if (num_printed++ < 10)
803 printk(KERN_ERR "iSeries_Read_Long: invalid access at IO address %p\n", IoAddress);
804 return 0xffffffff;
805 }
806 do {
807 ++Pci_Io_Read_Count;
808 HvCall3Ret16(HvCallPciBarLoad32, &ret, dsa,
809 BarOffset, 0);
810 } while (CheckReturnCode("RDL", DevNode, &retry, ret.rc) != 0);
811
812 return swab32((u32)ret.value);
813}
814EXPORT_SYMBOL(iSeries_Read_Long);
815
816/*
817 * Write MM I/O Instructions for the iSeries
818 *
819 * iSeries_Write_Byte = Write Byte (8 bit)
820 * iSeries_Write_Word = Write Word(16 bit)
821 * iSeries_Write_Long = Write Long(32 bit)
822 */
823void iSeries_Write_Byte(u8 data, volatile void __iomem *IoAddress)
824{
825 u64 BarOffset;
826 u64 dsa;
827 int retry = 0;
828 u64 rc;
829 struct iSeries_Device_Node *DevNode =
830 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
831
832 if (DevNode == NULL) {
833 static unsigned long last_jiffies;
834 static int num_printed;
835
836 if ((jiffies - last_jiffies) > 60 * HZ) {
837 last_jiffies = jiffies;
838 num_printed = 0;
839 }
840 if (num_printed++ < 10)
841 printk(KERN_ERR "iSeries_Write_Byte: invalid access at IO address %p\n", IoAddress);
842 return;
843 }
844 do {
845 ++Pci_Io_Write_Count;
846 rc = HvCall4(HvCallPciBarStore8, dsa, BarOffset, data, 0);
847 } while (CheckReturnCode("WWB", DevNode, &retry, rc) != 0);
848}
849EXPORT_SYMBOL(iSeries_Write_Byte);
850
851void iSeries_Write_Word(u16 data, volatile void __iomem *IoAddress)
852{
853 u64 BarOffset;
854 u64 dsa;
855 int retry = 0;
856 u64 rc;
857 struct iSeries_Device_Node *DevNode =
858 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
859
860 if (DevNode == NULL) {
861 static unsigned long last_jiffies;
862 static int num_printed;
863
864 if ((jiffies - last_jiffies) > 60 * HZ) {
865 last_jiffies = jiffies;
866 num_printed = 0;
867 }
868 if (num_printed++ < 10)
869 printk(KERN_ERR "iSeries_Write_Word: invalid access at IO address %p\n", IoAddress);
870 return;
871 }
872 do {
873 ++Pci_Io_Write_Count;
874 rc = HvCall4(HvCallPciBarStore16, dsa, BarOffset, swab16(data), 0);
875 } while (CheckReturnCode("WWW", DevNode, &retry, rc) != 0);
876}
877EXPORT_SYMBOL(iSeries_Write_Word);
878
879void iSeries_Write_Long(u32 data, volatile void __iomem *IoAddress)
880{
881 u64 BarOffset;
882 u64 dsa;
883 int retry = 0;
884 u64 rc;
885 struct iSeries_Device_Node *DevNode =
886 xlate_iomm_address(IoAddress, &dsa, &BarOffset);
887
888 if (DevNode == NULL) {
889 static unsigned long last_jiffies;
890 static int num_printed;
891
892 if ((jiffies - last_jiffies) > 60 * HZ) {
893 last_jiffies = jiffies;
894 num_printed = 0;
895 }
896 if (num_printed++ < 10)
897 printk(KERN_ERR "iSeries_Write_Long: invalid access at IO address %p\n", IoAddress);
898 return;
899 }
900 do {
901 ++Pci_Io_Write_Count;
902 rc = HvCall4(HvCallPciBarStore32, dsa, BarOffset, swab32(data), 0);
903 } while (CheckReturnCode("WWL", DevNode, &retry, rc) != 0);
904}
905EXPORT_SYMBOL(iSeries_Write_Long);
diff --git a/arch/ppc64/kernel/iSeries_proc.c b/arch/ppc64/kernel/iSeries_proc.c
deleted file mode 100644
index 0fe3116eba29..000000000000
--- a/arch/ppc64/kernel/iSeries_proc.c
+++ /dev/null
@@ -1,113 +0,0 @@
1/*
2 * iSeries_proc.c
3 * Copyright (C) 2001 Kyle A. Lucke IBM Corporation
4 * Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include <linux/init.h>
21#include <linux/proc_fs.h>
22#include <linux/seq_file.h>
23#include <linux/param.h> /* for HZ */
24#include <asm/paca.h>
25#include <asm/processor.h>
26#include <asm/time.h>
27#include <asm/lppaca.h>
28#include <asm/iSeries/ItLpQueue.h>
29#include <asm/iSeries/HvCallXm.h>
30#include <asm/iSeries/IoHriMainStore.h>
31#include <asm/iSeries/IoHriProcessorVpd.h>
32
33static int __init iseries_proc_create(void)
34{
35 struct proc_dir_entry *e = proc_mkdir("iSeries", 0);
36 if (!e)
37 return 1;
38
39 return 0;
40}
41core_initcall(iseries_proc_create);
42
43static unsigned long startTitan = 0;
44static unsigned long startTb = 0;
45
46static int proc_titantod_show(struct seq_file *m, void *v)
47{
48 unsigned long tb0, titan_tod;
49
50 tb0 = get_tb();
51 titan_tod = HvCallXm_loadTod();
52
53 seq_printf(m, "Titan\n" );
54 seq_printf(m, " time base = %016lx\n", tb0);
55 seq_printf(m, " titan tod = %016lx\n", titan_tod);
56 seq_printf(m, " xProcFreq = %016x\n",
57 xIoHriProcessorVpd[0].xProcFreq);
58 seq_printf(m, " xTimeBaseFreq = %016x\n",
59 xIoHriProcessorVpd[0].xTimeBaseFreq);
60 seq_printf(m, " tb_ticks_per_jiffy = %lu\n", tb_ticks_per_jiffy);
61 seq_printf(m, " tb_ticks_per_usec = %lu\n", tb_ticks_per_usec);
62
63 if (!startTitan) {
64 startTitan = titan_tod;
65 startTb = tb0;
66 } else {
67 unsigned long titan_usec = (titan_tod - startTitan) >> 12;
68 unsigned long tb_ticks = (tb0 - startTb);
69 unsigned long titan_jiffies = titan_usec / (1000000/HZ);
70 unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ);
71 unsigned long titan_jiff_rem_usec = titan_usec - titan_jiff_usec;
72 unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy;
73 unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy;
74 unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks;
75 unsigned long tb_jiff_rem_usec = tb_jiff_rem_ticks / tb_ticks_per_usec;
76 unsigned long new_tb_ticks_per_jiffy = (tb_ticks * (1000000/HZ))/titan_usec;
77
78 seq_printf(m, " titan elapsed = %lu uSec\n", titan_usec);
79 seq_printf(m, " tb elapsed = %lu ticks\n", tb_ticks);
80 seq_printf(m, " titan jiffies = %lu.%04lu \n", titan_jiffies,
81 titan_jiff_rem_usec);
82 seq_printf(m, " tb jiffies = %lu.%04lu\n", tb_jiffies,
83 tb_jiff_rem_usec);
84 seq_printf(m, " new tb_ticks_per_jiffy = %lu\n",
85 new_tb_ticks_per_jiffy);
86 }
87
88 return 0;
89}
90
91static int proc_titantod_open(struct inode *inode, struct file *file)
92{
93 return single_open(file, proc_titantod_show, NULL);
94}
95
96static struct file_operations proc_titantod_operations = {
97 .open = proc_titantod_open,
98 .read = seq_read,
99 .llseek = seq_lseek,
100 .release = single_release,
101};
102
103static int __init iseries_proc_init(void)
104{
105 struct proc_dir_entry *e;
106
107 e = create_proc_entry("iSeries/titanTod", S_IFREG|S_IRUGO, NULL);
108 if (e)
109 e->proc_fops = &proc_titantod_operations;
110
111 return 0;
112}
113__initcall(iseries_proc_init);
diff --git a/arch/ppc64/kernel/iSeries_setup.c b/arch/ppc64/kernel/iSeries_setup.c
deleted file mode 100644
index 9daf734adbd5..000000000000
--- a/arch/ppc64/kernel/iSeries_setup.c
+++ /dev/null
@@ -1,1007 +0,0 @@
1/*
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 *
5 * Module name: iSeries_setup.c
6 *
7 * Description:
8 * Architecture- / platform-specific boot-time initialization code for
9 * the IBM iSeries LPAR. Adapted from original code by Grant Erickson and
10 * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek
11 * <dan@net4x.com>.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19#undef DEBUG
20
21#include <linux/config.h>
22#include <linux/init.h>
23#include <linux/threads.h>
24#include <linux/smp.h>
25#include <linux/param.h>
26#include <linux/string.h>
27#include <linux/initrd.h>
28#include <linux/seq_file.h>
29#include <linux/kdev_t.h>
30#include <linux/major.h>
31#include <linux/root_dev.h>
32
33#include <asm/processor.h>
34#include <asm/machdep.h>
35#include <asm/page.h>
36#include <asm/mmu.h>
37#include <asm/pgtable.h>
38#include <asm/mmu_context.h>
39#include <asm/cputable.h>
40#include <asm/sections.h>
41#include <asm/iommu.h>
42#include <asm/firmware.h>
43
44#include <asm/time.h>
45#include "iSeries_setup.h"
46#include <asm/naca.h>
47#include <asm/paca.h>
48#include <asm/cache.h>
49#include <asm/sections.h>
50#include <asm/abs_addr.h>
51#include <asm/iSeries/HvCallHpt.h>
52#include <asm/iSeries/HvLpConfig.h>
53#include <asm/iSeries/HvCallEvent.h>
54#include <asm/iSeries/HvCallSm.h>
55#include <asm/iSeries/HvCallXm.h>
56#include <asm/iSeries/ItLpQueue.h>
57#include <asm/iSeries/IoHriMainStore.h>
58#include <asm/iSeries/mf.h>
59#include <asm/iSeries/HvLpEvent.h>
60#include <asm/iSeries/iSeries_irq.h>
61#include <asm/iSeries/IoHriProcessorVpd.h>
62#include <asm/iSeries/ItVpdAreas.h>
63#include <asm/iSeries/LparMap.h>
64
65extern void hvlog(char *fmt, ...);
66
67#ifdef DEBUG
68#define DBG(fmt...) hvlog(fmt)
69#else
70#define DBG(fmt...)
71#endif
72
73/* Function Prototypes */
74extern void ppcdbg_initialize(void);
75
76static void build_iSeries_Memory_Map(void);
77static int iseries_shared_idle(void);
78static int iseries_dedicated_idle(void);
79#ifdef CONFIG_PCI
80extern void iSeries_pci_final_fixup(void);
81#else
82static void iSeries_pci_final_fixup(void) { }
83#endif
84
85/* Global Variables */
86int piranha_simulator;
87
88extern int rd_size; /* Defined in drivers/block/rd.c */
89extern unsigned long klimit;
90extern unsigned long embedded_sysmap_start;
91extern unsigned long embedded_sysmap_end;
92
93extern unsigned long iSeries_recal_tb;
94extern unsigned long iSeries_recal_titan;
95
96static int mf_initialized;
97
98struct MemoryBlock {
99 unsigned long absStart;
100 unsigned long absEnd;
101 unsigned long logicalStart;
102 unsigned long logicalEnd;
103};
104
105/*
106 * Process the main store vpd to determine where the holes in memory are
107 * and return the number of physical blocks and fill in the array of
108 * block data.
109 */
110static unsigned long iSeries_process_Condor_mainstore_vpd(
111 struct MemoryBlock *mb_array, unsigned long max_entries)
112{
113 unsigned long holeFirstChunk, holeSizeChunks;
114 unsigned long numMemoryBlocks = 1;
115 struct IoHriMainStoreSegment4 *msVpd =
116 (struct IoHriMainStoreSegment4 *)xMsVpd;
117 unsigned long holeStart = msVpd->nonInterleavedBlocksStartAdr;
118 unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr;
119 unsigned long holeSize = holeEnd - holeStart;
120
121 printk("Mainstore_VPD: Condor\n");
122 /*
123 * Determine if absolute memory has any
124 * holes so that we can interpret the
125 * access map we get back from the hypervisor
126 * correctly.
127 */
128 mb_array[0].logicalStart = 0;
129 mb_array[0].logicalEnd = 0x100000000;
130 mb_array[0].absStart = 0;
131 mb_array[0].absEnd = 0x100000000;
132
133 if (holeSize) {
134 numMemoryBlocks = 2;
135 holeStart = holeStart & 0x000fffffffffffff;
136 holeStart = addr_to_chunk(holeStart);
137 holeFirstChunk = holeStart;
138 holeSize = addr_to_chunk(holeSize);
139 holeSizeChunks = holeSize;
140 printk( "Main store hole: start chunk = %0lx, size = %0lx chunks\n",
141 holeFirstChunk, holeSizeChunks );
142 mb_array[0].logicalEnd = holeFirstChunk;
143 mb_array[0].absEnd = holeFirstChunk;
144 mb_array[1].logicalStart = holeFirstChunk;
145 mb_array[1].logicalEnd = 0x100000000 - holeSizeChunks;
146 mb_array[1].absStart = holeFirstChunk + holeSizeChunks;
147 mb_array[1].absEnd = 0x100000000;
148 }
149 return numMemoryBlocks;
150}
151
152#define MaxSegmentAreas 32
153#define MaxSegmentAdrRangeBlocks 128
154#define MaxAreaRangeBlocks 4
155
156static unsigned long iSeries_process_Regatta_mainstore_vpd(
157 struct MemoryBlock *mb_array, unsigned long max_entries)
158{
159 struct IoHriMainStoreSegment5 *msVpdP =
160 (struct IoHriMainStoreSegment5 *)xMsVpd;
161 unsigned long numSegmentBlocks = 0;
162 u32 existsBits = msVpdP->msAreaExists;
163 unsigned long area_num;
164
165 printk("Mainstore_VPD: Regatta\n");
166
167 for (area_num = 0; area_num < MaxSegmentAreas; ++area_num ) {
168 unsigned long numAreaBlocks;
169 struct IoHriMainStoreArea4 *currentArea;
170
171 if (existsBits & 0x80000000) {
172 unsigned long block_num;
173
174 currentArea = &msVpdP->msAreaArray[area_num];
175 numAreaBlocks = currentArea->numAdrRangeBlocks;
176 printk("ms_vpd: processing area %2ld blocks=%ld",
177 area_num, numAreaBlocks);
178 for (block_num = 0; block_num < numAreaBlocks;
179 ++block_num ) {
180 /* Process an address range block */
181 struct MemoryBlock tempBlock;
182 unsigned long i;
183
184 tempBlock.absStart =
185 (unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart;
186 tempBlock.absEnd =
187 (unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd;
188 tempBlock.logicalStart = 0;
189 tempBlock.logicalEnd = 0;
190 printk("\n block %ld absStart=%016lx absEnd=%016lx",
191 block_num, tempBlock.absStart,
192 tempBlock.absEnd);
193
194 for (i = 0; i < numSegmentBlocks; ++i) {
195 if (mb_array[i].absStart ==
196 tempBlock.absStart)
197 break;
198 }
199 if (i == numSegmentBlocks) {
200 if (numSegmentBlocks == max_entries)
201 panic("iSeries_process_mainstore_vpd: too many memory blocks");
202 mb_array[numSegmentBlocks] = tempBlock;
203 ++numSegmentBlocks;
204 } else
205 printk(" (duplicate)");
206 }
207 printk("\n");
208 }
209 existsBits <<= 1;
210 }
211 /* Now sort the blocks found into ascending sequence */
212 if (numSegmentBlocks > 1) {
213 unsigned long m, n;
214
215 for (m = 0; m < numSegmentBlocks - 1; ++m) {
216 for (n = numSegmentBlocks - 1; m < n; --n) {
217 if (mb_array[n].absStart <
218 mb_array[n-1].absStart) {
219 struct MemoryBlock tempBlock;
220
221 tempBlock = mb_array[n];
222 mb_array[n] = mb_array[n-1];
223 mb_array[n-1] = tempBlock;
224 }
225 }
226 }
227 }
228 /*
229 * Assign "logical" addresses to each block. These
230 * addresses correspond to the hypervisor "bitmap" space.
231 * Convert all addresses into units of 256K chunks.
232 */
233 {
234 unsigned long i, nextBitmapAddress;
235
236 printk("ms_vpd: %ld sorted memory blocks\n", numSegmentBlocks);
237 nextBitmapAddress = 0;
238 for (i = 0; i < numSegmentBlocks; ++i) {
239 unsigned long length = mb_array[i].absEnd -
240 mb_array[i].absStart;
241
242 mb_array[i].logicalStart = nextBitmapAddress;
243 mb_array[i].logicalEnd = nextBitmapAddress + length;
244 nextBitmapAddress += length;
245 printk(" Bitmap range: %016lx - %016lx\n"
246 " Absolute range: %016lx - %016lx\n",
247 mb_array[i].logicalStart,
248 mb_array[i].logicalEnd,
249 mb_array[i].absStart, mb_array[i].absEnd);
250 mb_array[i].absStart = addr_to_chunk(mb_array[i].absStart &
251 0x000fffffffffffff);
252 mb_array[i].absEnd = addr_to_chunk(mb_array[i].absEnd &
253 0x000fffffffffffff);
254 mb_array[i].logicalStart =
255 addr_to_chunk(mb_array[i].logicalStart);
256 mb_array[i].logicalEnd = addr_to_chunk(mb_array[i].logicalEnd);
257 }
258 }
259
260 return numSegmentBlocks;
261}
262
263static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array,
264 unsigned long max_entries)
265{
266 unsigned long i;
267 unsigned long mem_blocks = 0;
268
269 if (cpu_has_feature(CPU_FTR_SLB))
270 mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array,
271 max_entries);
272 else
273 mem_blocks = iSeries_process_Condor_mainstore_vpd(mb_array,
274 max_entries);
275
276 printk("Mainstore_VPD: numMemoryBlocks = %ld \n", mem_blocks);
277 for (i = 0; i < mem_blocks; ++i) {
278 printk("Mainstore_VPD: block %3ld logical chunks %016lx - %016lx\n"
279 " abs chunks %016lx - %016lx\n",
280 i, mb_array[i].logicalStart, mb_array[i].logicalEnd,
281 mb_array[i].absStart, mb_array[i].absEnd);
282 }
283 return mem_blocks;
284}
285
286static void __init iSeries_get_cmdline(void)
287{
288 char *p, *q;
289
290 /* copy the command line parameter from the primary VSP */
291 HvCallEvent_dmaToSp(cmd_line, 2 * 64* 1024, 256,
292 HvLpDma_Direction_RemoteToLocal);
293
294 p = cmd_line;
295 q = cmd_line + 255;
296 while(p < q) {
297 if (!*p || *p == '\n')
298 break;
299 ++p;
300 }
301 *p = 0;
302}
303
304static void __init iSeries_init_early(void)
305{
306 extern unsigned long memory_limit;
307
308 DBG(" -> iSeries_init_early()\n");
309
310 ppc64_firmware_features = FW_FEATURE_ISERIES;
311
312 ppcdbg_initialize();
313
314 ppc64_interrupt_controller = IC_ISERIES;
315
316#if defined(CONFIG_BLK_DEV_INITRD)
317 /*
318 * If the init RAM disk has been configured and there is
319 * a non-zero starting address for it, set it up
320 */
321 if (naca.xRamDisk) {
322 initrd_start = (unsigned long)__va(naca.xRamDisk);
323 initrd_end = initrd_start + naca.xRamDiskSize * PAGE_SIZE;
324 initrd_below_start_ok = 1; // ramdisk in kernel space
325 ROOT_DEV = Root_RAM0;
326 if (((rd_size * 1024) / PAGE_SIZE) < naca.xRamDiskSize)
327 rd_size = (naca.xRamDiskSize * PAGE_SIZE) / 1024;
328 } else
329#endif /* CONFIG_BLK_DEV_INITRD */
330 {
331 /* ROOT_DEV = MKDEV(VIODASD_MAJOR, 1); */
332 }
333
334 iSeries_recal_tb = get_tb();
335 iSeries_recal_titan = HvCallXm_loadTod();
336
337 /*
338 * Initialize the hash table management pointers
339 */
340 hpte_init_iSeries();
341
342 /*
343 * Initialize the DMA/TCE management
344 */
345 iommu_init_early_iSeries();
346
347 iSeries_get_cmdline();
348
349 /* Save unparsed command line copy for /proc/cmdline */
350 strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
351
352 /* Parse early parameters, in particular mem=x */
353 parse_early_param();
354
355 if (memory_limit) {
356 if (memory_limit < systemcfg->physicalMemorySize)
357 systemcfg->physicalMemorySize = memory_limit;
358 else {
359 printk("Ignoring mem=%lu >= ram_top.\n", memory_limit);
360 memory_limit = 0;
361 }
362 }
363
364 /* Initialize machine-dependency vectors */
365#ifdef CONFIG_SMP
366 smp_init_iSeries();
367#endif
368 if (itLpNaca.xPirEnvironMode == 0)
369 piranha_simulator = 1;
370
371 /* Associate Lp Event Queue 0 with processor 0 */
372 HvCallEvent_setLpEventQueueInterruptProc(0, 0);
373
374 mf_init();
375 mf_initialized = 1;
376 mb();
377
378 /* If we were passed an initrd, set the ROOT_DEV properly if the values
379 * look sensible. If not, clear initrd reference.
380 */
381#ifdef CONFIG_BLK_DEV_INITRD
382 if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE &&
383 initrd_end > initrd_start)
384 ROOT_DEV = Root_RAM0;
385 else
386 initrd_start = initrd_end = 0;
387#endif /* CONFIG_BLK_DEV_INITRD */
388
389 DBG(" <- iSeries_init_early()\n");
390}
391
392struct mschunks_map mschunks_map = {
393 /* XXX We don't use these, but Piranha might need them. */
394 .chunk_size = MSCHUNKS_CHUNK_SIZE,
395 .chunk_shift = MSCHUNKS_CHUNK_SHIFT,
396 .chunk_mask = MSCHUNKS_OFFSET_MASK,
397};
398EXPORT_SYMBOL(mschunks_map);
399
400void mschunks_alloc(unsigned long num_chunks)
401{
402 klimit = _ALIGN(klimit, sizeof(u32));
403 mschunks_map.mapping = (u32 *)klimit;
404 klimit += num_chunks * sizeof(u32);
405 mschunks_map.num_chunks = num_chunks;
406}
407
408/*
409 * The iSeries may have very large memories ( > 128 GB ) and a partition
410 * may get memory in "chunks" that may be anywhere in the 2**52 real
411 * address space. The chunks are 256K in size. To map this to the
412 * memory model Linux expects, the AS/400 specific code builds a
413 * translation table to translate what Linux thinks are "physical"
414 * addresses to the actual real addresses. This allows us to make
415 * it appear to Linux that we have contiguous memory starting at
416 * physical address zero while in fact this could be far from the truth.
417 * To avoid confusion, I'll let the words physical and/or real address
418 * apply to the Linux addresses while I'll use "absolute address" to
419 * refer to the actual hardware real address.
420 *
421 * build_iSeries_Memory_Map gets information from the Hypervisor and
422 * looks at the Main Store VPD to determine the absolute addresses
423 * of the memory that has been assigned to our partition and builds
424 * a table used to translate Linux's physical addresses to these
425 * absolute addresses. Absolute addresses are needed when
426 * communicating with the hypervisor (e.g. to build HPT entries)
427 */
428
429static void __init build_iSeries_Memory_Map(void)
430{
431 u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
432 u32 nextPhysChunk;
433 u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages;
434 u32 num_ptegs;
435 u32 totalChunks,moreChunks;
436 u32 currChunk, thisChunk, absChunk;
437 u32 currDword;
438 u32 chunkBit;
439 u64 map;
440 struct MemoryBlock mb[32];
441 unsigned long numMemoryBlocks, curBlock;
442
443 /* Chunk size on iSeries is 256K bytes */
444 totalChunks = (u32)HvLpConfig_getMsChunks();
445 mschunks_alloc(totalChunks);
446
447 /*
448 * Get absolute address of our load area
449 * and map it to physical address 0
450 * This guarantees that the loadarea ends up at physical 0
451 * otherwise, it might not be returned by PLIC as the first
452 * chunks
453 */
454
455 loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr);
456 loadAreaSize = itLpNaca.xLoadAreaChunks;
457
458 /*
459 * Only add the pages already mapped here.
460 * Otherwise we might add the hpt pages
461 * The rest of the pages of the load area
462 * aren't in the HPT yet and can still
463 * be assigned an arbitrary physical address
464 */
465 if ((loadAreaSize * 64) > HvPagesToMap)
466 loadAreaSize = HvPagesToMap / 64;
467
468 loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1;
469
470 /*
471 * TODO Do we need to do something if the HPT is in the 64MB load area?
472 * This would be required if the itLpNaca.xLoadAreaChunks includes
473 * the HPT size
474 */
475
476 printk("Mapping load area - physical addr = 0000000000000000\n"
477 " absolute addr = %016lx\n",
478 chunk_to_addr(loadAreaFirstChunk));
479 printk("Load area size %dK\n", loadAreaSize * 256);
480
481 for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk)
482 mschunks_map.mapping[nextPhysChunk] =
483 loadAreaFirstChunk + nextPhysChunk;
484
485 /*
486 * Get absolute address of our HPT and remember it so
487 * we won't map it to any physical address
488 */
489 hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress());
490 hptSizePages = (u32)HvCallHpt_getHptPages();
491 hptSizeChunks = hptSizePages >> (MSCHUNKS_CHUNK_SHIFT - PAGE_SHIFT);
492 hptLastChunk = hptFirstChunk + hptSizeChunks - 1;
493
494 printk("HPT absolute addr = %016lx, size = %dK\n",
495 chunk_to_addr(hptFirstChunk), hptSizeChunks * 256);
496
497 /* Fill in the hashed page table hash mask */
498 num_ptegs = hptSizePages *
499 (PAGE_SIZE / (sizeof(hpte_t) * HPTES_PER_GROUP));
500 htab_hash_mask = num_ptegs - 1;
501
502 /*
503 * The actual hashed page table is in the hypervisor,
504 * we have no direct access
505 */
506 htab_address = NULL;
507
508 /*
509 * Determine if absolute memory has any
510 * holes so that we can interpret the
511 * access map we get back from the hypervisor
512 * correctly.
513 */
514 numMemoryBlocks = iSeries_process_mainstore_vpd(mb, 32);
515
516 /*
517 * Process the main store access map from the hypervisor
518 * to build up our physical -> absolute translation table
519 */
520 curBlock = 0;
521 currChunk = 0;
522 currDword = 0;
523 moreChunks = totalChunks;
524
525 while (moreChunks) {
526 map = HvCallSm_get64BitsOfAccessMap(itLpNaca.xLpIndex,
527 currDword);
528 thisChunk = currChunk;
529 while (map) {
530 chunkBit = map >> 63;
531 map <<= 1;
532 if (chunkBit) {
533 --moreChunks;
534 while (thisChunk >= mb[curBlock].logicalEnd) {
535 ++curBlock;
536 if (curBlock >= numMemoryBlocks)
537 panic("out of memory blocks");
538 }
539 if (thisChunk < mb[curBlock].logicalStart)
540 panic("memory block error");
541
542 absChunk = mb[curBlock].absStart +
543 (thisChunk - mb[curBlock].logicalStart);
544 if (((absChunk < hptFirstChunk) ||
545 (absChunk > hptLastChunk)) &&
546 ((absChunk < loadAreaFirstChunk) ||
547 (absChunk > loadAreaLastChunk))) {
548 mschunks_map.mapping[nextPhysChunk] =
549 absChunk;
550 ++nextPhysChunk;
551 }
552 }
553 ++thisChunk;
554 }
555 ++currDword;
556 currChunk += 64;
557 }
558
559 /*
560 * main store size (in chunks) is
561 * totalChunks - hptSizeChunks
562 * which should be equal to
563 * nextPhysChunk
564 */
565 systemcfg->physicalMemorySize = chunk_to_addr(nextPhysChunk);
566}
567
568/*
569 * Document me.
570 */
571static void __init iSeries_setup_arch(void)
572{
573 unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index;
574
575 if (get_paca()->lppaca.shared_proc) {
576 ppc_md.idle_loop = iseries_shared_idle;
577 printk(KERN_INFO "Using shared processor idle loop\n");
578 } else {
579 ppc_md.idle_loop = iseries_dedicated_idle;
580 printk(KERN_INFO "Using dedicated idle loop\n");
581 }
582
583 /* Setup the Lp Event Queue */
584 setup_hvlpevent_queue();
585
586 printk("Max logical processors = %d\n",
587 itVpdAreas.xSlicMaxLogicalProcs);
588 printk("Max physical processors = %d\n",
589 itVpdAreas.xSlicMaxPhysicalProcs);
590
591 systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
592 printk("Processor version = %x\n", systemcfg->processor);
593}
594
595static void iSeries_get_cpuinfo(struct seq_file *m)
596{
597 seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n");
598}
599
600/*
601 * Document me.
602 * and Implement me.
603 */
604static int iSeries_get_irq(struct pt_regs *regs)
605{
606 /* -2 means ignore this interrupt */
607 return -2;
608}
609
610/*
611 * Document me.
612 */
613static void iSeries_restart(char *cmd)
614{
615 mf_reboot();
616}
617
618/*
619 * Document me.
620 */
621static void iSeries_power_off(void)
622{
623 mf_power_off();
624}
625
626/*
627 * Document me.
628 */
629static void iSeries_halt(void)
630{
631 mf_power_off();
632}
633
634static void __init iSeries_progress(char * st, unsigned short code)
635{
636 printk("Progress: [%04x] - %s\n", (unsigned)code, st);
637 if (!piranha_simulator && mf_initialized) {
638 if (code != 0xffff)
639 mf_display_progress(code);
640 else
641 mf_clear_src();
642 }
643}
644
645static void __init iSeries_fixup_klimit(void)
646{
647 /*
648 * Change klimit to take into account any ram disk
649 * that may be included
650 */
651 if (naca.xRamDisk)
652 klimit = KERNELBASE + (u64)naca.xRamDisk +
653 (naca.xRamDiskSize * PAGE_SIZE);
654 else {
655 /*
656 * No ram disk was included - check and see if there
657 * was an embedded system map. Change klimit to take
658 * into account any embedded system map
659 */
660 if (embedded_sysmap_end)
661 klimit = KERNELBASE + ((embedded_sysmap_end + 4095) &
662 0xfffffffffffff000);
663 }
664}
665
666static int __init iSeries_src_init(void)
667{
668 /* clear the progress line */
669 ppc_md.progress(" ", 0xffff);
670 return 0;
671}
672
673late_initcall(iSeries_src_init);
674
675static inline void process_iSeries_events(void)
676{
677 asm volatile ("li 0,0x5555; sc" : : : "r0", "r3");
678}
679
680static void yield_shared_processor(void)
681{
682 unsigned long tb;
683
684 HvCall_setEnabledInterrupts(HvCall_MaskIPI |
685 HvCall_MaskLpEvent |
686 HvCall_MaskLpProd |
687 HvCall_MaskTimeout);
688
689 tb = get_tb();
690 /* Compute future tb value when yield should expire */
691 HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy);
692
693 /*
694 * The decrementer stops during the yield. Force a fake decrementer
695 * here and let the timer_interrupt code sort out the actual time.
696 */
697 get_paca()->lppaca.int_dword.fields.decr_int = 1;
698 process_iSeries_events();
699}
700
701static int iseries_shared_idle(void)
702{
703 while (1) {
704 while (!need_resched() && !hvlpevent_is_pending()) {
705 local_irq_disable();
706 ppc64_runlatch_off();
707
708 /* Recheck with irqs off */
709 if (!need_resched() && !hvlpevent_is_pending())
710 yield_shared_processor();
711
712 HMT_medium();
713 local_irq_enable();
714 }
715
716 ppc64_runlatch_on();
717
718 if (hvlpevent_is_pending())
719 process_iSeries_events();
720
721 schedule();
722 }
723
724 return 0;
725}
726
727static int iseries_dedicated_idle(void)
728{
729 long oldval;
730
731 while (1) {
732 oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
733
734 if (!oldval) {
735 set_thread_flag(TIF_POLLING_NRFLAG);
736
737 while (!need_resched()) {
738 ppc64_runlatch_off();
739 HMT_low();
740
741 if (hvlpevent_is_pending()) {
742 HMT_medium();
743 ppc64_runlatch_on();
744 process_iSeries_events();
745 }
746 }
747
748 HMT_medium();
749 clear_thread_flag(TIF_POLLING_NRFLAG);
750 } else {
751 set_need_resched();
752 }
753
754 ppc64_runlatch_on();
755 schedule();
756 }
757
758 return 0;
759}
760
761#ifndef CONFIG_PCI
762void __init iSeries_init_IRQ(void) { }
763#endif
764
765static int __init iseries_probe(int platform)
766{
767 return PLATFORM_ISERIES_LPAR == platform;
768}
769
770struct machdep_calls __initdata iseries_md = {
771 .setup_arch = iSeries_setup_arch,
772 .get_cpuinfo = iSeries_get_cpuinfo,
773 .init_IRQ = iSeries_init_IRQ,
774 .get_irq = iSeries_get_irq,
775 .init_early = iSeries_init_early,
776 .pcibios_fixup = iSeries_pci_final_fixup,
777 .restart = iSeries_restart,
778 .power_off = iSeries_power_off,
779 .halt = iSeries_halt,
780 .get_boot_time = iSeries_get_boot_time,
781 .set_rtc_time = iSeries_set_rtc_time,
782 .get_rtc_time = iSeries_get_rtc_time,
783 .calibrate_decr = generic_calibrate_decr,
784 .progress = iSeries_progress,
785 .probe = iseries_probe,
786 /* XXX Implement enable_pmcs for iSeries */
787};
788
789struct blob {
790 unsigned char data[PAGE_SIZE];
791 unsigned long next;
792};
793
794struct iseries_flat_dt {
795 struct boot_param_header header;
796 u64 reserve_map[2];
797 struct blob dt;
798 struct blob strings;
799};
800
801struct iseries_flat_dt iseries_dt;
802
803void dt_init(struct iseries_flat_dt *dt)
804{
805 dt->header.off_mem_rsvmap =
806 offsetof(struct iseries_flat_dt, reserve_map);
807 dt->header.off_dt_struct = offsetof(struct iseries_flat_dt, dt);
808 dt->header.off_dt_strings = offsetof(struct iseries_flat_dt, strings);
809 dt->header.totalsize = sizeof(struct iseries_flat_dt);
810 dt->header.dt_strings_size = sizeof(struct blob);
811
812 /* There is no notion of hardware cpu id on iSeries */
813 dt->header.boot_cpuid_phys = smp_processor_id();
814
815 dt->dt.next = (unsigned long)&dt->dt.data;
816 dt->strings.next = (unsigned long)&dt->strings.data;
817
818 dt->header.magic = OF_DT_HEADER;
819 dt->header.version = 0x10;
820 dt->header.last_comp_version = 0x10;
821
822 dt->reserve_map[0] = 0;
823 dt->reserve_map[1] = 0;
824}
825
826void dt_check_blob(struct blob *b)
827{
828 if (b->next >= (unsigned long)&b->next) {
829 DBG("Ran out of space in flat device tree blob!\n");
830 BUG();
831 }
832}
833
834void dt_push_u32(struct iseries_flat_dt *dt, u32 value)
835{
836 *((u32*)dt->dt.next) = value;
837 dt->dt.next += sizeof(u32);
838
839 dt_check_blob(&dt->dt);
840}
841
842void dt_push_u64(struct iseries_flat_dt *dt, u64 value)
843{
844 *((u64*)dt->dt.next) = value;
845 dt->dt.next += sizeof(u64);
846
847 dt_check_blob(&dt->dt);
848}
849
850unsigned long dt_push_bytes(struct blob *blob, char *data, int len)
851{
852 unsigned long start = blob->next - (unsigned long)blob->data;
853
854 memcpy((char *)blob->next, data, len);
855 blob->next = _ALIGN(blob->next + len, 4);
856
857 dt_check_blob(blob);
858
859 return start;
860}
861
862void dt_start_node(struct iseries_flat_dt *dt, char *name)
863{
864 dt_push_u32(dt, OF_DT_BEGIN_NODE);
865 dt_push_bytes(&dt->dt, name, strlen(name) + 1);
866}
867
868#define dt_end_node(dt) dt_push_u32(dt, OF_DT_END_NODE)
869
870void dt_prop(struct iseries_flat_dt *dt, char *name, char *data, int len)
871{
872 unsigned long offset;
873
874 dt_push_u32(dt, OF_DT_PROP);
875
876 /* Length of the data */
877 dt_push_u32(dt, len);
878
879 /* Put the property name in the string blob. */
880 offset = dt_push_bytes(&dt->strings, name, strlen(name) + 1);
881
882 /* The offset of the properties name in the string blob. */
883 dt_push_u32(dt, (u32)offset);
884
885 /* The actual data. */
886 dt_push_bytes(&dt->dt, data, len);
887}
888
889void dt_prop_str(struct iseries_flat_dt *dt, char *name, char *data)
890{
891 dt_prop(dt, name, data, strlen(data) + 1); /* + 1 for NULL */
892}
893
894void dt_prop_u32(struct iseries_flat_dt *dt, char *name, u32 data)
895{
896 dt_prop(dt, name, (char *)&data, sizeof(u32));
897}
898
899void dt_prop_u64(struct iseries_flat_dt *dt, char *name, u64 data)
900{
901 dt_prop(dt, name, (char *)&data, sizeof(u64));
902}
903
904void dt_prop_u64_list(struct iseries_flat_dt *dt, char *name, u64 *data, int n)
905{
906 dt_prop(dt, name, (char *)data, sizeof(u64) * n);
907}
908
909void dt_prop_empty(struct iseries_flat_dt *dt, char *name)
910{
911 dt_prop(dt, name, NULL, 0);
912}
913
914void dt_cpus(struct iseries_flat_dt *dt)
915{
916 unsigned char buf[32];
917 unsigned char *p;
918 unsigned int i, index;
919 struct IoHriProcessorVpd *d;
920
921 /* yuck */
922 snprintf(buf, 32, "PowerPC,%s", cur_cpu_spec->cpu_name);
923 p = strchr(buf, ' ');
924 if (!p) p = buf + strlen(buf);
925
926 dt_start_node(dt, "cpus");
927 dt_prop_u32(dt, "#address-cells", 1);
928 dt_prop_u32(dt, "#size-cells", 0);
929
930 for (i = 0; i < NR_CPUS; i++) {
931 if (paca[i].lppaca.dyn_proc_status >= 2)
932 continue;
933
934 snprintf(p, 32 - (p - buf), "@%d", i);
935 dt_start_node(dt, buf);
936
937 dt_prop_str(dt, "device_type", "cpu");
938
939 index = paca[i].lppaca.dyn_hv_phys_proc_index;
940 d = &xIoHriProcessorVpd[index];
941
942 dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024);
943 dt_prop_u32(dt, "i-cache-line-size", d->xInstCacheOperandSize);
944
945 dt_prop_u32(dt, "d-cache-size", d->xDataL1CacheSizeKB * 1024);
946 dt_prop_u32(dt, "d-cache-line-size", d->xDataCacheOperandSize);
947
948 /* magic conversions to Hz copied from old code */
949 dt_prop_u32(dt, "clock-frequency",
950 ((1UL << 34) * 1000000) / d->xProcFreq);
951 dt_prop_u32(dt, "timebase-frequency",
952 ((1UL << 32) * 1000000) / d->xTimeBaseFreq);
953
954 dt_prop_u32(dt, "reg", i);
955
956 dt_end_node(dt);
957 }
958
959 dt_end_node(dt);
960}
961
962void build_flat_dt(struct iseries_flat_dt *dt)
963{
964 u64 tmp[2];
965
966 dt_init(dt);
967
968 dt_start_node(dt, "");
969
970 dt_prop_u32(dt, "#address-cells", 2);
971 dt_prop_u32(dt, "#size-cells", 2);
972
973 /* /memory */
974 dt_start_node(dt, "memory@0");
975 dt_prop_str(dt, "name", "memory");
976 dt_prop_str(dt, "device_type", "memory");
977 tmp[0] = 0;
978 tmp[1] = systemcfg->physicalMemorySize;
979 dt_prop_u64_list(dt, "reg", tmp, 2);
980 dt_end_node(dt);
981
982 /* /chosen */
983 dt_start_node(dt, "chosen");
984 dt_prop_u32(dt, "linux,platform", PLATFORM_ISERIES_LPAR);
985 dt_end_node(dt);
986
987 dt_cpus(dt);
988
989 dt_end_node(dt);
990
991 dt_push_u32(dt, OF_DT_END);
992}
993
994void * __init iSeries_early_setup(void)
995{
996 iSeries_fixup_klimit();
997
998 /*
999 * Initialize the table which translate Linux physical addresses to
1000 * AS/400 absolute addresses
1001 */
1002 build_iSeries_Memory_Map();
1003
1004 build_flat_dt(&iseries_dt);
1005
1006 return (void *) __pa(&iseries_dt);
1007}
diff --git a/arch/ppc64/kernel/iSeries_setup.h b/arch/ppc64/kernel/iSeries_setup.h
deleted file mode 100644
index c6eb29a245ac..000000000000
--- a/arch/ppc64/kernel/iSeries_setup.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com>
3 * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu>
4 *
5 * Module name: as400_setup.h
6 *
7 * Description:
8 * Architecture- / platform-specific boot-time initialization code for
9 * the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and
10 * code by Gary Thomas, Cort Dougan <cort@cs.nmt.edu>, and Dan Malek
11 * <dan@netx4.com>.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
17 */
18
19#ifndef __ISERIES_SETUP_H__
20#define __ISERIES_SETUP_H__
21
22extern void iSeries_get_boot_time(struct rtc_time *tm);
23extern int iSeries_set_rtc_time(struct rtc_time *tm);
24extern void iSeries_get_rtc_time(struct rtc_time *tm);
25
26#endif /* __ISERIES_SETUP_H__ */
diff --git a/arch/ppc64/kernel/iSeries_smp.c b/arch/ppc64/kernel/iSeries_smp.c
deleted file mode 100644
index f982e5b805f4..000000000000
--- a/arch/ppc64/kernel/iSeries_smp.c
+++ /dev/null
@@ -1,121 +0,0 @@
1/*
2 * SMP support for iSeries machines.
3 *
4 * Dave Engebretsen, Peter Bergner, and
5 * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com
6 *
7 * Plus various changes from other IBM teams...
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; either version
12 * 2 of the License, or (at your option) any later version.
13 */
14
15#undef DEBUG
16
17#include <linux/config.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/sched.h>
21#include <linux/smp.h>
22#include <linux/smp_lock.h>
23#include <linux/interrupt.h>
24#include <linux/kernel_stat.h>
25#include <linux/delay.h>
26#include <linux/init.h>
27#include <linux/spinlock.h>
28#include <linux/cache.h>
29#include <linux/err.h>
30#include <linux/sysdev.h>
31#include <linux/cpu.h>
32
33#include <asm/ptrace.h>
34#include <asm/atomic.h>
35#include <asm/irq.h>
36#include <asm/page.h>
37#include <asm/pgtable.h>
38#include <asm/io.h>
39#include <asm/smp.h>
40#include <asm/paca.h>
41#include <asm/iSeries/HvCall.h>
42#include <asm/time.h>
43#include <asm/ppcdebug.h>
44#include <asm/machdep.h>
45#include <asm/cputable.h>
46#include <asm/system.h>
47
48static unsigned long iSeries_smp_message[NR_CPUS];
49
50void iSeries_smp_message_recv( struct pt_regs * regs )
51{
52 int cpu = smp_processor_id();
53 int msg;
54
55 if ( num_online_cpus() < 2 )
56 return;
57
58 for ( msg = 0; msg < 4; ++msg )
59 if ( test_and_clear_bit( msg, &iSeries_smp_message[cpu] ) )
60 smp_message_recv( msg, regs );
61}
62
63static inline void smp_iSeries_do_message(int cpu, int msg)
64{
65 set_bit(msg, &iSeries_smp_message[cpu]);
66 HvCall_sendIPI(&(paca[cpu]));
67}
68
69static void smp_iSeries_message_pass(int target, int msg)
70{
71 int i;
72
73 if (target < NR_CPUS)
74 smp_iSeries_do_message(target, msg);
75 else {
76 for_each_online_cpu(i) {
77 if (target == MSG_ALL_BUT_SELF
78 && i == smp_processor_id())
79 continue;
80 smp_iSeries_do_message(i, msg);
81 }
82 }
83}
84
85static int smp_iSeries_probe(void)
86{
87 return cpus_weight(cpu_possible_map);
88}
89
90static void smp_iSeries_kick_cpu(int nr)
91{
92 BUG_ON(nr < 0 || nr >= NR_CPUS);
93
94 /* Verify that our partition has a processor nr */
95 if (paca[nr].lppaca.dyn_proc_status >= 2)
96 return;
97
98 /* The processor is currently spinning, waiting
99 * for the cpu_start field to become non-zero
100 * After we set cpu_start, the processor will
101 * continue on to secondary_start in iSeries_head.S
102 */
103 paca[nr].cpu_start = 1;
104}
105
106static void __devinit smp_iSeries_setup_cpu(int nr)
107{
108}
109
110static struct smp_ops_t iSeries_smp_ops = {
111 .message_pass = smp_iSeries_message_pass,
112 .probe = smp_iSeries_probe,
113 .kick_cpu = smp_iSeries_kick_cpu,
114 .setup_cpu = smp_iSeries_setup_cpu,
115};
116
117/* This is called very early. */
118void __init smp_init_iSeries(void)
119{
120 smp_ops = &iSeries_smp_ops;
121}
diff --git a/arch/ppc64/kernel/iSeries_vio.c b/arch/ppc64/kernel/iSeries_vio.c
deleted file mode 100644
index c0f7d2e9153f..000000000000
--- a/arch/ppc64/kernel/iSeries_vio.c
+++ /dev/null
@@ -1,156 +0,0 @@
1/*
2 * IBM PowerPC iSeries Virtual I/O Infrastructure Support.
3 *
4 * Copyright (c) 2005 Stephen Rothwell, IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11#include <linux/types.h>
12#include <linux/device.h>
13#include <linux/init.h>
14
15#include <asm/vio.h>
16#include <asm/iommu.h>
17#include <asm/tce.h>
18#include <asm/abs_addr.h>
19#include <asm/page.h>
20#include <asm/iSeries/vio.h>
21#include <asm/iSeries/HvTypes.h>
22#include <asm/iSeries/HvLpConfig.h>
23#include <asm/iSeries/HvCallXm.h>
24
25struct device *iSeries_vio_dev = &vio_bus_device.dev;
26EXPORT_SYMBOL(iSeries_vio_dev);
27
28static struct iommu_table veth_iommu_table;
29static struct iommu_table vio_iommu_table;
30
/*
 * Ask the hypervisor for the TCE table backing the virtual bus (bus 255)
 * and split it in two: the first half for virtual ethernet (veth), the
 * remainder for all other virtual I/O devices.
 */
static void __init iommu_vio_init(void)
{
	struct iommu_table *t;
	struct iommu_table_cb cb;
	unsigned long cbp;
	unsigned long itc_entries;

	cb.itc_busno = 255;	/* Bus 255 is the virtual bus */
	cb.itc_virtbus = 0xff;	/* Ask for virtual bus */

	/* The hypervisor fills in cb (size/offset/index) in place. */
	cbp = virt_to_abs(&cb);
	HvCallXm_getTceTableParms(cbp);

	itc_entries = cb.itc_size * PAGE_SIZE / sizeof(union tce_entry);
	veth_iommu_table.it_size = itc_entries / 2;
	veth_iommu_table.it_busno = cb.itc_busno;
	veth_iommu_table.it_offset = cb.itc_offset;
	veth_iommu_table.it_index = cb.itc_index;
	veth_iommu_table.it_type = TCE_VB;
	veth_iommu_table.it_blocksize = 1;

	t = iommu_init_table(&veth_iommu_table);

	if (!t)
		printk("Virtual Bus VETH TCE table failed.\n");

	/* The vio table takes whatever the veth table did not. */
	vio_iommu_table.it_size = itc_entries - veth_iommu_table.it_size;
	vio_iommu_table.it_busno = cb.itc_busno;
	vio_iommu_table.it_offset = cb.itc_offset +
		veth_iommu_table.it_size;
	vio_iommu_table.it_index = cb.itc_index;
	vio_iommu_table.it_type = TCE_VB;
	vio_iommu_table.it_blocksize = 1;

	t = iommu_init_table(&vio_iommu_table);

	if (!t)
		printk("Virtual Bus VIO TCE table failed.\n");
}
70
71/**
72 * vio_register_device_iseries: - Register a new iSeries vio device.
73 * @voidev: The device to register.
74 */
75static struct vio_dev *__init vio_register_device_iseries(char *type,
76 uint32_t unit_num)
77{
78 struct vio_dev *viodev;
79
80 /* allocate a vio_dev for this device */
81 viodev = kmalloc(sizeof(struct vio_dev), GFP_KERNEL);
82 if (!viodev)
83 return NULL;
84 memset(viodev, 0, sizeof(struct vio_dev));
85
86 snprintf(viodev->dev.bus_id, BUS_ID_SIZE, "%s%d", type, unit_num);
87
88 viodev->name = viodev->dev.bus_id;
89 viodev->type = type;
90 viodev->unit_address = unit_num;
91 viodev->iommu_table = &vio_iommu_table;
92 if (vio_register_device(viodev) == NULL) {
93 kfree(viodev);
94 return NULL;
95 }
96 return viodev;
97}
98
99void __init probe_bus_iseries(void)
100{
101 HvLpIndexMap vlan_map;
102 struct vio_dev *viodev;
103 int i;
104
105 /* there is only one of each of these */
106 vio_register_device_iseries("viocons", 0);
107 vio_register_device_iseries("vscsi", 0);
108
109 vlan_map = HvLpConfig_getVirtualLanIndexMap();
110 for (i = 0; i < HVMAXARCHITECTEDVIRTUALLANS; i++) {
111 if ((vlan_map & (0x8000 >> i)) == 0)
112 continue;
113 viodev = vio_register_device_iseries("vlan", i);
114 /* veth is special and has it own iommu_table */
115 viodev->iommu_table = &veth_iommu_table;
116 }
117 for (i = 0; i < HVMAXARCHITECTEDVIRTUALDISKS; i++)
118 vio_register_device_iseries("viodasd", i);
119 for (i = 0; i < HVMAXARCHITECTEDVIRTUALCDROMS; i++)
120 vio_register_device_iseries("viocd", i);
121 for (i = 0; i < HVMAXARCHITECTEDVIRTUALTAPES; i++)
122 vio_register_device_iseries("viotape", i);
123}
124
/**
 * vio_match_device_iseries: - Tell if a iSeries VIO device matches a
 *	vio_device_id
 *
 * Matches on the device-type prefix only (e.g. id "vlan" matches any
 * "vlanN" device); iSeries has no compat string to compare.
 */
static int vio_match_device_iseries(const struct vio_device_id *id,
		const struct vio_dev *dev)
{
	return strncmp(dev->type, id->type, strlen(id->type)) == 0;
}

/* Bus operations handed to the generic vio bus code. */
static struct vio_bus_ops vio_bus_ops_iseries = {
	.match = vio_match_device_iseries,
};
138
/**
 * vio_bus_init_iseries: - Initialize the iSeries virtual IO bus
 *
 * Registers the generic vio bus, sets up the virtual-bus TCE tables,
 * then probes for all configured virtual devices.  Runs at initcall
 * time; returns 0 on success or the vio_bus_init() error code.
 */
static int __init vio_bus_init_iseries(void)
{
	int err;

	err = vio_bus_init(&vio_bus_ops_iseries);
	if (err == 0) {
		iommu_vio_init();
		vio_bus_device.iommu_table = &vio_iommu_table;
		iSeries_vio_dev = &vio_bus_device.dev;
		probe_bus_iseries();
	}
	return err;
}

__initcall(vio_bus_init_iseries);
diff --git a/arch/ppc64/kernel/maple_pci.c b/arch/ppc64/kernel/maple_pci.c
index 1d297e0edfc0..0937649f4961 100644
--- a/arch/ppc64/kernel/maple_pci.c
+++ b/arch/ppc64/kernel/maple_pci.c
@@ -23,8 +23,7 @@
23#include <asm/pci-bridge.h> 23#include <asm/pci-bridge.h>
24#include <asm/machdep.h> 24#include <asm/machdep.h>
25#include <asm/iommu.h> 25#include <asm/iommu.h>
26 26#include <asm/ppc-pci.h>
27#include "pci.h"
28 27
29#ifdef DEBUG 28#ifdef DEBUG
30#define DBG(x...) printk(x) 29#define DBG(x...) printk(x)
diff --git a/arch/ppc64/kernel/mf.c b/arch/ppc64/kernel/mf.c
deleted file mode 100644
index ef4a338ebd01..000000000000
--- a/arch/ppc64/kernel/mf.c
+++ /dev/null
@@ -1,1281 +0,0 @@
1/*
2 * mf.c
3 * Copyright (C) 2001 Troy D. Armstrong IBM Corporation
4 * Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation
5 *
6 * This modules exists as an interface between a Linux secondary partition
7 * running on an iSeries and the primary partition's Virtual Service
8 * Processor (VSP) object. The VSP has final authority over powering on/off
9 * all partitions in the iSeries. It also provides miscellaneous low-level
10 * machine facility type operations.
11 *
12 *
13 * This program is free software; you can redistribute it and/or modify
14 * it under the terms of the GNU General Public License as published by
15 * the Free Software Foundation; either version 2 of the License, or
16 * (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
26 */
27
28#include <linux/types.h>
29#include <linux/errno.h>
30#include <linux/kernel.h>
31#include <linux/init.h>
32#include <linux/completion.h>
33#include <linux/delay.h>
34#include <linux/dma-mapping.h>
35#include <linux/bcd.h>
36
37#include <asm/time.h>
38#include <asm/uaccess.h>
39#include <asm/paca.h>
40#include <asm/iSeries/vio.h>
41#include <asm/iSeries/mf.h>
42#include <asm/iSeries/HvLpConfig.h>
43#include <asm/iSeries/ItLpQueue.h>
44
/*
 * This is the structure layout for the Machine Facilites LPAR event
 * flows.  These are wire formats shared with the primary partition's
 * VSP object: field sizes, order and padding must not change.
 */
struct vsp_cmd_data {
	u64 token;		/* caller cookie, echoed back in the ack */
	u16 cmd;		/* VSP command number */
	HvLpIndex lp_index;
	u8 result_code;		/* 0 on success */
	u32 reserved;
	union {
		u64 state;	/* GetStateOut */
		u64 ipl_type;	/* GetIplTypeOut, Function02SelectIplTypeIn */
		u64 ipl_mode;	/* GetIplModeOut, Function02SelectIplModeIn */
		u64 page[4];	/* GetSrcHistoryIn */
		u64 flag;	/* GetAutoIplWhenPrimaryIplsOut,
				   SetAutoIplWhenPrimaryIplsIn,
				   WhiteButtonPowerOffIn,
				   Function08FastPowerOffIn,
				   IsSpcnRackPowerIncompleteOut */
		struct {
			u64 token;
			u64 address_type;
			u64 side;
			u32 length;
			u32 offset;
		} kern;		/* SetKernelImageIn, GetKernelImageIn,
				   SetKernelCmdLineIn, GetKernelCmdLineIn */
		u32 length_out;	/* GetKernelImageOut, GetKernelCmdLineOut */
		u8 reserved[80];
	} sub_data;
};

/* Completion + result buffer used to wait synchronously for a VSP ack. */
struct vsp_rsp_data {
	struct completion com;
	struct vsp_cmd_data *response;
};

/* Payload of the allocate/deallocate LP event subtypes (4 and 5). */
struct alloc_data {
	u16 size;
	u16 type;
	u32 count;
	u16 reserved1;
	u8 reserved2;
	HvLpIndex target_lp;
};

struct ce_msg_data;

/* Called when the ack for a CE message arrives (may be NULL). */
typedef void (*ce_msg_comp_hdlr)(void *token, struct ce_msg_data *vsp_cmd_rsp);

struct ce_msg_comp_data {
	ce_msg_comp_hdlr handler;
	void *token;
};

/* A 12-byte CE message plus its optional completion callback. */
struct ce_msg_data {
	u8 ce_msg[12];
	char reserved[4];
	struct ce_msg_comp_data *completion;
};

/* One Machine Facilities LP event as exchanged with the hypervisor. */
struct io_mf_lp_event {
	struct HvLpEvent hp_lp_event;
	u16 subtype_result_code;
	u16 reserved1;
	u32 reserved2;
	union {
		struct alloc_data alloc;
		struct ce_msg_data ce_msg;
		struct vsp_cmd_data vsp_cmd;
	} data;
};

/* Pack four characters into the event's 32-bit subtype-data word. */
#define subtype_data(a, b, c, d) \
		(((a) << 24) + ((b) << 16) + ((c) << 8) + (d))

/*
 * All outgoing event traffic is kept on a FIFO queue. The first
 * pointer points to the one that is outstanding, and all new
 * requests get stuck on the end. Also, we keep a certain number of
 * preallocated pending events so that we can operate very early in
 * the boot up sequence (before kmalloc is ready).
 */
struct pending_event {
	struct pending_event *next;
	struct io_mf_lp_event event;
	MFCompleteHandler hdlr;		/* called with the ack's result */
	char dma_data[72];		/* staging buffer DMAed to the VSP */
	unsigned dma_data_length;
	unsigned remote_address;
};
/* All four lists below are protected by pending_event_spinlock. */
static spinlock_t pending_event_spinlock;
static struct pending_event *pending_event_head;
static struct pending_event *pending_event_tail;
static struct pending_event *pending_event_avail;
static struct pending_event pending_event_prealloc[16];
142
143/*
144 * Put a pending event onto the available queue, so it can get reused.
145 * Attention! You must have the pending_event_spinlock before calling!
146 */
147static void free_pending_event(struct pending_event *ev)
148{
149 if (ev != NULL) {
150 ev->next = pending_event_avail;
151 pending_event_avail = ev;
152 }
153}
154
/*
 * Enqueue the outbound event onto the stack. If the queue was
 * empty to begin with, we must also issue it via the Hypervisor
 * interface. There is a section of code below that will touch
 * the first stack pointer without the protection of the pending_event_spinlock.
 * This is OK, because we know that nobody else will be modifying
 * the first pointer when we do this.
 *
 * @ev may be NULL: callers pass NULL after popping the head to kick
 * transmission of the next queued event.  Returns 0 on success, or
 * -EIO if the hypervisor rejected @ev itself.
 */
static int signal_event(struct pending_event *ev)
{
	int rc = 0;
	unsigned long flags;
	int go = 1;		/* true while we own the head and must send */
	struct pending_event *ev1;
	HvLpEvent_Rc hv_rc;

	/* enqueue the event */
	if (ev != NULL) {
		ev->next = NULL;
		spin_lock_irqsave(&pending_event_spinlock, flags);
		if (pending_event_head == NULL)
			pending_event_head = ev;
		else {
			/* someone else's event is in flight; they will
			 * send ours when theirs completes */
			go = 0;
			pending_event_tail->next = ev;
		}
		pending_event_tail = ev;
		spin_unlock_irqrestore(&pending_event_spinlock, flags);
	}

	/* send the event; loop so a failed head lets us try its successor */
	while (go) {
		go = 0;

		/* any DMA data to send beforehand? */
		if (pending_event_head->dma_data_length > 0)
			HvCallEvent_dmaToSp(pending_event_head->dma_data,
					pending_event_head->remote_address,
					pending_event_head->dma_data_length,
					HvLpDma_Direction_LocalToRemote);

		hv_rc = HvCallEvent_signalLpEvent(
				&pending_event_head->event.hp_lp_event);
		if (hv_rc != HvLpEvent_Rc_Good) {
			printk(KERN_ERR "mf.c: HvCallEvent_signalLpEvent() "
					"failed with %d\n", (int)hv_rc);

			/* drop the failed head and retry with the next one */
			spin_lock_irqsave(&pending_event_spinlock, flags);
			ev1 = pending_event_head;
			pending_event_head = pending_event_head->next;
			if (pending_event_head != NULL)
				go = 1;
			spin_unlock_irqrestore(&pending_event_spinlock, flags);

			/* our own event failing is reported via rc; anyone
			 * else's failure goes to their completion handler */
			if (ev1 == ev)
				rc = -EIO;
			else if (ev1->hdlr != NULL)
				(*ev1->hdlr)((void *)ev1->event.hp_lp_event.xCorrelationToken, -EIO);

			spin_lock_irqsave(&pending_event_spinlock, flags);
			free_pending_event(ev1);
			spin_unlock_irqrestore(&pending_event_spinlock, flags);
		}
	}

	return rc;
}
222
/*
 * Allocate a new pending_event structure, and initialize it.
 *
 * Prefers the preallocated free list (usable before kmalloc works);
 * falls back to GFP_ATOMIC allocation.  The embedded LP event is set
 * up as an acked interrupt-function MachineFac event targeting the
 * primary partition.  Returns NULL only if allocation fails.
 */
static struct pending_event *new_pending_event(void)
{
	struct pending_event *ev = NULL;
	HvLpIndex primary_lp = HvLpConfig_getPrimaryLpIndex();
	unsigned long flags;
	struct HvLpEvent *hev;

	spin_lock_irqsave(&pending_event_spinlock, flags);
	if (pending_event_avail != NULL) {
		ev = pending_event_avail;
		pending_event_avail = pending_event_avail->next;
	}
	spin_unlock_irqrestore(&pending_event_spinlock, flags);
	if (ev == NULL) {
		ev = kmalloc(sizeof(struct pending_event), GFP_ATOMIC);
		if (ev == NULL) {
			printk(KERN_ERR "mf.c: unable to kmalloc %ld bytes\n",
					sizeof(struct pending_event));
			return NULL;
		}
	}
	/* memset covers both the recycled and the fresh case */
	memset(ev, 0, sizeof(struct pending_event));
	hev = &ev->event.hp_lp_event;
	hev->xFlags.xValid = 1;
	hev->xFlags.xAckType = HvLpEvent_AckType_ImmediateAck;
	hev->xFlags.xAckInd = HvLpEvent_AckInd_DoAck;
	hev->xFlags.xFunction = HvLpEvent_Function_Int;
	hev->xType = HvLpEvent_Type_MachineFac;
	hev->xSourceLp = HvLpConfig_getLpIndex();
	hev->xTargetLp = primary_lp;
	hev->xSizeMinus1 = sizeof(ev->event) - 1;
	hev->xRc = HvLpEvent_Rc_Good;
	hev->xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primary_lp,
			HvLpEvent_Type_MachineFac);
	hev->xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primary_lp,
			HvLpEvent_Type_MachineFac);

	return ev;
}
265
/*
 * Send a subtype-6 ("MFVI") VSP command and wait for its completion.
 * The ack handler (handle_ack, case 6) copies the VSP's response back
 * into *vsp_cmd and completes response.com.  Sleeps; must not be
 * called from interrupt context.  Returns 0 or a signal_event() error.
 */
static int signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd)
{
	struct pending_event *ev = new_pending_event();
	int rc;
	struct vsp_rsp_data response;

	if (ev == NULL)
		return -ENOMEM;

	init_completion(&response.com);
	response.response = vsp_cmd;
	ev->event.hp_lp_event.xSubtype = 6;
	ev->event.hp_lp_event.x.xSubtypeData =
		subtype_data('M', 'F', 'V', 'I');
	/* token lets the ack path find our on-stack response block */
	ev->event.data.vsp_cmd.token = (u64)&response;
	ev->event.data.vsp_cmd.cmd = vsp_cmd->cmd;
	ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
	ev->event.data.vsp_cmd.result_code = 0xFF;
	ev->event.data.vsp_cmd.reserved = 0;
	memcpy(&(ev->event.data.vsp_cmd.sub_data),
			&(vsp_cmd->sub_data), sizeof(vsp_cmd->sub_data));
	/* make the event fully visible before it is signalled */
	mb();

	rc = signal_event(ev);
	if (rc == 0)
		wait_for_completion(&response.com);
	return rc;
}
294
295
296/*
297 * Send a 12-byte CE message to the primary partition VSP object
298 */
299static int signal_ce_msg(char *ce_msg, struct ce_msg_comp_data *completion)
300{
301 struct pending_event *ev = new_pending_event();
302
303 if (ev == NULL)
304 return -ENOMEM;
305
306 ev->event.hp_lp_event.xSubtype = 0;
307 ev->event.hp_lp_event.x.xSubtypeData =
308 subtype_data('M', 'F', 'C', 'E');
309 memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
310 ev->event.data.ce_msg.completion = completion;
311 return signal_event(ev);
312}
313
314/*
315 * Send a 12-byte CE message (with no data) to the primary partition VSP object
316 */
317static int signal_ce_msg_simple(u8 ce_op, struct ce_msg_comp_data *completion)
318{
319 u8 ce_msg[12];
320
321 memset(ce_msg, 0, sizeof(ce_msg));
322 ce_msg[3] = ce_op;
323 return signal_ce_msg(ce_msg, completion);
324}
325
326/*
327 * Send a 12-byte CE message and DMA data to the primary partition VSP object
328 */
329static int dma_and_signal_ce_msg(char *ce_msg,
330 struct ce_msg_comp_data *completion, void *dma_data,
331 unsigned dma_data_length, unsigned remote_address)
332{
333 struct pending_event *ev = new_pending_event();
334
335 if (ev == NULL)
336 return -ENOMEM;
337
338 ev->event.hp_lp_event.xSubtype = 0;
339 ev->event.hp_lp_event.x.xSubtypeData =
340 subtype_data('M', 'F', 'C', 'E');
341 memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12);
342 ev->event.data.ce_msg.completion = completion;
343 memcpy(ev->dma_data, dma_data, dma_data_length);
344 ev->dma_data_length = dma_data_length;
345 ev->remote_address = remote_address;
346 return signal_event(ev);
347}
348
/*
 * Initiate a nice (hopefully) shutdown of Linux. We simply are
 * going to try and send the init process a SIGINT signal. If
 * this fails (why?), we'll simply force it off in a not-so-nice
 * manner (mf_power_off never returns).
 *
 * Returns the kill_proc() result: 0 if init was signalled.
 */
static int shutdown(void)
{
	int rc = kill_proc(1, SIGINT, 1);

	if (rc) {
		printk(KERN_ALERT "mf.c: SIGINT to init failed (%d), "
				"hard shutdown commencing\n", rc);
		mf_power_off();
	} else
		printk(KERN_INFO "mf.c: init has been successfully notified "
				"to proceed with shutdown\n");
	return rc;
}
368
/*
 * The primary partition VSP object is sending us a new
 * event flow. Handle it...
 */
static void handle_int(struct io_mf_lp_event *event)
{
	struct ce_msg_data *ce_msg_data;
	struct ce_msg_data *pce_msg_data;
	unsigned long flags;
	struct pending_event *pev;

	/* ack the interrupt */
	event->hp_lp_event.xRc = HvLpEvent_Rc_Good;
	HvCallEvent_ackLpEvent(&event->hp_lp_event);

	/* process interrupt */
	switch (event->hp_lp_event.xSubtype) {
	case 0:		/* CE message */
		ce_msg_data = &event->data.ce_msg;
		switch (ce_msg_data->ce_msg[3]) {
		case 0x5B:	/* power control notification */
			/* bit 0x20 in byte 5 signals "shut down now" */
			if ((ce_msg_data->ce_msg[5] & 0x20) != 0) {
				printk(KERN_INFO "mf.c: Commencing partition shutdown\n");
				if (shutdown() == 0)
					signal_ce_msg_simple(0xDB, NULL);
			}
			break;
		case 0xC0:	/* get time */
			/* pop the queued 0x40 get-time request this
			 * response belongs to */
			spin_lock_irqsave(&pending_event_spinlock, flags);
			pev = pending_event_head;
			if (pev != NULL)
				pending_event_head = pending_event_head->next;
			spin_unlock_irqrestore(&pending_event_spinlock, flags);
			if (pev == NULL)
				break;
			pce_msg_data = &pev->event.data.ce_msg;
			/* NOTE(review): if the popped event is not a 0x40
			 * request it is neither freed nor requeued here —
			 * looks like a leak; confirm against the protocol */
			if (pce_msg_data->ce_msg[3] != 0x40)
				break;
			if (pce_msg_data->completion != NULL) {
				ce_msg_comp_hdlr handler =
					pce_msg_data->completion->handler;
				void *token = pce_msg_data->completion->token;

				if (handler != NULL)
					(*handler)(token, ce_msg_data);
			}
			spin_lock_irqsave(&pending_event_spinlock, flags);
			free_pending_event(pev);
			spin_unlock_irqrestore(&pending_event_spinlock, flags);
			/* send next waiting event */
			if (pending_event_head != NULL)
				signal_event(NULL);
			break;
		}
		break;
	case 1:		/* IT sys shutdown */
		printk(KERN_INFO "mf.c: Commencing system shutdown\n");
		shutdown();
		break;
	}
}
430
/*
 * The primary partition VSP object is acknowledging the receipt
 * of a flow we sent to them. If there are other flows queued
 * up, we must send another one now...
 */
static void handle_ack(struct io_mf_lp_event *event)
{
	unsigned long flags;
	struct pending_event *two = NULL;
	unsigned long free_it = 0;	/* pop + free the head when set */
	struct ce_msg_data *ce_msg_data;
	struct ce_msg_data *pce_msg_data;
	struct vsp_rsp_data *rsp;

	/* handle current event */
	if (pending_event_head == NULL) {
		printk(KERN_ERR "mf.c: stack empty for receiving ack\n");
		return;
	}

	switch (event->hp_lp_event.xSubtype) {
	case 0:		/* CE msg */
		ce_msg_data = &event->data.ce_msg;
		if (ce_msg_data->ce_msg[3] != 0x40) {
			free_it = 1;
			break;
		}
		/* a 0x40 get-time ack with byte 2 == 0 means the real
		 * response arrives later via handle_int's 0xC0 path:
		 * keep the request queued */
		if (ce_msg_data->ce_msg[2] == 0)
			break;
		free_it = 1;
		pce_msg_data = &pending_event_head->event.data.ce_msg;
		if (pce_msg_data->completion != NULL) {
			ce_msg_comp_hdlr handler =
				pce_msg_data->completion->handler;
			void *token = pce_msg_data->completion->token;

			if (handler != NULL)
				(*handler)(token, ce_msg_data);
		}
		break;
	case 4:		/* allocate */
	case 5:		/* deallocate */
		if (pending_event_head->hdlr != NULL)
			(*pending_event_head->hdlr)((void *)event->hp_lp_event.xCorrelationToken, event->data.alloc.count);
		free_it = 1;
		break;
	case 6:		/* VSP command: copy the response back and wake
			 * the waiter in signal_vsp_instruction() */
		free_it = 1;
		rsp = (struct vsp_rsp_data *)event->data.vsp_cmd.token;
		if (rsp == NULL) {
			printk(KERN_ERR "mf.c: no rsp\n");
			break;
		}
		if (rsp->response != NULL)
			memcpy(rsp->response, &event->data.vsp_cmd,
					sizeof(event->data.vsp_cmd));
		complete(&rsp->com);
		break;
	}

	/* remove from queue */
	spin_lock_irqsave(&pending_event_spinlock, flags);
	if ((pending_event_head != NULL) && (free_it == 1)) {
		struct pending_event *oldHead = pending_event_head;

		pending_event_head = pending_event_head->next;
		two = pending_event_head;
		free_pending_event(oldHead);
	}
	spin_unlock_irqrestore(&pending_event_spinlock, flags);

	/* send next waiting event */
	if (two != NULL)
		signal_event(NULL);
}
506
507/*
508 * This is the generic event handler we are registering with
509 * the Hypervisor. Ensure the flows are for us, and then
510 * parse it enough to know if it is an interrupt or an
511 * acknowledge.
512 */
513static void hv_handler(struct HvLpEvent *event, struct pt_regs *regs)
514{
515 if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) {
516 switch(event->xFlags.xFunction) {
517 case HvLpEvent_Function_Ack:
518 handle_ack((struct io_mf_lp_event *)event);
519 break;
520 case HvLpEvent_Function_Int:
521 handle_int((struct io_mf_lp_event *)event);
522 break;
523 default:
524 printk(KERN_ERR "mf.c: non ack/int event received\n");
525 break;
526 }
527 } else
528 printk(KERN_ERR "mf.c: alien event received\n");
529}
530
/*
 * Global kernel interface to allocate and seed events into the
 * Hypervisor.
 *
 * On any failure (allocation or send), @hdlr is invoked immediately
 * with the negative error code instead of the event count.
 */
void mf_allocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
		unsigned size, unsigned count, MFCompleteHandler hdlr,
		void *user_token)
{
	struct pending_event *ev = new_pending_event();
	int rc;

	if (ev == NULL) {
		rc = -ENOMEM;
	} else {
		ev->event.hp_lp_event.xSubtype = 4;
		ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
		ev->event.hp_lp_event.x.xSubtypeData =
			subtype_data('M', 'F', 'M', 'A');
		ev->event.data.alloc.target_lp = target_lp;
		ev->event.data.alloc.type = type;
		ev->event.data.alloc.size = size;
		ev->event.data.alloc.count = count;
		ev->hdlr = hdlr;	/* called from handle_ack, case 4 */
		rc = signal_event(ev);
	}
	if ((rc != 0) && (hdlr != NULL))
		(*hdlr)(user_token, rc);
}
EXPORT_SYMBOL(mf_allocate_lp_events);
560
/*
 * Global kernel interface to unseed and deallocate events already in
 * Hypervisor.
 *
 * Mirrors mf_allocate_lp_events(): @hdlr gets the error code directly
 * if the request cannot be sent.
 */
void mf_deallocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type,
		unsigned count, MFCompleteHandler hdlr, void *user_token)
{
	struct pending_event *ev = new_pending_event();
	int rc;

	if (ev == NULL)
		rc = -ENOMEM;
	else {
		ev->event.hp_lp_event.xSubtype = 5;
		ev->event.hp_lp_event.xCorrelationToken = (u64)user_token;
		ev->event.hp_lp_event.x.xSubtypeData =
			subtype_data('M', 'F', 'M', 'D');
		ev->event.data.alloc.target_lp = target_lp;
		ev->event.data.alloc.type = type;
		ev->event.data.alloc.count = count;
		ev->hdlr = hdlr;	/* called from handle_ack, case 5 */
		rc = signal_event(ev);
	}
	if ((rc != 0) && (hdlr != NULL))
		(*hdlr)(user_token, rc);
}
EXPORT_SYMBOL(mf_deallocate_lp_events);
588
/*
 * Global kernel interface to tell the VSP object in the primary
 * partition to power this partition off.  Never returns; we spin
 * until the VSP pulls the plug.
 */
void mf_power_off(void)
{
	printk(KERN_INFO "mf.c: Down it goes...\n");
	signal_ce_msg_simple(0x4d, NULL);
	for (;;)
		;
}

/*
 * Global kernel interface to tell the VSP object in the primary
 * partition to reboot this partition.  Never returns.
 */
void mf_reboot(void)
{
	printk(KERN_INFO "mf.c: Preparing to bounce...\n");
	signal_ce_msg_simple(0x4e, NULL);
	for (;;)
		;
}
612
613/*
614 * Display a single word SRC onto the VSP control panel.
615 */
616void mf_display_src(u32 word)
617{
618 u8 ce[12];
619
620 memset(ce, 0, sizeof(ce));
621 ce[3] = 0x4a;
622 ce[7] = 0x01;
623 ce[8] = word >> 24;
624 ce[9] = word >> 16;
625 ce[10] = word >> 8;
626 ce[11] = word;
627 signal_ce_msg(ce, NULL);
628}
629
/*
 * Display a single word SRC of the form "PROGXXXX" on the VSP control panel.
 * The 72-byte template is DMAed to the VSP; the four x's at offsets
 * 44..47 are patched with @value in hex, and bytes 6..7 carry it in
 * binary.
 */
void mf_display_progress(u16 value)
{
	u8 ce[12];
	u8 src[72];

	memcpy(ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12);
	memcpy(src, "\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00"
			"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
			"\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
			"\x00\x00\x00\x00PROGxxxx                        ",
			72);
	src[6] = value >> 8;
	src[7] = value & 255;
	src[44] = "0123456789ABCDEF"[(value >> 12) & 15];
	src[45] = "0123456789ABCDEF"[(value >> 8) & 15];
	src[46] = "0123456789ABCDEF"[(value >> 4) & 15];
	src[47] = "0123456789ABCDEF"[value & 15];
	/* 9 * 64 * 1024 is the fixed remote address of the SRC buffer */
	dma_and_signal_ce_msg(ce, NULL, src, sizeof(src), 9 * 64 * 1024);
}
652
/*
 * Clear the VSP control panel. Used to "erase" an SRC that was
 * previously displayed (CE opcode 0x4b).
 */
void mf_clear_src(void)
{
	signal_ce_msg_simple(0x4b, NULL);
}
661
662/*
663 * Initialization code here.
664 */
665void mf_init(void)
666{
667 int i;
668
669 /* initialize */
670 spin_lock_init(&pending_event_spinlock);
671 for (i = 0;
672 i < sizeof(pending_event_prealloc) / sizeof(*pending_event_prealloc);
673 ++i)
674 free_pending_event(&pending_event_prealloc[i]);
675 HvLpEvent_registerHandler(HvLpEvent_Type_MachineFac, &hv_handler);
676
677 /* virtual continue ack */
678 signal_ce_msg_simple(0x57, NULL);
679
680 /* initialization complete */
681 printk(KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities "
682 "initialized\n");
683}
684
/* State shared between mf_get_rtc() and its CE completion callback. */
struct rtc_time_data {
	struct completion com;
	struct ce_msg_data ce_msg;	/* copy of the VSP's time response */
	int rc;
};

/* CE completion callback: stash the response and wake the waiter. */
static void get_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
{
	struct rtc_time_data *rtc = token;

	memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
	rtc->rc = 0;
	complete(&rtc->com);
}
699
/*
 * Decode a VSP get-time CE response into @tm.
 *
 * On error (@rc != 0) a fixed sentinel date (1952-06-15) is returned
 * along with @rc.  If the TOD clock was never set (response codes
 * 0xa9/0xaf), a default of 1971-09-10 01:01:01 is written back to the
 * hardware first, then the (BCD-encoded) response bytes are decoded
 * anyway.  Years <= 69 are taken to mean 20xx.
 */
static int rtc_set_tm(int rc, u8 *ce_msg, struct rtc_time *tm)
{
	tm->tm_wday = 0;
	tm->tm_yday = 0;
	tm->tm_isdst = 0;
	if (rc) {
		tm->tm_sec = 0;
		tm->tm_min = 0;
		tm->tm_hour = 0;
		tm->tm_mday = 15;
		tm->tm_mon = 5;
		tm->tm_year = 52;
		return rc;
	}

	if ((ce_msg[2] == 0xa9) ||
	    (ce_msg[2] == 0xaf)) {
		/* TOD clock is not set */
		tm->tm_sec = 1;
		tm->tm_min = 1;
		tm->tm_hour = 1;
		tm->tm_mday = 10;
		tm->tm_mon = 8;
		tm->tm_year = 71;
		mf_set_rtc(tm);
	}
	{
		/* response fields are BCD-encoded */
		u8 year = ce_msg[5];
		u8 sec = ce_msg[6];
		u8 min = ce_msg[7];
		u8 hour = ce_msg[8];
		u8 day = ce_msg[10];
		u8 mon = ce_msg[11];

		BCD_TO_BIN(sec);
		BCD_TO_BIN(min);
		BCD_TO_BIN(hour);
		BCD_TO_BIN(day);
		BCD_TO_BIN(mon);
		BCD_TO_BIN(year);

		/* two-digit year: <= 69 means 2000s (tm_year is 1900-based) */
		if (year <= 69)
			year += 100;

		tm->tm_sec = sec;
		tm->tm_min = min;
		tm->tm_hour = hour;
		tm->tm_mday = day;
		tm->tm_mon = mon;
		tm->tm_year = year;
	}

	return 0;
}
754
/*
 * Read the partition's clock from the VSP (CE op 0x40) and decode it
 * into @tm.  Sleeps until the response arrives; returns 0 or an error
 * from the send path / decoder.
 */
int mf_get_rtc(struct rtc_time *tm)
{
	struct ce_msg_comp_data ce_complete;
	struct rtc_time_data rtc_data;
	int rc;

	memset(&ce_complete, 0, sizeof(ce_complete));
	memset(&rtc_data, 0, sizeof(rtc_data));
	init_completion(&rtc_data.com);
	ce_complete.handler = &get_rtc_time_complete;
	ce_complete.token = &rtc_data;
	rc = signal_ce_msg_simple(0x40, &ce_complete);
	if (rc)
		return rc;
	wait_for_completion(&rtc_data.com);
	return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
}
772
/* Polling variant of rtc_time_data: a busy flag instead of a completion,
 * because at boot we cannot sleep or take interrupts yet. */
struct boot_rtc_time_data {
	int busy;		/* cleared by the callback when done */
	struct ce_msg_data ce_msg;
	int rc;
};

/* CE completion callback for the boot-time path: no sleeping waiter. */
static void get_boot_rtc_time_complete(void *token, struct ce_msg_data *ce_msg)
{
	struct boot_rtc_time_data *rtc = token;

	memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg));
	rtc->rc = 0;
	rtc->busy = 0;
}
787
/*
 * Boot-time clock read: same CE op 0x40 exchange as mf_get_rtc(), but
 * polls the hypervisor event queue instead of sleeping, since this
 * runs before interrupts are enabled.
 */
int mf_get_boot_rtc(struct rtc_time *tm)
{
	struct ce_msg_comp_data ce_complete;
	struct boot_rtc_time_data rtc_data;
	int rc;

	memset(&ce_complete, 0, sizeof(ce_complete));
	memset(&rtc_data, 0, sizeof(rtc_data));
	rtc_data.busy = 1;
	ce_complete.handler = &get_boot_rtc_time_complete;
	ce_complete.token = &rtc_data;
	rc = signal_ce_msg_simple(0x40, &ce_complete);
	if (rc)
		return rc;
	/* We need to poll here as we are not yet taking interrupts */
	while (rtc_data.busy) {
		if (hvlpevent_is_pending())
			process_hvlpevents(NULL);
	}
	return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm);
}
809
810int mf_set_rtc(struct rtc_time *tm)
811{
812 char ce_time[12];
813 u8 day, mon, hour, min, sec, y1, y2;
814 unsigned year;
815
816 year = 1900 + tm->tm_year;
817 y1 = year / 100;
818 y2 = year % 100;
819
820 sec = tm->tm_sec;
821 min = tm->tm_min;
822 hour = tm->tm_hour;
823 day = tm->tm_mday;
824 mon = tm->tm_mon + 1;
825
826 BIN_TO_BCD(sec);
827 BIN_TO_BCD(min);
828 BIN_TO_BCD(hour);
829 BIN_TO_BCD(mon);
830 BIN_TO_BCD(day);
831 BIN_TO_BCD(y1);
832 BIN_TO_BCD(y2);
833
834 memset(ce_time, 0, sizeof(ce_time));
835 ce_time[3] = 0x41;
836 ce_time[4] = y1;
837 ce_time[5] = y2;
838 ce_time[6] = sec;
839 ce_time[7] = min;
840 ce_time[8] = hour;
841 ce_time[10] = day;
842 ce_time[11] = mon;
843
844 return signal_ce_msg(ce_time, NULL);
845}
846
847#ifdef CONFIG_PROC_FS
848
/*
 * procfs read handler: fetch the kernel command line for side @data
 * from the VSP (command 33) straight into @page via DMA, then trim
 * the result at the first NUL or newline.  Returns the byte count
 * for this read, or a negative error.
 */
static int proc_mf_dump_cmdline(char *page, char **start, off_t off,
		int count, int *eof, void *data)
{
	int len;
	char *p;
	struct vsp_cmd_data vsp_cmd;
	int rc;
	dma_addr_t dma_addr;

	/* The HV appears to return no more than 256 bytes of command line */
	if (off >= 256)
		return 0;
	if ((off + count) > 256)
		count = 256 - off;

	dma_addr = dma_map_single(iSeries_vio_dev, page, off + count,
			DMA_FROM_DEVICE);
	if (dma_mapping_error(dma_addr))
		return -ENOMEM;
	memset(page, 0, off + count);
	memset(&vsp_cmd, 0, sizeof(vsp_cmd));
	vsp_cmd.cmd = 33;		/* GetKernelCmdLine */
	vsp_cmd.sub_data.kern.token = dma_addr;
	vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
	vsp_cmd.sub_data.kern.side = (u64)data;
	vsp_cmd.sub_data.kern.length = off + count;
	mb();
	rc = signal_vsp_instruction(&vsp_cmd);
	dma_unmap_single(iSeries_vio_dev, dma_addr, off + count,
			DMA_FROM_DEVICE);
	if (rc)
		return rc;
	if (vsp_cmd.result_code != 0)
		return -ENOMEM;
	/* stop at the first NUL (converted to '\n') or newline */
	p = page;
	len = 0;
	while (len < (off + count)) {
		if ((*p == '\0') || (*p == '\n')) {
			if (*p == '\0')
				*p = '\n';
			p++;
			len++;
			*eof = 1;
			break;
		}
		p++;
		len++;
	}

	if (len < off) {
		*eof = 1;
		len = 0;
	}
	return len;
}
904
#if 0
/* Disabled: fetch a chunk of the saved vmlinux image for side @side
 * from the VSP (command 32) into @buffer via DMA.
 * NOTE(review): unlike proc_mf_dump_cmdline above, this does not check
 * dma_mapping_error() — fix before re-enabling. */
static int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side)
{
	struct vsp_cmd_data vsp_cmd;
	int rc;
	int len = *size;
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(iSeries_vio_dev, buffer, len,
			DMA_FROM_DEVICE);
	memset(buffer, 0, len);
	memset(&vsp_cmd, 0, sizeof(vsp_cmd));
	vsp_cmd.cmd = 32;	/* GetKernelImage */
	vsp_cmd.sub_data.kern.token = dma_addr;
	vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
	vsp_cmd.sub_data.kern.side = side;
	vsp_cmd.sub_data.kern.offset = offset;
	vsp_cmd.sub_data.kern.length = len;
	mb();
	rc = signal_vsp_instruction(&vsp_cmd);
	if (rc == 0) {
		if (vsp_cmd.result_code == 0)
			*size = vsp_cmd.sub_data.length_out;
		else
			rc = -ENOMEM;
	}

	dma_unmap_single(iSeries_vio_dev, dma_addr, len, DMA_FROM_DEVICE);

	return rc;
}

/* Disabled procfs read handler wrapping mf_getVmlinuxChunk(). */
static int proc_mf_dump_vmlinux(char *page, char **start, off_t off,
		int count, int *eof, void *data)
{
	int sizeToGet = count;

	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;

	if (mf_getVmlinuxChunk(page, &sizeToGet, off, (u64)data) == 0) {
		if (sizeToGet != 0) {
			*start = page + off;
			return sizeToGet;
		}
		*eof = 1;
		return 0;
	}
	*eof = 1;
	return 0;
}
#endif
957
/*
 * procfs read handler: report which IPL side (A/B/C/D) the partition
 * booted from, via VSP command 2 (GetIplType).  Prints ' ' if the
 * query fails.
 */
static int proc_mf_dump_side(char *page, char **start, off_t off,
		int count, int *eof, void *data)
{
	int len;
	char mf_current_side = ' ';
	struct vsp_cmd_data vsp_cmd;

	memset(&vsp_cmd, 0, sizeof(vsp_cmd));
	vsp_cmd.cmd = 2;
	vsp_cmd.sub_data.ipl_type = 0;
	mb();

	if (signal_vsp_instruction(&vsp_cmd) == 0) {
		if (vsp_cmd.result_code == 0) {
			switch (vsp_cmd.sub_data.ipl_type) {
			case 0:	mf_current_side = 'A';
				break;
			case 1:	mf_current_side = 'B';
				break;
			case 2:	mf_current_side = 'C';
				break;
			default: mf_current_side = 'D';
				break;
			}
		}
	}

	len = sprintf(page, "%c\n", mf_current_side);

	/* standard procfs partial-read bookkeeping */
	if (len <= (off + count))
		*eof = 1;
	*start = page + off;
	len -= off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;
	return len;
}
997
998static int proc_mf_change_side(struct file *file, const char __user *buffer,
999 unsigned long count, void *data)
1000{
1001 char side;
1002 u64 newSide;
1003 struct vsp_cmd_data vsp_cmd;
1004
1005 if (!capable(CAP_SYS_ADMIN))
1006 return -EACCES;
1007
1008 if (count == 0)
1009 return 0;
1010
1011 if (get_user(side, buffer))
1012 return -EFAULT;
1013
1014 switch (side) {
1015 case 'A': newSide = 0;
1016 break;
1017 case 'B': newSide = 1;
1018 break;
1019 case 'C': newSide = 2;
1020 break;
1021 case 'D': newSide = 3;
1022 break;
1023 default:
1024 printk(KERN_ERR "mf_proc.c: proc_mf_change_side: invalid side\n");
1025 return -EINVAL;
1026 }
1027
1028 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1029 vsp_cmd.sub_data.ipl_type = newSide;
1030 vsp_cmd.cmd = 10;
1031
1032 (void)signal_vsp_instruction(&vsp_cmd);
1033
1034 return count;
1035}
1036
1037#if 0
1038static void mf_getSrcHistory(char *buffer, int size)
1039{
1040 struct IplTypeReturnStuff return_stuff;
1041 struct pending_event *ev = new_pending_event();
1042 int rc = 0;
1043 char *pages[4];
1044
1045 pages[0] = kmalloc(4096, GFP_ATOMIC);
1046 pages[1] = kmalloc(4096, GFP_ATOMIC);
1047 pages[2] = kmalloc(4096, GFP_ATOMIC);
1048 pages[3] = kmalloc(4096, GFP_ATOMIC);
1049 if ((ev == NULL) || (pages[0] == NULL) || (pages[1] == NULL)
1050 || (pages[2] == NULL) || (pages[3] == NULL))
1051 return -ENOMEM;
1052
1053 return_stuff.xType = 0;
1054 return_stuff.xRc = 0;
1055 return_stuff.xDone = 0;
1056 ev->event.hp_lp_event.xSubtype = 6;
1057 ev->event.hp_lp_event.x.xSubtypeData =
1058 subtype_data('M', 'F', 'V', 'I');
1059 ev->event.data.vsp_cmd.xEvent = &return_stuff;
1060 ev->event.data.vsp_cmd.cmd = 4;
1061 ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex();
1062 ev->event.data.vsp_cmd.result_code = 0xFF;
1063 ev->event.data.vsp_cmd.reserved = 0;
1064 ev->event.data.vsp_cmd.sub_data.page[0] = ISERIES_HV_ADDR(pages[0]);
1065 ev->event.data.vsp_cmd.sub_data.page[1] = ISERIES_HV_ADDR(pages[1]);
1066 ev->event.data.vsp_cmd.sub_data.page[2] = ISERIES_HV_ADDR(pages[2]);
1067 ev->event.data.vsp_cmd.sub_data.page[3] = ISERIES_HV_ADDR(pages[3]);
1068 mb();
1069 if (signal_event(ev) != 0)
1070 return;
1071
1072 while (return_stuff.xDone != 1)
1073 udelay(10);
1074 if (return_stuff.xRc == 0)
1075 memcpy(buffer, pages[0], size);
1076 kfree(pages[0]);
1077 kfree(pages[1]);
1078 kfree(pages[2]);
1079 kfree(pages[3]);
1080}
1081#endif
1082
1083static int proc_mf_dump_src(char *page, char **start, off_t off,
1084 int count, int *eof, void *data)
1085{
1086#if 0
1087 int len;
1088
1089 mf_getSrcHistory(page, count);
1090 len = count;
1091 len -= off;
1092 if (len < count) {
1093 *eof = 1;
1094 if (len <= 0)
1095 return 0;
1096 } else
1097 len = count;
1098 *start = page + off;
1099 return len;
1100#else
1101 return 0;
1102#endif
1103}
1104
1105static int proc_mf_change_src(struct file *file, const char __user *buffer,
1106 unsigned long count, void *data)
1107{
1108 char stkbuf[10];
1109
1110 if (!capable(CAP_SYS_ADMIN))
1111 return -EACCES;
1112
1113 if ((count < 4) && (count != 1)) {
1114 printk(KERN_ERR "mf_proc: invalid src\n");
1115 return -EINVAL;
1116 }
1117
1118 if (count > (sizeof(stkbuf) - 1))
1119 count = sizeof(stkbuf) - 1;
1120 if (copy_from_user(stkbuf, buffer, count))
1121 return -EFAULT;
1122
1123 if ((count == 1) && (*stkbuf == '\0'))
1124 mf_clear_src();
1125 else
1126 mf_display_src(*(u32 *)stkbuf);
1127
1128 return count;
1129}
1130
1131static int proc_mf_change_cmdline(struct file *file, const char __user *buffer,
1132 unsigned long count, void *data)
1133{
1134 struct vsp_cmd_data vsp_cmd;
1135 dma_addr_t dma_addr;
1136 char *page;
1137 int ret = -EACCES;
1138
1139 if (!capable(CAP_SYS_ADMIN))
1140 goto out;
1141
1142 dma_addr = 0;
1143 page = dma_alloc_coherent(iSeries_vio_dev, count, &dma_addr,
1144 GFP_ATOMIC);
1145 ret = -ENOMEM;
1146 if (page == NULL)
1147 goto out;
1148
1149 ret = -EFAULT;
1150 if (copy_from_user(page, buffer, count))
1151 goto out_free;
1152
1153 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1154 vsp_cmd.cmd = 31;
1155 vsp_cmd.sub_data.kern.token = dma_addr;
1156 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
1157 vsp_cmd.sub_data.kern.side = (u64)data;
1158 vsp_cmd.sub_data.kern.length = count;
1159 mb();
1160 (void)signal_vsp_instruction(&vsp_cmd);
1161 ret = count;
1162
1163out_free:
1164 dma_free_coherent(iSeries_vio_dev, count, page, dma_addr);
1165out:
1166 return ret;
1167}
1168
1169static ssize_t proc_mf_change_vmlinux(struct file *file,
1170 const char __user *buf,
1171 size_t count, loff_t *ppos)
1172{
1173 struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode);
1174 ssize_t rc;
1175 dma_addr_t dma_addr;
1176 char *page;
1177 struct vsp_cmd_data vsp_cmd;
1178
1179 rc = -EACCES;
1180 if (!capable(CAP_SYS_ADMIN))
1181 goto out;
1182
1183 dma_addr = 0;
1184 page = dma_alloc_coherent(iSeries_vio_dev, count, &dma_addr,
1185 GFP_ATOMIC);
1186 rc = -ENOMEM;
1187 if (page == NULL) {
1188 printk(KERN_ERR "mf.c: couldn't allocate memory to set vmlinux chunk\n");
1189 goto out;
1190 }
1191 rc = -EFAULT;
1192 if (copy_from_user(page, buf, count))
1193 goto out_free;
1194
1195 memset(&vsp_cmd, 0, sizeof(vsp_cmd));
1196 vsp_cmd.cmd = 30;
1197 vsp_cmd.sub_data.kern.token = dma_addr;
1198 vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex;
1199 vsp_cmd.sub_data.kern.side = (u64)dp->data;
1200 vsp_cmd.sub_data.kern.offset = *ppos;
1201 vsp_cmd.sub_data.kern.length = count;
1202 mb();
1203 rc = signal_vsp_instruction(&vsp_cmd);
1204 if (rc)
1205 goto out_free;
1206 rc = -ENOMEM;
1207 if (vsp_cmd.result_code != 0)
1208 goto out_free;
1209
1210 *ppos += count;
1211 rc = count;
1212out_free:
1213 dma_free_coherent(iSeries_vio_dev, count, page, dma_addr);
1214out:
1215 return rc;
1216}
1217
1218static struct file_operations proc_vmlinux_operations = {
1219 .write = proc_mf_change_vmlinux,
1220};
1221
1222static int __init mf_proc_init(void)
1223{
1224 struct proc_dir_entry *mf_proc_root;
1225 struct proc_dir_entry *ent;
1226 struct proc_dir_entry *mf;
1227 char name[2];
1228 int i;
1229
1230 mf_proc_root = proc_mkdir("iSeries/mf", NULL);
1231 if (!mf_proc_root)
1232 return 1;
1233
1234 name[1] = '\0';
1235 for (i = 0; i < 4; i++) {
1236 name[0] = 'A' + i;
1237 mf = proc_mkdir(name, mf_proc_root);
1238 if (!mf)
1239 return 1;
1240
1241 ent = create_proc_entry("cmdline", S_IFREG|S_IRUSR|S_IWUSR, mf);
1242 if (!ent)
1243 return 1;
1244 ent->nlink = 1;
1245 ent->data = (void *)(long)i;
1246 ent->read_proc = proc_mf_dump_cmdline;
1247 ent->write_proc = proc_mf_change_cmdline;
1248
1249 if (i == 3) /* no vmlinux entry for 'D' */
1250 continue;
1251
1252 ent = create_proc_entry("vmlinux", S_IFREG|S_IWUSR, mf);
1253 if (!ent)
1254 return 1;
1255 ent->nlink = 1;
1256 ent->data = (void *)(long)i;
1257 ent->proc_fops = &proc_vmlinux_operations;
1258 }
1259
1260 ent = create_proc_entry("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root);
1261 if (!ent)
1262 return 1;
1263 ent->nlink = 1;
1264 ent->data = (void *)0;
1265 ent->read_proc = proc_mf_dump_side;
1266 ent->write_proc = proc_mf_change_side;
1267
1268 ent = create_proc_entry("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root);
1269 if (!ent)
1270 return 1;
1271 ent->nlink = 1;
1272 ent->data = (void *)0;
1273 ent->read_proc = proc_mf_dump_src;
1274 ent->write_proc = proc_mf_change_src;
1275
1276 return 0;
1277}
1278
1279__initcall(mf_proc_init);
1280
1281#endif /* CONFIG_PROC_FS */
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index a25b59759ddb..d069bbd7f81f 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -64,44 +64,6 @@ _GLOBAL(get_srr1)
64_GLOBAL(get_sp) 64_GLOBAL(get_sp)
65 mr r3,r1 65 mr r3,r1
66 blr 66 blr
67
68#ifdef CONFIG_PPC_ISERIES
69/* unsigned long local_save_flags(void) */
70_GLOBAL(local_get_flags)
71 lbz r3,PACAPROCENABLED(r13)
72 blr
73
74/* unsigned long local_irq_disable(void) */
75_GLOBAL(local_irq_disable)
76 lbz r3,PACAPROCENABLED(r13)
77 li r4,0
78 stb r4,PACAPROCENABLED(r13)
79 blr /* Done */
80
81/* void local_irq_restore(unsigned long flags) */
82_GLOBAL(local_irq_restore)
83 lbz r5,PACAPROCENABLED(r13)
84 /* Check if things are setup the way we want _already_. */
85 cmpw 0,r3,r5
86 beqlr
87 /* are we enabling interrupts? */
88 cmpdi 0,r3,0
89 stb r3,PACAPROCENABLED(r13)
90 beqlr
91 /* Check pending interrupts */
92 /* A decrementer, IPI or PMC interrupt may have occurred
93 * while we were in the hypervisor (which enables) */
94 ld r4,PACALPPACA+LPPACAANYINT(r13)
95 cmpdi r4,0
96 beqlr
97
98 /*
99 * Handle pending interrupts in interrupt context
100 */
101 li r0,0x5555
102 sc
103 blr
104#endif /* CONFIG_PPC_ISERIES */
105 67
106#ifdef CONFIG_IRQSTACKS 68#ifdef CONFIG_IRQSTACKS
107_GLOBAL(call_do_softirq) 69_GLOBAL(call_do_softirq)
diff --git a/arch/ppc64/kernel/pSeries_iommu.c b/arch/ppc64/kernel/pSeries_iommu.c
index 5914f61a152e..9e90d41131d8 100644
--- a/arch/ppc64/kernel/pSeries_iommu.c
+++ b/arch/ppc64/kernel/pSeries_iommu.c
@@ -47,7 +47,7 @@
47#include <asm/systemcfg.h> 47#include <asm/systemcfg.h>
48#include <asm/firmware.h> 48#include <asm/firmware.h>
49#include <asm/tce.h> 49#include <asm/tce.h>
50#include "pci.h" 50#include <asm/ppc-pci.h>
51 51
52#define DBG(fmt...) 52#define DBG(fmt...)
53 53
diff --git a/arch/ppc64/kernel/pSeries_pci.c b/arch/ppc64/kernel/pSeries_pci.c
index 1f5f141fb7a1..2dd477eb1c53 100644
--- a/arch/ppc64/kernel/pSeries_pci.c
+++ b/arch/ppc64/kernel/pSeries_pci.c
@@ -29,8 +29,7 @@
29 29
30#include <asm/pci-bridge.h> 30#include <asm/pci-bridge.h>
31#include <asm/prom.h> 31#include <asm/prom.h>
32 32#include <asm/ppc-pci.h>
33#include "pci.h"
34 33
35static int __initdata s7a_workaround = -1; 34static int __initdata s7a_workaround = -1;
36 35
diff --git a/arch/ppc64/kernel/pSeries_setup.c b/arch/ppc64/kernel/pSeries_setup.c
index b9bcff21b463..5a9fe96f9f67 100644
--- a/arch/ppc64/kernel/pSeries_setup.c
+++ b/arch/ppc64/kernel/pSeries_setup.c
@@ -63,9 +63,9 @@
63#include <asm/firmware.h> 63#include <asm/firmware.h>
64#include <asm/pmc.h> 64#include <asm/pmc.h>
65#include <asm/mpic.h> 65#include <asm/mpic.h>
66#include <asm/ppc-pci.h>
66 67
67#include "i8259.h" 68#include "i8259.h"
68#include "pci.h"
69 69
70#ifdef DEBUG 70#ifdef DEBUG
71#define DBG(fmt...) udbg_printf(fmt) 71#define DBG(fmt...) udbg_printf(fmt)
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index ff4be1da69d5..feec06bbafc3 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -31,8 +31,7 @@
31#include <asm/irq.h> 31#include <asm/irq.h>
32#include <asm/machdep.h> 32#include <asm/machdep.h>
33#include <asm/udbg.h> 33#include <asm/udbg.h>
34 34#include <asm/ppc-pci.h>
35#include "pci.h"
36 35
37#ifdef DEBUG 36#ifdef DEBUG
38#define DBG(fmt...) udbg_printf(fmt) 37#define DBG(fmt...) udbg_printf(fmt)
diff --git a/arch/ppc64/kernel/pci.h b/arch/ppc64/kernel/pci.h
deleted file mode 100644
index 5eb2cc320566..000000000000
--- a/arch/ppc64/kernel/pci.h
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * c 2001 PPC 64 Team, IBM Corp
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9#ifndef __PPC_KERNEL_PCI_H__
10#define __PPC_KERNEL_PCI_H__
11
12#include <linux/pci.h>
13#include <asm/pci-bridge.h>
14
15extern unsigned long isa_io_base;
16
17extern void pci_setup_pci_controller(struct pci_controller *hose);
18extern void pci_setup_phb_io(struct pci_controller *hose, int primary);
19extern void pci_setup_phb_io_dynamic(struct pci_controller *hose, int primary);
20
21
22extern struct list_head hose_list;
23extern int global_phb_number;
24
25extern unsigned long find_and_init_phbs(void);
26
27extern struct pci_dev *ppc64_isabridge_dev; /* may be NULL if no ISA bus */
28
29/* PCI device_node operations */
30struct device_node;
31typedef void *(*traverse_func)(struct device_node *me, void *data);
32void *traverse_pci_devices(struct device_node *start, traverse_func pre,
33 void *data);
34
35void pci_devs_phb_init(void);
36void pci_devs_phb_init_dynamic(struct pci_controller *phb);
37
38/* PCI address cache management routines */
39void pci_addr_cache_insert_device(struct pci_dev *dev);
40void pci_addr_cache_remove_device(struct pci_dev *dev);
41
42/* From rtas_pci.h */
43void init_pci_config_tokens (void);
44unsigned long get_phb_buid (struct device_node *);
45
46/* From pSeries_pci.h */
47extern void pSeries_final_fixup(void);
48extern void pSeries_irq_bus_setup(struct pci_bus *bus);
49
50extern unsigned long pci_probe_only;
51extern unsigned long pci_assign_all_buses;
52extern int pci_read_irq_line(struct pci_dev *pci_dev);
53
54#endif /* __PPC_KERNEL_PCI_H__ */
diff --git a/arch/ppc64/kernel/pci_direct_iommu.c b/arch/ppc64/kernel/pci_direct_iommu.c
index b8f7f58824f4..57980a5674d7 100644
--- a/arch/ppc64/kernel/pci_direct_iommu.c
+++ b/arch/ppc64/kernel/pci_direct_iommu.c
@@ -27,8 +27,7 @@
27#include <asm/machdep.h> 27#include <asm/machdep.h>
28#include <asm/pmac_feature.h> 28#include <asm/pmac_feature.h>
29#include <asm/abs_addr.h> 29#include <asm/abs_addr.h>
30 30#include <asm/ppc-pci.h>
31#include "pci.h"
32 31
33static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size, 32static void *pci_direct_alloc_coherent(struct device *hwdev, size_t size,
34 dma_addr_t *dma_handle, unsigned int __nocast flag) 33 dma_addr_t *dma_handle, unsigned int __nocast flag)
diff --git a/arch/ppc64/kernel/pci_dn.c b/arch/ppc64/kernel/pci_dn.c
index a86389d07d57..493bbe43f5b4 100644
--- a/arch/ppc64/kernel/pci_dn.c
+++ b/arch/ppc64/kernel/pci_dn.c
@@ -30,8 +30,7 @@
30#include <asm/prom.h> 30#include <asm/prom.h>
31#include <asm/pci-bridge.h> 31#include <asm/pci-bridge.h>
32#include <asm/pSeries_reconfig.h> 32#include <asm/pSeries_reconfig.h>
33 33#include <asm/ppc-pci.h>
34#include "pci.h"
35 34
36/* 35/*
37 * Traverse_func that inits the PCI fields of the device node. 36 * Traverse_func that inits the PCI fields of the device node.
diff --git a/arch/ppc64/kernel/pci_iommu.c b/arch/ppc64/kernel/pci_iommu.c
index 14647e09c9cd..6c9dc67f59d8 100644
--- a/arch/ppc64/kernel/pci_iommu.c
+++ b/arch/ppc64/kernel/pci_iommu.c
@@ -37,7 +37,7 @@
37#include <asm/iommu.h> 37#include <asm/iommu.h>
38#include <asm/pci-bridge.h> 38#include <asm/pci-bridge.h>
39#include <asm/machdep.h> 39#include <asm/machdep.h>
40#include "pci.h" 40#include <asm/ppc-pci.h>
41 41
42#ifdef CONFIG_PPC_ISERIES 42#ifdef CONFIG_PPC_ISERIES
43#include <asm/iSeries/iSeries_pci.h> 43#include <asm/iSeries/iSeries_pci.h>
@@ -61,13 +61,7 @@ static inline struct iommu_table *devnode_table(struct device *dev)
61 } else 61 } else
62 pdev = to_pci_dev(dev); 62 pdev = to_pci_dev(dev);
63 63
64#ifdef CONFIG_PPC_ISERIES
65 return ISERIES_DEVNODE(pdev)->iommu_table;
66#endif /* CONFIG_PPC_ISERIES */
67
68#ifdef CONFIG_PPC_MULTIPLATFORM
69 return PCI_DN(PCI_GET_DN(pdev))->iommu_table; 64 return PCI_DN(PCI_GET_DN(pdev))->iommu_table;
70#endif /* CONFIG_PPC_MULTIPLATFORM */
71} 65}
72 66
73 67
diff --git a/arch/ppc64/kernel/pmac_pci.c b/arch/ppc64/kernel/pmac_pci.c
index 1f61aa4746ec..f139fc034199 100644
--- a/arch/ppc64/kernel/pmac_pci.c
+++ b/arch/ppc64/kernel/pmac_pci.c
@@ -27,8 +27,8 @@
27#include <asm/machdep.h> 27#include <asm/machdep.h>
28#include <asm/pmac_feature.h> 28#include <asm/pmac_feature.h>
29#include <asm/iommu.h> 29#include <asm/iommu.h>
30#include <asm/ppc-pci.h>
30 31
31#include "pci.h"
32#include "pmac.h" 32#include "pmac.h"
33 33
34#define DEBUG 34#define DEBUG
diff --git a/arch/ppc64/kernel/ppc_ksyms.c b/arch/ppc64/kernel/ppc_ksyms.c
index 705742f4eec6..84006e26342c 100644
--- a/arch/ppc64/kernel/ppc_ksyms.c
+++ b/arch/ppc64/kernel/ppc_ksyms.c
@@ -19,7 +19,6 @@
19#include <asm/hw_irq.h> 19#include <asm/hw_irq.h>
20#include <asm/abs_addr.h> 20#include <asm/abs_addr.h>
21#include <asm/cacheflush.h> 21#include <asm/cacheflush.h>
22#include <asm/iSeries/HvCallSc.h>
23 22
24EXPORT_SYMBOL(strcpy); 23EXPORT_SYMBOL(strcpy);
25EXPORT_SYMBOL(strncpy); 24EXPORT_SYMBOL(strncpy);
@@ -46,17 +45,6 @@ EXPORT_SYMBOL(__strnlen_user);
46 45
47EXPORT_SYMBOL(reloc_offset); 46EXPORT_SYMBOL(reloc_offset);
48 47
49#ifdef CONFIG_PPC_ISERIES
50EXPORT_SYMBOL(HvCall0);
51EXPORT_SYMBOL(HvCall1);
52EXPORT_SYMBOL(HvCall2);
53EXPORT_SYMBOL(HvCall3);
54EXPORT_SYMBOL(HvCall4);
55EXPORT_SYMBOL(HvCall5);
56EXPORT_SYMBOL(HvCall6);
57EXPORT_SYMBOL(HvCall7);
58#endif
59
60EXPORT_SYMBOL(_insb); 48EXPORT_SYMBOL(_insb);
61EXPORT_SYMBOL(_outsb); 49EXPORT_SYMBOL(_outsb);
62EXPORT_SYMBOL(_insw); 50EXPORT_SYMBOL(_insw);
@@ -77,14 +65,6 @@ EXPORT_SYMBOL(giveup_altivec);
77EXPORT_SYMBOL(__flush_icache_range); 65EXPORT_SYMBOL(__flush_icache_range);
78EXPORT_SYMBOL(flush_dcache_range); 66EXPORT_SYMBOL(flush_dcache_range);
79 67
80#ifdef CONFIG_SMP
81#ifdef CONFIG_PPC_ISERIES
82EXPORT_SYMBOL(local_get_flags);
83EXPORT_SYMBOL(local_irq_disable);
84EXPORT_SYMBOL(local_irq_restore);
85#endif
86#endif
87
88EXPORT_SYMBOL(memcpy); 68EXPORT_SYMBOL(memcpy);
89EXPORT_SYMBOL(memset); 69EXPORT_SYMBOL(memset);
90EXPORT_SYMBOL(memmove); 70EXPORT_SYMBOL(memmove);
diff --git a/arch/ppc64/kernel/rtas_pci.c b/arch/ppc64/kernel/rtas_pci.c
index 4d920dd41dc6..20361bcd8cfb 100644
--- a/arch/ppc64/kernel/rtas_pci.c
+++ b/arch/ppc64/kernel/rtas_pci.c
@@ -39,8 +39,7 @@
39#include <asm/iommu.h> 39#include <asm/iommu.h>
40#include <asm/rtas.h> 40#include <asm/rtas.h>
41#include <asm/mpic.h> 41#include <asm/mpic.h>
42 42#include <asm/ppc-pci.h>
43#include "pci.h"
44 43
45/* RTAS tokens */ 44/* RTAS tokens */
46static int read_pci_config; 45static int read_pci_config;
diff --git a/arch/ppc64/kernel/rtc.c b/arch/ppc64/kernel/rtc.c
index 6ff52bc61325..88ae13f81c46 100644
--- a/arch/ppc64/kernel/rtc.c
+++ b/arch/ppc64/kernel/rtc.c
@@ -43,11 +43,8 @@
43#include <asm/time.h> 43#include <asm/time.h>
44#include <asm/rtas.h> 44#include <asm/rtas.h>
45 45
46#include <asm/iSeries/mf.h>
47#include <asm/machdep.h> 46#include <asm/machdep.h>
48 47
49extern int piranha_simulator;
50
51/* 48/*
52 * We sponge a minor off of the misc major. No need slurping 49 * We sponge a minor off of the misc major. No need slurping
53 * up another valuable major dev number for this. If you add 50 * up another valuable major dev number for this. If you add
@@ -265,40 +262,6 @@ static int rtc_read_proc(char *page, char **start, off_t off,
265 return len; 262 return len;
266} 263}
267 264
268#ifdef CONFIG_PPC_ISERIES
269/*
270 * Get the RTC from the virtual service processor
271 * This requires flowing LpEvents to the primary partition
272 */
273void iSeries_get_rtc_time(struct rtc_time *rtc_tm)
274{
275 if (piranha_simulator)
276 return;
277
278 mf_get_rtc(rtc_tm);
279 rtc_tm->tm_mon--;
280}
281
282/*
283 * Set the RTC in the virtual service processor
284 * This requires flowing LpEvents to the primary partition
285 */
286int iSeries_set_rtc_time(struct rtc_time *tm)
287{
288 mf_set_rtc(tm);
289 return 0;
290}
291
292void iSeries_get_boot_time(struct rtc_time *tm)
293{
294 if ( piranha_simulator )
295 return;
296
297 mf_get_boot_rtc(tm);
298 tm->tm_mon -= 1;
299}
300#endif
301
302#ifdef CONFIG_PPC_RTAS 265#ifdef CONFIG_PPC_RTAS
303#define MAX_RTC_WAIT 5000 /* 5 sec */ 266#define MAX_RTC_WAIT 5000 /* 5 sec */
304#define RTAS_CLOCK_BUSY (-2) 267#define RTAS_CLOCK_BUSY (-2)
diff --git a/arch/ppc64/kernel/sys_ppc32.c b/arch/ppc64/kernel/sys_ppc32.c
index e93c13458910..1cacf61f9c91 100644
--- a/arch/ppc64/kernel/sys_ppc32.c
+++ b/arch/ppc64/kernel/sys_ppc32.c
@@ -53,8 +53,7 @@
53#include <asm/time.h> 53#include <asm/time.h>
54#include <asm/mmu_context.h> 54#include <asm/mmu_context.h>
55#include <asm/systemcfg.h> 55#include <asm/systemcfg.h>
56 56#include <asm/ppc-pci.h>
57#include "pci.h"
58 57
59/* readdir & getdents */ 58/* readdir & getdents */
60#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de))) 59#define NAME_OFFSET(de) ((int) ((de)->d_name - (char __user *) (de)))
diff --git a/arch/ppc64/kernel/u3_iommu.c b/arch/ppc64/kernel/u3_iommu.c
index df9c775f4955..fba871a1bda5 100644
--- a/arch/ppc64/kernel/u3_iommu.c
+++ b/arch/ppc64/kernel/u3_iommu.c
@@ -45,8 +45,7 @@
45#include <asm/cacheflush.h> 45#include <asm/cacheflush.h>
46#include <asm/lmb.h> 46#include <asm/lmb.h>
47#include <asm/dart.h> 47#include <asm/dart.h>
48 48#include <asm/ppc-pci.h>
49#include "pci.h"
50 49
51extern int iommu_force_on; 50extern int iommu_force_on;
52 51
diff --git a/arch/ppc64/kernel/viopath.c b/arch/ppc64/kernel/viopath.c
deleted file mode 100644
index 2a6c4f01c45e..000000000000
--- a/arch/ppc64/kernel/viopath.c
+++ /dev/null
@@ -1,673 +0,0 @@
1/* -*- linux-c -*-
2 * arch/ppc64/kernel/viopath.c
3 *
4 * iSeries Virtual I/O Message Path code
5 *
6 * Authors: Dave Boutcher <boutcher@us.ibm.com>
7 * Ryan Arnold <ryanarn@us.ibm.com>
8 * Colin Devilbiss <devilbis@us.ibm.com>
9 *
10 * (C) Copyright 2000-2003 IBM Corporation
11 *
12 * This code is used by the iSeries virtual disk, cd,
13 * tape, and console to communicate with OS/400 in another
14 * partition.
15 *
16 * This program is free software; you can redistribute it and/or
17 * modify it under the terms of the GNU General Public License as
18 * published by the Free Software Foundation; either version 2 of the
19 * License, or (at your option) anyu later version.
20 *
21 * This program is distributed in the hope that it will be useful, but
22 * WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 * General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software Foundation,
28 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
29 *
30 */
31#include <linux/module.h>
32#include <linux/kernel.h>
33#include <linux/errno.h>
34#include <linux/vmalloc.h>
35#include <linux/string.h>
36#include <linux/proc_fs.h>
37#include <linux/dma-mapping.h>
38#include <linux/wait.h>
39#include <linux/seq_file.h>
40#include <linux/smp_lock.h>
41#include <linux/interrupt.h>
42
43#include <asm/system.h>
44#include <asm/uaccess.h>
45#include <asm/iSeries/HvTypes.h>
46#include <asm/iSeries/ItExtVpdPanel.h>
47#include <asm/iSeries/HvLpEvent.h>
48#include <asm/iSeries/HvLpConfig.h>
49#include <asm/iSeries/mf.h>
50#include <asm/iSeries/vio.h>
51
52/* Status of the path to each other partition in the system.
53 * This is overkill, since we will only ever establish connections
54 * to our hosting partition and the primary partition on the system.
55 * But this allows for other support in the future.
56 */
57static struct viopathStatus {
58 int isOpen; /* Did we open the path? */
59 int isActive; /* Do we have a mon msg outstanding */
60 int users[VIO_MAX_SUBTYPES];
61 HvLpInstanceId mSourceInst;
62 HvLpInstanceId mTargetInst;
63 int numberAllocated;
64} viopathStatus[HVMAXARCHITECTEDLPS];
65
66static DEFINE_SPINLOCK(statuslock);
67
68/*
69 * For each kind of event we allocate a buffer that is
70 * guaranteed not to cross a page boundary
71 */
72static unsigned char event_buffer[VIO_MAX_SUBTYPES * 256] __page_aligned;
73static atomic_t event_buffer_available[VIO_MAX_SUBTYPES];
74static int event_buffer_initialised;
75
76static void handleMonitorEvent(struct HvLpEvent *event);
77
78/*
79 * We use this structure to handle asynchronous responses. The caller
80 * blocks on the semaphore and the handler posts the semaphore. However,
81 * if system_state is not SYSTEM_RUNNING, then wait_atomic is used ...
82 */
83struct alloc_parms {
84 struct semaphore sem;
85 int number;
86 atomic_t wait_atomic;
87 int used_wait_atomic;
88};
89
90/* Put a sequence number in each mon msg. The value is not
91 * important. Start at something other than 0 just for
92 * readability. wrapping this is ok.
93 */
94static u8 viomonseq = 22;
95
96/* Our hosting logical partition. We get this at startup
97 * time, and different modules access this variable directly.
98 */
99HvLpIndex viopath_hostLp = HvLpIndexInvalid;
100EXPORT_SYMBOL(viopath_hostLp);
101HvLpIndex viopath_ourLp = HvLpIndexInvalid;
102EXPORT_SYMBOL(viopath_ourLp);
103
104/* For each kind of incoming event we set a pointer to a
105 * routine to call.
106 */
107static vio_event_handler_t *vio_handler[VIO_MAX_SUBTYPES];
108
109#define VIOPATH_KERN_WARN KERN_WARNING "viopath: "
110#define VIOPATH_KERN_INFO KERN_INFO "viopath: "
111
112static int proc_viopath_show(struct seq_file *m, void *v)
113{
114 char *buf;
115 u16 vlanMap;
116 dma_addr_t handle;
117 HvLpEvent_Rc hvrc;
118 DECLARE_MUTEX_LOCKED(Semaphore);
119
120 buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
121 if (!buf)
122 return 0;
123 memset(buf, 0, PAGE_SIZE);
124
125 handle = dma_map_single(iSeries_vio_dev, buf, PAGE_SIZE,
126 DMA_FROM_DEVICE);
127
128 hvrc = HvCallEvent_signalLpEventFast(viopath_hostLp,
129 HvLpEvent_Type_VirtualIo,
130 viomajorsubtype_config | vioconfigget,
131 HvLpEvent_AckInd_DoAck, HvLpEvent_AckType_ImmediateAck,
132 viopath_sourceinst(viopath_hostLp),
133 viopath_targetinst(viopath_hostLp),
134 (u64)(unsigned long)&Semaphore, VIOVERSION << 16,
135 ((u64)handle) << 32, PAGE_SIZE, 0, 0);
136
137 if (hvrc != HvLpEvent_Rc_Good)
138 printk(VIOPATH_KERN_WARN "hv error on op %d\n", (int)hvrc);
139
140 down(&Semaphore);
141
142 vlanMap = HvLpConfig_getVirtualLanIndexMap();
143
144 buf[PAGE_SIZE-1] = '\0';
145 seq_printf(m, "%s", buf);
146 seq_printf(m, "AVAILABLE_VETH=%x\n", vlanMap);
147 seq_printf(m, "SRLNBR=%c%c%c%c%c%c%c\n",
148 e2a(xItExtVpdPanel.mfgID[2]),
149 e2a(xItExtVpdPanel.mfgID[3]),
150 e2a(xItExtVpdPanel.systemSerial[1]),
151 e2a(xItExtVpdPanel.systemSerial[2]),
152 e2a(xItExtVpdPanel.systemSerial[3]),
153 e2a(xItExtVpdPanel.systemSerial[4]),
154 e2a(xItExtVpdPanel.systemSerial[5]));
155
156 dma_unmap_single(iSeries_vio_dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
157 kfree(buf);
158
159 return 0;
160}
161
162static int proc_viopath_open(struct inode *inode, struct file *file)
163{
164 return single_open(file, proc_viopath_show, NULL);
165}
166
167static struct file_operations proc_viopath_operations = {
168 .open = proc_viopath_open,
169 .read = seq_read,
170 .llseek = seq_lseek,
171 .release = single_release,
172};
173
174static int __init vio_proc_init(void)
175{
176 struct proc_dir_entry *e;
177
178 e = create_proc_entry("iSeries/config", 0, NULL);
179 if (e)
180 e->proc_fops = &proc_viopath_operations;
181
182 return 0;
183}
184__initcall(vio_proc_init);
185
186/* See if a given LP is active. Allow for invalid lps to be passed in
187 * and just return invalid
188 */
189int viopath_isactive(HvLpIndex lp)
190{
191 if (lp == HvLpIndexInvalid)
192 return 0;
193 if (lp < HVMAXARCHITECTEDLPS)
194 return viopathStatus[lp].isActive;
195 else
196 return 0;
197}
198EXPORT_SYMBOL(viopath_isactive);
199
200/*
201 * We cache the source and target instance ids for each
202 * partition.
203 */
204HvLpInstanceId viopath_sourceinst(HvLpIndex lp)
205{
206 return viopathStatus[lp].mSourceInst;
207}
208EXPORT_SYMBOL(viopath_sourceinst);
209
210HvLpInstanceId viopath_targetinst(HvLpIndex lp)
211{
212 return viopathStatus[lp].mTargetInst;
213}
214EXPORT_SYMBOL(viopath_targetinst);
215
216/*
217 * Send a monitor message. This is a message with the acknowledge
218 * bit on that the other side will NOT explicitly acknowledge. When
219 * the other side goes down, the hypervisor will acknowledge any
220 * outstanding messages....so we will know when the other side dies.
221 */
222static void sendMonMsg(HvLpIndex remoteLp)
223{
224 HvLpEvent_Rc hvrc;
225
226 viopathStatus[remoteLp].mSourceInst =
227 HvCallEvent_getSourceLpInstanceId(remoteLp,
228 HvLpEvent_Type_VirtualIo);
229 viopathStatus[remoteLp].mTargetInst =
230 HvCallEvent_getTargetLpInstanceId(remoteLp,
231 HvLpEvent_Type_VirtualIo);
232
233 /*
234 * Deliberately ignore the return code here. if we call this
235 * more than once, we don't care.
236 */
237 vio_setHandler(viomajorsubtype_monitor, handleMonitorEvent);
238
239 hvrc = HvCallEvent_signalLpEventFast(remoteLp, HvLpEvent_Type_VirtualIo,
240 viomajorsubtype_monitor, HvLpEvent_AckInd_DoAck,
241 HvLpEvent_AckType_DeferredAck,
242 viopathStatus[remoteLp].mSourceInst,
243 viopathStatus[remoteLp].mTargetInst,
244 viomonseq++, 0, 0, 0, 0, 0);
245
246 if (hvrc == HvLpEvent_Rc_Good)
247 viopathStatus[remoteLp].isActive = 1;
248 else {
249 printk(VIOPATH_KERN_WARN "could not connect to partition %d\n",
250 remoteLp);
251 viopathStatus[remoteLp].isActive = 0;
252 }
253}
254
255static void handleMonitorEvent(struct HvLpEvent *event)
256{
257 HvLpIndex remoteLp;
258 int i;
259
260 /*
261 * This handler is _also_ called as part of the loop
262 * at the end of this routine, so it must be able to
263 * ignore NULL events...
264 */
265 if (!event)
266 return;
267
268 /*
269 * First see if this is just a normal monitor message from the
270 * other partition
271 */
272 if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
273 remoteLp = event->xSourceLp;
274 if (!viopathStatus[remoteLp].isActive)
275 sendMonMsg(remoteLp);
276 return;
277 }
278
279 /*
280 * This path is for an acknowledgement; the other partition
281 * died
282 */
283 remoteLp = event->xTargetLp;
284 if ((event->xSourceInstanceId != viopathStatus[remoteLp].mSourceInst) ||
285 (event->xTargetInstanceId != viopathStatus[remoteLp].mTargetInst)) {
286 printk(VIOPATH_KERN_WARN "ignoring ack....mismatched instances\n");
287 return;
288 }
289
290 printk(VIOPATH_KERN_WARN "partition %d ended\n", remoteLp);
291
292 viopathStatus[remoteLp].isActive = 0;
293
294 /*
295 * For each active handler, pass them a NULL
296 * message to indicate that the other partition
297 * died
298 */
299 for (i = 0; i < VIO_MAX_SUBTYPES; i++) {
300 if (vio_handler[i] != NULL)
301 (*vio_handler[i])(NULL);
302 }
303}
304
305int vio_setHandler(int subtype, vio_event_handler_t *beh)
306{
307 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
308 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
309 return -EINVAL;
310 if (vio_handler[subtype] != NULL)
311 return -EBUSY;
312 vio_handler[subtype] = beh;
313 return 0;
314}
315EXPORT_SYMBOL(vio_setHandler);
316
317int vio_clearHandler(int subtype)
318{
319 subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
320 if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
321 return -EINVAL;
322 if (vio_handler[subtype] == NULL)
323 return -EAGAIN;
324 vio_handler[subtype] = NULL;
325 return 0;
326}
327EXPORT_SYMBOL(vio_clearHandler);
328
329static void handleConfig(struct HvLpEvent *event)
330{
331 if (!event)
332 return;
333 if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
334 printk(VIOPATH_KERN_WARN
335 "unexpected config request from partition %d",
336 event->xSourceLp);
337
338 if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
339 (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
340 event->xRc = HvLpEvent_Rc_InvalidSubtype;
341 HvCallEvent_ackLpEvent(event);
342 }
343 return;
344 }
345
346 up((struct semaphore *)event->xCorrelationToken);
347}
348
349/*
350 * Initialization of the hosting partition
351 */
/*
 * Discover and record which partition hosts this one, then register
 * the config-event handler.  Idempotent: a second call is a no-op once
 * viopath_hostLp has been set.  No return value; on failure
 * viopath_hostLp simply remains HvLpIndexInvalid.
 */
352void vio_set_hostlp(void)
353{
354	/*
355	 * If this has already been set then we DON'T want to either change
356	 * it or re-register the proc file system
357	 */
358	if (viopath_hostLp != HvLpIndexInvalid)
359		return;
360
361	/*
362	 * Figure out our hosting partition.  This isn't allowed to change
363	 * while we're active
364	 */
365	viopath_ourLp = HvLpConfig_getLpIndex();
366	viopath_hostLp = HvLpConfig_getHostingLpIndex(viopath_ourLp);
367
	/* Only hosted partitions get the config handler installed. */
368	if (viopath_hostLp != HvLpIndexInvalid)
369		vio_setHandler(viomajorsubtype_config, handleConfig);
370}
371EXPORT_SYMBOL(vio_set_hostlp);
372
/*
 * Central dispatcher for all HvLpEvent_Type_VirtualIo events
 * (registered with HvLpEvent_registerHandler in viopath_open).
 *
 * Validates the source/target instance IDs of the event against the
 * ones recorded in viopathStatus[] for the remote partition, dropping
 * (with a warning) anything that doesn't match, then dispatches to the
 * per-subtype handler in vio_handler[].  Events with no registered
 * handler are acked with HvLpEvent_Rc_InvalidSubtype when an ack was
 * requested.  @regs is unused here.
 */
373static void vio_handleEvent(struct HvLpEvent *event, struct pt_regs *regs)
374{
375	HvLpIndex remoteLp;
	/* Major subtype index into vio_handler[]. */
376	int subtype = (event->xSubtype & VIOMAJOR_SUBTYPE_MASK)
377		>> VIOMAJOR_SUBTYPE_SHIFT;
378
379	if (event->xFlags.xFunction == HvLpEvent_Function_Int) {
380		remoteLp = event->xSourceLp;
381		/*
382		 * The isActive is checked because if the hosting partition
383		 * went down and came back up it would not be active but it
384		 * would have different source and target instances, in which
385		 * case we'd want to reset them.  This case really protects
386		 * against an unauthorized active partition sending interrupts
387		 * or acks to this linux partition.
388		 */
389		if (viopathStatus[remoteLp].isActive
390		    && (event->xSourceInstanceId !=
391			viopathStatus[remoteLp].mTargetInst)) {
392			printk(VIOPATH_KERN_WARN
393			       "message from invalid partition. "
394			       "int msg rcvd, source inst (%d) doesnt match (%d)\n",
395			       viopathStatus[remoteLp].mTargetInst,
396			       event->xSourceInstanceId);
397			return;
398		}
399
400		if (viopathStatus[remoteLp].isActive
401		    && (event->xTargetInstanceId !=
402			viopathStatus[remoteLp].mSourceInst)) {
403			printk(VIOPATH_KERN_WARN
404			       "message from invalid partition. "
405			       "int msg rcvd, target inst (%d) doesnt match (%d)\n",
406			       viopathStatus[remoteLp].mSourceInst,
407			       event->xTargetInstanceId);
408			return;
409		}
410	} else {
		/*
		 * Ack-type event: it came back from the partition we sent
		 * to, so the remote side is the event's target.  Acks are
		 * validated unconditionally (no isActive guard).
		 */
411		remoteLp = event->xTargetLp;
412		if (event->xSourceInstanceId !=
413		    viopathStatus[remoteLp].mSourceInst) {
414			printk(VIOPATH_KERN_WARN
415			       "message from invalid partition. "
416			       "ack msg rcvd, source inst (%d) doesnt match (%d)\n",
417			       viopathStatus[remoteLp].mSourceInst,
418			       event->xSourceInstanceId);
419			return;
420		}
421
422		if (event->xTargetInstanceId !=
423		    viopathStatus[remoteLp].mTargetInst) {
424			printk(VIOPATH_KERN_WARN
425			       "message from invalid partition. "
426			       "viopath: ack msg rcvd, target inst (%d) doesnt match (%d)\n",
427			       viopathStatus[remoteLp].mTargetInst,
428			       event->xTargetInstanceId);
429			return;
430		}
431	}
432
433	if (vio_handler[subtype] == NULL) {
434		printk(VIOPATH_KERN_WARN
435		       "unexpected virtual io event subtype %d from partition %d\n",
436		       event->xSubtype, remoteLp);
437		/* No handler.  Ack if necessary */
438		if ((event->xFlags.xFunction == HvLpEvent_Function_Int) &&
439		    (event->xFlags.xAckInd == HvLpEvent_AckInd_DoAck)) {
440			event->xRc = HvLpEvent_Rc_InvalidSubtype;
441			HvCallEvent_ackLpEvent(event);
442		}
443		return;
444	}
445
446	/* This innocuous little line is where all the real work happens */
447	(*vio_handler[subtype])(event);
448}
449
/*
 * Completion callback passed to mf_allocate_lp_events() /
 * mf_deallocate_lp_events().  Records how many events were actually
 * allocated/freed in the caller's alloc_parms, then wakes the waiter:
 * by clearing the spin-wait atomic if the caller could not sleep, or
 * by up()ing the semaphore otherwise.
 */
450static void viopath_donealloc(void *parm, int number)
451{
452	struct alloc_parms *parmsp = parm;
453
454	parmsp->number = number;
455	if (parmsp->used_wait_atomic)
456		atomic_set(&parmsp->wait_atomic, 0);
457	else
458		up(&parmsp->sem);
459}
460
/*
 * Synchronously allocate @numEvents LP events for @remoteLp via
 * mf_allocate_lp_events(), waiting for viopath_donealloc() to fire.
 * Returns the number of events actually allocated (parms.number).
 *
 * Before SYSTEM_RUNNING we cannot sleep, so the wait is a busy spin on
 * an atomic flag (with mb() to force re-reads); once the system is up,
 * a semaphore sleep is used instead.
 */
461static int allocateEvents(HvLpIndex remoteLp, int numEvents)
462{
463	struct alloc_parms parms;
464
465	if (system_state != SYSTEM_RUNNING) {
466		parms.used_wait_atomic = 1;
467		atomic_set(&parms.wait_atomic, 1);
468	} else {
469		parms.used_wait_atomic = 0;
470		init_MUTEX_LOCKED(&parms.sem);
471	}
472	mf_allocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo, 250,	/* It would be nice to put a real number here! */
473			      numEvents, &viopath_donealloc, &parms);
474	if (system_state != SYSTEM_RUNNING) {
475		while (atomic_read(&parms.wait_atomic))
476			mb();
477	} else
478		down(&parms.sem);
479	return parms.number;
}
481
/*
 * Open (or add a user to) the VIO path to @remoteLp for one major
 * @subtype, reserving @numReq LP events for it.
 *
 * The first opener of a partition actually opens the hypervisor event
 * path, allocates an initial event, records the source/target instance
 * IDs, registers vio_handleEvent, and sends the monitor message.
 * Subsequent opens just bump the per-subtype user count and allocate
 * more events.
 *
 * Returns 0 on success, -EINVAL for a bad partition index or subtype,
 * or -ENOMEM if no events at all could be allocated on first open.
 */
482int viopath_open(HvLpIndex remoteLp, int subtype, int numReq)
483{
484	int i;
485	unsigned long flags;
486	int tempNumAllocated;
487
488	if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
489		return -EINVAL;
490
491	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
492	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
493		return -EINVAL;
494
495	spin_lock_irqsave(&statuslock, flags);
496
	/* Lazily mark every per-subtype event buffer as available. */
497	if (!event_buffer_initialised) {
498		for (i = 0; i < VIO_MAX_SUBTYPES; i++)
499			atomic_set(&event_buffer_available[i], 1);
500		event_buffer_initialised = 1;
501	}
502
503	viopathStatus[remoteLp].users[subtype]++;
504
505	if (!viopathStatus[remoteLp].isOpen) {
506		viopathStatus[remoteLp].isOpen = 1;
507		HvCallEvent_openLpEventPath(remoteLp, HvLpEvent_Type_VirtualIo);
508
509		/*
510		 * Don't hold the spinlock during an operation that
511		 * can sleep.
512		 */
513		spin_unlock_irqrestore(&statuslock, flags);
514		tempNumAllocated = allocateEvents(remoteLp, 1);
515		spin_lock_irqsave(&statuslock, flags);
516
517		viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
518
		/* Could not get even one event: undo the open and bail. */
519		if (viopathStatus[remoteLp].numberAllocated == 0) {
520			HvCallEvent_closeLpEventPath(remoteLp,
521					HvLpEvent_Type_VirtualIo);
522
523			spin_unlock_irqrestore(&statuslock, flags);
524			return -ENOMEM;
525		}
526
		/* Record instance IDs used to validate incoming events. */
527		viopathStatus[remoteLp].mSourceInst =
528			HvCallEvent_getSourceLpInstanceId(remoteLp,
529					HvLpEvent_Type_VirtualIo);
530		viopathStatus[remoteLp].mTargetInst =
531			HvCallEvent_getTargetLpInstanceId(remoteLp,
532					HvLpEvent_Type_VirtualIo);
533		HvLpEvent_registerHandler(HvLpEvent_Type_VirtualIo,
534					  &vio_handleEvent);
535		sendMonMsg(remoteLp);
536		printk(VIOPATH_KERN_INFO "opening connection to partition %d, "
537				"setting sinst %d, tinst %d\n",
538				remoteLp, viopathStatus[remoteLp].mSourceInst,
539				viopathStatus[remoteLp].mTargetInst);
540	}
541
	/* Allocate the caller's requested events (sleeps; lock dropped). */
542	spin_unlock_irqrestore(&statuslock, flags);
543	tempNumAllocated = allocateEvents(remoteLp, numReq);
544	spin_lock_irqsave(&statuslock, flags);
545	viopathStatus[remoteLp].numberAllocated += tempNumAllocated;
546	spin_unlock_irqrestore(&statuslock, flags);
547
548	return 0;
549}
550EXPORT_SYMBOL(viopath_open);
551
/*
 * Release one user of the VIO path to @remoteLp for @subtype and give
 * back @numReq LP events.  When the last user across all subtypes is
 * gone, the hypervisor event path is closed and the path/event-buffer
 * state is reset.
 *
 * Returns 0 on success or -EINVAL for a bad partition index or subtype.
 */
552int viopath_close(HvLpIndex remoteLp, int subtype, int numReq)
553{
554	unsigned long flags;
555	int i;
556	int numOpen;
557	struct alloc_parms parms;
558
559	if ((remoteLp >= HVMAXARCHITECTEDLPS) || (remoteLp == HvLpIndexInvalid))
560		return -EINVAL;
561
562	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
563	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
564		return -EINVAL;
565
566	spin_lock_irqsave(&statuslock, flags);
567	/*
568	 * If the viopath_close somehow gets called before a
569	 * viopath_open it could decrement to -1 which is a non
570	 * recoverable state so we'll prevent this from
571	 * happening.
572	 */
573	if (viopathStatus[remoteLp].users[subtype] > 0)
574		viopathStatus[remoteLp].users[subtype]--;
575
576	spin_unlock_irqrestore(&statuslock, flags);
577
	/* Sleeping deallocation: always safe to use the semaphore here. */
578	parms.used_wait_atomic = 0;
579	init_MUTEX_LOCKED(&parms.sem);
580	mf_deallocate_lp_events(remoteLp, HvLpEvent_Type_VirtualIo,
581				numReq, &viopath_donealloc, &parms);
582	down(&parms.sem);
583
584	spin_lock_irqsave(&statuslock, flags);
	/* Count remaining users over every subtype. */
585	for (i = 0, numOpen = 0; i < VIO_MAX_SUBTYPES; i++)
586		numOpen += viopathStatus[remoteLp].users[i];
587
588	if ((viopathStatus[remoteLp].isOpen) && (numOpen == 0)) {
		/* NOTE(review): this printk has no trailing \n. */
589		printk(VIOPATH_KERN_INFO "closing connection to partition %d",
590				remoteLp);
591
592		HvCallEvent_closeLpEventPath(remoteLp,
593				HvLpEvent_Type_VirtualIo);
594		viopathStatus[remoteLp].isOpen = 0;
595		viopathStatus[remoteLp].isActive = 0;
596
		/* Mark all event buffers unavailable until the next open. */
597		for (i = 0; i < VIO_MAX_SUBTYPES; i++)
598			atomic_set(&event_buffer_available[i], 0);
599		event_buffer_initialised = 0;
600	}
601	spin_unlock_irqrestore(&statuslock, flags);
602	return 0;
603}
604EXPORT_SYMBOL(viopath_close);
605
/*
 * Claim the single 256-byte event buffer reserved for @subtype.
 *
 * atomic_dec_if_positive() returns the new value, so 0 means we won
 * the 1 -> 0 transition and own the buffer; otherwise (already taken,
 * or never initialised) NULL is returned.  Release with
 * vio_free_event_buffer().
 */
606void *vio_get_event_buffer(int subtype)
607{
608	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
609	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES))
610		return NULL;
611
612	if (atomic_dec_if_positive(&event_buffer_available[subtype]) == 0)
613		return &event_buffer[subtype * 256];
614	else
615		return NULL;
616}
617EXPORT_SYMBOL(vio_get_event_buffer);
618
/*
 * Return the per-subtype event buffer obtained from
 * vio_get_event_buffer().  Warns (and refuses, except in the last
 * case) when the subtype is out of range, the buffer was not actually
 * allocated, or @buffer is not the expected address — the last case is
 * only logged and the slot is still marked available.
 */
619void vio_free_event_buffer(int subtype, void *buffer)
620{
621	subtype = subtype >> VIOMAJOR_SUBTYPE_SHIFT;
622	if ((subtype < 0) || (subtype >= VIO_MAX_SUBTYPES)) {
623		printk(VIOPATH_KERN_WARN
624		       "unexpected subtype %d freeing event buffer\n", subtype);
625		return;
626	}
627
628	if (atomic_read(&event_buffer_available[subtype]) != 0) {
629		printk(VIOPATH_KERN_WARN
630		       "freeing unallocated event buffer, subtype %d\n",
631		       subtype);
632		return;
633	}
634
635	if (buffer != &event_buffer[subtype * 256]) {
636		printk(VIOPATH_KERN_WARN
637		       "freeing invalid event buffer, subtype %d\n", subtype);
638	}
639
640	atomic_set(&event_buffer_available[subtype], 1);
641}
642EXPORT_SYMBOL(vio_free_event_buffer);
643
/* Sentinel entries returned by vio_lookup_rc() for rc == 0 and for an
 * rc that matches no table. */
644static const struct vio_error_entry vio_no_error =
645	{ 0, 0, "Non-VIO Error" };
646static const struct vio_error_entry vio_unknown_error =
647	{ 0, EIO, "Unknown Error" };
648
/* Default rc -> errno/description table, terminated by a zero rc. */
649static const struct vio_error_entry vio_default_errors[] = {
650	{0x0001, EIO, "No Connection"},
651	{0x0002, EIO, "No Receiver"},
652	{0x0003, EIO, "No Buffer Available"},
653	{0x0004, EBADRQC, "Invalid Message Type"},
654	{0x0000, 0, NULL},
655};
656
/*
 * Translate a VIO return code into an error entry.
 *
 * rc == 0 maps to vio_no_error.  Otherwise the caller's optional
 * @local_table is searched first, then the shared defaults; both are
 * zero-rc terminated.  An unrecognised rc yields vio_unknown_error.
 * Never returns NULL.
 */
657const struct vio_error_entry *vio_lookup_rc(
658		const struct vio_error_entry *local_table, u16 rc)
659{
660	const struct vio_error_entry *cur;
661
662	if (!rc)
663		return &vio_no_error;
664	if (local_table)
665		for (cur = local_table; cur->rc; ++cur)
666			if (cur->rc == rc)
667				return cur;
668	for (cur = vio_default_errors; cur->rc; ++cur)
669		if (cur->rc == rc)
670			return cur;
671	return &vio_unknown_error;
672}
673EXPORT_SYMBOL(vio_lookup_rc);