author     Stephen Rothwell <sfr@canb.auug.org.au>   2005-09-27 04:44:42 -0400
committer  Stephen Rothwell <sfr@canb.auug.org.au>   2005-09-27 04:44:42 -0400
commit     c8b84976f86adcd10c221d398e1d0be2b778f3c8 (patch)
tree       54924b199234c014ad6d70269e24c59041a69432 /arch/powerpc/platforms
parent     2960eb661a82131b9492cdd1b6500a5f74ccc394 (diff)
powerpc: move iSeries_setup.[ch] and mf.c into platforms/iseries
iSeries_setup.c becomes setup.c
iSeries_setup.h becomes setup.h
mf.c retains its name
Also moved iSeries_[gs]et_rtc_time and iSeries_get_boot_time into
mf.c since they are just small wrappers around mf_ functions.
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Diffstat (limited to 'arch/powerpc/platforms')
-rw-r--r--   arch/powerpc/platforms/iseries/Makefile     2
-rw-r--r--   arch/powerpc/platforms/iseries/mf.c      1316
-rw-r--r--   arch/powerpc/platforms/iseries/setup.c   1006
-rw-r--r--   arch/powerpc/platforms/iseries/setup.h     24
4 files changed, 2347 insertions, 1 deletion
diff --git a/arch/powerpc/platforms/iseries/Makefile b/arch/powerpc/platforms/iseries/Makefile
index 095471d50d9f..f5e11907cab1 100644
--- a/arch/powerpc/platforms/iseries/Makefile
+++ b/arch/powerpc/platforms/iseries/Makefile
@@ -1 +1 @@
-obj-$(CONFIG_PPC_ISERIES) += hvcall.o hvlpconfig.o lpardata.o
+obj-y += hvcall.o hvlpconfig.o lpardata.o setup.o mf.o
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
new file mode 100644
index 000000000000..82f5abab9afa
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/mf.c
@@ -0,0 +1,1316 @@
1 | /* | ||
2 | * Copyright (C) 2001 Troy D. Armstrong IBM Corporation | ||
3 | * Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation | ||
4 | * | ||
5 | * This module exists as an interface between a Linux secondary partition
6 | * running on an iSeries and the primary partition's Virtual Service | ||
7 | * Processor (VSP) object. The VSP has final authority over powering on/off | ||
8 | * all partitions in the iSeries. It also provides miscellaneous low-level | ||
9 | * machine facility type operations. | ||
10 | * | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
25 | */ | ||
26 | |||
27 | #include <linux/types.h> | ||
28 | #include <linux/errno.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/completion.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/dma-mapping.h> | ||
34 | #include <linux/bcd.h> | ||
35 | |||
36 | #include <asm/time.h> | ||
37 | #include <asm/uaccess.h> | ||
38 | #include <asm/paca.h> | ||
39 | #include <asm/iSeries/vio.h> | ||
40 | #include <asm/iSeries/mf.h> | ||
41 | #include <asm/iSeries/HvLpConfig.h> | ||
42 | #include <asm/iSeries/ItLpQueue.h> | ||
43 | |||
44 | #include "setup.h" | ||
45 | |||
46 | extern int piranha_simulator; | ||
47 | |||
48 | /* | ||
49 | * This is the structure layout for the Machine Facilities LPAR event
50 | * flows. | ||
51 | */ | ||
52 | struct vsp_cmd_data { | ||
53 | u64 token; | ||
54 | u16 cmd; | ||
55 | HvLpIndex lp_index; | ||
56 | u8 result_code; | ||
57 | u32 reserved; | ||
58 | union { | ||
59 | u64 state; /* GetStateOut */ | ||
60 | u64 ipl_type; /* GetIplTypeOut, Function02SelectIplTypeIn */ | ||
61 | u64 ipl_mode; /* GetIplModeOut, Function02SelectIplModeIn */ | ||
62 | u64 page[4]; /* GetSrcHistoryIn */ | ||
63 | u64 flag; /* GetAutoIplWhenPrimaryIplsOut, | ||
64 | SetAutoIplWhenPrimaryIplsIn, | ||
65 | WhiteButtonPowerOffIn, | ||
66 | Function08FastPowerOffIn, | ||
67 | IsSpcnRackPowerIncompleteOut */ | ||
68 | struct { | ||
69 | u64 token; | ||
70 | u64 address_type; | ||
71 | u64 side; | ||
72 | u32 length; | ||
73 | u32 offset; | ||
74 | } kern; /* SetKernelImageIn, GetKernelImageIn, | ||
75 | SetKernelCmdLineIn, GetKernelCmdLineIn */ | ||
76 | u32 length_out; /* GetKernelImageOut, GetKernelCmdLineOut */ | ||
77 | u8 reserved[80]; | ||
78 | } sub_data; | ||
79 | }; | ||
80 | |||
81 | struct vsp_rsp_data { | ||
82 | struct completion com; | ||
83 | struct vsp_cmd_data *response; | ||
84 | }; | ||
85 | |||
86 | struct alloc_data { | ||
87 | u16 size; | ||
88 | u16 type; | ||
89 | u32 count; | ||
90 | u16 reserved1; | ||
91 | u8 reserved2; | ||
92 | HvLpIndex target_lp; | ||
93 | }; | ||
94 | |||
95 | struct ce_msg_data; | ||
96 | |||
97 | typedef void (*ce_msg_comp_hdlr)(void *token, struct ce_msg_data *vsp_cmd_rsp); | ||
98 | |||
99 | struct ce_msg_comp_data { | ||
100 | ce_msg_comp_hdlr handler; | ||
101 | void *token; | ||
102 | }; | ||
103 | |||
104 | struct ce_msg_data { | ||
105 | u8 ce_msg[12]; | ||
106 | char reserved[4]; | ||
107 | struct ce_msg_comp_data *completion; | ||
108 | }; | ||
109 | |||
110 | struct io_mf_lp_event { | ||
111 | struct HvLpEvent hp_lp_event; | ||
112 | u16 subtype_result_code; | ||
113 | u16 reserved1; | ||
114 | u32 reserved2; | ||
115 | union { | ||
116 | struct alloc_data alloc; | ||
117 | struct ce_msg_data ce_msg; | ||
118 | struct vsp_cmd_data vsp_cmd; | ||
119 | } data; | ||
120 | }; | ||
121 | |||
122 | #define subtype_data(a, b, c, d) \ | ||
123 | (((a) << 24) + ((b) << 16) + ((c) << 8) + (d)) | ||
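/*
 * Illustrative example: the macro packs four ASCII bytes into one word,
 * so subtype_data('M', 'F', 'C', 'E') evaluates to 0x4d464345 -- the
 * "MFCE" tag carried on CE-message events below.
 */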
124 | |||
125 | /* | ||
126 | * All outgoing event traffic is kept on a FIFO queue. The first | ||
127 | * pointer points to the one that is outstanding, and all new | ||
128 | * requests get stuck on the end. Also, we keep a certain number of | ||
129 | * preallocated pending events so that we can operate very early in | ||
130 | * the boot up sequence (before kmalloc is ready). | ||
131 | */ | ||
132 | struct pending_event { | ||
133 | struct pending_event *next; | ||
134 | struct io_mf_lp_event event; | ||
135 | MFCompleteHandler hdlr; | ||
136 | char dma_data[72]; | ||
137 | unsigned dma_data_length; | ||
138 | unsigned remote_address; | ||
139 | }; | ||
140 | static spinlock_t pending_event_spinlock; | ||
141 | static struct pending_event *pending_event_head; | ||
142 | static struct pending_event *pending_event_tail; | ||
143 | static struct pending_event *pending_event_avail; | ||
144 | static struct pending_event pending_event_prealloc[16]; | ||
145 | |||
146 | /* | ||
147 | * Put a pending event onto the available queue, so it can get reused. | ||
148 | * Attention! You must have the pending_event_spinlock before calling! | ||
149 | */ | ||
150 | static void free_pending_event(struct pending_event *ev) | ||
151 | { | ||
152 | if (ev != NULL) { | ||
153 | ev->next = pending_event_avail; | ||
154 | pending_event_avail = ev; | ||
155 | } | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * Enqueue the outbound event onto the queue. If the queue was
160 | * empty to begin with, we must also issue it via the Hypervisor
161 | * interface. There is a section of code below that will touch
162 | * the queue head pointer without the protection of the
163 | * pending_event_spinlock. This is OK, because we know that nobody
164 | * else will be modifying the head pointer when we do this.
165 | */ | ||
166 | static int signal_event(struct pending_event *ev) | ||
167 | { | ||
168 | int rc = 0; | ||
169 | unsigned long flags; | ||
170 | int go = 1; | ||
171 | struct pending_event *ev1; | ||
172 | HvLpEvent_Rc hv_rc; | ||
173 | |||
174 | /* enqueue the event */ | ||
175 | if (ev != NULL) { | ||
176 | ev->next = NULL; | ||
177 | spin_lock_irqsave(&pending_event_spinlock, flags); | ||
178 | if (pending_event_head == NULL) | ||
179 | pending_event_head = ev; | ||
180 | else { | ||
181 | go = 0; | ||
182 | pending_event_tail->next = ev; | ||
183 | } | ||
184 | pending_event_tail = ev; | ||
185 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | ||
186 | } | ||
187 | |||
188 | /* send the event */ | ||
189 | while (go) { | ||
190 | go = 0; | ||
191 | |||
192 | /* any DMA data to send beforehand? */ | ||
193 | if (pending_event_head->dma_data_length > 0) | ||
194 | HvCallEvent_dmaToSp(pending_event_head->dma_data, | ||
195 | pending_event_head->remote_address, | ||
196 | pending_event_head->dma_data_length, | ||
197 | HvLpDma_Direction_LocalToRemote); | ||
198 | |||
199 | hv_rc = HvCallEvent_signalLpEvent( | ||
200 | &pending_event_head->event.hp_lp_event); | ||
201 | if (hv_rc != HvLpEvent_Rc_Good) { | ||
202 | printk(KERN_ERR "mf.c: HvCallEvent_signalLpEvent() " | ||
203 | "failed with %d\n", (int)hv_rc); | ||
204 | |||
205 | spin_lock_irqsave(&pending_event_spinlock, flags); | ||
206 | ev1 = pending_event_head; | ||
207 | pending_event_head = pending_event_head->next; | ||
208 | if (pending_event_head != NULL) | ||
209 | go = 1; | ||
210 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | ||
211 | |||
212 | if (ev1 == ev) | ||
213 | rc = -EIO; | ||
214 | else if (ev1->hdlr != NULL) | ||
215 | (*ev1->hdlr)((void *)ev1->event.hp_lp_event.xCorrelationToken, -EIO); | ||
216 | |||
217 | spin_lock_irqsave(&pending_event_spinlock, flags); | ||
218 | free_pending_event(ev1); | ||
219 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | ||
220 | } | ||
221 | } | ||
222 | |||
223 | return rc; | ||
224 | } | ||
225 | |||
226 | /* | ||
227 | * Allocate a new pending_event structure, and initialize it. | ||
228 | */ | ||
229 | static struct pending_event *new_pending_event(void) | ||
230 | { | ||
231 | struct pending_event *ev = NULL; | ||
232 | HvLpIndex primary_lp = HvLpConfig_getPrimaryLpIndex(); | ||
233 | unsigned long flags; | ||
234 | struct HvLpEvent *hev; | ||
235 | |||
236 | spin_lock_irqsave(&pending_event_spinlock, flags); | ||
237 | if (pending_event_avail != NULL) { | ||
238 | ev = pending_event_avail; | ||
239 | pending_event_avail = pending_event_avail->next; | ||
240 | } | ||
241 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | ||
242 | if (ev == NULL) { | ||
243 | ev = kmalloc(sizeof(struct pending_event), GFP_ATOMIC); | ||
244 | if (ev == NULL) { | ||
245 | printk(KERN_ERR "mf.c: unable to kmalloc %ld bytes\n", | ||
246 | sizeof(struct pending_event)); | ||
247 | return NULL; | ||
248 | } | ||
249 | } | ||
250 | memset(ev, 0, sizeof(struct pending_event)); | ||
251 | hev = &ev->event.hp_lp_event; | ||
252 | hev->xFlags.xValid = 1; | ||
253 | hev->xFlags.xAckType = HvLpEvent_AckType_ImmediateAck; | ||
254 | hev->xFlags.xAckInd = HvLpEvent_AckInd_DoAck; | ||
255 | hev->xFlags.xFunction = HvLpEvent_Function_Int; | ||
256 | hev->xType = HvLpEvent_Type_MachineFac; | ||
257 | hev->xSourceLp = HvLpConfig_getLpIndex(); | ||
258 | hev->xTargetLp = primary_lp; | ||
259 | hev->xSizeMinus1 = sizeof(ev->event) - 1; | ||
260 | hev->xRc = HvLpEvent_Rc_Good; | ||
261 | hev->xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primary_lp, | ||
262 | HvLpEvent_Type_MachineFac); | ||
263 | hev->xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primary_lp, | ||
264 | HvLpEvent_Type_MachineFac); | ||
265 | |||
266 | return ev; | ||
267 | } | ||
268 | |||
269 | static int signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd) | ||
270 | { | ||
271 | struct pending_event *ev = new_pending_event(); | ||
272 | int rc; | ||
273 | struct vsp_rsp_data response; | ||
274 | |||
275 | if (ev == NULL) | ||
276 | return -ENOMEM; | ||
277 | |||
278 | init_completion(&response.com); | ||
279 | response.response = vsp_cmd; | ||
280 | ev->event.hp_lp_event.xSubtype = 6; | ||
281 | ev->event.hp_lp_event.x.xSubtypeData = | ||
282 | subtype_data('M', 'F', 'V', 'I'); | ||
283 | ev->event.data.vsp_cmd.token = (u64)&response; | ||
284 | ev->event.data.vsp_cmd.cmd = vsp_cmd->cmd; | ||
285 | ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex(); | ||
286 | ev->event.data.vsp_cmd.result_code = 0xFF; | ||
287 | ev->event.data.vsp_cmd.reserved = 0; | ||
288 | memcpy(&(ev->event.data.vsp_cmd.sub_data), | ||
289 | &(vsp_cmd->sub_data), sizeof(vsp_cmd->sub_data)); | ||
290 | mb(); | ||
291 | |||
292 | rc = signal_event(ev); | ||
293 | if (rc == 0) | ||
294 | wait_for_completion(&response.com); | ||
295 | return rc; | ||
296 | } | ||
297 | |||
298 | |||
299 | /* | ||
300 | * Send a 12-byte CE message to the primary partition VSP object | ||
301 | */ | ||
302 | static int signal_ce_msg(char *ce_msg, struct ce_msg_comp_data *completion) | ||
303 | { | ||
304 | struct pending_event *ev = new_pending_event(); | ||
305 | |||
306 | if (ev == NULL) | ||
307 | return -ENOMEM; | ||
308 | |||
309 | ev->event.hp_lp_event.xSubtype = 0; | ||
310 | ev->event.hp_lp_event.x.xSubtypeData = | ||
311 | subtype_data('M', 'F', 'C', 'E'); | ||
312 | memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12); | ||
313 | ev->event.data.ce_msg.completion = completion; | ||
314 | return signal_event(ev); | ||
315 | } | ||
316 | |||
317 | /* | ||
318 | * Send a 12-byte CE message (with no data) to the primary partition VSP object | ||
319 | */ | ||
320 | static int signal_ce_msg_simple(u8 ce_op, struct ce_msg_comp_data *completion) | ||
321 | { | ||
322 | u8 ce_msg[12]; | ||
323 | |||
324 | memset(ce_msg, 0, sizeof(ce_msg)); | ||
325 | ce_msg[3] = ce_op; | ||
326 | return signal_ce_msg(ce_msg, completion); | ||
327 | } | ||
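/*
 * Illustrative note: callers below use this helper for single-opcode
 * requests, e.g. signal_ce_msg_simple(0x4b, NULL) in mf_clear_src()
 * asks the VSP to clear the control panel.
 */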
328 | |||
329 | /* | ||
330 | * Send a 12-byte CE message and DMA data to the primary partition VSP object | ||
331 | */ | ||
332 | static int dma_and_signal_ce_msg(char *ce_msg, | ||
333 | struct ce_msg_comp_data *completion, void *dma_data, | ||
334 | unsigned dma_data_length, unsigned remote_address) | ||
335 | { | ||
336 | struct pending_event *ev = new_pending_event(); | ||
337 | |||
338 | if (ev == NULL) | ||
339 | return -ENOMEM; | ||
340 | |||
341 | ev->event.hp_lp_event.xSubtype = 0; | ||
342 | ev->event.hp_lp_event.x.xSubtypeData = | ||
343 | subtype_data('M', 'F', 'C', 'E'); | ||
344 | memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12); | ||
345 | ev->event.data.ce_msg.completion = completion; | ||
346 | memcpy(ev->dma_data, dma_data, dma_data_length); | ||
347 | ev->dma_data_length = dma_data_length; | ||
348 | ev->remote_address = remote_address; | ||
349 | return signal_event(ev); | ||
350 | } | ||
351 | |||
352 | /* | ||
353 | * Initiate a nice (hopefully) shutdown of Linux. We simply try
354 | * to send the init process a SIGINT signal. If this fails
355 | * (why?), we'll simply force it off in a not-so-nice
356 | * manner.
357 | */ | ||
358 | static int shutdown(void) | ||
359 | { | ||
360 | int rc = kill_proc(1, SIGINT, 1); | ||
361 | |||
362 | if (rc) { | ||
363 | printk(KERN_ALERT "mf.c: SIGINT to init failed (%d), " | ||
364 | "hard shutdown commencing\n", rc); | ||
365 | mf_power_off(); | ||
366 | } else | ||
367 | printk(KERN_INFO "mf.c: init has been successfully notified " | ||
368 | "to proceed with shutdown\n"); | ||
369 | return rc; | ||
370 | } | ||
371 | |||
372 | /* | ||
373 | * The primary partition VSP object is sending us a new | ||
374 | * event flow. Handle it... | ||
375 | */ | ||
376 | static void handle_int(struct io_mf_lp_event *event) | ||
377 | { | ||
378 | struct ce_msg_data *ce_msg_data; | ||
379 | struct ce_msg_data *pce_msg_data; | ||
380 | unsigned long flags; | ||
381 | struct pending_event *pev; | ||
382 | |||
383 | /* ack the interrupt */ | ||
384 | event->hp_lp_event.xRc = HvLpEvent_Rc_Good; | ||
385 | HvCallEvent_ackLpEvent(&event->hp_lp_event); | ||
386 | |||
387 | /* process interrupt */ | ||
388 | switch (event->hp_lp_event.xSubtype) { | ||
389 | case 0: /* CE message */ | ||
390 | ce_msg_data = &event->data.ce_msg; | ||
391 | switch (ce_msg_data->ce_msg[3]) { | ||
392 | case 0x5B: /* power control notification */ | ||
393 | if ((ce_msg_data->ce_msg[5] & 0x20) != 0) { | ||
394 | printk(KERN_INFO "mf.c: Commencing partition shutdown\n"); | ||
395 | if (shutdown() == 0) | ||
396 | signal_ce_msg_simple(0xDB, NULL); | ||
397 | } | ||
398 | break; | ||
399 | case 0xC0: /* get time */ | ||
400 | spin_lock_irqsave(&pending_event_spinlock, flags); | ||
401 | pev = pending_event_head; | ||
402 | if (pev != NULL) | ||
403 | pending_event_head = pending_event_head->next; | ||
404 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | ||
405 | if (pev == NULL) | ||
406 | break; | ||
407 | pce_msg_data = &pev->event.data.ce_msg; | ||
408 | if (pce_msg_data->ce_msg[3] != 0x40) | ||
409 | break; | ||
410 | if (pce_msg_data->completion != NULL) { | ||
411 | ce_msg_comp_hdlr handler = | ||
412 | pce_msg_data->completion->handler; | ||
413 | void *token = pce_msg_data->completion->token; | ||
414 | |||
415 | if (handler != NULL) | ||
416 | (*handler)(token, ce_msg_data); | ||
417 | } | ||
418 | spin_lock_irqsave(&pending_event_spinlock, flags); | ||
419 | free_pending_event(pev); | ||
420 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | ||
421 | /* send next waiting event */ | ||
422 | if (pending_event_head != NULL) | ||
423 | signal_event(NULL); | ||
424 | break; | ||
425 | } | ||
426 | break; | ||
427 | case 1: /* IT sys shutdown */ | ||
428 | printk(KERN_INFO "mf.c: Commencing system shutdown\n"); | ||
429 | shutdown(); | ||
430 | break; | ||
431 | } | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | * The primary partition VSP object is acknowledging the receipt | ||
436 | * of a flow we sent to them. If there are other flows queued | ||
437 | * up, we must send another one now... | ||
438 | */ | ||
439 | static void handle_ack(struct io_mf_lp_event *event) | ||
440 | { | ||
441 | unsigned long flags; | ||
442 | struct pending_event *two = NULL; | ||
443 | unsigned long free_it = 0; | ||
444 | struct ce_msg_data *ce_msg_data; | ||
445 | struct ce_msg_data *pce_msg_data; | ||
446 | struct vsp_rsp_data *rsp; | ||
447 | |||
448 | /* handle current event */ | ||
449 | if (pending_event_head == NULL) { | ||
450 | printk(KERN_ERR "mf.c: stack empty for receiving ack\n"); | ||
451 | return; | ||
452 | } | ||
453 | |||
454 | switch (event->hp_lp_event.xSubtype) { | ||
455 | case 0: /* CE msg */ | ||
456 | ce_msg_data = &event->data.ce_msg; | ||
457 | if (ce_msg_data->ce_msg[3] != 0x40) { | ||
458 | free_it = 1; | ||
459 | break; | ||
460 | } | ||
461 | if (ce_msg_data->ce_msg[2] == 0) | ||
462 | break; | ||
463 | free_it = 1; | ||
464 | pce_msg_data = &pending_event_head->event.data.ce_msg; | ||
465 | if (pce_msg_data->completion != NULL) { | ||
466 | ce_msg_comp_hdlr handler = | ||
467 | pce_msg_data->completion->handler; | ||
468 | void *token = pce_msg_data->completion->token; | ||
469 | |||
470 | if (handler != NULL) | ||
471 | (*handler)(token, ce_msg_data); | ||
472 | } | ||
473 | break; | ||
474 | case 4: /* allocate */ | ||
475 | case 5: /* deallocate */ | ||
476 | if (pending_event_head->hdlr != NULL) | ||
477 | (*pending_event_head->hdlr)((void *)event->hp_lp_event.xCorrelationToken, event->data.alloc.count); | ||
478 | free_it = 1; | ||
479 | break; | ||
480 | case 6: | ||
481 | free_it = 1; | ||
482 | rsp = (struct vsp_rsp_data *)event->data.vsp_cmd.token; | ||
483 | if (rsp == NULL) { | ||
484 | printk(KERN_ERR "mf.c: no rsp\n"); | ||
485 | break; | ||
486 | } | ||
487 | if (rsp->response != NULL) | ||
488 | memcpy(rsp->response, &event->data.vsp_cmd, | ||
489 | sizeof(event->data.vsp_cmd)); | ||
490 | complete(&rsp->com); | ||
491 | break; | ||
492 | } | ||
493 | |||
494 | /* remove from queue */ | ||
495 | spin_lock_irqsave(&pending_event_spinlock, flags); | ||
496 | if ((pending_event_head != NULL) && (free_it == 1)) { | ||
497 | struct pending_event *oldHead = pending_event_head; | ||
498 | |||
499 | pending_event_head = pending_event_head->next; | ||
500 | two = pending_event_head; | ||
501 | free_pending_event(oldHead); | ||
502 | } | ||
503 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | ||
504 | |||
505 | /* send next waiting event */ | ||
506 | if (two != NULL) | ||
507 | signal_event(NULL); | ||
508 | } | ||
509 | |||
510 | /* | ||
511 | * This is the generic event handler we are registering with | ||
512 | * the Hypervisor. Ensure the flows are for us, and then | ||
513 | * parse it enough to know if it is an interrupt or an | ||
514 | * acknowledge. | ||
515 | */ | ||
516 | static void hv_handler(struct HvLpEvent *event, struct pt_regs *regs) | ||
517 | { | ||
518 | if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) { | ||
519 | switch(event->xFlags.xFunction) { | ||
520 | case HvLpEvent_Function_Ack: | ||
521 | handle_ack((struct io_mf_lp_event *)event); | ||
522 | break; | ||
523 | case HvLpEvent_Function_Int: | ||
524 | handle_int((struct io_mf_lp_event *)event); | ||
525 | break; | ||
526 | default: | ||
527 | printk(KERN_ERR "mf.c: non ack/int event received\n"); | ||
528 | break; | ||
529 | } | ||
530 | } else | ||
531 | printk(KERN_ERR "mf.c: alien event received\n"); | ||
532 | } | ||
533 | |||
534 | /* | ||
535 | * Global kernel interface to allocate and seed events into the | ||
536 | * Hypervisor. | ||
537 | */ | ||
538 | void mf_allocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type, | ||
539 | unsigned size, unsigned count, MFCompleteHandler hdlr, | ||
540 | void *user_token) | ||
541 | { | ||
542 | struct pending_event *ev = new_pending_event(); | ||
543 | int rc; | ||
544 | |||
545 | if (ev == NULL) { | ||
546 | rc = -ENOMEM; | ||
547 | } else { | ||
548 | ev->event.hp_lp_event.xSubtype = 4; | ||
549 | ev->event.hp_lp_event.xCorrelationToken = (u64)user_token; | ||
550 | ev->event.hp_lp_event.x.xSubtypeData = | ||
551 | subtype_data('M', 'F', 'M', 'A'); | ||
552 | ev->event.data.alloc.target_lp = target_lp; | ||
553 | ev->event.data.alloc.type = type; | ||
554 | ev->event.data.alloc.size = size; | ||
555 | ev->event.data.alloc.count = count; | ||
556 | ev->hdlr = hdlr; | ||
557 | rc = signal_event(ev); | ||
558 | } | ||
559 | if ((rc != 0) && (hdlr != NULL)) | ||
560 | (*hdlr)(user_token, rc); | ||
561 | } | ||
562 | EXPORT_SYMBOL(mf_allocate_lp_events); | ||
563 | |||
564 | /* | ||
565 | * Global kernel interface to unseed and deallocate events already in | ||
566 | * Hypervisor. | ||
567 | */ | ||
568 | void mf_deallocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type, | ||
569 | unsigned count, MFCompleteHandler hdlr, void *user_token) | ||
570 | { | ||
571 | struct pending_event *ev = new_pending_event(); | ||
572 | int rc; | ||
573 | |||
574 | if (ev == NULL) | ||
575 | rc = -ENOMEM; | ||
576 | else { | ||
577 | ev->event.hp_lp_event.xSubtype = 5; | ||
578 | ev->event.hp_lp_event.xCorrelationToken = (u64)user_token; | ||
579 | ev->event.hp_lp_event.x.xSubtypeData = | ||
580 | subtype_data('M', 'F', 'M', 'D'); | ||
581 | ev->event.data.alloc.target_lp = target_lp; | ||
582 | ev->event.data.alloc.type = type; | ||
583 | ev->event.data.alloc.count = count; | ||
584 | ev->hdlr = hdlr; | ||
585 | rc = signal_event(ev); | ||
586 | } | ||
587 | if ((rc != 0) && (hdlr != NULL)) | ||
588 | (*hdlr)(user_token, rc); | ||
589 | } | ||
590 | EXPORT_SYMBOL(mf_deallocate_lp_events); | ||
591 | |||
592 | /* | ||
593 | * Global kernel interface to tell the VSP object in the primary | ||
594 | * partition to power this partition off. | ||
595 | */ | ||
596 | void mf_power_off(void) | ||
597 | { | ||
598 | printk(KERN_INFO "mf.c: Down it goes...\n"); | ||
599 | signal_ce_msg_simple(0x4d, NULL); | ||
600 | for (;;) | ||
601 | ; | ||
602 | } | ||
603 | |||
604 | /* | ||
605 | * Global kernel interface to tell the VSP object in the primary | ||
606 | * partition to reboot this partition. | ||
607 | */ | ||
608 | void mf_reboot(void) | ||
609 | { | ||
610 | printk(KERN_INFO "mf.c: Preparing to bounce...\n"); | ||
611 | signal_ce_msg_simple(0x4e, NULL); | ||
612 | for (;;) | ||
613 | ; | ||
614 | } | ||
615 | |||
616 | /* | ||
617 | * Display a single word SRC onto the VSP control panel. | ||
618 | */ | ||
619 | void mf_display_src(u32 word) | ||
620 | { | ||
621 | u8 ce[12]; | ||
622 | |||
623 | memset(ce, 0, sizeof(ce)); | ||
624 | ce[3] = 0x4a; | ||
625 | ce[7] = 0x01; | ||
626 | ce[8] = word >> 24; | ||
627 | ce[9] = word >> 16; | ||
628 | ce[10] = word >> 8; | ||
629 | ce[11] = word; | ||
630 | signal_ce_msg(ce, NULL); | ||
631 | } | ||
632 | |||
633 | /* | ||
634 | * Display a single word SRC of the form "PROGXXXX" on the VSP control panel. | ||
635 | */ | ||
636 | void mf_display_progress(u16 value) | ||
637 | { | ||
638 | u8 ce[12]; | ||
639 | u8 src[72]; | ||
640 | |||
641 | memcpy(ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12); | ||
642 | memcpy(src, "\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00" | ||
643 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
644 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
645 | "\x00\x00\x00\x00PROGxxxx ", | ||
646 | 72); | ||
647 | src[6] = value >> 8; | ||
648 | src[7] = value & 255; | ||
649 | src[44] = "0123456789ABCDEF"[(value >> 12) & 15]; | ||
650 | src[45] = "0123456789ABCDEF"[(value >> 8) & 15]; | ||
651 | src[46] = "0123456789ABCDEF"[(value >> 4) & 15]; | ||
652 | src[47] = "0123456789ABCDEF"[value & 15]; | ||
653 | dma_and_signal_ce_msg(ce, NULL, src, sizeof(src), 9 * 64 * 1024); | ||
654 | } | ||
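/*
 * Illustrative example: the four hex digits of 'value' replace the
 * "xxxx" placeholder at src[44..47], so mf_display_progress(0x1234)
 * shows the SRC "PROG1234" on the VSP control panel.
 */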
655 | |||
656 | /* | ||
657 | * Clear the VSP control panel. Used to "erase" an SRC that was | ||
658 | * previously displayed. | ||
659 | */ | ||
660 | void mf_clear_src(void) | ||
661 | { | ||
662 | signal_ce_msg_simple(0x4b, NULL); | ||
663 | } | ||
664 | |||
665 | /* | ||
666 | * Initialization code here. | ||
667 | */ | ||
668 | void mf_init(void) | ||
669 | { | ||
670 | int i; | ||
671 | |||
672 | /* initialize */ | ||
673 | spin_lock_init(&pending_event_spinlock); | ||
674 | for (i = 0; | ||
675 | i < sizeof(pending_event_prealloc) / sizeof(*pending_event_prealloc); | ||
676 | ++i) | ||
677 | free_pending_event(&pending_event_prealloc[i]); | ||
678 | HvLpEvent_registerHandler(HvLpEvent_Type_MachineFac, &hv_handler); | ||
679 | |||
680 | /* virtual continue ack */ | ||
681 | signal_ce_msg_simple(0x57, NULL); | ||
682 | |||
683 | /* initialization complete */ | ||
684 | printk(KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities " | ||
685 | "initialized\n"); | ||
686 | } | ||
687 | |||
688 | struct rtc_time_data { | ||
689 | struct completion com; | ||
690 | struct ce_msg_data ce_msg; | ||
691 | int rc; | ||
692 | }; | ||
693 | |||
694 | static void get_rtc_time_complete(void *token, struct ce_msg_data *ce_msg) | ||
695 | { | ||
696 | struct rtc_time_data *rtc = token; | ||
697 | |||
698 | memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg)); | ||
699 | rtc->rc = 0; | ||
700 | complete(&rtc->com); | ||
701 | } | ||
702 | |||
703 | static int rtc_set_tm(int rc, u8 *ce_msg, struct rtc_time *tm) | ||
704 | { | ||
705 | tm->tm_wday = 0; | ||
706 | tm->tm_yday = 0; | ||
707 | tm->tm_isdst = 0; | ||
708 | if (rc) { | ||
709 | tm->tm_sec = 0; | ||
710 | tm->tm_min = 0; | ||
711 | tm->tm_hour = 0; | ||
712 | tm->tm_mday = 15; | ||
713 | tm->tm_mon = 5; | ||
714 | tm->tm_year = 52; | ||
715 | return rc; | ||
716 | } | ||
717 | |||
718 | if ((ce_msg[2] == 0xa9) || | ||
719 | (ce_msg[2] == 0xaf)) { | ||
720 | /* TOD clock is not set */ | ||
721 | tm->tm_sec = 1; | ||
722 | tm->tm_min = 1; | ||
723 | tm->tm_hour = 1; | ||
724 | tm->tm_mday = 10; | ||
725 | tm->tm_mon = 8; | ||
726 | tm->tm_year = 71; | ||
727 | mf_set_rtc(tm); | ||
728 | } | ||
729 | { | ||
730 | u8 year = ce_msg[5]; | ||
731 | u8 sec = ce_msg[6]; | ||
732 | u8 min = ce_msg[7]; | ||
733 | u8 hour = ce_msg[8]; | ||
734 | u8 day = ce_msg[10]; | ||
735 | u8 mon = ce_msg[11]; | ||
736 | |||
737 | BCD_TO_BIN(sec); | ||
738 | BCD_TO_BIN(min); | ||
739 | BCD_TO_BIN(hour); | ||
740 | BCD_TO_BIN(day); | ||
741 | BCD_TO_BIN(mon); | ||
742 | BCD_TO_BIN(year); | ||
743 | |||
744 | if (year <= 69) | ||
745 | year += 100; | ||
746 | |||
747 | tm->tm_sec = sec; | ||
748 | tm->tm_min = min; | ||
749 | tm->tm_hour = hour; | ||
750 | tm->tm_mday = day; | ||
751 | tm->tm_mon = mon; | ||
752 | tm->tm_year = year; | ||
753 | } | ||
754 | |||
755 | return 0; | ||
756 | } | ||
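/*
 * Illustrative note: tm_year counts years since 1900, so the BCD year
 * from the VSP is widened for the 2000s -- e.g. a BCD year of 0x05
 * becomes 5, then 105 after the "+= 100" adjustment, i.e. 2005.
 */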
757 | |||
758 | int mf_get_rtc(struct rtc_time *tm) | ||
759 | { | ||
760 | struct ce_msg_comp_data ce_complete; | ||
761 | struct rtc_time_data rtc_data; | ||
762 | int rc; | ||
763 | |||
764 | memset(&ce_complete, 0, sizeof(ce_complete)); | ||
765 | memset(&rtc_data, 0, sizeof(rtc_data)); | ||
766 | init_completion(&rtc_data.com); | ||
767 | ce_complete.handler = &get_rtc_time_complete; | ||
768 | ce_complete.token = &rtc_data; | ||
769 | rc = signal_ce_msg_simple(0x40, &ce_complete); | ||
770 | if (rc) | ||
771 | return rc; | ||
772 | wait_for_completion(&rtc_data.com); | ||
773 | return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm); | ||
774 | } | ||
775 | |||
776 | struct boot_rtc_time_data { | ||
777 | int busy; | ||
778 | struct ce_msg_data ce_msg; | ||
779 | int rc; | ||
780 | }; | ||
781 | |||
782 | static void get_boot_rtc_time_complete(void *token, struct ce_msg_data *ce_msg) | ||
783 | { | ||
784 | struct boot_rtc_time_data *rtc = token; | ||
785 | |||
786 | memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg)); | ||
787 | rtc->rc = 0; | ||
788 | rtc->busy = 0; | ||
789 | } | ||
790 | |||
791 | int mf_get_boot_rtc(struct rtc_time *tm) | ||
792 | { | ||
793 | struct ce_msg_comp_data ce_complete; | ||
794 | struct boot_rtc_time_data rtc_data; | ||
795 | int rc; | ||
796 | |||
797 | memset(&ce_complete, 0, sizeof(ce_complete)); | ||
798 | memset(&rtc_data, 0, sizeof(rtc_data)); | ||
799 | rtc_data.busy = 1; | ||
800 | ce_complete.handler = &get_boot_rtc_time_complete; | ||
801 | ce_complete.token = &rtc_data; | ||
802 | rc = signal_ce_msg_simple(0x40, &ce_complete); | ||
803 | if (rc) | ||
804 | return rc; | ||
805 | /* We need to poll here as we are not yet taking interrupts */ | ||
806 | while (rtc_data.busy) { | ||
807 | if (hvlpevent_is_pending()) | ||
808 | process_hvlpevents(NULL); | ||
809 | } | ||
810 | return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm); | ||
811 | } | ||
812 | |||
813 | int mf_set_rtc(struct rtc_time *tm) | ||
814 | { | ||
815 | char ce_time[12]; | ||
816 | u8 day, mon, hour, min, sec, y1, y2; | ||
817 | unsigned year; | ||
818 | |||
819 | year = 1900 + tm->tm_year; | ||
820 | y1 = year / 100; | ||
821 | y2 = year % 100; | ||
822 | |||
823 | sec = tm->tm_sec; | ||
824 | min = tm->tm_min; | ||
825 | hour = tm->tm_hour; | ||
826 | day = tm->tm_mday; | ||
827 | mon = tm->tm_mon + 1; | ||
828 | |||
829 | BIN_TO_BCD(sec); | ||
830 | BIN_TO_BCD(min); | ||
831 | BIN_TO_BCD(hour); | ||
832 | BIN_TO_BCD(mon); | ||
833 | BIN_TO_BCD(day); | ||
834 | BIN_TO_BCD(y1); | ||
835 | BIN_TO_BCD(y2); | ||
836 | |||
837 | memset(ce_time, 0, sizeof(ce_time)); | ||
838 | ce_time[3] = 0x41; | ||
839 | ce_time[4] = y1; | ||
840 | ce_time[5] = y2; | ||
841 | ce_time[6] = sec; | ||
842 | ce_time[7] = min; | ||
843 | ce_time[8] = hour; | ||
844 | ce_time[10] = day; | ||
845 | ce_time[11] = mon; | ||
846 | |||
847 | return signal_ce_msg(ce_time, NULL); | ||
848 | } | ||
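/*
 * Illustrative example: the century and year travel as separate BCD
 * bytes, so for 2005 y1 becomes 0x20 and y2 becomes 0x05 after
 * BIN_TO_BCD.
 */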
849 | |||
850 | #ifdef CONFIG_PROC_FS | ||
851 | |||
852 | static int proc_mf_dump_cmdline(char *page, char **start, off_t off, | ||
853 | int count, int *eof, void *data) | ||
854 | { | ||
855 | int len; | ||
856 | char *p; | ||
857 | struct vsp_cmd_data vsp_cmd; | ||
858 | int rc; | ||
859 | dma_addr_t dma_addr; | ||
860 | |||
861 | /* The HV appears to return no more than 256 bytes of command line */ | ||
862 | if (off >= 256) | ||
863 | return 0; | ||
864 | if ((off + count) > 256) | ||
865 | count = 256 - off; | ||
866 | |||
867 | dma_addr = dma_map_single(iSeries_vio_dev, page, off + count, | ||
868 | DMA_FROM_DEVICE); | ||
869 | if (dma_mapping_error(dma_addr)) | ||
870 | return -ENOMEM; | ||
871 | memset(page, 0, off + count); | ||
872 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | ||
873 | vsp_cmd.cmd = 33; | ||
874 | vsp_cmd.sub_data.kern.token = dma_addr; | ||
875 | vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex; | ||
876 | vsp_cmd.sub_data.kern.side = (u64)data; | ||
877 | vsp_cmd.sub_data.kern.length = off + count; | ||
878 | mb(); | ||
879 | rc = signal_vsp_instruction(&vsp_cmd); | ||
880 | dma_unmap_single(iSeries_vio_dev, dma_addr, off + count, | ||
881 | DMA_FROM_DEVICE); | ||
882 | if (rc) | ||
883 | return rc; | ||
884 | if (vsp_cmd.result_code != 0) | ||
885 | return -ENOMEM; | ||
886 | p = page; | ||
887 | len = 0; | ||
888 | while (len < (off + count)) { | ||
889 | if ((*p == '\0') || (*p == '\n')) { | ||
890 | if (*p == '\0') | ||
891 | *p = '\n'; | ||
892 | p++; | ||
893 | len++; | ||
894 | *eof = 1; | ||
895 | break; | ||
896 | } | ||
897 | p++; | ||
898 | len++; | ||
899 | } | ||
900 | |||
901 | if (len < off) { | ||
902 | *eof = 1; | ||
903 | len = 0; | ||
904 | } | ||
905 | return len; | ||
906 | } | ||
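/*
 * Illustrative note: this read handler backs the per-side entries
 * created in mf_proc_init() below, so e.g. "cat /proc/iSeries/mf/A/cmdline"
 * returns (at most 256 bytes of) the kernel command line stored for
 * side A.
 */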
907 | |||
908 | #if 0 | ||
909 | static int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side) | ||
910 | { | ||
911 | struct vsp_cmd_data vsp_cmd; | ||
912 | int rc; | ||
913 | int len = *size; | ||
914 | dma_addr_t dma_addr; | ||
915 | |||
916 | dma_addr = dma_map_single(iSeries_vio_dev, buffer, len, | ||
917 | DMA_FROM_DEVICE); | ||
918 | memset(buffer, 0, len); | ||
919 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | ||
920 | vsp_cmd.cmd = 32; | ||
921 | vsp_cmd.sub_data.kern.token = dma_addr; | ||
922 | vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex; | ||
923 | vsp_cmd.sub_data.kern.side = side; | ||
924 | vsp_cmd.sub_data.kern.offset = offset; | ||
925 | vsp_cmd.sub_data.kern.length = len; | ||
926 | mb(); | ||
927 | rc = signal_vsp_instruction(&vsp_cmd); | ||
928 | if (rc == 0) { | ||
929 | if (vsp_cmd.result_code == 0) | ||
930 | *size = vsp_cmd.sub_data.length_out; | ||
931 | else | ||
932 | rc = -ENOMEM; | ||
933 | } | ||
934 | |||
935 | dma_unmap_single(iSeries_vio_dev, dma_addr, len, DMA_FROM_DEVICE); | ||
936 | |||
937 | return rc; | ||
938 | } | ||
939 | |||
940 | static int proc_mf_dump_vmlinux(char *page, char **start, off_t off, | ||
941 | int count, int *eof, void *data) | ||
942 | { | ||
943 | int sizeToGet = count; | ||
944 | |||
945 | if (!capable(CAP_SYS_ADMIN)) | ||
946 | return -EACCES; | ||
947 | |||
948 | if (mf_getVmlinuxChunk(page, &sizeToGet, off, (u64)data) == 0) { | ||
949 | if (sizeToGet != 0) { | ||
950 | *start = page + off; | ||
951 | return sizeToGet; | ||
952 | } | ||
953 | *eof = 1; | ||
954 | return 0; | ||
955 | } | ||
956 | *eof = 1; | ||
957 | return 0; | ||
958 | } | ||
959 | #endif | ||
960 | |||
961 | static int proc_mf_dump_side(char *page, char **start, off_t off, | ||
962 | int count, int *eof, void *data) | ||
963 | { | ||
964 | int len; | ||
965 | char mf_current_side = ' '; | ||
966 | struct vsp_cmd_data vsp_cmd; | ||
967 | |||
968 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | ||
969 | vsp_cmd.cmd = 2; | ||
970 | vsp_cmd.sub_data.ipl_type = 0; | ||
971 | mb(); | ||
972 | |||
973 | if (signal_vsp_instruction(&vsp_cmd) == 0) { | ||
974 | if (vsp_cmd.result_code == 0) { | ||
975 | switch (vsp_cmd.sub_data.ipl_type) { | ||
976 | case 0: mf_current_side = 'A'; | ||
977 | break; | ||
978 | case 1: mf_current_side = 'B'; | ||
979 | break; | ||
980 | case 2: mf_current_side = 'C'; | ||
981 | break; | ||
982 | default: mf_current_side = 'D'; | ||
983 | break; | ||
984 | } | ||
985 | } | ||
986 | } | ||
987 | |||
988 | len = sprintf(page, "%c\n", mf_current_side); | ||
989 | |||
990 | if (len <= (off + count)) | ||
991 | *eof = 1; | ||
992 | *start = page + off; | ||
993 | len -= off; | ||
994 | if (len > count) | ||
995 | len = count; | ||
996 | if (len < 0) | ||
997 | len = 0; | ||
998 | return len; | ||
999 | } | ||
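/*
 * Illustrative note: reading /proc/iSeries/mf/side therefore prints a
 * single character, 'A'-'D' for the current IPL side (or ' ' if the
 * VSP query fails), followed by a newline.
 */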
1000 | |||
1001 | static int proc_mf_change_side(struct file *file, const char __user *buffer, | ||
1002 | unsigned long count, void *data) | ||
1003 | { | ||
1004 | char side; | ||
1005 | u64 newSide; | ||
1006 | struct vsp_cmd_data vsp_cmd; | ||
1007 | |||
1008 | if (!capable(CAP_SYS_ADMIN)) | ||
1009 | return -EACCES; | ||
1010 | |||
1011 | if (count == 0) | ||
1012 | return 0; | ||
1013 | |||
1014 | if (get_user(side, buffer)) | ||
1015 | return -EFAULT; | ||
1016 | |||
1017 | switch (side) { | ||
1018 | case 'A': newSide = 0; | ||
1019 | break; | ||
1020 | case 'B': newSide = 1; | ||
1021 | break; | ||
1022 | case 'C': newSide = 2; | ||
1023 | break; | ||
1024 | case 'D': newSide = 3; | ||
1025 | break; | ||
1026 | default: | ||
1027 | printk(KERN_ERR "mf_proc.c: proc_mf_change_side: invalid side\n"); | ||
1028 | return -EINVAL; | ||
1029 | } | ||
1030 | |||
1031 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | ||
1032 | vsp_cmd.sub_data.ipl_type = newSide; | ||
1033 | vsp_cmd.cmd = 10; | ||
1034 | |||
1035 | (void)signal_vsp_instruction(&vsp_cmd); | ||
1036 | |||
1037 | return count; | ||
1038 | } | ||
1039 | |||
1040 | #if 0 | ||
1041 | static void mf_getSrcHistory(char *buffer, int size) | ||
1042 | { | ||
1043 | struct IplTypeReturnStuff return_stuff; | ||
1044 | struct pending_event *ev = new_pending_event(); | ||
1045 | int rc = 0; | ||
1046 | char *pages[4]; | ||
1047 | |||
1048 | pages[0] = kmalloc(4096, GFP_ATOMIC); | ||
1049 | pages[1] = kmalloc(4096, GFP_ATOMIC); | ||
1050 | pages[2] = kmalloc(4096, GFP_ATOMIC); | ||
1051 | pages[3] = kmalloc(4096, GFP_ATOMIC); | ||
1052 | if ((ev == NULL) || (pages[0] == NULL) || (pages[1] == NULL) | ||
1053 | || (pages[2] == NULL) || (pages[3] == NULL)) | ||
1054 | return -ENOMEM; | ||
1055 | |||
1056 | return_stuff.xType = 0; | ||
1057 | return_stuff.xRc = 0; | ||
1058 | return_stuff.xDone = 0; | ||
1059 | ev->event.hp_lp_event.xSubtype = 6; | ||
1060 | ev->event.hp_lp_event.x.xSubtypeData = | ||
1061 | subtype_data('M', 'F', 'V', 'I'); | ||
1062 | ev->event.data.vsp_cmd.xEvent = &return_stuff; | ||
1063 | ev->event.data.vsp_cmd.cmd = 4; | ||
1064 | ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex(); | ||
1065 | ev->event.data.vsp_cmd.result_code = 0xFF; | ||
1066 | ev->event.data.vsp_cmd.reserved = 0; | ||
1067 | ev->event.data.vsp_cmd.sub_data.page[0] = ISERIES_HV_ADDR(pages[0]); | ||
1068 | ev->event.data.vsp_cmd.sub_data.page[1] = ISERIES_HV_ADDR(pages[1]); | ||
1069 | ev->event.data.vsp_cmd.sub_data.page[2] = ISERIES_HV_ADDR(pages[2]); | ||
1070 | ev->event.data.vsp_cmd.sub_data.page[3] = ISERIES_HV_ADDR(pages[3]); | ||
1071 | mb(); | ||
1072 | if (signal_event(ev) != 0) | ||
1073 | return; | ||
1074 | |||
1075 | while (return_stuff.xDone != 1) | ||
1076 | udelay(10); | ||
1077 | if (return_stuff.xRc == 0) | ||
1078 | memcpy(buffer, pages[0], size); | ||
1079 | kfree(pages[0]); | ||
1080 | kfree(pages[1]); | ||
1081 | kfree(pages[2]); | ||
1082 | kfree(pages[3]); | ||
1083 | } | ||
1084 | #endif | ||
1085 | |||
1086 | static int proc_mf_dump_src(char *page, char **start, off_t off, | ||
1087 | int count, int *eof, void *data) | ||
1088 | { | ||
1089 | #if 0 | ||
1090 | int len; | ||
1091 | |||
1092 | mf_getSrcHistory(page, count); | ||
1093 | len = count; | ||
1094 | len -= off; | ||
1095 | if (len < count) { | ||
1096 | *eof = 1; | ||
1097 | if (len <= 0) | ||
1098 | return 0; | ||
1099 | } else | ||
1100 | len = count; | ||
1101 | *start = page + off; | ||
1102 | return len; | ||
1103 | #else | ||
1104 | return 0; | ||
1105 | #endif | ||
1106 | } | ||
1107 | |||
1108 | static int proc_mf_change_src(struct file *file, const char __user *buffer, | ||
1109 | unsigned long count, void *data) | ||
1110 | { | ||
1111 | char stkbuf[10]; | ||
1112 | |||
1113 | if (!capable(CAP_SYS_ADMIN)) | ||
1114 | return -EACCES; | ||
1115 | |||
1116 | if ((count < 4) && (count != 1)) { | ||
1117 | printk(KERN_ERR "mf_proc: invalid src\n"); | ||
1118 | return -EINVAL; | ||
1119 | } | ||
1120 | |||
1121 | if (count > (sizeof(stkbuf) - 1)) | ||
1122 | count = sizeof(stkbuf) - 1; | ||
1123 | if (copy_from_user(stkbuf, buffer, count)) | ||
1124 | return -EFAULT; | ||
1125 | |||
1126 | if ((count == 1) && (*stkbuf == '\0')) | ||
1127 | mf_clear_src(); | ||
1128 | else | ||
1129 | mf_display_src(*(u32 *)stkbuf); | ||
1130 | |||
1131 | return count; | ||
1132 | } | ||
1133 | |||
1134 | static int proc_mf_change_cmdline(struct file *file, const char __user *buffer, | ||
1135 | unsigned long count, void *data) | ||
1136 | { | ||
1137 | struct vsp_cmd_data vsp_cmd; | ||
1138 | dma_addr_t dma_addr; | ||
1139 | char *page; | ||
1140 | int ret = -EACCES; | ||
1141 | |||
1142 | if (!capable(CAP_SYS_ADMIN)) | ||
1143 | goto out; | ||
1144 | |||
1145 | dma_addr = 0; | ||
1146 | page = dma_alloc_coherent(iSeries_vio_dev, count, &dma_addr, | ||
1147 | GFP_ATOMIC); | ||
1148 | ret = -ENOMEM; | ||
1149 | if (page == NULL) | ||
1150 | goto out; | ||
1151 | |||
1152 | ret = -EFAULT; | ||
1153 | if (copy_from_user(page, buffer, count)) | ||
1154 | goto out_free; | ||
1155 | |||
1156 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | ||
1157 | vsp_cmd.cmd = 31; | ||
1158 | vsp_cmd.sub_data.kern.token = dma_addr; | ||
1159 | vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex; | ||
1160 | vsp_cmd.sub_data.kern.side = (u64)data; | ||
1161 | vsp_cmd.sub_data.kern.length = count; | ||
1162 | mb(); | ||
1163 | (void)signal_vsp_instruction(&vsp_cmd); | ||
1164 | ret = count; | ||
1165 | |||
1166 | out_free: | ||
1167 | dma_free_coherent(iSeries_vio_dev, count, page, dma_addr); | ||
1168 | out: | ||
1169 | return ret; | ||
1170 | } | ||
1171 | |||
1172 | static ssize_t proc_mf_change_vmlinux(struct file *file, | ||
1173 | const char __user *buf, | ||
1174 | size_t count, loff_t *ppos) | ||
1175 | { | ||
1176 | struct proc_dir_entry *dp = PDE(file->f_dentry->d_inode); | ||
1177 | ssize_t rc; | ||
1178 | dma_addr_t dma_addr; | ||
1179 | char *page; | ||
1180 | struct vsp_cmd_data vsp_cmd; | ||
1181 | |||
1182 | rc = -EACCES; | ||
1183 | if (!capable(CAP_SYS_ADMIN)) | ||
1184 | goto out; | ||
1185 | |||
1186 | dma_addr = 0; | ||
1187 | page = dma_alloc_coherent(iSeries_vio_dev, count, &dma_addr, | ||
1188 | GFP_ATOMIC); | ||
1189 | rc = -ENOMEM; | ||
1190 | if (page == NULL) { | ||
1191 | printk(KERN_ERR "mf.c: couldn't allocate memory to set vmlinux chunk\n"); | ||
1192 | goto out; | ||
1193 | } | ||
1194 | rc = -EFAULT; | ||
1195 | if (copy_from_user(page, buf, count)) | ||
1196 | goto out_free; | ||
1197 | |||
1198 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | ||
1199 | vsp_cmd.cmd = 30; | ||
1200 | vsp_cmd.sub_data.kern.token = dma_addr; | ||
1201 | vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex; | ||
1202 | vsp_cmd.sub_data.kern.side = (u64)dp->data; | ||
1203 | vsp_cmd.sub_data.kern.offset = *ppos; | ||
1204 | vsp_cmd.sub_data.kern.length = count; | ||
1205 | mb(); | ||
1206 | rc = signal_vsp_instruction(&vsp_cmd); | ||
1207 | if (rc) | ||
1208 | goto out_free; | ||
1209 | rc = -ENOMEM; | ||
1210 | if (vsp_cmd.result_code != 0) | ||
1211 | goto out_free; | ||
1212 | |||
1213 | *ppos += count; | ||
1214 | rc = count; | ||
1215 | out_free: | ||
1216 | dma_free_coherent(iSeries_vio_dev, count, page, dma_addr); | ||
1217 | out: | ||
1218 | return rc; | ||
1219 | } | ||
1220 | |||
1221 | static struct file_operations proc_vmlinux_operations = { | ||
1222 | .write = proc_mf_change_vmlinux, | ||
1223 | }; | ||
1224 | |||
1225 | static int __init mf_proc_init(void) | ||
1226 | { | ||
1227 | struct proc_dir_entry *mf_proc_root; | ||
1228 | struct proc_dir_entry *ent; | ||
1229 | struct proc_dir_entry *mf; | ||
1230 | char name[2]; | ||
1231 | int i; | ||
1232 | |||
1233 | mf_proc_root = proc_mkdir("iSeries/mf", NULL); | ||
1234 | if (!mf_proc_root) | ||
1235 | return 1; | ||
1236 | |||
1237 | name[1] = '\0'; | ||
1238 | for (i = 0; i < 4; i++) { | ||
1239 | name[0] = 'A' + i; | ||
1240 | mf = proc_mkdir(name, mf_proc_root); | ||
1241 | if (!mf) | ||
1242 | return 1; | ||
1243 | |||
1244 | ent = create_proc_entry("cmdline", S_IFREG|S_IRUSR|S_IWUSR, mf); | ||
1245 | if (!ent) | ||
1246 | return 1; | ||
1247 | ent->nlink = 1; | ||
1248 | ent->data = (void *)(long)i; | ||
1249 | ent->read_proc = proc_mf_dump_cmdline; | ||
1250 | ent->write_proc = proc_mf_change_cmdline; | ||
1251 | |||
1252 | if (i == 3) /* no vmlinux entry for 'D' */ | ||
1253 | continue; | ||
1254 | |||
1255 | ent = create_proc_entry("vmlinux", S_IFREG|S_IWUSR, mf); | ||
1256 | if (!ent) | ||
1257 | return 1; | ||
1258 | ent->nlink = 1; | ||
1259 | ent->data = (void *)(long)i; | ||
1260 | ent->proc_fops = &proc_vmlinux_operations; | ||
1261 | } | ||
1262 | |||
1263 | ent = create_proc_entry("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root); | ||
1264 | if (!ent) | ||
1265 | return 1; | ||
1266 | ent->nlink = 1; | ||
1267 | ent->data = (void *)0; | ||
1268 | ent->read_proc = proc_mf_dump_side; | ||
1269 | ent->write_proc = proc_mf_change_side; | ||
1270 | |||
1271 | ent = create_proc_entry("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root); | ||
1272 | if (!ent) | ||
1273 | return 1; | ||
1274 | ent->nlink = 1; | ||
1275 | ent->data = (void *)0; | ||
1276 | ent->read_proc = proc_mf_dump_src; | ||
1277 | ent->write_proc = proc_mf_change_src; | ||
1278 | |||
1279 | return 0; | ||
1280 | } | ||
1281 | |||
1282 | __initcall(mf_proc_init); | ||
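/*
 * Illustrative note: after mf_proc_init() runs, the tree looks like
 * /proc/iSeries/mf/{A,B,C,D}/cmdline and /proc/iSeries/mf/{A,B,C}/vmlinux
 * (no vmlinux entry for side 'D'), plus /proc/iSeries/mf/side and
 * /proc/iSeries/mf/src.
 */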
1283 | |||
1284 | #endif /* CONFIG_PROC_FS */ | ||
1285 | |||
1286 | /* | ||
1287 | * Get the RTC from the virtual service processor | ||
1288 | * This requires flowing LpEvents to the primary partition | ||
1289 | */ | ||
1290 | void iSeries_get_rtc_time(struct rtc_time *rtc_tm) | ||
1291 | { | ||
1292 | if (piranha_simulator) | ||
1293 | return; | ||
1294 | |||
1295 | mf_get_rtc(rtc_tm); | ||
1296 | rtc_tm->tm_mon--; | ||
1297 | } | ||
1298 | |||
1299 | /* | ||
1300 | * Set the RTC in the virtual service processor | ||
1301 | * This requires flowing LpEvents to the primary partition | ||
1302 | */ | ||
1303 | int iSeries_set_rtc_time(struct rtc_time *tm) | ||
1304 | { | ||
1305 | mf_set_rtc(tm); | ||
1306 | return 0; | ||
1307 | } | ||
1308 | |||
1309 | void iSeries_get_boot_time(struct rtc_time *tm) | ||
1310 | { | ||
1311 | if (piranha_simulator) | ||
1312 | return; | ||
1313 | |||
1314 | mf_get_boot_rtc(tm); | ||
1315 | tm->tm_mon -= 1; | ||
1316 | } | ||
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
new file mode 100644
index 000000000000..ad78c8581a5a
--- /dev/null
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -0,0 +1,1006 @@
1 | /* | ||
2 | * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com> | ||
3 | * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu> | ||
4 | * | ||
5 | * Description: | ||
6 | * Architecture- / platform-specific boot-time initialization code for | ||
7 | * the IBM iSeries LPAR. Adapted from original code by Grant Erickson and | ||
8 | * code by Gary Thomas, Cort Dougan <cort@fsmlabs.com>, and Dan Malek | ||
9 | * <dan@net4x.com>. | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version | ||
14 | * 2 of the License, or (at your option) any later version. | ||
15 | */ | ||
16 | |||
17 | #undef DEBUG | ||
18 | |||
19 | #include <linux/config.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/threads.h> | ||
22 | #include <linux/smp.h> | ||
23 | #include <linux/param.h> | ||
24 | #include <linux/string.h> | ||
25 | #include <linux/initrd.h> | ||
26 | #include <linux/seq_file.h> | ||
27 | #include <linux/kdev_t.h> | ||
28 | #include <linux/major.h> | ||
29 | #include <linux/root_dev.h> | ||
30 | |||
31 | #include <asm/processor.h> | ||
32 | #include <asm/machdep.h> | ||
33 | #include <asm/page.h> | ||
34 | #include <asm/mmu.h> | ||
35 | #include <asm/pgtable.h> | ||
36 | #include <asm/mmu_context.h> | ||
37 | #include <asm/cputable.h> | ||
38 | #include <asm/sections.h> | ||
39 | #include <asm/iommu.h> | ||
40 | #include <asm/firmware.h> | ||
41 | |||
42 | #include <asm/time.h> | ||
43 | #include <asm/naca.h> | ||
44 | #include <asm/paca.h> | ||
45 | #include <asm/cache.h> | ||
46 | #include <asm/sections.h> | ||
47 | #include <asm/abs_addr.h> | ||
48 | #include <asm/iSeries/HvCallHpt.h> | ||
49 | #include <asm/iSeries/HvLpConfig.h> | ||
50 | #include <asm/iSeries/HvCallEvent.h> | ||
51 | #include <asm/iSeries/HvCallSm.h> | ||
52 | #include <asm/iSeries/HvCallXm.h> | ||
53 | #include <asm/iSeries/ItLpQueue.h> | ||
54 | #include <asm/iSeries/IoHriMainStore.h> | ||
55 | #include <asm/iSeries/mf.h> | ||
56 | #include <asm/iSeries/HvLpEvent.h> | ||
57 | #include <asm/iSeries/iSeries_irq.h> | ||
58 | #include <asm/iSeries/IoHriProcessorVpd.h> | ||
59 | #include <asm/iSeries/ItVpdAreas.h> | ||
60 | #include <asm/iSeries/LparMap.h> | ||
61 | |||
62 | #include "setup.h" | ||
63 | |||
64 | extern void hvlog(char *fmt, ...); | ||
65 | |||
66 | #ifdef DEBUG | ||
67 | #define DBG(fmt...) hvlog(fmt) | ||
68 | #else | ||
69 | #define DBG(fmt...) | ||
70 | #endif | ||
71 | |||
72 | /* Function Prototypes */ | ||
73 | extern void ppcdbg_initialize(void); | ||
74 | |||
75 | static void build_iSeries_Memory_Map(void); | ||
76 | static int iseries_shared_idle(void); | ||
77 | static int iseries_dedicated_idle(void); | ||
78 | #ifdef CONFIG_PCI | ||
79 | extern void iSeries_pci_final_fixup(void); | ||
80 | #else | ||
81 | static void iSeries_pci_final_fixup(void) { } | ||
82 | #endif | ||
83 | |||
84 | /* Global Variables */ | ||
85 | int piranha_simulator; | ||
86 | |||
87 | extern int rd_size; /* Defined in drivers/block/rd.c */ | ||
88 | extern unsigned long klimit; | ||
89 | extern unsigned long embedded_sysmap_start; | ||
90 | extern unsigned long embedded_sysmap_end; | ||
91 | |||
92 | extern unsigned long iSeries_recal_tb; | ||
93 | extern unsigned long iSeries_recal_titan; | ||
94 | |||
95 | static int mf_initialized; | ||
96 | |||
97 | struct MemoryBlock { | ||
98 | unsigned long absStart; | ||
99 | unsigned long absEnd; | ||
100 | unsigned long logicalStart; | ||
101 | unsigned long logicalEnd; | ||
102 | }; | ||
103 | |||
104 | /* | ||
105 | * Process the main store VPD to determine where the holes in memory
106 | * are, fill in the array of block data, and return the number of
107 | * physical blocks.
108 | */ | ||
109 | static unsigned long iSeries_process_Condor_mainstore_vpd( | ||
110 | struct MemoryBlock *mb_array, unsigned long max_entries) | ||
111 | { | ||
112 | unsigned long holeFirstChunk, holeSizeChunks; | ||
113 | unsigned long numMemoryBlocks = 1; | ||
114 | struct IoHriMainStoreSegment4 *msVpd = | ||
115 | (struct IoHriMainStoreSegment4 *)xMsVpd; | ||
116 | unsigned long holeStart = msVpd->nonInterleavedBlocksStartAdr; | ||
117 | unsigned long holeEnd = msVpd->nonInterleavedBlocksEndAdr; | ||
118 | unsigned long holeSize = holeEnd - holeStart; | ||
119 | |||
120 | printk("Mainstore_VPD: Condor\n"); | ||
121 | /* | ||
122 | * Determine if absolute memory has any | ||
123 | * holes so that we can interpret the | ||
124 | * access map we get back from the hypervisor | ||
125 | * correctly. | ||
126 | */ | ||
127 | mb_array[0].logicalStart = 0; | ||
128 | mb_array[0].logicalEnd = 0x100000000; | ||
129 | mb_array[0].absStart = 0; | ||
130 | mb_array[0].absEnd = 0x100000000; | ||
131 | |||
132 | if (holeSize) { | ||
133 | numMemoryBlocks = 2; | ||
134 | holeStart = holeStart & 0x000fffffffffffff; | ||
135 | holeStart = addr_to_chunk(holeStart); | ||
136 | holeFirstChunk = holeStart; | ||
137 | holeSize = addr_to_chunk(holeSize); | ||
138 | holeSizeChunks = holeSize; | ||
139 | printk( "Main store hole: start chunk = %0lx, size = %0lx chunks\n", | ||
140 | holeFirstChunk, holeSizeChunks ); | ||
141 | mb_array[0].logicalEnd = holeFirstChunk; | ||
142 | mb_array[0].absEnd = holeFirstChunk; | ||
143 | mb_array[1].logicalStart = holeFirstChunk; | ||
144 | mb_array[1].logicalEnd = 0x100000000 - holeSizeChunks; | ||
145 | mb_array[1].absStart = holeFirstChunk + holeSizeChunks; | ||
146 | mb_array[1].absEnd = 0x100000000; | ||
147 | } | ||
148 | return numMemoryBlocks; | ||
149 | } | ||
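/*
 * Illustrative note: chunk arithmetic here and below is in units of
 * 256K, so (assuming that chunk size) addr_to_chunk(0x100000) is 4 and
 * a 1MB hole occupies 4 chunks in the hypervisor bitmap.
 */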
150 | |||
151 | #define MaxSegmentAreas 32 | ||
152 | #define MaxSegmentAdrRangeBlocks 128 | ||
153 | #define MaxAreaRangeBlocks 4 | ||
154 | |||
155 | static unsigned long iSeries_process_Regatta_mainstore_vpd( | ||
156 | struct MemoryBlock *mb_array, unsigned long max_entries) | ||
157 | { | ||
158 | struct IoHriMainStoreSegment5 *msVpdP = | ||
159 | (struct IoHriMainStoreSegment5 *)xMsVpd; | ||
160 | unsigned long numSegmentBlocks = 0; | ||
161 | u32 existsBits = msVpdP->msAreaExists; | ||
162 | unsigned long area_num; | ||
163 | |||
164 | printk("Mainstore_VPD: Regatta\n"); | ||
165 | |||
166 | for (area_num = 0; area_num < MaxSegmentAreas; ++area_num ) { | ||
167 | unsigned long numAreaBlocks; | ||
168 | struct IoHriMainStoreArea4 *currentArea; | ||
169 | |||
170 | if (existsBits & 0x80000000) { | ||
171 | unsigned long block_num; | ||
172 | |||
173 | currentArea = &msVpdP->msAreaArray[area_num]; | ||
174 | numAreaBlocks = currentArea->numAdrRangeBlocks; | ||
175 | printk("ms_vpd: processing area %2ld blocks=%ld", | ||
176 | area_num, numAreaBlocks); | ||
177 | for (block_num = 0; block_num < numAreaBlocks; | ||
178 | ++block_num ) { | ||
179 | /* Process an address range block */ | ||
180 | struct MemoryBlock tempBlock; | ||
181 | unsigned long i; | ||
182 | |||
183 | tempBlock.absStart = | ||
184 | (unsigned long)currentArea->xAdrRangeBlock[block_num].blockStart; | ||
185 | tempBlock.absEnd = | ||
186 | (unsigned long)currentArea->xAdrRangeBlock[block_num].blockEnd; | ||
187 | tempBlock.logicalStart = 0; | ||
188 | tempBlock.logicalEnd = 0; | ||
189 | printk("\n block %ld absStart=%016lx absEnd=%016lx", | ||
190 | block_num, tempBlock.absStart, | ||
191 | tempBlock.absEnd); | ||
192 | |||
193 | for (i = 0; i < numSegmentBlocks; ++i) { | ||
194 | if (mb_array[i].absStart == | ||
195 | tempBlock.absStart) | ||
196 | break; | ||
197 | } | ||
198 | if (i == numSegmentBlocks) { | ||
199 | if (numSegmentBlocks == max_entries) | ||
200 | panic("iSeries_process_mainstore_vpd: too many memory blocks"); | ||
201 | mb_array[numSegmentBlocks] = tempBlock; | ||
202 | ++numSegmentBlocks; | ||
203 | } else | ||
204 | printk(" (duplicate)"); | ||
205 | } | ||
206 | printk("\n"); | ||
207 | } | ||
208 | existsBits <<= 1; | ||
209 | } | ||
210 | /* Now sort the blocks found into ascending sequence */ | ||
211 | if (numSegmentBlocks > 1) { | ||
212 | unsigned long m, n; | ||
213 | |||
214 | for (m = 0; m < numSegmentBlocks - 1; ++m) { | ||
215 | for (n = numSegmentBlocks - 1; m < n; --n) { | ||
216 | if (mb_array[n].absStart < | ||
217 | mb_array[n-1].absStart) { | ||
218 | struct MemoryBlock tempBlock; | ||
219 | |||
220 | tempBlock = mb_array[n]; | ||
221 | mb_array[n] = mb_array[n-1]; | ||
222 | mb_array[n-1] = tempBlock; | ||
223 | } | ||
224 | } | ||
225 | } | ||
226 | } | ||
227 | /* | ||
228 | * Assign "logical" addresses to each block. These | ||
229 | * addresses correspond to the hypervisor "bitmap" space. | ||
230 | * Convert all addresses into units of 256K chunks. | ||
231 | */ | ||
232 | { | ||
233 | unsigned long i, nextBitmapAddress; | ||
234 | |||
235 | printk("ms_vpd: %ld sorted memory blocks\n", numSegmentBlocks); | ||
236 | nextBitmapAddress = 0; | ||
237 | for (i = 0; i < numSegmentBlocks; ++i) { | ||
238 | unsigned long length = mb_array[i].absEnd - | ||
239 | mb_array[i].absStart; | ||
240 | |||
241 | mb_array[i].logicalStart = nextBitmapAddress; | ||
242 | mb_array[i].logicalEnd = nextBitmapAddress + length; | ||
243 | nextBitmapAddress += length; | ||
244 | printk(" Bitmap range: %016lx - %016lx\n" | ||
245 | " Absolute range: %016lx - %016lx\n", | ||
246 | mb_array[i].logicalStart, | ||
247 | mb_array[i].logicalEnd, | ||
248 | mb_array[i].absStart, mb_array[i].absEnd); | ||
249 | mb_array[i].absStart = addr_to_chunk(mb_array[i].absStart & | ||
250 | 0x000fffffffffffff); | ||
251 | mb_array[i].absEnd = addr_to_chunk(mb_array[i].absEnd & | ||
252 | 0x000fffffffffffff); | ||
253 | mb_array[i].logicalStart = | ||
254 | addr_to_chunk(mb_array[i].logicalStart); | ||
255 | mb_array[i].logicalEnd = addr_to_chunk(mb_array[i].logicalEnd); | ||
256 | } | ||
257 | } | ||
258 | |||
259 | return numSegmentBlocks; | ||
260 | } | ||
261 | |||
262 | static unsigned long iSeries_process_mainstore_vpd(struct MemoryBlock *mb_array, | ||
263 | unsigned long max_entries) | ||
264 | { | ||
265 | unsigned long i; | ||
266 | unsigned long mem_blocks = 0; | ||
267 | |||
268 | if (cpu_has_feature(CPU_FTR_SLB)) | ||
269 | mem_blocks = iSeries_process_Regatta_mainstore_vpd(mb_array, | ||
270 | max_entries); | ||
271 | else | ||
272 | mem_blocks = iSeries_process_Condor_mainstore_vpd(mb_array, | ||
273 | max_entries); | ||
274 | |||
275 | printk("Mainstore_VPD: numMemoryBlocks = %ld \n", mem_blocks); | ||
276 | for (i = 0; i < mem_blocks; ++i) { | ||
277 | printk("Mainstore_VPD: block %3ld logical chunks %016lx - %016lx\n" | ||
278 | " abs chunks %016lx - %016lx\n", | ||
279 | i, mb_array[i].logicalStart, mb_array[i].logicalEnd, | ||
280 | mb_array[i].absStart, mb_array[i].absEnd); | ||
281 | } | ||
282 | return mem_blocks; | ||
283 | } | ||
284 | |||
285 | static void __init iSeries_get_cmdline(void) | ||
286 | { | ||
287 | char *p, *q; | ||
288 | |||
289 | /* copy the command line parameter from the primary VSP */ | ||
290 | HvCallEvent_dmaToSp(cmd_line, 2 * 64 * 1024, 256, | ||
291 | HvLpDma_Direction_RemoteToLocal); | ||
292 | |||
293 | p = cmd_line; | ||
294 | q = cmd_line + 255; | ||
295 | while(p < q) { | ||
296 | if (!*p || *p == '\n') | ||
297 | break; | ||
298 | ++p; | ||
299 | } | ||
300 | *p = 0; | ||
301 | } | ||
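The loop above NUL-terminates the DMA'd command line at the first NUL or newline inside the 256-byte window. A stand-alone sketch of the same scan, mirroring the loop in iSeries_get_cmdline(); the buffer contents in the usage example are made up:

```c
#include <stdio.h>
#include <string.h>

/* Terminate a fixed-size, possibly unterminated buffer at the first
 * NUL or newline, the way iSeries_get_cmdline() does. */
static void terminate_cmdline(char *buf, size_t size)
{
	char *p = buf;
	char *q = buf + size - 1;

	while (p < q) {
		if (!*p || *p == '\n')
			break;
		++p;
	}
	*p = 0;
}

int main(void)
{
	char buf[256] = "root=/dev/sda1 console=hvc0\njunk after newline";

	terminate_cmdline(buf, sizeof(buf));
	printf("%s\n", buf);	/* prints: root=/dev/sda1 console=hvc0 */
	return 0;
}
```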
302 | |||
303 | static void __init iSeries_init_early(void) | ||
304 | { | ||
305 | extern unsigned long memory_limit; | ||
306 | |||
307 | DBG(" -> iSeries_init_early()\n"); | ||
308 | |||
309 | ppc64_firmware_features = FW_FEATURE_ISERIES; | ||
310 | |||
311 | ppcdbg_initialize(); | ||
312 | |||
313 | ppc64_interrupt_controller = IC_ISERIES; | ||
314 | |||
315 | #if defined(CONFIG_BLK_DEV_INITRD) | ||
316 | /* | ||
317 | * If the init RAM disk has been configured and there is | ||
318 | * a non-zero starting address for it, set it up | ||
319 | */ | ||
320 | if (naca.xRamDisk) { | ||
321 | initrd_start = (unsigned long)__va(naca.xRamDisk); | ||
322 | initrd_end = initrd_start + naca.xRamDiskSize * PAGE_SIZE; | ||
323 | initrd_below_start_ok = 1; /* ramdisk in kernel space */ | ||
324 | ROOT_DEV = Root_RAM0; | ||
325 | if (((rd_size * 1024) / PAGE_SIZE) < naca.xRamDiskSize) | ||
326 | rd_size = (naca.xRamDiskSize * PAGE_SIZE) / 1024; | ||
327 | } else | ||
328 | #endif /* CONFIG_BLK_DEV_INITRD */ | ||
329 | { | ||
330 | /* ROOT_DEV = MKDEV(VIODASD_MAJOR, 1); */ | ||
331 | } | ||
332 | |||
333 | iSeries_recal_tb = get_tb(); | ||
334 | iSeries_recal_titan = HvCallXm_loadTod(); | ||
335 | |||
336 | /* | ||
337 | * Initialize the hash table management pointers | ||
338 | */ | ||
339 | hpte_init_iSeries(); | ||
340 | |||
341 | /* | ||
342 | * Initialize the DMA/TCE management | ||
343 | */ | ||
344 | iommu_init_early_iSeries(); | ||
345 | |||
346 | iSeries_get_cmdline(); | ||
347 | |||
348 | /* Save unparsed command line copy for /proc/cmdline */ | ||
349 | strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE); | ||
350 | |||
351 | /* Parse early parameters, in particular mem=x */ | ||
352 | parse_early_param(); | ||
353 | |||
354 | if (memory_limit) { | ||
355 | if (memory_limit < systemcfg->physicalMemorySize) | ||
356 | systemcfg->physicalMemorySize = memory_limit; | ||
357 | else { | ||
358 | printk("Ignoring mem=%lu >= ram_top.\n", memory_limit); | ||
359 | memory_limit = 0; | ||
360 | } | ||
361 | } | ||
362 | |||
363 | /* Initialize machine-dependency vectors */ | ||
364 | #ifdef CONFIG_SMP | ||
365 | smp_init_iSeries(); | ||
366 | #endif | ||
367 | if (itLpNaca.xPirEnvironMode == 0) | ||
368 | piranha_simulator = 1; | ||
369 | |||
370 | /* Associate Lp Event Queue 0 with processor 0 */ | ||
371 | HvCallEvent_setLpEventQueueInterruptProc(0, 0); | ||
372 | |||
373 | mf_init(); | ||
374 | mf_initialized = 1; | ||
375 | mb(); | ||
376 | |||
377 | /* If we were passed an initrd, set the ROOT_DEV properly if the values | ||
378 | * look sensible. If not, clear initrd reference. | ||
379 | */ | ||
380 | #ifdef CONFIG_BLK_DEV_INITRD | ||
381 | if (initrd_start >= KERNELBASE && initrd_end >= KERNELBASE && | ||
382 | initrd_end > initrd_start) | ||
383 | ROOT_DEV = Root_RAM0; | ||
384 | else | ||
385 | initrd_start = initrd_end = 0; | ||
386 | #endif /* CONFIG_BLK_DEV_INITRD */ | ||
387 | |||
388 | DBG(" <- iSeries_init_early()\n"); | ||
389 | } | ||
390 | |||
391 | struct mschunks_map mschunks_map = { | ||
392 | /* XXX We don't use these, but Piranha might need them. */ | ||
393 | .chunk_size = MSCHUNKS_CHUNK_SIZE, | ||
394 | .chunk_shift = MSCHUNKS_CHUNK_SHIFT, | ||
395 | .chunk_mask = MSCHUNKS_OFFSET_MASK, | ||
396 | }; | ||
397 | EXPORT_SYMBOL(mschunks_map); | ||
398 | |||
399 | void mschunks_alloc(unsigned long num_chunks) | ||
400 | { | ||
401 | klimit = _ALIGN(klimit, sizeof(u32)); | ||
402 | mschunks_map.mapping = (u32 *)klimit; | ||
403 | klimit += num_chunks * sizeof(u32); | ||
404 | mschunks_map.num_chunks = num_chunks; | ||
405 | } | ||
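mschunks_alloc() carves the mapping array out of the memory just past the kernel image by aligning and then bumping klimit. Below is a user-space caricature of that bump allocation; the ALIGN_UP macro, the heap array and all names are assumptions for illustration, not the kernel's own _ALIGN or klimit.

```c
#include <stdint.h>
#include <stdio.h>

/* Round x up to a multiple of a (a must be a power of two). */
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~(uintptr_t)((a) - 1))

static unsigned char heap[1 << 16];	/* stands in for memory above the kernel image */
static uintptr_t klimit_sketch;		/* stands in for klimit */

/* Bump-allocate an array of num_chunks u32 entries, aligned for u32 access,
 * the same way mschunks_alloc() carves its mapping table off klimit. */
static uint32_t *alloc_mapping(unsigned long num_chunks)
{
	uint32_t *mapping;

	klimit_sketch = ALIGN_UP(klimit_sketch, sizeof(uint32_t));
	mapping = (uint32_t *)klimit_sketch;
	klimit_sketch += num_chunks * sizeof(uint32_t);
	return mapping;
}

int main(void)
{
	uint32_t *map;

	klimit_sketch = (uintptr_t)heap;
	map = alloc_mapping(1024);		/* 1024 chunks cover 256MB */
	map[0] = 42;
	printf("mapping at %p, first entry %u\n", (void *)map, map[0]);
	return 0;
}
```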
406 | |||
407 | /* | ||
408 | * The iSeries may have very large memories (> 128 GB) and a partition | ||
409 | * may get memory in "chunks" that may be anywhere in the 2**52 real | ||
410 | * address space. The chunks are 256K in size. To map this to the | ||
411 | * memory model Linux expects, the AS/400 specific code builds a | ||
412 | * translation table to translate what Linux thinks are "physical" | ||
413 | * addresses to the actual real addresses. This allows us to make | ||
414 | * it appear to Linux that we have contiguous memory starting at | ||
415 | * physical address zero while in fact this could be far from the truth. | ||
416 | * To avoid confusion, I'll let the words physical and/or real address | ||
417 | * apply to the Linux addresses while I'll use "absolute address" to | ||
418 | * refer to the actual hardware real address. | ||
419 | * | ||
420 | * build_iSeries_Memory_Map gets information from the Hypervisor and | ||
421 | * looks at the Main Store VPD to determine the absolute addresses | ||
422 | * of the memory that has been assigned to our partition and builds | ||
423 | * a table used to translate Linux's physical addresses to these | ||
424 | * absolute addresses. Absolute addresses are needed when | ||
425 | * communicating with the hypervisor (e.g. to build HPT entries). | ||
426 | */ | ||
427 | |||
428 | static void __init build_iSeries_Memory_Map(void) | ||
429 | { | ||
430 | u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize; | ||
431 | u32 nextPhysChunk; | ||
432 | u32 hptFirstChunk, hptLastChunk, hptSizeChunks, hptSizePages; | ||
433 | u32 num_ptegs; | ||
434 | u32 totalChunks, moreChunks; | ||
435 | u32 currChunk, thisChunk, absChunk; | ||
436 | u32 currDword; | ||
437 | u32 chunkBit; | ||
438 | u64 map; | ||
439 | struct MemoryBlock mb[32]; | ||
440 | unsigned long numMemoryBlocks, curBlock; | ||
441 | |||
442 | /* Chunk size on iSeries is 256K bytes */ | ||
443 | totalChunks = (u32)HvLpConfig_getMsChunks(); | ||
444 | mschunks_alloc(totalChunks); | ||
445 | |||
446 | /* | ||
447 | * Get the absolute address of our load area | ||
448 | * and map it to physical address 0. | ||
449 | * This guarantees that the load area ends up at physical 0; | ||
450 | * otherwise it might not be returned by PLIC as the first | ||
451 | * chunks. | ||
452 | */ | ||
453 | |||
454 | loadAreaFirstChunk = (u32)addr_to_chunk(itLpNaca.xLoadAreaAddr); | ||
455 | loadAreaSize = itLpNaca.xLoadAreaChunks; | ||
456 | |||
457 | /* | ||
458 | * Only add the pages already mapped here; | ||
459 | * otherwise we might add the HPT pages. | ||
460 | * The rest of the pages of the load area | ||
461 | * aren't in the HPT yet and can still | ||
462 | * be assigned an arbitrary physical address. | ||
463 | */ | ||
464 | if ((loadAreaSize * 64) > HvPagesToMap) | ||
465 | loadAreaSize = HvPagesToMap / 64; | ||
466 | |||
467 | loadAreaLastChunk = loadAreaFirstChunk + loadAreaSize - 1; | ||
468 | |||
469 | /* | ||
470 | * TODO: Do we need to do something if the HPT is in the 64MB load area? | ||
471 | * This would be required if the itLpNaca.xLoadAreaChunks includes | ||
472 | * the HPT size. | ||
473 | */ | ||
474 | |||
475 | printk("Mapping load area - physical addr = 0000000000000000\n" | ||
476 | " absolute addr = %016lx\n", | ||
477 | chunk_to_addr(loadAreaFirstChunk)); | ||
478 | printk("Load area size %dK\n", loadAreaSize * 256); | ||
479 | |||
480 | for (nextPhysChunk = 0; nextPhysChunk < loadAreaSize; ++nextPhysChunk) | ||
481 | mschunks_map.mapping[nextPhysChunk] = | ||
482 | loadAreaFirstChunk + nextPhysChunk; | ||
483 | |||
484 | /* | ||
485 | * Get absolute address of our HPT and remember it so | ||
486 | * we won't map it to any physical address | ||
487 | */ | ||
488 | hptFirstChunk = (u32)addr_to_chunk(HvCallHpt_getHptAddress()); | ||
489 | hptSizePages = (u32)HvCallHpt_getHptPages(); | ||
490 | hptSizeChunks = hptSizePages >> (MSCHUNKS_CHUNK_SHIFT - PAGE_SHIFT); | ||
491 | hptLastChunk = hptFirstChunk + hptSizeChunks - 1; | ||
492 | |||
493 | printk("HPT absolute addr = %016lx, size = %dK\n", | ||
494 | chunk_to_addr(hptFirstChunk), hptSizeChunks * 256); | ||
495 | |||
496 | /* Fill in the hashed page table hash mask */ | ||
497 | num_ptegs = hptSizePages * | ||
498 | (PAGE_SIZE / (sizeof(hpte_t) * HPTES_PER_GROUP)); | ||
499 | htab_hash_mask = num_ptegs - 1; | ||
500 | |||
501 | /* | ||
502 | * The actual hashed page table is in the hypervisor; | ||
503 | * we have no direct access to it. | ||
504 | */ | ||
505 | htab_address = NULL; | ||
506 | |||
507 | /* | ||
508 | * Determine if absolute memory has any | ||
509 | * holes so that we can interpret the | ||
510 | * access map we get back from the hypervisor | ||
511 | * correctly. | ||
512 | */ | ||
513 | numMemoryBlocks = iSeries_process_mainstore_vpd(mb, 32); | ||
514 | |||
515 | /* | ||
516 | * Process the main store access map from the hypervisor | ||
517 | * to build up our physical -> absolute translation table | ||
518 | */ | ||
519 | curBlock = 0; | ||
520 | currChunk = 0; | ||
521 | currDword = 0; | ||
522 | moreChunks = totalChunks; | ||
523 | |||
524 | while (moreChunks) { | ||
525 | map = HvCallSm_get64BitsOfAccessMap(itLpNaca.xLpIndex, | ||
526 | currDword); | ||
527 | thisChunk = currChunk; | ||
528 | while (map) { | ||
529 | chunkBit = map >> 63; | ||
530 | map <<= 1; | ||
531 | if (chunkBit) { | ||
532 | --moreChunks; | ||
533 | while (thisChunk >= mb[curBlock].logicalEnd) { | ||
534 | ++curBlock; | ||
535 | if (curBlock >= numMemoryBlocks) | ||
536 | panic("out of memory blocks"); | ||
537 | } | ||
538 | if (thisChunk < mb[curBlock].logicalStart) | ||
539 | panic("memory block error"); | ||
540 | |||
541 | absChunk = mb[curBlock].absStart + | ||
542 | (thisChunk - mb[curBlock].logicalStart); | ||
543 | if (((absChunk < hptFirstChunk) || | ||
544 | (absChunk > hptLastChunk)) && | ||
545 | ((absChunk < loadAreaFirstChunk) || | ||
546 | (absChunk > loadAreaLastChunk))) { | ||
547 | mschunks_map.mapping[nextPhysChunk] = | ||
548 | absChunk; | ||
549 | ++nextPhysChunk; | ||
550 | } | ||
551 | } | ||
552 | ++thisChunk; | ||
553 | } | ||
554 | ++currDword; | ||
555 | currChunk += 64; | ||
556 | } | ||
557 | |||
558 | /* | ||
559 | * main store size (in chunks) is | ||
560 | * totalChunks - hptSizeChunks | ||
561 | * which should be equal to | ||
562 | * nextPhysChunk | ||
563 | */ | ||
564 | systemcfg->physicalMemorySize = chunk_to_addr(nextPhysChunk); | ||
565 | } | ||
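With mschunks_map.mapping[] filled in, translating a Linux "physical" address into an absolute (hardware real) address is just a table lookup on the chunk number plus the offset inside the chunk. The sketch below shows that lookup; the function name, the 18-bit chunk shift and the toy table are assumptions for illustration, not code from this patch.

```c
#include <stdint.h>

#define CHUNK_SHIFT	18			/* 256K chunks (assumed) */
#define CHUNK_MASK	((1UL << CHUNK_SHIFT) - 1)

struct mschunks_map_sketch {
	uint32_t *mapping;			/* physical chunk -> absolute chunk */
	unsigned long num_chunks;
};

/* Translate a Linux "physical" address into an absolute address by
 * looking up the chunk and re-attaching the in-chunk offset. */
static unsigned long phys_to_abs_sketch(const struct mschunks_map_sketch *m,
					unsigned long phys)
{
	unsigned long chunk = phys >> CHUNK_SHIFT;

	if (chunk >= m->num_chunks)
		return ~0UL;			/* out of range */

	return ((unsigned long)m->mapping[chunk] << CHUNK_SHIFT) |
	       (phys & CHUNK_MASK);
}

int main(void)
{
	uint32_t map[4] = { 100, 101, 7, 8 };	/* toy translation table */
	struct mschunks_map_sketch m = { map, 4 };

	/* physical 0x40010 is chunk 1, offset 0x10 -> absolute chunk 101 */
	return phys_to_abs_sketch(&m, 0x40010) == ((101UL << 18) | 0x10) ? 0 : 1;
}
```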
566 | |||
567 | /* | ||
568 | * Pick the shared or dedicated idle loop, set up the LP event queue | ||
569 | */ | ||
570 | static void __init iSeries_setup_arch(void) | ||
571 | { | ||
572 | unsigned procIx = get_paca()->lppaca.dyn_hv_phys_proc_index; | ||
573 | |||
574 | if (get_paca()->lppaca.shared_proc) { | ||
575 | ppc_md.idle_loop = iseries_shared_idle; | ||
576 | printk(KERN_INFO "Using shared processor idle loop\n"); | ||
577 | } else { | ||
578 | ppc_md.idle_loop = iseries_dedicated_idle; | ||
579 | printk(KERN_INFO "Using dedicated idle loop\n"); | ||
580 | } | ||
581 | |||
582 | /* Setup the Lp Event Queue */ | ||
583 | setup_hvlpevent_queue(); | ||
584 | |||
585 | printk("Max logical processors = %d\n", | ||
586 | itVpdAreas.xSlicMaxLogicalProcs); | ||
587 | printk("Max physical processors = %d\n", | ||
588 | itVpdAreas.xSlicMaxPhysicalProcs); | ||
589 | |||
590 | systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR; | ||
591 | printk("Processor version = %x\n", systemcfg->processor); | ||
592 | } | ||
593 | |||
594 | static void iSeries_get_cpuinfo(struct seq_file *m) | ||
595 | { | ||
596 | seq_printf(m, "machine\t\t: 64-bit iSeries Logical Partition\n"); | ||
597 | } | ||
598 | |||
599 | /* | ||
600 | * Stub: iSeries delivers external interrupts via LP events, so this | ||
601 | * simply returns -2 ("ignore this interrupt"). | ||
602 | */ | ||
603 | static int iSeries_get_irq(struct pt_regs *regs) | ||
604 | { | ||
605 | /* -2 means ignore this interrupt */ | ||
606 | return -2; | ||
607 | } | ||
608 | |||
609 | /* | ||
610 | * Restart the partition by asking the machine facilities (VSP) to reboot it. | ||
611 | */ | ||
612 | static void iSeries_restart(char *cmd) | ||
613 | { | ||
614 | mf_reboot(); | ||
615 | } | ||
616 | |||
617 | /* | ||
618 | * Power the partition off via the machine facilities (VSP). | ||
619 | */ | ||
620 | static void iSeries_power_off(void) | ||
621 | { | ||
622 | mf_power_off(); | ||
623 | } | ||
624 | |||
625 | /* | ||
626 | * Halt is treated the same as power off on iSeries. | ||
627 | */ | ||
628 | static void iSeries_halt(void) | ||
629 | { | ||
630 | mf_power_off(); | ||
631 | } | ||
632 | |||
633 | static void __init iSeries_progress(char * st, unsigned short code) | ||
634 | { | ||
635 | printk("Progress: [%04x] - %s\n", (unsigned)code, st); | ||
636 | if (!piranha_simulator && mf_initialized) { | ||
637 | if (code != 0xffff) | ||
638 | mf_display_progress(code); | ||
639 | else | ||
640 | mf_clear_src(); | ||
641 | } | ||
642 | } | ||
643 | |||
644 | static void __init iSeries_fixup_klimit(void) | ||
645 | { | ||
646 | /* | ||
647 | * Change klimit to take into account any ram disk | ||
648 | * that may be included | ||
649 | */ | ||
650 | if (naca.xRamDisk) | ||
651 | klimit = KERNELBASE + (u64)naca.xRamDisk + | ||
652 | (naca.xRamDiskSize * PAGE_SIZE); | ||
653 | else { | ||
654 | /* | ||
655 | * No ram disk was included - check whether there was an | ||
656 | * embedded system map and, if so, change klimit to take | ||
657 | * it into account | ||
658 | */ | ||
659 | if (embedded_sysmap_end) | ||
660 | klimit = KERNELBASE + ((embedded_sysmap_end + 4095) & | ||
661 | 0xfffffffffffff000); | ||
662 | } | ||
663 | } | ||
664 | |||
665 | static int __init iSeries_src_init(void) | ||
666 | { | ||
667 | /* clear the progress line */ | ||
668 | ppc_md.progress(" ", 0xffff); | ||
669 | return 0; | ||
670 | } | ||
671 | |||
672 | late_initcall(iSeries_src_init); | ||
673 | |||
674 | static inline void process_iSeries_events(void) | ||
675 | { | ||
676 | asm volatile ("li 0,0x5555; sc" : : : "r0", "r3"); | ||
677 | } | ||
678 | |||
679 | static void yield_shared_processor(void) | ||
680 | { | ||
681 | unsigned long tb; | ||
682 | |||
683 | HvCall_setEnabledInterrupts(HvCall_MaskIPI | | ||
684 | HvCall_MaskLpEvent | | ||
685 | HvCall_MaskLpProd | | ||
686 | HvCall_MaskTimeout); | ||
687 | |||
688 | tb = get_tb(); | ||
689 | /* Compute future tb value when yield should expire */ | ||
690 | HvCall_yieldProcessor(HvCall_YieldTimed, tb+tb_ticks_per_jiffy); | ||
691 | |||
692 | /* | ||
693 | * The decrementer stops during the yield. Force a fake decrementer | ||
694 | * here and let the timer_interrupt code sort out the actual time. | ||
695 | */ | ||
696 | get_paca()->lppaca.int_dword.fields.decr_int = 1; | ||
697 | process_iSeries_events(); | ||
698 | } | ||
699 | |||
700 | static int iseries_shared_idle(void) | ||
701 | { | ||
702 | while (1) { | ||
703 | while (!need_resched() && !hvlpevent_is_pending()) { | ||
704 | local_irq_disable(); | ||
705 | ppc64_runlatch_off(); | ||
706 | |||
707 | /* Recheck with irqs off */ | ||
708 | if (!need_resched() && !hvlpevent_is_pending()) | ||
709 | yield_shared_processor(); | ||
710 | |||
711 | HMT_medium(); | ||
712 | local_irq_enable(); | ||
713 | } | ||
714 | |||
715 | ppc64_runlatch_on(); | ||
716 | |||
717 | if (hvlpevent_is_pending()) | ||
718 | process_iSeries_events(); | ||
719 | |||
720 | schedule(); | ||
721 | } | ||
722 | |||
723 | return 0; | ||
724 | } | ||
725 | |||
726 | static int iseries_dedicated_idle(void) | ||
727 | { | ||
728 | long oldval; | ||
729 | |||
730 | while (1) { | ||
731 | oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED); | ||
732 | |||
733 | if (!oldval) { | ||
734 | set_thread_flag(TIF_POLLING_NRFLAG); | ||
735 | |||
736 | while (!need_resched()) { | ||
737 | ppc64_runlatch_off(); | ||
738 | HMT_low(); | ||
739 | |||
740 | if (hvlpevent_is_pending()) { | ||
741 | HMT_medium(); | ||
742 | ppc64_runlatch_on(); | ||
743 | process_iSeries_events(); | ||
744 | } | ||
745 | } | ||
746 | |||
747 | HMT_medium(); | ||
748 | clear_thread_flag(TIF_POLLING_NRFLAG); | ||
749 | } else { | ||
750 | set_need_resched(); | ||
751 | } | ||
752 | |||
753 | ppc64_runlatch_on(); | ||
754 | schedule(); | ||
755 | } | ||
756 | |||
757 | return 0; | ||
758 | } | ||
759 | |||
760 | #ifndef CONFIG_PCI | ||
761 | void __init iSeries_init_IRQ(void) { } | ||
762 | #endif | ||
763 | |||
764 | static int __init iseries_probe(int platform) | ||
765 | { | ||
766 | return PLATFORM_ISERIES_LPAR == platform; | ||
767 | } | ||
768 | |||
769 | struct machdep_calls __initdata iseries_md = { | ||
770 | .setup_arch = iSeries_setup_arch, | ||
771 | .get_cpuinfo = iSeries_get_cpuinfo, | ||
772 | .init_IRQ = iSeries_init_IRQ, | ||
773 | .get_irq = iSeries_get_irq, | ||
774 | .init_early = iSeries_init_early, | ||
775 | .pcibios_fixup = iSeries_pci_final_fixup, | ||
776 | .restart = iSeries_restart, | ||
777 | .power_off = iSeries_power_off, | ||
778 | .halt = iSeries_halt, | ||
779 | .get_boot_time = iSeries_get_boot_time, | ||
780 | .set_rtc_time = iSeries_set_rtc_time, | ||
781 | .get_rtc_time = iSeries_get_rtc_time, | ||
782 | .calibrate_decr = generic_calibrate_decr, | ||
783 | .progress = iSeries_progress, | ||
784 | .probe = iseries_probe, | ||
785 | /* XXX Implement enable_pmcs for iSeries */ | ||
786 | }; | ||
787 | |||
788 | struct blob { | ||
789 | unsigned char data[PAGE_SIZE]; | ||
790 | unsigned long next; | ||
791 | }; | ||
792 | |||
793 | struct iseries_flat_dt { | ||
794 | struct boot_param_header header; | ||
795 | u64 reserve_map[2]; | ||
796 | struct blob dt; | ||
797 | struct blob strings; | ||
798 | }; | ||
799 | |||
800 | struct iseries_flat_dt iseries_dt; | ||
801 | |||
802 | void dt_init(struct iseries_flat_dt *dt) | ||
803 | { | ||
804 | dt->header.off_mem_rsvmap = | ||
805 | offsetof(struct iseries_flat_dt, reserve_map); | ||
806 | dt->header.off_dt_struct = offsetof(struct iseries_flat_dt, dt); | ||
807 | dt->header.off_dt_strings = offsetof(struct iseries_flat_dt, strings); | ||
808 | dt->header.totalsize = sizeof(struct iseries_flat_dt); | ||
809 | dt->header.dt_strings_size = sizeof(struct blob); | ||
810 | |||
811 | /* There is no notion of hardware cpu id on iSeries */ | ||
812 | dt->header.boot_cpuid_phys = smp_processor_id(); | ||
813 | |||
814 | dt->dt.next = (unsigned long)&dt->dt.data; | ||
815 | dt->strings.next = (unsigned long)&dt->strings.data; | ||
816 | |||
817 | dt->header.magic = OF_DT_HEADER; | ||
818 | dt->header.version = 0x10; | ||
819 | dt->header.last_comp_version = 0x10; | ||
820 | |||
821 | dt->reserve_map[0] = 0; | ||
822 | dt->reserve_map[1] = 0; | ||
823 | } | ||
824 | |||
825 | void dt_check_blob(struct blob *b) | ||
826 | { | ||
827 | if (b->next >= (unsigned long)&b->next) { | ||
828 | DBG("Ran out of space in flat device tree blob!\n"); | ||
829 | BUG(); | ||
830 | } | ||
831 | } | ||
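dt_check_blob() leans on the struct layout for its bound: next is declared immediately after data[PAGE_SIZE], so the write cursor reaching the address of next means data[] is exhausted. A stand-alone sketch of the same trick (the page-size value and names here are assumptions):

```c
#include <stdio.h>

#define PAGE_SIZE_SKETCH 4096

struct blob_sketch {
	unsigned char data[PAGE_SIZE_SKETCH];
	unsigned long next;	/* write cursor, stored as an address */
};

/* The cursor is in-bounds while it still points inside data[]; once it
 * reaches &b->next, i.e. one past the end of data[], the blob is full. */
static int blob_full(const struct blob_sketch *b)
{
	return b->next >= (unsigned long)&b->next;
}

int main(void)
{
	struct blob_sketch b;

	b.next = (unsigned long)&b.data[0];
	printf("empty: full=%d\n", blob_full(&b));		/* 0 */
	b.next = (unsigned long)&b.data[PAGE_SIZE_SKETCH];
	printf("exhausted: full=%d\n", blob_full(&b));		/* 1 */
	return 0;
}
```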
832 | |||
833 | void dt_push_u32(struct iseries_flat_dt *dt, u32 value) | ||
834 | { | ||
835 | *((u32*)dt->dt.next) = value; | ||
836 | dt->dt.next += sizeof(u32); | ||
837 | |||
838 | dt_check_blob(&dt->dt); | ||
839 | } | ||
840 | |||
841 | void dt_push_u64(struct iseries_flat_dt *dt, u64 value) | ||
842 | { | ||
843 | *((u64*)dt->dt.next) = value; | ||
844 | dt->dt.next += sizeof(u64); | ||
845 | |||
846 | dt_check_blob(&dt->dt); | ||
847 | } | ||
848 | |||
849 | unsigned long dt_push_bytes(struct blob *blob, char *data, int len) | ||
850 | { | ||
851 | unsigned long start = blob->next - (unsigned long)blob->data; | ||
852 | |||
853 | memcpy((char *)blob->next, data, len); | ||
854 | blob->next = _ALIGN(blob->next + len, 4); | ||
855 | |||
856 | dt_check_blob(blob); | ||
857 | |||
858 | return start; | ||
859 | } | ||
860 | |||
861 | void dt_start_node(struct iseries_flat_dt *dt, char *name) | ||
862 | { | ||
863 | dt_push_u32(dt, OF_DT_BEGIN_NODE); | ||
864 | dt_push_bytes(&dt->dt, name, strlen(name) + 1); | ||
865 | } | ||
866 | |||
867 | #define dt_end_node(dt) dt_push_u32(dt, OF_DT_END_NODE) | ||
868 | |||
869 | void dt_prop(struct iseries_flat_dt *dt, char *name, char *data, int len) | ||
870 | { | ||
871 | unsigned long offset; | ||
872 | |||
873 | dt_push_u32(dt, OF_DT_PROP); | ||
874 | |||
875 | /* Length of the data */ | ||
876 | dt_push_u32(dt, len); | ||
877 | |||
878 | /* Put the property name in the string blob. */ | ||
879 | offset = dt_push_bytes(&dt->strings, name, strlen(name) + 1); | ||
880 | |||
881 | /* The offset of the property's name in the string blob. */ | ||
882 | dt_push_u32(dt, (u32)offset); | ||
883 | |||
884 | /* The actual data. */ | ||
885 | dt_push_bytes(&dt->dt, data, len); | ||
886 | } | ||
887 | |||
888 | void dt_prop_str(struct iseries_flat_dt *dt, char *name, char *data) | ||
889 | { | ||
890 | dt_prop(dt, name, data, strlen(data) + 1); /* + 1 for the NUL terminator */ | ||
891 | } | ||
892 | |||
893 | void dt_prop_u32(struct iseries_flat_dt *dt, char *name, u32 data) | ||
894 | { | ||
895 | dt_prop(dt, name, (char *)&data, sizeof(u32)); | ||
896 | } | ||
897 | |||
898 | void dt_prop_u64(struct iseries_flat_dt *dt, char *name, u64 data) | ||
899 | { | ||
900 | dt_prop(dt, name, (char *)&data, sizeof(u64)); | ||
901 | } | ||
902 | |||
903 | void dt_prop_u64_list(struct iseries_flat_dt *dt, char *name, u64 *data, int n) | ||
904 | { | ||
905 | dt_prop(dt, name, (char *)data, sizeof(u64) * n); | ||
906 | } | ||
907 | |||
908 | void dt_prop_empty(struct iseries_flat_dt *dt, char *name) | ||
909 | { | ||
910 | dt_prop(dt, name, NULL, 0); | ||
911 | } | ||
912 | |||
913 | void dt_cpus(struct iseries_flat_dt *dt) | ||
914 | { | ||
915 | unsigned char buf[32]; | ||
916 | unsigned char *p; | ||
917 | unsigned int i, index; | ||
918 | struct IoHriProcessorVpd *d; | ||
919 | |||
920 | /* yuck */ | ||
921 | snprintf(buf, 32, "PowerPC,%s", cur_cpu_spec->cpu_name); | ||
922 | p = strchr(buf, ' '); | ||
923 | if (!p) p = buf + strlen(buf); | ||
924 | |||
925 | dt_start_node(dt, "cpus"); | ||
926 | dt_prop_u32(dt, "#address-cells", 1); | ||
927 | dt_prop_u32(dt, "#size-cells", 0); | ||
928 | |||
929 | for (i = 0; i < NR_CPUS; i++) { | ||
930 | if (paca[i].lppaca.dyn_proc_status >= 2) | ||
931 | continue; | ||
932 | |||
933 | snprintf(p, 32 - (p - buf), "@%d", i); | ||
934 | dt_start_node(dt, buf); | ||
935 | |||
936 | dt_prop_str(dt, "device_type", "cpu"); | ||
937 | |||
938 | index = paca[i].lppaca.dyn_hv_phys_proc_index; | ||
939 | d = &xIoHriProcessorVpd[index]; | ||
940 | |||
941 | dt_prop_u32(dt, "i-cache-size", d->xInstCacheSize * 1024); | ||
942 | dt_prop_u32(dt, "i-cache-line-size", d->xInstCacheOperandSize); | ||
943 | |||
944 | dt_prop_u32(dt, "d-cache-size", d->xDataL1CacheSizeKB * 1024); | ||
945 | dt_prop_u32(dt, "d-cache-line-size", d->xDataCacheOperandSize); | ||
946 | |||
947 | /* magic conversions to Hz copied from old code */ | ||
948 | dt_prop_u32(dt, "clock-frequency", | ||
949 | ((1UL << 34) * 1000000) / d->xProcFreq); | ||
950 | dt_prop_u32(dt, "timebase-frequency", | ||
951 | ((1UL << 32) * 1000000) / d->xTimeBaseFreq); | ||
952 | |||
953 | dt_prop_u32(dt, "reg", i); | ||
954 | |||
955 | dt_end_node(dt); | ||
956 | } | ||
957 | |||
958 | dt_end_node(dt); | ||
959 | } | ||
960 | |||
961 | void build_flat_dt(struct iseries_flat_dt *dt) | ||
962 | { | ||
963 | u64 tmp[2]; | ||
964 | |||
965 | dt_init(dt); | ||
966 | |||
967 | dt_start_node(dt, ""); | ||
968 | |||
969 | dt_prop_u32(dt, "#address-cells", 2); | ||
970 | dt_prop_u32(dt, "#size-cells", 2); | ||
971 | |||
972 | /* /memory */ | ||
973 | dt_start_node(dt, "memory@0"); | ||
974 | dt_prop_str(dt, "name", "memory"); | ||
975 | dt_prop_str(dt, "device_type", "memory"); | ||
976 | tmp[0] = 0; | ||
977 | tmp[1] = systemcfg->physicalMemorySize; | ||
978 | dt_prop_u64_list(dt, "reg", tmp, 2); | ||
979 | dt_end_node(dt); | ||
980 | |||
981 | /* /chosen */ | ||
982 | dt_start_node(dt, "chosen"); | ||
983 | dt_prop_u32(dt, "linux,platform", PLATFORM_ISERIES_LPAR); | ||
984 | dt_end_node(dt); | ||
985 | |||
986 | dt_cpus(dt); | ||
987 | |||
988 | dt_end_node(dt); | ||
989 | |||
990 | dt_push_u32(dt, OF_DT_END); | ||
991 | } | ||
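The blob that build_flat_dt() produces is a flat stream of 4-byte tokens: OF_DT_BEGIN_NODE followed by a padded, NUL-terminated name, OF_DT_PROP records (length, name offset into the string blob, then padded data), OF_DT_END_NODE, and a final OF_DT_END. A minimal walker over such a stream is sketched below; the token values are the conventional flattened-device-tree ones and, like the helper itself, are assumptions rather than code from this patch.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Conventional flat-device-tree token values (assumed). */
#define DT_BEGIN_NODE	0x1
#define DT_END_NODE	0x2
#define DT_PROP		0x3
#define DT_END		0x9

#define ALIGN4(x)	(((x) + 3) & ~3UL)

/* Walk a structure blob, printing node names and property lengths.
 * 'strings' is the string blob holding the property names. */
static void walk_flat_dt(const unsigned char *blob, const char *strings)
{
	unsigned long off = 0;
	int depth = 0;

	for (;;) {
		uint32_t token;

		memcpy(&token, blob + off, 4);
		off += 4;

		if (token == DT_BEGIN_NODE) {
			const char *name = (const char *)blob + off;

			printf("%*snode \"%s\"\n", 2 * depth, "", name);
			off = ALIGN4(off + strlen(name) + 1);
			depth++;
		} else if (token == DT_PROP) {
			uint32_t len, name_off;

			memcpy(&len, blob + off, 4);
			memcpy(&name_off, blob + off + 4, 4);
			printf("%*sprop \"%s\" (%u bytes)\n", 2 * depth, "",
			       strings + name_off, len);
			off = ALIGN4(off + 8 + len);
		} else if (token == DT_END_NODE) {
			depth--;
		} else {
			break;		/* DT_END or anything unexpected */
		}
	}
}
```

Run against the dt and strings blobs of iseries_dt, a walker like this would list the root node, memory@0, chosen and the per-cpu nodes in the order they were pushed, assuming the same native endianness as the builder.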
992 | |||
993 | void * __init iSeries_early_setup(void) | ||
994 | { | ||
995 | iSeries_fixup_klimit(); | ||
996 | |||
997 | /* | ||
998 | * Initialize the table which translate Linux physical addresses to | ||
999 | * AS/400 absolute addresses | ||
1000 | */ | ||
1001 | build_iSeries_Memory_Map(); | ||
1002 | |||
1003 | build_flat_dt(&iseries_dt); | ||
1004 | |||
1005 | return (void *) __pa(&iseries_dt); | ||
1006 | } | ||
diff --git a/arch/powerpc/platforms/iseries/setup.h b/arch/powerpc/platforms/iseries/setup.h new file mode 100644 index 000000000000..6da89ae991ce --- /dev/null +++ b/arch/powerpc/platforms/iseries/setup.h | |||
@@ -0,0 +1,24 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2000 Mike Corrigan <mikejc@us.ibm.com> | ||
3 | * Copyright (c) 1999-2000 Grant Erickson <grant@lcse.umn.edu> | ||
4 | * | ||
5 | * Description: | ||
6 | * Architecture- / platform-specific boot-time initialization code for | ||
7 | * the IBM AS/400 LPAR. Adapted from original code by Grant Erickson and | ||
8 | * code by Gary Thomas, Cort Dougan <cort@cs.nmt.edu>, and Dan Malek | ||
9 | * <dan@netx4.com>. | ||
10 | * | ||
11 | * This program is free software; you can redistribute it and/or | ||
12 | * modify it under the terms of the GNU General Public License | ||
13 | * as published by the Free Software Foundation; either version | ||
14 | * 2 of the License, or (at your option) any later version. | ||
15 | */ | ||
16 | |||
17 | #ifndef __ISERIES_SETUP_H__ | ||
18 | #define __ISERIES_SETUP_H__ | ||
19 | |||
20 | extern void iSeries_get_boot_time(struct rtc_time *tm); | ||
21 | extern int iSeries_set_rtc_time(struct rtc_time *tm); | ||
22 | extern void iSeries_get_rtc_time(struct rtc_time *tm); | ||
23 | |||
24 | #endif /* __ISERIES_SETUP_H__ */ | ||