Diffstat (limited to 'arch/powerpc/platforms/iseries/mf.c')
-rw-r--r-- | arch/powerpc/platforms/iseries/mf.c | 1275
1 file changed, 0 insertions, 1275 deletions
diff --git a/arch/powerpc/platforms/iseries/mf.c b/arch/powerpc/platforms/iseries/mf.c
deleted file mode 100644
index 254c1fc3d8dd..000000000000
--- a/arch/powerpc/platforms/iseries/mf.c
+++ /dev/null
@@ -1,1275 +0,0 @@
1 | /* | ||
2 | * Copyright (C) 2001 Troy D. Armstrong IBM Corporation | ||
3 | * Copyright (C) 2004-2005 Stephen Rothwell IBM Corporation | ||
4 | * | ||
5 | * This module exists as an interface between a Linux secondary partition | ||
6 | * running on an iSeries and the primary partition's Virtual Service | ||
7 | * Processor (VSP) object. The VSP has final authority over powering on/off | ||
8 | * all partitions in the iSeries. It also provides miscellaneous low-level | ||
9 | * machine facility type operations. | ||
10 | * | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2 of the License, or | ||
15 | * (at your option) any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
25 | */ | ||
26 | |||
27 | #include <linux/types.h> | ||
28 | #include <linux/errno.h> | ||
29 | #include <linux/kernel.h> | ||
30 | #include <linux/init.h> | ||
31 | #include <linux/completion.h> | ||
32 | #include <linux/delay.h> | ||
33 | #include <linux/export.h> | ||
34 | #include <linux/proc_fs.h> | ||
35 | #include <linux/dma-mapping.h> | ||
36 | #include <linux/bcd.h> | ||
37 | #include <linux/rtc.h> | ||
38 | #include <linux/slab.h> | ||
39 | |||
40 | #include <asm/time.h> | ||
41 | #include <asm/uaccess.h> | ||
42 | #include <asm/paca.h> | ||
43 | #include <asm/abs_addr.h> | ||
44 | #include <asm/firmware.h> | ||
45 | #include <asm/iseries/mf.h> | ||
46 | #include <asm/iseries/hv_lp_config.h> | ||
47 | #include <asm/iseries/hv_lp_event.h> | ||
48 | #include <asm/iseries/it_lp_queue.h> | ||
49 | |||
50 | #include "setup.h" | ||
51 | |||
52 | static int mf_initialized; | ||
53 | |||
54 | /* | ||
55 | * This is the structure layout for the Machine Facilities LPAR event | ||
56 | * flows. | ||
57 | */ | ||
58 | struct vsp_cmd_data { | ||
59 | u64 token; | ||
60 | u16 cmd; | ||
61 | HvLpIndex lp_index; | ||
62 | u8 result_code; | ||
63 | u32 reserved; | ||
64 | union { | ||
65 | u64 state; /* GetStateOut */ | ||
66 | u64 ipl_type; /* GetIplTypeOut, Function02SelectIplTypeIn */ | ||
67 | u64 ipl_mode; /* GetIplModeOut, Function02SelectIplModeIn */ | ||
68 | u64 page[4]; /* GetSrcHistoryIn */ | ||
69 | u64 flag; /* GetAutoIplWhenPrimaryIplsOut, | ||
70 | SetAutoIplWhenPrimaryIplsIn, | ||
71 | WhiteButtonPowerOffIn, | ||
72 | Function08FastPowerOffIn, | ||
73 | IsSpcnRackPowerIncompleteOut */ | ||
74 | struct { | ||
75 | u64 token; | ||
76 | u64 address_type; | ||
77 | u64 side; | ||
78 | u32 length; | ||
79 | u32 offset; | ||
80 | } kern; /* SetKernelImageIn, GetKernelImageIn, | ||
81 | SetKernelCmdLineIn, GetKernelCmdLineIn */ | ||
82 | u32 length_out; /* GetKernelImageOut, GetKernelCmdLineOut */ | ||
83 | u8 reserved[80]; | ||
84 | } sub_data; | ||
85 | }; | ||
86 | |||
87 | struct vsp_rsp_data { | ||
88 | struct completion com; | ||
89 | struct vsp_cmd_data *response; | ||
90 | }; | ||
91 | |||
92 | struct alloc_data { | ||
93 | u16 size; | ||
94 | u16 type; | ||
95 | u32 count; | ||
96 | u16 reserved1; | ||
97 | u8 reserved2; | ||
98 | HvLpIndex target_lp; | ||
99 | }; | ||
100 | |||
101 | struct ce_msg_data; | ||
102 | |||
103 | typedef void (*ce_msg_comp_hdlr)(void *token, struct ce_msg_data *vsp_cmd_rsp); | ||
104 | |||
105 | struct ce_msg_comp_data { | ||
106 | ce_msg_comp_hdlr handler; | ||
107 | void *token; | ||
108 | }; | ||
109 | |||
110 | struct ce_msg_data { | ||
111 | u8 ce_msg[12]; | ||
112 | char reserved[4]; | ||
113 | struct ce_msg_comp_data *completion; | ||
114 | }; | ||
115 | |||
116 | struct io_mf_lp_event { | ||
117 | struct HvLpEvent hp_lp_event; | ||
118 | u16 subtype_result_code; | ||
119 | u16 reserved1; | ||
120 | u32 reserved2; | ||
121 | union { | ||
122 | struct alloc_data alloc; | ||
123 | struct ce_msg_data ce_msg; | ||
124 | struct vsp_cmd_data vsp_cmd; | ||
125 | } data; | ||
126 | }; | ||
127 | |||
128 | #define subtype_data(a, b, c, d) \ | ||
129 | (((a) << 24) + ((b) << 16) + ((c) << 8) + (d)) | ||
130 | |||
131 | /* | ||
132 | * All outgoing event traffic is kept on a FIFO queue. The first | ||
133 | * pointer points to the one that is outstanding, and all new | ||
134 | * requests get stuck on the end. Also, we keep a certain number of | ||
135 | * preallocated pending events so that we can operate very early in | ||
136 | * the boot up sequence (before kmalloc is ready). | ||
137 | */ | ||
138 | struct pending_event { | ||
139 | struct pending_event *next; | ||
140 | struct io_mf_lp_event event; | ||
141 | MFCompleteHandler hdlr; | ||
142 | char dma_data[72]; | ||
143 | unsigned dma_data_length; | ||
144 | unsigned remote_address; | ||
145 | }; | ||
146 | static spinlock_t pending_event_spinlock; | ||
147 | static struct pending_event *pending_event_head; | ||
148 | static struct pending_event *pending_event_tail; | ||
149 | static struct pending_event *pending_event_avail; | ||
150 | #define PENDING_EVENT_PREALLOC_LEN 16 | ||
151 | static struct pending_event pending_event_prealloc[PENDING_EVENT_PREALLOC_LEN]; | ||
152 | |||
153 | /* | ||
154 | * Put a pending event onto the available queue, so it can get reused. | ||
155 | * Attention! You must hold the pending_event_spinlock before calling! | ||
156 | */ | ||
157 | static void free_pending_event(struct pending_event *ev) | ||
158 | { | ||
159 | if (ev != NULL) { | ||
160 | ev->next = pending_event_avail; | ||
161 | pending_event_avail = ev; | ||
162 | } | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * Enqueue the outbound event onto the queue. If the queue was | ||
167 | * empty to begin with, we must also issue it via the Hypervisor | ||
168 | * interface. There is a section of code below that will touch | ||
169 | * the queue head pointer without the protection of the pending_event_spinlock. | ||
170 | * This is OK, because we know that nobody else will be modifying | ||
171 | * the head pointer when we do this. | ||
172 | */ | ||
173 | static int signal_event(struct pending_event *ev) | ||
174 | { | ||
175 | int rc = 0; | ||
176 | unsigned long flags; | ||
177 | int go = 1; | ||
178 | struct pending_event *ev1; | ||
179 | HvLpEvent_Rc hv_rc; | ||
180 | |||
181 | /* enqueue the event */ | ||
182 | if (ev != NULL) { | ||
183 | ev->next = NULL; | ||
184 | spin_lock_irqsave(&pending_event_spinlock, flags); | ||
185 | if (pending_event_head == NULL) | ||
186 | pending_event_head = ev; | ||
187 | else { | ||
188 | go = 0; | ||
189 | pending_event_tail->next = ev; | ||
190 | } | ||
191 | pending_event_tail = ev; | ||
192 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | ||
193 | } | ||
194 | |||
195 | /* send the event */ | ||
196 | while (go) { | ||
197 | go = 0; | ||
198 | |||
199 | /* any DMA data to send beforehand? */ | ||
200 | if (pending_event_head->dma_data_length > 0) | ||
201 | HvCallEvent_dmaToSp(pending_event_head->dma_data, | ||
202 | pending_event_head->remote_address, | ||
203 | pending_event_head->dma_data_length, | ||
204 | HvLpDma_Direction_LocalToRemote); | ||
205 | |||
206 | hv_rc = HvCallEvent_signalLpEvent( | ||
207 | &pending_event_head->event.hp_lp_event); | ||
208 | if (hv_rc != HvLpEvent_Rc_Good) { | ||
209 | printk(KERN_ERR "mf.c: HvCallEvent_signalLpEvent() " | ||
210 | "failed with %d\n", (int)hv_rc); | ||
211 | |||
212 | spin_lock_irqsave(&pending_event_spinlock, flags); | ||
213 | ev1 = pending_event_head; | ||
214 | pending_event_head = pending_event_head->next; | ||
215 | if (pending_event_head != NULL) | ||
216 | go = 1; | ||
217 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | ||
218 | |||
219 | if (ev1 == ev) | ||
220 | rc = -EIO; | ||
221 | else if (ev1->hdlr != NULL) | ||
222 | (*ev1->hdlr)((void *)ev1->event.hp_lp_event.xCorrelationToken, -EIO); | ||
223 | |||
224 | spin_lock_irqsave(&pending_event_spinlock, flags); | ||
225 | free_pending_event(ev1); | ||
226 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | ||
227 | } | ||
228 | } | ||
229 | |||
230 | return rc; | ||
231 | } | ||
232 | |||
233 | /* | ||
234 | * Allocate a new pending_event structure, and initialize it. | ||
235 | */ | ||
236 | static struct pending_event *new_pending_event(void) | ||
237 | { | ||
238 | struct pending_event *ev = NULL; | ||
239 | HvLpIndex primary_lp = HvLpConfig_getPrimaryLpIndex(); | ||
240 | unsigned long flags; | ||
241 | struct HvLpEvent *hev; | ||
242 | |||
243 | spin_lock_irqsave(&pending_event_spinlock, flags); | ||
244 | if (pending_event_avail != NULL) { | ||
245 | ev = pending_event_avail; | ||
246 | pending_event_avail = pending_event_avail->next; | ||
247 | } | ||
248 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | ||
249 | if (ev == NULL) { | ||
250 | ev = kmalloc(sizeof(struct pending_event), GFP_ATOMIC); | ||
251 | if (ev == NULL) { | ||
252 | printk(KERN_ERR "mf.c: unable to kmalloc %ld bytes\n", | ||
253 | sizeof(struct pending_event)); | ||
254 | return NULL; | ||
255 | } | ||
256 | } | ||
257 | memset(ev, 0, sizeof(struct pending_event)); | ||
258 | hev = &ev->event.hp_lp_event; | ||
259 | hev->flags = HV_LP_EVENT_VALID | HV_LP_EVENT_DO_ACK | HV_LP_EVENT_INT; | ||
260 | hev->xType = HvLpEvent_Type_MachineFac; | ||
261 | hev->xSourceLp = HvLpConfig_getLpIndex(); | ||
262 | hev->xTargetLp = primary_lp; | ||
263 | hev->xSizeMinus1 = sizeof(ev->event) - 1; | ||
264 | hev->xRc = HvLpEvent_Rc_Good; | ||
265 | hev->xSourceInstanceId = HvCallEvent_getSourceLpInstanceId(primary_lp, | ||
266 | HvLpEvent_Type_MachineFac); | ||
267 | hev->xTargetInstanceId = HvCallEvent_getTargetLpInstanceId(primary_lp, | ||
268 | HvLpEvent_Type_MachineFac); | ||
269 | |||
270 | return ev; | ||
271 | } | ||
272 | |||
273 | static int __maybe_unused | ||
274 | signal_vsp_instruction(struct vsp_cmd_data *vsp_cmd) | ||
275 | { | ||
276 | struct pending_event *ev = new_pending_event(); | ||
277 | int rc; | ||
278 | struct vsp_rsp_data response; | ||
279 | |||
280 | if (ev == NULL) | ||
281 | return -ENOMEM; | ||
282 | |||
283 | init_completion(&response.com); | ||
284 | response.response = vsp_cmd; | ||
285 | ev->event.hp_lp_event.xSubtype = 6; | ||
286 | ev->event.hp_lp_event.x.xSubtypeData = | ||
287 | subtype_data('M', 'F', 'V', 'I'); | ||
288 | ev->event.data.vsp_cmd.token = (u64)&response; | ||
289 | ev->event.data.vsp_cmd.cmd = vsp_cmd->cmd; | ||
290 | ev->event.data.vsp_cmd.lp_index = HvLpConfig_getLpIndex(); | ||
291 | ev->event.data.vsp_cmd.result_code = 0xFF; | ||
292 | ev->event.data.vsp_cmd.reserved = 0; | ||
293 | memcpy(&(ev->event.data.vsp_cmd.sub_data), | ||
294 | &(vsp_cmd->sub_data), sizeof(vsp_cmd->sub_data)); | ||
295 | mb(); | ||
296 | |||
297 | rc = signal_event(ev); | ||
298 | if (rc == 0) | ||
299 | wait_for_completion(&response.com); | ||
300 | return rc; | ||
301 | } | ||
302 | |||
303 | |||
304 | /* | ||
305 | * Send a 12-byte CE message to the primary partition VSP object | ||
306 | */ | ||
307 | static int signal_ce_msg(char *ce_msg, struct ce_msg_comp_data *completion) | ||
308 | { | ||
309 | struct pending_event *ev = new_pending_event(); | ||
310 | |||
311 | if (ev == NULL) | ||
312 | return -ENOMEM; | ||
313 | |||
314 | ev->event.hp_lp_event.xSubtype = 0; | ||
315 | ev->event.hp_lp_event.x.xSubtypeData = | ||
316 | subtype_data('M', 'F', 'C', 'E'); | ||
317 | memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12); | ||
318 | ev->event.data.ce_msg.completion = completion; | ||
319 | return signal_event(ev); | ||
320 | } | ||
321 | |||
322 | /* | ||
323 | * Send a 12-byte CE message (with no data) to the primary partition VSP object | ||
324 | */ | ||
325 | static int signal_ce_msg_simple(u8 ce_op, struct ce_msg_comp_data *completion) | ||
326 | { | ||
327 | u8 ce_msg[12]; | ||
328 | |||
329 | memset(ce_msg, 0, sizeof(ce_msg)); | ||
330 | ce_msg[3] = ce_op; | ||
331 | return signal_ce_msg(ce_msg, completion); | ||
332 | } | ||
333 | |||
334 | /* | ||
335 | * Send a 12-byte CE message and DMA data to the primary partition VSP object | ||
336 | */ | ||
337 | static int dma_and_signal_ce_msg(char *ce_msg, | ||
338 | struct ce_msg_comp_data *completion, void *dma_data, | ||
339 | unsigned dma_data_length, unsigned remote_address) | ||
340 | { | ||
341 | struct pending_event *ev = new_pending_event(); | ||
342 | |||
343 | if (ev == NULL) | ||
344 | return -ENOMEM; | ||
345 | |||
346 | ev->event.hp_lp_event.xSubtype = 0; | ||
347 | ev->event.hp_lp_event.x.xSubtypeData = | ||
348 | subtype_data('M', 'F', 'C', 'E'); | ||
349 | memcpy(ev->event.data.ce_msg.ce_msg, ce_msg, 12); | ||
350 | ev->event.data.ce_msg.completion = completion; | ||
351 | memcpy(ev->dma_data, dma_data, dma_data_length); | ||
352 | ev->dma_data_length = dma_data_length; | ||
353 | ev->remote_address = remote_address; | ||
354 | return signal_event(ev); | ||
355 | } | ||
356 | |||
357 | /* | ||
358 | * Initiate a nice (hopefully) shutdown of Linux. We simply are | ||
359 | * going to try and send the init process a SIGINT signal. If | ||
360 | * this fails (why?), we'll simply force it off in a not-so-nice | ||
361 | * manner. | ||
362 | */ | ||
363 | static int shutdown(void) | ||
364 | { | ||
365 | int rc = kill_cad_pid(SIGINT, 1); | ||
366 | |||
367 | if (rc) { | ||
368 | printk(KERN_ALERT "mf.c: SIGINT to init failed (%d), " | ||
369 | "hard shutdown commencing\n", rc); | ||
370 | mf_power_off(); | ||
371 | } else | ||
372 | printk(KERN_INFO "mf.c: init has been successfully notified " | ||
373 | "to proceed with shutdown\n"); | ||
374 | return rc; | ||
375 | } | ||
376 | |||
377 | /* | ||
378 | * The primary partition VSP object is sending us a new | ||
379 | * event flow. Handle it... | ||
380 | */ | ||
381 | static void handle_int(struct io_mf_lp_event *event) | ||
382 | { | ||
383 | struct ce_msg_data *ce_msg_data; | ||
384 | struct ce_msg_data *pce_msg_data; | ||
385 | unsigned long flags; | ||
386 | struct pending_event *pev; | ||
387 | |||
388 | /* ack the interrupt */ | ||
389 | event->hp_lp_event.xRc = HvLpEvent_Rc_Good; | ||
390 | HvCallEvent_ackLpEvent(&event->hp_lp_event); | ||
391 | |||
392 | /* process interrupt */ | ||
393 | switch (event->hp_lp_event.xSubtype) { | ||
394 | case 0: /* CE message */ | ||
395 | ce_msg_data = &event->data.ce_msg; | ||
396 | switch (ce_msg_data->ce_msg[3]) { | ||
397 | case 0x5B: /* power control notification */ | ||
398 | if ((ce_msg_data->ce_msg[5] & 0x20) != 0) { | ||
399 | printk(KERN_INFO "mf.c: Commencing partition shutdown\n"); | ||
400 | if (shutdown() == 0) | ||
401 | signal_ce_msg_simple(0xDB, NULL); | ||
402 | } | ||
403 | break; | ||
404 | case 0xC0: /* get time */ | ||
405 | spin_lock_irqsave(&pending_event_spinlock, flags); | ||
406 | pev = pending_event_head; | ||
407 | if (pev != NULL) | ||
408 | pending_event_head = pending_event_head->next; | ||
409 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | ||
410 | if (pev == NULL) | ||
411 | break; | ||
412 | pce_msg_data = &pev->event.data.ce_msg; | ||
413 | if (pce_msg_data->ce_msg[3] != 0x40) | ||
414 | break; | ||
415 | if (pce_msg_data->completion != NULL) { | ||
416 | ce_msg_comp_hdlr handler = | ||
417 | pce_msg_data->completion->handler; | ||
418 | void *token = pce_msg_data->completion->token; | ||
419 | |||
420 | if (handler != NULL) | ||
421 | (*handler)(token, ce_msg_data); | ||
422 | } | ||
423 | spin_lock_irqsave(&pending_event_spinlock, flags); | ||
424 | free_pending_event(pev); | ||
425 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | ||
426 | /* send next waiting event */ | ||
427 | if (pending_event_head != NULL) | ||
428 | signal_event(NULL); | ||
429 | break; | ||
430 | } | ||
431 | break; | ||
432 | case 1: /* IT sys shutdown */ | ||
433 | printk(KERN_INFO "mf.c: Commencing system shutdown\n"); | ||
434 | shutdown(); | ||
435 | break; | ||
436 | } | ||
437 | } | ||
438 | |||
439 | /* | ||
440 | * The primary partition VSP object is acknowledging the receipt | ||
441 | * of a flow we sent to it. If there are other flows queued | ||
442 | * up, we must send another one now... | ||
443 | */ | ||
444 | static void handle_ack(struct io_mf_lp_event *event) | ||
445 | { | ||
446 | unsigned long flags; | ||
447 | struct pending_event *two = NULL; | ||
448 | unsigned long free_it = 0; | ||
449 | struct ce_msg_data *ce_msg_data; | ||
450 | struct ce_msg_data *pce_msg_data; | ||
451 | struct vsp_rsp_data *rsp; | ||
452 | |||
453 | /* handle current event */ | ||
454 | if (pending_event_head == NULL) { | ||
455 | printk(KERN_ERR "mf.c: stack empty for receiving ack\n"); | ||
456 | return; | ||
457 | } | ||
458 | |||
459 | switch (event->hp_lp_event.xSubtype) { | ||
460 | case 0: /* CE msg */ | ||
461 | ce_msg_data = &event->data.ce_msg; | ||
462 | if (ce_msg_data->ce_msg[3] != 0x40) { | ||
463 | free_it = 1; | ||
464 | break; | ||
465 | } | ||
466 | if (ce_msg_data->ce_msg[2] == 0) | ||
467 | break; | ||
468 | free_it = 1; | ||
469 | pce_msg_data = &pending_event_head->event.data.ce_msg; | ||
470 | if (pce_msg_data->completion != NULL) { | ||
471 | ce_msg_comp_hdlr handler = | ||
472 | pce_msg_data->completion->handler; | ||
473 | void *token = pce_msg_data->completion->token; | ||
474 | |||
475 | if (handler != NULL) | ||
476 | (*handler)(token, ce_msg_data); | ||
477 | } | ||
478 | break; | ||
479 | case 4: /* allocate */ | ||
480 | case 5: /* deallocate */ | ||
481 | if (pending_event_head->hdlr != NULL) | ||
482 | (*pending_event_head->hdlr)((void *)event->hp_lp_event.xCorrelationToken, event->data.alloc.count); | ||
483 | free_it = 1; | ||
484 | break; | ||
485 | case 6: | ||
486 | free_it = 1; | ||
487 | rsp = (struct vsp_rsp_data *)event->data.vsp_cmd.token; | ||
488 | if (rsp == NULL) { | ||
489 | printk(KERN_ERR "mf.c: no rsp\n"); | ||
490 | break; | ||
491 | } | ||
492 | if (rsp->response != NULL) | ||
493 | memcpy(rsp->response, &event->data.vsp_cmd, | ||
494 | sizeof(event->data.vsp_cmd)); | ||
495 | complete(&rsp->com); | ||
496 | break; | ||
497 | } | ||
498 | |||
499 | /* remove from queue */ | ||
500 | spin_lock_irqsave(&pending_event_spinlock, flags); | ||
501 | if ((pending_event_head != NULL) && (free_it == 1)) { | ||
502 | struct pending_event *oldHead = pending_event_head; | ||
503 | |||
504 | pending_event_head = pending_event_head->next; | ||
505 | two = pending_event_head; | ||
506 | free_pending_event(oldHead); | ||
507 | } | ||
508 | spin_unlock_irqrestore(&pending_event_spinlock, flags); | ||
509 | |||
510 | /* send next waiting event */ | ||
511 | if (two != NULL) | ||
512 | signal_event(NULL); | ||
513 | } | ||
514 | |||
515 | /* | ||
516 | * This is the generic event handler we are registering with | ||
517 | * the Hypervisor. Ensure the flows are for us, and then | ||
518 | * parse each enough to know if it is an interrupt or an | ||
519 | * acknowledge. | ||
520 | */ | ||
521 | static void hv_handler(struct HvLpEvent *event) | ||
522 | { | ||
523 | if ((event != NULL) && (event->xType == HvLpEvent_Type_MachineFac)) { | ||
524 | if (hvlpevent_is_ack(event)) | ||
525 | handle_ack((struct io_mf_lp_event *)event); | ||
526 | else | ||
527 | handle_int((struct io_mf_lp_event *)event); | ||
528 | } else | ||
529 | printk(KERN_ERR "mf.c: alien event received\n"); | ||
530 | } | ||
531 | |||
532 | /* | ||
533 | * Global kernel interface to allocate and seed events into the | ||
534 | * Hypervisor. | ||
535 | */ | ||
536 | void mf_allocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type, | ||
537 | unsigned size, unsigned count, MFCompleteHandler hdlr, | ||
538 | void *user_token) | ||
539 | { | ||
540 | struct pending_event *ev = new_pending_event(); | ||
541 | int rc; | ||
542 | |||
543 | if (ev == NULL) { | ||
544 | rc = -ENOMEM; | ||
545 | } else { | ||
546 | ev->event.hp_lp_event.xSubtype = 4; | ||
547 | ev->event.hp_lp_event.xCorrelationToken = (u64)user_token; | ||
548 | ev->event.hp_lp_event.x.xSubtypeData = | ||
549 | subtype_data('M', 'F', 'M', 'A'); | ||
550 | ev->event.data.alloc.target_lp = target_lp; | ||
551 | ev->event.data.alloc.type = type; | ||
552 | ev->event.data.alloc.size = size; | ||
553 | ev->event.data.alloc.count = count; | ||
554 | ev->hdlr = hdlr; | ||
555 | rc = signal_event(ev); | ||
556 | } | ||
557 | if ((rc != 0) && (hdlr != NULL)) | ||
558 | (*hdlr)(user_token, rc); | ||
559 | } | ||
560 | EXPORT_SYMBOL(mf_allocate_lp_events); | ||
561 | |||
562 | /* | ||
563 | * Global kernel interface to unseed and deallocate events already in | ||
564 | * Hypervisor. | ||
565 | */ | ||
566 | void mf_deallocate_lp_events(HvLpIndex target_lp, HvLpEvent_Type type, | ||
567 | unsigned count, MFCompleteHandler hdlr, void *user_token) | ||
568 | { | ||
569 | struct pending_event *ev = new_pending_event(); | ||
570 | int rc; | ||
571 | |||
572 | if (ev == NULL) | ||
573 | rc = -ENOMEM; | ||
574 | else { | ||
575 | ev->event.hp_lp_event.xSubtype = 5; | ||
576 | ev->event.hp_lp_event.xCorrelationToken = (u64)user_token; | ||
577 | ev->event.hp_lp_event.x.xSubtypeData = | ||
578 | subtype_data('M', 'F', 'M', 'D'); | ||
579 | ev->event.data.alloc.target_lp = target_lp; | ||
580 | ev->event.data.alloc.type = type; | ||
581 | ev->event.data.alloc.count = count; | ||
582 | ev->hdlr = hdlr; | ||
583 | rc = signal_event(ev); | ||
584 | } | ||
585 | if ((rc != 0) && (hdlr != NULL)) | ||
586 | (*hdlr)(user_token, rc); | ||
587 | } | ||
588 | EXPORT_SYMBOL(mf_deallocate_lp_events); | ||
589 | |||
590 | /* | ||
591 | * Global kernel interface to tell the VSP object in the primary | ||
592 | * partition to power this partition off. | ||
593 | */ | ||
594 | void mf_power_off(void) | ||
595 | { | ||
596 | printk(KERN_INFO "mf.c: Down it goes...\n"); | ||
597 | signal_ce_msg_simple(0x4d, NULL); | ||
598 | for (;;) | ||
599 | ; | ||
600 | } | ||
601 | |||
602 | /* | ||
603 | * Global kernel interface to tell the VSP object in the primary | ||
604 | * partition to reboot this partition. | ||
605 | */ | ||
606 | void mf_reboot(char *cmd) | ||
607 | { | ||
608 | printk(KERN_INFO "mf.c: Preparing to bounce...\n"); | ||
609 | signal_ce_msg_simple(0x4e, NULL); | ||
610 | for (;;) | ||
611 | ; | ||
612 | } | ||
613 | |||
614 | /* | ||
615 | * Display a single word SRC onto the VSP control panel. | ||
616 | */ | ||
617 | void mf_display_src(u32 word) | ||
618 | { | ||
619 | u8 ce[12]; | ||
620 | |||
621 | memset(ce, 0, sizeof(ce)); | ||
622 | ce[3] = 0x4a; | ||
623 | ce[7] = 0x01; | ||
624 | ce[8] = word >> 24; | ||
625 | ce[9] = word >> 16; | ||
626 | ce[10] = word >> 8; | ||
627 | ce[11] = word; | ||
628 | signal_ce_msg(ce, NULL); | ||
629 | } | ||
630 | |||
631 | /* | ||
632 | * Display a single word SRC of the form "PROGXXXX" on the VSP control panel. | ||
633 | */ | ||
634 | static __init void mf_display_progress_src(u16 value) | ||
635 | { | ||
636 | u8 ce[12]; | ||
637 | u8 src[72]; | ||
638 | |||
639 | memcpy(ce, "\x00\x00\x04\x4A\x00\x00\x00\x48\x00\x00\x00\x00", 12); | ||
640 | memcpy(src, "\x01\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00" | ||
641 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
642 | "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00" | ||
643 | "\x00\x00\x00\x00PROGxxxx ", | ||
644 | 72); | ||
645 | src[6] = value >> 8; | ||
646 | src[7] = value & 255; | ||
647 | src[44] = "0123456789ABCDEF"[(value >> 12) & 15]; | ||
648 | src[45] = "0123456789ABCDEF"[(value >> 8) & 15]; | ||
649 | src[46] = "0123456789ABCDEF"[(value >> 4) & 15]; | ||
650 | src[47] = "0123456789ABCDEF"[value & 15]; | ||
651 | dma_and_signal_ce_msg(ce, NULL, src, sizeof(src), 9 * 64 * 1024); | ||
652 | } | ||
653 | |||
654 | /* | ||
655 | * Clear the VSP control panel. Used to "erase" an SRC that was | ||
656 | * previously displayed. | ||
657 | */ | ||
658 | static void mf_clear_src(void) | ||
659 | { | ||
660 | signal_ce_msg_simple(0x4b, NULL); | ||
661 | } | ||
662 | |||
663 | void __init mf_display_progress(u16 value) | ||
664 | { | ||
665 | if (!mf_initialized) | ||
666 | return; | ||
667 | |||
668 | if (0xFFFF == value) | ||
669 | mf_clear_src(); | ||
670 | else | ||
671 | mf_display_progress_src(value); | ||
672 | } | ||
673 | |||
674 | /* | ||
675 | * Initialization code here. | ||
676 | */ | ||
677 | void __init mf_init(void) | ||
678 | { | ||
679 | int i; | ||
680 | |||
681 | spin_lock_init(&pending_event_spinlock); | ||
682 | |||
683 | for (i = 0; i < PENDING_EVENT_PREALLOC_LEN; i++) | ||
684 | free_pending_event(&pending_event_prealloc[i]); | ||
685 | |||
686 | HvLpEvent_registerHandler(HvLpEvent_Type_MachineFac, &hv_handler); | ||
687 | |||
688 | /* virtual continue ack */ | ||
689 | signal_ce_msg_simple(0x57, NULL); | ||
690 | |||
691 | mf_initialized = 1; | ||
692 | mb(); | ||
693 | |||
694 | printk(KERN_NOTICE "mf.c: iSeries Linux LPAR Machine Facilities " | ||
695 | "initialized\n"); | ||
696 | } | ||
697 | |||
698 | struct rtc_time_data { | ||
699 | struct completion com; | ||
700 | struct ce_msg_data ce_msg; | ||
701 | int rc; | ||
702 | }; | ||
703 | |||
704 | static void get_rtc_time_complete(void *token, struct ce_msg_data *ce_msg) | ||
705 | { | ||
706 | struct rtc_time_data *rtc = token; | ||
707 | |||
708 | memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg)); | ||
709 | rtc->rc = 0; | ||
710 | complete(&rtc->com); | ||
711 | } | ||
712 | |||
713 | static int mf_set_rtc(struct rtc_time *tm) | ||
714 | { | ||
715 | char ce_time[12]; | ||
716 | u8 day, mon, hour, min, sec, y1, y2; | ||
717 | unsigned year; | ||
718 | |||
719 | year = 1900 + tm->tm_year; | ||
720 | y1 = year / 100; | ||
721 | y2 = year % 100; | ||
722 | |||
723 | sec = tm->tm_sec; | ||
724 | min = tm->tm_min; | ||
725 | hour = tm->tm_hour; | ||
726 | day = tm->tm_mday; | ||
727 | mon = tm->tm_mon + 1; | ||
728 | |||
729 | sec = bin2bcd(sec); | ||
730 | min = bin2bcd(min); | ||
731 | hour = bin2bcd(hour); | ||
732 | mon = bin2bcd(mon); | ||
733 | day = bin2bcd(day); | ||
734 | y1 = bin2bcd(y1); | ||
735 | y2 = bin2bcd(y2); | ||
736 | |||
737 | memset(ce_time, 0, sizeof(ce_time)); | ||
738 | ce_time[3] = 0x41; | ||
739 | ce_time[4] = y1; | ||
740 | ce_time[5] = y2; | ||
741 | ce_time[6] = sec; | ||
742 | ce_time[7] = min; | ||
743 | ce_time[8] = hour; | ||
744 | ce_time[10] = day; | ||
745 | ce_time[11] = mon; | ||
746 | |||
747 | return signal_ce_msg(ce_time, NULL); | ||
748 | } | ||
749 | |||
750 | static int rtc_set_tm(int rc, u8 *ce_msg, struct rtc_time *tm) | ||
751 | { | ||
752 | tm->tm_wday = 0; | ||
753 | tm->tm_yday = 0; | ||
754 | tm->tm_isdst = 0; | ||
755 | if (rc) { | ||
756 | tm->tm_sec = 0; | ||
757 | tm->tm_min = 0; | ||
758 | tm->tm_hour = 0; | ||
759 | tm->tm_mday = 15; | ||
760 | tm->tm_mon = 5; | ||
761 | tm->tm_year = 52; | ||
762 | return rc; | ||
763 | } | ||
764 | |||
765 | if ((ce_msg[2] == 0xa9) || | ||
766 | (ce_msg[2] == 0xaf)) { | ||
767 | /* TOD clock is not set */ | ||
768 | tm->tm_sec = 1; | ||
769 | tm->tm_min = 1; | ||
770 | tm->tm_hour = 1; | ||
771 | tm->tm_mday = 10; | ||
772 | tm->tm_mon = 8; | ||
773 | tm->tm_year = 71; | ||
774 | mf_set_rtc(tm); | ||
775 | } | ||
776 | { | ||
777 | u8 year = ce_msg[5]; | ||
778 | u8 sec = ce_msg[6]; | ||
779 | u8 min = ce_msg[7]; | ||
780 | u8 hour = ce_msg[8]; | ||
781 | u8 day = ce_msg[10]; | ||
782 | u8 mon = ce_msg[11]; | ||
783 | |||
784 | sec = bcd2bin(sec); | ||
785 | min = bcd2bin(min); | ||
786 | hour = bcd2bin(hour); | ||
787 | day = bcd2bin(day); | ||
788 | mon = bcd2bin(mon); | ||
789 | year = bcd2bin(year); | ||
790 | |||
791 | if (year <= 69) | ||
792 | year += 100; | ||
793 | |||
794 | tm->tm_sec = sec; | ||
795 | tm->tm_min = min; | ||
796 | tm->tm_hour = hour; | ||
797 | tm->tm_mday = day; | ||
798 | tm->tm_mon = mon; | ||
799 | tm->tm_year = year; | ||
800 | } | ||
801 | |||
802 | return 0; | ||
803 | } | ||
804 | |||
805 | static int mf_get_rtc(struct rtc_time *tm) | ||
806 | { | ||
807 | struct ce_msg_comp_data ce_complete; | ||
808 | struct rtc_time_data rtc_data; | ||
809 | int rc; | ||
810 | |||
811 | memset(&ce_complete, 0, sizeof(ce_complete)); | ||
812 | memset(&rtc_data, 0, sizeof(rtc_data)); | ||
813 | init_completion(&rtc_data.com); | ||
814 | ce_complete.handler = &get_rtc_time_complete; | ||
815 | ce_complete.token = &rtc_data; | ||
816 | rc = signal_ce_msg_simple(0x40, &ce_complete); | ||
817 | if (rc) | ||
818 | return rc; | ||
819 | wait_for_completion(&rtc_data.com); | ||
820 | return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm); | ||
821 | } | ||
822 | |||
823 | struct boot_rtc_time_data { | ||
824 | int busy; | ||
825 | struct ce_msg_data ce_msg; | ||
826 | int rc; | ||
827 | }; | ||
828 | |||
829 | static void get_boot_rtc_time_complete(void *token, struct ce_msg_data *ce_msg) | ||
830 | { | ||
831 | struct boot_rtc_time_data *rtc = token; | ||
832 | |||
833 | memcpy(&rtc->ce_msg, ce_msg, sizeof(rtc->ce_msg)); | ||
834 | rtc->rc = 0; | ||
835 | rtc->busy = 0; | ||
836 | } | ||
837 | |||
838 | static int mf_get_boot_rtc(struct rtc_time *tm) | ||
839 | { | ||
840 | struct ce_msg_comp_data ce_complete; | ||
841 | struct boot_rtc_time_data rtc_data; | ||
842 | int rc; | ||
843 | |||
844 | memset(&ce_complete, 0, sizeof(ce_complete)); | ||
845 | memset(&rtc_data, 0, sizeof(rtc_data)); | ||
846 | rtc_data.busy = 1; | ||
847 | ce_complete.handler = &get_boot_rtc_time_complete; | ||
848 | ce_complete.token = &rtc_data; | ||
849 | rc = signal_ce_msg_simple(0x40, &ce_complete); | ||
850 | if (rc) | ||
851 | return rc; | ||
852 | /* We need to poll here as we are not yet taking interrupts */ | ||
853 | while (rtc_data.busy) { | ||
854 | if (hvlpevent_is_pending()) | ||
855 | process_hvlpevents(); | ||
856 | } | ||
857 | return rtc_set_tm(rtc_data.rc, rtc_data.ce_msg.ce_msg, tm); | ||
858 | } | ||
859 | |||
860 | #ifdef CONFIG_PROC_FS | ||
861 | static int mf_cmdline_proc_show(struct seq_file *m, void *v) | ||
862 | { | ||
863 | char *page, *p; | ||
864 | struct vsp_cmd_data vsp_cmd; | ||
865 | int rc; | ||
866 | dma_addr_t dma_addr; | ||
867 | |||
868 | /* The HV appears to return no more than 256 bytes of command line */ | ||
869 | page = kmalloc(256, GFP_KERNEL); | ||
870 | if (!page) | ||
871 | return -ENOMEM; | ||
872 | |||
873 | dma_addr = iseries_hv_map(page, 256, DMA_FROM_DEVICE); | ||
874 | if (dma_addr == DMA_ERROR_CODE) { | ||
875 | kfree(page); | ||
876 | return -ENOMEM; | ||
877 | } | ||
878 | memset(page, 0, 256); | ||
879 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | ||
880 | vsp_cmd.cmd = 33; | ||
881 | vsp_cmd.sub_data.kern.token = dma_addr; | ||
882 | vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex; | ||
883 | vsp_cmd.sub_data.kern.side = (u64)m->private; | ||
884 | vsp_cmd.sub_data.kern.length = 256; | ||
885 | mb(); | ||
886 | rc = signal_vsp_instruction(&vsp_cmd); | ||
887 | iseries_hv_unmap(dma_addr, 256, DMA_FROM_DEVICE); | ||
888 | if (rc) { | ||
889 | kfree(page); | ||
890 | return rc; | ||
891 | } | ||
892 | if (vsp_cmd.result_code != 0) { | ||
893 | kfree(page); | ||
894 | return -ENOMEM; | ||
895 | } | ||
896 | p = page; | ||
897 | while (p - page < 256) { | ||
898 | if (*p == '\0' || *p == '\n') { | ||
899 | *p = '\n'; | ||
900 | break; | ||
901 | } | ||
902 | p++; | ||
903 | |||
904 | } | ||
905 | seq_write(m, page, p - page); | ||
906 | kfree(page); | ||
907 | return 0; | ||
908 | } | ||
909 | |||
910 | static int mf_cmdline_proc_open(struct inode *inode, struct file *file) | ||
911 | { | ||
912 | return single_open(file, mf_cmdline_proc_show, PDE(inode)->data); | ||
913 | } | ||
914 | |||
915 | #if 0 | ||
916 | static int mf_getVmlinuxChunk(char *buffer, int *size, int offset, u64 side) | ||
917 | { | ||
918 | struct vsp_cmd_data vsp_cmd; | ||
919 | int rc; | ||
920 | int len = *size; | ||
921 | dma_addr_t dma_addr; | ||
922 | |||
923 | dma_addr = iseries_hv_map(buffer, len, DMA_FROM_DEVICE); | ||
924 | memset(buffer, 0, len); | ||
925 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | ||
926 | vsp_cmd.cmd = 32; | ||
927 | vsp_cmd.sub_data.kern.token = dma_addr; | ||
928 | vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex; | ||
929 | vsp_cmd.sub_data.kern.side = side; | ||
930 | vsp_cmd.sub_data.kern.offset = offset; | ||
931 | vsp_cmd.sub_data.kern.length = len; | ||
932 | mb(); | ||
933 | rc = signal_vsp_instruction(&vsp_cmd); | ||
934 | if (rc == 0) { | ||
935 | if (vsp_cmd.result_code == 0) | ||
936 | *size = vsp_cmd.sub_data.length_out; | ||
937 | else | ||
938 | rc = -ENOMEM; | ||
939 | } | ||
940 | |||
941 | iseries_hv_unmap(dma_addr, len, DMA_FROM_DEVICE); | ||
942 | |||
943 | return rc; | ||
944 | } | ||
945 | |||
946 | static int proc_mf_dump_vmlinux(char *page, char **start, off_t off, | ||
947 | int count, int *eof, void *data) | ||
948 | { | ||
949 | int sizeToGet = count; | ||
950 | |||
951 | if (!capable(CAP_SYS_ADMIN)) | ||
952 | return -EACCES; | ||
953 | |||
954 | if (mf_getVmlinuxChunk(page, &sizeToGet, off, (u64)data) == 0) { | ||
955 | if (sizeToGet != 0) { | ||
956 | *start = page + off; | ||
957 | return sizeToGet; | ||
958 | } | ||
959 | *eof = 1; | ||
960 | return 0; | ||
961 | } | ||
962 | *eof = 1; | ||
963 | return 0; | ||
964 | } | ||
965 | #endif | ||
966 | |||
967 | static int mf_side_proc_show(struct seq_file *m, void *v) | ||
968 | { | ||
969 | char mf_current_side = ' '; | ||
970 | struct vsp_cmd_data vsp_cmd; | ||
971 | |||
972 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | ||
973 | vsp_cmd.cmd = 2; | ||
974 | vsp_cmd.sub_data.ipl_type = 0; | ||
975 | mb(); | ||
976 | |||
977 | if (signal_vsp_instruction(&vsp_cmd) == 0) { | ||
978 | if (vsp_cmd.result_code == 0) { | ||
979 | switch (vsp_cmd.sub_data.ipl_type) { | ||
980 | case 0: mf_current_side = 'A'; | ||
981 | break; | ||
982 | case 1: mf_current_side = 'B'; | ||
983 | break; | ||
984 | case 2: mf_current_side = 'C'; | ||
985 | break; | ||
986 | default: mf_current_side = 'D'; | ||
987 | break; | ||
988 | } | ||
989 | } | ||
990 | } | ||
991 | |||
992 | seq_printf(m, "%c\n", mf_current_side); | ||
993 | return 0; | ||
994 | } | ||
995 | |||
996 | static int mf_side_proc_open(struct inode *inode, struct file *file) | ||
997 | { | ||
998 | return single_open(file, mf_side_proc_show, NULL); | ||
999 | } | ||
1000 | |||
1001 | static ssize_t mf_side_proc_write(struct file *file, const char __user *buffer, | ||
1002 | size_t count, loff_t *pos) | ||
1003 | { | ||
1004 | char side; | ||
1005 | u64 newSide; | ||
1006 | struct vsp_cmd_data vsp_cmd; | ||
1007 | |||
1008 | if (!capable(CAP_SYS_ADMIN)) | ||
1009 | return -EACCES; | ||
1010 | |||
1011 | if (count == 0) | ||
1012 | return 0; | ||
1013 | |||
1014 | if (get_user(side, buffer)) | ||
1015 | return -EFAULT; | ||
1016 | |||
1017 | switch (side) { | ||
1018 | case 'A': newSide = 0; | ||
1019 | break; | ||
1020 | case 'B': newSide = 1; | ||
1021 | break; | ||
1022 | case 'C': newSide = 2; | ||
1023 | break; | ||
1024 | case 'D': newSide = 3; | ||
1025 | break; | ||
1026 | default: | ||
1027 | printk(KERN_ERR "mf_proc.c: proc_mf_change_side: invalid side\n"); | ||
1028 | return -EINVAL; | ||
1029 | } | ||
1030 | |||
1031 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | ||
1032 | vsp_cmd.sub_data.ipl_type = newSide; | ||
1033 | vsp_cmd.cmd = 10; | ||
1034 | |||
1035 | (void)signal_vsp_instruction(&vsp_cmd); | ||
1036 | |||
1037 | return count; | ||
1038 | } | ||
1039 | |||
1040 | static const struct file_operations mf_side_proc_fops = { | ||
1041 | .owner = THIS_MODULE, | ||
1042 | .open = mf_side_proc_open, | ||
1043 | .read = seq_read, | ||
1044 | .llseek = seq_lseek, | ||
1045 | .release = single_release, | ||
1046 | .write = mf_side_proc_write, | ||
1047 | }; | ||
1048 | |||
1049 | static int mf_src_proc_show(struct seq_file *m, void *v) | ||
1050 | { | ||
1051 | return 0; | ||
1052 | } | ||
1053 | |||
1054 | static int mf_src_proc_open(struct inode *inode, struct file *file) | ||
1055 | { | ||
1056 | return single_open(file, mf_src_proc_show, NULL); | ||
1057 | } | ||
1058 | |||
1059 | static ssize_t mf_src_proc_write(struct file *file, const char __user *buffer, | ||
1060 | size_t count, loff_t *pos) | ||
1061 | { | ||
1062 | char stkbuf[10]; | ||
1063 | |||
1064 | if (!capable(CAP_SYS_ADMIN)) | ||
1065 | return -EACCES; | ||
1066 | |||
1067 | if ((count < 4) && (count != 1)) { | ||
1068 | printk(KERN_ERR "mf_proc: invalid src\n"); | ||
1069 | return -EINVAL; | ||
1070 | } | ||
1071 | |||
1072 | if (count > (sizeof(stkbuf) - 1)) | ||
1073 | count = sizeof(stkbuf) - 1; | ||
1074 | if (copy_from_user(stkbuf, buffer, count)) | ||
1075 | return -EFAULT; | ||
1076 | |||
1077 | if ((count == 1) && (*stkbuf == '\0')) | ||
1078 | mf_clear_src(); | ||
1079 | else | ||
1080 | mf_display_src(*(u32 *)stkbuf); | ||
1081 | |||
1082 | return count; | ||
1083 | } | ||
1084 | |||
1085 | static const struct file_operations mf_src_proc_fops = { | ||
1086 | .owner = THIS_MODULE, | ||
1087 | .open = mf_src_proc_open, | ||
1088 | .read = seq_read, | ||
1089 | .llseek = seq_lseek, | ||
1090 | .release = single_release, | ||
1091 | .write = mf_src_proc_write, | ||
1092 | }; | ||
1093 | |||
1094 | static ssize_t mf_cmdline_proc_write(struct file *file, const char __user *buffer, | ||
1095 | size_t count, loff_t *pos) | ||
1096 | { | ||
1097 | void *data = PDE(file->f_path.dentry->d_inode)->data; | ||
1098 | struct vsp_cmd_data vsp_cmd; | ||
1099 | dma_addr_t dma_addr; | ||
1100 | char *page; | ||
1101 | int ret = -EACCES; | ||
1102 | |||
1103 | if (!capable(CAP_SYS_ADMIN)) | ||
1104 | goto out; | ||
1105 | |||
1106 | dma_addr = 0; | ||
1107 | page = iseries_hv_alloc(count, &dma_addr, GFP_ATOMIC); | ||
1108 | ret = -ENOMEM; | ||
1109 | if (page == NULL) | ||
1110 | goto out; | ||
1111 | |||
1112 | ret = -EFAULT; | ||
1113 | if (copy_from_user(page, buffer, count)) | ||
1114 | goto out_free; | ||
1115 | |||
1116 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | ||
1117 | vsp_cmd.cmd = 31; | ||
1118 | vsp_cmd.sub_data.kern.token = dma_addr; | ||
1119 | vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex; | ||
1120 | vsp_cmd.sub_data.kern.side = (u64)data; | ||
1121 | vsp_cmd.sub_data.kern.length = count; | ||
1122 | mb(); | ||
1123 | (void)signal_vsp_instruction(&vsp_cmd); | ||
1124 | ret = count; | ||
1125 | |||
1126 | out_free: | ||
1127 | iseries_hv_free(count, page, dma_addr); | ||
1128 | out: | ||
1129 | return ret; | ||
1130 | } | ||
1131 | |||
1132 | static const struct file_operations mf_cmdline_proc_fops = { | ||
1133 | .owner = THIS_MODULE, | ||
1134 | .open = mf_cmdline_proc_open, | ||
1135 | .read = seq_read, | ||
1136 | .llseek = seq_lseek, | ||
1137 | .release = single_release, | ||
1138 | .write = mf_cmdline_proc_write, | ||
1139 | }; | ||
1140 | |||
1141 | static ssize_t proc_mf_change_vmlinux(struct file *file, | ||
1142 | const char __user *buf, | ||
1143 | size_t count, loff_t *ppos) | ||
1144 | { | ||
1145 | struct proc_dir_entry *dp = PDE(file->f_path.dentry->d_inode); | ||
1146 | ssize_t rc; | ||
1147 | dma_addr_t dma_addr; | ||
1148 | char *page; | ||
1149 | struct vsp_cmd_data vsp_cmd; | ||
1150 | |||
1151 | rc = -EACCES; | ||
1152 | if (!capable(CAP_SYS_ADMIN)) | ||
1153 | goto out; | ||
1154 | |||
1155 | dma_addr = 0; | ||
1156 | page = iseries_hv_alloc(count, &dma_addr, GFP_ATOMIC); | ||
1157 | rc = -ENOMEM; | ||
1158 | if (page == NULL) { | ||
1159 | printk(KERN_ERR "mf.c: couldn't allocate memory to set vmlinux chunk\n"); | ||
1160 | goto out; | ||
1161 | } | ||
1162 | rc = -EFAULT; | ||
1163 | if (copy_from_user(page, buf, count)) | ||
1164 | goto out_free; | ||
1165 | |||
1166 | memset(&vsp_cmd, 0, sizeof(vsp_cmd)); | ||
1167 | vsp_cmd.cmd = 30; | ||
1168 | vsp_cmd.sub_data.kern.token = dma_addr; | ||
1169 | vsp_cmd.sub_data.kern.address_type = HvLpDma_AddressType_TceIndex; | ||
1170 | vsp_cmd.sub_data.kern.side = (u64)dp->data; | ||
1171 | vsp_cmd.sub_data.kern.offset = *ppos; | ||
1172 | vsp_cmd.sub_data.kern.length = count; | ||
1173 | mb(); | ||
1174 | rc = signal_vsp_instruction(&vsp_cmd); | ||
1175 | if (rc) | ||
1176 | goto out_free; | ||
1177 | rc = -ENOMEM; | ||
1178 | if (vsp_cmd.result_code != 0) | ||
1179 | goto out_free; | ||
1180 | |||
1181 | *ppos += count; | ||
1182 | rc = count; | ||
1183 | out_free: | ||
1184 | iseries_hv_free(count, page, dma_addr); | ||
1185 | out: | ||
1186 | return rc; | ||
1187 | } | ||
1188 | |||
1189 | static const struct file_operations proc_vmlinux_operations = { | ||
1190 | .write = proc_mf_change_vmlinux, | ||
1191 | .llseek = default_llseek, | ||
1192 | }; | ||
1193 | |||
1194 | static int __init mf_proc_init(void) | ||
1195 | { | ||
1196 | struct proc_dir_entry *mf_proc_root; | ||
1197 | struct proc_dir_entry *ent; | ||
1198 | struct proc_dir_entry *mf; | ||
1199 | char name[2]; | ||
1200 | int i; | ||
1201 | |||
1202 | if (!firmware_has_feature(FW_FEATURE_ISERIES)) | ||
1203 | return 0; | ||
1204 | |||
1205 | mf_proc_root = proc_mkdir("iSeries/mf", NULL); | ||
1206 | if (!mf_proc_root) | ||
1207 | return 1; | ||
1208 | |||
1209 | name[1] = '\0'; | ||
1210 | for (i = 0; i < 4; i++) { | ||
1211 | name[0] = 'A' + i; | ||
1212 | mf = proc_mkdir(name, mf_proc_root); | ||
1213 | if (!mf) | ||
1214 | return 1; | ||
1215 | |||
1216 | ent = proc_create_data("cmdline", S_IRUSR|S_IWUSR, mf, | ||
1217 | &mf_cmdline_proc_fops, (void *)(long)i); | ||
1218 | if (!ent) | ||
1219 | return 1; | ||
1220 | |||
1221 | if (i == 3) /* no vmlinux entry for 'D' */ | ||
1222 | continue; | ||
1223 | |||
1224 | ent = proc_create_data("vmlinux", S_IFREG|S_IWUSR, mf, | ||
1225 | &proc_vmlinux_operations, | ||
1226 | (void *)(long)i); | ||
1227 | if (!ent) | ||
1228 | return 1; | ||
1229 | } | ||
1230 | |||
1231 | ent = proc_create("side", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root, | ||
1232 | &mf_side_proc_fops); | ||
1233 | if (!ent) | ||
1234 | return 1; | ||
1235 | |||
1236 | ent = proc_create("src", S_IFREG|S_IRUSR|S_IWUSR, mf_proc_root, | ||
1237 | &mf_src_proc_fops); | ||
1238 | if (!ent) | ||
1239 | return 1; | ||
1240 | |||
1241 | return 0; | ||
1242 | } | ||
1243 | |||
1244 | __initcall(mf_proc_init); | ||
1245 | |||
1246 | #endif /* CONFIG_PROC_FS */ | ||
1247 | |||
1248 | /* | ||
1249 | * Get the RTC from the virtual service processor | ||
1250 | * This requires flowing LpEvents to the primary partition | ||
1251 | */ | ||
1252 | void iSeries_get_rtc_time(struct rtc_time *rtc_tm) | ||
1253 | { | ||
1254 | mf_get_rtc(rtc_tm); | ||
1255 | rtc_tm->tm_mon--; | ||
1256 | } | ||
1257 | |||
1258 | /* | ||
1259 | * Set the RTC in the virtual service processor | ||
1260 | * This requires flowing LpEvents to the primary partition | ||
1261 | */ | ||
1262 | int iSeries_set_rtc_time(struct rtc_time *tm) | ||
1263 | { | ||
1264 | mf_set_rtc(tm); | ||
1265 | return 0; | ||
1266 | } | ||
1267 | |||
1268 | unsigned long iSeries_get_boot_time(void) | ||
1269 | { | ||
1270 | struct rtc_time tm; | ||
1271 | |||
1272 | mf_get_boot_rtc(&tm); | ||
1273 | return mktime(tm.tm_year + 1900, tm.tm_mon, tm.tm_mday, | ||
1274 | tm.tm_hour, tm.tm_min, tm.tm_sec); | ||
1275 | } | ||