Diffstat (limited to 'arch/sparc64/kernel/ds.c')
-rw-r--r-- | arch/sparc64/kernel/ds.c | 1158
1 files changed, 1158 insertions, 0 deletions
diff --git a/arch/sparc64/kernel/ds.c b/arch/sparc64/kernel/ds.c
new file mode 100644
index 000000000000..1c587107cef0
--- /dev/null
+++ b/arch/sparc64/kernel/ds.c
@@ -0,0 +1,1158 @@
1 | /* ds.c: Domain Services driver for Logical Domains | ||
2 | * | ||
3 | * Copyright (C) 2007 David S. Miller <davem@davemloft.net> | ||
4 | */ | ||
5 | |||
6 | #include <linux/kernel.h> | ||
7 | #include <linux/module.h> | ||
8 | #include <linux/types.h> | ||
10 | #include <linux/string.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/delay.h> | ||
14 | #include <linux/mutex.h> | ||
15 | #include <linux/kthread.h> | ||
16 | #include <linux/cpu.h> | ||
17 | |||
18 | #include <asm/ldc.h> | ||
19 | #include <asm/vio.h> | ||
20 | #include <asm/power.h> | ||
21 | #include <asm/mdesc.h> | ||
22 | #include <asm/head.h> | ||
23 | #include <asm/irq.h> | ||
24 | |||
25 | #define DRV_MODULE_NAME "ds" | ||
26 | #define PFX DRV_MODULE_NAME ": " | ||
27 | #define DRV_MODULE_VERSION "1.0" | ||
28 | #define DRV_MODULE_RELDATE "Jul 11, 2007" | ||
29 | |||
30 | static char version[] __devinitdata = | ||
31 | DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; | ||
32 | MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); | ||
33 | MODULE_DESCRIPTION("Sun LDOM domain services driver"); | ||
34 | MODULE_LICENSE("GPL"); | ||
35 | MODULE_VERSION(DRV_MODULE_VERSION); | ||
36 | |||
37 | struct ds_msg_tag { | ||
38 | __u32 type; | ||
39 | #define DS_INIT_REQ 0x00 | ||
40 | #define DS_INIT_ACK 0x01 | ||
41 | #define DS_INIT_NACK 0x02 | ||
42 | #define DS_REG_REQ 0x03 | ||
43 | #define DS_REG_ACK 0x04 | ||
44 | #define DS_REG_NACK 0x05 | ||
45 | #define DS_UNREG_REQ 0x06 | ||
46 | #define DS_UNREG_ACK 0x07 | ||
47 | #define DS_UNREG_NACK 0x08 | ||
48 | #define DS_DATA 0x09 | ||
49 | #define DS_NACK 0x0a | ||
50 | |||
51 | __u32 len; | ||
52 | }; | ||
53 | |||
54 | /* Result codes */ | ||
55 | #define DS_OK 0x00 | ||
56 | #define DS_REG_VER_NACK 0x01 | ||
57 | #define DS_REG_DUP 0x02 | ||
58 | #define DS_INV_HDL 0x03 | ||
59 | #define DS_TYPE_UNKNOWN 0x04 | ||
60 | |||
61 | struct ds_version { | ||
62 | __u16 major; | ||
63 | __u16 minor; | ||
64 | }; | ||
65 | |||
66 | struct ds_ver_req { | ||
67 | struct ds_msg_tag tag; | ||
68 | struct ds_version ver; | ||
69 | }; | ||
70 | |||
71 | struct ds_ver_ack { | ||
72 | struct ds_msg_tag tag; | ||
73 | __u16 minor; | ||
74 | }; | ||
75 | |||
76 | struct ds_ver_nack { | ||
77 | struct ds_msg_tag tag; | ||
78 | __u16 major; | ||
79 | }; | ||
80 | |||
81 | struct ds_reg_req { | ||
82 | struct ds_msg_tag tag; | ||
83 | __u64 handle; | ||
84 | __u16 major; | ||
85 | __u16 minor; | ||
86 | char svc_id[0]; | ||
87 | }; | ||
88 | |||
89 | struct ds_reg_ack { | ||
90 | struct ds_msg_tag tag; | ||
91 | __u64 handle; | ||
92 | __u16 minor; | ||
93 | }; | ||
94 | |||
95 | struct ds_reg_nack { | ||
96 | struct ds_msg_tag tag; | ||
97 | __u64 handle; | ||
98 | __u16 major; | ||
99 | }; | ||
100 | |||
101 | struct ds_unreg_req { | ||
102 | struct ds_msg_tag tag; | ||
103 | __u64 handle; | ||
104 | }; | ||
105 | |||
106 | struct ds_unreg_ack { | ||
107 | struct ds_msg_tag tag; | ||
108 | __u64 handle; | ||
109 | }; | ||
110 | |||
111 | struct ds_unreg_nack { | ||
112 | struct ds_msg_tag tag; | ||
113 | __u64 handle; | ||
114 | }; | ||
115 | |||
116 | struct ds_data { | ||
117 | struct ds_msg_tag tag; | ||
118 | __u64 handle; | ||
119 | }; | ||
120 | |||
121 | struct ds_data_nack { | ||
122 | struct ds_msg_tag tag; | ||
123 | __u64 handle; | ||
124 | __u64 result; | ||
125 | }; | ||
126 | |||
127 | struct ds_cap_state { | ||
128 | __u64 handle; | ||
129 | |||
130 | void (*data)(struct ldc_channel *lp, | ||
131 | struct ds_cap_state *cp, | ||
132 | void *buf, int len); | ||
133 | |||
134 | const char *service_id; | ||
135 | |||
136 | u8 state; | ||
137 | #define CAP_STATE_UNKNOWN 0x00 | ||
138 | #define CAP_STATE_REG_SENT 0x01 | ||
139 | #define CAP_STATE_REGISTERED 0x02 | ||
140 | }; | ||
141 | |||
142 | static void md_update_data(struct ldc_channel *lp, struct ds_cap_state *cp, | ||
143 | void *buf, int len); | ||
144 | static void domain_shutdown_data(struct ldc_channel *lp, | ||
145 | struct ds_cap_state *cp, | ||
146 | void *buf, int len); | ||
147 | static void domain_panic_data(struct ldc_channel *lp, | ||
148 | struct ds_cap_state *cp, | ||
149 | void *buf, int len); | ||
150 | #ifdef CONFIG_HOTPLUG_CPU | ||
151 | static void dr_cpu_data(struct ldc_channel *lp, | ||
152 | struct ds_cap_state *cp, | ||
153 | void *buf, int len); | ||
154 | #endif | ||
155 | static void ds_pri_data(struct ldc_channel *lp, | ||
156 | struct ds_cap_state *cp, | ||
157 | void *buf, int len); | ||
158 | static void ds_var_data(struct ldc_channel *lp, | ||
159 | struct ds_cap_state *cp, | ||
160 | void *buf, int len); | ||
161 | |||
162 | struct ds_cap_state ds_states[] = { | ||
163 | { | ||
164 | .service_id = "md-update", | ||
165 | .data = md_update_data, | ||
166 | }, | ||
167 | { | ||
168 | .service_id = "domain-shutdown", | ||
169 | .data = domain_shutdown_data, | ||
170 | }, | ||
171 | { | ||
172 | .service_id = "domain-panic", | ||
173 | .data = domain_panic_data, | ||
174 | }, | ||
175 | #ifdef CONFIG_HOTPLUG_CPU | ||
176 | { | ||
177 | .service_id = "dr-cpu", | ||
178 | .data = dr_cpu_data, | ||
179 | }, | ||
180 | #endif | ||
181 | { | ||
182 | .service_id = "pri", | ||
183 | .data = ds_pri_data, | ||
184 | }, | ||
185 | { | ||
186 | .service_id = "var-config", | ||
187 | .data = ds_var_data, | ||
188 | }, | ||
189 | { | ||
190 | .service_id = "var-config-backup", | ||
191 | .data = ds_var_data, | ||
192 | }, | ||
193 | }; | ||
194 | |||
195 | static DEFINE_SPINLOCK(ds_lock); | ||
196 | |||
197 | struct ds_info { | ||
198 | struct ldc_channel *lp; | ||
199 | u8 hs_state; | ||
200 | #define DS_HS_START 0x01 | ||
201 | #define DS_HS_DONE 0x02 | ||
202 | |||
203 | void *rcv_buf; | ||
204 | int rcv_buf_len; | ||
205 | }; | ||
206 | |||
207 | static struct ds_info *ds_info; | ||
208 | |||
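/* Service handles encode the ds_states[] index in their upper 32 bits
 * (see ds_init() and register_services()), so lookup by handle is just
 * a bounds-checked array index.
 */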
209 | static struct ds_cap_state *find_cap(u64 handle) | ||
210 | { | ||
211 | unsigned int index = handle >> 32; | ||
212 | |||
213 | if (index >= ARRAY_SIZE(ds_states)) | ||
214 | return NULL; | ||
215 | return &ds_states[index]; | ||
216 | } | ||
217 | |||
218 | static struct ds_cap_state *find_cap_by_string(const char *name) | ||
219 | { | ||
220 | int i; | ||
221 | |||
222 | for (i = 0; i < ARRAY_SIZE(ds_states); i++) { | ||
223 | if (strcmp(ds_states[i].service_id, name)) | ||
224 | continue; | ||
225 | |||
226 | return &ds_states[i]; | ||
227 | } | ||
228 | return NULL; | ||
229 | } | ||
230 | |||
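/* Write one packet to the LDC channel, retrying for up to ~1ms while
 * ldc_write() returns -EAGAIN.  Callers serialize sends with ds_lock.
 */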
231 | static int ds_send(struct ldc_channel *lp, void *data, int len) | ||
232 | { | ||
233 | int err, limit = 1000; | ||
234 | |||
235 | err = -EINVAL; | ||
236 | while (limit-- > 0) { | ||
237 | err = ldc_write(lp, data, len); | ||
238 | if (!err || (err != -EAGAIN)) | ||
239 | break; | ||
240 | udelay(1); | ||
241 | } | ||
242 | |||
243 | return err; | ||
244 | } | ||
245 | |||
246 | struct ds_md_update_req { | ||
247 | __u64 req_num; | ||
248 | }; | ||
249 | |||
250 | struct ds_md_update_res { | ||
251 | __u64 req_num; | ||
252 | __u32 result; | ||
253 | }; | ||
254 | |||
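/* Acknowledge an "md-update" request, then re-read the machine
 * description via mdesc_update().
 */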
255 | static void md_update_data(struct ldc_channel *lp, | ||
256 | struct ds_cap_state *dp, | ||
257 | void *buf, int len) | ||
258 | { | ||
259 | struct ds_data *dpkt = buf; | ||
260 | struct ds_md_update_req *rp; | ||
261 | struct { | ||
262 | struct ds_data data; | ||
263 | struct ds_md_update_res res; | ||
264 | } pkt; | ||
265 | |||
266 | rp = (struct ds_md_update_req *) (dpkt + 1); | ||
267 | |||
268 | printk(KERN_INFO PFX "Machine description update.\n"); | ||
269 | |||
270 | memset(&pkt, 0, sizeof(pkt)); | ||
271 | pkt.data.tag.type = DS_DATA; | ||
272 | pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); | ||
273 | pkt.data.handle = dp->handle; | ||
274 | pkt.res.req_num = rp->req_num; | ||
275 | pkt.res.result = DS_OK; | ||
276 | |||
277 | ds_send(lp, &pkt, sizeof(pkt)); | ||
278 | |||
279 | mdesc_update(); | ||
280 | } | ||
281 | |||
282 | struct ds_shutdown_req { | ||
283 | __u64 req_num; | ||
284 | __u32 ms_delay; | ||
285 | }; | ||
286 | |||
287 | struct ds_shutdown_res { | ||
288 | __u64 req_num; | ||
289 | __u32 result; | ||
290 | char reason[1]; | ||
291 | }; | ||
292 | |||
293 | static void domain_shutdown_data(struct ldc_channel *lp, | ||
294 | struct ds_cap_state *dp, | ||
295 | void *buf, int len) | ||
296 | { | ||
297 | struct ds_data *dpkt = buf; | ||
298 | struct ds_shutdown_req *rp; | ||
299 | struct { | ||
300 | struct ds_data data; | ||
301 | struct ds_shutdown_res res; | ||
302 | } pkt; | ||
303 | |||
304 | rp = (struct ds_shutdown_req *) (dpkt + 1); | ||
305 | |||
306 | printk(KERN_ALERT PFX "Shutdown request from " | ||
307 | "LDOM manager received.\n"); | ||
308 | |||
309 | memset(&pkt, 0, sizeof(pkt)); | ||
310 | pkt.data.tag.type = DS_DATA; | ||
311 | pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); | ||
312 | pkt.data.handle = dp->handle; | ||
313 | pkt.res.req_num = rp->req_num; | ||
314 | pkt.res.result = DS_OK; | ||
315 | pkt.res.reason[0] = 0; | ||
316 | |||
317 | ds_send(lp, &pkt, sizeof(pkt)); | ||
318 | |||
319 | wake_up_powerd(); | ||
320 | } | ||
321 | |||
322 | struct ds_panic_req { | ||
323 | __u64 req_num; | ||
324 | }; | ||
325 | |||
326 | struct ds_panic_res { | ||
327 | __u64 req_num; | ||
328 | __u32 result; | ||
329 | char reason[1]; | ||
330 | }; | ||
331 | |||
332 | static void domain_panic_data(struct ldc_channel *lp, | ||
333 | struct ds_cap_state *dp, | ||
334 | void *buf, int len) | ||
335 | { | ||
336 | struct ds_data *dpkt = buf; | ||
337 | struct ds_panic_req *rp; | ||
338 | struct { | ||
339 | struct ds_data data; | ||
340 | struct ds_panic_res res; | ||
341 | } pkt; | ||
342 | |||
343 | rp = (struct ds_panic_req *) (dpkt + 1); | ||
344 | |||
345 | printk(KERN_ALERT PFX "Panic request from " | ||
346 | "LDOM manager received.\n"); | ||
347 | |||
348 | memset(&pkt, 0, sizeof(pkt)); | ||
349 | pkt.data.tag.type = DS_DATA; | ||
350 | pkt.data.tag.len = sizeof(pkt) - sizeof(struct ds_msg_tag); | ||
351 | pkt.data.handle = dp->handle; | ||
352 | pkt.res.req_num = rp->req_num; | ||
353 | pkt.res.result = DS_OK; | ||
354 | pkt.res.reason[0] = 0; | ||
355 | |||
356 | ds_send(lp, &pkt, sizeof(pkt)); | ||
357 | |||
358 | panic("PANIC requested by LDOM manager."); | ||
359 | } | ||
360 | |||
361 | #ifdef CONFIG_HOTPLUG_CPU | ||
362 | struct dr_cpu_tag { | ||
363 | __u64 req_num; | ||
364 | __u32 type; | ||
365 | #define DR_CPU_CONFIGURE 0x43 | ||
366 | #define DR_CPU_UNCONFIGURE 0x55 | ||
367 | #define DR_CPU_FORCE_UNCONFIGURE 0x46 | ||
368 | #define DR_CPU_STATUS 0x53 | ||
369 | |||
370 | /* Responses */ | ||
371 | #define DR_CPU_OK 0x6f | ||
372 | #define DR_CPU_ERROR 0x65 | ||
373 | |||
374 | __u32 num_records; | ||
375 | }; | ||
376 | |||
377 | struct dr_cpu_resp_entry { | ||
378 | __u32 cpu; | ||
379 | __u32 result; | ||
380 | #define DR_CPU_RES_OK 0x00 | ||
381 | #define DR_CPU_RES_FAILURE 0x01 | ||
382 | #define DR_CPU_RES_BLOCKED 0x02 | ||
383 | #define DR_CPU_RES_CPU_NOT_RESPONDING 0x03 | ||
384 | #define DR_CPU_RES_NOT_IN_MD 0x04 | ||
385 | |||
386 | __u32 stat; | ||
387 | #define DR_CPU_STAT_NOT_PRESENT 0x00 | ||
388 | #define DR_CPU_STAT_UNCONFIGURED 0x01 | ||
389 | #define DR_CPU_STAT_CONFIGURED 0x02 | ||
390 | |||
391 | __u32 str_off; | ||
392 | }; | ||
393 | |||
394 | /* DR cpu requests get queued onto the work list by the | ||
395 | * dr_cpu_data() callback. The list is protected by | ||
396 | * ds_lock, and processed in order by process_dr_cpu_list(). | ||
397 | */ | ||
398 | static LIST_HEAD(dr_cpu_work_list); | ||
399 | static DECLARE_WAIT_QUEUE_HEAD(dr_cpu_wait); | ||
400 | |||
401 | struct dr_cpu_queue_entry { | ||
402 | struct list_head list; | ||
403 | char req[0]; | ||
404 | }; | ||
405 | |||
406 | static void __dr_cpu_send_error(struct ds_cap_state *cp, struct ds_data *data) | ||
407 | { | ||
408 | struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); | ||
409 | struct ds_info *dp = ds_info; | ||
410 | struct { | ||
411 | struct ds_data data; | ||
412 | struct dr_cpu_tag tag; | ||
413 | } pkt; | ||
414 | int msg_len; | ||
415 | |||
416 | memset(&pkt, 0, sizeof(pkt)); | ||
417 | pkt.data.tag.type = DS_DATA; | ||
418 | pkt.data.handle = cp->handle; | ||
419 | pkt.tag.req_num = tag->req_num; | ||
420 | pkt.tag.type = DR_CPU_ERROR; | ||
421 | pkt.tag.num_records = 0; | ||
422 | |||
423 | msg_len = (sizeof(struct ds_data) + | ||
424 | sizeof(struct dr_cpu_tag)); | ||
425 | |||
426 | pkt.data.tag.len = msg_len - sizeof(struct ds_msg_tag); | ||
427 | |||
428 | ds_send(dp->lp, &pkt, msg_len); | ||
429 | } | ||
430 | |||
431 | static void dr_cpu_send_error(struct ds_cap_state *cp, struct ds_data *data) | ||
432 | { | ||
433 | unsigned long flags; | ||
434 | |||
435 | spin_lock_irqsave(&ds_lock, flags); | ||
436 | __dr_cpu_send_error(cp, data); | ||
437 | spin_unlock_irqrestore(&ds_lock, flags); | ||
438 | } | ||
439 | |||
440 | #define CPU_SENTINEL 0xffffffff | ||
441 | |||
442 | static void purge_dups(u32 *list, u32 num_ents) | ||
443 | { | ||
444 | unsigned int i; | ||
445 | |||
446 | for (i = 0; i < num_ents; i++) { | ||
447 | u32 cpu = list[i]; | ||
448 | unsigned int j; | ||
449 | |||
450 | if (cpu == CPU_SENTINEL) | ||
451 | continue; | ||
452 | |||
453 | for (j = i + 1; j < num_ents; j++) { | ||
454 | if (list[j] == cpu) | ||
455 | list[j] = CPU_SENTINEL; | ||
456 | } | ||
457 | } | ||
458 | } | ||
459 | |||
460 | static int dr_cpu_size_response(int ncpus) | ||
461 | { | ||
462 | return (sizeof(struct ds_data) + | ||
463 | sizeof(struct dr_cpu_tag) + | ||
464 | (sizeof(struct dr_cpu_resp_entry) * ncpus)); | ||
465 | } | ||
466 | |||
467 | static void dr_cpu_init_response(struct ds_data *resp, u64 req_num, | ||
468 | u64 handle, int resp_len, int ncpus, | ||
469 | cpumask_t *mask, u32 default_stat) | ||
470 | { | ||
471 | struct dr_cpu_resp_entry *ent; | ||
472 | struct dr_cpu_tag *tag; | ||
473 | int i, cpu; | ||
474 | |||
475 | tag = (struct dr_cpu_tag *) (resp + 1); | ||
476 | ent = (struct dr_cpu_resp_entry *) (tag + 1); | ||
477 | |||
478 | resp->tag.type = DS_DATA; | ||
479 | resp->tag.len = resp_len - sizeof(struct ds_msg_tag); | ||
480 | resp->handle = handle; | ||
481 | tag->req_num = req_num; | ||
482 | tag->type = DR_CPU_OK; | ||
483 | tag->num_records = ncpus; | ||
484 | |||
485 | i = 0; | ||
486 | for_each_cpu_mask(cpu, *mask) { | ||
487 | ent[i].cpu = cpu; | ||
488 | ent[i].result = DR_CPU_RES_OK; | ||
489 | ent[i].stat = default_stat; | ||
490 | i++; | ||
491 | } | ||
492 | BUG_ON(i != ncpus); | ||
493 | } | ||
494 | |||
495 | static void dr_cpu_mark(struct ds_data *resp, int cpu, int ncpus, | ||
496 | u32 res, u32 stat) | ||
497 | { | ||
498 | struct dr_cpu_resp_entry *ent; | ||
499 | struct dr_cpu_tag *tag; | ||
500 | int i; | ||
501 | |||
502 | tag = (struct dr_cpu_tag *) (resp + 1); | ||
503 | ent = (struct dr_cpu_resp_entry *) (tag + 1); | ||
504 | |||
505 | for (i = 0; i < ncpus; i++) { | ||
506 | if (ent[i].cpu != cpu) | ||
507 | continue; | ||
508 | ent[i].result = res; | ||
509 | ent[i].stat = stat; | ||
510 | break; | ||
511 | } | ||
512 | } | ||
513 | |||
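/* Bring each cpu in *mask online with cpu_up(), recording per-cpu
 * failures in the response packet, then send the response and
 * redistribute IRQs across the new cpu set.
 */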
514 | static int dr_cpu_configure(struct ds_cap_state *cp, u64 req_num, | ||
515 | cpumask_t *mask) | ||
516 | { | ||
517 | struct ds_data *resp; | ||
518 | int resp_len, ncpus, cpu; | ||
519 | unsigned long flags; | ||
520 | |||
521 | ncpus = cpus_weight(*mask); | ||
522 | resp_len = dr_cpu_size_response(ncpus); | ||
523 | resp = kzalloc(resp_len, GFP_KERNEL); | ||
524 | if (!resp) | ||
525 | return -ENOMEM; | ||
526 | |||
527 | dr_cpu_init_response(resp, req_num, cp->handle, | ||
528 | resp_len, ncpus, mask, | ||
529 | DR_CPU_STAT_CONFIGURED); | ||
530 | |||
531 | mdesc_fill_in_cpu_data(*mask); | ||
532 | |||
533 | for_each_cpu_mask(cpu, *mask) { | ||
534 | int err; | ||
535 | |||
536 | printk(KERN_INFO PFX "Starting cpu %d...\n", cpu); | ||
537 | err = cpu_up(cpu); | ||
538 | if (err) { | ||
539 | __u32 res = DR_CPU_RES_FAILURE; | ||
540 | __u32 stat = DR_CPU_STAT_UNCONFIGURED; | ||
541 | |||
542 | if (!cpu_present(cpu)) { | ||
543 | /* CPU not present in MD */ | ||
544 | res = DR_CPU_RES_NOT_IN_MD; | ||
545 | stat = DR_CPU_STAT_NOT_PRESENT; | ||
546 | } else if (err == -ENODEV) { | ||
547 | /* CPU did not call in successfully */ | ||
548 | res = DR_CPU_RES_CPU_NOT_RESPONDING; | ||
549 | } | ||
550 | |||
551 | printk(KERN_INFO PFX "CPU startup failed err=%d\n", | ||
552 | err); | ||
553 | dr_cpu_mark(resp, cpu, ncpus, res, stat); | ||
554 | } | ||
555 | } | ||
556 | |||
557 | spin_lock_irqsave(&ds_lock, flags); | ||
558 | ds_send(ds_info->lp, resp, resp_len); | ||
559 | spin_unlock_irqrestore(&ds_lock, flags); | ||
560 | |||
561 | kfree(resp); | ||
562 | |||
563 | /* Redistribute IRQs, taking into account the new cpus. */ | ||
564 | fixup_irqs(); | ||
565 | |||
566 | return 0; | ||
567 | } | ||
568 | |||
569 | static int dr_cpu_unconfigure(struct ds_cap_state *cp, u64 req_num, | ||
570 | cpumask_t *mask) | ||
571 | { | ||
572 | struct ds_data *resp; | ||
573 | int resp_len, ncpus, cpu; | ||
574 | unsigned long flags; | ||
575 | |||
576 | ncpus = cpus_weight(*mask); | ||
577 | resp_len = dr_cpu_size_response(ncpus); | ||
578 | resp = kzalloc(resp_len, GFP_KERNEL); | ||
579 | if (!resp) | ||
580 | return -ENOMEM; | ||
581 | |||
582 | dr_cpu_init_response(resp, req_num, cp->handle, | ||
583 | resp_len, ncpus, mask, | ||
584 | DR_CPU_STAT_UNCONFIGURED); | ||
585 | |||
586 | for_each_cpu_mask(cpu, *mask) { | ||
587 | int err; | ||
588 | |||
589 | printk(KERN_INFO PFX "CPU[%d]: Shutting down cpu %d...\n", | ||
590 | smp_processor_id(), cpu); | ||
591 | err = cpu_down(cpu); | ||
592 | if (err) | ||
593 | dr_cpu_mark(resp, cpu, ncpus, | ||
594 | DR_CPU_RES_FAILURE, | ||
595 | DR_CPU_STAT_CONFIGURED); | ||
596 | } | ||
597 | |||
598 | spin_lock_irqsave(&ds_lock, flags); | ||
599 | ds_send(ds_info->lp, resp, resp_len); | ||
600 | spin_unlock_irqrestore(&ds_lock, flags); | ||
601 | |||
602 | kfree(resp); | ||
603 | |||
604 | return 0; | ||
605 | } | ||
606 | |||
607 | static void process_dr_cpu_list(struct ds_cap_state *cp) | ||
608 | { | ||
609 | struct dr_cpu_queue_entry *qp, *tmp; | ||
610 | unsigned long flags; | ||
611 | LIST_HEAD(todo); | ||
612 | cpumask_t mask; | ||
613 | |||
614 | spin_lock_irqsave(&ds_lock, flags); | ||
615 | list_splice(&dr_cpu_work_list, &todo); | ||
616 | INIT_LIST_HEAD(&dr_cpu_work_list); | ||
617 | spin_unlock_irqrestore(&ds_lock, flags); | ||
618 | |||
619 | list_for_each_entry_safe(qp, tmp, &todo, list) { | ||
620 | struct ds_data *data = (struct ds_data *) qp->req; | ||
621 | struct dr_cpu_tag *tag = (struct dr_cpu_tag *) (data + 1); | ||
622 | u32 *cpu_list = (u32 *) (tag + 1); | ||
623 | u64 req_num = tag->req_num; | ||
624 | unsigned int i; | ||
625 | int err; | ||
626 | |||
627 | switch (tag->type) { | ||
628 | case DR_CPU_CONFIGURE: | ||
629 | case DR_CPU_UNCONFIGURE: | ||
630 | case DR_CPU_FORCE_UNCONFIGURE: | ||
631 | break; | ||
632 | |||
633 | default: | ||
634 | dr_cpu_send_error(cp, data); | ||
635 | goto next; | ||
636 | } | ||
637 | |||
638 | purge_dups(cpu_list, tag->num_records); | ||
639 | |||
640 | cpus_clear(mask); | ||
641 | for (i = 0; i < tag->num_records; i++) { | ||
642 | if (cpu_list[i] == CPU_SENTINEL) | ||
643 | continue; | ||
644 | |||
645 | if (cpu_list[i] < NR_CPUS) | ||
646 | cpu_set(cpu_list[i], mask); | ||
647 | } | ||
648 | |||
649 | if (tag->type == DR_CPU_CONFIGURE) | ||
650 | err = dr_cpu_configure(cp, req_num, &mask); | ||
651 | else | ||
652 | err = dr_cpu_unconfigure(cp, req_num, &mask); | ||
653 | |||
654 | if (err) | ||
655 | dr_cpu_send_error(cp, data); | ||
656 | |||
657 | next: | ||
658 | list_del(&qp->list); | ||
659 | kfree(qp); | ||
660 | } | ||
661 | } | ||
662 | |||
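/* Worker thread (started from ds_init()) that sleeps on dr_cpu_wait
 * and runs queued DR requests outside the LDC event path, since
 * cpu_up()/cpu_down() can sleep.
 */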
663 | static int dr_cpu_thread(void *__unused) | ||
664 | { | ||
665 | struct ds_cap_state *cp; | ||
666 | DEFINE_WAIT(wait); | ||
667 | |||
668 | cp = find_cap_by_string("dr-cpu"); | ||
669 | |||
670 | while (1) { | ||
671 | prepare_to_wait(&dr_cpu_wait, &wait, TASK_INTERRUPTIBLE); | ||
672 | if (list_empty(&dr_cpu_work_list)) | ||
673 | schedule(); | ||
674 | finish_wait(&dr_cpu_wait, &wait); | ||
675 | |||
676 | if (kthread_should_stop()) | ||
677 | break; | ||
678 | |||
679 | process_dr_cpu_list(cp); | ||
680 | } | ||
681 | |||
682 | return 0; | ||
683 | } | ||
684 | |||
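/* LDC data callback for "dr-cpu": copy the request onto the work list
 * (ds_lock is already held by ds_event()) and wake dr_cpu_thread(), or
 * send an error reply if the allocation fails.
 */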
685 | static void dr_cpu_data(struct ldc_channel *lp, | ||
686 | struct ds_cap_state *dp, | ||
687 | void *buf, int len) | ||
688 | { | ||
689 | struct dr_cpu_queue_entry *qp; | ||
690 | struct ds_data *dpkt = buf; | ||
691 | struct dr_cpu_tag *rp; | ||
692 | |||
693 | rp = (struct dr_cpu_tag *) (dpkt + 1); | ||
694 | |||
695 | qp = kmalloc(sizeof(struct dr_cpu_queue_entry) + len, GFP_ATOMIC); | ||
696 | if (!qp) { | ||
697 | struct ds_cap_state *cp; | ||
698 | |||
699 | cp = find_cap_by_string("dr-cpu"); | ||
700 | __dr_cpu_send_error(cp, dpkt); | ||
701 | } else { | ||
702 | memcpy(&qp->req, buf, len); | ||
703 | list_add_tail(&qp->list, &dr_cpu_work_list); | ||
704 | wake_up(&dr_cpu_wait); | ||
705 | } | ||
706 | } | ||
707 | #endif | ||
708 | |||
709 | struct ds_pri_msg { | ||
710 | __u64 req_num; | ||
711 | __u64 type; | ||
712 | #define DS_PRI_REQUEST 0x00 | ||
713 | #define DS_PRI_DATA 0x01 | ||
714 | #define DS_PRI_UPDATE 0x02 | ||
715 | }; | ||
716 | |||
717 | static void ds_pri_data(struct ldc_channel *lp, | ||
718 | struct ds_cap_state *dp, | ||
719 | void *buf, int len) | ||
720 | { | ||
721 | struct ds_data *dpkt = buf; | ||
722 | struct ds_pri_msg *rp; | ||
723 | |||
724 | rp = (struct ds_pri_msg *) (dpkt + 1); | ||
725 | |||
726 | printk(KERN_INFO PFX "PRI REQ [%lx:%lx], len=%d\n", | ||
727 | rp->req_num, rp->type, len); | ||
728 | } | ||
729 | |||
730 | struct ds_var_hdr { | ||
731 | __u32 type; | ||
732 | #define DS_VAR_SET_REQ 0x00 | ||
733 | #define DS_VAR_DELETE_REQ 0x01 | ||
734 | #define DS_VAR_SET_RESP 0x02 | ||
735 | #define DS_VAR_DELETE_RESP 0x03 | ||
736 | }; | ||
737 | |||
738 | struct ds_var_set_msg { | ||
739 | struct ds_var_hdr hdr; | ||
740 | char name_and_value[0]; | ||
741 | }; | ||
742 | |||
743 | struct ds_var_delete_msg { | ||
744 | struct ds_var_hdr hdr; | ||
745 | char name[0]; | ||
746 | }; | ||
747 | |||
748 | struct ds_var_resp { | ||
749 | struct ds_var_hdr hdr; | ||
750 | __u32 result; | ||
751 | #define DS_VAR_SUCCESS 0x00 | ||
752 | #define DS_VAR_NO_SPACE 0x01 | ||
753 | #define DS_VAR_INVALID_VAR 0x02 | ||
754 | #define DS_VAR_INVALID_VAL 0x03 | ||
755 | #define DS_VAR_NOT_PRESENT 0x04 | ||
756 | }; | ||
757 | |||
758 | static DEFINE_MUTEX(ds_var_mutex); | ||
759 | static int ds_var_doorbell; | ||
760 | static int ds_var_response; | ||
761 | |||
762 | static void ds_var_data(struct ldc_channel *lp, | ||
763 | struct ds_cap_state *dp, | ||
764 | void *buf, int len) | ||
765 | { | ||
766 | struct ds_data *dpkt = buf; | ||
767 | struct ds_var_resp *rp; | ||
768 | |||
769 | rp = (struct ds_var_resp *) (dpkt + 1); | ||
770 | |||
771 | if (rp->hdr.type != DS_VAR_SET_RESP && | ||
772 | rp->hdr.type != DS_VAR_DELETE_RESP) | ||
773 | return; | ||
774 | |||
775 | ds_var_response = rp->result; | ||
776 | wmb(); | ||
777 | ds_var_doorbell = 1; | ||
778 | } | ||
779 | |||
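/* Send a variable-set request over the "var-config" service (or the
 * "var-config-backup" fallback) and busy-wait up to ~100ms for
 * ds_var_data() to post the result via ds_var_doorbell/ds_var_response.
 */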
780 | void ldom_set_var(const char *var, const char *value) | ||
781 | { | ||
782 | struct ds_info *dp = ds_info; | ||
783 | struct ds_cap_state *cp; | ||
784 | |||
785 | cp = find_cap_by_string("var-config"); | ||
786 | if (cp->state != CAP_STATE_REGISTERED) | ||
787 | cp = find_cap_by_string("var-config-backup"); | ||
788 | |||
789 | if (cp->state == CAP_STATE_REGISTERED) { | ||
790 | union { | ||
791 | struct { | ||
792 | struct ds_data data; | ||
793 | struct ds_var_set_msg msg; | ||
794 | } header; | ||
795 | char all[512]; | ||
796 | } pkt; | ||
797 | unsigned long flags; | ||
798 | char *base, *p; | ||
799 | int msg_len, loops; | ||
800 | |||
801 | memset(&pkt, 0, sizeof(pkt)); | ||
802 | pkt.header.data.tag.type = DS_DATA; | ||
803 | pkt.header.data.handle = cp->handle; | ||
804 | pkt.header.msg.hdr.type = DS_VAR_SET_REQ; | ||
805 | base = p = &pkt.header.msg.name_and_value[0]; | ||
806 | strcpy(p, var); | ||
807 | p += strlen(var) + 1; | ||
808 | strcpy(p, value); | ||
809 | p += strlen(value) + 1; | ||
810 | |||
811 | msg_len = (sizeof(struct ds_data) + | ||
812 | sizeof(struct ds_var_set_msg) + | ||
813 | (p - base)); | ||
814 | msg_len = (msg_len + 3) & ~3; | ||
815 | pkt.header.data.tag.len = msg_len - sizeof(struct ds_msg_tag); | ||
816 | |||
817 | mutex_lock(&ds_var_mutex); | ||
818 | |||
819 | spin_lock_irqsave(&ds_lock, flags); | ||
820 | ds_var_doorbell = 0; | ||
821 | ds_var_response = -1; | ||
822 | |||
823 | ds_send(dp->lp, &pkt, msg_len); | ||
824 | spin_unlock_irqrestore(&ds_lock, flags); | ||
825 | |||
826 | loops = 1000; | ||
827 | while (ds_var_doorbell == 0) { | ||
828 | if (loops-- < 0) | ||
829 | break; | ||
830 | barrier(); | ||
831 | udelay(100); | ||
832 | } | ||
833 | |||
834 | mutex_unlock(&ds_var_mutex); | ||
835 | |||
836 | if (ds_var_doorbell == 0 || | ||
837 | ds_var_response != DS_VAR_SUCCESS) | ||
838 | printk(KERN_ERR PFX "var-config [%s:%s] " | ||
839 | "failed, response(%d).\n", | ||
840 | var, value, | ||
841 | ds_var_response); | ||
842 | } else { | ||
843 | printk(KERN_ERR PFX "var-config not registered so " | ||
844 | "could not set (%s) variable to (%s).\n", | ||
845 | var, value); | ||
846 | } | ||
847 | } | ||
848 | |||
849 | void ldom_reboot(const char *boot_command) | ||
850 | { | ||
851 | /* Don't bother with any of this if the boot_command | ||
852 | * is empty. | ||
853 | */ | ||
854 | if (boot_command && strlen(boot_command)) { | ||
855 | char full_boot_str[256]; | ||
856 | |||
857 | strcpy(full_boot_str, "boot "); | ||
858 | strcpy(full_boot_str + strlen("boot "), boot_command); | ||
859 | |||
860 | ldom_set_var("reboot-command", full_boot_str); | ||
861 | } | ||
862 | sun4v_mach_sir(); | ||
863 | } | ||
864 | |||
865 | void ldom_power_off(void) | ||
866 | { | ||
867 | sun4v_mach_exit(0); | ||
868 | } | ||
869 | |||
870 | static void ds_conn_reset(struct ds_info *dp) | ||
871 | { | ||
872 | printk(KERN_ERR PFX "ds_conn_reset() from %p\n", | ||
873 | __builtin_return_address(0)); | ||
874 | } | ||
875 | |||
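/* Send a DS_REG_REQ for every service in ds_states[] that is not yet
 * registered; the handle's upper 32 bits tie the eventual ACK/NACK
 * back to the ds_states[] slot.
 */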
876 | static int register_services(struct ds_info *dp) | ||
877 | { | ||
878 | struct ldc_channel *lp = dp->lp; | ||
879 | int i; | ||
880 | |||
881 | for (i = 0; i < ARRAY_SIZE(ds_states); i++) { | ||
882 | struct { | ||
883 | struct ds_reg_req req; | ||
884 | u8 id_buf[256]; | ||
885 | } pbuf; | ||
886 | struct ds_cap_state *cp = &ds_states[i]; | ||
887 | int err, msg_len; | ||
888 | u64 new_count; | ||
889 | |||
890 | if (cp->state == CAP_STATE_REGISTERED) | ||
891 | continue; | ||
892 | |||
893 | new_count = sched_clock() & 0xffffffff; | ||
894 | cp->handle = ((u64) i << 32) | new_count; | ||
895 | |||
896 | msg_len = (sizeof(struct ds_reg_req) + | ||
897 | strlen(cp->service_id)); | ||
898 | |||
899 | memset(&pbuf, 0, sizeof(pbuf)); | ||
900 | pbuf.req.tag.type = DS_REG_REQ; | ||
901 | pbuf.req.tag.len = (msg_len - sizeof(struct ds_msg_tag)); | ||
902 | pbuf.req.handle = cp->handle; | ||
903 | pbuf.req.major = 1; | ||
904 | pbuf.req.minor = 0; | ||
905 | strcpy(pbuf.req.svc_id, cp->service_id); | ||
906 | |||
907 | err = ds_send(lp, &pbuf, msg_len); | ||
908 | if (err > 0) | ||
909 | cp->state = CAP_STATE_REG_SENT; | ||
910 | } | ||
911 | return 0; | ||
912 | } | ||
913 | |||
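/* Advance the handshake state machine: expect DS_INIT_ACK while in
 * DS_HS_START, then process per-service DS_REG_ACK/DS_REG_NACK replies
 * once the channel is up.
 */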
914 | static int ds_handshake(struct ds_info *dp, struct ds_msg_tag *pkt) | ||
915 | { | ||
916 | |||
917 | if (dp->hs_state == DS_HS_START) { | ||
918 | if (pkt->type != DS_INIT_ACK) | ||
919 | goto conn_reset; | ||
920 | |||
921 | dp->hs_state = DS_HS_DONE; | ||
922 | |||
923 | return register_services(dp); | ||
924 | } | ||
925 | |||
926 | if (dp->hs_state != DS_HS_DONE) | ||
927 | goto conn_reset; | ||
928 | |||
929 | if (pkt->type == DS_REG_ACK) { | ||
930 | struct ds_reg_ack *ap = (struct ds_reg_ack *) pkt; | ||
931 | struct ds_cap_state *cp = find_cap(ap->handle); | ||
932 | |||
933 | if (!cp) { | ||
934 | printk(KERN_ERR PFX "REG ACK for unknown handle %lx\n", | ||
935 | ap->handle); | ||
936 | return 0; | ||
937 | } | ||
938 | printk(KERN_INFO PFX "Registered %s service.\n", | ||
939 | cp->service_id); | ||
940 | cp->state = CAP_STATE_REGISTERED; | ||
941 | } else if (pkt->type == DS_REG_NACK) { | ||
942 | struct ds_reg_nack *np = (struct ds_reg_nack *) pkt; | ||
943 | struct ds_cap_state *cp = find_cap(np->handle); | ||
944 | |||
945 | if (!cp) { | ||
946 | printk(KERN_ERR PFX "REG NACK for " | ||
947 | "unknown handle %lx\n", | ||
948 | np->handle); | ||
949 | return 0; | ||
950 | } | ||
951 | printk(KERN_INFO PFX "Could not register %s service\n", | ||
952 | cp->service_id); | ||
953 | cp->state = CAP_STATE_UNKNOWN; | ||
954 | } | ||
955 | |||
956 | return 0; | ||
957 | |||
958 | conn_reset: | ||
959 | ds_conn_reset(dp); | ||
960 | return -ECONNRESET; | ||
961 | } | ||
962 | |||
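/* Dispatch a DS_DATA packet to the owning service's ->data() callback,
 * or reply with a DS_NACK carrying DS_INV_HDL if the handle is unknown.
 */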
963 | static int ds_data(struct ds_info *dp, struct ds_msg_tag *pkt, int len) | ||
964 | { | ||
965 | struct ds_data *dpkt = (struct ds_data *) pkt; | ||
966 | struct ds_cap_state *cp = find_cap(dpkt->handle); | ||
967 | |||
968 | if (!cp) { | ||
969 | struct ds_data_nack nack = { | ||
970 | .tag = { | ||
971 | .type = DS_NACK, | ||
972 | .len = (sizeof(struct ds_data_nack) - | ||
973 | sizeof(struct ds_msg_tag)), | ||
974 | }, | ||
975 | .handle = dpkt->handle, | ||
976 | .result = DS_INV_HDL, | ||
977 | }; | ||
978 | |||
979 | printk(KERN_ERR PFX "Data for unknown handle %lu\n", | ||
980 | dpkt->handle); | ||
981 | ds_send(dp->lp, &nack, sizeof(nack)); | ||
982 | } else { | ||
983 | cp->data(dp->lp, cp, dpkt, len); | ||
984 | } | ||
985 | return 0; | ||
986 | } | ||
987 | |||
988 | static void ds_up(struct ds_info *dp) | ||
989 | { | ||
990 | struct ldc_channel *lp = dp->lp; | ||
991 | struct ds_ver_req req; | ||
992 | int err; | ||
993 | |||
994 | req.tag.type = DS_INIT_REQ; | ||
995 | req.tag.len = sizeof(req) - sizeof(struct ds_msg_tag); | ||
996 | req.ver.major = 1; | ||
997 | req.ver.minor = 0; | ||
998 | |||
999 | err = ds_send(lp, &req, sizeof(req)); | ||
1000 | if (err > 0) | ||
1001 | dp->hs_state = DS_HS_START; | ||
1002 | } | ||
1003 | |||
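/* LDC event callback.  LDC_EVENT_UP starts the handshake via ds_up();
 * LDC_EVENT_DATA_READY drains the channel, routing handshake packets
 * to ds_handshake() and service data to ds_data().
 */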
1004 | static void ds_event(void *arg, int event) | ||
1005 | { | ||
1006 | struct ds_info *dp = arg; | ||
1007 | struct ldc_channel *lp = dp->lp; | ||
1008 | unsigned long flags; | ||
1009 | int err; | ||
1010 | |||
1011 | spin_lock_irqsave(&ds_lock, flags); | ||
1012 | |||
1013 | if (event == LDC_EVENT_UP) { | ||
1014 | ds_up(dp); | ||
1015 | spin_unlock_irqrestore(&ds_lock, flags); | ||
1016 | return; | ||
1017 | } | ||
1018 | |||
1019 | if (event != LDC_EVENT_DATA_READY) { | ||
1020 | printk(KERN_WARNING PFX "Unexpected LDC event %d\n", event); | ||
1021 | spin_unlock_irqrestore(&ds_lock, flags); | ||
1022 | return; | ||
1023 | } | ||
1024 | |||
1025 | err = 0; | ||
1026 | while (1) { | ||
1027 | struct ds_msg_tag *tag; | ||
1028 | |||
1029 | err = ldc_read(lp, dp->rcv_buf, sizeof(*tag)); | ||
1030 | |||
1031 | if (unlikely(err < 0)) { | ||
1032 | if (err == -ECONNRESET) | ||
1033 | ds_conn_reset(dp); | ||
1034 | break; | ||
1035 | } | ||
1036 | if (err == 0) | ||
1037 | break; | ||
1038 | |||
1039 | tag = dp->rcv_buf; | ||
1040 | err = ldc_read(lp, tag + 1, tag->len); | ||
1041 | |||
1042 | if (unlikely(err < 0)) { | ||
1043 | if (err == -ECONNRESET) | ||
1044 | ds_conn_reset(dp); | ||
1045 | break; | ||
1046 | } | ||
1047 | if (err < tag->len) | ||
1048 | break; | ||
1049 | |||
1050 | if (tag->type < DS_DATA) | ||
1051 | err = ds_handshake(dp, dp->rcv_buf); | ||
1052 | else | ||
1053 | err = ds_data(dp, dp->rcv_buf, | ||
1054 | sizeof(*tag) + err); | ||
1055 | if (err == -ECONNRESET) | ||
1056 | break; | ||
1057 | } | ||
1058 | |||
1059 | spin_unlock_irqrestore(&ds_lock, flags); | ||
1060 | } | ||
1061 | |||
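/* Probe: allocate the per-channel ds_info, bind an LDC channel for the
 * "domain-services-port" vio device, and start powerd, which
 * domain_shutdown_data() wakes on shutdown requests.
 */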
1062 | static int __devinit ds_probe(struct vio_dev *vdev, | ||
1063 | const struct vio_device_id *id) | ||
1064 | { | ||
1065 | static int ds_version_printed; | ||
1066 | struct ldc_channel_config ds_cfg = { | ||
1067 | .event = ds_event, | ||
1068 | .mtu = 4096, | ||
1069 | .mode = LDC_MODE_STREAM, | ||
1070 | }; | ||
1071 | struct ldc_channel *lp; | ||
1072 | struct ds_info *dp; | ||
1073 | int err; | ||
1074 | |||
1075 | if (ds_version_printed++ == 0) | ||
1076 | printk(KERN_INFO "%s", version); | ||
1077 | |||
1078 | dp = kzalloc(sizeof(*dp), GFP_KERNEL); | ||
1079 | err = -ENOMEM; | ||
1080 | if (!dp) | ||
1081 | goto out_err; | ||
1082 | |||
1083 | dp->rcv_buf = kzalloc(4096, GFP_KERNEL); | ||
1084 | if (!dp->rcv_buf) | ||
1085 | goto out_free_dp; | ||
1086 | |||
1087 | dp->rcv_buf_len = 4096; | ||
1088 | |||
1089 | ds_cfg.tx_irq = vdev->tx_irq; | ||
1090 | ds_cfg.rx_irq = vdev->rx_irq; | ||
1091 | |||
1092 | lp = ldc_alloc(vdev->channel_id, &ds_cfg, dp); | ||
1093 | if (IS_ERR(lp)) { | ||
1094 | err = PTR_ERR(lp); | ||
1095 | goto out_free_rcv_buf; | ||
1096 | } | ||
1097 | dp->lp = lp; | ||
1098 | |||
1099 | err = ldc_bind(lp, "DS"); | ||
1100 | if (err) | ||
1101 | goto out_free_ldc; | ||
1102 | |||
1103 | ds_info = dp; | ||
1104 | |||
1105 | start_powerd(); | ||
1106 | |||
1107 | return err; | ||
1108 | |||
1109 | out_free_ldc: | ||
1110 | ldc_free(dp->lp); | ||
1111 | |||
1112 | out_free_rcv_buf: | ||
1113 | kfree(dp->rcv_buf); | ||
1114 | |||
1115 | out_free_dp: | ||
1116 | kfree(dp); | ||
1117 | |||
1118 | out_err: | ||
1119 | return err; | ||
1120 | } | ||
1121 | |||
1122 | static int ds_remove(struct vio_dev *vdev) | ||
1123 | { | ||
1124 | return 0; | ||
1125 | } | ||
1126 | |||
1127 | static struct vio_device_id ds_match[] = { | ||
1128 | { | ||
1129 | .type = "domain-services-port", | ||
1130 | }, | ||
1131 | {}, | ||
1132 | }; | ||
1133 | |||
1134 | static struct vio_driver ds_driver = { | ||
1135 | .id_table = ds_match, | ||
1136 | .probe = ds_probe, | ||
1137 | .remove = ds_remove, | ||
1138 | .driver = { | ||
1139 | .name = "ds", | ||
1140 | .owner = THIS_MODULE, | ||
1141 | } | ||
1142 | }; | ||
1143 | |||
1144 | static int __init ds_init(void) | ||
1145 | { | ||
1146 | int i; | ||
1147 | |||
1148 | for (i = 0; i < ARRAY_SIZE(ds_states); i++) | ||
1149 | ds_states[i].handle = ((u64)i << 32); | ||
1150 | |||
1151 | #ifdef CONFIG_HOTPLUG_CPU | ||
1152 | kthread_run(dr_cpu_thread, NULL, "kdrcpud"); | ||
1153 | #endif | ||
1154 | |||
1155 | return vio_register_driver(&ds_driver); | ||
1156 | } | ||
1157 | |||
1158 | subsys_initcall(ds_init); | ||