author		Heiko Carstens <heiko.carstens@de.ibm.com>	2008-07-14 03:59:19 -0400
committer	Heiko Carstens <heiko.carstens@de.ibm.com>	2008-07-14 04:02:16 -0400
commit		e0bc24958e1305efe176adc9d5f23a09e84c0058
tree		bebfd38374c261de227be22302c69fa8078439c4	/drivers/s390
parent		421c175c4d609864350df495b34d3e99f9fb1bdd
[S390] Add support for memory hot-add via sclp.
Cc: Peter Oberparleiter <peter.oberparleiter@de.ibm.com>
Cc: Gerald Schaefer <gerald.schaefer@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390')
-rw-r--r--	drivers/s390/char/sclp_cmd.c	316
1 file changed, 308 insertions, 8 deletions
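
For orientation before the diff itself: the sclp_memory_detect() hunk below replaces two if/else chains with a single computation in which rnmax falls back to rnmax2, rnsize (reported in MB) falls back to rnsize2, rzm holds the increment size in bytes, and total memory is rzm * rnmax. The standalone sketch that follows is not part of the patch; it only replays that arithmetic with made-up SCCB values.

#include <stdio.h>

/* Hypothetical stand-in for the early READ INFO SCCB fields used below. */
struct fake_read_info {
	unsigned short rnmax;		/* 0 means "use rnmax2" */
	unsigned long long rnmax2;
	unsigned char rnsize;		/* increment size in MB, 0 means "use rnsize2" */
	unsigned int rnsize2;
};

int main(void)
{
	struct fake_read_info sccb = { .rnmax = 0, .rnmax2 = 64,
				       .rnsize = 0, .rnsize2 = 256 };
	unsigned long long rnmax, rzm, memsize;

	/* Same fallback and shift logic as the patched sclp_memory_detect(). */
	rnmax = sccb.rnmax ? sccb.rnmax : sccb.rnmax2;
	rzm = sccb.rnsize ? sccb.rnsize : sccb.rnsize2;
	rzm <<= 20;			/* MB -> bytes */
	memsize = rzm * rnmax;
	printf("increment size: %llu bytes, increments: %llu, total: %llu bytes\n",
	       rzm, rnmax, memsize);
	return 0;
}
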
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index b5c23396f8fe..4f45884c92c3 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -11,6 +11,9 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/string.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/memory.h>
 #include <asm/chpid.h>
 #include <asm/sclp.h>
 #include "sclp.h"
@@ -43,6 +46,8 @@ static int __initdata early_read_info_sccb_valid;
 
 u64 sclp_facilities;
 static u8 sclp_fac84;
+static unsigned long long rzm;
+static unsigned long long rnmax;
 
 static int __init sclp_cmd_sync_early(sclp_cmdw_t cmd, void *sccb)
 {
@@ -106,14 +111,10 @@ unsigned long long __init sclp_memory_detect(void)
 	if (!early_read_info_sccb_valid)
 		return 0;
 	sccb = &early_read_info_sccb;
-	if (sccb->rnsize)
-		memsize = sccb->rnsize << 20;
-	else
-		memsize = sccb->rnsize2 << 20;
-	if (sccb->rnmax)
-		memsize *= sccb->rnmax;
-	else
-		memsize *= sccb->rnmax2;
+	rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
+	rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
+	rzm <<= 20;
+	memsize = rzm * rnmax;
 	return memsize;
 }
 
@@ -278,6 +279,305 @@ int sclp_cpu_deconfigure(u8 cpu)
 	return do_cpu_configure(SCLP_CMDW_DECONFIGURE_CPU | cpu << 8);
 }
 
+#ifdef CONFIG_MEMORY_HOTPLUG
+
+static DEFINE_MUTEX(sclp_mem_mutex);
+static LIST_HEAD(sclp_mem_list);
+static u8 sclp_max_storage_id;
+static unsigned long sclp_storage_ids[256 / BITS_PER_LONG];
+
+struct memory_increment {
+	struct list_head list;
+	u16 rn;
+	int standby;
+	int usecount;
+};
+
+struct assign_storage_sccb {
+	struct sccb_header header;
+	u16 rn;
+} __packed;
+
+static unsigned long long rn2addr(u16 rn)
+{
+	return (unsigned long long) (rn - 1) * rzm;
+}
+
+static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
+{
+	struct assign_storage_sccb *sccb;
+	int rc;
+
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = PAGE_SIZE;
+	sccb->rn = rn;
+	rc = do_sync_request(cmd, sccb);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+	case 0x0120:
+		break;
+	default:
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+static int sclp_assign_storage(u16 rn)
+{
+	return do_assign_storage(0x000d0001, rn);
+}
+
+static int sclp_unassign_storage(u16 rn)
+{
+	return do_assign_storage(0x000c0001, rn);
+}
+
+struct attach_storage_sccb {
+	struct sccb_header header;
+	u16 :16;
+	u16 assigned;
+	u32 :32;
+	u32 entries[0];
+} __packed;
+
+static int sclp_attach_storage(u8 id)
+{
+	struct attach_storage_sccb *sccb;
+	int rc;
+	int i;
+
+	sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		return -ENOMEM;
+	sccb->header.length = PAGE_SIZE;
+	rc = do_sync_request(0x00080001 | id << 8, sccb);
+	if (rc)
+		goto out;
+	switch (sccb->header.response_code) {
+	case 0x0020:
+		set_bit(id, sclp_storage_ids);
+		for (i = 0; i < sccb->assigned; i++)
+			sclp_unassign_storage(sccb->entries[i] >> 16);
+		break;
+	default:
+		rc = -EIO;
+		break;
+	}
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+
+static int sclp_mem_change_state(unsigned long start, unsigned long size,
+				 int online)
+{
+	struct memory_increment *incr;
+	unsigned long long istart;
+	int rc = 0;
+
+	list_for_each_entry(incr, &sclp_mem_list, list) {
+		istart = rn2addr(incr->rn);
+		if (start + size - 1 < istart)
+			break;
+		if (start > istart + rzm - 1)
+			continue;
+		if (online) {
+			if (incr->usecount++)
+				continue;
+			/*
+			 * Don't break the loop if one assign fails. Loop may
+			 * be walked again on CANCEL and we can't save
+			 * information if state changed before or not.
+			 * So continue and increase usecount for all increments.
+			 */
+			rc |= sclp_assign_storage(incr->rn);
+		} else {
+			if (--incr->usecount)
+				continue;
+			sclp_unassign_storage(incr->rn);
+		}
+	}
+	return rc ? -EIO : 0;
+}
+
+static int sclp_mem_notifier(struct notifier_block *nb,
+			     unsigned long action, void *data)
+{
+	unsigned long start, size;
+	struct memory_notify *arg;
+	unsigned char id;
+	int rc = 0;
+
+	arg = data;
+	start = arg->start_pfn << PAGE_SHIFT;
+	size = arg->nr_pages << PAGE_SHIFT;
+	mutex_lock(&sclp_mem_mutex);
+	for (id = 0; id <= sclp_max_storage_id; id++)
+		if (!test_bit(id, sclp_storage_ids))
+			sclp_attach_storage(id);
+	switch (action) {
+	case MEM_ONLINE:
+		break;
+	case MEM_GOING_ONLINE:
+		rc = sclp_mem_change_state(start, size, 1);
+		break;
+	case MEM_CANCEL_ONLINE:
+		sclp_mem_change_state(start, size, 0);
+		break;
+	default:
+		rc = -EINVAL;
+		break;
+	}
+	mutex_unlock(&sclp_mem_mutex);
+	return rc ? NOTIFY_BAD : NOTIFY_OK;
+}
+
+static struct notifier_block sclp_mem_nb = {
+	.notifier_call = sclp_mem_notifier,
+};
+
+static void __init add_memory_merged(u16 rn)
+{
+	static u16 first_rn, num;
+	unsigned long long start, size;
+
+	if (rn && first_rn && (first_rn + num == rn)) {
+		num++;
+		return;
+	}
+	if (!first_rn)
+		goto skip_add;
+	start = rn2addr(first_rn);
+	size = (unsigned long long) num * rzm;
+	if (start >= VMEM_MAX_PHYS)
+		goto skip_add;
+	if (start + size > VMEM_MAX_PHYS)
+		size = VMEM_MAX_PHYS - start;
+	add_memory(0, start, size);
+skip_add:
+	first_rn = rn;
+	num = 1;
+}
+
+static void __init sclp_add_standby_memory(void)
+{
+	struct memory_increment *incr;
+
+	list_for_each_entry(incr, &sclp_mem_list, list)
+		if (incr->standby)
+			add_memory_merged(incr->rn);
+	add_memory_merged(0);
+}
+
+static void __init insert_increment(u16 rn, int standby, int assigned)
+{
+	struct memory_increment *incr, *new_incr;
+	struct list_head *prev;
+	u16 last_rn;
+
+	new_incr = kzalloc(sizeof(*new_incr), GFP_KERNEL);
+	if (!new_incr)
+		return;
+	new_incr->rn = rn;
+	new_incr->standby = standby;
+	last_rn = 0;
+	prev = &sclp_mem_list;
+	list_for_each_entry(incr, &sclp_mem_list, list) {
+		if (assigned && incr->rn > rn)
+			break;
+		if (!assigned && incr->rn - last_rn > 1)
+			break;
+		last_rn = incr->rn;
+		prev = &incr->list;
+	}
+	if (!assigned)
+		new_incr->rn = last_rn + 1;
+	if (new_incr->rn > rnmax) {
+		kfree(new_incr);
+		return;
+	}
+	list_add(&new_incr->list, prev);
+}
+
+struct read_storage_sccb {
+	struct sccb_header header;
+	u16 max_id;
+	u16 assigned;
+	u16 standby;
+	u16 :16;
+	u32 entries[0];
+} __packed;
+
+static int __init sclp_detect_standby_memory(void)
+{
+	struct read_storage_sccb *sccb;
+	int i, id, assigned, rc;
+
+	if (!early_read_info_sccb_valid)
+		return 0;
+	if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
+		return 0;
+	rc = -ENOMEM;
+	sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
+	if (!sccb)
+		goto out;
+	assigned = 0;
+	for (id = 0; id <= sclp_max_storage_id; id++) {
+		memset(sccb, 0, PAGE_SIZE);
+		sccb->header.length = PAGE_SIZE;
+		rc = do_sync_request(0x00040001 | id << 8, sccb);
+		if (rc)
+			goto out;
+		switch (sccb->header.response_code) {
+		case 0x0010:
+			set_bit(id, sclp_storage_ids);
+			for (i = 0; i < sccb->assigned; i++) {
+				if (!sccb->entries[i])
+					continue;
+				assigned++;
+				insert_increment(sccb->entries[i] >> 16, 0, 1);
+			}
+			break;
+		case 0x0310:
+			break;
+		case 0x0410:
+			for (i = 0; i < sccb->assigned; i++) {
+				if (!sccb->entries[i])
+					continue;
+				assigned++;
+				insert_increment(sccb->entries[i] >> 16, 1, 1);
+			}
+			break;
+		default:
+			rc = -EIO;
+			break;
+		}
+		if (!rc)
+			sclp_max_storage_id = sccb->max_id;
+	}
+	if (rc || list_empty(&sclp_mem_list))
+		goto out;
+	for (i = 1; i <= rnmax - assigned; i++)
+		insert_increment(0, 1, 0);
+	rc = register_memory_notifier(&sclp_mem_nb);
+	if (rc)
+		goto out;
+	sclp_add_standby_memory();
+out:
+	free_page((unsigned long) sccb);
+	return rc;
+}
+__initcall(sclp_detect_standby_memory);
+
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
 /*
  * Channel path configuration related functions.
  */
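
A closing note on the hotplug path above: rn2addr() maps a 1-based increment number to its starting physical address ((rn - 1) * rzm), and sclp_mem_change_state() walks the increment list, stopping once an increment starts past the requested range and skipping increments that end before it. The standalone sketch below is not part of the patch; it only replays that address math and overlap test with hypothetical values.

#include <stdio.h>

/* Hypothetical increment size (rzm): 256 MB. */
static const unsigned long long rzm = 256ULL << 20;

/* Same formula as the patch's rn2addr(): increment numbers are 1-based. */
static unsigned long long rn2addr(unsigned short rn)
{
	return (unsigned long long) (rn - 1) * rzm;
}

/* Mirrors the range checks in sclp_mem_change_state(). */
static int overlaps(unsigned long long start, unsigned long long size,
		    unsigned short rn)
{
	unsigned long long istart = rn2addr(rn);

	if (start + size - 1 < istart)	/* range ends before this increment */
		return 0;
	if (start > istart + rzm - 1)	/* range starts after this increment */
		return 0;
	return 1;
}

int main(void)
{
	/* A hypothetical 512 MB block being onlined at the 1 GB boundary. */
	unsigned long long start = 1024ULL << 20, size = 512ULL << 20;
	unsigned int rn;

	for (rn = 1; rn <= 8; rn++)
		printf("increment %u at 0x%llx: %s\n", rn, rn2addr(rn),
		       overlaps(start, size, rn) ? "affected" : "untouched");
	return 0;
}
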