69 files changed, 3718 insertions, 499 deletions
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index 99dc3ded6b49..a14dba0e4d67 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -348,6 +348,9 @@ config ARCH_ENABLE_MEMORY_HOTPLUG | |||
348 | config ARCH_ENABLE_MEMORY_HOTREMOVE | 348 | config ARCH_ENABLE_MEMORY_HOTREMOVE |
349 | def_bool y | 349 | def_bool y |
350 | 350 | ||
351 | config ARCH_HIBERNATION_POSSIBLE | ||
352 | def_bool y if 64BIT | ||
353 | |||
351 | source "mm/Kconfig" | 354 | source "mm/Kconfig" |
352 | 355 | ||
353 | comment "I/O subsystem configuration" | 356 | comment "I/O subsystem configuration" |
@@ -592,6 +595,12 @@ config SECCOMP | |||
592 | 595 | ||
593 | endmenu | 596 | endmenu |
594 | 597 | ||
598 | menu "Power Management" | ||
599 | |||
600 | source "kernel/power/Kconfig" | ||
601 | |||
602 | endmenu | ||
603 | |||
595 | source "net/Kconfig" | 604 | source "net/Kconfig" |
596 | 605 | ||
597 | config PCMCIA | 606 | config PCMCIA |
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index 578c61f15a4b..0ff387cebf88 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -88,7 +88,9 @@ LDFLAGS_vmlinux := -e start | |||
88 | head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o | 88 | head-y := arch/s390/kernel/head.o arch/s390/kernel/init_task.o |
89 | 89 | ||
90 | core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ | 90 | core-y += arch/s390/mm/ arch/s390/kernel/ arch/s390/crypto/ \ |
91 | arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ | 91 | arch/s390/appldata/ arch/s390/hypfs/ arch/s390/kvm/ \ |
92 | arch/s390/power/ | ||
93 | |||
92 | libs-y += arch/s390/lib/ | 94 | libs-y += arch/s390/lib/ |
93 | drivers-y += drivers/s390/ | 95 | drivers-y += drivers/s390/ |
94 | drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/ | 96 | drivers-$(CONFIG_MATHEMU) += arch/s390/math-emu/ |
diff --git a/arch/s390/appldata/appldata_base.c b/arch/s390/appldata/appldata_base.c
index 1dfc7100c7ee..264528e4f58d 100644
--- a/arch/s390/appldata/appldata_base.c
+++ b/arch/s390/appldata/appldata_base.c
@@ -5,7 +5,7 @@ | |||
5 | * Exports appldata_register_ops() and appldata_unregister_ops() for the | 5 | * Exports appldata_register_ops() and appldata_unregister_ops() for the |
6 | * data gathering modules. | 6 | * data gathering modules. |
7 | * | 7 | * |
8 | * Copyright IBM Corp. 2003, 2008 | 8 | * Copyright IBM Corp. 2003, 2009 |
9 | * | 9 | * |
10 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> | 10 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> |
11 | */ | 11 | */ |
@@ -26,6 +26,8 @@ | |||
26 | #include <linux/notifier.h> | 26 | #include <linux/notifier.h> |
27 | #include <linux/cpu.h> | 27 | #include <linux/cpu.h> |
28 | #include <linux/workqueue.h> | 28 | #include <linux/workqueue.h> |
29 | #include <linux/suspend.h> | ||
30 | #include <linux/platform_device.h> | ||
29 | #include <asm/appldata.h> | 31 | #include <asm/appldata.h> |
30 | #include <asm/timer.h> | 32 | #include <asm/timer.h> |
31 | #include <asm/uaccess.h> | 33 | #include <asm/uaccess.h> |
@@ -41,6 +43,9 @@ | |||
41 | 43 | ||
42 | #define TOD_MICRO 0x01000 /* nr. of TOD clock units | 44 | #define TOD_MICRO 0x01000 /* nr. of TOD clock units |
43 | for 1 microsecond */ | 45 | for 1 microsecond */ |
46 | |||
47 | static struct platform_device *appldata_pdev; | ||
48 | |||
44 | /* | 49 | /* |
45 | * /proc entries (sysctl) | 50 | * /proc entries (sysctl) |
46 | */ | 51 | */ |
@@ -86,6 +91,7 @@ static atomic_t appldata_expire_count = ATOMIC_INIT(0); | |||
86 | static DEFINE_SPINLOCK(appldata_timer_lock); | 91 | static DEFINE_SPINLOCK(appldata_timer_lock); |
87 | static int appldata_interval = APPLDATA_CPU_INTERVAL; | 92 | static int appldata_interval = APPLDATA_CPU_INTERVAL; |
88 | static int appldata_timer_active; | 93 | static int appldata_timer_active; |
94 | static int appldata_timer_suspended = 0; | ||
89 | 95 | ||
90 | /* | 96 | /* |
91 | * Work queue | 97 | * Work queue |
@@ -475,6 +481,93 @@ void appldata_unregister_ops(struct appldata_ops *ops) | |||
475 | /********************** module-ops management <END> **************************/ | 481 | /********************** module-ops management <END> **************************/ |
476 | 482 | ||
477 | 483 | ||
484 | /**************************** suspend / resume *******************************/ | ||
485 | static int appldata_freeze(struct device *dev) | ||
486 | { | ||
487 | struct appldata_ops *ops; | ||
488 | int rc; | ||
489 | struct list_head *lh; | ||
490 | |||
491 | get_online_cpus(); | ||
492 | spin_lock(&appldata_timer_lock); | ||
493 | if (appldata_timer_active) { | ||
494 | __appldata_vtimer_setup(APPLDATA_DEL_TIMER); | ||
495 | appldata_timer_suspended = 1; | ||
496 | } | ||
497 | spin_unlock(&appldata_timer_lock); | ||
498 | put_online_cpus(); | ||
499 | |||
500 | mutex_lock(&appldata_ops_mutex); | ||
501 | list_for_each(lh, &appldata_ops_list) { | ||
502 | ops = list_entry(lh, struct appldata_ops, list); | ||
503 | if (ops->active == 1) { | ||
504 | rc = appldata_diag(ops->record_nr, APPLDATA_STOP_REC, | ||
505 | (unsigned long) ops->data, ops->size, | ||
506 | ops->mod_lvl); | ||
507 | if (rc != 0) | ||
508 | pr_err("Stopping the data collection for %s " | ||
509 | "failed with rc=%d\n", ops->name, rc); | ||
510 | } | ||
511 | } | ||
512 | mutex_unlock(&appldata_ops_mutex); | ||
513 | return 0; | ||
514 | } | ||
515 | |||
516 | static int appldata_restore(struct device *dev) | ||
517 | { | ||
518 | struct appldata_ops *ops; | ||
519 | int rc; | ||
520 | struct list_head *lh; | ||
521 | |||
522 | get_online_cpus(); | ||
523 | spin_lock(&appldata_timer_lock); | ||
524 | if (appldata_timer_suspended) { | ||
525 | __appldata_vtimer_setup(APPLDATA_ADD_TIMER); | ||
526 | appldata_timer_suspended = 0; | ||
527 | } | ||
528 | spin_unlock(&appldata_timer_lock); | ||
529 | put_online_cpus(); | ||
530 | |||
531 | mutex_lock(&appldata_ops_mutex); | ||
532 | list_for_each(lh, &appldata_ops_list) { | ||
533 | ops = list_entry(lh, struct appldata_ops, list); | ||
534 | if (ops->active == 1) { | ||
535 | ops->callback(ops->data); // init record | ||
536 | rc = appldata_diag(ops->record_nr, | ||
537 | APPLDATA_START_INTERVAL_REC, | ||
538 | (unsigned long) ops->data, ops->size, | ||
539 | ops->mod_lvl); | ||
540 | if (rc != 0) { | ||
541 | pr_err("Starting the data collection for %s " | ||
542 | "failed with rc=%d\n", ops->name, rc); | ||
543 | } | ||
544 | } | ||
545 | } | ||
546 | mutex_unlock(&appldata_ops_mutex); | ||
547 | return 0; | ||
548 | } | ||
549 | |||
550 | static int appldata_thaw(struct device *dev) | ||
551 | { | ||
552 | return appldata_restore(dev); | ||
553 | } | ||
554 | |||
555 | static struct dev_pm_ops appldata_pm_ops = { | ||
556 | .freeze = appldata_freeze, | ||
557 | .thaw = appldata_thaw, | ||
558 | .restore = appldata_restore, | ||
559 | }; | ||
560 | |||
561 | static struct platform_driver appldata_pdrv = { | ||
562 | .driver = { | ||
563 | .name = "appldata", | ||
564 | .owner = THIS_MODULE, | ||
565 | .pm = &appldata_pm_ops, | ||
566 | }, | ||
567 | }; | ||
568 | /************************* suspend / resume <END> ****************************/ | ||
569 | |||
570 | |||
478 | /******************************* init / exit *********************************/ | 571 | /******************************* init / exit *********************************/ |
479 | 572 | ||
480 | static void __cpuinit appldata_online_cpu(int cpu) | 573 | static void __cpuinit appldata_online_cpu(int cpu) |
@@ -531,11 +624,23 @@ static struct notifier_block __cpuinitdata appldata_nb = { | |||
531 | */ | 624 | */ |
532 | static int __init appldata_init(void) | 625 | static int __init appldata_init(void) |
533 | { | 626 | { |
534 | int i; | 627 | int i, rc; |
628 | |||
629 | rc = platform_driver_register(&appldata_pdrv); | ||
630 | if (rc) | ||
631 | return rc; | ||
535 | 632 | ||
633 | appldata_pdev = platform_device_register_simple("appldata", -1, NULL, | ||
634 | 0); | ||
635 | if (IS_ERR(appldata_pdev)) { | ||
636 | rc = PTR_ERR(appldata_pdev); | ||
637 | goto out_driver; | ||
638 | } | ||
536 | appldata_wq = create_singlethread_workqueue("appldata"); | 639 | appldata_wq = create_singlethread_workqueue("appldata"); |
537 | if (!appldata_wq) | 640 | if (!appldata_wq) { |
538 | return -ENOMEM; | 641 | rc = -ENOMEM; |
642 | goto out_device; | ||
643 | } | ||
539 | 644 | ||
540 | get_online_cpus(); | 645 | get_online_cpus(); |
541 | for_each_online_cpu(i) | 646 | for_each_online_cpu(i) |
@@ -547,6 +652,12 @@ static int __init appldata_init(void) | |||
547 | 652 | ||
548 | appldata_sysctl_header = register_sysctl_table(appldata_dir_table); | 653 | appldata_sysctl_header = register_sysctl_table(appldata_dir_table); |
549 | return 0; | 654 | return 0; |
655 | |||
656 | out_device: | ||
657 | platform_device_unregister(appldata_pdev); | ||
658 | out_driver: | ||
659 | platform_driver_unregister(&appldata_pdrv); | ||
660 | return rc; | ||
550 | } | 661 | } |
551 | 662 | ||
552 | __initcall(appldata_init); | 663 | __initcall(appldata_init); |
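Note on the appldata hunks above: the patch registers a pseudo platform device purely so that the power-management core invokes freeze/thaw/restore callbacks for the appldata monitor. A minimal, self-contained sketch of that pattern follows; the "demo" names are placeholders and not part of the patch.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int demo_freeze(struct device *dev)
{
	/* quiesce all activity before the hibernation snapshot is taken */
	return 0;
}

static int demo_restore(struct device *dev)
{
	/* restart activity after the image has been restored */
	return 0;
}

static const struct dev_pm_ops demo_pm_ops = {
	.freeze	 = demo_freeze,
	.thaw	 = demo_restore,
	.restore = demo_restore,
};

static struct platform_driver demo_pdrv = {
	.driver = {
		.name	= "demo",
		.owner	= THIS_MODULE,
		.pm	= &demo_pm_ops,
	},
};

static struct platform_device *demo_pdev;

static int __init demo_init(void)
{
	int rc;

	rc = platform_driver_register(&demo_pdrv);
	if (rc)
		return rc;
	demo_pdev = platform_device_register_simple("demo", -1, NULL, 0);
	if (IS_ERR(demo_pdev)) {
		platform_driver_unregister(&demo_pdrv);
		return PTR_ERR(demo_pdev);
	}
	return 0;
}
module_init(demo_init);

The device name and driver name must match ("demo" here, "appldata" in the patch) so that the driver core binds the PM callbacks to the registered device.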
diff --git a/arch/s390/include/asm/ccwdev.h b/arch/s390/include/asm/ccwdev.h
index ba007d8df941..2a5419551176 100644
--- a/arch/s390/include/asm/ccwdev.h
+++ b/arch/s390/include/asm/ccwdev.h
@@ -1,11 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-s390/ccwdev.h | 2 | * Copyright IBM Corp. 2002, 2009 |
3 | * include/asm-s390x/ccwdev.h | ||
4 | * | 3 | * |
5 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation | 4 | * Author(s): Arnd Bergmann <arndb@de.ibm.com> |
6 | * Author(s): Arnd Bergmann <arndb@de.ibm.com> | ||
7 | * | 5 | * |
8 | * Interface for CCW device drivers | 6 | * Interface for CCW device drivers |
9 | */ | 7 | */ |
10 | #ifndef _S390_CCWDEV_H_ | 8 | #ifndef _S390_CCWDEV_H_ |
11 | #define _S390_CCWDEV_H_ | 9 | #define _S390_CCWDEV_H_ |
@@ -104,6 +102,11 @@ struct ccw_device { | |||
104 | * @set_offline: called when setting device offline | 102 | * @set_offline: called when setting device offline |
105 | * @notify: notify driver of device state changes | 103 | * @notify: notify driver of device state changes |
106 | * @shutdown: called at device shutdown | 104 | * @shutdown: called at device shutdown |
105 | * @prepare: prepare for pm state transition | ||
106 | * @complete: undo work done in @prepare | ||
107 | * @freeze: callback for freezing during hibernation snapshotting | ||
108 | * @thaw: undo work done in @freeze | ||
109 | * @restore: callback for restoring after hibernation | ||
107 | * @driver: embedded device driver structure | 110 | * @driver: embedded device driver structure |
108 | * @name: device driver name | 111 | * @name: device driver name |
109 | */ | 112 | */ |
@@ -116,6 +119,11 @@ struct ccw_driver { | |||
116 | int (*set_offline) (struct ccw_device *); | 119 | int (*set_offline) (struct ccw_device *); |
117 | int (*notify) (struct ccw_device *, int); | 120 | int (*notify) (struct ccw_device *, int); |
118 | void (*shutdown) (struct ccw_device *); | 121 | void (*shutdown) (struct ccw_device *); |
122 | int (*prepare) (struct ccw_device *); | ||
123 | void (*complete) (struct ccw_device *); | ||
124 | int (*freeze)(struct ccw_device *); | ||
125 | int (*thaw) (struct ccw_device *); | ||
126 | int (*restore)(struct ccw_device *); | ||
119 | struct device_driver driver; | 127 | struct device_driver driver; |
120 | char *name; | 128 | char *name; |
121 | }; | 129 | }; |
@@ -184,6 +192,7 @@ extern void ccw_device_get_id(struct ccw_device *, struct ccw_dev_id *); | |||
184 | #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver) | 192 | #define to_ccwdrv(n) container_of(n, struct ccw_driver, driver) |
185 | 193 | ||
186 | extern struct ccw_device *ccw_device_probe_console(void); | 194 | extern struct ccw_device *ccw_device_probe_console(void); |
195 | extern int ccw_device_force_console(void); | ||
187 | 196 | ||
188 | // FIXME: these have to go | 197 | // FIXME: these have to go |
189 | extern int _ccw_device_get_subchannel_number(struct ccw_device *); | 198 | extern int _ccw_device_get_subchannel_number(struct ccw_device *); |
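The five new ccw_driver callbacks mirror the dev_pm_ops phases of the hibernation cycle (prepare/complete around the transition, freeze/thaw around the snapshot, restore after resume). A hedged sketch of how a CCW device driver might wire them up is shown below; the "foo" driver, its device-id table, and its probe/remove stubs are invented for illustration and are not part of this patch.

/* Illustrative only: the "foo" names are not from the patch. */
#include <linux/module.h>
#include <asm/ccwdev.h>

static struct ccw_device_id foo_ids[] = {
	{ CCW_DEVICE(0x3990, 0) },	/* hypothetical control unit type */
	{ /* end of list */ },
};

static int foo_probe(struct ccw_device *cdev)
{
	return 0;
}

static void foo_remove(struct ccw_device *cdev)
{
}

static int foo_pm_freeze(struct ccw_device *cdev)
{
	/* stop issuing new channel programs before the snapshot */
	return 0;
}

static int foo_pm_restore(struct ccw_device *cdev)
{
	/* bring the device back into a usable state after resume */
	return 0;
}

static struct ccw_driver foo_driver = {
	.name	 = "foo",
	.owner	 = THIS_MODULE,
	.ids	 = foo_ids,
	.probe	 = foo_probe,
	.remove	 = foo_remove,
	.freeze	 = foo_pm_freeze,
	.thaw	 = foo_pm_restore,
	.restore = foo_pm_restore,
};

The dasd_eckd hunk near the end of this diff wires .freeze in exactly this way, using the generic DASD helpers added in dasd.c.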
diff --git a/arch/s390/include/asm/ccwgroup.h b/arch/s390/include/asm/ccwgroup.h
index a27f68985a79..c79c1e787b86 100644
--- a/arch/s390/include/asm/ccwgroup.h
+++ b/arch/s390/include/asm/ccwgroup.h
@@ -38,6 +38,11 @@ struct ccwgroup_device { | |||
38 | * @set_online: function called when device is set online | 38 | * @set_online: function called when device is set online |
39 | * @set_offline: function called when device is set offline | 39 | * @set_offline: function called when device is set offline |
40 | * @shutdown: function called when device is shut down | 40 | * @shutdown: function called when device is shut down |
41 | * @prepare: prepare for pm state transition | ||
42 | * @complete: undo work done in @prepare | ||
43 | * @freeze: callback for freezing during hibernation snapshotting | ||
44 | * @thaw: undo work done in @freeze | ||
45 | * @restore: callback for restoring after hibernation | ||
41 | * @driver: embedded driver structure | 46 | * @driver: embedded driver structure |
42 | */ | 47 | */ |
43 | struct ccwgroup_driver { | 48 | struct ccwgroup_driver { |
@@ -51,6 +56,11 @@ struct ccwgroup_driver { | |||
51 | int (*set_online) (struct ccwgroup_device *); | 56 | int (*set_online) (struct ccwgroup_device *); |
52 | int (*set_offline) (struct ccwgroup_device *); | 57 | int (*set_offline) (struct ccwgroup_device *); |
53 | void (*shutdown)(struct ccwgroup_device *); | 58 | void (*shutdown)(struct ccwgroup_device *); |
59 | int (*prepare) (struct ccwgroup_device *); | ||
60 | void (*complete) (struct ccwgroup_device *); | ||
61 | int (*freeze)(struct ccwgroup_device *); | ||
62 | int (*thaw) (struct ccwgroup_device *); | ||
63 | int (*restore)(struct ccwgroup_device *); | ||
54 | 64 | ||
55 | struct device_driver driver; | 65 | struct device_driver driver; |
56 | }; | 66 | }; |
diff --git a/arch/s390/include/asm/suspend.h b/arch/s390/include/asm/suspend.h
new file mode 100644
index 000000000000..dc75c616eafe
--- /dev/null
+++ b/arch/s390/include/asm/suspend.h
@@ -0,0 +1,10 @@ | |||
1 | #ifndef __ASM_S390_SUSPEND_H | ||
2 | #define __ASM_S390_SUSPEND_H | ||
3 | |||
4 | static inline int arch_prepare_suspend(void) | ||
5 | { | ||
6 | return 0; | ||
7 | } | ||
8 | |||
9 | #endif | ||
10 | |||
diff --git a/arch/s390/include/asm/system.h b/arch/s390/include/asm/system.h
index 3a8b26eb1f2e..4fb83c1cdb77 100644
--- a/arch/s390/include/asm/system.h
+++ b/arch/s390/include/asm/system.h
@@ -1,11 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * include/asm-s390/system.h | 2 | * Copyright IBM Corp. 1999, 2009 |
3 | * | 3 | * |
4 | * S390 version | 4 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> |
5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | ||
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
7 | * | ||
8 | * Derived from "include/asm-i386/system.h" | ||
9 | */ | 5 | */ |
10 | 6 | ||
11 | #ifndef __ASM_SYSTEM_H | 7 | #ifndef __ASM_SYSTEM_H |
@@ -469,6 +465,20 @@ extern psw_t sysc_restore_trace_psw; | |||
469 | extern psw_t io_restore_trace_psw; | 465 | extern psw_t io_restore_trace_psw; |
470 | #endif | 466 | #endif |
471 | 467 | ||
468 | static inline int tprot(unsigned long addr) | ||
469 | { | ||
470 | int rc = -EFAULT; | ||
471 | |||
472 | asm volatile( | ||
473 | " tprot 0(%1),0\n" | ||
474 | "0: ipm %0\n" | ||
475 | " srl %0,28\n" | ||
476 | "1:\n" | ||
477 | EX_TABLE(0b,1b) | ||
478 | : "+d" (rc) : "a" (addr) : "cc"); | ||
479 | return rc; | ||
480 | } | ||
481 | |||
472 | #endif /* __KERNEL__ */ | 482 | #endif /* __KERNEL__ */ |
473 | 483 | ||
474 | #endif | 484 | #endif |
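tprot() moves from mem_detect.c into system.h (see the next hunk) so the new suspend code can reuse it. It returns the TEST PROTECTION condition code for the given real address, or -EFAULT when the probe itself raises an exception that is caught through the exception table. A tiny illustrative wrapper, not part of the patch:

/* Illustrative helper: treat only condition code 0 (storage exists,
 * fetch and store permitted) as "usable". */
static int storage_accessible(unsigned long addr)
{
	return tprot(addr) == 0;
}

pfn_is_nosave() in arch/s390/power/suspend.c below applies the opposite test: any non-zero result marks the page frame as not worth saving.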
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index fb263736826c..f9b144049dc9 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/kernel/early.c | 2 | * arch/s390/kernel/early.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2007 | 4 | * Copyright IBM Corp. 2007, 2009 |
5 | * Author(s): Hongjie Yang <hongjie@us.ibm.com>, | 5 | * Author(s): Hongjie Yang <hongjie@us.ibm.com>, |
6 | * Heiko Carstens <heiko.carstens@de.ibm.com> | 6 | * Heiko Carstens <heiko.carstens@de.ibm.com> |
7 | */ | 7 | */ |
@@ -210,7 +210,7 @@ static noinline __init void detect_machine_type(void) | |||
210 | machine_flags |= MACHINE_FLAG_VM; | 210 | machine_flags |= MACHINE_FLAG_VM; |
211 | } | 211 | } |
212 | 212 | ||
213 | static __init void early_pgm_check_handler(void) | 213 | static void early_pgm_check_handler(void) |
214 | { | 214 | { |
215 | unsigned long addr; | 215 | unsigned long addr; |
216 | const struct exception_table_entry *fixup; | 216 | const struct exception_table_entry *fixup; |
@@ -222,7 +222,7 @@ static __init void early_pgm_check_handler(void) | |||
222 | S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE; | 222 | S390_lowcore.program_old_psw.addr = fixup->fixup | PSW_ADDR_AMODE; |
223 | } | 223 | } |
224 | 224 | ||
225 | static noinline __init void setup_lowcore_early(void) | 225 | void setup_lowcore_early(void) |
226 | { | 226 | { |
227 | psw_t psw; | 227 | psw_t psw; |
228 | 228 | ||
diff --git a/arch/s390/kernel/mem_detect.c b/arch/s390/kernel/mem_detect.c
index 9872999c66d1..559af0d07878 100644
--- a/arch/s390/kernel/mem_detect.c
+++ b/arch/s390/kernel/mem_detect.c
@@ -1,6 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright IBM Corp. 2008 | 2 | * Copyright IBM Corp. 2008, 2009 |
3 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | 3 | * |
4 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | ||
4 | */ | 5 | */ |
5 | 6 | ||
6 | #include <linux/kernel.h> | 7 | #include <linux/kernel.h> |
@@ -9,20 +10,6 @@ | |||
9 | #include <asm/sclp.h> | 10 | #include <asm/sclp.h> |
10 | #include <asm/setup.h> | 11 | #include <asm/setup.h> |
11 | 12 | ||
12 | static inline int tprot(unsigned long addr) | ||
13 | { | ||
14 | int rc = -EFAULT; | ||
15 | |||
16 | asm volatile( | ||
17 | " tprot 0(%1),0\n" | ||
18 | "0: ipm %0\n" | ||
19 | " srl %0,28\n" | ||
20 | "1:\n" | ||
21 | EX_TABLE(0b,1b) | ||
22 | : "+d" (rc) : "a" (addr) : "cc"); | ||
23 | return rc; | ||
24 | } | ||
25 | |||
26 | #define ADDR2G (1ULL << 31) | 13 | #define ADDR2G (1ULL << 31) |
27 | 14 | ||
28 | static void find_memory_chunks(struct mem_chunk chunk[]) | 15 | static void find_memory_chunks(struct mem_chunk chunk[]) |
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index cc8c484984e3..fd8e3111a4e8 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * arch/s390/kernel/smp.c | 2 | * arch/s390/kernel/smp.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 1999,2007 | 4 | * Copyright IBM Corp. 1999, 2009 |
5 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), | 5 | * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com), |
6 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 6 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
7 | * Heiko Carstens (heiko.carstens@de.ibm.com) | 7 | * Heiko Carstens (heiko.carstens@de.ibm.com) |
@@ -1031,6 +1031,42 @@ out: | |||
1031 | static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show, | 1031 | static SYSDEV_CLASS_ATTR(dispatching, 0644, dispatching_show, |
1032 | dispatching_store); | 1032 | dispatching_store); |
1033 | 1033 | ||
1034 | /* | ||
1035 | * If the resume kernel runs on another cpu than the suspended kernel, | ||
1036 | * we have to switch the cpu IDs in the logical map. | ||
1037 | */ | ||
1038 | void smp_switch_boot_cpu_in_resume(u32 resume_phys_cpu_id, | ||
1039 | struct _lowcore *suspend_lowcore) | ||
1040 | { | ||
1041 | int cpu, suspend_cpu_id, resume_cpu_id; | ||
1042 | u32 suspend_phys_cpu_id; | ||
1043 | |||
1044 | suspend_phys_cpu_id = __cpu_logical_map[suspend_lowcore->cpu_nr]; | ||
1045 | suspend_cpu_id = suspend_lowcore->cpu_nr; | ||
1046 | |||
1047 | for_each_present_cpu(cpu) { | ||
1048 | if (__cpu_logical_map[cpu] == resume_phys_cpu_id) { | ||
1049 | resume_cpu_id = cpu; | ||
1050 | goto found; | ||
1051 | } | ||
1052 | } | ||
1053 | panic("Could not find resume cpu in logical map.\n"); | ||
1054 | |||
1055 | found: | ||
1056 | printk("Resume cpu ID: %i/%i\n", resume_phys_cpu_id, resume_cpu_id); | ||
1057 | printk("Suspend cpu ID: %i/%i\n", suspend_phys_cpu_id, suspend_cpu_id); | ||
1058 | |||
1059 | __cpu_logical_map[resume_cpu_id] = suspend_phys_cpu_id; | ||
1060 | __cpu_logical_map[suspend_cpu_id] = resume_phys_cpu_id; | ||
1061 | |||
1062 | lowcore_ptr[suspend_cpu_id]->cpu_addr = resume_phys_cpu_id; | ||
1063 | } | ||
1064 | |||
1065 | u32 smp_get_phys_cpu_id(void) | ||
1066 | { | ||
1067 | return __cpu_logical_map[smp_processor_id()]; | ||
1068 | } | ||
1069 | |||
1034 | static int __init topology_init(void) | 1070 | static int __init topology_init(void) |
1035 | { | 1071 | { |
1036 | int cpu; | 1072 | int cpu; |
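Worked example for smp_switch_boot_cpu_in_resume() above, with hypothetical IDs: suppose the hibernation image was written on logical cpu 0, which had physical cpu address 5, and the resume kernel happens to boot on physical cpu address 7, currently mapped as logical cpu 2. The loop finds resume_cpu_id = 2 and then swaps the two logical slots:

	before:  __cpu_logical_map[0] = 5,  __cpu_logical_map[2] = 7
	after:   __cpu_logical_map[0] = 7,  __cpu_logical_map[2] = 5

Finally the suspended lowcore's cpu_addr is patched to the resuming physical ID, so the restored image keeps a consistent view of which physical CPU it is actually running on.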
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 4ca8e826bf30..565667207985 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -313,3 +313,22 @@ int s390_enable_sie(void) | |||
313 | return 0; | 313 | return 0; |
314 | } | 314 | } |
315 | EXPORT_SYMBOL_GPL(s390_enable_sie); | 315 | EXPORT_SYMBOL_GPL(s390_enable_sie); |
316 | |||
317 | #ifdef CONFIG_DEBUG_PAGEALLOC | ||
318 | #ifdef CONFIG_HIBERNATION | ||
319 | bool kernel_page_present(struct page *page) | ||
320 | { | ||
321 | unsigned long addr; | ||
322 | int cc; | ||
323 | |||
324 | addr = page_to_phys(page); | ||
325 | asm("lra %1,0(%1)\n" | ||
326 | "ipm %0\n" | ||
327 | "srl %0,28" | ||
328 | :"=d"(cc),"+a"(addr)::"cc"); | ||
329 | return cc == 0; | ||
330 | } | ||
331 | |||
332 | #endif /* CONFIG_HIBERNATION */ | ||
333 | #endif /* CONFIG_DEBUG_PAGEALLOC */ | ||
334 | |||
diff --git a/arch/s390/power/Makefile b/arch/s390/power/Makefile
new file mode 100644
index 000000000000..973bb45a8fec
--- /dev/null
+++ b/arch/s390/power/Makefile
@@ -0,0 +1,8 @@ | |||
1 | # | ||
2 | # Makefile for s390 PM support | ||
3 | # | ||
4 | |||
5 | obj-$(CONFIG_HIBERNATION) += suspend.o | ||
6 | obj-$(CONFIG_HIBERNATION) += swsusp.o | ||
7 | obj-$(CONFIG_HIBERNATION) += swsusp_64.o | ||
8 | obj-$(CONFIG_HIBERNATION) += swsusp_asm64.o | ||
diff --git a/arch/s390/power/suspend.c b/arch/s390/power/suspend.c
new file mode 100644
index 000000000000..b3351eceebbe
--- /dev/null
+++ b/arch/s390/power/suspend.c
@@ -0,0 +1,40 @@ | |||
1 | /* | ||
2 | * Suspend support specific for s390. | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | ||
7 | */ | ||
8 | |||
9 | #include <linux/mm.h> | ||
10 | #include <linux/suspend.h> | ||
11 | #include <linux/reboot.h> | ||
12 | #include <linux/pfn.h> | ||
13 | #include <asm/sections.h> | ||
14 | #include <asm/ipl.h> | ||
15 | |||
16 | /* | ||
17 | * References to section boundaries | ||
18 | */ | ||
19 | extern const void __nosave_begin, __nosave_end; | ||
20 | |||
21 | /* | ||
22 | * check if given pfn is in the 'nosave' or in the read only NSS section | ||
23 | */ | ||
24 | int pfn_is_nosave(unsigned long pfn) | ||
25 | { | ||
26 | unsigned long nosave_begin_pfn = __pa(&__nosave_begin) >> PAGE_SHIFT; | ||
27 | unsigned long nosave_end_pfn = PAGE_ALIGN(__pa(&__nosave_end)) | ||
28 | >> PAGE_SHIFT; | ||
29 | unsigned long eshared_pfn = PFN_DOWN(__pa(&_eshared)) - 1; | ||
30 | unsigned long stext_pfn = PFN_DOWN(__pa(&_stext)); | ||
31 | |||
32 | if (pfn >= nosave_begin_pfn && pfn < nosave_end_pfn) | ||
33 | return 1; | ||
34 | if (pfn >= stext_pfn && pfn <= eshared_pfn) { | ||
35 | if (ipl_info.type == IPL_TYPE_NSS) | ||
36 | return 1; | ||
37 | } else if ((tprot(pfn * PAGE_SIZE) && pfn > 0)) | ||
38 | return 1; | ||
39 | return 0; | ||
40 | } | ||
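pfn_is_nosave() is the architecture hook the hibernation core consults for every page frame while building the image: the kernel's __nosave section, the shared read-only text of an NSS, and frames that fail the tprot() probe are all excluded. A rough sketch of how a caller on the snapshot side combines the usual checks is given below; the helper name is illustrative, and the real saveable-page logic lives in kernel/power/snapshot.c.

/* Illustrative sketch only; see kernel/power/snapshot.c for the
 * actual implementation. */
static int frame_should_be_saved(unsigned long pfn)
{
	if (!pfn_valid(pfn))
		return 0;		/* no memory behind this frame */
	if (pfn_is_nosave(pfn))
		return 0;		/* arch asked us to skip it */
	return 1;
}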
diff --git a/arch/s390/power/swsusp.c b/arch/s390/power/swsusp.c
new file mode 100644
index 000000000000..e6a4fe9f5f24
--- /dev/null
+++ b/arch/s390/power/swsusp.c
@@ -0,0 +1,30 @@ | |||
1 | /* | ||
2 | * Support for suspend and resume on s390 | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | |||
11 | /* | ||
12 | * save CPU registers before creating a hibernation image and before | ||
13 | * restoring the memory state from it | ||
14 | */ | ||
15 | void save_processor_state(void) | ||
16 | { | ||
17 | /* implementation contained in the | ||
18 | * swsusp_arch_suspend function | ||
19 | */ | ||
20 | } | ||
21 | |||
22 | /* | ||
23 | * restore the contents of CPU registers | ||
24 | */ | ||
25 | void restore_processor_state(void) | ||
26 | { | ||
27 | /* implementation contained in the | ||
28 | * swsusp_arch_resume function | ||
29 | */ | ||
30 | } | ||
diff --git a/arch/s390/power/swsusp_64.c b/arch/s390/power/swsusp_64.c
new file mode 100644
index 000000000000..9516a517d72f
--- /dev/null
+++ b/arch/s390/power/swsusp_64.c
@@ -0,0 +1,17 @@ | |||
1 | /* | ||
2 | * Support for suspend and resume on s390 | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | ||
7 | * | ||
8 | */ | ||
9 | |||
10 | #include <asm/system.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | |||
13 | void do_after_copyback(void) | ||
14 | { | ||
15 | mb(); | ||
16 | } | ||
17 | |||
diff --git a/arch/s390/power/swsusp_asm64.S b/arch/s390/power/swsusp_asm64.S
new file mode 100644
index 000000000000..3c74e7d827c9
--- /dev/null
+++ b/arch/s390/power/swsusp_asm64.S
@@ -0,0 +1,199 @@ | |||
1 | /* | ||
2 | * S390 64-bit swsusp implementation | ||
3 | * | ||
4 | * Copyright IBM Corp. 2009 | ||
5 | * | ||
6 | * Author(s): Hans-Joachim Picht <hans@linux.vnet.ibm.com> | ||
7 | * Michael Holzheu <holzheu@linux.vnet.ibm.com> | ||
8 | */ | ||
9 | |||
10 | #include <asm/page.h> | ||
11 | #include <asm/ptrace.h> | ||
12 | #include <asm/asm-offsets.h> | ||
13 | |||
14 | /* | ||
15 | * Save register context in absolute 0 lowcore and call swsusp_save() to | ||
16 | * create in-memory kernel image. The context is saved in the designated | ||
17 | * "store status" memory locations (see POP). | ||
18 | * We return from this function twice. The first time during the suspend to | ||
19 | * disk process. The second time via the swsusp_arch_resume() function | ||
20 | * (see below) in the resume process. | ||
21 | * This function runs with disabled interrupts. | ||
22 | */ | ||
23 | .section .text | ||
24 | .align 2 | ||
25 | .globl swsusp_arch_suspend | ||
26 | swsusp_arch_suspend: | ||
27 | stmg %r6,%r15,__SF_GPRS(%r15) | ||
28 | lgr %r1,%r15 | ||
29 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
30 | stg %r1,__SF_BACKCHAIN(%r15) | ||
31 | |||
32 | /* Deactivate DAT */ | ||
33 | stnsm __SF_EMPTY(%r15),0xfb | ||
34 | |||
35 | /* Switch off lowcore protection */ | ||
36 | stctg %c0,%c0,__SF_EMPTY(%r15) | ||
37 | ni __SF_EMPTY+4(%r15),0xef | ||
38 | lctlg %c0,%c0,__SF_EMPTY(%r15) | ||
39 | |||
40 | /* Store prefix register on stack */ | ||
41 | stpx __SF_EMPTY(%r15) | ||
42 | |||
43 | /* Setup base register for lowcore (absolute 0) */ | ||
44 | llgf %r1,__SF_EMPTY(%r15) | ||
45 | |||
46 | /* Get pointer to save area */ | ||
47 | aghi %r1,0x1000 | ||
48 | |||
49 | /* Store registers */ | ||
50 | mvc 0x318(4,%r1),__SF_EMPTY(%r15) /* move prefix to lowcore */ | ||
51 | stfpc 0x31c(%r1) /* store fpu control */ | ||
52 | std 0,0x200(%r1) /* store f0 */ | ||
53 | std 1,0x208(%r1) /* store f1 */ | ||
54 | std 2,0x210(%r1) /* store f2 */ | ||
55 | std 3,0x218(%r1) /* store f3 */ | ||
56 | std 4,0x220(%r1) /* store f4 */ | ||
57 | std 5,0x228(%r1) /* store f5 */ | ||
58 | std 6,0x230(%r1) /* store f6 */ | ||
59 | std 7,0x238(%r1) /* store f7 */ | ||
60 | std 8,0x240(%r1) /* store f8 */ | ||
61 | std 9,0x248(%r1) /* store f9 */ | ||
62 | std 10,0x250(%r1) /* store f10 */ | ||
63 | std 11,0x258(%r1) /* store f11 */ | ||
64 | std 12,0x260(%r1) /* store f12 */ | ||
65 | std 13,0x268(%r1) /* store f13 */ | ||
66 | std 14,0x270(%r1) /* store f14 */ | ||
67 | std 15,0x278(%r1) /* store f15 */ | ||
68 | stam %a0,%a15,0x340(%r1) /* store access registers */ | ||
69 | stctg %c0,%c15,0x380(%r1) /* store control registers */ | ||
70 | stmg %r0,%r15,0x280(%r1) /* store general registers */ | ||
71 | |||
72 | stpt 0x328(%r1) /* store timer */ | ||
73 | stckc 0x330(%r1) /* store clock comparator */ | ||
74 | |||
75 | /* Activate DAT */ | ||
76 | stosm __SF_EMPTY(%r15),0x04 | ||
77 | |||
78 | /* Set prefix page to zero */ | ||
79 | xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) | ||
80 | spx __SF_EMPTY(%r15) | ||
81 | |||
82 | /* Setup lowcore */ | ||
83 | brasl %r14,setup_lowcore_early | ||
84 | |||
85 | /* Save image */ | ||
86 | brasl %r14,swsusp_save | ||
87 | |||
88 | /* Switch on lowcore protection */ | ||
89 | stctg %c0,%c0,__SF_EMPTY(%r15) | ||
90 | oi __SF_EMPTY+4(%r15),0x10 | ||
91 | lctlg %c0,%c0,__SF_EMPTY(%r15) | ||
92 | |||
93 | /* Restore prefix register and return */ | ||
94 | lghi %r1,0x1000 | ||
95 | spx 0x318(%r1) | ||
96 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) | ||
97 | lghi %r2,0 | ||
98 | br %r14 | ||
99 | |||
100 | /* | ||
101 | * Restore saved memory image to correct place and restore register context. | ||
102 | * Then we return to the function that called swsusp_arch_suspend(). | ||
103 | * swsusp_arch_resume() runs with disabled interrupts. | ||
104 | */ | ||
105 | .globl swsusp_arch_resume | ||
106 | swsusp_arch_resume: | ||
107 | stmg %r6,%r15,__SF_GPRS(%r15) | ||
108 | lgr %r1,%r15 | ||
109 | aghi %r15,-STACK_FRAME_OVERHEAD | ||
110 | stg %r1,__SF_BACKCHAIN(%r15) | ||
111 | |||
112 | /* Save boot cpu number */ | ||
113 | brasl %r14,smp_get_phys_cpu_id | ||
114 | lgr %r10,%r2 | ||
115 | |||
116 | /* Deactivate DAT */ | ||
117 | stnsm __SF_EMPTY(%r15),0xfb | ||
118 | |||
119 | /* Switch off lowcore protection */ | ||
120 | stctg %c0,%c0,__SF_EMPTY(%r15) | ||
121 | ni __SF_EMPTY+4(%r15),0xef | ||
122 | lctlg %c0,%c0,__SF_EMPTY(%r15) | ||
123 | |||
124 | /* Set prefix page to zero */ | ||
125 | xc __SF_EMPTY(4,%r15),__SF_EMPTY(%r15) | ||
126 | spx __SF_EMPTY(%r15) | ||
127 | |||
128 | /* Restore saved image */ | ||
129 | larl %r1,restore_pblist | ||
130 | lg %r1,0(%r1) | ||
131 | ltgr %r1,%r1 | ||
132 | jz 2f | ||
133 | 0: | ||
134 | lg %r2,8(%r1) | ||
135 | lg %r4,0(%r1) | ||
136 | lghi %r3,PAGE_SIZE | ||
137 | lghi %r5,PAGE_SIZE | ||
138 | 1: | ||
139 | mvcle %r2,%r4,0 | ||
140 | jo 1b | ||
141 | lg %r1,16(%r1) | ||
142 | ltgr %r1,%r1 | ||
143 | jnz 0b | ||
144 | 2: | ||
145 | ptlb /* flush tlb */ | ||
146 | |||
147 | /* Restore registers */ | ||
148 | lghi %r13,0x1000 /* %r13 = pointer to save area */ | ||
149 | |||
150 | spt 0x328(%r13) /* reprogram timer */ | ||
151 | //sckc 0x330(%r13) /* set clock comparator */ | ||
152 | |||
153 | lctlg %c0,%c15,0x380(%r13) /* load control registers */ | ||
154 | lam %a0,%a15,0x340(%r13) /* load access registers */ | ||
155 | |||
156 | lfpc 0x31c(%r13) /* load fpu control */ | ||
157 | ld 0,0x200(%r13) /* load f0 */ | ||
158 | ld 1,0x208(%r13) /* load f1 */ | ||
159 | ld 2,0x210(%r13) /* load f2 */ | ||
160 | ld 3,0x218(%r13) /* load f3 */ | ||
161 | ld 4,0x220(%r13) /* load f4 */ | ||
162 | ld 5,0x228(%r13) /* load f5 */ | ||
163 | ld 6,0x230(%r13) /* load f6 */ | ||
164 | ld 7,0x238(%r13) /* load f7 */ | ||
165 | ld 8,0x240(%r13) /* load f8 */ | ||
166 | ld 9,0x248(%r13) /* load f9 */ | ||
167 | ld 10,0x250(%r13) /* load f10 */ | ||
168 | ld 11,0x258(%r13) /* load f11 */ | ||
169 | ld 12,0x260(%r13) /* load f12 */ | ||
170 | ld 13,0x268(%r13) /* load f13 */ | ||
171 | ld 14,0x270(%r13) /* load f14 */ | ||
172 | ld 15,0x278(%r13) /* load f15 */ | ||
173 | |||
174 | /* Load old stack */ | ||
175 | lg %r15,0x2f8(%r13) | ||
176 | |||
177 | /* Pointer to save area */ | ||
178 | lghi %r13,0x1000 | ||
179 | |||
180 | /* Switch CPUs */ | ||
181 | lgr %r2,%r10 /* get cpu id */ | ||
182 | llgf %r3,0x318(%r13) | ||
183 | brasl %r14,smp_switch_boot_cpu_in_resume | ||
184 | |||
185 | /* Restore prefix register */ | ||
186 | spx 0x318(%r13) | ||
187 | |||
188 | /* Switch on lowcore protection */ | ||
189 | stctg %c0,%c0,__SF_EMPTY(%r15) | ||
190 | oi __SF_EMPTY+4(%r15),0x10 | ||
191 | lctlg %c0,%c0,__SF_EMPTY(%r15) | ||
192 | |||
193 | /* Activate DAT */ | ||
194 | stosm __SF_EMPTY(%r15),0x04 | ||
195 | |||
196 | /* Return 0 */ | ||
197 | lmg %r6,%r15,STACK_FRAME_OVERHEAD + __SF_GPRS(%r15) | ||
198 | lghi %r2,0 | ||
199 | br %r14 | ||
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 54481a887769..86105efb4eb6 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -4,7 +4,7 @@ | |||
4 | * This HVC device driver provides terminal access using | 4 | * This HVC device driver provides terminal access using |
5 | * z/VM IUCV communication paths. | 5 | * z/VM IUCV communication paths. |
6 | * | 6 | * |
7 | * Copyright IBM Corp. 2008 | 7 | * Copyright IBM Corp. 2008, 2009 |
8 | * | 8 | * |
9 | * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> | 9 | * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com> |
10 | */ | 10 | */ |
@@ -15,6 +15,7 @@ | |||
15 | #include <asm/ebcdic.h> | 15 | #include <asm/ebcdic.h> |
16 | #include <linux/ctype.h> | 16 | #include <linux/ctype.h> |
17 | #include <linux/delay.h> | 17 | #include <linux/delay.h> |
18 | #include <linux/device.h> | ||
18 | #include <linux/init.h> | 19 | #include <linux/init.h> |
19 | #include <linux/mempool.h> | 20 | #include <linux/mempool.h> |
20 | #include <linux/moduleparam.h> | 21 | #include <linux/moduleparam.h> |
@@ -74,6 +75,7 @@ struct hvc_iucv_private { | |||
74 | wait_queue_head_t sndbuf_waitq; /* wait for send completion */ | 75 | wait_queue_head_t sndbuf_waitq; /* wait for send completion */ |
75 | struct list_head tty_outqueue; /* outgoing IUCV messages */ | 76 | struct list_head tty_outqueue; /* outgoing IUCV messages */ |
76 | struct list_head tty_inqueue; /* incoming IUCV messages */ | 77 | struct list_head tty_inqueue; /* incoming IUCV messages */ |
78 | struct device *dev; /* device structure */ | ||
77 | }; | 79 | }; |
78 | 80 | ||
79 | struct iucv_tty_buffer { | 81 | struct iucv_tty_buffer { |
@@ -542,7 +544,68 @@ static void flush_sndbuf_sync(struct hvc_iucv_private *priv) | |||
542 | 544 | ||
543 | if (sync_wait) | 545 | if (sync_wait) |
544 | wait_event_timeout(priv->sndbuf_waitq, | 546 | wait_event_timeout(priv->sndbuf_waitq, |
545 | tty_outqueue_empty(priv), HZ); | 547 | tty_outqueue_empty(priv), HZ/10); |
548 | } | ||
549 | |||
550 | /** | ||
551 | * hvc_iucv_hangup() - Sever IUCV path and schedule hvc tty hang up | ||
552 | * @priv: Pointer to hvc_iucv_private structure | ||
553 | * | ||
554 | * This routine severs an existing IUCV communication path and hangs | ||
555 | * up the underlying HVC terminal device. | ||
556 | * The hang-up occurs only if an IUCV communication path is established; | ||
557 | * otherwise there is no need to hang up the terminal device. | ||
558 | * | ||
559 | * The IUCV HVC hang-up is separated into two steps: | ||
560 | * 1. After the IUCV path has been severed, the iucv_state is set to | ||
561 | * IUCV_SEVERED. | ||
562 | * 2. Later, when the HVC thread calls hvc_iucv_get_chars(), the | ||
563 | * IUCV_SEVERED state causes the tty hang-up in the HVC layer. | ||
564 | * | ||
565 | * If the tty has not yet been opened, clean up the hvc_iucv_private | ||
566 | * structure to allow re-connects. | ||
567 | * If the tty has been opened, let get_chars() return -EPIPE to signal | ||
568 | * the HVC layer to hang up the tty and, if so, wake up the HVC thread | ||
569 | * to call get_chars()... | ||
570 | * | ||
571 | * Special notes on hanging up a HVC terminal instantiated as console: | ||
572 | * Hang-up: 1. do_tty_hangup() replaces file ops (= hung_up_tty_fops) | ||
573 | * 2. do_tty_hangup() calls tty->ops->close() for console_filp | ||
574 | * => no hangup notifier is called by HVC (default) | ||
575 | * 3. hvc_close() returns because of tty_hung_up_p(filp) | ||
576 | * => no delete notifier is called! | ||
577 | * Finally, the back-end is not being notified, thus, the tty session is | ||
578 | * kept active (TTY_OPEN) to be ready for re-connects. | ||
579 | * | ||
580 | * Locking: spin_lock(&priv->lock) w/o disabling bh | ||
581 | */ | ||
582 | static void hvc_iucv_hangup(struct hvc_iucv_private *priv) | ||
583 | { | ||
584 | struct iucv_path *path; | ||
585 | |||
586 | path = NULL; | ||
587 | spin_lock(&priv->lock); | ||
588 | if (priv->iucv_state == IUCV_CONNECTED) { | ||
589 | path = priv->path; | ||
590 | priv->path = NULL; | ||
591 | priv->iucv_state = IUCV_SEVERED; | ||
592 | if (priv->tty_state == TTY_CLOSED) | ||
593 | hvc_iucv_cleanup(priv); | ||
594 | else | ||
595 | /* console is special (see above) */ | ||
596 | if (priv->is_console) { | ||
597 | hvc_iucv_cleanup(priv); | ||
598 | priv->tty_state = TTY_OPENED; | ||
599 | } else | ||
600 | hvc_kick(); | ||
601 | } | ||
602 | spin_unlock(&priv->lock); | ||
603 | |||
604 | /* finally sever path (outside of priv->lock due to lock ordering) */ | ||
605 | if (path) { | ||
606 | iucv_path_sever(path, NULL); | ||
607 | iucv_path_free(path); | ||
608 | } | ||
546 | } | 609 | } |
547 | 610 | ||
548 | /** | 611 | /** |
@@ -735,11 +798,8 @@ out_path_handled: | |||
735 | * @ipuser: User specified data for this path | 798 | * @ipuser: User specified data for this path |
736 | * (AF_IUCV: port/service name and originator port) | 799 | * (AF_IUCV: port/service name and originator port) |
737 | * | 800 | * |
738 | * The function also severs the path (as required by the IUCV protocol) and | 801 | * This function calls the hvc_iucv_hangup() function for the |
739 | * sets the iucv state to IUCV_SEVERED for the associated struct | 802 | * respective IUCV HVC terminal. |
740 | * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty | ||
741 | * hangup (hvc_iucv_get_chars() / hvc_iucv_write()). | ||
742 | * If tty portion of the HVC is closed, clean up the outqueue. | ||
743 | * | 803 | * |
744 | * Locking: struct hvc_iucv_private->lock | 804 | * Locking: struct hvc_iucv_private->lock |
745 | */ | 805 | */ |
@@ -747,33 +807,7 @@ static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) | |||
747 | { | 807 | { |
748 | struct hvc_iucv_private *priv = path->private; | 808 | struct hvc_iucv_private *priv = path->private; |
749 | 809 | ||
750 | spin_lock(&priv->lock); | 810 | hvc_iucv_hangup(priv); |
751 | priv->iucv_state = IUCV_SEVERED; | ||
752 | |||
753 | /* If the tty has not yet been opened, clean up the hvc_iucv_private | ||
754 | * structure to allow re-connects. | ||
755 | * This is also done for our console device because console hangups | ||
756 | * are handled specially and no notifier is called by HVC. | ||
757 | * The tty session is active (TTY_OPEN) and ready for re-connects... | ||
758 | * | ||
759 | * If it has been opened, let get_chars() return -EPIPE to signal the | ||
760 | * HVC layer to hang up the tty. | ||
761 | * If so, we need to wake up the HVC thread to call get_chars()... | ||
762 | */ | ||
763 | priv->path = NULL; | ||
764 | if (priv->tty_state == TTY_CLOSED) | ||
765 | hvc_iucv_cleanup(priv); | ||
766 | else | ||
767 | if (priv->is_console) { | ||
768 | hvc_iucv_cleanup(priv); | ||
769 | priv->tty_state = TTY_OPENED; | ||
770 | } else | ||
771 | hvc_kick(); | ||
772 | spin_unlock(&priv->lock); | ||
773 | |||
774 | /* finally sever path (outside of priv->lock due to lock ordering) */ | ||
775 | iucv_path_sever(path, ipuser); | ||
776 | iucv_path_free(path); | ||
777 | } | 811 | } |
778 | 812 | ||
779 | /** | 813 | /** |
@@ -853,6 +887,37 @@ static void hvc_iucv_msg_complete(struct iucv_path *path, | |||
853 | destroy_tty_buffer_list(&list_remove); | 887 | destroy_tty_buffer_list(&list_remove); |
854 | } | 888 | } |
855 | 889 | ||
890 | /** | ||
891 | * hvc_iucv_pm_freeze() - Freeze PM callback | ||
892 | * @dev: IUCV HVC terminal device | ||
893 | * | ||
894 | * Sever an established IUCV communication path and | ||
895 | * trigger a hang-up of the underlying HVC terminal. | ||
896 | */ | ||
897 | static int hvc_iucv_pm_freeze(struct device *dev) | ||
898 | { | ||
899 | struct hvc_iucv_private *priv = dev_get_drvdata(dev); | ||
900 | |||
901 | local_bh_disable(); | ||
902 | hvc_iucv_hangup(priv); | ||
903 | local_bh_enable(); | ||
904 | |||
905 | return 0; | ||
906 | } | ||
907 | |||
908 | /** | ||
909 | * hvc_iucv_pm_restore_thaw() - Thaw and restore PM callback | ||
910 | * @dev: IUCV HVC terminal device | ||
911 | * | ||
912 | * Wake up the HVC thread to trigger hang-up and respective | ||
913 | * HVC back-end notifier invocations. | ||
914 | */ | ||
915 | static int hvc_iucv_pm_restore_thaw(struct device *dev) | ||
916 | { | ||
917 | hvc_kick(); | ||
918 | return 0; | ||
919 | } | ||
920 | |||
856 | 921 | ||
857 | /* HVC operations */ | 922 | /* HVC operations */ |
858 | static struct hv_ops hvc_iucv_ops = { | 923 | static struct hv_ops hvc_iucv_ops = { |
@@ -863,6 +928,20 @@ static struct hv_ops hvc_iucv_ops = { | |||
863 | .notifier_hangup = hvc_iucv_notifier_hangup, | 928 | .notifier_hangup = hvc_iucv_notifier_hangup, |
864 | }; | 929 | }; |
865 | 930 | ||
931 | /* Suspend / resume device operations */ | ||
932 | static struct dev_pm_ops hvc_iucv_pm_ops = { | ||
933 | .freeze = hvc_iucv_pm_freeze, | ||
934 | .thaw = hvc_iucv_pm_restore_thaw, | ||
935 | .restore = hvc_iucv_pm_restore_thaw, | ||
936 | }; | ||
937 | |||
938 | /* IUCV HVC device driver */ | ||
939 | static struct device_driver hvc_iucv_driver = { | ||
940 | .name = KMSG_COMPONENT, | ||
941 | .bus = &iucv_bus, | ||
942 | .pm = &hvc_iucv_pm_ops, | ||
943 | }; | ||
944 | |||
866 | /** | 945 | /** |
867 | * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance | 946 | * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance |
868 | * @id: hvc_iucv_table index | 947 | * @id: hvc_iucv_table index |
@@ -897,14 +976,12 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console) | |||
897 | /* set console flag */ | 976 | /* set console flag */ |
898 | priv->is_console = is_console; | 977 | priv->is_console = is_console; |
899 | 978 | ||
900 | /* finally allocate hvc */ | 979 | /* allocate hvc device */ |
901 | priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */ | 980 | priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id, /* PAGE_SIZE */ |
902 | HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256); | 981 | HVC_IUCV_MAGIC + id, &hvc_iucv_ops, 256); |
903 | if (IS_ERR(priv->hvc)) { | 982 | if (IS_ERR(priv->hvc)) { |
904 | rc = PTR_ERR(priv->hvc); | 983 | rc = PTR_ERR(priv->hvc); |
905 | free_page((unsigned long) priv->sndbuf); | 984 | goto out_error_hvc; |
906 | kfree(priv); | ||
907 | return rc; | ||
908 | } | 985 | } |
909 | 986 | ||
910 | /* notify HVC thread instead of using polling */ | 987 | /* notify HVC thread instead of using polling */ |
@@ -915,8 +992,45 @@ static int __init hvc_iucv_alloc(int id, unsigned int is_console) | |||
915 | memcpy(priv->srv_name, name, 8); | 992 | memcpy(priv->srv_name, name, 8); |
916 | ASCEBC(priv->srv_name, 8); | 993 | ASCEBC(priv->srv_name, 8); |
917 | 994 | ||
995 | /* create and setup device */ | ||
996 | priv->dev = kzalloc(sizeof(*priv->dev), GFP_KERNEL); | ||
997 | if (!priv->dev) { | ||
998 | rc = -ENOMEM; | ||
999 | goto out_error_dev; | ||
1000 | } | ||
1001 | dev_set_name(priv->dev, "hvc_iucv%d", id); | ||
1002 | dev_set_drvdata(priv->dev, priv); | ||
1003 | priv->dev->bus = &iucv_bus; | ||
1004 | priv->dev->parent = iucv_root; | ||
1005 | priv->dev->driver = &hvc_iucv_driver; | ||
1006 | priv->dev->release = (void (*)(struct device *)) kfree; | ||
1007 | rc = device_register(priv->dev); | ||
1008 | if (rc) { | ||
1009 | kfree(priv->dev); | ||
1010 | goto out_error_dev; | ||
1011 | } | ||
1012 | |||
918 | hvc_iucv_table[id] = priv; | 1013 | hvc_iucv_table[id] = priv; |
919 | return 0; | 1014 | return 0; |
1015 | |||
1016 | out_error_dev: | ||
1017 | hvc_remove(priv->hvc); | ||
1018 | out_error_hvc: | ||
1019 | free_page((unsigned long) priv->sndbuf); | ||
1020 | kfree(priv); | ||
1021 | |||
1022 | return rc; | ||
1023 | } | ||
1024 | |||
1025 | /** | ||
1026 | * hvc_iucv_destroy() - Destroy and free hvc_iucv_private instances | ||
1027 | */ | ||
1028 | static void __init hvc_iucv_destroy(struct hvc_iucv_private *priv) | ||
1029 | { | ||
1030 | hvc_remove(priv->hvc); | ||
1031 | device_unregister(priv->dev); | ||
1032 | free_page((unsigned long) priv->sndbuf); | ||
1033 | kfree(priv); | ||
920 | } | 1034 | } |
921 | 1035 | ||
922 | /** | 1036 | /** |
@@ -1109,6 +1223,11 @@ static int __init hvc_iucv_init(void) | |||
1109 | goto out_error; | 1223 | goto out_error; |
1110 | } | 1224 | } |
1111 | 1225 | ||
1226 | /* register IUCV HVC device driver */ | ||
1227 | rc = driver_register(&hvc_iucv_driver); | ||
1228 | if (rc) | ||
1229 | goto out_error; | ||
1230 | |||
1112 | /* parse hvc_iucv_allow string and create z/VM user ID filter list */ | 1231 | /* parse hvc_iucv_allow string and create z/VM user ID filter list */ |
1113 | if (hvc_iucv_filter_string) { | 1232 | if (hvc_iucv_filter_string) { |
1114 | rc = hvc_iucv_setup_filter(hvc_iucv_filter_string); | 1233 | rc = hvc_iucv_setup_filter(hvc_iucv_filter_string); |
@@ -1183,15 +1302,14 @@ out_error_iucv: | |||
1183 | iucv_unregister(&hvc_iucv_handler, 0); | 1302 | iucv_unregister(&hvc_iucv_handler, 0); |
1184 | out_error_hvc: | 1303 | out_error_hvc: |
1185 | for (i = 0; i < hvc_iucv_devices; i++) | 1304 | for (i = 0; i < hvc_iucv_devices; i++) |
1186 | if (hvc_iucv_table[i]) { | 1305 | if (hvc_iucv_table[i]) |
1187 | if (hvc_iucv_table[i]->hvc) | 1306 | hvc_iucv_destroy(hvc_iucv_table[i]); |
1188 | hvc_remove(hvc_iucv_table[i]->hvc); | ||
1189 | kfree(hvc_iucv_table[i]); | ||
1190 | } | ||
1191 | out_error_memory: | 1307 | out_error_memory: |
1192 | mempool_destroy(hvc_iucv_mempool); | 1308 | mempool_destroy(hvc_iucv_mempool); |
1193 | kmem_cache_destroy(hvc_iucv_buffer_cache); | 1309 | kmem_cache_destroy(hvc_iucv_buffer_cache); |
1194 | out_error: | 1310 | out_error: |
1311 | if (hvc_iucv_filter) | ||
1312 | kfree(hvc_iucv_filter); | ||
1195 | hvc_iucv_devices = 0; /* ensure that we do not provide any device */ | 1313 | hvc_iucv_devices = 0; /* ensure that we do not provide any device */ |
1196 | return rc; | 1314 | return rc; |
1197 | } | 1315 | } |
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 442bb98a2821..e5b84db0aa03 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -5,8 +5,7 @@ | |||
5 | * Carsten Otte <Cotte@de.ibm.com> | 5 | * Carsten Otte <Cotte@de.ibm.com> |
6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
7 | * Bugreports.to..: <Linux390@de.ibm.com> | 7 | * Bugreports.to..: <Linux390@de.ibm.com> |
8 | * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999-2001 | 8 | * Copyright IBM Corp. 1999, 2009 |
9 | * | ||
10 | */ | 9 | */ |
11 | 10 | ||
12 | #define KMSG_COMPONENT "dasd" | 11 | #define KMSG_COMPONENT "dasd" |
@@ -61,6 +60,7 @@ static int dasd_flush_block_queue(struct dasd_block *); | |||
61 | static void dasd_device_tasklet(struct dasd_device *); | 60 | static void dasd_device_tasklet(struct dasd_device *); |
62 | static void dasd_block_tasklet(struct dasd_block *); | 61 | static void dasd_block_tasklet(struct dasd_block *); |
63 | static void do_kick_device(struct work_struct *); | 62 | static void do_kick_device(struct work_struct *); |
63 | static void do_restore_device(struct work_struct *); | ||
64 | static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); | 64 | static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *); |
65 | static void dasd_device_timeout(unsigned long); | 65 | static void dasd_device_timeout(unsigned long); |
66 | static void dasd_block_timeout(unsigned long); | 66 | static void dasd_block_timeout(unsigned long); |
@@ -109,6 +109,7 @@ struct dasd_device *dasd_alloc_device(void) | |||
109 | device->timer.function = dasd_device_timeout; | 109 | device->timer.function = dasd_device_timeout; |
110 | device->timer.data = (unsigned long) device; | 110 | device->timer.data = (unsigned long) device; |
111 | INIT_WORK(&device->kick_work, do_kick_device); | 111 | INIT_WORK(&device->kick_work, do_kick_device); |
112 | INIT_WORK(&device->restore_device, do_restore_device); | ||
112 | device->state = DASD_STATE_NEW; | 113 | device->state = DASD_STATE_NEW; |
113 | device->target = DASD_STATE_NEW; | 114 | device->target = DASD_STATE_NEW; |
114 | 115 | ||
@@ -512,6 +513,25 @@ void dasd_kick_device(struct dasd_device *device) | |||
512 | } | 513 | } |
513 | 514 | ||
514 | /* | 515 | /* |
516 | * dasd_restore_device will schedule a call to do_restore_device via the kernel | ||
517 | * event daemon. | ||
518 | */ | ||
519 | static void do_restore_device(struct work_struct *work) | ||
520 | { | ||
521 | struct dasd_device *device = container_of(work, struct dasd_device, | ||
522 | restore_device); | ||
523 | device->cdev->drv->restore(device->cdev); | ||
524 | dasd_put_device(device); | ||
525 | } | ||
526 | |||
527 | void dasd_restore_device(struct dasd_device *device) | ||
528 | { | ||
529 | dasd_get_device(device); | ||
530 | /* queue call to do_restore_device to the kernel event daemon. */ | ||
531 | schedule_work(&device->restore_device); | ||
532 | } | ||
533 | |||
534 | /* | ||
515 | * Set the target state for a device and starts the state change. | 535 | * Set the target state for a device and starts the state change. |
516 | */ | 536 | */ |
517 | void dasd_set_target_state(struct dasd_device *device, int target) | 537 | void dasd_set_target_state(struct dasd_device *device, int target) |
@@ -908,6 +928,12 @@ int dasd_start_IO(struct dasd_ccw_req *cqr) | |||
908 | DBF_DEV_EVENT(DBF_DEBUG, device, "%s", | 928 | DBF_DEV_EVENT(DBF_DEBUG, device, "%s", |
909 | "start_IO: -EIO device gone, retry"); | 929 | "start_IO: -EIO device gone, retry"); |
910 | break; | 930 | break; |
931 | case -EINVAL: | ||
932 | /* most likely caused in power management context */ | ||
933 | DBF_DEV_EVENT(DBF_DEBUG, device, "%s", | ||
934 | "start_IO: -EINVAL device currently " | ||
935 | "not accessible"); | ||
936 | break; | ||
911 | default: | 937 | default: |
912 | /* internal error 11 - unknown rc */ | 938 | /* internal error 11 - unknown rc */ |
913 | snprintf(errorstring, ERRORLENGTH, "11 %d", rc); | 939 | snprintf(errorstring, ERRORLENGTH, "11 %d", rc); |
@@ -2400,6 +2426,12 @@ int dasd_generic_notify(struct ccw_device *cdev, int event) | |||
2400 | case CIO_OPER: | 2426 | case CIO_OPER: |
2401 | /* FIXME: add a sanity check. */ | 2427 | /* FIXME: add a sanity check. */ |
2402 | device->stopped &= ~DASD_STOPPED_DC_WAIT; | 2428 | device->stopped &= ~DASD_STOPPED_DC_WAIT; |
2429 | if (device->stopped & DASD_UNRESUMED_PM) { | ||
2430 | device->stopped &= ~DASD_UNRESUMED_PM; | ||
2431 | dasd_restore_device(device); | ||
2432 | ret = 1; | ||
2433 | break; | ||
2434 | } | ||
2403 | dasd_schedule_device_bh(device); | 2435 | dasd_schedule_device_bh(device); |
2404 | if (device->block) | 2436 | if (device->block) |
2405 | dasd_schedule_block_bh(device->block); | 2437 | dasd_schedule_block_bh(device->block); |
@@ -2410,6 +2442,79 @@ int dasd_generic_notify(struct ccw_device *cdev, int event) | |||
2410 | return ret; | 2442 | return ret; |
2411 | } | 2443 | } |
2412 | 2444 | ||
2445 | int dasd_generic_pm_freeze(struct ccw_device *cdev) | ||
2446 | { | ||
2447 | struct dasd_ccw_req *cqr, *n; | ||
2448 | int rc; | ||
2449 | struct list_head freeze_queue; | ||
2450 | struct dasd_device *device = dasd_device_from_cdev(cdev); | ||
2451 | |||
2452 | if (IS_ERR(device)) | ||
2453 | return PTR_ERR(device); | ||
2454 | /* disallow new I/O */ | ||
2455 | device->stopped |= DASD_STOPPED_PM; | ||
2456 | /* clear active requests */ | ||
2457 | INIT_LIST_HEAD(&freeze_queue); | ||
2458 | spin_lock_irq(get_ccwdev_lock(cdev)); | ||
2459 | rc = 0; | ||
2460 | list_for_each_entry_safe(cqr, n, &device->ccw_queue, devlist) { | ||
2461 | /* Check status and move request to flush_queue */ | ||
2462 | if (cqr->status == DASD_CQR_IN_IO) { | ||
2463 | rc = device->discipline->term_IO(cqr); | ||
2464 | if (rc) { | ||
2465 | /* unable to terminate request */ | ||
2466 | dev_err(&device->cdev->dev, | ||
2467 | "Unable to terminate request %p " | ||
2468 | "on suspend\n", cqr); | ||
2469 | spin_unlock_irq(get_ccwdev_lock(cdev)); | ||
2470 | dasd_put_device(device); | ||
2471 | return rc; | ||
2472 | } | ||
2473 | } | ||
2474 | list_move_tail(&cqr->devlist, &freeze_queue); | ||
2475 | } | ||
2476 | |||
2477 | spin_unlock_irq(get_ccwdev_lock(cdev)); | ||
2478 | |||
2479 | list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { | ||
2480 | wait_event(dasd_flush_wq, | ||
2481 | (cqr->status != DASD_CQR_CLEAR_PENDING)); | ||
2482 | if (cqr->status == DASD_CQR_CLEARED) | ||
2483 | cqr->status = DASD_CQR_QUEUED; | ||
2484 | } | ||
2485 | /* move freeze_queue to start of the ccw_queue */ | ||
2486 | spin_lock_irq(get_ccwdev_lock(cdev)); | ||
2487 | list_splice_tail(&freeze_queue, &device->ccw_queue); | ||
2488 | spin_unlock_irq(get_ccwdev_lock(cdev)); | ||
2489 | |||
2490 | if (device->discipline->freeze) | ||
2491 | rc = device->discipline->freeze(device); | ||
2492 | |||
2493 | dasd_put_device(device); | ||
2494 | return rc; | ||
2495 | } | ||
2496 | EXPORT_SYMBOL_GPL(dasd_generic_pm_freeze); | ||
2497 | |||
2498 | int dasd_generic_restore_device(struct ccw_device *cdev) | ||
2499 | { | ||
2500 | struct dasd_device *device = dasd_device_from_cdev(cdev); | ||
2501 | int rc = 0; | ||
2502 | |||
2503 | if (IS_ERR(device)) | ||
2504 | return PTR_ERR(device); | ||
2505 | |||
2506 | dasd_schedule_device_bh(device); | ||
2507 | if (device->block) | ||
2508 | dasd_schedule_block_bh(device->block); | ||
2509 | |||
2510 | if (device->discipline->restore) | ||
2511 | rc = device->discipline->restore(device); | ||
2512 | |||
2513 | dasd_put_device(device); | ||
2514 | return rc; | ||
2515 | } | ||
2516 | EXPORT_SYMBOL_GPL(dasd_generic_restore_device); | ||
2517 | |||
2413 | static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, | 2518 | static struct dasd_ccw_req *dasd_generic_build_rdc(struct dasd_device *device, |
2414 | void *rdc_buffer, | 2519 | void *rdc_buffer, |
2415 | int rdc_buffer_size, | 2520 | int rdc_buffer_size, |
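dasd_generic_pm_freeze() and dasd_generic_restore_device() are exported so the individual DASD discipline drivers can plug them into the new ccw_driver hooks; the dasd_eckd hunk below does this for .freeze. A hedged sketch of the full wiring follows; the "example" names and the .thaw/.restore assignments are assumptions for illustration, since only the .freeze line is visible in this excerpt.

/* Sketch of how a DASD discipline driver could wire the generic PM
 * helpers into its ccw_driver.  Only .freeze is visible in the hunk
 * below; the rest is assumed for illustration. */
static struct ccw_driver example_dasd_driver = {
	.name	     = "dasd-example",		/* illustrative name */
	.owner	     = THIS_MODULE,
	.ids	     = example_ids,		/* assumed device-id table */
	.probe	     = example_probe,		/* assumed */
	.remove	     = dasd_generic_remove,
	.set_offline = dasd_generic_set_offline,
	.set_online  = example_set_online,	/* assumed */
	.notify	     = dasd_generic_notify,
	.freeze	     = dasd_generic_pm_freeze,
	.thaw	     = dasd_generic_restore_device,
	.restore     = dasd_generic_restore_device,
};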
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index e77666c8e6c0..4cac5b54f26a 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -1098,6 +1098,7 @@ dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid) | |||
1098 | spin_unlock(&dasd_devmap_lock); | 1098 | spin_unlock(&dasd_devmap_lock); |
1099 | return 0; | 1099 | return 0; |
1100 | } | 1100 | } |
1101 | EXPORT_SYMBOL_GPL(dasd_get_uid); | ||
1101 | 1102 | ||
1102 | /* | 1103 | /* |
1103 | * Register the given device unique identifier into devmap struct. | 1104 | * Register the given device unique identifier into devmap struct. |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index cf0cfdba1244..1c28ec3e4ccb 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -5,10 +5,9 @@ | |||
5 | * Carsten Otte <Cotte@de.ibm.com> | 5 | * Carsten Otte <Cotte@de.ibm.com> |
6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
7 | * Bugreports.to..: <Linux390@de.ibm.com> | 7 | * Bugreports.to..: <Linux390@de.ibm.com> |
8 | * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 | 8 | * Copyright IBM Corp. 1999, 2009 |
9 | * EMC Symmetrix ioctl Copyright EMC Corporation, 2008 | 9 | * EMC Symmetrix ioctl Copyright EMC Corporation, 2008 |
10 | * Author.........: Nigel Hislop <hislop_nigel@emc.com> | 10 | * Author.........: Nigel Hislop <hislop_nigel@emc.com> |
11 | * | ||
12 | */ | 11 | */ |
13 | 12 | ||
14 | #define KMSG_COMPONENT "dasd" | 13 | #define KMSG_COMPONENT "dasd" |
@@ -104,17 +103,6 @@ dasd_eckd_set_online(struct ccw_device *cdev) | |||
104 | return dasd_generic_set_online(cdev, &dasd_eckd_discipline); | 103 | return dasd_generic_set_online(cdev, &dasd_eckd_discipline); |
105 | } | 104 | } |
106 | 105 | ||
107 | static struct ccw_driver dasd_eckd_driver = { | ||
108 | .name = "dasd-eckd", | ||
109 | .owner = THIS_MODULE, | ||
110 | .ids = dasd_eckd_ids, | ||
111 | .probe = dasd_eckd_probe, | ||
112 | .remove = dasd_generic_remove, | ||
113 | .set_offline = dasd_generic_set_offline, | ||
114 | .set_online = dasd_eckd_set_online, | ||
115 | .notify = dasd_generic_notify, | ||
116 | }; | ||
117 | |||
118 | static const int sizes_trk0[] = { 28, 148, 84 }; | 106 | static const int sizes_trk0[] = { 28, 148, 84 }; |
119 | #define LABEL_SIZE 140 | 107 | #define LABEL_SIZE 140 |
120 | 108 | ||
@@ -3236,6 +3224,98 @@ static void dasd_eckd_dump_sense(struct dasd_device *device, | |||
3236 | dasd_eckd_dump_sense_ccw(device, req, irb); | 3224 | dasd_eckd_dump_sense_ccw(device, req, irb); |
3237 | } | 3225 | } |
3238 | 3226 | ||
3227 | int dasd_eckd_pm_freeze(struct dasd_device *device) | ||
3228 | { | ||
3229 | /* | ||
3230 | * the device should be disconnected from our LCU structure | ||
3231 | * on restore we will reconnect it and reread LCU specific | ||
3232 | * information like PAV support that might have changed | ||
3233 | */ | ||
3234 | dasd_alias_remove_device(device); | ||
3235 | dasd_alias_disconnect_device_from_lcu(device); | ||
3236 | |||
3237 | return 0; | ||
3238 | } | ||
3239 | |||
3240 | int dasd_eckd_restore_device(struct dasd_device *device) | ||
3241 | { | ||
3242 | struct dasd_eckd_private *private; | ||
3243 | int is_known, rc; | ||
3244 | struct dasd_uid temp_uid; | ||
3245 | |||
3246 | /* allow new IO again */ | ||
3247 | device->stopped &= ~DASD_STOPPED_PM; | ||
3248 | |||
3249 | private = (struct dasd_eckd_private *) device->private; | ||
3250 | |||
3251 | /* Read Configuration Data */ | ||
3252 | rc = dasd_eckd_read_conf(device); | ||
3253 | if (rc) | ||
3254 | goto out_err; | ||
3255 | |||
3256 | /* Generate device unique id and register in devmap */ | ||
3257 | rc = dasd_eckd_generate_uid(device, &private->uid); | ||
3258 | dasd_get_uid(device->cdev, &temp_uid); | ||
3259 | if (memcmp(&private->uid, &temp_uid, sizeof(struct dasd_uid)) != 0) | ||
3260 | dev_err(&device->cdev->dev, "The UID of the DASD has changed\n"); | ||
3261 | if (rc) | ||
3262 | goto out_err; | ||
3263 | dasd_set_uid(device->cdev, &private->uid); | ||
3264 | |||
3265 | /* register lcu with alias handling, enable PAV if this is a new lcu */ | ||
3266 | is_known = dasd_alias_make_device_known_to_lcu(device); | ||
3267 | if (is_known < 0) | ||
3268 | return is_known; | ||
3269 | if (!is_known) { | ||
3270 | /* new lcu found */ | ||
3271 | rc = dasd_eckd_validate_server(device); /* will switch pav on */ | ||
3272 | if (rc) | ||
3273 | goto out_err; | ||
3274 | } | ||
3275 | |||
3276 | /* Read Feature Codes */ | ||
3277 | rc = dasd_eckd_read_features(device); | ||
3278 | if (rc) | ||
3279 | goto out_err; | ||
3280 | |||
3281 | /* Read Device Characteristics */ | ||
3282 | memset(&private->rdc_data, 0, sizeof(private->rdc_data)); | ||
3283 | rc = dasd_generic_read_dev_chars(device, "ECKD", | ||
3284 | &private->rdc_data, 64); | ||
3285 | if (rc) { | ||
3286 | DBF_EVENT(DBF_WARNING, | ||
3287 | "Read device characteristics failed, rc=%d for " | ||
3288 | "device: %s", rc, dev_name(&device->cdev->dev)); | ||
3289 | goto out_err; | ||
3290 | } | ||
3291 | |||
3292 | /* add device to alias management */ | ||
3293 | dasd_alias_add_device(device); | ||
3294 | |||
3295 | return 0; | ||
3296 | |||
3297 | out_err: | ||
3298 | /* | ||
3299 | * if the resume failed for the DASD we put it in | ||
3300 | * an UNRESUMED stop state | ||
3301 | */ | ||
3302 | device->stopped |= DASD_UNRESUMED_PM; | ||
3303 | return 0; | ||
3304 | } | ||
3305 | |||
3306 | static struct ccw_driver dasd_eckd_driver = { | ||
3307 | .name = "dasd-eckd", | ||
3308 | .owner = THIS_MODULE, | ||
3309 | .ids = dasd_eckd_ids, | ||
3310 | .probe = dasd_eckd_probe, | ||
3311 | .remove = dasd_generic_remove, | ||
3312 | .set_offline = dasd_generic_set_offline, | ||
3313 | .set_online = dasd_eckd_set_online, | ||
3314 | .notify = dasd_generic_notify, | ||
3315 | .freeze = dasd_generic_pm_freeze, | ||
3316 | .thaw = dasd_generic_restore_device, | ||
3317 | .restore = dasd_generic_restore_device, | ||
3318 | }; | ||
3239 | 3319 | ||
3240 | /* | 3320 | /* |
3241 | * max_blocks is dependent on the amount of storage that is available | 3321 | * max_blocks is dependent on the amount of storage that is available |
@@ -3274,6 +3354,8 @@ static struct dasd_discipline dasd_eckd_discipline = { | |||
3274 | .dump_sense_dbf = dasd_eckd_dump_sense_dbf, | 3354 | .dump_sense_dbf = dasd_eckd_dump_sense_dbf, |
3275 | .fill_info = dasd_eckd_fill_info, | 3355 | .fill_info = dasd_eckd_fill_info, |
3276 | .ioctl = dasd_eckd_ioctl, | 3356 | .ioctl = dasd_eckd_ioctl, |
3357 | .freeze = dasd_eckd_pm_freeze, | ||
3358 | .restore = dasd_eckd_restore_device, | ||
3277 | }; | 3359 | }; |
3278 | 3360 | ||
3279 | static int __init | 3361 | static int __init |
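The detail worth highlighting in dasd_eckd_restore_device() above is the error policy: the failure paths funnel into out_err, which does not propagate an error but sets DASD_UNRESUMED_PM and returns 0, so a single unreachable or reconfigured DASD cannot abort the resume of the whole system; the UID comparison likewise only warns if the regenerated UID differs from the one saved in the devmap. A reduced sketch of that soft-fail pattern follows; reinit_hw() and reregister_alias() are invented stand-ins for the read-configuration and LCU/alias steps, not functions from the patch.

/*
 * Illustration of the "soft-fail" restore policy used above; reinit_hw()
 * and reregister_alias() are placeholders, not functions from the patch.
 */
static int example_restore_device(struct dasd_device *device)
{
	device->stopped &= ~DASD_STOPPED_PM;	/* allow new I/O again */

	if (reinit_hw(device))			/* e.g. reread config data */
		goto out_err;
	if (reregister_alias(device))		/* e.g. reconnect to the LCU */
		goto out_err;
	return 0;

out_err:
	/* never fail the system resume; just remember the broken device */
	device->stopped |= DASD_UNRESUMED_PM;
	return 0;
}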
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index 597c6ffdb9f2..e21ee735f926 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c | |||
@@ -2,8 +2,7 @@ | |||
2 | * File...........: linux/drivers/s390/block/dasd_fba.c | 2 | * File...........: linux/drivers/s390/block/dasd_fba.c |
3 | * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> | 3 | * Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com> |
4 | * Bugreports.to..: <Linux390@de.ibm.com> | 4 | * Bugreports.to..: <Linux390@de.ibm.com> |
5 | * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 | 5 | * Copyright IBM Corp. 1999, 2009 |
6 | * | ||
7 | */ | 6 | */ |
8 | 7 | ||
9 | #define KMSG_COMPONENT "dasd" | 8 | #define KMSG_COMPONENT "dasd" |
@@ -75,6 +74,9 @@ static struct ccw_driver dasd_fba_driver = { | |||
75 | .set_offline = dasd_generic_set_offline, | 74 | .set_offline = dasd_generic_set_offline, |
76 | .set_online = dasd_fba_set_online, | 75 | .set_online = dasd_fba_set_online, |
77 | .notify = dasd_generic_notify, | 76 | .notify = dasd_generic_notify, |
77 | .freeze = dasd_generic_pm_freeze, | ||
78 | .thaw = dasd_generic_restore_device, | ||
79 | .restore = dasd_generic_restore_device, | ||
78 | }; | 80 | }; |
79 | 81 | ||
80 | static void | 82 | static void |
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index f97ceb795078..fd63b2f2bda9 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h | |||
@@ -4,8 +4,7 @@ | |||
4 | * Horst Hummel <Horst.Hummel@de.ibm.com> | 4 | * Horst Hummel <Horst.Hummel@de.ibm.com> |
5 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 5 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
6 | * Bugreports.to..: <Linux390@de.ibm.com> | 6 | * Bugreports.to..: <Linux390@de.ibm.com> |
7 | * (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 1999,2000 | 7 | * Copyright IBM Corp. 1999, 2009 |
8 | * | ||
9 | */ | 8 | */ |
10 | 9 | ||
11 | #ifndef DASD_INT_H | 10 | #ifndef DASD_INT_H |
@@ -295,6 +294,10 @@ struct dasd_discipline { | |||
295 | int (*fill_geometry) (struct dasd_block *, struct hd_geometry *); | 294 | int (*fill_geometry) (struct dasd_block *, struct hd_geometry *); |
296 | int (*fill_info) (struct dasd_device *, struct dasd_information2_t *); | 295 | int (*fill_info) (struct dasd_device *, struct dasd_information2_t *); |
297 | int (*ioctl) (struct dasd_block *, unsigned int, void __user *); | 296 | int (*ioctl) (struct dasd_block *, unsigned int, void __user *); |
297 | |||
298 | /* suspend/resume functions */ | ||
299 | int (*freeze) (struct dasd_device *); | ||
300 | int (*restore) (struct dasd_device *); | ||
298 | }; | 301 | }; |
299 | 302 | ||
300 | extern struct dasd_discipline *dasd_diag_discipline_pointer; | 303 | extern struct dasd_discipline *dasd_diag_discipline_pointer; |
@@ -367,6 +370,7 @@ struct dasd_device { | |||
367 | atomic_t tasklet_scheduled; | 370 | atomic_t tasklet_scheduled; |
368 | struct tasklet_struct tasklet; | 371 | struct tasklet_struct tasklet; |
369 | struct work_struct kick_work; | 372 | struct work_struct kick_work; |
373 | struct work_struct restore_device; | ||
370 | struct timer_list timer; | 374 | struct timer_list timer; |
371 | 375 | ||
372 | debug_info_t *debug_area; | 376 | debug_info_t *debug_area; |
@@ -410,6 +414,8 @@ struct dasd_block { | |||
410 | #define DASD_STOPPED_PENDING 4 /* long busy */ | 414 | #define DASD_STOPPED_PENDING 4 /* long busy */ |
411 | #define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */ | 415 | #define DASD_STOPPED_DC_WAIT 8 /* disconnected, wait */ |
412 | #define DASD_STOPPED_SU 16 /* summary unit check handling */ | 416 | #define DASD_STOPPED_SU 16 /* summary unit check handling */ |
417 | #define DASD_STOPPED_PM 32 /* pm state transition */ | ||
418 | #define DASD_UNRESUMED_PM 64 /* pm resume failed state */ | ||
413 | 419 | ||
414 | /* per device flags */ | 420 | /* per device flags */ |
415 | #define DASD_FLAG_OFFLINE 3 /* device is in offline processing */ | 421 | #define DASD_FLAG_OFFLINE 3 /* device is in offline processing */ |
@@ -556,6 +562,7 @@ void dasd_free_block(struct dasd_block *); | |||
556 | void dasd_enable_device(struct dasd_device *); | 562 | void dasd_enable_device(struct dasd_device *); |
557 | void dasd_set_target_state(struct dasd_device *, int); | 563 | void dasd_set_target_state(struct dasd_device *, int); |
558 | void dasd_kick_device(struct dasd_device *); | 564 | void dasd_kick_device(struct dasd_device *); |
565 | void dasd_restore_device(struct dasd_device *); | ||
559 | 566 | ||
560 | void dasd_add_request_head(struct dasd_ccw_req *); | 567 | void dasd_add_request_head(struct dasd_ccw_req *); |
561 | void dasd_add_request_tail(struct dasd_ccw_req *); | 568 | void dasd_add_request_tail(struct dasd_ccw_req *); |
@@ -578,6 +585,8 @@ int dasd_generic_set_online(struct ccw_device *, struct dasd_discipline *); | |||
578 | int dasd_generic_set_offline (struct ccw_device *cdev); | 585 | int dasd_generic_set_offline (struct ccw_device *cdev); |
579 | int dasd_generic_notify(struct ccw_device *, int); | 586 | int dasd_generic_notify(struct ccw_device *, int); |
580 | void dasd_generic_handle_state_change(struct dasd_device *); | 587 | void dasd_generic_handle_state_change(struct dasd_device *); |
588 | int dasd_generic_pm_freeze(struct ccw_device *); | ||
589 | int dasd_generic_restore_device(struct ccw_device *); | ||
581 | 590 | ||
582 | int dasd_generic_read_dev_chars(struct dasd_device *, char *, void *, int); | 591 | int dasd_generic_read_dev_chars(struct dasd_device *, char *, void *, int); |
583 | char *dasd_get_sense(struct irb *); | 592 | char *dasd_get_sense(struct irb *); |
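DASD_STOPPED_PM and DASD_UNRESUMED_PM extend the existing stopped bitmask (1, 2, 4, 8 and 16 are already taken), so suspend handling can block and unblock I/O without disturbing the other stop reasons. A small self-contained illustration of how such a mask behaves; the two values are copied from the header above, the rest is illustrative.

/* Stand-alone illustration of the stopped bitmask; compiles with any C compiler. */
#include <stdio.h>

#define DASD_STOPPED_PM   32	/* pm state transition */
#define DASD_UNRESUMED_PM 64	/* pm resume failed state */

int main(void)
{
	unsigned int stopped = 0;

	stopped |= DASD_STOPPED_PM;	/* freeze: block new I/O */
	stopped &= ~DASD_STOPPED_PM;	/* restore: allow I/O again */
	stopped |= DASD_UNRESUMED_PM;	/* restore failed: remember it */

	/* I/O only runs when no stop reason at all is left */
	printf("device is %s\n", stopped ? "stopped" : "operational");
	return 0;
}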
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c index b21caf177e37..016f9e9d2591 100644 --- a/drivers/s390/block/dcssblk.c +++ b/drivers/s390/block/dcssblk.c | |||
@@ -14,10 +14,11 @@ | |||
14 | #include <linux/init.h> | 14 | #include <linux/init.h> |
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/blkdev.h> | 16 | #include <linux/blkdev.h> |
17 | #include <asm/extmem.h> | ||
18 | #include <asm/io.h> | ||
19 | #include <linux/completion.h> | 17 | #include <linux/completion.h> |
20 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
19 | #include <linux/platform_device.h> | ||
20 | #include <asm/extmem.h> | ||
21 | #include <asm/io.h> | ||
21 | 22 | ||
22 | #define DCSSBLK_NAME "dcssblk" | 23 | #define DCSSBLK_NAME "dcssblk" |
23 | #define DCSSBLK_MINORS_PER_DISK 1 | 24 | #define DCSSBLK_MINORS_PER_DISK 1 |
@@ -940,11 +941,94 @@ dcssblk_check_params(void) | |||
940 | } | 941 | } |
941 | 942 | ||
942 | /* | 943 | /* |
944 | * Suspend / Resume | ||
945 | */ | ||
946 | static int dcssblk_freeze(struct device *dev) | ||
947 | { | ||
948 | struct dcssblk_dev_info *dev_info; | ||
949 | int rc = 0; | ||
950 | |||
951 | list_for_each_entry(dev_info, &dcssblk_devices, lh) { | ||
952 | switch (dev_info->segment_type) { | ||
953 | case SEG_TYPE_SR: | ||
954 | case SEG_TYPE_ER: | ||
955 | case SEG_TYPE_SC: | ||
956 | if (!dev_info->is_shared) | ||
957 | rc = -EINVAL; | ||
958 | break; | ||
959 | default: | ||
960 | rc = -EINVAL; | ||
961 | break; | ||
962 | } | ||
963 | if (rc) | ||
964 | break; | ||
965 | } | ||
966 | if (rc) | ||
967 | pr_err("Suspend failed because device %s is writeable.\n", | ||
968 | dev_info->segment_name); | ||
969 | return rc; | ||
970 | } | ||
971 | |||
972 | static int dcssblk_restore(struct device *dev) | ||
973 | { | ||
974 | struct dcssblk_dev_info *dev_info; | ||
975 | struct segment_info *entry; | ||
976 | unsigned long start, end; | ||
977 | int rc = 0; | ||
978 | |||
979 | list_for_each_entry(dev_info, &dcssblk_devices, lh) { | ||
980 | list_for_each_entry(entry, &dev_info->seg_list, lh) { | ||
981 | segment_unload(entry->segment_name); | ||
982 | rc = segment_load(entry->segment_name, SEGMENT_SHARED, | ||
983 | &start, &end); | ||
984 | if (rc < 0) { | ||
985 | // TODO in_use check ? | ||
986 | segment_warning(rc, entry->segment_name); | ||
987 | goto out_panic; | ||
988 | } | ||
989 | if (start != entry->start || end != entry->end) { | ||
990 | pr_err("Mismatch of start / end address after " | ||
991 | "resuming device %s\n", | ||
992 | entry->segment_name); | ||
993 | goto out_panic; | ||
994 | } | ||
995 | } | ||
996 | } | ||
997 | return 0; | ||
998 | out_panic: | ||
999 | panic("fatal dcssblk resume error\n"); | ||
1000 | } | ||
1001 | |||
1002 | static int dcssblk_thaw(struct device *dev) | ||
1003 | { | ||
1004 | return 0; | ||
1005 | } | ||
1006 | |||
1007 | static struct dev_pm_ops dcssblk_pm_ops = { | ||
1008 | .freeze = dcssblk_freeze, | ||
1009 | .thaw = dcssblk_thaw, | ||
1010 | .restore = dcssblk_restore, | ||
1011 | }; | ||
1012 | |||
1013 | static struct platform_driver dcssblk_pdrv = { | ||
1014 | .driver = { | ||
1015 | .name = "dcssblk", | ||
1016 | .owner = THIS_MODULE, | ||
1017 | .pm = &dcssblk_pm_ops, | ||
1018 | }, | ||
1019 | }; | ||
1020 | |||
1021 | static struct platform_device *dcssblk_pdev; | ||
1022 | |||
1023 | |||
1024 | /* | ||
943 | * The init/exit functions. | 1025 | * The init/exit functions. |
944 | */ | 1026 | */ |
945 | static void __exit | 1027 | static void __exit |
946 | dcssblk_exit(void) | 1028 | dcssblk_exit(void) |
947 | { | 1029 | { |
1030 | platform_device_unregister(dcssblk_pdev); | ||
1031 | platform_driver_unregister(&dcssblk_pdrv); | ||
948 | root_device_unregister(dcssblk_root_dev); | 1032 | root_device_unregister(dcssblk_root_dev); |
949 | unregister_blkdev(dcssblk_major, DCSSBLK_NAME); | 1033 | unregister_blkdev(dcssblk_major, DCSSBLK_NAME); |
950 | } | 1034 | } |
@@ -954,30 +1038,44 @@ dcssblk_init(void) | |||
954 | { | 1038 | { |
955 | int rc; | 1039 | int rc; |
956 | 1040 | ||
957 | dcssblk_root_dev = root_device_register("dcssblk"); | 1041 | rc = platform_driver_register(&dcssblk_pdrv); |
958 | if (IS_ERR(dcssblk_root_dev)) | 1042 | if (rc) |
959 | return PTR_ERR(dcssblk_root_dev); | ||
960 | rc = device_create_file(dcssblk_root_dev, &dev_attr_add); | ||
961 | if (rc) { | ||
962 | root_device_unregister(dcssblk_root_dev); | ||
963 | return rc; | 1043 | return rc; |
1044 | |||
1045 | dcssblk_pdev = platform_device_register_simple("dcssblk", -1, NULL, | ||
1046 | 0); | ||
1047 | if (IS_ERR(dcssblk_pdev)) { | ||
1048 | rc = PTR_ERR(dcssblk_pdev); | ||
1049 | goto out_pdrv; | ||
964 | } | 1050 | } |
965 | rc = device_create_file(dcssblk_root_dev, &dev_attr_remove); | 1051 | |
966 | if (rc) { | 1052 | dcssblk_root_dev = root_device_register("dcssblk"); |
967 | root_device_unregister(dcssblk_root_dev); | 1053 | if (IS_ERR(dcssblk_root_dev)) { |
968 | return rc; | 1054 | rc = PTR_ERR(dcssblk_root_dev); |
1055 | goto out_pdev; | ||
969 | } | 1056 | } |
1057 | rc = device_create_file(dcssblk_root_dev, &dev_attr_add); | ||
1058 | if (rc) | ||
1059 | goto out_root; | ||
1060 | rc = device_create_file(dcssblk_root_dev, &dev_attr_remove); | ||
1061 | if (rc) | ||
1062 | goto out_root; | ||
970 | rc = register_blkdev(0, DCSSBLK_NAME); | 1063 | rc = register_blkdev(0, DCSSBLK_NAME); |
971 | if (rc < 0) { | 1064 | if (rc < 0) |
972 | root_device_unregister(dcssblk_root_dev); | 1065 | goto out_root; |
973 | return rc; | ||
974 | } | ||
975 | dcssblk_major = rc; | 1066 | dcssblk_major = rc; |
976 | init_rwsem(&dcssblk_devices_sem); | 1067 | init_rwsem(&dcssblk_devices_sem); |
977 | 1068 | ||
978 | dcssblk_check_params(); | 1069 | dcssblk_check_params(); |
979 | |||
980 | return 0; | 1070 | return 0; |
1071 | |||
1072 | out_root: | ||
1073 | root_device_unregister(dcssblk_root_dev); | ||
1074 | out_pdev: | ||
1075 | platform_device_unregister(dcssblk_pdev); | ||
1076 | out_pdrv: | ||
1077 | platform_driver_unregister(&dcssblk_pdrv); | ||
1078 | return rc; | ||
981 | } | 1079 | } |
982 | 1080 | ||
983 | module_init(dcssblk_init); | 1081 | module_init(dcssblk_init); |
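dcssblk has no ccw or iucv device of its own, so the hunk above registers a platform driver plus one anonymous platform device purely to receive dev_pm_ops callbacks: freeze refuses to suspend while any segment is mapped non-shared or of an unsupported type, and restore reloads every segment and panics if its start or end address moved, since the hibernation image still refers to the old mapping. The registration trick on its own looks roughly like the condensed sketch below; the real init additionally sets up the root device, the sysfs attributes and the block device.

/*
 * Condensed sketch of the "dummy platform device for PM callbacks" pattern;
 * the dcssblk_* callbacks are the ones from the hunk above.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/err.h>

static struct dev_pm_ops example_pm_ops = {
	.freeze  = dcssblk_freeze,
	.thaw    = dcssblk_thaw,
	.restore = dcssblk_restore,
};

static struct platform_driver example_pdrv = {
	.driver = {
		.name  = "dcssblk",
		.owner = THIS_MODULE,
		.pm    = &example_pm_ops,
	},
};

static struct platform_device *example_pdev;

static int __init example_init(void)
{
	int rc;

	rc = platform_driver_register(&example_pdrv);
	if (rc)
		return rc;
	/* one anonymous instance, just so the PM core has a device to call */
	example_pdev = platform_device_register_simple("dcssblk", -1, NULL, 0);
	if (IS_ERR(example_pdev)) {
		platform_driver_unregister(&example_pdrv);
		return PTR_ERR(example_pdev);
	}
	return 0;
}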
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 0ae0c83ef879..2e9e1ecd6d82 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c | |||
@@ -39,7 +39,10 @@ | |||
39 | #include <linux/hdreg.h> /* HDIO_GETGEO */ | 39 | #include <linux/hdreg.h> /* HDIO_GETGEO */ |
40 | #include <linux/sysdev.h> | 40 | #include <linux/sysdev.h> |
41 | #include <linux/bio.h> | 41 | #include <linux/bio.h> |
42 | #include <linux/suspend.h> | ||
43 | #include <linux/platform_device.h> | ||
42 | #include <asm/uaccess.h> | 44 | #include <asm/uaccess.h> |
45 | #include <asm/checksum.h> | ||
43 | 46 | ||
44 | #define XPRAM_NAME "xpram" | 47 | #define XPRAM_NAME "xpram" |
45 | #define XPRAM_DEVS 1 /* one partition */ | 48 | #define XPRAM_DEVS 1 /* one partition */ |
@@ -48,6 +51,7 @@ | |||
48 | typedef struct { | 51 | typedef struct { |
49 | unsigned int size; /* size of xpram segment in pages */ | 52 | unsigned int size; /* size of xpram segment in pages */ |
50 | unsigned int offset; /* start page of xpram segment */ | 53 | unsigned int offset; /* start page of xpram segment */ |
54 | unsigned int csum; /* partition checksum for suspend */ | ||
51 | } xpram_device_t; | 55 | } xpram_device_t; |
52 | 56 | ||
53 | static xpram_device_t xpram_devices[XPRAM_MAX_DEVS]; | 57 | static xpram_device_t xpram_devices[XPRAM_MAX_DEVS]; |
@@ -138,7 +142,7 @@ static long xpram_page_out (unsigned long page_addr, unsigned int xpage_index) | |||
138 | /* | 142 | /* |
139 | * Check if xpram is available. | 143 | * Check if xpram is available. |
140 | */ | 144 | */ |
141 | static int __init xpram_present(void) | 145 | static int xpram_present(void) |
142 | { | 146 | { |
143 | unsigned long mem_page; | 147 | unsigned long mem_page; |
144 | int rc; | 148 | int rc; |
@@ -154,7 +158,7 @@ static int __init xpram_present(void) | |||
154 | /* | 158 | /* |
155 | * Return index of the last available xpram page. | 159 | * Return index of the last available xpram page. |
156 | */ | 160 | */ |
157 | static unsigned long __init xpram_highest_page_index(void) | 161 | static unsigned long xpram_highest_page_index(void) |
158 | { | 162 | { |
159 | unsigned int page_index, add_bit; | 163 | unsigned int page_index, add_bit; |
160 | unsigned long mem_page; | 164 | unsigned long mem_page; |
@@ -383,6 +387,106 @@ out: | |||
383 | } | 387 | } |
384 | 388 | ||
385 | /* | 389 | /* |
390 | * Save checksums for all partitions. | ||
391 | */ | ||
392 | static int xpram_save_checksums(void) | ||
393 | { | ||
394 | unsigned long mem_page; | ||
395 | int rc, i; | ||
396 | |||
397 | rc = 0; | ||
398 | mem_page = (unsigned long) __get_free_page(GFP_KERNEL); | ||
399 | if (!mem_page) | ||
400 | return -ENOMEM; | ||
401 | for (i = 0; i < xpram_devs; i++) { | ||
402 | rc = xpram_page_in(mem_page, xpram_devices[i].offset); | ||
403 | if (rc) | ||
404 | goto fail; | ||
405 | xpram_devices[i].csum = csum_partial((const void *) mem_page, | ||
406 | PAGE_SIZE, 0); | ||
407 | } | ||
408 | fail: | ||
409 | free_page(mem_page); | ||
410 | return rc ? -ENXIO : 0; | ||
411 | } | ||
412 | |||
413 | /* | ||
414 | * Verify checksums for all partitions. | ||
415 | */ | ||
416 | static int xpram_validate_checksums(void) | ||
417 | { | ||
418 | unsigned long mem_page; | ||
419 | unsigned int csum; | ||
420 | int rc, i; | ||
421 | |||
422 | rc = 0; | ||
423 | mem_page = (unsigned long) __get_free_page(GFP_KERNEL); | ||
424 | if (!mem_page) | ||
425 | return -ENOMEM; | ||
426 | for (i = 0; i < xpram_devs; i++) { | ||
427 | rc = xpram_page_in(mem_page, xpram_devices[i].offset); | ||
428 | if (rc) | ||
429 | goto fail; | ||
430 | csum = csum_partial((const void *) mem_page, PAGE_SIZE, 0); | ||
431 | if (xpram_devices[i].csum != csum) { | ||
432 | rc = -EINVAL; | ||
433 | goto fail; | ||
434 | } | ||
435 | } | ||
436 | fail: | ||
437 | free_page(mem_page); | ||
438 | return rc ? -ENXIO : 0; | ||
439 | } | ||
440 | |||
441 | /* | ||
442 | * Resume failed: Print error message and call panic. | ||
443 | */ | ||
444 | static void xpram_resume_error(const char *message) | ||
445 | { | ||
446 | pr_err("Resume error: %s\n", message); | ||
447 | panic("xpram resume error\n"); | ||
448 | } | ||
449 | |||
450 | /* | ||
451 | * Check if xpram setup changed between suspend and resume. | ||
452 | */ | ||
453 | static int xpram_restore(struct device *dev) | ||
454 | { | ||
455 | if (!xpram_pages) | ||
456 | return 0; | ||
457 | if (xpram_present() != 0) | ||
458 | xpram_resume_error("xpram disappeared"); | ||
459 | if (xpram_pages != xpram_highest_page_index() + 1) | ||
460 | xpram_resume_error("Size of xpram changed"); | ||
461 | if (xpram_validate_checksums()) | ||
462 | xpram_resume_error("Data of xpram changed"); | ||
463 | return 0; | ||
464 | } | ||
465 | |||
466 | /* | ||
467 | * Save necessary state in suspend. | ||
468 | */ | ||
469 | static int xpram_freeze(struct device *dev) | ||
470 | { | ||
471 | return xpram_save_checksums(); | ||
472 | } | ||
473 | |||
474 | static struct dev_pm_ops xpram_pm_ops = { | ||
475 | .freeze = xpram_freeze, | ||
476 | .restore = xpram_restore, | ||
477 | }; | ||
478 | |||
479 | static struct platform_driver xpram_pdrv = { | ||
480 | .driver = { | ||
481 | .name = XPRAM_NAME, | ||
482 | .owner = THIS_MODULE, | ||
483 | .pm = &xpram_pm_ops, | ||
484 | }, | ||
485 | }; | ||
486 | |||
487 | static struct platform_device *xpram_pdev; | ||
488 | |||
489 | /* | ||
386 | * Finally, the init/exit functions. | 490 | * Finally, the init/exit functions. |
387 | */ | 491 | */ |
388 | static void __exit xpram_exit(void) | 492 | static void __exit xpram_exit(void) |
@@ -394,6 +498,8 @@ static void __exit xpram_exit(void) | |||
394 | put_disk(xpram_disks[i]); | 498 | put_disk(xpram_disks[i]); |
395 | } | 499 | } |
396 | unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME); | 500 | unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME); |
501 | platform_device_unregister(xpram_pdev); | ||
502 | platform_driver_unregister(&xpram_pdrv); | ||
397 | } | 503 | } |
398 | 504 | ||
399 | static int __init xpram_init(void) | 505 | static int __init xpram_init(void) |
@@ -411,7 +517,24 @@ static int __init xpram_init(void) | |||
411 | rc = xpram_setup_sizes(xpram_pages); | 517 | rc = xpram_setup_sizes(xpram_pages); |
412 | if (rc) | 518 | if (rc) |
413 | return rc; | 519 | return rc; |
414 | return xpram_setup_blkdev(); | 520 | rc = platform_driver_register(&xpram_pdrv); |
521 | if (rc) | ||
522 | return rc; | ||
523 | xpram_pdev = platform_device_register_simple(XPRAM_NAME, -1, NULL, 0); | ||
524 | if (IS_ERR(xpram_pdev)) { | ||
525 | rc = PTR_ERR(xpram_pdev); | ||
526 | goto fail_platform_driver_unregister; | ||
527 | } | ||
528 | rc = xpram_setup_blkdev(); | ||
529 | if (rc) | ||
530 | goto fail_platform_device_unregister; | ||
531 | return 0; | ||
532 | |||
533 | fail_platform_device_unregister: | ||
534 | platform_device_unregister(xpram_pdev); | ||
535 | fail_platform_driver_unregister: | ||
536 | platform_driver_unregister(&xpram_pdrv); | ||
537 | return rc; | ||
415 | } | 538 | } |
416 | 539 | ||
417 | module_init(xpram_init); | 540 | module_init(xpram_init); |
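Expanded storage is not part of the hibernation image, so the xpram patch saves a checksum of the first page of every partition at freeze time and verifies it (together with the partition size and presence) at restore; any mismatch means suspended users of the device would see different data, and the driver deliberately panics rather than continue. The save/verify idea in isolation, with a trivial additive checksum standing in for the kernel's csum_partial():

/*
 * Stand-alone sketch of the checksum save/verify idea; sum32() is a toy
 * replacement for csum_partial() and "partition" fakes one xpram page.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

static unsigned char partition[PAGE_SIZE];
static uint32_t saved_csum;

static uint32_t sum32(const unsigned char *buf, size_t len)
{
	uint32_t s = 0;

	while (len--)
		s += *buf++;
	return s;
}

static void freeze(void)
{
	saved_csum = sum32(partition, PAGE_SIZE);	/* remember the state */
}

static int restore_ok(void)
{
	return sum32(partition, PAGE_SIZE) == saved_csum;
}

int main(void)
{
	memset(partition, 0xa5, sizeof(partition));
	freeze();
	/* partition[0] ^= 1;	uncomment to simulate changed xpram contents */
	printf("resume %s\n", restore_ok() ? "ok" : "would panic");
	return 0;
}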
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 9ab06e0dad40..b79f31add39c 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c | |||
@@ -1,14 +1,12 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/char/con3215.c | 2 | * 3215 line mode terminal driver. |
3 | * 3215 line mode terminal driver. | ||
4 | * | 3 | * |
5 | * S390 version | 4 | * Copyright IBM Corp. 1999, 2009 |
6 | * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> |
7 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | ||
8 | * | 6 | * |
9 | * Updated: | 7 | * Updated: |
10 | * Aug-2000: Added tab support | 8 | * Aug-2000: Added tab support |
11 | * Dan Morrison, IBM Corporation (dmorriso@cse.buffalo.edu) | 9 | * Dan Morrison, IBM Corporation <dmorriso@cse.buffalo.edu> |
12 | */ | 10 | */ |
13 | 11 | ||
14 | #include <linux/module.h> | 12 | #include <linux/module.h> |
@@ -56,6 +54,7 @@ | |||
56 | #define RAW3215_CLOSING 32 /* set while in close process */ | 54 | #define RAW3215_CLOSING 32 /* set while in close process */ |
57 | #define RAW3215_TIMER_RUNS 64 /* set if the output delay timer is on */ | 55 | #define RAW3215_TIMER_RUNS 64 /* set if the output delay timer is on */ |
58 | #define RAW3215_FLUSHING 128 /* set to flush buffer (no delay) */ | 56 | #define RAW3215_FLUSHING 128 /* set to flush buffer (no delay) */ |
57 | #define RAW3215_FROZEN 256 /* set if 3215 is frozen for suspend */ | ||
59 | 58 | ||
60 | #define TAB_STOP_SIZE 8 /* tab stop size */ | 59 | #define TAB_STOP_SIZE 8 /* tab stop size */ |
61 | 60 | ||
@@ -111,8 +110,8 @@ static struct tty_driver *tty3215_driver; | |||
111 | /* | 110 | /* |
112 | * Get a request structure from the free list | 111 | * Get a request structure from the free list |
113 | */ | 112 | */ |
114 | static inline struct raw3215_req * | 113 | static inline struct raw3215_req *raw3215_alloc_req(void) |
115 | raw3215_alloc_req(void) { | 114 | { |
116 | struct raw3215_req *req; | 115 | struct raw3215_req *req; |
117 | unsigned long flags; | 116 | unsigned long flags; |
118 | 117 | ||
@@ -126,8 +125,8 @@ raw3215_alloc_req(void) { | |||
126 | /* | 125 | /* |
127 | * Put a request structure back to the free list | 126 | * Put a request structure back to the free list |
128 | */ | 127 | */ |
129 | static inline void | 128 | static inline void raw3215_free_req(struct raw3215_req *req) |
130 | raw3215_free_req(struct raw3215_req *req) { | 129 | { |
131 | unsigned long flags; | 130 | unsigned long flags; |
132 | 131 | ||
133 | if (req->type == RAW3215_FREE) | 132 | if (req->type == RAW3215_FREE) |
@@ -145,8 +144,7 @@ raw3215_free_req(struct raw3215_req *req) { | |||
145 | * because a 3215 terminal won't accept a new read before the old one is | 144 | * because a 3215 terminal won't accept a new read before the old one is |
146 | * completed. | 145 | * completed. |
147 | */ | 146 | */ |
148 | static void | 147 | static void raw3215_mk_read_req(struct raw3215_info *raw) |
149 | raw3215_mk_read_req(struct raw3215_info *raw) | ||
150 | { | 148 | { |
151 | struct raw3215_req *req; | 149 | struct raw3215_req *req; |
152 | struct ccw1 *ccw; | 150 | struct ccw1 *ccw; |
@@ -174,8 +172,7 @@ raw3215_mk_read_req(struct raw3215_info *raw) | |||
174 | * buffer to the 3215 device. If a queued write exists it is replaced by | 172 | * buffer to the 3215 device. If a queued write exists it is replaced by |
175 | * the new, probably lengthened request. | 173 | * the new, probably lengthened request. |
176 | */ | 174 | */ |
177 | static void | 175 | static void raw3215_mk_write_req(struct raw3215_info *raw) |
178 | raw3215_mk_write_req(struct raw3215_info *raw) | ||
179 | { | 176 | { |
180 | struct raw3215_req *req; | 177 | struct raw3215_req *req; |
181 | struct ccw1 *ccw; | 178 | struct ccw1 *ccw; |
@@ -251,8 +248,7 @@ raw3215_mk_write_req(struct raw3215_info *raw) | |||
251 | /* | 248 | /* |
252 | * Start a read or a write request | 249 | * Start a read or a write request |
253 | */ | 250 | */ |
254 | static void | 251 | static void raw3215_start_io(struct raw3215_info *raw) |
255 | raw3215_start_io(struct raw3215_info *raw) | ||
256 | { | 252 | { |
257 | struct raw3215_req *req; | 253 | struct raw3215_req *req; |
258 | int res; | 254 | int res; |
@@ -290,8 +286,7 @@ raw3215_start_io(struct raw3215_info *raw) | |||
290 | /* | 286 | /* |
291 | * Function to start a delayed output after RAW3215_TIMEOUT seconds | 287 | * Function to start a delayed output after RAW3215_TIMEOUT seconds |
292 | */ | 288 | */ |
293 | static void | 289 | static void raw3215_timeout(unsigned long __data) |
294 | raw3215_timeout(unsigned long __data) | ||
295 | { | 290 | { |
296 | struct raw3215_info *raw = (struct raw3215_info *) __data; | 291 | struct raw3215_info *raw = (struct raw3215_info *) __data; |
297 | unsigned long flags; | 292 | unsigned long flags; |
@@ -300,8 +295,10 @@ raw3215_timeout(unsigned long __data) | |||
300 | if (raw->flags & RAW3215_TIMER_RUNS) { | 295 | if (raw->flags & RAW3215_TIMER_RUNS) { |
301 | del_timer(&raw->timer); | 296 | del_timer(&raw->timer); |
302 | raw->flags &= ~RAW3215_TIMER_RUNS; | 297 | raw->flags &= ~RAW3215_TIMER_RUNS; |
303 | raw3215_mk_write_req(raw); | 298 | if (!(raw->flags & RAW3215_FROZEN)) { |
304 | raw3215_start_io(raw); | 299 | raw3215_mk_write_req(raw); |
300 | raw3215_start_io(raw); | ||
301 | } | ||
305 | } | 302 | } |
306 | spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); | 303 | spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); |
307 | } | 304 | } |
@@ -312,10 +309,9 @@ raw3215_timeout(unsigned long __data) | |||
312 | * amount of data is bigger than RAW3215_MIN_WRITE. If a write is not | 309 | * amount of data is bigger than RAW3215_MIN_WRITE. If a write is not |
313 | * done immediately a timer is started with a delay of RAW3215_TIMEOUT. | 310 | * done immediately a timer is started with a delay of RAW3215_TIMEOUT. |
314 | */ | 311 | */ |
315 | static inline void | 312 | static inline void raw3215_try_io(struct raw3215_info *raw) |
316 | raw3215_try_io(struct raw3215_info *raw) | ||
317 | { | 313 | { |
318 | if (!(raw->flags & RAW3215_ACTIVE)) | 314 | if (!(raw->flags & RAW3215_ACTIVE) || (raw->flags & RAW3215_FROZEN)) |
319 | return; | 315 | return; |
320 | if (raw->queued_read != NULL) | 316 | if (raw->queued_read != NULL) |
321 | raw3215_start_io(raw); | 317 | raw3215_start_io(raw); |
@@ -359,8 +355,8 @@ static void raw3215_next_io(struct raw3215_info *raw) | |||
359 | /* | 355 | /* |
360 | * Interrupt routine, called from common io layer | 356 | * Interrupt routine, called from common io layer |
361 | */ | 357 | */ |
362 | static void | 358 | static void raw3215_irq(struct ccw_device *cdev, unsigned long intparm, |
363 | raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | 359 | struct irb *irb) |
364 | { | 360 | { |
365 | struct raw3215_info *raw; | 361 | struct raw3215_info *raw; |
366 | struct raw3215_req *req; | 362 | struct raw3215_req *req; |
@@ -459,14 +455,40 @@ raw3215_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
459 | } | 455 | } |
460 | 456 | ||
461 | /* | 457 | /* |
458 | * Drop the oldest line from the output buffer. | ||
459 | */ | ||
460 | static void raw3215_drop_line(struct raw3215_info *raw) | ||
461 | { | ||
462 | int ix; | ||
463 | char ch; | ||
464 | |||
465 | BUG_ON(raw->written != 0); | ||
466 | ix = (raw->head - raw->count) & (RAW3215_BUFFER_SIZE - 1); | ||
467 | while (raw->count > 0) { | ||
468 | ch = raw->buffer[ix]; | ||
469 | ix = (ix + 1) & (RAW3215_BUFFER_SIZE - 1); | ||
470 | raw->count--; | ||
471 | if (ch == 0x15) | ||
472 | break; | ||
473 | } | ||
474 | raw->head = ix; | ||
475 | } | ||
476 | |||
477 | /* | ||
462 | * Wait until length bytes are available in the output buffer. | 478 | * Wait until length bytes are available in the output buffer. |
463 | * Has to be called with the s390irq lock held. Can be called | 479 | * Has to be called with the s390irq lock held. Can be called |
464 | * disabled. | 480 | * disabled. |
465 | */ | 481 | */ |
466 | static void | 482 | static void raw3215_make_room(struct raw3215_info *raw, unsigned int length) |
467 | raw3215_make_room(struct raw3215_info *raw, unsigned int length) | ||
468 | { | 483 | { |
469 | while (RAW3215_BUFFER_SIZE - raw->count < length) { | 484 | while (RAW3215_BUFFER_SIZE - raw->count < length) { |
485 | /* While console is frozen for suspend we have no other | ||
486 | * choice but to drop message from the buffer to make | ||
487 | * room for even more messages. */ | ||
488 | if (raw->flags & RAW3215_FROZEN) { | ||
489 | raw3215_drop_line(raw); | ||
490 | continue; | ||
491 | } | ||
470 | /* there might be a request pending */ | 492 | /* there might be a request pending */ |
471 | raw->flags |= RAW3215_FLUSHING; | 493 | raw->flags |= RAW3215_FLUSHING; |
472 | raw3215_mk_write_req(raw); | 494 | raw3215_mk_write_req(raw); |
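raw3215_drop_line() above exploits the fact that the output buffer is a power-of-two ring (RAW3215_BUFFER_SIZE), so the oldest byte sits at (head - count) masked with the size minus one, and a line ends at 0x15, the EBCDIC newline; while the console is frozen, raw3215_make_room() calls it to sacrifice the oldest line instead of blocking the writer. The index arithmetic in isolation, simplified to an explicit tail pointer:

/*
 * Stand-alone sketch of dropping the oldest "line" from a power-of-two ring
 * buffer; 0x15 mirrors the EBCDIC newline. The explicit tail index is a
 * simplification of the driver's head/count bookkeeping.
 */
#include <stdio.h>

#define BUF_SIZE 16			/* must be a power of two */
#define EBCDIC_NL 0x15

static unsigned char buf[BUF_SIZE];
static unsigned int tail, count;	/* oldest byte, bytes in use */

static void drop_oldest_line(void)
{
	while (count > 0) {
		unsigned char ch = buf[tail];

		tail = (tail + 1) & (BUF_SIZE - 1);	/* masked advance */
		count--;
		if (ch == EBCDIC_NL)			/* stop after the line end */
			break;
	}
}

int main(void)
{
	const unsigned char msg[] = { 'a', 'b', EBCDIC_NL, 'c', 'd', EBCDIC_NL };
	unsigned int head = 0, i;

	for (i = 0; i < sizeof(msg); i++) {		/* queue two short lines */
		buf[head] = msg[i];
		head = (head + 1) & (BUF_SIZE - 1);
		count++;
	}
	drop_oldest_line();
	printf("%u byte(s) left\n", count);		/* prints 3 */
	return 0;
}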
@@ -488,8 +510,8 @@ raw3215_make_room(struct raw3215_info *raw, unsigned int length) | |||
488 | /* | 510 | /* |
489 | * String write routine for 3215 devices | 511 | * String write routine for 3215 devices |
490 | */ | 512 | */ |
491 | static void | 513 | static void raw3215_write(struct raw3215_info *raw, const char *str, |
492 | raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length) | 514 | unsigned int length) |
493 | { | 515 | { |
494 | unsigned long flags; | 516 | unsigned long flags; |
495 | int c, count; | 517 | int c, count; |
@@ -529,8 +551,7 @@ raw3215_write(struct raw3215_info *raw, const char *str, unsigned int length) | |||
529 | /* | 551 | /* |
530 | * Put character routine for 3215 devices | 552 | * Put character routine for 3215 devices |
531 | */ | 553 | */ |
532 | static void | 554 | static void raw3215_putchar(struct raw3215_info *raw, unsigned char ch) |
533 | raw3215_putchar(struct raw3215_info *raw, unsigned char ch) | ||
534 | { | 555 | { |
535 | unsigned long flags; | 556 | unsigned long flags; |
536 | unsigned int length, i; | 557 | unsigned int length, i; |
@@ -566,8 +587,7 @@ raw3215_putchar(struct raw3215_info *raw, unsigned char ch) | |||
566 | * Flush routine, it simply sets the flush flag and tries to start | 587 | * Flush routine, it simply sets the flush flag and tries to start |
567 | * pending IO. | 588 | * pending IO. |
568 | */ | 589 | */ |
569 | static void | 590 | static void raw3215_flush_buffer(struct raw3215_info *raw) |
570 | raw3215_flush_buffer(struct raw3215_info *raw) | ||
571 | { | 591 | { |
572 | unsigned long flags; | 592 | unsigned long flags; |
573 | 593 | ||
@@ -583,8 +603,7 @@ raw3215_flush_buffer(struct raw3215_info *raw) | |||
583 | /* | 603 | /* |
584 | * Fire up a 3215 device. | 604 | * Fire up a 3215 device. |
585 | */ | 605 | */ |
586 | static int | 606 | static int raw3215_startup(struct raw3215_info *raw) |
587 | raw3215_startup(struct raw3215_info *raw) | ||
588 | { | 607 | { |
589 | unsigned long flags; | 608 | unsigned long flags; |
590 | 609 | ||
@@ -602,8 +621,7 @@ raw3215_startup(struct raw3215_info *raw) | |||
602 | /* | 621 | /* |
603 | * Shutdown a 3215 device. | 622 | * Shutdown a 3215 device. |
604 | */ | 623 | */ |
605 | static void | 624 | static void raw3215_shutdown(struct raw3215_info *raw) |
606 | raw3215_shutdown(struct raw3215_info *raw) | ||
607 | { | 625 | { |
608 | DECLARE_WAITQUEUE(wait, current); | 626 | DECLARE_WAITQUEUE(wait, current); |
609 | unsigned long flags; | 627 | unsigned long flags; |
@@ -628,8 +646,7 @@ raw3215_shutdown(struct raw3215_info *raw) | |||
628 | spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); | 646 | spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); |
629 | } | 647 | } |
630 | 648 | ||
631 | static int | 649 | static int raw3215_probe (struct ccw_device *cdev) |
632 | raw3215_probe (struct ccw_device *cdev) | ||
633 | { | 650 | { |
634 | struct raw3215_info *raw; | 651 | struct raw3215_info *raw; |
635 | int line; | 652 | int line; |
@@ -675,8 +692,7 @@ raw3215_probe (struct ccw_device *cdev) | |||
675 | return 0; | 692 | return 0; |
676 | } | 693 | } |
677 | 694 | ||
678 | static void | 695 | static void raw3215_remove (struct ccw_device *cdev) |
679 | raw3215_remove (struct ccw_device *cdev) | ||
680 | { | 696 | { |
681 | struct raw3215_info *raw; | 697 | struct raw3215_info *raw; |
682 | 698 | ||
@@ -689,8 +705,7 @@ raw3215_remove (struct ccw_device *cdev) | |||
689 | } | 705 | } |
690 | } | 706 | } |
691 | 707 | ||
692 | static int | 708 | static int raw3215_set_online (struct ccw_device *cdev) |
693 | raw3215_set_online (struct ccw_device *cdev) | ||
694 | { | 709 | { |
695 | struct raw3215_info *raw; | 710 | struct raw3215_info *raw; |
696 | 711 | ||
@@ -701,8 +716,7 @@ raw3215_set_online (struct ccw_device *cdev) | |||
701 | return raw3215_startup(raw); | 716 | return raw3215_startup(raw); |
702 | } | 717 | } |
703 | 718 | ||
704 | static int | 719 | static int raw3215_set_offline (struct ccw_device *cdev) |
705 | raw3215_set_offline (struct ccw_device *cdev) | ||
706 | { | 720 | { |
707 | struct raw3215_info *raw; | 721 | struct raw3215_info *raw; |
708 | 722 | ||
@@ -715,6 +729,36 @@ raw3215_set_offline (struct ccw_device *cdev) | |||
715 | return 0; | 729 | return 0; |
716 | } | 730 | } |
717 | 731 | ||
732 | static int raw3215_pm_stop(struct ccw_device *cdev) | ||
733 | { | ||
734 | struct raw3215_info *raw; | ||
735 | unsigned long flags; | ||
736 | |||
737 | /* Empty the output buffer, then prevent new I/O. */ | ||
738 | raw = cdev->dev.driver_data; | ||
739 | spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); | ||
740 | raw3215_make_room(raw, RAW3215_BUFFER_SIZE); | ||
741 | raw->flags |= RAW3215_FROZEN; | ||
742 | spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); | ||
743 | return 0; | ||
744 | } | ||
745 | |||
746 | static int raw3215_pm_start(struct ccw_device *cdev) | ||
747 | { | ||
748 | struct raw3215_info *raw; | ||
749 | unsigned long flags; | ||
750 | |||
751 | /* Allow I/O again and flush output buffer. */ | ||
752 | raw = cdev->dev.driver_data; | ||
753 | spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); | ||
754 | raw->flags &= ~RAW3215_FROZEN; | ||
755 | raw->flags |= RAW3215_FLUSHING; | ||
756 | raw3215_try_io(raw); | ||
757 | raw->flags &= ~RAW3215_FLUSHING; | ||
758 | spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); | ||
759 | return 0; | ||
760 | } | ||
761 | |||
718 | static struct ccw_device_id raw3215_id[] = { | 762 | static struct ccw_device_id raw3215_id[] = { |
719 | { CCW_DEVICE(0x3215, 0) }, | 763 | { CCW_DEVICE(0x3215, 0) }, |
720 | { /* end of list */ }, | 764 | { /* end of list */ }, |
@@ -728,14 +772,17 @@ static struct ccw_driver raw3215_ccw_driver = { | |||
728 | .remove = &raw3215_remove, | 772 | .remove = &raw3215_remove, |
729 | .set_online = &raw3215_set_online, | 773 | .set_online = &raw3215_set_online, |
730 | .set_offline = &raw3215_set_offline, | 774 | .set_offline = &raw3215_set_offline, |
775 | .freeze = &raw3215_pm_stop, | ||
776 | .thaw = &raw3215_pm_start, | ||
777 | .restore = &raw3215_pm_start, | ||
731 | }; | 778 | }; |
732 | 779 | ||
733 | #ifdef CONFIG_TN3215_CONSOLE | 780 | #ifdef CONFIG_TN3215_CONSOLE |
734 | /* | 781 | /* |
735 | * Write a string to the 3215 console | 782 | * Write a string to the 3215 console |
736 | */ | 783 | */ |
737 | static void | 784 | static void con3215_write(struct console *co, const char *str, |
738 | con3215_write(struct console *co, const char *str, unsigned int count) | 785 | unsigned int count) |
739 | { | 786 | { |
740 | struct raw3215_info *raw; | 787 | struct raw3215_info *raw; |
741 | int i; | 788 | int i; |
@@ -768,13 +815,17 @@ static struct tty_driver *con3215_device(struct console *c, int *index) | |||
768 | * panic() calls con3215_flush through a panic_notifier | 815 | * panic() calls con3215_flush through a panic_notifier |
769 | * before the system enters a disabled, endless loop. | 816 | * before the system enters a disabled, endless loop. |
770 | */ | 817 | */ |
771 | static void | 818 | static void con3215_flush(void) |
772 | con3215_flush(void) | ||
773 | { | 819 | { |
774 | struct raw3215_info *raw; | 820 | struct raw3215_info *raw; |
775 | unsigned long flags; | 821 | unsigned long flags; |
776 | 822 | ||
777 | raw = raw3215[0]; /* console 3215 is the first one */ | 823 | raw = raw3215[0]; /* console 3215 is the first one */ |
824 | if (raw->flags & RAW3215_FROZEN) | ||
825 | /* The console is still frozen for suspend. */ | ||
826 | if (ccw_device_force_console()) | ||
827 | /* Forcing didn't work, no panic message .. */ | ||
828 | return; | ||
778 | spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); | 829 | spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); |
779 | raw3215_make_room(raw, RAW3215_BUFFER_SIZE); | 830 | raw3215_make_room(raw, RAW3215_BUFFER_SIZE); |
780 | spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); | 831 | spin_unlock_irqrestore(get_ccwdev_lock(raw->cdev), flags); |
@@ -811,8 +862,7 @@ static struct console con3215 = { | |||
811 | * 3215 console initialization code called from console_init(). | 862 | * 3215 console initialization code called from console_init(). |
812 | * NOTE: This is called before kmalloc is available. | 863 | * NOTE: This is called before kmalloc is available. |
813 | */ | 864 | */ |
814 | static int __init | 865 | static int __init con3215_init(void) |
815 | con3215_init(void) | ||
816 | { | 866 | { |
817 | struct ccw_device *cdev; | 867 | struct ccw_device *cdev; |
818 | struct raw3215_info *raw; | 868 | struct raw3215_info *raw; |
@@ -875,8 +925,7 @@ console_initcall(con3215_init); | |||
875 | * | 925 | * |
876 | * This routine is called whenever a 3215 tty is opened. | 926 | * This routine is called whenever a 3215 tty is opened. |
877 | */ | 927 | */ |
878 | static int | 928 | static int tty3215_open(struct tty_struct *tty, struct file * filp) |
879 | tty3215_open(struct tty_struct *tty, struct file * filp) | ||
880 | { | 929 | { |
881 | struct raw3215_info *raw; | 930 | struct raw3215_info *raw; |
882 | int retval, line; | 931 | int retval, line; |
@@ -909,8 +958,7 @@ tty3215_open(struct tty_struct *tty, struct file * filp) | |||
909 | * This routine is called when the 3215 tty is closed. We wait | 958 | * This routine is called when the 3215 tty is closed. We wait |
910 | * for the remaining request to be completed. Then we clean up. | 959 | * for the remaining request to be completed. Then we clean up. |
911 | */ | 960 | */ |
912 | static void | 961 | static void tty3215_close(struct tty_struct *tty, struct file * filp) |
913 | tty3215_close(struct tty_struct *tty, struct file * filp) | ||
914 | { | 962 | { |
915 | struct raw3215_info *raw; | 963 | struct raw3215_info *raw; |
916 | 964 | ||
@@ -927,8 +975,7 @@ tty3215_close(struct tty_struct *tty, struct file * filp) | |||
927 | /* | 975 | /* |
928 | * Returns the amount of free space in the output buffer. | 976 | * Returns the amount of free space in the output buffer. |
929 | */ | 977 | */ |
930 | static int | 978 | static int tty3215_write_room(struct tty_struct *tty) |
931 | tty3215_write_room(struct tty_struct *tty) | ||
932 | { | 979 | { |
933 | struct raw3215_info *raw; | 980 | struct raw3215_info *raw; |
934 | 981 | ||
@@ -944,9 +991,8 @@ tty3215_write_room(struct tty_struct *tty) | |||
944 | /* | 991 | /* |
945 | * String write routine for 3215 ttys | 992 | * String write routine for 3215 ttys |
946 | */ | 993 | */ |
947 | static int | 994 | static int tty3215_write(struct tty_struct * tty, |
948 | tty3215_write(struct tty_struct * tty, | 995 | const unsigned char *buf, int count) |
949 | const unsigned char *buf, int count) | ||
950 | { | 996 | { |
951 | struct raw3215_info *raw; | 997 | struct raw3215_info *raw; |
952 | 998 | ||
@@ -960,8 +1006,7 @@ tty3215_write(struct tty_struct * tty, | |||
960 | /* | 1006 | /* |
961 | * Put character routine for 3215 ttys | 1007 | * Put character routine for 3215 ttys |
962 | */ | 1008 | */ |
963 | static int | 1009 | static int tty3215_put_char(struct tty_struct *tty, unsigned char ch) |
964 | tty3215_put_char(struct tty_struct *tty, unsigned char ch) | ||
965 | { | 1010 | { |
966 | struct raw3215_info *raw; | 1011 | struct raw3215_info *raw; |
967 | 1012 | ||
@@ -972,16 +1017,14 @@ tty3215_put_char(struct tty_struct *tty, unsigned char ch) | |||
972 | return 1; | 1017 | return 1; |
973 | } | 1018 | } |
974 | 1019 | ||
975 | static void | 1020 | static void tty3215_flush_chars(struct tty_struct *tty) |
976 | tty3215_flush_chars(struct tty_struct *tty) | ||
977 | { | 1021 | { |
978 | } | 1022 | } |
979 | 1023 | ||
980 | /* | 1024 | /* |
981 | * Returns the number of characters in the output buffer | 1025 | * Returns the number of characters in the output buffer |
982 | */ | 1026 | */ |
983 | static int | 1027 | static int tty3215_chars_in_buffer(struct tty_struct *tty) |
984 | tty3215_chars_in_buffer(struct tty_struct *tty) | ||
985 | { | 1028 | { |
986 | struct raw3215_info *raw; | 1029 | struct raw3215_info *raw; |
987 | 1030 | ||
@@ -989,8 +1032,7 @@ tty3215_chars_in_buffer(struct tty_struct *tty) | |||
989 | return raw->count; | 1032 | return raw->count; |
990 | } | 1033 | } |
991 | 1034 | ||
992 | static void | 1035 | static void tty3215_flush_buffer(struct tty_struct *tty) |
993 | tty3215_flush_buffer(struct tty_struct *tty) | ||
994 | { | 1036 | { |
995 | struct raw3215_info *raw; | 1037 | struct raw3215_info *raw; |
996 | 1038 | ||
@@ -1002,9 +1044,8 @@ tty3215_flush_buffer(struct tty_struct *tty) | |||
1002 | /* | 1044 | /* |
1003 | * Currently we don't have any io controls for 3215 ttys | 1045 | * Currently we don't have any io controls for 3215 ttys |
1004 | */ | 1046 | */ |
1005 | static int | 1047 | static int tty3215_ioctl(struct tty_struct *tty, struct file * file, |
1006 | tty3215_ioctl(struct tty_struct *tty, struct file * file, | 1048 | unsigned int cmd, unsigned long arg) |
1007 | unsigned int cmd, unsigned long arg) | ||
1008 | { | 1049 | { |
1009 | if (tty->flags & (1 << TTY_IO_ERROR)) | 1050 | if (tty->flags & (1 << TTY_IO_ERROR)) |
1010 | return -EIO; | 1051 | return -EIO; |
@@ -1019,8 +1060,7 @@ tty3215_ioctl(struct tty_struct *tty, struct file * file, | |||
1019 | /* | 1060 | /* |
1020 | * Disable reading from a 3215 tty | 1061 | * Disable reading from a 3215 tty |
1021 | */ | 1062 | */ |
1022 | static void | 1063 | static void tty3215_throttle(struct tty_struct * tty) |
1023 | tty3215_throttle(struct tty_struct * tty) | ||
1024 | { | 1064 | { |
1025 | struct raw3215_info *raw; | 1065 | struct raw3215_info *raw; |
1026 | 1066 | ||
@@ -1031,8 +1071,7 @@ tty3215_throttle(struct tty_struct * tty) | |||
1031 | /* | 1071 | /* |
1032 | * Enable reading from a 3215 tty | 1072 | * Enable reading from a 3215 tty |
1033 | */ | 1073 | */ |
1034 | static void | 1074 | static void tty3215_unthrottle(struct tty_struct * tty) |
1035 | tty3215_unthrottle(struct tty_struct * tty) | ||
1036 | { | 1075 | { |
1037 | struct raw3215_info *raw; | 1076 | struct raw3215_info *raw; |
1038 | unsigned long flags; | 1077 | unsigned long flags; |
@@ -1049,8 +1088,7 @@ tty3215_unthrottle(struct tty_struct * tty) | |||
1049 | /* | 1088 | /* |
1050 | * Disable writing to a 3215 tty | 1089 | * Disable writing to a 3215 tty |
1051 | */ | 1090 | */ |
1052 | static void | 1091 | static void tty3215_stop(struct tty_struct *tty) |
1053 | tty3215_stop(struct tty_struct *tty) | ||
1054 | { | 1092 | { |
1055 | struct raw3215_info *raw; | 1093 | struct raw3215_info *raw; |
1056 | 1094 | ||
@@ -1061,8 +1099,7 @@ tty3215_stop(struct tty_struct *tty) | |||
1061 | /* | 1099 | /* |
1062 | * Enable writing to a 3215 tty | 1100 | * Enable writing to a 3215 tty |
1063 | */ | 1101 | */ |
1064 | static void | 1102 | static void tty3215_start(struct tty_struct *tty) |
1065 | tty3215_start(struct tty_struct *tty) | ||
1066 | { | 1103 | { |
1067 | struct raw3215_info *raw; | 1104 | struct raw3215_info *raw; |
1068 | unsigned long flags; | 1105 | unsigned long flags; |
@@ -1096,8 +1133,7 @@ static const struct tty_operations tty3215_ops = { | |||
1096 | * 3215 tty registration code called from tty_init(). | 1133 | * 3215 tty registration code called from tty_init(). |
1097 | * Most kernel services (incl. kmalloc) are available at this point. | 1134 | * Most kernel services (incl. kmalloc) are available at this point. |
1098 | */ | 1135 | */ |
1099 | static int __init | 1136 | static int __init tty3215_init(void) |
1100 | tty3215_init(void) | ||
1101 | { | 1137 | { |
1102 | struct tty_driver *driver; | 1138 | struct tty_driver *driver; |
1103 | int ret; | 1139 | int ret; |
@@ -1142,8 +1178,7 @@ tty3215_init(void) | |||
1142 | return 0; | 1178 | return 0; |
1143 | } | 1179 | } |
1144 | 1180 | ||
1145 | static void __exit | 1181 | static void __exit tty3215_exit(void) |
1146 | tty3215_exit(void) | ||
1147 | { | 1182 | { |
1148 | tty_unregister_driver(tty3215_driver); | 1183 | tty_unregister_driver(tty3215_driver); |
1149 | put_tty_driver(tty3215_driver); | 1184 | put_tty_driver(tty3215_driver); |
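Taken as a whole, the con3215 changes keep the line-mode console usable across a suspend without touching the hardware: raw3215_pm_stop() drains what it can and sets RAW3215_FROZEN, raw3215_try_io() then refuses to start new channel programs, console output keeps accumulating in the ring buffer (dropping the oldest lines on overflow), and raw3215_pm_start() clears the flag and flushes the backlog; only the panic path has to force the device back with ccw_device_force_console(). The observable behaviour, reduced to a toy model:

/*
 * Toy model of "buffer while frozen, flush on thaw"; the real driver's
 * request handling, locking and overflow policy are omitted.
 */
#include <stdio.h>
#include <string.h>

static char backlog[256];
static int frozen;

static void console_write(const char *s)
{
	if (frozen) {		/* frozen: only queue the message */
		strncat(backlog, s, sizeof(backlog) - strlen(backlog) - 1);
		return;
	}
	fputs(s, stdout);	/* normal path: start the I/O right away */
}

static void pm_freeze(void)
{
	frozen = 1;
}

static void pm_thaw(void)
{
	frozen = 0;
	console_write(backlog);	/* replay everything queued while frozen */
	backlog[0] = '\0';
}

int main(void)
{
	console_write("before suspend\n");
	pm_freeze();
	console_write("queued during suspend\n");
	pm_thaw();		/* the queued line appears only now */
	return 0;
}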
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c index ed5396dae58e..44d02e371c04 100644 --- a/drivers/s390/char/con3270.c +++ b/drivers/s390/char/con3270.c | |||
@@ -1,11 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/char/con3270.c | 2 | * IBM/3270 Driver - console view. |
3 | * IBM/3270 Driver - console view. | ||
4 | * | 3 | * |
5 | * Author(s): | 4 | * Author(s): |
6 | * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) | 5 | * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) |
7 | * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> | 6 | * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> |
8 | * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | 7 | * Copyright IBM Corp. 2003, 2009 |
9 | */ | 8 | */ |
10 | 9 | ||
11 | #include <linux/bootmem.h> | 10 | #include <linux/bootmem.h> |
@@ -530,6 +529,7 @@ con3270_flush(void) | |||
530 | cp = condev; | 529 | cp = condev; |
531 | if (!cp->view.dev) | 530 | if (!cp->view.dev) |
532 | return; | 531 | return; |
532 | raw3270_pm_unfreeze(&cp->view); | ||
533 | spin_lock_irqsave(&cp->view.lock, flags); | 533 | spin_lock_irqsave(&cp->view.lock, flags); |
534 | con3270_wait_write(cp); | 534 | con3270_wait_write(cp); |
535 | cp->nr_up = 0; | 535 | cp->nr_up = 0; |
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c index 40759c33477d..097d3846a828 100644 --- a/drivers/s390/char/fs3270.c +++ b/drivers/s390/char/fs3270.c | |||
@@ -1,11 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/char/fs3270.c | 2 | * IBM/3270 Driver - fullscreen driver. |
3 | * IBM/3270 Driver - fullscreen driver. | ||
4 | * | 3 | * |
5 | * Author(s): | 4 | * Author(s): |
6 | * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) | 5 | * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) |
7 | * Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com> | 6 | * Rewritten for 2.5/2.6 by Martin Schwidefsky <schwidefsky@de.ibm.com> |
8 | * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | 7 | * Copyright IBM Corp. 2003, 2009 |
9 | */ | 8 | */ |
10 | 9 | ||
11 | #include <linux/bootmem.h> | 10 | #include <linux/bootmem.h> |
@@ -399,6 +398,11 @@ fs3270_free_view(struct raw3270_view *view) | |||
399 | static void | 398 | static void |
400 | fs3270_release(struct raw3270_view *view) | 399 | fs3270_release(struct raw3270_view *view) |
401 | { | 400 | { |
401 | struct fs3270 *fp; | ||
402 | |||
403 | fp = (struct fs3270 *) view; | ||
404 | if (fp->fs_pid) | ||
405 | kill_pid(fp->fs_pid, SIGHUP, 1); | ||
402 | } | 406 | } |
403 | 407 | ||
404 | /* View to a 3270 device. Can be console, tty or fullscreen. */ | 408 | /* View to a 3270 device. Can be console, tty or fullscreen. */ |
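The small fs3270 change above means a fullscreen 3270 application is told when its view goes away: fs3270_release() sends SIGHUP to the pid stored in the view, so the program can redraw or terminate instead of operating on a dead view. Where fs_pid is obtained is not part of this hunk; the usual kernel pattern for that kind of bookkeeping is sketched below with invented example_* names, so treat the open/close side as an assumption.

/*
 * Sketch of the store-a-pid-and-signal-it pattern; the example_* structure
 * and functions are hypothetical, only kill_pid()/get_pid()/put_pid() and
 * task_pid(current) are real kernel APIs.
 */
#include <linux/sched.h>
#include <linux/pid.h>

struct example_view {
	struct pid *owner;		/* process that opened the view */
};

static void example_open(struct example_view *v)
{
	v->owner = get_pid(task_pid(current));	/* remember the opener */
}

static void example_release(struct example_view *v)
{
	if (v->owner)
		kill_pid(v->owner, SIGHUP, 1);	/* view is being torn down */
}

static void example_close(struct example_view *v)
{
	put_pid(v->owner);			/* drop the pid reference */
	v->owner = NULL;
}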
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 97e63cf46944..75a8831eebbc 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c | |||
@@ -1,10 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/char/monreader.c | ||
3 | * | ||
4 | * Character device driver for reading z/VM *MONITOR service records. | 2 | * Character device driver for reading z/VM *MONITOR service records. |
5 | * | 3 | * |
6 | * Copyright IBM Corp. 2004, 2008 | 4 | * Copyright IBM Corp. 2004, 2009 |
7 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> | 5 | * |
6 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> | ||
8 | */ | 7 | */ |
9 | 8 | ||
10 | #define KMSG_COMPONENT "monreader" | 9 | #define KMSG_COMPONENT "monreader" |
@@ -22,6 +21,7 @@ | |||
22 | #include <linux/spinlock.h> | 21 | #include <linux/spinlock.h> |
23 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
24 | #include <linux/poll.h> | 23 | #include <linux/poll.h> |
24 | #include <linux/device.h> | ||
25 | #include <net/iucv/iucv.h> | 25 | #include <net/iucv/iucv.h> |
26 | #include <asm/uaccess.h> | 26 | #include <asm/uaccess.h> |
27 | #include <asm/ebcdic.h> | 27 | #include <asm/ebcdic.h> |
@@ -78,6 +78,7 @@ static u8 user_data_sever[16] = { | |||
78 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, | 78 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, |
79 | }; | 79 | }; |
80 | 80 | ||
81 | static struct device *monreader_device; | ||
81 | 82 | ||
82 | /****************************************************************************** | 83 | /****************************************************************************** |
83 | * helper functions * | 84 | * helper functions * |
@@ -319,11 +320,12 @@ static int mon_open(struct inode *inode, struct file *filp) | |||
319 | goto out_path; | 320 | goto out_path; |
320 | } | 321 | } |
321 | filp->private_data = monpriv; | 322 | filp->private_data = monpriv; |
323 | monreader_device->driver_data = monpriv; | ||
322 | unlock_kernel(); | 324 | unlock_kernel(); |
323 | return nonseekable_open(inode, filp); | 325 | return nonseekable_open(inode, filp); |
324 | 326 | ||
325 | out_path: | 327 | out_path: |
326 | kfree(monpriv->path); | 328 | iucv_path_free(monpriv->path); |
327 | out_priv: | 329 | out_priv: |
328 | mon_free_mem(monpriv); | 330 | mon_free_mem(monpriv); |
329 | out_use: | 331 | out_use: |
@@ -341,10 +343,13 @@ static int mon_close(struct inode *inode, struct file *filp) | |||
341 | /* | 343 | /* |
342 | * Close IUCV connection and unregister | 344 | * Close IUCV connection and unregister |
343 | */ | 345 | */ |
344 | rc = iucv_path_sever(monpriv->path, user_data_sever); | 346 | if (monpriv->path) { |
345 | if (rc) | 347 | rc = iucv_path_sever(monpriv->path, user_data_sever); |
346 | pr_warning("Disconnecting the z/VM *MONITOR system service " | 348 | if (rc) |
347 | "failed with rc=%i\n", rc); | 349 | pr_warning("Disconnecting the z/VM *MONITOR system " |
350 | "service failed with rc=%i\n", rc); | ||
351 | iucv_path_free(monpriv->path); | ||
352 | } | ||
348 | 353 | ||
349 | atomic_set(&monpriv->iucv_severed, 0); | 354 | atomic_set(&monpriv->iucv_severed, 0); |
350 | atomic_set(&monpriv->iucv_connected, 0); | 355 | atomic_set(&monpriv->iucv_connected, 0); |
@@ -452,6 +457,94 @@ static struct miscdevice mon_dev = { | |||
452 | .minor = MISC_DYNAMIC_MINOR, | 457 | .minor = MISC_DYNAMIC_MINOR, |
453 | }; | 458 | }; |
454 | 459 | ||
460 | |||
461 | /****************************************************************************** | ||
462 | * suspend / resume * | ||
463 | *****************************************************************************/ | ||
464 | static int monreader_freeze(struct device *dev) | ||
465 | { | ||
466 | struct mon_private *monpriv = dev->driver_data; | ||
467 | int rc; | ||
468 | |||
469 | if (!monpriv) | ||
470 | return 0; | ||
471 | if (monpriv->path) { | ||
472 | rc = iucv_path_sever(monpriv->path, user_data_sever); | ||
473 | if (rc) | ||
474 | pr_warning("Disconnecting the z/VM *MONITOR system " | ||
475 | "service failed with rc=%i\n", rc); | ||
476 | iucv_path_free(monpriv->path); | ||
477 | } | ||
478 | atomic_set(&monpriv->iucv_severed, 0); | ||
479 | atomic_set(&monpriv->iucv_connected, 0); | ||
480 | atomic_set(&monpriv->read_ready, 0); | ||
481 | atomic_set(&monpriv->msglim_count, 0); | ||
482 | monpriv->write_index = 0; | ||
483 | monpriv->read_index = 0; | ||
484 | monpriv->path = NULL; | ||
485 | return 0; | ||
486 | } | ||
487 | |||
488 | static int monreader_thaw(struct device *dev) | ||
489 | { | ||
490 | struct mon_private *monpriv = dev->driver_data; | ||
491 | int rc; | ||
492 | |||
493 | if (!monpriv) | ||
494 | return 0; | ||
495 | rc = -ENOMEM; | ||
496 | monpriv->path = iucv_path_alloc(MON_MSGLIM, IUCV_IPRMDATA, GFP_KERNEL); | ||
497 | if (!monpriv->path) | ||
498 | goto out; | ||
499 | rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, | ||
500 | MON_SERVICE, NULL, user_data_connect, monpriv); | ||
501 | if (rc) { | ||
502 | pr_err("Connecting to the z/VM *MONITOR system service " | ||
503 | "failed with rc=%i\n", rc); | ||
504 | goto out_path; | ||
505 | } | ||
506 | wait_event(mon_conn_wait_queue, | ||
507 | atomic_read(&monpriv->iucv_connected) || | ||
508 | atomic_read(&monpriv->iucv_severed)); | ||
509 | if (atomic_read(&monpriv->iucv_severed)) | ||
510 | goto out_path; | ||
511 | return 0; | ||
512 | out_path: | ||
513 | rc = -EIO; | ||
514 | iucv_path_free(monpriv->path); | ||
515 | monpriv->path = NULL; | ||
516 | out: | ||
517 | atomic_set(&monpriv->iucv_severed, 1); | ||
518 | return rc; | ||
519 | } | ||
520 | |||
521 | static int monreader_restore(struct device *dev) | ||
522 | { | ||
523 | int rc; | ||
524 | |||
525 | segment_unload(mon_dcss_name); | ||
526 | rc = segment_load(mon_dcss_name, SEGMENT_SHARED, | ||
527 | &mon_dcss_start, &mon_dcss_end); | ||
528 | if (rc < 0) { | ||
529 | segment_warning(rc, mon_dcss_name); | ||
530 | panic("fatal monreader resume error: no monitor dcss\n"); | ||
531 | } | ||
532 | return monreader_thaw(dev); | ||
533 | } | ||
534 | |||
535 | static struct dev_pm_ops monreader_pm_ops = { | ||
536 | .freeze = monreader_freeze, | ||
537 | .thaw = monreader_thaw, | ||
538 | .restore = monreader_restore, | ||
539 | }; | ||
540 | |||
541 | static struct device_driver monreader_driver = { | ||
542 | .name = "monreader", | ||
543 | .bus = &iucv_bus, | ||
544 | .pm = &monreader_pm_ops, | ||
545 | }; | ||
546 | |||
547 | |||
455 | /****************************************************************************** | 548 | /****************************************************************************** |
456 | * module init/exit * | 549 | * module init/exit * |
457 | *****************************************************************************/ | 550 | *****************************************************************************/ |
@@ -475,16 +568,33 @@ static int __init mon_init(void) | |||
475 | return rc; | 568 | return rc; |
476 | } | 569 | } |
477 | 570 | ||
571 | rc = driver_register(&monreader_driver); | ||
572 | if (rc) | ||
573 | goto out_iucv; | ||
574 | monreader_device = kzalloc(sizeof(struct device), GFP_KERNEL); | ||
575 | if (!monreader_device) | ||
576 | goto out_driver; | ||
577 | dev_set_name(monreader_device, "monreader-dev"); | ||
578 | monreader_device->bus = &iucv_bus; | ||
579 | monreader_device->parent = iucv_root; | ||
580 | monreader_device->driver = &monreader_driver; | ||
581 | monreader_device->release = (void (*)(struct device *))kfree; | ||
582 | rc = device_register(monreader_device); | ||
583 | if (rc) { | ||
584 | kfree(monreader_device); | ||
585 | goto out_driver; | ||
586 | } | ||
587 | |||
478 | rc = segment_type(mon_dcss_name); | 588 | rc = segment_type(mon_dcss_name); |
479 | if (rc < 0) { | 589 | if (rc < 0) { |
480 | segment_warning(rc, mon_dcss_name); | 590 | segment_warning(rc, mon_dcss_name); |
481 | goto out_iucv; | 591 | goto out_device; |
482 | } | 592 | } |
483 | if (rc != SEG_TYPE_SC) { | 593 | if (rc != SEG_TYPE_SC) { |
484 | pr_err("The specified *MONITOR DCSS %s does not have the " | 594 | pr_err("The specified *MONITOR DCSS %s does not have the " |
485 | "required type SC\n", mon_dcss_name); | 595 | "required type SC\n", mon_dcss_name); |
486 | rc = -EINVAL; | 596 | rc = -EINVAL; |
487 | goto out_iucv; | 597 | goto out_device; |
488 | } | 598 | } |
489 | 599 | ||
490 | rc = segment_load(mon_dcss_name, SEGMENT_SHARED, | 600 | rc = segment_load(mon_dcss_name, SEGMENT_SHARED, |
@@ -492,7 +602,7 @@ static int __init mon_init(void) | |||
492 | if (rc < 0) { | 602 | if (rc < 0) { |
493 | segment_warning(rc, mon_dcss_name); | 603 | segment_warning(rc, mon_dcss_name); |
494 | rc = -EINVAL; | 604 | rc = -EINVAL; |
495 | goto out_iucv; | 605 | goto out_device; |
496 | } | 606 | } |
497 | dcss_mkname(mon_dcss_name, &user_data_connect[8]); | 607 | dcss_mkname(mon_dcss_name, &user_data_connect[8]); |
498 | 608 | ||
@@ -503,6 +613,10 @@ static int __init mon_init(void) | |||
503 | 613 | ||
504 | out: | 614 | out: |
505 | segment_unload(mon_dcss_name); | 615 | segment_unload(mon_dcss_name); |
616 | out_device: | ||
617 | device_unregister(monreader_device); | ||
618 | out_driver: | ||
619 | driver_unregister(&monreader_driver); | ||
506 | out_iucv: | 620 | out_iucv: |
507 | iucv_unregister(&monreader_iucv_handler, 1); | 621 | iucv_unregister(&monreader_iucv_handler, 1); |
508 | return rc; | 622 | return rc; |
@@ -512,6 +626,8 @@ static void __exit mon_exit(void) | |||
512 | { | 626 | { |
513 | segment_unload(mon_dcss_name); | 627 | segment_unload(mon_dcss_name); |
514 | WARN_ON(misc_deregister(&mon_dev) != 0); | 628 | WARN_ON(misc_deregister(&mon_dev) != 0); |
629 | device_unregister(monreader_device); | ||
630 | driver_unregister(&monreader_driver); | ||
515 | iucv_unregister(&monreader_iucv_handler, 1); | 631 | iucv_unregister(&monreader_iucv_handler, 1); |
516 | return; | 632 | return; |
517 | } | 633 | } |
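
The monreader hunks above wire a dev_pm_ops table to a driver that exists mainly so the PM core has something to call during hibernation: freeze severs the IUCV path, thaw reconnects it, and restore reloads the monitor DCSS before reconnecting. For reference, a minimal sketch of the three callbacks and the hibernation phases that invoke them; the demo_* names are illustrative placeholders, not code from this patch.

/*
 * Sketch only: the dev_pm_ops triple used by the drivers in this patch.
 * demo_* names are placeholders, not code from the patch.
 */
#include <linux/device.h>
#include <linux/pm.h>

static int demo_freeze(struct device *dev)
{
	/* quiesce before the hibernation snapshot: stop I/O, sever links */
	return 0;
}

static int demo_thaw(struct device *dev)
{
	/* undo demo_freeze() in the still-running kernel (or on error) */
	return 0;
}

static int demo_restore(struct device *dev)
{
	/* reinitialize in the resumed kernel after the image was loaded;
	 * monreader also reloads its DCSS at this point */
	return demo_thaw(dev);
}

static struct dev_pm_ops demo_pm_ops = {
	.freeze	 = demo_freeze,
	.thaw	 = demo_thaw,
	.restore = demo_restore,
};

monreader hooks such a table up via device_driver.pm on the IUCV bus; monwriter and the SCLP core below reach the same callbacks through a dummy platform device instead.
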
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index c7d7483bab9a..66fb8eba93f4 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c | |||
@@ -1,9 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/char/monwriter.c | ||
3 | * | ||
4 | * Character device driver for writing z/VM *MONITOR service records. | 2 | * Character device driver for writing z/VM *MONITOR service records. |
5 | * | 3 | * |
6 | * Copyright (C) IBM Corp. 2006 | 4 | * Copyright IBM Corp. 2006, 2009 |
7 | * | 5 | * |
8 | * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com> | 6 | * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com> |
9 | */ | 7 | */ |
@@ -22,6 +20,7 @@ | |||
22 | #include <linux/ctype.h> | 20 | #include <linux/ctype.h> |
23 | #include <linux/poll.h> | 21 | #include <linux/poll.h> |
24 | #include <linux/mutex.h> | 22 | #include <linux/mutex.h> |
23 | #include <linux/platform_device.h> | ||
25 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
26 | #include <asm/ebcdic.h> | 25 | #include <asm/ebcdic.h> |
27 | #include <asm/io.h> | 26 | #include <asm/io.h> |
@@ -40,7 +39,10 @@ struct mon_buf { | |||
40 | char *data; | 39 | char *data; |
41 | }; | 40 | }; |
42 | 41 | ||
42 | static LIST_HEAD(mon_priv_list); | ||
43 | |||
43 | struct mon_private { | 44 | struct mon_private { |
45 | struct list_head priv_list; | ||
44 | struct list_head list; | 46 | struct list_head list; |
45 | struct monwrite_hdr hdr; | 47 | struct monwrite_hdr hdr; |
46 | size_t hdr_to_read; | 48 | size_t hdr_to_read; |
@@ -188,6 +190,7 @@ static int monwrite_open(struct inode *inode, struct file *filp) | |||
188 | monpriv->hdr_to_read = sizeof(monpriv->hdr); | 190 | monpriv->hdr_to_read = sizeof(monpriv->hdr); |
189 | mutex_init(&monpriv->thread_mutex); | 191 | mutex_init(&monpriv->thread_mutex); |
190 | filp->private_data = monpriv; | 192 | filp->private_data = monpriv; |
193 | list_add_tail(&monpriv->priv_list, &mon_priv_list); | ||
191 | unlock_kernel(); | 194 | unlock_kernel(); |
192 | return nonseekable_open(inode, filp); | 195 | return nonseekable_open(inode, filp); |
193 | } | 196 | } |
@@ -206,6 +209,7 @@ static int monwrite_close(struct inode *inode, struct file *filp) | |||
206 | kfree(entry->data); | 209 | kfree(entry->data); |
207 | kfree(entry); | 210 | kfree(entry); |
208 | } | 211 | } |
212 | list_del(&monpriv->priv_list); | ||
209 | kfree(monpriv); | 213 | kfree(monpriv); |
210 | return 0; | 214 | return 0; |
211 | } | 215 | } |
@@ -281,20 +285,102 @@ static struct miscdevice mon_dev = { | |||
281 | }; | 285 | }; |
282 | 286 | ||
283 | /* | 287 | /* |
288 | * suspend/resume | ||
289 | */ | ||
290 | |||
291 | static int monwriter_freeze(struct device *dev) | ||
292 | { | ||
293 | struct mon_private *monpriv; | ||
294 | struct mon_buf *monbuf; | ||
295 | |||
296 | list_for_each_entry(monpriv, &mon_priv_list, priv_list) { | ||
297 | list_for_each_entry(monbuf, &monpriv->list, list) { | ||
298 | if (monbuf->hdr.mon_function != MONWRITE_GEN_EVENT) | ||
299 | monwrite_diag(&monbuf->hdr, monbuf->data, | ||
300 | APPLDATA_STOP_REC); | ||
301 | } | ||
302 | } | ||
303 | return 0; | ||
304 | } | ||
305 | |||
306 | static int monwriter_restore(struct device *dev) | ||
307 | { | ||
308 | struct mon_private *monpriv; | ||
309 | struct mon_buf *monbuf; | ||
310 | |||
311 | list_for_each_entry(monpriv, &mon_priv_list, priv_list) { | ||
312 | list_for_each_entry(monbuf, &monpriv->list, list) { | ||
313 | if (monbuf->hdr.mon_function == MONWRITE_START_INTERVAL) | ||
314 | monwrite_diag(&monbuf->hdr, monbuf->data, | ||
315 | APPLDATA_START_INTERVAL_REC); | ||
316 | if (monbuf->hdr.mon_function == MONWRITE_START_CONFIG) | ||
317 | monwrite_diag(&monbuf->hdr, monbuf->data, | ||
318 | APPLDATA_START_CONFIG_REC); | ||
319 | } | ||
320 | } | ||
321 | return 0; | ||
322 | } | ||
323 | |||
324 | static int monwriter_thaw(struct device *dev) | ||
325 | { | ||
326 | return monwriter_restore(dev); | ||
327 | } | ||
328 | |||
329 | static struct dev_pm_ops monwriter_pm_ops = { | ||
330 | .freeze = monwriter_freeze, | ||
331 | .thaw = monwriter_thaw, | ||
332 | .restore = monwriter_restore, | ||
333 | }; | ||
334 | |||
335 | static struct platform_driver monwriter_pdrv = { | ||
336 | .driver = { | ||
337 | .name = "monwriter", | ||
338 | .owner = THIS_MODULE, | ||
339 | .pm = &monwriter_pm_ops, | ||
340 | }, | ||
341 | }; | ||
342 | |||
343 | static struct platform_device *monwriter_pdev; | ||
344 | |||
345 | /* | ||
284 | * module init/exit | 346 | * module init/exit |
285 | */ | 347 | */ |
286 | 348 | ||
287 | static int __init mon_init(void) | 349 | static int __init mon_init(void) |
288 | { | 350 | { |
289 | if (MACHINE_IS_VM) | 351 | int rc; |
290 | return misc_register(&mon_dev); | 352 | |
291 | else | 353 | if (!MACHINE_IS_VM) |
292 | return -ENODEV; | 354 | return -ENODEV; |
355 | |||
356 | rc = platform_driver_register(&monwriter_pdrv); | ||
357 | if (rc) | ||
358 | return rc; | ||
359 | |||
360 | monwriter_pdev = platform_device_register_simple("monwriter", -1, NULL, | ||
361 | 0); | ||
362 | if (IS_ERR(monwriter_pdev)) { | ||
363 | rc = PTR_ERR(monwriter_pdev); | ||
364 | goto out_driver; | ||
365 | } | ||
366 | |||
367 | rc = misc_register(&mon_dev); | ||
368 | if (rc) | ||
369 | goto out_device; | ||
370 | return 0; | ||
371 | |||
372 | out_device: | ||
373 | platform_device_unregister(monwriter_pdev); | ||
374 | out_driver: | ||
375 | platform_driver_unregister(&monwriter_pdrv); | ||
376 | return rc; | ||
293 | } | 377 | } |
294 | 378 | ||
295 | static void __exit mon_exit(void) | 379 | static void __exit mon_exit(void) |
296 | { | 380 | { |
297 | WARN_ON(misc_deregister(&mon_dev) != 0); | 381 | WARN_ON(misc_deregister(&mon_dev) != 0); |
382 | platform_device_unregister(monwriter_pdev); | ||
383 | platform_driver_unregister(&monwriter_pdrv); | ||
298 | } | 384 | } |
299 | 385 | ||
300 | module_init(mon_init); | 386 | module_init(mon_init); |
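
monwriter has no bus device of its own, so the hunks above register a platform driver plus a matching platform device whose only job is to receive the PM callbacks. A self-contained sketch of that registration pattern follows, using illustrative demo_* names; the driver name and device name must match so the two bind and the dev_pm_ops become reachable.

#include <linux/err.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int demo_freeze(struct device *dev)
{
	/* stop outstanding work before the snapshot is taken */
	return 0;
}

static int demo_restore(struct device *dev)
{
	/* restart whatever demo_freeze() stopped */
	return 0;
}

static struct dev_pm_ops demo_pm_ops = {
	.freeze	 = demo_freeze,
	.thaw	 = demo_restore,
	.restore = demo_restore,
};

static struct platform_driver demo_pdrv = {
	.driver = {
		.name	= "demo_pm",
		.owner	= THIS_MODULE,
		.pm	= &demo_pm_ops,
	},
};

static struct platform_device *demo_pdev;

static int __init demo_init(void)
{
	int rc;

	rc = platform_driver_register(&demo_pdrv);
	if (rc)
		return rc;
	/* same name as the driver so the device binds to it */
	demo_pdev = platform_device_register_simple("demo_pm", -1, NULL, 0);
	if (IS_ERR(demo_pdev)) {
		rc = PTR_ERR(demo_pdev);
		platform_driver_unregister(&demo_pdrv);
		return rc;
	}
	return 0;
}

static void __exit demo_exit(void)
{
	platform_device_unregister(demo_pdev);
	platform_driver_unregister(&demo_pdrv);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

On freeze the real driver then walks mon_priv_list and stops every record except generated events with APPLDATA_STOP_REC; thaw and restore re-issue the interval and config start records.
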
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 0b15cf107ec9..81c151b5f0ac 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c | |||
@@ -1,11 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/char/raw3270.c | 2 | * IBM/3270 Driver - core functions. |
3 | * IBM/3270 Driver - core functions. | ||
4 | * | 3 | * |
5 | * Author(s): | 4 | * Author(s): |
6 | * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) | 5 | * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) |
7 | * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> | 6 | * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> |
8 | * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | 7 | * Copyright IBM Corp. 2003, 2009 |
9 | */ | 8 | */ |
10 | 9 | ||
11 | #include <linux/bootmem.h> | 10 | #include <linux/bootmem.h> |
@@ -61,6 +60,7 @@ struct raw3270 { | |||
61 | #define RAW3270_FLAGS_ATTN 2 /* Device sent an ATTN interrupt */ | 60 | #define RAW3270_FLAGS_ATTN 2 /* Device sent an ATTN interrupt */ |
62 | #define RAW3270_FLAGS_READY 4 /* Device is useable by views */ | 61 | #define RAW3270_FLAGS_READY 4 /* Device is useable by views */ |
63 | #define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */ | 62 | #define RAW3270_FLAGS_CONSOLE 8 /* Device is the console. */ |
63 | #define RAW3270_FLAGS_FROZEN 16 /* set if 3270 is frozen for suspend */ | ||
64 | 64 | ||
65 | /* Semaphore to protect global data of raw3270 (devices, views, etc). */ | 65 | /* Semaphore to protect global data of raw3270 (devices, views, etc). */ |
66 | static DEFINE_MUTEX(raw3270_mutex); | 66 | static DEFINE_MUTEX(raw3270_mutex); |
@@ -306,7 +306,8 @@ raw3270_start(struct raw3270_view *view, struct raw3270_request *rq) | |||
306 | 306 | ||
307 | spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); | 307 | spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); |
308 | rp = view->dev; | 308 | rp = view->dev; |
309 | if (!rp || rp->view != view) | 309 | if (!rp || rp->view != view || |
310 | test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) | ||
310 | rc = -EACCES; | 311 | rc = -EACCES; |
311 | else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) | 312 | else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) |
312 | rc = -ENODEV; | 313 | rc = -ENODEV; |
@@ -323,7 +324,8 @@ raw3270_start_locked(struct raw3270_view *view, struct raw3270_request *rq) | |||
323 | int rc; | 324 | int rc; |
324 | 325 | ||
325 | rp = view->dev; | 326 | rp = view->dev; |
326 | if (!rp || rp->view != view) | 327 | if (!rp || rp->view != view || |
328 | test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) | ||
327 | rc = -EACCES; | 329 | rc = -EACCES; |
328 | else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) | 330 | else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) |
329 | rc = -ENODEV; | 331 | rc = -ENODEV; |
@@ -764,7 +766,8 @@ raw3270_reset(struct raw3270_view *view) | |||
764 | int rc; | 766 | int rc; |
765 | 767 | ||
766 | rp = view->dev; | 768 | rp = view->dev; |
767 | if (!rp || rp->view != view) | 769 | if (!rp || rp->view != view || |
770 | test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) | ||
768 | rc = -EACCES; | 771 | rc = -EACCES; |
769 | else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) | 772 | else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) |
770 | rc = -ENODEV; | 773 | rc = -ENODEV; |
@@ -922,6 +925,8 @@ raw3270_activate_view(struct raw3270_view *view) | |||
922 | rc = 0; | 925 | rc = 0; |
923 | else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) | 926 | else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) |
924 | rc = -ENODEV; | 927 | rc = -ENODEV; |
928 | else if (test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) | ||
929 | rc = -EACCES; | ||
925 | else { | 930 | else { |
926 | oldview = NULL; | 931 | oldview = NULL; |
927 | if (rp->view) { | 932 | if (rp->view) { |
@@ -969,7 +974,8 @@ raw3270_deactivate_view(struct raw3270_view *view) | |||
969 | list_del_init(&view->list); | 974 | list_del_init(&view->list); |
970 | list_add_tail(&view->list, &rp->view_list); | 975 | list_add_tail(&view->list, &rp->view_list); |
971 | /* Try to activate another view. */ | 976 | /* Try to activate another view. */ |
972 | if (test_bit(RAW3270_FLAGS_READY, &rp->flags)) { | 977 | if (test_bit(RAW3270_FLAGS_READY, &rp->flags) && |
978 | !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) { | ||
973 | list_for_each_entry(view, &rp->view_list, list) { | 979 | list_for_each_entry(view, &rp->view_list, list) { |
974 | rp->view = view; | 980 | rp->view = view; |
975 | if (view->fn->activate(view) == 0) | 981 | if (view->fn->activate(view) == 0) |
@@ -1068,7 +1074,8 @@ raw3270_del_view(struct raw3270_view *view) | |||
1068 | rp->view = NULL; | 1074 | rp->view = NULL; |
1069 | } | 1075 | } |
1070 | list_del_init(&view->list); | 1076 | list_del_init(&view->list); |
1071 | if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags)) { | 1077 | if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags) && |
1078 | !test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) { | ||
1072 | /* Try to activate another view. */ | 1079 | /* Try to activate another view. */ |
1073 | list_for_each_entry(nv, &rp->view_list, list) { | 1080 | list_for_each_entry(nv, &rp->view_list, list) { |
1074 | if (nv->fn->activate(nv) == 0) { | 1081 | if (nv->fn->activate(nv) == 0) { |
@@ -1337,6 +1344,58 @@ raw3270_set_offline (struct ccw_device *cdev) | |||
1337 | return 0; | 1344 | return 0; |
1338 | } | 1345 | } |
1339 | 1346 | ||
1347 | static int raw3270_pm_stop(struct ccw_device *cdev) | ||
1348 | { | ||
1349 | struct raw3270 *rp; | ||
1350 | struct raw3270_view *view; | ||
1351 | unsigned long flags; | ||
1352 | |||
1353 | rp = cdev->dev.driver_data; | ||
1354 | if (!rp) | ||
1355 | return 0; | ||
1356 | spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); | ||
1357 | if (rp->view) | ||
1358 | rp->view->fn->deactivate(rp->view); | ||
1359 | if (!test_bit(RAW3270_FLAGS_CONSOLE, &rp->flags)) { | ||
1360 | /* | ||
1361 | * Release tty and fullscreen for all non-console | ||
1362 | * devices. | ||
1363 | */ | ||
1364 | list_for_each_entry(view, &rp->view_list, list) { | ||
1365 | if (view->fn->release) | ||
1366 | view->fn->release(view); | ||
1367 | } | ||
1368 | } | ||
1369 | set_bit(RAW3270_FLAGS_FROZEN, &rp->flags); | ||
1370 | spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); | ||
1371 | return 0; | ||
1372 | } | ||
1373 | |||
1374 | static int raw3270_pm_start(struct ccw_device *cdev) | ||
1375 | { | ||
1376 | struct raw3270 *rp; | ||
1377 | unsigned long flags; | ||
1378 | |||
1379 | rp = cdev->dev.driver_data; | ||
1380 | if (!rp) | ||
1381 | return 0; | ||
1382 | spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); | ||
1383 | clear_bit(RAW3270_FLAGS_FROZEN, &rp->flags); | ||
1384 | if (rp->view) | ||
1385 | rp->view->fn->activate(rp->view); | ||
1386 | spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); | ||
1387 | return 0; | ||
1388 | } | ||
1389 | |||
1390 | void raw3270_pm_unfreeze(struct raw3270_view *view) | ||
1391 | { | ||
1392 | struct raw3270 *rp; | ||
1393 | |||
1394 | rp = view->dev; | ||
1395 | if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) | ||
1396 | ccw_device_force_console(); | ||
1397 | } | ||
1398 | |||
1340 | static struct ccw_device_id raw3270_id[] = { | 1399 | static struct ccw_device_id raw3270_id[] = { |
1341 | { CCW_DEVICE(0x3270, 0) }, | 1400 | { CCW_DEVICE(0x3270, 0) }, |
1342 | { CCW_DEVICE(0x3271, 0) }, | 1401 | { CCW_DEVICE(0x3271, 0) }, |
@@ -1360,6 +1419,9 @@ static struct ccw_driver raw3270_ccw_driver = { | |||
1360 | .remove = &raw3270_remove, | 1419 | .remove = &raw3270_remove, |
1361 | .set_online = &raw3270_set_online, | 1420 | .set_online = &raw3270_set_online, |
1362 | .set_offline = &raw3270_set_offline, | 1421 | .set_offline = &raw3270_set_offline, |
1422 | .freeze = &raw3270_pm_stop, | ||
1423 | .thaw = &raw3270_pm_start, | ||
1424 | .restore = &raw3270_pm_start, | ||
1363 | }; | 1425 | }; |
1364 | 1426 | ||
1365 | static int | 1427 | static int |
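
raw3270 does not tear its views down; it marks the device with a FROZEN flag, refuses to start new channel programs while the flag is set, and on freeze deactivates the current view (releasing tty and fullscreen views on non-console devices). A reduced sketch of that flag gating, with illustrative names; the real code takes get_ccwdev_lock() and keeps its existing READY check.

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/spinlock.h>

#define DEMO_FLAG_READY		0	/* bit numbers for test_bit/set_bit */
#define DEMO_FLAG_FROZEN	1

struct demo_dev {
	spinlock_t lock;
	unsigned long flags;
};

static int demo_start_io(struct demo_dev *dd)
{
	unsigned long irqflags;
	int rc = 0;

	spin_lock_irqsave(&dd->lock, irqflags);
	if (test_bit(DEMO_FLAG_FROZEN, &dd->flags))
		rc = -EACCES;		/* frozen for suspend: refuse new I/O */
	else if (!test_bit(DEMO_FLAG_READY, &dd->flags))
		rc = -ENODEV;
	/* else: build and submit the channel program here */
	spin_unlock_irqrestore(&dd->lock, irqflags);
	return rc;
}

static void demo_freeze(struct demo_dev *dd)
{
	set_bit(DEMO_FLAG_FROZEN, &dd->flags);	/* every start path now fails */
}

static void demo_thaw(struct demo_dev *dd)
{
	clear_bit(DEMO_FLAG_FROZEN, &dd->flags);
	/* the real driver reactivates the current view here */
}
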
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h index 90beaa80a782..ed34eb2199cc 100644 --- a/drivers/s390/char/raw3270.h +++ b/drivers/s390/char/raw3270.h | |||
@@ -1,11 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/char/raw3270.h | 2 | * IBM/3270 Driver |
3 | * IBM/3270 Driver | ||
4 | * | 3 | * |
5 | * Author(s): | 4 | * Author(s): |
6 | * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) | 5 | * Original 3270 Code for 2.4 written by Richard Hitt (UTS Global) |
7 | * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> | 6 | * Rewritten for 2.5 by Martin Schwidefsky <schwidefsky@de.ibm.com> |
8 | * -- Copyright (C) 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | 7 | * Copyright IBM Corp. 2003, 2009 |
9 | */ | 8 | */ |
10 | 9 | ||
11 | #include <asm/idals.h> | 10 | #include <asm/idals.h> |
@@ -195,6 +194,7 @@ void raw3270_wait_cons_dev(struct raw3270 *); | |||
195 | /* Notifier for device addition/removal */ | 194 | /* Notifier for device addition/removal */ |
196 | int raw3270_register_notifier(void (*notifier)(int, int)); | 195 | int raw3270_register_notifier(void (*notifier)(int, int)); |
197 | void raw3270_unregister_notifier(void (*notifier)(int, int)); | 196 | void raw3270_unregister_notifier(void (*notifier)(int, int)); |
197 | void raw3270_pm_unfreeze(struct raw3270_view *); | ||
198 | 198 | ||
199 | /* | 199 | /* |
200 | * Little memory allocator for string objects. | 200 | * Little memory allocator for string objects. |
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 4377e93a43d7..a983f5086788 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c | |||
@@ -1,11 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/char/sclp.c | 2 | * core function to access sclp interface |
3 | * core function to access sclp interface | ||
4 | * | 3 | * |
5 | * S390 version | 4 | * Copyright IBM Corp. 1999, 2009 |
6 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * |
7 | * Author(s): Martin Peschke <mpeschke@de.ibm.com> | 6 | * Author(s): Martin Peschke <mpeschke@de.ibm.com> |
8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 7 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
9 | */ | 8 | */ |
10 | 9 | ||
11 | #include <linux/module.h> | 10 | #include <linux/module.h> |
@@ -16,6 +15,9 @@ | |||
16 | #include <linux/reboot.h> | 15 | #include <linux/reboot.h> |
17 | #include <linux/jiffies.h> | 16 | #include <linux/jiffies.h> |
18 | #include <linux/init.h> | 17 | #include <linux/init.h> |
18 | #include <linux/suspend.h> | ||
19 | #include <linux/completion.h> | ||
20 | #include <linux/platform_device.h> | ||
19 | #include <asm/types.h> | 21 | #include <asm/types.h> |
20 | #include <asm/s390_ext.h> | 22 | #include <asm/s390_ext.h> |
21 | 23 | ||
@@ -47,6 +49,16 @@ static struct sclp_req sclp_init_req; | |||
47 | static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); | 49 | static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); |
48 | static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); | 50 | static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE))); |
49 | 51 | ||
52 | /* Suspend request */ | ||
53 | static DECLARE_COMPLETION(sclp_request_queue_flushed); | ||
54 | |||
55 | static void sclp_suspend_req_cb(struct sclp_req *req, void *data) | ||
56 | { | ||
57 | complete(&sclp_request_queue_flushed); | ||
58 | } | ||
59 | |||
60 | static struct sclp_req sclp_suspend_req; | ||
61 | |||
50 | /* Timer for request retries. */ | 62 | /* Timer for request retries. */ |
51 | static struct timer_list sclp_request_timer; | 63 | static struct timer_list sclp_request_timer; |
52 | 64 | ||
@@ -84,6 +96,12 @@ static volatile enum sclp_mask_state_t { | |||
84 | sclp_mask_state_initializing | 96 | sclp_mask_state_initializing |
85 | } sclp_mask_state = sclp_mask_state_idle; | 97 | } sclp_mask_state = sclp_mask_state_idle; |
86 | 98 | ||
99 | /* Internal state: is the driver suspended? */ | ||
100 | static enum sclp_suspend_state_t { | ||
101 | sclp_suspend_state_running, | ||
102 | sclp_suspend_state_suspended, | ||
103 | } sclp_suspend_state = sclp_suspend_state_running; | ||
104 | |||
87 | /* Maximum retry counts */ | 105 | /* Maximum retry counts */ |
88 | #define SCLP_INIT_RETRY 3 | 106 | #define SCLP_INIT_RETRY 3 |
89 | #define SCLP_MASK_RETRY 3 | 107 | #define SCLP_MASK_RETRY 3 |
@@ -211,6 +229,8 @@ sclp_process_queue(void) | |||
211 | del_timer(&sclp_request_timer); | 229 | del_timer(&sclp_request_timer); |
212 | while (!list_empty(&sclp_req_queue)) { | 230 | while (!list_empty(&sclp_req_queue)) { |
213 | req = list_entry(sclp_req_queue.next, struct sclp_req, list); | 231 | req = list_entry(sclp_req_queue.next, struct sclp_req, list); |
232 | if (!req->sccb) | ||
233 | goto do_post; | ||
214 | rc = __sclp_start_request(req); | 234 | rc = __sclp_start_request(req); |
215 | if (rc == 0) | 235 | if (rc == 0) |
216 | break; | 236 | break; |
@@ -222,6 +242,7 @@ sclp_process_queue(void) | |||
222 | sclp_request_timeout, 0); | 242 | sclp_request_timeout, 0); |
223 | break; | 243 | break; |
224 | } | 244 | } |
245 | do_post: | ||
225 | /* Post-processing for aborted request */ | 246 | /* Post-processing for aborted request */ |
226 | list_del(&req->list); | 247 | list_del(&req->list); |
227 | if (req->callback) { | 248 | if (req->callback) { |
@@ -233,6 +254,19 @@ sclp_process_queue(void) | |||
233 | spin_unlock_irqrestore(&sclp_lock, flags); | 254 | spin_unlock_irqrestore(&sclp_lock, flags); |
234 | } | 255 | } |
235 | 256 | ||
257 | static int __sclp_can_add_request(struct sclp_req *req) | ||
258 | { | ||
259 | if (req == &sclp_suspend_req || req == &sclp_init_req) | ||
260 | return 1; | ||
261 | if (sclp_suspend_state != sclp_suspend_state_running) | ||
262 | return 0; | ||
263 | if (sclp_init_state != sclp_init_state_initialized) | ||
264 | return 0; | ||
265 | if (sclp_activation_state != sclp_activation_state_active) | ||
266 | return 0; | ||
267 | return 1; | ||
268 | } | ||
269 | |||
236 | /* Queue a new request. Return zero on success, non-zero otherwise. */ | 270 | /* Queue a new request. Return zero on success, non-zero otherwise. */ |
237 | int | 271 | int |
238 | sclp_add_request(struct sclp_req *req) | 272 | sclp_add_request(struct sclp_req *req) |
@@ -241,9 +275,7 @@ sclp_add_request(struct sclp_req *req) | |||
241 | int rc; | 275 | int rc; |
242 | 276 | ||
243 | spin_lock_irqsave(&sclp_lock, flags); | 277 | spin_lock_irqsave(&sclp_lock, flags); |
244 | if ((sclp_init_state != sclp_init_state_initialized || | 278 | if (!__sclp_can_add_request(req)) { |
245 | sclp_activation_state != sclp_activation_state_active) && | ||
246 | req != &sclp_init_req) { | ||
247 | spin_unlock_irqrestore(&sclp_lock, flags); | 279 | spin_unlock_irqrestore(&sclp_lock, flags); |
248 | return -EIO; | 280 | return -EIO; |
249 | } | 281 | } |
@@ -254,10 +286,16 @@ sclp_add_request(struct sclp_req *req) | |||
254 | /* Start if request is first in list */ | 286 | /* Start if request is first in list */ |
255 | if (sclp_running_state == sclp_running_state_idle && | 287 | if (sclp_running_state == sclp_running_state_idle && |
256 | req->list.prev == &sclp_req_queue) { | 288 | req->list.prev == &sclp_req_queue) { |
289 | if (!req->sccb) { | ||
290 | list_del(&req->list); | ||
291 | rc = -ENODATA; | ||
292 | goto out; | ||
293 | } | ||
257 | rc = __sclp_start_request(req); | 294 | rc = __sclp_start_request(req); |
258 | if (rc) | 295 | if (rc) |
259 | list_del(&req->list); | 296 | list_del(&req->list); |
260 | } | 297 | } |
298 | out: | ||
261 | spin_unlock_irqrestore(&sclp_lock, flags); | 299 | spin_unlock_irqrestore(&sclp_lock, flags); |
262 | return rc; | 300 | return rc; |
263 | } | 301 | } |
@@ -560,6 +598,7 @@ sclp_register(struct sclp_register *reg) | |||
560 | /* Trigger initial state change callback */ | 598 | /* Trigger initial state change callback */ |
561 | reg->sclp_receive_mask = 0; | 599 | reg->sclp_receive_mask = 0; |
562 | reg->sclp_send_mask = 0; | 600 | reg->sclp_send_mask = 0; |
601 | reg->pm_event_posted = 0; | ||
563 | list_add(®->list, &sclp_reg_list); | 602 | list_add(®->list, &sclp_reg_list); |
564 | spin_unlock_irqrestore(&sclp_lock, flags); | 603 | spin_unlock_irqrestore(&sclp_lock, flags); |
565 | rc = sclp_init_mask(1); | 604 | rc = sclp_init_mask(1); |
@@ -880,20 +919,134 @@ static struct notifier_block sclp_reboot_notifier = { | |||
880 | .notifier_call = sclp_reboot_event | 919 | .notifier_call = sclp_reboot_event |
881 | }; | 920 | }; |
882 | 921 | ||
922 | /* | ||
923 | * Suspend/resume SCLP notifier implementation | ||
924 | */ | ||
925 | |||
926 | static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback) | ||
927 | { | ||
928 | struct sclp_register *reg; | ||
929 | unsigned long flags; | ||
930 | |||
931 | if (!rollback) { | ||
932 | spin_lock_irqsave(&sclp_lock, flags); | ||
933 | list_for_each_entry(reg, &sclp_reg_list, list) | ||
934 | reg->pm_event_posted = 0; | ||
935 | spin_unlock_irqrestore(&sclp_lock, flags); | ||
936 | } | ||
937 | do { | ||
938 | spin_lock_irqsave(&sclp_lock, flags); | ||
939 | list_for_each_entry(reg, &sclp_reg_list, list) { | ||
940 | if (rollback && reg->pm_event_posted) | ||
941 | goto found; | ||
942 | if (!rollback && !reg->pm_event_posted) | ||
943 | goto found; | ||
944 | } | ||
945 | spin_unlock_irqrestore(&sclp_lock, flags); | ||
946 | return; | ||
947 | found: | ||
948 | spin_unlock_irqrestore(&sclp_lock, flags); | ||
949 | if (reg->pm_event_fn) | ||
950 | reg->pm_event_fn(reg, sclp_pm_event); | ||
951 | reg->pm_event_posted = rollback ? 0 : 1; | ||
952 | } while (1); | ||
953 | } | ||
954 | |||
955 | /* | ||
956 | * Suspend/resume callbacks for platform device | ||
957 | */ | ||
958 | |||
959 | static int sclp_freeze(struct device *dev) | ||
960 | { | ||
961 | unsigned long flags; | ||
962 | int rc; | ||
963 | |||
964 | sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0); | ||
965 | |||
966 | spin_lock_irqsave(&sclp_lock, flags); | ||
967 | sclp_suspend_state = sclp_suspend_state_suspended; | ||
968 | spin_unlock_irqrestore(&sclp_lock, flags); | ||
969 | |||
970 | /* Init suspend data */ | ||
971 | memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req)); | ||
972 | sclp_suspend_req.callback = sclp_suspend_req_cb; | ||
973 | sclp_suspend_req.status = SCLP_REQ_FILLED; | ||
974 | init_completion(&sclp_request_queue_flushed); | ||
975 | |||
976 | rc = sclp_add_request(&sclp_suspend_req); | ||
977 | if (rc == 0) | ||
978 | wait_for_completion(&sclp_request_queue_flushed); | ||
979 | else if (rc != -ENODATA) | ||
980 | goto fail_thaw; | ||
981 | |||
982 | rc = sclp_deactivate(); | ||
983 | if (rc) | ||
984 | goto fail_thaw; | ||
985 | return 0; | ||
986 | |||
987 | fail_thaw: | ||
988 | spin_lock_irqsave(&sclp_lock, flags); | ||
989 | sclp_suspend_state = sclp_suspend_state_running; | ||
990 | spin_unlock_irqrestore(&sclp_lock, flags); | ||
991 | sclp_pm_event(SCLP_PM_EVENT_THAW, 1); | ||
992 | return rc; | ||
993 | } | ||
994 | |||
995 | static int sclp_undo_suspend(enum sclp_pm_event event) | ||
996 | { | ||
997 | unsigned long flags; | ||
998 | int rc; | ||
999 | |||
1000 | rc = sclp_reactivate(); | ||
1001 | if (rc) | ||
1002 | return rc; | ||
1003 | |||
1004 | spin_lock_irqsave(&sclp_lock, flags); | ||
1005 | sclp_suspend_state = sclp_suspend_state_running; | ||
1006 | spin_unlock_irqrestore(&sclp_lock, flags); | ||
1007 | |||
1008 | sclp_pm_event(event, 0); | ||
1009 | return 0; | ||
1010 | } | ||
1011 | |||
1012 | static int sclp_thaw(struct device *dev) | ||
1013 | { | ||
1014 | return sclp_undo_suspend(SCLP_PM_EVENT_THAW); | ||
1015 | } | ||
1016 | |||
1017 | static int sclp_restore(struct device *dev) | ||
1018 | { | ||
1019 | return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE); | ||
1020 | } | ||
1021 | |||
1022 | static struct dev_pm_ops sclp_pm_ops = { | ||
1023 | .freeze = sclp_freeze, | ||
1024 | .thaw = sclp_thaw, | ||
1025 | .restore = sclp_restore, | ||
1026 | }; | ||
1027 | |||
1028 | static struct platform_driver sclp_pdrv = { | ||
1029 | .driver = { | ||
1030 | .name = "sclp", | ||
1031 | .owner = THIS_MODULE, | ||
1032 | .pm = &sclp_pm_ops, | ||
1033 | }, | ||
1034 | }; | ||
1035 | |||
1036 | static struct platform_device *sclp_pdev; | ||
1037 | |||
883 | /* Initialize SCLP driver. Return zero if driver is operational, non-zero | 1038 | /* Initialize SCLP driver. Return zero if driver is operational, non-zero |
884 | * otherwise. */ | 1039 | * otherwise. */ |
885 | static int | 1040 | static int |
886 | sclp_init(void) | 1041 | sclp_init(void) |
887 | { | 1042 | { |
888 | unsigned long flags; | 1043 | unsigned long flags; |
889 | int rc; | 1044 | int rc = 0; |
890 | 1045 | ||
891 | spin_lock_irqsave(&sclp_lock, flags); | 1046 | spin_lock_irqsave(&sclp_lock, flags); |
892 | /* Check for previous or running initialization */ | 1047 | /* Check for previous or running initialization */ |
893 | if (sclp_init_state != sclp_init_state_uninitialized) { | 1048 | if (sclp_init_state != sclp_init_state_uninitialized) |
894 | spin_unlock_irqrestore(&sclp_lock, flags); | 1049 | goto fail_unlock; |
895 | return 0; | ||
896 | } | ||
897 | sclp_init_state = sclp_init_state_initializing; | 1050 | sclp_init_state = sclp_init_state_initializing; |
898 | /* Set up variables */ | 1051 | /* Set up variables */ |
899 | INIT_LIST_HEAD(&sclp_req_queue); | 1052 | INIT_LIST_HEAD(&sclp_req_queue); |
@@ -904,27 +1057,17 @@ sclp_init(void) | |||
904 | spin_unlock_irqrestore(&sclp_lock, flags); | 1057 | spin_unlock_irqrestore(&sclp_lock, flags); |
905 | rc = sclp_check_interface(); | 1058 | rc = sclp_check_interface(); |
906 | spin_lock_irqsave(&sclp_lock, flags); | 1059 | spin_lock_irqsave(&sclp_lock, flags); |
907 | if (rc) { | 1060 | if (rc) |
908 | sclp_init_state = sclp_init_state_uninitialized; | 1061 | goto fail_init_state_uninitialized; |
909 | spin_unlock_irqrestore(&sclp_lock, flags); | ||
910 | return rc; | ||
911 | } | ||
912 | /* Register reboot handler */ | 1062 | /* Register reboot handler */ |
913 | rc = register_reboot_notifier(&sclp_reboot_notifier); | 1063 | rc = register_reboot_notifier(&sclp_reboot_notifier); |
914 | if (rc) { | 1064 | if (rc) |
915 | sclp_init_state = sclp_init_state_uninitialized; | 1065 | goto fail_init_state_uninitialized; |
916 | spin_unlock_irqrestore(&sclp_lock, flags); | ||
917 | return rc; | ||
918 | } | ||
919 | /* Register interrupt handler */ | 1066 | /* Register interrupt handler */ |
920 | rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler, | 1067 | rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler, |
921 | &ext_int_info_hwc); | 1068 | &ext_int_info_hwc); |
922 | if (rc) { | 1069 | if (rc) |
923 | unregister_reboot_notifier(&sclp_reboot_notifier); | 1070 | goto fail_unregister_reboot_notifier; |
924 | sclp_init_state = sclp_init_state_uninitialized; | ||
925 | spin_unlock_irqrestore(&sclp_lock, flags); | ||
926 | return rc; | ||
927 | } | ||
928 | sclp_init_state = sclp_init_state_initialized; | 1071 | sclp_init_state = sclp_init_state_initialized; |
929 | spin_unlock_irqrestore(&sclp_lock, flags); | 1072 | spin_unlock_irqrestore(&sclp_lock, flags); |
930 | /* Enable service-signal external interruption - needs to happen with | 1073 | /* Enable service-signal external interruption - needs to happen with |
@@ -932,11 +1075,56 @@ sclp_init(void) | |||
932 | ctl_set_bit(0, 9); | 1075 | ctl_set_bit(0, 9); |
933 | sclp_init_mask(1); | 1076 | sclp_init_mask(1); |
934 | return 0; | 1077 | return 0; |
1078 | |||
1079 | fail_unregister_reboot_notifier: | ||
1080 | unregister_reboot_notifier(&sclp_reboot_notifier); | ||
1081 | fail_init_state_uninitialized: | ||
1082 | sclp_init_state = sclp_init_state_uninitialized; | ||
1083 | fail_unlock: | ||
1084 | spin_unlock_irqrestore(&sclp_lock, flags); | ||
1085 | return rc; | ||
935 | } | 1086 | } |
936 | 1087 | ||
1088 | /* | ||
1089 | * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able | ||
1090 | * to print the panic message. | ||
1091 | */ | ||
1092 | static int sclp_panic_notify(struct notifier_block *self, | ||
1093 | unsigned long event, void *data) | ||
1094 | { | ||
1095 | if (sclp_suspend_state == sclp_suspend_state_suspended) | ||
1096 | sclp_undo_suspend(SCLP_PM_EVENT_THAW); | ||
1097 | return NOTIFY_OK; | ||
1098 | } | ||
1099 | |||
1100 | static struct notifier_block sclp_on_panic_nb = { | ||
1101 | .notifier_call = sclp_panic_notify, | ||
1102 | .priority = SCLP_PANIC_PRIO, | ||
1103 | }; | ||
1104 | |||
937 | static __init int sclp_initcall(void) | 1105 | static __init int sclp_initcall(void) |
938 | { | 1106 | { |
1107 | int rc; | ||
1108 | |||
1109 | rc = platform_driver_register(&sclp_pdrv); | ||
1110 | if (rc) | ||
1111 | return rc; | ||
1112 | sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0); | ||
1113 | rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0; | ||
1114 | if (rc) | ||
1115 | goto fail_platform_driver_unregister; | ||
1116 | rc = atomic_notifier_chain_register(&panic_notifier_list, | ||
1117 | &sclp_on_panic_nb); | ||
1118 | if (rc) | ||
1119 | goto fail_platform_device_unregister; | ||
1120 | |||
939 | return sclp_init(); | 1121 | return sclp_init(); |
1122 | |||
1123 | fail_platform_device_unregister: | ||
1124 | platform_device_unregister(sclp_pdev); | ||
1125 | fail_platform_driver_unregister: | ||
1126 | platform_driver_unregister(&sclp_pdrv); | ||
1127 | return rc; | ||
940 | } | 1128 | } |
941 | 1129 | ||
942 | arch_initcall(sclp_initcall); | 1130 | arch_initcall(sclp_initcall); |
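
Two things in the sclp core hunks are easy to miss. sclp_freeze() drains the request queue with a marker request whose sccb is NULL: sclp_process_queue() skips the hardware start for it (the new do_post label) and only runs its callback, which completes sclp_request_queue_flushed, while sclp_add_request() returns -ENODATA when the marker would be the only runnable entry, so there is nothing to wait for. And sclp_pm_event() tracks a pm_event_posted flag per consumer so a failed freeze can be rolled back to exactly the consumers already notified. A generic sketch of the flush-marker half, with illustrative demo_* names and an assumed queue runner (not shown) that retires requests in order:

#include <linux/completion.h>
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_req {
	struct list_head list;
	void (*callback)(struct demo_req *req);
};

static LIST_HEAD(demo_queue);
static DEFINE_SPINLOCK(demo_lock);
static DECLARE_COMPLETION(demo_flushed);
static struct demo_req demo_marker;

static void demo_marker_done(struct demo_req *req)
{
	/* runs only after every request queued before the marker is done */
	complete(&demo_flushed);
}

/* Assumed queue runner (not shown) retires requests in order via this. */
static void demo_retire(struct demo_req *req)
{
	unsigned long flags;

	spin_lock_irqsave(&demo_lock, flags);
	list_del(&req->list);
	spin_unlock_irqrestore(&demo_lock, flags);
	if (req->callback)
		req->callback(req);
}

/* Freeze side: append the marker, then wait until it has been retired. */
static void demo_flush_queue(void)
{
	unsigned long flags;

	init_completion(&demo_flushed);
	demo_marker.callback = demo_marker_done;
	spin_lock_irqsave(&demo_lock, flags);
	list_add_tail(&demo_marker.list, &demo_queue);
	spin_unlock_irqrestore(&demo_lock, flags);
	wait_for_completion(&demo_flushed);
}
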
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h index bac80e856f97..60e7cb07095b 100644 --- a/drivers/s390/char/sclp.h +++ b/drivers/s390/char/sclp.h | |||
@@ -1,10 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/char/sclp.h | 2 | * Copyright IBM Corp. 1999, 2009 |
3 | * | 3 | * |
4 | * S390 version | 4 | * Author(s): Martin Peschke <mpeschke@de.ibm.com> |
5 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
6 | * Author(s): Martin Peschke <mpeschke@de.ibm.com> | ||
7 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
8 | */ | 6 | */ |
9 | 7 | ||
10 | #ifndef __SCLP_H__ | 8 | #ifndef __SCLP_H__ |
@@ -17,7 +15,7 @@ | |||
17 | 15 | ||
18 | /* maximum number of pages concerning our own memory management */ | 16 | /* maximum number of pages concerning our own memory management */ |
19 | #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) | 17 | #define MAX_KMEM_PAGES (sizeof(unsigned long) << 3) |
20 | #define MAX_CONSOLE_PAGES 4 | 18 | #define MAX_CONSOLE_PAGES 6 |
21 | 19 | ||
22 | #define EVTYP_OPCMD 0x01 | 20 | #define EVTYP_OPCMD 0x01 |
23 | #define EVTYP_MSG 0x02 | 21 | #define EVTYP_MSG 0x02 |
@@ -68,6 +66,15 @@ typedef unsigned int sclp_cmdw_t; | |||
68 | 66 | ||
69 | #define GDS_KEY_SELFDEFTEXTMSG 0x31 | 67 | #define GDS_KEY_SELFDEFTEXTMSG 0x31 |
70 | 68 | ||
69 | enum sclp_pm_event { | ||
70 | SCLP_PM_EVENT_FREEZE, | ||
71 | SCLP_PM_EVENT_THAW, | ||
72 | SCLP_PM_EVENT_RESTORE, | ||
73 | }; | ||
74 | |||
75 | #define SCLP_PANIC_PRIO 1 | ||
76 | #define SCLP_PANIC_PRIO_CLIENT 0 | ||
77 | |||
71 | typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ | 78 | typedef u32 sccb_mask_t; /* ATTENTION: assumes 32bit mask !!! */ |
72 | 79 | ||
73 | struct sccb_header { | 80 | struct sccb_header { |
@@ -134,6 +141,10 @@ struct sclp_register { | |||
134 | void (*state_change_fn)(struct sclp_register *); | 141 | void (*state_change_fn)(struct sclp_register *); |
135 | /* called for events in cp_receive_mask/sclp_receive_mask */ | 142 | /* called for events in cp_receive_mask/sclp_receive_mask */ |
136 | void (*receiver_fn)(struct evbuf_header *); | 143 | void (*receiver_fn)(struct evbuf_header *); |
144 | /* called for power management events */ | ||
145 | void (*pm_event_fn)(struct sclp_register *, enum sclp_pm_event); | ||
146 | /* pm event posted flag */ | ||
147 | int pm_event_posted; | ||
137 | }; | 148 | }; |
138 | 149 | ||
139 | /* externals from sclp.c */ | 150 | /* externals from sclp.c */ |
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 77ab6e34a100..5cc11c636d38 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c | |||
@@ -1,9 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/char/sclp_cmd.c | 2 | * Copyright IBM Corp. 2007, 2009 |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2007 | 4 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, |
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com>, | 5 | * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> |
6 | * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | ||
7 | */ | 6 | */ |
8 | 7 | ||
9 | #define KMSG_COMPONENT "sclp_cmd" | 8 | #define KMSG_COMPONENT "sclp_cmd" |
@@ -12,11 +11,13 @@ | |||
12 | #include <linux/completion.h> | 11 | #include <linux/completion.h> |
13 | #include <linux/init.h> | 12 | #include <linux/init.h> |
14 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
14 | #include <linux/err.h> | ||
15 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
16 | #include <linux/string.h> | 16 | #include <linux/string.h> |
17 | #include <linux/mm.h> | 17 | #include <linux/mm.h> |
18 | #include <linux/mmzone.h> | 18 | #include <linux/mmzone.h> |
19 | #include <linux/memory.h> | 19 | #include <linux/memory.h> |
20 | #include <linux/platform_device.h> | ||
20 | #include <asm/chpid.h> | 21 | #include <asm/chpid.h> |
21 | #include <asm/sclp.h> | 22 | #include <asm/sclp.h> |
22 | #include <asm/setup.h> | 23 | #include <asm/setup.h> |
@@ -292,6 +293,7 @@ static DEFINE_MUTEX(sclp_mem_mutex); | |||
292 | static LIST_HEAD(sclp_mem_list); | 293 | static LIST_HEAD(sclp_mem_list); |
293 | static u8 sclp_max_storage_id; | 294 | static u8 sclp_max_storage_id; |
294 | static unsigned long sclp_storage_ids[256 / BITS_PER_LONG]; | 295 | static unsigned long sclp_storage_ids[256 / BITS_PER_LONG]; |
296 | static int sclp_mem_state_changed; | ||
295 | 297 | ||
296 | struct memory_increment { | 298 | struct memory_increment { |
297 | struct list_head list; | 299 | struct list_head list; |
@@ -450,6 +452,8 @@ static int sclp_mem_notifier(struct notifier_block *nb, | |||
450 | rc = -EINVAL; | 452 | rc = -EINVAL; |
451 | break; | 453 | break; |
452 | } | 454 | } |
455 | if (!rc) | ||
456 | sclp_mem_state_changed = 1; | ||
453 | mutex_unlock(&sclp_mem_mutex); | 457 | mutex_unlock(&sclp_mem_mutex); |
454 | return rc ? NOTIFY_BAD : NOTIFY_OK; | 458 | return rc ? NOTIFY_BAD : NOTIFY_OK; |
455 | } | 459 | } |
@@ -525,6 +529,14 @@ static void __init insert_increment(u16 rn, int standby, int assigned) | |||
525 | list_add(&new_incr->list, prev); | 529 | list_add(&new_incr->list, prev); |
526 | } | 530 | } |
527 | 531 | ||
532 | static int sclp_mem_freeze(struct device *dev) | ||
533 | { | ||
534 | if (!sclp_mem_state_changed) | ||
535 | return 0; | ||
536 | pr_err("Memory hotplug state changed, suspend refused.\n"); | ||
537 | return -EPERM; | ||
538 | } | ||
539 | |||
528 | struct read_storage_sccb { | 540 | struct read_storage_sccb { |
529 | struct sccb_header header; | 541 | struct sccb_header header; |
530 | u16 max_id; | 542 | u16 max_id; |
@@ -534,8 +546,20 @@ struct read_storage_sccb { | |||
534 | u32 entries[0]; | 546 | u32 entries[0]; |
535 | } __packed; | 547 | } __packed; |
536 | 548 | ||
549 | static struct dev_pm_ops sclp_mem_pm_ops = { | ||
550 | .freeze = sclp_mem_freeze, | ||
551 | }; | ||
552 | |||
553 | static struct platform_driver sclp_mem_pdrv = { | ||
554 | .driver = { | ||
555 | .name = "sclp_mem", | ||
556 | .pm = &sclp_mem_pm_ops, | ||
557 | }, | ||
558 | }; | ||
559 | |||
537 | static int __init sclp_detect_standby_memory(void) | 560 | static int __init sclp_detect_standby_memory(void) |
538 | { | 561 | { |
562 | struct platform_device *sclp_pdev; | ||
539 | struct read_storage_sccb *sccb; | 563 | struct read_storage_sccb *sccb; |
540 | int i, id, assigned, rc; | 564 | int i, id, assigned, rc; |
541 | 565 | ||
@@ -588,7 +612,17 @@ static int __init sclp_detect_standby_memory(void) | |||
588 | rc = register_memory_notifier(&sclp_mem_nb); | 612 | rc = register_memory_notifier(&sclp_mem_nb); |
589 | if (rc) | 613 | if (rc) |
590 | goto out; | 614 | goto out; |
615 | rc = platform_driver_register(&sclp_mem_pdrv); | ||
616 | if (rc) | ||
617 | goto out; | ||
618 | sclp_pdev = platform_device_register_simple("sclp_mem", -1, NULL, 0); | ||
619 | rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0; | ||
620 | if (rc) | ||
621 | goto out_driver; | ||
591 | sclp_add_standby_memory(); | 622 | sclp_add_standby_memory(); |
623 | goto out; | ||
624 | out_driver: | ||
625 | platform_driver_unregister(&sclp_mem_pdrv); | ||
592 | out: | 626 | out: |
593 | free_page((unsigned long) sccb); | 627 | free_page((unsigned long) sccb); |
594 | return rc; | 628 | return rc; |
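
sclp_cmd is the one place where suspend is refused outright: once standby memory has been hotplugged, the saved image would no longer match the machine configuration, so the freeze callback of the new sclp_mem platform device returns -EPERM and the PM core thaws the already-frozen devices and aborts the hibernation attempt. A trimmed sketch of such a veto, with illustrative names:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/pm.h>

static int demo_state_changed;	/* set elsewhere when the config changes */

static int demo_freeze(struct device *dev)
{
	if (!demo_state_changed)
		return 0;
	pr_err("demo: configuration changed, refusing to suspend\n");
	return -EPERM;	/* the PM core thaws frozen devices and gives up */
}

static struct dev_pm_ops demo_pm_ops = {
	.freeze = demo_freeze,	/* a pure veto needs no thaw/restore */
};
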
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c index 9a25c4bd1421..336811a77672 100644 --- a/drivers/s390/char/sclp_con.c +++ b/drivers/s390/char/sclp_con.c | |||
@@ -1,11 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/char/sclp_con.c | 2 | * SCLP line mode console driver |
3 | * SCLP line mode console driver | ||
4 | * | 3 | * |
5 | * S390 version | 4 | * Copyright IBM Corp. 1999, 2009 |
6 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * Author(s): Martin Peschke <mpeschke@de.ibm.com> |
7 | * Author(s): Martin Peschke <mpeschke@de.ibm.com> | 6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
9 | */ | 7 | */ |
10 | 8 | ||
11 | #include <linux/kmod.h> | 9 | #include <linux/kmod.h> |
@@ -32,13 +30,14 @@ static spinlock_t sclp_con_lock; | |||
32 | static struct list_head sclp_con_pages; | 30 | static struct list_head sclp_con_pages; |
33 | /* List of full struct sclp_buffer structures ready for output */ | 31 | /* List of full struct sclp_buffer structures ready for output */ |
34 | static struct list_head sclp_con_outqueue; | 32 | static struct list_head sclp_con_outqueue; |
35 | /* Counter how many buffers are emitted (max 1) and how many */ | ||
36 | /* are on the output queue. */ | ||
37 | static int sclp_con_buffer_count; | ||
38 | /* Pointer to current console buffer */ | 33 | /* Pointer to current console buffer */ |
39 | static struct sclp_buffer *sclp_conbuf; | 34 | static struct sclp_buffer *sclp_conbuf; |
40 | /* Timer for delayed output of console messages */ | 35 | /* Timer for delayed output of console messages */ |
41 | static struct timer_list sclp_con_timer; | 36 | static struct timer_list sclp_con_timer; |
37 | /* Suspend mode flag */ | ||
38 | static int sclp_con_suspended; | ||
39 | /* Flag that output queue is currently running */ | ||
40 | static int sclp_con_queue_running; | ||
42 | 41 | ||
43 | /* Output format for console messages */ | 42 | /* Output format for console messages */ |
44 | static unsigned short sclp_con_columns; | 43 | static unsigned short sclp_con_columns; |
@@ -53,42 +52,71 @@ sclp_conbuf_callback(struct sclp_buffer *buffer, int rc) | |||
53 | do { | 52 | do { |
54 | page = sclp_unmake_buffer(buffer); | 53 | page = sclp_unmake_buffer(buffer); |
55 | spin_lock_irqsave(&sclp_con_lock, flags); | 54 | spin_lock_irqsave(&sclp_con_lock, flags); |
55 | |||
56 | /* Remove buffer from outqueue */ | 56 | /* Remove buffer from outqueue */ |
57 | list_del(&buffer->list); | 57 | list_del(&buffer->list); |
58 | sclp_con_buffer_count--; | ||
59 | list_add_tail((struct list_head *) page, &sclp_con_pages); | 58 | list_add_tail((struct list_head *) page, &sclp_con_pages); |
59 | |||
60 | /* Check if there is a pending buffer on the out queue. */ | 60 | /* Check if there is a pending buffer on the out queue. */ |
61 | buffer = NULL; | 61 | buffer = NULL; |
62 | if (!list_empty(&sclp_con_outqueue)) | 62 | if (!list_empty(&sclp_con_outqueue)) |
63 | buffer = list_entry(sclp_con_outqueue.next, | 63 | buffer = list_first_entry(&sclp_con_outqueue, |
64 | struct sclp_buffer, list); | 64 | struct sclp_buffer, list); |
65 | if (!buffer || sclp_con_suspended) { | ||
66 | sclp_con_queue_running = 0; | ||
67 | spin_unlock_irqrestore(&sclp_con_lock, flags); | ||
68 | break; | ||
69 | } | ||
65 | spin_unlock_irqrestore(&sclp_con_lock, flags); | 70 | spin_unlock_irqrestore(&sclp_con_lock, flags); |
66 | } while (buffer && sclp_emit_buffer(buffer, sclp_conbuf_callback)); | 71 | } while (sclp_emit_buffer(buffer, sclp_conbuf_callback)); |
67 | } | 72 | } |
68 | 73 | ||
69 | static void | 74 | /* |
70 | sclp_conbuf_emit(void) | 75 | * Finalize and emit first pending buffer. |
76 | */ | ||
77 | static void sclp_conbuf_emit(void) | ||
71 | { | 78 | { |
72 | struct sclp_buffer* buffer; | 79 | struct sclp_buffer* buffer; |
73 | unsigned long flags; | 80 | unsigned long flags; |
74 | int count; | ||
75 | int rc; | 81 | int rc; |
76 | 82 | ||
77 | spin_lock_irqsave(&sclp_con_lock, flags); | 83 | spin_lock_irqsave(&sclp_con_lock, flags); |
78 | buffer = sclp_conbuf; | 84 | if (sclp_conbuf) |
85 | list_add_tail(&sclp_conbuf->list, &sclp_con_outqueue); | ||
79 | sclp_conbuf = NULL; | 86 | sclp_conbuf = NULL; |
80 | if (buffer == NULL) { | 87 | if (sclp_con_queue_running || sclp_con_suspended) |
81 | spin_unlock_irqrestore(&sclp_con_lock, flags); | 88 | goto out_unlock; |
82 | return; | 89 | if (list_empty(&sclp_con_outqueue)) |
83 | } | 90 | goto out_unlock; |
84 | list_add_tail(&buffer->list, &sclp_con_outqueue); | 91 | buffer = list_first_entry(&sclp_con_outqueue, struct sclp_buffer, |
85 | count = sclp_con_buffer_count++; | 92 | list); |
93 | sclp_con_queue_running = 1; | ||
86 | spin_unlock_irqrestore(&sclp_con_lock, flags); | 94 | spin_unlock_irqrestore(&sclp_con_lock, flags); |
87 | if (count) | 95 | |
88 | return; | ||
89 | rc = sclp_emit_buffer(buffer, sclp_conbuf_callback); | 96 | rc = sclp_emit_buffer(buffer, sclp_conbuf_callback); |
90 | if (rc) | 97 | if (rc) |
91 | sclp_conbuf_callback(buffer, rc); | 98 | sclp_conbuf_callback(buffer, rc); |
99 | return; | ||
100 | out_unlock: | ||
101 | spin_unlock_irqrestore(&sclp_con_lock, flags); | ||
102 | } | ||
103 | |||
104 | /* | ||
105 | * Wait until out queue is empty | ||
106 | */ | ||
107 | static void sclp_console_sync_queue(void) | ||
108 | { | ||
109 | unsigned long flags; | ||
110 | |||
111 | spin_lock_irqsave(&sclp_con_lock, flags); | ||
112 | if (timer_pending(&sclp_con_timer)) | ||
113 | del_timer_sync(&sclp_con_timer); | ||
114 | while (sclp_con_queue_running) { | ||
115 | spin_unlock_irqrestore(&sclp_con_lock, flags); | ||
116 | sclp_sync_wait(); | ||
117 | spin_lock_irqsave(&sclp_con_lock, flags); | ||
118 | } | ||
119 | spin_unlock_irqrestore(&sclp_con_lock, flags); | ||
92 | } | 120 | } |
93 | 121 | ||
94 | /* | 122 | /* |
@@ -123,6 +151,8 @@ sclp_console_write(struct console *console, const char *message, | |||
123 | /* make sure we have a console output buffer */ | 151 | /* make sure we have a console output buffer */ |
124 | if (sclp_conbuf == NULL) { | 152 | if (sclp_conbuf == NULL) { |
125 | while (list_empty(&sclp_con_pages)) { | 153 | while (list_empty(&sclp_con_pages)) { |
154 | if (sclp_con_suspended) | ||
155 | goto out; | ||
126 | spin_unlock_irqrestore(&sclp_con_lock, flags); | 156 | spin_unlock_irqrestore(&sclp_con_lock, flags); |
127 | sclp_sync_wait(); | 157 | sclp_sync_wait(); |
128 | spin_lock_irqsave(&sclp_con_lock, flags); | 158 | spin_lock_irqsave(&sclp_con_lock, flags); |
@@ -157,6 +187,7 @@ sclp_console_write(struct console *console, const char *message, | |||
157 | sclp_con_timer.expires = jiffies + HZ/10; | 187 | sclp_con_timer.expires = jiffies + HZ/10; |
158 | add_timer(&sclp_con_timer); | 188 | add_timer(&sclp_con_timer); |
159 | } | 189 | } |
190 | out: | ||
160 | spin_unlock_irqrestore(&sclp_con_lock, flags); | 191 | spin_unlock_irqrestore(&sclp_con_lock, flags); |
161 | } | 192 | } |
162 | 193 | ||
@@ -168,30 +199,43 @@ sclp_console_device(struct console *c, int *index) | |||
168 | } | 199 | } |
169 | 200 | ||
170 | /* | 201 | /* |
171 | * This routine is called from panic when the kernel | 202 | * Make sure that all buffers will be flushed to the SCLP. |
172 | * is going to give up. We have to make sure that all buffers | ||
173 | * will be flushed to the SCLP. | ||
174 | */ | 203 | */ |
175 | static void | 204 | static void |
176 | sclp_console_flush(void) | 205 | sclp_console_flush(void) |
177 | { | 206 | { |
207 | sclp_conbuf_emit(); | ||
208 | sclp_console_sync_queue(); | ||
209 | } | ||
210 | |||
211 | /* | ||
212 | * Resume console: If there are cached messages, emit them. | ||
213 | */ | ||
214 | static void sclp_console_resume(void) | ||
215 | { | ||
178 | unsigned long flags; | 216 | unsigned long flags; |
179 | 217 | ||
218 | spin_lock_irqsave(&sclp_con_lock, flags); | ||
219 | sclp_con_suspended = 0; | ||
220 | spin_unlock_irqrestore(&sclp_con_lock, flags); | ||
180 | sclp_conbuf_emit(); | 221 | sclp_conbuf_emit(); |
222 | } | ||
223 | |||
224 | /* | ||
225 | * Suspend console: Set suspend flag and flush console | ||
226 | */ | ||
227 | static void sclp_console_suspend(void) | ||
228 | { | ||
229 | unsigned long flags; | ||
230 | |||
181 | spin_lock_irqsave(&sclp_con_lock, flags); | 231 | spin_lock_irqsave(&sclp_con_lock, flags); |
182 | if (timer_pending(&sclp_con_timer)) | 232 | sclp_con_suspended = 1; |
183 | del_timer(&sclp_con_timer); | ||
184 | while (sclp_con_buffer_count > 0) { | ||
185 | spin_unlock_irqrestore(&sclp_con_lock, flags); | ||
186 | sclp_sync_wait(); | ||
187 | spin_lock_irqsave(&sclp_con_lock, flags); | ||
188 | } | ||
189 | spin_unlock_irqrestore(&sclp_con_lock, flags); | 233 | spin_unlock_irqrestore(&sclp_con_lock, flags); |
234 | sclp_console_flush(); | ||
190 | } | 235 | } |
191 | 236 | ||
192 | static int | 237 | static int sclp_console_notify(struct notifier_block *self, |
193 | sclp_console_notify(struct notifier_block *self, | 238 | unsigned long event, void *data) |
194 | unsigned long event, void *data) | ||
195 | { | 239 | { |
196 | sclp_console_flush(); | 240 | sclp_console_flush(); |
197 | return NOTIFY_OK; | 241 | return NOTIFY_OK; |
@@ -199,7 +243,7 @@ sclp_console_notify(struct notifier_block *self, | |||
199 | 243 | ||
200 | static struct notifier_block on_panic_nb = { | 244 | static struct notifier_block on_panic_nb = { |
201 | .notifier_call = sclp_console_notify, | 245 | .notifier_call = sclp_console_notify, |
202 | .priority = 1, | 246 | .priority = SCLP_PANIC_PRIO_CLIENT, |
203 | }; | 247 | }; |
204 | 248 | ||
205 | static struct notifier_block on_reboot_nb = { | 249 | static struct notifier_block on_reboot_nb = { |
@@ -221,6 +265,22 @@ static struct console sclp_console = | |||
221 | }; | 265 | }; |
222 | 266 | ||
223 | /* | 267 | /* |
268 | * This function is called for SCLP suspend and resume events. | ||
269 | */ | ||
270 | void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event) | ||
271 | { | ||
272 | switch (sclp_pm_event) { | ||
273 | case SCLP_PM_EVENT_FREEZE: | ||
274 | sclp_console_suspend(); | ||
275 | break; | ||
276 | case SCLP_PM_EVENT_RESTORE: | ||
277 | case SCLP_PM_EVENT_THAW: | ||
278 | sclp_console_resume(); | ||
279 | break; | ||
280 | } | ||
281 | } | ||
282 | |||
283 | /* | ||
224 | * called by console_init() in drivers/char/tty_io.c at boot-time. | 284 | * called by console_init() in drivers/char/tty_io.c at boot-time. |
225 | */ | 285 | */ |
226 | static int __init | 286 | static int __init |
@@ -243,7 +303,6 @@ sclp_console_init(void) | |||
243 | } | 303 | } |
244 | INIT_LIST_HEAD(&sclp_con_outqueue); | 304 | INIT_LIST_HEAD(&sclp_con_outqueue); |
245 | spin_lock_init(&sclp_con_lock); | 305 | spin_lock_init(&sclp_con_lock); |
246 | sclp_con_buffer_count = 0; | ||
247 | sclp_conbuf = NULL; | 306 | sclp_conbuf = NULL; |
248 | init_timer(&sclp_con_timer); | 307 | init_timer(&sclp_con_timer); |
249 | 308 | ||
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c index 710af42603f8..4be63be73445 100644 --- a/drivers/s390/char/sclp_rw.c +++ b/drivers/s390/char/sclp_rw.c | |||
@@ -1,11 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/char/sclp_rw.c | 2 | * driver: reading from and writing to system console on S/390 via SCLP |
3 | * driver: reading from and writing to system console on S/390 via SCLP | ||
4 | * | 3 | * |
5 | * S390 version | 4 | * Copyright IBM Corp. 1999, 2009 |
6 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * |
7 | * Author(s): Martin Peschke <mpeschke@de.ibm.com> | 6 | * Author(s): Martin Peschke <mpeschke@de.ibm.com> |
8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 7 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
9 | */ | 8 | */ |
10 | 9 | ||
11 | #include <linux/kmod.h> | 10 | #include <linux/kmod.h> |
@@ -26,9 +25,16 @@ | |||
26 | */ | 25 | */ |
27 | #define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer)) | 26 | #define MAX_SCCB_ROOM (PAGE_SIZE - sizeof(struct sclp_buffer)) |
28 | 27 | ||
28 | static void sclp_rw_pm_event(struct sclp_register *reg, | ||
29 | enum sclp_pm_event sclp_pm_event) | ||
30 | { | ||
31 | sclp_console_pm_event(sclp_pm_event); | ||
32 | } | ||
33 | |||
29 | /* Event type structure for write message and write priority message */ | 34 | /* Event type structure for write message and write priority message */ |
30 | static struct sclp_register sclp_rw_event = { | 35 | static struct sclp_register sclp_rw_event = { |
31 | .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK | 36 | .send_mask = EVTYP_MSG_MASK | EVTYP_PMSGCMD_MASK, |
37 | .pm_event_fn = sclp_rw_pm_event, | ||
32 | }; | 38 | }; |
33 | 39 | ||
34 | /* | 40 | /* |
diff --git a/drivers/s390/char/sclp_rw.h b/drivers/s390/char/sclp_rw.h index 6aa7a6948bc9..85f491ea929c 100644 --- a/drivers/s390/char/sclp_rw.h +++ b/drivers/s390/char/sclp_rw.h | |||
@@ -1,11 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/char/sclp_rw.h | 2 | * interface to the SCLP-read/write driver |
3 | * interface to the SCLP-read/write driver | ||
4 | * | 3 | * |
5 | * S390 version | 4 | Copyright IBM Corp. 1999, 2009 |
6 | * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation | 5 | * |
7 | * Author(s): Martin Peschke <mpeschke@de.ibm.com> | 6 | * Author(s): Martin Peschke <mpeschke@de.ibm.com> |
8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 7 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
9 | */ | 8 | */ |
10 | 9 | ||
11 | #ifndef __SCLP_RW_H__ | 10 | #ifndef __SCLP_RW_H__ |
@@ -93,4 +92,5 @@ void sclp_set_columns(struct sclp_buffer *, unsigned short); | |||
93 | void sclp_set_htab(struct sclp_buffer *, unsigned short); | 92 | void sclp_set_htab(struct sclp_buffer *, unsigned short); |
94 | int sclp_chars_in_buffer(struct sclp_buffer *); | 93 | int sclp_chars_in_buffer(struct sclp_buffer *); |
95 | 94 | ||
95 | void sclp_console_pm_event(enum sclp_pm_event sclp_pm_event); | ||
96 | #endif /* __SCLP_RW_H__ */ | 96 | #endif /* __SCLP_RW_H__ */ |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index a839aa531d7c..5518e24946aa 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -1,10 +1,9 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/char/sclp_vt220.c | 2 | * SCLP VT220 terminal driver. |
3 | * SCLP VT220 terminal driver. | ||
4 | * | 3 | * |
5 | * S390 version | 4 | * Copyright IBM Corp. 2003, 2009 |
6 | * Copyright IBM Corp. 2003,2008 | 5 | * |
7 | * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com> | 6 | * Author(s): Peter Oberparleiter <Peter.Oberparleiter@de.ibm.com> |
8 | */ | 7 | */ |
9 | 8 | ||
10 | #include <linux/module.h> | 9 | #include <linux/module.h> |
@@ -69,8 +68,11 @@ static struct list_head sclp_vt220_empty; | |||
69 | /* List of pending requests */ | 68 | /* List of pending requests */ |
70 | static struct list_head sclp_vt220_outqueue; | 69 | static struct list_head sclp_vt220_outqueue; |
71 | 70 | ||
72 | /* Number of requests in outqueue */ | 71 | /* Suspend mode flag */ |
73 | static int sclp_vt220_outqueue_count; | 72 | static int sclp_vt220_suspended; |
73 | |||
74 | /* Flag that output queue is currently running */ | ||
75 | static int sclp_vt220_queue_running; | ||
74 | 76 | ||
75 | /* Timer used for delaying write requests to merge subsequent messages into | 77 | /* Timer used for delaying write requests to merge subsequent messages into |
76 | * a single buffer */ | 78 | * a single buffer */ |
@@ -92,6 +94,8 @@ static int __initdata sclp_vt220_init_count; | |||
92 | static int sclp_vt220_flush_later; | 94 | static int sclp_vt220_flush_later; |
93 | 95 | ||
94 | static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf); | 96 | static void sclp_vt220_receiver_fn(struct evbuf_header *evbuf); |
97 | static void sclp_vt220_pm_event_fn(struct sclp_register *reg, | ||
98 | enum sclp_pm_event sclp_pm_event); | ||
95 | static int __sclp_vt220_emit(struct sclp_vt220_request *request); | 99 | static int __sclp_vt220_emit(struct sclp_vt220_request *request); |
96 | static void sclp_vt220_emit_current(void); | 100 | static void sclp_vt220_emit_current(void); |
97 | 101 | ||
@@ -100,7 +104,8 @@ static struct sclp_register sclp_vt220_register = { | |||
100 | .send_mask = EVTYP_VT220MSG_MASK, | 104 | .send_mask = EVTYP_VT220MSG_MASK, |
101 | .receive_mask = EVTYP_VT220MSG_MASK, | 105 | .receive_mask = EVTYP_VT220MSG_MASK, |
102 | .state_change_fn = NULL, | 106 | .state_change_fn = NULL, |
103 | .receiver_fn = sclp_vt220_receiver_fn | 107 | .receiver_fn = sclp_vt220_receiver_fn, |
108 | .pm_event_fn = sclp_vt220_pm_event_fn, | ||
104 | }; | 109 | }; |
105 | 110 | ||
106 | 111 | ||
@@ -120,15 +125,19 @@ sclp_vt220_process_queue(struct sclp_vt220_request *request) | |||
120 | spin_lock_irqsave(&sclp_vt220_lock, flags); | 125 | spin_lock_irqsave(&sclp_vt220_lock, flags); |
121 | /* Move request from outqueue to empty queue */ | 126 | /* Move request from outqueue to empty queue */ |
122 | list_del(&request->list); | 127 | list_del(&request->list); |
123 | sclp_vt220_outqueue_count--; | ||
124 | list_add_tail((struct list_head *) page, &sclp_vt220_empty); | 128 | list_add_tail((struct list_head *) page, &sclp_vt220_empty); |
125 | /* Check if there is a pending buffer on the out queue. */ | 129 | /* Check if there is a pending buffer on the out queue. */ |
126 | request = NULL; | 130 | request = NULL; |
127 | if (!list_empty(&sclp_vt220_outqueue)) | 131 | if (!list_empty(&sclp_vt220_outqueue)) |
128 | request = list_entry(sclp_vt220_outqueue.next, | 132 | request = list_entry(sclp_vt220_outqueue.next, |
129 | struct sclp_vt220_request, list); | 133 | struct sclp_vt220_request, list); |
134 | if (!request || sclp_vt220_suspended) { | ||
135 | sclp_vt220_queue_running = 0; | ||
136 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
137 | break; | ||
138 | } | ||
130 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | 139 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); |
131 | } while (request && __sclp_vt220_emit(request)); | 140 | } while (__sclp_vt220_emit(request)); |
132 | if (request == NULL && sclp_vt220_flush_later) | 141 | if (request == NULL && sclp_vt220_flush_later) |
133 | sclp_vt220_emit_current(); | 142 | sclp_vt220_emit_current(); |
134 | /* Check if the tty needs a wake up call */ | 143 | /* Check if the tty needs a wake up call */ |
@@ -212,26 +221,7 @@ __sclp_vt220_emit(struct sclp_vt220_request *request) | |||
212 | } | 221 | } |
213 | 222 | ||
214 | /* | 223 | /* |
215 | * Queue and emit given request. | 224 | * Queue and emit current request. |
216 | */ | ||
217 | static void | ||
218 | sclp_vt220_emit(struct sclp_vt220_request *request) | ||
219 | { | ||
220 | unsigned long flags; | ||
221 | int count; | ||
222 | |||
223 | spin_lock_irqsave(&sclp_vt220_lock, flags); | ||
224 | list_add_tail(&request->list, &sclp_vt220_outqueue); | ||
225 | count = sclp_vt220_outqueue_count++; | ||
226 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
227 | /* Emit only the first buffer immediately - callback takes care of | ||
228 | * the rest */ | ||
229 | if (count == 0 && __sclp_vt220_emit(request)) | ||
230 | sclp_vt220_process_queue(request); | ||
231 | } | ||
232 | |||
233 | /* | ||
234 | * Queue and emit current request. Return zero on success, non-zero otherwise. | ||
235 | */ | 225 | */ |
236 | static void | 226 | static void |
237 | sclp_vt220_emit_current(void) | 227 | sclp_vt220_emit_current(void) |
@@ -241,22 +231,33 @@ sclp_vt220_emit_current(void) | |||
241 | struct sclp_vt220_sccb *sccb; | 231 | struct sclp_vt220_sccb *sccb; |
242 | 232 | ||
243 | spin_lock_irqsave(&sclp_vt220_lock, flags); | 233 | spin_lock_irqsave(&sclp_vt220_lock, flags); |
244 | request = NULL; | 234 | if (sclp_vt220_current_request) { |
245 | if (sclp_vt220_current_request != NULL) { | ||
246 | sccb = (struct sclp_vt220_sccb *) | 235 | sccb = (struct sclp_vt220_sccb *) |
247 | sclp_vt220_current_request->sclp_req.sccb; | 236 | sclp_vt220_current_request->sclp_req.sccb; |
248 | /* Only emit buffers with content */ | 237 | /* Only emit buffers with content */ |
249 | if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) { | 238 | if (sccb->header.length != sizeof(struct sclp_vt220_sccb)) { |
250 | request = sclp_vt220_current_request; | 239 | list_add_tail(&sclp_vt220_current_request->list, |
240 | &sclp_vt220_outqueue); | ||
251 | sclp_vt220_current_request = NULL; | 241 | sclp_vt220_current_request = NULL; |
252 | if (timer_pending(&sclp_vt220_timer)) | 242 | if (timer_pending(&sclp_vt220_timer)) |
253 | del_timer(&sclp_vt220_timer); | 243 | del_timer(&sclp_vt220_timer); |
254 | } | 244 | } |
255 | sclp_vt220_flush_later = 0; | 245 | sclp_vt220_flush_later = 0; |
256 | } | 246 | } |
247 | if (sclp_vt220_queue_running || sclp_vt220_suspended) | ||
248 | goto out_unlock; | ||
249 | if (list_empty(&sclp_vt220_outqueue)) | ||
250 | goto out_unlock; | ||
251 | request = list_first_entry(&sclp_vt220_outqueue, | ||
252 | struct sclp_vt220_request, list); | ||
253 | sclp_vt220_queue_running = 1; | ||
254 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
255 | |||
256 | if (__sclp_vt220_emit(request)) | ||
257 | sclp_vt220_process_queue(request); | ||
258 | return; | ||
259 | out_unlock: | ||
257 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | 260 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); |
258 | if (request != NULL) | ||
259 | sclp_vt220_emit(request); | ||
260 | } | 261 | } |
261 | 262 | ||
262 | #define SCLP_NORMAL_WRITE 0x00 | 263 | #define SCLP_NORMAL_WRITE 0x00 |
@@ -396,7 +397,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, | |||
396 | if (sclp_vt220_current_request == NULL) { | 397 | if (sclp_vt220_current_request == NULL) { |
397 | while (list_empty(&sclp_vt220_empty)) { | 398 | while (list_empty(&sclp_vt220_empty)) { |
398 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | 399 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); |
399 | if (may_fail) | 400 | if (may_fail || sclp_vt220_suspended) |
400 | goto out; | 401 | goto out; |
401 | else | 402 | else |
402 | sclp_sync_wait(); | 403 | sclp_sync_wait(); |
@@ -531,7 +532,7 @@ sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch) | |||
531 | static void | 532 | static void |
532 | sclp_vt220_flush_chars(struct tty_struct *tty) | 533 | sclp_vt220_flush_chars(struct tty_struct *tty) |
533 | { | 534 | { |
534 | if (sclp_vt220_outqueue_count == 0) | 535 | if (!sclp_vt220_queue_running) |
535 | sclp_vt220_emit_current(); | 536 | sclp_vt220_emit_current(); |
536 | else | 537 | else |
537 | sclp_vt220_flush_later = 1; | 538 | sclp_vt220_flush_later = 1; |
@@ -635,7 +636,6 @@ static int __init __sclp_vt220_init(int num_pages) | |||
635 | init_timer(&sclp_vt220_timer); | 636 | init_timer(&sclp_vt220_timer); |
636 | sclp_vt220_current_request = NULL; | 637 | sclp_vt220_current_request = NULL; |
637 | sclp_vt220_buffered_chars = 0; | 638 | sclp_vt220_buffered_chars = 0; |
638 | sclp_vt220_outqueue_count = 0; | ||
639 | sclp_vt220_tty = NULL; | 639 | sclp_vt220_tty = NULL; |
640 | sclp_vt220_flush_later = 0; | 640 | sclp_vt220_flush_later = 0; |
641 | 641 | ||
@@ -736,7 +736,7 @@ static void __sclp_vt220_flush_buffer(void) | |||
736 | spin_lock_irqsave(&sclp_vt220_lock, flags); | 736 | spin_lock_irqsave(&sclp_vt220_lock, flags); |
737 | if (timer_pending(&sclp_vt220_timer)) | 737 | if (timer_pending(&sclp_vt220_timer)) |
738 | del_timer(&sclp_vt220_timer); | 738 | del_timer(&sclp_vt220_timer); |
739 | while (sclp_vt220_outqueue_count > 0) { | 739 | while (sclp_vt220_queue_running) { |
740 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | 740 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); |
741 | sclp_sync_wait(); | 741 | sclp_sync_wait(); |
742 | spin_lock_irqsave(&sclp_vt220_lock, flags); | 742 | spin_lock_irqsave(&sclp_vt220_lock, flags); |
@@ -744,6 +744,46 @@ static void __sclp_vt220_flush_buffer(void) | |||
744 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | 744 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); |
745 | } | 745 | } |
746 | 746 | ||
747 | /* | ||
748 | * Resume console: If there are cached messages, emit them. | ||
749 | */ | ||
750 | static void sclp_vt220_resume(void) | ||
751 | { | ||
752 | unsigned long flags; | ||
753 | |||
754 | spin_lock_irqsave(&sclp_vt220_lock, flags); | ||
755 | sclp_vt220_suspended = 0; | ||
756 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
757 | sclp_vt220_emit_current(); | ||
758 | } | ||
759 | |||
760 | /* | ||
761 | * Suspend console: Set suspend flag and flush console | ||
762 | */ | ||
763 | static void sclp_vt220_suspend(void) | ||
764 | { | ||
765 | unsigned long flags; | ||
766 | |||
767 | spin_lock_irqsave(&sclp_vt220_lock, flags); | ||
768 | sclp_vt220_suspended = 1; | ||
769 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
770 | __sclp_vt220_flush_buffer(); | ||
771 | } | ||
772 | |||
773 | static void sclp_vt220_pm_event_fn(struct sclp_register *reg, | ||
774 | enum sclp_pm_event sclp_pm_event) | ||
775 | { | ||
776 | switch (sclp_pm_event) { | ||
777 | case SCLP_PM_EVENT_FREEZE: | ||
778 | sclp_vt220_suspend(); | ||
779 | break; | ||
780 | case SCLP_PM_EVENT_RESTORE: | ||
781 | case SCLP_PM_EVENT_THAW: | ||
782 | sclp_vt220_resume(); | ||
783 | break; | ||
784 | } | ||
785 | } | ||
786 | |||
747 | static int | 787 | static int |
748 | sclp_vt220_notify(struct notifier_block *self, | 788 | sclp_vt220_notify(struct notifier_block *self, |
749 | unsigned long event, void *data) | 789 | unsigned long event, void *data) |
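Taken together, the vt220 hunks replace the outqueue counter with two flags: sclp_vt220_queue_running ensures that at most one context drains the output queue, and sclp_vt220_suspended stops both queueing and draining while a freeze is in progress. A condensed sketch of that invariant, reusing the names from the hunks above (the function name is illustrative only):

static void example_start_drain(void)
{
	struct sclp_vt220_request *request;
	unsigned long flags;

	spin_lock_irqsave(&sclp_vt220_lock, flags);
	if (sclp_vt220_queue_running || sclp_vt220_suspended ||
	    list_empty(&sclp_vt220_outqueue)) {
		/* someone else is draining, we are frozen, or nothing to do */
		spin_unlock_irqrestore(&sclp_vt220_lock, flags);
		return;
	}
	request = list_first_entry(&sclp_vt220_outqueue,
				   struct sclp_vt220_request, list);
	sclp_vt220_queue_running = 1;	/* cleared in sclp_vt220_process_queue() */
	spin_unlock_irqrestore(&sclp_vt220_lock, flags);

	if (__sclp_vt220_emit(request))
		sclp_vt220_process_queue(request);
}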
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h index 5469e099597e..a26333774701 100644 --- a/drivers/s390/char/tape.h +++ b/drivers/s390/char/tape.h | |||
@@ -3,7 +3,7 @@ | |||
3 | * tape device driver for 3480/3490E/3590 tapes. | 3 | * tape device driver for 3480/3490E/3590 tapes. |
4 | * | 4 | * |
5 | * S390 and zSeries version | 5 | * S390 and zSeries version |
6 | * Copyright IBM Corp. 2001,2006 | 6 | * Copyright IBM Corp. 2001, 2009 |
7 | * Author(s): Carsten Otte <cotte@de.ibm.com> | 7 | * Author(s): Carsten Otte <cotte@de.ibm.com> |
8 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> | 8 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> |
9 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 9 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
@@ -286,6 +286,7 @@ extern void tape_state_set(struct tape_device *, enum tape_state); | |||
286 | 286 | ||
287 | extern int tape_generic_online(struct tape_device *, struct tape_discipline *); | 287 | extern int tape_generic_online(struct tape_device *, struct tape_discipline *); |
288 | extern int tape_generic_offline(struct ccw_device *); | 288 | extern int tape_generic_offline(struct ccw_device *); |
289 | extern int tape_generic_pm_suspend(struct ccw_device *); | ||
289 | 290 | ||
290 | /* Externals from tape_devmap.c */ | 291 | /* Externals from tape_devmap.c */ |
291 | extern int tape_generic_probe(struct ccw_device *); | 292 | extern int tape_generic_probe(struct ccw_device *); |
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index 2d00a383a475..144d2a5e1a92 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * drivers/s390/char/tape_34xx.c | 2 | * drivers/s390/char/tape_34xx.c |
3 | * tape device discipline for 3480/3490 tapes. | 3 | * tape device discipline for 3480/3490 tapes. |
4 | * | 4 | * |
5 | * Copyright (C) IBM Corp. 2001,2006 | 5 | * Copyright IBM Corp. 2001, 2009 |
6 | * Author(s): Carsten Otte <cotte@de.ibm.com> | 6 | * Author(s): Carsten Otte <cotte@de.ibm.com> |
7 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> | 7 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> |
8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
@@ -1302,6 +1302,7 @@ static struct ccw_driver tape_34xx_driver = { | |||
1302 | .remove = tape_generic_remove, | 1302 | .remove = tape_generic_remove, |
1303 | .set_online = tape_34xx_online, | 1303 | .set_online = tape_34xx_online, |
1304 | .set_offline = tape_generic_offline, | 1304 | .set_offline = tape_generic_offline, |
1305 | .freeze = tape_generic_pm_suspend, | ||
1305 | }; | 1306 | }; |
1306 | 1307 | ||
1307 | static int | 1308 | static int |
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c index c453b2f3e9f4..23e6598bc4b5 100644 --- a/drivers/s390/char/tape_3590.c +++ b/drivers/s390/char/tape_3590.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * drivers/s390/char/tape_3590.c | 2 | * drivers/s390/char/tape_3590.c |
3 | * tape device discipline for 3590 tapes. | 3 | * tape device discipline for 3590 tapes. |
4 | * | 4 | * |
5 | * Copyright IBM Corp. 2001,2006 | 5 | * Copyright IBM Corp. 2001, 2009 |
6 | * Author(s): Stefan Bader <shbader@de.ibm.com> | 6 | * Author(s): Stefan Bader <shbader@de.ibm.com> |
7 | * Michael Holzheu <holzheu@de.ibm.com> | 7 | * Michael Holzheu <holzheu@de.ibm.com> |
8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 8 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
@@ -1715,6 +1715,7 @@ static struct ccw_driver tape_3590_driver = { | |||
1715 | .remove = tape_generic_remove, | 1715 | .remove = tape_generic_remove, |
1716 | .set_offline = tape_generic_offline, | 1716 | .set_offline = tape_generic_offline, |
1717 | .set_online = tape_3590_online, | 1717 | .set_online = tape_3590_online, |
1718 | .freeze = tape_generic_pm_suspend, | ||
1718 | }; | 1719 | }; |
1719 | 1720 | ||
1720 | /* | 1721 | /* |
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 8a109f3b69c6..3ebaa8eb5c86 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * basic function of the tape device driver | 3 | * basic function of the tape device driver |
4 | * | 4 | * |
5 | * S390 and zSeries version | 5 | * S390 and zSeries version |
6 | * Copyright IBM Corp. 2001,2006 | 6 | * Copyright IBM Corp. 2001, 2009 |
7 | * Author(s): Carsten Otte <cotte@de.ibm.com> | 7 | * Author(s): Carsten Otte <cotte@de.ibm.com> |
8 | * Michael Holzheu <holzheu@de.ibm.com> | 8 | * Michael Holzheu <holzheu@de.ibm.com> |
9 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> | 9 | * Tuan Ngo-Anh <ngoanh@de.ibm.com> |
@@ -380,6 +380,55 @@ tape_cleanup_device(struct tape_device *device) | |||
380 | } | 380 | } |
381 | 381 | ||
382 | /* | 382 | /* |
383 | * Suspend device. | ||
384 | * | ||
385 | * Called by the common I/O layer if the drive should be suspended on user | ||
386 | * request. We refuse to suspend if the device is loaded or in use for the | ||
387 | * following reason: | ||
388 | * While the Linux guest is suspended, it might be logged off which causes | ||
389 | * devices to be detached. Tape devices are automatically rewound and unloaded | ||
390 | * during DETACH processing (unless the tape device was attached with the | ||
391 | * NOASSIGN or MULTIUSER option). After rewind/unload, there is no way to | ||
392 | * resume the original state of the tape device, since we would need to | ||
393 | * manually re-load the cartridge which was active at suspend time. | ||
394 | */ | ||
395 | int tape_generic_pm_suspend(struct ccw_device *cdev) | ||
396 | { | ||
397 | struct tape_device *device; | ||
398 | |||
399 | device = cdev->dev.driver_data; | ||
400 | if (!device) { | ||
401 | return -ENODEV; | ||
402 | } | ||
403 | |||
404 | DBF_LH(3, "(%08x): tape_generic_pm_suspend(%p)\n", | ||
405 | device->cdev_id, device); | ||
406 | |||
407 | if (device->medium_state != MS_UNLOADED) { | ||
408 | pr_err("A cartridge is loaded in tape device %s, " | ||
409 | "refusing to suspend\n", dev_name(&cdev->dev)); | ||
410 | return -EBUSY; | ||
411 | } | ||
412 | |||
413 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | ||
414 | switch (device->tape_state) { | ||
415 | case TS_INIT: | ||
416 | case TS_NOT_OPER: | ||
417 | case TS_UNUSED: | ||
418 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | ||
419 | break; | ||
420 | default: | ||
421 | pr_err("Tape device %s is busy, refusing to " | ||
422 | "suspend\n", dev_name(&cdev->dev)); | ||
423 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | ||
424 | return -EBUSY; | ||
425 | } | ||
426 | |||
427 | DBF_LH(3, "(%08x): Drive suspended.\n", device->cdev_id); | ||
428 | return 0; | ||
429 | } | ||
430 | |||
431 | /* | ||
383 | * Set device offline. | 432 | * Set device offline. |
384 | * | 433 | * |
385 | * Called by the common I/O layer if the drive should set offline on user | 434 | * Called by the common I/O layer if the drive should set offline on user |
@@ -1273,6 +1322,7 @@ EXPORT_SYMBOL(tape_generic_remove); | |||
1273 | EXPORT_SYMBOL(tape_generic_probe); | 1322 | EXPORT_SYMBOL(tape_generic_probe); |
1274 | EXPORT_SYMBOL(tape_generic_online); | 1323 | EXPORT_SYMBOL(tape_generic_online); |
1275 | EXPORT_SYMBOL(tape_generic_offline); | 1324 | EXPORT_SYMBOL(tape_generic_offline); |
1325 | EXPORT_SYMBOL(tape_generic_pm_suspend); | ||
1276 | EXPORT_SYMBOL(tape_put_device); | 1326 | EXPORT_SYMBOL(tape_put_device); |
1277 | EXPORT_SYMBOL(tape_get_device_reference); | 1327 | EXPORT_SYMBOL(tape_get_device_reference); |
1278 | EXPORT_SYMBOL(tape_state_verbose); | 1328 | EXPORT_SYMBOL(tape_state_verbose); |
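With tape_generic_pm_suspend() exported, a discipline driver opts into suspend handling by pointing the .freeze member of its struct ccw_driver at the generic helper, exactly as the 34xx and 3590 hunks above do. A sketch for a hypothetical discipline (the .ids table, .owner and the discipline-specific .set_online handler are omitted):

static struct ccw_driver example_tape_driver = {
	.name		= "example_tape",
	/* .ids, .owner and .set_online omitted for brevity */
	.probe		= tape_generic_probe,
	.remove		= tape_generic_remove,
	.set_offline	= tape_generic_offline,
	.freeze		= tape_generic_pm_suspend,
};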
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index d8a2289fcb69..e925808c2149 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -3,7 +3,7 @@ | |||
3 | * character device driver for reading z/VM system service records | 3 | * character device driver for reading z/VM system service records |
4 | * | 4 | * |
5 | * | 5 | * |
6 | * Copyright 2004 IBM Corporation | 6 | * Copyright IBM Corp. 2004, 2009 |
7 | * character device driver for reading z/VM system service records, | 7 | * character device driver for reading z/VM system service records, |
8 | * Version 1.0 | 8 | * Version 1.0 |
9 | * Author(s): Xenia Tkatschow <xenia@us.ibm.com> | 9 | * Author(s): Xenia Tkatschow <xenia@us.ibm.com> |
@@ -660,6 +660,29 @@ static struct attribute *vmlogrdr_attrs[] = { | |||
660 | NULL, | 660 | NULL, |
661 | }; | 661 | }; |
662 | 662 | ||
663 | static int vmlogrdr_pm_prepare(struct device *dev) | ||
664 | { | ||
665 | int rc; | ||
666 | struct vmlogrdr_priv_t *priv = dev->driver_data; | ||
667 | |||
668 | rc = 0; | ||
669 | if (priv) { | ||
670 | spin_lock_bh(&priv->priv_lock); | ||
671 | if (priv->dev_in_use) | ||
672 | rc = -EBUSY; | ||
673 | spin_unlock_bh(&priv->priv_lock); | ||
674 | } | ||
675 | if (rc) | ||
676 | pr_err("vmlogrdr: device %s is busy. Refuse to suspend.\n", | ||
677 | dev_name(dev)); | ||
678 | return rc; | ||
679 | } | ||
680 | |||
681 | |||
682 | static struct dev_pm_ops vmlogrdr_pm_ops = { | ||
683 | .prepare = vmlogrdr_pm_prepare, | ||
684 | }; | ||
685 | |||
663 | static struct attribute_group vmlogrdr_attr_group = { | 686 | static struct attribute_group vmlogrdr_attr_group = { |
664 | .attrs = vmlogrdr_attrs, | 687 | .attrs = vmlogrdr_attrs, |
665 | }; | 688 | }; |
@@ -668,6 +691,7 @@ static struct class *vmlogrdr_class; | |||
668 | static struct device_driver vmlogrdr_driver = { | 691 | static struct device_driver vmlogrdr_driver = { |
669 | .name = "vmlogrdr", | 692 | .name = "vmlogrdr", |
670 | .bus = &iucv_bus, | 693 | .bus = &iucv_bus, |
694 | .pm = &vmlogrdr_pm_ops, | ||
671 | }; | 695 | }; |
672 | 696 | ||
673 | 697 | ||
@@ -729,6 +753,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) | |||
729 | dev->bus = &iucv_bus; | 753 | dev->bus = &iucv_bus; |
730 | dev->parent = iucv_root; | 754 | dev->parent = iucv_root; |
731 | dev->driver = &vmlogrdr_driver; | 755 | dev->driver = &vmlogrdr_driver; |
756 | dev->driver_data = priv; | ||
732 | /* | 757 | /* |
733 | * The release function could be called after the | 758 | * The release function could be called after the |
734 | * module has been unloaded. Its _only_ task is to | 759 | * module has been unloaded. Its _only_ task is to |
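vmlogrdr takes the plain driver-model route: a struct dev_pm_ops whose .prepare callback returns -EBUSY while the device is in use, which makes the PM core abort the whole suspend before any state is frozen. A sketch of the same veto pattern for a hypothetical driver (the private structure, lock and driver names are illustrative only):

struct example_priv {
	spinlock_t lock;
	int dev_in_use;
};

static int example_pm_prepare(struct device *dev)
{
	struct example_priv *priv = dev->driver_data;
	int rc = 0;

	if (priv) {
		spin_lock_bh(&priv->lock);
		if (priv->dev_in_use)
			rc = -EBUSY;	/* veto: user still holds the device */
		spin_unlock_bh(&priv->lock);
	}
	return rc;
}

static struct dev_pm_ops example_pm_ops = {
	.prepare = example_pm_prepare,
};

static struct device_driver example_driver = {
	.name	= "example",
	.bus	= &iucv_bus,	/* vmlogrdr itself hangs off the iucv bus */
	.pm	= &example_pm_ops,
};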
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 5dcef81fc9d9..92458219a9e9 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Linux driver for System z and s390 unit record devices | 2 | * Linux driver for System z and s390 unit record devices |
3 | * (z/VM virtual punch, reader, printer) | 3 | * (z/VM virtual punch, reader, printer) |
4 | * | 4 | * |
5 | * Copyright IBM Corp. 2001, 2007 | 5 | * Copyright IBM Corp. 2001, 2009 |
6 | * Authors: Malcolm Beattie <beattiem@uk.ibm.com> | 6 | * Authors: Malcolm Beattie <beattiem@uk.ibm.com> |
7 | * Michael Holzheu <holzheu@de.ibm.com> | 7 | * Michael Holzheu <holzheu@de.ibm.com> |
8 | * Frank Munzert <munzert@de.ibm.com> | 8 | * Frank Munzert <munzert@de.ibm.com> |
@@ -60,6 +60,7 @@ static int ur_probe(struct ccw_device *cdev); | |||
60 | static void ur_remove(struct ccw_device *cdev); | 60 | static void ur_remove(struct ccw_device *cdev); |
61 | static int ur_set_online(struct ccw_device *cdev); | 61 | static int ur_set_online(struct ccw_device *cdev); |
62 | static int ur_set_offline(struct ccw_device *cdev); | 62 | static int ur_set_offline(struct ccw_device *cdev); |
63 | static int ur_pm_suspend(struct ccw_device *cdev); | ||
63 | 64 | ||
64 | static struct ccw_driver ur_driver = { | 65 | static struct ccw_driver ur_driver = { |
65 | .name = "vmur", | 66 | .name = "vmur", |
@@ -69,6 +70,7 @@ static struct ccw_driver ur_driver = { | |||
69 | .remove = ur_remove, | 70 | .remove = ur_remove, |
70 | .set_online = ur_set_online, | 71 | .set_online = ur_set_online, |
71 | .set_offline = ur_set_offline, | 72 | .set_offline = ur_set_offline, |
73 | .freeze = ur_pm_suspend, | ||
72 | }; | 74 | }; |
73 | 75 | ||
74 | static DEFINE_MUTEX(vmur_mutex); | 76 | static DEFINE_MUTEX(vmur_mutex); |
@@ -158,6 +160,28 @@ static void urdev_put(struct urdev *urd) | |||
158 | } | 160 | } |
159 | 161 | ||
160 | /* | 162 | /* |
163 | * State and contents of ur devices can be changed by class D users issuing | ||
164 | * CP commands such as PURGE or TRANSFER, while the Linux guest is suspended. | ||
165 | * Also the Linux guest might be logged off, which causes all active spool | ||
166 | * files to be closed. | ||
167 | * So we cannot guarantee that spool files are still the same when the Linux | ||
168 | * guest is resumed. In order to avoid unpredictable results at resume time | ||
169 | * we simply refuse to suspend if a ur device node is open. | ||
170 | */ | ||
171 | static int ur_pm_suspend(struct ccw_device *cdev) | ||
172 | { | ||
173 | struct urdev *urd = cdev->dev.driver_data; | ||
174 | |||
175 | TRACE("ur_pm_suspend: cdev=%p\n", cdev); | ||
176 | if (urd->open_flag) { | ||
177 | pr_err("Unit record device %s is busy, %s refusing to " | ||
178 | "suspend.\n", dev_name(&cdev->dev), ur_banner); | ||
179 | return -EBUSY; | ||
180 | } | ||
181 | return 0; | ||
182 | } | ||
183 | |||
184 | /* | ||
161 | * Low-level functions to do I/O to a ur device. | 185 | * Low-level functions to do I/O to a ur device. |
162 | * alloc_chan_prog | 186 | * alloc_chan_prog |
163 | * free_chan_prog | 187 | * free_chan_prog |
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c index 21a2a829bf4e..cb7854c10c04 100644 --- a/drivers/s390/char/vmwatchdog.c +++ b/drivers/s390/char/vmwatchdog.c | |||
@@ -1,17 +1,23 @@ | |||
1 | /* | 1 | /* |
2 | * Watchdog implementation based on z/VM Watchdog Timer API | 2 | * Watchdog implementation based on z/VM Watchdog Timer API |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2004,2009 | ||
5 | * | ||
4 | * The user space watchdog daemon can use this driver as | 6 | * The user space watchdog daemon can use this driver as |
5 | * /dev/vmwatchdog to have z/VM execute the specified CP | 7 | * /dev/vmwatchdog to have z/VM execute the specified CP |
6 | * command when the timeout expires. The default command is | 8 | * command when the timeout expires. The default command is |
7 | * "IPL", which which cause an immediate reboot. | 9 | * "IPL", which which cause an immediate reboot. |
8 | */ | 10 | */ |
11 | #define KMSG_COMPONENT "vmwatchdog" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
9 | #include <linux/init.h> | 14 | #include <linux/init.h> |
10 | #include <linux/fs.h> | 15 | #include <linux/fs.h> |
11 | #include <linux/kernel.h> | 16 | #include <linux/kernel.h> |
12 | #include <linux/miscdevice.h> | 17 | #include <linux/miscdevice.h> |
13 | #include <linux/module.h> | 18 | #include <linux/module.h> |
14 | #include <linux/moduleparam.h> | 19 | #include <linux/moduleparam.h> |
20 | #include <linux/suspend.h> | ||
15 | #include <linux/watchdog.h> | 21 | #include <linux/watchdog.h> |
16 | #include <linux/smp_lock.h> | 22 | #include <linux/smp_lock.h> |
17 | 23 | ||
@@ -43,6 +49,9 @@ static unsigned int vmwdt_interval = 60; | |||
43 | static unsigned long vmwdt_is_open; | 49 | static unsigned long vmwdt_is_open; |
44 | static int vmwdt_expect_close; | 50 | static int vmwdt_expect_close; |
45 | 51 | ||
52 | #define VMWDT_OPEN 0 /* devnode is open or suspend in progress */ | ||
53 | #define VMWDT_RUNNING 1 /* The watchdog is armed */ | ||
54 | |||
46 | enum vmwdt_func { | 55 | enum vmwdt_func { |
47 | /* function codes */ | 56 | /* function codes */ |
48 | wdt_init = 0, | 57 | wdt_init = 0, |
@@ -92,6 +101,7 @@ static int vmwdt_keepalive(void) | |||
92 | EBC_TOUPPER(ebc_cmd, MAX_CMDLEN); | 101 | EBC_TOUPPER(ebc_cmd, MAX_CMDLEN); |
93 | 102 | ||
94 | func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init; | 103 | func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init; |
104 | set_bit(VMWDT_RUNNING, &vmwdt_is_open); | ||
95 | ret = __diag288(func, vmwdt_interval, ebc_cmd, len); | 105 | ret = __diag288(func, vmwdt_interval, ebc_cmd, len); |
96 | WARN_ON(ret != 0); | 106 | WARN_ON(ret != 0); |
97 | kfree(ebc_cmd); | 107 | kfree(ebc_cmd); |
@@ -102,6 +112,7 @@ static int vmwdt_disable(void) | |||
102 | { | 112 | { |
103 | int ret = __diag288(wdt_cancel, 0, "", 0); | 113 | int ret = __diag288(wdt_cancel, 0, "", 0); |
104 | WARN_ON(ret != 0); | 114 | WARN_ON(ret != 0); |
115 | clear_bit(VMWDT_RUNNING, &vmwdt_is_open); | ||
105 | return ret; | 116 | return ret; |
106 | } | 117 | } |
107 | 118 | ||
@@ -123,13 +134,13 @@ static int vmwdt_open(struct inode *i, struct file *f) | |||
123 | { | 134 | { |
124 | int ret; | 135 | int ret; |
125 | lock_kernel(); | 136 | lock_kernel(); |
126 | if (test_and_set_bit(0, &vmwdt_is_open)) { | 137 | if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) { |
127 | unlock_kernel(); | 138 | unlock_kernel(); |
128 | return -EBUSY; | 139 | return -EBUSY; |
129 | } | 140 | } |
130 | ret = vmwdt_keepalive(); | 141 | ret = vmwdt_keepalive(); |
131 | if (ret) | 142 | if (ret) |
132 | clear_bit(0, &vmwdt_is_open); | 143 | clear_bit(VMWDT_OPEN, &vmwdt_is_open); |
133 | unlock_kernel(); | 144 | unlock_kernel(); |
134 | return ret ? ret : nonseekable_open(i, f); | 145 | return ret ? ret : nonseekable_open(i, f); |
135 | } | 146 | } |
@@ -139,7 +150,7 @@ static int vmwdt_close(struct inode *i, struct file *f) | |||
139 | if (vmwdt_expect_close == 42) | 150 | if (vmwdt_expect_close == 42) |
140 | vmwdt_disable(); | 151 | vmwdt_disable(); |
141 | vmwdt_expect_close = 0; | 152 | vmwdt_expect_close = 0; |
142 | clear_bit(0, &vmwdt_is_open); | 153 | clear_bit(VMWDT_OPEN, &vmwdt_is_open); |
143 | return 0; | 154 | return 0; |
144 | } | 155 | } |
145 | 156 | ||
@@ -223,6 +234,57 @@ static ssize_t vmwdt_write(struct file *f, const char __user *buf, | |||
223 | return count; | 234 | return count; |
224 | } | 235 | } |
225 | 236 | ||
237 | static int vmwdt_resume(void) | ||
238 | { | ||
239 | clear_bit(VMWDT_OPEN, &vmwdt_is_open); | ||
240 | return NOTIFY_DONE; | ||
241 | } | ||
242 | |||
243 | /* | ||
244 | * It makes no sense to go into suspend while the watchdog is running. | ||
245 | * Depending on the memory size, the watchdog might trigger while we | ||
246 | * are still saving the memory. | ||
247 | * We reuse the open flag to ensure that suspend and watchdog open are | ||
248 | * exclusive operations | ||
249 | */ | ||
250 | static int vmwdt_suspend(void) | ||
251 | { | ||
252 | if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) { | ||
253 | pr_err("The watchdog is in use. " | ||
254 | "This prevents hibernation or suspend.\n"); | ||
255 | return NOTIFY_BAD; | ||
256 | } | ||
257 | if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) { | ||
258 | clear_bit(VMWDT_OPEN, &vmwdt_is_open); | ||
259 | pr_err("The watchdog is running. " | ||
260 | "This prevents hibernation or suspend.\n"); | ||
261 | return NOTIFY_BAD; | ||
262 | } | ||
263 | return NOTIFY_DONE; | ||
264 | } | ||
265 | |||
266 | /* | ||
267 | * This function is called for suspend and resume. | ||
268 | */ | ||
269 | static int vmwdt_power_event(struct notifier_block *this, unsigned long event, | ||
270 | void *ptr) | ||
271 | { | ||
272 | switch (event) { | ||
273 | case PM_POST_HIBERNATION: | ||
274 | case PM_POST_SUSPEND: | ||
275 | return vmwdt_resume(); | ||
276 | case PM_HIBERNATION_PREPARE: | ||
277 | case PM_SUSPEND_PREPARE: | ||
278 | return vmwdt_suspend(); | ||
279 | default: | ||
280 | return NOTIFY_DONE; | ||
281 | } | ||
282 | } | ||
283 | |||
284 | static struct notifier_block vmwdt_power_notifier = { | ||
285 | .notifier_call = vmwdt_power_event, | ||
286 | }; | ||
287 | |||
226 | static const struct file_operations vmwdt_fops = { | 288 | static const struct file_operations vmwdt_fops = { |
227 | .open = &vmwdt_open, | 289 | .open = &vmwdt_open, |
228 | .release = &vmwdt_close, | 290 | .release = &vmwdt_close, |
@@ -244,12 +306,21 @@ static int __init vmwdt_init(void) | |||
244 | ret = vmwdt_probe(); | 306 | ret = vmwdt_probe(); |
245 | if (ret) | 307 | if (ret) |
246 | return ret; | 308 | return ret; |
247 | return misc_register(&vmwdt_dev); | 309 | ret = register_pm_notifier(&vmwdt_power_notifier); |
310 | if (ret) | ||
311 | return ret; | ||
312 | ret = misc_register(&vmwdt_dev); | ||
313 | if (ret) { | ||
314 | unregister_pm_notifier(&vmwdt_power_notifier); | ||
315 | return ret; | ||
316 | } | ||
317 | return 0; | ||
248 | } | 318 | } |
249 | module_init(vmwdt_init); | 319 | module_init(vmwdt_init); |
250 | 320 | ||
251 | static void __exit vmwdt_exit(void) | 321 | static void __exit vmwdt_exit(void) |
252 | { | 322 | { |
253 | WARN_ON(misc_deregister(&vmwdt_dev) != 0); | 323 | unregister_pm_notifier(&vmwdt_power_notifier); |
324 | misc_deregister(&vmwdt_dev); | ||
254 | } | 325 | } |
255 | module_exit(vmwdt_exit); | 326 | module_exit(vmwdt_exit); |
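The watchdog is a plain misc device with no bus-level PM callbacks, so it hooks the global PM notifier chain instead: the *_PREPARE events may veto the transition by returning NOTIFY_BAD, and the *_POST events undo the bookkeeping afterwards. The bare pattern, with a hypothetical busy bit standing in for the driver state:

#include <linux/notifier.h>
#include <linux/suspend.h>

static unsigned long example_flags;		/* bit 0: resource claimed */

static int example_power_event(struct notifier_block *this,
			       unsigned long event, void *ptr)
{
	switch (event) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		/* refuse the transition while the resource is busy */
		if (test_and_set_bit(0, &example_flags))
			return NOTIFY_BAD;
		return NOTIFY_DONE;
	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		clear_bit(0, &example_flags);	/* undo the claim */
		return NOTIFY_DONE;
	default:
		return NOTIFY_DONE;
	}
}

static struct notifier_block example_power_notifier = {
	.notifier_call = example_power_event,
};

/* module init:  register_pm_notifier(&example_power_notifier);   */
/* module exit:  unregister_pm_notifier(&example_power_notifier); */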
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 22ce765d537e..a5a62f1f7747 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -1,11 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/cio/ccwgroup.c | ||
3 | * bus driver for ccwgroup | 2 | * bus driver for ccwgroup |
4 | * | 3 | * |
5 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | 4 | * Copyright IBM Corp. 2002, 2009 |
6 | * IBM Corporation | 5 | * |
7 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) | 6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) |
8 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
9 | */ | 8 | */ |
10 | #include <linux/module.h> | 9 | #include <linux/module.h> |
11 | #include <linux/errno.h> | 10 | #include <linux/errno.h> |
@@ -501,6 +500,74 @@ static void ccwgroup_shutdown(struct device *dev) | |||
501 | gdrv->shutdown(gdev); | 500 | gdrv->shutdown(gdev); |
502 | } | 501 | } |
503 | 502 | ||
503 | static int ccwgroup_pm_prepare(struct device *dev) | ||
504 | { | ||
505 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); | ||
506 | struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); | ||
507 | |||
508 | /* Fail while device is being set online/offline. */ | ||
509 | if (atomic_read(&gdev->onoff)) | ||
510 | return -EAGAIN; | ||
511 | |||
512 | if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) | ||
513 | return 0; | ||
514 | |||
515 | return gdrv->prepare ? gdrv->prepare(gdev) : 0; | ||
516 | } | ||
517 | |||
518 | static void ccwgroup_pm_complete(struct device *dev) | ||
519 | { | ||
520 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); | ||
521 | struct ccwgroup_driver *gdrv = to_ccwgroupdrv(dev->driver); | ||
522 | |||
523 | if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) | ||
524 | return; | ||
525 | |||
526 | if (gdrv->complete) | ||
527 | gdrv->complete(gdev); | ||
528 | } | ||
529 | |||
530 | static int ccwgroup_pm_freeze(struct device *dev) | ||
531 | { | ||
532 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); | ||
533 | struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); | ||
534 | |||
535 | if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) | ||
536 | return 0; | ||
537 | |||
538 | return gdrv->freeze ? gdrv->freeze(gdev) : 0; | ||
539 | } | ||
540 | |||
541 | static int ccwgroup_pm_thaw(struct device *dev) | ||
542 | { | ||
543 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); | ||
544 | struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); | ||
545 | |||
546 | if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) | ||
547 | return 0; | ||
548 | |||
549 | return gdrv->thaw ? gdrv->thaw(gdev) : 0; | ||
550 | } | ||
551 | |||
552 | static int ccwgroup_pm_restore(struct device *dev) | ||
553 | { | ||
554 | struct ccwgroup_device *gdev = to_ccwgroupdev(dev); | ||
555 | struct ccwgroup_driver *gdrv = to_ccwgroupdrv(gdev->dev.driver); | ||
556 | |||
557 | if (!gdev->dev.driver || gdev->state != CCWGROUP_ONLINE) | ||
558 | return 0; | ||
559 | |||
560 | return gdrv->restore ? gdrv->restore(gdev) : 0; | ||
561 | } | ||
562 | |||
563 | static struct dev_pm_ops ccwgroup_pm_ops = { | ||
564 | .prepare = ccwgroup_pm_prepare, | ||
565 | .complete = ccwgroup_pm_complete, | ||
566 | .freeze = ccwgroup_pm_freeze, | ||
567 | .thaw = ccwgroup_pm_thaw, | ||
568 | .restore = ccwgroup_pm_restore, | ||
569 | }; | ||
570 | |||
504 | static struct bus_type ccwgroup_bus_type = { | 571 | static struct bus_type ccwgroup_bus_type = { |
505 | .name = "ccwgroup", | 572 | .name = "ccwgroup", |
506 | .match = ccwgroup_bus_match, | 573 | .match = ccwgroup_bus_match, |
@@ -508,6 +575,7 @@ static struct bus_type ccwgroup_bus_type = { | |||
508 | .probe = ccwgroup_probe, | 575 | .probe = ccwgroup_probe, |
509 | .remove = ccwgroup_remove, | 576 | .remove = ccwgroup_remove, |
510 | .shutdown = ccwgroup_shutdown, | 577 | .shutdown = ccwgroup_shutdown, |
578 | .pm = &ccwgroup_pm_ops, | ||
511 | }; | 579 | }; |
512 | 580 | ||
513 | 581 | ||
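On the driver side, a ccwgroup driver only sees these callbacks while its group device is ONLINE, so it can assume a fully set up group. A sketch of a driver opting in, assuming the matching prepare/complete/freeze/thaw/restore members added to struct ccwgroup_driver elsewhere in this series (names and bodies are hypothetical):

static int example_grp_freeze(struct ccwgroup_device *gdev)
{
	/* quiesce I/O on all member ccw devices before the snapshot */
	return 0;
}

static int example_grp_restore(struct ccwgroup_device *gdev)
{
	/* re-establish the group after the hibernation image is loaded */
	return 0;
}

static struct ccwgroup_driver example_grp_driver = {
	.name		= "example_group",
	/* probe/online handling omitted */
	.freeze		= example_grp_freeze,
	/* thaw and restore may share code when re-enabling is enough,
	 * cf. the chsc subchannel driver further down */
	.thaw		= example_grp_restore,
	.restore	= example_grp_restore,
};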
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 883f16f96f22..1ecd3e567648 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -549,8 +549,7 @@ cleanup: | |||
549 | return ret; | 549 | return ret; |
550 | } | 550 | } |
551 | 551 | ||
552 | static int | 552 | int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) |
553 | __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | ||
554 | { | 553 | { |
555 | struct { | 554 | struct { |
556 | struct chsc_header request; | 555 | struct chsc_header request; |
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index ba59bceace98..425e8f89a6c5 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
@@ -90,6 +90,7 @@ extern void chsc_free_sei_area(void); | |||
90 | extern int chsc_enable_facility(int); | 90 | extern int chsc_enable_facility(int); |
91 | struct channel_subsystem; | 91 | struct channel_subsystem; |
92 | extern int chsc_secm(struct channel_subsystem *, int); | 92 | extern int chsc_secm(struct channel_subsystem *, int); |
93 | int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page); | ||
93 | 94 | ||
94 | int chsc_chp_vary(struct chp_id chpid, int on); | 95 | int chsc_chp_vary(struct chp_id chpid, int on); |
95 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, | 96 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, |
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index 93eca1731b81..cc5144b6f9d9 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c | |||
@@ -1,7 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * Driver for s390 chsc subchannels | 2 | * Driver for s390 chsc subchannels |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2008 | 4 | * Copyright IBM Corp. 2008, 2009 |
5 | * | ||
5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> | 6 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> |
6 | * | 7 | * |
7 | */ | 8 | */ |
@@ -112,6 +113,31 @@ static void chsc_subchannel_shutdown(struct subchannel *sch) | |||
112 | cio_disable_subchannel(sch); | 113 | cio_disable_subchannel(sch); |
113 | } | 114 | } |
114 | 115 | ||
116 | static int chsc_subchannel_prepare(struct subchannel *sch) | ||
117 | { | ||
118 | int cc; | ||
119 | struct schib schib; | ||
120 | /* | ||
121 | * Don't allow suspend while the subchannel is not idle | ||
122 | * since we don't have a way to clear the subchannel and | ||
123 | * cannot disable it with a request running. | ||
124 | */ | ||
125 | cc = stsch(sch->schid, &schib); | ||
126 | if (!cc && scsw_stctl(&schib.scsw)) | ||
127 | return -EAGAIN; | ||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | static int chsc_subchannel_freeze(struct subchannel *sch) | ||
132 | { | ||
133 | return cio_disable_subchannel(sch); | ||
134 | } | ||
135 | |||
136 | static int chsc_subchannel_restore(struct subchannel *sch) | ||
137 | { | ||
138 | return cio_enable_subchannel(sch, (u32)(unsigned long)sch); | ||
139 | } | ||
140 | |||
115 | static struct css_device_id chsc_subchannel_ids[] = { | 141 | static struct css_device_id chsc_subchannel_ids[] = { |
116 | { .match_flags = 0x1, .type =SUBCHANNEL_TYPE_CHSC, }, | 142 | { .match_flags = 0x1, .type =SUBCHANNEL_TYPE_CHSC, }, |
117 | { /* end of list */ }, | 143 | { /* end of list */ }, |
@@ -125,6 +151,10 @@ static struct css_driver chsc_subchannel_driver = { | |||
125 | .probe = chsc_subchannel_probe, | 151 | .probe = chsc_subchannel_probe, |
126 | .remove = chsc_subchannel_remove, | 152 | .remove = chsc_subchannel_remove, |
127 | .shutdown = chsc_subchannel_shutdown, | 153 | .shutdown = chsc_subchannel_shutdown, |
154 | .prepare = chsc_subchannel_prepare, | ||
155 | .freeze = chsc_subchannel_freeze, | ||
156 | .thaw = chsc_subchannel_restore, | ||
157 | .restore = chsc_subchannel_restore, | ||
128 | .name = "chsc_subchannel", | 158 | .name = "chsc_subchannel", |
129 | }; | 159 | }; |
130 | 160 | ||
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index dc98b2c63862..30f516111307 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c | |||
@@ -1204,6 +1204,11 @@ static ssize_t cmb_enable_store(struct device *dev, | |||
1204 | 1204 | ||
1205 | DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store); | 1205 | DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store); |
1206 | 1206 | ||
1207 | int ccw_set_cmf(struct ccw_device *cdev, int enable) | ||
1208 | { | ||
1209 | return cmbops->set(cdev, enable ? 2 : 0); | ||
1210 | } | ||
1211 | |||
1207 | /** | 1212 | /** |
1208 | * enable_cmf() - switch on the channel measurement for a specific device | 1213 | * enable_cmf() - switch on the channel measurement for a specific device |
1209 | * @cdev: The ccw device to be enabled | 1214 | * @cdev: The ccw device to be enabled |
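Unlike enable_cmf()/disable_cmf(), the new ccw_set_cmf() helper only switches measurement on or off for a measurement block that is already set up, which is what the suspend path needs: the block stays allocated across the cycle. A condensed view of the pairing that ccw_device_pm_freeze()/ccw_device_pm_thaw() implement further down (function names here are illustrative):

static int example_stop_cmf(struct ccw_device *cdev)
{
	/* stop measurements so the css no longer writes into memory */
	return ccw_set_cmf(cdev, 0);
}

static int example_restart_cmf(struct ccw_device *cdev)
{
	/* the measurement block was kept allocated; switch it back on */
	return ccw_set_cmf(cdev, 1);
}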
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 0085d8901792..85d43c6bcb66 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -1,10 +1,10 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/cio/css.c | 2 | * driver for channel subsystem |
3 | * driver for channel subsystem | ||
4 | * | 3 | * |
5 | * Copyright IBM Corp. 2002,2008 | 4 | * Copyright IBM Corp. 2002, 2009 |
6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) | 5 | * |
7 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) |
7 | * Cornelia Huck (cornelia.huck@de.ibm.com) | ||
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "cio" | 10 | #define KMSG_COMPONENT "cio" |
@@ -17,6 +17,7 @@ | |||
17 | #include <linux/errno.h> | 17 | #include <linux/errno.h> |
18 | #include <linux/list.h> | 18 | #include <linux/list.h> |
19 | #include <linux/reboot.h> | 19 | #include <linux/reboot.h> |
20 | #include <linux/suspend.h> | ||
20 | #include <asm/isc.h> | 21 | #include <asm/isc.h> |
21 | #include <asm/crw.h> | 22 | #include <asm/crw.h> |
22 | 23 | ||
@@ -780,6 +781,79 @@ static struct notifier_block css_reboot_notifier = { | |||
780 | }; | 781 | }; |
781 | 782 | ||
782 | /* | 783 | /* |
784 | * Since the css devices are neither on a bus nor have a class | ||
785 | * nor have a special device type, we cannot stop/restart channel | ||
786 | * path measurements via the normal suspend/resume callbacks, but have | ||
787 | * to use notifiers. | ||
788 | */ | ||
789 | static int css_power_event(struct notifier_block *this, unsigned long event, | ||
790 | void *ptr) | ||
791 | { | ||
792 | void *secm_area; | ||
793 | int ret, i; | ||
794 | |||
795 | switch (event) { | ||
796 | case PM_HIBERNATION_PREPARE: | ||
797 | case PM_SUSPEND_PREPARE: | ||
798 | ret = NOTIFY_DONE; | ||
799 | for (i = 0; i <= __MAX_CSSID; i++) { | ||
800 | struct channel_subsystem *css; | ||
801 | |||
802 | css = channel_subsystems[i]; | ||
803 | mutex_lock(&css->mutex); | ||
804 | if (!css->cm_enabled) { | ||
805 | mutex_unlock(&css->mutex); | ||
806 | continue; | ||
807 | } | ||
808 | secm_area = (void *)get_zeroed_page(GFP_KERNEL | | ||
809 | GFP_DMA); | ||
810 | if (secm_area) { | ||
811 | if (__chsc_do_secm(css, 0, secm_area)) | ||
812 | ret = NOTIFY_BAD; | ||
813 | free_page((unsigned long)secm_area); | ||
814 | } else | ||
815 | ret = NOTIFY_BAD; | ||
816 | |||
817 | mutex_unlock(&css->mutex); | ||
818 | } | ||
819 | break; | ||
820 | case PM_POST_HIBERNATION: | ||
821 | case PM_POST_SUSPEND: | ||
822 | ret = NOTIFY_DONE; | ||
823 | for (i = 0; i <= __MAX_CSSID; i++) { | ||
824 | struct channel_subsystem *css; | ||
825 | |||
826 | css = channel_subsystems[i]; | ||
827 | mutex_lock(&css->mutex); | ||
828 | if (!css->cm_enabled) { | ||
829 | mutex_unlock(&css->mutex); | ||
830 | continue; | ||
831 | } | ||
832 | secm_area = (void *)get_zeroed_page(GFP_KERNEL | | ||
833 | GFP_DMA); | ||
834 | if (secm_area) { | ||
835 | if (__chsc_do_secm(css, 1, secm_area)) | ||
836 | ret = NOTIFY_BAD; | ||
837 | free_page((unsigned long)secm_area); | ||
838 | } else | ||
839 | ret = NOTIFY_BAD; | ||
840 | |||
841 | mutex_unlock(&css->mutex); | ||
842 | } | ||
843 | /* search for subchannels, which appeared during hibernation */ | ||
844 | css_schedule_reprobe(); | ||
845 | break; | ||
846 | default: | ||
847 | ret = NOTIFY_DONE; | ||
848 | } | ||
849 | return ret; | ||
850 | |||
851 | } | ||
852 | static struct notifier_block css_power_notifier = { | ||
853 | .notifier_call = css_power_event, | ||
854 | }; | ||
855 | |||
856 | /* | ||
783 | * Now that the driver core is running, we can setup our channel subsystem. | 857 | * Now that the driver core is running, we can setup our channel subsystem. |
784 | * The struct subchannel's are created during probing (except for the | 858 | * The struct subchannel's are created during probing (except for the |
785 | * static console subchannel). | 859 | * static console subchannel). |
@@ -852,6 +926,11 @@ init_channel_subsystem (void) | |||
852 | ret = register_reboot_notifier(&css_reboot_notifier); | 926 | ret = register_reboot_notifier(&css_reboot_notifier); |
853 | if (ret) | 927 | if (ret) |
854 | goto out_unregister; | 928 | goto out_unregister; |
929 | ret = register_pm_notifier(&css_power_notifier); | ||
930 | if (ret) { | ||
931 | unregister_reboot_notifier(&css_reboot_notifier); | ||
932 | goto out_unregister; | ||
933 | } | ||
855 | css_init_done = 1; | 934 | css_init_done = 1; |
856 | 935 | ||
857 | /* Enable default isc for I/O subchannels. */ | 936 | /* Enable default isc for I/O subchannels. */ |
@@ -953,6 +1032,73 @@ static int css_uevent(struct device *dev, struct kobj_uevent_env *env) | |||
953 | return ret; | 1032 | return ret; |
954 | } | 1033 | } |
955 | 1034 | ||
1035 | static int css_pm_prepare(struct device *dev) | ||
1036 | { | ||
1037 | struct subchannel *sch = to_subchannel(dev); | ||
1038 | struct css_driver *drv; | ||
1039 | |||
1040 | if (mutex_is_locked(&sch->reg_mutex)) | ||
1041 | return -EAGAIN; | ||
1042 | if (!sch->dev.driver) | ||
1043 | return 0; | ||
1044 | drv = to_cssdriver(sch->dev.driver); | ||
1045 | /* Notify drivers that they may not register children. */ | ||
1046 | return drv->prepare ? drv->prepare(sch) : 0; | ||
1047 | } | ||
1048 | |||
1049 | static void css_pm_complete(struct device *dev) | ||
1050 | { | ||
1051 | struct subchannel *sch = to_subchannel(dev); | ||
1052 | struct css_driver *drv; | ||
1053 | |||
1054 | if (!sch->dev.driver) | ||
1055 | return; | ||
1056 | drv = to_cssdriver(sch->dev.driver); | ||
1057 | if (drv->complete) | ||
1058 | drv->complete(sch); | ||
1059 | } | ||
1060 | |||
1061 | static int css_pm_freeze(struct device *dev) | ||
1062 | { | ||
1063 | struct subchannel *sch = to_subchannel(dev); | ||
1064 | struct css_driver *drv; | ||
1065 | |||
1066 | if (!sch->dev.driver) | ||
1067 | return 0; | ||
1068 | drv = to_cssdriver(sch->dev.driver); | ||
1069 | return drv->freeze ? drv->freeze(sch) : 0; | ||
1070 | } | ||
1071 | |||
1072 | static int css_pm_thaw(struct device *dev) | ||
1073 | { | ||
1074 | struct subchannel *sch = to_subchannel(dev); | ||
1075 | struct css_driver *drv; | ||
1076 | |||
1077 | if (!sch->dev.driver) | ||
1078 | return 0; | ||
1079 | drv = to_cssdriver(sch->dev.driver); | ||
1080 | return drv->thaw ? drv->thaw(sch) : 0; | ||
1081 | } | ||
1082 | |||
1083 | static int css_pm_restore(struct device *dev) | ||
1084 | { | ||
1085 | struct subchannel *sch = to_subchannel(dev); | ||
1086 | struct css_driver *drv; | ||
1087 | |||
1088 | if (!sch->dev.driver) | ||
1089 | return 0; | ||
1090 | drv = to_cssdriver(sch->dev.driver); | ||
1091 | return drv->restore ? drv->restore(sch) : 0; | ||
1092 | } | ||
1093 | |||
1094 | static struct dev_pm_ops css_pm_ops = { | ||
1095 | .prepare = css_pm_prepare, | ||
1096 | .complete = css_pm_complete, | ||
1097 | .freeze = css_pm_freeze, | ||
1098 | .thaw = css_pm_thaw, | ||
1099 | .restore = css_pm_restore, | ||
1100 | }; | ||
1101 | |||
956 | struct bus_type css_bus_type = { | 1102 | struct bus_type css_bus_type = { |
957 | .name = "css", | 1103 | .name = "css", |
958 | .match = css_bus_match, | 1104 | .match = css_bus_match, |
@@ -960,6 +1106,7 @@ struct bus_type css_bus_type = { | |||
960 | .remove = css_remove, | 1106 | .remove = css_remove, |
961 | .shutdown = css_shutdown, | 1107 | .shutdown = css_shutdown, |
962 | .uevent = css_uevent, | 1108 | .uevent = css_uevent, |
1109 | .pm = &css_pm_ops, | ||
963 | }; | 1110 | }; |
964 | 1111 | ||
965 | /** | 1112 | /** |
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 57ebf120f825..9763eeec7458 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h | |||
@@ -70,6 +70,11 @@ struct chp_link; | |||
70 | * @probe: function called on probe | 70 | * @probe: function called on probe |
71 | * @remove: function called on remove | 71 | * @remove: function called on remove |
72 | * @shutdown: called at device shutdown | 72 | * @shutdown: called at device shutdown |
73 | * @prepare: prepare for pm state transition | ||
74 | * @complete: undo work done in @prepare | ||
75 | * @freeze: callback for freezing during hibernation snapshotting | ||
76 | * @thaw: undo work done in @freeze | ||
77 | * @restore: callback for restoring after hibernation | ||
73 | * @name: name of the device driver | 78 | * @name: name of the device driver |
74 | */ | 79 | */ |
75 | struct css_driver { | 80 | struct css_driver { |
@@ -82,6 +87,11 @@ struct css_driver { | |||
82 | int (*probe)(struct subchannel *); | 87 | int (*probe)(struct subchannel *); |
83 | int (*remove)(struct subchannel *); | 88 | int (*remove)(struct subchannel *); |
84 | void (*shutdown)(struct subchannel *); | 89 | void (*shutdown)(struct subchannel *); |
90 | int (*prepare) (struct subchannel *); | ||
91 | void (*complete) (struct subchannel *); | ||
92 | int (*freeze)(struct subchannel *); | ||
93 | int (*thaw) (struct subchannel *); | ||
94 | int (*restore)(struct subchannel *); | ||
85 | const char *name; | 95 | const char *name; |
86 | }; | 96 | }; |
87 | 97 | ||
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 35441fa16be1..3c57c1a18bb8 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -138,6 +138,19 @@ static struct css_device_id io_subchannel_ids[] = { | |||
138 | }; | 138 | }; |
139 | MODULE_DEVICE_TABLE(css, io_subchannel_ids); | 139 | MODULE_DEVICE_TABLE(css, io_subchannel_ids); |
140 | 140 | ||
141 | static int io_subchannel_prepare(struct subchannel *sch) | ||
142 | { | ||
143 | struct ccw_device *cdev; | ||
144 | /* | ||
145 | * Don't allow suspend while a ccw device registration | ||
146 | * is still outstanding. | ||
147 | */ | ||
148 | cdev = sch_get_cdev(sch); | ||
149 | if (cdev && !device_is_registered(&cdev->dev)) | ||
150 | return -EAGAIN; | ||
151 | return 0; | ||
152 | } | ||
153 | |||
141 | static struct css_driver io_subchannel_driver = { | 154 | static struct css_driver io_subchannel_driver = { |
142 | .owner = THIS_MODULE, | 155 | .owner = THIS_MODULE, |
143 | .subchannel_type = io_subchannel_ids, | 156 | .subchannel_type = io_subchannel_ids, |
@@ -148,6 +161,7 @@ static struct css_driver io_subchannel_driver = { | |||
148 | .probe = io_subchannel_probe, | 161 | .probe = io_subchannel_probe, |
149 | .remove = io_subchannel_remove, | 162 | .remove = io_subchannel_remove, |
150 | .shutdown = io_subchannel_shutdown, | 163 | .shutdown = io_subchannel_shutdown, |
164 | .prepare = io_subchannel_prepare, | ||
151 | }; | 165 | }; |
152 | 166 | ||
153 | struct workqueue_struct *ccw_device_work; | 167 | struct workqueue_struct *ccw_device_work; |
@@ -1775,6 +1789,15 @@ ccw_device_probe_console(void) | |||
1775 | return &console_cdev; | 1789 | return &console_cdev; |
1776 | } | 1790 | } |
1777 | 1791 | ||
1792 | static int ccw_device_pm_restore(struct device *dev); | ||
1793 | |||
1794 | int ccw_device_force_console(void) | ||
1795 | { | ||
1796 | if (!console_cdev_in_use) | ||
1797 | return -ENODEV; | ||
1798 | return ccw_device_pm_restore(&console_cdev.dev); | ||
1799 | } | ||
1800 | EXPORT_SYMBOL_GPL(ccw_device_force_console); | ||
1778 | 1801 | ||
1779 | const char *cio_get_console_cdev_name(struct subchannel *sch) | 1802 | const char *cio_get_console_cdev_name(struct subchannel *sch) |
1780 | { | 1803 | { |
@@ -1895,6 +1918,242 @@ static void ccw_device_shutdown(struct device *dev) | |||
1895 | disable_cmf(cdev); | 1918 | disable_cmf(cdev); |
1896 | } | 1919 | } |
1897 | 1920 | ||
1921 | static int ccw_device_pm_prepare(struct device *dev) | ||
1922 | { | ||
1923 | struct ccw_device *cdev = to_ccwdev(dev); | ||
1924 | |||
1925 | if (work_pending(&cdev->private->kick_work)) | ||
1926 | return -EAGAIN; | ||
1927 | /* Fail while device is being set online/offline. */ | ||
1928 | if (atomic_read(&cdev->private->onoff)) | ||
1929 | return -EAGAIN; | ||
1930 | |||
1931 | if (cdev->online && cdev->drv && cdev->drv->prepare) | ||
1932 | return cdev->drv->prepare(cdev); | ||
1933 | |||
1934 | return 0; | ||
1935 | } | ||
1936 | |||
1937 | static void ccw_device_pm_complete(struct device *dev) | ||
1938 | { | ||
1939 | struct ccw_device *cdev = to_ccwdev(dev); | ||
1940 | |||
1941 | if (cdev->online && cdev->drv && cdev->drv->complete) | ||
1942 | cdev->drv->complete(cdev); | ||
1943 | } | ||
1944 | |||
1945 | static int ccw_device_pm_freeze(struct device *dev) | ||
1946 | { | ||
1947 | struct ccw_device *cdev = to_ccwdev(dev); | ||
1948 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
1949 | int ret, cm_enabled; | ||
1950 | |||
1951 | /* Fail suspend while device is in transitional state. */ | ||
1952 | if (!dev_fsm_final_state(cdev)) | ||
1953 | return -EAGAIN; | ||
1954 | if (!cdev->online) | ||
1955 | return 0; | ||
1956 | if (cdev->drv && cdev->drv->freeze) { | ||
1957 | ret = cdev->drv->freeze(cdev); | ||
1958 | if (ret) | ||
1959 | return ret; | ||
1960 | } | ||
1961 | |||
1962 | spin_lock_irq(sch->lock); | ||
1963 | cm_enabled = cdev->private->cmb != NULL; | ||
1964 | spin_unlock_irq(sch->lock); | ||
1965 | if (cm_enabled) { | ||
1966 | /* Don't have the css write to memory. */ | ||
1967 | ret = ccw_set_cmf(cdev, 0); | ||
1968 | if (ret) | ||
1969 | return ret; | ||
1970 | } | ||
1971 | /* From here on, disallow device driver I/O. */ | ||
1972 | spin_lock_irq(sch->lock); | ||
1973 | ret = cio_disable_subchannel(sch); | ||
1974 | spin_unlock_irq(sch->lock); | ||
1975 | |||
1976 | return ret; | ||
1977 | } | ||
1978 | |||
1979 | static int ccw_device_pm_thaw(struct device *dev) | ||
1980 | { | ||
1981 | struct ccw_device *cdev = to_ccwdev(dev); | ||
1982 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
1983 | int ret, cm_enabled; | ||
1984 | |||
1985 | if (!cdev->online) | ||
1986 | return 0; | ||
1987 | |||
1988 | spin_lock_irq(sch->lock); | ||
1989 | /* Allow device driver I/O again. */ | ||
1990 | ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); | ||
1991 | cm_enabled = cdev->private->cmb != NULL; | ||
1992 | spin_unlock_irq(sch->lock); | ||
1993 | if (ret) | ||
1994 | return ret; | ||
1995 | |||
1996 | if (cm_enabled) { | ||
1997 | ret = ccw_set_cmf(cdev, 1); | ||
1998 | if (ret) | ||
1999 | return ret; | ||
2000 | } | ||
2001 | |||
2002 | if (cdev->drv && cdev->drv->thaw) | ||
2003 | ret = cdev->drv->thaw(cdev); | ||
2004 | |||
2005 | return ret; | ||
2006 | } | ||
2007 | |||
2008 | static void __ccw_device_pm_restore(struct ccw_device *cdev) | ||
2009 | { | ||
2010 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
2011 | int ret; | ||
2012 | |||
2013 | if (cio_is_console(sch->schid)) | ||
2014 | goto out; | ||
2015 | /* | ||
2016 | * While we were sleeping, devices may have gone or become | ||
2017 | * available again. Kick re-detection. | ||
2018 | */ | ||
2019 | spin_lock_irq(sch->lock); | ||
2020 | cdev->private->flags.resuming = 1; | ||
2021 | ret = ccw_device_recognition(cdev); | ||
2022 | spin_unlock_irq(sch->lock); | ||
2023 | if (ret) { | ||
2024 | CIO_MSG_EVENT(0, "Couldn't start recognition for device " | ||
2025 | "%s (ret=%d)\n", dev_name(&cdev->dev), ret); | ||
2026 | spin_lock_irq(sch->lock); | ||
2027 | cdev->private->state = DEV_STATE_DISCONNECTED; | ||
2028 | spin_unlock_irq(sch->lock); | ||
2029 | /* notify driver after the resume cb */ | ||
2030 | goto out; | ||
2031 | } | ||
2032 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) || | ||
2033 | cdev->private->state == DEV_STATE_DISCONNECTED); | ||
2034 | |||
2035 | out: | ||
2036 | cdev->private->flags.resuming = 0; | ||
2037 | } | ||
2038 | |||
2039 | static int resume_handle_boxed(struct ccw_device *cdev) | ||
2040 | { | ||
2041 | cdev->private->state = DEV_STATE_BOXED; | ||
2042 | if (ccw_device_notify(cdev, CIO_BOXED)) | ||
2043 | return 0; | ||
2044 | ccw_device_schedule_sch_unregister(cdev); | ||
2045 | return -ENODEV; | ||
2046 | } | ||
2047 | |||
2048 | static int resume_handle_disc(struct ccw_device *cdev) | ||
2049 | { | ||
2050 | cdev->private->state = DEV_STATE_DISCONNECTED; | ||
2051 | if (ccw_device_notify(cdev, CIO_GONE)) | ||
2052 | return 0; | ||
2053 | ccw_device_schedule_sch_unregister(cdev); | ||
2054 | return -ENODEV; | ||
2055 | } | ||
2056 | |||
2057 | static int ccw_device_pm_restore(struct device *dev) | ||
2058 | { | ||
2059 | struct ccw_device *cdev = to_ccwdev(dev); | ||
2060 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
2061 | int ret = 0, cm_enabled; | ||
2062 | |||
2063 | __ccw_device_pm_restore(cdev); | ||
2064 | spin_lock_irq(sch->lock); | ||
2065 | if (cio_is_console(sch->schid)) { | ||
2066 | cio_enable_subchannel(sch, (u32)(addr_t)sch); | ||
2067 | spin_unlock_irq(sch->lock); | ||
2068 | goto out_restore; | ||
2069 | } | ||
2070 | cdev->private->flags.donotify = 0; | ||
2071 | /* check recognition results */ | ||
2072 | switch (cdev->private->state) { | ||
2073 | case DEV_STATE_OFFLINE: | ||
2074 | break; | ||
2075 | case DEV_STATE_BOXED: | ||
2076 | ret = resume_handle_boxed(cdev); | ||
2077 | spin_unlock_irq(sch->lock); | ||
2078 | if (ret) | ||
2079 | goto out; | ||
2080 | goto out_restore; | ||
2081 | case DEV_STATE_DISCONNECTED: | ||
2082 | goto out_disc_unlock; | ||
2083 | default: | ||
2084 | goto out_unreg_unlock; | ||
2085 | } | ||
2086 | /* check if the device id has changed */ | ||
2087 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { | ||
2088 | CIO_MSG_EVENT(0, "resume: sch %s: failed (devno changed from " | ||
2089 | "%04x to %04x)\n", dev_name(&sch->dev), | ||
2090 | cdev->private->dev_id.devno, | ||
2091 | sch->schib.pmcw.dev); | ||
2092 | goto out_unreg_unlock; | ||
2093 | } | ||
2094 | /* check if the device type has changed */ | ||
2095 | if (!ccw_device_test_sense_data(cdev)) { | ||
2096 | ccw_device_update_sense_data(cdev); | ||
2097 | PREPARE_WORK(&cdev->private->kick_work, | ||
2098 | ccw_device_do_unbind_bind); | ||
2099 | queue_work(ccw_device_work, &cdev->private->kick_work); | ||
2100 | ret = -ENODEV; | ||
2101 | goto out_unlock; | ||
2102 | } | ||
2103 | if (!cdev->online) { | ||
2104 | ret = 0; | ||
2105 | goto out_unlock; | ||
2106 | } | ||
2107 | ret = ccw_device_online(cdev); | ||
2108 | if (ret) | ||
2109 | goto out_disc_unlock; | ||
2110 | |||
2111 | cm_enabled = cdev->private->cmb != NULL; | ||
2112 | spin_unlock_irq(sch->lock); | ||
2113 | |||
2114 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); | ||
2115 | if (cdev->private->state != DEV_STATE_ONLINE) { | ||
2116 | spin_lock_irq(sch->lock); | ||
2117 | goto out_disc_unlock; | ||
2118 | } | ||
2119 | if (cm_enabled) { | ||
2120 | ret = ccw_set_cmf(cdev, 1); | ||
2121 | if (ret) { | ||
2122 | CIO_MSG_EVENT(2, "resume: cdev %s: cmf failed " | ||
2123 | "(rc=%d)\n", dev_name(&cdev->dev), ret); | ||
2124 | ret = 0; | ||
2125 | } | ||
2126 | } | ||
2127 | |||
2128 | out_restore: | ||
2129 | if (cdev->online && cdev->drv && cdev->drv->restore) | ||
2130 | ret = cdev->drv->restore(cdev); | ||
2131 | out: | ||
2132 | return ret; | ||
2133 | |||
2134 | out_disc_unlock: | ||
2135 | ret = resume_handle_disc(cdev); | ||
2136 | spin_unlock_irq(sch->lock); | ||
2137 | if (ret) | ||
2138 | return ret; | ||
2139 | goto out_restore; | ||
2140 | |||
2141 | out_unreg_unlock: | ||
2142 | ccw_device_schedule_sch_unregister(cdev); | ||
2143 | ret = -ENODEV; | ||
2144 | out_unlock: | ||
2145 | spin_unlock_irq(sch->lock); | ||
2146 | return ret; | ||
2147 | } | ||
2148 | |||
2149 | static struct dev_pm_ops ccw_pm_ops = { | ||
2150 | .prepare = ccw_device_pm_prepare, | ||
2151 | .complete = ccw_device_pm_complete, | ||
2152 | .freeze = ccw_device_pm_freeze, | ||
2153 | .thaw = ccw_device_pm_thaw, | ||
2154 | .restore = ccw_device_pm_restore, | ||
2155 | }; | ||
2156 | |||
1898 | struct bus_type ccw_bus_type = { | 2157 | struct bus_type ccw_bus_type = { |
1899 | .name = "ccw", | 2158 | .name = "ccw", |
1900 | .match = ccw_bus_match, | 2159 | .match = ccw_bus_match, |
@@ -1902,6 +2161,7 @@ struct bus_type ccw_bus_type = { | |||
1902 | .probe = ccw_device_probe, | 2161 | .probe = ccw_device_probe, |
1903 | .remove = ccw_device_remove, | 2162 | .remove = ccw_device_remove, |
1904 | .shutdown = ccw_device_shutdown, | 2163 | .shutdown = ccw_device_shutdown, |
2164 | .pm = &ccw_pm_ops, | ||
1905 | }; | 2165 | }; |
1906 | 2166 | ||
1907 | /** | 2167 | /** |
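The bus-level callbacks above plug the existing driver model into hibernation: the PM core calls prepare and freeze before the memory image is created, thaw once the image exists so it can be written out, and restore (followed by complete) when booting from the image. What an individual subchannel driver has to supply are only the new ccw_driver hooks these bus callbacks forward to. A minimal sketch, with hypothetical foo_* names and a made-up device ID, not taken from this patch:

#include <linux/module.h>
#include <asm/ccwdev.h>

/* made-up control unit type/model, for illustration only */
static struct ccw_device_id foo_ids[] = {
	{ CCW_DEVICE(0x1234, 0x01) },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(ccw, foo_ids);

static int foo_set_online(struct ccw_device *cdev)
{
	return 0;		/* bring the device up */
}

static int foo_set_offline(struct ccw_device *cdev)
{
	return 0;		/* take the device down */
}

static int foo_freeze(struct ccw_device *cdev)
{
	/* stop issuing channel programs; ccw_device_pm_freeze() then
	 * disables the subchannel behind us */
	return 0;
}

static int foo_thaw(struct ccw_device *cdev)
{
	/* the subchannel has been re-enabled; resume normal I/O */
	return 0;
}

static int foo_restore(struct ccw_device *cdev)
{
	/* after resume from the image the device state is unknown and
	 * may need a full re-initialization */
	return 0;
}

static struct ccw_driver foo_ccw_driver = {
	.owner		= THIS_MODULE,
	.name		= "foo",
	.ids		= foo_ids,
	.set_online	= foo_set_online,
	.set_offline	= foo_set_offline,
	.freeze		= foo_freeze,
	.thaw		= foo_thaw,
	.restore	= foo_restore,
};

Registering this with ccw_driver_register() is unchanged; the ccw_pm_ops above takes care of quiescing the subchannel and of re-running device recognition on restore before the driver callback is invoked.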
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index f1cbbd94ad4e..e3975107a578 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h | |||
@@ -87,6 +87,8 @@ int ccw_device_is_orphan(struct ccw_device *); | |||
87 | int ccw_device_recognition(struct ccw_device *); | 87 | int ccw_device_recognition(struct ccw_device *); |
88 | int ccw_device_online(struct ccw_device *); | 88 | int ccw_device_online(struct ccw_device *); |
89 | int ccw_device_offline(struct ccw_device *); | 89 | int ccw_device_offline(struct ccw_device *); |
90 | void ccw_device_update_sense_data(struct ccw_device *); | ||
91 | int ccw_device_test_sense_data(struct ccw_device *); | ||
90 | void ccw_device_schedule_sch_unregister(struct ccw_device *); | 92 | void ccw_device_schedule_sch_unregister(struct ccw_device *); |
91 | int ccw_purge_blacklisted(void); | 93 | int ccw_purge_blacklisted(void); |
92 | 94 | ||
@@ -133,5 +135,6 @@ extern struct bus_type ccw_bus_type; | |||
133 | void retry_set_schib(struct ccw_device *cdev); | 135 | void retry_set_schib(struct ccw_device *cdev); |
134 | void cmf_retry_copy_block(struct ccw_device *); | 136 | void cmf_retry_copy_block(struct ccw_device *); |
135 | int cmf_reenable(struct ccw_device *); | 137 | int cmf_reenable(struct ccw_device *); |
138 | int ccw_set_cmf(struct ccw_device *cdev, int enable); | ||
136 | extern struct device_attribute dev_attr_cmb_enable; | 139 | extern struct device_attribute dev_attr_cmb_enable; |
137 | #endif | 140 | #endif |
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index e46049261561..3db88c52d287 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -177,29 +177,21 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) | |||
177 | panic("Can't stop i/o on subchannel.\n"); | 177 | panic("Can't stop i/o on subchannel.\n"); |
178 | } | 178 | } |
179 | 179 | ||
180 | static int | 180 | void ccw_device_update_sense_data(struct ccw_device *cdev) |
181 | ccw_device_handle_oper(struct ccw_device *cdev) | ||
182 | { | 181 | { |
183 | struct subchannel *sch; | 182 | memset(&cdev->id, 0, sizeof(cdev->id)); |
183 | cdev->id.cu_type = cdev->private->senseid.cu_type; | ||
184 | cdev->id.cu_model = cdev->private->senseid.cu_model; | ||
185 | cdev->id.dev_type = cdev->private->senseid.dev_type; | ||
186 | cdev->id.dev_model = cdev->private->senseid.dev_model; | ||
187 | } | ||
184 | 188 | ||
185 | sch = to_subchannel(cdev->dev.parent); | 189 | int ccw_device_test_sense_data(struct ccw_device *cdev) |
186 | cdev->private->flags.recog_done = 1; | 190 | { |
187 | /* | 191 | return cdev->id.cu_type == cdev->private->senseid.cu_type && |
188 | * Check if cu type and device type still match. If | 192 | cdev->id.cu_model == cdev->private->senseid.cu_model && |
189 | * not, it is certainly another device and we have to | 193 | cdev->id.dev_type == cdev->private->senseid.dev_type && |
190 | * de- and re-register. | 194 | cdev->id.dev_model == cdev->private->senseid.dev_model; |
191 | */ | ||
192 | if (cdev->id.cu_type != cdev->private->senseid.cu_type || | ||
193 | cdev->id.cu_model != cdev->private->senseid.cu_model || | ||
194 | cdev->id.dev_type != cdev->private->senseid.dev_type || | ||
195 | cdev->id.dev_model != cdev->private->senseid.dev_model) { | ||
196 | PREPARE_WORK(&cdev->private->kick_work, | ||
197 | ccw_device_do_unbind_bind); | ||
198 | queue_work(ccw_device_work, &cdev->private->kick_work); | ||
199 | return 0; | ||
200 | } | ||
201 | cdev->private->flags.donotify = 1; | ||
202 | return 1; | ||
203 | } | 195 | } |
204 | 196 | ||
205 | /* | 197 | /* |
@@ -233,7 +225,7 @@ static void | |||
233 | ccw_device_recog_done(struct ccw_device *cdev, int state) | 225 | ccw_device_recog_done(struct ccw_device *cdev, int state) |
234 | { | 226 | { |
235 | struct subchannel *sch; | 227 | struct subchannel *sch; |
236 | int notify, old_lpm, same_dev; | 228 | int old_lpm; |
237 | 229 | ||
238 | sch = to_subchannel(cdev->dev.parent); | 230 | sch = to_subchannel(cdev->dev.parent); |
239 | 231 | ||
@@ -263,8 +255,12 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) | |||
263 | wake_up(&cdev->private->wait_q); | 255 | wake_up(&cdev->private->wait_q); |
264 | return; | 256 | return; |
265 | } | 257 | } |
266 | notify = 0; | 258 | if (cdev->private->flags.resuming) { |
267 | same_dev = 0; /* Keep the compiler quiet... */ | 259 | cdev->private->state = state; |
260 | cdev->private->flags.recog_done = 1; | ||
261 | wake_up(&cdev->private->wait_q); | ||
262 | return; | ||
263 | } | ||
268 | switch (state) { | 264 | switch (state) { |
269 | case DEV_STATE_NOT_OPER: | 265 | case DEV_STATE_NOT_OPER: |
270 | CIO_MSG_EVENT(2, "SenseID : unknown device %04x on " | 266 | CIO_MSG_EVENT(2, "SenseID : unknown device %04x on " |
@@ -273,34 +269,31 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) | |||
273 | sch->schid.ssid, sch->schid.sch_no); | 269 | sch->schid.ssid, sch->schid.sch_no); |
274 | break; | 270 | break; |
275 | case DEV_STATE_OFFLINE: | 271 | case DEV_STATE_OFFLINE: |
276 | if (cdev->online) { | 272 | if (!cdev->online) { |
277 | same_dev = ccw_device_handle_oper(cdev); | 273 | ccw_device_update_sense_data(cdev); |
278 | notify = 1; | 274 | /* Issue device info message. */ |
275 | CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: " | ||
276 | "CU Type/Mod = %04X/%02X, Dev Type/Mod " | ||
277 | "= %04X/%02X\n", | ||
278 | cdev->private->dev_id.ssid, | ||
279 | cdev->private->dev_id.devno, | ||
280 | cdev->id.cu_type, cdev->id.cu_model, | ||
281 | cdev->id.dev_type, cdev->id.dev_model); | ||
282 | break; | ||
279 | } | 283 | } |
280 | /* fill out sense information */ | 284 | cdev->private->state = DEV_STATE_OFFLINE; |
281 | memset(&cdev->id, 0, sizeof(cdev->id)); | 285 | cdev->private->flags.recog_done = 1; |
282 | cdev->id.cu_type = cdev->private->senseid.cu_type; | 286 | if (ccw_device_test_sense_data(cdev)) { |
283 | cdev->id.cu_model = cdev->private->senseid.cu_model; | 287 | cdev->private->flags.donotify = 1; |
284 | cdev->id.dev_type = cdev->private->senseid.dev_type; | 288 | ccw_device_online(cdev); |
285 | cdev->id.dev_model = cdev->private->senseid.dev_model; | 289 | wake_up(&cdev->private->wait_q); |
286 | if (notify) { | 290 | } else { |
287 | cdev->private->state = DEV_STATE_OFFLINE; | 291 | ccw_device_update_sense_data(cdev); |
288 | if (same_dev) { | 292 | PREPARE_WORK(&cdev->private->kick_work, |
289 | /* Get device online again. */ | 293 | ccw_device_do_unbind_bind); |
290 | ccw_device_online(cdev); | 294 | queue_work(ccw_device_work, &cdev->private->kick_work); |
291 | wake_up(&cdev->private->wait_q); | ||
292 | } | ||
293 | return; | ||
294 | } | 295 | } |
295 | /* Issue device info message. */ | 296 | return; |
296 | CIO_MSG_EVENT(4, "SenseID : device 0.%x.%04x reports: " | ||
297 | "CU Type/Mod = %04X/%02X, Dev Type/Mod = " | ||
298 | "%04X/%02X\n", | ||
299 | cdev->private->dev_id.ssid, | ||
300 | cdev->private->dev_id.devno, | ||
301 | cdev->id.cu_type, cdev->id.cu_model, | ||
302 | cdev->id.dev_type, cdev->id.dev_model); | ||
303 | break; | ||
304 | case DEV_STATE_BOXED: | 297 | case DEV_STATE_BOXED: |
305 | CIO_MSG_EVENT(0, "SenseID : boxed device %04x on " | 298 | CIO_MSG_EVENT(0, "SenseID : boxed device %04x on " |
306 | " subchannel 0.%x.%04x\n", | 299 | " subchannel 0.%x.%04x\n", |
@@ -502,9 +495,6 @@ ccw_device_recognition(struct ccw_device *cdev) | |||
502 | struct subchannel *sch; | 495 | struct subchannel *sch; |
503 | int ret; | 496 | int ret; |
504 | 497 | ||
505 | if ((cdev->private->state != DEV_STATE_NOT_OPER) && | ||
506 | (cdev->private->state != DEV_STATE_BOXED)) | ||
507 | return -EINVAL; | ||
508 | sch = to_subchannel(cdev->dev.parent); | 498 | sch = to_subchannel(cdev->dev.parent); |
509 | ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); | 499 | ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); |
510 | if (ret != 0) | 500 | if (ret != 0) |
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index bf0a24af39a0..2d0efee8a290 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
@@ -1,10 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/cio/device_ops.c | 2 | * Copyright IBM Corp. 2002, 2009 |
3 | * | 3 | * |
4 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | 4 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) |
5 | * IBM Corporation | 5 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
7 | * Cornelia Huck (cornelia.huck@de.ibm.com) | ||
8 | */ | 6 | */ |
9 | #include <linux/module.h> | 7 | #include <linux/module.h> |
10 | #include <linux/init.h> | 8 | #include <linux/init.h> |
@@ -116,12 +114,15 @@ int ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) | |||
116 | 114 | ||
117 | if (!cdev || !cdev->dev.parent) | 115 | if (!cdev || !cdev->dev.parent) |
118 | return -ENODEV; | 116 | return -ENODEV; |
117 | sch = to_subchannel(cdev->dev.parent); | ||
118 | if (!sch->schib.pmcw.ena) | ||
119 | return -EINVAL; | ||
119 | if (cdev->private->state == DEV_STATE_NOT_OPER) | 120 | if (cdev->private->state == DEV_STATE_NOT_OPER) |
120 | return -ENODEV; | 121 | return -ENODEV; |
121 | if (cdev->private->state != DEV_STATE_ONLINE && | 122 | if (cdev->private->state != DEV_STATE_ONLINE && |
122 | cdev->private->state != DEV_STATE_W4SENSE) | 123 | cdev->private->state != DEV_STATE_W4SENSE) |
123 | return -EINVAL; | 124 | return -EINVAL; |
124 | sch = to_subchannel(cdev->dev.parent); | 125 | |
125 | ret = cio_clear(sch); | 126 | ret = cio_clear(sch); |
126 | if (ret == 0) | 127 | if (ret == 0) |
127 | cdev->private->intparm = intparm; | 128 | cdev->private->intparm = intparm; |
@@ -162,6 +163,8 @@ int ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa, | |||
162 | if (!cdev || !cdev->dev.parent) | 163 | if (!cdev || !cdev->dev.parent) |
163 | return -ENODEV; | 164 | return -ENODEV; |
164 | sch = to_subchannel(cdev->dev.parent); | 165 | sch = to_subchannel(cdev->dev.parent); |
166 | if (!sch->schib.pmcw.ena) | ||
167 | return -EINVAL; | ||
165 | if (cdev->private->state == DEV_STATE_NOT_OPER) | 168 | if (cdev->private->state == DEV_STATE_NOT_OPER) |
166 | return -ENODEV; | 169 | return -ENODEV; |
167 | if (cdev->private->state == DEV_STATE_VERIFY || | 170 | if (cdev->private->state == DEV_STATE_VERIFY || |
@@ -337,12 +340,15 @@ int ccw_device_halt(struct ccw_device *cdev, unsigned long intparm) | |||
337 | 340 | ||
338 | if (!cdev || !cdev->dev.parent) | 341 | if (!cdev || !cdev->dev.parent) |
339 | return -ENODEV; | 342 | return -ENODEV; |
343 | sch = to_subchannel(cdev->dev.parent); | ||
344 | if (!sch->schib.pmcw.ena) | ||
345 | return -EINVAL; | ||
340 | if (cdev->private->state == DEV_STATE_NOT_OPER) | 346 | if (cdev->private->state == DEV_STATE_NOT_OPER) |
341 | return -ENODEV; | 347 | return -ENODEV; |
342 | if (cdev->private->state != DEV_STATE_ONLINE && | 348 | if (cdev->private->state != DEV_STATE_ONLINE && |
343 | cdev->private->state != DEV_STATE_W4SENSE) | 349 | cdev->private->state != DEV_STATE_W4SENSE) |
344 | return -EINVAL; | 350 | return -EINVAL; |
345 | sch = to_subchannel(cdev->dev.parent); | 351 | |
346 | ret = cio_halt(sch); | 352 | ret = cio_halt(sch); |
347 | if (ret == 0) | 353 | if (ret == 0) |
348 | cdev->private->intparm = intparm; | 354 | cdev->private->intparm = intparm; |
@@ -369,6 +375,8 @@ int ccw_device_resume(struct ccw_device *cdev) | |||
369 | if (!cdev || !cdev->dev.parent) | 375 | if (!cdev || !cdev->dev.parent) |
370 | return -ENODEV; | 376 | return -ENODEV; |
371 | sch = to_subchannel(cdev->dev.parent); | 377 | sch = to_subchannel(cdev->dev.parent); |
378 | if (!sch->schib.pmcw.ena) | ||
379 | return -EINVAL; | ||
372 | if (cdev->private->state == DEV_STATE_NOT_OPER) | 380 | if (cdev->private->state == DEV_STATE_NOT_OPER) |
373 | return -ENODEV; | 381 | return -ENODEV; |
374 | if (cdev->private->state != DEV_STATE_ONLINE || | 382 | if (cdev->private->state != DEV_STATE_ONLINE || |
@@ -580,6 +588,8 @@ int ccw_device_tm_start_key(struct ccw_device *cdev, struct tcw *tcw, | |||
580 | int rc; | 588 | int rc; |
581 | 589 | ||
582 | sch = to_subchannel(cdev->dev.parent); | 590 | sch = to_subchannel(cdev->dev.parent); |
591 | if (!sch->schib.pmcw.ena) | ||
592 | return -EINVAL; | ||
583 | if (cdev->private->state != DEV_STATE_ONLINE) | 593 | if (cdev->private->state != DEV_STATE_ONLINE) |
584 | return -EIO; | 594 | return -EIO; |
585 | /* Adjust requested path mask to excluded varied off paths. */ | 595 | /* Adjust requested path mask to excluded varied off paths. */ |
@@ -669,6 +679,8 @@ int ccw_device_tm_intrg(struct ccw_device *cdev) | |||
669 | { | 679 | { |
670 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | 680 | struct subchannel *sch = to_subchannel(cdev->dev.parent); |
671 | 681 | ||
682 | if (!sch->schib.pmcw.ena) | ||
683 | return -EINVAL; | ||
672 | if (cdev->private->state != DEV_STATE_ONLINE) | 684 | if (cdev->private->state != DEV_STATE_ONLINE) |
673 | return -EIO; | 685 | return -EIO; |
674 | if (!scsw_is_tm(&sch->schib.scsw) || | 686 | if (!scsw_is_tm(&sch->schib.scsw) || |
diff --git a/drivers/s390/cio/io_sch.h b/drivers/s390/cio/io_sch.h index c4f3e7c9a854..0b8f381bd20e 100644 --- a/drivers/s390/cio/io_sch.h +++ b/drivers/s390/cio/io_sch.h | |||
@@ -107,6 +107,7 @@ struct ccw_device_private { | |||
107 | unsigned int recog_done:1; /* dev. recog. complete */ | 107 | unsigned int recog_done:1; /* dev. recog. complete */ |
108 | unsigned int fake_irb:1; /* deliver faked irb */ | 108 | unsigned int fake_irb:1; /* deliver faked irb */ |
109 | unsigned int intretry:1; /* retry internal operation */ | 109 | unsigned int intretry:1; /* retry internal operation */ |
110 | unsigned int resuming:1; /* recognition while resuming */ | ||
110 | } __attribute__((packed)) flags; | 111 | } __attribute__((packed)) flags; |
111 | unsigned long intparm; /* user interruption parameter */ | 112 | unsigned long intparm; /* user interruption parameter */ |
112 | struct qdio_irq *qdio_data; | 113 | struct qdio_irq *qdio_data; |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 7b6f46ddf3c3..d40f7a934f94 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -3,12 +3,12 @@ | |||
3 | * ESCON CLAW network driver | 3 | * ESCON CLAW network driver |
4 | * | 4 | * |
5 | * Linux for zSeries version | 5 | * Linux for zSeries version |
6 | * Copyright (C) 2002,2005 IBM Corporation | 6 | * Copyright IBM Corp. 2002, 2009 |
7 | * Author(s) Original code written by: | 7 | * Author(s) Original code written by: |
8 | * Kazuo Iimura (iimura@jp.ibm.com) | 8 | * Kazuo Iimura <iimura@jp.ibm.com> |
9 | * Rewritten by | 9 | * Rewritten by |
10 | * Andy Richter (richtera@us.ibm.com) | 10 | * Andy Richter <richtera@us.ibm.com> |
11 | * Marc Price (mwprice@us.ibm.com) | 11 | * Marc Price <mwprice@us.ibm.com> |
12 | * | 12 | * |
13 | * sysfs parms: | 13 | * sysfs parms: |
14 | * group x.x.rrrr,x.x.wwww | 14 | * group x.x.rrrr,x.x.wwww |
@@ -253,6 +253,11 @@ static void claw_free_wrt_buf(struct net_device *dev); | |||
253 | /* Functions for unpack reads */ | 253 | /* Functions for unpack reads */ |
254 | static void unpack_read(struct net_device *dev); | 254 | static void unpack_read(struct net_device *dev); |
255 | 255 | ||
256 | static int claw_pm_prepare(struct ccwgroup_device *gdev) | ||
257 | { | ||
258 | return -EPERM; | ||
259 | } | ||
260 | |||
256 | /* ccwgroup table */ | 261 | /* ccwgroup table */ |
257 | 262 | ||
258 | static struct ccwgroup_driver claw_group_driver = { | 263 | static struct ccwgroup_driver claw_group_driver = { |
@@ -264,6 +269,7 @@ static struct ccwgroup_driver claw_group_driver = { | |||
264 | .remove = claw_remove_device, | 269 | .remove = claw_remove_device, |
265 | .set_online = claw_new_device, | 270 | .set_online = claw_new_device, |
266 | .set_offline = claw_shutdown_device, | 271 | .set_offline = claw_shutdown_device, |
272 | .prepare = claw_pm_prepare, | ||
267 | }; | 273 | }; |
268 | 274 | ||
269 | /* | 275 | /* |
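CLAW devices cannot be taken through a suspend/resume cycle, so the driver simply vetoes it: any error returned from a prepare callback makes the PM core abort the transition before a single device is frozen. A driver that only needs to refuse while its group device is active could, hypothetically, veto selectively instead — a sketch, not the claw behaviour above:

/* illustration only: allow suspend while the group device is offline */
static int foo_pm_prepare(struct ccwgroup_device *gdev)
{
	return (gdev->state == CCWGROUP_ONLINE) ? -EPERM : 0;
}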
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index 54c4649a493b..222e47394437 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/net/ctcm_main.c | 2 | * drivers/s390/net/ctcm_main.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2001, 2007 | 4 | * Copyright IBM Corp. 2001, 2009 |
5 | * Author(s): | 5 | * Author(s): |
6 | * Original CTC driver(s): | 6 | * Original CTC driver(s): |
7 | * Fritz Elfert (felfert@millenux.com) | 7 | * Fritz Elfert (felfert@millenux.com) |
@@ -1688,6 +1688,38 @@ static void ctcm_remove_device(struct ccwgroup_device *cgdev) | |||
1688 | put_device(&cgdev->dev); | 1688 | put_device(&cgdev->dev); |
1689 | } | 1689 | } |
1690 | 1690 | ||
1691 | static int ctcm_pm_suspend(struct ccwgroup_device *gdev) | ||
1692 | { | ||
1693 | struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev); | ||
1694 | |||
1695 | if (gdev->state == CCWGROUP_OFFLINE) | ||
1696 | return 0; | ||
1697 | netif_device_detach(priv->channel[READ]->netdev); | ||
1698 | ctcm_close(priv->channel[READ]->netdev); | ||
1699 | ccw_device_set_offline(gdev->cdev[1]); | ||
1700 | ccw_device_set_offline(gdev->cdev[0]); | ||
1701 | return 0; | ||
1702 | } | ||
1703 | |||
1704 | static int ctcm_pm_resume(struct ccwgroup_device *gdev) | ||
1705 | { | ||
1706 | struct ctcm_priv *priv = dev_get_drvdata(&gdev->dev); | ||
1707 | int rc; | ||
1708 | |||
1709 | if (gdev->state == CCWGROUP_OFFLINE) | ||
1710 | return 0; | ||
1711 | rc = ccw_device_set_online(gdev->cdev[1]); | ||
1712 | if (rc) | ||
1713 | goto err_out; | ||
1714 | rc = ccw_device_set_online(gdev->cdev[0]); | ||
1715 | if (rc) | ||
1716 | goto err_out; | ||
1717 | ctcm_open(priv->channel[READ]->netdev); | ||
1718 | err_out: | ||
1719 | netif_device_attach(priv->channel[READ]->netdev); | ||
1720 | return rc; | ||
1721 | } | ||
1722 | |||
1691 | static struct ccwgroup_driver ctcm_group_driver = { | 1723 | static struct ccwgroup_driver ctcm_group_driver = { |
1692 | .owner = THIS_MODULE, | 1724 | .owner = THIS_MODULE, |
1693 | .name = CTC_DRIVER_NAME, | 1725 | .name = CTC_DRIVER_NAME, |
@@ -1697,6 +1729,9 @@ static struct ccwgroup_driver ctcm_group_driver = { | |||
1697 | .remove = ctcm_remove_device, | 1729 | .remove = ctcm_remove_device, |
1698 | .set_online = ctcm_new_device, | 1730 | .set_online = ctcm_new_device, |
1699 | .set_offline = ctcm_shutdown_device, | 1731 | .set_offline = ctcm_shutdown_device, |
1732 | .freeze = ctcm_pm_suspend, | ||
1733 | .thaw = ctcm_pm_resume, | ||
1734 | .restore = ctcm_pm_resume, | ||
1700 | }; | 1735 | }; |
1701 | 1736 | ||
1702 | 1737 | ||
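The ordering used here is the one the ccwgroup network drivers in this series share: detach the net_device so the stack stops transmitting, close the interface to quiesce the channels, then take the slave ccw devices offline; resume reverses the steps and reattaches the net_device even if bringing the channels back failed. A generic template of that pattern, with hypothetical foo_* names (struct foo_priv, foo_open() and foo_close() are assumed driver helpers, not part of this patch):

#include <linux/netdevice.h>
#include <asm/ccwdev.h>
#include <asm/ccwgroup.h>

struct foo_priv {
	struct net_device *netdev;
};

int foo_open(struct net_device *ndev);	/* assumed driver helpers */
int foo_close(struct net_device *ndev);

static int foo_pm_suspend(struct ccwgroup_device *gdev)
{
	struct foo_priv *priv = dev_get_drvdata(&gdev->dev);

	if (gdev->state == CCWGROUP_OFFLINE)
		return 0;
	netif_device_detach(priv->netdev);	/* stop the stack first */
	foo_close(priv->netdev);		/* quiesce the channels */
	ccw_device_set_offline(gdev->cdev[1]);
	ccw_device_set_offline(gdev->cdev[0]);
	return 0;
}

static int foo_pm_resume(struct ccwgroup_device *gdev)
{
	struct foo_priv *priv = dev_get_drvdata(&gdev->dev);
	int rc;

	if (gdev->state == CCWGROUP_OFFLINE)
		return 0;
	rc = ccw_device_set_online(gdev->cdev[1]);
	if (!rc)
		rc = ccw_device_set_online(gdev->cdev[0]);
	if (!rc)
		foo_open(priv->netdev);
	netif_device_attach(priv->netdev);	/* reattach in any case */
	return rc;
}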
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index a45bc24eb5f9..07a25c3f94b6 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -1,15 +1,12 @@ | |||
1 | /* | 1 | /* |
2 | * linux/drivers/s390/net/lcs.c | ||
3 | * | ||
4 | * Linux for S/390 Lan Channel Station Network Driver | 2 | * Linux for S/390 Lan Channel Station Network Driver |
5 | * | 3 | * |
6 | * Copyright (C) 1999-2001 IBM Deutschland Entwicklung GmbH, | 4 | * Copyright IBM Corp. 1999, 2009 |
7 | * IBM Corporation | 5 | * Author(s): Original Code written by |
8 | * Author(s): Original Code written by | 6 | * DJ Barrow <djbarrow@de.ibm.com,barrow_dj@yahoo.com> |
9 | * DJ Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com) | 7 | * Rewritten by |
10 | * Rewritten by | 8 | * Frank Pavlic <fpavlic@de.ibm.com> and |
11 | * Frank Pavlic (fpavlic@de.ibm.com) and | 9 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
12 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
13 | * | 10 | * |
14 | * This program is free software; you can redistribute it and/or modify | 11 | * This program is free software; you can redistribute it and/or modify |
15 | * it under the terms of the GNU General Public License as published by | 12 | * it under the terms of the GNU General Public License as published by |
@@ -2313,6 +2310,60 @@ lcs_remove_device(struct ccwgroup_device *ccwgdev) | |||
2313 | put_device(&ccwgdev->dev); | 2310 | put_device(&ccwgdev->dev); |
2314 | } | 2311 | } |
2315 | 2312 | ||
2313 | static int lcs_pm_suspend(struct lcs_card *card) | ||
2314 | { | ||
2315 | if (card->dev) | ||
2316 | netif_device_detach(card->dev); | ||
2317 | lcs_set_allowed_threads(card, 0); | ||
2318 | lcs_wait_for_threads(card, 0xffffffff); | ||
2319 | if (card->state != DEV_STATE_DOWN) | ||
2320 | __lcs_shutdown_device(card->gdev, 1); | ||
2321 | return 0; | ||
2322 | } | ||
2323 | |||
2324 | static int lcs_pm_resume(struct lcs_card *card) | ||
2325 | { | ||
2326 | int rc = 0; | ||
2327 | |||
2328 | if (card->state == DEV_STATE_RECOVER) | ||
2329 | rc = lcs_new_device(card->gdev); | ||
2330 | if (card->dev) | ||
2331 | netif_device_attach(card->dev); | ||
2332 | if (rc) { | ||
2333 | dev_warn(&card->gdev->dev, "The lcs device driver " | ||
2334 | "failed to recover the device\n"); | ||
2335 | } | ||
2336 | return rc; | ||
2337 | } | ||
2338 | |||
2339 | static int lcs_prepare(struct ccwgroup_device *gdev) | ||
2340 | { | ||
2341 | return 0; | ||
2342 | } | ||
2343 | |||
2344 | static void lcs_complete(struct ccwgroup_device *gdev) | ||
2345 | { | ||
2346 | return; | ||
2347 | } | ||
2348 | |||
2349 | static int lcs_freeze(struct ccwgroup_device *gdev) | ||
2350 | { | ||
2351 | struct lcs_card *card = dev_get_drvdata(&gdev->dev); | ||
2352 | return lcs_pm_suspend(card); | ||
2353 | } | ||
2354 | |||
2355 | static int lcs_thaw(struct ccwgroup_device *gdev) | ||
2356 | { | ||
2357 | struct lcs_card *card = dev_get_drvdata(&gdev->dev); | ||
2358 | return lcs_pm_resume(card); | ||
2359 | } | ||
2360 | |||
2361 | static int lcs_restore(struct ccwgroup_device *gdev) | ||
2362 | { | ||
2363 | struct lcs_card *card = dev_get_drvdata(&gdev->dev); | ||
2364 | return lcs_pm_resume(card); | ||
2365 | } | ||
2366 | |||
2316 | /** | 2367 | /** |
2317 | * LCS ccwgroup driver registration | 2368 | * LCS ccwgroup driver registration |
2318 | */ | 2369 | */ |
@@ -2325,6 +2376,11 @@ static struct ccwgroup_driver lcs_group_driver = { | |||
2325 | .remove = lcs_remove_device, | 2376 | .remove = lcs_remove_device, |
2326 | .set_online = lcs_new_device, | 2377 | .set_online = lcs_new_device, |
2327 | .set_offline = lcs_shutdown_device, | 2378 | .set_offline = lcs_shutdown_device, |
2379 | .prepare = lcs_prepare, | ||
2380 | .complete = lcs_complete, | ||
2381 | .freeze = lcs_freeze, | ||
2382 | .thaw = lcs_thaw, | ||
2383 | .restore = lcs_restore, | ||
2328 | }; | 2384 | }; |
2329 | 2385 | ||
2330 | /** | 2386 | /** |
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index aec9e5d3cf4b..fdb02d043d3e 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -1,11 +1,15 @@ | |||
1 | /* | 1 | /* |
2 | * IUCV network driver | 2 | * IUCV network driver |
3 | * | 3 | * |
4 | * Copyright 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation | 4 | * Copyright IBM Corp. 2001, 2009 |
5 | * Author(s): Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) | ||
6 | * | 5 | * |
7 | * Sysfs integration and all bugs therein by Cornelia Huck | 6 | * Author(s): |
8 | * (cornelia.huck@de.ibm.com) | 7 | * Original netiucv driver: |
8 | * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) | ||
9 | * Sysfs integration and all bugs therein: | ||
10 | * Cornelia Huck (cornelia.huck@de.ibm.com) | ||
11 | * PM functions: | ||
12 | * Ursula Braun (ursula.braun@de.ibm.com) | ||
9 | * | 13 | * |
10 | * Documentation used: | 14 | * Documentation used: |
11 | * the source of the original IUCV driver by: | 15 | * the source of the original IUCV driver by: |
@@ -149,10 +153,27 @@ PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ | |||
149 | 153 | ||
150 | #define PRINTK_HEADER " iucv: " /* for debugging */ | 154 | #define PRINTK_HEADER " iucv: " /* for debugging */ |
151 | 155 | ||
156 | /* dummy device to make sure netiucv_pm functions are called */ | ||
157 | static struct device *netiucv_dev; | ||
158 | |||
159 | static int netiucv_pm_prepare(struct device *); | ||
160 | static void netiucv_pm_complete(struct device *); | ||
161 | static int netiucv_pm_freeze(struct device *); | ||
162 | static int netiucv_pm_restore_thaw(struct device *); | ||
163 | |||
164 | static struct dev_pm_ops netiucv_pm_ops = { | ||
165 | .prepare = netiucv_pm_prepare, | ||
166 | .complete = netiucv_pm_complete, | ||
167 | .freeze = netiucv_pm_freeze, | ||
168 | .thaw = netiucv_pm_restore_thaw, | ||
169 | .restore = netiucv_pm_restore_thaw, | ||
170 | }; | ||
171 | |||
152 | static struct device_driver netiucv_driver = { | 172 | static struct device_driver netiucv_driver = { |
153 | .owner = THIS_MODULE, | 173 | .owner = THIS_MODULE, |
154 | .name = "netiucv", | 174 | .name = "netiucv", |
155 | .bus = &iucv_bus, | 175 | .bus = &iucv_bus, |
176 | .pm = &netiucv_pm_ops, | ||
156 | }; | 177 | }; |
157 | 178 | ||
158 | static int netiucv_callback_connreq(struct iucv_path *, | 179 | static int netiucv_callback_connreq(struct iucv_path *, |
@@ -233,6 +254,7 @@ struct netiucv_priv { | |||
233 | fsm_instance *fsm; | 254 | fsm_instance *fsm; |
234 | struct iucv_connection *conn; | 255 | struct iucv_connection *conn; |
235 | struct device *dev; | 256 | struct device *dev; |
257 | int pm_state; | ||
236 | }; | 258 | }; |
237 | 259 | ||
238 | /** | 260 | /** |
@@ -1265,6 +1287,72 @@ static int netiucv_close(struct net_device *dev) | |||
1265 | return 0; | 1287 | return 0; |
1266 | } | 1288 | } |
1267 | 1289 | ||
1290 | static int netiucv_pm_prepare(struct device *dev) | ||
1291 | { | ||
1292 | IUCV_DBF_TEXT(trace, 3, __func__); | ||
1293 | return 0; | ||
1294 | } | ||
1295 | |||
1296 | static void netiucv_pm_complete(struct device *dev) | ||
1297 | { | ||
1298 | IUCV_DBF_TEXT(trace, 3, __func__); | ||
1299 | return; | ||
1300 | } | ||
1301 | |||
1302 | /** | ||
1303 | * netiucv_pm_freeze() - Freeze PM callback | ||
1304 | * @dev: netiucv device | ||
1305 | * | ||
1306 | * close open netiucv interfaces | ||
1307 | */ | ||
1308 | static int netiucv_pm_freeze(struct device *dev) | ||
1309 | { | ||
1310 | struct netiucv_priv *priv = dev->driver_data; | ||
1311 | struct net_device *ndev = NULL; | ||
1312 | int rc = 0; | ||
1313 | |||
1314 | IUCV_DBF_TEXT(trace, 3, __func__); | ||
1315 | if (priv && priv->conn) | ||
1316 | ndev = priv->conn->netdev; | ||
1317 | if (!ndev) | ||
1318 | goto out; | ||
1319 | netif_device_detach(ndev); | ||
1320 | priv->pm_state = fsm_getstate(priv->fsm); | ||
1321 | rc = netiucv_close(ndev); | ||
1322 | out: | ||
1323 | return rc; | ||
1324 | } | ||
1325 | |||
1326 | /** | ||
1327 | * netiucv_pm_restore_thaw() - Thaw and restore PM callback | ||
1328 | * @dev: netiucv device | ||
1329 | * | ||
1330 | * re-open netiucv interfaces closed during freeze | ||
1331 | */ | ||
1332 | static int netiucv_pm_restore_thaw(struct device *dev) | ||
1333 | { | ||
1334 | struct netiucv_priv *priv = dev->driver_data; | ||
1335 | struct net_device *ndev = NULL; | ||
1336 | int rc = 0; | ||
1337 | |||
1338 | IUCV_DBF_TEXT(trace, 3, __func__); | ||
1339 | if (priv && priv->conn) | ||
1340 | ndev = priv->conn->netdev; | ||
1341 | if (!ndev) | ||
1342 | goto out; | ||
1343 | switch (priv->pm_state) { | ||
1344 | case DEV_STATE_RUNNING: | ||
1345 | case DEV_STATE_STARTWAIT: | ||
1346 | rc = netiucv_open(ndev); | ||
1347 | break; | ||
1348 | default: | ||
1349 | break; | ||
1350 | } | ||
1351 | netif_device_attach(ndev); | ||
1352 | out: | ||
1353 | return rc; | ||
1354 | } | ||
1355 | |||
1268 | /** | 1356 | /** |
1269 | * Start transmission of a packet. | 1357 | * Start transmission of a packet. |
1270 | * Called from generic network device layer. | 1358 | * Called from generic network device layer. |
@@ -1731,7 +1819,6 @@ static int netiucv_register_device(struct net_device *ndev) | |||
1731 | struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); | 1819 | struct device *dev = kzalloc(sizeof(struct device), GFP_KERNEL); |
1732 | int ret; | 1820 | int ret; |
1733 | 1821 | ||
1734 | |||
1735 | IUCV_DBF_TEXT(trace, 3, __func__); | 1822 | IUCV_DBF_TEXT(trace, 3, __func__); |
1736 | 1823 | ||
1737 | if (dev) { | 1824 | if (dev) { |
@@ -2100,6 +2187,7 @@ static void __exit netiucv_exit(void) | |||
2100 | netiucv_unregister_device(dev); | 2187 | netiucv_unregister_device(dev); |
2101 | } | 2188 | } |
2102 | 2189 | ||
2190 | device_unregister(netiucv_dev); | ||
2103 | driver_unregister(&netiucv_driver); | 2191 | driver_unregister(&netiucv_driver); |
2104 | iucv_unregister(&netiucv_handler, 1); | 2192 | iucv_unregister(&netiucv_handler, 1); |
2105 | iucv_unregister_dbf_views(); | 2193 | iucv_unregister_dbf_views(); |
@@ -2125,10 +2213,25 @@ static int __init netiucv_init(void) | |||
2125 | IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); | 2213 | IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_register\n", rc); |
2126 | goto out_iucv; | 2214 | goto out_iucv; |
2127 | } | 2215 | } |
2128 | 2216 | /* establish dummy device */ | |
2217 | netiucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); | ||
2218 | if (!netiucv_dev) { | ||
2219 | rc = -ENOMEM; | ||
2220 | goto out_driver; | ||
2221 | } | ||
2222 | dev_set_name(netiucv_dev, "netiucv"); | ||
2223 | netiucv_dev->bus = &iucv_bus; | ||
2224 | netiucv_dev->parent = iucv_root; | ||
2225 | netiucv_dev->release = (void (*)(struct device *))kfree; | ||
2226 | netiucv_dev->driver = &netiucv_driver; | ||
2227 | rc = device_register(netiucv_dev); | ||
2228 | if (rc) | ||
2229 | goto out_driver; | ||
2129 | netiucv_banner(); | 2230 | netiucv_banner(); |
2130 | return rc; | 2231 | return rc; |
2131 | 2232 | ||
2233 | out_driver: | ||
2234 | driver_unregister(&netiucv_driver); | ||
2132 | out_iucv: | 2235 | out_iucv: |
2133 | iucv_unregister(&netiucv_handler, 1); | 2236 | iucv_unregister(&netiucv_handler, 1); |
2134 | out_dbf: | 2237 | out_dbf: |
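netiucv, and likewise smsgiucv and af_iucv further down, each register a pseudo device on the IUCV bus purely so the PM core has a device to run their dev_pm_ops against. A hypothetical helper (not part of this patch) capturing that shared boilerplate might look like this:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <net/iucv/iucv.h>

static struct device *iucv_alloc_pm_device(const char *name,
					   struct device_driver *drv)
{
	struct device *dev;
	int rc;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);
	dev_set_name(dev, "%s", name);
	dev->bus = &iucv_bus;
	dev->parent = iucv_root;
	dev->release = (void (*)(struct device *))kfree;
	dev->driver = drv;
	rc = device_register(dev);
	if (rc) {
		put_device(dev);	/* release() frees the allocation */
		return ERR_PTR(rc);
	}
	return dev;
}

The three call sites would then differ only in the device name and the driver whose dev_pm_ops should be triggered.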
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 74c49d9a8dba..d53621c4acbb 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/net/qeth_core_main.c | 2 | * drivers/s390/net/qeth_core_main.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2007 | 4 | * Copyright IBM Corp. 2007, 2009 |
5 | * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, | 5 | * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, |
6 | * Frank Pavlic <fpavlic@de.ibm.com>, | 6 | * Frank Pavlic <fpavlic@de.ibm.com>, |
7 | * Thomas Spatzier <tspat@de.ibm.com>, | 7 | * Thomas Spatzier <tspat@de.ibm.com>, |
@@ -4195,6 +4195,50 @@ static void qeth_core_shutdown(struct ccwgroup_device *gdev) | |||
4195 | card->discipline.ccwgdriver->shutdown(gdev); | 4195 | card->discipline.ccwgdriver->shutdown(gdev); |
4196 | } | 4196 | } |
4197 | 4197 | ||
4198 | static int qeth_core_prepare(struct ccwgroup_device *gdev) | ||
4199 | { | ||
4200 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | ||
4201 | if (card->discipline.ccwgdriver && | ||
4202 | card->discipline.ccwgdriver->prepare) | ||
4203 | return card->discipline.ccwgdriver->prepare(gdev); | ||
4204 | return 0; | ||
4205 | } | ||
4206 | |||
4207 | static void qeth_core_complete(struct ccwgroup_device *gdev) | ||
4208 | { | ||
4209 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | ||
4210 | if (card->discipline.ccwgdriver && | ||
4211 | card->discipline.ccwgdriver->complete) | ||
4212 | card->discipline.ccwgdriver->complete(gdev); | ||
4213 | } | ||
4214 | |||
4215 | static int qeth_core_freeze(struct ccwgroup_device *gdev) | ||
4216 | { | ||
4217 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | ||
4218 | if (card->discipline.ccwgdriver && | ||
4219 | card->discipline.ccwgdriver->freeze) | ||
4220 | return card->discipline.ccwgdriver->freeze(gdev); | ||
4221 | return 0; | ||
4222 | } | ||
4223 | |||
4224 | static int qeth_core_thaw(struct ccwgroup_device *gdev) | ||
4225 | { | ||
4226 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | ||
4227 | if (card->discipline.ccwgdriver && | ||
4228 | card->discipline.ccwgdriver->thaw) | ||
4229 | return card->discipline.ccwgdriver->thaw(gdev); | ||
4230 | return 0; | ||
4231 | } | ||
4232 | |||
4233 | static int qeth_core_restore(struct ccwgroup_device *gdev) | ||
4234 | { | ||
4235 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | ||
4236 | if (card->discipline.ccwgdriver && | ||
4237 | card->discipline.ccwgdriver->restore) | ||
4238 | return card->discipline.ccwgdriver->restore(gdev); | ||
4239 | return 0; | ||
4240 | } | ||
4241 | |||
4198 | static struct ccwgroup_driver qeth_core_ccwgroup_driver = { | 4242 | static struct ccwgroup_driver qeth_core_ccwgroup_driver = { |
4199 | .owner = THIS_MODULE, | 4243 | .owner = THIS_MODULE, |
4200 | .name = "qeth", | 4244 | .name = "qeth", |
@@ -4204,6 +4248,11 @@ static struct ccwgroup_driver qeth_core_ccwgroup_driver = { | |||
4204 | .set_online = qeth_core_set_online, | 4248 | .set_online = qeth_core_set_online, |
4205 | .set_offline = qeth_core_set_offline, | 4249 | .set_offline = qeth_core_set_offline, |
4206 | .shutdown = qeth_core_shutdown, | 4250 | .shutdown = qeth_core_shutdown, |
4251 | .prepare = qeth_core_prepare, | ||
4252 | .complete = qeth_core_complete, | ||
4253 | .freeze = qeth_core_freeze, | ||
4254 | .thaw = qeth_core_thaw, | ||
4255 | .restore = qeth_core_restore, | ||
4207 | }; | 4256 | }; |
4208 | 4257 | ||
4209 | static ssize_t | 4258 | static ssize_t |
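All five qeth_core_* callbacks do nothing but forward to the discipline driver (the L2 and L3 code below) when it supplies the matching hook. As a design note, the four int-returning forwarders could also be generated instead of open-coded; a hypothetical sketch, not part of the patch:

#define QETH_CORE_PM_FORWARD(op)					\
static int qeth_core_##op(struct ccwgroup_device *gdev)		\
{									\
	struct qeth_card *card = dev_get_drvdata(&gdev->dev);		\
									\
	if (card->discipline.ccwgdriver &&				\
	    card->discipline.ccwgdriver->op)				\
		return card->discipline.ccwgdriver->op(gdev);		\
	return 0;							\
}

QETH_CORE_PM_FORWARD(prepare)
QETH_CORE_PM_FORWARD(freeze)
QETH_CORE_PM_FORWARD(thaw)
QETH_CORE_PM_FORWARD(restore)

The complete callback returns void and would keep its open-coded form either way; whether the macro is clearer than five small functions is a matter of taste.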
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index ecd3d06c0d5c..81d7f268418a 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/net/qeth_l2_main.c | 2 | * drivers/s390/net/qeth_l2_main.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2007 | 4 | * Copyright IBM Corp. 2007, 2009 |
5 | * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, | 5 | * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, |
6 | * Frank Pavlic <fpavlic@de.ibm.com>, | 6 | * Frank Pavlic <fpavlic@de.ibm.com>, |
7 | * Thomas Spatzier <tspat@de.ibm.com>, | 7 | * Thomas Spatzier <tspat@de.ibm.com>, |
@@ -1141,12 +1141,62 @@ static void qeth_l2_shutdown(struct ccwgroup_device *gdev) | |||
1141 | qeth_clear_qdio_buffers(card); | 1141 | qeth_clear_qdio_buffers(card); |
1142 | } | 1142 | } |
1143 | 1143 | ||
1144 | static int qeth_l2_pm_suspend(struct ccwgroup_device *gdev) | ||
1145 | { | ||
1146 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | ||
1147 | |||
1148 | if (card->dev) | ||
1149 | netif_device_detach(card->dev); | ||
1150 | qeth_set_allowed_threads(card, 0, 1); | ||
1151 | wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); | ||
1152 | if (gdev->state == CCWGROUP_OFFLINE) | ||
1153 | return 0; | ||
1154 | if (card->state == CARD_STATE_UP) { | ||
1155 | card->use_hard_stop = 1; | ||
1156 | __qeth_l2_set_offline(card->gdev, 1); | ||
1157 | } else | ||
1158 | __qeth_l2_set_offline(card->gdev, 0); | ||
1159 | return 0; | ||
1160 | } | ||
1161 | |||
1162 | static int qeth_l2_pm_resume(struct ccwgroup_device *gdev) | ||
1163 | { | ||
1164 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | ||
1165 | int rc = 0; | ||
1166 | |||
1167 | if (gdev->state == CCWGROUP_OFFLINE) | ||
1168 | goto out; | ||
1169 | |||
1170 | if (card->state == CARD_STATE_RECOVER) { | ||
1171 | rc = __qeth_l2_set_online(card->gdev, 1); | ||
1172 | if (rc) { | ||
1173 | if (card->dev) { | ||
1174 | rtnl_lock(); | ||
1175 | dev_close(card->dev); | ||
1176 | rtnl_unlock(); | ||
1177 | } | ||
1178 | } | ||
1179 | } else | ||
1180 | rc = __qeth_l2_set_online(card->gdev, 0); | ||
1181 | out: | ||
1182 | qeth_set_allowed_threads(card, 0xffffffff, 0); | ||
1183 | if (card->dev) | ||
1184 | netif_device_attach(card->dev); | ||
1185 | if (rc) | ||
1186 | dev_warn(&card->gdev->dev, "The qeth device driver " | ||
1187 | "failed to recover an error on the device\n"); | ||
1188 | return rc; | ||
1189 | } | ||
1190 | |||
1144 | struct ccwgroup_driver qeth_l2_ccwgroup_driver = { | 1191 | struct ccwgroup_driver qeth_l2_ccwgroup_driver = { |
1145 | .probe = qeth_l2_probe_device, | 1192 | .probe = qeth_l2_probe_device, |
1146 | .remove = qeth_l2_remove_device, | 1193 | .remove = qeth_l2_remove_device, |
1147 | .set_online = qeth_l2_set_online, | 1194 | .set_online = qeth_l2_set_online, |
1148 | .set_offline = qeth_l2_set_offline, | 1195 | .set_offline = qeth_l2_set_offline, |
1149 | .shutdown = qeth_l2_shutdown, | 1196 | .shutdown = qeth_l2_shutdown, |
1197 | .freeze = qeth_l2_pm_suspend, | ||
1198 | .thaw = qeth_l2_pm_resume, | ||
1199 | .restore = qeth_l2_pm_resume, | ||
1150 | }; | 1200 | }; |
1151 | EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver); | 1201 | EXPORT_SYMBOL_GPL(qeth_l2_ccwgroup_driver); |
1152 | 1202 | ||
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 6f2386e9d6e2..54872406864e 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/net/qeth_l3_main.c | 2 | * drivers/s390/net/qeth_l3_main.c |
3 | * | 3 | * |
4 | * Copyright IBM Corp. 2007 | 4 | * Copyright IBM Corp. 2007, 2009 |
5 | * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, | 5 | * Author(s): Utz Bacher <utz.bacher@de.ibm.com>, |
6 | * Frank Pavlic <fpavlic@de.ibm.com>, | 6 | * Frank Pavlic <fpavlic@de.ibm.com>, |
7 | * Thomas Spatzier <tspat@de.ibm.com>, | 7 | * Thomas Spatzier <tspat@de.ibm.com>, |
@@ -3283,12 +3283,62 @@ static void qeth_l3_shutdown(struct ccwgroup_device *gdev) | |||
3283 | qeth_clear_qdio_buffers(card); | 3283 | qeth_clear_qdio_buffers(card); |
3284 | } | 3284 | } |
3285 | 3285 | ||
3286 | static int qeth_l3_pm_suspend(struct ccwgroup_device *gdev) | ||
3287 | { | ||
3288 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | ||
3289 | |||
3290 | if (card->dev) | ||
3291 | netif_device_detach(card->dev); | ||
3292 | qeth_set_allowed_threads(card, 0, 1); | ||
3293 | wait_event(card->wait_q, qeth_threads_running(card, 0xffffffff) == 0); | ||
3294 | if (gdev->state == CCWGROUP_OFFLINE) | ||
3295 | return 0; | ||
3296 | if (card->state == CARD_STATE_UP) { | ||
3297 | card->use_hard_stop = 1; | ||
3298 | __qeth_l3_set_offline(card->gdev, 1); | ||
3299 | } else | ||
3300 | __qeth_l3_set_offline(card->gdev, 0); | ||
3301 | return 0; | ||
3302 | } | ||
3303 | |||
3304 | static int qeth_l3_pm_resume(struct ccwgroup_device *gdev) | ||
3305 | { | ||
3306 | struct qeth_card *card = dev_get_drvdata(&gdev->dev); | ||
3307 | int rc = 0; | ||
3308 | |||
3309 | if (gdev->state == CCWGROUP_OFFLINE) | ||
3310 | goto out; | ||
3311 | |||
3312 | if (card->state == CARD_STATE_RECOVER) { | ||
3313 | rc = __qeth_l3_set_online(card->gdev, 1); | ||
3314 | if (rc) { | ||
3315 | if (card->dev) { | ||
3316 | rtnl_lock(); | ||
3317 | dev_close(card->dev); | ||
3318 | rtnl_unlock(); | ||
3319 | } | ||
3320 | } | ||
3321 | } else | ||
3322 | rc = __qeth_l3_set_online(card->gdev, 0); | ||
3323 | out: | ||
3324 | qeth_set_allowed_threads(card, 0xffffffff, 0); | ||
3325 | if (card->dev) | ||
3326 | netif_device_attach(card->dev); | ||
3327 | if (rc) | ||
3328 | dev_warn(&card->gdev->dev, "The qeth device driver " | ||
3329 | "failed to recover an error on the device\n"); | ||
3330 | return rc; | ||
3331 | } | ||
3332 | |||
3286 | struct ccwgroup_driver qeth_l3_ccwgroup_driver = { | 3333 | struct ccwgroup_driver qeth_l3_ccwgroup_driver = { |
3287 | .probe = qeth_l3_probe_device, | 3334 | .probe = qeth_l3_probe_device, |
3288 | .remove = qeth_l3_remove_device, | 3335 | .remove = qeth_l3_remove_device, |
3289 | .set_online = qeth_l3_set_online, | 3336 | .set_online = qeth_l3_set_online, |
3290 | .set_offline = qeth_l3_set_offline, | 3337 | .set_offline = qeth_l3_set_offline, |
3291 | .shutdown = qeth_l3_shutdown, | 3338 | .shutdown = qeth_l3_shutdown, |
3339 | .freeze = qeth_l3_pm_suspend, | ||
3340 | .thaw = qeth_l3_pm_resume, | ||
3341 | .restore = qeth_l3_pm_resume, | ||
3292 | }; | 3342 | }; |
3293 | EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver); | 3343 | EXPORT_SYMBOL_GPL(qeth_l3_ccwgroup_driver); |
3294 | 3344 | ||
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 164e090c2625..e76a320d373b 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c | |||
@@ -1,7 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * IUCV special message driver | 2 | * IUCV special message driver |
3 | * | 3 | * |
4 | * Copyright 2003 IBM Deutschland Entwicklung GmbH, IBM Corporation | 4 | * Copyright IBM Corp. 2003, 2009 |
5 | * | ||
5 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | 6 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) |
6 | * | 7 | * |
7 | * This program is free software; you can redistribute it and/or modify | 8 | * This program is free software; you can redistribute it and/or modify |
@@ -40,6 +41,8 @@ MODULE_AUTHOR | |||
40 | MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver"); | 41 | MODULE_DESCRIPTION ("Linux for S/390 IUCV special message driver"); |
41 | 42 | ||
42 | static struct iucv_path *smsg_path; | 43 | static struct iucv_path *smsg_path; |
44 | /* dummy device used as trigger for PM functions */ | ||
45 | static struct device *smsg_dev; | ||
43 | 46 | ||
44 | static DEFINE_SPINLOCK(smsg_list_lock); | 47 | static DEFINE_SPINLOCK(smsg_list_lock); |
45 | static LIST_HEAD(smsg_list); | 48 | static LIST_HEAD(smsg_list); |
@@ -132,14 +135,51 @@ void smsg_unregister_callback(char *prefix, | |||
132 | kfree(cb); | 135 | kfree(cb); |
133 | } | 136 | } |
134 | 137 | ||
138 | static int smsg_pm_freeze(struct device *dev) | ||
139 | { | ||
140 | #ifdef CONFIG_PM_DEBUG | ||
141 | printk(KERN_WARNING "smsg_pm_freeze\n"); | ||
142 | #endif | ||
143 | if (smsg_path) | ||
144 | iucv_path_sever(smsg_path, NULL); | ||
145 | return 0; | ||
146 | } | ||
147 | |||
148 | static int smsg_pm_restore_thaw(struct device *dev) | ||
149 | { | ||
150 | int rc; | ||
151 | |||
152 | #ifdef CONFIG_PM_DEBUG | ||
153 | printk(KERN_WARNING "smsg_pm_restore_thaw\n"); | ||
154 | #endif | ||
155 | if (smsg_path) { | ||
156 | memset(smsg_path, 0, sizeof(*smsg_path)); | ||
157 | smsg_path->msglim = 255; | ||
158 | smsg_path->flags = 0; | ||
159 | rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", | ||
160 | NULL, NULL, NULL); | ||
161 | printk(KERN_ERR "iucv_path_connect returned with rc %i\n", rc); | ||
162 | } | ||
163 | return 0; | ||
164 | } | ||
165 | |||
166 | static struct dev_pm_ops smsg_pm_ops = { | ||
167 | .freeze = smsg_pm_freeze, | ||
168 | .thaw = smsg_pm_restore_thaw, | ||
169 | .restore = smsg_pm_restore_thaw, | ||
170 | }; | ||
171 | |||
135 | static struct device_driver smsg_driver = { | 172 | static struct device_driver smsg_driver = { |
173 | .owner = THIS_MODULE, | ||
136 | .name = "SMSGIUCV", | 174 | .name = "SMSGIUCV", |
137 | .bus = &iucv_bus, | 175 | .bus = &iucv_bus, |
176 | .pm = &smsg_pm_ops, | ||
138 | }; | 177 | }; |
139 | 178 | ||
140 | static void __exit smsg_exit(void) | 179 | static void __exit smsg_exit(void) |
141 | { | 180 | { |
142 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); | 181 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); |
182 | device_unregister(smsg_dev); | ||
143 | iucv_unregister(&smsg_handler, 1); | 183 | iucv_unregister(&smsg_handler, 1); |
144 | driver_unregister(&smsg_driver); | 184 | driver_unregister(&smsg_driver); |
145 | } | 185 | } |
@@ -166,12 +206,29 @@ static int __init smsg_init(void) | |||
166 | rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", | 206 | rc = iucv_path_connect(smsg_path, &smsg_handler, "*MSG ", |
167 | NULL, NULL, NULL); | 207 | NULL, NULL, NULL); |
168 | if (rc) | 208 | if (rc) |
169 | goto out_free; | 209 | goto out_free_path; |
210 | smsg_dev = kzalloc(sizeof(struct device), GFP_KERNEL); | ||
211 | if (!smsg_dev) { | ||
212 | rc = -ENOMEM; | ||
213 | goto out_free_path; | ||
214 | } | ||
215 | dev_set_name(smsg_dev, "smsg_iucv"); | ||
216 | smsg_dev->bus = &iucv_bus; | ||
217 | smsg_dev->parent = iucv_root; | ||
218 | smsg_dev->release = (void (*)(struct device *))kfree; | ||
219 | smsg_dev->driver = &smsg_driver; | ||
220 | rc = device_register(smsg_dev); | ||
221 | if (rc) | ||
222 | goto out_free_dev; | ||
223 | |||
170 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); | 224 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); |
171 | return 0; | 225 | return 0; |
172 | 226 | ||
173 | out_free: | 227 | out_free_dev: |
228 | kfree(smsg_dev); | ||
229 | out_free_path: | ||
174 | iucv_path_free(smsg_path); | 230 | iucv_path_free(smsg_path); |
231 | smsg_path = NULL; | ||
175 | out_register: | 232 | out_register: |
176 | iucv_unregister(&smsg_handler, 1); | 233 | iucv_unregister(&smsg_handler, 1); |
177 | out_driver: | 234 | out_driver: |
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index b2fe5cdbcaee..d9da5c42ccbe 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c | |||
@@ -13,6 +13,36 @@ | |||
13 | 13 | ||
14 | #define ZFCP_MODEL_PRIV 0x4 | 14 | #define ZFCP_MODEL_PRIV 0x4 |
15 | 15 | ||
16 | static int zfcp_ccw_suspend(struct ccw_device *cdev) | ||
17 | |||
18 | { | ||
19 | struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); | ||
20 | |||
21 | down(&zfcp_data.config_sema); | ||
22 | |||
23 | zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL); | ||
24 | zfcp_erp_wait(adapter); | ||
25 | |||
26 | up(&zfcp_data.config_sema); | ||
27 | |||
28 | return 0; | ||
29 | } | ||
30 | |||
31 | static int zfcp_ccw_activate(struct ccw_device *cdev) | ||
32 | |||
33 | { | ||
34 | struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev); | ||
35 | |||
36 | zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL, | ||
37 | ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET); | ||
38 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED, | ||
39 | "ccresu2", NULL); | ||
40 | zfcp_erp_wait(adapter); | ||
41 | flush_work(&adapter->scan_work); | ||
42 | |||
43 | return 0; | ||
44 | } | ||
45 | |||
16 | static struct ccw_device_id zfcp_ccw_device_id[] = { | 46 | static struct ccw_device_id zfcp_ccw_device_id[] = { |
17 | { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) }, | 47 | { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, 0x3) }, |
18 | { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) }, | 48 | { CCW_DEVICE_DEVTYPE(0x1731, 0x3, 0x1732, ZFCP_MODEL_PRIV) }, |
@@ -227,6 +257,9 @@ static struct ccw_driver zfcp_ccw_driver = { | |||
227 | .set_offline = zfcp_ccw_set_offline, | 257 | .set_offline = zfcp_ccw_set_offline, |
228 | .notify = zfcp_ccw_notify, | 258 | .notify = zfcp_ccw_notify, |
229 | .shutdown = zfcp_ccw_shutdown, | 259 | .shutdown = zfcp_ccw_shutdown, |
260 | .freeze = zfcp_ccw_suspend, | ||
261 | .thaw = zfcp_ccw_activate, | ||
262 | .restore = zfcp_ccw_activate, | ||
230 | }; | 263 | }; |
231 | 264 | ||
232 | /** | 265 | /** |
diff --git a/mm/Kconfig b/mm/Kconfig index 71830ba7b986..6f4610a9ce55 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -128,11 +128,11 @@ config SPARSEMEM_VMEMMAP | |||
128 | config MEMORY_HOTPLUG | 128 | config MEMORY_HOTPLUG |
129 | bool "Allow for memory hot-add" | 129 | bool "Allow for memory hot-add" |
130 | depends on SPARSEMEM || X86_64_ACPI_NUMA | 130 | depends on SPARSEMEM || X86_64_ACPI_NUMA |
131 | depends on HOTPLUG && !HIBERNATION && ARCH_ENABLE_MEMORY_HOTPLUG | 131 | depends on HOTPLUG && !(HIBERNATION && !S390) && ARCH_ENABLE_MEMORY_HOTPLUG |
132 | depends on (IA64 || X86 || PPC64 || SUPERH || S390) | 132 | depends on (IA64 || X86 || PPC64 || SUPERH || S390) |
133 | 133 | ||
134 | comment "Memory hotplug is currently incompatible with Software Suspend" | 134 | comment "Memory hotplug is currently incompatible with Software Suspend" |
135 | depends on SPARSEMEM && HOTPLUG && HIBERNATION | 135 | depends on SPARSEMEM && HOTPLUG && HIBERNATION && !S390 |
136 | 136 | ||
137 | config MEMORY_HOTPLUG_SPARSE | 137 | config MEMORY_HOTPLUG_SPARSE |
138 | def_bool y | 138 | def_bool y |
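The reworked dependency is easier to read when spelled out as a boolean: memory hotplug is now ruled out only when hibernation is enabled on something other than s390, instead of whenever hibernation is enabled at all. A small illustration in C (the parameter names simply mirror the Kconfig symbols):

#include <stdbool.h>

/* mirrors: HOTPLUG && !(HIBERNATION && !S390) && ARCH_ENABLE_MEMORY_HOTPLUG */
static bool memory_hotplug_selectable(bool hotplug, bool hibernation,
				      bool s390, bool arch_hotplug)
{
	return hotplug && !(hibernation && !s390) && arch_hotplug;
}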
diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index a9b3a6f9ea95..656cbd195825 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c | |||
@@ -1,11 +1,12 @@ | |||
1 | /* | 1 | /* |
2 | * linux/net/iucv/af_iucv.c | ||
3 | * | ||
4 | * IUCV protocol stack for Linux on zSeries | 2 | * IUCV protocol stack for Linux on zSeries |
5 | * | 3 | * |
6 | * Copyright 2006 IBM Corporation | 4 | * Copyright IBM Corp. 2006, 2009 |
7 | * | 5 | * |
8 | * Author(s): Jennifer Hunt <jenhunt@us.ibm.com> | 6 | * Author(s): Jennifer Hunt <jenhunt@us.ibm.com> |
7 | * Hendrik Brueckner <brueckner@linux.vnet.ibm.com> | ||
8 | * PM functions: | ||
9 | * Ursula Braun <ursula.braun@de.ibm.com> | ||
9 | */ | 10 | */ |
10 | 11 | ||
11 | #define KMSG_COMPONENT "af_iucv" | 12 | #define KMSG_COMPONENT "af_iucv" |
@@ -90,6 +91,122 @@ static inline void low_nmcpy(unsigned char *dst, char *src) | |||
90 | memcpy(&dst[8], src, 8); | 91 | memcpy(&dst[8], src, 8); |
91 | } | 92 | } |
92 | 93 | ||
94 | static int afiucv_pm_prepare(struct device *dev) | ||
95 | { | ||
96 | #ifdef CONFIG_PM_DEBUG | ||
97 | printk(KERN_WARNING "afiucv_pm_prepare\n"); | ||
98 | #endif | ||
99 | return 0; | ||
100 | } | ||
101 | |||
102 | static void afiucv_pm_complete(struct device *dev) | ||
103 | { | ||
104 | #ifdef CONFIG_PM_DEBUG | ||
105 | printk(KERN_WARNING "afiucv_pm_complete\n"); | ||
106 | #endif | ||
107 | return; | ||
108 | } | ||
109 | |||
110 | /** | ||
111 | * afiucv_pm_freeze() - Freeze PM callback | ||
112 | * @dev: AFIUCV dummy device | ||
113 | * | ||
114 | * Sever all established IUCV communication paths | ||
115 | */ | ||
116 | static int afiucv_pm_freeze(struct device *dev) | ||
117 | { | ||
118 | struct iucv_sock *iucv; | ||
119 | struct sock *sk; | ||
120 | struct hlist_node *node; | ||
121 | int err = 0; | ||
122 | |||
123 | #ifdef CONFIG_PM_DEBUG | ||
124 | printk(KERN_WARNING "afiucv_pm_freeze\n"); | ||
125 | #endif | ||
126 | read_lock(&iucv_sk_list.lock); | ||
127 | sk_for_each(sk, node, &iucv_sk_list.head) { | ||
128 | iucv = iucv_sk(sk); | ||
129 | skb_queue_purge(&iucv->send_skb_q); | ||
130 | skb_queue_purge(&iucv->backlog_skb_q); | ||
131 | switch (sk->sk_state) { | ||
132 | case IUCV_SEVERED: | ||
133 | case IUCV_DISCONN: | ||
134 | case IUCV_CLOSING: | ||
135 | case IUCV_CONNECTED: | ||
136 | if (iucv->path) { | ||
137 | err = iucv_path_sever(iucv->path, NULL); | ||
138 | iucv_path_free(iucv->path); | ||
139 | iucv->path = NULL; | ||
140 | } | ||
141 | break; | ||
142 | case IUCV_OPEN: | ||
143 | case IUCV_BOUND: | ||
144 | case IUCV_LISTEN: | ||
145 | case IUCV_CLOSED: | ||
146 | default: | ||
147 | break; | ||
148 | } | ||
149 | } | ||
150 | read_unlock(&iucv_sk_list.lock); | ||
151 | return err; | ||
152 | } | ||
153 | |||
154 | /** | ||
155 | * afiucv_pm_restore_thaw() - Thaw and restore PM callback | ||
156 | * @dev: AFIUCV dummy device | ||
157 | * | ||
158 | * socket cleanup after freeze | ||
159 | */ | ||
160 | static int afiucv_pm_restore_thaw(struct device *dev) | ||
161 | { | ||
162 | struct iucv_sock *iucv; | ||
163 | struct sock *sk; | ||
164 | struct hlist_node *node; | ||
165 | |||
166 | #ifdef CONFIG_PM_DEBUG | ||
167 | printk(KERN_WARNING "afiucv_pm_restore_thaw\n"); | ||
168 | #endif | ||
169 | read_lock(&iucv_sk_list.lock); | ||
170 | sk_for_each(sk, node, &iucv_sk_list.head) { | ||
171 | iucv = iucv_sk(sk); | ||
172 | switch (sk->sk_state) { | ||
173 | case IUCV_CONNECTED: | ||
174 | sk->sk_err = EPIPE; | ||
175 | sk->sk_state = IUCV_DISCONN; | ||
176 | sk->sk_state_change(sk); | ||
177 | break; | ||
178 | case IUCV_DISCONN: | ||
179 | case IUCV_SEVERED: | ||
180 | case IUCV_CLOSING: | ||
181 | case IUCV_LISTEN: | ||
182 | case IUCV_BOUND: | ||
183 | case IUCV_OPEN: | ||
184 | default: | ||
185 | break; | ||
186 | } | ||
187 | } | ||
188 | read_unlock(&iucv_sk_list.lock); | ||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | static struct dev_pm_ops afiucv_pm_ops = { | ||
193 | .prepare = afiucv_pm_prepare, | ||
194 | .complete = afiucv_pm_complete, | ||
195 | .freeze = afiucv_pm_freeze, | ||
196 | .thaw = afiucv_pm_restore_thaw, | ||
197 | .restore = afiucv_pm_restore_thaw, | ||
198 | }; | ||
199 | |||
200 | static struct device_driver af_iucv_driver = { | ||
201 | .owner = THIS_MODULE, | ||
202 | .name = "afiucv", | ||
203 | .bus = &iucv_bus, | ||
204 | .pm = &afiucv_pm_ops, | ||
205 | }; | ||
206 | |||
207 | /* dummy device used as trigger for PM functions */ | ||
208 | static struct device *af_iucv_dev; | ||
209 | |||
93 | /** | 210 | /** |
94 | * iucv_msg_length() - Returns the length of an iucv message. | 211 | * iucv_msg_length() - Returns the length of an iucv message. |
95 | * @msg: Pointer to struct iucv_message, MUST NOT be NULL | 212 | * @msg: Pointer to struct iucv_message, MUST NOT be NULL |
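
The hunk above hangs af_iucv's power-management handling off a dummy device bound to a driver on the iucv bus, so the PM core has an object to invoke the callbacks on. Below is a minimal sketch of that pattern; all example_* names are hypothetical, error unwinding is trimmed, and only driver-core calls already used in the patch are assumed.

#include <linux/device.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <net/iucv/iucv.h>

/* Stub callbacks; the real ones sever and re-establish IUCV paths. */
static int example_freeze(struct device *dev)  { return 0; }
static int example_thaw(struct device *dev)    { return 0; }
static int example_restore(struct device *dev) { return 0; }

static struct dev_pm_ops example_pm_ops = {
	.freeze  = example_freeze,	/* before the hibernation image is written */
	.thaw    = example_thaw,	/* after the image has been written */
	.restore = example_restore,	/* when resuming from the image */
};

static struct device_driver example_driver = {
	.owner = THIS_MODULE,
	.name  = "example",
	.bus   = &iucv_bus,		/* bus-level PM ops forward to driver PM ops */
	.pm    = &example_pm_ops,
};

static struct device *example_dev;

static int example_register(void)
{
	int err;

	err = driver_register(&example_driver);
	if (err)
		return err;
	example_dev = kzalloc(sizeof(*example_dev), GFP_KERNEL);
	if (!example_dev)
		return -ENOMEM;		/* unwind of driver_register() omitted */
	dev_set_name(example_dev, "example");
	example_dev->bus = &iucv_bus;
	example_dev->parent = iucv_root;
	example_dev->release = (void (*)(struct device *))kfree;
	example_dev->driver = &example_driver;
	/* once registered, the PM core calls example_freeze/thaw/restore */
	return device_register(example_dev);
}
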
@@ -1556,8 +1673,30 @@ static int __init afiucv_init(void) | |||
1556 | err = sock_register(&iucv_sock_family_ops); | 1673 | err = sock_register(&iucv_sock_family_ops); |
1557 | if (err) | 1674 | if (err) |
1558 | goto out_proto; | 1675 | goto out_proto; |
1676 | /* establish dummy device */ | ||
1677 | err = driver_register(&af_iucv_driver); | ||
1678 | if (err) | ||
1679 | goto out_sock; | ||
1680 | af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); | ||
1681 | if (!af_iucv_dev) { | ||
1682 | err = -ENOMEM; | ||
1683 | goto out_driver; | ||
1684 | } | ||
1685 | dev_set_name(af_iucv_dev, "af_iucv"); | ||
1686 | af_iucv_dev->bus = &iucv_bus; | ||
1687 | af_iucv_dev->parent = iucv_root; | ||
1688 | af_iucv_dev->release = (void (*)(struct device *))kfree; | ||
1689 | af_iucv_dev->driver = &af_iucv_driver; | ||
1690 | err = device_register(af_iucv_dev); | ||
1691 | if (err) | ||
1692 | goto out_driver; | ||
1693 | |||
1559 | return 0; | 1694 | return 0; |
1560 | 1695 | ||
1696 | out_driver: | ||
1697 | driver_unregister(&af_iucv_driver); | ||
1698 | out_sock: | ||
1699 | sock_unregister(PF_IUCV); | ||
1561 | out_proto: | 1700 | out_proto: |
1562 | proto_unregister(&iucv_proto); | 1701 | proto_unregister(&iucv_proto); |
1563 | out_iucv: | 1702 | out_iucv: |
@@ -1568,6 +1707,8 @@ out: | |||
1568 | 1707 | ||
1569 | static void __exit afiucv_exit(void) | 1708 | static void __exit afiucv_exit(void) |
1570 | { | 1709 | { |
1710 | device_unregister(af_iucv_dev); | ||
1711 | driver_unregister(&af_iucv_driver); | ||
1571 | sock_unregister(PF_IUCV); | 1712 | sock_unregister(PF_IUCV); |
1572 | proto_unregister(&iucv_proto); | 1713 | proto_unregister(&iucv_proto); |
1573 | iucv_unregister(&af_iucv_handler, 0); | 1714 | iucv_unregister(&af_iucv_handler, 0); |
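
afiucv_init() above acquires its resources in order and, if a later step fails, releases the earlier ones in reverse via goto labels. A compressed sketch of that unwind idiom with hypothetical step_*/undo_* stubs:

static int step_a(void)  { return 0; }	/* e.g. proto_register()   */
static int step_b(void)  { return 0; }	/* e.g. sock_register()    */
static int step_c(void)  { return 0; }	/* e.g. driver_register()  */
static void undo_a(void) { }		/* e.g. proto_unregister() */
static void undo_b(void) { }		/* e.g. sock_unregister()  */

static int example_setup(void)
{
	int err;

	err = step_a();
	if (err)
		goto out;
	err = step_b();
	if (err)
		goto out_a;		/* undo step_a only */
	err = step_c();
	if (err)
		goto out_b;		/* undo step_b, then step_a */
	return 0;
out_b:
	undo_b();
out_a:
	undo_a();
out:
	return err;
}
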
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index 61e8038a55ee..c833481d32e3 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -1,7 +1,8 @@ | |||
1 | /* | 1 | /* |
2 | * IUCV base infrastructure. | 2 | * IUCV base infrastructure. |
3 | * | 3 | * |
4 | * Copyright 2001, 2006 IBM Deutschland Entwicklung GmbH, IBM Corporation | 4 | * Copyright IBM Corp. 2001, 2009 |
5 | * | ||
5 | * Author(s): | 6 | * Author(s): |
6 | * Original source: | 7 | * Original source: |
7 | * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 | 8 | * Alan Altmark (Alan_Altmark@us.ibm.com) Sept. 2000 |
@@ -10,6 +11,8 @@ | |||
10 | * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) | 11 | * Fritz Elfert (elfert@de.ibm.com, felfert@millenux.com) |
11 | * Rewritten for af_iucv: | 12 | * Rewritten for af_iucv: |
12 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 13 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
14 | * PM functions: | ||
15 | * Ursula Braun (ursula.braun@de.ibm.com) | ||
13 | * | 16 | * |
14 | * Documentation used: | 17 | * Documentation used: |
15 | * The original source | 18 | * The original source |
@@ -45,6 +48,7 @@ | |||
45 | #include <linux/err.h> | 48 | #include <linux/err.h> |
46 | #include <linux/device.h> | 49 | #include <linux/device.h> |
47 | #include <linux/cpu.h> | 50 | #include <linux/cpu.h> |
51 | #include <linux/reboot.h> | ||
48 | #include <net/iucv/iucv.h> | 52 | #include <net/iucv/iucv.h> |
49 | #include <asm/atomic.h> | 53 | #include <asm/atomic.h> |
50 | #include <asm/ebcdic.h> | 54 | #include <asm/ebcdic.h> |
@@ -75,9 +79,24 @@ static int iucv_bus_match(struct device *dev, struct device_driver *drv) | |||
75 | return 0; | 79 | return 0; |
76 | } | 80 | } |
77 | 81 | ||
82 | static int iucv_pm_prepare(struct device *); | ||
83 | static void iucv_pm_complete(struct device *); | ||
84 | static int iucv_pm_freeze(struct device *); | ||
85 | static int iucv_pm_thaw(struct device *); | ||
86 | static int iucv_pm_restore(struct device *); | ||
87 | |||
88 | static struct dev_pm_ops iucv_pm_ops = { | ||
89 | .prepare = iucv_pm_prepare, | ||
90 | .complete = iucv_pm_complete, | ||
91 | .freeze = iucv_pm_freeze, | ||
92 | .thaw = iucv_pm_thaw, | ||
93 | .restore = iucv_pm_restore, | ||
94 | }; | ||
95 | |||
78 | struct bus_type iucv_bus = { | 96 | struct bus_type iucv_bus = { |
79 | .name = "iucv", | 97 | .name = "iucv", |
80 | .match = iucv_bus_match, | 98 | .match = iucv_bus_match, |
99 | .pm = &iucv_pm_ops, | ||
81 | }; | 100 | }; |
82 | EXPORT_SYMBOL(iucv_bus); | 101 | EXPORT_SYMBOL(iucv_bus); |
83 | 102 | ||
@@ -147,6 +166,7 @@ enum iucv_command_codes { | |||
147 | IUCV_RESUME = 14, | 166 | IUCV_RESUME = 14, |
148 | IUCV_SEVER = 15, | 167 | IUCV_SEVER = 15, |
149 | IUCV_SETMASK = 16, | 168 | IUCV_SETMASK = 16, |
169 | IUCV_SETCONTROLMASK = 17, | ||
150 | }; | 170 | }; |
151 | 171 | ||
152 | /* | 172 | /* |
@@ -364,6 +384,18 @@ static void iucv_allow_cpu(void *data) | |||
364 | parm->set_mask.ipmask = 0xf8; | 384 | parm->set_mask.ipmask = 0xf8; |
365 | iucv_call_b2f0(IUCV_SETMASK, parm); | 385 | iucv_call_b2f0(IUCV_SETMASK, parm); |
366 | 386 | ||
387 | /* | ||
388 | * Enable all iucv control interrupts. | ||
389 | * ipmask contains bits for the different interrupts | ||
390 | * 0x80 - Flag to allow pending connections interrupts | ||
391 | * 0x40 - Flag to allow connection complete interrupts | ||
392 | * 0x20 - Flag to allow connection severed interrupts | ||
393 | * 0x10 - Flag to allow connection quiesced interrupts | ||
394 | * 0x08 - Flag to allow connection resumed interrupts | ||
395 | */ | ||
396 | memset(parm, 0, sizeof(union iucv_param)); | ||
397 | parm->set_mask.ipmask = 0xf8; | ||
398 | iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); | ||
367 | /* Set indication that iucv interrupts are allowed for this cpu. */ | 399 | /* Set indication that iucv interrupts are allowed for this cpu. */ |
368 | cpu_set(cpu, iucv_irq_cpumask); | 400 | cpu_set(cpu, iucv_irq_cpumask); |
369 | } | 401 | } |
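
The comment above enumerates the five control-interrupt flags that make up the 0xf8 mask passed to IUCV_SETCONTROLMASK. For reference, the individual bits and their combination; the names below are hypothetical, the code writes the literal value:

/* Hypothetical names for the control-interrupt bits documented above. */
enum {
	EX_CTRL_PENDING_CONN  = 0x80,	/* pending connections */
	EX_CTRL_CONN_COMPLETE = 0x40,	/* connection complete */
	EX_CTRL_CONN_SEVERED  = 0x20,	/* connection severed  */
	EX_CTRL_CONN_QUIESCED = 0x10,	/* connection quiesced */
	EX_CTRL_CONN_RESUMED  = 0x08,	/* connection resumed  */
};
/* 0x80 | 0x40 | 0x20 | 0x10 | 0x08 == 0xf8, the value written to ipmask */
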
@@ -389,6 +421,31 @@ static void iucv_block_cpu(void *data) | |||
389 | } | 421 | } |
390 | 422 | ||
391 | /** | 423 | /** |
424 | * iucv_block_cpu_almost | ||
425 | * @data: unused | ||
426 | * | ||
427 | * Allow connection-severed interrupts only on this cpu. | ||
428 | */ | ||
429 | static void iucv_block_cpu_almost(void *data) | ||
430 | { | ||
431 | int cpu = smp_processor_id(); | ||
432 | union iucv_param *parm; | ||
433 | |||
434 | /* Allow iucv control interrupts only */ | ||
435 | parm = iucv_param_irq[cpu]; | ||
436 | memset(parm, 0, sizeof(union iucv_param)); | ||
437 | parm->set_mask.ipmask = 0x08; | ||
438 | iucv_call_b2f0(IUCV_SETMASK, parm); | ||
439 | /* Allow iucv-severed interrupt only */ | ||
440 | memset(parm, 0, sizeof(union iucv_param)); | ||
441 | parm->set_mask.ipmask = 0x20; | ||
442 | iucv_call_b2f0(IUCV_SETCONTROLMASK, parm); | ||
443 | |||
444 | /* Clear indication that iucv interrupts are allowed for this cpu. */ | ||
445 | cpu_clear(cpu, iucv_irq_cpumask); | ||
446 | } | ||
447 | |||
448 | /** | ||
392 | * iucv_declare_cpu | 449 | * iucv_declare_cpu |
393 | * @data: unused | 450 | * @data: unused |
394 | * | 451 | * |
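
iucv_block_cpu_almost() above is the freeze-time counterpart of iucv_allow_cpu(): it masks everything on the cpu except the control-interrupt class (0x08 via IUCV_SETMASK, per the in-code comment) and, within that class, leaves only connection-severed notifications enabled (0x20 via IUCV_SETCONTROLMASK). The two literal values, with hypothetical names:

/* Hypothetical names for the two masks written by iucv_block_cpu_almost();
 * values and meanings are taken from the comments in this file. */
enum {
	EX_SETMASK_CONTROL_ONLY  = 0x08,	/* IUCV_SETMASK: control interrupts only   */
	EX_CTRLMASK_CONN_SEVERED = 0x20,	/* IUCV_SETCONTROLMASK: connection severed */
};
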
@@ -758,6 +815,28 @@ void iucv_unregister(struct iucv_handler *handler, int smp) | |||
758 | } | 815 | } |
759 | EXPORT_SYMBOL(iucv_unregister); | 816 | EXPORT_SYMBOL(iucv_unregister); |
760 | 817 | ||
818 | static int iucv_reboot_event(struct notifier_block *this, | ||
819 | unsigned long event, void *ptr) | ||
820 | { | ||
821 | int i, rc; | ||
822 | |||
823 | get_online_cpus(); | ||
824 | on_each_cpu(iucv_block_cpu, NULL, 1); | ||
825 | preempt_disable(); | ||
826 | for (i = 0; i < iucv_max_pathid; i++) { | ||
827 | if (iucv_path_table[i]) | ||
828 | rc = iucv_sever_pathid(i, NULL); | ||
829 | } | ||
830 | preempt_enable(); | ||
831 | put_online_cpus(); | ||
832 | iucv_disable(); | ||
833 | return NOTIFY_DONE; | ||
834 | } | ||
835 | |||
836 | static struct notifier_block iucv_reboot_notifier = { | ||
837 | .notifier_call = iucv_reboot_event, | ||
838 | }; | ||
839 | |||
761 | /** | 840 | /** |
762 | * iucv_path_accept | 841 | * iucv_path_accept |
763 | * @path: address of iucv path structure | 842 | * @path: address of iucv path structure |
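
The reboot notifier added above blocks IUCV interrupts on every cpu, severs all active path ids and disables IUCV before the machine goes down. The registration side is the standard notifier-block idiom; a minimal sketch with a hypothetical callback:

#include <linux/notifier.h>
#include <linux/reboot.h>

/* Hypothetical reboot callback; the real one severs all IUCV paths. */
static int example_reboot_event(struct notifier_block *nb,
				unsigned long event, void *ptr)
{
	/* quiesce the subsystem: block interrupts, sever paths, disable */
	return NOTIFY_DONE;
}

static struct notifier_block example_reboot_nb = {
	.notifier_call = example_reboot_event,
};

/* register_reboot_notifier(&example_reboot_nb) in the init path,
 * unregister_reboot_notifier(&example_reboot_nb) on exit */
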
@@ -777,6 +856,10 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, | |||
777 | int rc; | 856 | int rc; |
778 | 857 | ||
779 | local_bh_disable(); | 858 | local_bh_disable(); |
859 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | ||
860 | rc = -EIO; | ||
861 | goto out; | ||
862 | } | ||
780 | /* Prepare parameter block. */ | 863 | /* Prepare parameter block. */ |
781 | parm = iucv_param[smp_processor_id()]; | 864 | parm = iucv_param[smp_processor_id()]; |
782 | memset(parm, 0, sizeof(union iucv_param)); | 865 | memset(parm, 0, sizeof(union iucv_param)); |
@@ -792,6 +875,7 @@ int iucv_path_accept(struct iucv_path *path, struct iucv_handler *handler, | |||
792 | path->msglim = parm->ctrl.ipmsglim; | 875 | path->msglim = parm->ctrl.ipmsglim; |
793 | path->flags = parm->ctrl.ipflags1; | 876 | path->flags = parm->ctrl.ipflags1; |
794 | } | 877 | } |
878 | out: | ||
795 | local_bh_enable(); | 879 | local_bh_enable(); |
796 | return rc; | 880 | return rc; |
797 | } | 881 | } |
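
This is the first of a series of hunks that prepend the same guard to the externally callable IUCV primitives: once a cpu's interrupt buffer has been retracted (after a PM freeze or after the reboot notifier ran), iucv_buffer_cpumask no longer contains that cpu and the primitive returns -EIO instead of issuing the b2f0 instruction. A sketch of the guard as a hypothetical helper; the patch open-codes it in each function:

/* Hypothetical helper equivalent to the open-coded check above; it only
 * makes sense inside net/iucv/iucv.c, where iucv_buffer_cpumask lives. */
static inline int iucv_active_on_this_cpu(void)
{
	return cpu_isset(smp_processor_id(), iucv_buffer_cpumask);
}

/* usage inside a primitive, mirroring the patch:
 *
 *	if (!iucv_active_on_this_cpu()) {
 *		rc = -EIO;
 *		goto out;
 *	}
 */
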
@@ -821,6 +905,10 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, | |||
821 | 905 | ||
822 | spin_lock_bh(&iucv_table_lock); | 906 | spin_lock_bh(&iucv_table_lock); |
823 | iucv_cleanup_queue(); | 907 | iucv_cleanup_queue(); |
908 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | ||
909 | rc = -EIO; | ||
910 | goto out; | ||
911 | } | ||
824 | parm = iucv_param[smp_processor_id()]; | 912 | parm = iucv_param[smp_processor_id()]; |
825 | memset(parm, 0, sizeof(union iucv_param)); | 913 | memset(parm, 0, sizeof(union iucv_param)); |
826 | parm->ctrl.ipmsglim = path->msglim; | 914 | parm->ctrl.ipmsglim = path->msglim; |
@@ -855,6 +943,7 @@ int iucv_path_connect(struct iucv_path *path, struct iucv_handler *handler, | |||
855 | rc = -EIO; | 943 | rc = -EIO; |
856 | } | 944 | } |
857 | } | 945 | } |
946 | out: | ||
858 | spin_unlock_bh(&iucv_table_lock); | 947 | spin_unlock_bh(&iucv_table_lock); |
859 | return rc; | 948 | return rc; |
860 | } | 949 | } |
@@ -876,12 +965,17 @@ int iucv_path_quiesce(struct iucv_path *path, u8 userdata[16]) | |||
876 | int rc; | 965 | int rc; |
877 | 966 | ||
878 | local_bh_disable(); | 967 | local_bh_disable(); |
968 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | ||
969 | rc = -EIO; | ||
970 | goto out; | ||
971 | } | ||
879 | parm = iucv_param[smp_processor_id()]; | 972 | parm = iucv_param[smp_processor_id()]; |
880 | memset(parm, 0, sizeof(union iucv_param)); | 973 | memset(parm, 0, sizeof(union iucv_param)); |
881 | if (userdata) | 974 | if (userdata) |
882 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 975 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); |
883 | parm->ctrl.ippathid = path->pathid; | 976 | parm->ctrl.ippathid = path->pathid; |
884 | rc = iucv_call_b2f0(IUCV_QUIESCE, parm); | 977 | rc = iucv_call_b2f0(IUCV_QUIESCE, parm); |
978 | out: | ||
885 | local_bh_enable(); | 979 | local_bh_enable(); |
886 | return rc; | 980 | return rc; |
887 | } | 981 | } |
@@ -903,12 +997,17 @@ int iucv_path_resume(struct iucv_path *path, u8 userdata[16]) | |||
903 | int rc; | 997 | int rc; |
904 | 998 | ||
905 | local_bh_disable(); | 999 | local_bh_disable(); |
1000 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | ||
1001 | rc = -EIO; | ||
1002 | goto out; | ||
1003 | } | ||
906 | parm = iucv_param[smp_processor_id()]; | 1004 | parm = iucv_param[smp_processor_id()]; |
907 | memset(parm, 0, sizeof(union iucv_param)); | 1005 | memset(parm, 0, sizeof(union iucv_param)); |
908 | if (userdata) | 1006 | if (userdata) |
909 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); | 1007 | memcpy(parm->ctrl.ipuser, userdata, sizeof(parm->ctrl.ipuser)); |
910 | parm->ctrl.ippathid = path->pathid; | 1008 | parm->ctrl.ippathid = path->pathid; |
911 | rc = iucv_call_b2f0(IUCV_RESUME, parm); | 1009 | rc = iucv_call_b2f0(IUCV_RESUME, parm); |
1010 | out: | ||
912 | local_bh_enable(); | 1011 | local_bh_enable(); |
913 | return rc; | 1012 | return rc; |
914 | } | 1013 | } |
@@ -927,6 +1026,10 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16]) | |||
927 | int rc; | 1026 | int rc; |
928 | 1027 | ||
929 | preempt_disable(); | 1028 | preempt_disable(); |
1029 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | ||
1030 | rc = -EIO; | ||
1031 | goto out; | ||
1032 | } | ||
930 | if (iucv_active_cpu != smp_processor_id()) | 1033 | if (iucv_active_cpu != smp_processor_id()) |
931 | spin_lock_bh(&iucv_table_lock); | 1034 | spin_lock_bh(&iucv_table_lock); |
932 | rc = iucv_sever_pathid(path->pathid, userdata); | 1035 | rc = iucv_sever_pathid(path->pathid, userdata); |
@@ -934,6 +1037,7 @@ int iucv_path_sever(struct iucv_path *path, u8 userdata[16]) | |||
934 | list_del_init(&path->list); | 1037 | list_del_init(&path->list); |
935 | if (iucv_active_cpu != smp_processor_id()) | 1038 | if (iucv_active_cpu != smp_processor_id()) |
936 | spin_unlock_bh(&iucv_table_lock); | 1039 | spin_unlock_bh(&iucv_table_lock); |
1040 | out: | ||
937 | preempt_enable(); | 1041 | preempt_enable(); |
938 | return rc; | 1042 | return rc; |
939 | } | 1043 | } |
@@ -956,6 +1060,10 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, | |||
956 | int rc; | 1060 | int rc; |
957 | 1061 | ||
958 | local_bh_disable(); | 1062 | local_bh_disable(); |
1063 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | ||
1064 | rc = -EIO; | ||
1065 | goto out; | ||
1066 | } | ||
959 | parm = iucv_param[smp_processor_id()]; | 1067 | parm = iucv_param[smp_processor_id()]; |
960 | memset(parm, 0, sizeof(union iucv_param)); | 1068 | memset(parm, 0, sizeof(union iucv_param)); |
961 | parm->purge.ippathid = path->pathid; | 1069 | parm->purge.ippathid = path->pathid; |
@@ -967,6 +1075,7 @@ int iucv_message_purge(struct iucv_path *path, struct iucv_message *msg, | |||
967 | msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8; | 1075 | msg->audit = (*(u32 *) &parm->purge.ipaudit) >> 8; |
968 | msg->tag = parm->purge.ipmsgtag; | 1076 | msg->tag = parm->purge.ipmsgtag; |
969 | } | 1077 | } |
1078 | out: | ||
970 | local_bh_enable(); | 1079 | local_bh_enable(); |
971 | return rc; | 1080 | return rc; |
972 | } | 1081 | } |
@@ -1043,6 +1152,10 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, | |||
1043 | if (msg->flags & IUCV_IPRMDATA) | 1152 | if (msg->flags & IUCV_IPRMDATA) |
1044 | return iucv_message_receive_iprmdata(path, msg, flags, | 1153 | return iucv_message_receive_iprmdata(path, msg, flags, |
1045 | buffer, size, residual); | 1154 | buffer, size, residual); |
1155 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | ||
1156 | rc = -EIO; | ||
1157 | goto out; | ||
1158 | } | ||
1046 | parm = iucv_param[smp_processor_id()]; | 1159 | parm = iucv_param[smp_processor_id()]; |
1047 | memset(parm, 0, sizeof(union iucv_param)); | 1160 | memset(parm, 0, sizeof(union iucv_param)); |
1048 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; | 1161 | parm->db.ipbfadr1 = (u32)(addr_t) buffer; |
@@ -1058,6 +1171,7 @@ int __iucv_message_receive(struct iucv_path *path, struct iucv_message *msg, | |||
1058 | if (residual) | 1171 | if (residual) |
1059 | *residual = parm->db.ipbfln1f; | 1172 | *residual = parm->db.ipbfln1f; |
1060 | } | 1173 | } |
1174 | out: | ||
1061 | return rc; | 1175 | return rc; |
1062 | } | 1176 | } |
1063 | EXPORT_SYMBOL(__iucv_message_receive); | 1177 | EXPORT_SYMBOL(__iucv_message_receive); |
@@ -1111,6 +1225,10 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg) | |||
1111 | int rc; | 1225 | int rc; |
1112 | 1226 | ||
1113 | local_bh_disable(); | 1227 | local_bh_disable(); |
1228 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | ||
1229 | rc = -EIO; | ||
1230 | goto out; | ||
1231 | } | ||
1114 | parm = iucv_param[smp_processor_id()]; | 1232 | parm = iucv_param[smp_processor_id()]; |
1115 | memset(parm, 0, sizeof(union iucv_param)); | 1233 | memset(parm, 0, sizeof(union iucv_param)); |
1116 | parm->db.ippathid = path->pathid; | 1234 | parm->db.ippathid = path->pathid; |
@@ -1118,6 +1236,7 @@ int iucv_message_reject(struct iucv_path *path, struct iucv_message *msg) | |||
1118 | parm->db.iptrgcls = msg->class; | 1236 | parm->db.iptrgcls = msg->class; |
1119 | parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID); | 1237 | parm->db.ipflags1 = (IUCV_IPTRGCLS | IUCV_IPFGMID | IUCV_IPFGPID); |
1120 | rc = iucv_call_b2f0(IUCV_REJECT, parm); | 1238 | rc = iucv_call_b2f0(IUCV_REJECT, parm); |
1239 | out: | ||
1121 | local_bh_enable(); | 1240 | local_bh_enable(); |
1122 | return rc; | 1241 | return rc; |
1123 | } | 1242 | } |
@@ -1145,6 +1264,10 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, | |||
1145 | int rc; | 1264 | int rc; |
1146 | 1265 | ||
1147 | local_bh_disable(); | 1266 | local_bh_disable(); |
1267 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | ||
1268 | rc = -EIO; | ||
1269 | goto out; | ||
1270 | } | ||
1148 | parm = iucv_param[smp_processor_id()]; | 1271 | parm = iucv_param[smp_processor_id()]; |
1149 | memset(parm, 0, sizeof(union iucv_param)); | 1272 | memset(parm, 0, sizeof(union iucv_param)); |
1150 | if (flags & IUCV_IPRMDATA) { | 1273 | if (flags & IUCV_IPRMDATA) { |
@@ -1162,6 +1285,7 @@ int iucv_message_reply(struct iucv_path *path, struct iucv_message *msg, | |||
1162 | parm->db.iptrgcls = msg->class; | 1285 | parm->db.iptrgcls = msg->class; |
1163 | } | 1286 | } |
1164 | rc = iucv_call_b2f0(IUCV_REPLY, parm); | 1287 | rc = iucv_call_b2f0(IUCV_REPLY, parm); |
1288 | out: | ||
1165 | local_bh_enable(); | 1289 | local_bh_enable(); |
1166 | return rc; | 1290 | return rc; |
1167 | } | 1291 | } |
@@ -1190,6 +1314,10 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, | |||
1190 | union iucv_param *parm; | 1314 | union iucv_param *parm; |
1191 | int rc; | 1315 | int rc; |
1192 | 1316 | ||
1317 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | ||
1318 | rc = -EIO; | ||
1319 | goto out; | ||
1320 | } | ||
1193 | parm = iucv_param[smp_processor_id()]; | 1321 | parm = iucv_param[smp_processor_id()]; |
1194 | memset(parm, 0, sizeof(union iucv_param)); | 1322 | memset(parm, 0, sizeof(union iucv_param)); |
1195 | if (flags & IUCV_IPRMDATA) { | 1323 | if (flags & IUCV_IPRMDATA) { |
@@ -1212,6 +1340,7 @@ int __iucv_message_send(struct iucv_path *path, struct iucv_message *msg, | |||
1212 | rc = iucv_call_b2f0(IUCV_SEND, parm); | 1340 | rc = iucv_call_b2f0(IUCV_SEND, parm); |
1213 | if (!rc) | 1341 | if (!rc) |
1214 | msg->id = parm->db.ipmsgid; | 1342 | msg->id = parm->db.ipmsgid; |
1343 | out: | ||
1215 | return rc; | 1344 | return rc; |
1216 | } | 1345 | } |
1217 | EXPORT_SYMBOL(__iucv_message_send); | 1346 | EXPORT_SYMBOL(__iucv_message_send); |
@@ -1272,6 +1401,10 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, | |||
1272 | int rc; | 1401 | int rc; |
1273 | 1402 | ||
1274 | local_bh_disable(); | 1403 | local_bh_disable(); |
1404 | if (!cpu_isset(smp_processor_id(), iucv_buffer_cpumask)) { | ||
1405 | rc = -EIO; | ||
1406 | goto out; | ||
1407 | } | ||
1275 | parm = iucv_param[smp_processor_id()]; | 1408 | parm = iucv_param[smp_processor_id()]; |
1276 | memset(parm, 0, sizeof(union iucv_param)); | 1409 | memset(parm, 0, sizeof(union iucv_param)); |
1277 | if (flags & IUCV_IPRMDATA) { | 1410 | if (flags & IUCV_IPRMDATA) { |
@@ -1297,6 +1430,7 @@ int iucv_message_send2way(struct iucv_path *path, struct iucv_message *msg, | |||
1297 | rc = iucv_call_b2f0(IUCV_SEND, parm); | 1430 | rc = iucv_call_b2f0(IUCV_SEND, parm); |
1298 | if (!rc) | 1431 | if (!rc) |
1299 | msg->id = parm->db.ipmsgid; | 1432 | msg->id = parm->db.ipmsgid; |
1433 | out: | ||
1300 | local_bh_enable(); | 1434 | local_bh_enable(); |
1301 | return rc; | 1435 | return rc; |
1302 | } | 1436 | } |
@@ -1687,6 +1821,130 @@ static void iucv_external_interrupt(u16 code) | |||
1687 | spin_unlock(&iucv_queue_lock); | 1821 | spin_unlock(&iucv_queue_lock); |
1688 | } | 1822 | } |
1689 | 1823 | ||
1824 | static int iucv_pm_prepare(struct device *dev) | ||
1825 | { | ||
1826 | int rc = 0; | ||
1827 | |||
1828 | #ifdef CONFIG_PM_DEBUG | ||
1829 | printk(KERN_INFO "iucv_pm_prepare\n"); | ||
1830 | #endif | ||
1831 | if (dev->driver && dev->driver->pm && dev->driver->pm->prepare) | ||
1832 | rc = dev->driver->pm->prepare(dev); | ||
1833 | return rc; | ||
1834 | } | ||
1835 | |||
1836 | static void iucv_pm_complete(struct device *dev) | ||
1837 | { | ||
1838 | #ifdef CONFIG_PM_DEBUG | ||
1839 | printk(KERN_INFO "iucv_pm_complete\n"); | ||
1840 | #endif | ||
1841 | if (dev->driver && dev->driver->pm && dev->driver->pm->complete) | ||
1842 | dev->driver->pm->complete(dev); | ||
1843 | } | ||
1844 | |||
1845 | /** | ||
1846 | * iucv_path_table_empty() - determine if iucv path table is empty | ||
1847 | * | ||
1848 | * Returns 0 if there are still iucv paths defined | ||
1849 | * 1 if there are no iucv paths defined | ||
1850 | */ | ||
1851 | int iucv_path_table_empty(void) | ||
1852 | { | ||
1853 | int i; | ||
1854 | |||
1855 | for (i = 0; i < iucv_max_pathid; i++) { | ||
1856 | if (iucv_path_table[i]) | ||
1857 | return 0; | ||
1858 | } | ||
1859 | return 1; | ||
1860 | } | ||
1861 | |||
1862 | /** | ||
1863 | * iucv_pm_freeze() - Freeze PM callback | ||
1864 | * @dev: iucv-based device | ||
1865 | * | ||
1866 | * disable iucv interrupts | ||
1867 | * invoke callback function of the iucv-based driver | ||
1868 | * shut down iucv, if no iucv paths are established anymore | ||
1869 | */ | ||
1870 | static int iucv_pm_freeze(struct device *dev) | ||
1871 | { | ||
1872 | int cpu; | ||
1873 | int rc = 0; | ||
1874 | |||
1875 | #ifdef CONFIG_PM_DEBUG | ||
1876 | printk(KERN_WARNING "iucv_pm_freeze\n"); | ||
1877 | #endif | ||
1878 | for_each_cpu_mask_nr(cpu, iucv_irq_cpumask) | ||
1879 | smp_call_function_single(cpu, iucv_block_cpu_almost, NULL, 1); | ||
1880 | if (dev->driver && dev->driver->pm && dev->driver->pm->freeze) | ||
1881 | rc = dev->driver->pm->freeze(dev); | ||
1882 | if (iucv_path_table_empty()) | ||
1883 | iucv_disable(); | ||
1884 | return rc; | ||
1885 | } | ||
1886 | |||
1887 | /** | ||
1888 | * iucv_pm_thaw() - Thaw PM callback | ||
1889 | * @dev: iucv-based device | ||
1890 | * | ||
1891 | * make iucv ready for use again: allocate path table, declare interrupt buffers | ||
1892 | * and enable iucv interrupts | ||
1893 | * invoke callback function of the iucv-based driver | ||
1894 | */ | ||
1895 | static int iucv_pm_thaw(struct device *dev) | ||
1896 | { | ||
1897 | int rc = 0; | ||
1898 | |||
1899 | #ifdef CONFIG_PM_DEBUG | ||
1900 | printk(KERN_WARNING "iucv_pm_thaw\n"); | ||
1901 | #endif | ||
1902 | if (!iucv_path_table) { | ||
1903 | rc = iucv_enable(); | ||
1904 | if (rc) | ||
1905 | goto out; | ||
1906 | } | ||
1907 | if (cpus_empty(iucv_irq_cpumask)) { | ||
1908 | if (iucv_nonsmp_handler) | ||
1909 | /* enable interrupts on one cpu */ | ||
1910 | iucv_allow_cpu(NULL); | ||
1911 | else | ||
1912 | /* enable interrupts on all cpus */ | ||
1913 | iucv_setmask_mp(); | ||
1914 | } | ||
1915 | if (dev->driver && dev->driver->pm && dev->driver->pm->thaw) | ||
1916 | rc = dev->driver->pm->thaw(dev); | ||
1917 | out: | ||
1918 | return rc; | ||
1919 | } | ||
1920 | |||
1921 | /** | ||
1922 | * iucv_pm_restore() - Restore PM callback | ||
1923 | * @dev: iucv-based device | ||
1924 | * | ||
1925 | * make iucv ready for use again: allocate path table, declare interrupt buffers | ||
1926 | * and enable iucv interrupts | ||
1927 | * invoke callback function of the iucv-based driver | ||
1928 | */ | ||
1929 | static int iucv_pm_restore(struct device *dev) | ||
1930 | { | ||
1931 | int rc = 0; | ||
1932 | |||
1933 | #ifdef CONFIG_PM_DEBUG | ||
1934 | printk(KERN_WARNING "iucv_pm_restore %p\n", iucv_path_table); | ||
1935 | #endif | ||
1936 | if (cpus_empty(iucv_irq_cpumask)) { | ||
1937 | rc = iucv_query_maxconn(); | ||
1938 | rc = iucv_enable(); | ||
1939 | if (rc) | ||
1940 | goto out; | ||
1941 | } | ||
1942 | if (dev->driver && dev->driver->pm && dev->driver->pm->restore) | ||
1943 | rc = dev->driver->pm->restore(dev); | ||
1944 | out: | ||
1945 | return rc; | ||
1946 | } | ||
1947 | |||
1690 | /** | 1948 | /** |
1691 | * iucv_init | 1949 | * iucv_init |
1692 | * | 1950 | * |
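
The bus-level callbacks above do two things: they quiesce or re-enable the IUCV base layer (mask interrupts per cpu, tear down or rebuild the path table) and they forward the transition to the bound driver's own dev_pm_ops, so an iucv-based driver such as af_iucv only has to fill in its struct dev_pm_ops. A condensed sketch of the delegation check shared by all five callbacks, with a hypothetical name:

#include <linux/device.h>

/* Hypothetical condensation of the delegation each bus-level callback
 * above performs before or after its own housekeeping. */
static int example_forward_freeze(struct device *dev)
{
	if (dev->driver && dev->driver->pm && dev->driver->pm->freeze)
		return dev->driver->pm->freeze(dev);
	return 0;	/* no driver PM ops to forward to */
}
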
@@ -1740,15 +1998,20 @@ static int __init iucv_init(void) | |||
1740 | rc = register_hotcpu_notifier(&iucv_cpu_notifier); | 1998 | rc = register_hotcpu_notifier(&iucv_cpu_notifier); |
1741 | if (rc) | 1999 | if (rc) |
1742 | goto out_free; | 2000 | goto out_free; |
2001 | rc = register_reboot_notifier(&iucv_reboot_notifier); | ||
2002 | if (rc) | ||
2003 | goto out_cpu; | ||
1743 | ASCEBC(iucv_error_no_listener, 16); | 2004 | ASCEBC(iucv_error_no_listener, 16); |
1744 | ASCEBC(iucv_error_no_memory, 16); | 2005 | ASCEBC(iucv_error_no_memory, 16); |
1745 | ASCEBC(iucv_error_pathid, 16); | 2006 | ASCEBC(iucv_error_pathid, 16); |
1746 | iucv_available = 1; | 2007 | iucv_available = 1; |
1747 | rc = bus_register(&iucv_bus); | 2008 | rc = bus_register(&iucv_bus); |
1748 | if (rc) | 2009 | if (rc) |
1749 | goto out_cpu; | 2010 | goto out_reboot; |
1750 | return 0; | 2011 | return 0; |
1751 | 2012 | ||
2013 | out_reboot: | ||
2014 | unregister_reboot_notifier(&iucv_reboot_notifier); | ||
1752 | out_cpu: | 2015 | out_cpu: |
1753 | unregister_hotcpu_notifier(&iucv_cpu_notifier); | 2016 | unregister_hotcpu_notifier(&iucv_cpu_notifier); |
1754 | out_free: | 2017 | out_free: |
@@ -1783,6 +2046,7 @@ static void __exit iucv_exit(void) | |||
1783 | list_for_each_entry_safe(p, n, &iucv_work_queue, list) | 2046 | list_for_each_entry_safe(p, n, &iucv_work_queue, list) |
1784 | kfree(p); | 2047 | kfree(p); |
1785 | spin_unlock_irq(&iucv_queue_lock); | 2048 | spin_unlock_irq(&iucv_queue_lock); |
2049 | unregister_reboot_notifier(&iucv_reboot_notifier); | ||
1786 | unregister_hotcpu_notifier(&iucv_cpu_notifier); | 2050 | unregister_hotcpu_notifier(&iucv_cpu_notifier); |
1787 | for_each_possible_cpu(cpu) { | 2051 | for_each_possible_cpu(cpu) { |
1788 | kfree(iucv_param_irq[cpu]); | 2052 | kfree(iucv_param_irq[cpu]); |