author     Michael Holzheu <holzheu@de.ibm.com>           2009-06-16 04:30:40 -0400
committer  Martin Schwidefsky <schwidefsky@de.ibm.com>    2009-06-16 04:31:16 -0400
commit     62b7494209495847269a6ce0504cbefd23d42eb1
tree       6318661837ddd250dc86b48a94f44fa2d1ae9421   /drivers/s390/char/sclp.c
parent     3ef32e62cb55785f6f1b5ad7290744f74bdadaf0
[S390] pm: power management support for SCLP drivers.
The SCLP base driver defines a new notifier callback for all upper-level SCLP
drivers, such as the SCLP console. This guarantees that on suspend the upper-level
drivers are suspended first and the SCLP base driver afterwards; on resume it is
the other way round. The SCLP base driver itself registers a new platform device
on the platform bus and gets PM notifications via dev_pm_ops.
On suspend, the SCLP base driver switches off the receive and send masks; this is
done in sclp_deactivate(). After suspend, all new requests are rejected with -EIO
and no more interrupts are received, because the masks are switched off. On resume,
the send and receive masks are restored in the sclp_reactivate() function.
While the SCLP console is suspended, all new messages are cached in the SCLP
console buffers. On resume, the cached messages are written to the console. In
addition, an early resume function removes the cached messages from the suspend
image.
Signed-off-by: Michael Holzheu <holzheu@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390/char/sclp.c')
-rw-r--r--   drivers/s390/char/sclp.c   |   248
1 file changed, 218 insertions(+), 30 deletions(-)
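Before the diff itself, the following is a minimal sketch of how an upper-level SCLP driver (console, VT220, and so on) might hook into the new PM notifier described above. It is illustrative only: the example_* names are invented, and the pm_event_fn prototype is inferred from the call site in sclp_pm_event() in the patch below; the real declaration lives in drivers/s390/char/sclp.h, which is not part of this diffstat.

/*
 * Hypothetical upper-level SCLP driver (illustrative names only).
 * The pm_event_fn signature is inferred from the call in sclp_pm_event().
 */
#include <linux/init.h>
#include "sclp.h"

static void example_pm_event(struct sclp_register *reg,
			     enum sclp_pm_event sclp_pm_event)
{
	switch (sclp_pm_event) {
	case SCLP_PM_EVENT_FREEZE:
		/* Stop issuing requests; cache outgoing data locally. */
		break;
	case SCLP_PM_EVENT_THAW:
	case SCLP_PM_EVENT_RESTORE:
		/* Interface is active again; flush anything cached. */
		break;
	}
}

static struct sclp_register example_sclp_register = {
	.pm_event_fn = example_pm_event,
};

static int __init example_init(void)
{
	/* Adds the driver to sclp_reg_list; the base driver posts PM events to it. */
	return sclp_register(&example_sclp_register);
}

With such a registration in place, sclp_freeze() posts SCLP_PM_EVENT_FREEZE to every registered driver before deactivating the interface, and sclp_thaw()/sclp_restore() post the corresponding resume events after reactivation.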
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 4377e93a43d7..a983f5086788 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -1,11 +1,10 @@
 /*
- * drivers/s390/char/sclp.c
- * core function to access sclp interface
+ * core function to access sclp interface
  *
- * S390 version
- * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright IBM Corp. 1999, 2009
+ *
  * Author(s): Martin Peschke <mpeschke@de.ibm.com>
  *            Martin Schwidefsky <schwidefsky@de.ibm.com>
  */
 
 #include <linux/module.h>
@@ -16,6 +15,9 @@
 #include <linux/reboot.h>
 #include <linux/jiffies.h>
 #include <linux/init.h>
+#include <linux/suspend.h>
+#include <linux/completion.h>
+#include <linux/platform_device.h>
 #include <asm/types.h>
 #include <asm/s390_ext.h>
 
@@ -47,6 +49,16 @@ static struct sclp_req sclp_init_req;
 static char sclp_read_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 static char sclp_init_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
 
+/* Suspend request */
+static DECLARE_COMPLETION(sclp_request_queue_flushed);
+
+static void sclp_suspend_req_cb(struct sclp_req *req, void *data)
+{
+	complete(&sclp_request_queue_flushed);
+}
+
+static struct sclp_req sclp_suspend_req;
+
 /* Timer for request retries. */
 static struct timer_list sclp_request_timer;
 
@@ -84,6 +96,12 @@ static volatile enum sclp_mask_state_t {
 	sclp_mask_state_initializing
 } sclp_mask_state = sclp_mask_state_idle;
 
+/* Internal state: is the driver suspended? */
+static enum sclp_suspend_state_t {
+	sclp_suspend_state_running,
+	sclp_suspend_state_suspended,
+} sclp_suspend_state = sclp_suspend_state_running;
+
 /* Maximum retry counts */
 #define SCLP_INIT_RETRY		3
 #define SCLP_MASK_RETRY		3
@@ -211,6 +229,8 @@ sclp_process_queue(void)
 	del_timer(&sclp_request_timer);
 	while (!list_empty(&sclp_req_queue)) {
 		req = list_entry(sclp_req_queue.next, struct sclp_req, list);
+		if (!req->sccb)
+			goto do_post;
 		rc = __sclp_start_request(req);
 		if (rc == 0)
 			break;
@@ -222,6 +242,7 @@ sclp_process_queue(void)
 						 sclp_request_timeout, 0);
 			break;
 		}
+do_post:
 		/* Post-processing for aborted request */
 		list_del(&req->list);
 		if (req->callback) {
@@ -233,6 +254,19 @@ sclp_process_queue(void)
 	spin_unlock_irqrestore(&sclp_lock, flags);
 }
 
+static int __sclp_can_add_request(struct sclp_req *req)
+{
+	if (req == &sclp_suspend_req || req == &sclp_init_req)
+		return 1;
+	if (sclp_suspend_state != sclp_suspend_state_running)
+		return 0;
+	if (sclp_init_state != sclp_init_state_initialized)
+		return 0;
+	if (sclp_activation_state != sclp_activation_state_active)
+		return 0;
+	return 1;
+}
+
 /* Queue a new request. Return zero on success, non-zero otherwise. */
 int
 sclp_add_request(struct sclp_req *req)
@@ -241,9 +275,7 @@ sclp_add_request(struct sclp_req *req)
 	int rc;
 
 	spin_lock_irqsave(&sclp_lock, flags);
-	if ((sclp_init_state != sclp_init_state_initialized ||
-	     sclp_activation_state != sclp_activation_state_active) &&
-	    req != &sclp_init_req) {
+	if (!__sclp_can_add_request(req)) {
 		spin_unlock_irqrestore(&sclp_lock, flags);
 		return -EIO;
 	}
@@ -254,10 +286,16 @@ sclp_add_request(struct sclp_req *req)
 	/* Start if request is first in list */
 	if (sclp_running_state == sclp_running_state_idle &&
 	    req->list.prev == &sclp_req_queue) {
+		if (!req->sccb) {
+			list_del(&req->list);
+			rc = -ENODATA;
+			goto out;
+		}
 		rc = __sclp_start_request(req);
 		if (rc)
 			list_del(&req->list);
 	}
+out:
 	spin_unlock_irqrestore(&sclp_lock, flags);
 	return rc;
 }
@@ -560,6 +598,7 @@ sclp_register(struct sclp_register *reg)
 	/* Trigger initial state change callback */
 	reg->sclp_receive_mask = 0;
 	reg->sclp_send_mask = 0;
+	reg->pm_event_posted = 0;
 	list_add(&reg->list, &sclp_reg_list);
 	spin_unlock_irqrestore(&sclp_lock, flags);
 	rc = sclp_init_mask(1);
@@ -880,20 +919,134 @@ static struct notifier_block sclp_reboot_notifier = {
 	.notifier_call = sclp_reboot_event
 };
 
+/*
+ * Suspend/resume SCLP notifier implementation
+ */
+
+static void sclp_pm_event(enum sclp_pm_event sclp_pm_event, int rollback)
+{
+	struct sclp_register *reg;
+	unsigned long flags;
+
+	if (!rollback) {
+		spin_lock_irqsave(&sclp_lock, flags);
+		list_for_each_entry(reg, &sclp_reg_list, list)
+			reg->pm_event_posted = 0;
+		spin_unlock_irqrestore(&sclp_lock, flags);
+	}
+	do {
+		spin_lock_irqsave(&sclp_lock, flags);
+		list_for_each_entry(reg, &sclp_reg_list, list) {
+			if (rollback && reg->pm_event_posted)
+				goto found;
+			if (!rollback && !reg->pm_event_posted)
+				goto found;
+		}
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		return;
+found:
+		spin_unlock_irqrestore(&sclp_lock, flags);
+		if (reg->pm_event_fn)
+			reg->pm_event_fn(reg, sclp_pm_event);
+		reg->pm_event_posted = rollback ? 0 : 1;
+	} while (1);
+}
+
+/*
+ * Susend/resume callbacks for platform device
+ */
+
+static int sclp_freeze(struct device *dev)
+{
+	unsigned long flags;
+	int rc;
+
+	sclp_pm_event(SCLP_PM_EVENT_FREEZE, 0);
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_suspend_state = sclp_suspend_state_suspended;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+
+	/* Init supend data */
+	memset(&sclp_suspend_req, 0, sizeof(sclp_suspend_req));
+	sclp_suspend_req.callback = sclp_suspend_req_cb;
+	sclp_suspend_req.status = SCLP_REQ_FILLED;
+	init_completion(&sclp_request_queue_flushed);
+
+	rc = sclp_add_request(&sclp_suspend_req);
+	if (rc == 0)
+		wait_for_completion(&sclp_request_queue_flushed);
+	else if (rc != -ENODATA)
+		goto fail_thaw;
+
+	rc = sclp_deactivate();
+	if (rc)
+		goto fail_thaw;
+	return 0;
+
+fail_thaw:
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_suspend_state = sclp_suspend_state_running;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	sclp_pm_event(SCLP_PM_EVENT_THAW, 1);
+	return rc;
+}
+
+static int sclp_undo_suspend(enum sclp_pm_event event)
+{
+	unsigned long flags;
+	int rc;
+
+	rc = sclp_reactivate();
+	if (rc)
+		return rc;
+
+	spin_lock_irqsave(&sclp_lock, flags);
+	sclp_suspend_state = sclp_suspend_state_running;
+	spin_unlock_irqrestore(&sclp_lock, flags);
+
+	sclp_pm_event(event, 0);
+	return 0;
+}
+
+static int sclp_thaw(struct device *dev)
+{
+	return sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+}
+
+static int sclp_restore(struct device *dev)
+{
+	return sclp_undo_suspend(SCLP_PM_EVENT_RESTORE);
+}
+
+static struct dev_pm_ops sclp_pm_ops = {
+	.freeze		= sclp_freeze,
+	.thaw		= sclp_thaw,
+	.restore	= sclp_restore,
+};
+
+static struct platform_driver sclp_pdrv = {
+	.driver = {
+		.name	= "sclp",
+		.owner	= THIS_MODULE,
+		.pm	= &sclp_pm_ops,
+	},
+};
+
+static struct platform_device *sclp_pdev;
+
 /* Initialize SCLP driver. Return zero if driver is operational, non-zero
  * otherwise. */
 static int
 sclp_init(void)
 {
 	unsigned long flags;
-	int rc;
+	int rc = 0;
 
 	spin_lock_irqsave(&sclp_lock, flags);
 	/* Check for previous or running initialization */
-	if (sclp_init_state != sclp_init_state_uninitialized) {
-		spin_unlock_irqrestore(&sclp_lock, flags);
-		return 0;
-	}
+	if (sclp_init_state != sclp_init_state_uninitialized)
+		goto fail_unlock;
 	sclp_init_state = sclp_init_state_initializing;
 	/* Set up variables */
 	INIT_LIST_HEAD(&sclp_req_queue);
@@ -904,27 +1057,17 @@ sclp_init(void)
 	spin_unlock_irqrestore(&sclp_lock, flags);
 	rc = sclp_check_interface();
 	spin_lock_irqsave(&sclp_lock, flags);
-	if (rc) {
-		sclp_init_state = sclp_init_state_uninitialized;
-		spin_unlock_irqrestore(&sclp_lock, flags);
-		return rc;
-	}
+	if (rc)
+		goto fail_init_state_uninitialized;
 	/* Register reboot handler */
 	rc = register_reboot_notifier(&sclp_reboot_notifier);
-	if (rc) {
-		sclp_init_state = sclp_init_state_uninitialized;
-		spin_unlock_irqrestore(&sclp_lock, flags);
-		return rc;
-	}
+	if (rc)
+		goto fail_init_state_uninitialized;
 	/* Register interrupt handler */
 	rc = register_early_external_interrupt(0x2401, sclp_interrupt_handler,
 					       &ext_int_info_hwc);
-	if (rc) {
-		unregister_reboot_notifier(&sclp_reboot_notifier);
-		sclp_init_state = sclp_init_state_uninitialized;
-		spin_unlock_irqrestore(&sclp_lock, flags);
-		return rc;
-	}
+	if (rc)
+		goto fail_unregister_reboot_notifier;
 	sclp_init_state = sclp_init_state_initialized;
 	spin_unlock_irqrestore(&sclp_lock, flags);
 	/* Enable service-signal external interruption - needs to happen with
@@ -932,11 +1075,56 @@
 	ctl_set_bit(0, 9);
 	sclp_init_mask(1);
 	return 0;
+
+fail_unregister_reboot_notifier:
+	unregister_reboot_notifier(&sclp_reboot_notifier);
+fail_init_state_uninitialized:
+	sclp_init_state = sclp_init_state_uninitialized;
+fail_unlock:
+	spin_unlock_irqrestore(&sclp_lock, flags);
+	return rc;
 }
 
+/*
+ * SCLP panic notifier: If we are suspended, we thaw SCLP in order to be able
+ * to print the panic message.
+ */
+static int sclp_panic_notify(struct notifier_block *self,
+			     unsigned long event, void *data)
+{
+	if (sclp_suspend_state == sclp_suspend_state_suspended)
+		sclp_undo_suspend(SCLP_PM_EVENT_THAW);
+	return NOTIFY_OK;
+}
+
+static struct notifier_block sclp_on_panic_nb = {
+	.notifier_call = sclp_panic_notify,
+	.priority = SCLP_PANIC_PRIO,
+};
+
 static __init int sclp_initcall(void)
 {
+	int rc;
+
+	rc = platform_driver_register(&sclp_pdrv);
+	if (rc)
+		return rc;
+	sclp_pdev = platform_device_register_simple("sclp", -1, NULL, 0);
+	rc = IS_ERR(sclp_pdev) ? PTR_ERR(sclp_pdev) : 0;
+	if (rc)
+		goto fail_platform_driver_unregister;
+	rc = atomic_notifier_chain_register(&panic_notifier_list,
+					    &sclp_on_panic_nb);
+	if (rc)
+		goto fail_platform_device_unregister;
+
 	return sclp_init();
+
+fail_platform_device_unregister:
+	platform_device_unregister(sclp_pdev);
+fail_platform_driver_unregister:
+	platform_driver_unregister(&sclp_pdrv);
+	return rc;
 }
 
 arch_initcall(sclp_initcall);
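As a caller-side illustration of the behaviour described in the commit message, namely that ordinary requests are rejected with -EIO while the interface is suspended, here is a hypothetical sketch. The example_* names are invented, only request fields visible in the patch above are used, and the SCCB contents are not filled in.

/* Hypothetical caller; a real request would also set up the SCCB contents. */
#include <asm/page.h>
#include "sclp.h"

static char example_sccb[PAGE_SIZE] __attribute__((__aligned__(PAGE_SIZE)));
static struct sclp_req example_req;

static int example_send(void)
{
	example_req.sccb = example_sccb;
	example_req.status = SCLP_REQ_FILLED;
	example_req.callback = NULL;

	/*
	 * Between sclp_freeze() and sclp_thaw()/sclp_restore(),
	 * __sclp_can_add_request() turns this down and -EIO is returned.
	 */
	return sclp_add_request(&example_req);
}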