path: root/drivers/s390/cio
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/ccwreq.c        |   2
-rw-r--r--  drivers/s390/cio/chsc.c          |   2
-rw-r--r--  drivers/s390/cio/chsc_sch.c      |   4
-rw-r--r--  drivers/s390/cio/cio.c           |  14
-rw-r--r--  drivers/s390/cio/crw.c           |  29
-rw-r--r--  drivers/s390/cio/css.c           |  79
-rw-r--r--  drivers/s390/cio/css.h           |   5
-rw-r--r--  drivers/s390/cio/device.c        | 160
-rw-r--r--  drivers/s390/cio/device.h        |   3
-rw-r--r--  drivers/s390/cio/device_fsm.c    |  43
-rw-r--r--  drivers/s390/cio/qdio.h          |  92
-rw-r--r--  drivers/s390/cio/qdio_debug.c    |  23
-rw-r--r--  drivers/s390/cio/qdio_main.c     |  28
-rw-r--r--  drivers/s390/cio/qdio_setup.c    |  20
-rw-r--r--  drivers/s390/cio/qdio_thinint.c  |   4
15 files changed, 329 insertions, 179 deletions
diff --git a/drivers/s390/cio/ccwreq.c b/drivers/s390/cio/ccwreq.c
index 7a28a3029a3f..37df42af05ec 100644
--- a/drivers/s390/cio/ccwreq.c
+++ b/drivers/s390/cio/ccwreq.c
@@ -224,8 +224,8 @@ static void ccwreq_log_status(struct ccw_device *cdev, enum io_status status)
  */
 void ccw_request_handler(struct ccw_device *cdev)
 {
+	struct irb *irb = (struct irb *)&S390_lowcore.irb;
 	struct ccw_request *req = &cdev->private->req;
-	struct irb *irb = (struct irb *) __LC_IRB;
 	enum io_status status;
 	int rc = -EOPNOTSUPP;
 
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 1ecd3e567648..4038f5b4f144 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -574,7 +574,7 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page)
 	secm_area->request.length = 0x0050;
 	secm_area->request.code = 0x0016;
 
-	secm_area->key = PAGE_DEFAULT_KEY;
+	secm_area->key = PAGE_DEFAULT_KEY >> 4;
 	secm_area->cub_addr1 = (u64)(unsigned long)css->cub_addr1;
 	secm_area->cub_addr2 = (u64)(unsigned long)css->cub_addr2;
 
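
A note on the recurring "PAGE_DEFAULT_KEY >> 4" change, here and in the qdio
files below: on s390 the storage key byte carries the 4-bit access-control
value in its high nibble, and PAGE_DEFAULT_KEY is defined along the lines of
the sketch that follows (values shown for context; check asm/page.h for the
authoritative definitions). The 4-bit key fields of the CHSC and QDIO control
blocks take only the access-control value, hence the shift.

/* Sketch of the assumed s390 definitions; illustrative, not quoted. */
#define PAGE_DEFAULT_ACC	0
#define PAGE_DEFAULT_KEY	(PAGE_DEFAULT_ACC << 4)

/* so e.g.  secm_area->key = PAGE_DEFAULT_KEY >> 4;  stores the plain
 * access-control value the control block expects. */
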
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c
index c84ac9443079..852612f5dba0 100644
--- a/drivers/s390/cio/chsc_sch.c
+++ b/drivers/s390/cio/chsc_sch.c
@@ -51,7 +51,7 @@ static void chsc_subchannel_irq(struct subchannel *sch)
 {
 	struct chsc_private *private = sch->private;
 	struct chsc_request *request = private->request;
-	struct irb *irb = (struct irb *)__LC_IRB;
+	struct irb *irb = (struct irb *)&S390_lowcore.irb;
 
 	CHSC_LOG(4, "irb");
 	CHSC_LOG_HEX(4, irb, sizeof(*irb));
@@ -237,7 +237,7 @@ static int chsc_async(struct chsc_async_area *chsc_area,
 	int ret = -ENODEV;
 	char dbf[10];
 
-	chsc_area->header.key = PAGE_DEFAULT_KEY;
+	chsc_area->header.key = PAGE_DEFAULT_KEY >> 4;
 	while ((sch = chsc_get_next_subchannel(sch))) {
 		spin_lock(sch->lock);
 		private = sch->private;
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 126f240715a4..f736cdcf08ad 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -625,8 +625,8 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
 	/*
 	 * Get interrupt information from lowcore
 	 */
-	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
-	irb = (struct irb *) __LC_IRB;
+	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
+	irb = (struct irb *)&S390_lowcore.irb;
 	do {
 		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
 		/*
@@ -661,7 +661,7 @@ void __irq_entry do_IRQ(struct pt_regs *regs)
 	 * We don't do this for VM because a tpi drops the cpu
 	 * out of the sie which costs more cycles than it saves.
 	 */
-	} while (!MACHINE_IS_VM && tpi (NULL) != 0);
+	} while (MACHINE_IS_LPAR && tpi(NULL) != 0);
 	irq_exit();
 	set_irq_regs(old_regs);
 }
@@ -682,10 +682,10 @@ static int cio_tpi(void)
 	struct irb *irb;
 	int irq_context;
 
-	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
+	tpi_info = (struct tpi_info *)&S390_lowcore.subchannel_id;
 	if (tpi(NULL) != 1)
 		return 0;
-	irb = (struct irb *) __LC_IRB;
+	irb = (struct irb *)&S390_lowcore.irb;
 	/* Store interrupt response block to lowcore. */
 	if (tsch(tpi_info->schid, irb) != 0)
 		/* Not status pending or not operational. */
@@ -885,7 +885,7 @@ __clear_io_subchannel_easy(struct subchannel_id schid)
 	struct tpi_info ti;
 
 	if (tpi(&ti)) {
-		tsch(ti.schid, (struct irb *)__LC_IRB);
+		tsch(ti.schid, (struct irb *)&S390_lowcore.irb);
 		if (schid_equal(&ti.schid, &schid))
 			return 0;
 	}
@@ -1083,7 +1083,7 @@ int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
 	struct subchannel_id schid;
 	struct schib schib;
 
-	schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
+	schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
 	if (!schid.one)
 		return -ENODEV;
 	if (stsch(schid, &schib))
diff --git a/drivers/s390/cio/crw.c b/drivers/s390/cio/crw.c
index d157665d0e76..425f741a280c 100644
--- a/drivers/s390/cio/crw.c
+++ b/drivers/s390/cio/crw.c
@@ -8,15 +8,16 @@
  *		 Heiko Carstens <heiko.carstens@de.ibm.com>,
  */
 
-#include <linux/semaphore.h>
 #include <linux/mutex.h>
 #include <linux/kthread.h>
 #include <linux/init.h>
+#include <linux/wait.h>
 #include <asm/crw.h>
 
-static struct semaphore crw_semaphore;
 static DEFINE_MUTEX(crw_handler_mutex);
 static crw_handler_t crw_handlers[NR_RSCS];
+static atomic_t crw_nr_req = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(crw_handler_wait_q);
 
 /**
  * crw_register_handler() - register a channel report word handler
@@ -59,12 +60,14 @@ void crw_unregister_handler(int rsc)
 static int crw_collect_info(void *unused)
 {
 	struct crw crw[2];
-	int ccode;
+	int ccode, signal;
 	unsigned int chain;
-	int ignore;
 
 repeat:
-	ignore = down_interruptible(&crw_semaphore);
+	signal = wait_event_interruptible(crw_handler_wait_q,
+					  atomic_read(&crw_nr_req) > 0);
+	if (unlikely(signal))
+		atomic_inc(&crw_nr_req);
 	chain = 0;
 	while (1) {
 		crw_handler_t handler;
@@ -122,25 +125,23 @@ repeat:
 		/* chain is always 0 or 1 here. */
 		chain = crw[chain].chn ? chain + 1 : 0;
 	}
+	if (atomic_dec_and_test(&crw_nr_req))
+		wake_up(&crw_handler_wait_q);
 	goto repeat;
 	return 0;
 }
 
 void crw_handle_channel_report(void)
 {
-	up(&crw_semaphore);
+	atomic_inc(&crw_nr_req);
+	wake_up(&crw_handler_wait_q);
 }
 
-/*
- * Separate initcall needed for semaphore initialization since
- * crw_handle_channel_report might be called before crw_machine_check_init.
- */
-static int __init crw_init_semaphore(void)
+void crw_wait_for_channel_report(void)
 {
-	init_MUTEX_LOCKED(&crw_semaphore);
-	return 0;
+	crw_handle_channel_report();
+	wait_event(crw_handler_wait_q, atomic_read(&crw_nr_req) == 0);
 }
-pure_initcall(crw_init_semaphore);
 
 /*
  * Machine checks for the channel subsystem must be enabled
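
The crw.c hunks replace a semaphore with a request counter and a wait queue:
crw_handle_channel_report() bumps the counter and wakes the collector thread,
the collector decrements it when a report is fully processed, and the new
crw_wait_for_channel_report() can therefore block until every posted channel
report has been consumed. A minimal user-space analogue of that
counter-plus-wait-queue scheme, with a pthread condition variable standing in
for the kernel wait queue (all names here are illustrative, not kernel API):

#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t wait_q = PTHREAD_COND_INITIALIZER;
static int nr_req;			/* stands in for crw_nr_req */

/* producer, analogous to crw_handle_channel_report() */
void post_request(void)
{
	pthread_mutex_lock(&lock);
	nr_req++;
	pthread_cond_broadcast(&wait_q);
	pthread_mutex_unlock(&lock);
}

/* collector: block until at least one request is pending */
void wait_for_request(void)
{
	pthread_mutex_lock(&lock);
	while (nr_req == 0)
		pthread_cond_wait(&wait_q, &lock);
	pthread_mutex_unlock(&lock);
}

/* collector done with one request; wake waiters once all are drained */
void complete_request(void)
{
	pthread_mutex_lock(&lock);
	if (--nr_req == 0)
		pthread_cond_broadcast(&wait_q);
	pthread_mutex_unlock(&lock);
}
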
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 7679aee6fa14..2769da54f2b9 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -18,6 +18,7 @@
 #include <linux/list.h>
 #include <linux/reboot.h>
 #include <linux/suspend.h>
+#include <linux/proc_fs.h>
 #include <asm/isc.h>
 #include <asm/crw.h>
 
@@ -232,7 +233,7 @@ void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo)
 	if (!get_device(&sch->dev))
 		return;
 	sch->todo = todo;
-	if (!queue_work(slow_path_wq, &sch->todo_work)) {
+	if (!queue_work(cio_work_q, &sch->todo_work)) {
 		/* Already queued, release workqueue ref. */
 		put_device(&sch->dev);
 	}
@@ -543,7 +544,7 @@ static void css_slow_path_func(struct work_struct *unused)
 }
 
 static DECLARE_WORK(slow_path_work, css_slow_path_func);
-struct workqueue_struct *slow_path_wq;
+struct workqueue_struct *cio_work_q;
 
 void css_schedule_eval(struct subchannel_id schid)
 {
@@ -552,7 +553,7 @@ void css_schedule_eval(struct subchannel_id schid)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_sch_add(slow_subchannel_set, schid);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -563,7 +564,7 @@ void css_schedule_eval_all(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_fill(slow_subchannel_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
@@ -594,14 +595,14 @@ void css_schedule_eval_all_unreg(void)
 	spin_lock_irqsave(&slow_subchannel_lock, flags);
 	idset_add_set(slow_subchannel_set, unreg_set);
 	atomic_set(&css_eval_scheduled, 1);
-	queue_work(slow_path_wq, &slow_path_work);
+	queue_work(cio_work_q, &slow_path_work);
 	spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 	idset_free(unreg_set);
 }
 
 void css_wait_for_slow_path(void)
 {
-	flush_workqueue(slow_path_wq);
+	flush_workqueue(cio_work_q);
 }
 
 /* Schedule reprobing of all unregistered subchannels. */
@@ -992,12 +993,21 @@ static int __init channel_subsystem_init(void)
 	ret = css_bus_init();
 	if (ret)
 		return ret;
-
+	cio_work_q = create_singlethread_workqueue("cio");
+	if (!cio_work_q) {
+		ret = -ENOMEM;
+		goto out_bus;
+	}
 	ret = io_subchannel_init();
 	if (ret)
-		css_bus_cleanup();
+		goto out_wq;
 
 	return ret;
+out_wq:
+	destroy_workqueue(cio_work_q);
+out_bus:
+	css_bus_cleanup();
+	return ret;
 }
 subsys_initcall(channel_subsystem_init);
 
@@ -1006,10 +1016,25 @@ static int css_settle(struct device_driver *drv, void *unused)
 	struct css_driver *cssdrv = to_cssdriver(drv);
 
 	if (cssdrv->settle)
-		cssdrv->settle();
+		return cssdrv->settle();
 	return 0;
 }
 
+int css_complete_work(void)
+{
+	int ret;
+
+	/* Wait for the evaluation of subchannels to finish. */
+	ret = wait_event_interruptible(css_eval_wq,
+				       atomic_read(&css_eval_scheduled) == 0);
+	if (ret)
+		return -EINTR;
+	flush_workqueue(cio_work_q);
+	/* Wait for the subchannel type specific initialization to finish */
+	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+}
+
+
 /*
  * Wait for the initialization of devices to finish, to make sure we are
  * done with our setup if the search for the root device starts.
@@ -1018,13 +1043,41 @@ static int __init channel_subsystem_init_sync(void)
 {
 	/* Start initial subchannel evaluation. */
 	css_schedule_eval_all();
-	/* Wait for the evaluation of subchannels to finish. */
-	wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
-	/* Wait for the subchannel type specific initialization to finish */
-	return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+	css_complete_work();
+	return 0;
 }
 subsys_initcall_sync(channel_subsystem_init_sync);
 
+#ifdef CONFIG_PROC_FS
+static ssize_t cio_settle_write(struct file *file, const char __user *buf,
+				size_t count, loff_t *ppos)
+{
+	int ret;
+
+	/* Handle pending CRW's. */
+	crw_wait_for_channel_report();
+	ret = css_complete_work();
+
+	return ret ? ret : count;
+}
+
+static const struct file_operations cio_settle_proc_fops = {
+	.write = cio_settle_write,
+};
+
+static int __init cio_settle_init(void)
+{
+	struct proc_dir_entry *entry;
+
+	entry = proc_create("cio_settle", S_IWUSR, NULL,
+			    &cio_settle_proc_fops);
+	if (!entry)
+		return -ENOMEM;
+	return 0;
+}
+device_initcall(cio_settle_init);
+#endif /*CONFIG_PROC_FS*/
+
 int sch_is_pseudo_sch(struct subchannel *sch)
 {
 	return sch == to_css(sch->dev.parent)->pseudo_subchannel;
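
The new /proc/cio_settle entry gives user space a synchronization point: a
write blocks until pending channel reports are handled and all scheduled
subchannel evaluation has finished, then returns the byte count. A
hypothetical caller could look like this (the data written is irrelevant;
only the blocking write matters):

#include <fcntl.h>
#include <unistd.h>

int cio_settle(void)
{
	int fd = open("/proc/cio_settle", O_WRONLY);
	ssize_t n;

	if (fd < 0)
		return -1;
	n = write(fd, "1", 1);	/* returns once evaluation is complete */
	close(fd);
	return n == 1 ? 0 : -1;
}
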
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index fe84b92cde60..7e37886de231 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -95,7 +95,7 @@ struct css_driver {
 	int (*freeze)(struct subchannel *);
 	int (*thaw) (struct subchannel *);
 	int (*restore)(struct subchannel *);
-	void (*settle)(void);
+	int (*settle)(void);
 	const char *name;
 };
 
@@ -146,12 +146,13 @@ extern struct channel_subsystem *channel_subsystems[];
 /* Helper functions to build lists for the slow path. */
 void css_schedule_eval(struct subchannel_id schid);
 void css_schedule_eval_all(void);
+int css_complete_work(void);
 
 int sch_is_pseudo_sch(struct subchannel *);
 struct schib;
 int css_sch_is_valid(struct schib *);
 
-extern struct workqueue_struct *slow_path_wq;
+extern struct workqueue_struct *cio_work_q;
 void css_wait_for_slow_path(void);
 void css_sched_sch_todo(struct subchannel *sch, enum sch_todo todo);
 #endif
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index a6c7d5426fb2..c6abb75c4615 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -136,7 +136,6 @@ static int io_subchannel_sch_event(struct subchannel *, int);
 static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
 				   int);
 static void recovery_func(unsigned long data);
-struct workqueue_struct *ccw_device_work;
 wait_queue_head_t ccw_device_init_wq;
 atomic_t ccw_device_init_count;
 
@@ -159,11 +158,16 @@ static int io_subchannel_prepare(struct subchannel *sch)
 	return 0;
 }
 
-static void io_subchannel_settle(void)
+static int io_subchannel_settle(void)
 {
-	wait_event(ccw_device_init_wq,
-		   atomic_read(&ccw_device_init_count) == 0);
-	flush_workqueue(ccw_device_work);
+	int ret;
+
+	ret = wait_event_interruptible(ccw_device_init_wq,
+		atomic_read(&ccw_device_init_count) == 0);
+	if (ret)
+		return -EINTR;
+	flush_workqueue(cio_work_q);
+	return 0;
 }
 
 static struct css_driver io_subchannel_driver = {
@@ -188,27 +192,13 @@ int __init io_subchannel_init(void)
 	atomic_set(&ccw_device_init_count, 0);
 	setup_timer(&recovery_timer, recovery_func, 0);
 
-	ccw_device_work = create_singlethread_workqueue("cio");
-	if (!ccw_device_work)
-		return -ENOMEM;
-	slow_path_wq = create_singlethread_workqueue("kslowcrw");
-	if (!slow_path_wq) {
-		ret = -ENOMEM;
-		goto out_err;
-	}
-	if ((ret = bus_register (&ccw_bus_type)))
-		goto out_err;
-
+	ret = bus_register(&ccw_bus_type);
+	if (ret)
+		return ret;
 	ret = css_driver_register(&io_subchannel_driver);
 	if (ret)
-		goto out_err;
+		bus_unregister(&ccw_bus_type);
 
-	return 0;
-out_err:
-	if (ccw_device_work)
-		destroy_workqueue(ccw_device_work);
-	if (slow_path_wq)
-		destroy_workqueue(slow_path_wq);
 	return ret;
 }
 
@@ -1348,7 +1338,7 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
 		/* Not operational. */
 		if (!cdev)
 			return IO_SCH_UNREG;
-		if (!ccw_device_notify(cdev, CIO_GONE))
+		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
 			return IO_SCH_UNREG;
 		return IO_SCH_ORPH_UNREG;
 	}
@@ -1356,12 +1346,12 @@ static enum io_sch_action sch_get_action(struct subchannel *sch)
 	if (!cdev)
 		return IO_SCH_ATTACH;
 	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
-		if (!ccw_device_notify(cdev, CIO_GONE))
+		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
 			return IO_SCH_UNREG_ATTACH;
 		return IO_SCH_ORPH_ATTACH;
 	}
 	if ((sch->schib.pmcw.pam & sch->opm) == 0) {
-		if (!ccw_device_notify(cdev, CIO_NO_PATH))
+		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
 			return IO_SCH_UNREG;
 		return IO_SCH_DISC;
 	}
@@ -1410,6 +1400,12 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
 		rc = 0;
 		goto out_unlock;
 	case IO_SCH_VERIFY:
+		if (cdev->private->flags.resuming == 1) {
+			if (cio_enable_subchannel(sch, (u32)(addr_t)sch)) {
+				ccw_device_set_notoper(cdev);
+				break;
+			}
+		}
 		/* Trigger path verification. */
 		io_subchannel_verify(sch);
 		rc = 0;
@@ -1448,7 +1444,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
 		break;
 	case IO_SCH_UNREG_ATTACH:
 		/* Unregister ccw device. */
-		ccw_device_unregister(cdev);
+		if (!cdev->private->flags.resuming)
+			ccw_device_unregister(cdev);
 		break;
 	default:
 		break;
@@ -1457,7 +1454,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int process)
 	switch (action) {
 	case IO_SCH_ORPH_UNREG:
 	case IO_SCH_UNREG:
-		css_sch_device_unregister(sch);
+		if (!cdev || !cdev->private->flags.resuming)
+			css_sch_device_unregister(sch);
 		break;
 	case IO_SCH_ORPH_ATTACH:
 	case IO_SCH_UNREG_ATTACH:
@@ -1779,26 +1777,42 @@ static void __ccw_device_pm_restore(struct ccw_device *cdev)
 {
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 
-	if (cio_is_console(sch->schid))
-		goto out;
+	spin_lock_irq(sch->lock);
+	if (cio_is_console(sch->schid)) {
+		cio_enable_subchannel(sch, (u32)(addr_t)sch);
+		goto out_unlock;
+	}
 	/*
 	 * While we were sleeping, devices may have gone or become
 	 * available again. Kick re-detection.
 	 */
-	spin_lock_irq(sch->lock);
 	cdev->private->flags.resuming = 1;
+	css_schedule_eval(sch->schid);
+	spin_unlock_irq(sch->lock);
+	css_complete_work();
+
+	/* cdev may have been moved to a different subchannel. */
+	sch = to_subchannel(cdev->dev.parent);
+	spin_lock_irq(sch->lock);
+	if (cdev->private->state != DEV_STATE_ONLINE &&
+	    cdev->private->state != DEV_STATE_OFFLINE)
+		goto out_unlock;
+
 	ccw_device_recognition(cdev);
 	spin_unlock_irq(sch->lock);
 	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev) ||
 		   cdev->private->state == DEV_STATE_DISCONNECTED);
-out:
+	spin_lock_irq(sch->lock);
+
+out_unlock:
 	cdev->private->flags.resuming = 0;
+	spin_unlock_irq(sch->lock);
 }
 
 static int resume_handle_boxed(struct ccw_device *cdev)
 {
 	cdev->private->state = DEV_STATE_BOXED;
-	if (ccw_device_notify(cdev, CIO_BOXED))
+	if (ccw_device_notify(cdev, CIO_BOXED) == NOTIFY_OK)
 		return 0;
 	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 	return -ENODEV;
@@ -1807,7 +1821,7 @@ static int resume_handle_boxed(struct ccw_device *cdev)
 static int resume_handle_disc(struct ccw_device *cdev)
 {
 	cdev->private->state = DEV_STATE_DISCONNECTED;
-	if (ccw_device_notify(cdev, CIO_GONE))
+	if (ccw_device_notify(cdev, CIO_GONE) == NOTIFY_OK)
 		return 0;
 	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 	return -ENODEV;
@@ -1816,40 +1830,31 @@ static int resume_handle_disc(struct ccw_device *cdev)
 static int ccw_device_pm_restore(struct device *dev)
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
-	struct subchannel *sch = to_subchannel(cdev->dev.parent);
-	int ret = 0, cm_enabled;
+	struct subchannel *sch;
+	int ret = 0;
 
 	__ccw_device_pm_restore(cdev);
+	sch = to_subchannel(cdev->dev.parent);
 	spin_lock_irq(sch->lock);
-	if (cio_is_console(sch->schid)) {
-		cio_enable_subchannel(sch, (u32)(addr_t)sch);
-		spin_unlock_irq(sch->lock);
+	if (cio_is_console(sch->schid))
 		goto out_restore;
-	}
-	cdev->private->flags.donotify = 0;
+
 	/* check recognition results */
 	switch (cdev->private->state) {
 	case DEV_STATE_OFFLINE:
+	case DEV_STATE_ONLINE:
+		cdev->private->flags.donotify = 0;
 		break;
 	case DEV_STATE_BOXED:
 		ret = resume_handle_boxed(cdev);
-		spin_unlock_irq(sch->lock);
 		if (ret)
-			goto out;
+			goto out_unlock;
 		goto out_restore;
-	case DEV_STATE_DISCONNECTED:
-		goto out_disc_unlock;
 	default:
-		goto out_unreg_unlock;
-	}
-	/* check if the device id has changed */
-	if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
-		CIO_MSG_EVENT(0, "resume: sch 0.%x.%04x: failed (devno "
-			      "changed from %04x to %04x)\n",
-			      sch->schid.ssid, sch->schid.sch_no,
-			      cdev->private->dev_id.devno,
-			      sch->schib.pmcw.dev);
-		goto out_unreg_unlock;
+		ret = resume_handle_disc(cdev);
+		if (ret)
+			goto out_unlock;
+		goto out_restore;
 	}
 	/* check if the device type has changed */
 	if (!ccw_device_test_sense_data(cdev)) {
@@ -1858,24 +1863,30 @@ static int ccw_device_pm_restore(struct device *dev)
 		ret = -ENODEV;
 		goto out_unlock;
 	}
-	if (!cdev->online) {
-		ret = 0;
+	if (!cdev->online)
 		goto out_unlock;
-	}
-	ret = ccw_device_online(cdev);
-	if (ret)
-		goto out_disc_unlock;
 
-	cm_enabled = cdev->private->cmb != NULL;
+	if (ccw_device_online(cdev)) {
+		ret = resume_handle_disc(cdev);
+		if (ret)
+			goto out_unlock;
+		goto out_restore;
+	}
 	spin_unlock_irq(sch->lock);
-
 	wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
-	if (cdev->private->state != DEV_STATE_ONLINE) {
-		spin_lock_irq(sch->lock);
-		goto out_disc_unlock;
+	spin_lock_irq(sch->lock);
+
+	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_BAD) {
+		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
+		ret = -ENODEV;
+		goto out_unlock;
 	}
-	if (cm_enabled) {
+
+	/* reenable cmf, if needed */
+	if (cdev->private->cmb) {
+		spin_unlock_irq(sch->lock);
 		ret = ccw_set_cmf(cdev, 1);
+		spin_lock_irq(sch->lock);
 		if (ret) {
 			CIO_MSG_EVENT(2, "resume: cdev 0.%x.%04x: cmf failed "
 				      "(rc=%d)\n", cdev->private->dev_id.ssid,
@@ -1885,21 +1896,11 @@ static int ccw_device_pm_restore(struct device *dev)
 	}
 
 out_restore:
+	spin_unlock_irq(sch->lock);
 	if (cdev->online && cdev->drv && cdev->drv->restore)
 		ret = cdev->drv->restore(cdev);
-out:
 	return ret;
 
-out_disc_unlock:
-	ret = resume_handle_disc(cdev);
-	spin_unlock_irq(sch->lock);
-	if (ret)
-		return ret;
-	goto out_restore;
-
-out_unreg_unlock:
-	ccw_device_sched_todo(cdev, CDEV_TODO_UNREG_EVAL);
-	ret = -ENODEV;
 out_unlock:
 	spin_unlock_irq(sch->lock);
 	return ret;
@@ -2028,7 +2029,7 @@ void ccw_device_sched_todo(struct ccw_device *cdev, enum cdev_todo todo)
 	/* Get workqueue ref. */
 	if (!get_device(&cdev->dev))
 		return;
-	if (!queue_work(slow_path_wq, &cdev->private->todo_work)) {
+	if (!queue_work(cio_work_q, &cdev->private->todo_work)) {
 		/* Already queued, release workqueue ref. */
 		put_device(&cdev->dev);
 	}
@@ -2041,5 +2042,4 @@ EXPORT_SYMBOL(ccw_driver_register);
 EXPORT_SYMBOL(ccw_driver_unregister);
 EXPORT_SYMBOL(get_ccwdev_by_busid);
 EXPORT_SYMBOL(ccw_bus_type);
-EXPORT_SYMBOL(ccw_device_work);
 EXPORT_SYMBOL_GPL(ccw_device_get_subchannel_id);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index bcfe13e42638..379de2d1ec49 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -4,7 +4,7 @@
 #include <asm/ccwdev.h>
 #include <asm/atomic.h>
 #include <linux/wait.h>
-
+#include <linux/notifier.h>
 #include "io_sch.h"
 
 /*
@@ -71,7 +71,6 @@ dev_fsm_final_state(struct ccw_device *cdev)
 		 cdev->private->state == DEV_STATE_BOXED);
 }
 
-extern struct workqueue_struct *ccw_device_work;
 extern wait_queue_head_t ccw_device_init_wq;
 extern atomic_t ccw_device_init_count;
 int __init io_subchannel_init(void);
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index ae760658a131..c56ab94612f9 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -313,21 +313,43 @@ ccw_device_sense_id_done(struct ccw_device *cdev, int err)
 	}
 }
 
+/**
+ * ccw_device_notify() - inform the device's driver about an event
+ * @cdev: device for which an event occurred
+ * @event: event that occurred
+ *
+ * Returns:
+ *   -%EINVAL if the device is offline or has no driver.
+ *   -%EOPNOTSUPP if the device's driver has no notifier registered.
+ *   %NOTIFY_OK if the driver wants to keep the device.
+ *   %NOTIFY_BAD if the driver doesn't want to keep the device.
+ */
 int ccw_device_notify(struct ccw_device *cdev, int event)
 {
+	int ret = -EINVAL;
+
 	if (!cdev->drv)
-		return 0;
+		goto out;
 	if (!cdev->online)
-		return 0;
+		goto out;
 	CIO_MSG_EVENT(2, "notify called for 0.%x.%04x, event=%d\n",
 		      cdev->private->dev_id.ssid, cdev->private->dev_id.devno,
 		      event);
-	return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0;
+	if (!cdev->drv->notify) {
+		ret = -EOPNOTSUPP;
+		goto out;
+	}
+	if (cdev->drv->notify(cdev, event))
+		ret = NOTIFY_OK;
+	else
+		ret = NOTIFY_BAD;
+out:
+	return ret;
 }
 
 static void ccw_device_oper_notify(struct ccw_device *cdev)
 {
-	if (ccw_device_notify(cdev, CIO_OPER)) {
+	if (ccw_device_notify(cdev, CIO_OPER) == NOTIFY_OK) {
 		/* Reenable channel measurements, if needed. */
 		ccw_device_sched_todo(cdev, CDEV_TODO_ENABLE_CMF);
 		return;
@@ -361,14 +383,15 @@ ccw_device_done(struct ccw_device *cdev, int state)
 	case DEV_STATE_BOXED:
 		CIO_MSG_EVENT(0, "Boxed device %04x on subchannel %04x\n",
 			      cdev->private->dev_id.devno, sch->schid.sch_no);
-		if (cdev->online && !ccw_device_notify(cdev, CIO_BOXED))
+		if (cdev->online &&
+		    ccw_device_notify(cdev, CIO_BOXED) != NOTIFY_OK)
 			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 		cdev->private->flags.donotify = 0;
 		break;
 	case DEV_STATE_NOT_OPER:
 		CIO_MSG_EVENT(0, "Device %04x gone on subchannel %04x\n",
 			      cdev->private->dev_id.devno, sch->schid.sch_no);
-		if (!ccw_device_notify(cdev, CIO_GONE))
+		if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
 			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 		else
 			ccw_device_set_disconnected(cdev);
@@ -378,7 +401,7 @@ ccw_device_done(struct ccw_device *cdev, int state)
 		CIO_MSG_EVENT(0, "Disconnected device %04x on subchannel "
 			      "%04x\n", cdev->private->dev_id.devno,
 			      sch->schid.sch_no);
-		if (!ccw_device_notify(cdev, CIO_NO_PATH))
+		if (ccw_device_notify(cdev, CIO_NO_PATH) != NOTIFY_OK)
 			ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 		else
 			ccw_device_set_disconnected(cdev);
@@ -586,7 +609,7 @@ ccw_device_offline(struct ccw_device *cdev)
 static void ccw_device_generic_notoper(struct ccw_device *cdev,
 				       enum dev_event dev_event)
 {
-	if (!ccw_device_notify(cdev, CIO_GONE))
+	if (ccw_device_notify(cdev, CIO_GONE) != NOTIFY_OK)
 		ccw_device_sched_todo(cdev, CDEV_TODO_UNREG);
 	else
 		ccw_device_set_disconnected(cdev);
@@ -667,7 +690,7 @@ ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
 	struct irb *irb;
 	int is_cmd;
 
-	irb = (struct irb *) __LC_IRB;
+	irb = (struct irb *)&S390_lowcore.irb;
 	is_cmd = !scsw_is_tm(&irb->scsw);
 	/* Check for unsolicited interrupt. */
 	if (!scsw_is_solicited(&irb->scsw)) {
@@ -732,7 +755,7 @@ ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
 {
 	struct irb *irb;
 
-	irb = (struct irb *) __LC_IRB;
+	irb = (struct irb *)&S390_lowcore.irb;
 	/* Check for unsolicited interrupt. */
 	if (scsw_stctl(&irb->scsw) ==
 	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
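
With the new kernel-doc above, ccw_device_notify() translates the driver
callback's boolean result into notifier constants, which is why every caller
in this diff now compares against NOTIFY_OK or NOTIFY_BAD instead of testing
truthiness. On the driver side the contract is unchanged: return nonzero to
keep the device. A kernel-context sketch of a callback under this convention
(my_keep_device() is a hypothetical driver helper, not a kernel function):

/* sketch only; assumes the usual ccwdev.h / cio definitions */
static int my_keep_device(struct ccw_device *cdev);

static int my_notify(struct ccw_device *cdev, int event)
{
	switch (event) {
	case CIO_GONE:
	case CIO_NO_PATH:
		return my_keep_device(cdev);	/* 1: keep, 0: give up */
	default:
		return 1;	/* mapped to NOTIFY_OK by the common code */
	}
}
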
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 44f2f6a97f33..48aa0647432b 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -208,18 +208,27 @@ struct qdio_dev_perf_stat {
 	unsigned int eqbs_partial;
 	unsigned int sqbs;
 	unsigned int sqbs_partial;
+} ____cacheline_aligned;
+
+struct qdio_queue_perf_stat {
+	/*
+	 * Sorted into order-2 buckets: 1, 2-3, 4-7, ... 64-127, 128.
+	 * Since max. 127 SBALs are scanned reuse entry for 128 as queue full
+	 * aka 127 SBALs found.
+	 */
+	unsigned int nr_sbals[8];
+	unsigned int nr_sbal_error;
+	unsigned int nr_sbal_nop;
+	unsigned int nr_sbal_total;
 };
 
 struct qdio_input_q {
 	/* input buffer acknowledgement flag */
 	int polling;
-
 	/* first ACK'ed buffer */
 	int ack_start;
-
 	/* how much sbals are acknowledged with qebsm */
 	int ack_count;
-
 	/* last time of noticing incoming data */
 	u64 timestamp;
 };
@@ -227,40 +236,27 @@ struct qdio_input_q {
 struct qdio_output_q {
 	/* PCIs are enabled for the queue */
 	int pci_out_enabled;
-
 	/* IQDIO: output multiple buffers (enhanced SIGA) */
 	int use_enh_siga;
-
 	/* timer to check for more outbound work */
 	struct timer_list timer;
 };
 
+/*
+ * Note on cache alignment: grouped slsb and write mostly data at the beginning
+ * sbal[] is read-only and starts on a new cacheline followed by read mostly.
+ */
 struct qdio_q {
 	struct slsb slsb;
+
 	union {
 		struct qdio_input_q in;
 		struct qdio_output_q out;
 	} u;
 
-	/* queue number */
-	int nr;
-
-	/* bitmask of queue number */
-	int mask;
-
-	/* input or output queue */
-	int is_input_q;
-
-	/* list of thinint input queues */
-	struct list_head entry;
-
-	/* upper-layer program handler */
-	qdio_handler_t (*handler);
-
 	/*
 	 * inbound: next buffer the program should check for
-	 * outbound: next buffer to check for having been processed
-	 * by the card
+	 * outbound: next buffer to check if adapter processed it
 	 */
 	int first_to_check;
 
@@ -273,16 +269,32 @@ struct qdio_q {
 	/* number of buffers in use by the adapter */
 	atomic_t nr_buf_used;
 
-	struct qdio_irq *irq_ptr;
-	struct dentry *debugfs_q;
-	struct tasklet_struct tasklet;
-
 	/* error condition during a data transfer */
 	unsigned int qdio_error;
 
-	struct sl *sl;
-	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q];
+	struct tasklet_struct tasklet;
+	struct qdio_queue_perf_stat q_stats;
+
+	struct qdio_buffer *sbal[QDIO_MAX_BUFFERS_PER_Q] ____cacheline_aligned;
+
+	/* queue number */
+	int nr;
+
+	/* bitmask of queue number */
+	int mask;
+
+	/* input or output queue */
+	int is_input_q;
+
+	/* list of thinint input queues */
+	struct list_head entry;
 
+	/* upper-layer program handler */
+	qdio_handler_t (*handler);
+
+	struct dentry *debugfs_q;
+	struct qdio_irq *irq_ptr;
+	struct sl *sl;
 	/*
 	 * Warning: Leave this member at the end so it won't be cleared in
 	 * qdio_fill_qs. A page is allocated under this pointer and used for
@@ -317,12 +329,8 @@ struct qdio_irq {
 	struct qdio_ssqd_desc ssqd_desc;
 	void (*orig_handler) (struct ccw_device *, unsigned long, struct irb *);
 
-	struct qdio_dev_perf_stat perf_stat;
 	int perf_stat_enabled;
-	/*
-	 * Warning: Leave these members together at the end so they won't be
-	 * cleared in qdio_setup_irq.
-	 */
+
 	struct qdr *qdr;
 	unsigned long chsc_page;
 
@@ -331,6 +339,7 @@ struct qdio_irq {
 
 	debug_info_t *debug_area;
 	struct mutex setup_mutex;
+	struct qdio_dev_perf_stat perf_stat;
 };
 
 /* helper functions */
@@ -341,9 +350,20 @@ struct qdio_irq {
 	(irq->qib.qfmt == QDIO_IQDIO_QFMT || \
 	 css_general_characteristics.aif_osa)
 
-#define qperf(qdev,attr) qdev->perf_stat.attr
-#define qperf_inc(q,attr) if (q->irq_ptr->perf_stat_enabled) \
-	q->irq_ptr->perf_stat.attr++
+#define qperf(__qdev, __attr)	((__qdev)->perf_stat.__attr)
+
+#define qperf_inc(__q, __attr)						\
+({									\
+	struct qdio_irq *qdev = (__q)->irq_ptr;				\
+	if (qdev->perf_stat_enabled)					\
+		(qdev->perf_stat.__attr)++;				\
+})
+
+static inline void account_sbals_error(struct qdio_q *q, int count)
+{
+	q->q_stats.nr_sbal_error += count;
+	q->q_stats.nr_sbal_total += count;
+}
 
 /* the highest iqdio queue is used for multicast */
 static inline int multicast_outbound(struct qdio_q *q)
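
Two things about the rewritten qperf_inc(): wrapping the body in a GCC
statement expression makes the macro behave as a single statement (the old
bare "if" form is subject to the classic dangling-else hazard), and the local
qdev evaluates __q only once. A standalone demonstration of the hazard (the
macro names here are made up for the demo):

#include <stdio.h>

#define BAD_INC(flag, x)	if (flag) (x)++
#define GOOD_INC(flag, x)	({ if (flag) (x)++; })

int main(void)
{
	int enabled = 0, ctr = 0, other = 0;

	if (enabled)
		BAD_INC(enabled, ctr);
	else
		other++;	/* never runs: the else binds to BAD_INC's if */

	if (enabled)
		GOOD_INC(enabled, ctr);
	else
		other++;	/* runs as intended */

	printf("ctr=%d other=%d\n", ctr, other);	/* prints ctr=0 other=1 */
	return 0;
}
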
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index f49761ff9a00..c94eb2a0fa2e 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -60,7 +60,7 @@ static int qstat_show(struct seq_file *m, void *v)
 	seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move);
 	seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
 		   q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count);
-	seq_printf(m, "slsb buffer states:\n");
+	seq_printf(m, "SBAL states:\n");
 	seq_printf(m, "|0      |8      |16     |24     |32     |40     |48     |56  63|\n");
 
 	for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) {
@@ -97,6 +97,20 @@ static int qstat_show(struct seq_file *m, void *v)
 	}
 	seq_printf(m, "\n");
 	seq_printf(m, "|64     |72     |80     |88     |96     |104    |112    | 127|\n");
+
+	seq_printf(m, "\nSBAL statistics:");
+	if (!q->irq_ptr->perf_stat_enabled) {
+		seq_printf(m, " disabled\n");
+		return 0;
+	}
+
+	seq_printf(m, "\n1          2..        4..        8..        "
+		   "16..       32..       64..       127\n");
+	for (i = 0; i < ARRAY_SIZE(q->q_stats.nr_sbals); i++)
+		seq_printf(m, "%-10u ", q->q_stats.nr_sbals[i]);
+	seq_printf(m, "\nError      NOP        Total\n%-10u %-10u %-10u\n\n",
+		   q->q_stats.nr_sbal_error, q->q_stats.nr_sbal_nop,
+		   q->q_stats.nr_sbal_total);
 	return 0;
 }
 
@@ -181,9 +195,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
 {
 	struct seq_file *seq = file->private_data;
 	struct qdio_irq *irq_ptr = seq->private;
+	struct qdio_q *q;
 	unsigned long val;
 	char buf[8];
-	int ret;
+	int ret, i;
 
 	if (!irq_ptr)
 		return 0;
@@ -201,6 +216,10 @@ static ssize_t qperf_seq_write(struct file *file, const char __user *ubuf,
 	case 0:
 		irq_ptr->perf_stat_enabled = 0;
 		memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+		for_each_input_queue(irq_ptr, q, i)
+			memset(&q->q_stats, 0, sizeof(q->q_stats));
+		for_each_output_queue(irq_ptr, q, i)
+			memset(&q->q_stats, 0, sizeof(q->q_stats));
 		break;
 	case 1:
 		irq_ptr->perf_stat_enabled = 1;
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 62b654af9237..232ef047ba34 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -392,6 +392,20 @@ static inline void qdio_stop_polling(struct qdio_q *q)
 		set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
 }
 
+static inline void account_sbals(struct qdio_q *q, int count)
+{
+	int pos = 0;
+
+	q->q_stats.nr_sbal_total += count;
+	if (count == QDIO_MAX_BUFFERS_MASK) {
+		q->q_stats.nr_sbals[7]++;
+		return;
+	}
+	while (count >>= 1)
+		pos++;
+	q->q_stats.nr_sbals[pos]++;
+}
+
 static void announce_buffer_error(struct qdio_q *q, int count)
 {
 	q->qdio_error |= QDIO_ERROR_SLSB_STATE;
@@ -487,16 +501,22 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 		q->first_to_check = add_buf(q->first_to_check, count);
 		if (atomic_sub(count, &q->nr_buf_used) == 0)
 			qperf_inc(q, inbound_queue_full);
+		if (q->irq_ptr->perf_stat_enabled)
+			account_sbals(q, count);
 		break;
 	case SLSB_P_INPUT_ERROR:
 		announce_buffer_error(q, count);
 		/* process the buffer, the upper layer will take care of it */
 		q->first_to_check = add_buf(q->first_to_check, count);
 		atomic_sub(count, &q->nr_buf_used);
+		if (q->irq_ptr->perf_stat_enabled)
+			account_sbals_error(q, count);
 		break;
 	case SLSB_CU_INPUT_EMPTY:
 	case SLSB_P_INPUT_NOT_INIT:
 	case SLSB_P_INPUT_ACK:
+		if (q->irq_ptr->perf_stat_enabled)
+			q->q_stats.nr_sbal_nop++;
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop");
 		break;
 	default:
@@ -514,7 +534,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
 
 	if ((bufnr != q->last_move) || q->qdio_error) {
 		q->last_move = bufnr;
-		if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM)
+		if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
 			q->u.in.timestamp = get_usecs();
 		return 1;
 	} else
@@ -643,15 +663,21 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
 
 		atomic_sub(count, &q->nr_buf_used);
 		q->first_to_check = add_buf(q->first_to_check, count);
+		if (q->irq_ptr->perf_stat_enabled)
+			account_sbals(q, count);
 		break;
 	case SLSB_P_OUTPUT_ERROR:
 		announce_buffer_error(q, count);
 		/* process the buffer, the upper layer will take care of it */
 		q->first_to_check = add_buf(q->first_to_check, count);
 		atomic_sub(count, &q->nr_buf_used);
+		if (q->irq_ptr->perf_stat_enabled)
+			account_sbals_error(q, count);
 		break;
 	case SLSB_CU_OUTPUT_PRIMED:
 		/* the adapter has not fetched the output yet */
+		if (q->irq_ptr->perf_stat_enabled)
+			q->q_stats.nr_sbal_nop++;
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr);
 		break;
 	case SLSB_P_OUTPUT_NOT_INIT:
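
For reference, the bucketing in the new account_sbals() computes
floor(log2(count)), with the special case that a full scan of 127 SBALs
(QDIO_MAX_BUFFERS_MASK) is diverted into the last bucket, matching the
nr_sbals[8] layout documented in qdio.h. A standalone check of that logic:

#include <assert.h>

#define QDIO_MAX_BUFFERS_MASK	127	/* as in qdio.h */

static int sbal_bucket(int count)
{
	int pos = 0;

	if (count == QDIO_MAX_BUFFERS_MASK)
		return 7;		/* queue full */
	while (count >>= 1)
		pos++;
	return pos;			/* floor(log2(count)) */
}

int main(void)
{
	assert(sbal_bucket(1) == 0);	/* bucket "1" */
	assert(sbal_bucket(3) == 1);	/* bucket "2..3" */
	assert(sbal_bucket(5) == 2);	/* bucket "4..7" */
	assert(sbal_bucket(126) == 6);	/* bucket "64..126" */
	assert(sbal_bucket(127) == 7);	/* queue full */
	return 0;
}
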
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 8c2dea5fa2b4..7f4a75465140 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -333,10 +333,10 @@ static void __qdio_allocate_fill_qdr(struct qdio_irq *irq_ptr,
 	irq_ptr->qdr->qdf0[i + nr].slsba =
 		(unsigned long)&irq_ptr_qs[i]->slsb.val[0];
 
-	irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY;
-	irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY;
-	irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY;
-	irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY;
+	irq_ptr->qdr->qdf0[i + nr].akey = PAGE_DEFAULT_KEY >> 4;
+	irq_ptr->qdr->qdf0[i + nr].bkey = PAGE_DEFAULT_KEY >> 4;
+	irq_ptr->qdr->qdf0[i + nr].ckey = PAGE_DEFAULT_KEY >> 4;
+	irq_ptr->qdr->qdf0[i + nr].dkey = PAGE_DEFAULT_KEY >> 4;
 }
 
 static void setup_qdr(struct qdio_irq *irq_ptr,
@@ -350,7 +350,7 @@ static void setup_qdr(struct qdio_irq *irq_ptr,
 	irq_ptr->qdr->iqdsz = sizeof(struct qdesfmt0) / 4; /* size in words */
 	irq_ptr->qdr->oqdsz = sizeof(struct qdesfmt0) / 4;
 	irq_ptr->qdr->qiba = (unsigned long)&irq_ptr->qib;
-	irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY;
+	irq_ptr->qdr->qkey = PAGE_DEFAULT_KEY >> 4;
 
 	for (i = 0; i < qdio_init->no_input_qs; i++)
 		__qdio_allocate_fill_qdr(irq_ptr, irq_ptr->input_qs, i, 0);
@@ -382,7 +382,15 @@ int qdio_setup_irq(struct qdio_initialize *init_data)
 	struct qdio_irq *irq_ptr = init_data->cdev->private->qdio_data;
 	int rc;
 
-	memset(irq_ptr, 0, ((char *)&irq_ptr->qdr) - ((char *)irq_ptr));
+	memset(&irq_ptr->qib, 0, sizeof(irq_ptr->qib));
+	memset(&irq_ptr->siga_flag, 0, sizeof(irq_ptr->siga_flag));
+	memset(&irq_ptr->ccw, 0, sizeof(irq_ptr->ccw));
+	memset(&irq_ptr->ssqd_desc, 0, sizeof(irq_ptr->ssqd_desc));
+	memset(&irq_ptr->perf_stat, 0, sizeof(irq_ptr->perf_stat));
+
+	irq_ptr->debugfs_dev = irq_ptr->debugfs_perf = NULL;
+	irq_ptr->sch_token = irq_ptr->state = irq_ptr->perf_stat_enabled = 0;
+
 	/* wipes qib.ac, required by ar7063 */
 	memset(irq_ptr->qdr, 0, sizeof(struct qdr));
 
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 091d904d3182..9942c1031b25 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -198,8 +198,8 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset)
 		.code = 0x0021,
 	};
 	scssc_area->operation_code = 0;
-	scssc_area->ks = PAGE_DEFAULT_KEY;
-	scssc_area->kc = PAGE_DEFAULT_KEY;
+	scssc_area->ks = PAGE_DEFAULT_KEY >> 4;
+	scssc_area->kc = PAGE_DEFAULT_KEY >> 4;
 	scssc_area->isc = QDIO_AIRQ_ISC;
 	scssc_area->schid = irq_ptr->schid;
 