path: root/drivers/s390/cio
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r--  drivers/s390/cio/chsc.c        |   5
-rw-r--r--  drivers/s390/cio/cio.c         |  95
-rw-r--r--  drivers/s390/cio/css.c         | 203
-rw-r--r--  drivers/s390/cio/device.c      | 109
-rw-r--r--  drivers/s390/cio/device_fsm.c  |  40
-rw-r--r--  drivers/s390/cio/device_ops.c  |  17
-rw-r--r--  drivers/s390/cio/device_pgid.c |  81
-rw-r--r--  drivers/s390/cio/qdio.c        |   4
-rw-r--r--  drivers/s390/cio/qdio.h        |  16
9 files changed, 342 insertions(+), 228 deletions(-)
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index c28444af0919..3bb4e472d73d 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -256,7 +256,7 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 	/* trigger path verification. */
 	if (sch->driver && sch->driver->verify)
 		sch->driver->verify(&sch->dev);
-	else if (sch->vpm == mask)
+	else if (sch->lpm == mask)
 		goto out_unreg;
 out_unlock:
 	spin_unlock_irq(&sch->lock);
@@ -378,6 +378,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
 
 	if (chp_mask == 0) {
 		spin_unlock_irq(&sch->lock);
+		put_device(&sch->dev);
 		return 0;
 	}
 	old_lpm = sch->lpm;
@@ -392,7 +393,7 @@ __s390_process_res_acc(struct subchannel_id schid, void *data)
 
 	spin_unlock_irq(&sch->lock);
 	put_device(&sch->dev);
-	return (res_data->fla_mask == 0xffff) ? -ENODEV : 0;
+	return 0;
 }
 
 
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 89320c1ad825..2e2882daefbb 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -16,11 +16,10 @@
 #include <linux/device.h>
 #include <linux/kernel_stat.h>
 #include <linux/interrupt.h>
-
 #include <asm/cio.h>
 #include <asm/delay.h>
 #include <asm/irq.h>
-
+#include <asm/setup.h>
 #include "airq.h"
 #include "cio.h"
 #include "css.h"
@@ -192,7 +191,7 @@ cio_start_key (struct subchannel *sch, /* subchannel structure */
 	sch->orb.pfch = sch->options.prefetch == 0;
 	sch->orb.spnd = sch->options.suspend;
 	sch->orb.ssic = sch->options.suspend && sch->options.inter;
-	sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm;
+	sch->orb.lpm = (lpm != 0) ? lpm : sch->lpm;
 #ifdef CONFIG_64BIT
 	/*
 	 * for 64 bit we always support 64 bit IDAWs with 4k page size only
@@ -570,10 +569,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
 	sch->opm = 0xff;
 	if (!cio_is_console(sch->schid))
 		chsc_validate_chpids(sch);
-	sch->lpm = sch->schib.pmcw.pim &
-		sch->schib.pmcw.pam &
-		sch->schib.pmcw.pom &
-		sch->opm;
+	sch->lpm = sch->schib.pmcw.pam & sch->opm;
 
 	CIO_DEBUG(KERN_INFO, 0,
 		  "Detected device %04x on subchannel 0.%x.%04X"
@@ -841,14 +837,26 @@ __clear_subchannel_easy(struct subchannel_id schid)
 	return -EBUSY;
 }
 
-extern void do_reipl(unsigned long devno);
-static int
-__shutdown_subchannel_easy(struct subchannel_id schid, void *data)
+struct sch_match_id {
+	struct subchannel_id schid;
+	struct ccw_dev_id devid;
+	int rc;
+};
+
+static int __shutdown_subchannel_easy_and_match(struct subchannel_id schid,
+	void *data)
 {
 	struct schib schib;
+	struct sch_match_id *match_id = data;
 
 	if (stsch_err(schid, &schib))
 		return -ENXIO;
+	if (match_id && schib.pmcw.dnv &&
+		(schib.pmcw.dev == match_id->devid.devno) &&
+		(schid.ssid == match_id->devid.ssid)) {
+		match_id->schid = schid;
+		match_id->rc = 0;
+	}
 	if (!schib.pmcw.ena)
 		return 0;
 	switch(__disable_subchannel_easy(schid, &schib)) {
@@ -864,18 +872,71 @@ __shutdown_subchannel_easy(struct subchannel_id schid, void *data)
 	return 0;
 }
 
-void
-clear_all_subchannels(void)
+static int clear_all_subchannels_and_match(struct ccw_dev_id *devid,
+	struct subchannel_id *schid)
 {
+	struct sch_match_id match_id;
+
+	match_id.devid = *devid;
+	match_id.rc = -ENODEV;
 	local_irq_disable();
-	for_each_subchannel(__shutdown_subchannel_easy, NULL);
+	for_each_subchannel(__shutdown_subchannel_easy_and_match, &match_id);
+	if (match_id.rc == 0)
+		*schid = match_id.schid;
+	return match_id.rc;
 }
 
+
+void clear_all_subchannels(void)
+{
+	local_irq_disable();
+	for_each_subchannel(__shutdown_subchannel_easy_and_match, NULL);
+}
+
+extern void do_reipl_asm(__u32 schid);
+
 /* Make sure all subchannels are quiet before we re-ipl an lpar. */
-void
-reipl(unsigned long devno)
+void reipl_ccw_dev(struct ccw_dev_id *devid)
 {
-	clear_all_subchannels();
+	struct subchannel_id schid;
+
+	if (clear_all_subchannels_and_match(devid, &schid))
+		panic("IPL Device not found\n");
 	cio_reset_channel_paths();
-	do_reipl(devno);
+	do_reipl_asm(*((__u32*)&schid));
+}
+
+extern struct schib ipl_schib;
+
+/*
+ * ipl_save_parameters gets called very early. It is not allowed to access
+ * anything in the bss section at all. The bss section is not cleared yet,
+ * but may contain some ipl parameters written by the firmware.
+ * These parameters (if present) are copied to 0x2000.
+ * To avoid corruption of the ipl parameters, all variables used by this
+ * function must reside on the stack or in the data section.
+ */
+void ipl_save_parameters(void)
+{
+	struct subchannel_id schid;
+	unsigned int *ipl_ptr;
+	void *src, *dst;
+
+	schid = *(struct subchannel_id *)__LC_SUBCHANNEL_ID;
+	if (!schid.one)
+		return;
+	if (stsch(schid, &ipl_schib))
+		return;
+	if (!ipl_schib.pmcw.dnv)
+		return;
+	ipl_devno = ipl_schib.pmcw.dev;
+	ipl_flags |= IPL_DEVNO_VALID;
+	if (!ipl_schib.pmcw.qf)
+		return;
+	ipl_flags |= IPL_PARMBLOCK_VALID;
+	ipl_ptr = (unsigned int *)__LC_IPL_PARMBLOCK_PTR;
+	src = (void *)(unsigned long)*ipl_ptr;
+	dst = (void *)IPL_PARMBLOCK_ORIGIN;
+	memmove(dst, src, PAGE_SIZE);
+	*ipl_ptr = IPL_PARMBLOCK_ORIGIN;
 }
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 13eeea3d547f..7086a74e9871 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -182,136 +182,141 @@ get_subchannel_by_schid(struct subchannel_id schid)
 	return dev ? to_subchannel(dev) : NULL;
 }
 
-
-static inline int
-css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid)
+static inline int css_get_subchannel_status(struct subchannel *sch)
 {
 	struct schib schib;
-	int cc;
 
-	cc = stsch(schid, &schib);
-	if (cc)
-		return CIO_GONE;
-	if (!schib.pmcw.dnv)
+	if (stsch(sch->schid, &schib) || !schib.pmcw.dnv)
 		return CIO_GONE;
-	if (sch && sch->schib.pmcw.dnv &&
-	    (schib.pmcw.dev != sch->schib.pmcw.dev))
+	if (sch->schib.pmcw.dnv && (schib.pmcw.dev != sch->schib.pmcw.dev))
 		return CIO_REVALIDATE;
-	if (sch && !sch->lpm)
+	if (!sch->lpm)
 		return CIO_NO_PATH;
 	return CIO_OPER;
 }
 
-static int
-css_evaluate_subchannel(struct subchannel_id schid, int slow)
+static int css_evaluate_known_subchannel(struct subchannel *sch, int slow)
 {
 	int event, ret, disc;
-	struct subchannel *sch;
 	unsigned long flags;
+	enum { NONE, UNREGISTER, UNREGISTER_PROBE, REPROBE } action;
 
-	sch = get_subchannel_by_schid(schid);
-	disc = sch ? device_is_disconnected(sch) : 0;
+	spin_lock_irqsave(&sch->lock, flags);
+	disc = device_is_disconnected(sch);
 	if (disc && slow) {
-		if (sch)
-			put_device(&sch->dev);
-		return 0; /* Already processed. */
+		/* Disconnected devices are evaluated directly only.*/
+		spin_unlock_irqrestore(&sch->lock, flags);
+		return 0;
 	}
-	/*
-	 * We've got a machine check, so running I/O won't get an interrupt.
-	 * Kill any pending timers.
-	 */
-	if (sch)
-		device_kill_pending_timer(sch);
+	/* No interrupt after machine check - kill pending timers. */
+	device_kill_pending_timer(sch);
 	if (!disc && !slow) {
-		if (sch)
-			put_device(&sch->dev);
-		return -EAGAIN; /* Will be done on the slow path. */
+		/* Non-disconnected devices are evaluated on the slow path. */
+		spin_unlock_irqrestore(&sch->lock, flags);
+		return -EAGAIN;
 	}
-	event = css_get_subchannel_status(sch, schid);
+	event = css_get_subchannel_status(sch);
 	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
-		      schid.ssid, schid.sch_no, event,
-		      sch?(disc?"disconnected":"normal"):"unknown",
-		      slow?"slow":"fast");
+		      sch->schid.ssid, sch->schid.sch_no, event,
+		      disc ? "disconnected" : "normal",
+		      slow ? "slow" : "fast");
+	/* Analyze subchannel status. */
+	action = NONE;
 	switch (event) {
 	case CIO_NO_PATH:
-	case CIO_GONE:
-		if (!sch) {
-			/* Never used this subchannel. Ignore. */
-			ret = 0;
+		if (disc) {
+			/* Check if paths have become available. */
+			action = REPROBE;
 			break;
 		}
-		if (disc && (event == CIO_NO_PATH)) {
-			/*
-			 * Uargh, hack again. Because we don't get a machine
-			 * check on configure on, our path bookkeeping can
-			 * be out of date here (it's fine while we only do
-			 * logical varying or get chsc machine checks). We
-			 * need to force reprobing or we might miss devices
-			 * coming operational again. It won't do harm in real
-			 * no path situations.
-			 */
-			spin_lock_irqsave(&sch->lock, flags);
-			device_trigger_reprobe(sch);
+		/* fall through */
+	case CIO_GONE:
+		/* Prevent unwanted effects when opening lock. */
+		cio_disable_subchannel(sch);
+		device_set_disconnected(sch);
+		/* Ask driver what to do with device. */
+		action = UNREGISTER;
+		if (sch->driver && sch->driver->notify) {
 			spin_unlock_irqrestore(&sch->lock, flags);
-			ret = 0;
-			break;
-		}
-		if (sch->driver && sch->driver->notify &&
-		    sch->driver->notify(&sch->dev, event)) {
-			cio_disable_subchannel(sch);
-			device_set_disconnected(sch);
-			ret = 0;
-			break;
+			ret = sch->driver->notify(&sch->dev, event);
+			spin_lock_irqsave(&sch->lock, flags);
+			if (ret)
+				action = NONE;
 		}
-		/*
-		 * Unregister subchannel.
-		 * The device will be killed automatically.
-		 */
-		cio_disable_subchannel(sch);
-		css_sch_device_unregister(sch);
-		/* Reset intparm to zeroes. */
-		sch->schib.pmcw.intparm = 0;
-		cio_modify(sch);
-		put_device(&sch->dev);
-		ret = 0;
 		break;
 	case CIO_REVALIDATE:
-		/*
-		 * Revalidation machine check. Sick.
-		 * We don't notify the driver since we have to throw the device
-		 * away in any case.
-		 */
-		if (!disc) {
-			css_sch_device_unregister(sch);
-			/* Reset intparm to zeroes. */
-			sch->schib.pmcw.intparm = 0;
-			cio_modify(sch);
-			put_device(&sch->dev);
-			ret = css_probe_device(schid);
-		} else {
-			/*
-			 * We can't immediately deregister the disconnected
-			 * device since it might block.
-			 */
-			spin_lock_irqsave(&sch->lock, flags);
-			device_trigger_reprobe(sch);
-			spin_unlock_irqrestore(&sch->lock, flags);
-			ret = 0;
-		}
+		/* Device will be removed, so no notify necessary. */
+		if (disc)
+			/* Reprobe because immediate unregister might block. */
+			action = REPROBE;
+		else
+			action = UNREGISTER_PROBE;
 		break;
 	case CIO_OPER:
-		if (disc) {
-			spin_lock_irqsave(&sch->lock, flags);
+		if (disc)
 			/* Get device operational again. */
-			device_trigger_reprobe(sch);
-			spin_unlock_irqrestore(&sch->lock, flags);
-		}
-		ret = sch ? 0 : css_probe_device(schid);
+			action = REPROBE;
+		break;
+	}
+	/* Perform action. */
+	ret = 0;
+	switch (action) {
+	case UNREGISTER:
+	case UNREGISTER_PROBE:
+		/* Unregister device (will use subchannel lock). */
+		spin_unlock_irqrestore(&sch->lock, flags);
+		css_sch_device_unregister(sch);
+		spin_lock_irqsave(&sch->lock, flags);
+
+		/* Reset intparm to zeroes. */
+		sch->schib.pmcw.intparm = 0;
+		cio_modify(sch);
+
+		/* Probe if necessary. */
+		if (action == UNREGISTER_PROBE)
+			ret = css_probe_device(sch->schid);
+		break;
+	case REPROBE:
+		device_trigger_reprobe(sch);
 		break;
 	default:
-		BUG();
-		ret = 0;
+		break;
+	}
+	spin_unlock_irqrestore(&sch->lock, flags);
+
+	return ret;
+}
+
+static int css_evaluate_new_subchannel(struct subchannel_id schid, int slow)
+{
+	struct schib schib;
+
+	if (!slow) {
+		/* Will be done on the slow path. */
+		return -EAGAIN;
 	}
+	if (stsch(schid, &schib) || !schib.pmcw.dnv) {
+		/* Unusable - ignore. */
+		return 0;
+	}
+	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, unknown, "
+		      "slow path.\n", schid.ssid, schid.sch_no, CIO_OPER);
+
+	return css_probe_device(schid);
+}
+
+static int css_evaluate_subchannel(struct subchannel_id schid, int slow)
+{
+	struct subchannel *sch;
+	int ret;
+
+	sch = get_subchannel_by_schid(schid);
+	if (sch) {
+		ret = css_evaluate_known_subchannel(sch, slow);
+		put_device(&sch->dev);
+	} else
+		ret = css_evaluate_new_subchannel(schid, slow);
+
 	return ret;
 }
 
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 646da5640401..688945662c15 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -52,53 +52,81 @@ ccw_bus_match (struct device * dev, struct device_driver * drv)
 	return 1;
 }
 
-/*
- * Hotplugging interface for ccw devices.
- * Heavily modeled on pci and usb hotplug.
- */
-static int
-ccw_uevent (struct device *dev, char **envp, int num_envp,
-	     char *buffer, int buffer_size)
+/* Store modalias string delimited by prefix/suffix string into buffer with
+ * specified size. Return length of resulting string (excluding trailing '\0')
+ * even if string doesn't fit buffer (snprintf semantics). */
+static int snprint_alias(char *buf, size_t size, const char *prefix,
+			 struct ccw_device_id *id, const char *suffix)
 {
-	struct ccw_device *cdev = to_ccwdev(dev);
-	int i = 0;
-	int length = 0;
+	int len;
 
-	if (!cdev)
-		return -ENODEV;
+	len = snprintf(buf, size, "%sccw:t%04Xm%02X", prefix, id->cu_type,
+		       id->cu_model);
+	if (len > size)
+		return len;
+	buf += len;
+	size -= len;
 
-	/* what we want to pass to /sbin/hotplug */
+	if (id->dev_type != 0)
+		len += snprintf(buf, size, "dt%04Xdm%02X%s", id->dev_type,
+				id->dev_model, suffix);
+	else
+		len += snprintf(buf, size, "dtdm%s", suffix);
 
-	envp[i++] = buffer;
-	length += scnprintf(buffer, buffer_size - length, "CU_TYPE=%04X",
-			   cdev->id.cu_type);
-	if ((buffer_size - length <= 0) || (i >= num_envp))
-		return -ENOMEM;
-	++length;
-	buffer += length;
+	return len;
+}
 
+/* Set up environment variables for ccw device uevent. Return 0 on success,
+ * non-zero otherwise. */
+static int ccw_uevent(struct device *dev, char **envp, int num_envp,
+		      char *buffer, int buffer_size)
+{
+	struct ccw_device *cdev = to_ccwdev(dev);
+	struct ccw_device_id *id = &(cdev->id);
+	int i = 0;
+	int len;
+
+	/* CU_TYPE= */
+	len = snprintf(buffer, buffer_size, "CU_TYPE=%04X", id->cu_type) + 1;
+	if (len > buffer_size || i >= num_envp)
+		return -ENOMEM;
 	envp[i++] = buffer;
-	length += scnprintf(buffer, buffer_size - length, "CU_MODEL=%02X",
-			   cdev->id.cu_model);
-	if ((buffer_size - length <= 0) || (i >= num_envp))
+	buffer += len;
+	buffer_size -= len;
+
+	/* CU_MODEL= */
+	len = snprintf(buffer, buffer_size, "CU_MODEL=%02X", id->cu_model) + 1;
+	if (len > buffer_size || i >= num_envp)
 		return -ENOMEM;
-	++length;
-	buffer += length;
+	envp[i++] = buffer;
+	buffer += len;
+	buffer_size -= len;
 
 	/* The next two can be zero, that's ok for us */
-	envp[i++] = buffer;
-	length += scnprintf(buffer, buffer_size - length, "DEV_TYPE=%04X",
-			   cdev->id.dev_type);
-	if ((buffer_size - length <= 0) || (i >= num_envp))
+	/* DEV_TYPE= */
+	len = snprintf(buffer, buffer_size, "DEV_TYPE=%04X", id->dev_type) + 1;
+	if (len > buffer_size || i >= num_envp)
 		return -ENOMEM;
-	++length;
-	buffer += length;
+	envp[i++] = buffer;
+	buffer += len;
+	buffer_size -= len;
 
+	/* DEV_MODEL= */
+	len = snprintf(buffer, buffer_size, "DEV_MODEL=%02X",
+		       (unsigned char) id->dev_model) + 1;
+	if (len > buffer_size || i >= num_envp)
+		return -ENOMEM;
 	envp[i++] = buffer;
-	length += scnprintf(buffer, buffer_size - length, "DEV_MODEL=%02X",
-			   cdev->id.dev_model);
-	if ((buffer_size - length <= 0) || (i >= num_envp))
+	buffer += len;
+	buffer_size -= len;
+
+	/* MODALIAS= */
+	len = snprint_alias(buffer, buffer_size, "MODALIAS=", id, "") + 1;
+	if (len > buffer_size || i >= num_envp)
 		return -ENOMEM;
+	envp[i++] = buffer;
+	buffer += len;
+	buffer_size -= len;
 
 	envp[i] = NULL;
 
@@ -251,16 +279,11 @@ modalias_show (struct device *dev, struct device_attribute *attr, char *buf)
 {
 	struct ccw_device *cdev = to_ccwdev(dev);
 	struct ccw_device_id *id = &(cdev->id);
-	int ret;
+	int len;
 
-	ret = sprintf(buf, "ccw:t%04Xm%02X",
-			id->cu_type, id->cu_model);
-	if (id->dev_type != 0)
-		ret += sprintf(buf + ret, "dt%04Xdm%02X\n",
-				id->dev_type, id->dev_model);
-	else
-		ret += sprintf(buf + ret, "dtdm\n");
-	return ret;
+	len = snprint_alias(buf, PAGE_SIZE, "", id, "\n") + 1;
+
+	return len > PAGE_SIZE ? PAGE_SIZE : len;
 }
 
 static ssize_t
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index 35e162ba6d54..dace46fc32e8 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -232,10 +232,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 	 */
 	old_lpm = sch->lpm;
 	stsch(sch->schid, &sch->schib);
-	sch->lpm = sch->schib.pmcw.pim &
-		sch->schib.pmcw.pam &
-		sch->schib.pmcw.pom &
-		sch->opm;
+	sch->lpm = sch->schib.pmcw.pam & sch->opm;
 	/* Check since device may again have become not operational. */
 	if (!sch->schib.pmcw.dnv)
 		state = DEV_STATE_NOT_OPER;
@@ -267,6 +264,7 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 		notify = 1;
 	}
 	/* fill out sense information */
+	memset(&cdev->id, 0, sizeof(cdev->id));
 	cdev->id.cu_type = cdev->private->senseid.cu_type;
 	cdev->id.cu_model = cdev->private->senseid.cu_model;
 	cdev->id.dev_type = cdev->private->senseid.dev_type;
@@ -454,8 +452,8 @@ ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
 		return;
 	}
 	/* Start Path Group verification. */
-	sch->vpm = 0;	/* Start with no path groups set. */
 	cdev->private->state = DEV_STATE_VERIFY;
+	cdev->private->flags.doverify = 0;
 	ccw_device_verify_start(cdev);
 }
 
@@ -555,7 +553,19 @@ ccw_device_nopath_notify(void *data)
 void
 ccw_device_verify_done(struct ccw_device *cdev, int err)
 {
-	cdev->private->flags.doverify = 0;
+	struct subchannel *sch;
+
+	sch = to_subchannel(cdev->dev.parent);
+	/* Update schib - pom may have changed. */
+	stsch(sch->schid, &sch->schib);
+	/* Update lpm with verified path mask. */
+	sch->lpm = sch->vpm;
+	/* Repeat path verification? */
+	if (cdev->private->flags.doverify) {
+		cdev->private->flags.doverify = 0;
+		ccw_device_verify_start(cdev);
+		return;
+	}
 	switch (err) {
 	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
 		cdev->private->options.pgroup = 0;
@@ -613,6 +623,7 @@ ccw_device_online(struct ccw_device *cdev)
 	if (!cdev->private->options.pgroup) {
 		/* Start initial path verification. */
 		cdev->private->state = DEV_STATE_VERIFY;
+		cdev->private->flags.doverify = 0;
 		ccw_device_verify_start(cdev);
 		return 0;
 	}
@@ -659,7 +670,6 @@ ccw_device_offline(struct ccw_device *cdev)
 	/* Are we doing path grouping? */
 	if (!cdev->private->options.pgroup) {
 		/* No, set state offline immediately. */
-		sch->vpm = 0;
 		ccw_device_done(cdev, DEV_STATE_OFFLINE);
 		return 0;
 	}
@@ -780,6 +790,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
 	}
 	/* Device is idle, we can do the path verification. */
 	cdev->private->state = DEV_STATE_VERIFY;
+	cdev->private->flags.doverify = 0;
 	ccw_device_verify_start(cdev);
 }
 
@@ -1042,9 +1053,9 @@ ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
 }
 
 static void
-ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event)
+ccw_device_delay_verify(struct ccw_device *cdev, enum dev_event dev_event)
 {
-	/* When the I/O has terminated, we have to start verification. */
+	/* Start verification after current task finished. */
 	cdev->private->flags.doverify = 1;
 }
 
@@ -1110,10 +1121,7 @@ device_trigger_reprobe(struct subchannel *sch)
 	 * The pim, pam, pom values may not be accurate, but they are the best
 	 * we have before performing device selection :/
 	 */
-	sch->lpm = sch->schib.pmcw.pim &
-		sch->schib.pmcw.pam &
-		sch->schib.pmcw.pom &
-		sch->opm;
+	sch->lpm = sch->schib.pmcw.pam & sch->opm;
 	/* Re-set some bits in the pmcw that were lost. */
 	sch->schib.pmcw.isc = 3;
 	sch->schib.pmcw.csense = 1;
@@ -1237,7 +1245,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 		[DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
 		[DEV_EVENT_INTERRUPT] = ccw_device_verify_irq,
 		[DEV_EVENT_TIMEOUT] = ccw_device_onoff_timeout,
-		[DEV_EVENT_VERIFY] = ccw_device_nop,
+		[DEV_EVENT_VERIFY] = ccw_device_delay_verify,
 	},
 	[DEV_STATE_ONLINE] = {
 		[DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
@@ -1280,7 +1288,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 		[DEV_EVENT_NOTOPER] = ccw_device_online_notoper,
 		[DEV_EVENT_INTERRUPT] = ccw_device_wait4io_irq,
 		[DEV_EVENT_TIMEOUT] = ccw_device_wait4io_timeout,
-		[DEV_EVENT_VERIFY] = ccw_device_wait4io_verify,
+		[DEV_EVENT_VERIFY] = ccw_device_delay_verify,
 	},
 	[DEV_STATE_QUIESCE] = {
 		[DEV_EVENT_NOTOPER] = ccw_device_quiesce_done,
@@ -1293,7 +1301,7 @@ fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
 		[DEV_EVENT_NOTOPER] = ccw_device_nop,
 		[DEV_EVENT_INTERRUPT] = ccw_device_start_id,
 		[DEV_EVENT_TIMEOUT] = ccw_device_bug,
-		[DEV_EVENT_VERIFY] = ccw_device_nop,
+		[DEV_EVENT_VERIFY] = ccw_device_start_id,
 	},
 	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
 		[DEV_EVENT_NOTOPER] = ccw_device_recog_notoper,
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index 9e3de0bd59b5..93a897eebfff 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -96,6 +96,12 @@ ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
 	ret = cio_set_options (sch, flags);
 	if (ret)
 		return ret;
+	/* Adjust requested path mask to excluded varied off paths. */
+	if (lpm) {
+		lpm &= sch->opm;
+		if (lpm == 0)
+			return -EACCES;
+	}
 	ret = cio_start_key (sch, cpa, lpm, key);
 	if (ret == 0)
 		cdev->private->intparm = intparm;
@@ -250,7 +256,7 @@ ccw_device_get_path_mask(struct ccw_device *cdev)
 	if (!sch)
 		return 0;
 	else
-		return sch->vpm;
+		return sch->lpm;
 }
 
 static void
@@ -304,7 +310,7 @@ __ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, _
 	sch = to_subchannel(cdev->dev.parent);
 	do {
 		ret = cio_start (sch, ccw, lpm);
-		if ((ret == -EBUSY) || (ret == -EACCES)) {
+		if (ret == -EBUSY) {
 			/* Try again later. */
 			spin_unlock_irq(&sch->lock);
 			msleep(10);
@@ -433,6 +439,13 @@ read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lp
 	if (!ciw || ciw->cmd == 0)
 		return -EOPNOTSUPP;
 
+	/* Adjust requested path mask to excluded varied off paths. */
+	if (lpm) {
+		lpm &= sch->opm;
+		if (lpm == 0)
+			return -EACCES;
+	}
+
 	rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
 	if (!rcd_ccw)
 		return -ENOMEM;
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 1693a102dcfe..8ca2d078848c 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -245,18 +245,17 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
 	memset(&cdev->private->irb, 0, sizeof(struct irb));
 
 	/* Try multiple times. */
-	ret = -ENODEV;
+	ret = -EACCES;
 	if (cdev->private->iretry > 0) {
 		cdev->private->iretry--;
 		ret = cio_start (sch, cdev->private->iccws,
 				 cdev->private->imask);
-		/* ret is 0, -EBUSY, -EACCES or -ENODEV */
-		if ((ret != -EACCES) && (ret != -ENODEV))
+		/* We expect an interrupt in case of success or busy
+		 * indication. */
+		if ((ret == 0) || (ret == -EBUSY))
 			return ret;
 	}
-	/* PGID command failed on this path. Switch it off. */
-	sch->lpm &= ~cdev->private->imask;
-	sch->vpm &= ~cdev->private->imask;
+	/* PGID command failed on this path. */
 	CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel "
 		      "0.%x.%04x, lpm %02X, became 'not operational'\n",
 		      cdev->private->devno, sch->schid.ssid,
@@ -286,18 +285,17 @@ static int __ccw_device_do_nop(struct ccw_device *cdev)
 	memset(&cdev->private->irb, 0, sizeof(struct irb));
 
 	/* Try multiple times. */
-	ret = -ENODEV;
+	ret = -EACCES;
 	if (cdev->private->iretry > 0) {
 		cdev->private->iretry--;
 		ret = cio_start (sch, cdev->private->iccws,
 				 cdev->private->imask);
-		/* ret is 0, -EBUSY, -EACCES or -ENODEV */
-		if ((ret != -EACCES) && (ret != -ENODEV))
+		/* We expect an interrupt in case of success or busy
+		 * indication. */
+		if ((ret == 0) || (ret == -EBUSY))
 			return ret;
 	}
-	/* nop command failed on this path. Switch it off. */
-	sch->lpm &= ~cdev->private->imask;
-	sch->vpm &= ~cdev->private->imask;
+	/* nop command failed on this path. */
 	CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel "
 		      "0.%x.%04x, lpm %02X, became 'not operational'\n",
 		      cdev->private->devno, sch->schid.ssid,
@@ -372,27 +370,32 @@ static void
 __ccw_device_verify_start(struct ccw_device *cdev)
 {
 	struct subchannel *sch;
-	__u8 imask, func;
+	__u8 func;
 	int ret;
 
 	sch = to_subchannel(cdev->dev.parent);
-	while (sch->vpm != sch->lpm) {
-		/* Find first unequal bit in vpm vs. lpm */
-		for (imask = 0x80; imask != 0; imask >>= 1)
-			if ((sch->vpm & imask) != (sch->lpm & imask))
-				break;
-		cdev->private->imask = imask;
+	/* Repeat for all paths. */
+	for (; cdev->private->imask; cdev->private->imask >>= 1,
+	     cdev->private->iretry = 5) {
+		if ((cdev->private->imask & sch->schib.pmcw.pam) == 0)
+			/* Path not available, try next. */
+			continue;
 		if (cdev->private->options.pgroup) {
-			func = (sch->vpm & imask) ?
-				SPID_FUNC_RESIGN : SPID_FUNC_ESTABLISH;
+			if (sch->opm & cdev->private->imask)
+				func = SPID_FUNC_ESTABLISH;
+			else
+				func = SPID_FUNC_RESIGN;
 			ret = __ccw_device_do_pgid(cdev, func);
 		} else
 			ret = __ccw_device_do_nop(cdev);
+		/* We expect an interrupt in case of success or busy
+		 * indication. */
 		if (ret == 0 || ret == -EBUSY)
 			return;
-		cdev->private->iretry = 5;
+		/* Permanent path failure, try next. */
 	}
-	ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV);
+	/* Done with all paths. */
+	ccw_device_verify_done(cdev, (sch->vpm != 0) ? 0 : -ENODEV);
 }
 
 /*
@@ -421,14 +424,14 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
 	else
 		ret = __ccw_device_check_nop(cdev);
 	memset(&cdev->private->irb, 0, sizeof(struct irb));
+
 	switch (ret) {
 	/* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
 	case 0:
-		/* Establish or Resign Path Group done. Update vpm. */
-		if ((sch->lpm & cdev->private->imask) != 0)
-			sch->vpm |= cdev->private->imask;
-		else
-			sch->vpm &= ~cdev->private->imask;
+		/* Path verification ccw finished successfully, update lpm. */
+		sch->vpm |= sch->opm & cdev->private->imask;
+		/* Go on with next path. */
+		cdev->private->imask >>= 1;
 		cdev->private->iretry = 5;
 		__ccw_device_verify_start(cdev);
 		break;
@@ -441,6 +444,10 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
 			cdev->private->options.pgroup = 0;
 		else
 			cdev->private->flags.pgid_single = 1;
+		/* Retry */
+		sch->vpm = 0;
+		cdev->private->imask = 0x80;
+		cdev->private->iretry = 5;
 		/* fall through. */
 	case -EAGAIN: /* Try again. */
 		__ccw_device_verify_start(cdev);
@@ -449,8 +456,7 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
 		ccw_device_verify_done(cdev, -ETIME);
 		break;
 	case -EACCES: /* channel is not operational. */
-		sch->lpm &= ~cdev->private->imask;
-		sch->vpm &= ~cdev->private->imask;
+		cdev->private->imask >>= 1;
 		cdev->private->iretry = 5;
 		__ccw_device_verify_start(cdev);
 		break;
@@ -463,19 +469,17 @@ ccw_device_verify_start(struct ccw_device *cdev)
 	struct subchannel *sch = to_subchannel(cdev->dev.parent);
 
 	cdev->private->flags.pgid_single = 0;
+	cdev->private->imask = 0x80;
 	cdev->private->iretry = 5;
-	/*
-	 * Update sch->lpm with current values to catch paths becoming
-	 * available again.
-	 */
+
+	/* Start with empty vpm. */
+	sch->vpm = 0;
+
+	/* Get current pam. */
 	if (stsch(sch->schid, &sch->schib)) {
 		ccw_device_verify_done(cdev, -ENODEV);
 		return;
 	}
-	sch->lpm = sch->schib.pmcw.pim &
-		sch->schib.pmcw.pam &
-		sch->schib.pmcw.pom &
-		sch->opm;
 	__ccw_device_verify_start(cdev);
 }
 
@@ -524,7 +528,6 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
 	switch (ret) {
 	/* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */
 	case 0: /* disband successful. */
-		sch->vpm = 0;
 		ccw_device_disband_done(cdev, ret);
 		break;
 	case -EOPNOTSUPP:
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 7c93a8798d23..cde822d8b5c8 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -115,7 +115,7 @@ qdio_min(int a,int b)
 static inline __u64
 qdio_get_micros(void)
 {
-	return (get_clock() >> 10); /* time>>12 is microseconds */
+	return (get_clock() >> 12); /* time>>12 is microseconds */
 }
 
 /*
@@ -1129,7 +1129,7 @@ out:
 
 #ifdef QDIO_USE_PROCESSING_STATE
 	if (last_position>=0)
-		set_slsb(q, &last_position, SLSB_P_INPUT_NOT_INIT, &count);
+		set_slsb(q, &last_position, SLSB_P_INPUT_PROCESSING, &count);
 #endif /* QDIO_USE_PROCESSING_STATE */
 
 	QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index ceb3ab31ee08..124569362f02 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -191,49 +191,49 @@ enum qdio_irq_states {
 #if QDIO_VERBOSE_LEVEL>8
 #define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
 #else
-#define QDIO_PRINT_STUPID(x...)
+#define QDIO_PRINT_STUPID(x...) do { } while (0)
 #endif
 
 #if QDIO_VERBOSE_LEVEL>7
 #define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x)
 #else
-#define QDIO_PRINT_ALL(x...)
+#define QDIO_PRINT_ALL(x...) do { } while (0)
 #endif
 
 #if QDIO_VERBOSE_LEVEL>6
 #define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x)
 #else
-#define QDIO_PRINT_INFO(x...)
+#define QDIO_PRINT_INFO(x...) do { } while (0)
 #endif
 
 #if QDIO_VERBOSE_LEVEL>5
 #define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
 #else
-#define QDIO_PRINT_WARN(x...)
+#define QDIO_PRINT_WARN(x...) do { } while (0)
 #endif

 #if QDIO_VERBOSE_LEVEL>4
 #define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
 #else
-#define QDIO_PRINT_ERR(x...)
+#define QDIO_PRINT_ERR(x...) do { } while (0)
 #endif
 
 #if QDIO_VERBOSE_LEVEL>3
 #define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
 #else
-#define QDIO_PRINT_CRIT(x...)
+#define QDIO_PRINT_CRIT(x...) do { } while (0)
 #endif
 
 #if QDIO_VERBOSE_LEVEL>2
 #define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
 #else
-#define QDIO_PRINT_ALERT(x...)
+#define QDIO_PRINT_ALERT(x...) do { } while (0)
 #endif
 
 #if QDIO_VERBOSE_LEVEL>1
 #define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x)
 #else
-#define QDIO_PRINT_EMERG(x...)
+#define QDIO_PRINT_EMERG(x...) do { } while (0)
 #endif
 
 #define HEXDUMP16(importance,header,ptr) \