author      Linus Torvalds <torvalds@linux-foundation.org>  2008-12-28 15:33:21 -0500
committer   Linus Torvalds <torvalds@linux-foundation.org>  2008-12-28 15:33:21 -0500
commit      1db2a5c11e495366bff35cf7445d494703f7febe (patch)
tree        3347dd1cab0a2a96a4333524298a62132eb22336 /drivers
parent      a39b863342b8aba52390092be95db58f6ed56061 (diff)
parent      cef7125def4dd104769f400c941199614da0aca1 (diff)
Merge branch 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.osdl.marist.edu/pub/scm/linux-2.6: (85 commits)
[S390] provide documentation for hvc_iucv kernel parameter.
[S390] convert ctcm printks to dev_xxx and pr_xxx macros.
[S390] convert zfcp printks to pr_xxx macros.
[S390] convert vmlogrdr printks to pr_xxx macros.
[S390] convert zfcp dumper printks to pr_xxx macros.
[S390] convert cpu related printks to pr_xxx macros.
[S390] convert qeth printks to dev_xxx and pr_xxx macros.
[S390] convert sclp printks to pr_xxx macros.
[S390] convert iucv printks to dev_xxx and pr_xxx macros.
[S390] convert ap_bus printks to pr_xxx macros.
[S390] convert dcssblk and extmem printks messages to pr_xxx macros.
[S390] convert monwriter printks to pr_xxx macros.
[S390] convert s390 debug feature printks to pr_xxx macros.
[S390] convert monreader printks to pr_xxx macros.
[S390] convert appldata printks to pr_xxx macros.
[S390] convert setup printks to pr_xxx macros.
[S390] convert hypfs printks to pr_xxx macros.
[S390] convert time printks to pr_xxx macros.
[S390] convert cpacf printks to pr_xxx macros.
[S390] convert cio printks to pr_xxx macros.
...
Diffstat (limited to 'drivers')
70 files changed, 2751 insertions, 1437 deletions
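
Most of the commits pulled in here convert driver-private printk wrappers to the generic pr_xxx()/dev_xxx() macros. As a rough, stand-alone sketch only (the component name "demo" and the demo_* functions are invented for illustration; the dcssblk hunk further down shows the real conversion), defining KMSG_COMPONENT and pr_fmt() before the includes makes every pr_*() call carry the component prefix:

/* Illustrative sketch of the printk -> pr_xxx() conversion pattern;
 * the component name "demo" is invented for this example. */
#define KMSG_COMPONENT "demo"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>

static int __init demo_init(void)
{
        /* formerly: printk(KERN_INFO "demo: module loaded\n"); */
        pr_info("module loaded\n");     /* prints "demo: module loaded" */
        return 0;
}

static void __exit demo_exit(void)
{
        pr_info("module unloaded\n");
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
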
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 43d6ba83a191..8783457b93d3 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -622,6 +622,16 @@ config HVC_BEAT
         help
           Toshiba's Cell Reference Set Beat Console device driver
 
+config HVC_IUCV
+        bool "z/VM IUCV Hypervisor console support (VM only)"
+        depends on S390
+        select HVC_DRIVER
+        select IUCV
+        default y
+        help
+          This driver provides a Hypervisor console (HVC) back-end to access
+          a Linux (console) terminal via a z/VM IUCV communication path.
+
 config HVC_XEN
         bool "Xen Hypervisor Console support"
         depends on XEN
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 438f71317c5c..36151bae0d72 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -50,6 +50,7 @@ obj-$(CONFIG_HVC_BEAT) += hvc_beat.o
 obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
 obj-$(CONFIG_HVC_IRQ) += hvc_irq.o
 obj-$(CONFIG_HVC_XEN) += hvc_xen.o
+obj-$(CONFIG_HVC_IUCV) += hvc_iucv.o
 obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
 obj-$(CONFIG_RAW_DRIVER) += raw.o
 obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
new file mode 100644
index 000000000000..5ea7d7713fca
--- /dev/null
+++ b/drivers/char/hvc_iucv.c
@@ -0,0 +1,850 @@
+/*
+ * hvc_iucv.c - z/VM IUCV back-end for the Hypervisor Console (HVC)
+ *
+ * This back-end for HVC provides terminal access via
+ * z/VM IUCV communication paths.
+ *
+ * Copyright IBM Corp. 2008.
+ *
+ * Author(s): Hendrik Brueckner <brueckner@linux.vnet.ibm.com>
+ */
+#define KMSG_COMPONENT "hvc_iucv"
+
+#include <linux/types.h>
+#include <asm/ebcdic.h>
+#include <linux/mempool.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <net/iucv/iucv.h>
+
+#include "hvc_console.h"
+
+
+/* HVC backend for z/VM IUCV */
+#define HVC_IUCV_MAGIC 0xc9e4c3e5
+#define MAX_HVC_IUCV_LINES HVC_ALLOC_TTY_ADAPTERS
+#define MEMPOOL_MIN_NR (PAGE_SIZE / sizeof(struct iucv_tty_buffer)/4)
+
+/* IUCV TTY message */
+#define MSG_VERSION 0x02 /* Message version */
+#define MSG_TYPE_ERROR 0x01 /* Error message */
+#define MSG_TYPE_TERMENV 0x02 /* Terminal environment variable */
+#define MSG_TYPE_TERMIOS 0x04 /* Terminal IO struct update */
+#define MSG_TYPE_WINSIZE 0x08 /* Terminal window size update */
+#define MSG_TYPE_DATA 0x10 /* Terminal data */
+
+#define MSG_SIZE(s) ((s) + offsetof(struct iucv_tty_msg, data))
+struct iucv_tty_msg {
+        u8 version; /* Message version */
+        u8 type; /* Message type */
+#define MSG_MAX_DATALEN (~(u16)0)
+        u16 datalen; /* Payload length */
+        u8 data[]; /* Payload buffer */
+} __attribute__((packed));
+
+enum iucv_state_t {
+        IUCV_DISCONN = 0,
+        IUCV_CONNECTED = 1,
+        IUCV_SEVERED = 2,
+};
+
+enum tty_state_t {
+        TTY_CLOSED = 0,
+        TTY_OPENED = 1,
+};
+
+struct hvc_iucv_private {
+        struct hvc_struct *hvc; /* HVC console struct reference */
+        u8 srv_name[8]; /* IUCV service name (ebcdic) */
+        enum iucv_state_t iucv_state; /* IUCV connection status */
+        enum tty_state_t tty_state; /* TTY status */
+        struct iucv_path *path; /* IUCV path pointer */
+        spinlock_t lock; /* hvc_iucv_private lock */
+        struct list_head tty_outqueue; /* outgoing IUCV messages */
+        struct list_head tty_inqueue; /* incoming IUCV messages */
+};
+
+struct iucv_tty_buffer {
+        struct list_head list; /* list pointer */
+        struct iucv_message msg; /* store an incoming IUCV message */
+        size_t offset; /* data buffer offset */
+        struct iucv_tty_msg *mbuf; /* buffer to store input/output data */
+};
+
+/* IUCV callback handler */
+static int hvc_iucv_path_pending(struct iucv_path *, u8[8], u8[16]);
+static void hvc_iucv_path_severed(struct iucv_path *, u8[16]);
+static void hvc_iucv_msg_pending(struct iucv_path *, struct iucv_message *);
+static void hvc_iucv_msg_complete(struct iucv_path *, struct iucv_message *);
+
+
+/* Kernel module parameters */
+static unsigned long hvc_iucv_devices;
+
+/* Array of allocated hvc iucv tty lines... */
+static struct hvc_iucv_private *hvc_iucv_table[MAX_HVC_IUCV_LINES];
+
+/* Kmem cache and mempool for iucv_tty_buffer elements */
+static struct kmem_cache *hvc_iucv_buffer_cache;
+static mempool_t *hvc_iucv_mempool;
+
+/* IUCV handler callback functions */
+static struct iucv_handler hvc_iucv_handler = {
+        .path_pending = hvc_iucv_path_pending,
+        .path_severed = hvc_iucv_path_severed,
+        .message_complete = hvc_iucv_msg_complete,
+        .message_pending = hvc_iucv_msg_pending,
+};
+
+
+/**
+ * hvc_iucv_get_private() - Return a struct hvc_iucv_private instance.
+ * @num: The HVC virtual terminal number (vtermno)
+ *
+ * This function returns the struct hvc_iucv_private instance that corresponds
+ * to the HVC virtual terminal number specified as parameter @num.
+ */
+struct hvc_iucv_private *hvc_iucv_get_private(uint32_t num)
+{
+        if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
+                return NULL;
+        return hvc_iucv_table[num - HVC_IUCV_MAGIC];
+}
+
+/**
+ * alloc_tty_buffer() - Returns a new struct iucv_tty_buffer element.
+ * @size: Size of the internal buffer used to store data.
+ * @flags: Memory allocation flags passed to mempool.
+ *
+ * This function allocates a new struct iucv_tty_buffer element and, optionally,
+ * allocates an internal data buffer with the specified size @size.
+ * Note: The total message size arises from the internal buffer size and the
+ * members of the iucv_tty_msg structure.
+ *
+ * The function returns NULL if memory allocation has failed.
+ */
+static struct iucv_tty_buffer *alloc_tty_buffer(size_t size, gfp_t flags)
+{
+        struct iucv_tty_buffer *bufp;
+
+        bufp = mempool_alloc(hvc_iucv_mempool, flags);
+        if (!bufp)
+                return NULL;
+        memset(bufp, 0, sizeof(struct iucv_tty_buffer));
+
+        if (size > 0) {
+                bufp->msg.length = MSG_SIZE(size);
+                bufp->mbuf = kmalloc(bufp->msg.length, flags);
+                if (!bufp->mbuf) {
+                        mempool_free(bufp, hvc_iucv_mempool);
+                        return NULL;
+                }
+                bufp->mbuf->version = MSG_VERSION;
+                bufp->mbuf->type = MSG_TYPE_DATA;
+                bufp->mbuf->datalen = (u16) size;
+        }
+        return bufp;
+}
+
+/**
+ * destroy_tty_buffer() - destroy struct iucv_tty_buffer element.
+ * @bufp: Pointer to a struct iucv_tty_buffer element, SHALL NOT be NULL.
+ *
+ * The destroy_tty_buffer() function frees the internal data buffer and returns
+ * the struct iucv_tty_buffer element back to the mempool for freeing.
+ */
+static void destroy_tty_buffer(struct iucv_tty_buffer *bufp)
+{
+        kfree(bufp->mbuf);
+        mempool_free(bufp, hvc_iucv_mempool);
+}
+
+/**
+ * destroy_tty_buffer_list() - call destroy_tty_buffer() for each list element.
+ * @list: List head pointer to a list containing struct iucv_tty_buffer
+ * elements.
+ *
+ * Calls destroy_tty_buffer() for each struct iucv_tty_buffer element in the
+ * list @list.
+ */
+static void destroy_tty_buffer_list(struct list_head *list)
+{
+        struct iucv_tty_buffer *ent, *next;
+
+        list_for_each_entry_safe(ent, next, list, list) {
+                list_del(&ent->list);
+                destroy_tty_buffer(ent);
+        }
+}
+
+/**
+ * hvc_iucv_write() - Receive IUCV message write data to HVC console buffer.
+ * @priv: Pointer to hvc_iucv_private structure.
+ * @buf: HVC console buffer for writing received terminal data.
+ * @count: HVC console buffer size.
+ * @has_more_data: Pointer to an int variable.
+ *
+ * The function picks up pending messages from the input queue and receives
+ * the message data that is then written to the specified buffer @buf.
+ * If the buffer size @count is less than the data message size, then the
+ * message is kept on the input queue and @has_more_data is set to 1.
+ * If the message data has been entirely written, the message is removed from
+ * the input queue.
+ *
+ * The function returns the number of bytes written to the terminal, zero if
+ * there are no pending data messages available or if there is no established
+ * IUCV path.
+ * If the IUCV path has been severed, then -EPIPE is returned to cause a
+ * hang up (that is issued by the HVC console layer).
+ */
+static int hvc_iucv_write(struct hvc_iucv_private *priv,
+                          char *buf, int count, int *has_more_data)
+{
+        struct iucv_tty_buffer *rb;
+        int written;
+        int rc;
+
+        /* Immediately return if there is no IUCV connection */
+        if (priv->iucv_state == IUCV_DISCONN)
+                return 0;
+
+        /* If the IUCV path has been severed, return -EPIPE to inform the
+         * hvc console layer to hang up the tty device. */
+        if (priv->iucv_state == IUCV_SEVERED)
+                return -EPIPE;
+
+        /* check if there are pending messages */
+        if (list_empty(&priv->tty_inqueue))
+                return 0;
+
+        /* receive a iucv message and flip data to the tty (ldisc) */
+        rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list);
+
+        written = 0;
+        if (!rb->mbuf) { /* message not yet received ... */
+                /* allocate mem to store msg data; if no memory is available
+                 * then leave the buffer on the list and re-try later */
+                rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC);
+                if (!rb->mbuf)
+                        return -ENOMEM;
+
+                rc = __iucv_message_receive(priv->path, &rb->msg, 0,
+                                            rb->mbuf, rb->msg.length, NULL);
+                switch (rc) {
+                case 0: /* Successful */
+                        break;
+                case 2: /* No message found */
+                case 9: /* Message purged */
+                        break;
+                default:
+                        written = -EIO;
+                }
+                /* remove buffer if an error has occured or received data
+                 * is not correct */
+                if (rc || (rb->mbuf->version != MSG_VERSION) ||
+                          (rb->msg.length != MSG_SIZE(rb->mbuf->datalen)))
+                        goto out_remove_buffer;
+        }
+
+        switch (rb->mbuf->type) {
+        case MSG_TYPE_DATA:
+                written = min_t(int, rb->mbuf->datalen - rb->offset, count);
+                memcpy(buf, rb->mbuf->data + rb->offset, written);
+                if (written < (rb->mbuf->datalen - rb->offset)) {
+                        rb->offset += written;
+                        *has_more_data = 1;
+                        goto out_written;
+                }
+                break;
+
+        case MSG_TYPE_WINSIZE:
+                if (rb->mbuf->datalen != sizeof(struct winsize))
+                        break;
+                hvc_resize(priv->hvc, *((struct winsize *)rb->mbuf->data));
+                break;
+
+        case MSG_TYPE_ERROR: /* ignored ... */
+        case MSG_TYPE_TERMENV: /* ignored ... */
+        case MSG_TYPE_TERMIOS: /* ignored ... */
+                break;
+        }
+
+out_remove_buffer:
+        list_del(&rb->list);
+        destroy_tty_buffer(rb);
+        *has_more_data = !list_empty(&priv->tty_inqueue);
+
+out_written:
+        return written;
+}
+
+/**
+ * hvc_iucv_get_chars() - HVC get_chars operation.
+ * @vtermno: HVC virtual terminal number.
+ * @buf: Pointer to a buffer to store data
+ * @count: Size of buffer available for writing
+ *
+ * The hvc_console thread calls this method to read characters from
+ * the terminal backend. If an IUCV communication path has been established,
+ * pending IUCV messages are received and data is copied into buffer @buf
+ * up to @count bytes.
+ *
+ * Locking: The routine gets called under an irqsave() spinlock; and
+ * the routine locks the struct hvc_iucv_private->lock to call
+ * helper functions.
+ */
+static int hvc_iucv_get_chars(uint32_t vtermno, char *buf, int count)
+{
+        struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
+        int written;
+        int has_more_data;
+
+        if (count <= 0)
+                return 0;
+
+        if (!priv)
+                return -ENODEV;
+
+        spin_lock(&priv->lock);
+        has_more_data = 0;
+        written = hvc_iucv_write(priv, buf, count, &has_more_data);
+        spin_unlock(&priv->lock);
+
+        /* if there are still messages on the queue... schedule another run */
+        if (has_more_data)
+                hvc_kick();
+
+        return written;
+}
+
+/**
+ * hvc_iucv_send() - Send an IUCV message containing terminal data.
+ * @priv: Pointer to struct hvc_iucv_private instance.
+ * @buf: Buffer containing data to send.
+ * @size: Size of buffer and amount of data to send.
+ *
+ * If an IUCV communication path is established, the function copies the buffer
+ * data to a newly allocated struct iucv_tty_buffer element, sends the data and
+ * puts the element to the outqueue.
+ *
+ * If there is no IUCV communication path established, the function returns 0.
+ * If an existing IUCV communicaton path has been severed, the function returns
+ * -EPIPE (can be passed to HVC layer to cause a tty hangup).
+ */
+static int hvc_iucv_send(struct hvc_iucv_private *priv, const char *buf,
+                         int count)
+{
+        struct iucv_tty_buffer *sb;
+        int rc;
+        u16 len;
+
+        if (priv->iucv_state == IUCV_SEVERED)
+                return -EPIPE;
+
+        if (priv->iucv_state == IUCV_DISCONN)
+                return 0;
+
+        len = min_t(u16, MSG_MAX_DATALEN, count);
+
+        /* allocate internal buffer to store msg data and also compute total
+         * message length */
+        sb = alloc_tty_buffer(len, GFP_ATOMIC);
+        if (!sb)
+                return -ENOMEM;
+
+        sb->mbuf->datalen = len;
+        memcpy(sb->mbuf->data, buf, len);
+
+        list_add_tail(&sb->list, &priv->tty_outqueue);
+
+        rc = __iucv_message_send(priv->path, &sb->msg, 0, 0,
+                                 (void *) sb->mbuf, sb->msg.length);
+        if (rc) {
+                list_del(&sb->list);
+                destroy_tty_buffer(sb);
+                len = 0;
+        }
+
+        return len;
+}
+
+/**
+ * hvc_iucv_put_chars() - HVC put_chars operation.
+ * @vtermno: HVC virtual terminal number.
+ * @buf: Pointer to an buffer to read data from
+ * @count: Size of buffer available for reading
+ *
+ * The hvc_console thread calls this method to write characters from
+ * to the terminal backend.
+ * The function calls hvc_iucv_send() under the lock of the
+ * struct hvc_iucv_private instance that corresponds to the tty @vtermno.
+ *
+ * Locking: The method gets called under an irqsave() spinlock; and
+ * locks struct hvc_iucv_private->lock.
+ */
+static int hvc_iucv_put_chars(uint32_t vtermno, const char *buf, int count)
+{
+        struct hvc_iucv_private *priv = hvc_iucv_get_private(vtermno);
+        int sent;
+
+        if (count <= 0)
+                return 0;
+
+        if (!priv)
+                return -ENODEV;
+
+        spin_lock(&priv->lock);
+        sent = hvc_iucv_send(priv, buf, count);
+        spin_unlock(&priv->lock);
+
+        return sent;
+}
+
+/**
+ * hvc_iucv_notifier_add() - HVC notifier for opening a TTY for the first time.
+ * @hp: Pointer to the HVC device (struct hvc_struct)
+ * @id: Additional data (originally passed to hvc_alloc): the index of an struct
+ * hvc_iucv_private instance.
+ *
+ * The function sets the tty state to TTY_OPEN for the struct hvc_iucv_private
+ * instance that is derived from @id. Always returns 0.
+ *
+ * Locking: struct hvc_iucv_private->lock, spin_lock_bh
+ */
+static int hvc_iucv_notifier_add(struct hvc_struct *hp, int id)
+{
+        struct hvc_iucv_private *priv;
+
+        priv = hvc_iucv_get_private(id);
+        if (!priv)
+                return 0;
+
+        spin_lock_bh(&priv->lock);
+        priv->tty_state = TTY_OPENED;
+        spin_unlock_bh(&priv->lock);
+
+        return 0;
+}
+
+/**
+ * hvc_iucv_cleanup() - Clean up function if the tty portion is finally closed.
+ * @priv: Pointer to the struct hvc_iucv_private instance.
+ *
+ * The functions severs the established IUCV communication path (if any), and
+ * destroy struct iucv_tty_buffer elements from the in- and outqueue. Finally,
+ * the functions resets the states to TTY_CLOSED and IUCV_DISCONN.
+ */
+static void hvc_iucv_cleanup(struct hvc_iucv_private *priv)
+{
+        destroy_tty_buffer_list(&priv->tty_outqueue);
+        destroy_tty_buffer_list(&priv->tty_inqueue);
+
+        priv->tty_state = TTY_CLOSED;
+        priv->iucv_state = IUCV_DISCONN;
+}
+
+/**
+ * hvc_iucv_notifier_hangup() - HVC notifier for tty hangups.
+ * @hp: Pointer to the HVC device (struct hvc_struct)
+ * @id: Additional data (originally passed to hvc_alloc): the index of an struct
+ * hvc_iucv_private instance.
+ *
+ * This routine notifies the HVC backend that a tty hangup (carrier loss,
+ * virtual or otherwise) has occured.
+ *
+ * The HVC backend for z/VM IUCV ignores virtual hangups (vhangup()), to keep
+ * an existing IUCV communication path established.
+ * (Background: vhangup() is called from user space (by getty or login) to
+ * disable writing to the tty by other applications).
+ *
+ * If the tty has been opened (e.g. getty) and an established IUCV path has been
+ * severed (we caused the tty hangup in that case), then the functions invokes
+ * hvc_iucv_cleanup() to clean up.
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_notifier_hangup(struct hvc_struct *hp, int id)
+{
+        struct hvc_iucv_private *priv;
+
+        priv = hvc_iucv_get_private(id);
+        if (!priv)
+                return;
+
+        spin_lock_bh(&priv->lock);
+        /* NOTE: If the hangup was scheduled by ourself (from the iucv
+         * path_servered callback [IUCV_SEVERED]), then we have to
+         * finally clean up the tty backend structure and set state to
+         * TTY_CLOSED.
+         *
+         * If the tty was hung up otherwise (e.g. vhangup()), then we
+         * ignore this hangup and keep an established IUCV path open...
+         * (...the reason is that we are not able to connect back to the
+         * client if we disconnect on hang up) */
+        priv->tty_state = TTY_CLOSED;
+
+        if (priv->iucv_state == IUCV_SEVERED)
+                hvc_iucv_cleanup(priv);
+        spin_unlock_bh(&priv->lock);
+}
+
+/**
+ * hvc_iucv_notifier_del() - HVC notifier for closing a TTY for the last time.
+ * @hp: Pointer to the HVC device (struct hvc_struct)
+ * @id: Additional data (originally passed to hvc_alloc):
+ * the index of an struct hvc_iucv_private instance.
+ *
+ * This routine notifies the HVC backend that the last tty device file
+ * descriptor has been closed.
+ * The function calls hvc_iucv_cleanup() to clean up the struct hvc_iucv_private
+ * instance.
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_notifier_del(struct hvc_struct *hp, int id)
+{
+        struct hvc_iucv_private *priv;
+        struct iucv_path *path;
+
+        priv = hvc_iucv_get_private(id);
+        if (!priv)
+                return;
+
+        spin_lock_bh(&priv->lock);
+        path = priv->path; /* save reference to IUCV path */
+        priv->path = NULL;
+        hvc_iucv_cleanup(priv);
+        spin_unlock_bh(&priv->lock);
+
+        /* sever IUCV path outside of priv->lock due to lock ordering of:
+         * priv->lock <--> iucv_table_lock */
+        if (path) {
+                iucv_path_sever(path, NULL);
+                iucv_path_free(path);
+        }
+}
+
+/**
+ * hvc_iucv_path_pending() - IUCV handler to process a connection request.
+ * @path: Pending path (struct iucv_path)
+ * @ipvmid: Originator z/VM system identifier
+ * @ipuser: User specified data for this path
+ * (AF_IUCV: port/service name and originator port)
+ *
+ * The function uses the @ipuser data to check to determine if the pending
+ * path belongs to a terminal managed by this HVC backend.
+ * If the check is successful, then an additional check is done to ensure
+ * that a terminal cannot be accessed multiple times (only one connection
+ * to a terminal is allowed). In that particular case, the pending path is
+ * severed. If it is the first connection, the pending path is accepted and
+ * associated to the struct hvc_iucv_private. The iucv state is updated to
+ * reflect that a communication path has been established.
+ *
+ * Returns 0 if the path belongs to a terminal managed by the this HVC backend;
+ * otherwise returns -ENODEV in order to dispatch this path to other handlers.
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static int hvc_iucv_path_pending(struct iucv_path *path,
+                                 u8 ipvmid[8], u8 ipuser[16])
+{
+        struct hvc_iucv_private *priv;
+        u8 nuser_data[16];
+        int i, rc;
+
+        priv = NULL;
+        for (i = 0; i < hvc_iucv_devices; i++)
+                if (hvc_iucv_table[i] &&
+                    (0 == memcmp(hvc_iucv_table[i]->srv_name, ipuser, 8))) {
+                        priv = hvc_iucv_table[i];
+                        break;
+                }
+
+        if (!priv)
+                return -ENODEV;
+
+        spin_lock(&priv->lock);
+
+        /* If the terminal is already connected or being severed, then sever
+         * this path to enforce that there is only ONE established communication
+         * path per terminal. */
+        if (priv->iucv_state != IUCV_DISCONN) {
+                iucv_path_sever(path, ipuser);
+                iucv_path_free(path);
+                goto out_path_handled;
+        }
+
+        /* accept path */
+        memcpy(nuser_data, ipuser + 8, 8); /* remote service (for af_iucv) */
+        memcpy(nuser_data + 8, ipuser, 8); /* local service (for af_iucv) */
+        path->msglim = 0xffff; /* IUCV MSGLIMIT */
+        path->flags &= ~IUCV_IPRMDATA; /* TODO: use IUCV_IPRMDATA */
+        rc = iucv_path_accept(path, &hvc_iucv_handler, nuser_data, priv);
+        if (rc) {
+                iucv_path_sever(path, ipuser);
+                iucv_path_free(path);
+                goto out_path_handled;
+        }
+        priv->path = path;
+        priv->iucv_state = IUCV_CONNECTED;
+
+out_path_handled:
+        spin_unlock(&priv->lock);
+        return 0;
+}
+
+/**
+ * hvc_iucv_path_severed() - IUCV handler to process a path sever.
+ * @path: Pending path (struct iucv_path)
+ * @ipuser: User specified data for this path
+ * (AF_IUCV: port/service name and originator port)
+ *
+ * The function also severs the path (as required by the IUCV protocol) and
+ * sets the iucv state to IUCV_SEVERED for the associated struct
+ * hvc_iucv_private instance. Later, the IUCV_SEVERED state triggers a tty
+ * hangup (hvc_iucv_get_chars() / hvc_iucv_write()).
+ *
+ * If tty portion of the HVC is closed then clean up the outqueue in addition.
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_path_severed(struct iucv_path *path, u8 ipuser[16])
+{
+        struct hvc_iucv_private *priv = path->private;
+
+        spin_lock(&priv->lock);
+        priv->iucv_state = IUCV_SEVERED;
+
+        /* NOTE: If the tty has not yet been opened by a getty program
+         * (e.g. to see console messages), then cleanup the
+         * hvc_iucv_private structure to allow re-connects.
+         *
+         * If the tty has been opened, the get_chars() callback returns
+         * -EPIPE to signal the hvc console layer to hang up the tty. */
+        priv->path = NULL;
+        if (priv->tty_state == TTY_CLOSED)
+                hvc_iucv_cleanup(priv);
+        spin_unlock(&priv->lock);
+
+        /* finally sever path (outside of priv->lock due to lock ordering) */
+        iucv_path_sever(path, ipuser);
+        iucv_path_free(path);
+}
+
+/**
+ * hvc_iucv_msg_pending() - IUCV handler to process an incoming IUCV message.
+ * @path: Pending path (struct iucv_path)
+ * @msg: Pointer to the IUCV message
+ *
+ * The function stores an incoming message on the input queue for later
+ * processing (by hvc_iucv_get_chars() / hvc_iucv_write()).
+ * However, if the tty has not yet been opened, the message is rejected.
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_msg_pending(struct iucv_path *path,
+                                 struct iucv_message *msg)
+{
+        struct hvc_iucv_private *priv = path->private;
+        struct iucv_tty_buffer *rb;
+
+        spin_lock(&priv->lock);
+
+        /* reject messages if tty has not yet been opened */
+        if (priv->tty_state == TTY_CLOSED) {
+                iucv_message_reject(path, msg);
+                goto unlock_return;
+        }
+
+        /* allocate buffer an empty buffer element */
+        rb = alloc_tty_buffer(0, GFP_ATOMIC);
+        if (!rb) {
+                iucv_message_reject(path, msg);
+                goto unlock_return; /* -ENOMEM */
+        }
+        rb->msg = *msg;
+
+        list_add_tail(&rb->list, &priv->tty_inqueue);
+
+        hvc_kick(); /* wakup hvc console thread */
+
+unlock_return:
+        spin_unlock(&priv->lock);
+}
+
+/**
+ * hvc_iucv_msg_complete() - IUCV handler to process message completion
+ * @path: Pending path (struct iucv_path)
+ * @msg: Pointer to the IUCV message
+ *
+ * The function is called upon completion of message delivery and the
+ * message is removed from the outqueue. Additional delivery information
+ * can be found in msg->audit: rejected messages (0x040000 (IPADRJCT)) and
+ * purged messages (0x010000 (IPADPGNR)).
+ *
+ * Locking: struct hvc_iucv_private->lock
+ */
+static void hvc_iucv_msg_complete(struct iucv_path *path,
+                                  struct iucv_message *msg)
+{
+        struct hvc_iucv_private *priv = path->private;
+        struct iucv_tty_buffer *ent, *next;
+        LIST_HEAD(list_remove);
+
+        spin_lock(&priv->lock);
+        list_for_each_entry_safe(ent, next, &priv->tty_outqueue, list)
+                if (ent->msg.id == msg->id) {
+                        list_move(&ent->list, &list_remove);
+                        break;
+                }
+        spin_unlock(&priv->lock);
+        destroy_tty_buffer_list(&list_remove);
+}
+
+
+/* HVC operations */
+static struct hv_ops hvc_iucv_ops = {
+        .get_chars = hvc_iucv_get_chars,
+        .put_chars = hvc_iucv_put_chars,
+        .notifier_add = hvc_iucv_notifier_add,
+        .notifier_del = hvc_iucv_notifier_del,
+        .notifier_hangup = hvc_iucv_notifier_hangup,
+};
+
+/**
+ * hvc_iucv_alloc() - Allocates a new struct hvc_iucv_private instance
+ * @id: hvc_iucv_table index
+ *
+ * This function allocates a new hvc_iucv_private struct and put the
+ * instance into hvc_iucv_table at index @id.
+ * Returns 0 on success; otherwise non-zero.
+ */
+static int __init hvc_iucv_alloc(int id)
+{
+        struct hvc_iucv_private *priv;
+        char name[9];
+        int rc;
+
+        priv = kzalloc(sizeof(struct hvc_iucv_private), GFP_KERNEL);
+        if (!priv)
+                return -ENOMEM;
+
+        spin_lock_init(&priv->lock);
+        INIT_LIST_HEAD(&priv->tty_outqueue);
+        INIT_LIST_HEAD(&priv->tty_inqueue);
+
+        /* Finally allocate hvc */
+        priv->hvc = hvc_alloc(HVC_IUCV_MAGIC + id,
+                              HVC_IUCV_MAGIC + id, &hvc_iucv_ops, PAGE_SIZE);
+        if (IS_ERR(priv->hvc)) {
+                rc = PTR_ERR(priv->hvc);
+                kfree(priv);
+                return rc;
+        }
+
+        /* setup iucv related information */
+        snprintf(name, 9, "ihvc%-4d", id);
+        memcpy(priv->srv_name, name, 8);
+        ASCEBC(priv->srv_name, 8);
+
+        hvc_iucv_table[id] = priv;
+        return 0;
+}
+
+/**
+ * hvc_iucv_init() - Initialization of HVC backend for z/VM IUCV
+ */
+static int __init hvc_iucv_init(void)
+{
+        int rc, i;
+
+        if (!MACHINE_IS_VM) {
+                pr_warning("The z/VM IUCV Hypervisor console cannot be "
+                           "used without z/VM.\n");
+                return -ENODEV;
+        }
+
+        if (!hvc_iucv_devices)
+                return -ENODEV;
+
+        if (hvc_iucv_devices > MAX_HVC_IUCV_LINES)
+                return -EINVAL;
+
+        hvc_iucv_buffer_cache = kmem_cache_create(KMSG_COMPONENT,
+                                                  sizeof(struct iucv_tty_buffer),
+                                                  0, 0, NULL);
+        if (!hvc_iucv_buffer_cache) {
+                pr_err("Not enough memory for driver initialization "
+                       "(rs=%d).\n", 1);
+                return -ENOMEM;
+        }
+
+        hvc_iucv_mempool = mempool_create_slab_pool(MEMPOOL_MIN_NR,
+                                                    hvc_iucv_buffer_cache);
+        if (!hvc_iucv_mempool) {
+                pr_err("Not enough memory for driver initialization "
+                       "(rs=%d).\n", 2);
+                kmem_cache_destroy(hvc_iucv_buffer_cache);
+                return -ENOMEM;
+        }
+
+        /* allocate hvc_iucv_private structs */
+        for (i = 0; i < hvc_iucv_devices; i++) {
+                rc = hvc_iucv_alloc(i);
+                if (rc) {
+                        pr_err("Could not create new z/VM IUCV HVC backend "
+                               "rc=%d.\n", rc);
+                        goto out_error_hvc;
+                }
+        }
+
+        /* register IUCV callback handler */
+        rc = iucv_register(&hvc_iucv_handler, 0);
+        if (rc) {
+                pr_err("Could not register iucv handler (rc=%d).\n", rc);
+                goto out_error_iucv;
+        }
+
+        return 0;
+
+out_error_iucv:
+        iucv_unregister(&hvc_iucv_handler, 0);
+out_error_hvc:
+        for (i = 0; i < hvc_iucv_devices; i++)
+                if (hvc_iucv_table[i]) {
+                        if (hvc_iucv_table[i]->hvc)
+                                hvc_remove(hvc_iucv_table[i]->hvc);
+                        kfree(hvc_iucv_table[i]);
+                }
+        mempool_destroy(hvc_iucv_mempool);
+        kmem_cache_destroy(hvc_iucv_buffer_cache);
+        return rc;
+}
+
+/**
+ * hvc_iucv_console_init() - Early console initialization
+ */
+static int __init hvc_iucv_console_init(void)
+{
+        if (!MACHINE_IS_VM || !hvc_iucv_devices)
+                return -ENODEV;
+        return hvc_instantiate(HVC_IUCV_MAGIC, 0, &hvc_iucv_ops);
+}
+
+/**
+ * hvc_iucv_config() - Parsing of hvc_iucv= kernel command line parameter
+ * @val: Parameter value (numeric)
+ */
+static int __init hvc_iucv_config(char *val)
+{
+        return strict_strtoul(val, 10, &hvc_iucv_devices);
+}
+
+
+module_init(hvc_iucv_init);
+console_initcall(hvc_iucv_console_init);
+__setup("hvc_iucv=", hvc_iucv_config);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("HVC back-end for z/VM IUCV.");
+MODULE_AUTHOR("Hendrik Brueckner <brueckner@linux.vnet.ibm.com>");
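
For orientation, the driver above derives the backing table slot from the HVC terminal number by subtracting HVC_IUCV_MAGIC, and the number of terminals comes from the hvc_iucv= kernel parameter parsed by hvc_iucv_config(). The following stand-alone sketch (ordinary user-space C with made-up values; it only mirrors the arithmetic, not the kernel code) illustrates that mapping:

/* Stand-alone sketch (not kernel code) of the vtermno <-> table-index
 * mapping used by hvc_iucv_get_private(); the values are illustrative. */
#include <stdint.h>
#include <stdio.h>

#define HVC_IUCV_MAGIC 0xc9e4c3e5      /* same constant as in hvc_iucv.c */

static unsigned long hvc_iucv_devices = 4;      /* e.g. booted with hvc_iucv=4 */

static int vtermno_to_index(uint32_t num)
{
        /* mirrors the bounds check in hvc_iucv_get_private() */
        if ((num < HVC_IUCV_MAGIC) || (num - HVC_IUCV_MAGIC > hvc_iucv_devices))
                return -1;
        return (int)(num - HVC_IUCV_MAGIC);
}

int main(void)
{
        /* hvc_alloc() is called with HVC_IUCV_MAGIC + id, so terminal 0 has
         * vtermno 0xc9e4c3e5, terminal 1 has 0xc9e4c3e6, and so on. */
        printf("index for first terminal: %d\n",
               vtermno_to_index(HVC_IUCV_MAGIC));
        printf("index for an unrelated vtermno: %d\n",
               vtermno_to_index(0));
        return 0;
}
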
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 363bd1303d21..570ae59c1d5e 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1898,15 +1898,19 @@ restart_cb:
                 wait_event(dasd_flush_wq, (cqr->status < DASD_CQR_QUEUED));
                 /* Process finished ERP request. */
                 if (cqr->refers) {
+                        spin_lock_bh(&block->queue_lock);
                         __dasd_block_process_erp(block, cqr);
+                        spin_unlock_bh(&block->queue_lock);
                         /* restart list_for_xx loop since dasd_process_erp
                          * might remove multiple elements */
                         goto restart_cb;
                 }
                 /* call the callback function */
+                spin_lock_irq(&block->request_queue_lock);
                 cqr->endclk = get_clock();
                 list_del_init(&cqr->blocklist);
                 __dasd_cleanup_cqr(cqr);
+                spin_unlock_irq(&block->request_queue_lock);
         }
         return rc;
 }
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 921443b01d16..2ef25731d197 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -23,6 +23,7 @@
 
 /* This is ugly... */
 #define PRINTK_HEADER "dasd_devmap:"
+#define DASD_BUS_ID_SIZE 20
 
 #include "dasd_int.h"
 
@@ -41,7 +42,7 @@ EXPORT_SYMBOL_GPL(dasd_page_cache);
  */
 struct dasd_devmap {
         struct list_head list;
-        char bus_id[BUS_ID_SIZE];
+        char bus_id[DASD_BUS_ID_SIZE];
         unsigned int devindex;
         unsigned short features;
         struct dasd_device *device;
@@ -94,7 +95,7 @@ dasd_hash_busid(const char *bus_id)
         int hash, i;
 
         hash = 0;
-        for (i = 0; (i < BUS_ID_SIZE) && *bus_id; i++, bus_id++)
+        for (i = 0; (i < DASD_BUS_ID_SIZE) && *bus_id; i++, bus_id++)
                 hash += *bus_id;
         return hash & 0xff;
 }
@@ -301,7 +302,7 @@ dasd_parse_range( char *parsestring ) {
         int from, from_id0, from_id1;
         int to, to_id0, to_id1;
         int features, rc;
-        char bus_id[BUS_ID_SIZE+1], *str;
+        char bus_id[DASD_BUS_ID_SIZE+1], *str;
 
         str = parsestring;
         rc = dasd_busid(&str, &from_id0, &from_id1, &from);
@@ -407,14 +408,14 @@ dasd_add_busid(const char *bus_id, int features)
         devmap = NULL;
         hash = dasd_hash_busid(bus_id);
         list_for_each_entry(tmp, &dasd_hashlists[hash], list)
-                if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) {
+                if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
                         devmap = tmp;
                         break;
                 }
         if (!devmap) {
                 /* This bus_id is new. */
                 new->devindex = dasd_max_devindex++;
-                strncpy(new->bus_id, bus_id, BUS_ID_SIZE);
+                strncpy(new->bus_id, bus_id, DASD_BUS_ID_SIZE);
                 new->features = features;
                 new->device = NULL;
                 list_add(&new->list, &dasd_hashlists[hash]);
@@ -439,7 +440,7 @@ dasd_find_busid(const char *bus_id)
         devmap = ERR_PTR(-ENODEV);
         hash = dasd_hash_busid(bus_id);
         list_for_each_entry(tmp, &dasd_hashlists[hash], list) {
-                if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) {
+                if (strncmp(tmp->bus_id, bus_id, DASD_BUS_ID_SIZE) == 0) {
                         devmap = tmp;
                         break;
                 }
@@ -561,7 +562,7 @@ dasd_create_device(struct ccw_device *cdev)
         }
 
         spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
-        cdev->dev.driver_data = device;
+        dev_set_drvdata(&cdev->dev, device);
         spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags);
 
         return device;
@@ -597,7 +598,7 @@ dasd_delete_device(struct dasd_device *device)
 
         /* Disconnect dasd_device structure from ccw_device structure. */
         spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
-        device->cdev->dev.driver_data = NULL;
+        dev_set_drvdata(&device->cdev->dev, NULL);
         spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
 
         /*
@@ -638,7 +639,7 @@ dasd_put_device_wake(struct dasd_device *device)
 struct dasd_device *
 dasd_device_from_cdev_locked(struct ccw_device *cdev)
 {
-        struct dasd_device *device = cdev->dev.driver_data;
+        struct dasd_device *device = dev_get_drvdata(&cdev->dev);
 
         if (!device)
                 return ERR_PTR(-ENODEV);
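
The dasd_devmap hunks above also switch from poking cdev->dev.driver_data directly to the dev_set_drvdata()/dev_get_drvdata() accessors. A minimal sketch of that accessor pattern follows; struct demo_device and the demo_* functions are invented for illustration, only the struct device helpers are real:

/* Sketch of the driver-data accessor pattern; struct demo_device and the
 * demo_* functions are invented for illustration. */
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>

struct demo_device {
        int id;
};

static int demo_attach(struct device *dev)
{
        struct demo_device *priv;

        priv = kzalloc(sizeof(*priv), GFP_KERNEL);
        if (!priv)
                return -ENOMEM;
        dev_set_drvdata(dev, priv);     /* instead of dev->driver_data = priv */
        return 0;
}

static void demo_detach(struct device *dev)
{
        /* instead of reading dev->driver_data directly */
        struct demo_device *priv = dev_get_drvdata(dev);

        dev_set_drvdata(dev, NULL);
        kfree(priv);
}
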
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 2e60d5f968c8..bd2c52e20762 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -1496,7 +1496,7 @@ static void dasd_eckd_handle_unsolicited_interrupt(struct dasd_device *device,
 
 
         /* service information message SIM */
-        if (irb->esw.esw0.erw.cons && (irb->ecw[27] & DASD_SENSE_BIT_0) &&
+        if (irb->esw.esw0.erw.cons && !(irb->ecw[27] & DASD_SENSE_BIT_0) &&
             ((irb->ecw[6] & DASD_SIM_SENSE) == DASD_SIM_SENSE)) {
                 dasd_3990_erp_handle_sim(device, irb->ecw);
                 dasd_schedule_device_bh(device);
diff --git a/drivers/s390/block/dasd_proc.c b/drivers/s390/block/dasd_proc.c
index 9088de84b45d..bf6fd348f20e 100644
--- a/drivers/s390/block/dasd_proc.c
+++ b/drivers/s390/block/dasd_proc.c
@@ -180,12 +180,12 @@ dasd_calc_metrics(char *page, char **start, off_t off,
 
 #ifdef CONFIG_DASD_PROFILE
 static char *
-dasd_statistics_array(char *str, unsigned int *array, int shift)
+dasd_statistics_array(char *str, unsigned int *array, int factor)
 {
         int i;
 
         for (i = 0; i < 32; i++) {
-                str += sprintf(str, "%7d ", array[i] >> shift);
+                str += sprintf(str, "%7d ", array[i] / factor);
                 if (i == 15)
                         str += sprintf(str, "\n");
         }
@@ -202,7 +202,7 @@ dasd_statistics_read(char *page, char **start, off_t off,
 #ifdef CONFIG_DASD_PROFILE
         struct dasd_profile_info_t *prof;
         char *str;
-        int shift;
+        int factor;
 
         /* check for active profiling */
         if (dasd_profile_level == DASD_PROFILE_OFF) {
@@ -214,12 +214,14 @@ dasd_statistics_read(char *page, char **start, off_t off,
 
         prof = &dasd_global_profile;
         /* prevent couter 'overflow' on output */
-        for (shift = 0; (prof->dasd_io_reqs >> shift) > 9999999; shift++);
+        for (factor = 1; (prof->dasd_io_reqs / factor) > 9999999;
+             factor *= 10);
 
         str = page;
         str += sprintf(str, "%d dasd I/O requests\n", prof->dasd_io_reqs);
-        str += sprintf(str, "with %d sectors(512B each)\n",
+        str += sprintf(str, "with %u sectors(512B each)\n",
                        prof->dasd_io_sects);
+        str += sprintf(str, "Scale Factor is %d\n", factor);
         str += sprintf(str,
                        " __<4 ___8 __16 __32 __64 _128 "
                        " _256 _512 __1k __2k __4k __8k "
@@ -230,22 +232,22 @@ dasd_statistics_read(char *page, char **start, off_t off,
                        " __1G __2G __4G " " _>4G\n");
 
         str += sprintf(str, "Histogram of sizes (512B secs)\n");
-        str = dasd_statistics_array(str, prof->dasd_io_secs, shift);
+        str = dasd_statistics_array(str, prof->dasd_io_secs, factor);
         str += sprintf(str, "Histogram of I/O times (microseconds)\n");
-        str = dasd_statistics_array(str, prof->dasd_io_times, shift);
+        str = dasd_statistics_array(str, prof->dasd_io_times, factor);
         str += sprintf(str, "Histogram of I/O times per sector\n");
-        str = dasd_statistics_array(str, prof->dasd_io_timps, shift);
+        str = dasd_statistics_array(str, prof->dasd_io_timps, factor);
         str += sprintf(str, "Histogram of I/O time till ssch\n");
-        str = dasd_statistics_array(str, prof->dasd_io_time1, shift);
+        str = dasd_statistics_array(str, prof->dasd_io_time1, factor);
         str += sprintf(str, "Histogram of I/O time between ssch and irq\n");
-        str = dasd_statistics_array(str, prof->dasd_io_time2, shift);
+        str = dasd_statistics_array(str, prof->dasd_io_time2, factor);
         str += sprintf(str, "Histogram of I/O time between ssch "
                        "and irq per sector\n");
-        str = dasd_statistics_array(str, prof->dasd_io_time2ps, shift);
+        str = dasd_statistics_array(str, prof->dasd_io_time2ps, factor);
         str += sprintf(str, "Histogram of I/O time between irq and end\n");
-        str = dasd_statistics_array(str, prof->dasd_io_time3, shift);
+        str = dasd_statistics_array(str, prof->dasd_io_time3, factor);
         str += sprintf(str, "# of req in chanq at enqueuing (1..32) \n");
-        str = dasd_statistics_array(str, prof->dasd_io_nr_req, shift);
+        str = dasd_statistics_array(str, prof->dasd_io_nr_req, factor);
         len = str - page;
 #else
         len = sprintf(page, "Statistics are not activated in this kernel\n");
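
The dasd_proc change above swaps a power-of-two shift for a decimal scale factor so the printed histogram values stay within the seven-digit column width. A stand-alone sketch of the same scaling loop (the counter value is an arbitrary example):

/* Stand-alone sketch of the decimal scale-factor loop used above;
 * the counter value is an arbitrary example. */
#include <stdio.h>

int main(void)
{
        unsigned int dasd_io_reqs = 123456789;  /* example request counter */
        int factor;

        /* grow the factor by 10x until the scaled value fits the
         * seven-digit histogram column */
        for (factor = 1; (dasd_io_reqs / factor) > 9999999; factor *= 10)
                ;

        printf("factor=%d scaled=%u\n", factor, dasd_io_reqs / factor);
        return 0;
}
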
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 63f26a135fe5..26ffc6ab441d 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -4,6 +4,9 @@
  * Authors: Carsten Otte, Stefan Weinhuber, Gerald Schaefer
  */
 
+#define KMSG_COMPONENT "dcssblk"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
 #include <linux/module.h>
 #include <linux/moduleparam.h>
 #include <linux/ctype.h>
@@ -17,19 +20,10 @@
 #include <linux/interrupt.h>
 #include <asm/s390_rdev.h>
 
-//#define DCSSBLK_DEBUG /* Debug messages on/off */
 #define DCSSBLK_NAME "dcssblk"
 #define DCSSBLK_MINORS_PER_DISK 1
 #define DCSSBLK_PARM_LEN 400
-
-#ifdef DCSSBLK_DEBUG
-#define PRINT_DEBUG(x...) printk(KERN_DEBUG DCSSBLK_NAME " debug: " x)
-#else
-#define PRINT_DEBUG(x...) do {} while (0)
-#endif
-#define PRINT_INFO(x...) printk(KERN_INFO DCSSBLK_NAME " info: " x)
-#define PRINT_WARN(x...) printk(KERN_WARNING DCSSBLK_NAME " warning: " x)
-#define PRINT_ERR(x...) printk(KERN_ERR DCSSBLK_NAME " error: " x)
+#define DCSS_BUS_ID_SIZE 20
 
 static int dcssblk_open(struct block_device *bdev, fmode_t mode);
 static int dcssblk_release(struct gendisk *disk, fmode_t mode);
@@ -50,7 +44,7 @@ static struct block_device_operations dcssblk_devops = {
 struct dcssblk_dev_info {
         struct list_head lh;
         struct device dev;
-        char segment_name[BUS_ID_SIZE];
+        char segment_name[DCSS_BUS_ID_SIZE];
         atomic_t use_count;
         struct gendisk *gd;
         unsigned long start;
@@ -65,7 +59,7 @@ struct dcssblk_dev_info {
 
 struct segment_info {
         struct list_head lh;
-        char segment_name[BUS_ID_SIZE];
+        char segment_name[DCSS_BUS_ID_SIZE];
         unsigned long start;
         unsigned long end;
         int segment_type;
@@ -261,10 +255,9 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
         /* check continuity */
         for (i = 0; i < dev_info->num_of_segments - 1; i++) {
                 if ((sort_list[i].end + 1) != sort_list[i+1].start) {
-                        PRINT_ERR("Segment %s is not contiguous with "
-                                  "segment %s\n",
-                                  sort_list[i].segment_name,
-                                  sort_list[i+1].segment_name);
+                        pr_err("Adjacent DCSSs %s and %s are not "
+                               "contiguous\n", sort_list[i].segment_name,
+                               sort_list[i+1].segment_name);
                         rc = -EINVAL;
                         goto out;
                 }
@@ -275,10 +268,10 @@ dcssblk_is_continuous(struct dcssblk_dev_info *dev_info)
                     !(sort_list[i+1].segment_type &
                       SEGMENT_EXCLUSIVE) ||
                     (sort_list[i+1].segment_type == SEG_TYPE_ER)) {
-                        PRINT_ERR("Segment %s has different type from "
-                                  "segment %s\n",
-                                  sort_list[i].segment_name,
-                                  sort_list[i+1].segment_name);
+                        pr_err("DCSS %s and DCSS %s have "
+                               "incompatible types\n",
+                               sort_list[i].segment_name,
+                               sort_list[i+1].segment_name);
                         rc = -EINVAL;
                         goto out;
                 }
@@ -380,8 +373,9 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
         } else if (inbuf[0] == '0') {
                 /* reload segments in exclusive mode */
                 if (dev_info->segment_type == SEG_TYPE_SC) {
-                        PRINT_ERR("Segment type SC (%s) cannot be loaded in "
-                                  "non-shared mode\n", dev_info->segment_name);
+                        pr_err("DCSS %s is of type SC and cannot be "
+                               "loaded as exclusive-writable\n",
+                               dev_info->segment_name);
                         rc = -EINVAL;
                         goto out;
                 }
@@ -404,9 +398,8 @@ dcssblk_shared_store(struct device *dev, struct device_attribute *attr, const ch
         goto out;
 
 removeseg:
-        PRINT_ERR("Could not reload segment(s) of the device %s, removing "
-                  "segment(s) now!\n",
-                  dev_info->segment_name);
+        pr_err("DCSS device %s is removed after a failed access mode "
+               "change\n", dev_info->segment_name);
         temp = entry;
         list_for_each_entry(entry, &dev_info->seg_list, lh) {
                 if (entry != temp)
@@ -454,17 +447,17 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
         if (inbuf[0] == '1') {
                 if (atomic_read(&dev_info->use_count) == 0) {
                         // device is idle => we save immediately
-                        PRINT_INFO("Saving segment(s) of the device %s\n",
-                                   dev_info->segment_name);
+                        pr_info("All DCSSs that map to device %s are "
+                                "saved\n", dev_info->segment_name);
                         list_for_each_entry(entry, &dev_info->seg_list, lh) {
                                 segment_save(entry->segment_name);
                         }
                 } else {
                         // device is busy => we save it when it becomes
                         // idle in dcssblk_release
-                        PRINT_INFO("Device %s is currently busy, segment(s) "
-                                   "will be saved when it becomes idle...\n",
-                                   dev_info->segment_name);
+                        pr_info("Device %s is in use, its DCSSs will be "
+                                "saved when it becomes idle\n",
+                                dev_info->segment_name);
                         dev_info->save_pending = 1;
                 }
         } else if (inbuf[0] == '0') {
@@ -472,9 +465,9 @@ dcssblk_save_store(struct device *dev, struct device_attribute *attr, const char
                         // device is busy & the user wants to undo his save
                         // request
                         dev_info->save_pending = 0;
-                        PRINT_INFO("Pending save for segment(s) of the device "
-                                   "%s deactivated\n",
-                                   dev_info->segment_name);
+                        pr_info("A pending save request for device %s "
+                                "has been canceled\n",
+                                dev_info->segment_name);
                 }
         } else {
                 up_write(&dcssblk_devices_sem);
@@ -614,9 +607,8 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
 
         seg_byte_size = (dev_info->end - dev_info->start + 1);
         set_capacity(dev_info->gd, seg_byte_size >> 9); // size in sectors
-        PRINT_INFO("Loaded segment(s) %s, size = %lu Byte, "
-                   "capacity = %lu (512 Byte) sectors\n", local_buf,
-                   seg_byte_size, seg_byte_size >> 9);
+        pr_info("Loaded %s with total size %lu bytes and capacity %lu "
+                "sectors\n", local_buf, seg_byte_size, seg_byte_size >> 9);
 
         dev_info->save_pending = 0;
         dev_info->is_shared = 1;
@@ -744,13 +736,15 @@ dcssblk_remove_store(struct device *dev, struct device_attribute *attr, const ch
         dev_info = dcssblk_get_device_by_name(local_buf);
         if (dev_info == NULL) {
                 up_write(&dcssblk_devices_sem);
-                PRINT_WARN("Device %s is not loaded!\n", local_buf);
+                pr_warning("Device %s cannot be removed because it is not a "
+                           "known device\n", local_buf);
                 rc = -ENODEV;
                 goto out_buf;
         }
         if (atomic_read(&dev_info->use_count) != 0) {
                 up_write(&dcssblk_devices_sem);
-                PRINT_WARN("Device %s is in use!\n", local_buf);
+                pr_warning("Device %s cannot be removed while it is in "
+                           "use\n", local_buf);
                 rc = -EBUSY;
                 goto out_buf;
         }
@@ -807,8 +801,8 @@ dcssblk_release(struct gendisk *disk, fmode_t mode)
         down_write(&dcssblk_devices_sem);
         if (atomic_dec_and_test(&dev_info->use_count)
             && (dev_info->save_pending)) {
-                PRINT_INFO("Device %s became idle and is being saved now\n",
-                           dev_info->segment_name);
+                pr_info("Device %s has become idle and is being saved "
+                        "now\n", dev_info->segment_name);
                 list_for_each_entry(entry, &dev_info->seg_list, lh) {
813 | segment_save(entry->segment_name); | 807 | segment_save(entry->segment_name); |
814 | } | 808 | } |
@@ -851,7 +845,8 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio) | |||
851 | case SEG_TYPE_SC: | 845 | case SEG_TYPE_SC: |
852 | /* cannot write to these segments */ | 846 | /* cannot write to these segments */ |
853 | if (bio_data_dir(bio) == WRITE) { | 847 | if (bio_data_dir(bio) == WRITE) { |
854 | PRINT_WARN("rejecting write to ro device %s\n", | 848 | pr_warning("Writing to %s failed because it " |
849 | "is a read-only device\n", | ||
855 | dev_name(&dev_info->dev)); | 850 | dev_name(&dev_info->dev)); |
856 | goto fail; | 851 | goto fail; |
857 | } | 852 | } |
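
The dcssblk hunks above also carry a small deferred-work idiom: a save request against a busy device only sets save_pending, and the actual segment_save() calls run later in dcssblk_release() once the use count drops to zero; a still-pending request can be cancelled before that happens. A rough user-space sketch of the idiom follows, with illustrative names (struct dev, device_save(), request_save()) and without the dcssblk_devices_sem locking the driver uses around the flag and the segment list.

#include <stdatomic.h>
#include <stdio.h>

struct dev {
	atomic_int use_count;   /* open references, like dev_info->use_count */
	int save_pending;       /* save requested while the device was busy */
};

static void device_save(struct dev *d)
{
	printf("saving device state\n");   /* stands in for segment_save() */
}

/* Save immediately if idle, otherwise remember the request. */
static void request_save(struct dev *d)
{
	if (atomic_load(&d->use_count) == 0)
		device_save(d);
	else
		d->save_pending = 1;
}

/* Drop one reference; the last closer performs the deferred save. */
static void release(struct dev *d)
{
	if (atomic_fetch_sub(&d->use_count, 1) == 1 && d->save_pending) {
		device_save(d);
		d->save_pending = 0;
	}
}

int main(void)
{
	struct dev d = { .use_count = 1, .save_pending = 0 };

	request_save(&d);   /* device busy: only marks save_pending */
	release(&d);        /* last close: performs the deferred save */
	return 0;
}

In the driver the pending flag and the list walk are serialized by dcssblk_devices_sem; the sketch leaves that out for brevity.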
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c index 03916989ed2d..76814f3e898a 100644 --- a/drivers/s390/block/xpram.c +++ b/drivers/s390/block/xpram.c | |||
@@ -25,6 +25,9 @@ | |||
25 | * generic hard disk support to replace ad-hoc partitioning | 25 | * generic hard disk support to replace ad-hoc partitioning |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #define KMSG_COMPONENT "xpram" | ||
29 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
30 | |||
28 | #include <linux/module.h> | 31 | #include <linux/module.h> |
29 | #include <linux/moduleparam.h> | 32 | #include <linux/moduleparam.h> |
30 | #include <linux/ctype.h> /* isdigit, isxdigit */ | 33 | #include <linux/ctype.h> /* isdigit, isxdigit */ |
@@ -42,12 +45,6 @@ | |||
42 | #define XPRAM_DEVS 1 /* one partition */ | 45 | #define XPRAM_DEVS 1 /* one partition */ |
43 | #define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */ | 46 | #define XPRAM_MAX_DEVS 32 /* maximal number of devices (partitions) */ |
44 | 47 | ||
45 | #define PRINT_DEBUG(x...) printk(KERN_DEBUG XPRAM_NAME " debug:" x) | ||
46 | #define PRINT_INFO(x...) printk(KERN_INFO XPRAM_NAME " info:" x) | ||
47 | #define PRINT_WARN(x...) printk(KERN_WARNING XPRAM_NAME " warning:" x) | ||
48 | #define PRINT_ERR(x...) printk(KERN_ERR XPRAM_NAME " error:" x) | ||
49 | |||
50 | |||
51 | typedef struct { | 48 | typedef struct { |
52 | unsigned int size; /* size of xpram segment in pages */ | 49 | unsigned int size; /* size of xpram segment in pages */ |
53 | unsigned int offset; /* start page of xpram segment */ | 50 | unsigned int offset; /* start page of xpram segment */ |
@@ -264,7 +261,7 @@ static int __init xpram_setup_sizes(unsigned long pages) | |||
264 | 261 | ||
265 | /* Check number of devices. */ | 262 | /* Check number of devices. */ |
266 | if (devs <= 0 || devs > XPRAM_MAX_DEVS) { | 263 | if (devs <= 0 || devs > XPRAM_MAX_DEVS) { |
267 | PRINT_ERR("invalid number %d of devices\n",devs); | 264 | pr_err("%d is not a valid number of XPRAM devices\n",devs); |
268 | return -EINVAL; | 265 | return -EINVAL; |
269 | } | 266 | } |
270 | xpram_devs = devs; | 267 | xpram_devs = devs; |
@@ -295,22 +292,22 @@ static int __init xpram_setup_sizes(unsigned long pages) | |||
295 | mem_auto_no++; | 292 | mem_auto_no++; |
296 | } | 293 | } |
297 | 294 | ||
298 | PRINT_INFO(" number of devices (partitions): %d \n", xpram_devs); | 295 | pr_info(" number of devices (partitions): %d \n", xpram_devs); |
299 | for (i = 0; i < xpram_devs; i++) { | 296 | for (i = 0; i < xpram_devs; i++) { |
300 | if (xpram_sizes[i]) | 297 | if (xpram_sizes[i]) |
301 | PRINT_INFO(" size of partition %d: %u kB\n", | 298 | pr_info(" size of partition %d: %u kB\n", |
302 | i, xpram_sizes[i]); | 299 | i, xpram_sizes[i]); |
303 | else | 300 | else |
304 | PRINT_INFO(" size of partition %d to be set " | 301 | pr_info(" size of partition %d to be set " |
305 | "automatically\n",i); | 302 | "automatically\n",i); |
306 | } | 303 | } |
307 | PRINT_DEBUG(" memory needed (for sized partitions): %lu kB\n", | 304 | pr_info(" memory needed (for sized partitions): %lu kB\n", |
308 | mem_needed); | 305 | mem_needed); |
309 | PRINT_DEBUG(" partitions to be sized automatically: %d\n", | 306 | pr_info(" partitions to be sized automatically: %d\n", |
310 | mem_auto_no); | 307 | mem_auto_no); |
311 | 308 | ||
312 | if (mem_needed > pages * 4) { | 309 | if (mem_needed > pages * 4) { |
313 | PRINT_ERR("Not enough expanded memory available\n"); | 310 | pr_err("Not enough expanded memory available\n"); |
314 | return -EINVAL; | 311 | return -EINVAL; |
315 | } | 312 | } |
316 | 313 | ||
@@ -322,8 +319,8 @@ static int __init xpram_setup_sizes(unsigned long pages) | |||
322 | */ | 319 | */ |
323 | if (mem_auto_no) { | 320 | if (mem_auto_no) { |
324 | mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4; | 321 | mem_auto = ((pages - mem_needed / 4) / mem_auto_no) * 4; |
325 | PRINT_INFO(" automatically determined " | 322 | pr_info(" automatically determined " |
326 | "partition size: %lu kB\n", mem_auto); | 323 | "partition size: %lu kB\n", mem_auto); |
327 | for (i = 0; i < xpram_devs; i++) | 324 | for (i = 0; i < xpram_devs; i++) |
328 | if (xpram_sizes[i] == 0) | 325 | if (xpram_sizes[i] == 0) |
329 | xpram_sizes[i] = mem_auto; | 326 | xpram_sizes[i] = mem_auto; |
@@ -405,12 +402,12 @@ static int __init xpram_init(void) | |||
405 | 402 | ||
406 | /* Find out size of expanded memory. */ | 403 | /* Find out size of expanded memory. */ |
407 | if (xpram_present() != 0) { | 404 | if (xpram_present() != 0) { |
408 | PRINT_WARN("No expanded memory available\n"); | 405 | pr_err("No expanded memory available\n"); |
409 | return -ENODEV; | 406 | return -ENODEV; |
410 | } | 407 | } |
411 | xpram_pages = xpram_highest_page_index() + 1; | 408 | xpram_pages = xpram_highest_page_index() + 1; |
412 | PRINT_INFO(" %u pages expanded memory found (%lu KB).\n", | 409 | pr_info(" %u pages expanded memory found (%lu KB).\n", |
413 | xpram_pages, (unsigned long) xpram_pages*4); | 410 | xpram_pages, (unsigned long) xpram_pages*4); |
414 | rc = xpram_setup_sizes(xpram_pages); | 411 | rc = xpram_setup_sizes(xpram_pages); |
415 | if (rc) | 412 | if (rc) |
416 | return rc; | 413 | return rc; |
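
The common thread in these conversions is the KMSG_COMPONENT / pr_fmt pair added at the top of each file: pr_fmt() is applied by the kernel's pr_err()/pr_info() family to every format string in that translation unit, which is what lets the call sites drop their hand-written "xpram error:" style prefixes. Below is a stand-alone sketch of the mechanism with plain stdio stand-ins for the pr_* macros; the real definitions live in the kernel headers and also encode the log level.

#include <stdio.h>

/* Same pattern as the defines added before the #includes in xpram.c. */
#define KMSG_COMPONENT "xpram"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

/* Simplified stand-ins for the kernel's pr_info()/pr_err() macros. */
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("%u pages expanded memory found (%lu KB).\n", 256u, 1024ul);
	pr_err("%d is not a valid number of XPRAM devices\n", 0);
	/* Both lines come out prefixed with "xpram: ". */
	return 0;
}

Because pr_fmt() has to be defined before the pr_* macros are pulled in, the #define sits above the #include block, which is why every converted file gains those two lines in the same spot.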
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index 35fd8dfcaaa6..97e63cf46944 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c | |||
@@ -7,6 +7,9 @@ | |||
7 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> | 7 | * Author: Gerald Schaefer <gerald.schaefer@de.ibm.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "monreader" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
10 | #include <linux/module.h> | 13 | #include <linux/module.h> |
11 | #include <linux/moduleparam.h> | 14 | #include <linux/moduleparam.h> |
12 | #include <linux/init.h> | 15 | #include <linux/init.h> |
@@ -24,19 +27,6 @@ | |||
24 | #include <asm/ebcdic.h> | 27 | #include <asm/ebcdic.h> |
25 | #include <asm/extmem.h> | 28 | #include <asm/extmem.h> |
26 | 29 | ||
27 | //#define MON_DEBUG /* Debug messages on/off */ | ||
28 | |||
29 | #define MON_NAME "monreader" | ||
30 | |||
31 | #define P_INFO(x...) printk(KERN_INFO MON_NAME " info: " x) | ||
32 | #define P_ERROR(x...) printk(KERN_ERR MON_NAME " error: " x) | ||
33 | #define P_WARNING(x...) printk(KERN_WARNING MON_NAME " warning: " x) | ||
34 | |||
35 | #ifdef MON_DEBUG | ||
36 | #define P_DEBUG(x...) printk(KERN_DEBUG MON_NAME " debug: " x) | ||
37 | #else | ||
38 | #define P_DEBUG(x...) do {} while (0) | ||
39 | #endif | ||
40 | 30 | ||
41 | #define MON_COLLECT_SAMPLE 0x80 | 31 | #define MON_COLLECT_SAMPLE 0x80 |
42 | #define MON_COLLECT_EVENT 0x40 | 32 | #define MON_COLLECT_EVENT 0x40 |
@@ -172,7 +162,7 @@ static int mon_send_reply(struct mon_msg *monmsg, | |||
172 | } else | 162 | } else |
173 | monmsg->replied_msglim = 1; | 163 | monmsg->replied_msglim = 1; |
174 | if (rc) { | 164 | if (rc) { |
175 | P_ERROR("read, IUCV reply failed with rc = %i\n\n", rc); | 165 | pr_err("Reading monitor data failed with rc=%i\n", rc); |
176 | return -EIO; | 166 | return -EIO; |
177 | } | 167 | } |
178 | return 0; | 168 | return 0; |
@@ -251,7 +241,8 @@ static void mon_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) | |||
251 | { | 241 | { |
252 | struct mon_private *monpriv = path->private; | 242 | struct mon_private *monpriv = path->private; |
253 | 243 | ||
254 | P_ERROR("IUCV connection severed with rc = 0x%X\n", ipuser[0]); | 244 | pr_err("z/VM *MONITOR system service disconnected with rc=%i\n", |
245 | ipuser[0]); | ||
255 | iucv_path_sever(path, NULL); | 246 | iucv_path_sever(path, NULL); |
256 | atomic_set(&monpriv->iucv_severed, 1); | 247 | atomic_set(&monpriv->iucv_severed, 1); |
257 | wake_up(&mon_conn_wait_queue); | 248 | wake_up(&mon_conn_wait_queue); |
@@ -266,8 +257,7 @@ static void mon_iucv_message_pending(struct iucv_path *path, | |||
266 | memcpy(&monpriv->msg_array[monpriv->write_index]->msg, | 257 | memcpy(&monpriv->msg_array[monpriv->write_index]->msg, |
267 | msg, sizeof(*msg)); | 258 | msg, sizeof(*msg)); |
268 | if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { | 259 | if (atomic_inc_return(&monpriv->msglim_count) == MON_MSGLIM) { |
269 | P_WARNING("IUCV message pending, message limit (%i) reached\n", | 260 | pr_warning("The read queue for monitor data is full\n"); |
270 | MON_MSGLIM); | ||
271 | monpriv->msg_array[monpriv->write_index]->msglim_reached = 1; | 261 | monpriv->msg_array[monpriv->write_index]->msglim_reached = 1; |
272 | } | 262 | } |
273 | monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM; | 263 | monpriv->write_index = (monpriv->write_index + 1) % MON_MSGLIM; |
@@ -311,8 +301,8 @@ static int mon_open(struct inode *inode, struct file *filp) | |||
311 | rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, | 301 | rc = iucv_path_connect(monpriv->path, &monreader_iucv_handler, |
312 | MON_SERVICE, NULL, user_data_connect, monpriv); | 302 | MON_SERVICE, NULL, user_data_connect, monpriv); |
313 | if (rc) { | 303 | if (rc) { |
314 | P_ERROR("iucv connection to *MONITOR failed with " | 304 | pr_err("Connecting to the z/VM *MONITOR system service " |
315 | "IPUSER SEVER code = %i\n", rc); | 305 | "failed with rc=%i\n", rc); |
316 | rc = -EIO; | 306 | rc = -EIO; |
317 | goto out_path; | 307 | goto out_path; |
318 | } | 308 | } |
@@ -353,7 +343,8 @@ static int mon_close(struct inode *inode, struct file *filp) | |||
353 | */ | 343 | */ |
354 | rc = iucv_path_sever(monpriv->path, user_data_sever); | 344 | rc = iucv_path_sever(monpriv->path, user_data_sever); |
355 | if (rc) | 345 | if (rc) |
356 | P_ERROR("close, iucv_sever failed with rc = %i\n", rc); | 346 | pr_warning("Disconnecting the z/VM *MONITOR system service " |
347 | "failed with rc=%i\n", rc); | ||
357 | 348 | ||
358 | atomic_set(&monpriv->iucv_severed, 0); | 349 | atomic_set(&monpriv->iucv_severed, 0); |
359 | atomic_set(&monpriv->iucv_connected, 0); | 350 | atomic_set(&monpriv->iucv_connected, 0); |
@@ -469,7 +460,8 @@ static int __init mon_init(void) | |||
469 | int rc; | 460 | int rc; |
470 | 461 | ||
471 | if (!MACHINE_IS_VM) { | 462 | if (!MACHINE_IS_VM) { |
472 | P_ERROR("not running under z/VM, driver not loaded\n"); | 463 | pr_err("The z/VM *MONITOR record device driver cannot be " |
464 | "loaded without z/VM\n"); | ||
473 | return -ENODEV; | 465 | return -ENODEV; |
474 | } | 466 | } |
475 | 467 | ||
@@ -478,7 +470,8 @@ static int __init mon_init(void) | |||
478 | */ | 470 | */ |
479 | rc = iucv_register(&monreader_iucv_handler, 1); | 471 | rc = iucv_register(&monreader_iucv_handler, 1); |
480 | if (rc) { | 472 | if (rc) { |
481 | P_ERROR("failed to register with iucv driver\n"); | 473 | pr_err("The z/VM *MONITOR record device driver failed to " |
474 | "register with IUCV\n"); | ||
482 | return rc; | 475 | return rc; |
483 | } | 476 | } |
484 | 477 | ||
@@ -488,8 +481,8 @@ static int __init mon_init(void) | |||
488 | goto out_iucv; | 481 | goto out_iucv; |
489 | } | 482 | } |
490 | if (rc != SEG_TYPE_SC) { | 483 | if (rc != SEG_TYPE_SC) { |
491 | P_ERROR("segment %s has unsupported type, should be SC\n", | 484 | pr_err("The specified *MONITOR DCSS %s does not have the " |
492 | mon_dcss_name); | 485 | "required type SC\n", mon_dcss_name); |
493 | rc = -EINVAL; | 486 | rc = -EINVAL; |
494 | goto out_iucv; | 487 | goto out_iucv; |
495 | } | 488 | } |
diff --git a/drivers/s390/char/monwriter.c b/drivers/s390/char/monwriter.c index 4d71aa8c1a79..c7d7483bab9a 100644 --- a/drivers/s390/char/monwriter.c +++ b/drivers/s390/char/monwriter.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com> | 8 | * Author(s): Melissa Howland <Melissa.Howland@us.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "monwriter" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
13 | #include <linux/init.h> | 16 | #include <linux/init.h> |
@@ -64,9 +67,9 @@ static int monwrite_diag(struct monwrite_hdr *myhdr, char *buffer, int fcn) | |||
64 | rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen); | 67 | rc = appldata_asm(&id, fcn, (void *) buffer, myhdr->datalen); |
65 | if (rc <= 0) | 68 | if (rc <= 0) |
66 | return rc; | 69 | return rc; |
70 | pr_err("Writing monitor data failed with rc=%i\n", rc); | ||
67 | if (rc == 5) | 71 | if (rc == 5) |
68 | return -EPERM; | 72 | return -EPERM; |
69 | printk("DIAG X'DC' error with return code: %i\n", rc); | ||
70 | return -EINVAL; | 73 | return -EINVAL; |
71 | } | 74 | } |
72 | 75 | ||
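
Beyond the macro switch, the monwriter hunk moves the error message ahead of the rc == 5 check, so a rejected write is now logged for the -EPERM case as well, not only for the -EINVAL fallthrough. A small sketch of the resulting mapping; pr_err is reduced to an fprintf stand-in, and rc corresponds to the return value of appldata_asm() in the driver.

#include <errno.h>
#include <stdio.h>

#define pr_err(fmt, ...) fprintf(stderr, "monwriter: " fmt, ##__VA_ARGS__)

/* Map a positive diag return code to an errno, logging the failure first. */
static int map_write_rc(int rc)
{
	if (rc <= 0)
		return rc;                    /* success, or already an -errno */
	pr_err("Writing monitor data failed with rc=%i\n", rc);
	if (rc == 5)
		return -EPERM;                /* z/VM denied the request */
	return -EINVAL;
}

int main(void)
{
	return map_write_rc(5) == -EPERM ? 0 : 1;
}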
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index ec9c0bcf66ee..506390496416 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> | 6 | * Peter Oberparleiter <peter.oberparleiter@de.ibm.com> |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "sclp_cmd" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include <linux/completion.h> | 12 | #include <linux/completion.h> |
10 | #include <linux/init.h> | 13 | #include <linux/init.h> |
11 | #include <linux/errno.h> | 14 | #include <linux/errno.h> |
@@ -16,9 +19,8 @@ | |||
16 | #include <linux/memory.h> | 19 | #include <linux/memory.h> |
17 | #include <asm/chpid.h> | 20 | #include <asm/chpid.h> |
18 | #include <asm/sclp.h> | 21 | #include <asm/sclp.h> |
19 | #include "sclp.h" | ||
20 | 22 | ||
21 | #define TAG "sclp_cmd: " | 23 | #include "sclp.h" |
22 | 24 | ||
23 | #define SCLP_CMDW_READ_SCP_INFO 0x00020001 | 25 | #define SCLP_CMDW_READ_SCP_INFO 0x00020001 |
24 | #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 | 26 | #define SCLP_CMDW_READ_SCP_INFO_FORCED 0x00120001 |
@@ -169,8 +171,8 @@ static int do_sync_request(sclp_cmdw_t cmd, void *sccb) | |||
169 | 171 | ||
170 | /* Check response. */ | 172 | /* Check response. */ |
171 | if (request->status != SCLP_REQ_DONE) { | 173 | if (request->status != SCLP_REQ_DONE) { |
172 | printk(KERN_WARNING TAG "sync request failed " | 174 | pr_warning("sync request failed (cmd=0x%08x, " |
173 | "(cmd=0x%08x, status=0x%02x)\n", cmd, request->status); | 175 | "status=0x%02x)\n", cmd, request->status); |
174 | rc = -EIO; | 176 | rc = -EIO; |
175 | } | 177 | } |
176 | out: | 178 | out: |
@@ -224,8 +226,8 @@ int sclp_get_cpu_info(struct sclp_cpu_info *info) | |||
224 | if (rc) | 226 | if (rc) |
225 | goto out; | 227 | goto out; |
226 | if (sccb->header.response_code != 0x0010) { | 228 | if (sccb->header.response_code != 0x0010) { |
227 | printk(KERN_WARNING TAG "readcpuinfo failed " | 229 | pr_warning("readcpuinfo failed (response=0x%04x)\n", |
228 | "(response=0x%04x)\n", sccb->header.response_code); | 230 | sccb->header.response_code); |
229 | rc = -EIO; | 231 | rc = -EIO; |
230 | goto out; | 232 | goto out; |
231 | } | 233 | } |
@@ -262,8 +264,9 @@ static int do_cpu_configure(sclp_cmdw_t cmd) | |||
262 | case 0x0120: | 264 | case 0x0120: |
263 | break; | 265 | break; |
264 | default: | 266 | default: |
265 | printk(KERN_WARNING TAG "configure cpu failed (cmd=0x%08x, " | 267 | pr_warning("configure cpu failed (cmd=0x%08x, " |
266 | "response=0x%04x)\n", cmd, sccb->header.response_code); | 268 | "response=0x%04x)\n", cmd, |
269 | sccb->header.response_code); | ||
267 | rc = -EIO; | 270 | rc = -EIO; |
268 | break; | 271 | break; |
269 | } | 272 | } |
@@ -626,9 +629,9 @@ static int do_chp_configure(sclp_cmdw_t cmd) | |||
626 | case 0x0450: | 629 | case 0x0450: |
627 | break; | 630 | break; |
628 | default: | 631 | default: |
629 | printk(KERN_WARNING TAG "configure channel-path failed " | 632 | pr_warning("configure channel-path failed " |
630 | "(cmd=0x%08x, response=0x%04x)\n", cmd, | 633 | "(cmd=0x%08x, response=0x%04x)\n", cmd, |
631 | sccb->header.response_code); | 634 | sccb->header.response_code); |
632 | rc = -EIO; | 635 | rc = -EIO; |
633 | break; | 636 | break; |
634 | } | 637 | } |
@@ -695,8 +698,8 @@ int sclp_chp_read_info(struct sclp_chp_info *info) | |||
695 | if (rc) | 698 | if (rc) |
696 | goto out; | 699 | goto out; |
697 | if (sccb->header.response_code != 0x0010) { | 700 | if (sccb->header.response_code != 0x0010) { |
698 | printk(KERN_WARNING TAG "read channel-path info failed " | 701 | pr_warning("read channel-path info failed " |
699 | "(response=0x%04x)\n", sccb->header.response_code); | 702 | "(response=0x%04x)\n", sccb->header.response_code); |
700 | rc = -EIO; | 703 | rc = -EIO; |
701 | goto out; | 704 | goto out; |
702 | } | 705 | } |
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c index 4cebd6ee6d27..b497afe061cc 100644 --- a/drivers/s390/char/sclp_config.c +++ b/drivers/s390/char/sclp_config.c | |||
@@ -5,15 +5,17 @@ | |||
5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> | 5 | * Author(s): Heiko Carstens <heiko.carstens@de.ibm.com> |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define KMSG_COMPONENT "sclp_config" | ||
9 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
10 | |||
8 | #include <linux/init.h> | 11 | #include <linux/init.h> |
9 | #include <linux/errno.h> | 12 | #include <linux/errno.h> |
10 | #include <linux/cpu.h> | 13 | #include <linux/cpu.h> |
11 | #include <linux/sysdev.h> | 14 | #include <linux/sysdev.h> |
12 | #include <linux/workqueue.h> | 15 | #include <linux/workqueue.h> |
13 | #include <asm/smp.h> | 16 | #include <asm/smp.h> |
14 | #include "sclp.h" | ||
15 | 17 | ||
16 | #define TAG "sclp_config: " | 18 | #include "sclp.h" |
17 | 19 | ||
18 | struct conf_mgm_data { | 20 | struct conf_mgm_data { |
19 | u8 reserved; | 21 | u8 reserved; |
@@ -31,7 +33,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work) | |||
31 | int cpu; | 33 | int cpu; |
32 | struct sys_device *sysdev; | 34 | struct sys_device *sysdev; |
33 | 35 | ||
34 | printk(KERN_WARNING TAG "cpu capability changed.\n"); | 36 | pr_warning("cpu capability changed.\n"); |
35 | get_online_cpus(); | 37 | get_online_cpus(); |
36 | for_each_online_cpu(cpu) { | 38 | for_each_online_cpu(cpu) { |
37 | sysdev = get_cpu_sysdev(cpu); | 39 | sysdev = get_cpu_sysdev(cpu); |
@@ -78,7 +80,7 @@ static int __init sclp_conf_init(void) | |||
78 | return rc; | 80 | return rc; |
79 | 81 | ||
80 | if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { | 82 | if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) { |
81 | printk(KERN_WARNING TAG "no configuration management.\n"); | 83 | pr_warning("no configuration management.\n"); |
82 | sclp_unregister(&sclp_conf_register); | 84 | sclp_unregister(&sclp_conf_register); |
83 | rc = -ENOSYS; | 85 | rc = -ENOSYS; |
84 | } | 86 | } |
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c index d887bd261d28..62c2647f37f4 100644 --- a/drivers/s390/char/sclp_cpi_sys.c +++ b/drivers/s390/char/sclp_cpi_sys.c | |||
@@ -7,6 +7,9 @@ | |||
7 | * Michael Ernst <mernst@de.ibm.com> | 7 | * Michael Ernst <mernst@de.ibm.com> |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "sclp_cpi" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
10 | #include <linux/kernel.h> | 13 | #include <linux/kernel.h> |
11 | #include <linux/init.h> | 14 | #include <linux/init.h> |
12 | #include <linux/stat.h> | 15 | #include <linux/stat.h> |
@@ -20,6 +23,7 @@ | |||
20 | #include <linux/completion.h> | 23 | #include <linux/completion.h> |
21 | #include <asm/ebcdic.h> | 24 | #include <asm/ebcdic.h> |
22 | #include <asm/sclp.h> | 25 | #include <asm/sclp.h> |
26 | |||
23 | #include "sclp.h" | 27 | #include "sclp.h" |
24 | #include "sclp_rw.h" | 28 | #include "sclp_rw.h" |
25 | #include "sclp_cpi_sys.h" | 29 | #include "sclp_cpi_sys.h" |
@@ -150,16 +154,16 @@ static int cpi_req(void) | |||
150 | wait_for_completion(&completion); | 154 | wait_for_completion(&completion); |
151 | 155 | ||
152 | if (req->status != SCLP_REQ_DONE) { | 156 | if (req->status != SCLP_REQ_DONE) { |
153 | printk(KERN_WARNING "cpi: request failed (status=0x%02x)\n", | 157 | pr_warning("request failed (status=0x%02x)\n", |
154 | req->status); | 158 | req->status); |
155 | rc = -EIO; | 159 | rc = -EIO; |
156 | goto out_free_req; | 160 | goto out_free_req; |
157 | } | 161 | } |
158 | 162 | ||
159 | response = ((struct cpi_sccb *) req->sccb)->header.response_code; | 163 | response = ((struct cpi_sccb *) req->sccb)->header.response_code; |
160 | if (response != 0x0020) { | 164 | if (response != 0x0020) { |
161 | printk(KERN_WARNING "cpi: failed with " | 165 | pr_warning("request failed with response code 0x%x\n", |
162 | "response code 0x%x\n", response); | 166 | response); |
163 | rc = -EIO; | 167 | rc = -EIO; |
164 | } | 168 | } |
165 | 169 | ||
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c index 8b854857ba07..6a1c58dc61a7 100644 --- a/drivers/s390/char/sclp_sdias.c +++ b/drivers/s390/char/sclp_sdias.c | |||
@@ -5,15 +5,18 @@ | |||
5 | * Author(s): Michael Holzheu | 5 | * Author(s): Michael Holzheu |
6 | */ | 6 | */ |
7 | 7 | ||
8 | #define KMSG_COMPONENT "sclp_sdias" | ||
9 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
10 | |||
8 | #include <linux/sched.h> | 11 | #include <linux/sched.h> |
9 | #include <asm/sclp.h> | 12 | #include <asm/sclp.h> |
10 | #include <asm/debug.h> | 13 | #include <asm/debug.h> |
11 | #include <asm/ipl.h> | 14 | #include <asm/ipl.h> |
15 | |||
12 | #include "sclp.h" | 16 | #include "sclp.h" |
13 | #include "sclp_rw.h" | 17 | #include "sclp_rw.h" |
14 | 18 | ||
15 | #define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x) | 19 | #define TRACE(x...) debug_sprintf_event(sdias_dbf, 1, x) |
16 | #define ERROR_MSG(x...) printk ( KERN_ALERT "SDIAS: " x ) | ||
17 | 20 | ||
18 | #define SDIAS_RETRIES 300 | 21 | #define SDIAS_RETRIES 300 |
19 | #define SDIAS_SLEEP_TICKS 50 | 22 | #define SDIAS_SLEEP_TICKS 50 |
@@ -131,7 +134,7 @@ int sclp_sdias_blk_count(void) | |||
131 | 134 | ||
132 | rc = sdias_sclp_send(&request); | 135 | rc = sdias_sclp_send(&request); |
133 | if (rc) { | 136 | if (rc) { |
134 | ERROR_MSG("sclp_send failed for get_nr_blocks\n"); | 137 | pr_err("sclp_send failed for get_nr_blocks\n"); |
135 | goto out; | 138 | goto out; |
136 | } | 139 | } |
137 | if (sccb.hdr.response_code != 0x0020) { | 140 | if (sccb.hdr.response_code != 0x0020) { |
@@ -145,7 +148,8 @@ int sclp_sdias_blk_count(void) | |||
145 | rc = sccb.evbuf.blk_cnt; | 148 | rc = sccb.evbuf.blk_cnt; |
146 | break; | 149 | break; |
147 | default: | 150 | default: |
148 | ERROR_MSG("SCLP error: %x\n", sccb.evbuf.event_status); | 151 | pr_err("SCLP error: %x\n", |
152 | sccb.evbuf.event_status); | ||
149 | rc = -EIO; | 153 | rc = -EIO; |
150 | goto out; | 154 | goto out; |
151 | } | 155 | } |
@@ -201,7 +205,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) | |||
201 | 205 | ||
202 | rc = sdias_sclp_send(&request); | 206 | rc = sdias_sclp_send(&request); |
203 | if (rc) { | 207 | if (rc) { |
204 | ERROR_MSG("sclp_send failed: %x\n", rc); | 208 | pr_err("sclp_send failed: %x\n", rc); |
205 | goto out; | 209 | goto out; |
206 | } | 210 | } |
207 | if (sccb.hdr.response_code != 0x0020) { | 211 | if (sccb.hdr.response_code != 0x0020) { |
@@ -219,9 +223,9 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks) | |||
219 | case EVSTATE_NO_DATA: | 223 | case EVSTATE_NO_DATA: |
220 | TRACE("no data\n"); | 224 | TRACE("no data\n"); |
221 | default: | 225 | default: |
222 | ERROR_MSG("Error from SCLP while copying hsa. " | 226 | pr_err("Error from SCLP while copying hsa. " |
223 | "Event status = %x\n", | 227 | "Event status = %x\n", |
224 | sccb.evbuf.event_status); | 228 | sccb.evbuf.event_status); |
225 | rc = -EIO; | 229 | rc = -EIO; |
226 | } | 230 | } |
227 | out: | 231 | out: |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c index 9854f19f5e62..a839aa531d7c 100644 --- a/drivers/s390/char/sclp_vt220.c +++ b/drivers/s390/char/sclp_vt220.c | |||
@@ -583,23 +583,6 @@ sclp_vt220_chars_in_buffer(struct tty_struct *tty) | |||
583 | return count; | 583 | return count; |
584 | } | 584 | } |
585 | 585 | ||
586 | static void | ||
587 | __sclp_vt220_flush_buffer(void) | ||
588 | { | ||
589 | unsigned long flags; | ||
590 | |||
591 | sclp_vt220_emit_current(); | ||
592 | spin_lock_irqsave(&sclp_vt220_lock, flags); | ||
593 | if (timer_pending(&sclp_vt220_timer)) | ||
594 | del_timer(&sclp_vt220_timer); | ||
595 | while (sclp_vt220_outqueue_count > 0) { | ||
596 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
597 | sclp_sync_wait(); | ||
598 | spin_lock_irqsave(&sclp_vt220_lock, flags); | ||
599 | } | ||
600 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
601 | } | ||
602 | |||
603 | /* | 586 | /* |
604 | * Pass on all buffers to the hardware. Return only when there are no more | 587 | * Pass on all buffers to the hardware. Return only when there are no more |
605 | * buffers pending. | 588 | * buffers pending. |
@@ -745,6 +728,22 @@ sclp_vt220_con_device(struct console *c, int *index) | |||
745 | return sclp_vt220_driver; | 728 | return sclp_vt220_driver; |
746 | } | 729 | } |
747 | 730 | ||
731 | static void __sclp_vt220_flush_buffer(void) | ||
732 | { | ||
733 | unsigned long flags; | ||
734 | |||
735 | sclp_vt220_emit_current(); | ||
736 | spin_lock_irqsave(&sclp_vt220_lock, flags); | ||
737 | if (timer_pending(&sclp_vt220_timer)) | ||
738 | del_timer(&sclp_vt220_timer); | ||
739 | while (sclp_vt220_outqueue_count > 0) { | ||
740 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
741 | sclp_sync_wait(); | ||
742 | spin_lock_irqsave(&sclp_vt220_lock, flags); | ||
743 | } | ||
744 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | ||
745 | } | ||
746 | |||
748 | static int | 747 | static int |
749 | sclp_vt220_notify(struct notifier_block *self, | 748 | sclp_vt220_notify(struct notifier_block *self, |
750 | unsigned long event, void *data) | 749 | unsigned long event, void *data) |
diff --git a/drivers/s390/char/vmcp.c b/drivers/s390/char/vmcp.c index 09e7d9bf438b..a6087cec55b4 100644 --- a/drivers/s390/char/vmcp.c +++ b/drivers/s390/char/vmcp.c | |||
@@ -11,12 +11,14 @@ | |||
11 | * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS | 11 | * The idea of this driver is based on cpint from Neale Ferguson and #CP in CMS |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #define KMSG_COMPONENT "vmcp" | ||
15 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
16 | |||
14 | #include <linux/fs.h> | 17 | #include <linux/fs.h> |
15 | #include <linux/init.h> | 18 | #include <linux/init.h> |
16 | #include <linux/kernel.h> | 19 | #include <linux/kernel.h> |
17 | #include <linux/miscdevice.h> | 20 | #include <linux/miscdevice.h> |
18 | #include <linux/module.h> | 21 | #include <linux/module.h> |
19 | #include <linux/smp_lock.h> | ||
20 | #include <asm/cpcmd.h> | 22 | #include <asm/cpcmd.h> |
21 | #include <asm/debug.h> | 23 | #include <asm/debug.h> |
22 | #include <asm/uaccess.h> | 24 | #include <asm/uaccess.h> |
@@ -26,8 +28,6 @@ MODULE_LICENSE("GPL"); | |||
26 | MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>"); | 28 | MODULE_AUTHOR("Christian Borntraeger <borntraeger@de.ibm.com>"); |
27 | MODULE_DESCRIPTION("z/VM CP interface"); | 29 | MODULE_DESCRIPTION("z/VM CP interface"); |
28 | 30 | ||
29 | #define PRINTK_HEADER "vmcp: " | ||
30 | |||
31 | static debug_info_t *vmcp_debug; | 31 | static debug_info_t *vmcp_debug; |
32 | 32 | ||
33 | static int vmcp_open(struct inode *inode, struct file *file) | 33 | static int vmcp_open(struct inode *inode, struct file *file) |
@@ -41,13 +41,11 @@ static int vmcp_open(struct inode *inode, struct file *file) | |||
41 | if (!session) | 41 | if (!session) |
42 | return -ENOMEM; | 42 | return -ENOMEM; |
43 | 43 | ||
44 | lock_kernel(); | ||
45 | session->bufsize = PAGE_SIZE; | 44 | session->bufsize = PAGE_SIZE; |
46 | session->response = NULL; | 45 | session->response = NULL; |
47 | session->resp_size = 0; | 46 | session->resp_size = 0; |
48 | mutex_init(&session->mutex); | 47 | mutex_init(&session->mutex); |
49 | file->private_data = session; | 48 | file->private_data = session; |
50 | unlock_kernel(); | ||
51 | return nonseekable_open(inode, file); | 49 | return nonseekable_open(inode, file); |
52 | } | 50 | } |
53 | 51 | ||
@@ -193,7 +191,8 @@ static int __init vmcp_init(void) | |||
193 | int ret; | 191 | int ret; |
194 | 192 | ||
195 | if (!MACHINE_IS_VM) { | 193 | if (!MACHINE_IS_VM) { |
196 | PRINT_WARN("z/VM CP interface is only available under z/VM\n"); | 194 | pr_warning("The z/VM CP interface device driver cannot be " |
195 | "loaded without z/VM\n"); | ||
197 | return -ENODEV; | 196 | return -ENODEV; |
198 | } | 197 | } |
199 | 198 | ||
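
Apart from the message rewording, the vmcp hunk drops lock_kernel()/unlock_kernel() from vmcp_open(): the function only initializes a freshly allocated session object before publishing it through file->private_data, so no globally shared state is touched and the big kernel lock adds nothing. A user-space sketch of that shape of open routine; struct session and session_open() are illustrative, with pthreads standing in for the kernel mutex API.

#include <stdlib.h>
#include <pthread.h>

struct session {
	size_t bufsize;
	char *response;
	size_t resp_size;
	pthread_mutex_t mutex;   /* per-session lock, like session->mutex */
};

/* Everything written here is private to the new object until it is
 * handed back to the caller, so no global lock is needed around it. */
static struct session *session_open(void)
{
	struct session *s = calloc(1, sizeof(*s));

	if (!s)
		return NULL;
	s->bufsize = 4096;        /* PAGE_SIZE in the driver */
	pthread_mutex_init(&s->mutex, NULL);
	return s;
}

int main(void)
{
	struct session *s = session_open();

	if (!s)
		return 1;
	pthread_mutex_destroy(&s->mutex);
	free(s);
	return 0;
}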
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index 24762727bc27..aabbeb909cc6 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -10,6 +10,10 @@ | |||
10 | * Stefan Weinhuber <wein@de.ibm.com> | 10 | * Stefan Weinhuber <wein@de.ibm.com> |
11 | * | 11 | * |
12 | */ | 12 | */ |
13 | |||
14 | #define KMSG_COMPONENT "vmlogrdr" | ||
15 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
16 | |||
13 | #include <linux/module.h> | 17 | #include <linux/module.h> |
14 | #include <linux/init.h> | 18 | #include <linux/init.h> |
15 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
@@ -28,8 +32,6 @@ | |||
28 | #include <linux/smp_lock.h> | 32 | #include <linux/smp_lock.h> |
29 | #include <linux/string.h> | 33 | #include <linux/string.h> |
30 | 34 | ||
31 | |||
32 | |||
33 | MODULE_AUTHOR | 35 | MODULE_AUTHOR |
34 | ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n" | 36 | ("(C) 2004 IBM Corporation by Xenia Tkatschow (xenia@us.ibm.com)\n" |
35 | " Stefan Weinhuber (wein@de.ibm.com)"); | 37 | " Stefan Weinhuber (wein@de.ibm.com)"); |
@@ -174,8 +176,7 @@ static void vmlogrdr_iucv_path_severed(struct iucv_path *path, u8 ipuser[16]) | |||
174 | struct vmlogrdr_priv_t * logptr = path->private; | 176 | struct vmlogrdr_priv_t * logptr = path->private; |
175 | u8 reason = (u8) ipuser[8]; | 177 | u8 reason = (u8) ipuser[8]; |
176 | 178 | ||
177 | printk (KERN_ERR "vmlogrdr: connection severed with" | 179 | pr_err("vmlogrdr: connection severed with reason %i\n", reason); |
178 | " reason %i\n", reason); | ||
179 | 180 | ||
180 | iucv_path_sever(path, NULL); | 181 | iucv_path_sever(path, NULL); |
181 | kfree(path); | 182 | kfree(path); |
@@ -333,8 +334,8 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp) | |||
333 | if (logptr->autorecording) { | 334 | if (logptr->autorecording) { |
334 | ret = vmlogrdr_recording(logptr,1,logptr->autopurge); | 335 | ret = vmlogrdr_recording(logptr,1,logptr->autopurge); |
335 | if (ret) | 336 | if (ret) |
336 | printk (KERN_WARNING "vmlogrdr: failed to start " | 337 | pr_warning("vmlogrdr: failed to start " |
337 | "recording automatically\n"); | 338 | "recording automatically\n"); |
338 | } | 339 | } |
339 | 340 | ||
340 | /* create connection to the system service */ | 341 | /* create connection to the system service */ |
@@ -345,9 +346,9 @@ static int vmlogrdr_open (struct inode *inode, struct file *filp) | |||
345 | logptr->system_service, NULL, NULL, | 346 | logptr->system_service, NULL, NULL, |
346 | logptr); | 347 | logptr); |
347 | if (connect_rc) { | 348 | if (connect_rc) { |
348 | printk (KERN_ERR "vmlogrdr: iucv connection to %s " | 349 | pr_err("vmlogrdr: iucv connection to %s " |
349 | "failed with rc %i \n", logptr->system_service, | 350 | "failed with rc %i \n", |
350 | connect_rc); | 351 | logptr->system_service, connect_rc); |
351 | goto out_path; | 352 | goto out_path; |
352 | } | 353 | } |
353 | 354 | ||
@@ -388,8 +389,8 @@ static int vmlogrdr_release (struct inode *inode, struct file *filp) | |||
388 | if (logptr->autorecording) { | 389 | if (logptr->autorecording) { |
389 | ret = vmlogrdr_recording(logptr,0,logptr->autopurge); | 390 | ret = vmlogrdr_recording(logptr,0,logptr->autopurge); |
390 | if (ret) | 391 | if (ret) |
391 | printk (KERN_WARNING "vmlogrdr: failed to stop " | 392 | pr_warning("vmlogrdr: failed to stop " |
392 | "recording automatically\n"); | 393 | "recording automatically\n"); |
393 | } | 394 | } |
394 | logptr->dev_in_use = 0; | 395 | logptr->dev_in_use = 0; |
395 | 396 | ||
@@ -823,8 +824,7 @@ static int __init vmlogrdr_init(void) | |||
823 | dev_t dev; | 824 | dev_t dev; |
824 | 825 | ||
825 | if (! MACHINE_IS_VM) { | 826 | if (! MACHINE_IS_VM) { |
826 | printk (KERN_ERR "vmlogrdr: not running under VM, " | 827 | pr_err("not running under VM, driver not loaded.\n"); |
827 | "driver not loaded.\n"); | ||
828 | return -ENODEV; | 828 | return -ENODEV; |
829 | } | 829 | } |
830 | 830 | ||
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c index 9020eba620ee..5dcef81fc9d9 100644 --- a/drivers/s390/char/vmur.c +++ b/drivers/s390/char/vmur.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Frank Munzert <munzert@de.ibm.com> | 8 | * Frank Munzert <munzert@de.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "vmur" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/cdev.h> | 14 | #include <linux/cdev.h> |
12 | #include <linux/smp_lock.h> | 15 | #include <linux/smp_lock.h> |
13 | 16 | ||
@@ -40,8 +43,6 @@ MODULE_AUTHOR("IBM Corporation"); | |||
40 | MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver"); | 43 | MODULE_DESCRIPTION("s390 z/VM virtual unit record device driver"); |
41 | MODULE_LICENSE("GPL"); | 44 | MODULE_LICENSE("GPL"); |
42 | 45 | ||
43 | #define PRINTK_HEADER "vmur: " | ||
44 | |||
45 | static dev_t ur_first_dev_maj_min; | 46 | static dev_t ur_first_dev_maj_min; |
46 | static struct class *vmur_class; | 47 | static struct class *vmur_class; |
47 | static struct debug_info *vmur_dbf; | 48 | static struct debug_info *vmur_dbf; |
@@ -987,7 +988,8 @@ static int __init ur_init(void) | |||
987 | dev_t dev; | 988 | dev_t dev; |
988 | 989 | ||
989 | if (!MACHINE_IS_VM) { | 990 | if (!MACHINE_IS_VM) { |
990 | PRINT_ERR("%s is only available under z/VM.\n", ur_banner); | 991 | pr_err("The %s cannot be loaded without z/VM\n", |
992 | ur_banner); | ||
991 | return -ENODEV; | 993 | return -ENODEV; |
992 | } | 994 | } |
993 | 995 | ||
@@ -1006,7 +1008,8 @@ static int __init ur_init(void) | |||
1006 | 1008 | ||
1007 | rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur"); | 1009 | rc = alloc_chrdev_region(&dev, 0, NUM_MINORS, "vmur"); |
1008 | if (rc) { | 1010 | if (rc) { |
1009 | PRINT_ERR("alloc_chrdev_region failed: err = %d\n", rc); | 1011 | pr_err("Kernel function alloc_chrdev_region failed with " |
1012 | "error code %d\n", rc); | ||
1010 | goto fail_unregister_driver; | 1013 | goto fail_unregister_driver; |
1011 | } | 1014 | } |
1012 | ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0); | 1015 | ur_first_dev_maj_min = MKDEV(MAJOR(dev), 0); |
@@ -1016,7 +1019,7 @@ static int __init ur_init(void) | |||
1016 | rc = PTR_ERR(vmur_class); | 1019 | rc = PTR_ERR(vmur_class); |
1017 | goto fail_unregister_region; | 1020 | goto fail_unregister_region; |
1018 | } | 1021 | } |
1019 | PRINT_INFO("%s loaded.\n", ur_banner); | 1022 | pr_info("%s loaded.\n", ur_banner); |
1020 | return 0; | 1023 | return 0; |
1021 | 1024 | ||
1022 | fail_unregister_region: | 1025 | fail_unregister_region: |
@@ -1034,7 +1037,7 @@ static void __exit ur_exit(void) | |||
1034 | unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); | 1037 | unregister_chrdev_region(ur_first_dev_maj_min, NUM_MINORS); |
1035 | ccw_driver_unregister(&ur_driver); | 1038 | ccw_driver_unregister(&ur_driver); |
1036 | debug_unregister(vmur_dbf); | 1039 | debug_unregister(vmur_dbf); |
1037 | PRINT_INFO("%s unloaded.\n", ur_banner); | 1040 | pr_info("%s unloaded.\n", ur_banner); |
1038 | } | 1041 | } |
1039 | 1042 | ||
1040 | module_init(ur_init); | 1043 | module_init(ur_init); |
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 7fd84be11931..eefc6611412e 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c | |||
@@ -9,6 +9,9 @@ | |||
9 | * Author(s): Michael Holzheu | 9 | * Author(s): Michael Holzheu |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define KMSG_COMPONENT "zdump" | ||
13 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
14 | |||
12 | #include <linux/init.h> | 15 | #include <linux/init.h> |
13 | #include <linux/miscdevice.h> | 16 | #include <linux/miscdevice.h> |
14 | #include <linux/utsname.h> | 17 | #include <linux/utsname.h> |
@@ -24,8 +27,6 @@ | |||
24 | #include "sclp.h" | 27 | #include "sclp.h" |
25 | 28 | ||
26 | #define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) | 29 | #define TRACE(x...) debug_sprintf_event(zcore_dbf, 1, x) |
27 | #define MSG(x...) printk( KERN_ALERT x ) | ||
28 | #define ERROR_MSG(x...) printk ( KERN_ALERT "DUMP: " x ) | ||
29 | 30 | ||
30 | #define TO_USER 0 | 31 | #define TO_USER 0 |
31 | #define TO_KERNEL 1 | 32 | #define TO_KERNEL 1 |
@@ -563,19 +564,19 @@ static int __init sys_info_init(enum arch_id arch) | |||
563 | 564 | ||
564 | switch (arch) { | 565 | switch (arch) { |
565 | case ARCH_S390X: | 566 | case ARCH_S390X: |
566 | MSG("DETECTED 'S390X (64 bit) OS'\n"); | 567 | pr_alert("DETECTED 'S390X (64 bit) OS'\n"); |
567 | sys_info.sa_base = SAVE_AREA_BASE_S390X; | 568 | sys_info.sa_base = SAVE_AREA_BASE_S390X; |
568 | sys_info.sa_size = sizeof(struct save_area_s390x); | 569 | sys_info.sa_size = sizeof(struct save_area_s390x); |
569 | set_s390x_lc_mask(&sys_info.lc_mask); | 570 | set_s390x_lc_mask(&sys_info.lc_mask); |
570 | break; | 571 | break; |
571 | case ARCH_S390: | 572 | case ARCH_S390: |
572 | MSG("DETECTED 'S390 (32 bit) OS'\n"); | 573 | pr_alert("DETECTED 'S390 (32 bit) OS'\n"); |
573 | sys_info.sa_base = SAVE_AREA_BASE_S390; | 574 | sys_info.sa_base = SAVE_AREA_BASE_S390; |
574 | sys_info.sa_size = sizeof(struct save_area_s390); | 575 | sys_info.sa_size = sizeof(struct save_area_s390); |
575 | set_s390_lc_mask(&sys_info.lc_mask); | 576 | set_s390_lc_mask(&sys_info.lc_mask); |
576 | break; | 577 | break; |
577 | default: | 578 | default: |
578 | ERROR_MSG("unknown architecture 0x%x.\n",arch); | 579 | pr_alert("0x%x is an unknown architecture.\n",arch); |
579 | return -EINVAL; | 580 | return -EINVAL; |
580 | } | 581 | } |
581 | sys_info.arch = arch; | 582 | sys_info.arch = arch; |
@@ -674,7 +675,8 @@ static int __init zcore_init(void) | |||
674 | 675 | ||
675 | #ifndef __s390x__ | 676 | #ifndef __s390x__ |
676 | if (arch == ARCH_S390X) { | 677 | if (arch == ARCH_S390X) { |
677 | ERROR_MSG("32 bit dumper can't dump 64 bit system!\n"); | 678 | pr_alert("The 32-bit dump tool cannot be used for a " |
679 | "64-bit system\n"); | ||
678 | rc = -EINVAL; | 680 | rc = -EINVAL; |
679 | goto fail; | 681 | goto fail; |
680 | } | 682 | } |
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c index 2f547b840ef0..fe00be3675cd 100644 --- a/drivers/s390/cio/blacklist.c +++ b/drivers/s390/cio/blacklist.c | |||
@@ -9,6 +9,9 @@ | |||
9 | * Arnd Bergmann (arndb@de.ibm.com) | 9 | * Arnd Bergmann (arndb@de.ibm.com) |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define KMSG_COMPONENT "cio" | ||
13 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
14 | |||
12 | #include <linux/init.h> | 15 | #include <linux/init.h> |
13 | #include <linux/vmalloc.h> | 16 | #include <linux/vmalloc.h> |
14 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
@@ -50,9 +53,10 @@ static int blacklist_range(range_action action, unsigned int from_ssid, | |||
50 | { | 53 | { |
51 | if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) { | 54 | if ((from_ssid > to_ssid) || ((from_ssid == to_ssid) && (from > to))) { |
52 | if (msgtrigger) | 55 | if (msgtrigger) |
53 | printk(KERN_WARNING "cio: Invalid cio_ignore range " | 56 | pr_warning("0.%x.%04x to 0.%x.%04x is not a valid " |
54 | "0.%x.%04x-0.%x.%04x\n", from_ssid, from, | 57 | "range for cio_ignore\n", from_ssid, from, |
55 | to_ssid, to); | 58 | to_ssid, to); |
59 | |||
56 | return 1; | 60 | return 1; |
57 | } | 61 | } |
58 | 62 | ||
@@ -140,8 +144,8 @@ static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid, | |||
140 | rc = 0; | 144 | rc = 0; |
141 | out: | 145 | out: |
142 | if (rc && msgtrigger) | 146 | if (rc && msgtrigger) |
143 | printk(KERN_WARNING "cio: Invalid cio_ignore device '%s'\n", | 147 | pr_warning("%s is not a valid device for the cio_ignore " |
144 | str); | 148 | "kernel parameter\n", str); |
145 | 149 | ||
146 | return rc; | 150 | return rc; |
147 | } | 151 | } |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index 3ac2c2019f5e..918e6fce2573 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -19,6 +19,8 @@ | |||
19 | #include <asm/ccwdev.h> | 19 | #include <asm/ccwdev.h> |
20 | #include <asm/ccwgroup.h> | 20 | #include <asm/ccwgroup.h> |
21 | 21 | ||
22 | #define CCW_BUS_ID_SIZE 20 | ||
23 | |||
22 | /* In Linux 2.4, we had a channel device layer called "chandev" | 24 | /* In Linux 2.4, we had a channel device layer called "chandev" |
23 | * that did all sorts of obscure stuff for networking devices. | 25 | * that did all sorts of obscure stuff for networking devices. |
24 | * This is another driver that serves as a replacement for just | 26 | * This is another driver that serves as a replacement for just |
@@ -89,15 +91,23 @@ ccwgroup_ungroup_store(struct device *dev, struct device_attribute *attr, const | |||
89 | 91 | ||
90 | gdev = to_ccwgroupdev(dev); | 92 | gdev = to_ccwgroupdev(dev); |
91 | 93 | ||
92 | if (gdev->state != CCWGROUP_OFFLINE) | 94 | /* Prevent concurrent online/offline processing and ungrouping. */ |
93 | return -EINVAL; | 95 | if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0) |
94 | 96 | return -EAGAIN; | |
97 | if (gdev->state != CCWGROUP_OFFLINE) { | ||
98 | rc = -EINVAL; | ||
99 | goto out; | ||
100 | } | ||
95 | /* Note that we cannot unregister the device from one of its | 101 | /* Note that we cannot unregister the device from one of its |
96 | * attribute methods, so we have to use this roundabout approach. | 102 | * attribute methods, so we have to use this roundabout approach. |
97 | */ | 103 | */ |
98 | rc = device_schedule_callback(dev, ccwgroup_ungroup_callback); | 104 | rc = device_schedule_callback(dev, ccwgroup_ungroup_callback); |
99 | if (rc) | 105 | out: |
100 | count = rc; | 106 | if (rc) { |
107 | /* Release onoff "lock" when ungrouping failed. */ | ||
108 | atomic_set(&gdev->onoff, 0); | ||
109 | return rc; | ||
110 | } | ||
101 | return count; | 111 | return count; |
102 | } | 112 | } |
103 | 113 | ||
@@ -172,7 +182,7 @@ static int __get_next_bus_id(const char **buf, char *bus_id) | |||
172 | len = end - start + 1; | 182 | len = end - start + 1; |
173 | end++; | 183 | end++; |
174 | } | 184 | } |
175 | if (len < BUS_ID_SIZE) { | 185 | if (len < CCW_BUS_ID_SIZE) { |
176 | strlcpy(bus_id, start, len); | 186 | strlcpy(bus_id, start, len); |
177 | rc = 0; | 187 | rc = 0; |
178 | } else | 188 | } else |
@@ -181,7 +191,7 @@ static int __get_next_bus_id(const char **buf, char *bus_id) | |||
181 | return rc; | 191 | return rc; |
182 | } | 192 | } |
183 | 193 | ||
184 | static int __is_valid_bus_id(char bus_id[BUS_ID_SIZE]) | 194 | static int __is_valid_bus_id(char bus_id[CCW_BUS_ID_SIZE]) |
185 | { | 195 | { |
186 | int cssid, ssid, devno; | 196 | int cssid, ssid, devno; |
187 | 197 | ||
@@ -213,7 +223,7 @@ int ccwgroup_create_from_string(struct device *root, unsigned int creator_id, | |||
213 | { | 223 | { |
214 | struct ccwgroup_device *gdev; | 224 | struct ccwgroup_device *gdev; |
215 | int rc, i; | 225 | int rc, i; |
216 | char tmp_bus_id[BUS_ID_SIZE]; | 226 | char tmp_bus_id[CCW_BUS_ID_SIZE]; |
217 | const char *curr_buf; | 227 | const char *curr_buf; |
218 | 228 | ||
219 | gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]), | 229 | gdev = kzalloc(sizeof(*gdev) + num_devices * sizeof(gdev->cdev[0]), |
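
In ccwgroup_ungroup_store() the new code reuses the gdev->onoff counter as a try-lock: atomic_cmpxchg(&gdev->onoff, 0, 1) claims the device or bails out with -EAGAIN, and the claim is released again on the failure paths, so ungrouping can no longer race with a concurrent online/offline transition. A compile-and-run sketch of the same idiom with C11 atomics; try_begin() and end_op() are illustrative names.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int onoff;   /* 0 = idle, 1 = a state change is in progress */

/* Claim the device for a state change; mirrors atomic_cmpxchg(..., 0, 1). */
static int try_begin(void)
{
	int expected = 0;

	if (!atomic_compare_exchange_strong(&onoff, &expected, 1))
		return -1;       /* another online/offline/ungroup is running */
	return 0;
}

/* Release the claim, as the error path does with atomic_set(&gdev->onoff, 0). */
static void end_op(void)
{
	atomic_store(&onoff, 0);
}

int main(void)
{
	if (try_begin() == 0) {
		if (try_begin() != 0)
			printf("concurrent attempt rejected\n");
		end_op();
	}
	return 0;
}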
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 29826fdd47b8..ebab6ea4659b 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Arnd Bergmann (arndb@de.ibm.com) | 8 | * Arnd Bergmann (arndb@de.ibm.com) |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "cio" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/slab.h> | 15 | #include <linux/slab.h> |
13 | #include <linux/init.h> | 16 | #include <linux/init.h> |
@@ -333,6 +336,7 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) | |||
333 | struct chp_config_data *data; | 336 | struct chp_config_data *data; |
334 | struct chp_id chpid; | 337 | struct chp_id chpid; |
335 | int num; | 338 | int num; |
339 | char *events[3] = {"configure", "deconfigure", "cancel deconfigure"}; | ||
336 | 340 | ||
337 | CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); | 341 | CIO_CRW_EVENT(4, "chsc: channel-path-configuration notification\n"); |
338 | if (sei_area->rs != 0) | 342 | if (sei_area->rs != 0) |
@@ -343,8 +347,8 @@ static void chsc_process_sei_chp_config(struct chsc_sei_area *sei_area) | |||
343 | if (!chp_test_bit(data->map, num)) | 347 | if (!chp_test_bit(data->map, num)) |
344 | continue; | 348 | continue; |
345 | chpid.id = num; | 349 | chpid.id = num; |
346 | printk(KERN_WARNING "cio: processing configure event %d for " | 350 | pr_notice("Processing %s for channel path %x.%02x\n", |
347 | "chpid %x.%02x\n", data->op, chpid.cssid, chpid.id); | 351 | events[data->op], chpid.cssid, chpid.id); |
348 | switch (data->op) { | 352 | switch (data->op) { |
349 | case 0: | 353 | case 0: |
350 | chp_cfg_schedule(chpid, 1); | 354 | chp_cfg_schedule(chpid, 1); |
diff --git a/drivers/s390/cio/chsc_sch.c b/drivers/s390/cio/chsc_sch.c index f49f0e502b8d..0a2f2edafc03 100644 --- a/drivers/s390/cio/chsc_sch.c +++ b/drivers/s390/cio/chsc_sch.c | |||
@@ -61,7 +61,7 @@ static void chsc_subchannel_irq(struct subchannel *sch) | |||
61 | } | 61 | } |
62 | private->request = NULL; | 62 | private->request = NULL; |
63 | memcpy(&request->irb, irb, sizeof(*irb)); | 63 | memcpy(&request->irb, irb, sizeof(*irb)); |
64 | stsch(sch->schid, &sch->schib); | 64 | cio_update_schib(sch); |
65 | complete(&request->completion); | 65 | complete(&request->completion); |
66 | put_device(&sch->dev); | 66 | put_device(&sch->dev); |
67 | } | 67 | } |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 3db2c386546f..8a8df7552969 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -9,6 +9,9 @@ | |||
9 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | 9 | * Martin Schwidefsky (schwidefsky@de.ibm.com) |
10 | */ | 10 | */ |
11 | 11 | ||
12 | #define KMSG_COMPONENT "cio" | ||
13 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
14 | |||
12 | #include <linux/module.h> | 15 | #include <linux/module.h> |
13 | #include <linux/init.h> | 16 | #include <linux/init.h> |
14 | #include <linux/slab.h> | 17 | #include <linux/slab.h> |
@@ -104,44 +107,6 @@ cio_get_options (struct subchannel *sch) | |||
104 | return flags; | 107 | return flags; |
105 | } | 108 | } |
106 | 109 | ||
107 | /* | ||
108 | * Use tpi to get a pending interrupt, call the interrupt handler and | ||
109 | * return a pointer to the subchannel structure. | ||
110 | */ | ||
111 | static int | ||
112 | cio_tpi(void) | ||
113 | { | ||
114 | struct tpi_info *tpi_info; | ||
115 | struct subchannel *sch; | ||
116 | struct irb *irb; | ||
117 | int irq_context; | ||
118 | |||
119 | tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; | ||
120 | if (tpi (NULL) != 1) | ||
121 | return 0; | ||
122 | irb = (struct irb *) __LC_IRB; | ||
123 | /* Store interrupt response block to lowcore. */ | ||
124 | if (tsch (tpi_info->schid, irb) != 0) | ||
125 | /* Not status pending or not operational. */ | ||
126 | return 1; | ||
127 | sch = (struct subchannel *)(unsigned long)tpi_info->intparm; | ||
128 | if (!sch) | ||
129 | return 1; | ||
130 | irq_context = in_interrupt(); | ||
131 | if (!irq_context) | ||
132 | local_bh_disable(); | ||
133 | irq_enter (); | ||
134 | spin_lock(sch->lock); | ||
135 | memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); | ||
136 | if (sch->driver && sch->driver->irq) | ||
137 | sch->driver->irq(sch); | ||
138 | spin_unlock(sch->lock); | ||
139 | irq_exit (); | ||
140 | if (!irq_context) | ||
141 | _local_bh_enable(); | ||
142 | return 1; | ||
143 | } | ||
144 | |||
145 | static int | 110 | static int |
146 | cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) | 111 | cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) |
147 | { | 112 | { |
@@ -152,11 +117,13 @@ cio_start_handle_notoper(struct subchannel *sch, __u8 lpm) | |||
152 | else | 117 | else |
153 | sch->lpm = 0; | 118 | sch->lpm = 0; |
154 | 119 | ||
155 | stsch (sch->schid, &sch->schib); | ||
156 | |||
157 | CIO_MSG_EVENT(2, "cio_start: 'not oper' status for " | 120 | CIO_MSG_EVENT(2, "cio_start: 'not oper' status for " |
158 | "subchannel 0.%x.%04x!\n", sch->schid.ssid, | 121 | "subchannel 0.%x.%04x!\n", sch->schid.ssid, |
159 | sch->schid.sch_no); | 122 | sch->schid.sch_no); |
123 | |||
124 | if (cio_update_schib(sch)) | ||
125 | return -ENODEV; | ||
126 | |||
160 | sprintf(dbf_text, "no%s", dev_name(&sch->dev)); | 127 | sprintf(dbf_text, "no%s", dev_name(&sch->dev)); |
161 | CIO_TRACE_EVENT(0, dbf_text); | 128 | CIO_TRACE_EVENT(0, dbf_text); |
162 | CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); | 129 | CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib)); |
@@ -354,7 +321,8 @@ cio_cancel (struct subchannel *sch) | |||
354 | switch (ccode) { | 321 | switch (ccode) { |
355 | case 0: /* success */ | 322 | case 0: /* success */ |
356 | /* Update information in scsw. */ | 323 | /* Update information in scsw. */ |
357 | stsch (sch->schid, &sch->schib); | 324 | if (cio_update_schib(sch)) |
325 | return -ENODEV; | ||
358 | return 0; | 326 | return 0; |
359 | case 1: /* status pending */ | 327 | case 1: /* status pending */ |
360 | return -EBUSY; | 328 | return -EBUSY; |
@@ -365,30 +333,70 @@ cio_cancel (struct subchannel *sch) | |||
365 | } | 333 | } |
366 | } | 334 | } |
367 | 335 | ||
336 | |||
337 | static void cio_apply_config(struct subchannel *sch, struct schib *schib) | ||
338 | { | ||
339 | schib->pmcw.intparm = sch->config.intparm; | ||
340 | schib->pmcw.mbi = sch->config.mbi; | ||
341 | schib->pmcw.isc = sch->config.isc; | ||
342 | schib->pmcw.ena = sch->config.ena; | ||
343 | schib->pmcw.mme = sch->config.mme; | ||
344 | schib->pmcw.mp = sch->config.mp; | ||
345 | schib->pmcw.csense = sch->config.csense; | ||
346 | schib->pmcw.mbfc = sch->config.mbfc; | ||
347 | if (sch->config.mbfc) | ||
348 | schib->mba = sch->config.mba; | ||
349 | } | ||
350 | |||
351 | static int cio_check_config(struct subchannel *sch, struct schib *schib) | ||
352 | { | ||
353 | return (schib->pmcw.intparm == sch->config.intparm) && | ||
354 | (schib->pmcw.mbi == sch->config.mbi) && | ||
355 | (schib->pmcw.isc == sch->config.isc) && | ||
356 | (schib->pmcw.ena == sch->config.ena) && | ||
357 | (schib->pmcw.mme == sch->config.mme) && | ||
358 | (schib->pmcw.mp == sch->config.mp) && | ||
359 | (schib->pmcw.csense == sch->config.csense) && | ||
360 | (schib->pmcw.mbfc == sch->config.mbfc) && | ||
361 | (!sch->config.mbfc || (schib->mba == sch->config.mba)); | ||
362 | } | ||
363 | |||
368 | /* | 364 | /* |
369 | * Function: cio_modify | 365 | * cio_commit_config - apply configuration to the subchannel |
370 | * Issues a "Modify Subchannel" on the specified subchannel | ||
371 | */ | 366 | */ |
372 | int | 367 | int cio_commit_config(struct subchannel *sch) |
373 | cio_modify (struct subchannel *sch) | ||
374 | { | 368 | { |
375 | int ccode, retry, ret; | 369 | struct schib schib; |
370 | int ccode, retry, ret = 0; | ||
371 | |||
372 | if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) | ||
373 | return -ENODEV; | ||
376 | 374 | ||
377 | ret = 0; | ||
378 | for (retry = 0; retry < 5; retry++) { | 375 | for (retry = 0; retry < 5; retry++) { |
379 | ccode = msch_err (sch->schid, &sch->schib); | 376 | /* copy desired changes to local schib */ |
380 | if (ccode < 0) /* -EIO if msch gets a program check. */ | 377 | cio_apply_config(sch, &schib); |
378 | ccode = msch_err(sch->schid, &schib); | ||
379 | if (ccode < 0) /* -EIO if msch gets a program check. */ | ||
381 | return ccode; | 380 | return ccode; |
382 | switch (ccode) { | 381 | switch (ccode) { |
383 | case 0: /* successfull */ | 382 | case 0: /* successfull */ |
384 | return 0; | 383 | if (stsch(sch->schid, &schib) || |
385 | case 1: /* status pending */ | 384 | !css_sch_is_valid(&schib)) |
385 | return -ENODEV; | ||
386 | if (cio_check_config(sch, &schib)) { | ||
387 | /* commit changes from local schib */ | ||
388 | memcpy(&sch->schib, &schib, sizeof(schib)); | ||
389 | return 0; | ||
390 | } | ||
391 | ret = -EAGAIN; | ||
392 | break; | ||
393 | case 1: /* status pending */ | ||
386 | return -EBUSY; | 394 | return -EBUSY; |
387 | case 2: /* busy */ | 395 | case 2: /* busy */ |
388 | udelay (100); /* allow for recovery */ | 396 | udelay(100); /* allow for recovery */ |
389 | ret = -EBUSY; | 397 | ret = -EBUSY; |
390 | break; | 398 | break; |
391 | case 3: /* not operational */ | 399 | case 3: /* not operational */ |
392 | return -ENODEV; | 400 | return -ENODEV; |
393 | } | 401 | } |
394 | } | 402 | } |
@@ -396,6 +404,23 @@ cio_modify (struct subchannel *sch) | |||
396 | } | 404 | } |
397 | 405 | ||
398 | /** | 406 | /** |
407 | * cio_update_schib - Perform stsch and update schib if subchannel is valid. | ||
408 | * @sch: subchannel on which to perform stsch | ||
409 | * Return zero on success, -ENODEV otherwise. | ||
410 | */ | ||
411 | int cio_update_schib(struct subchannel *sch) | ||
412 | { | ||
413 | struct schib schib; | ||
414 | |||
415 | if (stsch(sch->schid, &schib) || !css_sch_is_valid(&schib)) | ||
416 | return -ENODEV; | ||
417 | |||
418 | memcpy(&sch->schib, &schib, sizeof(schib)); | ||
419 | return 0; | ||
420 | } | ||
421 | EXPORT_SYMBOL_GPL(cio_update_schib); | ||
422 | |||
423 | /** | ||
399 | * cio_enable_subchannel - enable a subchannel. | 424 | * cio_enable_subchannel - enable a subchannel. |
400 | * @sch: subchannel to be enabled | 425 | * @sch: subchannel to be enabled |
401 | * @intparm: interruption parameter to set | 426 | * @intparm: interruption parameter to set |
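cio_update_schib() above folds the recurring stsch-and-validate sequence into one helper, which the later hunks then substitute for bare stsch() calls. A minimal caller sketch, assuming only the declarations from cio.h (the function name example_refresh_paths is invented for illustration):

	#include <linux/errno.h>
	#include "cio.h"	/* struct subchannel, cio_update_schib() */

	/* Illustrative only: refresh the cached schib and bail out if the
	 * subchannel has disappeared, instead of open-coding stsch(). */
	static int example_refresh_paths(struct subchannel *sch)
	{
		if (cio_update_schib(sch))
			return -ENODEV;
		/* sch->schib is current again; recompute the usable path mask. */
		sch->lpm = sch->schib.pmcw.pam & sch->opm;
		return 0;
	}
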
@@ -403,7 +428,6 @@ cio_modify (struct subchannel *sch) | |||
403 | int cio_enable_subchannel(struct subchannel *sch, u32 intparm) | 428 | int cio_enable_subchannel(struct subchannel *sch, u32 intparm) |
404 | { | 429 | { |
405 | char dbf_txt[15]; | 430 | char dbf_txt[15]; |
406 | int ccode; | ||
407 | int retry; | 431 | int retry; |
408 | int ret; | 432 | int ret; |
409 | 433 | ||
@@ -412,33 +436,27 @@ int cio_enable_subchannel(struct subchannel *sch, u32 intparm) | |||
412 | 436 | ||
413 | if (sch_is_pseudo_sch(sch)) | 437 | if (sch_is_pseudo_sch(sch)) |
414 | return -EINVAL; | 438 | return -EINVAL; |
415 | ccode = stsch (sch->schid, &sch->schib); | 439 | if (cio_update_schib(sch)) |
416 | if (ccode) | ||
417 | return -ENODEV; | 440 | return -ENODEV; |
418 | 441 | ||
419 | for (retry = 5, ret = 0; retry > 0; retry--) { | 442 | sch->config.ena = 1; |
420 | sch->schib.pmcw.ena = 1; | 443 | sch->config.isc = sch->isc; |
421 | sch->schib.pmcw.isc = sch->isc; | 444 | sch->config.intparm = intparm; |
422 | sch->schib.pmcw.intparm = intparm; | 445 | |
423 | ret = cio_modify(sch); | 446 | for (retry = 0; retry < 3; retry++) { |
424 | if (ret == -ENODEV) | 447 | ret = cio_commit_config(sch); |
425 | break; | 448 | if (ret == -EIO) { |
426 | if (ret == -EIO) | ||
427 | /* | 449 | /* |
428 | * Got a program check in cio_modify. Try without | 450 | * Got a program check in msch. Try without |
429 | * the concurrent sense bit the next time. | 451 | * the concurrent sense bit the next time. |
430 | */ | 452 | */ |
431 | sch->schib.pmcw.csense = 0; | 453 | sch->config.csense = 0; |
432 | if (ret == 0) { | 454 | } else if (ret == -EBUSY) { |
433 | stsch (sch->schid, &sch->schib); | ||
434 | if (sch->schib.pmcw.ena) | ||
435 | break; | ||
436 | } | ||
437 | if (ret == -EBUSY) { | ||
438 | struct irb irb; | 455 | struct irb irb; |
439 | if (tsch(sch->schid, &irb) != 0) | 456 | if (tsch(sch->schid, &irb) != 0) |
440 | break; | 457 | break; |
441 | } | 458 | } else |
459 | break; | ||
442 | } | 460 | } |
443 | sprintf (dbf_txt, "ret:%d", ret); | 461 | sprintf (dbf_txt, "ret:%d", ret); |
444 | CIO_TRACE_EVENT (2, dbf_txt); | 462 | CIO_TRACE_EVENT (2, dbf_txt); |
@@ -453,8 +471,6 @@ EXPORT_SYMBOL_GPL(cio_enable_subchannel); | |||
453 | int cio_disable_subchannel(struct subchannel *sch) | 471 | int cio_disable_subchannel(struct subchannel *sch) |
454 | { | 472 | { |
455 | char dbf_txt[15]; | 473 | char dbf_txt[15]; |
456 | int ccode; | ||
457 | int retry; | ||
458 | int ret; | 474 | int ret; |
459 | 475 | ||
460 | CIO_TRACE_EVENT (2, "dissch"); | 476 | CIO_TRACE_EVENT (2, "dissch"); |
@@ -462,8 +478,7 @@ int cio_disable_subchannel(struct subchannel *sch) | |||
462 | 478 | ||
463 | if (sch_is_pseudo_sch(sch)) | 479 | if (sch_is_pseudo_sch(sch)) |
464 | return 0; | 480 | return 0; |
465 | ccode = stsch (sch->schid, &sch->schib); | 481 | if (cio_update_schib(sch)) |
466 | if (ccode == 3) /* Not operational. */ | ||
467 | return -ENODEV; | 482 | return -ENODEV; |
468 | 483 | ||
469 | if (scsw_actl(&sch->schib.scsw) != 0) | 484 | if (scsw_actl(&sch->schib.scsw) != 0) |
@@ -473,24 +488,9 @@ int cio_disable_subchannel(struct subchannel *sch) | |||
473 | */ | 488 | */ |
474 | return -EBUSY; | 489 | return -EBUSY; |
475 | 490 | ||
476 | for (retry = 5, ret = 0; retry > 0; retry--) { | 491 | sch->config.ena = 0; |
477 | sch->schib.pmcw.ena = 0; | 492 | ret = cio_commit_config(sch); |
478 | ret = cio_modify(sch); | 493 | |
479 | if (ret == -ENODEV) | ||
480 | break; | ||
481 | if (ret == -EBUSY) | ||
482 | /* | ||
483 | * The subchannel is busy or status pending. | ||
484 | * We'll disable when the next interrupt was delivered | ||
485 | * via the state machine. | ||
486 | */ | ||
487 | break; | ||
488 | if (ret == 0) { | ||
489 | stsch (sch->schid, &sch->schib); | ||
490 | if (!sch->schib.pmcw.ena) | ||
491 | break; | ||
492 | } | ||
493 | } | ||
494 | sprintf (dbf_txt, "ret:%d", ret); | 494 | sprintf (dbf_txt, "ret:%d", ret); |
495 | CIO_TRACE_EVENT (2, dbf_txt); | 495 | CIO_TRACE_EVENT (2, dbf_txt); |
496 | return ret; | 496 | return ret; |
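The enable and disable rewrites above converge on one calling convention: edit the target state in sch->config, then let cio_commit_config() issue msch with retries and verify the result through stsch. A sketch of that convention (example_set_intparm is invented; the return values are the ones visible in cio_commit_config()):

	#include <linux/errno.h>
	#include <linux/types.h>
	#include "cio.h"

	/* Sketch: change only the interruption parameter of an enabled subchannel.
	 * cio_commit_config() returns 0 on success, -ENODEV if the subchannel is
	 * gone, -EBUSY or -EAGAIN on transient conditions, -EIO on a program check. */
	static int example_set_intparm(struct subchannel *sch, u32 intparm)
	{
		sch->config.intparm = intparm;
		return cio_commit_config(sch);
	}
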
@@ -687,6 +687,43 @@ static char console_sch_name[10] = "0.x.xxxx"; | |||
687 | static struct io_subchannel_private console_priv; | 687 | static struct io_subchannel_private console_priv; |
688 | static int console_subchannel_in_use; | 688 | static int console_subchannel_in_use; |
689 | 689 | ||
690 | /* | ||
691 | * Use tpi to get a pending interrupt, call the interrupt handler and | ||
692 | * return a pointer to the subchannel structure. | ||
693 | */ | ||
694 | static int cio_tpi(void) | ||
695 | { | ||
696 | struct tpi_info *tpi_info; | ||
697 | struct subchannel *sch; | ||
698 | struct irb *irb; | ||
699 | int irq_context; | ||
700 | |||
701 | tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID; | ||
702 | if (tpi(NULL) != 1) | ||
703 | return 0; | ||
704 | irb = (struct irb *) __LC_IRB; | ||
705 | /* Store interrupt response block to lowcore. */ | ||
706 | if (tsch(tpi_info->schid, irb) != 0) | ||
707 | /* Not status pending or not operational. */ | ||
708 | return 1; | ||
709 | sch = (struct subchannel *)(unsigned long)tpi_info->intparm; | ||
710 | if (!sch) | ||
711 | return 1; | ||
712 | irq_context = in_interrupt(); | ||
713 | if (!irq_context) | ||
714 | local_bh_disable(); | ||
715 | irq_enter(); | ||
716 | spin_lock(sch->lock); | ||
717 | memcpy(&sch->schib.scsw, &irb->scsw, sizeof(union scsw)); | ||
718 | if (sch->driver && sch->driver->irq) | ||
719 | sch->driver->irq(sch); | ||
720 | spin_unlock(sch->lock); | ||
721 | irq_exit(); | ||
722 | if (!irq_context) | ||
723 | _local_bh_enable(); | ||
724 | return 1; | ||
725 | } | ||
726 | |||
690 | void *cio_get_console_priv(void) | 727 | void *cio_get_console_priv(void) |
691 | { | 728 | { |
692 | return &console_priv; | 729 | return &console_priv; |
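cio_tpi() is file-local and only meant for the console path, where regular interrupt delivery may not be available yet. A hypothetical busy-wait built on it, written as it would appear inside cio.c (everything except cio_tpi(), cio_update_schib() and scsw_actl() is invented):

	/* Sketch: drain pending I/O interrupts until the console subchannel shows
	 * no activity anymore, or until it vanishes. */
	static void example_wait_for_console_idle(struct subchannel *sch)
	{
		while (scsw_actl(&sch->schib.scsw) != 0) {
			cio_tpi();			/* deliver one pending interrupt, if any */
			if (cio_update_schib(sch))	/* refresh the activity bits */
				break;
		}
	}
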
@@ -780,7 +817,7 @@ cio_probe_console(void) | |||
780 | sch_no = cio_get_console_sch_no(); | 817 | sch_no = cio_get_console_sch_no(); |
781 | if (sch_no == -1) { | 818 | if (sch_no == -1) { |
782 | console_subchannel_in_use = 0; | 819 | console_subchannel_in_use = 0; |
783 | printk(KERN_WARNING "cio: No ccw console found!\n"); | 820 | pr_warning("No CCW console was found\n"); |
784 | return ERR_PTR(-ENODEV); | 821 | return ERR_PTR(-ENODEV); |
785 | } | 822 | } |
786 | memset(&console_subchannel, 0, sizeof(struct subchannel)); | 823 | memset(&console_subchannel, 0, sizeof(struct subchannel)); |
@@ -796,10 +833,9 @@ cio_probe_console(void) | |||
796 | * enable console I/O-interrupt subclass | 833 | * enable console I/O-interrupt subclass |
797 | */ | 834 | */ |
798 | isc_register(CONSOLE_ISC); | 835 | isc_register(CONSOLE_ISC); |
799 | console_subchannel.schib.pmcw.isc = CONSOLE_ISC; | 836 | console_subchannel.config.isc = CONSOLE_ISC; |
800 | console_subchannel.schib.pmcw.intparm = | 837 | console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel; |
801 | (u32)(addr_t)&console_subchannel; | 838 | ret = cio_commit_config(&console_subchannel); |
802 | ret = cio_modify(&console_subchannel); | ||
803 | if (ret) { | 839 | if (ret) { |
804 | isc_unregister(CONSOLE_ISC); | 840 | isc_unregister(CONSOLE_ISC); |
805 | console_subchannel_in_use = 0; | 841 | console_subchannel_in_use = 0; |
@@ -811,8 +847,8 @@ cio_probe_console(void) | |||
811 | void | 847 | void |
812 | cio_release_console(void) | 848 | cio_release_console(void) |
813 | { | 849 | { |
814 | console_subchannel.schib.pmcw.intparm = 0; | 850 | console_subchannel.config.intparm = 0; |
815 | cio_modify(&console_subchannel); | 851 | cio_commit_config(&console_subchannel); |
816 | isc_unregister(CONSOLE_ISC); | 852 | isc_unregister(CONSOLE_ISC); |
817 | console_subchannel_in_use = 0; | 853 | console_subchannel_in_use = 0; |
818 | } | 854 | } |
@@ -852,7 +888,8 @@ __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) | |||
852 | cc = msch(schid, schib); | 888 | cc = msch(schid, schib); |
853 | if (cc) | 889 | if (cc) |
854 | return (cc==3?-ENODEV:-EBUSY); | 890 | return (cc==3?-ENODEV:-EBUSY); |
855 | stsch(schid, schib); | 891 | if (stsch(schid, schib) || !css_sch_is_valid(schib)) |
892 | return -ENODEV; | ||
856 | if (!schib->pmcw.ena) | 893 | if (!schib->pmcw.ena) |
857 | return 0; | 894 | return 0; |
858 | } | 895 | } |
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 0fb24784e925..5150fba742ac 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h | |||
@@ -45,6 +45,19 @@ struct pmcw { | |||
45 | /* ... in an operand exception. */ | 45 | /* ... in an operand exception. */ |
46 | } __attribute__ ((packed)); | 46 | } __attribute__ ((packed)); |
47 | 47 | ||
48 | /* Target SCHIB configuration. */ | ||
49 | struct schib_config { | ||
50 | u64 mba; | ||
51 | u32 intparm; | ||
52 | u16 mbi; | ||
53 | u32 isc:3; | ||
54 | u32 ena:1; | ||
55 | u32 mme:2; | ||
56 | u32 mp:1; | ||
57 | u32 csense:1; | ||
58 | u32 mbfc:1; | ||
59 | } __attribute__ ((packed)); | ||
60 | |||
48 | /* | 61 | /* |
49 | * subchannel information block | 62 | * subchannel information block |
50 | */ | 63 | */ |
@@ -82,6 +95,8 @@ struct subchannel { | |||
82 | struct device dev; /* entry in device tree */ | 95 | struct device dev; /* entry in device tree */ |
83 | struct css_driver *driver; | 96 | struct css_driver *driver; |
84 | void *private; /* private per subchannel type data */ | 97 | void *private; /* private per subchannel type data */ |
98 | struct work_struct work; | ||
99 | struct schib_config config; | ||
85 | } __attribute__ ((aligned(8))); | 100 | } __attribute__ ((aligned(8))); |
86 | 101 | ||
87 | #define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */ | 102 | #define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */ |
@@ -100,7 +115,8 @@ extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8); | |||
100 | extern int cio_cancel (struct subchannel *); | 115 | extern int cio_cancel (struct subchannel *); |
101 | extern int cio_set_options (struct subchannel *, int); | 116 | extern int cio_set_options (struct subchannel *, int); |
102 | extern int cio_get_options (struct subchannel *); | 117 | extern int cio_get_options (struct subchannel *); |
103 | extern int cio_modify (struct subchannel *); | 118 | extern int cio_update_schib(struct subchannel *sch); |
119 | extern int cio_commit_config(struct subchannel *sch); | ||
104 | 120 | ||
105 | int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); | 121 | int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); |
106 | int cio_tm_intrg(struct subchannel *sch); | 122 | int cio_tm_intrg(struct subchannel *sch); |
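struct schib_config mirrors exactly the pmcw fields that cio_apply_config() copies, so a subchannel's target state is now visible in one place. A purely hypothetical debugging aid (not part of the patch) that prints it could look like:

	#include <linux/kernel.h>
	#include "cio.h"

	/* Sketch: dump the target configuration; the 0.%x.%04x notation matches
	 * the bus id format used elsewhere in this patch. */
	static void example_dump_config(struct subchannel *sch)
	{
		pr_info("sch 0.%x.%04x: ena %d isc %d mp %d csense %d mme %d mbfc %d\n",
			sch->schid.ssid, sch->schid.sch_no,
			sch->config.ena, sch->config.isc, sch->config.mp,
			sch->config.csense, sch->config.mme, sch->config.mbfc);
	}
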
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index a90b28c0be57..dc98b2c63862 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c | |||
@@ -25,6 +25,9 @@ | |||
25 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 25 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #define KMSG_COMPONENT "cio" | ||
29 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
30 | |||
28 | #include <linux/bootmem.h> | 31 | #include <linux/bootmem.h> |
29 | #include <linux/device.h> | 32 | #include <linux/device.h> |
30 | #include <linux/init.h> | 33 | #include <linux/init.h> |
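The KMSG_COMPONENT/pr_fmt pair above is expanded at compile time by the pr_* macros, so every message from this file gains a "cio: " prefix without repeating it in each call. A standalone illustration with placeholder arguments (any file defining pr_fmt before its includes behaves the same way):

	#define KMSG_COMPONENT "cio"
	#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

	#include <linux/kernel.h>

	static void example_log(void)
	{
		/* Emits "cio: Channel measurement facility initialized using
		 * format extended (mode autodetected)" at KERN_INFO level. */
		pr_info("Channel measurement facility initialized using format "
			"%s (mode %s)\n", "extended", "autodetected");
	}
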
@@ -185,56 +188,19 @@ static inline void cmf_activate(void *area, unsigned int onoff) | |||
185 | static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc, | 188 | static int set_schib(struct ccw_device *cdev, u32 mme, int mbfc, |
186 | unsigned long address) | 189 | unsigned long address) |
187 | { | 190 | { |
188 | int ret; | ||
189 | int retry; | ||
190 | struct subchannel *sch; | 191 | struct subchannel *sch; |
191 | struct schib *schib; | ||
192 | 192 | ||
193 | sch = to_subchannel(cdev->dev.parent); | 193 | sch = to_subchannel(cdev->dev.parent); |
194 | schib = &sch->schib; | ||
195 | /* msch can silently fail, so do it again if necessary */ | ||
196 | for (retry = 0; retry < 3; retry++) { | ||
197 | /* prepare schib */ | ||
198 | stsch(sch->schid, schib); | ||
199 | schib->pmcw.mme = mme; | ||
200 | schib->pmcw.mbfc = mbfc; | ||
201 | /* address can be either a block address or a block index */ | ||
202 | if (mbfc) | ||
203 | schib->mba = address; | ||
204 | else | ||
205 | schib->pmcw.mbi = address; | ||
206 | |||
207 | /* try to submit it */ | ||
208 | switch(ret = msch_err(sch->schid, schib)) { | ||
209 | case 0: | ||
210 | break; | ||
211 | case 1: | ||
212 | case 2: /* in I/O or status pending */ | ||
213 | ret = -EBUSY; | ||
214 | break; | ||
215 | case 3: /* subchannel is no longer valid */ | ||
216 | ret = -ENODEV; | ||
217 | break; | ||
218 | default: /* msch caught an exception */ | ||
219 | ret = -EINVAL; | ||
220 | break; | ||
221 | } | ||
222 | stsch(sch->schid, schib); /* restore the schib */ | ||
223 | |||
224 | if (ret) | ||
225 | break; | ||
226 | 194 | ||
227 | /* check if it worked */ | 195 | sch->config.mme = mme; |
228 | if (schib->pmcw.mme == mme && | 196 | sch->config.mbfc = mbfc; |
229 | schib->pmcw.mbfc == mbfc && | 197 | /* address can be either a block address or a block index */ |
230 | (mbfc ? (schib->mba == address) | 198 | if (mbfc) |
231 | : (schib->pmcw.mbi == address))) | 199 | sch->config.mba = address; |
232 | return 0; | 200 | else |
201 | sch->config.mbi = address; | ||
233 | 202 | ||
234 | ret = -EINVAL; | 203 | return cio_commit_config(sch); |
235 | } | ||
236 | |||
237 | return ret; | ||
238 | } | 204 | } |
239 | 205 | ||
240 | struct set_schib_struct { | 206 | struct set_schib_struct { |
@@ -338,7 +304,7 @@ static int cmf_copy_block(struct ccw_device *cdev) | |||
338 | 304 | ||
339 | sch = to_subchannel(cdev->dev.parent); | 305 | sch = to_subchannel(cdev->dev.parent); |
340 | 306 | ||
341 | if (stsch(sch->schid, &sch->schib)) | 307 | if (cio_update_schib(sch)) |
342 | return -ENODEV; | 308 | return -ENODEV; |
343 | 309 | ||
344 | if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) { | 310 | if (scsw_fctl(&sch->schib.scsw) & SCSW_FCTL_START_FUNC) { |
@@ -1359,9 +1325,8 @@ static int __init init_cmf(void) | |||
1359 | default: | 1325 | default: |
1360 | return 1; | 1326 | return 1; |
1361 | } | 1327 | } |
1362 | 1328 | pr_info("Channel measurement facility initialized using format " | |
1363 | printk(KERN_INFO "cio: Channel measurement facility using %s " | 1329 | "%s (mode %s)\n", format_string, detect_string); |
1364 | "format (%s)\n", format_string, detect_string); | ||
1365 | return 0; | 1330 | return 0; |
1366 | } | 1331 | } |
1367 | 1332 | ||
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 76bbb1e74c29..8019288bc6de 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -6,6 +6,10 @@ | |||
6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) | 6 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) |
7 | * Cornelia Huck (cornelia.huck@de.ibm.com) | 7 | * Cornelia Huck (cornelia.huck@de.ibm.com) |
8 | */ | 8 | */ |
9 | |||
10 | #define KMSG_COMPONENT "cio" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
9 | #include <linux/module.h> | 13 | #include <linux/module.h> |
10 | #include <linux/init.h> | 14 | #include <linux/init.h> |
11 | #include <linux/device.h> | 15 | #include <linux/device.h> |
@@ -128,8 +132,8 @@ css_free_subchannel(struct subchannel *sch) | |||
128 | { | 132 | { |
129 | if (sch) { | 133 | if (sch) { |
130 | /* Reset intparm to zeroes. */ | 134 | /* Reset intparm to zeroes. */ |
131 | sch->schib.pmcw.intparm = 0; | 135 | sch->config.intparm = 0; |
132 | cio_modify(sch); | 136 | cio_commit_config(sch); |
133 | kfree(sch->lock); | 137 | kfree(sch->lock); |
134 | kfree(sch); | 138 | kfree(sch); |
135 | } | 139 | } |
@@ -844,8 +848,8 @@ out: | |||
844 | s390_unregister_crw_handler(CRW_RSC_CSS); | 848 | s390_unregister_crw_handler(CRW_RSC_CSS); |
845 | chsc_free_sei_area(); | 849 | chsc_free_sei_area(); |
846 | kfree(slow_subchannel_set); | 850 | kfree(slow_subchannel_set); |
847 | printk(KERN_WARNING"cio: failed to initialize css driver (%d)!\n", | 851 | pr_alert("The CSS device driver initialization failed with " |
848 | ret); | 852 | "errno=%d\n", ret); |
849 | return ret; | 853 | return ret; |
850 | } | 854 | } |
851 | 855 | ||
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 4e4008325e28..23d5752349b5 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -376,19 +376,23 @@ int ccw_device_set_offline(struct ccw_device *cdev) | |||
376 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); | 376 | dev_fsm_event(cdev, DEV_EVENT_NOTOPER); |
377 | } | 377 | } |
378 | spin_unlock_irq(cdev->ccwlock); | 378 | spin_unlock_irq(cdev->ccwlock); |
379 | /* Give up reference from ccw_device_set_online(). */ | ||
380 | put_device(&cdev->dev); | ||
379 | return ret; | 381 | return ret; |
380 | } | 382 | } |
381 | spin_unlock_irq(cdev->ccwlock); | 383 | spin_unlock_irq(cdev->ccwlock); |
382 | if (ret == 0) | 384 | if (ret == 0) { |
383 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); | 385 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); |
384 | else { | 386 | /* Give up reference from ccw_device_set_online(). */ |
387 | put_device(&cdev->dev); | ||
388 | } else { | ||
385 | CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " | 389 | CIO_MSG_EVENT(0, "ccw_device_offline returned %d, " |
386 | "device 0.%x.%04x\n", | 390 | "device 0.%x.%04x\n", |
387 | ret, cdev->private->dev_id.ssid, | 391 | ret, cdev->private->dev_id.ssid, |
388 | cdev->private->dev_id.devno); | 392 | cdev->private->dev_id.devno); |
389 | cdev->online = 1; | 393 | cdev->online = 1; |
390 | } | 394 | } |
391 | return ret; | 395 | return ret; |
392 | } | 396 | } |
393 | 397 | ||
394 | /** | 398 | /** |
@@ -411,6 +415,9 @@ int ccw_device_set_online(struct ccw_device *cdev) | |||
411 | return -ENODEV; | 415 | return -ENODEV; |
412 | if (cdev->online || !cdev->drv) | 416 | if (cdev->online || !cdev->drv) |
413 | return -EINVAL; | 417 | return -EINVAL; |
418 | /* Hold on to an extra reference while device is online. */ | ||
419 | if (!get_device(&cdev->dev)) | ||
420 | return -ENODEV; | ||
414 | 421 | ||
415 | spin_lock_irq(cdev->ccwlock); | 422 | spin_lock_irq(cdev->ccwlock); |
416 | ret = ccw_device_online(cdev); | 423 | ret = ccw_device_online(cdev); |
@@ -422,10 +429,15 @@ int ccw_device_set_online(struct ccw_device *cdev) | |||
422 | "device 0.%x.%04x\n", | 429 | "device 0.%x.%04x\n", |
423 | ret, cdev->private->dev_id.ssid, | 430 | ret, cdev->private->dev_id.ssid, |
424 | cdev->private->dev_id.devno); | 431 | cdev->private->dev_id.devno); |
432 | /* Give up online reference since onlining failed. */ | ||
433 | put_device(&cdev->dev); | ||
425 | return ret; | 434 | return ret; |
426 | } | 435 | } |
427 | if (cdev->private->state != DEV_STATE_ONLINE) | 436 | if (cdev->private->state != DEV_STATE_ONLINE) { |
437 | /* Give up online reference since onlining failed. */ | ||
438 | put_device(&cdev->dev); | ||
428 | return -ENODEV; | 439 | return -ENODEV; |
440 | } | ||
429 | if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) { | 441 | if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) { |
430 | cdev->online = 1; | 442 | cdev->online = 1; |
431 | return 0; | 443 | return 0; |
@@ -440,6 +452,8 @@ int ccw_device_set_online(struct ccw_device *cdev) | |||
440 | "device 0.%x.%04x\n", | 452 | "device 0.%x.%04x\n", |
441 | ret, cdev->private->dev_id.ssid, | 453 | ret, cdev->private->dev_id.ssid, |
442 | cdev->private->dev_id.devno); | 454 | cdev->private->dev_id.devno); |
455 | /* Give up online reference since onlining failed. */ | ||
456 | put_device(&cdev->dev); | ||
443 | return (ret == 0) ? -ENODEV : ret; | 457 | return (ret == 0) ? -ENODEV : ret; |
444 | } | 458 | } |
445 | 459 | ||
@@ -704,6 +718,8 @@ ccw_device_release(struct device *dev) | |||
704 | struct ccw_device *cdev; | 718 | struct ccw_device *cdev; |
705 | 719 | ||
706 | cdev = to_ccwdev(dev); | 720 | cdev = to_ccwdev(dev); |
721 | /* Release reference of parent subchannel. */ | ||
722 | put_device(cdev->dev.parent); | ||
707 | kfree(cdev->private); | 723 | kfree(cdev->private); |
708 | kfree(cdev); | 724 | kfree(cdev); |
709 | } | 725 | } |
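Most device.c hunks in this commit pair each get_device() with an explicitly commented put_device(), and ccw_device_release() above is the terminal point for the parent reference. The convention, reduced to a sketch (example_adopt_parent is invented; only the structures and get_device()/put_device() are real):

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <asm/ccwdev.h>		/* struct ccw_device */
	#include "cio.h"		/* struct subchannel */

	/* Sketch: take a reference on the parent subchannel when wiring up a ccw
	 * device; the matching put_device() is the one added to ccw_device_release(). */
	static int example_adopt_parent(struct ccw_device *cdev, struct subchannel *sch)
	{
		if (!get_device(&sch->dev))	/* parent is already being torn down */
			return -ENODEV;
		cdev->dev.parent = &sch->dev;
		return 0;
	}
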
@@ -735,8 +751,8 @@ static int io_subchannel_initialize_dev(struct subchannel *sch, | |||
735 | /* Do first half of device_register. */ | 751 | /* Do first half of device_register. */ |
736 | device_initialize(&cdev->dev); | 752 | device_initialize(&cdev->dev); |
737 | if (!get_device(&sch->dev)) { | 753 | if (!get_device(&sch->dev)) { |
738 | if (cdev->dev.release) | 754 | /* Release reference from device_initialize(). */ |
739 | cdev->dev.release(&cdev->dev); | 755 | put_device(&cdev->dev); |
740 | return -ENODEV; | 756 | return -ENODEV; |
741 | } | 757 | } |
742 | return 0; | 758 | return 0; |
@@ -778,37 +794,55 @@ static void sch_attach_disconnected_device(struct subchannel *sch, | |||
778 | struct subchannel *other_sch; | 794 | struct subchannel *other_sch; |
779 | int ret; | 795 | int ret; |
780 | 796 | ||
781 | other_sch = to_subchannel(get_device(cdev->dev.parent)); | 797 | /* Get reference for new parent. */ |
798 | if (!get_device(&sch->dev)) | ||
799 | return; | ||
800 | other_sch = to_subchannel(cdev->dev.parent); | ||
801 | /* Note: device_move() changes cdev->dev.parent */ | ||
782 | ret = device_move(&cdev->dev, &sch->dev); | 802 | ret = device_move(&cdev->dev, &sch->dev); |
783 | if (ret) { | 803 | if (ret) { |
784 | CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed " | 804 | CIO_MSG_EVENT(0, "Moving disconnected device 0.%x.%04x failed " |
785 | "(ret=%d)!\n", cdev->private->dev_id.ssid, | 805 | "(ret=%d)!\n", cdev->private->dev_id.ssid, |
786 | cdev->private->dev_id.devno, ret); | 806 | cdev->private->dev_id.devno, ret); |
787 | put_device(&other_sch->dev); | 807 | /* Put reference for new parent. */ |
808 | put_device(&sch->dev); | ||
788 | return; | 809 | return; |
789 | } | 810 | } |
790 | sch_set_cdev(other_sch, NULL); | 811 | sch_set_cdev(other_sch, NULL); |
791 | /* No need to keep a subchannel without ccw device around. */ | 812 | /* No need to keep a subchannel without ccw device around. */ |
792 | css_sch_device_unregister(other_sch); | 813 | css_sch_device_unregister(other_sch); |
793 | put_device(&other_sch->dev); | ||
794 | sch_attach_device(sch, cdev); | 814 | sch_attach_device(sch, cdev); |
815 | /* Put reference for old parent. */ | ||
816 | put_device(&other_sch->dev); | ||
795 | } | 817 | } |
796 | 818 | ||
797 | static void sch_attach_orphaned_device(struct subchannel *sch, | 819 | static void sch_attach_orphaned_device(struct subchannel *sch, |
798 | struct ccw_device *cdev) | 820 | struct ccw_device *cdev) |
799 | { | 821 | { |
800 | int ret; | 822 | int ret; |
823 | struct subchannel *pseudo_sch; | ||
801 | 824 | ||
802 | /* Try to move the ccw device to its new subchannel. */ | 825 | /* Get reference for new parent. */ |
826 | if (!get_device(&sch->dev)) | ||
827 | return; | ||
828 | pseudo_sch = to_subchannel(cdev->dev.parent); | ||
829 | /* | ||
830 | * Try to move the ccw device to its new subchannel. | ||
831 | * Note: device_move() changes cdev->dev.parent | ||
832 | */ | ||
803 | ret = device_move(&cdev->dev, &sch->dev); | 833 | ret = device_move(&cdev->dev, &sch->dev); |
804 | if (ret) { | 834 | if (ret) { |
805 | CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage " | 835 | CIO_MSG_EVENT(0, "Moving device 0.%x.%04x from orphanage " |
806 | "failed (ret=%d)!\n", | 836 | "failed (ret=%d)!\n", |
807 | cdev->private->dev_id.ssid, | 837 | cdev->private->dev_id.ssid, |
808 | cdev->private->dev_id.devno, ret); | 838 | cdev->private->dev_id.devno, ret); |
839 | /* Put reference for new parent. */ | ||
840 | put_device(&sch->dev); | ||
809 | return; | 841 | return; |
810 | } | 842 | } |
811 | sch_attach_device(sch, cdev); | 843 | sch_attach_device(sch, cdev); |
844 | /* Put reference on pseudo subchannel. */ | ||
845 | put_device(&pseudo_sch->dev); | ||
812 | } | 846 | } |
813 | 847 | ||
814 | static void sch_create_and_recog_new_device(struct subchannel *sch) | 848 | static void sch_create_and_recog_new_device(struct subchannel *sch) |
@@ -830,9 +864,11 @@ static void sch_create_and_recog_new_device(struct subchannel *sch) | |||
830 | spin_lock_irq(sch->lock); | 864 | spin_lock_irq(sch->lock); |
831 | sch_set_cdev(sch, NULL); | 865 | sch_set_cdev(sch, NULL); |
832 | spin_unlock_irq(sch->lock); | 866 | spin_unlock_irq(sch->lock); |
833 | if (cdev->dev.release) | ||
834 | cdev->dev.release(&cdev->dev); | ||
835 | css_sch_device_unregister(sch); | 867 | css_sch_device_unregister(sch); |
868 | /* Put reference from io_subchannel_create_ccwdev(). */ | ||
869 | put_device(&sch->dev); | ||
870 | /* Give up initial reference. */ | ||
871 | put_device(&cdev->dev); | ||
836 | } | 872 | } |
837 | } | 873 | } |
838 | 874 | ||
@@ -854,15 +890,20 @@ void ccw_device_move_to_orphanage(struct work_struct *work) | |||
854 | dev_id.devno = sch->schib.pmcw.dev; | 890 | dev_id.devno = sch->schib.pmcw.dev; |
855 | dev_id.ssid = sch->schid.ssid; | 891 | dev_id.ssid = sch->schid.ssid; |
856 | 892 | ||
893 | /* Increase refcount for pseudo subchannel. */ | ||
894 | get_device(&css->pseudo_subchannel->dev); | ||
857 | /* | 895 | /* |
858 | * Move the orphaned ccw device to the orphanage so the replacing | 896 | * Move the orphaned ccw device to the orphanage so the replacing |
859 | * ccw device can take its place on the subchannel. | 897 | * ccw device can take its place on the subchannel. |
898 | * Note: device_move() changes cdev->dev.parent | ||
860 | */ | 899 | */ |
861 | ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev); | 900 | ret = device_move(&cdev->dev, &css->pseudo_subchannel->dev); |
862 | if (ret) { | 901 | if (ret) { |
863 | CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed " | 902 | CIO_MSG_EVENT(0, "Moving device 0.%x.%04x to orphanage failed " |
864 | "(ret=%d)!\n", cdev->private->dev_id.ssid, | 903 | "(ret=%d)!\n", cdev->private->dev_id.ssid, |
865 | cdev->private->dev_id.devno, ret); | 904 | cdev->private->dev_id.devno, ret); |
905 | /* Decrease refcount for pseudo subchannel again. */ | ||
906 | put_device(&css->pseudo_subchannel->dev); | ||
866 | return; | 907 | return; |
867 | } | 908 | } |
868 | cdev->ccwlock = css->pseudo_subchannel->lock; | 909 | cdev->ccwlock = css->pseudo_subchannel->lock; |
@@ -875,17 +916,23 @@ void ccw_device_move_to_orphanage(struct work_struct *work) | |||
875 | if (replacing_cdev) { | 916 | if (replacing_cdev) { |
876 | sch_attach_disconnected_device(sch, replacing_cdev); | 917 | sch_attach_disconnected_device(sch, replacing_cdev); |
877 | /* Release reference from get_disc_ccwdev_by_dev_id() */ | 918 | /* Release reference from get_disc_ccwdev_by_dev_id() */ |
878 | put_device(&cdev->dev); | 919 | put_device(&replacing_cdev->dev); |
920 | /* Release reference of subchannel from old cdev. */ | ||
921 | put_device(&sch->dev); | ||
879 | return; | 922 | return; |
880 | } | 923 | } |
881 | replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id); | 924 | replacing_cdev = get_orphaned_ccwdev_by_dev_id(css, &dev_id); |
882 | if (replacing_cdev) { | 925 | if (replacing_cdev) { |
883 | sch_attach_orphaned_device(sch, replacing_cdev); | 926 | sch_attach_orphaned_device(sch, replacing_cdev); |
884 | /* Release reference from get_orphaned_ccwdev_by_dev_id() */ | 927 | /* Release reference from get_orphaned_ccwdev_by_dev_id() */ |
885 | put_device(&cdev->dev); | 928 | put_device(&replacing_cdev->dev); |
929 | /* Release reference of subchannel from old cdev. */ | ||
930 | put_device(&sch->dev); | ||
886 | return; | 931 | return; |
887 | } | 932 | } |
888 | sch_create_and_recog_new_device(sch); | 933 | sch_create_and_recog_new_device(sch); |
934 | /* Release reference of subchannel from old cdev. */ | ||
935 | put_device(&sch->dev); | ||
889 | } | 936 | } |
890 | 937 | ||
891 | /* | 938 | /* |
@@ -903,6 +950,14 @@ io_subchannel_register(struct work_struct *work) | |||
903 | priv = container_of(work, struct ccw_device_private, kick_work); | 950 | priv = container_of(work, struct ccw_device_private, kick_work); |
904 | cdev = priv->cdev; | 951 | cdev = priv->cdev; |
905 | sch = to_subchannel(cdev->dev.parent); | 952 | sch = to_subchannel(cdev->dev.parent); |
953 | /* | ||
954 | * Check if subchannel is still registered. It may have become | ||
955 | * unregistered if a machine check hit us after finishing | ||
956 | * device recognition but before the register work could be | ||
957 | * queued. | ||
958 | */ | ||
959 | if (!device_is_registered(&sch->dev)) | ||
960 | goto out_err; | ||
906 | css_update_ssd_info(sch); | 961 | css_update_ssd_info(sch); |
907 | /* | 962 | /* |
908 | * io_subchannel_register() will also be called after device | 963 | * io_subchannel_register() will also be called after device |
@@ -910,7 +965,7 @@ io_subchannel_register(struct work_struct *work) | |||
910 | * be registered). We need to reprobe since we may now have sense id | 965 | * be registered). We need to reprobe since we may now have sense id |
911 | * information. | 966 | * information. |
912 | */ | 967 | */ |
913 | if (klist_node_attached(&cdev->dev.knode_parent)) { | 968 | if (device_is_registered(&cdev->dev)) { |
914 | if (!cdev->drv) { | 969 | if (!cdev->drv) { |
915 | ret = device_reprobe(&cdev->dev); | 970 | ret = device_reprobe(&cdev->dev); |
916 | if (ret) | 971 | if (ret) |
@@ -934,22 +989,19 @@ io_subchannel_register(struct work_struct *work) | |||
934 | CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", | 989 | CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n", |
935 | cdev->private->dev_id.ssid, | 990 | cdev->private->dev_id.ssid, |
936 | cdev->private->dev_id.devno, ret); | 991 | cdev->private->dev_id.devno, ret); |
937 | put_device(&cdev->dev); | ||
938 | spin_lock_irqsave(sch->lock, flags); | 992 | spin_lock_irqsave(sch->lock, flags); |
939 | sch_set_cdev(sch, NULL); | 993 | sch_set_cdev(sch, NULL); |
940 | spin_unlock_irqrestore(sch->lock, flags); | 994 | spin_unlock_irqrestore(sch->lock, flags); |
941 | kfree (cdev->private); | 995 | /* Release initial device reference. */ |
942 | kfree (cdev); | 996 | put_device(&cdev->dev); |
943 | put_device(&sch->dev); | 997 | goto out_err; |
944 | if (atomic_dec_and_test(&ccw_device_init_count)) | ||
945 | wake_up(&ccw_device_init_wq); | ||
946 | return; | ||
947 | } | 998 | } |
948 | put_device(&cdev->dev); | ||
949 | out: | 999 | out: |
950 | cdev->private->flags.recog_done = 1; | 1000 | cdev->private->flags.recog_done = 1; |
951 | put_device(&sch->dev); | ||
952 | wake_up(&cdev->private->wait_q); | 1001 | wake_up(&cdev->private->wait_q); |
1002 | out_err: | ||
1003 | /* Release reference for workqueue processing. */ | ||
1004 | put_device(&cdev->dev); | ||
953 | if (atomic_dec_and_test(&ccw_device_init_count)) | 1005 | if (atomic_dec_and_test(&ccw_device_init_count)) |
954 | wake_up(&ccw_device_init_wq); | 1006 | wake_up(&ccw_device_init_wq); |
955 | } | 1007 | } |
@@ -968,8 +1020,8 @@ static void ccw_device_call_sch_unregister(struct work_struct *work) | |||
968 | sch = to_subchannel(cdev->dev.parent); | 1020 | sch = to_subchannel(cdev->dev.parent); |
969 | css_sch_device_unregister(sch); | 1021 | css_sch_device_unregister(sch); |
970 | /* Reset intparm to zeroes. */ | 1022 | /* Reset intparm to zeroes. */ |
971 | sch->schib.pmcw.intparm = 0; | 1023 | sch->config.intparm = 0; |
972 | cio_modify(sch); | 1024 | cio_commit_config(sch); |
973 | /* Release cdev reference for workqueue processing.*/ | 1025 | /* Release cdev reference for workqueue processing.*/ |
974 | put_device(&cdev->dev); | 1026 | put_device(&cdev->dev); |
975 | /* Release subchannel reference for local processing. */ | 1027 | /* Release subchannel reference for local processing. */ |
@@ -998,8 +1050,6 @@ io_subchannel_recog_done(struct ccw_device *cdev) | |||
998 | PREPARE_WORK(&cdev->private->kick_work, | 1050 | PREPARE_WORK(&cdev->private->kick_work, |
999 | ccw_device_call_sch_unregister); | 1051 | ccw_device_call_sch_unregister); |
1000 | queue_work(slow_path_wq, &cdev->private->kick_work); | 1052 | queue_work(slow_path_wq, &cdev->private->kick_work); |
1001 | /* Release subchannel reference for asynchronous recognition. */ | ||
1002 | put_device(&sch->dev); | ||
1003 | if (atomic_dec_and_test(&ccw_device_init_count)) | 1053 | if (atomic_dec_and_test(&ccw_device_init_count)) |
1004 | wake_up(&ccw_device_init_wq); | 1054 | wake_up(&ccw_device_init_wq); |
1005 | break; | 1055 | break; |
@@ -1070,10 +1120,15 @@ static void ccw_device_move_to_sch(struct work_struct *work) | |||
1070 | priv = container_of(work, struct ccw_device_private, kick_work); | 1120 | priv = container_of(work, struct ccw_device_private, kick_work); |
1071 | sch = priv->sch; | 1121 | sch = priv->sch; |
1072 | cdev = priv->cdev; | 1122 | cdev = priv->cdev; |
1073 | former_parent = ccw_device_is_orphan(cdev) ? | 1123 | former_parent = to_subchannel(cdev->dev.parent); |
1074 | NULL : to_subchannel(get_device(cdev->dev.parent)); | 1124 | /* Get reference for new parent. */ |
1125 | if (!get_device(&sch->dev)) | ||
1126 | return; | ||
1075 | mutex_lock(&sch->reg_mutex); | 1127 | mutex_lock(&sch->reg_mutex); |
1076 | /* Try to move the ccw device to its new subchannel. */ | 1128 | /* |
1129 | * Try to move the ccw device to its new subchannel. | ||
1130 | * Note: device_move() changes cdev->dev.parent | ||
1131 | */ | ||
1077 | rc = device_move(&cdev->dev, &sch->dev); | 1132 | rc = device_move(&cdev->dev, &sch->dev); |
1078 | mutex_unlock(&sch->reg_mutex); | 1133 | mutex_unlock(&sch->reg_mutex); |
1079 | if (rc) { | 1134 | if (rc) { |
@@ -1083,21 +1138,23 @@ static void ccw_device_move_to_sch(struct work_struct *work) | |||
1083 | cdev->private->dev_id.devno, sch->schid.ssid, | 1138 | cdev->private->dev_id.devno, sch->schid.ssid, |
1084 | sch->schid.sch_no, rc); | 1139 | sch->schid.sch_no, rc); |
1085 | css_sch_device_unregister(sch); | 1140 | css_sch_device_unregister(sch); |
1141 | /* Put reference for new parent again. */ | ||
1142 | put_device(&sch->dev); | ||
1086 | goto out; | 1143 | goto out; |
1087 | } | 1144 | } |
1088 | if (former_parent) { | 1145 | if (!sch_is_pseudo_sch(former_parent)) { |
1089 | spin_lock_irq(former_parent->lock); | 1146 | spin_lock_irq(former_parent->lock); |
1090 | sch_set_cdev(former_parent, NULL); | 1147 | sch_set_cdev(former_parent, NULL); |
1091 | spin_unlock_irq(former_parent->lock); | 1148 | spin_unlock_irq(former_parent->lock); |
1092 | css_sch_device_unregister(former_parent); | 1149 | css_sch_device_unregister(former_parent); |
1093 | /* Reset intparm to zeroes. */ | 1150 | /* Reset intparm to zeroes. */ |
1094 | former_parent->schib.pmcw.intparm = 0; | 1151 | former_parent->config.intparm = 0; |
1095 | cio_modify(former_parent); | 1152 | cio_commit_config(former_parent); |
1096 | } | 1153 | } |
1097 | sch_attach_device(sch, cdev); | 1154 | sch_attach_device(sch, cdev); |
1098 | out: | 1155 | out: |
1099 | if (former_parent) | 1156 | /* Put reference for old parent. */ |
1100 | put_device(&former_parent->dev); | 1157 | put_device(&former_parent->dev); |
1101 | put_device(&cdev->dev); | 1158 | put_device(&cdev->dev); |
1102 | } | 1159 | } |
1103 | 1160 | ||
@@ -1113,6 +1170,15 @@ static void io_subchannel_irq(struct subchannel *sch) | |||
1113 | dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); | 1170 | dev_fsm_event(cdev, DEV_EVENT_INTERRUPT); |
1114 | } | 1171 | } |
1115 | 1172 | ||
1173 | void io_subchannel_init_config(struct subchannel *sch) | ||
1174 | { | ||
1175 | memset(&sch->config, 0, sizeof(sch->config)); | ||
1176 | sch->config.csense = 1; | ||
1177 | /* Use subchannel mp mode when there is more than 1 installed CHPID. */ | ||
1178 | if ((sch->schib.pmcw.pim & (sch->schib.pmcw.pim - 1)) != 0) | ||
1179 | sch->config.mp = 1; | ||
1180 | } | ||
1181 | |||
1116 | static void io_subchannel_init_fields(struct subchannel *sch) | 1182 | static void io_subchannel_init_fields(struct subchannel *sch) |
1117 | { | 1183 | { |
1118 | if (cio_is_console(sch->schid)) | 1184 | if (cio_is_console(sch->schid)) |
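The multipath check in io_subchannel_init_config() relies on pim being a bit mask of installed CHPIDs, so pim & (pim - 1) is non-zero exactly when more than one bit is set. A worked example with illustrative values:

	#include <linux/types.h>

	/* 0x80: 0x80 & 0x7f == 0x00, a single installed CHPID, no mp mode.
	 * 0xc0: 0xc0 & 0xbf == 0x80, at least two CHPIDs, enable mp mode. */
	static bool example_needs_mp_mode(u8 pim)
	{
		return (pim & (pim - 1)) != 0;
	}
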
@@ -1127,18 +1193,34 @@ static void io_subchannel_init_fields(struct subchannel *sch) | |||
1127 | sch->schib.pmcw.dev, sch->schid.ssid, | 1193 | sch->schib.pmcw.dev, sch->schid.ssid, |
1128 | sch->schid.sch_no, sch->schib.pmcw.pim, | 1194 | sch->schid.sch_no, sch->schib.pmcw.pim, |
1129 | sch->schib.pmcw.pam, sch->schib.pmcw.pom); | 1195 | sch->schib.pmcw.pam, sch->schib.pmcw.pom); |
1130 | /* Initially set up some fields in the pmcw. */ | 1196 | |
1131 | sch->schib.pmcw.ena = 0; | 1197 | io_subchannel_init_config(sch); |
1132 | sch->schib.pmcw.csense = 1; /* concurrent sense */ | ||
1133 | if ((sch->lpm & (sch->lpm - 1)) != 0) | ||
1134 | sch->schib.pmcw.mp = 1; /* multipath mode */ | ||
1135 | /* clean up possible residual cmf stuff */ | ||
1136 | sch->schib.pmcw.mme = 0; | ||
1137 | sch->schib.pmcw.mbfc = 0; | ||
1138 | sch->schib.pmcw.mbi = 0; | ||
1139 | sch->schib.mba = 0; | ||
1140 | } | 1198 | } |
1141 | 1199 | ||
1200 | static void io_subchannel_do_unreg(struct work_struct *work) | ||
1201 | { | ||
1202 | struct subchannel *sch; | ||
1203 | |||
1204 | sch = container_of(work, struct subchannel, work); | ||
1205 | css_sch_device_unregister(sch); | ||
1206 | /* Reset intparm to zeroes. */ | ||
1207 | sch->config.intparm = 0; | ||
1208 | cio_commit_config(sch); | ||
1209 | put_device(&sch->dev); | ||
1210 | } | ||
1211 | |||
1212 | /* Schedule unregister if we have no cdev. */ | ||
1213 | static void io_subchannel_schedule_removal(struct subchannel *sch) | ||
1214 | { | ||
1215 | get_device(&sch->dev); | ||
1216 | INIT_WORK(&sch->work, io_subchannel_do_unreg); | ||
1217 | queue_work(slow_path_wq, &sch->work); | ||
1218 | } | ||
1219 | |||
1220 | /* | ||
1221 | * Note: We always return 0 so that we bind to the device even on error. | ||
1222 | * This is needed so that our remove function is called on unregister. | ||
1223 | */ | ||
1142 | static int io_subchannel_probe(struct subchannel *sch) | 1224 | static int io_subchannel_probe(struct subchannel *sch) |
1143 | { | 1225 | { |
1144 | struct ccw_device *cdev; | 1226 | struct ccw_device *cdev; |
@@ -1168,9 +1250,8 @@ static int io_subchannel_probe(struct subchannel *sch) | |||
1168 | ccw_device_register(cdev); | 1250 | ccw_device_register(cdev); |
1169 | /* | 1251 | /* |
1170 | * Check if the device is already online. If it is | 1252 | * Check if the device is already online. If it is |
1171 | * the reference count needs to be corrected | 1253 | * the reference count needs to be corrected since we |
1172 | * (see ccw_device_online and css_init_done for the | 1254 | * didn't obtain a reference in ccw_device_set_online. |
1173 | * ugly details). | ||
1174 | */ | 1255 | */ |
1175 | if (cdev->private->state != DEV_STATE_NOT_OPER && | 1256 | if (cdev->private->state != DEV_STATE_NOT_OPER && |
1176 | cdev->private->state != DEV_STATE_OFFLINE && | 1257 | cdev->private->state != DEV_STATE_OFFLINE && |
@@ -1179,23 +1260,24 @@ static int io_subchannel_probe(struct subchannel *sch) | |||
1179 | return 0; | 1260 | return 0; |
1180 | } | 1261 | } |
1181 | io_subchannel_init_fields(sch); | 1262 | io_subchannel_init_fields(sch); |
1182 | /* | 1263 | rc = cio_commit_config(sch); |
1183 | * First check if a fitting device may be found amongst the | 1264 | if (rc) |
1184 | * disconnected devices or in the orphanage. | 1265 | goto out_schedule; |
1185 | */ | ||
1186 | dev_id.devno = sch->schib.pmcw.dev; | ||
1187 | dev_id.ssid = sch->schid.ssid; | ||
1188 | rc = sysfs_create_group(&sch->dev.kobj, | 1266 | rc = sysfs_create_group(&sch->dev.kobj, |
1189 | &io_subchannel_attr_group); | 1267 | &io_subchannel_attr_group); |
1190 | if (rc) | 1268 | if (rc) |
1191 | return rc; | 1269 | goto out_schedule; |
1192 | /* Allocate I/O subchannel private data. */ | 1270 | /* Allocate I/O subchannel private data. */ |
1193 | sch->private = kzalloc(sizeof(struct io_subchannel_private), | 1271 | sch->private = kzalloc(sizeof(struct io_subchannel_private), |
1194 | GFP_KERNEL | GFP_DMA); | 1272 | GFP_KERNEL | GFP_DMA); |
1195 | if (!sch->private) { | 1273 | if (!sch->private) |
1196 | rc = -ENOMEM; | ||
1197 | goto out_err; | 1274 | goto out_err; |
1198 | } | 1275 | /* |
1276 | * First check if a fitting device may be found amongst the | ||
1277 | * disconnected devices or in the orphanage. | ||
1278 | */ | ||
1279 | dev_id.devno = sch->schib.pmcw.dev; | ||
1280 | dev_id.ssid = sch->schid.ssid; | ||
1199 | cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); | 1281 | cdev = get_disc_ccwdev_by_dev_id(&dev_id, NULL); |
1200 | if (!cdev) | 1282 | if (!cdev) |
1201 | cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), | 1283 | cdev = get_orphaned_ccwdev_by_dev_id(to_css(sch->dev.parent), |
@@ -1213,24 +1295,21 @@ static int io_subchannel_probe(struct subchannel *sch) | |||
1213 | return 0; | 1295 | return 0; |
1214 | } | 1296 | } |
1215 | cdev = io_subchannel_create_ccwdev(sch); | 1297 | cdev = io_subchannel_create_ccwdev(sch); |
1216 | if (IS_ERR(cdev)) { | 1298 | if (IS_ERR(cdev)) |
1217 | rc = PTR_ERR(cdev); | ||
1218 | goto out_err; | 1299 | goto out_err; |
1219 | } | ||
1220 | rc = io_subchannel_recog(cdev, sch); | 1300 | rc = io_subchannel_recog(cdev, sch); |
1221 | if (rc) { | 1301 | if (rc) { |
1222 | spin_lock_irqsave(sch->lock, flags); | 1302 | spin_lock_irqsave(sch->lock, flags); |
1223 | sch_set_cdev(sch, NULL); | 1303 | io_subchannel_recog_done(cdev); |
1224 | spin_unlock_irqrestore(sch->lock, flags); | 1304 | spin_unlock_irqrestore(sch->lock, flags); |
1225 | if (cdev->dev.release) | ||
1226 | cdev->dev.release(&cdev->dev); | ||
1227 | goto out_err; | ||
1228 | } | 1305 | } |
1229 | return 0; | 1306 | return 0; |
1230 | out_err: | 1307 | out_err: |
1231 | kfree(sch->private); | 1308 | kfree(sch->private); |
1232 | sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); | 1309 | sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group); |
1233 | return rc; | 1310 | out_schedule: |
1311 | io_subchannel_schedule_removal(sch); | ||
1312 | return 0; | ||
1234 | } | 1313 | } |
1235 | 1314 | ||
1236 | static int | 1315 | static int |
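The rewritten io_subchannel_probe() never reports an error to the driver core; as its new comment states, binding must succeed so that the remove callback still runs. Failures instead go through io_subchannel_schedule_removal(), which unregisters the subchannel from the slow-path workqueue. The policy, condensed into a sketch as it would sit inside device.c (example_probe_policy is invented and collapses several probe steps):

	/* Sketch: on any setup failure, keep the probe 'successful' and queue the
	 * subchannel for deferred removal instead of returning an error. */
	static int example_probe_policy(struct subchannel *sch)
	{
		if (cio_commit_config(sch)) {
			io_subchannel_schedule_removal(sch);	/* takes its own reference */
			return 0;
		}
		/* ... sysfs group, private data, ccw device recognition ... */
		return 0;
	}
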
@@ -1275,10 +1354,7 @@ static void io_subchannel_verify(struct subchannel *sch) | |||
1275 | 1354 | ||
1276 | static int check_for_io_on_path(struct subchannel *sch, int mask) | 1355 | static int check_for_io_on_path(struct subchannel *sch, int mask) |
1277 | { | 1356 | { |
1278 | int cc; | 1357 | if (cio_update_schib(sch)) |
1279 | |||
1280 | cc = stsch(sch->schid, &sch->schib); | ||
1281 | if (cc) | ||
1282 | return 0; | 1358 | return 0; |
1283 | if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask) | 1359 | if (scsw_actl(&sch->schib.scsw) && sch->schib.pmcw.lpum == mask) |
1284 | return 1; | 1360 | return 1; |
@@ -1347,15 +1423,13 @@ static int io_subchannel_chp_event(struct subchannel *sch, | |||
1347 | io_subchannel_verify(sch); | 1423 | io_subchannel_verify(sch); |
1348 | break; | 1424 | break; |
1349 | case CHP_OFFLINE: | 1425 | case CHP_OFFLINE: |
1350 | if (stsch(sch->schid, &sch->schib)) | 1426 | if (cio_update_schib(sch)) |
1351 | return -ENXIO; | ||
1352 | if (!css_sch_is_valid(&sch->schib)) | ||
1353 | return -ENODEV; | 1427 | return -ENODEV; |
1354 | io_subchannel_terminate_path(sch, mask); | 1428 | io_subchannel_terminate_path(sch, mask); |
1355 | break; | 1429 | break; |
1356 | case CHP_ONLINE: | 1430 | case CHP_ONLINE: |
1357 | if (stsch(sch->schid, &sch->schib)) | 1431 | if (cio_update_schib(sch)) |
1358 | return -ENXIO; | 1432 | return -ENODEV; |
1359 | sch->lpm |= mask & sch->opm; | 1433 | sch->lpm |= mask & sch->opm; |
1360 | io_subchannel_verify(sch); | 1434 | io_subchannel_verify(sch); |
1361 | break; | 1435 | break; |
@@ -1610,8 +1684,8 @@ static int io_subchannel_sch_event(struct subchannel *sch, int slow) | |||
1610 | spin_lock_irqsave(sch->lock, flags); | 1684 | spin_lock_irqsave(sch->lock, flags); |
1611 | 1685 | ||
1612 | /* Reset intparm to zeroes. */ | 1686 | /* Reset intparm to zeroes. */ |
1613 | sch->schib.pmcw.intparm = 0; | 1687 | sch->config.intparm = 0; |
1614 | cio_modify(sch); | 1688 | cio_commit_config(sch); |
1615 | break; | 1689 | break; |
1616 | case REPROBE: | 1690 | case REPROBE: |
1617 | ccw_device_trigger_reprobe(cdev); | 1691 | ccw_device_trigger_reprobe(cdev); |
@@ -1652,6 +1726,9 @@ static int ccw_device_console_enable(struct ccw_device *cdev, | |||
1652 | sch->private = cio_get_console_priv(); | 1726 | sch->private = cio_get_console_priv(); |
1653 | memset(sch->private, 0, sizeof(struct io_subchannel_private)); | 1727 | memset(sch->private, 0, sizeof(struct io_subchannel_private)); |
1654 | io_subchannel_init_fields(sch); | 1728 | io_subchannel_init_fields(sch); |
1729 | rc = cio_commit_config(sch); | ||
1730 | if (rc) | ||
1731 | return rc; | ||
1655 | sch->driver = &io_subchannel_driver; | 1732 | sch->driver = &io_subchannel_driver; |
1656 | /* Initialize the ccw_device structure. */ | 1733 | /* Initialize the ccw_device structure. */ |
1657 | cdev->dev.parent= &sch->dev; | 1734 | cdev->dev.parent= &sch->dev; |
@@ -1723,7 +1800,7 @@ __ccwdev_check_busid(struct device *dev, void *id) | |||
1723 | 1800 | ||
1724 | bus_id = id; | 1801 | bus_id = id; |
1725 | 1802 | ||
1726 | return (strncmp(bus_id, dev_name(dev), BUS_ID_SIZE) == 0); | 1803 | return (strcmp(bus_id, dev_name(dev)) == 0); |
1727 | } | 1804 | } |
1728 | 1805 | ||
1729 | 1806 | ||
@@ -1806,6 +1883,8 @@ ccw_device_remove (struct device *dev) | |||
1806 | "device 0.%x.%04x\n", | 1883 | "device 0.%x.%04x\n", |
1807 | ret, cdev->private->dev_id.ssid, | 1884 | ret, cdev->private->dev_id.ssid, |
1808 | cdev->private->dev_id.devno); | 1885 | cdev->private->dev_id.devno); |
1886 | /* Give up reference obtained in ccw_device_set_online(). */ | ||
1887 | put_device(&cdev->dev); | ||
1809 | } | 1888 | } |
1810 | ccw_device_set_timeout(cdev, 0); | 1889 | ccw_device_set_timeout(cdev, 0); |
1811 | cdev->drv = NULL; | 1890 | cdev->drv = NULL; |
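The set_online, set_offline and remove hunks above all manage a single extra reference that is held while a ccw device is online. Summarized from the driver's point of view (only the exported set_online/set_offline functions are real; example_cycle_device is invented):

	#include <asm/ccwdev.h>

	/* Sketch: ccw_device_set_online() now takes a reference that is dropped
	 * again on its own error paths, in ccw_device_set_offline(), or in
	 * ccw_device_remove() if the driver is unbound while still online. */
	static int example_cycle_device(struct ccw_device *cdev)
	{
		int ret;

		ret = ccw_device_set_online(cdev);
		if (ret)
			return ret;	/* the reference was already released internally */
		return ccw_device_set_offline(cdev);
	}
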
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 104ed669db43..0f2e63ea48de 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h | |||
@@ -76,6 +76,7 @@ extern wait_queue_head_t ccw_device_init_wq; | |||
76 | extern atomic_t ccw_device_init_count; | 76 | extern atomic_t ccw_device_init_count; |
77 | 77 | ||
78 | void io_subchannel_recog_done(struct ccw_device *cdev); | 78 | void io_subchannel_recog_done(struct ccw_device *cdev); |
79 | void io_subchannel_init_config(struct subchannel *sch); | ||
79 | 80 | ||
80 | int ccw_device_cancel_halt_clear(struct ccw_device *); | 81 | int ccw_device_cancel_halt_clear(struct ccw_device *); |
81 | 82 | ||
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index 10bc03940fb3..8df5eaafc5ab 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -140,8 +140,7 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) | |||
140 | int ret; | 140 | int ret; |
141 | 141 | ||
142 | sch = to_subchannel(cdev->dev.parent); | 142 | sch = to_subchannel(cdev->dev.parent); |
143 | ret = stsch(sch->schid, &sch->schib); | 143 | if (cio_update_schib(sch)) |
144 | if (ret || !sch->schib.pmcw.dnv) | ||
145 | return -ENODEV; | 144 | return -ENODEV; |
146 | if (!sch->schib.pmcw.ena) | 145 | if (!sch->schib.pmcw.ena) |
147 | /* Not operational -> done. */ | 146 | /* Not operational -> done. */ |
@@ -245,11 +244,13 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) | |||
245 | * through ssch() and the path information is up to date. | 244 | * through ssch() and the path information is up to date. |
246 | */ | 245 | */ |
247 | old_lpm = sch->lpm; | 246 | old_lpm = sch->lpm; |
248 | stsch(sch->schid, &sch->schib); | 247 | |
249 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | ||
250 | /* Check since device may again have become not operational. */ | 248 | /* Check since device may again have become not operational. */ |
251 | if (!sch->schib.pmcw.dnv) | 249 | if (cio_update_schib(sch)) |
252 | state = DEV_STATE_NOT_OPER; | 250 | state = DEV_STATE_NOT_OPER; |
251 | else | ||
252 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | ||
253 | |||
253 | if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) | 254 | if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) |
254 | /* Force reprobe on all chpids. */ | 255 | /* Force reprobe on all chpids. */ |
255 | old_lpm = 0; | 256 | old_lpm = 0; |
@@ -399,9 +400,6 @@ ccw_device_done(struct ccw_device *cdev, int state) | |||
399 | ccw_device_oper_notify(cdev); | 400 | ccw_device_oper_notify(cdev); |
400 | } | 401 | } |
401 | wake_up(&cdev->private->wait_q); | 402 | wake_up(&cdev->private->wait_q); |
402 | |||
403 | if (css_init_done && state != DEV_STATE_ONLINE) | ||
404 | put_device (&cdev->dev); | ||
405 | } | 403 | } |
406 | 404 | ||
407 | static int cmp_pgid(struct pgid *p1, struct pgid *p2) | 405 | static int cmp_pgid(struct pgid *p1, struct pgid *p2) |
@@ -552,7 +550,11 @@ ccw_device_verify_done(struct ccw_device *cdev, int err) | |||
552 | 550 | ||
553 | sch = to_subchannel(cdev->dev.parent); | 551 | sch = to_subchannel(cdev->dev.parent); |
554 | /* Update schib - pom may have changed. */ | 552 | /* Update schib - pom may have changed. */ |
555 | stsch(sch->schid, &sch->schib); | 553 | if (cio_update_schib(sch)) { |
554 | cdev->private->flags.donotify = 0; | ||
555 | ccw_device_done(cdev, DEV_STATE_NOT_OPER); | ||
556 | return; | ||
557 | } | ||
556 | /* Update lpm with verified path mask. */ | 558 | /* Update lpm with verified path mask. */ |
557 | sch->lpm = sch->vpm; | 559 | sch->lpm = sch->vpm; |
558 | /* Repeat path verification? */ | 560 | /* Repeat path verification? */ |
@@ -611,8 +613,6 @@ ccw_device_online(struct ccw_device *cdev) | |||
611 | (cdev->private->state != DEV_STATE_BOXED)) | 613 | (cdev->private->state != DEV_STATE_BOXED)) |
612 | return -EINVAL; | 614 | return -EINVAL; |
613 | sch = to_subchannel(cdev->dev.parent); | 615 | sch = to_subchannel(cdev->dev.parent); |
614 | if (css_init_done && !get_device(&cdev->dev)) | ||
615 | return -ENODEV; | ||
616 | ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); | 616 | ret = cio_enable_subchannel(sch, (u32)(addr_t)sch); |
617 | if (ret != 0) { | 617 | if (ret != 0) { |
618 | /* Couldn't enable the subchannel for i/o. Sick device. */ | 618 | /* Couldn't enable the subchannel for i/o. Sick device. */ |
@@ -672,7 +672,7 @@ ccw_device_offline(struct ccw_device *cdev) | |||
672 | return 0; | 672 | return 0; |
673 | } | 673 | } |
674 | sch = to_subchannel(cdev->dev.parent); | 674 | sch = to_subchannel(cdev->dev.parent); |
675 | if (stsch(sch->schid, &sch->schib) || !sch->schib.pmcw.dnv) | 675 | if (cio_update_schib(sch)) |
676 | return -ENODEV; | 676 | return -ENODEV; |
677 | if (scsw_actl(&sch->schib.scsw) != 0) | 677 | if (scsw_actl(&sch->schib.scsw) != 0) |
678 | return -EBUSY; | 678 | return -EBUSY; |
@@ -750,7 +750,10 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) | |||
750 | * Since we might not just be coming from an interrupt from the | 750 | * Since we might not just be coming from an interrupt from the |
751 | * subchannel we have to update the schib. | 751 | * subchannel we have to update the schib. |
752 | */ | 752 | */ |
753 | stsch(sch->schid, &sch->schib); | 753 | if (cio_update_schib(sch)) { |
754 | ccw_device_verify_done(cdev, -ENODEV); | ||
755 | return; | ||
756 | } | ||
754 | 757 | ||
755 | if (scsw_actl(&sch->schib.scsw) != 0 || | 758 | if (scsw_actl(&sch->schib.scsw) != 0 || |
756 | (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || | 759 | (scsw_stctl(&sch->schib.scsw) & SCSW_STCTL_STATUS_PEND) || |
@@ -1016,20 +1019,21 @@ void ccw_device_trigger_reprobe(struct ccw_device *cdev) | |||
1016 | 1019 | ||
1017 | sch = to_subchannel(cdev->dev.parent); | 1020 | sch = to_subchannel(cdev->dev.parent); |
1018 | /* Update some values. */ | 1021 | /* Update some values. */ |
1019 | if (stsch(sch->schid, &sch->schib)) | 1022 | if (cio_update_schib(sch)) |
1020 | return; | ||
1021 | if (!sch->schib.pmcw.dnv) | ||
1022 | return; | 1023 | return; |
1023 | /* | 1024 | /* |
1024 | * The pim, pam, pom values may not be accurate, but they are the best | 1025 | * The pim, pam, pom values may not be accurate, but they are the best |
1025 | * we have before performing device selection :/ | 1026 | * we have before performing device selection :/ |
1026 | */ | 1027 | */ |
1027 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | 1028 | sch->lpm = sch->schib.pmcw.pam & sch->opm; |
1028 | /* Re-set some bits in the pmcw that were lost. */ | 1029 | /* |
1029 | sch->schib.pmcw.csense = 1; | 1030 | * Use the initial configuration since we can't be sure that the old |
1030 | sch->schib.pmcw.ena = 0; | 1031 | * paths are valid. |
1031 | if ((sch->lpm & (sch->lpm - 1)) != 0) | 1032 | */ |
1032 | sch->schib.pmcw.mp = 1; | 1033 | io_subchannel_init_config(sch); |
1034 | if (cio_commit_config(sch)) | ||
1035 | return; | ||
1036 | |||
1033 | /* We should also udate ssd info, but this has to wait. */ | 1037 | /* We should also udate ssd info, but this has to wait. */ |
1034 | /* Check if this is another device which appeared on the same sch. */ | 1038 | /* Check if this is another device which appeared on the same sch. */ |
1035 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { | 1039 | if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) { |
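The device_fsm.c hunks treat a failed cio_update_schib() as 'device gone' and finish the pending operation with -ENODEV instead of working on stale data. The shape of that handling, as a sketch placed inside device_fsm.c (example_verify_step is invented; ccw_device_verify_done(), to_subchannel() and the subchannel fields are existing code):

	/* Sketch: a verification step that aborts cleanly when the subchannel
	 * can no longer be read. */
	static void example_verify_step(struct ccw_device *cdev)
	{
		struct subchannel *sch = to_subchannel(cdev->dev.parent);

		if (cio_update_schib(sch)) {
			ccw_device_verify_done(cdev, -ENODEV);
			return;
		}
		sch->lpm = sch->vpm;	/* adopt the verified path mask */
	}
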
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 86bc94eb607f..fc5ca1dd52b3 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c | |||
@@ -504,7 +504,7 @@ ccw_device_verify_start(struct ccw_device *cdev) | |||
504 | sch->vpm = 0; | 504 | sch->vpm = 0; |
505 | 505 | ||
506 | /* Get current pam. */ | 506 | /* Get current pam. */ |
507 | if (stsch(sch->schid, &sch->schib)) { | 507 | if (cio_update_schib(sch)) { |
508 | ccw_device_verify_done(cdev, -ENODEV); | 508 | ccw_device_verify_done(cdev, -ENODEV); |
509 | return; | 509 | return; |
510 | } | 510 | } |
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 1b03c5423be2..5814dbee2410 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c | |||
@@ -56,7 +56,8 @@ ccw_device_path_notoper(struct ccw_device *cdev) | |||
56 | struct subchannel *sch; | 56 | struct subchannel *sch; |
57 | 57 | ||
58 | sch = to_subchannel(cdev->dev.parent); | 58 | sch = to_subchannel(cdev->dev.parent); |
59 | stsch (sch->schid, &sch->schib); | 59 | if (cio_update_schib(sch)) |
60 | goto doverify; | ||
60 | 61 | ||
61 | CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are " | 62 | CIO_MSG_EVENT(0, "%s(0.%x.%04x) - path(s) %02x are " |
62 | "not operational \n", __func__, | 63 | "not operational \n", __func__, |
@@ -64,6 +65,7 @@ ccw_device_path_notoper(struct ccw_device *cdev) | |||
64 | sch->schib.pmcw.pnom); | 65 | sch->schib.pmcw.pnom); |
65 | 66 | ||
66 | sch->lpm &= ~sch->schib.pmcw.pnom; | 67 | sch->lpm &= ~sch->schib.pmcw.pnom; |
68 | doverify: | ||
67 | cdev->private->flags.doverify = 1; | 69 | cdev->private->flags.doverify = 1; |
68 | } | 70 | } |
69 | 71 | ||
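For readability, the changed ccw_device_path_notoper() above condenses to the control flow sketched below; the CIO_MSG_EVENT logging is omitted and this is a paraphrase, not the patched source.

static void sketch_path_notoper(struct ccw_device *cdev)
{
	struct subchannel *sch = to_subchannel(cdev->dev.parent);

	if (cio_update_schib(sch) == 0) {
		/* schib is current: drop the paths reported not operational */
		sch->lpm &= ~sch->schib.pmcw.pnom;
	}
	/* whether or not the update worked, ask for path re-verification */
	cdev->private->flags.doverify = 1;
}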
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h index e3ea1d5f2810..42f2b09631b6 100644 --- a/drivers/s390/cio/qdio.h +++ b/drivers/s390/cio/qdio.h | |||
@@ -10,10 +10,10 @@ | |||
10 | 10 | ||
11 | #include <asm/page.h> | 11 | #include <asm/page.h> |
12 | #include <asm/schid.h> | 12 | #include <asm/schid.h> |
13 | #include <asm/debug.h> | ||
13 | #include "chsc.h" | 14 | #include "chsc.h" |
14 | 15 | ||
15 | #define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */ | 16 | #define QDIO_BUSY_BIT_PATIENCE 100 /* 100 microseconds */ |
16 | #define QDIO_BUSY_BIT_GIVE_UP 2000000 /* 2 seconds = eternity */ | ||
17 | #define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */ | 17 | #define QDIO_INPUT_THRESHOLD 500 /* 500 microseconds */ |
18 | 18 | ||
19 | /* | 19 | /* |
@@ -111,12 +111,12 @@ static inline int do_sqbs(u64 token, unsigned char state, int queue, | |||
111 | } | 111 | } |
112 | 112 | ||
113 | static inline int do_eqbs(u64 token, unsigned char *state, int queue, | 113 | static inline int do_eqbs(u64 token, unsigned char *state, int queue, |
114 | int *start, int *count) | 114 | int *start, int *count, int ack) |
115 | { | 115 | { |
116 | register unsigned long _ccq asm ("0") = *count; | 116 | register unsigned long _ccq asm ("0") = *count; |
117 | register unsigned long _token asm ("1") = token; | 117 | register unsigned long _token asm ("1") = token; |
118 | unsigned long _queuestart = ((unsigned long)queue << 32) | *start; | 118 | unsigned long _queuestart = ((unsigned long)queue << 32) | *start; |
119 | unsigned long _state = 0; | 119 | unsigned long _state = (unsigned long)ack << 63; |
120 | 120 | ||
121 | asm volatile( | 121 | asm volatile( |
122 | " .insn rrf,0xB99c0000,%1,%2,0,0" | 122 | " .insn rrf,0xB99c0000,%1,%2,0,0" |
@@ -133,7 +133,7 @@ static inline int do_eqbs(u64 token, unsigned char *state, int queue, | |||
133 | static inline int do_sqbs(u64 token, unsigned char state, int queue, | 133 | static inline int do_sqbs(u64 token, unsigned char state, int queue, |
134 | int *start, int *count) { return 0; } | 134 | int *start, int *count) { return 0; } |
135 | static inline int do_eqbs(u64 token, unsigned char *state, int queue, | 135 | static inline int do_eqbs(u64 token, unsigned char *state, int queue, |
136 | int *start, int *count) { return 0; } | 136 | int *start, int *count, int ack) { return 0; } |
137 | #endif /* CONFIG_64BIT */ | 137 | #endif /* CONFIG_64BIT */ |
138 | 138 | ||
139 | struct qdio_irq; | 139 | struct qdio_irq; |
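The only interface change to do_eqbs() is the extra ack argument, which travels to the EQBS instruction in bit 63 of the state operand; the rest of the register setup is unchanged. The encoding, isolated below, takes only the shift from the hunk above; the helper name is made up.

/* Request auto-acknowledgement of the extracted buffers via bit 63.
 * ack is expected to be 0 or 1; other values would set unrelated bits. */
static inline unsigned long sketch_eqbs_ack_operand(int ack)
{
	return (unsigned long)ack << 63;
}

The qdio_main.c callers further down pass a literal 0 or 1, so no masking is needed at this level.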
@@ -186,20 +186,14 @@ struct qdio_input_q { | |||
186 | /* input buffer acknowledgement flag */ | 186 | /* input buffer acknowledgement flag */ |
187 | int polling; | 187 | int polling; |
188 | 188 | ||
189 | /* how many sbals are acknowledged with qebsm */ | ||
190 | int ack_count; | ||
191 | |||
189 | /* last time of noticing incoming data */ | 192 | /* last time of noticing incoming data */ |
190 | u64 timestamp; | 193 | u64 timestamp; |
191 | |||
192 | /* lock for clearing the acknowledgement */ | ||
193 | spinlock_t lock; | ||
194 | }; | 194 | }; |
195 | 195 | ||
196 | struct qdio_output_q { | 196 | struct qdio_output_q { |
197 | /* failed siga-w attempts*/ | ||
198 | atomic_t busy_siga_counter; | ||
199 | |||
200 | /* start time of busy condition */ | ||
201 | u64 timestamp; | ||
202 | |||
203 | /* PCIs are enabled for the queue */ | 197 | /* PCIs are enabled for the queue */ |
204 | int pci_out_enabled; | 198 | int pci_out_enabled; |
205 | 199 | ||
@@ -250,6 +244,7 @@ struct qdio_q { | |||
250 | 244 | ||
251 | struct qdio_irq *irq_ptr; | 245 | struct qdio_irq *irq_ptr; |
252 | struct tasklet_struct tasklet; | 246 | struct tasklet_struct tasklet; |
247 | spinlock_t lock; | ||
253 | 248 | ||
254 | /* error condition during a data transfer */ | 249 | /* error condition during a data transfer */ |
255 | unsigned int qdio_error; | 250 | unsigned int qdio_error; |
@@ -300,11 +295,13 @@ struct qdio_irq { | |||
300 | struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; | 295 | struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ]; |
301 | struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; | 296 | struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ]; |
302 | 297 | ||
298 | debug_info_t *debug_area; | ||
303 | struct mutex setup_mutex; | 299 | struct mutex setup_mutex; |
304 | }; | 300 | }; |
305 | 301 | ||
306 | /* helper functions */ | 302 | /* helper functions */ |
307 | #define queue_type(q) q->irq_ptr->qib.qfmt | 303 | #define queue_type(q) q->irq_ptr->qib.qfmt |
304 | #define SCH_NO(q) (q->irq_ptr->schid.sch_no) | ||
308 | 305 | ||
309 | #define is_thinint_irq(irq) \ | 306 | #define is_thinint_irq(irq) \ |
310 | (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ | 307 | (irq->qib.qfmt == QDIO_IQDIO_QFMT || \ |
@@ -348,10 +345,13 @@ static inline unsigned long long get_usecs(void) | |||
348 | ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK) | 345 | ((bufnr + 1) & QDIO_MAX_BUFFERS_MASK) |
349 | #define add_buf(bufnr, inc) \ | 346 | #define add_buf(bufnr, inc) \ |
350 | ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK) | 347 | ((bufnr + inc) & QDIO_MAX_BUFFERS_MASK) |
348 | #define sub_buf(bufnr, dec) \ | ||
349 | ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) | ||
351 | 350 | ||
352 | /* prototypes for thin interrupt */ | 351 | /* prototypes for thin interrupt */ |
353 | void qdio_sync_after_thinint(struct qdio_q *q); | 352 | void qdio_sync_after_thinint(struct qdio_q *q); |
354 | int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state); | 353 | int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state, |
354 | int auto_ack); | ||
355 | void qdio_check_outbound_after_thinint(struct qdio_q *q); | 355 | void qdio_check_outbound_after_thinint(struct qdio_q *q); |
356 | int qdio_inbound_q_moved(struct qdio_q *q); | 356 | int qdio_inbound_q_moved(struct qdio_q *q); |
357 | void qdio_kick_inbound_handler(struct qdio_q *q); | 357 | void qdio_kick_inbound_handler(struct qdio_q *q); |
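The new sub_buf() macro above is the mirror image of add_buf(): both rely on the queue depth being a power of two, so masking with QDIO_MAX_BUFFERS_MASK gives wrap-around arithmetic on the 128-entry buffer ring without a branch. The same idea as a stand-alone sketch, with constants chosen here for illustration rather than taken from the header:

#define SKETCH_MAX_BUFFERS	128			/* must be a power of two */
#define SKETCH_BUFFERS_MASK	(SKETCH_MAX_BUFFERS - 1)

static inline int sketch_add_buf(int bufnr, int inc)
{
	return (bufnr + inc) & SKETCH_BUFFERS_MASK;	/* 127 + 1 -> 0 */
}

static inline int sketch_sub_buf(int bufnr, int dec)
{
	return (bufnr - dec) & SKETCH_BUFFERS_MASK;	/* 0 - 1 -> 127 */
}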
@@ -378,10 +378,15 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
378 | int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, | 378 | int qdio_allocate_qs(struct qdio_irq *irq_ptr, int nr_input_qs, |
379 | int nr_output_qs); | 379 | int nr_output_qs); |
380 | void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr); | 380 | void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr); |
381 | int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, | ||
382 | struct subchannel_id *schid, | ||
383 | struct qdio_ssqd_desc *data); | ||
381 | int qdio_setup_irq(struct qdio_initialize *init_data); | 384 | int qdio_setup_irq(struct qdio_initialize *init_data); |
382 | void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, | 385 | void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, |
383 | struct ccw_device *cdev); | 386 | struct ccw_device *cdev); |
384 | void qdio_release_memory(struct qdio_irq *irq_ptr); | 387 | void qdio_release_memory(struct qdio_irq *irq_ptr); |
388 | int qdio_setup_create_sysfs(struct ccw_device *cdev); | ||
389 | void qdio_setup_destroy_sysfs(struct ccw_device *cdev); | ||
385 | int qdio_setup_init(void); | 390 | int qdio_setup_init(void); |
386 | void qdio_setup_exit(void); | 391 | void qdio_setup_exit(void); |
387 | 392 | ||
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c index f05590355be8..f8a3b6967f69 100644 --- a/drivers/s390/cio/qdio_debug.c +++ b/drivers/s390/cio/qdio_debug.c | |||
@@ -14,7 +14,7 @@ | |||
14 | #include "qdio.h" | 14 | #include "qdio.h" |
15 | 15 | ||
16 | debug_info_t *qdio_dbf_setup; | 16 | debug_info_t *qdio_dbf_setup; |
17 | debug_info_t *qdio_dbf_trace; | 17 | debug_info_t *qdio_dbf_error; |
18 | 18 | ||
19 | static struct dentry *debugfs_root; | 19 | static struct dentry *debugfs_root; |
20 | #define MAX_DEBUGFS_QUEUES 32 | 20 | #define MAX_DEBUGFS_QUEUES 32 |
@@ -22,59 +22,33 @@ static struct dentry *debugfs_queues[MAX_DEBUGFS_QUEUES] = { NULL }; | |||
22 | static DEFINE_MUTEX(debugfs_mutex); | 22 | static DEFINE_MUTEX(debugfs_mutex); |
23 | #define QDIO_DEBUGFS_NAME_LEN 40 | 23 | #define QDIO_DEBUGFS_NAME_LEN 40 |
24 | 24 | ||
25 | void qdio_allocate_do_dbf(struct qdio_initialize *init_data) | 25 | void qdio_allocate_dbf(struct qdio_initialize *init_data, |
26 | struct qdio_irq *irq_ptr) | ||
26 | { | 27 | { |
27 | char dbf_text[20]; | 28 | char text[20]; |
28 | 29 | ||
29 | sprintf(dbf_text, "qfmt:%x", init_data->q_format); | 30 | DBF_EVENT("qfmt:%1d", init_data->q_format); |
30 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 31 | DBF_HEX(init_data->adapter_name, 8); |
31 | QDIO_DBF_HEX0(0, setup, init_data->adapter_name, 8); | 32 | DBF_EVENT("qpff%4x", init_data->qib_param_field_format); |
32 | sprintf(dbf_text, "qpff%4x", init_data->qib_param_field_format); | 33 | DBF_HEX(&init_data->qib_param_field, sizeof(void *)); |
33 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 34 | DBF_HEX(&init_data->input_slib_elements, sizeof(void *)); |
34 | QDIO_DBF_HEX0(0, setup, &init_data->qib_param_field, sizeof(void *)); | 35 | DBF_HEX(&init_data->output_slib_elements, sizeof(void *)); |
35 | QDIO_DBF_HEX0(0, setup, &init_data->input_slib_elements, sizeof(void *)); | 36 | DBF_EVENT("niq:%1d noq:%1d", init_data->no_input_qs, |
36 | QDIO_DBF_HEX0(0, setup, &init_data->output_slib_elements, sizeof(void *)); | 37 | init_data->no_output_qs); |
37 | sprintf(dbf_text, "niq:%4x", init_data->no_input_qs); | 38 | DBF_HEX(&init_data->input_handler, sizeof(void *)); |
38 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 39 | DBF_HEX(&init_data->output_handler, sizeof(void *)); |
39 | sprintf(dbf_text, "noq:%4x", init_data->no_output_qs); | 40 | DBF_HEX(&init_data->int_parm, sizeof(long)); |
40 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 41 | DBF_HEX(&init_data->flags, sizeof(long)); |
41 | QDIO_DBF_HEX0(0, setup, &init_data->input_handler, sizeof(void *)); | 42 | DBF_HEX(&init_data->input_sbal_addr_array, sizeof(void *)); |
42 | QDIO_DBF_HEX0(0, setup, &init_data->output_handler, sizeof(void *)); | 43 | DBF_HEX(&init_data->output_sbal_addr_array, sizeof(void *)); |
43 | QDIO_DBF_HEX0(0, setup, &init_data->int_parm, sizeof(long)); | 44 | DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr); |
44 | QDIO_DBF_HEX0(0, setup, &init_data->flags, sizeof(long)); | 45 | |
45 | QDIO_DBF_HEX0(0, setup, &init_data->input_sbal_addr_array, sizeof(void *)); | 46 | /* allocate trace view for the interface */ |
46 | QDIO_DBF_HEX0(0, setup, &init_data->output_sbal_addr_array, sizeof(void *)); | 47 | snprintf(text, 20, "qdio_%s", dev_name(&init_data->cdev->dev)); |
47 | } | 48 | irq_ptr->debug_area = debug_register(text, 2, 1, 16); |
48 | 49 | debug_register_view(irq_ptr->debug_area, &debug_hex_ascii_view); | |
49 | static void qdio_unregister_dbf_views(void) | 50 | debug_set_level(irq_ptr->debug_area, DBF_WARN); |
50 | { | 51 | DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created"); |
51 | if (qdio_dbf_setup) | ||
52 | debug_unregister(qdio_dbf_setup); | ||
53 | if (qdio_dbf_trace) | ||
54 | debug_unregister(qdio_dbf_trace); | ||
55 | } | ||
56 | |||
57 | static int qdio_register_dbf_views(void) | ||
58 | { | ||
59 | qdio_dbf_setup = debug_register("qdio_setup", QDIO_DBF_SETUP_PAGES, | ||
60 | QDIO_DBF_SETUP_NR_AREAS, | ||
61 | QDIO_DBF_SETUP_LEN); | ||
62 | if (!qdio_dbf_setup) | ||
63 | goto oom; | ||
64 | debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); | ||
65 | debug_set_level(qdio_dbf_setup, QDIO_DBF_SETUP_LEVEL); | ||
66 | |||
67 | qdio_dbf_trace = debug_register("qdio_trace", QDIO_DBF_TRACE_PAGES, | ||
68 | QDIO_DBF_TRACE_NR_AREAS, | ||
69 | QDIO_DBF_TRACE_LEN); | ||
70 | if (!qdio_dbf_trace) | ||
71 | goto oom; | ||
72 | debug_register_view(qdio_dbf_trace, &debug_hex_ascii_view); | ||
73 | debug_set_level(qdio_dbf_trace, QDIO_DBF_TRACE_LEVEL); | ||
74 | return 0; | ||
75 | oom: | ||
76 | qdio_unregister_dbf_views(); | ||
77 | return -ENOMEM; | ||
78 | } | 52 | } |
79 | 53 | ||
80 | static int qstat_show(struct seq_file *m, void *v) | 54 | static int qstat_show(struct seq_file *m, void *v) |
@@ -86,16 +60,18 @@ static int qstat_show(struct seq_file *m, void *v) | |||
86 | if (!q) | 60 | if (!q) |
87 | return 0; | 61 | return 0; |
88 | 62 | ||
89 | seq_printf(m, "device state indicator: %d\n", *q->irq_ptr->dsci); | 63 | seq_printf(m, "device state indicator: %d\n", *(u32 *)q->irq_ptr->dsci); |
90 | seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used)); | 64 | seq_printf(m, "nr_used: %d\n", atomic_read(&q->nr_buf_used)); |
91 | seq_printf(m, "ftc: %d\n", q->first_to_check); | 65 | seq_printf(m, "ftc: %d\n", q->first_to_check); |
92 | seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc); | 66 | seq_printf(m, "last_move_ftc: %d\n", q->last_move_ftc); |
93 | seq_printf(m, "polling: %d\n", q->u.in.polling); | 67 | seq_printf(m, "polling: %d\n", q->u.in.polling); |
68 | seq_printf(m, "ack count: %d\n", q->u.in.ack_count); | ||
94 | seq_printf(m, "slsb buffer states:\n"); | 69 | seq_printf(m, "slsb buffer states:\n"); |
70 | seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); | ||
95 | 71 | ||
96 | qdio_siga_sync_q(q); | 72 | qdio_siga_sync_q(q); |
97 | for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { | 73 | for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { |
98 | get_buf_state(q, i, &state); | 74 | get_buf_state(q, i, &state, 0); |
99 | switch (state) { | 75 | switch (state) { |
100 | case SLSB_P_INPUT_NOT_INIT: | 76 | case SLSB_P_INPUT_NOT_INIT: |
101 | case SLSB_P_OUTPUT_NOT_INIT: | 77 | case SLSB_P_OUTPUT_NOT_INIT: |
@@ -127,6 +103,7 @@ static int qstat_show(struct seq_file *m, void *v) | |||
127 | seq_printf(m, "\n"); | 103 | seq_printf(m, "\n"); |
128 | } | 104 | } |
129 | seq_printf(m, "\n"); | 105 | seq_printf(m, "\n"); |
106 | seq_printf(m, "|64 |72 |80 |88 |96 |104 |112 | 127|\n"); | ||
130 | return 0; | 107 | return 0; |
131 | } | 108 | } |
132 | 109 | ||
@@ -223,11 +200,24 @@ void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, struct ccw_device *cd | |||
223 | int __init qdio_debug_init(void) | 200 | int __init qdio_debug_init(void) |
224 | { | 201 | { |
225 | debugfs_root = debugfs_create_dir("qdio_queues", NULL); | 202 | debugfs_root = debugfs_create_dir("qdio_queues", NULL); |
226 | return qdio_register_dbf_views(); | 203 | |
204 | qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16); | ||
205 | debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view); | ||
206 | debug_set_level(qdio_dbf_setup, DBF_INFO); | ||
207 | DBF_EVENT("dbf created\n"); | ||
208 | |||
209 | qdio_dbf_error = debug_register("qdio_error", 4, 1, 16); | ||
210 | debug_register_view(qdio_dbf_error, &debug_hex_ascii_view); | ||
211 | debug_set_level(qdio_dbf_error, DBF_INFO); | ||
212 | DBF_ERROR("dbf created\n"); | ||
213 | return 0; | ||
227 | } | 214 | } |
228 | 215 | ||
229 | void qdio_debug_exit(void) | 216 | void qdio_debug_exit(void) |
230 | { | 217 | { |
231 | debugfs_remove(debugfs_root); | 218 | debugfs_remove(debugfs_root); |
232 | qdio_unregister_dbf_views(); | 219 | if (qdio_dbf_setup) |
220 | debug_unregister(qdio_dbf_setup); | ||
221 | if (qdio_dbf_error) | ||
222 | debug_unregister(qdio_dbf_error); | ||
233 | } | 223 | } |
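One detail of the rewritten qdio_debug_init() above: the debug_register() return values are no longer checked before debug_register_view()/debug_set_level() are called on them, while qdio_debug_exit() still guards against NULL. A more defensive variant would look roughly like the sketch below, using the same s390 debug-feature calls; it is not the committed code.

static int __init sketch_debug_init(void)
{
	qdio_dbf_setup = debug_register("qdio_setup", 16, 1, 16);
	if (!qdio_dbf_setup)
		return -ENOMEM;
	debug_register_view(qdio_dbf_setup, &debug_hex_ascii_view);
	debug_set_level(qdio_dbf_setup, DBF_INFO);

	qdio_dbf_error = debug_register("qdio_error", 4, 1, 16);
	if (!qdio_dbf_error) {
		debug_unregister(qdio_dbf_setup);
		return -ENOMEM;
	}
	debug_register_view(qdio_dbf_error, &debug_hex_ascii_view);
	debug_set_level(qdio_dbf_error, DBF_INFO);
	return 0;
}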
diff --git a/drivers/s390/cio/qdio_debug.h b/drivers/s390/cio/qdio_debug.h index 5a4d85b829ad..5d70bd162ae9 100644 --- a/drivers/s390/cio/qdio_debug.h +++ b/drivers/s390/cio/qdio_debug.h | |||
@@ -12,80 +12,72 @@ | |||
12 | #include <asm/qdio.h> | 12 | #include <asm/qdio.h> |
13 | #include "qdio.h" | 13 | #include "qdio.h" |
14 | 14 | ||
15 | #define QDIO_DBF_HEX(ex, name, level, addr, len) \ | 15 | /* that gives us 15 characters in the text event views */ |
16 | #define QDIO_DBF_LEN 16 | ||
17 | |||
18 | extern debug_info_t *qdio_dbf_setup; | ||
19 | extern debug_info_t *qdio_dbf_error; | ||
20 | |||
21 | /* sort out low debug levels early to avoid wasted sprints */ | ||
22 | static inline int qdio_dbf_passes(debug_info_t *dbf_grp, int level) | ||
23 | { | ||
24 | return (level <= dbf_grp->level); | ||
25 | } | ||
26 | |||
27 | #define DBF_ERR 3 /* error conditions */ | ||
28 | #define DBF_WARN 4 /* warning conditions */ | ||
29 | #define DBF_INFO 6 /* informational */ | ||
30 | |||
31 | #undef DBF_EVENT | ||
32 | #undef DBF_ERROR | ||
33 | #undef DBF_DEV_EVENT | ||
34 | |||
35 | #define DBF_EVENT(text...) \ | ||
16 | do { \ | 36 | do { \ |
17 | if (ex) \ | 37 | char debug_buffer[QDIO_DBF_LEN]; \ |
18 | debug_exception(qdio_dbf_##name, level, (void *)(addr), len); \ | 38 | snprintf(debug_buffer, QDIO_DBF_LEN, text); \ |
19 | else \ | 39 | debug_text_event(qdio_dbf_setup, DBF_ERR, debug_buffer); \ |
20 | debug_event(qdio_dbf_##name, level, (void *)(addr), len); \ | ||
21 | } while (0) | 40 | } while (0) |
22 | #define QDIO_DBF_TEXT(ex, name, level, text) \ | 41 | |
42 | #define DBF_HEX(addr, len) \ | ||
23 | do { \ | 43 | do { \ |
24 | if (ex) \ | 44 | debug_event(qdio_dbf_setup, DBF_ERR, (void*)(addr), len); \ |
25 | debug_text_exception(qdio_dbf_##name, level, text); \ | ||
26 | else \ | ||
27 | debug_text_event(qdio_dbf_##name, level, text); \ | ||
28 | } while (0) | 45 | } while (0) |
29 | 46 | ||
30 | #define QDIO_DBF_HEX0(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 0, addr, len) | 47 | #define DBF_ERROR(text...) \ |
31 | #define QDIO_DBF_HEX1(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 1, addr, len) | 48 | do { \ |
32 | #define QDIO_DBF_HEX2(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 2, addr, len) | 49 | char debug_buffer[QDIO_DBF_LEN]; \ |
33 | 50 | snprintf(debug_buffer, QDIO_DBF_LEN, text); \ | |
34 | #ifdef CONFIG_QDIO_DEBUG | 51 | debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer); \ |
35 | #define QDIO_DBF_HEX3(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 3, addr, len) | 52 | } while (0) |
36 | #define QDIO_DBF_HEX4(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 4, addr, len) | ||
37 | #define QDIO_DBF_HEX5(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 5, addr, len) | ||
38 | #define QDIO_DBF_HEX6(ex, name, addr, len) QDIO_DBF_HEX(ex, name, 6, addr, len) | ||
39 | #else | ||
40 | #define QDIO_DBF_HEX3(ex, name, addr, len) do {} while (0) | ||
41 | #define QDIO_DBF_HEX4(ex, name, addr, len) do {} while (0) | ||
42 | #define QDIO_DBF_HEX5(ex, name, addr, len) do {} while (0) | ||
43 | #define QDIO_DBF_HEX6(ex, name, addr, len) do {} while (0) | ||
44 | #endif /* CONFIG_QDIO_DEBUG */ | ||
45 | |||
46 | #define QDIO_DBF_TEXT0(ex, name, text) QDIO_DBF_TEXT(ex, name, 0, text) | ||
47 | #define QDIO_DBF_TEXT1(ex, name, text) QDIO_DBF_TEXT(ex, name, 1, text) | ||
48 | #define QDIO_DBF_TEXT2(ex, name, text) QDIO_DBF_TEXT(ex, name, 2, text) | ||
49 | |||
50 | #ifdef CONFIG_QDIO_DEBUG | ||
51 | #define QDIO_DBF_TEXT3(ex, name, text) QDIO_DBF_TEXT(ex, name, 3, text) | ||
52 | #define QDIO_DBF_TEXT4(ex, name, text) QDIO_DBF_TEXT(ex, name, 4, text) | ||
53 | #define QDIO_DBF_TEXT5(ex, name, text) QDIO_DBF_TEXT(ex, name, 5, text) | ||
54 | #define QDIO_DBF_TEXT6(ex, name, text) QDIO_DBF_TEXT(ex, name, 6, text) | ||
55 | #else | ||
56 | #define QDIO_DBF_TEXT3(ex, name, text) do {} while (0) | ||
57 | #define QDIO_DBF_TEXT4(ex, name, text) do {} while (0) | ||
58 | #define QDIO_DBF_TEXT5(ex, name, text) do {} while (0) | ||
59 | #define QDIO_DBF_TEXT6(ex, name, text) do {} while (0) | ||
60 | #endif /* CONFIG_QDIO_DEBUG */ | ||
61 | 53 | ||
62 | /* s390dbf views */ | 54 | #define DBF_ERROR_HEX(addr, len) \ |
63 | #define QDIO_DBF_SETUP_LEN 8 | 55 | do { \ |
64 | #define QDIO_DBF_SETUP_PAGES 8 | 56 | debug_event(qdio_dbf_error, DBF_ERR, (void*)(addr), len); \ |
65 | #define QDIO_DBF_SETUP_NR_AREAS 1 | 57 | } while (0) |
66 | 58 | ||
67 | #define QDIO_DBF_TRACE_LEN 8 | ||
68 | #define QDIO_DBF_TRACE_NR_AREAS 2 | ||
69 | 59 | ||
70 | #ifdef CONFIG_QDIO_DEBUG | 60 | #define DBF_DEV_EVENT(level, device, text...) \ |
71 | #define QDIO_DBF_TRACE_PAGES 32 | 61 | do { \ |
72 | #define QDIO_DBF_SETUP_LEVEL 6 | 62 | char debug_buffer[QDIO_DBF_LEN]; \ |
73 | #define QDIO_DBF_TRACE_LEVEL 4 | 63 | if (qdio_dbf_passes(device->debug_area, level)) { \ |
74 | #else /* !CONFIG_QDIO_DEBUG */ | 64 | snprintf(debug_buffer, QDIO_DBF_LEN, text); \ |
75 | #define QDIO_DBF_TRACE_PAGES 8 | 65 | debug_text_event(device->debug_area, level, debug_buffer); \ |
76 | #define QDIO_DBF_SETUP_LEVEL 2 | 66 | } \ |
77 | #define QDIO_DBF_TRACE_LEVEL 2 | 67 | } while (0) |
78 | #endif /* CONFIG_QDIO_DEBUG */ | ||
79 | 68 | ||
80 | extern debug_info_t *qdio_dbf_setup; | 69 | #define DBF_DEV_HEX(level, device, addr, len) \ |
81 | extern debug_info_t *qdio_dbf_trace; | 70 | do { \ |
71 | debug_event(device->debug_area, level, (void*)(addr), len); \ | ||
72 | } while (0) | ||
82 | 73 | ||
83 | void qdio_allocate_do_dbf(struct qdio_initialize *init_data); | 74 | void qdio_allocate_dbf(struct qdio_initialize *init_data, |
84 | void debug_print_bstat(struct qdio_q *q); | 75 | struct qdio_irq *irq_ptr); |
85 | void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, | 76 | void qdio_setup_debug_entries(struct qdio_irq *irq_ptr, |
86 | struct ccw_device *cdev); | 77 | struct ccw_device *cdev); |
87 | void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, | 78 | void qdio_shutdown_debug_entries(struct qdio_irq *irq_ptr, |
88 | struct ccw_device *cdev); | 79 | struct ccw_device *cdev); |
89 | int qdio_debug_init(void); | 80 | int qdio_debug_init(void); |
90 | void qdio_debug_exit(void); | 81 | void qdio_debug_exit(void); |
82 | |||
91 | #endif | 83 | #endif |
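With the header above, the whole QDIO_DBF_TEXTn/HEXn level family collapses into three fixed sinks: DBF_EVENT (setup area), DBF_ERROR (error area) and DBF_DEV_EVENT/DBF_DEV_HEX (the per-interface area allocated in qdio_allocate_dbf). A call site is now a printf-style one-liner; roughly what the preprocessor makes of one of them is sketched below as an illustrative expansion, not generated output.

/* DBF_ERROR("%4x SIGA-W:%2d", SCH_NO(q), cc); expands to roughly: */
do {
	char debug_buffer[QDIO_DBF_LEN];	/* 16 bytes, 15 visible characters */

	snprintf(debug_buffer, QDIO_DBF_LEN, "%4x SIGA-W:%2d", SCH_NO(q), cc);
	debug_text_event(qdio_dbf_error, DBF_ERR, debug_buffer);
} while (0);

Only DBF_DEV_EVENT looks at the level argument: qdio_dbf_passes() compares it against the per-device area's level before the snprintf(), so disabled trace levels cost little more than a compare.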
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c index 7c8659151993..744f928a59ea 100644 --- a/drivers/s390/cio/qdio_main.c +++ b/drivers/s390/cio/qdio_main.c | |||
@@ -74,7 +74,7 @@ static inline int do_siga_input(struct subchannel_id schid, unsigned int mask) | |||
74 | * Note: For IQDC unicast queues only the highest priority queue is processed. | 74 | * Note: For IQDC unicast queues only the highest priority queue is processed. |
75 | */ | 75 | */ |
76 | static inline int do_siga_output(unsigned long schid, unsigned long mask, | 76 | static inline int do_siga_output(unsigned long schid, unsigned long mask, |
77 | u32 *bb, unsigned int fc) | 77 | unsigned int *bb, unsigned int fc) |
78 | { | 78 | { |
79 | register unsigned long __fc asm("0") = fc; | 79 | register unsigned long __fc asm("0") = fc; |
80 | register unsigned long __schid asm("1") = schid; | 80 | register unsigned long __schid asm("1") = schid; |
@@ -95,8 +95,6 @@ static inline int do_siga_output(unsigned long schid, unsigned long mask, | |||
95 | 95 | ||
96 | static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) | 96 | static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) |
97 | { | 97 | { |
98 | char dbf_text[15]; | ||
99 | |||
100 | /* all done or next buffer state different */ | 98 | /* all done or next buffer state different */ |
101 | if (ccq == 0 || ccq == 32) | 99 | if (ccq == 0 || ccq == 32) |
102 | return 0; | 100 | return 0; |
@@ -104,8 +102,7 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) | |||
104 | if (ccq == 96 || ccq == 97) | 102 | if (ccq == 96 || ccq == 97) |
105 | return 1; | 103 | return 1; |
106 | /* notify devices immediately */ | 104 | /* notify devices immediately */ |
107 | sprintf(dbf_text, "%d", ccq); | 105 | DBF_ERROR("%4x ccq:%3d", SCH_NO(q), ccq); |
108 | QDIO_DBF_TEXT2(1, trace, dbf_text); | ||
109 | return -EIO; | 106 | return -EIO; |
110 | } | 107 | } |
111 | 108 | ||
@@ -115,41 +112,45 @@ static inline int qdio_check_ccq(struct qdio_q *q, unsigned int ccq) | |||
115 | * @state: state of the extracted buffers | 112 | * @state: state of the extracted buffers |
116 | * @start: buffer number to start at | 113 | * @start: buffer number to start at |
117 | * @count: count of buffers to examine | 114 | * @count: count of buffers to examine |
115 | * @auto_ack: automatically acknowledge buffers | ||
118 | * | 116 | * |
119 | * Returns the number of successfully extracted equal buffer states. | 117 | * Returns the number of successfully extracted equal buffer states. |
120 | * Stops processing if a state is different from the last buffer's state. | 118 | * Stops processing if a state is different from the last buffer's state. |
121 | */ | 119 | */ |
122 | static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, | 120 | static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state, |
123 | int start, int count) | 121 | int start, int count, int auto_ack) |
124 | { | 122 | { |
125 | unsigned int ccq = 0; | 123 | unsigned int ccq = 0; |
126 | int tmp_count = count, tmp_start = start; | 124 | int tmp_count = count, tmp_start = start; |
127 | int nr = q->nr; | 125 | int nr = q->nr; |
128 | int rc; | 126 | int rc; |
129 | char dbf_text[15]; | ||
130 | 127 | ||
131 | BUG_ON(!q->irq_ptr->sch_token); | 128 | BUG_ON(!q->irq_ptr->sch_token); |
129 | qdio_perf_stat_inc(&perf_stats.debug_eqbs_all); | ||
132 | 130 | ||
133 | if (!q->is_input_q) | 131 | if (!q->is_input_q) |
134 | nr += q->irq_ptr->nr_input_qs; | 132 | nr += q->irq_ptr->nr_input_qs; |
135 | again: | 133 | again: |
136 | ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); | 134 | ccq = do_eqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count, |
135 | auto_ack); | ||
137 | rc = qdio_check_ccq(q, ccq); | 136 | rc = qdio_check_ccq(q, ccq); |
138 | 137 | ||
139 | /* At least one buffer was processed, return and extract the remaining | 138 | /* At least one buffer was processed, return and extract the remaining |
140 | * buffers later. | 139 | * buffers later. |
141 | */ | 140 | */ |
142 | if ((ccq == 96) && (count != tmp_count)) | 141 | if ((ccq == 96) && (count != tmp_count)) { |
142 | qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete); | ||
143 | return (count - tmp_count); | 143 | return (count - tmp_count); |
144 | } | ||
145 | |||
144 | if (rc == 1) { | 146 | if (rc == 1) { |
145 | QDIO_DBF_TEXT5(1, trace, "eqAGAIN"); | 147 | DBF_DEV_EVENT(DBF_WARN, q->irq_ptr, "EQBS again:%2d", ccq); |
146 | goto again; | 148 | goto again; |
147 | } | 149 | } |
148 | 150 | ||
149 | if (rc < 0) { | 151 | if (rc < 0) { |
150 | QDIO_DBF_TEXT2(1, trace, "eqberr"); | 152 | DBF_ERROR("%4x EQBS ERROR", SCH_NO(q)); |
151 | sprintf(dbf_text, "%2x,%2x,%d,%d", count, tmp_count, ccq, nr); | 153 | DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); |
152 | QDIO_DBF_TEXT2(1, trace, dbf_text); | ||
153 | q->handler(q->irq_ptr->cdev, | 154 | q->handler(q->irq_ptr->cdev, |
154 | QDIO_ERROR_ACTIVATE_CHECK_CONDITION, | 155 | QDIO_ERROR_ACTIVATE_CHECK_CONDITION, |
155 | 0, -1, -1, q->irq_ptr->int_parm); | 156 | 0, -1, -1, q->irq_ptr->int_parm); |
@@ -176,9 +177,12 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start, | |||
176 | int tmp_count = count, tmp_start = start; | 177 | int tmp_count = count, tmp_start = start; |
177 | int nr = q->nr; | 178 | int nr = q->nr; |
178 | int rc; | 179 | int rc; |
179 | char dbf_text[15]; | 180 | |
181 | if (!count) | ||
182 | return 0; | ||
180 | 183 | ||
181 | BUG_ON(!q->irq_ptr->sch_token); | 184 | BUG_ON(!q->irq_ptr->sch_token); |
185 | qdio_perf_stat_inc(&perf_stats.debug_sqbs_all); | ||
182 | 186 | ||
183 | if (!q->is_input_q) | 187 | if (!q->is_input_q) |
184 | nr += q->irq_ptr->nr_input_qs; | 188 | nr += q->irq_ptr->nr_input_qs; |
@@ -186,16 +190,13 @@ again: | |||
186 | ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); | 190 | ccq = do_sqbs(q->irq_ptr->sch_token, state, nr, &tmp_start, &tmp_count); |
187 | rc = qdio_check_ccq(q, ccq); | 191 | rc = qdio_check_ccq(q, ccq); |
188 | if (rc == 1) { | 192 | if (rc == 1) { |
189 | QDIO_DBF_TEXT5(1, trace, "sqAGAIN"); | 193 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq); |
194 | qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete); | ||
190 | goto again; | 195 | goto again; |
191 | } | 196 | } |
192 | if (rc < 0) { | 197 | if (rc < 0) { |
193 | QDIO_DBF_TEXT3(1, trace, "sqberr"); | 198 | DBF_ERROR("%4x SQBS ERROR", SCH_NO(q)); |
194 | sprintf(dbf_text, "%2x,%2x", count, tmp_count); | 199 | DBF_ERROR("%3d%3d%2d", count, tmp_count, nr); |
195 | QDIO_DBF_TEXT3(1, trace, dbf_text); | ||
196 | sprintf(dbf_text, "%d,%d", ccq, nr); | ||
197 | QDIO_DBF_TEXT3(1, trace, dbf_text); | ||
198 | |||
199 | q->handler(q->irq_ptr->cdev, | 200 | q->handler(q->irq_ptr->cdev, |
200 | QDIO_ERROR_ACTIVATE_CHECK_CONDITION, | 201 | QDIO_ERROR_ACTIVATE_CHECK_CONDITION, |
201 | 0, -1, -1, q->irq_ptr->int_parm); | 202 | 0, -1, -1, q->irq_ptr->int_parm); |
@@ -207,7 +208,8 @@ again: | |||
207 | 208 | ||
208 | /* returns number of examined buffers and their common state in *state */ | 209 | /* returns number of examined buffers and their common state in *state */ |
209 | static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, | 210 | static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, |
210 | unsigned char *state, unsigned int count) | 211 | unsigned char *state, unsigned int count, |
212 | int auto_ack) | ||
211 | { | 213 | { |
212 | unsigned char __state = 0; | 214 | unsigned char __state = 0; |
213 | int i; | 215 | int i; |
@@ -216,7 +218,7 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, | |||
216 | BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); | 218 | BUG_ON(count > QDIO_MAX_BUFFERS_PER_Q); |
217 | 219 | ||
218 | if (is_qebsm(q)) | 220 | if (is_qebsm(q)) |
219 | return qdio_do_eqbs(q, state, bufnr, count); | 221 | return qdio_do_eqbs(q, state, bufnr, count, auto_ack); |
220 | 222 | ||
221 | for (i = 0; i < count; i++) { | 223 | for (i = 0; i < count; i++) { |
222 | if (!__state) | 224 | if (!__state) |
@@ -230,9 +232,9 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, | |||
230 | } | 232 | } |
231 | 233 | ||
232 | inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, | 234 | inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, |
233 | unsigned char *state) | 235 | unsigned char *state, int auto_ack) |
234 | { | 236 | { |
235 | return get_buf_states(q, bufnr, state, 1); | 237 | return get_buf_states(q, bufnr, state, 1, auto_ack); |
236 | } | 238 | } |
237 | 239 | ||
238 | /* wrap-around safe setting of slsb states, returns number of changed buffers */ | 240 | /* wrap-around safe setting of slsb states, returns number of changed buffers */ |
@@ -282,14 +284,12 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output, | |||
282 | if (!need_siga_sync(q)) | 284 | if (!need_siga_sync(q)) |
283 | return 0; | 285 | return 0; |
284 | 286 | ||
287 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr); | ||
285 | qdio_perf_stat_inc(&perf_stats.siga_sync); | 288 | qdio_perf_stat_inc(&perf_stats.siga_sync); |
286 | 289 | ||
287 | cc = do_siga_sync(q->irq_ptr->schid, output, input); | 290 | cc = do_siga_sync(q->irq_ptr->schid, output, input); |
288 | if (cc) { | 291 | if (cc) |
289 | QDIO_DBF_TEXT4(0, trace, "sigasync"); | 292 | DBF_ERROR("%4x SIGA-S:%2d", SCH_NO(q), cc); |
290 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
291 | QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); | ||
292 | } | ||
293 | return cc; | 293 | return cc; |
294 | } | 294 | } |
295 | 295 | ||
@@ -311,50 +311,37 @@ static inline int qdio_siga_sync_all(struct qdio_q *q) | |||
311 | return qdio_siga_sync(q, ~0U, ~0U); | 311 | return qdio_siga_sync(q, ~0U, ~0U); |
312 | } | 312 | } |
313 | 313 | ||
314 | static inline int qdio_do_siga_output(struct qdio_q *q, unsigned int *busy_bit) | 314 | static int qdio_siga_output(struct qdio_q *q, unsigned int *busy_bit) |
315 | { | 315 | { |
316 | unsigned int fc = 0; | ||
317 | unsigned long schid; | 316 | unsigned long schid; |
317 | unsigned int fc = 0; | ||
318 | u64 start_time = 0; | ||
319 | int cc; | ||
318 | 320 | ||
319 | if (q->u.out.use_enh_siga) { | 321 | if (q->u.out.use_enh_siga) |
320 | fc = 3; | 322 | fc = 3; |
321 | } | 323 | |
322 | if (!is_qebsm(q)) | 324 | if (is_qebsm(q)) { |
323 | schid = *((u32 *)&q->irq_ptr->schid); | ||
324 | else { | ||
325 | schid = q->irq_ptr->sch_token; | 325 | schid = q->irq_ptr->sch_token; |
326 | fc |= 0x80; | 326 | fc |= 0x80; |
327 | } | 327 | } |
328 | return do_siga_output(schid, q->mask, busy_bit, fc); | 328 | else |
329 | } | 329 | schid = *((u32 *)&q->irq_ptr->schid); |
330 | |||
331 | static int qdio_siga_output(struct qdio_q *q) | ||
332 | { | ||
333 | int cc; | ||
334 | u32 busy_bit; | ||
335 | u64 start_time = 0; | ||
336 | char dbf_text[15]; | ||
337 | |||
338 | QDIO_DBF_TEXT5(0, trace, "sigaout"); | ||
339 | QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); | ||
340 | 330 | ||
341 | qdio_perf_stat_inc(&perf_stats.siga_out); | ||
342 | again: | 331 | again: |
343 | cc = qdio_do_siga_output(q, &busy_bit); | 332 | cc = do_siga_output(schid, q->mask, busy_bit, fc); |
344 | if (queue_type(q) == QDIO_IQDIO_QFMT && cc == 2 && busy_bit) { | ||
345 | sprintf(dbf_text, "bb%4x%2x", q->irq_ptr->schid.sch_no, q->nr); | ||
346 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
347 | 333 | ||
348 | if (!start_time) | 334 | /* hipersocket busy condition */ |
335 | if (*busy_bit) { | ||
336 | WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); | ||
337 | |||
338 | if (!start_time) { | ||
349 | start_time = get_usecs(); | 339 | start_time = get_usecs(); |
350 | else if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) | 340 | goto again; |
341 | } | ||
342 | if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) | ||
351 | goto again; | 343 | goto again; |
352 | } | 344 | } |
353 | |||
354 | if (cc == 2 && busy_bit) | ||
355 | cc |= QDIO_ERROR_SIGA_BUSY; | ||
356 | if (cc) | ||
357 | QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); | ||
358 | return cc; | 345 | return cc; |
359 | } | 346 | } |
360 | 347 | ||
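The rewritten qdio_siga_output() above folds the old helper pair into one bounded retry: as long as the busy bit comes back set it keeps reissuing SIGA, starting a clock on the first busy hit and giving up once QDIO_BUSY_BIT_PATIENCE microseconds have passed, after which the condition code is simply returned. The retry skeleton on its own is sketched below; issue_once() is a made-up stand-in for do_siga_output().

static int sketch_siga_with_patience(struct qdio_q *q, unsigned int *busy_bit)
{
	u64 start_time = 0;
	int cc;

	do {
		cc = issue_once(q, busy_bit);		/* hypothetical stand-in */
		if (!*busy_bit)
			return cc;			/* no busy condition, done */
		if (!start_time)
			start_time = get_usecs();	/* open the patience window */
	} while (get_usecs() - start_time < QDIO_BUSY_BIT_PATIENCE);

	return cc;	/* still busy after ~100us: let the caller decide */
}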
@@ -362,14 +349,12 @@ static inline int qdio_siga_input(struct qdio_q *q) | |||
362 | { | 349 | { |
363 | int cc; | 350 | int cc; |
364 | 351 | ||
365 | QDIO_DBF_TEXT4(0, trace, "sigain"); | 352 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr); |
366 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
367 | |||
368 | qdio_perf_stat_inc(&perf_stats.siga_in); | 353 | qdio_perf_stat_inc(&perf_stats.siga_in); |
369 | 354 | ||
370 | cc = do_siga_input(q->irq_ptr->schid, q->mask); | 355 | cc = do_siga_input(q->irq_ptr->schid, q->mask); |
371 | if (cc) | 356 | if (cc) |
372 | QDIO_DBF_HEX3(0, trace, &cc, sizeof(int *)); | 357 | DBF_ERROR("%4x SIGA-R:%2d", SCH_NO(q), cc); |
373 | return cc; | 358 | return cc; |
374 | } | 359 | } |
375 | 360 | ||
@@ -387,35 +372,91 @@ void qdio_sync_after_thinint(struct qdio_q *q) | |||
387 | 372 | ||
388 | inline void qdio_stop_polling(struct qdio_q *q) | 373 | inline void qdio_stop_polling(struct qdio_q *q) |
389 | { | 374 | { |
390 | spin_lock_bh(&q->u.in.lock); | 375 | if (!q->u.in.polling) |
391 | if (!q->u.in.polling) { | ||
392 | spin_unlock_bh(&q->u.in.lock); | ||
393 | return; | 376 | return; |
394 | } | 377 | |
395 | q->u.in.polling = 0; | 378 | q->u.in.polling = 0; |
396 | qdio_perf_stat_inc(&perf_stats.debug_stop_polling); | 379 | qdio_perf_stat_inc(&perf_stats.debug_stop_polling); |
397 | 380 | ||
398 | /* show the card that we are not polling anymore */ | 381 | /* show the card that we are not polling anymore */ |
399 | set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); | 382 | if (is_qebsm(q)) { |
400 | spin_unlock_bh(&q->u.in.lock); | 383 | set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT, |
384 | q->u.in.ack_count); | ||
385 | q->u.in.ack_count = 0; | ||
386 | } else | ||
387 | set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); | ||
401 | } | 388 | } |
402 | 389 | ||
403 | static void announce_buffer_error(struct qdio_q *q) | 390 | static void announce_buffer_error(struct qdio_q *q, int count) |
404 | { | 391 | { |
405 | char dbf_text[15]; | 392 | q->qdio_error |= QDIO_ERROR_SLSB_STATE; |
393 | |||
394 | /* special handling for no target buffer empty */ | ||
395 | if ((!q->is_input_q && | ||
396 | (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) { | ||
397 | qdio_perf_stat_inc(&perf_stats.outbound_target_full); | ||
398 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%3d", | ||
399 | q->first_to_check); | ||
400 | return; | ||
401 | } | ||
406 | 402 | ||
407 | if (q->is_input_q) | 403 | DBF_ERROR("%4x BUF ERROR", SCH_NO(q)); |
408 | QDIO_DBF_TEXT3(1, trace, "inperr"); | 404 | DBF_ERROR((q->is_input_q) ? "IN:%2d" : "OUT:%2d", q->nr); |
409 | else | 405 | DBF_ERROR("FTC:%3d C:%3d", q->first_to_check, count); |
410 | QDIO_DBF_TEXT3(0, trace, "outperr"); | 406 | DBF_ERROR("F14:%2x F15:%2x", |
407 | q->sbal[q->first_to_check]->element[14].flags & 0xff, | ||
408 | q->sbal[q->first_to_check]->element[15].flags & 0xff); | ||
409 | } | ||
410 | |||
411 | static inline void inbound_primed(struct qdio_q *q, int count) | ||
412 | { | ||
413 | int new; | ||
414 | |||
415 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count); | ||
416 | |||
417 | /* for QEBSM the ACK was already set by EQBS */ | ||
418 | if (is_qebsm(q)) { | ||
419 | if (!q->u.in.polling) { | ||
420 | q->u.in.polling = 1; | ||
421 | q->u.in.ack_count = count; | ||
422 | q->last_move_ftc = q->first_to_check; | ||
423 | return; | ||
424 | } | ||
425 | |||
426 | /* delete the previous ACK's */ | ||
427 | set_buf_states(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT, | ||
428 | q->u.in.ack_count); | ||
429 | q->u.in.ack_count = count; | ||
430 | q->last_move_ftc = q->first_to_check; | ||
431 | return; | ||
432 | } | ||
433 | |||
434 | /* | ||
435 | * ACK the newest buffer. The ACK will be removed in qdio_stop_polling | ||
436 | * or by the next inbound run. | ||
437 | */ | ||
438 | new = add_buf(q->first_to_check, count - 1); | ||
439 | if (q->u.in.polling) { | ||
440 | /* reset the previous ACK but first set the new one */ | ||
441 | set_buf_state(q, new, SLSB_P_INPUT_ACK); | ||
442 | set_buf_state(q, q->last_move_ftc, SLSB_P_INPUT_NOT_INIT); | ||
443 | } | ||
444 | else { | ||
445 | q->u.in.polling = 1; | ||
446 | set_buf_state(q, q->first_to_check, SLSB_P_INPUT_ACK); | ||
447 | } | ||
411 | 448 | ||
412 | sprintf(dbf_text, "%x-%x-%x", q->first_to_check, | 449 | q->last_move_ftc = new; |
413 | q->sbal[q->first_to_check]->element[14].flags, | 450 | count--; |
414 | q->sbal[q->first_to_check]->element[15].flags); | 451 | if (!count) |
415 | QDIO_DBF_TEXT3(1, trace, dbf_text); | 452 | return; |
416 | QDIO_DBF_HEX2(1, trace, q->sbal[q->first_to_check], 256); | ||
417 | 453 | ||
418 | q->qdio_error = QDIO_ERROR_SLSB_STATE; | 454 | /* |
455 | * Need to change all PRIMED buffers to NOT_INIT, otherwise | ||
456 | * we're losing initiative in the thinint code. | ||
457 | */ | ||
458 | set_buf_states(q, next_buf(q->first_to_check), SLSB_P_INPUT_NOT_INIT, | ||
459 | count); | ||
419 | } | 460 | } |
420 | 461 | ||
421 | static int get_inbound_buffer_frontier(struct qdio_q *q) | 462 | static int get_inbound_buffer_frontier(struct qdio_q *q) |
@@ -424,13 +465,6 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) | |||
424 | unsigned char state; | 465 | unsigned char state; |
425 | 466 | ||
426 | /* | 467 | /* |
427 | * If we still poll don't update last_move_ftc, keep the | ||
428 | * previously ACK buffer there. | ||
429 | */ | ||
430 | if (!q->u.in.polling) | ||
431 | q->last_move_ftc = q->first_to_check; | ||
432 | |||
433 | /* | ||
434 | * Don't check 128 buffers, as otherwise qdio_inbound_q_moved | 468 | * Don't check 128 buffers, as otherwise qdio_inbound_q_moved |
435 | * would return 0. | 469 | * would return 0. |
436 | */ | 470 | */ |
@@ -450,34 +484,13 @@ check_next: | |||
450 | if (q->first_to_check == stop) | 484 | if (q->first_to_check == stop) |
451 | goto out; | 485 | goto out; |
452 | 486 | ||
453 | count = get_buf_states(q, q->first_to_check, &state, count); | 487 | count = get_buf_states(q, q->first_to_check, &state, count, 1); |
454 | if (!count) | 488 | if (!count) |
455 | goto out; | 489 | goto out; |
456 | 490 | ||
457 | switch (state) { | 491 | switch (state) { |
458 | case SLSB_P_INPUT_PRIMED: | 492 | case SLSB_P_INPUT_PRIMED: |
459 | QDIO_DBF_TEXT5(0, trace, "inptprim"); | 493 | inbound_primed(q, count); |
460 | |||
461 | /* | ||
462 | * Only ACK the first buffer. The ACK will be removed in | ||
463 | * qdio_stop_polling. | ||
464 | */ | ||
465 | if (q->u.in.polling) | ||
466 | state = SLSB_P_INPUT_NOT_INIT; | ||
467 | else { | ||
468 | q->u.in.polling = 1; | ||
469 | state = SLSB_P_INPUT_ACK; | ||
470 | } | ||
471 | set_buf_state(q, q->first_to_check, state); | ||
472 | |||
473 | /* | ||
474 | * Need to change all PRIMED buffers to NOT_INIT, otherwise | ||
475 | * we're loosing initiative in the thinint code. | ||
476 | */ | ||
477 | if (count > 1) | ||
478 | set_buf_states(q, next_buf(q->first_to_check), | ||
479 | SLSB_P_INPUT_NOT_INIT, count - 1); | ||
480 | |||
481 | /* | 494 | /* |
482 | * No siga-sync needed for non-qebsm here, as the inbound queue | 495 | * No siga-sync needed for non-qebsm here, as the inbound queue |
483 | * will be synced on the next siga-r, resp. | 496 | * will be synced on the next siga-r, resp. |
@@ -487,7 +500,7 @@ check_next: | |||
487 | atomic_sub(count, &q->nr_buf_used); | 500 | atomic_sub(count, &q->nr_buf_used); |
488 | goto check_next; | 501 | goto check_next; |
489 | case SLSB_P_INPUT_ERROR: | 502 | case SLSB_P_INPUT_ERROR: |
490 | announce_buffer_error(q); | 503 | announce_buffer_error(q, count); |
491 | /* process the buffer, the upper layer will take care of it */ | 504 | /* process the buffer, the upper layer will take care of it */ |
492 | q->first_to_check = add_buf(q->first_to_check, count); | 505 | q->first_to_check = add_buf(q->first_to_check, count); |
493 | atomic_sub(count, &q->nr_buf_used); | 506 | atomic_sub(count, &q->nr_buf_used); |
@@ -495,13 +508,12 @@ check_next: | |||
495 | case SLSB_CU_INPUT_EMPTY: | 508 | case SLSB_CU_INPUT_EMPTY: |
496 | case SLSB_P_INPUT_NOT_INIT: | 509 | case SLSB_P_INPUT_NOT_INIT: |
497 | case SLSB_P_INPUT_ACK: | 510 | case SLSB_P_INPUT_ACK: |
498 | QDIO_DBF_TEXT5(0, trace, "inpnipro"); | 511 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in nop"); |
499 | break; | 512 | break; |
500 | default: | 513 | default: |
501 | BUG(); | 514 | BUG(); |
502 | } | 515 | } |
503 | out: | 516 | out: |
504 | QDIO_DBF_HEX4(0, trace, &q->first_to_check, sizeof(int)); | ||
505 | return q->first_to_check; | 517 | return q->first_to_check; |
506 | } | 518 | } |
507 | 519 | ||
@@ -515,8 +527,7 @@ int qdio_inbound_q_moved(struct qdio_q *q) | |||
515 | if (!need_siga_sync(q) && !pci_out_supported(q)) | 527 | if (!need_siga_sync(q) && !pci_out_supported(q)) |
516 | q->u.in.timestamp = get_usecs(); | 528 | q->u.in.timestamp = get_usecs(); |
517 | 529 | ||
518 | QDIO_DBF_TEXT4(0, trace, "inhasmvd"); | 530 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved"); |
519 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
520 | return 1; | 531 | return 1; |
521 | } else | 532 | } else |
522 | return 0; | 533 | return 0; |
@@ -524,10 +535,7 @@ int qdio_inbound_q_moved(struct qdio_q *q) | |||
524 | 535 | ||
525 | static int qdio_inbound_q_done(struct qdio_q *q) | 536 | static int qdio_inbound_q_done(struct qdio_q *q) |
526 | { | 537 | { |
527 | unsigned char state; | 538 | unsigned char state = 0; |
528 | #ifdef CONFIG_QDIO_DEBUG | ||
529 | char dbf_text[15]; | ||
530 | #endif | ||
531 | 539 | ||
532 | if (!atomic_read(&q->nr_buf_used)) | 540 | if (!atomic_read(&q->nr_buf_used)) |
533 | return 1; | 541 | return 1; |
@@ -538,7 +546,7 @@ static int qdio_inbound_q_done(struct qdio_q *q) | |||
538 | */ | 546 | */ |
539 | qdio_siga_sync_q(q); | 547 | qdio_siga_sync_q(q); |
540 | 548 | ||
541 | get_buf_state(q, q->first_to_check, &state); | 549 | get_buf_state(q, q->first_to_check, &state, 0); |
542 | if (state == SLSB_P_INPUT_PRIMED) | 550 | if (state == SLSB_P_INPUT_PRIMED) |
543 | /* we got something to do */ | 551 | /* we got something to do */ |
544 | return 0; | 552 | return 0; |
@@ -552,20 +560,12 @@ static int qdio_inbound_q_done(struct qdio_q *q) | |||
552 | * has (probably) not moved (see qdio_inbound_processing). | 560 | * has (probably) not moved (see qdio_inbound_processing). |
553 | */ | 561 | */ |
554 | if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { | 562 | if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { |
555 | #ifdef CONFIG_QDIO_DEBUG | 563 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d", |
556 | QDIO_DBF_TEXT4(0, trace, "inqisdon"); | 564 | q->first_to_check); |
557 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
558 | sprintf(dbf_text, "pf%02x", q->first_to_check); | ||
559 | QDIO_DBF_TEXT4(0, trace, dbf_text); | ||
560 | #endif /* CONFIG_QDIO_DEBUG */ | ||
561 | return 1; | 565 | return 1; |
562 | } else { | 566 | } else { |
563 | #ifdef CONFIG_QDIO_DEBUG | 567 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d", |
564 | QDIO_DBF_TEXT4(0, trace, "inqisntd"); | 568 | q->first_to_check); |
565 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
566 | sprintf(dbf_text, "pf%02x", q->first_to_check); | ||
567 | QDIO_DBF_TEXT4(0, trace, dbf_text); | ||
568 | #endif /* CONFIG_QDIO_DEBUG */ | ||
569 | return 0; | 569 | return 0; |
570 | } | 570 | } |
571 | } | 571 | } |
@@ -573,9 +573,6 @@ static int qdio_inbound_q_done(struct qdio_q *q) | |||
573 | void qdio_kick_inbound_handler(struct qdio_q *q) | 573 | void qdio_kick_inbound_handler(struct qdio_q *q) |
574 | { | 574 | { |
575 | int count, start, end; | 575 | int count, start, end; |
576 | #ifdef CONFIG_QDIO_DEBUG | ||
577 | char dbf_text[15]; | ||
578 | #endif | ||
579 | 576 | ||
580 | qdio_perf_stat_inc(&perf_stats.inbound_handler); | 577 | qdio_perf_stat_inc(&perf_stats.inbound_handler); |
581 | 578 | ||
@@ -586,10 +583,7 @@ void qdio_kick_inbound_handler(struct qdio_q *q) | |||
586 | else | 583 | else |
587 | count = end + QDIO_MAX_BUFFERS_PER_Q - start; | 584 | count = end + QDIO_MAX_BUFFERS_PER_Q - start; |
588 | 585 | ||
589 | #ifdef CONFIG_QDIO_DEBUG | 586 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count); |
590 | sprintf(dbf_text, "s=%2xc=%2x", start, count); | ||
591 | QDIO_DBF_TEXT4(0, trace, dbf_text); | ||
592 | #endif /* CONFIG_QDIO_DEBUG */ | ||
593 | 587 | ||
594 | if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) | 588 | if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) |
595 | return; | 589 | return; |
@@ -655,14 +649,14 @@ check_next: | |||
655 | if (q->first_to_check == stop) | 649 | if (q->first_to_check == stop) |
656 | return q->first_to_check; | 650 | return q->first_to_check; |
657 | 651 | ||
658 | count = get_buf_states(q, q->first_to_check, &state, count); | 652 | count = get_buf_states(q, q->first_to_check, &state, count, 0); |
659 | if (!count) | 653 | if (!count) |
660 | return q->first_to_check; | 654 | return q->first_to_check; |
661 | 655 | ||
662 | switch (state) { | 656 | switch (state) { |
663 | case SLSB_P_OUTPUT_EMPTY: | 657 | case SLSB_P_OUTPUT_EMPTY: |
664 | /* the adapter got it */ | 658 | /* the adapter got it */ |
665 | QDIO_DBF_TEXT5(0, trace, "outpempt"); | 659 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count); |
666 | 660 | ||
667 | atomic_sub(count, &q->nr_buf_used); | 661 | atomic_sub(count, &q->nr_buf_used); |
668 | q->first_to_check = add_buf(q->first_to_check, count); | 662 | q->first_to_check = add_buf(q->first_to_check, count); |
@@ -674,14 +668,14 @@ check_next: | |||
674 | break; | 668 | break; |
675 | goto check_next; | 669 | goto check_next; |
676 | case SLSB_P_OUTPUT_ERROR: | 670 | case SLSB_P_OUTPUT_ERROR: |
677 | announce_buffer_error(q); | 671 | announce_buffer_error(q, count); |
678 | /* process the buffer, the upper layer will take care of it */ | 672 | /* process the buffer, the upper layer will take care of it */ |
679 | q->first_to_check = add_buf(q->first_to_check, count); | 673 | q->first_to_check = add_buf(q->first_to_check, count); |
680 | atomic_sub(count, &q->nr_buf_used); | 674 | atomic_sub(count, &q->nr_buf_used); |
681 | break; | 675 | break; |
682 | case SLSB_CU_OUTPUT_PRIMED: | 676 | case SLSB_CU_OUTPUT_PRIMED: |
683 | /* the adapter has not fetched the output yet */ | 677 | /* the adapter has not fetched the output yet */ |
684 | QDIO_DBF_TEXT5(0, trace, "outpprim"); | 678 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out primed:%1d", q->nr); |
685 | break; | 679 | break; |
686 | case SLSB_P_OUTPUT_NOT_INIT: | 680 | case SLSB_P_OUTPUT_NOT_INIT: |
687 | case SLSB_P_OUTPUT_HALTED: | 681 | case SLSB_P_OUTPUT_HALTED: |
@@ -706,99 +700,48 @@ static inline int qdio_outbound_q_moved(struct qdio_q *q) | |||
706 | 700 | ||
707 | if ((bufnr != q->last_move_ftc) || q->qdio_error) { | 701 | if ((bufnr != q->last_move_ftc) || q->qdio_error) { |
708 | q->last_move_ftc = bufnr; | 702 | q->last_move_ftc = bufnr; |
709 | QDIO_DBF_TEXT4(0, trace, "oqhasmvd"); | 703 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out moved:%1d", q->nr); |
710 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
711 | return 1; | 704 | return 1; |
712 | } else | 705 | } else |
713 | return 0; | 706 | return 0; |
714 | } | 707 | } |
715 | 708 | ||
716 | /* | ||
717 | * VM could present us cc=2 and busy bit set on SIGA-write | ||
718 | * during reconfiguration of their Guest LAN (only in iqdio mode, | ||
719 | * otherwise qdio is asynchronous and cc=2 and busy bit there will take | ||
720 | * the queues down immediately). | ||
721 | * | ||
722 | * Therefore qdio_siga_output will try for a short time constantly, | ||
723 | * if such a condition occurs. If it doesn't change, it will | ||
724 | * increase the busy_siga_counter and save the timestamp, and | ||
725 | * schedule the queue for later processing. qdio_outbound_processing | ||
726 | * will check out the counter. If non-zero, it will call qdio_kick_outbound_q | ||
727 | * as often as the value of the counter. This will attempt further SIGA | ||
728 | * instructions. For each successful SIGA, the counter is | ||
729 | * decreased, for failing SIGAs the counter remains the same, after | ||
730 | * all. After some time of no movement, qdio_kick_outbound_q will | ||
731 | * finally fail and reflect corresponding error codes to call | ||
732 | * the upper layer module and have it take the queues down. | ||
733 | * | ||
734 | * Note that this is a change from the original HiperSockets design | ||
735 | * (saying cc=2 and busy bit means take the queues down), but in | ||
736 | * these days Guest LAN didn't exist... excessive cc=2 with busy bit | ||
737 | * conditions will still take the queues down, but the threshold is | ||
738 | * higher due to the Guest LAN environment. | ||
739 | * | ||
740 | * Called from outbound tasklet and do_QDIO handler. | ||
741 | */ | ||
742 | static void qdio_kick_outbound_q(struct qdio_q *q) | 709 | static void qdio_kick_outbound_q(struct qdio_q *q) |
743 | { | 710 | { |
744 | int rc; | 711 | unsigned int busy_bit; |
745 | #ifdef CONFIG_QDIO_DEBUG | 712 | int cc; |
746 | char dbf_text[15]; | ||
747 | |||
748 | QDIO_DBF_TEXT5(0, trace, "kickoutq"); | ||
749 | QDIO_DBF_HEX5(0, trace, &q, sizeof(void *)); | ||
750 | #endif /* CONFIG_QDIO_DEBUG */ | ||
751 | 713 | ||
752 | if (!need_siga_out(q)) | 714 | if (!need_siga_out(q)) |
753 | return; | 715 | return; |
754 | 716 | ||
755 | rc = qdio_siga_output(q); | 717 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr); |
756 | switch (rc) { | 718 | qdio_perf_stat_inc(&perf_stats.siga_out); |
719 | |||
720 | cc = qdio_siga_output(q, &busy_bit); | ||
721 | switch (cc) { | ||
757 | case 0: | 722 | case 0: |
758 | /* TODO: improve error handling for CC=0 case */ | ||
759 | #ifdef CONFIG_QDIO_DEBUG | ||
760 | if (q->u.out.timestamp) { | ||
761 | QDIO_DBF_TEXT3(0, trace, "cc2reslv"); | ||
762 | sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, | ||
763 | q->nr, | ||
764 | atomic_read(&q->u.out.busy_siga_counter)); | ||
765 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
766 | } | ||
767 | #endif /* CONFIG_QDIO_DEBUG */ | ||
768 | /* went smooth this time, reset timestamp */ | ||
769 | q->u.out.timestamp = 0; | ||
770 | break; | 723 | break; |
771 | /* cc=2 and busy bit */ | 724 | case 2: |
772 | case (2 | QDIO_ERROR_SIGA_BUSY): | 725 | if (busy_bit) { |
773 | atomic_inc(&q->u.out.busy_siga_counter); | 726 | DBF_ERROR("%4x cc2 REP:%1d", SCH_NO(q), q->nr); |
774 | 727 | q->qdio_error = cc | QDIO_ERROR_SIGA_BUSY; | |
775 | /* if the last siga was successful, save timestamp here */ | 728 | } else { |
776 | if (!q->u.out.timestamp) | 729 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w cc2:%1d", |
777 | q->u.out.timestamp = get_usecs(); | 730 | q->nr); |
778 | 731 | q->qdio_error = cc; | |
779 | /* if we're in time, don't touch qdio_error */ | ||
780 | if (get_usecs() - q->u.out.timestamp < QDIO_BUSY_BIT_GIVE_UP) { | ||
781 | tasklet_schedule(&q->tasklet); | ||
782 | break; | ||
783 | } | 732 | } |
784 | QDIO_DBF_TEXT2(0, trace, "cc2REPRT"); | 733 | break; |
785 | #ifdef CONFIG_QDIO_DEBUG | 734 | case 1: |
786 | sprintf(dbf_text, "%4x%2x%2x", q->irq_ptr->schid.sch_no, q->nr, | 735 | case 3: |
787 | atomic_read(&q->u.out.busy_siga_counter)); | 736 | DBF_ERROR("%4x SIGA-W:%1d", SCH_NO(q), cc); |
788 | QDIO_DBF_TEXT3(0, trace, dbf_text); | 737 | q->qdio_error = cc; |
789 | #endif /* CONFIG_QDIO_DEBUG */ | 738 | break; |
790 | default: | ||
791 | /* for plain cc=1, 2 or 3 */ | ||
792 | q->qdio_error = rc; | ||
793 | } | 739 | } |
794 | } | 740 | } |
795 | 741 | ||
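qdio_kick_outbound_q() above no longer retries or counts busy SIGAs itself; it records the outcome in q->qdio_error and leaves the policy to the upper layer. One way a handler might classify that word is sketched below; handle_busy() and handle_fatal() are hypothetical callbacks, and the QDIO_ERROR_* flags are the existing ones from the public qdio header.

static void sketch_handle_outbound_error(unsigned int qdio_error)
{
	if (!qdio_error)
		return;				/* clean completion */
	if (qdio_error & QDIO_ERROR_SIGA_BUSY)
		handle_busy();			/* cc=2 with busy bit: back off, retry */
	else
		handle_fatal();			/* cc=1/3 or SLSB error: recover or tear down */
}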
796 | static void qdio_kick_outbound_handler(struct qdio_q *q) | 742 | static void qdio_kick_outbound_handler(struct qdio_q *q) |
797 | { | 743 | { |
798 | int start, end, count; | 744 | int start, end, count; |
799 | #ifdef CONFIG_QDIO_DEBUG | ||
800 | char dbf_text[15]; | ||
801 | #endif | ||
802 | 745 | ||
803 | start = q->first_to_kick; | 746 | start = q->first_to_kick; |
804 | end = q->last_move_ftc; | 747 | end = q->last_move_ftc; |
@@ -807,13 +750,8 @@ static void qdio_kick_outbound_handler(struct qdio_q *q) | |||
807 | else | 750 | else |
808 | count = end + QDIO_MAX_BUFFERS_PER_Q - start; | 751 | count = end + QDIO_MAX_BUFFERS_PER_Q - start; |
809 | 752 | ||
810 | #ifdef CONFIG_QDIO_DEBUG | 753 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kickouth: %1d", q->nr); |
811 | QDIO_DBF_TEXT4(0, trace, "kickouth"); | 754 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count); |
812 | QDIO_DBF_HEX4(0, trace, &q, sizeof(void *)); | ||
813 | |||
814 | sprintf(dbf_text, "s=%2xc=%2x", start, count); | ||
815 | QDIO_DBF_TEXT4(0, trace, dbf_text); | ||
816 | #endif /* CONFIG_QDIO_DEBUG */ | ||
817 | 755 | ||
818 | if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) | 756 | if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)) |
819 | return; | 757 | return; |
@@ -828,22 +766,18 @@ static void qdio_kick_outbound_handler(struct qdio_q *q) | |||
828 | 766 | ||
829 | static void __qdio_outbound_processing(struct qdio_q *q) | 767 | static void __qdio_outbound_processing(struct qdio_q *q) |
830 | { | 768 | { |
831 | int siga_attempts; | 769 | unsigned long flags; |
832 | 770 | ||
833 | qdio_perf_stat_inc(&perf_stats.tasklet_outbound); | 771 | qdio_perf_stat_inc(&perf_stats.tasklet_outbound); |
834 | 772 | spin_lock_irqsave(&q->lock, flags); | |
835 | /* see comment in qdio_kick_outbound_q */ | ||
836 | siga_attempts = atomic_read(&q->u.out.busy_siga_counter); | ||
837 | while (siga_attempts--) { | ||
838 | atomic_dec(&q->u.out.busy_siga_counter); | ||
839 | qdio_kick_outbound_q(q); | ||
840 | } | ||
841 | 773 | ||
842 | BUG_ON(atomic_read(&q->nr_buf_used) < 0); | 774 | BUG_ON(atomic_read(&q->nr_buf_used) < 0); |
843 | 775 | ||
844 | if (qdio_outbound_q_moved(q)) | 776 | if (qdio_outbound_q_moved(q)) |
845 | qdio_kick_outbound_handler(q); | 777 | qdio_kick_outbound_handler(q); |
846 | 778 | ||
779 | spin_unlock_irqrestore(&q->lock, flags); | ||
780 | |||
847 | if (queue_type(q) == QDIO_ZFCP_QFMT) { | 781 | if (queue_type(q) == QDIO_ZFCP_QFMT) { |
848 | if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) | 782 | if (!pci_out_supported(q) && !qdio_outbound_q_done(q)) |
849 | tasklet_schedule(&q->tasklet); | 783 | tasklet_schedule(&q->tasklet); |
@@ -908,27 +842,18 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q) | |||
908 | static inline void qdio_set_state(struct qdio_irq *irq_ptr, | 842 | static inline void qdio_set_state(struct qdio_irq *irq_ptr, |
909 | enum qdio_irq_states state) | 843 | enum qdio_irq_states state) |
910 | { | 844 | { |
911 | #ifdef CONFIG_QDIO_DEBUG | 845 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "newstate: %1d", state); |
912 | char dbf_text[15]; | ||
913 | |||
914 | QDIO_DBF_TEXT5(0, trace, "newstate"); | ||
915 | sprintf(dbf_text, "%4x%4x", irq_ptr->schid.sch_no, state); | ||
916 | QDIO_DBF_TEXT5(0, trace, dbf_text); | ||
917 | #endif /* CONFIG_QDIO_DEBUG */ | ||
918 | 846 | ||
919 | irq_ptr->state = state; | 847 | irq_ptr->state = state; |
920 | mb(); | 848 | mb(); |
921 | } | 849 | } |
922 | 850 | ||
923 | static void qdio_irq_check_sense(struct subchannel_id schid, struct irb *irb) | 851 | static void qdio_irq_check_sense(struct qdio_irq *irq_ptr, struct irb *irb) |
924 | { | 852 | { |
925 | char dbf_text[15]; | ||
926 | |||
927 | if (irb->esw.esw0.erw.cons) { | 853 | if (irb->esw.esw0.erw.cons) { |
928 | sprintf(dbf_text, "sens%4x", schid.sch_no); | 854 | DBF_ERROR("%4x sense:", irq_ptr->schid.sch_no); |
929 | QDIO_DBF_TEXT2(1, trace, dbf_text); | 855 | DBF_ERROR_HEX(irb, 64); |
930 | QDIO_DBF_HEX0(0, trace, irb, 64); | 856 | DBF_ERROR_HEX(irb->ecw, 64); |
931 | QDIO_DBF_HEX0(0, trace, irb->ecw, 64); | ||
932 | } | 857 | } |
933 | } | 858 | } |
934 | 859 | ||
@@ -962,14 +887,10 @@ static void qdio_handle_activate_check(struct ccw_device *cdev, | |||
962 | { | 887 | { |
963 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 888 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
964 | struct qdio_q *q; | 889 | struct qdio_q *q; |
965 | char dbf_text[15]; | ||
966 | 890 | ||
967 | QDIO_DBF_TEXT2(1, trace, "ick2"); | 891 | DBF_ERROR("%4x ACT CHECK", irq_ptr->schid.sch_no); |
968 | sprintf(dbf_text, "%s", dev_name(&cdev->dev)); | 892 | DBF_ERROR("intp :%lx", intparm); |
969 | QDIO_DBF_TEXT2(1, trace, dbf_text); | 893 | DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); |
970 | QDIO_DBF_HEX2(0, trace, &intparm, sizeof(int)); | ||
971 | QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); | ||
972 | QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int)); | ||
973 | 894 | ||
974 | if (irq_ptr->nr_input_qs) { | 895 | if (irq_ptr->nr_input_qs) { |
975 | q = irq_ptr->input_qs[0]; | 896 | q = irq_ptr->input_qs[0]; |
@@ -1022,28 +943,29 @@ static void qdio_int_error(struct ccw_device *cdev) | |||
1022 | } | 943 | } |
1023 | 944 | ||
1024 | static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, | 945 | static int qdio_establish_check_errors(struct ccw_device *cdev, int cstat, |
1025 | int dstat) | 946 | int dstat) |
1026 | { | 947 | { |
1027 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 948 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1028 | 949 | ||
1029 | if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { | 950 | if (cstat || (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END))) { |
1030 | QDIO_DBF_TEXT2(1, setup, "eq:ckcon"); | 951 | DBF_ERROR("EQ:ck con"); |
1031 | goto error; | 952 | goto error; |
1032 | } | 953 | } |
1033 | 954 | ||
1034 | if (!(dstat & DEV_STAT_DEV_END)) { | 955 | if (!(dstat & DEV_STAT_DEV_END)) { |
1035 | QDIO_DBF_TEXT2(1, setup, "eq:no de"); | 956 | DBF_ERROR("EQ:no dev"); |
1036 | goto error; | 957 | goto error; |
1037 | } | 958 | } |
1038 | 959 | ||
1039 | if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) { | 960 | if (dstat & ~(DEV_STAT_CHN_END | DEV_STAT_DEV_END)) { |
1040 | QDIO_DBF_TEXT2(1, setup, "eq:badio"); | 961 | DBF_ERROR("EQ: bad io"); |
1041 | goto error; | 962 | goto error; |
1042 | } | 963 | } |
1043 | return 0; | 964 | return 0; |
1044 | error: | 965 | error: |
1045 | QDIO_DBF_HEX2(0, trace, &cstat, sizeof(int)); | 966 | DBF_ERROR("%4x EQ:error", irq_ptr->schid.sch_no); |
1046 | QDIO_DBF_HEX2(0, trace, &dstat, sizeof(int)); | 967 | DBF_ERROR("ds: %2x cs:%2x", dstat, cstat); |
968 | |||
1047 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); | 969 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR); |
1048 | return 1; | 970 | return 1; |
1049 | } | 971 | } |
@@ -1052,12 +974,8 @@ static void qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, | |||
1052 | int dstat) | 974 | int dstat) |
1053 | { | 975 | { |
1054 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 976 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1055 | char dbf_text[15]; | ||
1056 | |||
1057 | sprintf(dbf_text, "qehi%4x", cdev->private->schid.sch_no); | ||
1058 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1059 | QDIO_DBF_TEXT0(0, trace, dbf_text); | ||
1060 | 977 | ||
978 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "qest irq"); | ||
1061 | if (!qdio_establish_check_errors(cdev, cstat, dstat)) | 979 | if (!qdio_establish_check_errors(cdev, cstat, dstat)) |
1062 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); | 980 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ESTABLISHED); |
1063 | } | 981 | } |
@@ -1068,25 +986,21 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1068 | { | 986 | { |
1069 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; | 987 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1070 | int cstat, dstat; | 988 | int cstat, dstat; |
1071 | char dbf_text[15]; | ||
1072 | 989 | ||
1073 | qdio_perf_stat_inc(&perf_stats.qdio_int); | 990 | qdio_perf_stat_inc(&perf_stats.qdio_int); |
1074 | 991 | ||
1075 | if (!intparm || !irq_ptr) { | 992 | if (!intparm || !irq_ptr) { |
1076 | sprintf(dbf_text, "qihd%4x", cdev->private->schid.sch_no); | 993 | DBF_ERROR("qint:%4x", cdev->private->schid.sch_no); |
1077 | QDIO_DBF_TEXT2(1, setup, dbf_text); | ||
1078 | return; | 994 | return; |
1079 | } | 995 | } |
1080 | 996 | ||
1081 | if (IS_ERR(irb)) { | 997 | if (IS_ERR(irb)) { |
1082 | switch (PTR_ERR(irb)) { | 998 | switch (PTR_ERR(irb)) { |
1083 | case -EIO: | 999 | case -EIO: |
1084 | sprintf(dbf_text, "ierr%4x", irq_ptr->schid.sch_no); | 1000 | DBF_ERROR("%4x IO error", irq_ptr->schid.sch_no); |
1085 | QDIO_DBF_TEXT2(1, setup, dbf_text); | ||
1086 | return; | 1001 | return; |
1087 | case -ETIMEDOUT: | 1002 | case -ETIMEDOUT: |
1088 | sprintf(dbf_text, "qtoh%4x", irq_ptr->schid.sch_no); | 1003 | DBF_ERROR("%4x IO timeout", irq_ptr->schid.sch_no); |
1089 | QDIO_DBF_TEXT2(1, setup, dbf_text); | ||
1090 | qdio_int_error(cdev); | 1004 | qdio_int_error(cdev); |
1091 | return; | 1005 | return; |
1092 | default: | 1006 | default: |
@@ -1094,7 +1008,7 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1094 | return; | 1008 | return; |
1095 | } | 1009 | } |
1096 | } | 1010 | } |
1097 | qdio_irq_check_sense(irq_ptr->schid, irb); | 1011 | qdio_irq_check_sense(irq_ptr, irb); |
1098 | 1012 | ||
1099 | cstat = irb->scsw.cmd.cstat; | 1013 | cstat = irb->scsw.cmd.cstat; |
1100 | dstat = irb->scsw.cmd.dstat; | 1014 | dstat = irb->scsw.cmd.dstat; |
@@ -1129,23 +1043,20 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
1129 | /** | 1043 | /** |
1130 | * qdio_get_ssqd_desc - get qdio subchannel description | 1044 | * qdio_get_ssqd_desc - get qdio subchannel description |
1131 | * @cdev: ccw device to get description for | 1045 | * @cdev: ccw device to get description for |
1046 | * @data: where to store the ssqd | ||
1132 | * | 1047 | * |
1133 | * Returns a pointer to the saved qdio subchannel description, | 1048 | * Returns 0 or an error code. The results of the chsc are stored in the |
1134 | * or NULL for not setup qdio devices. | 1049 | * specified structure. |
1135 | */ | 1050 | */ |
1136 | struct qdio_ssqd_desc *qdio_get_ssqd_desc(struct ccw_device *cdev) | 1051 | int qdio_get_ssqd_desc(struct ccw_device *cdev, |
1052 | struct qdio_ssqd_desc *data) | ||
1137 | { | 1053 | { |
1138 | struct qdio_irq *irq_ptr; | ||
1139 | char dbf_text[15]; | ||
1140 | |||
1141 | sprintf(dbf_text, "qssq%4x", cdev->private->schid.sch_no); | ||
1142 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1143 | 1054 | ||
1144 | irq_ptr = cdev->private->qdio_data; | 1055 | if (!cdev || !cdev->private) |
1145 | if (!irq_ptr) | 1056 | return -EINVAL; |
1146 | return NULL; | ||
1147 | 1057 | ||
1148 | return &irq_ptr->ssqd_desc; | 1058 | DBF_EVENT("get ssqd:%4x", cdev->private->schid.sch_no); |
1059 | return qdio_setup_get_ssqd(NULL, &cdev->private->schid, data); | ||
1149 | } | 1060 | } |
1150 | EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); | 1061 | EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); |
1151 | 1062 | ||
@@ -1159,14 +1070,9 @@ EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); | |||
1159 | */ | 1070 | */ |
1160 | int qdio_cleanup(struct ccw_device *cdev, int how) | 1071 | int qdio_cleanup(struct ccw_device *cdev, int how) |
1161 | { | 1072 | { |
1162 | struct qdio_irq *irq_ptr; | 1073 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1163 | char dbf_text[15]; | ||
1164 | int rc; | 1074 | int rc; |
1165 | 1075 | ||
1166 | sprintf(dbf_text, "qcln%4x", cdev->private->schid.sch_no); | ||
1167 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1168 | |||
1169 | irq_ptr = cdev->private->qdio_data; | ||
1170 | if (!irq_ptr) | 1076 | if (!irq_ptr) |
1171 | return -ENODEV; | 1077 | return -ENODEV; |
1172 | 1078 | ||
@@ -1199,18 +1105,15 @@ static void qdio_shutdown_queues(struct ccw_device *cdev) | |||
1199 | */ | 1105 | */ |
1200 | int qdio_shutdown(struct ccw_device *cdev, int how) | 1106 | int qdio_shutdown(struct ccw_device *cdev, int how) |
1201 | { | 1107 | { |
1202 | struct qdio_irq *irq_ptr; | 1108 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1203 | int rc; | 1109 | int rc; |
1204 | unsigned long flags; | 1110 | unsigned long flags; |
1205 | char dbf_text[15]; | ||
1206 | 1111 | ||
1207 | sprintf(dbf_text, "qshu%4x", cdev->private->schid.sch_no); | ||
1208 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1209 | |||
1210 | irq_ptr = cdev->private->qdio_data; | ||
1211 | if (!irq_ptr) | 1112 | if (!irq_ptr) |
1212 | return -ENODEV; | 1113 | return -ENODEV; |
1213 | 1114 | ||
1115 | DBF_EVENT("qshutdown:%4x", cdev->private->schid.sch_no); | ||
1116 | |||
1214 | mutex_lock(&irq_ptr->setup_mutex); | 1117 | mutex_lock(&irq_ptr->setup_mutex); |
1215 | /* | 1118 | /* |
1216 | * Subchannel was already shot down. We cannot prevent being called | 1119 | * Subchannel was already shot down. We cannot prevent being called |
@@ -1234,10 +1137,8 @@ int qdio_shutdown(struct ccw_device *cdev, int how) | |||
1234 | /* default behaviour is halt */ | 1137 | /* default behaviour is halt */ |
1235 | rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); | 1138 | rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); |
1236 | if (rc) { | 1139 | if (rc) { |
1237 | sprintf(dbf_text, "sher%4x", irq_ptr->schid.sch_no); | 1140 | DBF_ERROR("%4x SHUTD ERR", irq_ptr->schid.sch_no); |
1238 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 1141 | DBF_ERROR("rc:%4d", rc); |
1239 | sprintf(dbf_text, "rc=%d", rc); | ||
1240 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1241 | goto no_cleanup; | 1142 | goto no_cleanup; |
1242 | } | 1143 | } |
1243 | 1144 | ||
@@ -1271,17 +1172,18 @@ EXPORT_SYMBOL_GPL(qdio_shutdown); | |||
1271 | */ | 1172 | */ |
1272 | int qdio_free(struct ccw_device *cdev) | 1173 | int qdio_free(struct ccw_device *cdev) |
1273 | { | 1174 | { |
1274 | struct qdio_irq *irq_ptr; | 1175 | struct qdio_irq *irq_ptr = cdev->private->qdio_data; |
1275 | char dbf_text[15]; | ||
1276 | |||
1277 | sprintf(dbf_text, "qfre%4x", cdev->private->schid.sch_no); | ||
1278 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1279 | 1176 | ||
1280 | irq_ptr = cdev->private->qdio_data; | ||
1281 | if (!irq_ptr) | 1177 | if (!irq_ptr) |
1282 | return -ENODEV; | 1178 | return -ENODEV; |
1283 | 1179 | ||
1180 | DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no); | ||
1284 | mutex_lock(&irq_ptr->setup_mutex); | 1181 | mutex_lock(&irq_ptr->setup_mutex); |
1182 | |||
1183 | if (irq_ptr->debug_area != NULL) { | ||
1184 | debug_unregister(irq_ptr->debug_area); | ||
1185 | irq_ptr->debug_area = NULL; | ||
1186 | } | ||
1285 | cdev->private->qdio_data = NULL; | 1187 | cdev->private->qdio_data = NULL; |
1286 | mutex_unlock(&irq_ptr->setup_mutex); | 1188 | mutex_unlock(&irq_ptr->setup_mutex); |
1287 | 1189 | ||
@@ -1300,10 +1202,6 @@ EXPORT_SYMBOL_GPL(qdio_free); | |||
1300 | int qdio_initialize(struct qdio_initialize *init_data) | 1202 | int qdio_initialize(struct qdio_initialize *init_data) |
1301 | { | 1203 | { |
1302 | int rc; | 1204 | int rc; |
1303 | char dbf_text[15]; | ||
1304 | |||
1305 | sprintf(dbf_text, "qini%4x", init_data->cdev->private->schid.sch_no); | ||
1306 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1307 | 1205 | ||
1308 | rc = qdio_allocate(init_data); | 1206 | rc = qdio_allocate(init_data); |
1309 | if (rc) | 1207 | if (rc) |
@@ -1323,10 +1221,8 @@ EXPORT_SYMBOL_GPL(qdio_initialize); | |||
1323 | int qdio_allocate(struct qdio_initialize *init_data) | 1221 | int qdio_allocate(struct qdio_initialize *init_data) |
1324 | { | 1222 | { |
1325 | struct qdio_irq *irq_ptr; | 1223 | struct qdio_irq *irq_ptr; |
1326 | char dbf_text[15]; | ||
1327 | 1224 | ||
1328 | sprintf(dbf_text, "qalc%4x", init_data->cdev->private->schid.sch_no); | 1225 | DBF_EVENT("qallocate:%4x", init_data->cdev->private->schid.sch_no); |
1329 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1330 | 1226 | ||
1331 | if ((init_data->no_input_qs && !init_data->input_handler) || | 1227 | if ((init_data->no_input_qs && !init_data->input_handler) || |
1332 | (init_data->no_output_qs && !init_data->output_handler)) | 1228 | (init_data->no_output_qs && !init_data->output_handler)) |
@@ -1340,16 +1236,13 @@ int qdio_allocate(struct qdio_initialize *init_data) | |||
1340 | (!init_data->output_sbal_addr_array)) | 1236 | (!init_data->output_sbal_addr_array)) |
1341 | return -EINVAL; | 1237 | return -EINVAL; |
1342 | 1238 | ||
1343 | qdio_allocate_do_dbf(init_data); | ||
1344 | |||
1345 | /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ | 1239 | /* irq_ptr must be in GFP_DMA since it contains ccw1.cda */ |
1346 | irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | 1240 | irq_ptr = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
1347 | if (!irq_ptr) | 1241 | if (!irq_ptr) |
1348 | goto out_err; | 1242 | goto out_err; |
1349 | QDIO_DBF_TEXT0(0, setup, "irq_ptr:"); | ||
1350 | QDIO_DBF_HEX0(0, setup, &irq_ptr, sizeof(void *)); | ||
1351 | 1243 | ||
1352 | mutex_init(&irq_ptr->setup_mutex); | 1244 | mutex_init(&irq_ptr->setup_mutex); |
1245 | qdio_allocate_dbf(init_data, irq_ptr); | ||
1353 | 1246 | ||
1354 | /* | 1247 | /* |
1355 | * Allocate a page for the chsc calls in qdio_establish. | 1248 | * Allocate a page for the chsc calls in qdio_establish. |
@@ -1367,9 +1260,6 @@ int qdio_allocate(struct qdio_initialize *init_data) | |||
1367 | goto out_rel; | 1260 | goto out_rel; |
1368 | WARN_ON((unsigned long)irq_ptr->qdr & 0xfff); | 1261 | WARN_ON((unsigned long)irq_ptr->qdr & 0xfff); |
1369 | 1262 | ||
1370 | QDIO_DBF_TEXT0(0, setup, "qdr:"); | ||
1371 | QDIO_DBF_HEX0(0, setup, &irq_ptr->qdr, sizeof(void *)); | ||
1372 | |||
1373 | if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, | 1263 | if (qdio_allocate_qs(irq_ptr, init_data->no_input_qs, |
1374 | init_data->no_output_qs)) | 1264 | init_data->no_output_qs)) |
1375 | goto out_rel; | 1265 | goto out_rel; |
@@ -1390,14 +1280,12 @@ EXPORT_SYMBOL_GPL(qdio_allocate); | |||
1390 | */ | 1280 | */ |
1391 | int qdio_establish(struct qdio_initialize *init_data) | 1281 | int qdio_establish(struct qdio_initialize *init_data) |
1392 | { | 1282 | { |
1393 | char dbf_text[20]; | ||
1394 | struct qdio_irq *irq_ptr; | 1283 | struct qdio_irq *irq_ptr; |
1395 | struct ccw_device *cdev = init_data->cdev; | 1284 | struct ccw_device *cdev = init_data->cdev; |
1396 | unsigned long saveflags; | 1285 | unsigned long saveflags; |
1397 | int rc; | 1286 | int rc; |
1398 | 1287 | ||
1399 | sprintf(dbf_text, "qest%4x", cdev->private->schid.sch_no); | 1288 | DBF_EVENT("qestablish:%4x", cdev->private->schid.sch_no); |
1400 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1401 | 1289 | ||
1402 | irq_ptr = cdev->private->qdio_data; | 1290 | irq_ptr = cdev->private->qdio_data; |
1403 | if (!irq_ptr) | 1291 | if (!irq_ptr) |
@@ -1427,10 +1315,8 @@ int qdio_establish(struct qdio_initialize *init_data) | |||
1427 | 1315 | ||
1428 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); | 1316 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ESTABLISH, 0, 0); |
1429 | if (rc) { | 1317 | if (rc) { |
1430 | sprintf(dbf_text, "eq:io%4x", irq_ptr->schid.sch_no); | 1318 | DBF_ERROR("%4x est IO ERR", irq_ptr->schid.sch_no); |
1431 | QDIO_DBF_TEXT2(1, setup, dbf_text); | 1319 | DBF_ERROR("rc:%4x", rc); |
1432 | sprintf(dbf_text, "eq:rc%4x", rc); | ||
1433 | QDIO_DBF_TEXT2(1, setup, dbf_text); | ||
1434 | } | 1320 | } |
1435 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); | 1321 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); |
1436 | 1322 | ||
@@ -1451,10 +1337,8 @@ int qdio_establish(struct qdio_initialize *init_data) | |||
1451 | } | 1337 | } |
1452 | 1338 | ||
1453 | qdio_setup_ssqd_info(irq_ptr); | 1339 | qdio_setup_ssqd_info(irq_ptr); |
1454 | sprintf(dbf_text, "qDmmwc%2x", irq_ptr->ssqd_desc.mmwc); | 1340 | DBF_EVENT("qDmmwc:%2x", irq_ptr->ssqd_desc.mmwc); |
1455 | QDIO_DBF_TEXT2(0, setup, dbf_text); | 1341 | DBF_EVENT("qib ac:%4x", irq_ptr->qib.ac); |
1456 | sprintf(dbf_text, "qib ac%2x", irq_ptr->qib.ac); | ||
1457 | QDIO_DBF_TEXT2(0, setup, dbf_text); | ||
1458 | 1342 | ||
1459 | /* qebsm is now setup if available, initialize buffer states */ | 1343 | /* qebsm is now setup if available, initialize buffer states */ |
1460 | qdio_init_buf_states(irq_ptr); | 1344 | qdio_init_buf_states(irq_ptr); |
@@ -1475,10 +1359,8 @@ int qdio_activate(struct ccw_device *cdev) | |||
1475 | struct qdio_irq *irq_ptr; | 1359 | struct qdio_irq *irq_ptr; |
1476 | int rc; | 1360 | int rc; |
1477 | unsigned long saveflags; | 1361 | unsigned long saveflags; |
1478 | char dbf_text[20]; | ||
1479 | 1362 | ||
1480 | sprintf(dbf_text, "qact%4x", cdev->private->schid.sch_no); | 1363 | DBF_EVENT("qactivate:%4x", cdev->private->schid.sch_no); |
1481 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
1482 | 1364 | ||
1483 | irq_ptr = cdev->private->qdio_data; | 1365 | irq_ptr = cdev->private->qdio_data; |
1484 | if (!irq_ptr) | 1366 | if (!irq_ptr) |
@@ -1504,10 +1386,8 @@ int qdio_activate(struct ccw_device *cdev) | |||
1504 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, | 1386 | rc = ccw_device_start(cdev, &irq_ptr->ccw, QDIO_DOING_ACTIVATE, |
1505 | 0, DOIO_DENY_PREFETCH); | 1387 | 0, DOIO_DENY_PREFETCH); |
1506 | if (rc) { | 1388 | if (rc) { |
1507 | sprintf(dbf_text, "aq:io%4x", irq_ptr->schid.sch_no); | 1389 | DBF_ERROR("%4x act IO ERR", irq_ptr->schid.sch_no); |
1508 | QDIO_DBF_TEXT2(1, setup, dbf_text); | 1390 | DBF_ERROR("rc:%4x", rc); |
1509 | sprintf(dbf_text, "aq:rc%4x", rc); | ||
1510 | QDIO_DBF_TEXT2(1, setup, dbf_text); | ||
1511 | } | 1391 | } |
1512 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); | 1392 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), saveflags); |
1513 | 1393 | ||
@@ -1565,23 +1445,38 @@ static inline int buf_in_between(int bufnr, int start, int count) | |||
1565 | static void handle_inbound(struct qdio_q *q, unsigned int callflags, | 1445 | static void handle_inbound(struct qdio_q *q, unsigned int callflags, |
1566 | int bufnr, int count) | 1446 | int bufnr, int count) |
1567 | { | 1447 | { |
1568 | unsigned long flags; | 1448 | int used, cc, diff; |
1569 | int used, rc; | ||
1570 | 1449 | ||
1571 | /* | 1450 | if (!q->u.in.polling) |
1572 | * do_QDIO could run in parallel with the queue tasklet so the | 1451 | goto set; |
1573 | * upper-layer program could empty the ACK'ed buffer here. | 1452 | |
1574 | * If that happens we must clear the polling flag, otherwise | 1453 | /* protect against stop polling setting an ACK for an emptied slsb */ |
1575 | * qdio_stop_polling() could set the buffer to NOT_INIT after | 1454 | if (count == QDIO_MAX_BUFFERS_PER_Q) { |
1576 | * it was set to EMPTY which would kill us. | 1455 | /* overwriting everything, just delete polling status */ |
1577 | */ | 1456 | q->u.in.polling = 0; |
1578 | spin_lock_irqsave(&q->u.in.lock, flags); | 1457 | q->u.in.ack_count = 0; |
1579 | if (q->u.in.polling) | 1458 | goto set; |
1580 | if (buf_in_between(q->last_move_ftc, bufnr, count)) | 1459 | } else if (buf_in_between(q->last_move_ftc, bufnr, count)) { |
1460 | if (is_qebsm(q)) { | ||
1461 | /* partial overwrite, just update last_move_ftc */ | ||
1462 | diff = add_buf(bufnr, count); | ||
1463 | diff = sub_buf(diff, q->last_move_ftc); | ||
1464 | q->u.in.ack_count -= diff; | ||
1465 | if (q->u.in.ack_count <= 0) { | ||
1466 | q->u.in.polling = 0; | ||
1467 | q->u.in.ack_count = 0; | ||
1468 | /* TODO: must we set last_move_ftc to something meaningful? */ | ||
1469 | goto set; | ||
1470 | } | ||
1471 | q->last_move_ftc = add_buf(q->last_move_ftc, diff); | ||
1472 | } | ||
1473 | else | ||
1474 | /* the only ACK will be deleted, so stop polling */ | ||
1581 | q->u.in.polling = 0; | 1475 | q->u.in.polling = 0; |
1476 | } | ||
1582 | 1477 | ||
1478 | set: | ||
1583 | count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); | 1479 | count = set_buf_states(q, bufnr, SLSB_CU_INPUT_EMPTY, count); |
1584 | spin_unlock_irqrestore(&q->u.in.lock, flags); | ||
1585 | 1480 | ||
1586 | used = atomic_add_return(count, &q->nr_buf_used) - count; | 1481 | used = atomic_add_return(count, &q->nr_buf_used) - count; |
1587 | BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q); | 1482 | BUG_ON(used + count > QDIO_MAX_BUFFERS_PER_Q); |
@@ -1591,9 +1486,9 @@ static void handle_inbound(struct qdio_q *q, unsigned int callflags, | |||
1591 | return; | 1486 | return; |
1592 | 1487 | ||
1593 | if (need_siga_in(q)) { | 1488 | if (need_siga_in(q)) { |
1594 | rc = qdio_siga_input(q); | 1489 | cc = qdio_siga_input(q); |
1595 | if (rc) | 1490 | if (cc) |
1596 | q->qdio_error = rc; | 1491 | q->qdio_error = cc; |
1597 | } | 1492 | } |
1598 | } | 1493 | } |
1599 | 1494 | ||
@@ -1640,6 +1535,10 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags, | |||
1640 | while (count--) | 1535 | while (count--) |
1641 | qdio_kick_outbound_q(q); | 1536 | qdio_kick_outbound_q(q); |
1642 | } | 1537 | } |
1538 | |||
1539 | /* report CC=2 conditions synchronously */ | ||
1540 | if (q->qdio_error) | ||
1541 | __qdio_outbound_processing(q); | ||
1643 | goto out; | 1542 | goto out; |
1644 | } | 1543 | } |
1645 | 1544 | ||
@@ -1649,11 +1548,11 @@ static void handle_outbound(struct qdio_q *q, unsigned int callflags, | |||
1649 | } | 1548 | } |
1650 | 1549 | ||
1651 | /* try to fast requeue buffers */ | 1550 | /* try to fast requeue buffers */ |
1652 | get_buf_state(q, prev_buf(bufnr), &state); | 1551 | get_buf_state(q, prev_buf(bufnr), &state, 0); |
1653 | if (state != SLSB_CU_OUTPUT_PRIMED) | 1552 | if (state != SLSB_CU_OUTPUT_PRIMED) |
1654 | qdio_kick_outbound_q(q); | 1553 | qdio_kick_outbound_q(q); |
1655 | else { | 1554 | else { |
1656 | QDIO_DBF_TEXT5(0, trace, "fast-req"); | 1555 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req"); |
1657 | qdio_perf_stat_inc(&perf_stats.fast_requeue); | 1556 | qdio_perf_stat_inc(&perf_stats.fast_requeue); |
1658 | } | 1557 | } |
1659 | out: | 1558 | out: |
@@ -1673,12 +1572,6 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, | |||
1673 | int q_nr, int bufnr, int count) | 1572 | int q_nr, int bufnr, int count) |
1674 | { | 1573 | { |
1675 | struct qdio_irq *irq_ptr; | 1574 | struct qdio_irq *irq_ptr; |
1676 | #ifdef CONFIG_QDIO_DEBUG | ||
1677 | char dbf_text[20]; | ||
1678 | |||
1679 | sprintf(dbf_text, "doQD%4x", cdev->private->schid.sch_no); | ||
1680 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
1681 | #endif /* CONFIG_QDIO_DEBUG */ | ||
1682 | 1575 | ||
1683 | if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || | 1576 | if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || |
1684 | (count > QDIO_MAX_BUFFERS_PER_Q) || | 1577 | (count > QDIO_MAX_BUFFERS_PER_Q) || |
@@ -1692,33 +1585,24 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags, | |||
1692 | if (!irq_ptr) | 1585 | if (!irq_ptr) |
1693 | return -ENODEV; | 1586 | return -ENODEV; |
1694 | 1587 | ||
1695 | #ifdef CONFIG_QDIO_DEBUG | ||
1696 | if (callflags & QDIO_FLAG_SYNC_INPUT) | 1588 | if (callflags & QDIO_FLAG_SYNC_INPUT) |
1697 | QDIO_DBF_HEX3(0, trace, &irq_ptr->input_qs[q_nr], | 1589 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input"); |
1698 | sizeof(void *)); | ||
1699 | else | 1590 | else |
1700 | QDIO_DBF_HEX3(0, trace, &irq_ptr->output_qs[q_nr], | 1591 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output"); |
1701 | sizeof(void *)); | 1592 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags); |
1702 | 1593 | DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count); | |
1703 | sprintf(dbf_text, "flag%04x", callflags); | ||
1704 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
1705 | sprintf(dbf_text, "qi%02xct%02x", bufnr, count); | ||
1706 | QDIO_DBF_TEXT3(0, trace, dbf_text); | ||
1707 | #endif /* CONFIG_QDIO_DEBUG */ | ||
1708 | 1594 | ||
1709 | if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) | 1595 | if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE) |
1710 | return -EBUSY; | 1596 | return -EBUSY; |
1711 | 1597 | ||
1712 | if (callflags & QDIO_FLAG_SYNC_INPUT) | 1598 | if (callflags & QDIO_FLAG_SYNC_INPUT) |
1713 | handle_inbound(irq_ptr->input_qs[q_nr], | 1599 | handle_inbound(irq_ptr->input_qs[q_nr], callflags, bufnr, |
1714 | callflags, bufnr, count); | 1600 | count); |
1715 | else if (callflags & QDIO_FLAG_SYNC_OUTPUT) | 1601 | else if (callflags & QDIO_FLAG_SYNC_OUTPUT) |
1716 | handle_outbound(irq_ptr->output_qs[q_nr], | 1602 | handle_outbound(irq_ptr->output_qs[q_nr], callflags, bufnr, |
1717 | callflags, bufnr, count); | 1603 | count); |
1718 | else { | 1604 | else |
1719 | QDIO_DBF_TEXT3(1, trace, "doQD:inv"); | ||
1720 | return -EINVAL; | 1605 | return -EINVAL; |
1721 | } | ||
1722 | return 0; | 1606 | return 0; |
1723 | } | 1607 | } |
1724 | EXPORT_SYMBOL_GPL(do_QDIO); | 1608 | EXPORT_SYMBOL_GPL(do_QDIO); |
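Note on the qdio_get_ssqd_desc() rework in the hunks above: the function no longer returns a pointer into the qdio_irq but runs the CHSC itself and copies the result into a caller-provided buffer, returning 0 or a negative error code. A minimal caller sketch (illustrative only; the qfmt field access is an assumption about struct qdio_ssqd_desc, not part of this patch):

	/* Hypothetical upper-layer caller of the reworked interface. */
	static int example_read_ssqd(struct ccw_device *cdev)
	{
		struct qdio_ssqd_desc ssqd;
		int rc;

		rc = qdio_get_ssqd_desc(cdev, &ssqd);	/* fills ssqd on success */
		if (rc)
			return rc;			/* -EINVAL or CHSC error */
		return ssqd.qfmt;			/* e.g. QDIO_ZFCP_QFMT */
	}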
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c index ec5c4a414235..136d0f0b1e93 100644 --- a/drivers/s390/cio/qdio_perf.c +++ b/drivers/s390/cio/qdio_perf.c | |||
@@ -74,12 +74,20 @@ static int qdio_perf_proc_show(struct seq_file *m, void *v) | |||
74 | seq_printf(m, "\n"); | 74 | seq_printf(m, "\n"); |
75 | seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n", | 75 | seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n", |
76 | (long)atomic_long_read(&perf_stats.fast_requeue)); | 76 | (long)atomic_long_read(&perf_stats.fast_requeue)); |
77 | seq_printf(m, "Number of outbound target full condition\t: %li\n", | ||
78 | (long)atomic_long_read(&perf_stats.outbound_target_full)); | ||
77 | seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", | 79 | seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n", |
78 | (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); | 80 | (long)atomic_long_read(&perf_stats.debug_tl_out_timer)); |
79 | seq_printf(m, "Number of stop polling calls\t\t\t: %li\n", | 81 | seq_printf(m, "Number of stop polling calls\t\t\t: %li\n", |
80 | (long)atomic_long_read(&perf_stats.debug_stop_polling)); | 82 | (long)atomic_long_read(&perf_stats.debug_stop_polling)); |
81 | seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n", | 83 | seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n", |
82 | (long)atomic_long_read(&perf_stats.thinint_inbound_loop2)); | 84 | (long)atomic_long_read(&perf_stats.thinint_inbound_loop2)); |
85 | seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n", | ||
86 | (long)atomic_long_read(&perf_stats.debug_eqbs_all), | ||
87 | (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete)); | ||
88 | seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n", | ||
89 | (long)atomic_long_read(&perf_stats.debug_sqbs_all), | ||
90 | (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete)); | ||
83 | seq_printf(m, "\n"); | 91 | seq_printf(m, "\n"); |
84 | return 0; | 92 | return 0; |
85 | } | 93 | } |
diff --git a/drivers/s390/cio/qdio_perf.h b/drivers/s390/cio/qdio_perf.h index 5c406a8b7387..7821ac4fa517 100644 --- a/drivers/s390/cio/qdio_perf.h +++ b/drivers/s390/cio/qdio_perf.h | |||
@@ -36,10 +36,15 @@ struct qdio_perf_stats { | |||
36 | atomic_long_t inbound_handler; | 36 | atomic_long_t inbound_handler; |
37 | atomic_long_t outbound_handler; | 37 | atomic_long_t outbound_handler; |
38 | atomic_long_t fast_requeue; | 38 | atomic_long_t fast_requeue; |
39 | atomic_long_t outbound_target_full; | ||
39 | 40 | ||
40 | /* for debugging */ | 41 | /* for debugging */ |
41 | atomic_long_t debug_tl_out_timer; | 42 | atomic_long_t debug_tl_out_timer; |
42 | atomic_long_t debug_stop_polling; | 43 | atomic_long_t debug_stop_polling; |
44 | atomic_long_t debug_eqbs_all; | ||
45 | atomic_long_t debug_eqbs_incomplete; | ||
46 | atomic_long_t debug_sqbs_all; | ||
47 | atomic_long_t debug_sqbs_incomplete; | ||
43 | }; | 48 | }; |
44 | 49 | ||
45 | extern struct qdio_perf_stats perf_stats; | 50 | extern struct qdio_perf_stats perf_stats; |
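The counters added above (outbound_target_full and the QEBSM EQBS/SQBS total/incomplete pairs) are presumably incremented with the existing qdio_perf_stat_inc() helper from the buffer-state handling paths, which are not part of the hunks shown here. A sketch of the expected pattern (the wrapper function and its arguments are hypothetical):

	/* Sketch: accounting for one EQBS extraction. */
	static void example_account_eqbs(int requested, int extracted)
	{
		qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);
		if (extracted < requested)
			/* the instruction returned fewer buffer states than requested */
			qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
	}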
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c index a0b6b46e7466..c08356b95bf5 100644 --- a/drivers/s390/cio/qdio_setup.c +++ b/drivers/s390/cio/qdio_setup.c | |||
@@ -117,17 +117,16 @@ static void setup_queues_misc(struct qdio_q *q, struct qdio_irq *irq_ptr, | |||
117 | q->mask = 1 << (31 - i); | 117 | q->mask = 1 << (31 - i); |
118 | q->nr = i; | 118 | q->nr = i; |
119 | q->handler = handler; | 119 | q->handler = handler; |
120 | spin_lock_init(&q->lock); | ||
120 | } | 121 | } |
121 | 122 | ||
122 | static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, | 123 | static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, |
123 | void **sbals_array, char *dbf_text, int i) | 124 | void **sbals_array, int i) |
124 | { | 125 | { |
125 | struct qdio_q *prev; | 126 | struct qdio_q *prev; |
126 | int j; | 127 | int j; |
127 | 128 | ||
128 | QDIO_DBF_TEXT0(0, setup, dbf_text); | 129 | DBF_HEX(&q, sizeof(void *)); |
129 | QDIO_DBF_HEX0(0, setup, &q, sizeof(void *)); | ||
130 | |||
131 | q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2); | 130 | q->sl = (struct sl *)((char *)q->slib + PAGE_SIZE / 2); |
132 | 131 | ||
133 | /* fill in sbal */ | 132 | /* fill in sbal */ |
@@ -150,31 +149,26 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr, | |||
150 | for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) | 149 | for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++) |
151 | q->sl->element[j].sbal = (unsigned long)q->sbal[j]; | 150 | q->sl->element[j].sbal = (unsigned long)q->sbal[j]; |
152 | 151 | ||
153 | QDIO_DBF_TEXT2(0, setup, "sl-sb-b0"); | 152 | DBF_EVENT("sl-slsb-sbal"); |
154 | QDIO_DBF_HEX2(0, setup, q->sl, sizeof(void *)); | 153 | DBF_HEX(q->sl, sizeof(void *)); |
155 | QDIO_DBF_HEX2(0, setup, &q->slsb, sizeof(void *)); | 154 | DBF_HEX(&q->slsb, sizeof(void *)); |
156 | QDIO_DBF_HEX2(0, setup, q->sbal, sizeof(void *)); | 155 | DBF_HEX(q->sbal, sizeof(void *)); |
157 | } | 156 | } |
158 | 157 | ||
159 | static void setup_queues(struct qdio_irq *irq_ptr, | 158 | static void setup_queues(struct qdio_irq *irq_ptr, |
160 | struct qdio_initialize *qdio_init) | 159 | struct qdio_initialize *qdio_init) |
161 | { | 160 | { |
162 | char dbf_text[20]; | ||
163 | struct qdio_q *q; | 161 | struct qdio_q *q; |
164 | void **input_sbal_array = qdio_init->input_sbal_addr_array; | 162 | void **input_sbal_array = qdio_init->input_sbal_addr_array; |
165 | void **output_sbal_array = qdio_init->output_sbal_addr_array; | 163 | void **output_sbal_array = qdio_init->output_sbal_addr_array; |
166 | int i; | 164 | int i; |
167 | 165 | ||
168 | sprintf(dbf_text, "qset%4x", qdio_init->cdev->private->schid.sch_no); | ||
169 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
170 | |||
171 | for_each_input_queue(irq_ptr, q, i) { | 166 | for_each_input_queue(irq_ptr, q, i) { |
172 | sprintf(dbf_text, "in-q%4x", i); | 167 | DBF_EVENT("in-q:%1d", i); |
173 | setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); | 168 | setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i); |
174 | 169 | ||
175 | q->is_input_q = 1; | 170 | q->is_input_q = 1; |
176 | spin_lock_init(&q->u.in.lock); | 171 | setup_storage_lists(q, irq_ptr, input_sbal_array, i); |
177 | setup_storage_lists(q, irq_ptr, input_sbal_array, dbf_text, i); | ||
178 | input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; | 172 | input_sbal_array += QDIO_MAX_BUFFERS_PER_Q; |
179 | 173 | ||
180 | if (is_thinint_irq(irq_ptr)) | 174 | if (is_thinint_irq(irq_ptr)) |
@@ -186,12 +180,11 @@ static void setup_queues(struct qdio_irq *irq_ptr, | |||
186 | } | 180 | } |
187 | 181 | ||
188 | for_each_output_queue(irq_ptr, q, i) { | 182 | for_each_output_queue(irq_ptr, q, i) { |
189 | sprintf(dbf_text, "outq%4x", i); | 183 | DBF_EVENT("outq:%1d", i); |
190 | setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); | 184 | setup_queues_misc(q, irq_ptr, qdio_init->output_handler, i); |
191 | 185 | ||
192 | q->is_input_q = 0; | 186 | q->is_input_q = 0; |
193 | setup_storage_lists(q, irq_ptr, output_sbal_array, | 187 | setup_storage_lists(q, irq_ptr, output_sbal_array, i); |
194 | dbf_text, i); | ||
195 | output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; | 188 | output_sbal_array += QDIO_MAX_BUFFERS_PER_Q; |
196 | 189 | ||
197 | tasklet_init(&q->tasklet, qdio_outbound_processing, | 190 | tasklet_init(&q->tasklet, qdio_outbound_processing, |
@@ -222,8 +215,6 @@ static void process_ac_flags(struct qdio_irq *irq_ptr, unsigned char qdioac) | |||
222 | static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, | 215 | static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, |
223 | unsigned char qdioac, unsigned long token) | 216 | unsigned char qdioac, unsigned long token) |
224 | { | 217 | { |
225 | char dbf_text[15]; | ||
226 | |||
227 | if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM)) | 218 | if (!(irq_ptr->qib.rflags & QIB_RFLAGS_ENABLE_QEBSM)) |
228 | goto no_qebsm; | 219 | goto no_qebsm; |
229 | if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) || | 220 | if (!(qdioac & AC1_SC_QEBSM_AVAILABLE) || |
@@ -232,33 +223,41 @@ static void check_and_setup_qebsm(struct qdio_irq *irq_ptr, | |||
232 | 223 | ||
233 | irq_ptr->sch_token = token; | 224 | irq_ptr->sch_token = token; |
234 | 225 | ||
235 | QDIO_DBF_TEXT0(0, setup, "V=V:1"); | 226 | DBF_EVENT("V=V:1"); |
236 | sprintf(dbf_text, "%8lx", irq_ptr->sch_token); | 227 | DBF_EVENT("%8lx", irq_ptr->sch_token); |
237 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
238 | return; | 228 | return; |
239 | 229 | ||
240 | no_qebsm: | 230 | no_qebsm: |
241 | irq_ptr->sch_token = 0; | 231 | irq_ptr->sch_token = 0; |
242 | irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; | 232 | irq_ptr->qib.rflags &= ~QIB_RFLAGS_ENABLE_QEBSM; |
243 | QDIO_DBF_TEXT0(0, setup, "noV=V"); | 233 | DBF_EVENT("noV=V"); |
244 | } | 234 | } |
245 | 235 | ||
246 | static int __get_ssqd_info(struct qdio_irq *irq_ptr) | 236 | /* |
237 | * If there is a qdio_irq we use the chsc_page and store the information | ||
238 | * in the qdio_irq, otherwise we copy it to the specified structure. | ||
239 | */ | ||
240 | int qdio_setup_get_ssqd(struct qdio_irq *irq_ptr, | ||
241 | struct subchannel_id *schid, | ||
242 | struct qdio_ssqd_desc *data) | ||
247 | { | 243 | { |
248 | struct chsc_ssqd_area *ssqd; | 244 | struct chsc_ssqd_area *ssqd; |
249 | int rc; | 245 | int rc; |
250 | 246 | ||
251 | QDIO_DBF_TEXT0(0, setup, "getssqd"); | 247 | DBF_EVENT("getssqd:%4x", schid->sch_no); |
252 | ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; | 248 | if (irq_ptr != NULL) |
249 | ssqd = (struct chsc_ssqd_area *)irq_ptr->chsc_page; | ||
250 | else | ||
251 | ssqd = (struct chsc_ssqd_area *)__get_free_page(GFP_KERNEL); | ||
253 | memset(ssqd, 0, PAGE_SIZE); | 252 | memset(ssqd, 0, PAGE_SIZE); |
254 | 253 | ||
255 | ssqd->request = (struct chsc_header) { | 254 | ssqd->request = (struct chsc_header) { |
256 | .length = 0x0010, | 255 | .length = 0x0010, |
257 | .code = 0x0024, | 256 | .code = 0x0024, |
258 | }; | 257 | }; |
259 | ssqd->first_sch = irq_ptr->schid.sch_no; | 258 | ssqd->first_sch = schid->sch_no; |
260 | ssqd->last_sch = irq_ptr->schid.sch_no; | 259 | ssqd->last_sch = schid->sch_no; |
261 | ssqd->ssid = irq_ptr->schid.ssid; | 260 | ssqd->ssid = schid->ssid; |
262 | 261 | ||
263 | if (chsc(ssqd)) | 262 | if (chsc(ssqd)) |
264 | return -EIO; | 263 | return -EIO; |
@@ -268,27 +267,29 @@ static int __get_ssqd_info(struct qdio_irq *irq_ptr) | |||
268 | 267 | ||
269 | if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) || | 268 | if (!(ssqd->qdio_ssqd.flags & CHSC_FLAG_QDIO_CAPABILITY) || |
270 | !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) || | 269 | !(ssqd->qdio_ssqd.flags & CHSC_FLAG_VALIDITY) || |
271 | (ssqd->qdio_ssqd.sch != irq_ptr->schid.sch_no)) | 270 | (ssqd->qdio_ssqd.sch != schid->sch_no)) |
272 | return -EINVAL; | 271 | return -EINVAL; |
273 | 272 | ||
274 | memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd, | 273 | if (irq_ptr != NULL) |
275 | sizeof(struct qdio_ssqd_desc)); | 274 | memcpy(&irq_ptr->ssqd_desc, &ssqd->qdio_ssqd, |
275 | sizeof(struct qdio_ssqd_desc)); | ||
276 | else { | ||
277 | memcpy(data, &ssqd->qdio_ssqd, | ||
278 | sizeof(struct qdio_ssqd_desc)); | ||
279 | free_page((unsigned long)ssqd); | ||
280 | } | ||
276 | return 0; | 281 | return 0; |
277 | } | 282 | } |
278 | 283 | ||
279 | void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) | 284 | void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) |
280 | { | 285 | { |
281 | unsigned char qdioac; | 286 | unsigned char qdioac; |
282 | char dbf_text[15]; | ||
283 | int rc; | 287 | int rc; |
284 | 288 | ||
285 | rc = __get_ssqd_info(irq_ptr); | 289 | rc = qdio_setup_get_ssqd(irq_ptr, &irq_ptr->schid, NULL); |
286 | if (rc) { | 290 | if (rc) { |
287 | QDIO_DBF_TEXT2(0, setup, "ssqdasig"); | 291 | DBF_ERROR("%4x ssqd ERR", irq_ptr->schid.sch_no); |
288 | sprintf(dbf_text, "schn%4x", irq_ptr->schid.sch_no); | 292 | DBF_ERROR("rc:%x", rc); |
289 | QDIO_DBF_TEXT2(0, setup, dbf_text); | ||
290 | sprintf(dbf_text, "rc:%d", rc); | ||
291 | QDIO_DBF_TEXT2(0, setup, dbf_text); | ||
292 | /* all flags set, worst case */ | 293 | /* all flags set, worst case */ |
293 | qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED | | 294 | qdioac = AC1_SIGA_INPUT_NEEDED | AC1_SIGA_OUTPUT_NEEDED | |
294 | AC1_SIGA_SYNC_NEEDED; | 295 | AC1_SIGA_SYNC_NEEDED; |
@@ -297,9 +298,7 @@ void qdio_setup_ssqd_info(struct qdio_irq *irq_ptr) | |||
297 | 298 | ||
298 | check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token); | 299 | check_and_setup_qebsm(irq_ptr, qdioac, irq_ptr->ssqd_desc.sch_token); |
299 | process_ac_flags(irq_ptr, qdioac); | 300 | process_ac_flags(irq_ptr, qdioac); |
300 | 301 | DBF_EVENT("qdioac:%4x", qdioac); | |
301 | sprintf(dbf_text, "qdioac%2x", qdioac); | ||
302 | QDIO_DBF_TEXT2(0, setup, dbf_text); | ||
303 | } | 302 | } |
304 | 303 | ||
305 | void qdio_release_memory(struct qdio_irq *irq_ptr) | 304 | void qdio_release_memory(struct qdio_irq *irq_ptr) |
@@ -419,7 +418,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data) | |||
419 | /* get qdio commands */ | 418 | /* get qdio commands */ |
420 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); | 419 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE); |
421 | if (!ciw) { | 420 | if (!ciw) { |
422 | QDIO_DBF_TEXT2(1, setup, "no eq"); | 421 | DBF_ERROR("%4x NO EQ", irq_ptr->schid.sch_no); |
423 | rc = -EINVAL; | 422 | rc = -EINVAL; |
424 | goto out_err; | 423 | goto out_err; |
425 | } | 424 | } |
@@ -427,7 +426,7 @@ int qdio_setup_irq(struct qdio_initialize *init_data) | |||
427 | 426 | ||
428 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); | 427 | ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE); |
429 | if (!ciw) { | 428 | if (!ciw) { |
430 | QDIO_DBF_TEXT2(1, setup, "no aq"); | 429 | DBF_ERROR("%4x NO AQ", irq_ptr->schid.sch_no); |
431 | rc = -EINVAL; | 430 | rc = -EINVAL; |
432 | goto out_err; | 431 | goto out_err; |
433 | } | 432 | } |
@@ -447,56 +446,38 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr, | |||
447 | { | 446 | { |
448 | char s[80]; | 447 | char s[80]; |
449 | 448 | ||
450 | sprintf(s, "qdio: %s ", dev_name(&cdev->dev)); | 449 | snprintf(s, 80, "qdio: %s %s on SC %x using " |
451 | switch (irq_ptr->qib.qfmt) { | 450 | "AI:%d QEBSM:%d PCI:%d TDD:%d SIGA:%s%s%s%s%s%s\n", |
452 | case QDIO_QETH_QFMT: | 451 | dev_name(&cdev->dev), |
453 | sprintf(s + strlen(s), "OSA "); | 452 | (irq_ptr->qib.qfmt == QDIO_QETH_QFMT) ? "OSA" : |
454 | break; | 453 | ((irq_ptr->qib.qfmt == QDIO_ZFCP_QFMT) ? "ZFCP" : "HS"), |
455 | case QDIO_ZFCP_QFMT: | 454 | irq_ptr->schid.sch_no, |
456 | sprintf(s + strlen(s), "ZFCP "); | 455 | is_thinint_irq(irq_ptr), |
457 | break; | 456 | (irq_ptr->sch_token) ? 1 : 0, |
458 | case QDIO_IQDIO_QFMT: | 457 | (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0, |
459 | sprintf(s + strlen(s), "HS "); | 458 | css_general_characteristics.aif_tdd, |
460 | break; | 459 | (irq_ptr->siga_flag.input) ? "R" : " ", |
461 | } | 460 | (irq_ptr->siga_flag.output) ? "W" : " ", |
462 | sprintf(s + strlen(s), "on SC %x using ", irq_ptr->schid.sch_no); | 461 | (irq_ptr->siga_flag.sync) ? "S" : " ", |
463 | sprintf(s + strlen(s), "AI:%d ", is_thinint_irq(irq_ptr)); | 462 | (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " ", |
464 | sprintf(s + strlen(s), "QEBSM:%d ", (irq_ptr->sch_token) ? 1 : 0); | 463 | (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " ", |
465 | sprintf(s + strlen(s), "PCI:%d ", | 464 | (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " "); |
466 | (irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED) ? 1 : 0); | ||
467 | sprintf(s + strlen(s), "TDD:%d ", css_general_characteristics.aif_tdd); | ||
468 | sprintf(s + strlen(s), "SIGA:"); | ||
469 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.input) ? "R" : " "); | ||
470 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.output) ? "W" : " "); | ||
471 | sprintf(s + strlen(s), "%s", (irq_ptr->siga_flag.sync) ? "S" : " "); | ||
472 | sprintf(s + strlen(s), "%s", | ||
473 | (!irq_ptr->siga_flag.no_sync_ti) ? "A" : " "); | ||
474 | sprintf(s + strlen(s), "%s", | ||
475 | (!irq_ptr->siga_flag.no_sync_out_ti) ? "O" : " "); | ||
476 | sprintf(s + strlen(s), "%s", | ||
477 | (!irq_ptr->siga_flag.no_sync_out_pci) ? "P" : " "); | ||
478 | sprintf(s + strlen(s), "\n"); | ||
479 | printk(KERN_INFO "%s", s); | 465 | printk(KERN_INFO "%s", s); |
480 | } | 466 | } |
481 | 467 | ||
482 | int __init qdio_setup_init(void) | 468 | int __init qdio_setup_init(void) |
483 | { | 469 | { |
484 | char dbf_text[15]; | ||
485 | |||
486 | qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), | 470 | qdio_q_cache = kmem_cache_create("qdio_q", sizeof(struct qdio_q), |
487 | 256, 0, NULL); | 471 | 256, 0, NULL); |
488 | if (!qdio_q_cache) | 472 | if (!qdio_q_cache) |
489 | return -ENOMEM; | 473 | return -ENOMEM; |
490 | 474 | ||
491 | /* Check for OSA/FCP thin interrupts (bit 67). */ | 475 | /* Check for OSA/FCP thin interrupts (bit 67). */ |
492 | sprintf(dbf_text, "thini%1x", | 476 | DBF_EVENT("thinint:%1d", |
493 | (css_general_characteristics.aif_osa) ? 1 : 0); | 477 | (css_general_characteristics.aif_osa) ? 1 : 0); |
494 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
495 | 478 | ||
496 | /* Check for QEBSM support in general (bit 58). */ | 479 | /* Check for QEBSM support in general (bit 58). */ |
497 | sprintf(dbf_text, "cssQBS:%1x", | 480 | DBF_EVENT("cssQEBSM:%1d", (qebsm_possible()) ? 1 : 0); |
498 | (qebsm_possible()) ? 1 : 0); | ||
499 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
500 | return 0; | 481 | return 0; |
501 | } | 482 | } |
502 | 483 | ||
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c index ea7f61400267..8e90e147b746 100644 --- a/drivers/s390/cio/qdio_thinint.c +++ b/drivers/s390/cio/qdio_thinint.c | |||
@@ -125,13 +125,13 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) | |||
125 | 125 | ||
126 | static inline int tiqdio_inbound_q_done(struct qdio_q *q) | 126 | static inline int tiqdio_inbound_q_done(struct qdio_q *q) |
127 | { | 127 | { |
128 | unsigned char state; | 128 | unsigned char state = 0; |
129 | 129 | ||
130 | if (!atomic_read(&q->nr_buf_used)) | 130 | if (!atomic_read(&q->nr_buf_used)) |
131 | return 1; | 131 | return 1; |
132 | 132 | ||
133 | qdio_siga_sync_q(q); | 133 | qdio_siga_sync_q(q); |
134 | get_buf_state(q, q->first_to_check, &state); | 134 | get_buf_state(q, q->first_to_check, &state, 0); |
135 | 135 | ||
136 | if (state == SLSB_P_INPUT_PRIMED) | 136 | if (state == SLSB_P_INPUT_PRIMED) |
137 | /* more work coming */ | 137 | /* more work coming */ |
@@ -258,8 +258,6 @@ static void tiqdio_thinint_handler(void *ind, void *drv_data) | |||
258 | static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) | 258 | static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) |
259 | { | 259 | { |
260 | struct scssc_area *scssc_area; | 260 | struct scssc_area *scssc_area; |
261 | char dbf_text[15]; | ||
262 | void *ptr; | ||
263 | int rc; | 261 | int rc; |
264 | 262 | ||
265 | scssc_area = (struct scssc_area *)irq_ptr->chsc_page; | 263 | scssc_area = (struct scssc_area *)irq_ptr->chsc_page; |
@@ -294,19 +292,15 @@ static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) | |||
294 | 292 | ||
295 | rc = chsc_error_from_response(scssc_area->response.code); | 293 | rc = chsc_error_from_response(scssc_area->response.code); |
296 | if (rc) { | 294 | if (rc) { |
297 | sprintf(dbf_text, "sidR%4x", scssc_area->response.code); | 295 | DBF_ERROR("%4x SSI r:%4x", irq_ptr->schid.sch_no, |
298 | QDIO_DBF_TEXT1(0, trace, dbf_text); | 296 | scssc_area->response.code); |
299 | QDIO_DBF_TEXT1(0, setup, dbf_text); | 297 | DBF_ERROR_HEX(&scssc_area->response, sizeof(void *)); |
300 | ptr = &scssc_area->response; | ||
301 | QDIO_DBF_HEX2(1, setup, &ptr, QDIO_DBF_SETUP_LEN); | ||
302 | return rc; | 298 | return rc; |
303 | } | 299 | } |
304 | 300 | ||
305 | QDIO_DBF_TEXT2(0, setup, "setscind"); | 301 | DBF_EVENT("setscind"); |
306 | QDIO_DBF_HEX2(0, setup, &scssc_area->summary_indicator_addr, | 302 | DBF_HEX(&scssc_area->summary_indicator_addr, sizeof(unsigned long)); |
307 | sizeof(unsigned long)); | 303 | DBF_HEX(&scssc_area->subchannel_indicator_addr, sizeof(unsigned long)); |
308 | QDIO_DBF_HEX2(0, setup, &scssc_area->subchannel_indicator_addr, | ||
309 | sizeof(unsigned long)); | ||
310 | return 0; | 304 | return 0; |
311 | } | 305 | } |
312 | 306 | ||
@@ -327,14 +321,11 @@ void tiqdio_free_memory(void) | |||
327 | 321 | ||
328 | int __init tiqdio_register_thinints(void) | 322 | int __init tiqdio_register_thinints(void) |
329 | { | 323 | { |
330 | char dbf_text[20]; | ||
331 | |||
332 | isc_register(QDIO_AIRQ_ISC); | 324 | isc_register(QDIO_AIRQ_ISC); |
333 | tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler, | 325 | tiqdio_alsi = s390_register_adapter_interrupt(&tiqdio_thinint_handler, |
334 | NULL, QDIO_AIRQ_ISC); | 326 | NULL, QDIO_AIRQ_ISC); |
335 | if (IS_ERR(tiqdio_alsi)) { | 327 | if (IS_ERR(tiqdio_alsi)) { |
336 | sprintf(dbf_text, "regthn%lx", PTR_ERR(tiqdio_alsi)); | 328 | DBF_EVENT("RTI:%lx", PTR_ERR(tiqdio_alsi)); |
337 | QDIO_DBF_TEXT0(0, setup, dbf_text); | ||
338 | tiqdio_alsi = NULL; | 329 | tiqdio_alsi = NULL; |
339 | isc_unregister(QDIO_AIRQ_ISC); | 330 | isc_unregister(QDIO_AIRQ_ISC); |
340 | return -ENOMEM; | 331 | return -ENOMEM; |
@@ -360,7 +351,7 @@ void qdio_setup_thinint(struct qdio_irq *irq_ptr) | |||
360 | if (!is_thinint_irq(irq_ptr)) | 351 | if (!is_thinint_irq(irq_ptr)) |
361 | return; | 352 | return; |
362 | irq_ptr->dsci = get_indicator(); | 353 | irq_ptr->dsci = get_indicator(); |
363 | QDIO_DBF_HEX1(0, setup, &irq_ptr->dsci, sizeof(void *)); | 354 | DBF_HEX(&irq_ptr->dsci, sizeof(void *)); |
364 | } | 355 | } |
365 | 356 | ||
366 | void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) | 357 | void qdio_shutdown_thinint(struct qdio_irq *irq_ptr) |
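Both qdio_thinint.c and qdio_main.c now pass an extra trailing argument to get_buf_state(). Judging from these call sites it is an auto-acknowledge flag introduced elsewhere in this series; the prototype below is an assumption for illustration, since the real declaration is not part of the hunks shown here:

	/* Assumed prototype (the real declaration lives in the qdio headers):
	 *	int get_buf_state(struct qdio_q *q, unsigned int bufnr,
	 *			  unsigned char *state, int auto_ack);
	 * The call sites in this patch peek at the buffer state without
	 * acknowledging it, hence the trailing 0:
	 */
	get_buf_state(q, q->first_to_check, &state, 0);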
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c index e3fe6838293a..1f5f5d2d87d9 100644 --- a/drivers/s390/crypto/ap_bus.c +++ b/drivers/s390/crypto/ap_bus.c | |||
@@ -5,6 +5,7 @@ | |||
5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> | 5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> |
6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
7 | * Ralph Wuerthner <rwuerthn@de.ibm.com> | 7 | * Ralph Wuerthner <rwuerthn@de.ibm.com> |
8 | * Felix Beck <felix.beck@de.ibm.com> | ||
8 | * | 9 | * |
9 | * Adjunct processor bus. | 10 | * Adjunct processor bus. |
10 | * | 11 | * |
@@ -23,6 +24,9 @@ | |||
23 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 24 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
24 | */ | 25 | */ |
25 | 26 | ||
27 | #define KMSG_COMPONENT "ap" | ||
28 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
29 | |||
26 | #include <linux/module.h> | 30 | #include <linux/module.h> |
27 | #include <linux/init.h> | 31 | #include <linux/init.h> |
28 | #include <linux/delay.h> | 32 | #include <linux/delay.h> |
@@ -34,6 +38,10 @@ | |||
34 | #include <linux/mutex.h> | 38 | #include <linux/mutex.h> |
35 | #include <asm/s390_rdev.h> | 39 | #include <asm/s390_rdev.h> |
36 | #include <asm/reset.h> | 40 | #include <asm/reset.h> |
41 | #include <asm/airq.h> | ||
42 | #include <asm/atomic.h> | ||
43 | #include <asm/system.h> | ||
44 | #include <asm/isc.h> | ||
37 | #include <linux/hrtimer.h> | 45 | #include <linux/hrtimer.h> |
38 | #include <linux/ktime.h> | 46 | #include <linux/ktime.h> |
39 | 47 | ||
@@ -46,6 +54,7 @@ static enum hrtimer_restart ap_poll_timeout(struct hrtimer *); | |||
46 | static int ap_poll_thread_start(void); | 54 | static int ap_poll_thread_start(void); |
47 | static void ap_poll_thread_stop(void); | 55 | static void ap_poll_thread_stop(void); |
48 | static void ap_request_timeout(unsigned long); | 56 | static void ap_request_timeout(unsigned long); |
57 | static inline void ap_schedule_poll_timer(void); | ||
49 | 58 | ||
50 | /* | 59 | /* |
51 | * Module description. | 60 | * Module description. |
@@ -68,7 +77,7 @@ module_param_named(poll_thread, ap_thread_flag, int, 0000); | |||
68 | MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); | 77 | MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off)."); |
69 | 78 | ||
70 | static struct device *ap_root_device = NULL; | 79 | static struct device *ap_root_device = NULL; |
71 | static DEFINE_SPINLOCK(ap_device_lock); | 80 | static DEFINE_SPINLOCK(ap_device_list_lock); |
72 | static LIST_HEAD(ap_device_list); | 81 | static LIST_HEAD(ap_device_list); |
73 | 82 | ||
74 | /* | 83 | /* |
@@ -80,19 +89,29 @@ static int ap_config_time = AP_CONFIG_TIME; | |||
80 | static DECLARE_WORK(ap_config_work, ap_scan_bus); | 89 | static DECLARE_WORK(ap_config_work, ap_scan_bus); |
81 | 90 | ||
82 | /* | 91 | /* |
83 | * Tasklet & timer for AP request polling. | 92 | * Tasklet & timer for AP request polling and interrupts |
84 | */ | 93 | */ |
85 | static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); | 94 | static DECLARE_TASKLET(ap_tasklet, ap_poll_all, 0); |
86 | static atomic_t ap_poll_requests = ATOMIC_INIT(0); | 95 | static atomic_t ap_poll_requests = ATOMIC_INIT(0); |
87 | static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); | 96 | static DECLARE_WAIT_QUEUE_HEAD(ap_poll_wait); |
88 | static struct task_struct *ap_poll_kthread = NULL; | 97 | static struct task_struct *ap_poll_kthread = NULL; |
89 | static DEFINE_MUTEX(ap_poll_thread_mutex); | 98 | static DEFINE_MUTEX(ap_poll_thread_mutex); |
99 | static void *ap_interrupt_indicator; | ||
90 | static struct hrtimer ap_poll_timer; | 100 | static struct hrtimer ap_poll_timer; |
91 | /* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds. | 101 | /* In LPAR poll with 4kHz frequency. Poll every 250000 nanoseconds. |
92 | * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/ | 102 | * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/ |
93 | static unsigned long long poll_timeout = 250000; | 103 | static unsigned long long poll_timeout = 250000; |
94 | 104 | ||
95 | /** | 105 | /** |
106 | * ap_using_interrupts() - Returns non-zero if interrupt support is | ||
107 | * available. | ||
108 | */ | ||
109 | static inline int ap_using_interrupts(void) | ||
110 | { | ||
111 | return ap_interrupt_indicator != NULL; | ||
112 | } | ||
113 | |||
114 | /** | ||
96 | * ap_instructions_available() - Test if AP instructions are available. | 115 |
97 | * | 116 | * |
98 | * Returns 0 if the AP instructions are installed. | 117 | * Returns 0 if the AP instructions are installed. |
@@ -113,6 +132,23 @@ static inline int ap_instructions_available(void) | |||
113 | } | 132 | } |
114 | 133 | ||
115 | /** | 134 | /** |
135 | * ap_interrupts_available(): Test if AP interrupts are available. | ||
136 | * | ||
137 | * Returns 1 if AP interrupts are available. | ||
138 | */ | ||
139 | static int ap_interrupts_available(void) | ||
140 | { | ||
141 | unsigned long long facility_bits[2]; | ||
142 | |||
143 | if (stfle(facility_bits, 2) <= 1) | ||
144 | return 0; | ||
145 | if (!(facility_bits[0] & (1ULL << 61)) || | ||
146 | !(facility_bits[1] & (1ULL << 62))) | ||
147 | return 0; | ||
148 | return 1; | ||
149 | } | ||
150 | |||
151 | /** | ||
116 | * ap_test_queue(): Test adjunct processor queue. | 152 | * ap_test_queue(): Test adjunct processor queue. |
117 | * @qid: The AP queue number | 153 | * @qid: The AP queue number |
118 | * @queue_depth: Pointer to queue depth value | 154 | * @queue_depth: Pointer to queue depth value |
@@ -152,6 +188,80 @@ static inline struct ap_queue_status ap_reset_queue(ap_qid_t qid) | |||
152 | return reg1; | 188 | return reg1; |
153 | } | 189 | } |
154 | 190 | ||
191 | #ifdef CONFIG_64BIT | ||
192 | /** | ||
193 | * ap_queue_interruption_control(): Enable interruption for a specific AP. | ||
194 | * @qid: The AP queue number | ||
195 | * @ind: The notification indicator byte | ||
196 | * | ||
197 | * Returns AP queue status. | ||
198 | */ | ||
199 | static inline struct ap_queue_status | ||
200 | ap_queue_interruption_control(ap_qid_t qid, void *ind) | ||
201 | { | ||
202 | register unsigned long reg0 asm ("0") = qid | 0x03000000UL; | ||
203 | register unsigned long reg1_in asm ("1") = 0x0000800000000000UL | AP_ISC; | ||
204 | register struct ap_queue_status reg1_out asm ("1"); | ||
205 | register void *reg2 asm ("2") = ind; | ||
206 | asm volatile( | ||
207 | ".long 0xb2af0000" /* PQAP(RAPQ) */ | ||
208 | : "+d" (reg0), "+d" (reg1_in), "=d" (reg1_out), "+d" (reg2) | ||
209 | : | ||
210 | : "cc" ); | ||
211 | return reg1_out; | ||
212 | } | ||
213 | #endif | ||
214 | |||
215 | /** | ||
216 | * ap_queue_enable_interruption(): Enable interruption on an AP. | ||
217 | * @qid: The AP queue number | ||
218 | * @ind: the notification indicator byte | ||
219 | * | ||
220 | * Enables interruption on an AP queue via ap_queue_interruption_control(). Based | ||
221 | * on the return value it waits a while and then uses ap_test_queue() to check | ||
222 | * whether interrupts have been switched on. | ||
223 | */ | ||
224 | static int ap_queue_enable_interruption(ap_qid_t qid, void *ind) | ||
225 | { | ||
226 | #ifdef CONFIG_64BIT | ||
227 | struct ap_queue_status status; | ||
228 | int t_depth, t_device_type, rc, i; | ||
229 | |||
230 | rc = -EBUSY; | ||
231 | status = ap_queue_interruption_control(qid, ind); | ||
232 | |||
233 | for (i = 0; i < AP_MAX_RESET; i++) { | ||
234 | switch (status.response_code) { | ||
235 | case AP_RESPONSE_NORMAL: | ||
236 | if (status.int_enabled) | ||
237 | return 0; | ||
238 | break; | ||
239 | case AP_RESPONSE_RESET_IN_PROGRESS: | ||
240 | case AP_RESPONSE_BUSY: | ||
241 | break; | ||
242 | case AP_RESPONSE_Q_NOT_AVAIL: | ||
243 | case AP_RESPONSE_DECONFIGURED: | ||
244 | case AP_RESPONSE_CHECKSTOPPED: | ||
245 | case AP_RESPONSE_INVALID_ADDRESS: | ||
246 | return -ENODEV; | ||
247 | case AP_RESPONSE_OTHERWISE_CHANGED: | ||
248 | if (status.int_enabled) | ||
249 | return 0; | ||
250 | break; | ||
251 | default: | ||
252 | break; | ||
253 | } | ||
254 | if (i < AP_MAX_RESET - 1) { | ||
255 | udelay(5); | ||
256 | status = ap_test_queue(qid, &t_depth, &t_device_type); | ||
257 | } | ||
258 | } | ||
259 | return rc; | ||
260 | #else | ||
261 | return -EINVAL; | ||
262 | #endif | ||
263 | } | ||
264 | |||
155 | /** | 265 | /** |
156 | * __ap_send(): Send message to adjunct processor queue. | 266 | * __ap_send(): Send message to adjunct processor queue. |
157 | * @qid: The AP queue number | 267 | * @qid: The AP queue number |
@@ -295,6 +405,11 @@ static int ap_query_queue(ap_qid_t qid, int *queue_depth, int *device_type) | |||
295 | case AP_RESPONSE_CHECKSTOPPED: | 405 | case AP_RESPONSE_CHECKSTOPPED: |
296 | rc = -ENODEV; | 406 | rc = -ENODEV; |
297 | break; | 407 | break; |
408 | case AP_RESPONSE_INVALID_ADDRESS: | ||
409 | rc = -ENODEV; | ||
410 | break; | ||
411 | case AP_RESPONSE_OTHERWISE_CHANGED: | ||
412 | break; | ||
298 | case AP_RESPONSE_BUSY: | 413 | case AP_RESPONSE_BUSY: |
299 | break; | 414 | break; |
300 | default: | 415 | default: |
@@ -345,6 +460,15 @@ static int ap_init_queue(ap_qid_t qid) | |||
345 | status = ap_test_queue(qid, &dummy, &dummy); | 460 | status = ap_test_queue(qid, &dummy, &dummy); |
346 | } | 461 | } |
347 | } | 462 | } |
463 | if (rc == 0 && ap_using_interrupts()) { | ||
464 | rc = ap_queue_enable_interruption(qid, ap_interrupt_indicator); | ||
465 | /* If interruption mode is supported by the machine, | ||
466 | * but an AP cannot be enabled for interruption, then | ||
467 | * the AP will be discarded. */ | ||
468 | if (rc) | ||
469 | pr_err("Registering adapter interrupts for " | ||
470 | "AP %d failed\n", AP_QID_DEVICE(qid)); | ||
471 | } | ||
348 | return rc; | 472 | return rc; |
349 | } | 473 | } |
350 | 474 | ||
@@ -397,16 +521,16 @@ static ssize_t ap_hwtype_show(struct device *dev, | |||
397 | struct ap_device *ap_dev = to_ap_dev(dev); | 521 | struct ap_device *ap_dev = to_ap_dev(dev); |
398 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type); | 522 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->device_type); |
399 | } | 523 | } |
400 | static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); | ||
401 | 524 | ||
525 | static DEVICE_ATTR(hwtype, 0444, ap_hwtype_show, NULL); | ||
402 | static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, | 526 | static ssize_t ap_depth_show(struct device *dev, struct device_attribute *attr, |
403 | char *buf) | 527 | char *buf) |
404 | { | 528 | { |
405 | struct ap_device *ap_dev = to_ap_dev(dev); | 529 | struct ap_device *ap_dev = to_ap_dev(dev); |
406 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth); | 530 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_dev->queue_depth); |
407 | } | 531 | } |
408 | static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); | ||
409 | 532 | ||
533 | static DEVICE_ATTR(depth, 0444, ap_depth_show, NULL); | ||
410 | static ssize_t ap_request_count_show(struct device *dev, | 534 | static ssize_t ap_request_count_show(struct device *dev, |
411 | struct device_attribute *attr, | 535 | struct device_attribute *attr, |
412 | char *buf) | 536 | char *buf) |
@@ -509,9 +633,9 @@ static int ap_device_probe(struct device *dev) | |||
509 | ap_dev->drv = ap_drv; | 633 | ap_dev->drv = ap_drv; |
510 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; | 634 | rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV; |
511 | if (!rc) { | 635 | if (!rc) { |
512 | spin_lock_bh(&ap_device_lock); | 636 | spin_lock_bh(&ap_device_list_lock); |
513 | list_add(&ap_dev->list, &ap_device_list); | 637 | list_add(&ap_dev->list, &ap_device_list); |
514 | spin_unlock_bh(&ap_device_lock); | 638 | spin_unlock_bh(&ap_device_list_lock); |
515 | } | 639 | } |
516 | return rc; | 640 | return rc; |
517 | } | 641 | } |
@@ -553,9 +677,9 @@ static int ap_device_remove(struct device *dev) | |||
553 | 677 | ||
554 | ap_flush_queue(ap_dev); | 678 | ap_flush_queue(ap_dev); |
555 | del_timer_sync(&ap_dev->timeout); | 679 | del_timer_sync(&ap_dev->timeout); |
556 | spin_lock_bh(&ap_device_lock); | 680 | spin_lock_bh(&ap_device_list_lock); |
557 | list_del_init(&ap_dev->list); | 681 | list_del_init(&ap_dev->list); |
558 | spin_unlock_bh(&ap_device_lock); | 682 | spin_unlock_bh(&ap_device_list_lock); |
559 | if (ap_drv->remove) | 683 | if (ap_drv->remove) |
560 | ap_drv->remove(ap_dev); | 684 | ap_drv->remove(ap_dev); |
561 | spin_lock_bh(&ap_dev->lock); | 685 | spin_lock_bh(&ap_dev->lock); |
@@ -599,6 +723,14 @@ static ssize_t ap_config_time_show(struct bus_type *bus, char *buf) | |||
599 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); | 723 | return snprintf(buf, PAGE_SIZE, "%d\n", ap_config_time); |
600 | } | 724 | } |
601 | 725 | ||
726 | static ssize_t ap_interrupts_show(struct bus_type *bus, char *buf) | ||
727 | { | ||
728 | return snprintf(buf, PAGE_SIZE, "%d\n", | ||
729 | ap_using_interrupts() ? 1 : 0); | ||
730 | } | ||
731 | |||
732 | static BUS_ATTR(ap_interrupts, 0444, ap_interrupts_show, NULL); | ||
733 | |||
602 | static ssize_t ap_config_time_store(struct bus_type *bus, | 734 | static ssize_t ap_config_time_store(struct bus_type *bus, |
603 | const char *buf, size_t count) | 735 | const char *buf, size_t count) |
604 | { | 736 | { |
@@ -653,7 +785,8 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf, | |||
653 | ktime_t hr_time; | 785 | ktime_t hr_time; |
654 | 786 | ||
655 | /* 120 seconds = maximum poll interval */ | 787 | /* 120 seconds = maximum poll interval */ |
656 | if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || time > 120000000000) | 788 | if (sscanf(buf, "%llu\n", &time) != 1 || time < 1 || |
789 | time > 120000000000ULL) | ||
657 | return -EINVAL; | 790 | return -EINVAL; |
658 | poll_timeout = time; | 791 | poll_timeout = time; |
659 | hr_time = ktime_set(0, poll_timeout); | 792 | hr_time = ktime_set(0, poll_timeout); |
@@ -672,6 +805,7 @@ static struct bus_attribute *const ap_bus_attrs[] = { | |||
672 | &bus_attr_ap_domain, | 805 | &bus_attr_ap_domain, |
673 | &bus_attr_config_time, | 806 | &bus_attr_config_time, |
674 | &bus_attr_poll_thread, | 807 | &bus_attr_poll_thread, |
808 | &bus_attr_ap_interrupts, | ||
675 | &bus_attr_poll_timeout, | 809 | &bus_attr_poll_timeout, |
676 | NULL, | 810 | NULL, |
677 | }; | 811 | }; |
@@ -814,6 +948,11 @@ out: | |||
814 | return rc; | 948 | return rc; |
815 | } | 949 | } |
816 | 950 | ||
951 | static void ap_interrupt_handler(void *unused1, void *unused2) | ||
952 | { | ||
953 | tasklet_schedule(&ap_tasklet); | ||
954 | } | ||
955 | |||
817 | /** | 956 | /** |
818 | * __ap_scan_bus(): Scan the AP bus. | 957 | * __ap_scan_bus(): Scan the AP bus. |
819 | * @dev: Pointer to device | 958 | * @dev: Pointer to device |
@@ -928,6 +1067,8 @@ ap_config_timeout(unsigned long ptr) | |||
928 | */ | 1067 | */ |
929 | static inline void ap_schedule_poll_timer(void) | 1068 | static inline void ap_schedule_poll_timer(void) |
930 | { | 1069 | { |
1070 | if (ap_using_interrupts()) | ||
1071 | return; | ||
931 | if (hrtimer_is_queued(&ap_poll_timer)) | 1072 | if (hrtimer_is_queued(&ap_poll_timer)) |
932 | return; | 1073 | return; |
933 | hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout), | 1074 | hrtimer_start(&ap_poll_timer, ktime_set(0, poll_timeout), |
@@ -1181,7 +1322,7 @@ static void ap_reset(struct ap_device *ap_dev) | |||
1181 | ap_dev->unregistered = 1; | 1322 | ap_dev->unregistered = 1; |
1182 | } | 1323 | } |
1183 | 1324 | ||
1184 | static int __ap_poll_all(struct ap_device *ap_dev, unsigned long *flags) | 1325 | static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags) |
1185 | { | 1326 | { |
1186 | spin_lock(&ap_dev->lock); | 1327 | spin_lock(&ap_dev->lock); |
1187 | if (!ap_dev->unregistered) { | 1328 | if (!ap_dev->unregistered) { |
@@ -1207,13 +1348,19 @@ static void ap_poll_all(unsigned long dummy) | |||
1207 | unsigned long flags; | 1348 | unsigned long flags; |
1208 | struct ap_device *ap_dev; | 1349 | struct ap_device *ap_dev; |
1209 | 1350 | ||
1351 | /* Reset the indicator if interrupts are used. Thus new interrupts can | ||
1352 | * be received. Doing it at the beginning of the tasklet is important | ||
1353 | * so that no requests on any AP get lost. | ||
1354 | */ | ||
1355 | if (ap_using_interrupts()) | ||
1356 | xchg((u8 *)ap_interrupt_indicator, 0); | ||
1210 | do { | 1357 | do { |
1211 | flags = 0; | 1358 | flags = 0; |
1212 | spin_lock(&ap_device_lock); | 1359 | spin_lock(&ap_device_list_lock); |
1213 | list_for_each_entry(ap_dev, &ap_device_list, list) { | 1360 | list_for_each_entry(ap_dev, &ap_device_list, list) { |
1214 | __ap_poll_all(ap_dev, &flags); | 1361 | __ap_poll_device(ap_dev, &flags); |
1215 | } | 1362 | } |
1216 | spin_unlock(&ap_device_lock); | 1363 | spin_unlock(&ap_device_list_lock); |
1217 | } while (flags & 1); | 1364 | } while (flags & 1); |
1218 | if (flags & 2) | 1365 | if (flags & 2) |
1219 | ap_schedule_poll_timer(); | 1366 | ap_schedule_poll_timer(); |
@@ -1253,11 +1400,11 @@ static int ap_poll_thread(void *data) | |||
1253 | remove_wait_queue(&ap_poll_wait, &wait); | 1400 | remove_wait_queue(&ap_poll_wait, &wait); |
1254 | 1401 | ||
1255 | flags = 0; | 1402 | flags = 0; |
1256 | spin_lock_bh(&ap_device_lock); | 1403 | spin_lock_bh(&ap_device_list_lock); |
1257 | list_for_each_entry(ap_dev, &ap_device_list, list) { | 1404 | list_for_each_entry(ap_dev, &ap_device_list, list) { |
1258 | __ap_poll_all(ap_dev, &flags); | 1405 | __ap_poll_device(ap_dev, &flags); |
1259 | } | 1406 | } |
1260 | spin_unlock_bh(&ap_device_lock); | 1407 | spin_unlock_bh(&ap_device_list_lock); |
1261 | } | 1408 | } |
1262 | set_current_state(TASK_RUNNING); | 1409 | set_current_state(TASK_RUNNING); |
1263 | remove_wait_queue(&ap_poll_wait, &wait); | 1410 | remove_wait_queue(&ap_poll_wait, &wait); |
@@ -1268,6 +1415,8 @@ static int ap_poll_thread_start(void) | |||
1268 | { | 1415 | { |
1269 | int rc; | 1416 | int rc; |
1270 | 1417 | ||
1418 | if (ap_using_interrupts()) | ||
1419 | return 0; | ||
1271 | mutex_lock(&ap_poll_thread_mutex); | 1420 | mutex_lock(&ap_poll_thread_mutex); |
1272 | if (!ap_poll_kthread) { | 1421 | if (!ap_poll_kthread) { |
1273 | ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); | 1422 | ap_poll_kthread = kthread_run(ap_poll_thread, NULL, "appoll"); |
@@ -1301,8 +1450,12 @@ static void ap_request_timeout(unsigned long data) | |||
1301 | { | 1450 | { |
1302 | struct ap_device *ap_dev = (struct ap_device *) data; | 1451 | struct ap_device *ap_dev = (struct ap_device *) data; |
1303 | 1452 | ||
1304 | if (ap_dev->reset == AP_RESET_ARMED) | 1453 | if (ap_dev->reset == AP_RESET_ARMED) { |
1305 | ap_dev->reset = AP_RESET_DO; | 1454 | ap_dev->reset = AP_RESET_DO; |
1455 | |||
1456 | if (ap_using_interrupts()) | ||
1457 | tasklet_schedule(&ap_tasklet); | ||
1458 | } | ||
1306 | } | 1459 | } |
1307 | 1460 | ||
1308 | static void ap_reset_domain(void) | 1461 | static void ap_reset_domain(void) |
@@ -1337,14 +1490,25 @@ int __init ap_module_init(void) | |||
1337 | int rc, i; | 1490 | int rc, i; |
1338 | 1491 | ||
1339 | if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) { | 1492 | if (ap_domain_index < -1 || ap_domain_index >= AP_DOMAINS) { |
1340 | printk(KERN_WARNING "Invalid param: domain = %d. " | 1493 | pr_warning("%d is not a valid cryptographic domain\n", |
1341 | " Not loading.\n", ap_domain_index); | 1494 | ap_domain_index); |
1342 | return -EINVAL; | 1495 | return -EINVAL; |
1343 | } | 1496 | } |
1344 | if (ap_instructions_available() != 0) { | 1497 | if (ap_instructions_available() != 0) { |
1345 | printk(KERN_WARNING "AP instructions not installed.\n"); | 1498 | pr_warning("The hardware system does not support " |
1499 | "AP instructions\n"); | ||
1346 | return -ENODEV; | 1500 | return -ENODEV; |
1347 | } | 1501 | } |
1502 | if (ap_interrupts_available()) { | ||
1503 | isc_register(AP_ISC); | ||
1504 | ap_interrupt_indicator = s390_register_adapter_interrupt( | ||
1505 | &ap_interrupt_handler, NULL, AP_ISC); | ||
1506 | if (IS_ERR(ap_interrupt_indicator)) { | ||
1507 | ap_interrupt_indicator = NULL; | ||
1508 | isc_unregister(AP_ISC); | ||
1509 | } | ||
1510 | } | ||
1511 | |||
1348 | register_reset_call(&ap_reset_call); | 1512 | register_reset_call(&ap_reset_call); |
1349 | 1513 | ||
1350 | /* Create /sys/bus/ap. */ | 1514 | /* Create /sys/bus/ap. */ |
@@ -1408,6 +1572,10 @@ out_bus: | |||
1408 | bus_unregister(&ap_bus_type); | 1572 | bus_unregister(&ap_bus_type); |
1409 | out: | 1573 | out: |
1410 | unregister_reset_call(&ap_reset_call); | 1574 | unregister_reset_call(&ap_reset_call); |
1575 | if (ap_using_interrupts()) { | ||
1576 | s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC); | ||
1577 | isc_unregister(AP_ISC); | ||
1578 | } | ||
1411 | return rc; | 1579 | return rc; |
1412 | } | 1580 | } |
1413 | 1581 | ||
@@ -1443,6 +1611,10 @@ void ap_module_exit(void) | |||
1443 | bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); | 1611 | bus_remove_file(&ap_bus_type, ap_bus_attrs[i]); |
1444 | bus_unregister(&ap_bus_type); | 1612 | bus_unregister(&ap_bus_type); |
1445 | unregister_reset_call(&ap_reset_call); | 1613 | unregister_reset_call(&ap_reset_call); |
1614 | if (ap_using_interrupts()) { | ||
1615 | s390_unregister_adapter_interrupt(ap_interrupt_indicator, AP_ISC); | ||
1616 | isc_unregister(AP_ISC); | ||
1617 | } | ||
1446 | } | 1618 | } |
1447 | 1619 | ||
1448 | #ifndef CONFIG_ZCRYPT_MONOLITHIC | 1620 | #ifndef CONFIG_ZCRYPT_MONOLITHIC |
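The ap_bus.c hunks above add adapter-interrupt support: ap_module_init() registers an indicator byte on AP_ISC, ap_poll_all() clears that indicator at the start of the tasklet, and ap_schedule_poll_timer() as well as ap_poll_thread_start() return early once ap_using_interrupts() reports true, leaving the hrtimer and poll thread as a fallback for machines without the facility. A minimal sketch of that gating pattern follows; it is plain user-space C with simplified stand-ins (ap_interrupt_indicator, schedule_poll), not the kernel code itself.

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for the indicator byte that ap_module_init() would obtain
 * from s390_register_adapter_interrupt(); NULL means no interrupts. */
static unsigned char *ap_interrupt_indicator;

static bool ap_using_interrupts(void)
{
	return ap_interrupt_indicator != NULL;
}

/* Mirrors ap_schedule_poll_timer(): once interrupts drive the tasklet,
 * no poll timer is armed. */
static void schedule_poll(void)
{
	if (ap_using_interrupts())
		return;
	printf("arming poll timer as fallback\n");
}

int main(void)
{
	static unsigned char indicator;

	schedule_poll();                     /* polling mode: timer armed */
	ap_interrupt_indicator = &indicator; /* pretend registration worked */
	schedule_poll();                     /* interrupt mode: nothing to arm */
	return 0;
}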
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h index 446378b308fc..a35362241805 100644 --- a/drivers/s390/crypto/ap_bus.h +++ b/drivers/s390/crypto/ap_bus.h | |||
@@ -5,6 +5,7 @@ | |||
5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> | 5 | * Author(s): Cornelia Huck <cornelia.huck@de.ibm.com> |
6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | 6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> |
7 | * Ralph Wuerthner <rwuerthn@de.ibm.com> | 7 | * Ralph Wuerthner <rwuerthn@de.ibm.com> |
8 | * Felix Beck <felix.beck@de.ibm.com> | ||
8 | * | 9 | * |
9 | * Adjunct processor bus header file. | 10 | * Adjunct processor bus header file. |
10 | * | 11 | * |
@@ -67,7 +68,8 @@ struct ap_queue_status { | |||
67 | unsigned int queue_empty : 1; | 68 | unsigned int queue_empty : 1; |
68 | unsigned int replies_waiting : 1; | 69 | unsigned int replies_waiting : 1; |
69 | unsigned int queue_full : 1; | 70 | unsigned int queue_full : 1; |
70 | unsigned int pad1 : 5; | 71 | unsigned int pad1 : 4; |
72 | unsigned int int_enabled : 1; | ||
71 | unsigned int response_code : 8; | 73 | unsigned int response_code : 8; |
72 | unsigned int pad2 : 16; | 74 | unsigned int pad2 : 16; |
73 | }; | 75 | }; |
@@ -78,6 +80,8 @@ struct ap_queue_status { | |||
78 | #define AP_RESPONSE_DECONFIGURED 0x03 | 80 | #define AP_RESPONSE_DECONFIGURED 0x03 |
79 | #define AP_RESPONSE_CHECKSTOPPED 0x04 | 81 | #define AP_RESPONSE_CHECKSTOPPED 0x04 |
80 | #define AP_RESPONSE_BUSY 0x05 | 82 | #define AP_RESPONSE_BUSY 0x05 |
83 | #define AP_RESPONSE_INVALID_ADDRESS 0x06 | ||
84 | #define AP_RESPONSE_OTHERWISE_CHANGED 0x07 | ||
81 | #define AP_RESPONSE_Q_FULL 0x10 | 85 | #define AP_RESPONSE_Q_FULL 0x10 |
82 | #define AP_RESPONSE_NO_PENDING_REPLY 0x10 | 86 | #define AP_RESPONSE_NO_PENDING_REPLY 0x10 |
83 | #define AP_RESPONSE_INDEX_TOO_BIG 0x11 | 87 | #define AP_RESPONSE_INDEX_TOO_BIG 0x11 |
diff --git a/drivers/s390/crypto/zcrypt_cex2a.c b/drivers/s390/crypto/zcrypt_cex2a.c index 54f4cbc3be9e..326ea08f67c9 100644 --- a/drivers/s390/crypto/zcrypt_cex2a.c +++ b/drivers/s390/crypto/zcrypt_cex2a.c | |||
@@ -264,17 +264,21 @@ static void zcrypt_cex2a_receive(struct ap_device *ap_dev, | |||
264 | .type = TYPE82_RSP_CODE, | 264 | .type = TYPE82_RSP_CODE, |
265 | .reply_code = REP82_ERROR_MACHINE_FAILURE, | 265 | .reply_code = REP82_ERROR_MACHINE_FAILURE, |
266 | }; | 266 | }; |
267 | struct type80_hdr *t80h = reply->message; | 267 | struct type80_hdr *t80h; |
268 | int length; | 268 | int length; |
269 | 269 | ||
270 | /* Copy the reply message to the request message buffer. */ | 270 | /* Copy the reply message to the request message buffer. */ |
271 | if (IS_ERR(reply)) | 271 | if (IS_ERR(reply)) { |
272 | memcpy(msg->message, &error_reply, sizeof(error_reply)); | 272 | memcpy(msg->message, &error_reply, sizeof(error_reply)); |
273 | else if (t80h->type == TYPE80_RSP_CODE) { | 273 | goto out; |
274 | } | ||
275 | t80h = reply->message; | ||
276 | if (t80h->type == TYPE80_RSP_CODE) { | ||
274 | length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len); | 277 | length = min(CEX2A_MAX_RESPONSE_SIZE, (int) t80h->len); |
275 | memcpy(msg->message, reply->message, length); | 278 | memcpy(msg->message, reply->message, length); |
276 | } else | 279 | } else |
277 | memcpy(msg->message, reply->message, sizeof error_reply); | 280 | memcpy(msg->message, reply->message, sizeof error_reply); |
281 | out: | ||
278 | complete((struct completion *) msg->private); | 282 | complete((struct completion *) msg->private); |
279 | } | 283 | } |
280 | 284 | ||
diff --git a/drivers/s390/crypto/zcrypt_pcica.c b/drivers/s390/crypto/zcrypt_pcica.c index 12da4815ba8e..17ba81b58c78 100644 --- a/drivers/s390/crypto/zcrypt_pcica.c +++ b/drivers/s390/crypto/zcrypt_pcica.c | |||
@@ -247,17 +247,21 @@ static void zcrypt_pcica_receive(struct ap_device *ap_dev, | |||
247 | .type = TYPE82_RSP_CODE, | 247 | .type = TYPE82_RSP_CODE, |
248 | .reply_code = REP82_ERROR_MACHINE_FAILURE, | 248 | .reply_code = REP82_ERROR_MACHINE_FAILURE, |
249 | }; | 249 | }; |
250 | struct type84_hdr *t84h = reply->message; | 250 | struct type84_hdr *t84h; |
251 | int length; | 251 | int length; |
252 | 252 | ||
253 | /* Copy the reply message to the request message buffer. */ | 253 | /* Copy the reply message to the request message buffer. */ |
254 | if (IS_ERR(reply)) | 254 | if (IS_ERR(reply)) { |
255 | memcpy(msg->message, &error_reply, sizeof(error_reply)); | 255 | memcpy(msg->message, &error_reply, sizeof(error_reply)); |
256 | else if (t84h->code == TYPE84_RSP_CODE) { | 256 | goto out; |
257 | } | ||
258 | t84h = reply->message; | ||
259 | if (t84h->code == TYPE84_RSP_CODE) { | ||
257 | length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len); | 260 | length = min(PCICA_MAX_RESPONSE_SIZE, (int) t84h->len); |
258 | memcpy(msg->message, reply->message, length); | 261 | memcpy(msg->message, reply->message, length); |
259 | } else | 262 | } else |
260 | memcpy(msg->message, reply->message, sizeof error_reply); | 263 | memcpy(msg->message, reply->message, sizeof error_reply); |
264 | out: | ||
261 | complete((struct completion *) msg->private); | 265 | complete((struct completion *) msg->private); |
262 | } | 266 | } |
263 | 267 | ||
diff --git a/drivers/s390/crypto/zcrypt_pcicc.c b/drivers/s390/crypto/zcrypt_pcicc.c index 779952cb19fc..f4b0c4795434 100644 --- a/drivers/s390/crypto/zcrypt_pcicc.c +++ b/drivers/s390/crypto/zcrypt_pcicc.c | |||
@@ -447,19 +447,23 @@ static void zcrypt_pcicc_receive(struct ap_device *ap_dev, | |||
447 | .type = TYPE82_RSP_CODE, | 447 | .type = TYPE82_RSP_CODE, |
448 | .reply_code = REP82_ERROR_MACHINE_FAILURE, | 448 | .reply_code = REP82_ERROR_MACHINE_FAILURE, |
449 | }; | 449 | }; |
450 | struct type86_reply *t86r = reply->message; | 450 | struct type86_reply *t86r; |
451 | int length; | 451 | int length; |
452 | 452 | ||
453 | /* Copy the reply message to the request message buffer. */ | 453 | /* Copy the reply message to the request message buffer. */ |
454 | if (IS_ERR(reply)) | 454 | if (IS_ERR(reply)) { |
455 | memcpy(msg->message, &error_reply, sizeof(error_reply)); | 455 | memcpy(msg->message, &error_reply, sizeof(error_reply)); |
456 | else if (t86r->hdr.type == TYPE86_RSP_CODE && | 456 | goto out; |
457 | } | ||
458 | t86r = reply->message; | ||
459 | if (t86r->hdr.type == TYPE86_RSP_CODE && | ||
457 | t86r->cprb.cprb_ver_id == 0x01) { | 460 | t86r->cprb.cprb_ver_id == 0x01) { |
458 | length = sizeof(struct type86_reply) + t86r->length - 2; | 461 | length = sizeof(struct type86_reply) + t86r->length - 2; |
459 | length = min(PCICC_MAX_RESPONSE_SIZE, length); | 462 | length = min(PCICC_MAX_RESPONSE_SIZE, length); |
460 | memcpy(msg->message, reply->message, length); | 463 | memcpy(msg->message, reply->message, length); |
461 | } else | 464 | } else |
462 | memcpy(msg->message, reply->message, sizeof error_reply); | 465 | memcpy(msg->message, reply->message, sizeof error_reply); |
466 | out: | ||
463 | complete((struct completion *) msg->private); | 467 | complete((struct completion *) msg->private); |
464 | } | 468 | } |
465 | 469 | ||
diff --git a/drivers/s390/crypto/zcrypt_pcixcc.c b/drivers/s390/crypto/zcrypt_pcixcc.c index d8ad36f81540..e7a1e22e77ac 100644 --- a/drivers/s390/crypto/zcrypt_pcixcc.c +++ b/drivers/s390/crypto/zcrypt_pcixcc.c | |||
@@ -635,13 +635,16 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev, | |||
635 | }; | 635 | }; |
636 | struct response_type *resp_type = | 636 | struct response_type *resp_type = |
637 | (struct response_type *) msg->private; | 637 | (struct response_type *) msg->private; |
638 | struct type86x_reply *t86r = reply->message; | 638 | struct type86x_reply *t86r; |
639 | int length; | 639 | int length; |
640 | 640 | ||
641 | /* Copy the reply message to the request message buffer. */ | 641 | /* Copy the reply message to the request message buffer. */ |
642 | if (IS_ERR(reply)) | 642 | if (IS_ERR(reply)) { |
643 | memcpy(msg->message, &error_reply, sizeof(error_reply)); | 643 | memcpy(msg->message, &error_reply, sizeof(error_reply)); |
644 | else if (t86r->hdr.type == TYPE86_RSP_CODE && | 644 | goto out; |
645 | } | ||
646 | t86r = reply->message; | ||
647 | if (t86r->hdr.type == TYPE86_RSP_CODE && | ||
645 | t86r->cprbx.cprb_ver_id == 0x02) { | 648 | t86r->cprbx.cprb_ver_id == 0x02) { |
646 | switch (resp_type->type) { | 649 | switch (resp_type->type) { |
647 | case PCIXCC_RESPONSE_TYPE_ICA: | 650 | case PCIXCC_RESPONSE_TYPE_ICA: |
@@ -660,6 +663,7 @@ static void zcrypt_pcixcc_receive(struct ap_device *ap_dev, | |||
660 | } | 663 | } |
661 | } else | 664 | } else |
662 | memcpy(msg->message, reply->message, sizeof error_reply); | 665 | memcpy(msg->message, reply->message, sizeof error_reply); |
666 | out: | ||
663 | complete(&(resp_type->work)); | 667 | complete(&(resp_type->work)); |
664 | } | 668 | } |
665 | 669 | ||
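The receive callbacks changed above in zcrypt_cex2a.c, zcrypt_pcica.c, zcrypt_pcicc.c and zcrypt_pcixcc.c all get the same fix: the reply argument may be an ERR_PTR-encoded error, so it has to pass IS_ERR() before reply->message is read, whereas the old code dereferenced it in the variable initializer ahead of that check. The sketch below shows the corrected ordering as self-contained user-space C; the ERR_PTR()/IS_ERR() macros here are simplified stand-ins for the kernel's <linux/err.h>, and the buffer handling is reduced to a strcpy().

#include <stdio.h>
#include <string.h>

/* Simplified stand-ins for the kernel's error-pointer helpers. */
#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR(ptr)	((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

struct reply {
	char message[64];
};

static char request_buf[64];

static void receive(struct reply *reply)
{
	struct reply *t;		/* not initialized from reply->message */

	if (IS_ERR(reply)) {		/* check before any dereference */
		strcpy(request_buf, "machine failure reply");
		return;
	}
	t = reply;			/* safe: only reached for real pointers */
	strcpy(request_buf, t->message);
}

int main(void)
{
	struct reply ok = { .message = "type80 response" };

	receive(&ok);
	printf("%s\n", request_buf);
	receive(ERR_PTR(-5));		/* an -EIO style error pointer */
	printf("%s\n", request_buf);
	return 0;
}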
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c index 42776550acfd..f29c7086fc19 100644 --- a/drivers/s390/net/ctcm_fsms.c +++ b/drivers/s390/net/ctcm_fsms.c | |||
@@ -13,6 +13,9 @@ | |||
13 | #undef DEBUGDATA | 13 | #undef DEBUGDATA |
14 | #undef DEBUGCCW | 14 | #undef DEBUGCCW |
15 | 15 | ||
16 | #define KMSG_COMPONENT "ctcm" | ||
17 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
18 | |||
16 | #include <linux/module.h> | 19 | #include <linux/module.h> |
17 | #include <linux/init.h> | 20 | #include <linux/init.h> |
18 | #include <linux/kernel.h> | 21 | #include <linux/kernel.h> |
@@ -190,21 +193,22 @@ static void ctcmpc_chx_send_sweep(fsm_instance *fsm, int event, void *arg); | |||
190 | void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg) | 193 | void ctcm_ccw_check_rc(struct channel *ch, int rc, char *msg) |
191 | { | 194 | { |
192 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, | 195 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
193 | "%s(%s): %s: %04x\n", | 196 | "%s(%s): %s: %04x\n", |
194 | CTCM_FUNTAIL, ch->id, msg, rc); | 197 | CTCM_FUNTAIL, ch->id, msg, rc); |
195 | switch (rc) { | 198 | switch (rc) { |
196 | case -EBUSY: | 199 | case -EBUSY: |
197 | ctcm_pr_warn("%s (%s): Busy !\n", ch->id, msg); | 200 | pr_info("%s: The communication peer is busy\n", |
201 | ch->id); | ||
198 | fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch); | 202 | fsm_event(ch->fsm, CTC_EVENT_IO_EBUSY, ch); |
199 | break; | 203 | break; |
200 | case -ENODEV: | 204 | case -ENODEV: |
201 | ctcm_pr_emerg("%s (%s): Invalid device called for IO\n", | 205 | pr_err("%s: The specified target device is not valid\n", |
202 | ch->id, msg); | 206 | ch->id); |
203 | fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch); | 207 | fsm_event(ch->fsm, CTC_EVENT_IO_ENODEV, ch); |
204 | break; | 208 | break; |
205 | default: | 209 | default: |
206 | ctcm_pr_emerg("%s (%s): Unknown error in do_IO %04x\n", | 210 | pr_err("An I/O operation resulted in error %04x\n", |
207 | ch->id, msg, rc); | 211 | rc); |
208 | fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch); | 212 | fsm_event(ch->fsm, CTC_EVENT_IO_UNKNOWN, ch); |
209 | } | 213 | } |
210 | } | 214 | } |
@@ -886,8 +890,15 @@ static void ctcm_chx_rxiniterr(fsm_instance *fi, int event, void *arg) | |||
886 | fsm_newstate(fi, CTC_STATE_RXERR); | 890 | fsm_newstate(fi, CTC_STATE_RXERR); |
887 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); | 891 | fsm_event(priv->fsm, DEV_EVENT_RXDOWN, dev); |
888 | } | 892 | } |
889 | } else | 893 | } else { |
890 | ctcm_pr_warn("%s: Error during RX init handshake\n", dev->name); | 894 | CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR, |
895 | "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, | ||
896 | ctc_ch_event_names[event], fsm_getstate_str(fi)); | ||
897 | |||
898 | dev_warn(&dev->dev, | ||
899 | "Initialization failed with RX/TX init handshake " | ||
900 | "error %s\n", ctc_ch_event_names[event]); | ||
901 | } | ||
891 | } | 902 | } |
892 | 903 | ||
893 | /** | 904 | /** |
@@ -969,7 +980,9 @@ static void ctcm_chx_txiniterr(fsm_instance *fi, int event, void *arg) | |||
969 | "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, | 980 | "%s(%s): %s in %s", CTCM_FUNTAIL, ch->id, |
970 | ctc_ch_event_names[event], fsm_getstate_str(fi)); | 981 | ctc_ch_event_names[event], fsm_getstate_str(fi)); |
971 | 982 | ||
972 | ctcm_pr_warn("%s: Error during TX init handshake\n", dev->name); | 983 | dev_warn(&dev->dev, |
984 | "Initialization failed with RX/TX init handshake " | ||
985 | "error %s\n", ctc_ch_event_names[event]); | ||
973 | } | 986 | } |
974 | } | 987 | } |
975 | 988 | ||
@@ -2101,14 +2114,11 @@ static void dev_action_restart(fsm_instance *fi, int event, void *arg) | |||
2101 | CTCMY_DBF_DEV_NAME(TRACE, dev, ""); | 2114 | CTCMY_DBF_DEV_NAME(TRACE, dev, ""); |
2102 | 2115 | ||
2103 | if (IS_MPC(priv)) { | 2116 | if (IS_MPC(priv)) { |
2104 | ctcm_pr_info("ctcm: %s Restarting Device and " | ||
2105 | "MPC Group in 5 seconds\n", | ||
2106 | dev->name); | ||
2107 | restart_timer = CTCM_TIME_1_SEC; | 2117 | restart_timer = CTCM_TIME_1_SEC; |
2108 | } else { | 2118 | } else { |
2109 | ctcm_pr_info("%s: Restarting\n", dev->name); | ||
2110 | restart_timer = CTCM_TIME_5_SEC; | 2119 | restart_timer = CTCM_TIME_5_SEC; |
2111 | } | 2120 | } |
2121 | dev_info(&dev->dev, "Restarting device\n"); | ||
2112 | 2122 | ||
2113 | dev_action_stop(fi, event, arg); | 2123 | dev_action_stop(fi, event, arg); |
2114 | fsm_event(priv->fsm, DEV_EVENT_STOP, dev); | 2124 | fsm_event(priv->fsm, DEV_EVENT_STOP, dev); |
@@ -2150,16 +2160,16 @@ static void dev_action_chup(fsm_instance *fi, int event, void *arg) | |||
2150 | case DEV_STATE_STARTWAIT_RX: | 2160 | case DEV_STATE_STARTWAIT_RX: |
2151 | if (event == DEV_EVENT_RXUP) { | 2161 | if (event == DEV_EVENT_RXUP) { |
2152 | fsm_newstate(fi, DEV_STATE_RUNNING); | 2162 | fsm_newstate(fi, DEV_STATE_RUNNING); |
2153 | ctcm_pr_info("%s: connected with remote side\n", | 2163 | dev_info(&dev->dev, |
2154 | dev->name); | 2164 | "Connected with remote side\n"); |
2155 | ctcm_clear_busy(dev); | 2165 | ctcm_clear_busy(dev); |
2156 | } | 2166 | } |
2157 | break; | 2167 | break; |
2158 | case DEV_STATE_STARTWAIT_TX: | 2168 | case DEV_STATE_STARTWAIT_TX: |
2159 | if (event == DEV_EVENT_TXUP) { | 2169 | if (event == DEV_EVENT_TXUP) { |
2160 | fsm_newstate(fi, DEV_STATE_RUNNING); | 2170 | fsm_newstate(fi, DEV_STATE_RUNNING); |
2161 | ctcm_pr_info("%s: connected with remote side\n", | 2171 | dev_info(&dev->dev, |
2162 | dev->name); | 2172 | "Connected with remote side\n"); |
2163 | ctcm_clear_busy(dev); | 2173 | ctcm_clear_busy(dev); |
2164 | } | 2174 | } |
2165 | break; | 2175 | break; |
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c index a4e29836a2aa..2678573becec 100644 --- a/drivers/s390/net/ctcm_main.c +++ b/drivers/s390/net/ctcm_main.c | |||
@@ -21,6 +21,9 @@ | |||
21 | #undef DEBUGDATA | 21 | #undef DEBUGDATA |
22 | #undef DEBUGCCW | 22 | #undef DEBUGCCW |
23 | 23 | ||
24 | #define KMSG_COMPONENT "ctcm" | ||
25 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
26 | |||
24 | #include <linux/module.h> | 27 | #include <linux/module.h> |
25 | #include <linux/init.h> | 28 | #include <linux/init.h> |
26 | #include <linux/kernel.h> | 29 | #include <linux/kernel.h> |
@@ -281,14 +284,16 @@ static long ctcm_check_irb_error(struct ccw_device *cdev, struct irb *irb) | |||
281 | 284 | ||
282 | switch (PTR_ERR(irb)) { | 285 | switch (PTR_ERR(irb)) { |
283 | case -EIO: | 286 | case -EIO: |
284 | ctcm_pr_warn("i/o-error on device %s\n", dev_name(&cdev->dev)); | 287 | dev_err(&cdev->dev, |
288 | "An I/O-error occurred on the CTCM device\n"); | ||
285 | break; | 289 | break; |
286 | case -ETIMEDOUT: | 290 | case -ETIMEDOUT: |
287 | ctcm_pr_warn("timeout on device %s\n", dev_name(&cdev->dev)); | 291 | dev_err(&cdev->dev, |
292 | "An adapter hardware operation timed out\n"); | ||
288 | break; | 293 | break; |
289 | default: | 294 | default: |
290 | ctcm_pr_warn("unknown error %ld on device %s\n", | 295 | dev_err(&cdev->dev, |
291 | PTR_ERR(irb), dev_name(&cdev->dev)); | 296 | "An error occurred on the adapter hardware\n"); |
292 | } | 297 | } |
293 | return PTR_ERR(irb); | 298 | return PTR_ERR(irb); |
294 | } | 299 | } |
@@ -309,15 +314,17 @@ static inline void ccw_unit_check(struct channel *ch, __u8 sense) | |||
309 | if (sense & SNS0_INTERVENTION_REQ) { | 314 | if (sense & SNS0_INTERVENTION_REQ) { |
310 | if (sense & 0x01) { | 315 | if (sense & 0x01) { |
311 | if (ch->sense_rc != 0x01) { | 316 | if (ch->sense_rc != 0x01) { |
312 | ctcm_pr_debug("%s: Interface disc. or Sel. " | 317 | pr_notice( |
313 | "reset (remote)\n", ch->id); | 318 | "%s: The communication peer has " |
319 | "disconnected\n", ch->id); | ||
314 | ch->sense_rc = 0x01; | 320 | ch->sense_rc = 0x01; |
315 | } | 321 | } |
316 | fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch); | 322 | fsm_event(ch->fsm, CTC_EVENT_UC_RCRESET, ch); |
317 | } else { | 323 | } else { |
318 | if (ch->sense_rc != SNS0_INTERVENTION_REQ) { | 324 | if (ch->sense_rc != SNS0_INTERVENTION_REQ) { |
319 | ctcm_pr_debug("%s: System reset (remote)\n", | 325 | pr_notice( |
320 | ch->id); | 326 | "%s: The remote operating system is " |
327 | "not available\n", ch->id); | ||
321 | ch->sense_rc = SNS0_INTERVENTION_REQ; | 328 | ch->sense_rc = SNS0_INTERVENTION_REQ; |
322 | } | 329 | } |
323 | fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch); | 330 | fsm_event(ch->fsm, CTC_EVENT_UC_RSRESET, ch); |
@@ -1194,8 +1201,11 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
1194 | 1201 | ||
1195 | /* Check for unsolicited interrupts. */ | 1202 | /* Check for unsolicited interrupts. */ |
1196 | if (cgdev == NULL) { | 1203 | if (cgdev == NULL) { |
1197 | ctcm_pr_warn("ctcm: Got unsolicited irq: c-%02x d-%02x\n", | 1204 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_ERROR, |
1198 | cstat, dstat); | 1205 | "%s(%s) unsolicited irq: c-%02x d-%02x\n", |
1206 | CTCM_FUNTAIL, dev_name(&cdev->dev), cstat, dstat); | ||
1207 | dev_warn(&cdev->dev, | ||
1208 | "The adapter received a non-specific IRQ\n"); | ||
1199 | return; | 1209 | return; |
1200 | } | 1210 | } |
1201 | 1211 | ||
@@ -1207,31 +1217,34 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
1207 | else if (priv->channel[WRITE]->cdev == cdev) | 1217 | else if (priv->channel[WRITE]->cdev == cdev) |
1208 | ch = priv->channel[WRITE]; | 1218 | ch = priv->channel[WRITE]; |
1209 | else { | 1219 | else { |
1210 | ctcm_pr_err("ctcm: Can't determine channel for interrupt, " | 1220 | dev_err(&cdev->dev, |
1211 | "device %s\n", dev_name(&cdev->dev)); | 1221 | "%s: Internal error: Can't determine channel for " |
1222 | "interrupt device %s\n", | ||
1223 | __func__, dev_name(&cdev->dev)); | ||
1224 | /* Explain: inconsistent internal structures */ | ||
1212 | return; | 1225 | return; |
1213 | } | 1226 | } |
1214 | 1227 | ||
1215 | dev = ch->netdev; | 1228 | dev = ch->netdev; |
1216 | if (dev == NULL) { | 1229 | if (dev == NULL) { |
1217 | ctcm_pr_crit("ctcm: %s dev=NULL bus_id=%s, ch=0x%p\n", | 1230 | dev_err(&cdev->dev, |
1218 | __func__, dev_name(&cdev->dev), ch); | 1231 | "%s Internal error: net_device is NULL, ch = 0x%p\n", |
1232 | __func__, ch); | ||
1233 | /* Explain: inconsistent internal structures */ | ||
1219 | return; | 1234 | return; |
1220 | } | 1235 | } |
1221 | 1236 | ||
1222 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_DEBUG, | ||
1223 | "%s(%s): int. for %s: cstat=%02x dstat=%02x", | ||
1224 | CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat); | ||
1225 | |||
1226 | /* Copy interruption response block. */ | 1237 | /* Copy interruption response block. */ |
1227 | memcpy(ch->irb, irb, sizeof(struct irb)); | 1238 | memcpy(ch->irb, irb, sizeof(struct irb)); |
1228 | 1239 | ||
1240 | /* Issue error message and return on subchannel error code */ | ||
1229 | if (irb->scsw.cmd.cstat) { | 1241 | if (irb->scsw.cmd.cstat) { |
1230 | /* Check for good subchannel return code, otherwise error message */ | ||
1231 | fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); | 1242 | fsm_event(ch->fsm, CTC_EVENT_SC_UNKNOWN, ch); |
1232 | ctcm_pr_warn("%s: subchannel check for dev: %s - %02x %02x\n", | 1243 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, |
1233 | dev->name, ch->id, irb->scsw.cmd.cstat, | 1244 | "%s(%s): sub-ch check %s: cs=%02x ds=%02x", |
1234 | irb->scsw.cmd.dstat); | 1245 | CTCM_FUNTAIL, dev->name, ch->id, cstat, dstat); |
1246 | dev_warn(&cdev->dev, | ||
1247 | "A check occurred on the subchannel\n"); | ||
1235 | return; | 1248 | return; |
1236 | } | 1249 | } |
1237 | 1250 | ||
@@ -1239,7 +1252,7 @@ static void ctcm_irq_handler(struct ccw_device *cdev, | |||
1239 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { | 1252 | if (irb->scsw.cmd.dstat & DEV_STAT_UNIT_CHECK) { |
1240 | if ((irb->ecw[0] & ch->sense_rc) == 0) | 1253 | if ((irb->ecw[0] & ch->sense_rc) == 0) |
1241 | /* print it only once */ | 1254 | /* print it only once */ |
1242 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_INFO, | 1255 | CTCM_DBF_TEXT_(TRACE, CTC_DBF_WARN, |
1243 | "%s(%s): sense=%02x, ds=%02x", | 1256 | "%s(%s): sense=%02x, ds=%02x", |
1244 | CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat); | 1257 | CTCM_FUNTAIL, ch->id, irb->ecw[0], dstat); |
1245 | ccw_unit_check(ch, irb->ecw[0]); | 1258 | ccw_unit_check(ch, irb->ecw[0]); |
@@ -1574,6 +1587,11 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev) | |||
1574 | 1587 | ||
1575 | strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); | 1588 | strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); |
1576 | 1589 | ||
1590 | dev_info(&dev->dev, | ||
1591 | "setup OK : r/w = %s/%s, protocol : %d\n", | ||
1592 | priv->channel[READ]->id, | ||
1593 | priv->channel[WRITE]->id, priv->protocol); | ||
1594 | |||
1577 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, | 1595 | CTCM_DBF_TEXT_(SETUP, CTC_DBF_INFO, |
1578 | "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name, | 1596 | "setup(%s) OK : r/w = %s/%s, protocol : %d", dev->name, |
1579 | priv->channel[READ]->id, | 1597 | priv->channel[READ]->id, |
@@ -1687,7 +1705,7 @@ static void __exit ctcm_exit(void) | |||
1687 | { | 1705 | { |
1688 | unregister_cu3088_discipline(&ctcm_group_driver); | 1706 | unregister_cu3088_discipline(&ctcm_group_driver); |
1689 | ctcm_unregister_dbf_views(); | 1707 | ctcm_unregister_dbf_views(); |
1690 | ctcm_pr_info("CTCM driver unloaded\n"); | 1708 | pr_info("CTCM driver unloaded\n"); |
1691 | } | 1709 | } |
1692 | 1710 | ||
1693 | /* | 1711 | /* |
@@ -1695,7 +1713,7 @@ static void __exit ctcm_exit(void) | |||
1695 | */ | 1713 | */ |
1696 | static void print_banner(void) | 1714 | static void print_banner(void) |
1697 | { | 1715 | { |
1698 | printk(KERN_INFO "CTCM driver initialized\n"); | 1716 | pr_info("CTCM driver initialized\n"); |
1699 | } | 1717 | } |
1700 | 1718 | ||
1701 | /** | 1719 | /** |
@@ -1717,8 +1735,8 @@ static int __init ctcm_init(void) | |||
1717 | ret = register_cu3088_discipline(&ctcm_group_driver); | 1735 | ret = register_cu3088_discipline(&ctcm_group_driver); |
1718 | if (ret) { | 1736 | if (ret) { |
1719 | ctcm_unregister_dbf_views(); | 1737 | ctcm_unregister_dbf_views(); |
1720 | ctcm_pr_crit("ctcm_init failed with register_cu3088_discipline " | 1738 | pr_err("%s / register_cu3088_discipline failed, ret = %d\n", |
1721 | "(rc = %d)\n", ret); | 1739 | __func__, ret); |
1722 | return ret; | 1740 | return ret; |
1723 | } | 1741 | } |
1724 | print_banner(); | 1742 | print_banner(); |
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h index d77cce3fe4d4..d925e732b7d8 100644 --- a/drivers/s390/net/ctcm_main.h +++ b/drivers/s390/net/ctcm_main.h | |||
@@ -41,12 +41,6 @@ | |||
41 | #define LOG_FLAG_NOMEM 8 | 41 | #define LOG_FLAG_NOMEM 8 |
42 | 42 | ||
43 | #define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg) | 43 | #define ctcm_pr_debug(fmt, arg...) printk(KERN_DEBUG fmt, ##arg) |
44 | #define ctcm_pr_info(fmt, arg...) printk(KERN_INFO fmt, ##arg) | ||
45 | #define ctcm_pr_notice(fmt, arg...) printk(KERN_NOTICE fmt, ##arg) | ||
46 | #define ctcm_pr_warn(fmt, arg...) printk(KERN_WARNING fmt, ##arg) | ||
47 | #define ctcm_pr_emerg(fmt, arg...) printk(KERN_EMERG fmt, ##arg) | ||
48 | #define ctcm_pr_err(fmt, arg...) printk(KERN_ERR fmt, ##arg) | ||
49 | #define ctcm_pr_crit(fmt, arg...) printk(KERN_CRIT fmt, ##arg) | ||
50 | 44 | ||
51 | #define CTCM_PR_DEBUG(fmt, arg...) \ | 45 | #define CTCM_PR_DEBUG(fmt, arg...) \ |
52 | do { \ | 46 | do { \ |
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c index 19f5d5ed85e0..3db5f846bbf6 100644 --- a/drivers/s390/net/ctcm_mpc.c +++ b/drivers/s390/net/ctcm_mpc.c | |||
@@ -19,6 +19,9 @@ | |||
19 | #undef DEBUGDATA | 19 | #undef DEBUGDATA |
20 | #undef DEBUGCCW | 20 | #undef DEBUGCCW |
21 | 21 | ||
22 | #define KMSG_COMPONENT "ctcm" | ||
23 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
24 | |||
22 | #include <linux/module.h> | 25 | #include <linux/module.h> |
23 | #include <linux/init.h> | 26 | #include <linux/init.h> |
24 | #include <linux/kernel.h> | 27 | #include <linux/kernel.h> |
@@ -386,7 +389,7 @@ int ctc_mpc_alloc_channel(int port_num, void (*callback)(int, int)) | |||
386 | if (grp->allocchan_callback_retries < 4) { | 389 | if (grp->allocchan_callback_retries < 4) { |
387 | if (grp->allochanfunc) | 390 | if (grp->allochanfunc) |
388 | grp->allochanfunc(grp->port_num, | 391 | grp->allochanfunc(grp->port_num, |
389 | grp->group_max_buflen); | 392 | grp->group_max_buflen); |
390 | } else { | 393 | } else { |
391 | /* there are problems...bail out */ | 394 | /* there are problems...bail out */ |
392 | /* there may be a state mismatch so restart */ | 395 | /* there may be a state mismatch so restart */ |
@@ -1232,8 +1235,9 @@ done: | |||
1232 | 1235 | ||
1233 | dev_kfree_skb_any(pskb); | 1236 | dev_kfree_skb_any(pskb); |
1234 | if (sendrc == NET_RX_DROP) { | 1237 | if (sendrc == NET_RX_DROP) { |
1235 | printk(KERN_WARNING "%s %s() NETWORK BACKLOG EXCEEDED" | 1238 | dev_warn(&dev->dev, |
1236 | " - PACKET DROPPED\n", dev->name, __func__); | 1239 | "The network backlog for %s is exceeded, " |
1240 | "package dropped\n", __func__); | ||
1237 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); | 1241 | fsm_event(grp->fsm, MPCG_EVENT_INOP, dev); |
1238 | } | 1242 | } |
1239 | 1243 | ||
@@ -1670,10 +1674,11 @@ static int mpc_validate_xid(struct mpcg_info *mpcginfo) | |||
1670 | CTCM_FUNTAIL, ch->id); | 1674 | CTCM_FUNTAIL, ch->id); |
1671 | } | 1675 | } |
1672 | } | 1676 | } |
1673 | |||
1674 | done: | 1677 | done: |
1675 | if (rc) { | 1678 | if (rc) { |
1676 | ctcm_pr_info("ctcmpc : %s() failed\n", __func__); | 1679 | dev_warn(&dev->dev, |
1680 | "The XID used in the MPC protocol is not valid, " | ||
1681 | "rc = %d\n", rc); | ||
1677 | priv->xid->xid2_flag2 = 0x40; | 1682 | priv->xid->xid2_flag2 = 0x40; |
1678 | grp->saved_xid2->xid2_flag2 = 0x40; | 1683 | grp->saved_xid2->xid2_flag2 = 0x40; |
1679 | } | 1684 | } |
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c index bb2d13721d34..8452bb052d68 100644 --- a/drivers/s390/net/ctcm_sysfs.c +++ b/drivers/s390/net/ctcm_sysfs.c | |||
@@ -10,6 +10,9 @@ | |||
10 | #undef DEBUGDATA | 10 | #undef DEBUGDATA |
11 | #undef DEBUGCCW | 11 | #undef DEBUGCCW |
12 | 12 | ||
13 | #define KMSG_COMPONENT "ctcm" | ||
14 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
15 | |||
13 | #include <linux/sysfs.h> | 16 | #include <linux/sysfs.h> |
14 | #include "ctcm_main.h" | 17 | #include "ctcm_main.h" |
15 | 18 | ||
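Each ctcm source file above now defines KMSG_COMPONENT and pr_fmt() before its includes, which makes every pr_err()/pr_info()/pr_notice() in that file carry a "ctcm: " prefix and lets most of the old ctcm_pr_* wrappers be removed from ctcm_main.h. The sketch below shows how the prefix is composed at compile time through string-literal concatenation; printf/fprintf and the local pr_info()/pr_err() definitions are stand-ins for printk and the kernel's printk header, so this is an illustration of the pattern rather than kernel code.

#include <stdio.h>

#define KMSG_COMPONENT "ctcm"
/* String literals concatenate at compile time, so every message in this
 * file is prefixed with "ctcm: " without any run-time cost. */
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

/* Stand-ins for the kernel's pr_info()/pr_err(); ##__VA_ARGS__ is the same
 * GNU extension the kernel relies on to swallow the trailing comma. */
#define pr_info(fmt, ...) printf(pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...)  fprintf(stderr, pr_fmt(fmt), ##__VA_ARGS__)

int main(void)
{
	pr_info("CTCM driver initialized\n");	/* "ctcm: CTCM driver initialized" */
	pr_err("%s failed, ret = %d\n", "register_cu3088_discipline", -22);
	return 0;
}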
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 0825be87e5a0..fb6c70cec253 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -26,6 +26,9 @@ | |||
26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | 26 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. |
27 | */ | 27 | */ |
28 | 28 | ||
29 | #define KMSG_COMPONENT "lcs" | ||
30 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
31 | |||
29 | #include <linux/module.h> | 32 | #include <linux/module.h> |
30 | #include <linux/if.h> | 33 | #include <linux/if.h> |
31 | #include <linux/netdevice.h> | 34 | #include <linux/netdevice.h> |
@@ -54,8 +57,6 @@ | |||
54 | #error Cannot compile lcs.c without some net devices switched on. | 57 | #error Cannot compile lcs.c without some net devices switched on. |
55 | #endif | 58 | #endif |
56 | 59 | ||
57 | #define PRINTK_HEADER " lcs: " | ||
58 | |||
59 | /** | 60 | /** |
60 | * initialization string for output | 61 | * initialization string for output |
61 | */ | 62 | */ |
@@ -96,7 +97,7 @@ lcs_register_debug_facility(void) | |||
96 | lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8); | 97 | lcs_dbf_setup = debug_register("lcs_setup", 2, 1, 8); |
97 | lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8); | 98 | lcs_dbf_trace = debug_register("lcs_trace", 4, 1, 8); |
98 | if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) { | 99 | if (lcs_dbf_setup == NULL || lcs_dbf_trace == NULL) { |
99 | PRINT_ERR("Not enough memory for debug facility.\n"); | 100 | pr_err("Not enough memory for debug facility.\n"); |
100 | lcs_unregister_debug_facility(); | 101 | lcs_unregister_debug_facility(); |
101 | return -ENOMEM; | 102 | return -ENOMEM; |
102 | } | 103 | } |
@@ -503,7 +504,9 @@ lcs_start_channel(struct lcs_channel *channel) | |||
503 | if (rc) { | 504 | if (rc) { |
504 | LCS_DBF_TEXT_(4,trace,"essh%s", | 505 | LCS_DBF_TEXT_(4,trace,"essh%s", |
505 | dev_name(&channel->ccwdev->dev)); | 506 | dev_name(&channel->ccwdev->dev)); |
506 | PRINT_ERR("Error in starting channel, rc=%d!\n", rc); | 507 | dev_err(&channel->ccwdev->dev, |
508 | "Starting an LCS device resulted in an error," | ||
509 | " rc=%d!\n", rc); | ||
507 | } | 510 | } |
508 | return rc; | 511 | return rc; |
509 | } | 512 | } |
@@ -640,7 +643,9 @@ __lcs_resume_channel(struct lcs_channel *channel) | |||
640 | if (rc) { | 643 | if (rc) { |
641 | LCS_DBF_TEXT_(4, trace, "ersc%s", | 644 | LCS_DBF_TEXT_(4, trace, "ersc%s", |
642 | dev_name(&channel->ccwdev->dev)); | 645 | dev_name(&channel->ccwdev->dev)); |
643 | PRINT_ERR("Error in lcs_resume_channel: rc=%d\n",rc); | 646 | dev_err(&channel->ccwdev->dev, |
647 | "Sending data from the LCS device to the LAN failed" | ||
648 | " with rc=%d\n",rc); | ||
644 | } else | 649 | } else |
645 | channel->state = LCS_CH_STATE_RUNNING; | 650 | channel->state = LCS_CH_STATE_RUNNING; |
646 | return rc; | 651 | return rc; |
@@ -1086,7 +1091,7 @@ lcs_check_multicast_support(struct lcs_card *card) | |||
1086 | cmd->cmd.lcs_qipassist.num_ip_pairs = 1; | 1091 | cmd->cmd.lcs_qipassist.num_ip_pairs = 1; |
1087 | rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb); | 1092 | rc = lcs_send_lancmd(card, buffer, __lcs_check_multicast_cb); |
1088 | if (rc != 0) { | 1093 | if (rc != 0) { |
1089 | PRINT_ERR("Query IPAssist failed. Assuming unsupported!\n"); | 1094 | pr_err("Query IPAssist failed. Assuming unsupported!\n"); |
1090 | return -EOPNOTSUPP; | 1095 | return -EOPNOTSUPP; |
1091 | } | 1096 | } |
1092 | if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) | 1097 | if (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) |
@@ -1119,8 +1124,8 @@ list_modified: | |||
1119 | rc = lcs_send_setipm(card, ipm); | 1124 | rc = lcs_send_setipm(card, ipm); |
1120 | spin_lock_irqsave(&card->ipm_lock, flags); | 1125 | spin_lock_irqsave(&card->ipm_lock, flags); |
1121 | if (rc) { | 1126 | if (rc) { |
1122 | PRINT_INFO("Adding multicast address failed. " | 1127 | pr_info("Adding multicast address failed." |
1123 | "Table possibly full!\n"); | 1128 | " Table possibly full!\n"); |
1124 | /* store ipm in failed list -> will be added | 1129 | /* store ipm in failed list -> will be added |
1125 | * to ipm_list again, so a retry will be done | 1130 | * to ipm_list again, so a retry will be done |
1126 | * during the next call of this function */ | 1131 | * during the next call of this function */ |
@@ -1231,8 +1236,8 @@ lcs_set_mc_addresses(struct lcs_card *card, struct in_device *in4_dev) | |||
1231 | ipm = (struct lcs_ipm_list *) | 1236 | ipm = (struct lcs_ipm_list *) |
1232 | kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); | 1237 | kzalloc(sizeof(struct lcs_ipm_list), GFP_ATOMIC); |
1233 | if (ipm == NULL) { | 1238 | if (ipm == NULL) { |
1234 | PRINT_INFO("Not enough memory to add " | 1239 | pr_info("Not enough memory to add" |
1235 | "new multicast entry!\n"); | 1240 | " new multicast entry!\n"); |
1236 | break; | 1241 | break; |
1237 | } | 1242 | } |
1238 | memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH); | 1243 | memcpy(&ipm->ipm.mac_addr, buf, LCS_MAC_LENGTH); |
@@ -1306,18 +1311,21 @@ lcs_check_irb_error(struct ccw_device *cdev, struct irb *irb) | |||
1306 | 1311 | ||
1307 | switch (PTR_ERR(irb)) { | 1312 | switch (PTR_ERR(irb)) { |
1308 | case -EIO: | 1313 | case -EIO: |
1309 | PRINT_WARN("i/o-error on device %s\n", dev_name(&cdev->dev)); | 1314 | dev_warn(&cdev->dev, |
1315 | "An I/O-error occurred on the LCS device\n"); | ||
1310 | LCS_DBF_TEXT(2, trace, "ckirberr"); | 1316 | LCS_DBF_TEXT(2, trace, "ckirberr"); |
1311 | LCS_DBF_TEXT_(2, trace, " rc%d", -EIO); | 1317 | LCS_DBF_TEXT_(2, trace, " rc%d", -EIO); |
1312 | break; | 1318 | break; |
1313 | case -ETIMEDOUT: | 1319 | case -ETIMEDOUT: |
1314 | PRINT_WARN("timeout on device %s\n", dev_name(&cdev->dev)); | 1320 | dev_warn(&cdev->dev, |
1321 | "A command timed out on the LCS device\n"); | ||
1315 | LCS_DBF_TEXT(2, trace, "ckirberr"); | 1322 | LCS_DBF_TEXT(2, trace, "ckirberr"); |
1316 | LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT); | 1323 | LCS_DBF_TEXT_(2, trace, " rc%d", -ETIMEDOUT); |
1317 | break; | 1324 | break; |
1318 | default: | 1325 | default: |
1319 | PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb), | 1326 | dev_warn(&cdev->dev, |
1320 | dev_name(&cdev->dev)); | 1327 | "An error occurred on the LCS device, rc=%ld\n", |
1328 | PTR_ERR(irb)); | ||
1321 | LCS_DBF_TEXT(2, trace, "ckirberr"); | 1329 | LCS_DBF_TEXT(2, trace, "ckirberr"); |
1322 | LCS_DBF_TEXT(2, trace, " rc???"); | 1330 | LCS_DBF_TEXT(2, trace, " rc???"); |
1323 | } | 1331 | } |
@@ -1403,8 +1411,10 @@ lcs_irq(struct ccw_device *cdev, unsigned long intparm, struct irb *irb) | |||
1403 | /* Check for channel and device errors presented */ | 1411 | /* Check for channel and device errors presented */ |
1404 | rc = lcs_get_problem(cdev, irb); | 1412 | rc = lcs_get_problem(cdev, irb); |
1405 | if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) { | 1413 | if (rc || (dstat & DEV_STAT_UNIT_EXCEP)) { |
1406 | PRINT_WARN("check on device %s, dstat=0x%X, cstat=0x%X \n", | 1414 | dev_warn(&cdev->dev, |
1407 | dev_name(&cdev->dev), dstat, cstat); | 1415 | "The LCS device stopped because of an error," |
1416 | " dstat=0x%X, cstat=0x%X \n", | ||
1417 | dstat, cstat); | ||
1408 | if (rc) { | 1418 | if (rc) { |
1409 | channel->state = LCS_CH_STATE_ERROR; | 1419 | channel->state = LCS_CH_STATE_ERROR; |
1410 | } | 1420 | } |
@@ -1761,8 +1771,8 @@ lcs_get_control(struct lcs_card *card, struct lcs_cmd *cmd) | |||
1761 | lcs_schedule_recovery(card); | 1771 | lcs_schedule_recovery(card); |
1762 | break; | 1772 | break; |
1763 | case LCS_CMD_STOPLAN: | 1773 | case LCS_CMD_STOPLAN: |
1764 | PRINT_WARN("Stoplan for %s initiated by LGW.\n", | 1774 | pr_warning("Stoplan for %s initiated by LGW.\n", |
1765 | card->dev->name); | 1775 | card->dev->name); |
1766 | if (card->dev) | 1776 | if (card->dev) |
1767 | netif_carrier_off(card->dev); | 1777 | netif_carrier_off(card->dev); |
1768 | break; | 1778 | break; |
@@ -1790,7 +1800,8 @@ lcs_get_skb(struct lcs_card *card, char *skb_data, unsigned int skb_len) | |||
1790 | 1800 | ||
1791 | skb = dev_alloc_skb(skb_len); | 1801 | skb = dev_alloc_skb(skb_len); |
1792 | if (skb == NULL) { | 1802 | if (skb == NULL) { |
1793 | PRINT_ERR("LCS: alloc_skb failed for device=%s\n", | 1803 | dev_err(&card->dev->dev, |
1804 | " Allocating a socket buffer to interface %s failed\n", | ||
1794 | card->dev->name); | 1805 | card->dev->name); |
1795 | card->stats.rx_dropped++; | 1806 | card->stats.rx_dropped++; |
1796 | return; | 1807 | return; |
@@ -1886,7 +1897,8 @@ lcs_stop_device(struct net_device *dev) | |||
1886 | (card->write.state != LCS_CH_STATE_RUNNING)); | 1897 | (card->write.state != LCS_CH_STATE_RUNNING)); |
1887 | rc = lcs_stopcard(card); | 1898 | rc = lcs_stopcard(card); |
1888 | if (rc) | 1899 | if (rc) |
1889 | PRINT_ERR("Try it again!\n "); | 1900 | dev_err(&card->dev->dev, |
1901 | " Shutting down the LCS device failed\n "); | ||
1890 | return rc; | 1902 | return rc; |
1891 | } | 1903 | } |
1892 | 1904 | ||
@@ -1905,7 +1917,7 @@ lcs_open_device(struct net_device *dev) | |||
1905 | /* initialize statistics */ | 1917 | /* initialize statistics */ |
1906 | rc = lcs_detect(card); | 1918 | rc = lcs_detect(card); |
1907 | if (rc) { | 1919 | if (rc) { |
1908 | PRINT_ERR("LCS:Error in opening device!\n"); | 1920 | pr_err("Error in opening device!\n"); |
1909 | 1921 | ||
1910 | } else { | 1922 | } else { |
1911 | dev->flags |= IFF_UP; | 1923 | dev->flags |= IFF_UP; |
@@ -2113,8 +2125,9 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) | |||
2113 | rc = lcs_detect(card); | 2125 | rc = lcs_detect(card); |
2114 | if (rc) { | 2126 | if (rc) { |
2115 | LCS_DBF_TEXT(2, setup, "dtctfail"); | 2127 | LCS_DBF_TEXT(2, setup, "dtctfail"); |
2116 | PRINT_WARN("Detection of LCS card failed with return code " | 2128 | dev_err(&card->dev->dev, |
2117 | "%d (0x%x)\n", rc, rc); | 2129 | "Detecting a network adapter for LCS devices" |
2130 | " failed with rc=%d (0x%x)\n", rc, rc); | ||
2118 | lcs_stopcard(card); | 2131 | lcs_stopcard(card); |
2119 | goto out; | 2132 | goto out; |
2120 | } | 2133 | } |
@@ -2144,7 +2157,7 @@ lcs_new_device(struct ccwgroup_device *ccwgdev) | |||
2144 | #endif | 2157 | #endif |
2145 | default: | 2158 | default: |
2146 | LCS_DBF_TEXT(3, setup, "errinit"); | 2159 | LCS_DBF_TEXT(3, setup, "errinit"); |
2147 | PRINT_ERR("LCS: Initialization failed\n"); | 2160 | pr_err(" Initialization failed\n"); |
2148 | goto out; | 2161 | goto out; |
2149 | } | 2162 | } |
2150 | if (!dev) | 2163 | if (!dev) |
@@ -2176,13 +2189,13 @@ netdev_out: | |||
2176 | goto out; | 2189 | goto out; |
2177 | 2190 | ||
2178 | /* Print out supported assists: IPv6 */ | 2191 | /* Print out supported assists: IPv6 */ |
2179 | PRINT_INFO("LCS device %s %s IPv6 support\n", card->dev->name, | 2192 | pr_info("LCS device %s %s IPv6 support\n", card->dev->name, |
2180 | (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ? | 2193 | (card->ip_assists_supported & LCS_IPASS_IPV6_SUPPORT) ? |
2181 | "with" : "without"); | 2194 | "with" : "without"); |
2182 | /* Print out supported assist: Multicast */ | 2195 | /* Print out supported assist: Multicast */ |
2183 | PRINT_INFO("LCS device %s %s Multicast support\n", card->dev->name, | 2196 | pr_info("LCS device %s %s Multicast support\n", card->dev->name, |
2184 | (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ? | 2197 | (card->ip_assists_supported & LCS_IPASS_MULTICAST_SUPPORT) ? |
2185 | "with" : "without"); | 2198 | "with" : "without"); |
2186 | return 0; | 2199 | return 0; |
2187 | out: | 2200 | out: |
2188 | 2201 | ||
@@ -2248,15 +2261,16 @@ lcs_recovery(void *ptr) | |||
2248 | return 0; | 2261 | return 0; |
2249 | LCS_DBF_TEXT(4, trace, "recover2"); | 2262 | LCS_DBF_TEXT(4, trace, "recover2"); |
2250 | gdev = card->gdev; | 2263 | gdev = card->gdev; |
2251 | PRINT_WARN("Recovery of device %s started...\n", dev_name(&gdev->dev)); | 2264 | dev_warn(&gdev->dev, |
2265 | "A recovery process has been started for the LCS device\n"); | ||
2252 | rc = __lcs_shutdown_device(gdev, 1); | 2266 | rc = __lcs_shutdown_device(gdev, 1); |
2253 | rc = lcs_new_device(gdev); | 2267 | rc = lcs_new_device(gdev); |
2254 | if (!rc) | 2268 | if (!rc) |
2255 | PRINT_INFO("Device %s successfully recovered!\n", | 2269 | pr_info("Device %s successfully recovered!\n", |
2256 | card->dev->name); | 2270 | card->dev->name); |
2257 | else | 2271 | else |
2258 | PRINT_INFO("Device %s could not be recovered!\n", | 2272 | pr_info("Device %s could not be recovered!\n", |
2259 | card->dev->name); | 2273 | card->dev->name); |
2260 | lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD); | 2274 | lcs_clear_thread_running_bit(card, LCS_RECOVERY_THREAD); |
2261 | return 0; | 2275 | return 0; |
2262 | } | 2276 | } |
@@ -2308,17 +2322,17 @@ __init lcs_init_module(void) | |||
2308 | { | 2322 | { |
2309 | int rc; | 2323 | int rc; |
2310 | 2324 | ||
2311 | PRINT_INFO("Loading %s\n",version); | 2325 | pr_info("Loading %s\n", version); |
2312 | rc = lcs_register_debug_facility(); | 2326 | rc = lcs_register_debug_facility(); |
2313 | LCS_DBF_TEXT(0, setup, "lcsinit"); | 2327 | LCS_DBF_TEXT(0, setup, "lcsinit"); |
2314 | if (rc) { | 2328 | if (rc) { |
2315 | PRINT_ERR("Initialization failed\n"); | 2329 | pr_err("Initialization failed\n"); |
2316 | return rc; | 2330 | return rc; |
2317 | } | 2331 | } |
2318 | 2332 | ||
2319 | rc = register_cu3088_discipline(&lcs_group_driver); | 2333 | rc = register_cu3088_discipline(&lcs_group_driver); |
2320 | if (rc) { | 2334 | if (rc) { |
2321 | PRINT_ERR("Initialization failed\n"); | 2335 | pr_err("Initialization failed\n"); |
2322 | return rc; | 2336 | return rc; |
2323 | } | 2337 | } |
2324 | return 0; | 2338 | return 0; |
@@ -2331,7 +2345,7 @@ __init lcs_init_module(void) | |||
2331 | static void | 2345 | static void |
2332 | __exit lcs_cleanup_module(void) | 2346 | __exit lcs_cleanup_module(void) |
2333 | { | 2347 | { |
2334 | PRINT_INFO("Terminating lcs module.\n"); | 2348 | pr_info("Terminating lcs module.\n"); |
2335 | LCS_DBF_TEXT(0, trace, "cleanup"); | 2349 | LCS_DBF_TEXT(0, trace, "cleanup"); |
2336 | unregister_cu3088_discipline(&lcs_group_driver); | 2350 | unregister_cu3088_discipline(&lcs_group_driver); |
2337 | lcs_unregister_debug_facility(); | 2351 | lcs_unregister_debug_facility(); |
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index 0fea51e34b57..930e2fc2a011 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -31,6 +31,9 @@ | |||
31 | * | 31 | * |
32 | */ | 32 | */ |
33 | 33 | ||
34 | #define KMSG_COMPONENT "netiucv" | ||
35 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
36 | |||
34 | #undef DEBUG | 37 | #undef DEBUG |
35 | 38 | ||
36 | #include <linux/module.h> | 39 | #include <linux/module.h> |
@@ -846,7 +849,8 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg) | |||
846 | 849 | ||
847 | fsm_deltimer(&conn->timer); | 850 | fsm_deltimer(&conn->timer); |
848 | iucv_path_sever(conn->path, NULL); | 851 | iucv_path_sever(conn->path, NULL); |
849 | PRINT_INFO("%s: Remote dropped connection\n", netdev->name); | 852 | dev_info(privptr->dev, "The peer interface of the IUCV device" |
853 | " has closed the connection\n"); | ||
850 | IUCV_DBF_TEXT(data, 2, | 854 | IUCV_DBF_TEXT(data, 2, |
851 | "conn_action_connsever: Remote dropped connection\n"); | 855 | "conn_action_connsever: Remote dropped connection\n"); |
852 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 856 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
@@ -856,13 +860,15 @@ static void conn_action_connsever(fsm_instance *fi, int event, void *arg) | |||
856 | static void conn_action_start(fsm_instance *fi, int event, void *arg) | 860 | static void conn_action_start(fsm_instance *fi, int event, void *arg) |
857 | { | 861 | { |
858 | struct iucv_connection *conn = arg; | 862 | struct iucv_connection *conn = arg; |
863 | struct net_device *netdev = conn->netdev; | ||
864 | struct netiucv_priv *privptr = netdev_priv(netdev); | ||
859 | int rc; | 865 | int rc; |
860 | 866 | ||
861 | IUCV_DBF_TEXT(trace, 3, __func__); | 867 | IUCV_DBF_TEXT(trace, 3, __func__); |
862 | 868 | ||
863 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 869 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
864 | IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n", | 870 | IUCV_DBF_TEXT_(setup, 2, "%s('%s'): connecting ...\n", |
865 | conn->netdev->name, conn->userid); | 871 | netdev->name, conn->userid); |
866 | 872 | ||
867 | /* | 873 | /* |
868 | * We must set the state before calling iucv_connect because the | 874 | * We must set the state before calling iucv_connect because the |
@@ -876,41 +882,45 @@ static void conn_action_start(fsm_instance *fi, int event, void *arg) | |||
876 | NULL, iucvMagic, conn); | 882 | NULL, iucvMagic, conn); |
877 | switch (rc) { | 883 | switch (rc) { |
878 | case 0: | 884 | case 0: |
879 | conn->netdev->tx_queue_len = conn->path->msglim; | 885 | netdev->tx_queue_len = conn->path->msglim; |
880 | fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, | 886 | fsm_addtimer(&conn->timer, NETIUCV_TIMEOUT_5SEC, |
881 | CONN_EVENT_TIMER, conn); | 887 | CONN_EVENT_TIMER, conn); |
882 | return; | 888 | return; |
883 | case 11: | 889 | case 11: |
884 | PRINT_INFO("%s: User %s is currently not available.\n", | 890 | dev_warn(privptr->dev, |
885 | conn->netdev->name, | 891 | "The IUCV device failed to connect to z/VM guest %s\n", |
886 | netiucv_printname(conn->userid)); | 892 | netiucv_printname(conn->userid)); |
887 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 893 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
888 | break; | 894 | break; |
889 | case 12: | 895 | case 12: |
890 | PRINT_INFO("%s: User %s is currently not ready.\n", | 896 | dev_warn(privptr->dev, |
891 | conn->netdev->name, | 897 | "The IUCV device failed to connect to the peer on z/VM" |
892 | netiucv_printname(conn->userid)); | 898 | " guest %s\n", netiucv_printname(conn->userid)); |
893 | fsm_newstate(fi, CONN_STATE_STARTWAIT); | 899 | fsm_newstate(fi, CONN_STATE_STARTWAIT); |
894 | break; | 900 | break; |
895 | case 13: | 901 | case 13: |
896 | PRINT_WARN("%s: Too many IUCV connections.\n", | 902 | dev_err(privptr->dev, |
897 | conn->netdev->name); | 903 | "Connecting the IUCV device would exceed the maximum" |
904 | " number of IUCV connections\n"); | ||
898 | fsm_newstate(fi, CONN_STATE_CONNERR); | 905 | fsm_newstate(fi, CONN_STATE_CONNERR); |
899 | break; | 906 | break; |
900 | case 14: | 907 | case 14: |
901 | PRINT_WARN("%s: User %s has too many IUCV connections.\n", | 908 | dev_err(privptr->dev, |
902 | conn->netdev->name, | 909 | "z/VM guest %s has too many IUCV connections" |
903 | netiucv_printname(conn->userid)); | 910 | " to connect with the IUCV device\n", |
911 | netiucv_printname(conn->userid)); | ||
904 | fsm_newstate(fi, CONN_STATE_CONNERR); | 912 | fsm_newstate(fi, CONN_STATE_CONNERR); |
905 | break; | 913 | break; |
906 | case 15: | 914 | case 15: |
907 | PRINT_WARN("%s: No IUCV authorization in CP directory.\n", | 915 | dev_err(privptr->dev, |
908 | conn->netdev->name); | 916 | "The IUCV device cannot connect to a z/VM guest with no" |
917 | " IUCV authorization\n"); | ||
909 | fsm_newstate(fi, CONN_STATE_CONNERR); | 918 | fsm_newstate(fi, CONN_STATE_CONNERR); |
910 | break; | 919 | break; |
911 | default: | 920 | default: |
912 | PRINT_WARN("%s: iucv_connect returned error %d\n", | 921 | dev_err(privptr->dev, |
913 | conn->netdev->name, rc); | 922 | "Connecting the IUCV device failed with error %d\n", |
923 | rc); | ||
914 | fsm_newstate(fi, CONN_STATE_CONNERR); | 924 | fsm_newstate(fi, CONN_STATE_CONNERR); |
915 | break; | 925 | break; |
916 | } | 926 | } |
@@ -1059,8 +1069,9 @@ dev_action_connup(fsm_instance *fi, int event, void *arg) | |||
1059 | switch (fsm_getstate(fi)) { | 1069 | switch (fsm_getstate(fi)) { |
1060 | case DEV_STATE_STARTWAIT: | 1070 | case DEV_STATE_STARTWAIT: |
1061 | fsm_newstate(fi, DEV_STATE_RUNNING); | 1071 | fsm_newstate(fi, DEV_STATE_RUNNING); |
1062 | PRINT_INFO("%s: connected with remote side %s\n", | 1072 | dev_info(privptr->dev, |
1063 | dev->name, privptr->conn->userid); | 1073 | "The IUCV device has been connected" |
1074 | " successfully to %s\n", privptr->conn->userid); | ||
1064 | IUCV_DBF_TEXT(setup, 3, | 1075 | IUCV_DBF_TEXT(setup, 3, |
1065 | "connection is up and running\n"); | 1076 | "connection is up and running\n"); |
1066 | break; | 1077 | break; |
@@ -1982,6 +1993,8 @@ static ssize_t conn_write(struct device_driver *drv, | |||
1982 | if (rc) | 1993 | if (rc) |
1983 | goto out_unreg; | 1994 | goto out_unreg; |
1984 | 1995 | ||
1996 | dev_info(priv->dev, "The IUCV interface to %s has been" | ||
1997 | " established successfully\n", netiucv_printname(username)); | ||
1985 | 1998 | ||
1986 | return count; | 1999 | return count; |
1987 | 2000 | ||
@@ -2027,10 +2040,9 @@ static ssize_t remove_write (struct device_driver *drv, | |||
2027 | continue; | 2040 | continue; |
2028 | read_unlock_bh(&iucv_connection_rwlock); | 2041 | read_unlock_bh(&iucv_connection_rwlock); |
2029 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { | 2042 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { |
2030 | PRINT_WARN("netiucv: net device %s active with peer " | 2043 | dev_warn(dev, "The IUCV device is connected" |
2031 | "%s\n", ndev->name, priv->conn->userid); | 2044 | " to %s and cannot be removed\n", |
2032 | PRINT_WARN("netiucv: %s cannot be removed\n", | 2045 | priv->conn->userid); |
2033 | ndev->name); | ||
2034 | IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); | 2046 | IUCV_DBF_TEXT(data, 2, "remove_write: still active\n"); |
2035 | return -EPERM; | 2047 | return -EPERM; |
2036 | } | 2048 | } |
@@ -2062,7 +2074,7 @@ static struct attribute_group *netiucv_drv_attr_groups[] = { | |||
2062 | 2074 | ||
2063 | static void netiucv_banner(void) | 2075 | static void netiucv_banner(void) |
2064 | { | 2076 | { |
2065 | PRINT_INFO("NETIUCV driver initialized\n"); | 2077 | pr_info("driver initialized\n"); |
2066 | } | 2078 | } |
2067 | 2079 | ||
2068 | static void __exit netiucv_exit(void) | 2080 | static void __exit netiucv_exit(void) |
@@ -2088,7 +2100,7 @@ static void __exit netiucv_exit(void) | |||
2088 | iucv_unregister(&netiucv_handler, 1); | 2100 | iucv_unregister(&netiucv_handler, 1); |
2089 | iucv_unregister_dbf_views(); | 2101 | iucv_unregister_dbf_views(); |
2090 | 2102 | ||
2091 | PRINT_INFO("NETIUCV driver unloaded\n"); | 2103 | pr_info("driver unloaded\n"); |
2092 | return; | 2104 | return; |
2093 | } | 2105 | } |
2094 | 2106 | ||
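
Where a message concerns one device instance rather than the module as a whole, the netiucv changes above switch to the dev_* helpers instead of pr_*: dev_info()/dev_warn()/dev_err() take a struct device and prefix the text with the driver name and bus ID of that device. A rough sketch of the distinction (the function, device pointer and message text are placeholders, not the netiucv code):

	#include <linux/kernel.h>
	#include <linux/device.h>

	/* sketch: per-device vs. module-wide messages */
	static void report_peer_gone(struct device *dev)
	{
		/* prefixed with "<driver> <bus id>: " taken from dev */
		dev_info(dev, "The peer interface has closed the connection\n");

		/* prefixed only by pr_fmt()/KMSG_COMPONENT */
		pr_info("a connection was torn down\n");
	}
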
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index af6d60458513..d5ccce1643e4 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -31,11 +31,10 @@ | |||
31 | #include <asm/qdio.h> | 31 | #include <asm/qdio.h> |
32 | #include <asm/ccwdev.h> | 32 | #include <asm/ccwdev.h> |
33 | #include <asm/ccwgroup.h> | 33 | #include <asm/ccwgroup.h> |
34 | #include <asm/sysinfo.h> | ||
34 | 35 | ||
35 | #include "qeth_core_mpc.h" | 36 | #include "qeth_core_mpc.h" |
36 | 37 | ||
37 | #define KMSG_COMPONENT "qeth" | ||
38 | |||
39 | /** | 38 | /** |
40 | * Debug Facility stuff | 39 | * Debug Facility stuff |
41 | */ | 40 | */ |
@@ -74,11 +73,6 @@ struct qeth_dbf_info { | |||
74 | #define QETH_DBF_TEXT_(name, level, text...) \ | 73 | #define QETH_DBF_TEXT_(name, level, text...) \ |
75 | qeth_dbf_longtext(QETH_DBF_##name, level, text) | 74 | qeth_dbf_longtext(QETH_DBF_##name, level, text) |
76 | 75 | ||
77 | /** | ||
78 | * some more debug stuff | ||
79 | */ | ||
80 | #define PRINTK_HEADER "qeth: " | ||
81 | |||
82 | #define SENSE_COMMAND_REJECT_BYTE 0 | 76 | #define SENSE_COMMAND_REJECT_BYTE 0 |
83 | #define SENSE_COMMAND_REJECT_FLAG 0x80 | 77 | #define SENSE_COMMAND_REJECT_FLAG 0x80 |
84 | #define SENSE_RESETTING_EVENT_BYTE 1 | 78 | #define SENSE_RESETTING_EVENT_BYTE 1 |
@@ -733,6 +727,7 @@ struct qeth_card { | |||
733 | struct qeth_osn_info osn_info; | 727 | struct qeth_osn_info osn_info; |
734 | struct qeth_discipline discipline; | 728 | struct qeth_discipline discipline; |
735 | atomic_t force_alloc_skb; | 729 | atomic_t force_alloc_skb; |
730 | struct service_level qeth_service_level; | ||
736 | }; | 731 | }; |
737 | 732 | ||
738 | struct qeth_card_list_struct { | 733 | struct qeth_card_list_struct { |
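
The new qeth_service_level member hooks the card into the s390 service-level interface from <asm/sysinfo.h>: a driver embeds a struct service_level, fills in its seq_print callback and registers it, and the callback is later invoked to append the device's firmware level to the system's service-level report. A sketch of that pattern; everything here except struct service_level, its seq_print member, register_service_level() and unregister_service_level() is invented for illustration:

	#include <linux/kernel.h>
	#include <linux/seq_file.h>
	#include <asm/sysinfo.h>

	struct mydrv_card {
		char fw_level[16];
		struct service_level sl;	/* embedded so container_of() works */
	};

	static void mydrv_sl_print(struct seq_file *m, struct service_level *slr)
	{
		struct mydrv_card *card = container_of(slr, struct mydrv_card, sl);

		seq_printf(m, "mydrv: firmware level %s\n", card->fw_level);
	}

	static void mydrv_card_setup(struct mydrv_card *card)
	{
		card->sl.seq_print = mydrv_sl_print;
		register_service_level(&card->sl);
	}

	static void mydrv_card_teardown(struct mydrv_card *card)
	{
		unregister_service_level(&card->sl);
	}
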
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 52d26592c72c..e783644a2105 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Frank Blaschka <frank.blaschka@de.ibm.com> | 8 | * Frank Blaschka <frank.blaschka@de.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "qeth" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
13 | #include <linux/string.h> | 16 | #include <linux/string.h> |
@@ -319,7 +322,10 @@ static int qeth_issue_next_read(struct qeth_card *card) | |||
319 | return -EIO; | 322 | return -EIO; |
320 | iob = qeth_get_buffer(&card->read); | 323 | iob = qeth_get_buffer(&card->read); |
321 | if (!iob) { | 324 | if (!iob) { |
322 | PRINT_WARN("issue_next_read failed: no iob available!\n"); | 325 | dev_warn(&card->gdev->dev, "The qeth device driver " |
326 | "failed to recover an error on the device\n"); | ||
327 | QETH_DBF_MESSAGE(2, "%s issue_next_read failed: no iob " | ||
328 | "available\n", dev_name(&card->gdev->dev)); | ||
323 | return -ENOMEM; | 329 | return -ENOMEM; |
324 | } | 330 | } |
325 | qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); | 331 | qeth_setup_ccw(&card->read, iob->data, QETH_BUFSIZE); |
@@ -327,7 +333,8 @@ static int qeth_issue_next_read(struct qeth_card *card) | |||
327 | rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, | 333 | rc = ccw_device_start(card->read.ccwdev, &card->read.ccw, |
328 | (addr_t) iob, 0, 0); | 334 | (addr_t) iob, 0, 0); |
329 | if (rc) { | 335 | if (rc) { |
330 | PRINT_ERR("Error in starting next read ccw! rc=%i\n", rc); | 336 | QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! " |
337 | "rc=%i\n", dev_name(&card->gdev->dev), rc); | ||
331 | atomic_set(&card->read.irq_pending, 0); | 338 | atomic_set(&card->read.irq_pending, 0); |
332 | qeth_schedule_recovery(card); | 339 | qeth_schedule_recovery(card); |
333 | wake_up(&card->wait_q); | 340 | wake_up(&card->wait_q); |
@@ -393,10 +400,9 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, | |||
393 | } else { | 400 | } else { |
394 | switch (cmd->hdr.command) { | 401 | switch (cmd->hdr.command) { |
395 | case IPA_CMD_STOPLAN: | 402 | case IPA_CMD_STOPLAN: |
396 | PRINT_WARN("Link failure on %s (CHPID 0x%X) - " | 403 | dev_warn(&card->gdev->dev, |
397 | "there is a network problem or " | 404 | "The link for interface %s on CHPID" |
398 | "someone pulled the cable or " | 405 | " 0x%X failed\n", |
399 | "disabled the port.\n", | ||
400 | QETH_CARD_IFNAME(card), | 406 | QETH_CARD_IFNAME(card), |
401 | card->info.chpid); | 407 | card->info.chpid); |
402 | card->lan_online = 0; | 408 | card->lan_online = 0; |
@@ -404,9 +410,9 @@ static struct qeth_ipa_cmd *qeth_check_ipa_data(struct qeth_card *card, | |||
404 | netif_carrier_off(card->dev); | 410 | netif_carrier_off(card->dev); |
405 | return NULL; | 411 | return NULL; |
406 | case IPA_CMD_STARTLAN: | 412 | case IPA_CMD_STARTLAN: |
407 | PRINT_INFO("Link reestablished on %s " | 413 | dev_info(&card->gdev->dev, |
408 | "(CHPID 0x%X). Scheduling " | 414 | "The link for %s on CHPID 0x%X has" |
409 | "IP address reset.\n", | 415 | " been restored\n", |
410 | QETH_CARD_IFNAME(card), | 416 | QETH_CARD_IFNAME(card), |
411 | card->info.chpid); | 417 | card->info.chpid); |
412 | netif_carrier_on(card->dev); | 418 | netif_carrier_on(card->dev); |
@@ -458,7 +464,7 @@ static int qeth_check_idx_response(unsigned char *buffer) | |||
458 | 464 | ||
459 | QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); | 465 | QETH_DBF_HEX(CTRL, 2, buffer, QETH_DBF_CTRL_LEN); |
460 | if ((buffer[2] & 0xc0) == 0xc0) { | 466 | if ((buffer[2] & 0xc0) == 0xc0) { |
461 | PRINT_WARN("received an IDX TERMINATE " | 467 | QETH_DBF_MESSAGE(2, "received an IDX TERMINATE " |
462 | "with cause code 0x%02x%s\n", | 468 | "with cause code 0x%02x%s\n", |
463 | buffer[4], | 469 | buffer[4], |
464 | ((buffer[4] == 0x22) ? | 470 | ((buffer[4] == 0x22) ? |
@@ -744,8 +750,10 @@ static int qeth_get_problem(struct ccw_device *cdev, struct irb *irb) | |||
744 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | | 750 | SCHN_STAT_CHN_DATA_CHK | SCHN_STAT_CHAIN_CHECK | |
745 | SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { | 751 | SCHN_STAT_PROT_CHECK | SCHN_STAT_PROG_CHECK)) { |
746 | QETH_DBF_TEXT(TRACE, 2, "CGENCHK"); | 752 | QETH_DBF_TEXT(TRACE, 2, "CGENCHK"); |
747 | PRINT_WARN("check on device %s, dstat=x%x, cstat=x%x ", | 753 | dev_warn(&cdev->dev, "The qeth device driver " |
748 | dev_name(&cdev->dev), dstat, cstat); | 754 | "failed to recover an error on the device\n"); |
755 | QETH_DBF_MESSAGE(2, "%s check on device dstat=x%x, cstat=x%x ", | ||
756 | dev_name(&cdev->dev), dstat, cstat); | ||
749 | print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, | 757 | print_hex_dump(KERN_WARNING, "qeth: irb ", DUMP_PREFIX_OFFSET, |
750 | 16, 1, irb, 64, 1); | 758 | 16, 1, irb, 64, 1); |
751 | return 1; | 759 | return 1; |
@@ -784,12 +792,14 @@ static long __qeth_check_irb_error(struct ccw_device *cdev, | |||
784 | 792 | ||
785 | switch (PTR_ERR(irb)) { | 793 | switch (PTR_ERR(irb)) { |
786 | case -EIO: | 794 | case -EIO: |
787 | PRINT_WARN("i/o-error on device %s\n", dev_name(&cdev->dev)); | 795 | QETH_DBF_MESSAGE(2, "%s i/o-error on device\n", |
796 | dev_name(&cdev->dev)); | ||
788 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); | 797 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); |
789 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); | 798 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -EIO); |
790 | break; | 799 | break; |
791 | case -ETIMEDOUT: | 800 | case -ETIMEDOUT: |
792 | PRINT_WARN("timeout on device %s\n", dev_name(&cdev->dev)); | 801 | dev_warn(&cdev->dev, "A hardware operation timed out" |
802 | " on the device\n"); | ||
793 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); | 803 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); |
794 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT); | 804 | QETH_DBF_TEXT_(TRACE, 2, " rc%d", -ETIMEDOUT); |
795 | if (intparm == QETH_RCD_PARM) { | 805 | if (intparm == QETH_RCD_PARM) { |
@@ -802,8 +812,8 @@ static long __qeth_check_irb_error(struct ccw_device *cdev, | |||
802 | } | 812 | } |
803 | break; | 813 | break; |
804 | default: | 814 | default: |
805 | PRINT_WARN("unknown error %ld on device %s\n", PTR_ERR(irb), | 815 | QETH_DBF_MESSAGE(2, "%s unknown error %ld on device\n", |
806 | dev_name(&cdev->dev)); | 816 | dev_name(&cdev->dev), PTR_ERR(irb)); |
807 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); | 817 | QETH_DBF_TEXT(TRACE, 2, "ckirberr"); |
808 | QETH_DBF_TEXT(TRACE, 2, " rc???"); | 818 | QETH_DBF_TEXT(TRACE, 2, " rc???"); |
809 | } | 819 | } |
@@ -869,10 +879,12 @@ static void qeth_irq(struct ccw_device *cdev, unsigned long intparm, | |||
869 | (dstat & DEV_STAT_UNIT_CHECK) || | 879 | (dstat & DEV_STAT_UNIT_CHECK) || |
870 | (cstat)) { | 880 | (cstat)) { |
871 | if (irb->esw.esw0.erw.cons) { | 881 | if (irb->esw.esw0.erw.cons) { |
872 | /* TODO: we should make this s390dbf */ | 882 | dev_warn(&channel->ccwdev->dev, |
873 | PRINT_WARN("sense data available on channel %s.\n", | 883 | "The qeth device driver failed to recover " |
874 | CHANNEL_ID(channel)); | 884 | "an error on the device\n"); |
875 | PRINT_WARN(" cstat 0x%X\n dstat 0x%X\n", cstat, dstat); | 885 | QETH_DBF_MESSAGE(2, "%s sense data available. cstat " |
886 | "0x%X dstat 0x%X\n", | ||
887 | dev_name(&channel->ccwdev->dev), cstat, dstat); | ||
876 | print_hex_dump(KERN_WARNING, "qeth: irb ", | 888 | print_hex_dump(KERN_WARNING, "qeth: irb ", |
877 | DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); | 889 | DUMP_PREFIX_OFFSET, 16, 1, irb, 32, 1); |
878 | print_hex_dump(KERN_WARNING, "qeth: sense data ", | 890 | print_hex_dump(KERN_WARNING, "qeth: sense data ", |
@@ -1138,6 +1150,14 @@ static int qeth_setup_card(struct qeth_card *card) | |||
1138 | return 0; | 1150 | return 0; |
1139 | } | 1151 | } |
1140 | 1152 | ||
1153 | static void qeth_core_sl_print(struct seq_file *m, struct service_level *slr) | ||
1154 | { | ||
1155 | struct qeth_card *card = container_of(slr, struct qeth_card, | ||
1156 | qeth_service_level); | ||
1157 | seq_printf(m, "qeth: %s firmware level %s\n", CARD_BUS_ID(card), | ||
1158 | card->info.mcl_level); | ||
1159 | } | ||
1160 | |||
1141 | static struct qeth_card *qeth_alloc_card(void) | 1161 | static struct qeth_card *qeth_alloc_card(void) |
1142 | { | 1162 | { |
1143 | struct qeth_card *card; | 1163 | struct qeth_card *card; |
@@ -1157,6 +1177,8 @@ static struct qeth_card *qeth_alloc_card(void) | |||
1157 | return NULL; | 1177 | return NULL; |
1158 | } | 1178 | } |
1159 | card->options.layer2 = -1; | 1179 | card->options.layer2 = -1; |
1180 | card->qeth_service_level.seq_print = qeth_core_sl_print; | ||
1181 | register_service_level(&card->qeth_service_level); | ||
1160 | return card; | 1182 | return card; |
1161 | } | 1183 | } |
1162 | 1184 | ||
@@ -1175,8 +1197,8 @@ static int qeth_determine_card_type(struct qeth_card *card) | |||
1175 | card->qdio.no_out_queues = known_devices[i][8]; | 1197 | card->qdio.no_out_queues = known_devices[i][8]; |
1176 | card->info.is_multicast_different = known_devices[i][9]; | 1198 | card->info.is_multicast_different = known_devices[i][9]; |
1177 | if (qeth_is_1920_device(card)) { | 1199 | if (qeth_is_1920_device(card)) { |
1178 | PRINT_INFO("Priority Queueing not able " | 1200 | dev_info(&card->gdev->dev, |
1179 | "due to hardware limitations!\n"); | 1201 | "Priority Queueing not supported\n"); |
1180 | card->qdio.no_out_queues = 1; | 1202 | card->qdio.no_out_queues = 1; |
1181 | card->qdio.default_out_queue = 0; | 1203 | card->qdio.default_out_queue = 0; |
1182 | } | 1204 | } |
@@ -1185,7 +1207,8 @@ static int qeth_determine_card_type(struct qeth_card *card) | |||
1185 | i++; | 1207 | i++; |
1186 | } | 1208 | } |
1187 | card->info.type = QETH_CARD_TYPE_UNKNOWN; | 1209 | card->info.type = QETH_CARD_TYPE_UNKNOWN; |
1188 | PRINT_ERR("unknown card type on device %s\n", CARD_BUS_ID(card)); | 1210 | dev_err(&card->gdev->dev, "The adapter hardware is of an " |
1211 | "unknown type\n"); | ||
1189 | return -ENOENT; | 1212 | return -ENOENT; |
1190 | } | 1213 | } |
1191 | 1214 | ||
@@ -1368,8 +1391,8 @@ static int qeth_get_unitaddr(struct qeth_card *card) | |||
1368 | QETH_DBF_TEXT(SETUP, 2, "getunit"); | 1391 | QETH_DBF_TEXT(SETUP, 2, "getunit"); |
1369 | rc = qeth_read_conf_data(card, (void **) &prcd, &length); | 1392 | rc = qeth_read_conf_data(card, (void **) &prcd, &length); |
1370 | if (rc) { | 1393 | if (rc) { |
1371 | PRINT_ERR("qeth_read_conf_data for device %s returned %i\n", | 1394 | QETH_DBF_MESSAGE(2, "%s qeth_read_conf_data returned %i\n", |
1372 | CARD_DDEV_ID(card), rc); | 1395 | dev_name(&card->gdev->dev), rc); |
1373 | return rc; | 1396 | return rc; |
1374 | } | 1397 | } |
1375 | card->info.chpid = prcd[30]; | 1398 | card->info.chpid = prcd[30]; |
@@ -1519,7 +1542,10 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
1519 | if (rc == -ERESTARTSYS) | 1542 | if (rc == -ERESTARTSYS) |
1520 | return rc; | 1543 | return rc; |
1521 | if (channel->state != CH_STATE_ACTIVATING) { | 1544 | if (channel->state != CH_STATE_ACTIVATING) { |
1522 | PRINT_WARN("IDX activate timed out!\n"); | 1545 | dev_warn(&channel->ccwdev->dev, "The qeth device driver" |
1546 | " failed to recover an error on the device\n"); | ||
1547 | QETH_DBF_MESSAGE(2, "%s IDX activate timed out\n", | ||
1548 | dev_name(&channel->ccwdev->dev)); | ||
1523 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); | 1549 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", -ETIME); |
1524 | qeth_clear_cmd_buffers(channel); | 1550 | qeth_clear_cmd_buffers(channel); |
1525 | return -ETIME; | 1551 | return -ETIME; |
@@ -1552,20 +1578,21 @@ static void qeth_idx_write_cb(struct qeth_channel *channel, | |||
1552 | 1578 | ||
1553 | if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { | 1579 | if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { |
1554 | if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) | 1580 | if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) |
1555 | PRINT_ERR("IDX_ACTIVATE on write channel device %s: " | 1581 | dev_err(&card->write.ccwdev->dev, |
1556 | "adapter exclusively used by another host\n", | 1582 | "The adapter is used exclusively by another " |
1557 | CARD_WDEV_ID(card)); | 1583 | "host\n"); |
1558 | else | 1584 | else |
1559 | PRINT_ERR("IDX_ACTIVATE on write channel device %s: " | 1585 | QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel:" |
1560 | "negative reply\n", CARD_WDEV_ID(card)); | 1586 | " negative reply\n", |
1587 | dev_name(&card->write.ccwdev->dev)); | ||
1561 | goto out; | 1588 | goto out; |
1562 | } | 1589 | } |
1563 | memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); | 1590 | memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); |
1564 | if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) { | 1591 | if ((temp & ~0x0100) != qeth_peer_func_level(card->info.func_level)) { |
1565 | PRINT_WARN("IDX_ACTIVATE on write channel device %s: " | 1592 | QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on write channel: " |
1566 | "function level mismatch " | 1593 | "function level mismatch (sent: 0x%x, received: " |
1567 | "(sent: 0x%x, received: 0x%x)\n", | 1594 | "0x%x)\n", dev_name(&card->write.ccwdev->dev), |
1568 | CARD_WDEV_ID(card), card->info.func_level, temp); | 1595 | card->info.func_level, temp); |
1569 | goto out; | 1596 | goto out; |
1570 | } | 1597 | } |
1571 | channel->state = CH_STATE_UP; | 1598 | channel->state = CH_STATE_UP; |
@@ -1591,12 +1618,13 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, | |||
1591 | 1618 | ||
1592 | if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { | 1619 | if (!(QETH_IS_IDX_ACT_POS_REPLY(iob->data))) { |
1593 | if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) | 1620 | if (QETH_IDX_ACT_CAUSE_CODE(iob->data) == 0x19) |
1594 | PRINT_ERR("IDX_ACTIVATE on read channel device %s: " | 1621 | dev_err(&card->write.ccwdev->dev, |
1595 | "adapter exclusively used by another host\n", | 1622 | "The adapter is used exclusively by another " |
1596 | CARD_RDEV_ID(card)); | 1623 | "host\n"); |
1597 | else | 1624 | else |
1598 | PRINT_ERR("IDX_ACTIVATE on read channel device %s: " | 1625 | QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel:" |
1599 | "negative reply\n", CARD_RDEV_ID(card)); | 1626 | " negative reply\n", |
1627 | dev_name(&card->read.ccwdev->dev)); | ||
1600 | goto out; | 1628 | goto out; |
1601 | } | 1629 | } |
1602 | 1630 | ||
@@ -1610,9 +1638,10 @@ static void qeth_idx_read_cb(struct qeth_channel *channel, | |||
1610 | 1638 | ||
1611 | memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); | 1639 | memcpy(&temp, QETH_IDX_ACT_FUNC_LEVEL(iob->data), 2); |
1612 | if (temp != qeth_peer_func_level(card->info.func_level)) { | 1640 | if (temp != qeth_peer_func_level(card->info.func_level)) { |
1613 | PRINT_WARN("IDX_ACTIVATE on read channel device %s: function " | 1641 | QETH_DBF_MESSAGE(2, "%s IDX_ACTIVATE on read channel: function " |
1614 | "level mismatch (sent: 0x%x, received: 0x%x)\n", | 1642 | "level mismatch (sent: 0x%x, received: 0x%x)\n", |
1615 | CARD_RDEV_ID(card), card->info.func_level, temp); | 1643 | dev_name(&card->read.ccwdev->dev), |
1644 | card->info.func_level, temp); | ||
1616 | goto out; | 1645 | goto out; |
1617 | } | 1646 | } |
1618 | memcpy(&card->token.issuer_rm_r, | 1647 | memcpy(&card->token.issuer_rm_r, |
@@ -1686,8 +1715,9 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
1686 | (addr_t) iob, 0, 0); | 1715 | (addr_t) iob, 0, 0); |
1687 | spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); | 1716 | spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); |
1688 | if (rc) { | 1717 | if (rc) { |
1689 | PRINT_WARN("qeth_send_control_data: " | 1718 | QETH_DBF_MESSAGE(2, "%s qeth_send_control_data: " |
1690 | "ccw_device_start rc = %i\n", rc); | 1719 | "ccw_device_start rc = %i\n", |
1720 | dev_name(&card->write.ccwdev->dev), rc); | ||
1691 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); | 1721 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); |
1692 | spin_lock_irqsave(&card->lock, flags); | 1722 | spin_lock_irqsave(&card->lock, flags); |
1693 | list_del_init(&reply->list); | 1723 | list_del_init(&reply->list); |
@@ -2170,11 +2200,8 @@ static void qeth_print_status_with_portname(struct qeth_card *card) | |||
2170 | dbf_text[i] = | 2200 | dbf_text[i] = |
2171 | (char) _ebcasc[(__u8) dbf_text[i]]; | 2201 | (char) _ebcasc[(__u8) dbf_text[i]]; |
2172 | dbf_text[8] = 0; | 2202 | dbf_text[8] = 0; |
2173 | PRINT_INFO("Device %s/%s/%s is a%s card%s%s%s\n" | 2203 | dev_info(&card->gdev->dev, "Device is a%s card%s%s%s\n" |
2174 | "with link type %s (portname: %s)\n", | 2204 | "with link type %s (portname: %s)\n", |
2175 | CARD_RDEV_ID(card), | ||
2176 | CARD_WDEV_ID(card), | ||
2177 | CARD_DDEV_ID(card), | ||
2178 | qeth_get_cardname(card), | 2205 | qeth_get_cardname(card), |
2179 | (card->info.mcl_level[0]) ? " (level: " : "", | 2206 | (card->info.mcl_level[0]) ? " (level: " : "", |
2180 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", | 2207 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", |
@@ -2187,23 +2214,17 @@ static void qeth_print_status_with_portname(struct qeth_card *card) | |||
2187 | static void qeth_print_status_no_portname(struct qeth_card *card) | 2214 | static void qeth_print_status_no_portname(struct qeth_card *card) |
2188 | { | 2215 | { |
2189 | if (card->info.portname[0]) | 2216 | if (card->info.portname[0]) |
2190 | PRINT_INFO("Device %s/%s/%s is a%s " | 2217 | dev_info(&card->gdev->dev, "Device is a%s " |
2191 | "card%s%s%s\nwith link type %s " | 2218 | "card%s%s%s\nwith link type %s " |
2192 | "(no portname needed by interface).\n", | 2219 | "(no portname needed by interface).\n", |
2193 | CARD_RDEV_ID(card), | ||
2194 | CARD_WDEV_ID(card), | ||
2195 | CARD_DDEV_ID(card), | ||
2196 | qeth_get_cardname(card), | 2220 | qeth_get_cardname(card), |
2197 | (card->info.mcl_level[0]) ? " (level: " : "", | 2221 | (card->info.mcl_level[0]) ? " (level: " : "", |
2198 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", | 2222 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", |
2199 | (card->info.mcl_level[0]) ? ")" : "", | 2223 | (card->info.mcl_level[0]) ? ")" : "", |
2200 | qeth_get_cardname_short(card)); | 2224 | qeth_get_cardname_short(card)); |
2201 | else | 2225 | else |
2202 | PRINT_INFO("Device %s/%s/%s is a%s " | 2226 | dev_info(&card->gdev->dev, "Device is a%s " |
2203 | "card%s%s%s\nwith link type %s.\n", | 2227 | "card%s%s%s\nwith link type %s.\n", |
2204 | CARD_RDEV_ID(card), | ||
2205 | CARD_WDEV_ID(card), | ||
2206 | CARD_DDEV_ID(card), | ||
2207 | qeth_get_cardname(card), | 2228 | qeth_get_cardname(card), |
2208 | (card->info.mcl_level[0]) ? " (level: " : "", | 2229 | (card->info.mcl_level[0]) ? " (level: " : "", |
2209 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", | 2230 | (card->info.mcl_level[0]) ? card->info.mcl_level : "", |
@@ -2325,7 +2346,6 @@ static int qeth_init_input_buffer(struct qeth_card *card, | |||
2325 | * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off | 2346 | * the QETH_IN_BUF_REQUEUE_THRESHOLD we should never run out off |
2326 | * buffers | 2347 | * buffers |
2327 | */ | 2348 | */ |
2328 | BUG_ON(!pool_entry); | ||
2329 | 2349 | ||
2330 | buf->pool_entry = pool_entry; | 2350 | buf->pool_entry = pool_entry; |
2331 | for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { | 2351 | for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) { |
@@ -2630,9 +2650,8 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) | |||
2630 | qeth_get_micros() - | 2650 | qeth_get_micros() - |
2631 | card->perf_stats.inbound_do_qdio_start_time; | 2651 | card->perf_stats.inbound_do_qdio_start_time; |
2632 | if (rc) { | 2652 | if (rc) { |
2633 | PRINT_WARN("qeth_queue_input_buffer's do_QDIO " | 2653 | dev_warn(&card->gdev->dev, |
2634 | "return %i (device %s).\n", | 2654 | "QDIO reported an error, rc=%i\n", rc); |
2635 | rc, CARD_DDEV_ID(card)); | ||
2636 | QETH_DBF_TEXT(TRACE, 2, "qinberr"); | 2655 | QETH_DBF_TEXT(TRACE, 2, "qinberr"); |
2637 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); | 2656 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); |
2638 | } | 2657 | } |
@@ -3730,6 +3749,7 @@ static void qeth_core_free_card(struct qeth_card *card) | |||
3730 | free_netdev(card->dev); | 3749 | free_netdev(card->dev); |
3731 | kfree(card->ip_tbd_list); | 3750 | kfree(card->ip_tbd_list); |
3732 | qeth_free_qdio_buffers(card); | 3751 | qeth_free_qdio_buffers(card); |
3752 | unregister_service_level(&card->qeth_service_level); | ||
3733 | kfree(card); | 3753 | kfree(card); |
3734 | } | 3754 | } |
3735 | 3755 | ||
@@ -3757,7 +3777,7 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev, | |||
3757 | 3777 | ||
3758 | int qeth_core_hardsetup_card(struct qeth_card *card) | 3778 | int qeth_core_hardsetup_card(struct qeth_card *card) |
3759 | { | 3779 | { |
3760 | struct qdio_ssqd_desc *qdio_ssqd; | 3780 | struct qdio_ssqd_desc *ssqd; |
3761 | int retries = 3; | 3781 | int retries = 3; |
3762 | int mpno = 0; | 3782 | int mpno = 0; |
3763 | int rc; | 3783 | int rc; |
@@ -3766,7 +3786,8 @@ int qeth_core_hardsetup_card(struct qeth_card *card) | |||
3766 | atomic_set(&card->force_alloc_skb, 0); | 3786 | atomic_set(&card->force_alloc_skb, 0); |
3767 | retry: | 3787 | retry: |
3768 | if (retries < 3) { | 3788 | if (retries < 3) { |
3769 | PRINT_WARN("Retrying to do IDX activates.\n"); | 3789 | QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", |
3790 | dev_name(&card->gdev->dev)); | ||
3770 | ccw_device_set_offline(CARD_DDEV(card)); | 3791 | ccw_device_set_offline(CARD_DDEV(card)); |
3771 | ccw_device_set_offline(CARD_WDEV(card)); | 3792 | ccw_device_set_offline(CARD_WDEV(card)); |
3772 | ccw_device_set_offline(CARD_RDEV(card)); | 3793 | ccw_device_set_offline(CARD_RDEV(card)); |
@@ -3792,9 +3813,16 @@ retry: | |||
3792 | return rc; | 3813 | return rc; |
3793 | } | 3814 | } |
3794 | 3815 | ||
3795 | qdio_ssqd = qdio_get_ssqd_desc(CARD_DDEV(card)); | 3816 | ssqd = kmalloc(sizeof(struct qdio_ssqd_desc), GFP_KERNEL); |
3796 | if (qdio_ssqd) | 3817 | if (!ssqd) { |
3797 | mpno = qdio_ssqd->pcnt; | 3818 | rc = -ENOMEM; |
3819 | goto out; | ||
3820 | } | ||
3821 | rc = qdio_get_ssqd_desc(CARD_DDEV(card), ssqd); | ||
3822 | if (rc == 0) | ||
3823 | mpno = ssqd->pcnt; | ||
3824 | kfree(ssqd); | ||
3825 | |||
3798 | if (mpno) | 3826 | if (mpno) |
3799 | mpno = min(mpno - 1, QETH_MAX_PORTNO); | 3827 | mpno = min(mpno - 1, QETH_MAX_PORTNO); |
3800 | if (card->info.portno > mpno) { | 3828 | if (card->info.portno > mpno) { |
@@ -3834,7 +3862,10 @@ retry: | |||
3834 | } | 3862 | } |
3835 | return 0; | 3863 | return 0; |
3836 | out: | 3864 | out: |
3837 | PRINT_ERR("Initialization in hardsetup failed! rc=%d\n", rc); | 3865 | dev_warn(&card->gdev->dev, "The qeth device driver failed to recover " |
3866 | "an error on the device\n"); | ||
3867 | QETH_DBF_MESSAGE(2, "%s Initialization in hardsetup failed! rc=%d\n", | ||
3868 | dev_name(&card->gdev->dev), rc); | ||
3838 | return rc; | 3869 | return rc; |
3839 | } | 3870 | } |
3840 | EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); | 3871 | EXPORT_SYMBOL_GPL(qeth_core_hardsetup_card); |
@@ -4054,8 +4085,8 @@ int qeth_core_load_discipline(struct qeth_card *card, | |||
4054 | break; | 4085 | break; |
4055 | } | 4086 | } |
4056 | if (!card->discipline.ccwgdriver) { | 4087 | if (!card->discipline.ccwgdriver) { |
4057 | PRINT_ERR("Support for discipline %d not present\n", | 4088 | dev_err(&card->gdev->dev, "There is no kernel module to " |
4058 | discipline); | 4089 | "support discipline %d\n", discipline); |
4059 | rc = -EINVAL; | 4090 | rc = -EINVAL; |
4060 | } | 4091 | } |
4061 | return rc; | 4092 | return rc; |
@@ -4448,7 +4479,7 @@ static int __init qeth_core_init(void) | |||
4448 | { | 4479 | { |
4449 | int rc; | 4480 | int rc; |
4450 | 4481 | ||
4451 | PRINT_INFO("loading core functions\n"); | 4482 | pr_info("loading core functions\n"); |
4452 | INIT_LIST_HEAD(&qeth_core_card_list.list); | 4483 | INIT_LIST_HEAD(&qeth_core_card_list.list); |
4453 | rwlock_init(&qeth_core_card_list.rwlock); | 4484 | rwlock_init(&qeth_core_card_list.rwlock); |
4454 | 4485 | ||
@@ -4488,9 +4519,10 @@ driver_err: | |||
4488 | ccwgroup_err: | 4519 | ccwgroup_err: |
4489 | ccw_driver_unregister(&qeth_ccw_driver); | 4520 | ccw_driver_unregister(&qeth_ccw_driver); |
4490 | ccw_err: | 4521 | ccw_err: |
4522 | QETH_DBF_MESSAGE(2, "Initialization failed with code %d\n", rc); | ||
4491 | qeth_unregister_dbf_views(); | 4523 | qeth_unregister_dbf_views(); |
4492 | out_err: | 4524 | out_err: |
4493 | PRINT_ERR("Initialization failed with code %d\n", rc); | 4525 | pr_err("Initializing the qeth device driver failed\n"); |
4494 | return rc; | 4526 | return rc; |
4495 | } | 4527 | } |
4496 | 4528 | ||
@@ -4503,7 +4535,7 @@ static void __exit qeth_core_exit(void) | |||
4503 | ccw_driver_unregister(&qeth_ccw_driver); | 4535 | ccw_driver_unregister(&qeth_ccw_driver); |
4504 | kmem_cache_destroy(qeth_core_header_cache); | 4536 | kmem_cache_destroy(qeth_core_header_cache); |
4505 | qeth_unregister_dbf_views(); | 4537 | qeth_unregister_dbf_views(); |
4506 | PRINT_INFO("core functions removed\n"); | 4538 | pr_info("core functions removed\n"); |
4507 | } | 4539 | } |
4508 | 4540 | ||
4509 | module_init(qeth_core_init); | 4541 | module_init(qeth_core_init); |
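
Besides the message conversion, the qeth_core_main.c hunks above adapt to a changed QDIO interface: qdio_get_ssqd_desc() now fills a caller-allocated struct qdio_ssqd_desc and returns an error code, instead of handing back a pointer to QDIO-internal data. A minimal sketch of the new calling convention (the wrapper function and its error handling are illustrative, not the qeth code):

	#include <linux/errno.h>
	#include <linux/slab.h>
	#include <asm/ccwdev.h>
	#include <asm/qdio.h>

	/* sketch: read the available port count from the subchannel's SSQD */
	static int mydrv_port_count(struct ccw_device *cdev, int *pcnt)
	{
		struct qdio_ssqd_desc *ssqd;
		int rc;

		ssqd = kmalloc(sizeof(*ssqd), GFP_KERNEL);
		if (!ssqd)
			return -ENOMEM;

		rc = qdio_get_ssqd_desc(cdev, ssqd);	/* fills the caller's buffer */
		if (rc == 0)
			*pcnt = ssqd->pcnt;

		kfree(ssqd);
		return rc;
	}
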
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 1b1e80336d2c..af15bc648ba1 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Frank Blaschka <frank.blaschka@de.ibm.com> | 8 | * Frank Blaschka <frank.blaschka@de.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "qeth" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
13 | #include <linux/string.h> | 16 | #include <linux/string.h> |
@@ -503,12 +506,13 @@ static int qeth_l2_send_setmac_cb(struct qeth_card *card, | |||
503 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; | 506 | card->info.mac_bits |= QETH_LAYER2_MAC_REGISTERED; |
504 | memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, | 507 | memcpy(card->dev->dev_addr, cmd->data.setdelmac.mac, |
505 | OSA_ADDR_LEN); | 508 | OSA_ADDR_LEN); |
506 | PRINT_INFO("MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " | 509 | dev_info(&card->gdev->dev, |
507 | "successfully registered on device %s\n", | 510 | "MAC address %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x " |
508 | card->dev->dev_addr[0], card->dev->dev_addr[1], | 511 | "successfully registered on device %s\n", |
509 | card->dev->dev_addr[2], card->dev->dev_addr[3], | 512 | card->dev->dev_addr[0], card->dev->dev_addr[1], |
510 | card->dev->dev_addr[4], card->dev->dev_addr[5], | 513 | card->dev->dev_addr[2], card->dev->dev_addr[3], |
511 | card->dev->name); | 514 | card->dev->dev_addr[4], card->dev->dev_addr[5], |
515 | card->dev->name); | ||
512 | } | 516 | } |
513 | return 0; | 517 | return 0; |
514 | } | 518 | } |
@@ -1015,9 +1019,8 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
1015 | if (rc) { | 1019 | if (rc) { |
1016 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 1020 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
1017 | if (rc == 0xe080) { | 1021 | if (rc == 0xe080) { |
1018 | PRINT_WARN("LAN on card %s if offline! " | 1022 | dev_warn(&card->gdev->dev, |
1019 | "Waiting for STARTLAN from card.\n", | 1023 | "The LAN is offline\n"); |
1020 | CARD_BUS_ID(card)); | ||
1021 | card->lan_online = 0; | 1024 | card->lan_online = 0; |
1022 | } | 1025 | } |
1023 | return rc; | 1026 | return rc; |
@@ -1117,8 +1120,8 @@ static int qeth_l2_recover(void *ptr) | |||
1117 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) | 1120 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) |
1118 | return 0; | 1121 | return 0; |
1119 | QETH_DBF_TEXT(TRACE, 2, "recover2"); | 1122 | QETH_DBF_TEXT(TRACE, 2, "recover2"); |
1120 | PRINT_WARN("Recovery of device %s started ...\n", | 1123 | dev_warn(&card->gdev->dev, |
1121 | CARD_BUS_ID(card)); | 1124 | "A recovery process has been started for the device\n"); |
1122 | card->use_hard_stop = 1; | 1125 | card->use_hard_stop = 1; |
1123 | __qeth_l2_set_offline(card->gdev, 1); | 1126 | __qeth_l2_set_offline(card->gdev, 1); |
1124 | rc = __qeth_l2_set_online(card->gdev, 1); | 1127 | rc = __qeth_l2_set_online(card->gdev, 1); |
@@ -1126,27 +1129,27 @@ static int qeth_l2_recover(void *ptr) | |||
1126 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | 1129 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); |
1127 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | 1130 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); |
1128 | if (!rc) | 1131 | if (!rc) |
1129 | PRINT_INFO("Device %s successfully recovered!\n", | 1132 | dev_info(&card->gdev->dev, |
1130 | CARD_BUS_ID(card)); | 1133 | "Device successfully recovered!\n"); |
1131 | else { | 1134 | else { |
1132 | rtnl_lock(); | 1135 | rtnl_lock(); |
1133 | dev_close(card->dev); | 1136 | dev_close(card->dev); |
1134 | rtnl_unlock(); | 1137 | rtnl_unlock(); |
1135 | PRINT_INFO("Device %s could not be recovered!\n", | 1138 | dev_warn(&card->gdev->dev, "The qeth device driver " |
1136 | CARD_BUS_ID(card)); | 1139 | "failed to recover an error on the device\n"); |
1137 | } | 1140 | } |
1138 | return 0; | 1141 | return 0; |
1139 | } | 1142 | } |
1140 | 1143 | ||
1141 | static int __init qeth_l2_init(void) | 1144 | static int __init qeth_l2_init(void) |
1142 | { | 1145 | { |
1143 | PRINT_INFO("register layer 2 discipline\n"); | 1146 | pr_info("register layer 2 discipline\n"); |
1144 | return 0; | 1147 | return 0; |
1145 | } | 1148 | } |
1146 | 1149 | ||
1147 | static void __exit qeth_l2_exit(void) | 1150 | static void __exit qeth_l2_exit(void) |
1148 | { | 1151 | { |
1149 | PRINT_INFO("unregister layer 2 discipline\n"); | 1152 | pr_info("unregister layer 2 discipline\n"); |
1150 | } | 1153 | } |
1151 | 1154 | ||
1152 | static void qeth_l2_shutdown(struct ccwgroup_device *gdev) | 1155 | static void qeth_l2_shutdown(struct ccwgroup_device *gdev) |
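
A recurring pattern in the qeth hunks is that developer-oriented detail (bus IDs, return codes, sense data) moves off the console into the s390 debug feature, while the console keeps a short operator message; QETH_DBF_MESSAGE is the driver's thin wrapper around debug_sprintf_event(). A rough sketch of that split, assuming a made-up debug area, sizes and message text:

	#include <linux/device.h>
	#include <linux/errno.h>
	#include <asm/debug.h>

	static debug_info_t *mydrv_dbf;		/* s390 debug feature area */

	static int mydrv_dbf_setup(void)
	{
		/* 8 pages per area, 1 area, 128-byte entries */
		mydrv_dbf = debug_register("mydrv_msg", 8, 1, 128);
		if (!mydrv_dbf)
			return -ENOMEM;
		debug_register_view(mydrv_dbf, &debug_sprintf_view);
		debug_set_level(mydrv_dbf, 2);
		return 0;
	}

	static void mydrv_report_io_error(struct device *dev, int rc)
	{
		/* terse text for the operator on the console ... */
		dev_warn(dev, "The device driver failed to recover an error\n");
		/* ... full detail for the developer, kept in the debug feature */
		debug_sprintf_event(mydrv_dbf, 2, "%s i/o error rc=%d\n",
				    dev_name(dev), rc);
	}
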
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index ed59fedd5922..c0b30b25a5f1 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -8,6 +8,9 @@ | |||
8 | * Frank Blaschka <frank.blaschka@de.ibm.com> | 8 | * Frank Blaschka <frank.blaschka@de.ibm.com> |
9 | */ | 9 | */ |
10 | 10 | ||
11 | #define KMSG_COMPONENT "qeth" | ||
12 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
13 | |||
11 | #include <linux/module.h> | 14 | #include <linux/module.h> |
12 | #include <linux/moduleparam.h> | 15 | #include <linux/moduleparam.h> |
13 | #include <linux/string.h> | 16 | #include <linux/string.h> |
@@ -917,8 +920,8 @@ static int qeth_l3_register_addr_entry(struct qeth_card *card, | |||
917 | if (rc) { | 920 | if (rc) { |
918 | QETH_DBF_TEXT(TRACE, 2, "FAILED"); | 921 | QETH_DBF_TEXT(TRACE, 2, "FAILED"); |
919 | qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); | 922 | qeth_l3_ipaddr_to_string(addr->proto, (u8 *)&addr->u, buf); |
920 | PRINT_WARN("Could not register IP address %s (rc=0x%x/%d)\n", | 923 | dev_warn(&card->gdev->dev, |
921 | buf, rc, rc); | 924 | "Registering IP address %s failed\n", buf); |
922 | } | 925 | } |
923 | return rc; | 926 | return rc; |
924 | } | 927 | } |
@@ -1029,24 +1032,22 @@ static int qeth_l3_setadapter_parms(struct qeth_card *card) | |||
1029 | QETH_DBF_TEXT(SETUP, 2, "setadprm"); | 1032 | QETH_DBF_TEXT(SETUP, 2, "setadprm"); |
1030 | 1033 | ||
1031 | if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) { | 1034 | if (!qeth_is_supported(card, IPA_SETADAPTERPARMS)) { |
1032 | PRINT_WARN("set adapter parameters not supported " | 1035 | dev_info(&card->gdev->dev, |
1033 | "on device %s.\n", | 1036 | "set adapter parameters not supported.\n"); |
1034 | CARD_BUS_ID(card)); | ||
1035 | QETH_DBF_TEXT(SETUP, 2, " notsupp"); | 1037 | QETH_DBF_TEXT(SETUP, 2, " notsupp"); |
1036 | return 0; | 1038 | return 0; |
1037 | } | 1039 | } |
1038 | rc = qeth_query_setadapterparms(card); | 1040 | rc = qeth_query_setadapterparms(card); |
1039 | if (rc) { | 1041 | if (rc) { |
1040 | PRINT_WARN("couldn't set adapter parameters on device %s: " | 1042 | QETH_DBF_MESSAGE(2, "%s couldn't set adapter parameters: " |
1041 | "x%x\n", CARD_BUS_ID(card), rc); | 1043 | "0x%x\n", card->gdev->dev.bus_id, rc); |
1042 | return rc; | 1044 | return rc; |
1043 | } | 1045 | } |
1044 | if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { | 1046 | if (qeth_adp_supported(card, IPA_SETADP_ALTER_MAC_ADDRESS)) { |
1045 | rc = qeth_setadpparms_change_macaddr(card); | 1047 | rc = qeth_setadpparms_change_macaddr(card); |
1046 | if (rc) | 1048 | if (rc) |
1047 | PRINT_WARN("couldn't get MAC address on " | 1049 | dev_warn(&card->gdev->dev, "Reading the adapter MAC" |
1048 | "device %s: x%x\n", | 1050 | " address failed\n", rc); |
1049 | CARD_BUS_ID(card), rc); | ||
1050 | } | 1051 | } |
1051 | 1052 | ||
1052 | if ((card->info.link_type == QETH_LINK_TYPE_HSTR) || | 1053 | if ((card->info.link_type == QETH_LINK_TYPE_HSTR) || |
@@ -1160,16 +1161,17 @@ static int qeth_l3_start_ipa_arp_processing(struct qeth_card *card) | |||
1160 | QETH_DBF_TEXT(TRACE, 3, "ipaarp"); | 1161 | QETH_DBF_TEXT(TRACE, 3, "ipaarp"); |
1161 | 1162 | ||
1162 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { | 1163 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { |
1163 | PRINT_WARN("ARP processing not supported " | 1164 | dev_info(&card->gdev->dev, |
1164 | "on %s!\n", QETH_CARD_IFNAME(card)); | 1165 | "ARP processing not supported on %s!\n", |
1166 | QETH_CARD_IFNAME(card)); | ||
1165 | return 0; | 1167 | return 0; |
1166 | } | 1168 | } |
1167 | rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, | 1169 | rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, |
1168 | IPA_CMD_ASS_START, 0); | 1170 | IPA_CMD_ASS_START, 0); |
1169 | if (rc) { | 1171 | if (rc) { |
1170 | PRINT_WARN("Could not start ARP processing " | 1172 | dev_warn(&card->gdev->dev, |
1171 | "assist on %s: 0x%x\n", | 1173 | "Starting ARP processing support for %s failed\n", |
1172 | QETH_CARD_IFNAME(card), rc); | 1174 | QETH_CARD_IFNAME(card)); |
1173 | } | 1175 | } |
1174 | return rc; | 1176 | return rc; |
1175 | } | 1177 | } |
@@ -1181,19 +1183,21 @@ static int qeth_l3_start_ipa_ip_fragmentation(struct qeth_card *card) | |||
1181 | QETH_DBF_TEXT(TRACE, 3, "ipaipfrg"); | 1183 | QETH_DBF_TEXT(TRACE, 3, "ipaipfrg"); |
1182 | 1184 | ||
1183 | if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { | 1185 | if (!qeth_is_supported(card, IPA_IP_FRAGMENTATION)) { |
1184 | PRINT_INFO("Hardware IP fragmentation not supported on %s\n", | 1186 | dev_info(&card->gdev->dev, |
1185 | QETH_CARD_IFNAME(card)); | 1187 | "Hardware IP fragmentation not supported on %s\n", |
1188 | QETH_CARD_IFNAME(card)); | ||
1186 | return -EOPNOTSUPP; | 1189 | return -EOPNOTSUPP; |
1187 | } | 1190 | } |
1188 | 1191 | ||
1189 | rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION, | 1192 | rc = qeth_l3_send_simple_setassparms(card, IPA_IP_FRAGMENTATION, |
1190 | IPA_CMD_ASS_START, 0); | 1193 | IPA_CMD_ASS_START, 0); |
1191 | if (rc) { | 1194 | if (rc) { |
1192 | PRINT_WARN("Could not start Hardware IP fragmentation " | 1195 | dev_warn(&card->gdev->dev, |
1193 | "assist on %s: 0x%x\n", | 1196 | "Starting IP fragmentation support for %s failed\n", |
1194 | QETH_CARD_IFNAME(card), rc); | 1197 | QETH_CARD_IFNAME(card)); |
1195 | } else | 1198 | } else |
1196 | PRINT_INFO("Hardware IP fragmentation enabled \n"); | 1199 | dev_info(&card->gdev->dev, |
1200 | "Hardware IP fragmentation enabled \n"); | ||
1197 | return rc; | 1201 | return rc; |
1198 | } | 1202 | } |
1199 | 1203 | ||
@@ -1207,17 +1211,18 @@ static int qeth_l3_start_ipa_source_mac(struct qeth_card *card) | |||
1207 | return -EOPNOTSUPP; | 1211 | return -EOPNOTSUPP; |
1208 | 1212 | ||
1209 | if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { | 1213 | if (!qeth_is_supported(card, IPA_SOURCE_MAC)) { |
1210 | PRINT_INFO("Inbound source address not " | 1214 | dev_info(&card->gdev->dev, |
1211 | "supported on %s\n", QETH_CARD_IFNAME(card)); | 1215 | "Inbound source address not supported on %s\n", |
1216 | QETH_CARD_IFNAME(card)); | ||
1212 | return -EOPNOTSUPP; | 1217 | return -EOPNOTSUPP; |
1213 | } | 1218 | } |
1214 | 1219 | ||
1215 | rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC, | 1220 | rc = qeth_l3_send_simple_setassparms(card, IPA_SOURCE_MAC, |
1216 | IPA_CMD_ASS_START, 0); | 1221 | IPA_CMD_ASS_START, 0); |
1217 | if (rc) | 1222 | if (rc) |
1218 | PRINT_WARN("Could not start inbound source " | 1223 | dev_warn(&card->gdev->dev, |
1219 | "assist on %s: 0x%x\n", | 1224 | "Starting proxy ARP support for %s failed\n", |
1220 | QETH_CARD_IFNAME(card), rc); | 1225 | QETH_CARD_IFNAME(card)); |
1221 | return rc; | 1226 | return rc; |
1222 | } | 1227 | } |
1223 | 1228 | ||
@@ -1228,19 +1233,19 @@ static int qeth_l3_start_ipa_vlan(struct qeth_card *card) | |||
1228 | QETH_DBF_TEXT(TRACE, 3, "strtvlan"); | 1233 | QETH_DBF_TEXT(TRACE, 3, "strtvlan"); |
1229 | 1234 | ||
1230 | if (!qeth_is_supported(card, IPA_FULL_VLAN)) { | 1235 | if (!qeth_is_supported(card, IPA_FULL_VLAN)) { |
1231 | PRINT_WARN("VLAN not supported on %s\n", | 1236 | dev_info(&card->gdev->dev, |
1232 | QETH_CARD_IFNAME(card)); | 1237 | "VLAN not supported on %s\n", QETH_CARD_IFNAME(card)); |
1233 | return -EOPNOTSUPP; | 1238 | return -EOPNOTSUPP; |
1234 | } | 1239 | } |
1235 | 1240 | ||
1236 | rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO, | 1241 | rc = qeth_l3_send_simple_setassparms(card, IPA_VLAN_PRIO, |
1237 | IPA_CMD_ASS_START, 0); | 1242 | IPA_CMD_ASS_START, 0); |
1238 | if (rc) { | 1243 | if (rc) { |
1239 | PRINT_WARN("Could not start vlan " | 1244 | dev_warn(&card->gdev->dev, |
1240 | "assist on %s: 0x%x\n", | 1245 | "Starting VLAN support for %s failed\n", |
1241 | QETH_CARD_IFNAME(card), rc); | 1246 | QETH_CARD_IFNAME(card)); |
1242 | } else { | 1247 | } else { |
1243 | PRINT_INFO("VLAN enabled \n"); | 1248 | dev_info(&card->gdev->dev, "VLAN enabled\n"); |
1244 | } | 1249 | } |
1245 | return rc; | 1250 | return rc; |
1246 | } | 1251 | } |
@@ -1252,19 +1257,20 @@ static int qeth_l3_start_ipa_multicast(struct qeth_card *card) | |||
1252 | QETH_DBF_TEXT(TRACE, 3, "stmcast"); | 1257 | QETH_DBF_TEXT(TRACE, 3, "stmcast"); |
1253 | 1258 | ||
1254 | if (!qeth_is_supported(card, IPA_MULTICASTING)) { | 1259 | if (!qeth_is_supported(card, IPA_MULTICASTING)) { |
1255 | PRINT_WARN("Multicast not supported on %s\n", | 1260 | dev_info(&card->gdev->dev, |
1256 | QETH_CARD_IFNAME(card)); | 1261 | "Multicast not supported on %s\n", |
1262 | QETH_CARD_IFNAME(card)); | ||
1257 | return -EOPNOTSUPP; | 1263 | return -EOPNOTSUPP; |
1258 | } | 1264 | } |
1259 | 1265 | ||
1260 | rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING, | 1266 | rc = qeth_l3_send_simple_setassparms(card, IPA_MULTICASTING, |
1261 | IPA_CMD_ASS_START, 0); | 1267 | IPA_CMD_ASS_START, 0); |
1262 | if (rc) { | 1268 | if (rc) { |
1263 | PRINT_WARN("Could not start multicast " | 1269 | dev_warn(&card->gdev->dev, |
1264 | "assist on %s: rc=%i\n", | 1270 | "Starting multicast support for %s failed\n", |
1265 | QETH_CARD_IFNAME(card), rc); | 1271 | QETH_CARD_IFNAME(card)); |
1266 | } else { | 1272 | } else { |
1267 | PRINT_INFO("Multicast enabled\n"); | 1273 | dev_info(&card->gdev->dev, "Multicast enabled\n"); |
1268 | card->dev->flags |= IFF_MULTICAST; | 1274 | card->dev->flags |= IFF_MULTICAST; |
1269 | } | 1275 | } |
1270 | return rc; | 1276 | return rc; |
@@ -1315,36 +1321,37 @@ static int qeth_l3_softsetup_ipv6(struct qeth_card *card) | |||
1315 | 1321 | ||
1316 | rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6); | 1322 | rc = qeth_l3_query_ipassists(card, QETH_PROT_IPV6); |
1317 | if (rc) { | 1323 | if (rc) { |
1318 | PRINT_ERR("IPv6 query ipassist failed on %s\n", | 1324 | dev_err(&card->gdev->dev, |
1319 | QETH_CARD_IFNAME(card)); | 1325 | "Activating IPv6 support for %s failed\n", |
1326 | QETH_CARD_IFNAME(card)); | ||
1320 | return rc; | 1327 | return rc; |
1321 | } | 1328 | } |
1322 | rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6, | 1329 | rc = qeth_l3_send_simple_setassparms(card, IPA_IPV6, |
1323 | IPA_CMD_ASS_START, 3); | 1330 | IPA_CMD_ASS_START, 3); |
1324 | if (rc) { | 1331 | if (rc) { |
1325 | PRINT_WARN("IPv6 start assist (version 4) failed " | 1332 | dev_err(&card->gdev->dev, |
1326 | "on %s: 0x%x\n", | 1333 | "Activating IPv6 support for %s failed\n", |
1327 | QETH_CARD_IFNAME(card), rc); | 1334 | QETH_CARD_IFNAME(card)); |
1328 | return rc; | 1335 | return rc; |
1329 | } | 1336 | } |
1330 | rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6, | 1337 | rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_IPV6, |
1331 | IPA_CMD_ASS_START); | 1338 | IPA_CMD_ASS_START); |
1332 | if (rc) { | 1339 | if (rc) { |
1333 | PRINT_WARN("IPV6 start assist (version 6) failed " | 1340 | dev_err(&card->gdev->dev, |
1334 | "on %s: 0x%x\n", | 1341 | "Activating IPv6 support for %s failed\n", |
1335 | QETH_CARD_IFNAME(card), rc); | 1342 | QETH_CARD_IFNAME(card)); |
1336 | return rc; | 1343 | return rc; |
1337 | } | 1344 | } |
1338 | rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU, | 1345 | rc = qeth_l3_send_simple_setassparms_ipv6(card, IPA_PASSTHRU, |
1339 | IPA_CMD_ASS_START); | 1346 | IPA_CMD_ASS_START); |
1340 | if (rc) { | 1347 | if (rc) { |
1341 | PRINT_WARN("Could not enable passthrough " | 1348 | dev_warn(&card->gdev->dev, |
1342 | "on %s: 0x%x\n", | 1349 | "Enabling the passthrough mode for %s failed\n", |
1343 | QETH_CARD_IFNAME(card), rc); | 1350 | QETH_CARD_IFNAME(card)); |
1344 | return rc; | 1351 | return rc; |
1345 | } | 1352 | } |
1346 | out: | 1353 | out: |
1347 | PRINT_INFO("IPV6 enabled \n"); | 1354 | dev_info(&card->gdev->dev, "IPV6 enabled\n"); |
1348 | return 0; | 1355 | return 0; |
1349 | } | 1356 | } |
1350 | #endif | 1357 | #endif |
@@ -1356,8 +1363,8 @@ static int qeth_l3_start_ipa_ipv6(struct qeth_card *card) | |||
1356 | QETH_DBF_TEXT(TRACE, 3, "strtipv6"); | 1363 | QETH_DBF_TEXT(TRACE, 3, "strtipv6"); |
1357 | 1364 | ||
1358 | if (!qeth_is_supported(card, IPA_IPV6)) { | 1365 | if (!qeth_is_supported(card, IPA_IPV6)) { |
1359 | PRINT_WARN("IPv6 not supported on %s\n", | 1366 | dev_info(&card->gdev->dev, |
1360 | QETH_CARD_IFNAME(card)); | 1367 | "IPv6 not supported on %s\n", QETH_CARD_IFNAME(card)); |
1361 | return 0; | 1368 | return 0; |
1362 | } | 1369 | } |
1363 | #ifdef CONFIG_QETH_IPV6 | 1370 | #ifdef CONFIG_QETH_IPV6 |
@@ -1373,34 +1380,35 @@ static int qeth_l3_start_ipa_broadcast(struct qeth_card *card) | |||
1373 | QETH_DBF_TEXT(TRACE, 3, "stbrdcst"); | 1380 | QETH_DBF_TEXT(TRACE, 3, "stbrdcst"); |
1374 | card->info.broadcast_capable = 0; | 1381 | card->info.broadcast_capable = 0; |
1375 | if (!qeth_is_supported(card, IPA_FILTERING)) { | 1382 | if (!qeth_is_supported(card, IPA_FILTERING)) { |
1376 | PRINT_WARN("Broadcast not supported on %s\n", | 1383 | dev_info(&card->gdev->dev, |
1377 | QETH_CARD_IFNAME(card)); | 1384 | "Broadcast not supported on %s\n", |
1385 | QETH_CARD_IFNAME(card)); | ||
1378 | rc = -EOPNOTSUPP; | 1386 | rc = -EOPNOTSUPP; |
1379 | goto out; | 1387 | goto out; |
1380 | } | 1388 | } |
1381 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, | 1389 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, |
1382 | IPA_CMD_ASS_START, 0); | 1390 | IPA_CMD_ASS_START, 0); |
1383 | if (rc) { | 1391 | if (rc) { |
1384 | PRINT_WARN("Could not enable broadcasting filtering " | 1392 | dev_warn(&card->gdev->dev, "Enabling broadcast filtering for " |
1385 | "on %s: 0x%x\n", | 1393 | "%s failed\n", QETH_CARD_IFNAME(card)); |
1386 | QETH_CARD_IFNAME(card), rc); | ||
1387 | goto out; | 1394 | goto out; |
1388 | } | 1395 | } |
1389 | 1396 | ||
1390 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, | 1397 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, |
1391 | IPA_CMD_ASS_CONFIGURE, 1); | 1398 | IPA_CMD_ASS_CONFIGURE, 1); |
1392 | if (rc) { | 1399 | if (rc) { |
1393 | PRINT_WARN("Could not set up broadcast filtering on %s: 0x%x\n", | 1400 | dev_warn(&card->gdev->dev, |
1394 | QETH_CARD_IFNAME(card), rc); | 1401 | "Setting up broadcast filtering for %s failed\n", |
1402 | QETH_CARD_IFNAME(card)); | ||
1395 | goto out; | 1403 | goto out; |
1396 | } | 1404 | } |
1397 | card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; | 1405 | card->info.broadcast_capable = QETH_BROADCAST_WITH_ECHO; |
1398 | PRINT_INFO("Broadcast enabled \n"); | 1406 | dev_info(&card->gdev->dev, "Broadcast enabled\n"); |
1399 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, | 1407 | rc = qeth_l3_send_simple_setassparms(card, IPA_FILTERING, |
1400 | IPA_CMD_ASS_ENABLE, 1); | 1408 | IPA_CMD_ASS_ENABLE, 1); |
1401 | if (rc) { | 1409 | if (rc) { |
1402 | PRINT_WARN("Could not set up broadcast echo filtering on " | 1410 | dev_warn(&card->gdev->dev, "Setting up broadcast echo " |
1403 | "%s: 0x%x\n", QETH_CARD_IFNAME(card), rc); | 1411 | "filtering for %s failed\n", QETH_CARD_IFNAME(card)); |
1404 | goto out; | 1412 | goto out; |
1405 | } | 1413 | } |
1406 | card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; | 1414 | card->info.broadcast_capable = QETH_BROADCAST_WITHOUT_ECHO; |
@@ -1419,18 +1427,18 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card) | |||
1419 | rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, | 1427 | rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, |
1420 | IPA_CMD_ASS_START, 0); | 1428 | IPA_CMD_ASS_START, 0); |
1421 | if (rc) { | 1429 | if (rc) { |
1422 | PRINT_WARN("Starting Inbound HW Checksumming failed on %s: " | 1430 | dev_warn(&card->gdev->dev, "Starting HW checksumming for %s " |
1423 | "0x%x,\ncontinuing using Inbound SW Checksumming\n", | 1431 | "failed, using SW checksumming\n", |
1424 | QETH_CARD_IFNAME(card), rc); | 1432 | QETH_CARD_IFNAME(card)); |
1425 | return rc; | 1433 | return rc; |
1426 | } | 1434 | } |
1427 | rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, | 1435 | rc = qeth_l3_send_simple_setassparms(card, IPA_INBOUND_CHECKSUM, |
1428 | IPA_CMD_ASS_ENABLE, | 1436 | IPA_CMD_ASS_ENABLE, |
1429 | card->info.csum_mask); | 1437 | card->info.csum_mask); |
1430 | if (rc) { | 1438 | if (rc) { |
1431 | PRINT_WARN("Enabling Inbound HW Checksumming failed on %s: " | 1439 | dev_warn(&card->gdev->dev, "Enabling HW checksumming for %s " |
1432 | "0x%x,\ncontinuing using Inbound SW Checksumming\n", | 1440 | "failed, using SW checksumming\n", |
1433 | QETH_CARD_IFNAME(card), rc); | 1441 | QETH_CARD_IFNAME(card)); |
1434 | return rc; | 1442 | return rc; |
1435 | } | 1443 | } |
1436 | return 0; | 1444 | return 0; |
@@ -1443,26 +1451,30 @@ static int qeth_l3_start_ipa_checksum(struct qeth_card *card) | |||
1443 | QETH_DBF_TEXT(TRACE, 3, "strtcsum"); | 1451 | QETH_DBF_TEXT(TRACE, 3, "strtcsum"); |
1444 | 1452 | ||
1445 | if (card->options.checksum_type == NO_CHECKSUMMING) { | 1453 | if (card->options.checksum_type == NO_CHECKSUMMING) { |
1446 | PRINT_WARN("Using no checksumming on %s.\n", | 1454 | dev_info(&card->gdev->dev, |
1447 | QETH_CARD_IFNAME(card)); | 1455 | "Using no checksumming on %s.\n", |
1456 | QETH_CARD_IFNAME(card)); | ||
1448 | return 0; | 1457 | return 0; |
1449 | } | 1458 | } |
1450 | if (card->options.checksum_type == SW_CHECKSUMMING) { | 1459 | if (card->options.checksum_type == SW_CHECKSUMMING) { |
1451 | PRINT_WARN("Using SW checksumming on %s.\n", | 1460 | dev_info(&card->gdev->dev, |
1452 | QETH_CARD_IFNAME(card)); | 1461 | "Using SW checksumming on %s.\n", |
1462 | QETH_CARD_IFNAME(card)); | ||
1453 | return 0; | 1463 | return 0; |
1454 | } | 1464 | } |
1455 | if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) { | 1465 | if (!qeth_is_supported(card, IPA_INBOUND_CHECKSUM)) { |
1456 | PRINT_WARN("Inbound HW Checksumming not " | 1466 | dev_info(&card->gdev->dev, |
1457 | "supported on %s,\ncontinuing " | 1467 | "Inbound HW Checksumming not " |
1458 | "using Inbound SW Checksumming\n", | 1468 | "supported on %s,\ncontinuing " |
1459 | QETH_CARD_IFNAME(card)); | 1469 | "using Inbound SW Checksumming\n", |
1470 | QETH_CARD_IFNAME(card)); | ||
1460 | card->options.checksum_type = SW_CHECKSUMMING; | 1471 | card->options.checksum_type = SW_CHECKSUMMING; |
1461 | return 0; | 1472 | return 0; |
1462 | } | 1473 | } |
1463 | rc = qeth_l3_send_checksum_command(card); | 1474 | rc = qeth_l3_send_checksum_command(card); |
1464 | if (!rc) | 1475 | if (!rc) |
1465 | PRINT_INFO("HW Checksumming (inbound) enabled \n"); | 1476 | dev_info(&card->gdev->dev, |
1477 | "HW Checksumming (inbound) enabled\n"); | ||
1466 | 1478 | ||
1467 | return rc; | 1479 | return rc; |
1468 | } | 1480 | } |
@@ -1474,18 +1486,20 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card) | |||
1474 | QETH_DBF_TEXT(TRACE, 3, "sttso"); | 1486 | QETH_DBF_TEXT(TRACE, 3, "sttso"); |
1475 | 1487 | ||
1476 | if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { | 1488 | if (!qeth_is_supported(card, IPA_OUTBOUND_TSO)) { |
1477 | PRINT_WARN("Outbound TSO not supported on %s\n", | 1489 | dev_info(&card->gdev->dev, |
1478 | QETH_CARD_IFNAME(card)); | 1490 | "Outbound TSO not supported on %s\n", |
1491 | QETH_CARD_IFNAME(card)); | ||
1479 | rc = -EOPNOTSUPP; | 1492 | rc = -EOPNOTSUPP; |
1480 | } else { | 1493 | } else { |
1481 | rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO, | 1494 | rc = qeth_l3_send_simple_setassparms(card, IPA_OUTBOUND_TSO, |
1482 | IPA_CMD_ASS_START, 0); | 1495 | IPA_CMD_ASS_START, 0); |
1483 | if (rc) | 1496 | if (rc) |
1484 | PRINT_WARN("Could not start outbound TSO " | 1497 | dev_warn(&card->gdev->dev, "Starting outbound TCP " |
1485 | "assist on %s: rc=%i\n", | 1498 | "segmentation offload for %s failed\n", |
1486 | QETH_CARD_IFNAME(card), rc); | 1499 | QETH_CARD_IFNAME(card)); |
1487 | else | 1500 | else |
1488 | PRINT_INFO("Outbound TSO enabled\n"); | 1501 | dev_info(&card->gdev->dev, |
1502 | "Outbound TSO enabled\n"); | ||
1489 | } | 1503 | } |
1490 | if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) { | 1504 | if (rc && (card->options.large_send == QETH_LARGE_SEND_TSO)) { |
1491 | card->options.large_send = QETH_LARGE_SEND_NO; | 1505 | card->options.large_send = QETH_LARGE_SEND_NO; |
@@ -1578,12 +1592,8 @@ static int qeth_l3_get_unique_id_cb(struct qeth_card *card, | |||
1578 | else { | 1592 | else { |
1579 | card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | | 1593 | card->info.unique_id = UNIQUE_ID_IF_CREATE_ADDR_FAILED | |
1580 | UNIQUE_ID_NOT_BY_CARD; | 1594 | UNIQUE_ID_NOT_BY_CARD; |
1581 | PRINT_WARN("couldn't get a unique id from the card on device " | 1595 | dev_warn(&card->gdev->dev, "The network adapter failed to " |
1582 | "%s (result=x%x), using default id. ipv6 " | 1596 | "generate a unique ID\n"); |
1583 | "autoconfig on other lpars may lead to duplicate " | ||
1584 | "ip addresses. please use manually " | ||
1585 | "configured ones.\n", | ||
1586 | CARD_BUS_ID(card), cmd->hdr.return_code); | ||
1587 | } | 1597 | } |
1588 | return 0; | 1598 | return 0; |
1589 | } | 1599 | } |
@@ -3086,9 +3096,8 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3086 | if (rc) { | 3096 | if (rc) { |
3087 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 3097 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
3088 | if (rc == 0xe080) { | 3098 | if (rc == 0xe080) { |
3089 | PRINT_WARN("LAN on card %s if offline! " | 3099 | dev_warn(&card->gdev->dev, |
3090 | "Waiting for STARTLAN from card.\n", | 3100 | "The LAN is offline\n"); |
3091 | CARD_BUS_ID(card)); | ||
3092 | card->lan_online = 0; | 3101 | card->lan_online = 0; |
3093 | } | 3102 | } |
3094 | return rc; | 3103 | return rc; |
@@ -3194,8 +3203,8 @@ static int qeth_l3_recover(void *ptr) | |||
3194 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) | 3203 | if (!qeth_do_run_thread(card, QETH_RECOVER_THREAD)) |
3195 | return 0; | 3204 | return 0; |
3196 | QETH_DBF_TEXT(TRACE, 2, "recover2"); | 3205 | QETH_DBF_TEXT(TRACE, 2, "recover2"); |
3197 | PRINT_WARN("Recovery of device %s started ...\n", | 3206 | dev_warn(&card->gdev->dev, |
3198 | CARD_BUS_ID(card)); | 3207 | "A recovery process has been started for the device\n"); |
3199 | card->use_hard_stop = 1; | 3208 | card->use_hard_stop = 1; |
3200 | __qeth_l3_set_offline(card->gdev, 1); | 3209 | __qeth_l3_set_offline(card->gdev, 1); |
3201 | rc = __qeth_l3_set_online(card->gdev, 1); | 3210 | rc = __qeth_l3_set_online(card->gdev, 1); |
@@ -3203,14 +3212,14 @@ static int qeth_l3_recover(void *ptr) | |||
3203 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | 3212 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); |
3204 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | 3213 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); |
3205 | if (!rc) | 3214 | if (!rc) |
3206 | PRINT_INFO("Device %s successfully recovered!\n", | 3215 | dev_info(&card->gdev->dev, |
3207 | CARD_BUS_ID(card)); | 3216 | "Device successfully recovered!\n"); |
3208 | else { | 3217 | else { |
3209 | rtnl_lock(); | 3218 | rtnl_lock(); |
3210 | dev_close(card->dev); | 3219 | dev_close(card->dev); |
3211 | rtnl_unlock(); | 3220 | rtnl_unlock(); |
3212 | PRINT_INFO("Device %s could not be recovered!\n", | 3221 | dev_warn(&card->gdev->dev, "The qeth device driver " |
3213 | CARD_BUS_ID(card)); | 3222 | "failed to recover an error on the device\n"); |
3214 | } | 3223 | } |
3215 | return 0; | 3224 | return 0; |
3216 | } | 3225 | } |
@@ -3344,7 +3353,7 @@ static int qeth_l3_register_notifiers(void) | |||
3344 | return rc; | 3353 | return rc; |
3345 | } | 3354 | } |
3346 | #else | 3355 | #else |
3347 | PRINT_WARN("layer 3 discipline no IPv6 support\n"); | 3356 | pr_warning("There is no IPv6 support for the layer 3 discipline\n"); |
3348 | #endif | 3357 | #endif |
3349 | return 0; | 3358 | return 0; |
3350 | } | 3359 | } |
@@ -3363,7 +3372,7 @@ static int __init qeth_l3_init(void) | |||
3363 | { | 3372 | { |
3364 | int rc = 0; | 3373 | int rc = 0; |
3365 | 3374 | ||
3366 | PRINT_INFO("register layer 3 discipline\n"); | 3375 | pr_info("register layer 3 discipline\n"); |
3367 | rc = qeth_l3_register_notifiers(); | 3376 | rc = qeth_l3_register_notifiers(); |
3368 | return rc; | 3377 | return rc; |
3369 | } | 3378 | } |
@@ -3371,7 +3380,7 @@ static int __init qeth_l3_init(void) | |||
3371 | static void __exit qeth_l3_exit(void) | 3380 | static void __exit qeth_l3_exit(void) |
3372 | { | 3381 | { |
3373 | qeth_l3_unregister_notifiers(); | 3382 | qeth_l3_unregister_notifiers(); |
3374 | PRINT_INFO("unregister layer 3 discipline\n"); | 3383 | pr_info("unregister layer 3 discipline\n"); |
3375 | } | 3384 | } |
3376 | 3385 | ||
3377 | module_init(qeth_l3_init); | 3386 | module_init(qeth_l3_init); |
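The qeth layer-3 hunks above follow one conversion pattern: messages tied to a specific adapter move from PRINT_WARN/PRINT_INFO to dev_warn()/dev_info() against &card->gdev->dev, which prefixes the driver name and bus ID automatically (making the old CARD_BUS_ID/return-code details redundant), while module-scope messages in the init/exit and notifier paths become pr_info()/pr_warning(). Below is a minimal sketch of the two forms; the example_* names are placeholders, and the qeth KMSG_COMPONENT/pr_fmt definitions are assumed to sit earlier in qeth_l3_main.c rather than in the hunks shown here.

/* Hedged sketch of the two message styles used in the hunks above.
 * "example_dev" and "example_ifname" are illustrative placeholders.
 */
#define KMSG_COMPONENT "qeth"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt	/* assumed to exist earlier in the file */

#include <linux/kernel.h>
#include <linux/device.h>

static void example_messages(struct device *example_dev, const char *example_ifname)
{
	/* device-bound: printed as "<driver name> <bus id>: ..." */
	dev_warn(example_dev, "Starting HW checksumming for %s failed, "
		 "using SW checksumming\n", example_ifname);

	/* module-scope: pr_fmt() supplies the "qeth: " prefix */
	pr_info("register layer 3 discipline\n");
}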
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 3d4e3e3f3fc0..e529b55b3ce9 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c | |||
@@ -25,9 +25,15 @@ | |||
25 | * Sven Schuetz | 25 | * Sven Schuetz |
26 | */ | 26 | */ |
27 | 27 | ||
28 | #define KMSG_COMPONENT "zfcp" | ||
29 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
30 | |||
28 | #include <linux/miscdevice.h> | 31 | #include <linux/miscdevice.h> |
32 | #include <linux/seq_file.h> | ||
29 | #include "zfcp_ext.h" | 33 | #include "zfcp_ext.h" |
30 | 34 | ||
35 | #define ZFCP_BUS_ID_SIZE 20 | ||
36 | |||
31 | static char *device; | 37 | static char *device; |
32 | 38 | ||
33 | MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com"); | 39 | MODULE_AUTHOR("IBM Deutschland Entwicklung GmbH - linux390@de.ibm.com"); |
@@ -83,9 +89,9 @@ static int __init zfcp_device_setup(char *devstr) | |||
83 | strcpy(str, devstr); | 89 | strcpy(str, devstr); |
84 | 90 | ||
85 | token = strsep(&str, ","); | 91 | token = strsep(&str, ","); |
86 | if (!token || strlen(token) >= BUS_ID_SIZE) | 92 | if (!token || strlen(token) >= ZFCP_BUS_ID_SIZE) |
87 | goto err_out; | 93 | goto err_out; |
88 | strncpy(zfcp_data.init_busid, token, BUS_ID_SIZE); | 94 | strncpy(zfcp_data.init_busid, token, ZFCP_BUS_ID_SIZE); |
89 | 95 | ||
90 | token = strsep(&str, ","); | 96 | token = strsep(&str, ","); |
91 | if (!token || strict_strtoull(token, 0, | 97 | if (!token || strict_strtoull(token, 0, |
@@ -102,7 +108,7 @@ static int __init zfcp_device_setup(char *devstr) | |||
102 | 108 | ||
103 | err_out: | 109 | err_out: |
104 | kfree(str); | 110 | kfree(str); |
105 | pr_err("zfcp: %s is not a valid SCSI device\n", devstr); | 111 | pr_err("%s is not a valid SCSI device\n", devstr); |
106 | return 0; | 112 | return 0; |
107 | } | 113 | } |
108 | 114 | ||
@@ -186,13 +192,13 @@ static int __init zfcp_module_init(void) | |||
186 | 192 | ||
187 | retval = misc_register(&zfcp_cfdc_misc); | 193 | retval = misc_register(&zfcp_cfdc_misc); |
188 | if (retval) { | 194 | if (retval) { |
189 | pr_err("zfcp: Registering the misc device zfcp_cfdc failed\n"); | 195 | pr_err("Registering the misc device zfcp_cfdc failed\n"); |
190 | goto out_misc; | 196 | goto out_misc; |
191 | } | 197 | } |
192 | 198 | ||
193 | retval = zfcp_ccw_register(); | 199 | retval = zfcp_ccw_register(); |
194 | if (retval) { | 200 | if (retval) { |
195 | pr_err("zfcp: The zfcp device driver could not register with " | 201 | pr_err("The zfcp device driver could not register with " |
196 | "the common I/O layer\n"); | 202 | "the common I/O layer\n"); |
197 | goto out_ccw_register; | 203 | goto out_ccw_register; |
198 | } | 204 | } |
@@ -436,6 +442,16 @@ static void _zfcp_status_read_scheduler(struct work_struct *work) | |||
436 | stat_work)); | 442 | stat_work)); |
437 | } | 443 | } |
438 | 444 | ||
445 | static void zfcp_print_sl(struct seq_file *m, struct service_level *sl) | ||
446 | { | ||
447 | struct zfcp_adapter *adapter = | ||
448 | container_of(sl, struct zfcp_adapter, service_level); | ||
449 | |||
450 | seq_printf(m, "zfcp: %s microcode level %x\n", | ||
451 | dev_name(&adapter->ccw_device->dev), | ||
452 | adapter->fsf_lic_version); | ||
453 | } | ||
454 | |||
439 | /** | 455 | /** |
440 | * zfcp_adapter_enqueue - enqueue a new adapter to the list | 456 | * zfcp_adapter_enqueue - enqueue a new adapter to the list |
441 | * @ccw_device: pointer to the struct cc_device | 457 | * @ccw_device: pointer to the struct cc_device |
@@ -500,6 +516,8 @@ int zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
500 | INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); | 516 | INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler); |
501 | INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later); | 517 | INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later); |
502 | 518 | ||
519 | adapter->service_level.seq_print = zfcp_print_sl; | ||
520 | |||
503 | /* mark adapter unusable as long as sysfs registration is not complete */ | 521 | /* mark adapter unusable as long as sysfs registration is not complete */ |
504 | atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); | 522 | atomic_set_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status); |
505 | 523 | ||
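The KMSG_COMPONENT/pr_fmt pair added at the top of zfcp_aux.c is what lets the pr_err() calls below it drop their literal "zfcp: " prefixes: pr_err() and its siblings expand to roughly printk(KERN_ERR pr_fmt(fmt), ...), so defining pr_fmt before the first message prefixes every pr_* call in the file. A minimal sketch of that mechanism, with an illustrative example_* helper:

/* Minimal sketch of the pr_fmt() mechanism relied on above; pr_fmt must be
 * defined before <linux/kernel.h> pulls in the default definition.
 */
#define KMSG_COMPONENT "zfcp"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>

static void example_report_bad_device(const char *devstr)
{
	/* emitted as: "zfcp: <devstr> is not a valid SCSI device" */
	pr_err("%s is not a valid SCSI device\n", devstr);
}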
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 951a8d409d1d..728147131e1d 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | 13 | ||
11 | /** | 14 | /** |
diff --git a/drivers/s390/scsi/zfcp_cfdc.c b/drivers/s390/scsi/zfcp_cfdc.c index ec2abceca6dc..f1a7518e67ed 100644 --- a/drivers/s390/scsi/zfcp_cfdc.c +++ b/drivers/s390/scsi/zfcp_cfdc.c | |||
@@ -7,6 +7,9 @@ | |||
7 | * Copyright IBM Corporation 2008 | 7 | * Copyright IBM Corporation 2008 |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #define KMSG_COMPONENT "zfcp" | ||
11 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
12 | |||
10 | #include <linux/types.h> | 13 | #include <linux/types.h> |
11 | #include <linux/miscdevice.h> | 14 | #include <linux/miscdevice.h> |
12 | #include <asm/ccwdev.h> | 15 | #include <asm/ccwdev.h> |
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c index 31012d58cfb7..735d675623f8 100644 --- a/drivers/s390/scsi/zfcp_dbf.c +++ b/drivers/s390/scsi/zfcp_dbf.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include <linux/ctype.h> | 12 | #include <linux/ctype.h> |
10 | #include <asm/debug.h> | 13 | #include <asm/debug.h> |
11 | #include "zfcp_ext.h" | 14 | #include "zfcp_ext.h" |
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 9ce4c75bd190..e19e46ae4a68 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <asm/qdio.h> | 33 | #include <asm/qdio.h> |
34 | #include <asm/debug.h> | 34 | #include <asm/debug.h> |
35 | #include <asm/ebcdic.h> | 35 | #include <asm/ebcdic.h> |
36 | #include <asm/sysinfo.h> | ||
36 | #include "zfcp_dbf.h" | 37 | #include "zfcp_dbf.h" |
37 | #include "zfcp_fsf.h" | 38 | #include "zfcp_fsf.h" |
38 | 39 | ||
@@ -515,6 +516,7 @@ struct zfcp_adapter { | |||
515 | struct fsf_qtcb_bottom_port *stats_reset_data; | 516 | struct fsf_qtcb_bottom_port *stats_reset_data; |
516 | unsigned long stats_reset; | 517 | unsigned long stats_reset; |
517 | struct work_struct scan_work; | 518 | struct work_struct scan_work; |
519 | struct service_level service_level; | ||
518 | atomic_t qdio_outb_full; /* queue full incidents */ | 520 | atomic_t qdio_outb_full; /* queue full incidents */ |
519 | }; | 521 | }; |
520 | 522 | ||
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index c557ba34e1aa..4ed4950d994b 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | 13 | ||
11 | #define ZFCP_MAX_ERPS 3 | 14 | #define ZFCP_MAX_ERPS 3 |
@@ -1281,10 +1284,13 @@ static void zfcp_erp_action_cleanup(struct zfcp_erp_action *act, int result) | |||
1281 | break; | 1284 | break; |
1282 | 1285 | ||
1283 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: | 1286 | case ZFCP_ERP_ACTION_REOPEN_ADAPTER: |
1284 | if (result != ZFCP_ERP_SUCCEEDED) | 1287 | if (result != ZFCP_ERP_SUCCEEDED) { |
1288 | unregister_service_level(&adapter->service_level); | ||
1285 | zfcp_erp_rports_del(adapter); | 1289 | zfcp_erp_rports_del(adapter); |
1286 | else | 1290 | } else { |
1291 | register_service_level(&adapter->service_level); | ||
1287 | schedule_work(&adapter->scan_work); | 1292 | schedule_work(&adapter->scan_work); |
1293 | } | ||
1288 | zfcp_adapter_put(adapter); | 1294 | zfcp_adapter_put(adapter); |
1289 | break; | 1295 | break; |
1290 | } | 1296 | } |
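The error-recovery hunk above keeps the adapter's /proc/service_levels entry in step with adapter state: a failed reopen unregisters it, a successful reopen (re-)registers it, and because register_service_level() (see the sysinfo.c hunk further down) returns -EEXIST for an entry that is already on the list, repeated successful recoveries are harmless. A hedged sketch of that pairing follows; example_adapter and its members are illustrative, and struct service_level plus the register/unregister prototypes are assumed to come from <asm/sysinfo.h>, which zfcp_def.h now includes.

/* Hedged sketch of the recovery-time pairing shown above; "example_adapter"
 * and "example_recover_done" are illustrative, not taken from zfcp.
 */
#include <asm/sysinfo.h>

struct example_adapter {
	struct service_level service_level;
	/* ... driver-specific state ... */
};

static void example_recover_done(struct example_adapter *a, int result)
{
	if (result != 0)
		/* entry disappears from /proc/service_levels */
		unregister_service_level(&a->service_level);
	else
		/* re-add; -EEXIST if it never went away, which is harmless */
		register_service_level(&a->service_level);
}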
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c index 8aab3091a7b1..f009f2a7ec3e 100644 --- a/drivers/s390/scsi/zfcp_fc.c +++ b/drivers/s390/scsi/zfcp_fc.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2008 | 6 | * Copyright IBM Corporation 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | 13 | ||
11 | struct ct_iu_gpn_ft_req { | 14 | struct ct_iu_gpn_ft_req { |
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index dc0367690405..9c72e083559d 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include <linux/blktrace_api.h> | 12 | #include <linux/blktrace_api.h> |
10 | #include "zfcp_ext.h" | 13 | #include "zfcp_ext.h" |
11 | 14 | ||
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 664752f90b20..d3b55fb66f13 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | 13 | ||
11 | /* FIXME(tune): free space should be one max. SBAL chain plus what? */ | 14 | /* FIXME(tune): free space should be one max. SBAL chain plus what? */ |
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 468c880f8b6d..9dc42a68fbdd 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2002, 2008 | 6 | * Copyright IBM Corporation 2002, 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | #include <asm/atomic.h> | 13 | #include <asm/atomic.h> |
11 | 14 | ||
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c index ca9293ba1766..899af2b45b1e 100644 --- a/drivers/s390/scsi/zfcp_sysfs.c +++ b/drivers/s390/scsi/zfcp_sysfs.c | |||
@@ -6,6 +6,9 @@ | |||
6 | * Copyright IBM Corporation 2008 | 6 | * Copyright IBM Corporation 2008 |
7 | */ | 7 | */ |
8 | 8 | ||
9 | #define KMSG_COMPONENT "zfcp" | ||
10 | #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt | ||
11 | |||
9 | #include "zfcp_ext.h" | 12 | #include "zfcp_ext.h" |
10 | 13 | ||
11 | #define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \ | 14 | #define ZFCP_DEV_ATTR(_feat, _name, _mode, _show, _store) \ |
diff --git a/drivers/s390/sysinfo.c b/drivers/s390/sysinfo.c index c3e4ab07b9cc..0eea90781385 100644 --- a/drivers/s390/sysinfo.c +++ b/drivers/s390/sysinfo.c | |||
@@ -1,17 +1,21 @@ | |||
1 | /* | 1 | /* |
2 | * drivers/s390/sysinfo.c | 2 | * drivers/s390/sysinfo.c |
3 | * | 3 | * |
4 | * Copyright (C) 2001 IBM Deutschland Entwicklung GmbH, IBM Corporation | 4 | * Copyright IBM Corp. 2001, 2008 |
5 | * Author(s): Ulrich Weigand (Ulrich.Weigand@de.ibm.com) | 5 | * Author(s): Ulrich Weigand (Ulrich.Weigand@de.ibm.com) |
6 | * Martin Schwidefsky <schwidefsky@de.ibm.com> | ||
6 | */ | 7 | */ |
7 | 8 | ||
8 | #include <linux/kernel.h> | 9 | #include <linux/kernel.h> |
9 | #include <linux/mm.h> | 10 | #include <linux/mm.h> |
10 | #include <linux/proc_fs.h> | 11 | #include <linux/proc_fs.h> |
12 | #include <linux/seq_file.h> | ||
11 | #include <linux/init.h> | 13 | #include <linux/init.h> |
12 | #include <linux/delay.h> | 14 | #include <linux/delay.h> |
15 | #include <linux/module.h> | ||
13 | #include <asm/ebcdic.h> | 16 | #include <asm/ebcdic.h> |
14 | #include <asm/sysinfo.h> | 17 | #include <asm/sysinfo.h> |
18 | #include <asm/cpcmd.h> | ||
15 | 19 | ||
16 | /* Sigh, math-emu. Don't ask. */ | 20 | /* Sigh, math-emu. Don't ask. */ |
17 | #include <asm/sfp-util.h> | 21 | #include <asm/sfp-util.h> |
@@ -271,6 +275,125 @@ static __init int create_proc_sysinfo(void) | |||
271 | 275 | ||
272 | __initcall(create_proc_sysinfo); | 276 | __initcall(create_proc_sysinfo); |
273 | 277 | ||
278 | /* | ||
279 | * Service levels interface. | ||
280 | */ | ||
281 | |||
282 | static DECLARE_RWSEM(service_level_sem); | ||
283 | static LIST_HEAD(service_level_list); | ||
284 | |||
285 | int register_service_level(struct service_level *slr) | ||
286 | { | ||
287 | struct service_level *ptr; | ||
288 | |||
289 | down_write(&service_level_sem); | ||
290 | list_for_each_entry(ptr, &service_level_list, list) | ||
291 | if (ptr == slr) { | ||
292 | up_write(&service_level_sem); | ||
293 | return -EEXIST; | ||
294 | } | ||
295 | list_add_tail(&slr->list, &service_level_list); | ||
296 | up_write(&service_level_sem); | ||
297 | return 0; | ||
298 | } | ||
299 | EXPORT_SYMBOL(register_service_level); | ||
300 | |||
301 | int unregister_service_level(struct service_level *slr) | ||
302 | { | ||
303 | struct service_level *ptr, *next; | ||
304 | int rc = -ENOENT; | ||
305 | |||
306 | down_write(&service_level_sem); | ||
307 | list_for_each_entry_safe(ptr, next, &service_level_list, list) { | ||
308 | if (ptr != slr) | ||
309 | continue; | ||
310 | list_del(&ptr->list); | ||
311 | rc = 0; | ||
312 | break; | ||
313 | } | ||
314 | up_write(&service_level_sem); | ||
315 | return rc; | ||
316 | } | ||
317 | EXPORT_SYMBOL(unregister_service_level); | ||
318 | |||
319 | static void *service_level_start(struct seq_file *m, loff_t *pos) | ||
320 | { | ||
321 | down_read(&service_level_sem); | ||
322 | return seq_list_start(&service_level_list, *pos); | ||
323 | } | ||
324 | |||
325 | static void *service_level_next(struct seq_file *m, void *p, loff_t *pos) | ||
326 | { | ||
327 | return seq_list_next(p, &service_level_list, pos); | ||
328 | } | ||
329 | |||
330 | static void service_level_stop(struct seq_file *m, void *p) | ||
331 | { | ||
332 | up_read(&service_level_sem); | ||
333 | } | ||
334 | |||
335 | static int service_level_show(struct seq_file *m, void *p) | ||
336 | { | ||
337 | struct service_level *slr; | ||
338 | |||
339 | slr = list_entry(p, struct service_level, list); | ||
340 | slr->seq_print(m, slr); | ||
341 | return 0; | ||
342 | } | ||
343 | |||
344 | static const struct seq_operations service_level_seq_ops = { | ||
345 | .start = service_level_start, | ||
346 | .next = service_level_next, | ||
347 | .stop = service_level_stop, | ||
348 | .show = service_level_show | ||
349 | }; | ||
350 | |||
351 | static int service_level_open(struct inode *inode, struct file *file) | ||
352 | { | ||
353 | return seq_open(file, &service_level_seq_ops); | ||
354 | } | ||
355 | |||
356 | static const struct file_operations service_level_ops = { | ||
357 | .open = service_level_open, | ||
358 | .read = seq_read, | ||
359 | .llseek = seq_lseek, | ||
360 | .release = seq_release | ||
361 | }; | ||
362 | |||
363 | static void service_level_vm_print(struct seq_file *m, | ||
364 | struct service_level *slr) | ||
365 | { | ||
366 | char *query_buffer, *str; | ||
367 | |||
368 | query_buffer = kmalloc(1024, GFP_KERNEL | GFP_DMA); | ||
369 | if (!query_buffer) | ||
370 | return; | ||
371 | cpcmd("QUERY CPLEVEL", query_buffer, 1024, NULL); | ||
372 | str = strchr(query_buffer, '\n'); | ||
373 | if (str) | ||
374 | *str = 0; | ||
375 | seq_printf(m, "VM: %s\n", query_buffer); | ||
376 | kfree(query_buffer); | ||
377 | } | ||
378 | |||
379 | static struct service_level service_level_vm = { | ||
380 | .seq_print = service_level_vm_print | ||
381 | }; | ||
382 | |||
383 | static __init int create_proc_service_level(void) | ||
384 | { | ||
385 | proc_create("service_levels", 0, NULL, &service_level_ops); | ||
386 | if (MACHINE_IS_VM) | ||
387 | register_service_level(&service_level_vm); | ||
388 | return 0; | ||
389 | } | ||
390 | |||
391 | subsys_initcall(create_proc_service_level); | ||
392 | |||
393 | /* | ||
394 | * Bogomips calculation based on cpu capability. | ||
395 | */ | ||
396 | |||
274 | int get_cpu_capability(unsigned int *capability) | 397 | int get_cpu_capability(unsigned int *capability) |
275 | { | 398 | { |
276 | struct sysinfo_1_2_2 *info; | 399 | struct sysinfo_1_2_2 *info; |
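The sysinfo.c additions above define the whole service-levels interface: a rw-semaphore-protected list, register/unregister helpers, and a seq_file-backed /proc/service_levels file whose show routine simply calls each entry's seq_print() callback, plus a built-in "VM: ..." entry when running under z/VM. A minimal, hypothetical consumer might look like the sketch below; every example_* name is illustrative, and struct service_level with its seq_print member and the register/unregister prototypes are assumed to be declared in <asm/sysinfo.h>, which is not part of this hunk.

/* Hypothetical module that contributes one line to /proc/service_levels. */
#include <linux/module.h>
#include <linux/seq_file.h>
#include <asm/sysinfo.h>

static void example_sl_print(struct seq_file *m, struct service_level *slr)
{
	seq_printf(m, "example: firmware level 42\n");
}

static struct service_level example_sl = {
	.seq_print = example_sl_print,
};

static int __init example_sl_init(void)
{
	/* returns -EEXIST if this entry is already on the list */
	return register_service_level(&example_sl);
}

static void __exit example_sl_exit(void)
{
	unregister_service_level(&example_sl);
}

module_init(example_sl_init);
module_exit(example_sl_exit);
MODULE_LICENSE("GPL");

Reading /proc/service_levels would then show the example line alongside the zfcp microcode line produced by zfcp_print_sl() above and, under z/VM, the QUERY CPLEVEL line from service_level_vm_print().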