diff options
author | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@ppc970.osdl.org> | 2005-04-16 18:20:36 -0400 |
commit | 1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch) | |
tree | 0bba044c4ce775e45a88a51686b5d9f90697ea9d /drivers/s390/cio |
Linux-2.6.12-rc2v2.6.12-rc2
Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.
Let it rip!
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r-- | drivers/s390/cio/Makefile | 10 | ||||
-rw-r--r-- | drivers/s390/cio/airq.c | 87 | ||||
-rw-r--r-- | drivers/s390/cio/airq.h | 10 | ||||
-rw-r--r-- | drivers/s390/cio/blacklist.c | 351 | ||||
-rw-r--r-- | drivers/s390/cio/blacklist.h | 6 | ||||
-rw-r--r-- | drivers/s390/cio/ccwgroup.c | 482 | ||||
-rw-r--r-- | drivers/s390/cio/chsc.c | 1114 | ||||
-rw-r--r-- | drivers/s390/cio/chsc.h | 66 | ||||
-rw-r--r-- | drivers/s390/cio/cio.c | 860 | ||||
-rw-r--r-- | drivers/s390/cio/cio.h | 143 | ||||
-rw-r--r-- | drivers/s390/cio/cio_debug.h | 32 | ||||
-rw-r--r-- | drivers/s390/cio/cmf.c | 1042 | ||||
-rw-r--r-- | drivers/s390/cio/css.c | 575 | ||||
-rw-r--r-- | drivers/s390/cio/css.h | 155 | ||||
-rw-r--r-- | drivers/s390/cio/device.c | 1135 | ||||
-rw-r--r-- | drivers/s390/cio/device.h | 115 | ||||
-rw-r--r-- | drivers/s390/cio/device_fsm.c | 1250 | ||||
-rw-r--r-- | drivers/s390/cio/device_id.c | 355 | ||||
-rw-r--r-- | drivers/s390/cio/device_ops.c | 603 | ||||
-rw-r--r-- | drivers/s390/cio/device_pgid.c | 448 | ||||
-rw-r--r-- | drivers/s390/cio/device_status.c | 385 | ||||
-rw-r--r-- | drivers/s390/cio/ioasm.h | 228 | ||||
-rw-r--r-- | drivers/s390/cio/qdio.c | 3468 | ||||
-rw-r--r-- | drivers/s390/cio/qdio.h | 648 |
24 files changed, 13568 insertions, 0 deletions
diff --git a/drivers/s390/cio/Makefile b/drivers/s390/cio/Makefile new file mode 100644 index 000000000000..c490c2a1c2fc --- /dev/null +++ b/drivers/s390/cio/Makefile | |||
@@ -0,0 +1,10 @@ | |||
#
# Makefile for the S/390 common i/o drivers
#

obj-y += airq.o blacklist.o chsc.o cio.o css.o

# The ccw_device object is linked together from several pieces.
ccw_device-objs += device.o device_fsm.o device_ops.o
ccw_device-objs += device_id.o device_pgid.o device_status.o
obj-y += ccw_device.o cmf.o

obj-$(CONFIG_CCWGROUP) += ccwgroup.o
obj-$(CONFIG_QDIO) += qdio.o
diff --git a/drivers/s390/cio/airq.c b/drivers/s390/cio/airq.c new file mode 100644 index 000000000000..3720e77b465f --- /dev/null +++ b/drivers/s390/cio/airq.c | |||
@@ -0,0 +1,87 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/airq.c | ||
3 | * S/390 common I/O routines -- support for adapter interruptions | ||
4 | * | ||
5 | * $Revision: 1.12 $ | ||
6 | * | ||
7 | * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, | ||
8 | * IBM Corporation | ||
9 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | ||
10 | * Cornelia Huck (cohuck@de.ibm.com) | ||
11 | * Arnd Bergmann (arndb@de.ibm.com) | ||
12 | */ | ||
13 | |||
14 | #include <linux/init.h> | ||
15 | #include <linux/module.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/rcupdate.h> | ||
18 | |||
19 | #include "cio_debug.h" | ||
20 | #include "airq.h" | ||
21 | |||
22 | static adapter_int_handler_t adapter_handler; | ||
23 | |||
24 | /* | ||
25 | * register for adapter interrupts | ||
26 | * | ||
27 | * With HiperSockets the zSeries architecture provides for | ||
28 | * means of adapter interrups, pseudo I/O interrupts that are | ||
29 | * not tied to an I/O subchannel, but to an adapter. However, | ||
30 | * it doesn't disclose the info how to enable/disable them, but | ||
31 | * to recognize them only. Perhaps we should consider them | ||
32 | * being shared interrupts, and thus build a linked list | ||
33 | * of adapter handlers ... to be evaluated ... | ||
34 | */ | ||
35 | int | ||
36 | s390_register_adapter_interrupt (adapter_int_handler_t handler) | ||
37 | { | ||
38 | int ret; | ||
39 | char dbf_txt[15]; | ||
40 | |||
41 | CIO_TRACE_EVENT (4, "rgaint"); | ||
42 | |||
43 | if (handler == NULL) | ||
44 | ret = -EINVAL; | ||
45 | else | ||
46 | ret = (cmpxchg(&adapter_handler, NULL, handler) ? -EBUSY : 0); | ||
47 | if (!ret) | ||
48 | synchronize_kernel(); | ||
49 | |||
50 | sprintf (dbf_txt, "ret:%d", ret); | ||
51 | CIO_TRACE_EVENT (4, dbf_txt); | ||
52 | |||
53 | return ret; | ||
54 | } | ||
55 | |||
56 | int | ||
57 | s390_unregister_adapter_interrupt (adapter_int_handler_t handler) | ||
58 | { | ||
59 | int ret; | ||
60 | char dbf_txt[15]; | ||
61 | |||
62 | CIO_TRACE_EVENT (4, "urgaint"); | ||
63 | |||
64 | if (handler == NULL) | ||
65 | ret = -EINVAL; | ||
66 | else { | ||
67 | adapter_handler = NULL; | ||
68 | synchronize_kernel(); | ||
69 | ret = 0; | ||
70 | } | ||
71 | sprintf (dbf_txt, "ret:%d", ret); | ||
72 | CIO_TRACE_EVENT (4, dbf_txt); | ||
73 | |||
74 | return ret; | ||
75 | } | ||
76 | |||
77 | void | ||
78 | do_adapter_IO (void) | ||
79 | { | ||
80 | CIO_TRACE_EVENT (6, "doaio"); | ||
81 | |||
82 | if (adapter_handler) | ||
83 | (*adapter_handler) (); | ||
84 | } | ||
85 | |||
86 | EXPORT_SYMBOL (s390_register_adapter_interrupt); | ||
87 | EXPORT_SYMBOL (s390_unregister_adapter_interrupt); | ||
diff --git a/drivers/s390/cio/airq.h b/drivers/s390/cio/airq.h new file mode 100644 index 000000000000..7d6be3fdcd66 --- /dev/null +++ b/drivers/s390/cio/airq.h | |||
@@ -0,0 +1,10 @@ | |||
#ifndef S390_AINTERRUPT_H
#define S390_AINTERRUPT_H

/* Callback type invoked for each adapter (pseudo I/O) interrupt. */
typedef int (*adapter_int_handler_t)(void);

/* Register/unregister the single system-wide adapter interrupt handler. */
extern int s390_register_adapter_interrupt(adapter_int_handler_t handler);
extern int s390_unregister_adapter_interrupt(adapter_int_handler_t handler);

/* Interrupt-path entry point: forwards to the registered handler. */
extern void do_adapter_IO (void);

#endif
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c new file mode 100644 index 000000000000..4a06c7d0e5e4 --- /dev/null +++ b/drivers/s390/cio/blacklist.c | |||
@@ -0,0 +1,351 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/blacklist.c | ||
3 | * S/390 common I/O routines -- blacklisting of specific devices | ||
4 | * $Revision: 1.33 $ | ||
5 | * | ||
6 | * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, | ||
7 | * IBM Corporation | ||
8 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | ||
9 | * Cornelia Huck (cohuck@de.ibm.com) | ||
10 | * Arnd Bergmann (arndb@de.ibm.com) | ||
11 | */ | ||
12 | |||
13 | #include <linux/config.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/vmalloc.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/proc_fs.h> | ||
18 | #include <linux/ctype.h> | ||
19 | #include <linux/device.h> | ||
20 | |||
21 | #include <asm/cio.h> | ||
22 | #include <asm/uaccess.h> | ||
23 | |||
24 | #include "blacklist.h" | ||
25 | #include "cio.h" | ||
26 | #include "cio_debug.h" | ||
27 | #include "css.h" | ||
28 | |||
29 | /* | ||
30 | * "Blacklisting" of certain devices: | ||
31 | * Device numbers given in the commandline as cio_ignore=... won't be known | ||
32 | * to Linux. | ||
33 | * | ||
34 | * These can be single devices or ranges of devices | ||
35 | */ | ||
36 | |||
37 | /* 65536 bits to indicate if a devno is blacklisted or not */ | ||
38 | #define __BL_DEV_WORDS (__MAX_SUBCHANNELS + (8*sizeof(long) - 1) / \ | ||
39 | (8*sizeof(long))) | ||
40 | static unsigned long bl_dev[__BL_DEV_WORDS]; | ||
41 | typedef enum {add, free} range_action; | ||
42 | |||
43 | /* | ||
44 | * Function: blacklist_range | ||
45 | * (Un-)blacklist the devices from-to | ||
46 | */ | ||
47 | static inline void | ||
48 | blacklist_range (range_action action, unsigned int from, unsigned int to) | ||
49 | { | ||
50 | if (!to) | ||
51 | to = from; | ||
52 | |||
53 | if (from > to || to > __MAX_SUBCHANNELS) { | ||
54 | printk (KERN_WARNING "Invalid blacklist range " | ||
55 | "0x%04x to 0x%04x, skipping\n", from, to); | ||
56 | return; | ||
57 | } | ||
58 | for (; from <= to; from++) { | ||
59 | if (action == add) | ||
60 | set_bit (from, bl_dev); | ||
61 | else | ||
62 | clear_bit (from, bl_dev); | ||
63 | } | ||
64 | } | ||
65 | |||
66 | /* | ||
67 | * Function: blacklist_busid | ||
68 | * Get devno/busid from given string. | ||
69 | * Shamelessly grabbed from dasd_devmap.c. | ||
70 | */ | ||
71 | static inline int | ||
72 | blacklist_busid(char **str, int *id0, int *id1, int *devno) | ||
73 | { | ||
74 | int val, old_style; | ||
75 | char *sav; | ||
76 | |||
77 | sav = *str; | ||
78 | |||
79 | /* check for leading '0x' */ | ||
80 | old_style = 0; | ||
81 | if ((*str)[0] == '0' && (*str)[1] == 'x') { | ||
82 | *str += 2; | ||
83 | old_style = 1; | ||
84 | } | ||
85 | if (!isxdigit((*str)[0])) /* We require at least one hex digit */ | ||
86 | goto confused; | ||
87 | val = simple_strtoul(*str, str, 16); | ||
88 | if (old_style || (*str)[0] != '.') { | ||
89 | *id0 = *id1 = 0; | ||
90 | if (val < 0 || val > 0xffff) | ||
91 | goto confused; | ||
92 | *devno = val; | ||
93 | if ((*str)[0] != ',' && (*str)[0] != '-' && | ||
94 | (*str)[0] != '\n' && (*str)[0] != '\0') | ||
95 | goto confused; | ||
96 | return 0; | ||
97 | } | ||
98 | /* New style x.y.z busid */ | ||
99 | if (val < 0 || val > 0xff) | ||
100 | goto confused; | ||
101 | *id0 = val; | ||
102 | (*str)++; | ||
103 | if (!isxdigit((*str)[0])) /* We require at least one hex digit */ | ||
104 | goto confused; | ||
105 | val = simple_strtoul(*str, str, 16); | ||
106 | if (val < 0 || val > 0xff || (*str)++[0] != '.') | ||
107 | goto confused; | ||
108 | *id1 = val; | ||
109 | if (!isxdigit((*str)[0])) /* We require at least one hex digit */ | ||
110 | goto confused; | ||
111 | val = simple_strtoul(*str, str, 16); | ||
112 | if (val < 0 || val > 0xffff) | ||
113 | goto confused; | ||
114 | *devno = val; | ||
115 | if ((*str)[0] != ',' && (*str)[0] != '-' && | ||
116 | (*str)[0] != '\n' && (*str)[0] != '\0') | ||
117 | goto confused; | ||
118 | return 0; | ||
119 | confused: | ||
120 | strsep(str, ",\n"); | ||
121 | printk(KERN_WARNING "Invalid cio_ignore parameter '%s'\n", sav); | ||
122 | return 1; | ||
123 | } | ||
124 | |||
125 | static inline int | ||
126 | blacklist_parse_parameters (char *str, range_action action) | ||
127 | { | ||
128 | unsigned int from, to, from_id0, to_id0, from_id1, to_id1; | ||
129 | |||
130 | while (*str != 0 && *str != '\n') { | ||
131 | range_action ra = action; | ||
132 | while(*str == ',') | ||
133 | str++; | ||
134 | if (*str == '!') { | ||
135 | ra = !action; | ||
136 | ++str; | ||
137 | } | ||
138 | |||
139 | /* | ||
140 | * Since we have to parse the proc commands and the | ||
141 | * kernel arguments we have to check four cases | ||
142 | */ | ||
143 | if (strncmp(str,"all,",4) == 0 || strcmp(str,"all") == 0 || | ||
144 | strncmp(str,"all\n",4) == 0 || strncmp(str,"all ",4) == 0) { | ||
145 | from = 0; | ||
146 | to = __MAX_SUBCHANNELS; | ||
147 | str += 3; | ||
148 | } else { | ||
149 | int rc; | ||
150 | |||
151 | rc = blacklist_busid(&str, &from_id0, | ||
152 | &from_id1, &from); | ||
153 | if (rc) | ||
154 | continue; | ||
155 | to = from; | ||
156 | to_id0 = from_id0; | ||
157 | to_id1 = from_id1; | ||
158 | if (*str == '-') { | ||
159 | str++; | ||
160 | rc = blacklist_busid(&str, &to_id0, | ||
161 | &to_id1, &to); | ||
162 | if (rc) | ||
163 | continue; | ||
164 | } | ||
165 | if (*str == '-') { | ||
166 | printk(KERN_WARNING "invalid cio_ignore " | ||
167 | "parameter '%s'\n", | ||
168 | strsep(&str, ",\n")); | ||
169 | continue; | ||
170 | } | ||
171 | if ((from_id0 != to_id0) || (from_id1 != to_id1)) { | ||
172 | printk(KERN_WARNING "invalid cio_ignore range " | ||
173 | "%x.%x.%04x-%x.%x.%04x\n", | ||
174 | from_id0, from_id1, from, | ||
175 | to_id0, to_id1, to); | ||
176 | continue; | ||
177 | } | ||
178 | } | ||
179 | /* FIXME: ignoring id0 and id1 here. */ | ||
180 | pr_debug("blacklist_setup: adding range " | ||
181 | "from 0.0.%04x to 0.0.%04x\n", from, to); | ||
182 | blacklist_range (ra, from, to); | ||
183 | } | ||
184 | return 1; | ||
185 | } | ||
186 | |||
187 | /* Parsing the commandline for blacklist parameters, e.g. to blacklist | ||
188 | * bus ids 0.0.1234, 0.0.1235 and 0.0.1236, you could use any of: | ||
189 | * - cio_ignore=1234-1236 | ||
190 | * - cio_ignore=0x1234-0x1235,1236 | ||
191 | * - cio_ignore=0x1234,1235-1236 | ||
192 | * - cio_ignore=1236 cio_ignore=1234-0x1236 | ||
193 | * - cio_ignore=1234 cio_ignore=1236 cio_ignore=0x1235 | ||
194 | * - cio_ignore=0.0.1234-0.0.1236 | ||
195 | * - cio_ignore=0.0.1234,0x1235,1236 | ||
196 | * - ... | ||
197 | */ | ||
198 | static int __init | ||
199 | blacklist_setup (char *str) | ||
200 | { | ||
201 | CIO_MSG_EVENT(6, "Reading blacklist parameters\n"); | ||
202 | return blacklist_parse_parameters (str, add); | ||
203 | } | ||
204 | |||
205 | __setup ("cio_ignore=", blacklist_setup); | ||
206 | |||
207 | /* Checking if devices are blacklisted */ | ||
208 | |||
209 | /* | ||
210 | * Function: is_blacklisted | ||
211 | * Returns 1 if the given devicenumber can be found in the blacklist, | ||
212 | * otherwise 0. | ||
213 | * Used by validate_subchannel() | ||
214 | */ | ||
215 | int | ||
216 | is_blacklisted (int devno) | ||
217 | { | ||
218 | return test_bit (devno, bl_dev); | ||
219 | } | ||
220 | |||
221 | #ifdef CONFIG_PROC_FS | ||
222 | /* | ||
223 | * Function: s390_redo_validation | ||
224 | * Look for no longer blacklisted devices | ||
225 | * FIXME: there must be a better way to do this */ | ||
226 | static inline void | ||
227 | s390_redo_validation (void) | ||
228 | { | ||
229 | unsigned int irq; | ||
230 | |||
231 | CIO_TRACE_EVENT (0, "redoval"); | ||
232 | for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { | ||
233 | int ret; | ||
234 | struct subchannel *sch; | ||
235 | |||
236 | sch = get_subchannel_by_schid(irq); | ||
237 | if (sch) { | ||
238 | /* Already known. */ | ||
239 | put_device(&sch->dev); | ||
240 | continue; | ||
241 | } | ||
242 | ret = css_probe_device(irq); | ||
243 | if (ret == -ENXIO) | ||
244 | break; /* We're through. */ | ||
245 | if (ret == -ENOMEM) | ||
246 | /* | ||
247 | * Stop validation for now. Bad, but no need for a | ||
248 | * panic. | ||
249 | */ | ||
250 | break; | ||
251 | } | ||
252 | } | ||
253 | |||
254 | /* | ||
255 | * Function: blacklist_parse_proc_parameters | ||
256 | * parse the stuff which is piped to /proc/cio_ignore | ||
257 | */ | ||
258 | static inline void | ||
259 | blacklist_parse_proc_parameters (char *buf) | ||
260 | { | ||
261 | if (strncmp (buf, "free ", 5) == 0) { | ||
262 | blacklist_parse_parameters (buf + 5, free); | ||
263 | } else if (strncmp (buf, "add ", 4) == 0) { | ||
264 | /* | ||
265 | * We don't need to check for known devices since | ||
266 | * css_probe_device will handle this correctly. | ||
267 | */ | ||
268 | blacklist_parse_parameters (buf + 4, add); | ||
269 | } else { | ||
270 | printk (KERN_WARNING "cio_ignore: Parse error; \n" | ||
271 | KERN_WARNING "try using 'free all|<devno-range>," | ||
272 | "<devno-range>,...'\n" | ||
273 | KERN_WARNING "or 'add <devno-range>," | ||
274 | "<devno-range>,...'\n"); | ||
275 | return; | ||
276 | } | ||
277 | |||
278 | s390_redo_validation (); | ||
279 | } | ||
280 | |||
281 | /* FIXME: These should be real bus ids and not home-grown ones! */ | ||
282 | static int cio_ignore_read (char *page, char **start, off_t off, | ||
283 | int count, int *eof, void *data) | ||
284 | { | ||
285 | const unsigned int entry_size = 18; /* "0.0.ABCD-0.0.EFGH\n" */ | ||
286 | long devno; | ||
287 | int len; | ||
288 | |||
289 | len = 0; | ||
290 | for (devno = off; /* abuse the page variable | ||
291 | * as counter, see fs/proc/generic.c */ | ||
292 | devno <= __MAX_SUBCHANNELS && len + entry_size < count; devno++) { | ||
293 | if (!test_bit(devno, bl_dev)) | ||
294 | continue; | ||
295 | len += sprintf(page + len, "0.0.%04lx", devno); | ||
296 | if (test_bit(devno + 1, bl_dev)) { /* print range */ | ||
297 | while (++devno < __MAX_SUBCHANNELS) | ||
298 | if (!test_bit(devno, bl_dev)) | ||
299 | break; | ||
300 | len += sprintf(page + len, "-0.0.%04lx", --devno); | ||
301 | } | ||
302 | len += sprintf(page + len, "\n"); | ||
303 | } | ||
304 | |||
305 | if (devno <= __MAX_SUBCHANNELS) | ||
306 | *eof = 1; | ||
307 | *start = (char *) (devno - off); /* number of checked entries */ | ||
308 | return len; | ||
309 | } | ||
310 | |||
311 | static int cio_ignore_write(struct file *file, const char __user *user_buf, | ||
312 | unsigned long user_len, void *data) | ||
313 | { | ||
314 | char *buf; | ||
315 | |||
316 | if (user_len > 65536) | ||
317 | user_len = 65536; | ||
318 | buf = vmalloc (user_len + 1); /* maybe better use the stack? */ | ||
319 | if (buf == NULL) | ||
320 | return -ENOMEM; | ||
321 | if (strncpy_from_user (buf, user_buf, user_len) < 0) { | ||
322 | vfree (buf); | ||
323 | return -EFAULT; | ||
324 | } | ||
325 | buf[user_len] = '\0'; | ||
326 | |||
327 | blacklist_parse_proc_parameters (buf); | ||
328 | |||
329 | vfree (buf); | ||
330 | return user_len; | ||
331 | } | ||
332 | |||
333 | static int | ||
334 | cio_ignore_proc_init (void) | ||
335 | { | ||
336 | struct proc_dir_entry *entry; | ||
337 | |||
338 | entry = create_proc_entry ("cio_ignore", S_IFREG | S_IRUGO | S_IWUSR, | ||
339 | &proc_root); | ||
340 | if (!entry) | ||
341 | return 0; | ||
342 | |||
343 | entry->read_proc = cio_ignore_read; | ||
344 | entry->write_proc = cio_ignore_write; | ||
345 | |||
346 | return 1; | ||
347 | } | ||
348 | |||
349 | __initcall (cio_ignore_proc_init); | ||
350 | |||
351 | #endif /* CONFIG_PROC_FS */ | ||
diff --git a/drivers/s390/cio/blacklist.h b/drivers/s390/cio/blacklist.h new file mode 100644 index 000000000000..fb42cafbe57c --- /dev/null +++ b/drivers/s390/cio/blacklist.h | |||
@@ -0,0 +1,6 @@ | |||
#ifndef S390_BLACKLIST_H
#define S390_BLACKLIST_H

/* Non-zero iff the given device number is on the cio_ignore blacklist. */
extern int is_blacklisted (int devno);

#endif
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c new file mode 100644 index 000000000000..21a75ee28b80 --- /dev/null +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -0,0 +1,482 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/ccwgroup.c | ||
3 | * bus driver for ccwgroup | ||
4 | * $Revision: 1.29 $ | ||
5 | * | ||
6 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | ||
7 | * IBM Corporation | ||
8 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) | ||
9 | * Cornelia Huck (cohuck@de.ibm.com) | ||
10 | */ | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <linux/list.h> | ||
15 | #include <linux/device.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/ctype.h> | ||
18 | #include <linux/dcache.h> | ||
19 | |||
20 | #include <asm/semaphore.h> | ||
21 | #include <asm/ccwdev.h> | ||
22 | #include <asm/ccwgroup.h> | ||
23 | |||
24 | /* In Linux 2.4, we had a channel device layer called "chandev" | ||
25 | * that did all sorts of obscure stuff for networking devices. | ||
26 | * This is another driver that serves as a replacement for just | ||
27 | * one of its functions, namely the translation of single subchannels | ||
28 | * to devices that use multiple subchannels. | ||
29 | */ | ||
30 | |||
31 | /* a device matches a driver if all its slave devices match the same | ||
32 | * entry of the driver */ | ||
33 | static int | ||
34 | ccwgroup_bus_match (struct device * dev, struct device_driver * drv) | ||
35 | { | ||
36 | struct ccwgroup_device *gdev; | ||
37 | struct ccwgroup_driver *gdrv; | ||
38 | |||
39 | gdev = container_of(dev, struct ccwgroup_device, dev); | ||
40 | gdrv = container_of(drv, struct ccwgroup_driver, driver); | ||
41 | |||
42 | if (gdev->creator_id == gdrv->driver_id) | ||
43 | return 1; | ||
44 | |||
45 | return 0; | ||
46 | } | ||
/*
 * Hotplug callback for the ccwgroup bus; no environment variables are
 * exported yet.
 */
static int
ccwgroup_hotplug (struct device *dev, char **envp, int num_envp, char *buffer,
		  int buffer_size)
{
	/* TODO */
	return 0;
}
54 | |||
55 | static struct bus_type ccwgroup_bus_type = { | ||
56 | .name = "ccwgroup", | ||
57 | .match = ccwgroup_bus_match, | ||
58 | .hotplug = ccwgroup_hotplug, | ||
59 | }; | ||
60 | |||
61 | static inline void | ||
62 | __ccwgroup_remove_symlinks(struct ccwgroup_device *gdev) | ||
63 | { | ||
64 | int i; | ||
65 | char str[8]; | ||
66 | |||
67 | for (i = 0; i < gdev->count; i++) { | ||
68 | sprintf(str, "cdev%d", i); | ||
69 | sysfs_remove_link(&gdev->dev.kobj, str); | ||
70 | sysfs_remove_link(&gdev->cdev[i]->dev.kobj, "group_device"); | ||
71 | } | ||
72 | |||
73 | } | ||
74 | |||
75 | /* | ||
76 | * Provide an 'ungroup' attribute so the user can remove group devices no | ||
77 | * longer needed or accidentially created. Saves memory :) | ||
78 | */ | ||
79 | static ssize_t | ||
80 | ccwgroup_ungroup_store(struct device *dev, const char *buf, size_t count) | ||
81 | { | ||
82 | struct ccwgroup_device *gdev; | ||
83 | |||
84 | gdev = to_ccwgroupdev(dev); | ||
85 | |||
86 | if (gdev->state != CCWGROUP_OFFLINE) | ||
87 | return -EINVAL; | ||
88 | |||
89 | __ccwgroup_remove_symlinks(gdev); | ||
90 | device_unregister(dev); | ||
91 | |||
92 | return count; | ||
93 | } | ||
94 | |||
95 | static DEVICE_ATTR(ungroup, 0200, NULL, ccwgroup_ungroup_store); | ||
96 | |||
97 | static void | ||
98 | ccwgroup_release (struct device *dev) | ||
99 | { | ||
100 | struct ccwgroup_device *gdev; | ||
101 | int i; | ||
102 | |||
103 | gdev = to_ccwgroupdev(dev); | ||
104 | |||
105 | for (i = 0; i < gdev->count; i++) { | ||
106 | gdev->cdev[i]->dev.driver_data = NULL; | ||
107 | put_device(&gdev->cdev[i]->dev); | ||
108 | } | ||
109 | kfree(gdev); | ||
110 | } | ||
111 | |||
112 | static inline int | ||
113 | __ccwgroup_create_symlinks(struct ccwgroup_device *gdev) | ||
114 | { | ||
115 | char str[8]; | ||
116 | int i, rc; | ||
117 | |||
118 | for (i = 0; i < gdev->count; i++) { | ||
119 | rc = sysfs_create_link(&gdev->cdev[i]->dev.kobj, &gdev->dev.kobj, | ||
120 | "group_device"); | ||
121 | if (rc) { | ||
122 | for (--i; i >= 0; i--) | ||
123 | sysfs_remove_link(&gdev->cdev[i]->dev.kobj, | ||
124 | "group_device"); | ||
125 | return rc; | ||
126 | } | ||
127 | } | ||
128 | for (i = 0; i < gdev->count; i++) { | ||
129 | sprintf(str, "cdev%d", i); | ||
130 | rc = sysfs_create_link(&gdev->dev.kobj, &gdev->cdev[i]->dev.kobj, | ||
131 | str); | ||
132 | if (rc) { | ||
133 | for (--i; i >= 0; i--) { | ||
134 | sprintf(str, "cdev%d", i); | ||
135 | sysfs_remove_link(&gdev->dev.kobj, str); | ||
136 | } | ||
137 | for (i = 0; i < gdev->count; i++) | ||
138 | sysfs_remove_link(&gdev->cdev[i]->dev.kobj, | ||
139 | "group_device"); | ||
140 | return rc; | ||
141 | } | ||
142 | } | ||
143 | return 0; | ||
144 | } | ||
145 | |||
146 | /* | ||
147 | * try to add a new ccwgroup device for one driver | ||
148 | * argc and argv[] are a list of bus_id's of devices | ||
149 | * belonging to the driver. | ||
150 | */ | ||
151 | int | ||
152 | ccwgroup_create(struct device *root, | ||
153 | unsigned int creator_id, | ||
154 | struct ccw_driver *cdrv, | ||
155 | int argc, char *argv[]) | ||
156 | { | ||
157 | struct ccwgroup_device *gdev; | ||
158 | int i; | ||
159 | int rc; | ||
160 | int del_drvdata; | ||
161 | |||
162 | if (argc > 256) /* disallow dumb users */ | ||
163 | return -EINVAL; | ||
164 | |||
165 | gdev = kmalloc(sizeof(*gdev) + argc*sizeof(gdev->cdev[0]), GFP_KERNEL); | ||
166 | if (!gdev) | ||
167 | return -ENOMEM; | ||
168 | |||
169 | memset(gdev, 0, sizeof(*gdev) + argc*sizeof(gdev->cdev[0])); | ||
170 | atomic_set(&gdev->onoff, 0); | ||
171 | |||
172 | del_drvdata = 0; | ||
173 | for (i = 0; i < argc; i++) { | ||
174 | gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); | ||
175 | |||
176 | /* all devices have to be of the same type in | ||
177 | * order to be grouped */ | ||
178 | if (!gdev->cdev[i] | ||
179 | || gdev->cdev[i]->id.driver_info != | ||
180 | gdev->cdev[0]->id.driver_info) { | ||
181 | rc = -EINVAL; | ||
182 | goto free_dev; | ||
183 | } | ||
184 | /* Don't allow a device to belong to more than one group. */ | ||
185 | if (gdev->cdev[i]->dev.driver_data) { | ||
186 | rc = -EINVAL; | ||
187 | goto free_dev; | ||
188 | } | ||
189 | } | ||
190 | for (i = 0; i < argc; i++) | ||
191 | gdev->cdev[i]->dev.driver_data = gdev; | ||
192 | del_drvdata = 1; | ||
193 | |||
194 | gdev->creator_id = creator_id; | ||
195 | gdev->count = argc; | ||
196 | gdev->dev = (struct device ) { | ||
197 | .bus = &ccwgroup_bus_type, | ||
198 | .parent = root, | ||
199 | .release = ccwgroup_release, | ||
200 | }; | ||
201 | |||
202 | snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s", | ||
203 | gdev->cdev[0]->dev.bus_id); | ||
204 | |||
205 | rc = device_register(&gdev->dev); | ||
206 | |||
207 | if (rc) | ||
208 | goto free_dev; | ||
209 | get_device(&gdev->dev); | ||
210 | rc = device_create_file(&gdev->dev, &dev_attr_ungroup); | ||
211 | |||
212 | if (rc) { | ||
213 | device_unregister(&gdev->dev); | ||
214 | goto error; | ||
215 | } | ||
216 | |||
217 | rc = __ccwgroup_create_symlinks(gdev); | ||
218 | if (!rc) { | ||
219 | put_device(&gdev->dev); | ||
220 | return 0; | ||
221 | } | ||
222 | device_remove_file(&gdev->dev, &dev_attr_ungroup); | ||
223 | device_unregister(&gdev->dev); | ||
224 | error: | ||
225 | for (i = 0; i < argc; i++) | ||
226 | if (gdev->cdev[i]) { | ||
227 | put_device(&gdev->cdev[i]->dev); | ||
228 | gdev->cdev[i]->dev.driver_data = NULL; | ||
229 | } | ||
230 | put_device(&gdev->dev); | ||
231 | return rc; | ||
232 | free_dev: | ||
233 | for (i = 0; i < argc; i++) | ||
234 | if (gdev->cdev[i]) { | ||
235 | put_device(&gdev->cdev[i]->dev); | ||
236 | if (del_drvdata) | ||
237 | gdev->cdev[i]->dev.driver_data = NULL; | ||
238 | } | ||
239 | kfree(gdev); | ||
240 | return rc; | ||
241 | } | ||
242 | |||
243 | static int __init | ||
244 | init_ccwgroup (void) | ||
245 | { | ||
246 | return bus_register (&ccwgroup_bus_type); | ||
247 | } | ||
248 | |||
249 | static void __exit | ||
250 | cleanup_ccwgroup (void) | ||
251 | { | ||
252 | bus_unregister (&ccwgroup_bus_type); | ||
253 | } | ||
254 | |||
255 | module_init(init_ccwgroup); | ||
256 | module_exit(cleanup_ccwgroup); | ||
257 | |||
258 | /************************** driver stuff ******************************/ | ||
259 | |||
260 | static int | ||
261 | ccwgroup_set_online(struct ccwgroup_device *gdev) | ||
262 | { | ||
263 | struct ccwgroup_driver *gdrv; | ||
264 | int ret; | ||
265 | |||
266 | if (atomic_compare_and_swap(0, 1, &gdev->onoff)) | ||
267 | return -EAGAIN; | ||
268 | if (gdev->state == CCWGROUP_ONLINE) { | ||
269 | ret = 0; | ||
270 | goto out; | ||
271 | } | ||
272 | if (!gdev->dev.driver) { | ||
273 | ret = -EINVAL; | ||
274 | goto out; | ||
275 | } | ||
276 | gdrv = to_ccwgroupdrv (gdev->dev.driver); | ||
277 | if ((ret = gdrv->set_online(gdev))) | ||
278 | goto out; | ||
279 | |||
280 | gdev->state = CCWGROUP_ONLINE; | ||
281 | out: | ||
282 | atomic_set(&gdev->onoff, 0); | ||
283 | return ret; | ||
284 | } | ||
285 | |||
286 | static int | ||
287 | ccwgroup_set_offline(struct ccwgroup_device *gdev) | ||
288 | { | ||
289 | struct ccwgroup_driver *gdrv; | ||
290 | int ret; | ||
291 | |||
292 | if (atomic_compare_and_swap(0, 1, &gdev->onoff)) | ||
293 | return -EAGAIN; | ||
294 | if (gdev->state == CCWGROUP_OFFLINE) { | ||
295 | ret = 0; | ||
296 | goto out; | ||
297 | } | ||
298 | if (!gdev->dev.driver) { | ||
299 | ret = -EINVAL; | ||
300 | goto out; | ||
301 | } | ||
302 | gdrv = to_ccwgroupdrv (gdev->dev.driver); | ||
303 | if ((ret = gdrv->set_offline(gdev))) | ||
304 | goto out; | ||
305 | |||
306 | gdev->state = CCWGROUP_OFFLINE; | ||
307 | out: | ||
308 | atomic_set(&gdev->onoff, 0); | ||
309 | return ret; | ||
310 | } | ||
311 | |||
312 | static ssize_t | ||
313 | ccwgroup_online_store (struct device *dev, const char *buf, size_t count) | ||
314 | { | ||
315 | struct ccwgroup_device *gdev; | ||
316 | struct ccwgroup_driver *gdrv; | ||
317 | unsigned int value; | ||
318 | int ret; | ||
319 | |||
320 | gdev = to_ccwgroupdev(dev); | ||
321 | if (!dev->driver) | ||
322 | return count; | ||
323 | |||
324 | gdrv = to_ccwgroupdrv (gdev->dev.driver); | ||
325 | if (!try_module_get(gdrv->owner)) | ||
326 | return -EINVAL; | ||
327 | |||
328 | value = simple_strtoul(buf, 0, 0); | ||
329 | ret = count; | ||
330 | if (value == 1) | ||
331 | ccwgroup_set_online(gdev); | ||
332 | else if (value == 0) | ||
333 | ccwgroup_set_offline(gdev); | ||
334 | else | ||
335 | ret = -EINVAL; | ||
336 | module_put(gdrv->owner); | ||
337 | return ret; | ||
338 | } | ||
339 | |||
340 | static ssize_t | ||
341 | ccwgroup_online_show (struct device *dev, char *buf) | ||
342 | { | ||
343 | int online; | ||
344 | |||
345 | online = (to_ccwgroupdev(dev)->state == CCWGROUP_ONLINE); | ||
346 | |||
347 | return sprintf(buf, online ? "1\n" : "0\n"); | ||
348 | } | ||
349 | |||
350 | static DEVICE_ATTR(online, 0644, ccwgroup_online_show, ccwgroup_online_store); | ||
351 | |||
352 | static int | ||
353 | ccwgroup_probe (struct device *dev) | ||
354 | { | ||
355 | struct ccwgroup_device *gdev; | ||
356 | struct ccwgroup_driver *gdrv; | ||
357 | |||
358 | int ret; | ||
359 | |||
360 | gdev = to_ccwgroupdev(dev); | ||
361 | gdrv = to_ccwgroupdrv(dev->driver); | ||
362 | |||
363 | if ((ret = device_create_file(dev, &dev_attr_online))) | ||
364 | return ret; | ||
365 | |||
366 | pr_debug("%s: device %s\n", __func__, gdev->dev.bus_id); | ||
367 | ret = gdrv->probe ? gdrv->probe(gdev) : -ENODEV; | ||
368 | if (ret) | ||
369 | device_remove_file(dev, &dev_attr_online); | ||
370 | |||
371 | return ret; | ||
372 | } | ||
373 | |||
374 | static int | ||
375 | ccwgroup_remove (struct device *dev) | ||
376 | { | ||
377 | struct ccwgroup_device *gdev; | ||
378 | struct ccwgroup_driver *gdrv; | ||
379 | |||
380 | gdev = to_ccwgroupdev(dev); | ||
381 | gdrv = to_ccwgroupdrv(dev->driver); | ||
382 | |||
383 | pr_debug("%s: device %s\n", __func__, gdev->dev.bus_id); | ||
384 | |||
385 | device_remove_file(dev, &dev_attr_online); | ||
386 | |||
387 | if (gdrv && gdrv->remove) | ||
388 | gdrv->remove(gdev); | ||
389 | return 0; | ||
390 | } | ||
391 | |||
392 | int | ||
393 | ccwgroup_driver_register (struct ccwgroup_driver *cdriver) | ||
394 | { | ||
395 | /* register our new driver with the core */ | ||
396 | cdriver->driver = (struct device_driver) { | ||
397 | .bus = &ccwgroup_bus_type, | ||
398 | .name = cdriver->name, | ||
399 | .probe = ccwgroup_probe, | ||
400 | .remove = ccwgroup_remove, | ||
401 | }; | ||
402 | |||
403 | return driver_register(&cdriver->driver); | ||
404 | } | ||
405 | |||
406 | static inline struct device * | ||
407 | __get_next_ccwgroup_device(struct device_driver *drv) | ||
408 | { | ||
409 | struct device *dev, *d; | ||
410 | |||
411 | down_read(&drv->bus->subsys.rwsem); | ||
412 | dev = NULL; | ||
413 | list_for_each_entry(d, &drv->devices, driver_list) { | ||
414 | dev = get_device(d); | ||
415 | if (dev) | ||
416 | break; | ||
417 | } | ||
418 | up_read(&drv->bus->subsys.rwsem); | ||
419 | return dev; | ||
420 | } | ||
421 | |||
422 | void | ||
423 | ccwgroup_driver_unregister (struct ccwgroup_driver *cdriver) | ||
424 | { | ||
425 | struct device *dev; | ||
426 | |||
427 | /* We don't want ccwgroup devices to live longer than their driver. */ | ||
428 | get_driver(&cdriver->driver); | ||
429 | while ((dev = __get_next_ccwgroup_device(&cdriver->driver))) { | ||
430 | __ccwgroup_remove_symlinks(to_ccwgroupdev(dev)); | ||
431 | device_unregister(dev); | ||
432 | put_device(dev); | ||
433 | }; | ||
434 | put_driver(&cdriver->driver); | ||
435 | driver_unregister(&cdriver->driver); | ||
436 | } | ||
437 | |||
/*
 * Per-cdev probe hook for group-capable ccw drivers; grouping happens
 * explicitly through ccwgroup_create(), so there is nothing to do.
 */
int
ccwgroup_probe_ccwdev(struct ccw_device *cdev)
{
	return 0;
}
443 | |||
444 | static inline struct ccwgroup_device * | ||
445 | __ccwgroup_get_gdev_by_cdev(struct ccw_device *cdev) | ||
446 | { | ||
447 | struct ccwgroup_device *gdev; | ||
448 | |||
449 | if (cdev->dev.driver_data) { | ||
450 | gdev = (struct ccwgroup_device *)cdev->dev.driver_data; | ||
451 | if (get_device(&gdev->dev)) { | ||
452 | if (!list_empty(&gdev->dev.node)) | ||
453 | return gdev; | ||
454 | put_device(&gdev->dev); | ||
455 | } | ||
456 | return NULL; | ||
457 | } | ||
458 | return NULL; | ||
459 | } | ||
460 | |||
461 | void | ||
462 | ccwgroup_remove_ccwdev(struct ccw_device *cdev) | ||
463 | { | ||
464 | struct ccwgroup_device *gdev; | ||
465 | |||
466 | /* Ignore offlining errors, device is gone anyway. */ | ||
467 | ccw_device_set_offline(cdev); | ||
468 | /* If one of its devices is gone, the whole group is done for. */ | ||
469 | gdev = __ccwgroup_get_gdev_by_cdev(cdev); | ||
470 | if (gdev) { | ||
471 | __ccwgroup_remove_symlinks(gdev); | ||
472 | device_unregister(&gdev->dev); | ||
473 | put_device(&gdev->dev); | ||
474 | } | ||
475 | } | ||
476 | |||
477 | MODULE_LICENSE("GPL"); | ||
478 | EXPORT_SYMBOL(ccwgroup_driver_register); | ||
479 | EXPORT_SYMBOL(ccwgroup_driver_unregister); | ||
480 | EXPORT_SYMBOL(ccwgroup_create); | ||
481 | EXPORT_SYMBOL(ccwgroup_probe_ccwdev); | ||
482 | EXPORT_SYMBOL(ccwgroup_remove_ccwdev); | ||
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c new file mode 100644 index 000000000000..b35fe12e6bfc --- /dev/null +++ b/drivers/s390/cio/chsc.c | |||
@@ -0,0 +1,1114 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/chsc.c | ||
3 | * S/390 common I/O routines -- channel subsystem call | ||
4 | * $Revision: 1.119 $ | ||
5 | * | ||
6 | * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, | ||
7 | * IBM Corporation | ||
8 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | ||
9 | * Cornelia Huck (cohuck@de.ibm.com) | ||
10 | * Arnd Bergmann (arndb@de.ibm.com) | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/config.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/device.h> | ||
18 | |||
19 | #include <asm/cio.h> | ||
20 | |||
21 | #include "css.h" | ||
22 | #include "cio.h" | ||
23 | #include "cio_debug.h" | ||
24 | #include "ioasm.h" | ||
25 | #include "chsc.h" | ||
26 | |||
27 | static struct channel_path *chps[NR_CHPIDS]; | ||
28 | |||
29 | static void *sei_page; | ||
30 | |||
31 | static int new_channel_path(int chpid); | ||
32 | |||
/*
 * Record the logical (vary) state of a channel path: nonzero = online.
 * NOTE(review): dereferences chps[chp] unconditionally — assumes the
 * channel path structure was allocated via new_channel_path(); callers
 * must have verified get_chp_status() >= 0 first.
 */
static inline void
set_chp_logically_online(int chp, int onoff)
{
	chps[chp]->state = onoff;
}
38 | |||
39 | static int | ||
40 | get_chp_status(int chp) | ||
41 | { | ||
42 | return (chps[chp] ? chps[chp]->state : -ENODEV); | ||
43 | } | ||
44 | |||
45 | void | ||
46 | chsc_validate_chpids(struct subchannel *sch) | ||
47 | { | ||
48 | int mask, chp; | ||
49 | |||
50 | for (chp = 0; chp <= 7; chp++) { | ||
51 | mask = 0x80 >> chp; | ||
52 | if (!get_chp_status(sch->schib.pmcw.chpid[chp])) | ||
53 | /* disable using this path */ | ||
54 | sch->opm &= ~mask; | ||
55 | } | ||
56 | } | ||
57 | |||
58 | void | ||
59 | chpid_is_actually_online(int chp) | ||
60 | { | ||
61 | int state; | ||
62 | |||
63 | state = get_chp_status(chp); | ||
64 | if (state < 0) { | ||
65 | need_rescan = 1; | ||
66 | queue_work(slow_path_wq, &slow_path_work); | ||
67 | } else | ||
68 | WARN_ON(!state); | ||
69 | } | ||
70 | |||
/* FIXME: this is _always_ called for every subchannel. shouldn't we
 *	  process more than one at a time? */
/*
 * Issue a "store subchannel description" (ssd) chsc for one subchannel
 * and copy the relevant data (subchannel type, chpids, full link
 * addresses) into sch->ssd_info. @page is a zeroed DMA-capable page
 * used as the chsc request/response area; the caller holds sch->lock
 * (see css_get_ssd_info()).
 *
 * Returns 0 on success (including unknown-but-ignorable subchannel
 * types), -ENODEV/-EBUSY for bad chsc condition codes, or
 * -EINVAL/-EOPNOTSUPP/-EIO for error response codes.
 */
static int
chsc_get_sch_desc_irq(struct subchannel *sch, void *page)
{
	int ccode, j;

	/* Layout of the store-subchannel-description request/response. */
	struct {
		struct chsc_header request;
		u16 reserved1;
		u16 f_sch;	  /* first subchannel */
		u16 reserved2;
		u16 l_sch;	  /* last subchannel */
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8 sch_valid : 1;
		u8 dev_valid : 1;
		u8 st	     : 3; /* subchannel type */
		u8 zeroes    : 3;
		u8  unit_addr;	  /* unit address */
		u16 devno;	  /* device number */
		u8 path_mask;
		u8 fla_valid_mask;
		u16 sch;	  /* subchannel */
		u8 chpid[8];	  /* chpids 0-7 */
		u16 fla[8];	  /* full link addresses 0-7 */
	} *ssd_area;

	ssd_area = page;

	/* 16-byte request block, command code 0x0004 (store subchannel
	 * description), covering exactly this subchannel. */
	ssd_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code = 0x0004,
	};

	ssd_area->f_sch = sch->irq;
	ssd_area->l_sch = sch->irq;

	ccode = chsc(ssd_area);
	if (ccode > 0) {
		/* cc 3: subchannel not operational; cc 1/2: busy. */
		pr_debug("chsc returned with ccode = %d\n", ccode);
		return (ccode == 3) ? -ENODEV : -EBUSY;
	}

	switch (ssd_area->response.code) {
	case 0x0001: /* everything ok */
		break;
	case 0x0002:
		CIO_CRW_EVENT(2, "Invalid command!\n");
		return -EINVAL;
	case 0x0003:
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		return -EINVAL;
	case 0x0004:
		CIO_CRW_EVENT(2, "Model does not provide ssd\n");
		return -EOPNOTSUPP;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      ssd_area->response.code);
		return -EIO;
	}

	/*
	 * ssd_area->st stores the type of the detected
	 * subchannel, with the following definitions:
	 *
	 * 0: I/O subchannel:	  All fields have meaning
	 * 1: CHSC subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 * 2: Message subchannel: All fields except unit_addr
	 *			  have meaning
	 * 3: ADM subchannel:	  Only sch_val, st and sch
	 *			  have meaning
	 *
	 * Other types are currently undefined.
	 */
	if (ssd_area->st > 3) { /* uhm, that looks strange... */
		CIO_CRW_EVENT(0, "Strange subchannel type %d"
			      " for sch %04x\n", ssd_area->st, sch->irq);
		/*
		 * There may have been a new subchannel type defined in the
		 * time since this code was written; since we don't know which
		 * fields have meaning and what to do with it we just jump out
		 */
		return 0;
	} else {
		const char *type[4] = {"I/O", "chsc", "message", "ADM"};
		CIO_CRW_EVENT(6, "ssd: sch %04x is %s subchannel\n",
			      sch->irq, type[ssd_area->st]);

		sch->ssd_info.valid = 1;
		sch->ssd_info.type = ssd_area->st;
	}

	/* Only I/O and message subchannels carry chpid/fla data. */
	if (ssd_area->st == 0 || ssd_area->st == 2) {
		for (j = 0; j < 8; j++) {
			/* Copy only paths that are installed and have a
			 * valid full link address. */
			if (!((0x80 >> j) & ssd_area->path_mask &
			      ssd_area->fla_valid_mask))
				continue;
			sch->ssd_info.chpid[j] = ssd_area->chpid[j];
			sch->ssd_info.fla[j]   = ssd_area->fla[j];
		}
	}
	return 0;
}
177 | |||
178 | int | ||
179 | css_get_ssd_info(struct subchannel *sch) | ||
180 | { | ||
181 | int ret; | ||
182 | void *page; | ||
183 | |||
184 | page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
185 | if (!page) | ||
186 | return -ENOMEM; | ||
187 | spin_lock_irq(&sch->lock); | ||
188 | ret = chsc_get_sch_desc_irq(sch, page); | ||
189 | if (ret) { | ||
190 | static int cio_chsc_err_msg; | ||
191 | |||
192 | if (!cio_chsc_err_msg) { | ||
193 | printk(KERN_ERR | ||
194 | "chsc_get_sch_descriptions:" | ||
195 | " Error %d while doing chsc; " | ||
196 | "processing some machine checks may " | ||
197 | "not work\n", ret); | ||
198 | cio_chsc_err_msg = 1; | ||
199 | } | ||
200 | } | ||
201 | spin_unlock_irq(&sch->lock); | ||
202 | free_page((unsigned long)page); | ||
203 | if (!ret) { | ||
204 | int j, chpid; | ||
205 | /* Allocate channel path structures, if needed. */ | ||
206 | for (j = 0; j < 8; j++) { | ||
207 | chpid = sch->ssd_info.chpid[j]; | ||
208 | if (chpid && (get_chp_status(chpid) < 0)) | ||
209 | new_channel_path(chpid); | ||
210 | } | ||
211 | } | ||
212 | return ret; | ||
213 | } | ||
214 | |||
/*
 * bus_for_each_dev callback, invoked for every subchannel when a chpid
 * goes away (see s390_set_chpid_offline). If the subchannel uses that
 * chpid, terminate any I/O that is stuck on the dead path and either
 * trigger path verification or, when the device cannot survive the
 * loss (gone, single-pathed, or last verified path), queue it for slow
 * re-evaluation. Always returns 0 so the bus iteration continues.
 */
static int
s390_subchannel_remove_chpid(struct device *dev, void *data)
{
	int j;
	int mask;
	struct subchannel *sch;
	__u8 *chpid;
	struct schib schib;

	sch = to_subchannel(dev);
	chpid = data;
	/* Does this subchannel use the chpid at all? */
	for (j = 0; j < 8; j++)
		if (sch->schib.pmcw.chpid[j] == *chpid)
			break;
	if (j >= 8)
		return 0;

	mask = 0x80 >> j;
	spin_lock(&sch->lock);

	stsch(sch->irq, &schib);
	if (!schib.pmcw.dnv)
		/* Device number no longer valid: device is gone. */
		goto out_unreg;
	memcpy(&sch->schib, &schib, sizeof(struct schib));
	/* Check for single path devices. */
	if (sch->schib.pmcw.pim == 0x80)
		goto out_unreg;
	if (sch->vpm == mask)
		/* The lost path was the only verified one. */
		goto out_unreg;

	/*
	 * A function is pending or active on the lost path: try to
	 * cancel it, escalating to clear if cancel is not applicable.
	 */
	if ((sch->schib.scsw.actl & (SCSW_ACTL_CLEAR_PEND |
				     SCSW_ACTL_HALT_PEND |
				     SCSW_ACTL_START_PEND |
				     SCSW_ACTL_RESUME_PEND)) &&
	    (sch->schib.pmcw.lpum == mask)) {
		int cc = cio_cancel(sch);

		if (cc == -ENODEV)
			goto out_unreg;

		if (cc == -EINVAL) {
			cc = cio_clear(sch);
			if (cc == -ENODEV)
				goto out_unreg;
			/* Call handler. */
			if (sch->driver && sch->driver->termination)
				sch->driver->termination(&sch->dev);
			goto out_unlock;
		}
	} else if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
		   (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
		   (sch->schib.pmcw.lpum == mask)) {
		int cc;

		/* I/O actively running on the lost path: clear it. */
		cc = cio_clear(sch);
		if (cc == -ENODEV)
			goto out_unreg;
		/* Call handler. */
		if (sch->driver && sch->driver->termination)
			sch->driver->termination(&sch->dev);
		goto out_unlock;
	}

	/* trigger path verification. */
	if (sch->driver && sch->driver->verify)
		sch->driver->verify(&sch->dev);
out_unlock:
	spin_unlock(&sch->lock);
	return 0;
out_unreg:
	/* Device unusable: drop all paths and queue for slow handling. */
	spin_unlock(&sch->lock);
	sch->lpm = 0;
	if (css_enqueue_subchannel_slow(sch->irq)) {
		css_clear_subchannel_slow_list();
		need_rescan = 1;
	}
	return 0;
}
293 | |||
294 | static inline void | ||
295 | s390_set_chpid_offline( __u8 chpid) | ||
296 | { | ||
297 | char dbf_txt[15]; | ||
298 | |||
299 | sprintf(dbf_txt, "chpr%x", chpid); | ||
300 | CIO_TRACE_EVENT(2, dbf_txt); | ||
301 | |||
302 | if (get_chp_status(chpid) <= 0) | ||
303 | return; | ||
304 | |||
305 | bus_for_each_dev(&css_bus_type, NULL, &chpid, | ||
306 | s390_subchannel_remove_chpid); | ||
307 | |||
308 | if (need_rescan || css_slow_subchannels_exist()) | ||
309 | queue_work(slow_path_wq, &slow_path_work); | ||
310 | } | ||
311 | |||
312 | static int | ||
313 | s390_process_res_acc_sch(u8 chpid, __u16 fla, u32 fla_mask, | ||
314 | struct subchannel *sch) | ||
315 | { | ||
316 | int found; | ||
317 | int chp; | ||
318 | int ccode; | ||
319 | |||
320 | found = 0; | ||
321 | for (chp = 0; chp <= 7; chp++) | ||
322 | /* | ||
323 | * check if chpid is in information updated by ssd | ||
324 | */ | ||
325 | if (sch->ssd_info.valid && | ||
326 | sch->ssd_info.chpid[chp] == chpid && | ||
327 | (sch->ssd_info.fla[chp] & fla_mask) == fla) { | ||
328 | found = 1; | ||
329 | break; | ||
330 | } | ||
331 | |||
332 | if (found == 0) | ||
333 | return 0; | ||
334 | |||
335 | /* | ||
336 | * Do a stsch to update our subchannel structure with the | ||
337 | * new path information and eventually check for logically | ||
338 | * offline chpids. | ||
339 | */ | ||
340 | ccode = stsch(sch->irq, &sch->schib); | ||
341 | if (ccode > 0) | ||
342 | return 0; | ||
343 | |||
344 | return 0x80 >> chp; | ||
345 | } | ||
346 | |||
/*
 * Process an I/O resource accessibility event for channel path @chpid,
 * optionally restricted by a (partial) full link address: @fla with
 * @fla_mask 0xff00 matches a link address, 0xffff a full link address,
 * and fla_mask 0 means "all subchannels on the chpid".
 *
 * Iterates over the whole subchannel number space: known subchannels
 * get their path masks refreshed (possibly triggering reprobe or path
 * verification); unknown-but-present subchannels are queued on the
 * slow path for recognition. Returns 0 or -EAGAIN when slow-path
 * processing is still needed.
 */
static int
s390_process_res_acc (u8 chpid, __u16 fla, u32 fla_mask)
{
	struct subchannel *sch;
	int irq, rc;
	char dbf_txt[15];

	sprintf(dbf_txt, "accpr%x", chpid);
	CIO_TRACE_EVENT( 2, dbf_txt);
	if (fla != 0) {
		sprintf(dbf_txt, "fla%x", fla);
		CIO_TRACE_EVENT( 2, dbf_txt);
	}

	/*
	 * I/O resources may have become accessible.
	 * Scan through all subchannels that may be concerned and
	 * do a validation on those.
	 * The more information we have (info), the less scanning
	 * will we have to do.
	 */

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	rc = 0;
	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
		int chp_mask, old_lpm;

		sch = get_subchannel_by_schid(irq);
		if (!sch) {
			struct schib schib;
			int ret;
			/*
			 * We don't know the device yet, but since a path
			 * may be available now to the device we'll have
			 * to do recognition again.
			 * Since we don't have any idea about which chpid
			 * that beast may be on we'll have to do a stsch
			 * on all devices, grr...
			 */
			if (stsch(irq, &schib)) {
				/* We're through */
				if (need_rescan)
					rc = -EAGAIN;
				break;
			}
			if (need_rescan) {
				rc = -EAGAIN;
				continue;
			}
			/* Put it on the slow path. */
			ret = css_enqueue_subchannel_slow(irq);
			if (ret) {
				css_clear_subchannel_slow_list();
				need_rescan = 1;
			}
			rc = -EAGAIN;
			continue;
		}

		spin_lock_irq(&sch->lock);

		chp_mask = s390_process_res_acc_sch(chpid, fla, fla_mask, sch);

		if (chp_mask == 0) {
			/* Subchannel not affected by this event. */
			spin_unlock_irq(&sch->lock);

			/* With a full link address only one subchannel can
			 * match; stop scanning in that case. */
			if (fla_mask != 0)
				break;
			else
				continue;
		}
		old_lpm = sch->lpm;
		/* Merge the newly accessible path into the last path mask. */
		sch->lpm = ((sch->schib.pmcw.pim &
			     sch->schib.pmcw.pam &
			     sch->schib.pmcw.pom)
			    | chp_mask) & sch->opm;
		if (!old_lpm && sch->lpm)
			/* Device had no path at all and has one now. */
			device_trigger_reprobe(sch);
		else if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);

		spin_unlock_irq(&sch->lock);
		put_device(&sch->dev);
		if (fla_mask != 0)
			break;
	}
	return rc;
}
438 | |||
439 | static int | ||
440 | __get_chpid_from_lir(void *data) | ||
441 | { | ||
442 | struct lir { | ||
443 | u8 iq; | ||
444 | u8 ic; | ||
445 | u16 sci; | ||
446 | /* incident-node descriptor */ | ||
447 | u32 indesc[28]; | ||
448 | /* attached-node descriptor */ | ||
449 | u32 andesc[28]; | ||
450 | /* incident-specific information */ | ||
451 | u32 isinfo[28]; | ||
452 | } *lir; | ||
453 | |||
454 | lir = (struct lir*) data; | ||
455 | if (!(lir->iq&0x80)) | ||
456 | /* NULL link incident record */ | ||
457 | return -EINVAL; | ||
458 | if (!(lir->indesc[0]&0xc0000000)) | ||
459 | /* node descriptor not valid */ | ||
460 | return -EINVAL; | ||
461 | if (!(lir->indesc[0]&0x10000000)) | ||
462 | /* don't handle device-type nodes - FIXME */ | ||
463 | return -EINVAL; | ||
464 | /* Byte 3 contains the chpid. Could also be CTCA, but we don't care */ | ||
465 | |||
466 | return (u16) (lir->indesc[0]&0x000000ff); | ||
467 | } | ||
468 | |||
/*
 * Process channel-report-words with channel subsystem source: repeatedly
 * issue "store event information" (chsc command 0x000e) into the
 * statically allocated sei_page and act on each stored event (link
 * incidents, resource accessibility) until the hardware reports no
 * further pending events (flags bit 0x80 clear).
 *
 * Called only from the machine check handler thread, so sei_page needs
 * no locking. Returns 0, or -EAGAIN propagated from
 * s390_process_res_acc() when slow-path rescanning is needed.
 */
int
chsc_process_crw(void)
{
	int chpid, ret;
	/* Layout of the store-event-information request/response. */
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u8  flags;
		u8  vf;		/* validity flags */
		u8  rs;		/* reporting source */
		u8  cc;		/* content code */
		u16 fla;	/* full link address */
		u16 rsid;	/* reporting source id */
		u32 reserved5;
		u32 reserved6;
		u32 ccdf[96];	/* content-code dependent field */
		/* ccdf has to be big enough for a link-incident record */
	} *sei_area;

	if (!sei_page)
		return 0;
	/*
	 * build the chsc request block for store event information
	 * and do the call
	 * This function is only called by the machine check handler thread,
	 * so we don't need locking for the sei_page.
	 */
	sei_area = sei_page;

	CIO_TRACE_EVENT( 2, "prcss");
	ret = 0;
	do {
		int ccode, status;
		memset(sei_area, 0, sizeof(*sei_area));

		sei_area->request = (struct chsc_header) {
			.length = 0x0010,
			.code = 0x000e,
		};

		ccode = chsc(sei_area);
		if (ccode > 0)
			return 0;

		switch (sei_area->response.code) {
			/* for debug purposes, check for problems */
		case 0x0001:
			CIO_CRW_EVENT(4, "chsc_process_crw: event information "
					"successfully stored\n");
			break; /* everything ok */
		case 0x0002:
			CIO_CRW_EVENT(2,
				      "chsc_process_crw: invalid command!\n");
			return 0;
		case 0x0003:
			CIO_CRW_EVENT(2, "chsc_process_crw: error in chsc "
				      "request block!\n");
			return 0;
		case 0x0005:
			CIO_CRW_EVENT(2, "chsc_process_crw: no event "
				      "information stored\n");
			return 0;
		default:
			CIO_CRW_EVENT(2, "chsc_process_crw: chsc response %d\n",
				      sei_area->response.code);
			return 0;
		}

		/* Check if we might have lost some information. */
		if (sei_area->flags & 0x40)
			CIO_CRW_EVENT(2, "chsc_process_crw: Event information "
				       "has been lost due to overflow!\n");

		if (sei_area->rs != 4) {
			/* We only handle chpid reporting sources (rs == 4). */
			CIO_CRW_EVENT(2, "chsc_process_crw: reporting source "
				      "(%04X) isn't a chpid!\n",
				      sei_area->rsid);
			continue;
		}

		/* which kind of information was stored? */
		switch (sei_area->cc) {
		case 1: /* link incident*/
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports link incident,"
				      " reporting source is chpid %x\n",
				      sei_area->rsid);
			chpid = __get_chpid_from_lir(sei_area->ccdf);
			if (chpid < 0)
				CIO_CRW_EVENT(4, "%s: Invalid LIR, skipping\n",
					      __FUNCTION__);
			else
				s390_set_chpid_offline(chpid);
			break;

		case 2: /* i/o resource accessibiliy */
			CIO_CRW_EVENT(4, "chsc_process_crw: "
				      "channel subsystem reports some I/O "
				      "devices may have become accessible\n");
			pr_debug("Data received after sei: \n");
			pr_debug("Validity flags: %x\n", sei_area->vf);

			/* allocate a new channel path structure, if needed */
			status = get_chp_status(sei_area->rsid);
			if (status < 0)
				new_channel_path(sei_area->rsid);
			else if (!status)
				/*
				 * NOTE(review): a logically varied-off chpid
				 * aborts the whole event loop here, not just
				 * this event — possibly intentional, but
				 * worth confirming (continue may be meant).
				 */
				return 0;
			/* vf bits select how precise the link address is. */
			if ((sei_area->vf & 0x80) == 0) {
				pr_debug("chpid: %x\n", sei_area->rsid);
				ret = s390_process_res_acc(sei_area->rsid,
							   0, 0);
			} else if ((sei_area->vf & 0xc0) == 0x80) {
				pr_debug("chpid: %x link addr: %x\n",
					 sei_area->rsid, sei_area->fla);
				ret = s390_process_res_acc(sei_area->rsid,
							   sei_area->fla,
							   0xff00);
			} else if ((sei_area->vf & 0xc0) == 0xc0) {
				pr_debug("chpid: %x full link addr: %x\n",
					 sei_area->rsid, sei_area->fla);
				ret = s390_process_res_acc(sei_area->rsid,
							   sei_area->fla,
							   0xffff);
			}
			pr_debug("\n");

			break;

		default: /* other stuff */
			CIO_CRW_EVENT(4, "chsc_process_crw: event %d\n",
				      sei_area->cc);
			break;
		}
	} while (sei_area->flags & 0x80);
	return ret;
}
610 | |||
/*
 * A channel path has appeared. Walk the whole subchannel number space:
 * subchannels already known that use @chpid get their last path mask
 * refreshed and path verification triggered; subchannels that exist in
 * hardware but are unknown to us are queued on the slow path for
 * recognition. Returns 0 or -EAGAIN when slow-path work is pending.
 */
static int
chp_add(int chpid)
{
	struct subchannel *sch;
	int irq, ret, rc;
	char dbf_txt[15];

	if (!get_chp_status(chpid))
		return 0; /* no need to do the rest */

	sprintf(dbf_txt, "cadd%x", chpid);
	CIO_TRACE_EVENT(2, dbf_txt);

	rc = 0;
	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
		int i;

		sch = get_subchannel_by_schid(irq);
		if (!sch) {
			struct schib schib;

			if (stsch(irq, &schib)) {
				/* We're through */
				if (need_rescan)
					rc = -EAGAIN;
				break;
			}
			if (need_rescan) {
				rc = -EAGAIN;
				continue;
			}
			/* Put it on the slow path. */
			ret = css_enqueue_subchannel_slow(irq);
			if (ret) {
				css_clear_subchannel_slow_list();
				need_rescan = 1;
			}
			rc = -EAGAIN;
			continue;
		}

		spin_lock(&sch->lock);
		/* Does this subchannel use the new chpid? */
		for (i=0; i<8; i++)
			if (sch->schib.pmcw.chpid[i] == chpid) {
				if (stsch(sch->irq, &sch->schib) != 0) {
					/* Endgame. */
					spin_unlock(&sch->lock);
					return rc;
				}
				break;
			}
		if (i==8) {
			spin_unlock(&sch->lock);
			return rc;
		}
		/* Merge the new path into the last path mask. */
		sch->lpm = ((sch->schib.pmcw.pim &
			     sch->schib.pmcw.pam &
			     sch->schib.pmcw.pom)
			    | 0x80 >> i) & sch->opm;

		if (sch->driver && sch->driver->verify)
			sch->driver->verify(&sch->dev);

		spin_unlock(&sch->lock);
		put_device(&sch->dev);
	}
	return rc;
}
679 | |||
680 | /* | ||
681 | * Handling of crw machine checks with channel path source. | ||
682 | */ | ||
int
chp_process_crw(int chpid, int on)
{
	if (!on) {
		/*
		 * The path went away; use the link incident routine.
		 * De-registration of affected devices is async anyway.
		 */
		s390_set_chpid_offline(chpid);
		return 0;
	}
	/*
	 * The path appeared. Make sure a channel path structure exists
	 * for it, then scan directly for devices reachable through it
	 * (avoids the extra overhead in process_rec_acc).
	 */
	if (get_chp_status(chpid) < 0)
		new_channel_path(chpid);
	return chp_add(chpid);
}
700 | |||
701 | static inline int | ||
702 | __check_for_io_and_kill(struct subchannel *sch, int index) | ||
703 | { | ||
704 | int cc; | ||
705 | |||
706 | if (!device_is_online(sch)) | ||
707 | /* cio could be doing I/O. */ | ||
708 | return 0; | ||
709 | cc = stsch(sch->irq, &sch->schib); | ||
710 | if (cc) | ||
711 | return 0; | ||
712 | if (sch->schib.scsw.actl && sch->schib.pmcw.lpum == (0x80 >> index)) { | ||
713 | device_set_waiting(sch); | ||
714 | return 1; | ||
715 | } | ||
716 | return 0; | ||
717 | } | ||
718 | |||
/*
 * Apply a vary on/off of @chpid to a single subchannel: adjust opm and
 * lpm, then either reprobe (first path came back), verify paths, or —
 * when the last path was varied off — give running I/O a grace period
 * and queue the subchannel for slow re-evaluation.
 */
static inline void
__s390_subchannel_vary_chpid(struct subchannel *sch, __u8 chpid, int on)
{
	int chp, old_lpm;
	unsigned long flags;

	if (!sch->ssd_info.valid)
		return;

	spin_lock_irqsave(&sch->lock, flags);
	old_lpm = sch->lpm;
	for (chp = 0; chp < 8; chp++) {
		if (sch->ssd_info.chpid[chp] != chpid)
			continue;

		if (on) {
			sch->opm |= (0x80 >> chp);
			sch->lpm |= (0x80 >> chp);
			if (!old_lpm)
				/* Device regained its first path. */
				device_trigger_reprobe(sch);
			else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		} else {
			sch->opm &= ~(0x80 >> chp);
			sch->lpm &= ~(0x80 >> chp);
			/*
			 * Give running I/O a grace period in which it
			 * can successfully terminate, even using the
			 * just varied off path. Then kill it.
			 */
			if (!__check_for_io_and_kill(sch, chp) && !sch->lpm) {
				if (css_enqueue_subchannel_slow(sch->irq)) {
					css_clear_subchannel_slow_list();
					need_rescan = 1;
				}
			} else if (sch->driver && sch->driver->verify)
				sch->driver->verify(&sch->dev);
		}
		break;
	}
	spin_unlock_irqrestore(&sch->lock, flags);
}
761 | |||
762 | static int | ||
763 | s390_subchannel_vary_chpid_off(struct device *dev, void *data) | ||
764 | { | ||
765 | struct subchannel *sch; | ||
766 | __u8 *chpid; | ||
767 | |||
768 | sch = to_subchannel(dev); | ||
769 | chpid = data; | ||
770 | |||
771 | __s390_subchannel_vary_chpid(sch, *chpid, 0); | ||
772 | return 0; | ||
773 | } | ||
774 | |||
775 | static int | ||
776 | s390_subchannel_vary_chpid_on(struct device *dev, void *data) | ||
777 | { | ||
778 | struct subchannel *sch; | ||
779 | __u8 *chpid; | ||
780 | |||
781 | sch = to_subchannel(dev); | ||
782 | chpid = data; | ||
783 | |||
784 | __s390_subchannel_vary_chpid(sch, *chpid, 1); | ||
785 | return 0; | ||
786 | } | ||
787 | |||
788 | /* | ||
789 | * Function: s390_vary_chpid | ||
790 | * Varies the specified chpid online or offline | ||
791 | */ | ||
792 | static int | ||
793 | s390_vary_chpid( __u8 chpid, int on) | ||
794 | { | ||
795 | char dbf_text[15]; | ||
796 | int status, irq, ret; | ||
797 | struct subchannel *sch; | ||
798 | |||
799 | sprintf(dbf_text, on?"varyon%x":"varyoff%x", chpid); | ||
800 | CIO_TRACE_EVENT( 2, dbf_text); | ||
801 | |||
802 | status = get_chp_status(chpid); | ||
803 | if (status < 0) { | ||
804 | printk(KERN_ERR "Can't vary unknown chpid %02X\n", chpid); | ||
805 | return -EINVAL; | ||
806 | } | ||
807 | |||
808 | if (!on && !status) { | ||
809 | printk(KERN_ERR "chpid %x is already offline\n", chpid); | ||
810 | return -EINVAL; | ||
811 | } | ||
812 | |||
813 | set_chp_logically_online(chpid, on); | ||
814 | |||
815 | /* | ||
816 | * Redo PathVerification on the devices the chpid connects to | ||
817 | */ | ||
818 | |||
819 | bus_for_each_dev(&css_bus_type, NULL, &chpid, on ? | ||
820 | s390_subchannel_vary_chpid_on : | ||
821 | s390_subchannel_vary_chpid_off); | ||
822 | if (!on) | ||
823 | goto out; | ||
824 | /* Scan for new devices on varied on path. */ | ||
825 | for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { | ||
826 | struct schib schib; | ||
827 | |||
828 | if (need_rescan) | ||
829 | break; | ||
830 | sch = get_subchannel_by_schid(irq); | ||
831 | if (sch) { | ||
832 | put_device(&sch->dev); | ||
833 | continue; | ||
834 | } | ||
835 | if (stsch(irq, &schib)) | ||
836 | /* We're through */ | ||
837 | break; | ||
838 | /* Put it on the slow path. */ | ||
839 | ret = css_enqueue_subchannel_slow(irq); | ||
840 | if (ret) { | ||
841 | css_clear_subchannel_slow_list(); | ||
842 | need_rescan = 1; | ||
843 | } | ||
844 | } | ||
845 | out: | ||
846 | if (need_rescan || css_slow_subchannels_exist()) | ||
847 | queue_work(slow_path_wq, &slow_path_work); | ||
848 | return 0; | ||
849 | } | ||
850 | |||
851 | /* | ||
852 | * Files for the channel path entries. | ||
853 | */ | ||
854 | static ssize_t | ||
855 | chp_status_show(struct device *dev, char *buf) | ||
856 | { | ||
857 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | ||
858 | |||
859 | if (!chp) | ||
860 | return 0; | ||
861 | return (get_chp_status(chp->id) ? sprintf(buf, "online\n") : | ||
862 | sprintf(buf, "offline\n")); | ||
863 | } | ||
864 | |||
865 | static ssize_t | ||
866 | chp_status_write(struct device *dev, const char *buf, size_t count) | ||
867 | { | ||
868 | struct channel_path *cp = container_of(dev, struct channel_path, dev); | ||
869 | char cmd[10]; | ||
870 | int num_args; | ||
871 | int error; | ||
872 | |||
873 | num_args = sscanf(buf, "%5s", cmd); | ||
874 | if (!num_args) | ||
875 | return count; | ||
876 | |||
877 | if (!strnicmp(cmd, "on", 2)) | ||
878 | error = s390_vary_chpid(cp->id, 1); | ||
879 | else if (!strnicmp(cmd, "off", 3)) | ||
880 | error = s390_vary_chpid(cp->id, 0); | ||
881 | else | ||
882 | error = -EINVAL; | ||
883 | |||
884 | return error < 0 ? error : count; | ||
885 | |||
886 | } | ||
887 | |||
888 | static DEVICE_ATTR(status, 0644, chp_status_show, chp_status_write); | ||
889 | |||
890 | static ssize_t | ||
891 | chp_type_show(struct device *dev, char *buf) | ||
892 | { | ||
893 | struct channel_path *chp = container_of(dev, struct channel_path, dev); | ||
894 | |||
895 | if (!chp) | ||
896 | return 0; | ||
897 | return sprintf(buf, "%x\n", chp->desc.desc); | ||
898 | } | ||
899 | |||
900 | static DEVICE_ATTR(type, 0444, chp_type_show, NULL); | ||
901 | |||
902 | static struct attribute * chp_attrs[] = { | ||
903 | &dev_attr_status.attr, | ||
904 | &dev_attr_type.attr, | ||
905 | NULL, | ||
906 | }; | ||
907 | |||
908 | static struct attribute_group chp_attr_group = { | ||
909 | .attrs = chp_attrs, | ||
910 | }; | ||
911 | |||
912 | static void | ||
913 | chp_release(struct device *dev) | ||
914 | { | ||
915 | struct channel_path *cp; | ||
916 | |||
917 | cp = container_of(dev, struct channel_path, dev); | ||
918 | kfree(cp); | ||
919 | } | ||
920 | |||
/*
 * Issue a "store channel path description" chsc (command 0x0002) for a
 * single chpid and copy the result into @desc. Returns 0 on success,
 * -ENOMEM/-ENODEV/-EBUSY for allocation or condition-code failures,
 * -EINVAL/-EOPNOTSUPP/-EIO for error response codes.
 */
static int
chsc_determine_channel_path_description(int chpid,
					struct channel_path_desc *desc)
{
	int ccode, ret;

	/* Layout of the store-channel-path-description area. */
	struct {
		struct chsc_header request;
		u32 : 24;
		u32 first_chpid : 8;
		u32 : 24;
		u32 last_chpid : 8;
		u32 zeroes1;
		struct chsc_header response;
		u32 zeroes2;
		struct channel_path_desc desc;
	} *scpd_area;

	scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scpd_area)
		return -ENOMEM;

	scpd_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0002,
	};

	/* Query exactly one chpid. */
	scpd_area->first_chpid = chpid;
	scpd_area->last_chpid = chpid;

	ccode = chsc(scpd_area);
	if (ccode > 0) {
		/* cc 3: not operational; cc 1/2: busy. */
		ret = (ccode == 3) ? -ENODEV : -EBUSY;
		goto out;
	}

	switch (scpd_area->response.code) {
	case 0x0001: /* Success. */
		memcpy(desc, &scpd_area->desc,
		       sizeof(struct channel_path_desc));
		ret = 0;
		break;
	case 0x0003: /* Invalid block. */
	case 0x0007: /* Invalid format. */
	case 0x0008: /* Other invalid block. */
		CIO_CRW_EVENT(2, "Error in chsc request block!\n");
		ret = -EINVAL;
		break;
	case 0x0004: /* Command not provided in model. */
		CIO_CRW_EVENT(2, "Model does not provide scpd\n");
		ret = -EOPNOTSUPP;
		break;
	default:
		CIO_CRW_EVENT(2, "Unknown CHSC response %d\n",
			      scpd_area->response.code);
		ret = -EIO;
	}
out:
	free_page((unsigned long)scpd_area);
	return ret;
}
982 | |||
983 | /* | ||
984 | * Entries for chpids on the system bus. | ||
985 | * This replaces /proc/chpids. | ||
986 | */ | ||
987 | static int | ||
988 | new_channel_path(int chpid) | ||
989 | { | ||
990 | struct channel_path *chp; | ||
991 | int ret; | ||
992 | |||
993 | chp = kmalloc(sizeof(struct channel_path), GFP_KERNEL); | ||
994 | if (!chp) | ||
995 | return -ENOMEM; | ||
996 | memset(chp, 0, sizeof(struct channel_path)); | ||
997 | |||
998 | /* fill in status, etc. */ | ||
999 | chp->id = chpid; | ||
1000 | chp->state = 1; | ||
1001 | chp->dev = (struct device) { | ||
1002 | .parent = &css_bus_device, | ||
1003 | .release = chp_release, | ||
1004 | }; | ||
1005 | snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid); | ||
1006 | |||
1007 | /* Obtain channel path description and fill it in. */ | ||
1008 | ret = chsc_determine_channel_path_description(chpid, &chp->desc); | ||
1009 | if (ret) | ||
1010 | goto out_free; | ||
1011 | |||
1012 | /* make it known to the system */ | ||
1013 | ret = device_register(&chp->dev); | ||
1014 | if (ret) { | ||
1015 | printk(KERN_WARNING "%s: could not register %02x\n", | ||
1016 | __func__, chpid); | ||
1017 | goto out_free; | ||
1018 | } | ||
1019 | ret = sysfs_create_group(&chp->dev.kobj, &chp_attr_group); | ||
1020 | if (ret) { | ||
1021 | device_unregister(&chp->dev); | ||
1022 | goto out_free; | ||
1023 | } else | ||
1024 | chps[chpid] = chp; | ||
1025 | return ret; | ||
1026 | out_free: | ||
1027 | kfree(chp); | ||
1028 | return ret; | ||
1029 | } | ||
1030 | |||
1031 | void * | ||
1032 | chsc_get_chp_desc(struct subchannel *sch, int chp_no) | ||
1033 | { | ||
1034 | struct channel_path *chp; | ||
1035 | struct channel_path_desc *desc; | ||
1036 | |||
1037 | chp = chps[sch->schib.pmcw.chpid[chp_no]]; | ||
1038 | if (!chp) | ||
1039 | return NULL; | ||
1040 | desc = kmalloc(sizeof(struct channel_path_desc), GFP_KERNEL); | ||
1041 | if (!desc) | ||
1042 | return NULL; | ||
1043 | memcpy(desc, &chp->desc, sizeof(struct channel_path_desc)); | ||
1044 | return desc; | ||
1045 | } | ||
1046 | |||
1047 | |||
1048 | static int __init | ||
1049 | chsc_alloc_sei_area(void) | ||
1050 | { | ||
1051 | sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
1052 | if (!sei_page) | ||
1053 | printk(KERN_WARNING"Can't allocate page for processing of " \ | ||
1054 | "chsc machine checks!\n"); | ||
1055 | return (sei_page ? 0 : -ENOMEM); | ||
1056 | } | ||
1057 | |||
1058 | subsys_initcall(chsc_alloc_sei_area); | ||
1059 | |||
1060 | struct css_general_char css_general_characteristics; | ||
1061 | struct css_chsc_char css_chsc_characteristics; | ||
1062 | |||
/*
 * Issue a "store channel subsystem characteristics" chsc (command
 * 0x0010) at boot and cache the general and chsc characteristic bit
 * vectors in the global css_general_characteristics /
 * css_chsc_characteristics. Returns 0 on success, -ENOMEM or -EIO.
 */
int __init
chsc_determine_css_characteristics(void)
{
	int result;
	/* Layout of the store-css-characteristics area. */
	struct {
		struct chsc_header request;
		u32 reserved1;
		u32 reserved2;
		u32 reserved3;
		struct chsc_header response;
		u32 reserved4;
		u32 general_char[510];
		u32 chsc_char[518];
	} *scsc_area;

	scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsc_area) {
		printk(KERN_WARNING"cio: Was not able to determine available" \
		       "CHSCs due to no memory.\n");
		return -ENOMEM;
	}

	scsc_area->request = (struct chsc_header) {
		.length = 0x0010,
		.code   = 0x0010,
	};

	result = chsc(scsc_area);
	if (result) {
		printk(KERN_WARNING"cio: Was not able to determine " \
		       "available CHSCs, cc=%i.\n", result);
		result = -EIO;
		goto exit;
	}

	if (scsc_area->response.code != 1) {
		printk(KERN_WARNING"cio: Was not able to determine " \
		       "available CHSCs.\n");
		result = -EIO;
		goto exit;
	}
	memcpy(&css_general_characteristics, scsc_area->general_char,
	       sizeof(css_general_characteristics));
	memcpy(&css_chsc_characteristics, scsc_area->chsc_char,
	       sizeof(css_chsc_characteristics));
exit:
	free_page ((unsigned long) scsc_area);
	return result;
}
1112 | |||
1113 | EXPORT_SYMBOL_GPL(css_general_characteristics); | ||
1114 | EXPORT_SYMBOL_GPL(css_chsc_characteristics); | ||
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h new file mode 100644 index 000000000000..be20da49d147 --- /dev/null +++ b/drivers/s390/cio/chsc.h | |||
@@ -0,0 +1,66 @@ | |||
1 | #ifndef S390_CHSC_H | ||
2 | #define S390_CHSC_H | ||
3 | |||
4 | #define NR_CHPIDS 256 | ||
5 | |||
6 | #define CHSC_SEI_ACC_CHPID 1 | ||
7 | #define CHSC_SEI_ACC_LINKADDR 2 | ||
8 | #define CHSC_SEI_ACC_FULLLINKADDR 3 | ||
9 | |||
/*
 * Common header of a chsc command/response area: total length of the
 * area in bytes and the command resp. response code.
 */
struct chsc_header {
	u16 length;
	u16 code;
};
14 | |||
/*
 * Channel-path description block as delivered by the hardware.
 * NOTE(review): field names mirror the architected layout; exact
 * semantics of the individual bytes are not visible here — consult
 * the channel-subsystem architecture documentation before relying
 * on them.
 */
struct channel_path_desc {
	u8 flags;
	u8 lsn;
	u8 desc;
	u8 chpid;	/* channel-path identifier */
	u8 swla;
	u8 zeroes;
	u8 chla;
	u8 chpp;
};
25 | |||
/*
 * In-kernel representation of a channel path: chpid number, state,
 * the hardware-provided description and the embedded driver-core
 * device used for sysfs representation.
 */
struct channel_path {
	int id;
	int state;
	struct channel_path_desc desc;
	struct device dev;
};
32 | |||
33 | extern void s390_process_css( void ); | ||
34 | extern void chsc_validate_chpids(struct subchannel *); | ||
35 | extern void chpid_is_actually_online(int); | ||
36 | |||
/*
 * General channel-subsystem characteristics as returned by the scsc
 * chsc command (see chsc_determine_css_characteristics()).  The bit
 * numbers in the comments give each flag's position within the
 * characteristics block.
 */
struct css_general_char {
	u64 : 41;
	u32 aif : 1;     /* bit 41 */
	u32 : 3;
	u32 mcss : 1;    /* bit 45 */
	u32 : 2;
	u32 ext_mb : 1;  /* bit 48 */
	u32 : 7;
	u32 aif_tdd : 1; /* bit 56 */
	u32 : 10;
	u32 aif_osa : 1; /* bit 67 */
	u32 : 28;
}__attribute__((packed));
50 | |||
/*
 * Chsc-related channel-subsystem characteristics (second part of the
 * scsc response).  Bit numbers refer to positions within the
 * characteristics block.
 */
struct css_chsc_char {
	u64 res;
	u64 : 43;
	u32 scssc : 1;  /* bit 107 */
	u32 scsscf : 1; /* bit 108 */
	u32 : 19;
}__attribute__((packed));
58 | |||
59 | extern struct css_general_char css_general_characteristics; | ||
60 | extern struct css_chsc_char css_chsc_characteristics; | ||
61 | |||
62 | extern int chsc_determine_css_characteristics(void); | ||
63 | extern int css_characteristics_avail; | ||
64 | |||
65 | extern void *chsc_get_chp_desc(struct subchannel*, int); | ||
66 | #endif | ||
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c new file mode 100644 index 000000000000..99ce5a567982 --- /dev/null +++ b/drivers/s390/cio/cio.c | |||
@@ -0,0 +1,860 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/cio.c | ||
3 | * S/390 common I/O routines -- low level i/o calls | ||
4 | * $Revision: 1.131 $ | ||
5 | * | ||
6 | * Copyright (C) 1999-2002 IBM Deutschland Entwicklung GmbH, | ||
7 | * IBM Corporation | ||
8 | * Author(s): Ingo Adlung (adlung@de.ibm.com) | ||
9 | * Cornelia Huck (cohuck@de.ibm.com) | ||
10 | * Arnd Bergmann (arndb@de.ibm.com) | ||
11 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
12 | */ | ||
13 | |||
14 | #include <linux/module.h> | ||
15 | #include <linux/config.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/device.h> | ||
19 | #include <linux/kernel_stat.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | |||
22 | #include <asm/cio.h> | ||
23 | #include <asm/delay.h> | ||
24 | #include <asm/irq.h> | ||
25 | |||
26 | #include "airq.h" | ||
27 | #include "cio.h" | ||
28 | #include "css.h" | ||
29 | #include "chsc.h" | ||
30 | #include "ioasm.h" | ||
31 | #include "blacklist.h" | ||
32 | #include "cio_debug.h" | ||
33 | |||
34 | debug_info_t *cio_debug_msg_id; | ||
35 | debug_info_t *cio_debug_trace_id; | ||
36 | debug_info_t *cio_debug_crw_id; | ||
37 | |||
38 | int cio_show_msg; | ||
39 | |||
40 | static int __init | ||
41 | cio_setup (char *parm) | ||
42 | { | ||
43 | if (!strcmp (parm, "yes")) | ||
44 | cio_show_msg = 1; | ||
45 | else if (!strcmp (parm, "no")) | ||
46 | cio_show_msg = 0; | ||
47 | else | ||
48 | printk (KERN_ERR "cio_setup : invalid cio_msg parameter '%s'", | ||
49 | parm); | ||
50 | return 1; | ||
51 | } | ||
52 | |||
53 | __setup ("cio_msg=", cio_setup); | ||
54 | |||
/*
 * Function: cio_debug_init
 * Initializes three debug logs (under /proc/s390dbf) for common I/O:
 * - cio_msg logs the messages which are printk'ed when CONFIG_DEBUG_IO is on
 * - cio_trace logs the calling of different functions
 * - cio_crw logs the messages which are printk'ed when CONFIG_DEBUG_CRW is on
 * debug levels depend on CONFIG_DEBUG_IO resp. CONFIG_DEBUG_CRW
 * Returns 0 on success, -1 if any debug area could not be registered
 * (all areas registered so far are unregistered again).
 */
static int __init
cio_debug_init (void)
{
	cio_debug_msg_id = debug_register ("cio_msg", 4, 4, 16*sizeof (long));
	if (!cio_debug_msg_id)
		goto out_unregister;
	debug_register_view (cio_debug_msg_id, &debug_sprintf_view);
	debug_set_level (cio_debug_msg_id, 2);
	cio_debug_trace_id = debug_register ("cio_trace", 4, 4, 8);
	if (!cio_debug_trace_id)
		goto out_unregister;
	debug_register_view (cio_debug_trace_id, &debug_hex_ascii_view);
	debug_set_level (cio_debug_trace_id, 2);
	cio_debug_crw_id = debug_register ("cio_crw", 2, 4, 16*sizeof (long));
	if (!cio_debug_crw_id)
		goto out_unregister;
	debug_register_view (cio_debug_crw_id, &debug_sprintf_view);
	debug_set_level (cio_debug_crw_id, 2);
	pr_debug("debugging initialized\n");
	return 0;

out_unregister:
	/* Roll back whatever was registered before the failure. */
	if (cio_debug_msg_id)
		debug_unregister (cio_debug_msg_id);
	if (cio_debug_trace_id)
		debug_unregister (cio_debug_trace_id);
	if (cio_debug_crw_id)
		debug_unregister (cio_debug_crw_id);
	pr_debug("could not initialize debugging\n");
	return -1;
}
94 | |||
95 | arch_initcall (cio_debug_init); | ||
96 | |||
97 | int | ||
98 | cio_set_options (struct subchannel *sch, int flags) | ||
99 | { | ||
100 | sch->options.suspend = (flags & DOIO_ALLOW_SUSPEND) != 0; | ||
101 | sch->options.prefetch = (flags & DOIO_DENY_PREFETCH) != 0; | ||
102 | sch->options.inter = (flags & DOIO_SUPPRESS_INTER) != 0; | ||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | /* FIXME: who wants to use this? */ | ||
107 | int | ||
108 | cio_get_options (struct subchannel *sch) | ||
109 | { | ||
110 | int flags; | ||
111 | |||
112 | flags = 0; | ||
113 | if (sch->options.suspend) | ||
114 | flags |= DOIO_ALLOW_SUSPEND; | ||
115 | if (sch->options.prefetch) | ||
116 | flags |= DOIO_DENY_PREFETCH; | ||
117 | if (sch->options.inter) | ||
118 | flags |= DOIO_SUPPRESS_INTER; | ||
119 | return flags; | ||
120 | } | ||
121 | |||
/*
 * Use tpi to get a pending interrupt, call the interrupt handler and
 * return a pointer to the subchannel structure.
 *
 * Returns 1 if an interrupt was dequeued (whether or not a driver
 * handler was invoked), 0 if none was pending.  Used by
 * wait_cons_dev() to poll for console interrupts.
 */
static inline int
cio_tpi(void)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;

	/* tpi(NULL) stores the interrupt information in the lowcore. */
	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
	if (tpi (NULL) != 1)
		return 0;
	irb = (struct irb *) __LC_IRB;
	/* Store interrupt response block to lowcore. */
	if (tsch (tpi_info->irq, irb) != 0)
		/* Not status pending or not operational. */
		return 1;
	sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
	if (!sch)
		return 1;
	/* Mimic regular interrupt context for the driver's handler. */
	local_bh_disable();
	irq_enter ();
	spin_lock(&sch->lock);
	memcpy (&sch->schib.scsw, &irb->scsw, sizeof (struct scsw));
	if (sch->driver && sch->driver->irq)
		sch->driver->irq(&sch->dev);
	spin_unlock(&sch->lock);
	irq_exit ();
	__local_bh_enable();
	return 1;
}
155 | |||
/*
 * Common "device/path not operational" handling for cio_start_key():
 * remove the failed path(s) from the subchannel's logical path mask
 * (or clear it entirely if no mask was given), refresh the schib and
 * log the event.  Returns -EACCES while other paths remain usable,
 * -ENODEV once none do.
 */
static inline int
cio_start_handle_notoper(struct subchannel *sch, __u8 lpm)
{
	char dbf_text[15];

	if (lpm != 0)
		sch->lpm &= ~lpm;
	else
		sch->lpm = 0;

	stsch (sch->irq, &sch->schib);

	CIO_MSG_EVENT(0, "cio_start: 'not oper' status for "
		      "subchannel %04x!\n", sch->irq);
	sprintf(dbf_text, "no%s", sch->dev.bus_id);
	CIO_TRACE_EVENT(0, dbf_text);
	CIO_HEX_EVENT(0, &sch->schib, sizeof (struct schib));

	return (sch->lpm ? -EACCES : -ENODEV);
}
176 | |||
/*
 * cio_start_key - start an I/O operation on a subchannel.
 * @sch: subchannel structure
 * @cpa: logical channel program address
 * @lpm: logical path mask; 0 selects the subchannel's full lpm
 * @key: storage key for the channel program
 *
 * Builds the operation request block from the subchannel options and
 * issues "Start Subchannel" (ssch).  Returns 0 on success, -EBUSY if
 * the subchannel is status pending or busy, and -EACCES/-ENODEV via
 * cio_start_handle_notoper() if the device/path is not operational.
 */
int
cio_start_key (struct subchannel *sch,	/* subchannel structure */
	       struct ccw1 * cpa,	/* logical channel prog addr */
	       __u8 lpm,		/* logical path mask */
	       __u8 key)                /* storage key */
{
	char dbf_txt[15];
	int ccode;

	CIO_TRACE_EVENT (4, "stIO");
	CIO_TRACE_EVENT (4, sch->dev.bus_id);

	/* sch is always under 2G. */
	sch->orb.intparm = (__u32)(unsigned long)sch;
	sch->orb.fmt = 1;

	sch->orb.pfch = sch->options.prefetch == 0;
	sch->orb.spnd = sch->options.suspend;
	sch->orb.ssic = sch->options.suspend && sch->options.inter;
	sch->orb.lpm = (lpm != 0) ? (lpm & sch->opm) : sch->lpm;
#ifdef CONFIG_ARCH_S390X
	/*
	 * for 64 bit we always support 64 bit IDAWs with 4k page size only
	 */
	sch->orb.c64 = 1;
	sch->orb.i2k = 0;
#endif
	/* The access key occupies the upper four bits of the key byte. */
	sch->orb.key = key >> 4;
	/* issue "Start Subchannel" */
	sch->orb.cpa = (__u32) __pa (cpa);
	ccode = ssch (sch->irq, &sch->orb);

	/* process condition code */
	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (4, dbf_txt);

	switch (ccode) {
	case 0:
		/*
		 * initialize device status information
		 */
		sch->schib.scsw.actl |= SCSW_ACTL_START_PEND;
		return 0;
	case 1:		/* status pending */
	case 2:		/* busy */
		return -EBUSY;
	default:		/* device/path not operational */
		return cio_start_handle_notoper(sch, lpm);
	}
}
227 | |||
/* cio_start - as cio_start_key(), but with the default storage key. */
int
cio_start (struct subchannel *sch, struct ccw1 *cpa, __u8 lpm)
{
	return cio_start_key(sch, cpa, lpm, default_storage_key);
}
233 | |||
/*
 * resume suspended I/O operation
 * Issues "Resume Subchannel" (rsch) for @sch.  Returns 0 on success,
 * -EBUSY if status is pending, -EINVAL if there is no suspended
 * operation to resume, -ENODEV if the device is not operational.
 */
int
cio_resume (struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;

	CIO_TRACE_EVENT (4, "resIO");
	CIO_TRACE_EVENT (4, sch->dev.bus_id);

	ccode = rsch (sch->irq);

	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (4, dbf_txt);

	switch (ccode) {
	case 0:
		sch->schib.scsw.actl |= SCSW_ACTL_RESUME_PEND;
		return 0;
	case 1:
		return -EBUSY;
	case 2:
		return -EINVAL;
	default:
		/*
		 * useless to wait for request completion
		 * as device is no longer operational !
		 */
		return -ENODEV;
	}
}
267 | |||
268 | /* | ||
269 | * halt I/O operation | ||
270 | */ | ||
271 | int | ||
272 | cio_halt(struct subchannel *sch) | ||
273 | { | ||
274 | char dbf_txt[15]; | ||
275 | int ccode; | ||
276 | |||
277 | if (!sch) | ||
278 | return -ENODEV; | ||
279 | |||
280 | CIO_TRACE_EVENT (2, "haltIO"); | ||
281 | CIO_TRACE_EVENT (2, sch->dev.bus_id); | ||
282 | |||
283 | /* | ||
284 | * Issue "Halt subchannel" and process condition code | ||
285 | */ | ||
286 | ccode = hsch (sch->irq); | ||
287 | |||
288 | sprintf (dbf_txt, "ccode:%d", ccode); | ||
289 | CIO_TRACE_EVENT (2, dbf_txt); | ||
290 | |||
291 | switch (ccode) { | ||
292 | case 0: | ||
293 | sch->schib.scsw.actl |= SCSW_ACTL_HALT_PEND; | ||
294 | return 0; | ||
295 | case 1: /* status pending */ | ||
296 | case 2: /* busy */ | ||
297 | return -EBUSY; | ||
298 | default: /* device not operational */ | ||
299 | return -ENODEV; | ||
300 | } | ||
301 | } | ||
302 | |||
/*
 * Clear I/O operation
 * Issues "Clear Subchannel" (csch) for @sch.  Returns 0 on success,
 * -ENODEV if @sch is NULL or the device is not operational.  csch
 * has no busy/status-pending condition codes, hence no -EBUSY case.
 */
int
cio_clear(struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;

	if (!sch)
		return -ENODEV;

	CIO_TRACE_EVENT (2, "clearIO");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	/*
	 * Issue "Clear subchannel" and process condition code
	 */
	ccode = csch (sch->irq);

	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (2, dbf_txt);

	switch (ccode) {
	case 0:
		sch->schib.scsw.actl |= SCSW_ACTL_CLEAR_PEND;
		return 0;
	default:		/* device not operational */
		return -ENODEV;
	}
}
334 | |||
/*
 * Function: cio_cancel
 * Issues a "Cancel Subchannel" on the specified subchannel
 * Note: We don't need any fancy intparms and flags here
 *	 since xsch is executed synchronously.
 * Only for common I/O internal use as for now.
 * Returns 0 on success (scsw refreshed from hardware), -EBUSY if
 * status is pending, -EINVAL if cancel is not applicable, -ENODEV
 * if @sch is NULL or the device is not operational.
 */
int
cio_cancel (struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;

	if (!sch)
		return -ENODEV;

	CIO_TRACE_EVENT (2, "cancelIO");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	ccode = xsch (sch->irq);

	sprintf (dbf_txt, "ccode:%d", ccode);
	CIO_TRACE_EVENT (2, dbf_txt);

	switch (ccode) {
	case 0:		/* success */
		/* Update information in scsw. */
		stsch (sch->irq, &sch->schib);
		return 0;
	case 1:		/* status pending */
		return -EBUSY;
	case 2:		/* not applicable */
		return -EINVAL;
	default:	/* not oper */
		return -ENODEV;
	}
}
372 | |||
/*
 * Function: cio_modify
 * Issues a "Modify Subchannel" on the specified subchannel
 * Retries up to five times while the subchannel is busy.  Returns 0
 * on success, -EBUSY if busy or status pending, -ENODEV if not
 * operational, -EIO if msch got a program check.
 */
int
cio_modify (struct subchannel *sch)
{
	int ccode, retry, ret;

	ret = 0;
	for (retry = 0; retry < 5; retry++) {
		ccode = msch_err (sch->irq, &sch->schib);
		if (ccode < 0)	/* -EIO if msch gets a program check. */
			return ccode;
		switch (ccode) {
		case 0: /* successful */
			return 0;
		case 1:	/* status pending */
			return -EBUSY;
		case 2:	/* busy */
			udelay (100);	/* allow for recovery */
			ret = -EBUSY;
			break;
		case 3:	/* not operational */
			return -ENODEV;
		}
	}
	return ret;
}
402 | |||
/*
 * Enable subchannel.
 * Sets the enable bit, interruption subclass @isc and the interrupt
 * parameter (pointer to @sch) in the pmcw and commits the change via
 * cio_modify(), retrying up to five times.  A program check in
 * cio_modify() is retried without the concurrent-sense bit; -EBUSY
 * is handled by draining pending status with tsch.  Returns the last
 * cio_modify() result (0 on success).
 */
int
cio_enable_subchannel (struct subchannel *sch, unsigned int isc)
{
	char dbf_txt[15];
	int ccode;
	int retry;
	int ret;

	CIO_TRACE_EVENT (2, "ensch");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	ccode = stsch (sch->irq, &sch->schib);
	if (ccode)
		return -ENODEV;

	for (retry = 5, ret = 0; retry > 0; retry--) {
		sch->schib.pmcw.ena = 1;
		sch->schib.pmcw.isc = isc;
		sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
		ret = cio_modify(sch);
		if (ret == -ENODEV)
			break;
		if (ret == -EIO)
			/*
			 * Got a program check in cio_modify. Try without
			 * the concurrent sense bit the next time.
			 */
			sch->schib.pmcw.csense = 0;
		if (ret == 0) {
			/* Re-read the schib to verify the enable took. */
			stsch (sch->irq, &sch->schib);
			if (sch->schib.pmcw.ena)
				break;
		}
		if (ret == -EBUSY) {
			struct irb irb;
			if (tsch(sch->irq, &irb) != 0)
				break;
		}
	}
	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (2, dbf_txt);
	return ret;
}
449 | |||
/*
 * Disable subchannel.
 * Clears the enable bit in the pmcw and commits the change via
 * cio_modify(), retrying up to five times.  Returns 0 on success,
 * -ENODEV if the subchannel is not operational, -EBUSY if there is
 * activity pending (disabling is then deferred to the state machine).
 */
int
cio_disable_subchannel (struct subchannel *sch)
{
	char dbf_txt[15];
	int ccode;
	int retry;
	int ret;

	CIO_TRACE_EVENT (2, "dissch");
	CIO_TRACE_EVENT (2, sch->dev.bus_id);

	ccode = stsch (sch->irq, &sch->schib);
	if (ccode == 3)		/* Not operational. */
		return -ENODEV;

	if (sch->schib.scsw.actl != 0)
		/*
		 * the disable function must not be called while there are
		 *  requests pending for completion !
		 */
		return -EBUSY;

	for (retry = 5, ret = 0; retry > 0; retry--) {
		sch->schib.pmcw.ena = 0;
		ret = cio_modify(sch);
		if (ret == -ENODEV)
			break;
		if (ret == -EBUSY)
			/*
			 * The subchannel is busy or status pending.
			 * We'll disable when the next interrupt was delivered
			 * via the state machine.
			 */
			break;
		if (ret == 0) {
			/* Re-read the schib to verify the disable took. */
			stsch (sch->irq, &sch->schib);
			if (!sch->schib.pmcw.ena)
				break;
		}
	}
	sprintf (dbf_txt, "ret:%d", ret);
	CIO_TRACE_EVENT (2, dbf_txt);
	return ret;
}
497 | |||
/*
 * cio_validate_subchannel()
 *
 * Find out subchannel type and initialize struct subchannel.
 * Return codes:
 *   SUBCHANNEL_TYPE_IO for a normal io subchannel
 *   SUBCHANNEL_TYPE_CHSC for a chsc subchannel
 *   SUBCHANNEL_TYPE_MESSAGE for a messaging subchannel
 *   SUBCHANNEL_TYPE_ADM for a adm(?) subchannel
 *   -ENXIO for non-defined subchannels
 *   -ENODEV for subchannels with invalid device number or blacklisted devices
 */
int
cio_validate_subchannel (struct subchannel *sch, unsigned int irq)
{
	char dbf_txt[15];
	int ccode;

	sprintf (dbf_txt, "valsch%x", irq);
	CIO_TRACE_EVENT (4, dbf_txt);

	/* Nuke all fields. */
	memset(sch, 0, sizeof(struct subchannel));

	spin_lock_init(&sch->lock);

	/* Set a name for the subchannel */
	snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.0.%04x", irq);

	/*
	 * The first subchannel that is not-operational (ccode==3)
	 *  indicates that there aren't any more devices available.
	 */
	sch->irq = irq;
	ccode = stsch (irq, &sch->schib);
	if (ccode)
		return -ENXIO;

	/* Copy subchannel type from path management control word. */
	sch->st = sch->schib.pmcw.st;

	/*
	 * ... just being curious we check for non I/O subchannels
	 */
	if (sch->st != 0) {
		CIO_DEBUG(KERN_INFO, 0,
			  "Subchannel %04X reports "
			  "non-I/O subchannel type %04X\n",
			  sch->irq, sch->st);
		/* We stop here for non-io subchannels. */
		return sch->st;
	}

	/* Initialization for io subchannels. */
	if (!sch->schib.pmcw.dnv)
		/* io subchannel but device number is invalid. */
		return -ENODEV;

	/* Devno is valid. */
	if (is_blacklisted (sch->schib.pmcw.dev)) {
		/*
		 * This device must not be known to Linux. So we simply
		 * say that there is no device and return ENODEV.
		 */
		CIO_MSG_EVENT(0, "Blacklisted device detected "
			      "at devno %04X\n", sch->schib.pmcw.dev);
		return -ENODEV;
	}
	/* Start with all paths assumed usable; chsc may mask some out. */
	sch->opm = 0xff;
	chsc_validate_chpids(sch);
	/* Usable paths: installed & available & operational & opm. */
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;

	CIO_DEBUG(KERN_INFO, 0,
		  "Detected device %04X on subchannel %04X"
		  " - PIM = %02X, PAM = %02X, POM = %02X\n",
		  sch->schib.pmcw.dev, sch->irq, sch->schib.pmcw.pim,
		  sch->schib.pmcw.pam, sch->schib.pmcw.pom);

	/*
	 * We now have to initially ...
	 *  ... set "interruption subclass"
	 *  ... enable "concurrent sense"
	 *  ... enable "multipath mode" if more than one
	 *	  CHPID is available. This is done regardless
	 *	  whether multiple paths are available for us.
	 */
	sch->schib.pmcw.isc = 3;	/* could be smth. else */
	sch->schib.pmcw.csense = 1;	/* concurrent sense */
	sch->schib.pmcw.ena = 0;
	/* lpm & (lpm - 1) is non-zero iff more than one bit is set. */
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;	/* multipath mode */
	return 0;
}
594 | |||
/*
 * do_IRQ() handles all normal I/O device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 *
 * Drains all pending I/O interrupts from the lowcore, dispatching
 * each either to the adapter-interrupt path or to the owning
 * subchannel's driver handler.
 */
void
do_IRQ (struct pt_regs *regs)
{
	struct tpi_info *tpi_info;
	struct subchannel *sch;
	struct irb *irb;

	irq_enter ();
	asm volatile ("mc 0,0");
	if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
		/**
		 * Make sure that the i/o interrupt did not "overtake"
		 * the last HZ timer interrupt.
		 */
		account_ticks(regs);
	/*
	 * Get interrupt information from lowcore
	 */
	tpi_info = (struct tpi_info *) __LC_SUBCHANNEL_ID;
	irb = (struct irb *) __LC_IRB;
	do {
		kstat_cpu(smp_processor_id()).irqs[IO_INTERRUPT]++;
		/*
		 * Non I/O-subchannel thin interrupts are processed differently
		 */
		if (tpi_info->adapter_IO == 1 &&
		    tpi_info->int_type == IO_INTERRUPT_TYPE) {
			do_adapter_IO();
			continue;
		}
		sch = (struct subchannel *)(unsigned long)tpi_info->intparm;
		if (sch)
			spin_lock(&sch->lock);
		/* Store interrupt response block to lowcore. */
		if (tsch (tpi_info->irq, irb) == 0 && sch) {
			/* Keep subchannel information word up to date. */
			memcpy (&sch->schib.scsw, &irb->scsw,
				sizeof (irb->scsw));
			/* Call interrupt handler if there is one. */
			if (sch->driver && sch->driver->irq)
				sch->driver->irq(&sch->dev);
		}
		if (sch)
			spin_unlock(&sch->lock);
		/*
		 * Are more interrupts pending?
		 * If so, the tpi instruction will update the lowcore
		 * to hold the info for the next interrupt.
		 * We don't do this for VM because a tpi drops the cpu
		 * out of the sie which costs more cycles than it saves.
		 */
	} while (!MACHINE_IS_VM && tpi (NULL) != 0);
	irq_exit ();
}
655 | |||
656 | #ifdef CONFIG_CCW_CONSOLE | ||
657 | static struct subchannel console_subchannel; | ||
658 | static int console_subchannel_in_use; | ||
659 | |||
/*
 * busy wait for the next interrupt on the console
 * Temporarily restricts I/O interruption subclasses to isc 7 (the
 * console device) and polls via cio_tpi() until the console
 * subchannel shows no activity.
 * NOTE(review): the loop drops and re-takes console_subchannel.lock,
 * so this appears to expect the lock held on entry — confirm against
 * callers.
 */
void
wait_cons_dev (void)
{
	unsigned long cr6 __attribute__ ((aligned (8)));
	unsigned long save_cr6 __attribute__ ((aligned (8)));

	/*
	 * before entering the spinlock we may already have
	 * processed the interrupt on a different CPU...
	 */
	if (!console_subchannel_in_use)
		return;

	/* disable all but isc 7 (console device) */
	__ctl_store (save_cr6, 6, 6);
	cr6 = 0x01000000;
	__ctl_load (cr6, 6, 6);

	do {
		spin_unlock(&console_subchannel.lock);
		if (!cio_tpi())
			cpu_relax();
		spin_lock(&console_subchannel.lock);
	} while (console_subchannel.schib.scsw.actl != 0);
	/*
	 * restore previous isc value
	 */
	__ctl_load (save_cr6, 6, 6);
}
692 | |||
/*
 * Determine the subchannel number (irq) of the console device:
 * use the irq VM handed us if set, otherwise scan all subchannels
 * for the known console device number.  Returns the irq, or -1 if
 * no console subchannel could be found.
 */
static int
cio_console_irq(void)
{
	int irq;
	
	if (console_irq != -1) {
		/* VM provided us with the irq number of the console. */
		if (stsch(console_irq, &console_subchannel.schib) != 0 ||
		    !console_subchannel.schib.pmcw.dnv)
			return -1;
		console_devno = console_subchannel.schib.pmcw.dev;
	} else if (console_devno != -1) {
		/* At least the console device number is known. */
		for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
			if (stsch(irq, &console_subchannel.schib) != 0)
				break;
			if (console_subchannel.schib.pmcw.dnv &&
			    console_subchannel.schib.pmcw.dev ==
			    console_devno) {
				console_irq = irq;
				break;
			}
		}
		if (console_irq == -1)
			return -1;
	} else {
		/* unlike in 2.4, we cannot autoprobe here, since
		 * the channel subsystem is not fully initialized.
		 * With some luck, the HWC console can take over */
		printk(KERN_WARNING "No ccw console found!\n");
		return -1;
	}
	return console_irq;
}
727 | |||
/*
 * Locate, validate and enable the console subchannel.  Only one user
 * may hold the console at a time (console_subchannel_in_use, claimed
 * atomically via xchg).  Returns a pointer to the initialized console
 * subchannel, or ERR_PTR(-EBUSY/-ENODEV/modify error) on failure.
 */
struct subchannel *
cio_probe_console(void)
{
	int irq, ret;

	if (xchg(&console_subchannel_in_use, 1) != 0)
		return ERR_PTR(-EBUSY);
	irq = cio_console_irq();
	if (irq == -1) {
		console_subchannel_in_use = 0;
		return ERR_PTR(-ENODEV);
	}
	memset(&console_subchannel, 0, sizeof(struct subchannel));
	ret = cio_validate_subchannel(&console_subchannel, irq);
	if (ret) {
		console_subchannel_in_use = 0;
		return ERR_PTR(-ENODEV);
	}

	/*
	 * enable console I/O-interrupt subclass 7
	 */
	ctl_set_bit(6, 24);
	console_subchannel.schib.pmcw.isc = 7;
	console_subchannel.schib.pmcw.intparm =
		(__u32)(unsigned long)&console_subchannel;
	ret = cio_modify(&console_subchannel);
	if (ret) {
		console_subchannel_in_use = 0;
		return ERR_PTR(ret);
	}
	return &console_subchannel;
}
761 | |||
/*
 * Undo cio_probe_console(): detach the interrupt parameter, disable
 * the console interruption subclass and release the console
 * subchannel for other users.
 */
void
cio_release_console(void)
{
	console_subchannel.schib.pmcw.intparm = 0;
	cio_modify(&console_subchannel);
	ctl_clear_bit(6, 24);
	console_subchannel_in_use = 0;
}
770 | |||
771 | /* Bah... hack to catch console special sausages. */ | ||
772 | int | ||
773 | cio_is_console(int irq) | ||
774 | { | ||
775 | if (!console_subchannel_in_use) | ||
776 | return 0; | ||
777 | return (irq == console_subchannel.irq); | ||
778 | } | ||
779 | |||
780 | struct subchannel * | ||
781 | cio_get_console_subchannel(void) | ||
782 | { | ||
783 | if (!console_subchannel_in_use) | ||
784 | return 0; | ||
785 | return &console_subchannel; | ||
786 | } | ||
787 | |||
788 | #endif | ||
789 | static inline int | ||
790 | __disable_subchannel_easy(unsigned int schid, struct schib *schib) | ||
791 | { | ||
792 | int retry, cc; | ||
793 | |||
794 | cc = 0; | ||
795 | for (retry=0;retry<3;retry++) { | ||
796 | schib->pmcw.ena = 0; | ||
797 | cc = msch(schid, schib); | ||
798 | if (cc) | ||
799 | return (cc==3?-ENODEV:-EBUSY); | ||
800 | stsch(schid, schib); | ||
801 | if (!schib->pmcw.ena) | ||
802 | return 0; | ||
803 | } | ||
804 | return -EBUSY; /* uhm... */ | ||
805 | } | ||
806 | |||
/*
 * Issue "Clear Subchannel" (csch) for @schid and busy-wait (up to
 * 20 x 100us) for a resulting interrupt, which is drained with
 * tpi/tsch.  Returns 0 once an interrupt arrived, -ENODEV if csch
 * failed, -EBUSY on timeout.  Only used when quiescing for re-IPL.
 */
static inline int
__clear_subchannel_easy(unsigned int schid)
{
	int retry;

	if (csch(schid))
		return -ENODEV;
	for (retry=0;retry<20;retry++) {
		struct tpi_info ti;

		if (tpi(&ti)) {
			tsch(schid, (struct irb *)__LC_IRB);
			return 0;
		}
		udelay(100);
	}
	return -EBUSY;
}
825 | |||
826 | extern void do_reipl(unsigned long devno); | ||
827 | |||
/* Clear all subchannels. */
/*
 * With interrupts disabled, disable (and, where that fails with
 * -EBUSY, clear and disable again) every enabled subchannel.  The
 * scan stops at the first not-operational subchannel.  Used to
 * quiesce the channel subsystem before re-IPL.
 */
void
clear_all_subchannels(void)
{
	unsigned int schid;

	local_irq_disable();
	for (schid=0;schid<=highest_subchannel;schid++) {
		struct schib schib;
		if (stsch(schid, &schib))
			break; /* break out of the loop */
		if (!schib.pmcw.ena)
			continue;
		switch(__disable_subchannel_easy(schid, &schib)) {
		case 0:
		case -ENODEV:
			break;
		default: /* -EBUSY */
			if (__clear_subchannel_easy(schid))
				break; /* give up... jump out of switch */
			stsch(schid, &schib);
			__disable_subchannel_easy(schid, &schib);
		}
	}
}
853 | |||
/* Make sure all subchannels are quiet before we re-ipl an lpar. */
/* Quiesce all subchannels, then re-IPL from device number @devno. */
void
reipl(unsigned long devno)
{
	clear_all_subchannels();
	do_reipl(devno);
}
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h new file mode 100644 index 000000000000..c50a9da420a9 --- /dev/null +++ b/drivers/s390/cio/cio.h | |||
@@ -0,0 +1,143 @@ | |||
1 | #ifndef S390_CIO_H | ||
2 | #define S390_CIO_H | ||
3 | |||
/*
 * where we put the ssd info
 * (subchannel description data; presumably filled in from the chsc
 * store-subchannel-description response — verify against chsc.c)
 */
struct ssd_info {
        __u8 valid:1;           /* set once the fields below are usable */
        __u8 type:7;            /* subchannel type */
        __u8 chpid[8];          /* chpids */
        __u16 fla[8];           /* full link addresses */
} __attribute__ ((packed));
13 | |||
/*
 * path management control word
 * Hardware-architected layout (part of the schib); field order and
 * widths must not change.
 */
struct pmcw {
        __u32 intparm;          /* interruption parameter */
        __u32 qf   : 1;         /* qdio facility */
        __u32 res0 : 1;         /* reserved zeros */
        __u32 isc  : 3;         /* interruption subclass */
        __u32 res5 : 3;         /* reserved zeros */
        __u32 ena  : 1;         /* enabled */
        __u32 lm   : 2;         /* limit mode */
        __u32 mme  : 2;         /* measurement-mode enable */
        __u32 mp   : 1;         /* multipath mode */
        __u32 tf   : 1;         /* timing facility */
        __u32 dnv  : 1;         /* device number valid */
        __u32 dev  : 16;        /* device number */
        __u8  lpm;              /* logical path mask */
        __u8  pnom;             /* path not operational mask */
        __u8  lpum;             /* last path used mask */
        __u8  pim;              /* path installed mask */
        __u16 mbi;              /* measurement-block index */
        __u8  pom;              /* path operational mask */
        __u8  pam;              /* path available mask */
        __u8  chpid[8];         /* CHPID 0-7 (if available) */
        __u32 unused1 : 8;      /* reserved zeros */
        __u32 st      : 3;      /* subchannel type */
        __u32 unused2 : 18;     /* reserved zeros */
        __u32 mbfc    : 1;      /* measurement block format control */
        __u32 xmwme   : 1;      /* extended measurement word mode enable */
        __u32 csense  : 1;      /* concurrent sense; can be enabled ...*/
                                /* ... per MSCH, however, if facility */
                                /* ... is not installed, this results */
                                /* ... in an operand exception. */
} __attribute__ ((packed));
48 | |||
/*
 * subchannel information block
 * Operand of the stsch/msch instructions (see their uses in cio.c);
 * layout is architected.
 */
struct schib {
        struct pmcw pmcw;       /* path management control word */
        struct scsw scsw;       /* subchannel status word */
        __u64 mba;              /* measurement block address */
        __u8 mda[4];            /* model dependent area */
} __attribute__ ((packed,aligned(4)));
58 | |||
/*
 * operation request block
 * Hardware-architected operand of the ssch instruction; field order
 * and widths must not change.
 */
struct orb {
        __u32 intparm;          /* interruption parameter */
        __u32 key  : 4;         /* flags, like key, suspend control, etc. */
        __u32 spnd : 1;         /* suspend control */
        __u32 res1 : 1;         /* reserved */
        __u32 mod  : 1;         /* modification control */
        __u32 sync : 1;         /* synchronize control */
        __u32 fmt  : 1;         /* format control */
        __u32 pfch : 1;         /* prefetch control */
        __u32 isic : 1;         /* initial-status-interruption control */
        __u32 alcc : 1;         /* address-limit-checking control */
        __u32 ssic : 1;         /* suppress-suspended-interr. control */
        __u32 res2 : 1;         /* reserved */
        __u32 c64  : 1;         /* IDAW/QDIO 64 bit control */
        __u32 i2k  : 1;         /* IDAW 2/4kB block size control */
        __u32 lpm  : 8;         /* logical path mask */
        __u32 ils  : 1;         /* incorrect length */
        __u32 zero : 6;         /* reserved zeros */
        __u32 orbx : 1;         /* ORB extension control */
        __u32 cpa;              /* channel program address */
} __attribute__ ((packed,aligned(4)));
83 | |||
/* subchannel data structure used by I/O subroutines;
 * software-side representation of one subchannel, embedded in the
 * driver core device tree via @dev */
struct subchannel {
        unsigned int irq;       /* aka. subchannel number */
        spinlock_t lock;        /* subchannel lock */

        enum {
                SUBCHANNEL_TYPE_IO = 0,
                SUBCHANNEL_TYPE_CHSC = 1,
                SUBCHANNEL_TYPE_MESSAGE = 2,
                SUBCHANNEL_TYPE_ADM = 3,
        } st;                   /* subchannel type */

        struct {
                unsigned int suspend:1; /* allow suspend */
                unsigned int prefetch:1;/* deny prefetch */
                unsigned int inter:1;   /* suppress intermediate interrupts */
        } __attribute__ ((packed)) options;

        __u8 vpm;               /* verified path mask */
        __u8 lpm;               /* logical path mask */
        __u8 opm;               /* operational path mask */
        struct schib schib;     /* subchannel information block */
        struct orb orb;         /* operation request block */
        struct ccw1 sense_ccw;  /* static ccw for sense command */
        struct ssd_info ssd_info;       /* subchannel description */
        struct device dev;      /* entry in device tree */
        struct css_driver *driver;      /* driver bound to this subchannel */
} __attribute__ ((aligned(8)));
112 | |||
113 | #define IO_INTERRUPT_TYPE 0 /* I/O interrupt type */ | ||
114 | |||
115 | #define to_subchannel(n) container_of(n, struct subchannel, dev) | ||
116 | |||
117 | extern int cio_validate_subchannel (struct subchannel *, unsigned int); | ||
118 | extern int cio_enable_subchannel (struct subchannel *, unsigned int); | ||
119 | extern int cio_disable_subchannel (struct subchannel *); | ||
120 | extern int cio_cancel (struct subchannel *); | ||
121 | extern int cio_clear (struct subchannel *); | ||
122 | extern int cio_resume (struct subchannel *); | ||
123 | extern int cio_halt (struct subchannel *); | ||
124 | extern int cio_start (struct subchannel *, struct ccw1 *, __u8); | ||
125 | extern int cio_start_key (struct subchannel *, struct ccw1 *, __u8, __u8); | ||
126 | extern int cio_cancel (struct subchannel *); | ||
127 | extern int cio_set_options (struct subchannel *, int); | ||
128 | extern int cio_get_options (struct subchannel *); | ||
129 | extern int cio_modify (struct subchannel *); | ||
130 | /* Use with care. */ | ||
131 | #ifdef CONFIG_CCW_CONSOLE | ||
132 | extern struct subchannel *cio_probe_console(void); | ||
133 | extern void cio_release_console(void); | ||
134 | extern int cio_is_console(int irq); | ||
135 | extern struct subchannel *cio_get_console_subchannel(void); | ||
136 | #else | ||
137 | #define cio_is_console(irq) 0 | ||
138 | #define cio_get_console_subchannel() NULL | ||
139 | #endif | ||
140 | |||
141 | extern int cio_show_msg; | ||
142 | |||
143 | #endif | ||
diff --git a/drivers/s390/cio/cio_debug.h b/drivers/s390/cio/cio_debug.h new file mode 100644 index 000000000000..6af8b27d366b --- /dev/null +++ b/drivers/s390/cio/cio_debug.h | |||
@@ -0,0 +1,32 @@ | |||
1 | #ifndef CIO_DEBUG_H | ||
2 | #define CIO_DEBUG_H | ||
3 | |||
4 | #include <asm/debug.h> | ||
5 | |||
/* record a plain text event in the cio trace log */
#define CIO_TRACE_EVENT(imp, txt) do { \
        debug_text_event(cio_debug_trace_id, imp, txt); \
} while (0)

/* sprintf-style message into the cio message log */
#define CIO_MSG_EVENT(imp, args...) do { \
        debug_sprintf_event(cio_debug_msg_id, imp , ##args); \
} while (0)

/* sprintf-style message into the channel-report-word log */
#define CIO_CRW_EVENT(imp, args...) do { \
        debug_sprintf_event(cio_debug_crw_id, imp , ##args); \
} while (0)

/* raw (hex) event into the trace log */
#define CIO_HEX_EVENT(imp, args...) do { \
        debug_event(cio_debug_trace_id, imp, ##args); \
} while (0)

/* printk the message when cio_show_msg is set and always record it in
 * the message log.
 * NOTE(review): this is a GNU statement expression; its result appears
 * unused — confirm callers before converting to do { } while (0). */
#define CIO_DEBUG(printk_level,event_level,msg...) ({ \
        if (cio_show_msg) printk(printk_level msg); \
        CIO_MSG_EVENT (event_level, msg); \
})

/* for use of debug feature */
extern debug_info_t *cio_debug_msg_id;
extern debug_info_t *cio_debug_trace_id;
extern debug_info_t *cio_debug_crw_id;
31 | |||
32 | #endif | ||
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c new file mode 100644 index 000000000000..49def26ba383 --- /dev/null +++ b/drivers/s390/cio/cmf.c | |||
@@ -0,0 +1,1042 @@ | |||
1 | /* | ||
2 | * linux/drivers/s390/cio/cmf.c ($Revision: 1.16 $) | ||
3 | * | ||
4 | * Linux on zSeries Channel Measurement Facility support | ||
5 | * | ||
6 | * Copyright 2000,2003 IBM Corporation | ||
7 | * | ||
8 | * Author: Arnd Bergmann <arndb@de.ibm.com> | ||
9 | * | ||
10 | * original idea from Natarajan Krishnaswami <nkrishna@us.ibm.com> | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or modify | ||
13 | * it under the terms of the GNU General Public License as published by | ||
14 | * the Free Software Foundation; either version 2, or (at your option) | ||
15 | * any later version. | ||
16 | * | ||
17 | * This program is distributed in the hope that it will be useful, | ||
18 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | * GNU General Public License for more details. | ||
21 | * | ||
22 | * You should have received a copy of the GNU General Public License | ||
23 | * along with this program; if not, write to the Free Software | ||
24 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
25 | */ | ||
26 | |||
27 | #include <linux/bootmem.h> | ||
28 | #include <linux/device.h> | ||
29 | #include <linux/init.h> | ||
30 | #include <linux/list.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/moduleparam.h> | ||
33 | |||
34 | #include <asm/ccwdev.h> | ||
35 | #include <asm/cio.h> | ||
36 | #include <asm/cmb.h> | ||
37 | |||
38 | #include "cio.h" | ||
39 | #include "css.h" | ||
40 | #include "device.h" | ||
41 | #include "ioasm.h" | ||
42 | #include "chsc.h" | ||
43 | |||
/* parameter to enable cmf during boot, possible uses are:
 *  "s390cmf" -- enable cmf and allocate 2 MB of ram so measuring can be
 *               used on any subchannel
 *  "s390cmf=<num>" -- enable cmf and allocate enough memory to measure
 *                     <num> subchannel, where <num> is an integer
 *                     between 1 and 65535, default is 1024
 */
#define ARGSTRING "s390cmf"

/* indices for READCMB; used as the @index argument of the
 * cmb_operations->read implementations below */
enum cmb_index {
 /* basic and extended format: */
        cmb_ssch_rsch_count,
        cmb_sample_count,
        cmb_device_connect_time,
        cmb_function_pending_time,
        cmb_device_disconnect_time,
        cmb_control_unit_queuing_time,
        cmb_device_active_only_time,
 /* extended format only: */
        cmb_device_busy_time,
        cmb_initial_command_response_time,
};
67 | |||
/**
 * enum cmb_format - types of supported measurement block formats
 *
 * @CMF_BASIC: traditional channel measurement blocks supported
 *      by all machines that we run on
 * @CMF_EXTENDED: improved format that was introduced with the z990
 *      machine
 * @CMF_AUTODETECT: default: use extended format when running on a z990
 *      or later machine, otherwise fall back to basic format
 **/
enum cmb_format {
        CMF_BASIC,
        CMF_EXTENDED,
        CMF_AUTODETECT = -1,
};
/**
 * format - actual format for all measurement blocks
 *
 * The format module parameter can be set to a value of 0 (zero)
 * or 1, indicating basic or extended format as described for
 * enum cmb_format.
 *
 * NOTE(review): the backing variable is an int defaulting to
 * CMF_AUTODETECT (-1) but is registered as a "bool" parameter, which
 * only accepts 0/1 from the user — confirm that reading/printing the
 * -1 default through param_get_bool behaves as intended.
 */
static int format = CMF_AUTODETECT;
module_param(format, bool, 0444);
92 | |||
/**
 * struct cmb_operations - functions to use depending on cmb_format
 *
 * all these functions operate on a struct cmf_device. There is only
 * one instance of struct cmb_operations because all cmf_device
 * objects are guaranteed to be of the same type.
 *
 * @alloc: allocate memory for a channel measurement block,
 *         either with the help of a special pool or with kmalloc
 * @free:  free memory allocated with @alloc
 * @set:   enable or disable measurement
 * @read:  return one accumulator value (see enum cmb_index)
 * @readall: read a measurement block in a common format
 * @reset: clear the data in the associated measurement block and
 *         reset its time stamp
 * @attr_group: sysfs attributes exposed for this format
 */
struct cmb_operations {
        int (*alloc)  (struct ccw_device*);
        void(*free)   (struct ccw_device*);
        int (*set)    (struct ccw_device*, u32);
        u64 (*read)   (struct ccw_device*, int);
        int (*readall)(struct ccw_device*, struct cmbdata *);
        void (*reset) (struct ccw_device*);

        struct attribute_group *attr_group;
};
/* the active operations table, chosen once at init according to
 * the detected/selected cmb_format */
static struct cmb_operations *cmbops;
119 | |||
120 | /* our user interface is designed in terms of nanoseconds, | ||
121 | * while the hardware measures total times in its own | ||
122 | * unit.*/ | ||
123 | static inline u64 time_to_nsec(u32 value) | ||
124 | { | ||
125 | return ((u64)value) * 128000ull; | ||
126 | } | ||
127 | |||
128 | /* | ||
129 | * Users are usually interested in average times, | ||
130 | * not accumulated time. | ||
131 | * This also helps us with atomicity problems | ||
132 | * when reading sinlge values. | ||
133 | */ | ||
134 | static inline u64 time_to_avg_nsec(u32 value, u32 count) | ||
135 | { | ||
136 | u64 ret; | ||
137 | |||
138 | /* no samples yet, avoid division by 0 */ | ||
139 | if (count == 0) | ||
140 | return 0; | ||
141 | |||
142 | /* value comes in units of 128 µsec */ | ||
143 | ret = time_to_nsec(value); | ||
144 | do_div(ret, count); | ||
145 | |||
146 | return ret; | ||
147 | } | ||
148 | |||
/* activate or deactivate the channel monitor. When area is NULL,
 * the monitor is deactivated. The channel monitor needs to
 * be active in order to measure subchannels, which also need
 * to be enabled. */
static inline void
cmf_activate(void *area, unsigned int onoff)
{
        /* the schm instruction takes its operands in gprs 2 and 1 */
        register void * __gpr2 asm("2");
        register long __gpr1 asm("1");

        __gpr2 = area;          /* measurement block area (or NULL) */
        __gpr1 = onoff ? 2 : 0; /* 2 == activate, 0 == deactivate */
        /* activate channel measurement */
        asm("schm" : : "d" (__gpr2), "d" (__gpr1) );
}
164 | |||
/*
 * Update the measurement fields (mme, mbfc, and measurement block
 * address or index) of the subchannel behind @cdev via msch.
 *
 * Returns 0 on success, -EBUSY if the subchannel has I/O or status
 * pending, -ENODEV if it is no longer valid, -EINVAL if msch raised
 * an exception or the update did not stick after three attempts.
 */
static int
set_schib(struct ccw_device *cdev, u32 mme, int mbfc, unsigned long address)
{
        int ret;
        int retry;
        struct subchannel *sch;
        struct schib *schib;

        sch = to_subchannel(cdev->dev.parent);
        schib = &sch->schib;
        /* msch can silently fail, so do it again if necessary */
        for (retry = 0; retry < 3; retry++) {
                /* prepare schib */
                stsch(sch->irq, schib);
                schib->pmcw.mme = mme;
                schib->pmcw.mbfc = mbfc;
                /* address can be either a block address or a block index */
                if (mbfc)
                        schib->mba = address;
                else
                        schib->pmcw.mbi = address;

                /* try to submit it */
                switch(ret = msch_err(sch->irq, schib)) {
                case 0:
                        break;
                case 1:
                case 2: /* in I/O or status pending */
                        ret = -EBUSY;
                        break;
                case 3: /* subchannel is no longer valid */
                        ret = -ENODEV;
                        break;
                default: /* msch caught an exception */
                        ret = -EINVAL;
                        break;
                }
                stsch(sch->irq, schib); /* restore the schib */

                if (ret)
                        break;

                /* check if it worked */
                if (schib->pmcw.mme == mme &&
                    schib->pmcw.mbfc == mbfc &&
                    (mbfc ? (schib->mba == address)
                          : (schib->pmcw.mbi == address)))
                        return 0;

                /* silently ignored by the hardware; try again */
                ret = -EINVAL;
        }

        return ret;
}
219 | |||
/* context handed from set_schib_wait to retry_set_schib via
 * cdev->private->cmb_wait; lives on the waiting thread's stack */
struct set_schib_struct {
        u32 mme;                /* measurement-mode enable value */
        int mbfc;               /* measurement block format control */
        unsigned long address;  /* block address (mbfc) or block index */
        wait_queue_head_t wait; /* the requesting thread sleeps here */
        int ret;                /* result; 1 means still pending */
};
227 | |||
228 | static int set_schib_wait(struct ccw_device *cdev, u32 mme, | ||
229 | int mbfc, unsigned long address) | ||
230 | { | ||
231 | struct set_schib_struct s = { | ||
232 | .mme = mme, | ||
233 | .mbfc = mbfc, | ||
234 | .address = address, | ||
235 | .wait = __WAIT_QUEUE_HEAD_INITIALIZER(s.wait), | ||
236 | }; | ||
237 | |||
238 | spin_lock_irq(cdev->ccwlock); | ||
239 | s.ret = set_schib(cdev, mme, mbfc, address); | ||
240 | if (s.ret != -EBUSY) { | ||
241 | goto out_nowait; | ||
242 | } | ||
243 | |||
244 | if (cdev->private->state != DEV_STATE_ONLINE) { | ||
245 | s.ret = -EBUSY; | ||
246 | /* if the device is not online, don't even try again */ | ||
247 | goto out_nowait; | ||
248 | } | ||
249 | cdev->private->state = DEV_STATE_CMFCHANGE; | ||
250 | cdev->private->cmb_wait = &s; | ||
251 | s.ret = 1; | ||
252 | |||
253 | spin_unlock_irq(cdev->ccwlock); | ||
254 | if (wait_event_interruptible(s.wait, s.ret != 1)) { | ||
255 | spin_lock_irq(cdev->ccwlock); | ||
256 | if (s.ret == 1) { | ||
257 | s.ret = -ERESTARTSYS; | ||
258 | cdev->private->cmb_wait = 0; | ||
259 | if (cdev->private->state == DEV_STATE_CMFCHANGE) | ||
260 | cdev->private->state = DEV_STATE_ONLINE; | ||
261 | } | ||
262 | spin_unlock_irq(cdev->ccwlock); | ||
263 | } | ||
264 | return s.ret; | ||
265 | |||
266 | out_nowait: | ||
267 | spin_unlock_irq(cdev->ccwlock); | ||
268 | return s.ret; | ||
269 | } | ||
270 | |||
271 | void retry_set_schib(struct ccw_device *cdev) | ||
272 | { | ||
273 | struct set_schib_struct *s; | ||
274 | |||
275 | s = cdev->private->cmb_wait; | ||
276 | cdev->private->cmb_wait = 0; | ||
277 | if (!s) { | ||
278 | WARN_ON(1); | ||
279 | return; | ||
280 | } | ||
281 | s->ret = set_schib(cdev, s->mme, s->mbfc, s->address); | ||
282 | wake_up(&s->wait); | ||
283 | } | ||
284 | |||
/**
 * struct cmb_area - container for global cmb data
 *
 * @mem:  pointer to CMBs (only in basic measurement mode)
 * @list: contains a linked list of all subchannels
 * @num_channels: capacity of @mem, settable via the maxchannels
 *      module parameter below
 * @lock: protect concurrent access to @mem and @list
 */
struct cmb_area {
        struct cmb *mem;
        struct list_head list;
        int num_channels;
        spinlock_t lock;
};

/* the single global instance; @mem stays NULL until the first
 * basic-mode allocation */
static struct cmb_area cmb_area = {
        .lock = SPIN_LOCK_UNLOCKED,
        .list = LIST_HEAD_INIT(cmb_area.list),
        .num_channels  = 1024,
};
304 | |||
305 | |||
/* ****** old style CMB handling ********/

/** int maxchannels
 *
 * Basic channel measurement blocks are allocated in one contiguous
 * block of memory, which can not be moved as long as any channel
 * is active. Therefore, a maximum number of subchannels needs to
 * be defined somewhere. This is a module parameter, defaulting to
 * a reasonable value of 1024, or 32 kb of memory.
 * Current kernels don't allow kmalloc with more than 128kb, so the
 * maximum is 4096
 */

module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
320 | |||
/**
 * struct cmb - basic channel measurement block
 *
 * cmb as used by the hardware the fields are described in z/Architecture
 * Principles of Operation, chapter 17.
 * The area to be a contiguous array and may not be reallocated or freed.
 * Only one cmb area can be present in the system.
 * (16 bytes of data + 8 reserved bytes = 32 bytes per block)
 */
struct cmb {
        u16 ssch_rsch_count;            /* start/resume subchannel count */
        u16 sample_count;
        u32 device_connect_time;        /* times in units of 128 usec */
        u32 function_pending_time;
        u32 device_disconnect_time;
        u32 control_unit_queuing_time;
        u32 device_active_only_time;
        u32 reserved[2];
};
339 | |||
/* insert a single device into the cmb_area list
 * called with cmb_area.lock held from alloc_cmb
 *
 * Returns 0 on success, -EBUSY if the device already has a cmb,
 * -ENOMEM when all num_channels slots in cmb_area.mem are taken.
 */
static inline int
alloc_cmb_single (struct ccw_device *cdev)
{
        struct cmb *cmb;
        struct ccw_device_private *node;
        int ret;

        spin_lock_irq(cdev->ccwlock);
        if (!list_empty(&cdev->private->cmb_list)) {
                /* already on the list, i.e. already allocated */
                ret = -EBUSY;
                goto out;
        }

        /* find first unused cmb in cmb_area.mem.
         * this is a little tricky: cmb_area.list
         * remains sorted by ->cmb pointers, so walking it in step
         * with the array finds the first gap */
        cmb = cmb_area.mem;
        list_for_each_entry(node, &cmb_area.list, cmb_list) {
                if ((struct cmb*)node->cmb > cmb)
                        break;  /* gap found before this node */
                cmb++;
        }
        if (cmb - cmb_area.mem >= cmb_area.num_channels) {
                ret = -ENOMEM;
                goto out;
        }

        /* insert new cmb, keeping the list sorted (before 'node') */
        list_add_tail(&cdev->private->cmb_list, &node->cmb_list);
        cdev->private->cmb = cmb;
        ret = 0;
out:
        spin_unlock_irq(cdev->ccwlock);
        return ret;
}
378 | |||
/*
 * Allocate a basic-format measurement block for @cdev, creating the
 * global contiguous cmb array on first use.
 * Returns 0, -ENOMEM (area or slot exhausted) or -EBUSY (already
 * allocated, via alloc_cmb_single).
 */
static int
alloc_cmb (struct ccw_device *cdev)
{
        int ret;
        struct cmb *mem;
        ssize_t size;

        spin_lock(&cmb_area.lock);

        if (!cmb_area.mem) {
                /* there is no user yet, so we need a new area */
                size = sizeof(struct cmb) * cmb_area.num_channels;
                WARN_ON(!list_empty(&cmb_area.list));

                /* drop the lock while sleeping in the allocator;
                 * we re-check cmb_area.mem afterwards */
                spin_unlock(&cmb_area.lock);
                mem = (void*)__get_free_pages(GFP_KERNEL | GFP_DMA,
                                 get_order(size));
                spin_lock(&cmb_area.lock);

                if (cmb_area.mem) {
                        /* ok, another thread was faster */
                        free_pages((unsigned long)mem, get_order(size));
                } else if (!mem) {
                        /* no luck */
                        ret = -ENOMEM;
                        goto out;
                } else {
                        /* everything ok */
                        memset(mem, 0, size);
                        cmb_area.mem = mem;
                        /* start the channel measurement facility on
                         * the fresh area */
                        cmf_activate(cmb_area.mem, 1);
                }
        }

        /* do the actual allocation */
        ret = alloc_cmb_single(cdev);
out:
        spin_unlock(&cmb_area.lock);

        return ret;
}
420 | |||
/*
 * Release @cdev's basic measurement block; when the last user goes
 * away, deactivate the monitor and free the global cmb array.
 * Lock order: cmb_area.lock, then ccwlock (same as alloc_cmb path).
 */
static void
free_cmb(struct ccw_device *cdev)
{
        struct ccw_device_private *priv;

        priv = cdev->private;

        spin_lock(&cmb_area.lock);
        spin_lock_irq(cdev->ccwlock);

        if (list_empty(&priv->cmb_list)) {
                /* already freed */
                goto out;
        }

        priv->cmb = NULL;
        list_del_init(&priv->cmb_list);

        if (list_empty(&cmb_area.list)) {
                /* last measured channel: stop the facility and
                 * release the contiguous block array */
                ssize_t size;
                size = sizeof(struct cmb) * cmb_area.num_channels;
                cmf_activate(NULL, 0);
                free_pages((unsigned long)cmb_area.mem, get_order(size));
                cmb_area.mem = NULL;
        }
out:
        spin_unlock_irq(cdev->ccwlock);
        spin_unlock(&cmb_area.lock);
}
450 | |||
451 | static int | ||
452 | set_cmb(struct ccw_device *cdev, u32 mme) | ||
453 | { | ||
454 | u16 offset; | ||
455 | |||
456 | if (!cdev->private->cmb) | ||
457 | return -EINVAL; | ||
458 | |||
459 | offset = mme ? (struct cmb *)cdev->private->cmb - cmb_area.mem : 0; | ||
460 | |||
461 | return set_schib_wait(cdev, mme, 0, offset); | ||
462 | } | ||
463 | |||
464 | static u64 | ||
465 | read_cmb (struct ccw_device *cdev, int index) | ||
466 | { | ||
467 | /* yes, we have to put it on the stack | ||
468 | * because the cmb must only be accessed | ||
469 | * atomically, e.g. with mvc */ | ||
470 | struct cmb cmb; | ||
471 | unsigned long flags; | ||
472 | u32 val; | ||
473 | |||
474 | spin_lock_irqsave(cdev->ccwlock, flags); | ||
475 | if (!cdev->private->cmb) { | ||
476 | spin_unlock_irqrestore(cdev->ccwlock, flags); | ||
477 | return 0; | ||
478 | } | ||
479 | |||
480 | cmb = *(struct cmb*)cdev->private->cmb; | ||
481 | spin_unlock_irqrestore(cdev->ccwlock, flags); | ||
482 | |||
483 | switch (index) { | ||
484 | case cmb_ssch_rsch_count: | ||
485 | return cmb.ssch_rsch_count; | ||
486 | case cmb_sample_count: | ||
487 | return cmb.sample_count; | ||
488 | case cmb_device_connect_time: | ||
489 | val = cmb.device_connect_time; | ||
490 | break; | ||
491 | case cmb_function_pending_time: | ||
492 | val = cmb.function_pending_time; | ||
493 | break; | ||
494 | case cmb_device_disconnect_time: | ||
495 | val = cmb.device_disconnect_time; | ||
496 | break; | ||
497 | case cmb_control_unit_queuing_time: | ||
498 | val = cmb.control_unit_queuing_time; | ||
499 | break; | ||
500 | case cmb_device_active_only_time: | ||
501 | val = cmb.device_active_only_time; | ||
502 | break; | ||
503 | default: | ||
504 | return 0; | ||
505 | } | ||
506 | return time_to_avg_nsec(val, cmb.sample_count); | ||
507 | } | ||
508 | |||
509 | static int | ||
510 | readall_cmb (struct ccw_device *cdev, struct cmbdata *data) | ||
511 | { | ||
512 | /* yes, we have to put it on the stack | ||
513 | * because the cmb must only be accessed | ||
514 | * atomically, e.g. with mvc */ | ||
515 | struct cmb cmb; | ||
516 | unsigned long flags; | ||
517 | u64 time; | ||
518 | |||
519 | spin_lock_irqsave(cdev->ccwlock, flags); | ||
520 | if (!cdev->private->cmb) { | ||
521 | spin_unlock_irqrestore(cdev->ccwlock, flags); | ||
522 | return -ENODEV; | ||
523 | } | ||
524 | |||
525 | cmb = *(struct cmb*)cdev->private->cmb; | ||
526 | time = get_clock() - cdev->private->cmb_start_time; | ||
527 | spin_unlock_irqrestore(cdev->ccwlock, flags); | ||
528 | |||
529 | memset(data, 0, sizeof(struct cmbdata)); | ||
530 | |||
531 | /* we only know values before device_busy_time */ | ||
532 | data->size = offsetof(struct cmbdata, device_busy_time); | ||
533 | |||
534 | /* convert to nanoseconds */ | ||
535 | data->elapsed_time = (time * 1000) >> 12; | ||
536 | |||
537 | /* copy data to new structure */ | ||
538 | data->ssch_rsch_count = cmb.ssch_rsch_count; | ||
539 | data->sample_count = cmb.sample_count; | ||
540 | |||
541 | /* time fields are converted to nanoseconds while copying */ | ||
542 | data->device_connect_time = time_to_nsec(cmb.device_connect_time); | ||
543 | data->function_pending_time = time_to_nsec(cmb.function_pending_time); | ||
544 | data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time); | ||
545 | data->control_unit_queuing_time | ||
546 | = time_to_nsec(cmb.control_unit_queuing_time); | ||
547 | data->device_active_only_time | ||
548 | = time_to_nsec(cmb.device_active_only_time); | ||
549 | |||
550 | return 0; | ||
551 | } | ||
552 | |||
553 | static void | ||
554 | reset_cmb(struct ccw_device *cdev) | ||
555 | { | ||
556 | struct cmb *cmb; | ||
557 | spin_lock_irq(cdev->ccwlock); | ||
558 | cmb = cdev->private->cmb; | ||
559 | if (cmb) | ||
560 | memset (cmb, 0, sizeof (*cmb)); | ||
561 | cdev->private->cmb_start_time = get_clock(); | ||
562 | spin_unlock_irq(cdev->ccwlock); | ||
563 | } | ||
564 | |||
/* sysfs attributes for basic mode; forward declaration, the
 * definition is not visible in this part of the file */
static struct attribute_group cmf_attr_group;

/* operations used when the basic (old style) block format is active */
static struct cmb_operations cmbops_basic = {
        .alloc  = alloc_cmb,
        .free   = free_cmb,
        .set    = set_cmb,
        .read   = read_cmb,
        .readall    = readall_cmb,
        .reset      = reset_cmb,
        .attr_group = &cmf_attr_group,
};
576 | |||
/* ******** extended cmb handling ********/

/**
 * struct cmbe - extended channel measurement block
 *
 * cmb as used by the hardware, may be in any 64 bit physical location,
 * the fields are described in z/Architecture Principles of Operation,
 * third edition, chapter 17.
 * (9 * 4 data bytes + 7 * 4 reserved bytes = 64 bytes per block)
 */
struct cmbe {
        u32 ssch_rsch_count;            /* start/resume subchannel count */
        u32 sample_count;
        u32 device_connect_time;        /* times in units of 128 usec */
        u32 function_pending_time;
        u32 device_disconnect_time;
        u32 control_unit_queuing_time;
        u32 device_active_only_time;
        u32 device_busy_time;
        u32 initial_command_response_time;
        u32 reserved[7];
};
598 | |||
/* kmalloc only guarantees 8 byte alignment, but we need cmbe
 * pointers to be naturally aligned. Make sure to allocate
 * enough space for two cmbes */
static inline struct cmbe* cmbe_align(struct cmbe *c)
{
        unsigned long addr;
        /* round c up to the next multiple of sizeof(struct cmbe).
         * NOTE(review): this mask trick only yields natural alignment
         * when sizeof(struct cmbe) is a power of two and c is already
         * sizeof(long)-aligned (true for kmalloc) — confirm. */
        addr = ((unsigned long)c + sizeof (struct cmbe) - sizeof(long)) &
                ~(sizeof (struct cmbe) - sizeof(long));
        return (struct cmbe*)addr;
}
609 | |||
/*
 * Allocate an extended measurement block for @cdev; activates the
 * global measurement facility when this is the first measured channel.
 * Returns 0, -ENOMEM, or -EBUSY if the device already has a block.
 */
static int
alloc_cmbe (struct ccw_device *cdev)
{
        struct cmbe *cmbe;
        /* twice the size, so cmbe_align() can always find a naturally
         * aligned block inside the allocation */
        cmbe = kmalloc (sizeof (*cmbe) * 2, GFP_KERNEL);
        if (!cmbe)
                return -ENOMEM;

        spin_lock_irq(cdev->ccwlock);
        if (cdev->private->cmb) {
                /* somebody else allocated concurrently; back off */
                kfree(cmbe);
                spin_unlock_irq(cdev->ccwlock);
                return -EBUSY;
        }

        cdev->private->cmb = cmbe;
        spin_unlock_irq(cdev->ccwlock);

        /* activate global measurement if this is the first channel */
        spin_lock(&cmb_area.lock);
        if (list_empty(&cmb_area.list))
                cmf_activate(NULL, 1);
        list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
        spin_unlock(&cmb_area.lock);

        return 0;
}
637 | |||
638 | static void | ||
639 | free_cmbe (struct ccw_device *cdev) | ||
640 | { | ||
641 | spin_lock_irq(cdev->ccwlock); | ||
642 | if (cdev->private->cmb) | ||
643 | kfree(cdev->private->cmb); | ||
644 | cdev->private->cmb = NULL; | ||
645 | spin_unlock_irq(cdev->ccwlock); | ||
646 | |||
647 | /* deactivate global measurement if this is the last channel */ | ||
648 | spin_lock(&cmb_area.lock); | ||
649 | list_del_init(&cdev->private->cmb_list); | ||
650 | if (list_empty(&cmb_area.list)) | ||
651 | cmf_activate(NULL, 0); | ||
652 | spin_unlock(&cmb_area.lock); | ||
653 | } | ||
654 | |||
655 | static int | ||
656 | set_cmbe(struct ccw_device *cdev, u32 mme) | ||
657 | { | ||
658 | unsigned long mba; | ||
659 | |||
660 | if (!cdev->private->cmb) | ||
661 | return -EINVAL; | ||
662 | mba = mme ? (unsigned long) cmbe_align(cdev->private->cmb) : 0; | ||
663 | |||
664 | return set_schib_wait(cdev, mme, 1, mba); | ||
665 | } | ||
666 | |||
667 | |||
/*
 * Return one value from a device's extended measurement block.
 * NOTE(review): unlike read_cmb() this is not declared static even
 * though it appears to be used only through the cmb_operations table —
 * confirm there is no external user before changing linkage.
 */
u64
read_cmbe (struct ccw_device *cdev, int index)
{
        /* yes, we have to put it on the stack
         * because the cmb must only be accessed
         * atomically, e.g. with mvc */
        struct cmbe cmb;
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(cdev->ccwlock, flags);
        if (!cdev->private->cmb) {
                spin_unlock_irqrestore(cdev->ccwlock, flags);
                return 0;
        }

        cmb = *cmbe_align(cdev->private->cmb);
        spin_unlock_irqrestore(cdev->ccwlock, flags);

        switch (index) {
        /* the two counters are returned verbatim */
        case cmb_ssch_rsch_count:
                return cmb.ssch_rsch_count;
        case cmb_sample_count:
                return cmb.sample_count;
        /* the time accumulators are averaged per sample below */
        case cmb_device_connect_time:
                val = cmb.device_connect_time;
                break;
        case cmb_function_pending_time:
                val = cmb.function_pending_time;
                break;
        case cmb_device_disconnect_time:
                val = cmb.device_disconnect_time;
                break;
        case cmb_control_unit_queuing_time:
                val = cmb.control_unit_queuing_time;
                break;
        case cmb_device_active_only_time:
                val = cmb.device_active_only_time;
                break;
        case cmb_device_busy_time:
                val = cmb.device_busy_time;
                break;
        case cmb_initial_command_response_time:
                val = cmb.initial_command_response_time;
                break;
        default:
                return 0;       /* unknown index */
        }
        return time_to_avg_nsec(val, cmb.sample_count);
}
718 | |||
/*
 * Copy all extended-format measurement counters of @cdev into @data,
 * converting timer fields from TOD clock units to nanoseconds.
 * Returns 0 on success, -ENODEV if measurement is not enabled.
 */
static int
readall_cmbe (struct ccw_device *cdev, struct cmbdata *data)
{
	/* yes, we have to put it on the stack
	 * because the cmb must only be accessed
	 * atomically, e.g. with mvc */
	struct cmbe cmb;
	unsigned long flags;
	u64 time;

	spin_lock_irqsave(cdev->ccwlock, flags);
	if (!cdev->private->cmb) {
		spin_unlock_irqrestore(cdev->ccwlock, flags);
		return -ENODEV;
	}

	cmb = *cmbe_align(cdev->private->cmb);
	/* Elapsed time since the block was last reset, in TOD clock units. */
	time = get_clock() - cdev->private->cmb_start_time;
	spin_unlock_irqrestore(cdev->ccwlock, flags);

	memset (data, 0, sizeof(struct cmbdata));

	/* we only know values before device_busy_time */
	data->size = offsetof(struct cmbdata, device_busy_time);

	/* convert to nanoseconds */
	data->elapsed_time = (time * 1000) >> 12;

	/* copy data to new structure */
	data->ssch_rsch_count = cmb.ssch_rsch_count;
	data->sample_count = cmb.sample_count;

	/* time fields are converted to nanoseconds while copying */
	data->device_connect_time = time_to_nsec(cmb.device_connect_time);
	data->function_pending_time = time_to_nsec(cmb.function_pending_time);
	data->device_disconnect_time = time_to_nsec(cmb.device_disconnect_time);
	data->control_unit_queuing_time
		= time_to_nsec(cmb.control_unit_queuing_time);
	data->device_active_only_time
		= time_to_nsec(cmb.device_active_only_time);
	data->device_busy_time = time_to_nsec(cmb.device_busy_time);
	data->initial_command_response_time
		= time_to_nsec(cmb.initial_command_response_time);

	return 0;
}
765 | |||
766 | static void | ||
767 | reset_cmbe(struct ccw_device *cdev) | ||
768 | { | ||
769 | struct cmbe *cmb; | ||
770 | spin_lock_irq(cdev->ccwlock); | ||
771 | cmb = cmbe_align(cdev->private->cmb); | ||
772 | if (cmb) | ||
773 | memset (cmb, 0, sizeof (*cmb)); | ||
774 | cdev->private->cmb_start_time = get_clock(); | ||
775 | spin_unlock_irq(cdev->ccwlock); | ||
776 | } | ||
777 | |||
/* Defined below; referenced from the operations vector. */
static struct attribute_group cmf_attr_group_ext;

/* Measurement backend for the extended-format measurement block. */
static struct cmb_operations cmbops_extended = {
	.alloc	    = alloc_cmbe,
	.free	    = free_cmbe,
	.set	    = set_cmbe,
	.read	    = read_cmbe,
	.readall    = readall_cmbe,
	.reset	    = reset_cmbe,
	.attr_group = &cmf_attr_group_ext,
};
789 | |||
790 | |||
791 | static ssize_t | ||
792 | cmb_show_attr(struct device *dev, char *buf, enum cmb_index idx) | ||
793 | { | ||
794 | return sprintf(buf, "%lld\n", | ||
795 | (unsigned long long) cmf_read(to_ccwdev(dev), idx)); | ||
796 | } | ||
797 | |||
798 | static ssize_t | ||
799 | cmb_show_avg_sample_interval(struct device *dev, char *buf) | ||
800 | { | ||
801 | struct ccw_device *cdev; | ||
802 | long interval; | ||
803 | unsigned long count; | ||
804 | |||
805 | cdev = to_ccwdev(dev); | ||
806 | interval = get_clock() - cdev->private->cmb_start_time; | ||
807 | count = cmf_read(cdev, cmb_sample_count); | ||
808 | if (count) | ||
809 | interval /= count; | ||
810 | else | ||
811 | interval = -1; | ||
812 | return sprintf(buf, "%ld\n", interval); | ||
813 | } | ||
814 | |||
815 | static ssize_t | ||
816 | cmb_show_avg_utilization(struct device *dev, char *buf) | ||
817 | { | ||
818 | struct cmbdata data; | ||
819 | u64 utilization; | ||
820 | unsigned long t, u; | ||
821 | int ret; | ||
822 | |||
823 | ret = cmf_readall(to_ccwdev(dev), &data); | ||
824 | if (ret) | ||
825 | return ret; | ||
826 | |||
827 | utilization = data.device_connect_time + | ||
828 | data.function_pending_time + | ||
829 | data.device_disconnect_time; | ||
830 | |||
831 | /* shift to avoid long long division */ | ||
832 | while (-1ul < (data.elapsed_time | utilization)) { | ||
833 | utilization >>= 8; | ||
834 | data.elapsed_time >>= 8; | ||
835 | } | ||
836 | |||
837 | /* calculate value in 0.1 percent units */ | ||
838 | t = (unsigned long) data.elapsed_time / 1000; | ||
839 | u = (unsigned long) utilization / t; | ||
840 | |||
841 | return sprintf(buf, "%02ld.%01ld%%\n", u/ 10, u - (u/ 10) * 10); | ||
842 | } | ||
843 | |||
/*
 * Generate a read-only sysfs attribute "name" that reports one
 * measurement value via cmb_show_attr().
 */
#define cmf_attr(name) \
static ssize_t show_ ## name (struct device * dev, char * buf) \
{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
static DEVICE_ATTR(name, 0444, show_ ## name, NULL);

/*
 * Same, but the attribute is named "avg_name"; used for the timer
 * fields, for which the read functions return averaged values.
 */
#define cmf_attr_avg(name) \
static ssize_t show_avg_ ## name (struct device * dev, char * buf) \
{ return cmb_show_attr((dev), buf, cmb_ ## name); } \
static DEVICE_ATTR(avg_ ## name, 0444, show_avg_ ## name, NULL);

cmf_attr(ssch_rsch_count);
cmf_attr(sample_count);
cmf_attr_avg(device_connect_time);
cmf_attr_avg(function_pending_time);
cmf_attr_avg(device_disconnect_time);
cmf_attr_avg(control_unit_queuing_time);
cmf_attr_avg(device_active_only_time);
cmf_attr_avg(device_busy_time);
cmf_attr_avg(initial_command_response_time);

static DEVICE_ATTR(avg_sample_interval, 0444, cmb_show_avg_sample_interval, NULL);
static DEVICE_ATTR(avg_utilization, 0444, cmb_show_avg_utilization, NULL);
866 | |||
867 | static struct attribute *cmf_attributes[] = { | ||
868 | &dev_attr_avg_sample_interval.attr, | ||
869 | &dev_attr_avg_utilization.attr, | ||
870 | &dev_attr_ssch_rsch_count.attr, | ||
871 | &dev_attr_sample_count.attr, | ||
872 | &dev_attr_avg_device_connect_time.attr, | ||
873 | &dev_attr_avg_function_pending_time.attr, | ||
874 | &dev_attr_avg_device_disconnect_time.attr, | ||
875 | &dev_attr_avg_control_unit_queuing_time.attr, | ||
876 | &dev_attr_avg_device_active_only_time.attr, | ||
877 | 0, | ||
878 | }; | ||
879 | |||
880 | static struct attribute_group cmf_attr_group = { | ||
881 | .name = "cmf", | ||
882 | .attrs = cmf_attributes, | ||
883 | }; | ||
884 | |||
885 | static struct attribute *cmf_attributes_ext[] = { | ||
886 | &dev_attr_avg_sample_interval.attr, | ||
887 | &dev_attr_avg_utilization.attr, | ||
888 | &dev_attr_ssch_rsch_count.attr, | ||
889 | &dev_attr_sample_count.attr, | ||
890 | &dev_attr_avg_device_connect_time.attr, | ||
891 | &dev_attr_avg_function_pending_time.attr, | ||
892 | &dev_attr_avg_device_disconnect_time.attr, | ||
893 | &dev_attr_avg_control_unit_queuing_time.attr, | ||
894 | &dev_attr_avg_device_active_only_time.attr, | ||
895 | &dev_attr_avg_device_busy_time.attr, | ||
896 | &dev_attr_avg_initial_command_response_time.attr, | ||
897 | 0, | ||
898 | }; | ||
899 | |||
900 | static struct attribute_group cmf_attr_group_ext = { | ||
901 | .name = "cmf", | ||
902 | .attrs = cmf_attributes_ext, | ||
903 | }; | ||
904 | |||
905 | static ssize_t cmb_enable_show(struct device *dev, char *buf) | ||
906 | { | ||
907 | return sprintf(buf, "%d\n", to_ccwdev(dev)->private->cmb ? 1 : 0); | ||
908 | } | ||
909 | |||
910 | static ssize_t cmb_enable_store(struct device *dev, const char *buf, size_t c) | ||
911 | { | ||
912 | struct ccw_device *cdev; | ||
913 | int ret; | ||
914 | |||
915 | cdev = to_ccwdev(dev); | ||
916 | |||
917 | switch (buf[0]) { | ||
918 | case '0': | ||
919 | ret = disable_cmf(cdev); | ||
920 | if (ret) | ||
921 | printk(KERN_INFO "disable_cmf failed (%d)\n", ret); | ||
922 | break; | ||
923 | case '1': | ||
924 | ret = enable_cmf(cdev); | ||
925 | if (ret && ret != -EBUSY) | ||
926 | printk(KERN_INFO "enable_cmf failed (%d)\n", ret); | ||
927 | break; | ||
928 | } | ||
929 | |||
930 | return c; | ||
931 | } | ||
932 | |||
933 | DEVICE_ATTR(cmb_enable, 0644, cmb_enable_show, cmb_enable_store); | ||
934 | |||
935 | /* enable_cmf/disable_cmf: module interface for cmf (de)activation */ | ||
936 | int | ||
937 | enable_cmf(struct ccw_device *cdev) | ||
938 | { | ||
939 | int ret; | ||
940 | |||
941 | ret = cmbops->alloc(cdev); | ||
942 | cmbops->reset(cdev); | ||
943 | if (ret) | ||
944 | return ret; | ||
945 | ret = cmbops->set(cdev, 2); | ||
946 | if (ret) { | ||
947 | cmbops->free(cdev); | ||
948 | return ret; | ||
949 | } | ||
950 | ret = sysfs_create_group(&cdev->dev.kobj, cmbops->attr_group); | ||
951 | if (!ret) | ||
952 | return 0; | ||
953 | cmbops->set(cdev, 0); //FIXME: this can fail | ||
954 | cmbops->free(cdev); | ||
955 | return ret; | ||
956 | } | ||
957 | |||
958 | int | ||
959 | disable_cmf(struct ccw_device *cdev) | ||
960 | { | ||
961 | int ret; | ||
962 | |||
963 | ret = cmbops->set(cdev, 0); | ||
964 | if (ret) | ||
965 | return ret; | ||
966 | cmbops->free(cdev); | ||
967 | sysfs_remove_group(&cdev->dev.kobj, cmbops->attr_group); | ||
968 | return ret; | ||
969 | } | ||
970 | |||
971 | u64 | ||
972 | cmf_read(struct ccw_device *cdev, int index) | ||
973 | { | ||
974 | return cmbops->read(cdev, index); | ||
975 | } | ||
976 | |||
977 | int | ||
978 | cmf_readall(struct ccw_device *cdev, struct cmbdata *data) | ||
979 | { | ||
980 | return cmbops->readall(cdev, data); | ||
981 | } | ||
982 | |||
/*
 * Module initialization: choose the measurement format (basic or
 * extended) and install the matching operations vector in cmbops.
 * Returns nonzero on invalid configuration.
 */
static int __init
init_cmf(void)
{
	char *format_string;
	char *detect_string = "parameter";

	/* We cannot really autoprobe this. If the user did not give a parameter,
	   see if we are running on z990 or up, otherwise fall back to basic mode. */

	if (format == CMF_AUTODETECT) {
		/* Extended format requires the ext_mb css characteristic. */
		if (!css_characteristics_avail ||
		    !css_general_characteristics.ext_mb) {
			format = CMF_BASIC;
		} else {
			format = CMF_EXTENDED;
		}
		detect_string = "autodetected";
	} else {
		detect_string = "parameter";
	}

	switch (format) {
	case CMF_BASIC:
		format_string = "basic";
		cmbops = &cmbops_basic;
		/* The basic format uses one shared measurement area whose
		 * size is bounded, hence the device count limit. */
		if (cmb_area.num_channels > 4096 || cmb_area.num_channels < 1) {
			printk(KERN_ERR "Basic channel measurement facility"
					" can only use 1 to 4096 devices\n"
			       KERN_ERR "when the cmf driver is built"
					" as a loadable module\n");
			return 1;
		}
		break;
	case CMF_EXTENDED:
		format_string = "extended";
		cmbops = &cmbops_extended;
		break;
	default:
		printk(KERN_ERR "Invalid format %d for channel "
			"measurement facility\n", format);
		return 1;
	}

	printk(KERN_INFO "Channel measurement facility using %s format (%s)\n",
		format_string, detect_string);
	return 0;
}
1030 | |||
module_init(init_cmf);


MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("channel measurement facility base driver\n"
		   "Copyright 2003 IBM Corporation\n");

/* Public interface used by ccw device drivers to (de)activate and
 * query channel measurement. */
EXPORT_SYMBOL_GPL(enable_cmf);
EXPORT_SYMBOL_GPL(disable_cmf);
EXPORT_SYMBOL_GPL(cmf_read);
EXPORT_SYMBOL_GPL(cmf_readall);
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c new file mode 100644 index 000000000000..87bd70eeabed --- /dev/null +++ b/drivers/s390/cio/css.c | |||
@@ -0,0 +1,575 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/css.c | ||
3 | * driver for channel subsystem | ||
4 | * $Revision: 1.85 $ | ||
5 | * | ||
6 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | ||
7 | * IBM Corporation | ||
8 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) | ||
9 | * Cornelia Huck (cohuck@de.ibm.com) | ||
10 | */ | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/slab.h> | ||
15 | #include <linux/errno.h> | ||
16 | #include <linux/list.h> | ||
17 | |||
18 | #include "css.h" | ||
19 | #include "cio.h" | ||
20 | #include "cio_debug.h" | ||
21 | #include "ioasm.h" | ||
22 | #include "chsc.h" | ||
23 | |||
/* Highest valid subchannel number found while scanning. */
unsigned int highest_subchannel;
/* Nonzero when lost CRWs force a full rescan of all subchannels. */
int need_rescan = 0;
/* Nonzero once init_channel_subsystem() finished the bus setup. */
int css_init_done = 0;

/* System-wide path group ID, built once by css_generate_pgid(). */
struct pgid global_pgid;
/* Nonzero when chsc_determine_css_characteristics() succeeded. */
int css_characteristics_avail = 0;

/* Parent device of all subchannels; appears as "css0" in sysfs. */
struct device css_bus_device = {
	.bus_id = "css0",
};
34 | |||
/*
 * Allocate and validate a struct subchannel for subchannel number @irq.
 * Returns the new subchannel or an ERR_PTR; non-I/O subchannels are
 * rejected with -EINVAL.
 */
static struct subchannel *
css_alloc_subchannel(int irq)
{
	struct subchannel *sch;
	int ret;

	/* GFP_DMA keeps the structure below 2G, see the intparm
	 * comment further down. */
	sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
	if (sch == NULL)
		return ERR_PTR(-ENOMEM);
	ret = cio_validate_subchannel (sch, irq);
	if (ret < 0) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	/* Track the highest subchannel number seen so far. */
	if (irq > highest_subchannel)
		highest_subchannel = irq;

	if (sch->st != SUBCHANNEL_TYPE_IO) {
		/* For now we ignore all non-io subchannels. */
		kfree(sch);
		return ERR_PTR(-EINVAL);
	}

	/*
	 * Set intparm to subchannel address.
	 * This is fine even on 64bit since the subchannel is always located
	 * under 2G.
	 */
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	ret = cio_modify(sch);
	if (ret) {
		kfree(sch);
		return ERR_PTR(ret);
	}
	return sch;
}
71 | |||
72 | static void | ||
73 | css_free_subchannel(struct subchannel *sch) | ||
74 | { | ||
75 | if (sch) { | ||
76 | /* Reset intparm to zeroes. */ | ||
77 | sch->schib.pmcw.intparm = 0; | ||
78 | cio_modify(sch); | ||
79 | kfree(sch); | ||
80 | } | ||
81 | |||
82 | } | ||
83 | |||
84 | static void | ||
85 | css_subchannel_release(struct device *dev) | ||
86 | { | ||
87 | struct subchannel *sch; | ||
88 | |||
89 | sch = to_subchannel(dev); | ||
90 | if (!cio_is_console(sch->irq)) | ||
91 | kfree(sch); | ||
92 | } | ||
93 | |||
94 | extern int css_get_ssd_info(struct subchannel *sch); | ||
95 | |||
96 | static int | ||
97 | css_register_subchannel(struct subchannel *sch) | ||
98 | { | ||
99 | int ret; | ||
100 | |||
101 | /* Initialize the subchannel structure */ | ||
102 | sch->dev.parent = &css_bus_device; | ||
103 | sch->dev.bus = &css_bus_type; | ||
104 | sch->dev.release = &css_subchannel_release; | ||
105 | |||
106 | /* make it known to the system */ | ||
107 | ret = device_register(&sch->dev); | ||
108 | if (ret) | ||
109 | printk (KERN_WARNING "%s: could not register %s\n", | ||
110 | __func__, sch->dev.bus_id); | ||
111 | else | ||
112 | css_get_ssd_info(sch); | ||
113 | return ret; | ||
114 | } | ||
115 | |||
/*
 * Create and register a subchannel device for subchannel number @irq.
 * Returns 0 on success or a negative errno; the subchannel is freed
 * again if registration fails.
 */
int
css_probe_device(int irq)
{
	struct subchannel *sch;
	int rc;

	sch = css_alloc_subchannel(irq);
	if (IS_ERR(sch))
		return PTR_ERR(sch);

	rc = css_register_subchannel(sch);
	if (rc)
		css_free_subchannel(sch);
	return rc;
}
130 | |||
/*
 * Look up a registered subchannel by subchannel number.  Walks the
 * css bus device list under the subsystem rwsem.  Returns the
 * subchannel with an elevated device reference count (the caller must
 * put_device() it) or NULL if not found.
 */
struct subchannel *
get_subchannel_by_schid(int irq)
{
	struct subchannel *sch;
	struct list_head *entry;
	struct device *dev;

	if (!get_bus(&css_bus_type))
		return NULL;
	down_read(&css_bus_type.subsys.rwsem);
	sch = NULL;
	list_for_each(entry, &css_bus_type.devices.list) {
		dev = get_device(container_of(entry,
					      struct device, bus_list));
		if (!dev)
			continue;
		sch = to_subchannel(dev);
		if (sch->irq == irq)
			break;
		/* Not the one we are looking for, drop the reference. */
		put_device(dev);
		sch = NULL;
	}
	up_read(&css_bus_type.subsys.rwsem);
	put_bus(&css_bus_type);

	return sch;
}
158 | |||
159 | static inline int | ||
160 | css_get_subchannel_status(struct subchannel *sch, int schid) | ||
161 | { | ||
162 | struct schib schib; | ||
163 | int cc; | ||
164 | |||
165 | cc = stsch(schid, &schib); | ||
166 | if (cc) | ||
167 | return CIO_GONE; | ||
168 | if (!schib.pmcw.dnv) | ||
169 | return CIO_GONE; | ||
170 | if (sch && sch->schib.pmcw.dnv && | ||
171 | (schib.pmcw.dev != sch->schib.pmcw.dev)) | ||
172 | return CIO_REVALIDATE; | ||
173 | if (sch && !sch->lpm) | ||
174 | return CIO_NO_PATH; | ||
175 | return CIO_OPER; | ||
176 | } | ||
177 | |||
/*
 * React to a machine check for subchannel @irq.  @slow distinguishes
 * the deferred slow-path/rescan call from the direct CRW handler call.
 * Returns 0 when handled, -EAGAIN when the work must be requeued for
 * the slow path, or an error from css_probe_device().
 */
static int
css_evaluate_subchannel(int irq, int slow)
{
	int event, ret, disc;
	struct subchannel *sch;
	unsigned long flags;

	/* Reference acquired here is dropped on every exit path. */
	sch = get_subchannel_by_schid(irq);
	disc = sch ? device_is_disconnected(sch) : 0;
	if (disc && slow) {
		if (sch)
			put_device(&sch->dev);
		return 0; /* Already processed. */
	}
	/*
	 * We've got a machine check, so running I/O won't get an interrupt.
	 * Kill any pending timers.
	 */
	if (sch)
		device_kill_pending_timer(sch);
	if (!disc && !slow) {
		if (sch)
			put_device(&sch->dev);
		return -EAGAIN; /* Will be done on the slow path. */
	}
	event = css_get_subchannel_status(sch, irq);
	CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n",
		      irq, event, sch?(disc?"disconnected":"normal"):"unknown",
		      slow?"slow":"fast");
	switch (event) {
	case CIO_NO_PATH:
	case CIO_GONE:
		if (!sch) {
			/* Never used this subchannel. Ignore. */
			ret = 0;
			break;
		}
		if (disc && (event == CIO_NO_PATH)) {
			/*
			 * Uargh, hack again. Because we don't get a machine
			 * check on configure on, our path bookkeeping can
			 * be out of date here (it's fine while we only do
			 * logical varying or get chsc machine checks). We
			 * need to force reprobing or we might miss devices
			 * coming operational again. It won't do harm in real
			 * no path situations.
			 */
			spin_lock_irqsave(&sch->lock, flags);
			device_trigger_reprobe(sch);
			spin_unlock_irqrestore(&sch->lock, flags);
			ret = 0;
			break;
		}
		/* Let the subchannel driver veto the removal and keep the
		 * device around in disconnected state instead. */
		if (sch->driver && sch->driver->notify &&
		    sch->driver->notify(&sch->dev, event)) {
			cio_disable_subchannel(sch);
			device_set_disconnected(sch);
			ret = 0;
			break;
		}
		/*
		 * Unregister subchannel.
		 * The device will be killed automatically.
		 */
		cio_disable_subchannel(sch);
		device_unregister(&sch->dev);
		/* Reset intparm to zeroes. */
		sch->schib.pmcw.intparm = 0;
		cio_modify(sch);
		put_device(&sch->dev);
		ret = 0;
		break;
	case CIO_REVALIDATE:
		/*
		 * Revalidation machine check. Sick.
		 * We don't notify the driver since we have to throw the device
		 * away in any case.
		 */
		if (!disc) {
			device_unregister(&sch->dev);
			/* Reset intparm to zeroes. */
			sch->schib.pmcw.intparm = 0;
			cio_modify(sch);
			put_device(&sch->dev);
			ret = css_probe_device(irq);
		} else {
			/*
			 * We can't immediately deregister the disconnected
			 * device since it might block.
			 */
			spin_lock_irqsave(&sch->lock, flags);
			device_trigger_reprobe(sch);
			spin_unlock_irqrestore(&sch->lock, flags);
			ret = 0;
		}
		break;
	case CIO_OPER:
		if (disc) {
			spin_lock_irqsave(&sch->lock, flags);
			/* Get device operational again. */
			device_trigger_reprobe(sch);
			spin_unlock_irqrestore(&sch->lock, flags);
		}
		ret = sch ? 0 : css_probe_device(irq);
		break;
	default:
		BUG();
		ret = 0;
	}
	return ret;
}
289 | |||
290 | static void | ||
291 | css_rescan_devices(void) | ||
292 | { | ||
293 | int irq, ret; | ||
294 | |||
295 | for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) { | ||
296 | ret = css_evaluate_subchannel(irq, 1); | ||
297 | /* No more memory. It doesn't make sense to continue. No | ||
298 | * panic because this can happen in midflight and just | ||
299 | * because we can't use a new device is no reason to crash | ||
300 | * the system. */ | ||
301 | if (ret == -ENOMEM) | ||
302 | break; | ||
303 | /* -ENXIO indicates that there are no more subchannels. */ | ||
304 | if (ret == -ENXIO) | ||
305 | break; | ||
306 | } | ||
307 | } | ||
308 | |||
/* Queue entry for a subchannel awaiting slow-path evaluation. */
struct slow_subchannel {
	struct list_head slow_list;
	unsigned long schid;
};

/* Queue of pending entries, protected by slow_subchannel_lock. */
static LIST_HEAD(slow_subchannels_head);
static DEFINE_SPINLOCK(slow_subchannel_lock);
316 | |||
/*
 * Work function: process queued subchannel evaluation requests.  A
 * pending full rescan (need_rescan) supersedes the individual queue
 * entries.
 */
static void
css_trigger_slow_path(void)
{
	CIO_TRACE_EVENT(4, "slowpath");

	if (need_rescan) {
		need_rescan = 0;
		css_rescan_devices();
		return;
	}

	spin_lock_irq(&slow_subchannel_lock);
	while (!list_empty(&slow_subchannels_head)) {
		struct slow_subchannel *slow_sch =
			list_entry(slow_subchannels_head.next,
				   struct slow_subchannel, slow_list);

		list_del_init(slow_subchannels_head.next);
		/* Drop the lock around the evaluation, which may block
		 * and may itself queue new entries. */
		spin_unlock_irq(&slow_subchannel_lock);
		css_evaluate_subchannel(slow_sch->schid, 1);
		spin_lock_irq(&slow_subchannel_lock);
		kfree(slow_sch);
	}
	spin_unlock_irq(&slow_subchannel_lock);
}
342 | |||
typedef void (*workfunc)(void *);
/* NOTE(review): casting a void(void) function to void(*)(void *) relies
 * on the ABI tolerating the ignored argument; long-standing kernel
 * practice, but not strictly portable C. */
DECLARE_WORK(slow_path_work, (workfunc)css_trigger_slow_path, NULL);
struct workqueue_struct *slow_path_wq;
346 | |||
347 | /* | ||
348 | * Rescan for new devices. FIXME: This is slow. | ||
349 | * This function is called when we have lost CRWs due to overflows and we have | ||
350 | * to do subchannel housekeeping. | ||
351 | */ | ||
352 | void | ||
353 | css_reiterate_subchannels(void) | ||
354 | { | ||
355 | css_clear_subchannel_slow_list(); | ||
356 | need_rescan = 1; | ||
357 | } | ||
358 | |||
359 | /* | ||
360 | * Called from the machine check handler for subchannel report words. | ||
361 | */ | ||
362 | int | ||
363 | css_process_crw(int irq) | ||
364 | { | ||
365 | int ret; | ||
366 | |||
367 | CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq); | ||
368 | |||
369 | if (need_rescan) | ||
370 | /* We need to iterate all subchannels anyway. */ | ||
371 | return -EAGAIN; | ||
372 | /* | ||
373 | * Since we are always presented with IPI in the CRW, we have to | ||
374 | * use stsch() to find out if the subchannel in question has come | ||
375 | * or gone. | ||
376 | */ | ||
377 | ret = css_evaluate_subchannel(irq, 0); | ||
378 | if (ret == -EAGAIN) { | ||
379 | if (css_enqueue_subchannel_slow(irq)) { | ||
380 | css_clear_subchannel_slow_list(); | ||
381 | need_rescan = 1; | ||
382 | } | ||
383 | } | ||
384 | return ret; | ||
385 | } | ||
386 | |||
/*
 * Build the global path group ID from CPU address, CPU id/model and
 * the high word of the TOD clock.
 */
static void __init
css_generate_pgid(void)
{
	/* Let's build our path group ID here. */
	if (css_characteristics_avail && css_general_characteristics.mcss)
		/* Multiple channel subsystems available: use the fixed
		 * value 0x8000 instead of a real CPU address. */
		global_pgid.cpu_addr = 0x8000;
	else {
#ifdef CONFIG_SMP
		global_pgid.cpu_addr = hard_smp_processor_id();
#else
		global_pgid.cpu_addr = 0;
#endif
	}
	/* CPU id and model are read from the lowcore CPUID field. */
	global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
	global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
	global_pgid.tod_high = (__u32) (get_clock() >> 32);
}
404 | |||
/*
 * Now that the driver core is running, we can setup our channel subsystem.
 * The struct subchannel's are created during probing (except for the
 * static console subchannel).
 */
static int __init
init_channel_subsystem (void)
{
	int ret, irq;

	if (chsc_determine_css_characteristics() == 0)
		css_characteristics_avail = 1;

	css_generate_pgid();

	if ((ret = bus_register(&css_bus_type)))
		goto out;
	if ((ret = device_register (&css_bus_device)))
		goto out_bus;

	css_init_done = 1;

	/* NOTE(review): sets control register 6 bit 28 — presumably
	 * enabling the I/O interruption subclass used by the css;
	 * confirm against the Principles of Operation. */
	ctl_set_bit(6, 28);

	/* Scan all possible subchannel numbers and register each valid
	 * I/O subchannel. */
	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
		struct subchannel *sch;

		if (cio_is_console(irq))
			sch = cio_get_console_subchannel();
		else {
			sch = css_alloc_subchannel(irq);
			if (IS_ERR(sch))
				ret = PTR_ERR(sch);
			else
				ret = 0;
			if (ret == -ENOMEM)
				panic("Out of memory in "
				      "init_channel_subsystem\n");
			/* -ENXIO: no more subchannels. */
			if (ret == -ENXIO)
				break;
			if (ret)
				continue;
		}
		/*
		 * We register ALL valid subchannels in ioinfo, even those
		 * that have been present before init_channel_subsystem.
		 * These subchannels can't have been registered yet (kmalloc
		 * not working) so we do it now. This is true e.g. for the
		 * console subchannel.
		 */
		css_register_subchannel(sch);
	}
	return 0;

out_bus:
	bus_unregister(&css_bus_type);
out:
	return ret;
}
465 | |||
466 | /* | ||
467 | * find a driver for a subchannel. They identify by the subchannel | ||
468 | * type with the exception that the console subchannel driver has its own | ||
469 | * subchannel type although the device is an i/o subchannel | ||
470 | */ | ||
471 | static int | ||
472 | css_bus_match (struct device *dev, struct device_driver *drv) | ||
473 | { | ||
474 | struct subchannel *sch = container_of (dev, struct subchannel, dev); | ||
475 | struct css_driver *driver = container_of (drv, struct css_driver, drv); | ||
476 | |||
477 | if (sch->st == driver->subchannel_type) | ||
478 | return 1; | ||
479 | |||
480 | return 0; | ||
481 | } | ||
482 | |||
/* The channel subsystem bus; subchannels and their drivers attach here. */
struct bus_type css_bus_type = {
	.name  = "css",
	.match = &css_bus_match,
};

/* Run after driver core setup, before device driver initcalls. */
subsys_initcall(init_channel_subsystem);
489 | |||
/*
 * Register root devices for some drivers. The release function must not be
 * in the device drivers, so we do it here.
 */
static void
s390_root_dev_release(struct device *dev)
{
	/* Matches the kmalloc in s390_root_dev_register. */
	kfree(dev);
}
499 | |||
500 | struct device * | ||
501 | s390_root_dev_register(const char *name) | ||
502 | { | ||
503 | struct device *dev; | ||
504 | int ret; | ||
505 | |||
506 | if (!strlen(name)) | ||
507 | return ERR_PTR(-EINVAL); | ||
508 | dev = kmalloc(sizeof(struct device), GFP_KERNEL); | ||
509 | if (!dev) | ||
510 | return ERR_PTR(-ENOMEM); | ||
511 | memset(dev, 0, sizeof(struct device)); | ||
512 | strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE)); | ||
513 | dev->release = s390_root_dev_release; | ||
514 | ret = device_register(dev); | ||
515 | if (ret) { | ||
516 | kfree(dev); | ||
517 | return ERR_PTR(ret); | ||
518 | } | ||
519 | return dev; | ||
520 | } | ||
521 | |||
/* Tear down a root device created by s390_root_dev_register(). */
void
s390_root_dev_unregister(struct device *dev)
{
	if (dev)
		device_unregister(dev);
}
528 | |||
529 | int | ||
530 | css_enqueue_subchannel_slow(unsigned long schid) | ||
531 | { | ||
532 | struct slow_subchannel *new_slow_sch; | ||
533 | unsigned long flags; | ||
534 | |||
535 | new_slow_sch = kmalloc(sizeof(struct slow_subchannel), GFP_ATOMIC); | ||
536 | if (!new_slow_sch) | ||
537 | return -ENOMEM; | ||
538 | memset(new_slow_sch, 0, sizeof(struct slow_subchannel)); | ||
539 | new_slow_sch->schid = schid; | ||
540 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
541 | list_add_tail(&new_slow_sch->slow_list, &slow_subchannels_head); | ||
542 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
543 | return 0; | ||
544 | } | ||
545 | |||
546 | void | ||
547 | css_clear_subchannel_slow_list(void) | ||
548 | { | ||
549 | unsigned long flags; | ||
550 | |||
551 | spin_lock_irqsave(&slow_subchannel_lock, flags); | ||
552 | while (!list_empty(&slow_subchannels_head)) { | ||
553 | struct slow_subchannel *slow_sch = | ||
554 | list_entry(slow_subchannels_head.next, | ||
555 | struct slow_subchannel, slow_list); | ||
556 | |||
557 | list_del_init(slow_subchannels_head.next); | ||
558 | kfree(slow_sch); | ||
559 | } | ||
560 | spin_unlock_irqrestore(&slow_subchannel_lock, flags); | ||
561 | } | ||
562 | |||
563 | |||
564 | |||
565 | int | ||
566 | css_slow_subchannels_exist(void) | ||
567 | { | ||
568 | return (!list_empty(&slow_subchannels_head)); | ||
569 | } | ||
570 | |||
MODULE_LICENSE("GPL");
/* Used by the ccw bus code and by drivers creating root devices. */
EXPORT_SYMBOL(css_bus_type);
EXPORT_SYMBOL(s390_root_dev_register);
EXPORT_SYMBOL(s390_root_dev_unregister);
EXPORT_SYMBOL_GPL(css_characteristics_avail);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h new file mode 100644 index 000000000000..2004a6c49388 --- /dev/null +++ b/drivers/s390/cio/css.h | |||
@@ -0,0 +1,155 @@ | |||
1 | #ifndef _CSS_H | ||
2 | #define _CSS_H | ||
3 | |||
4 | #include <linux/wait.h> | ||
5 | #include <linux/workqueue.h> | ||
6 | |||
7 | #include <asm/cio.h> | ||
8 | |||
/*
 * path grouping stuff
 *
 * SPID_FUNC_* are function-code bits used with the Set Path Group ID
 * channel command; SNID_STATE* decode the path state reported by
 * Sense Path Group ID (see struct path_state below).
 */
#define SPID_FUNC_SINGLE_PATH	   0x00
#define SPID_FUNC_MULTI_PATH	   0x80
#define SPID_FUNC_ESTABLISH	   0x00
#define SPID_FUNC_RESIGN	   0x40
#define SPID_FUNC_DISBAND	   0x20

#define SNID_STATE1_RESET	   0
#define SNID_STATE1_UNGROUPED	   2
#define SNID_STATE1_GROUPED	   3

#define SNID_STATE2_NOT_RESVD	   0
#define SNID_STATE2_RESVD_ELSE	   2
#define SNID_STATE2_RESVD_SELF	   3

#define SNID_STATE3_MULTI_PATH	   1
#define SNID_STATE3_SINGLE_PATH	   0
/* Path state byte as returned by SNID; values per SNID_STATE* above. */
struct path_state {
	__u8  state1 : 2;	/* path state value 1 */
	__u8  state2 : 2;	/* path state value 2 */
	__u8  state3 : 1;	/* path state value 3 */
	__u8  resvd  : 3;	/* reserved */
} __attribute__ ((packed));
35 | |||
/*
 * Path group ID: identifies this system instance to the control unit.
 * The first byte doubles as the SPID function code on output and the
 * SNID path state on input.
 */
struct pgid {
	union {
		__u8 fc;   	/* SPID function code */
		struct path_state ps;	/* SNID path state */
	} inf;
	__u32 cpu_addr	: 16;	/* CPU address */
	__u32 cpu_id	: 24;	/* CPU identification */
	__u32 cpu_model : 16;	/* CPU model */
	__u32 tod_high;		/* high word TOD clock */
} __attribute__ ((packed));
46 | |||
47 | extern struct pgid global_pgid; | ||
48 | |||
49 | #define MAX_CIWS 8 | ||
50 | |||
/*
 * sense-id response buffer layout
 *
 * Filled by the SenseID channel command; the extended part carries up
 * to MAX_CIWS command information words.
 */
struct senseid {
	/* common part */
	__u8  reserved;     	/* always 0x'FF' */
	__u16 cu_type;	     	/* control unit type */
	__u8  cu_model;     	/* control unit model */
	__u16 dev_type;     	/* device type */
	__u8  dev_model;    	/* device model */
	__u8  unused;	     	/* padding byte */
	/* extended part */
	struct ciw ciw[MAX_CIWS];	/* variable # of CIWs */
}  __attribute__ ((packed,aligned(4)));
65 | |||
/*
 * Per-device private data of a ccw_device: FSM state, identification,
 * option/flag bits, and the scratch areas used by the SNID/SID/SPGID
 * internal channel programs and channel measurement facility.
 */
struct ccw_device_private {
	int state;		/* device state */
	atomic_t onoff;		/* serializes online/offline transitions */
	unsigned long registered;	/* bit 1 set while in driver core */
	__u16 devno;		/* device number */
	__u16 irq;		/* subchannel number */
	__u8 imask;		/* lpm mask for SNID/SID/SPGID */
	int iretry;		/* retry counter SNID/SID/SPGID */
	struct {
		unsigned int fast:1;	/* post with "channel end" */
		unsigned int repall:1;	/* report every interrupt status */
		unsigned int pgroup:1;  /* do path grouping */
		unsigned int force:1;   /* allow forced online */
	} __attribute__ ((packed)) options;
	struct {
		unsigned int pgid_single:1; /* use single path for Set PGID */
		unsigned int esid:1;        /* Ext. SenseID supported by HW */
		unsigned int dosense:1;	    /* delayed SENSE required */
		unsigned int doverify:1;    /* delayed path verification */
		unsigned int donotify:1;    /* call notify function */
		unsigned int recog_done:1;  /* dev. recog. complete */
		unsigned int fake_irb:1;    /* deliver faked irb */
	} __attribute__((packed)) flags;
	unsigned long intparm;	/* user interruption parameter */
	struct qdio_irq *qdio_data;
	struct irb irb;		/* device status */
	struct senseid senseid;	/* SenseID info */
	struct pgid pgid;	/* path group ID */
	struct ccw1 iccws[2];	/* ccws for SNID/SID/SPGID commands */
	struct work_struct kick_work;	/* deferred work (register etc.) */
	wait_queue_head_t wait_q;	/* woken on FSM final states */
	struct timer_list timer;	/* device operation timeout */
	void *cmb;			/* measurement information */
	struct list_head cmb_list;	/* list of measured devices */
	u64 cmb_start_time;		/* clock value of cmb reset */
	void *cmb_wait;			/* deferred cmb enable/disable */
};
103 | |||
/*
 * A css driver handles all subchannels of one type.
 * Currently, we only care about I/O subchannels (type 0), these
 * have a ccw_device connected to them.
 *
 * The callbacks take the subchannel's struct device and are invoked
 * by the css core (interrupt delivery, device notification, path
 * verification, and termination of pending I/O).
 */
struct css_driver {
	unsigned int subchannel_type;
	struct device_driver drv;
	void (*irq)(struct device *);
	int (*notify)(struct device *, int);
	void (*verify)(struct device *);
	void (*termination)(struct device *);
};
117 | |||
118 | /* | ||
119 | * all css_drivers have the css_bus_type | ||
120 | */ | ||
121 | extern struct bus_type css_bus_type; | ||
122 | extern struct css_driver io_subchannel_driver; | ||
123 | |||
124 | int css_probe_device(int irq); | ||
125 | extern struct subchannel * get_subchannel_by_schid(int irq); | ||
126 | extern unsigned int highest_subchannel; | ||
127 | extern int css_init_done; | ||
128 | |||
129 | #define __MAX_SUBCHANNELS 65536 | ||
130 | |||
131 | extern struct bus_type css_bus_type; | ||
132 | extern struct device css_bus_device; | ||
133 | |||
134 | /* Some helper functions for disconnected state. */ | ||
135 | int device_is_disconnected(struct subchannel *); | ||
136 | void device_set_disconnected(struct subchannel *); | ||
137 | void device_trigger_reprobe(struct subchannel *); | ||
138 | |||
139 | /* Helper functions for vary on/off. */ | ||
140 | int device_is_online(struct subchannel *); | ||
141 | void device_set_waiting(struct subchannel *); | ||
142 | |||
143 | /* Machine check helper function. */ | ||
144 | void device_kill_pending_timer(struct subchannel *); | ||
145 | |||
146 | /* Helper functions to build lists for the slow path. */ | ||
147 | int css_enqueue_subchannel_slow(unsigned long schid); | ||
148 | void css_walk_subchannel_slow_list(void (*fn)(unsigned long)); | ||
149 | void css_clear_subchannel_slow_list(void); | ||
150 | int css_slow_subchannels_exist(void); | ||
151 | extern int need_rescan; | ||
152 | |||
153 | extern struct workqueue_struct *slow_path_wq; | ||
154 | extern struct work_struct slow_path_work; | ||
155 | #endif | ||
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c new file mode 100644 index 000000000000..df0325505e4e --- /dev/null +++ b/drivers/s390/cio/device.c | |||
@@ -0,0 +1,1135 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/device.c | ||
3 | * bus driver for ccw devices | ||
4 | * $Revision: 1.131 $ | ||
5 | * | ||
6 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | ||
7 | * IBM Corporation | ||
8 | * Author(s): Arnd Bergmann (arndb@de.ibm.com) | ||
9 | * Cornelia Huck (cohuck@de.ibm.com) | ||
10 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
11 | */ | ||
12 | #include <linux/config.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/spinlock.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/err.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/list.h> | ||
20 | #include <linux/device.h> | ||
21 | #include <linux/workqueue.h> | ||
22 | |||
23 | #include <asm/ccwdev.h> | ||
24 | #include <asm/cio.h> | ||
25 | |||
26 | #include "cio.h" | ||
27 | #include "css.h" | ||
28 | #include "device.h" | ||
29 | #include "ioasm.h" | ||
30 | |||
31 | /******************* bus type handling ***********************/ | ||
32 | |||
33 | /* The Linux driver model distinguishes between a bus type and | ||
34 | * the bus itself. Of course we only have one channel | ||
35 | * subsystem driver and one channel system per machine, but | ||
36 | * we still use the abstraction. T.R. says it's a good idea. */ | ||
37 | static int | ||
38 | ccw_bus_match (struct device * dev, struct device_driver * drv) | ||
39 | { | ||
40 | struct ccw_device *cdev = to_ccwdev(dev); | ||
41 | struct ccw_driver *cdrv = to_ccwdrv(drv); | ||
42 | const struct ccw_device_id *ids = cdrv->ids, *found; | ||
43 | |||
44 | if (!ids) | ||
45 | return 0; | ||
46 | |||
47 | found = ccw_device_id_match(ids, &cdev->id); | ||
48 | if (!found) | ||
49 | return 0; | ||
50 | |||
51 | cdev->id.driver_info = found->driver_info; | ||
52 | |||
53 | return 1; | ||
54 | } | ||
55 | |||
56 | /* | ||
57 | * Hotplugging interface for ccw devices. | ||
58 | * Heavily modeled on pci and usb hotplug. | ||
59 | */ | ||
60 | static int | ||
61 | ccw_hotplug (struct device *dev, char **envp, int num_envp, | ||
62 | char *buffer, int buffer_size) | ||
63 | { | ||
64 | struct ccw_device *cdev = to_ccwdev(dev); | ||
65 | int i = 0; | ||
66 | int length = 0; | ||
67 | |||
68 | if (!cdev) | ||
69 | return -ENODEV; | ||
70 | |||
71 | /* what we want to pass to /sbin/hotplug */ | ||
72 | |||
73 | envp[i++] = buffer; | ||
74 | length += scnprintf(buffer, buffer_size - length, "CU_TYPE=%04X", | ||
75 | cdev->id.cu_type); | ||
76 | if ((buffer_size - length <= 0) || (i >= num_envp)) | ||
77 | return -ENOMEM; | ||
78 | ++length; | ||
79 | buffer += length; | ||
80 | |||
81 | envp[i++] = buffer; | ||
82 | length += scnprintf(buffer, buffer_size - length, "CU_MODEL=%02X", | ||
83 | cdev->id.cu_model); | ||
84 | if ((buffer_size - length <= 0) || (i >= num_envp)) | ||
85 | return -ENOMEM; | ||
86 | ++length; | ||
87 | buffer += length; | ||
88 | |||
89 | /* The next two can be zero, that's ok for us */ | ||
90 | envp[i++] = buffer; | ||
91 | length += scnprintf(buffer, buffer_size - length, "DEV_TYPE=%04X", | ||
92 | cdev->id.dev_type); | ||
93 | if ((buffer_size - length <= 0) || (i >= num_envp)) | ||
94 | return -ENOMEM; | ||
95 | ++length; | ||
96 | buffer += length; | ||
97 | |||
98 | envp[i++] = buffer; | ||
99 | length += scnprintf(buffer, buffer_size - length, "DEV_MODEL=%02X", | ||
100 | cdev->id.dev_model); | ||
101 | if ((buffer_size - length <= 0) || (i >= num_envp)) | ||
102 | return -ENOMEM; | ||
103 | |||
104 | envp[i] = 0; | ||
105 | |||
106 | return 0; | ||
107 | } | ||
108 | |||
/* The one bus type all ccw devices hang off of. */
struct bus_type ccw_bus_type = {
	.name  = "ccw",
	.match = &ccw_bus_match,
	.hotplug = &ccw_hotplug,
};
114 | |||
/* I/O subchannel callbacks; wired up in io_subchannel_driver below. */
static int io_subchannel_probe (struct device *);
static int io_subchannel_remove (struct device *);
void io_subchannel_irq (struct device *);
static int io_subchannel_notify(struct device *, int);
static void io_subchannel_verify(struct device *);
static void io_subchannel_ioterm(struct device *);
static void io_subchannel_shutdown(struct device *);
122 | |||
/* css driver for I/O subchannels (the only subchannel type handled). */
struct css_driver io_subchannel_driver = {
	.subchannel_type = SUBCHANNEL_TYPE_IO,
	.drv = {
		.name = "io_subchannel",
		.bus  = &css_bus_type,
		.probe = &io_subchannel_probe,
		.remove = &io_subchannel_remove,
		.shutdown = &io_subchannel_shutdown,
	},
	.irq = io_subchannel_irq,
	.notify = io_subchannel_notify,
	.verify = io_subchannel_verify,
	.termination = io_subchannel_ioterm,
};
137 | |||
struct workqueue_struct *ccw_device_work;	/* general deferred device work */
struct workqueue_struct *ccw_device_notify_work; /* driver notifications */
static wait_queue_head_t ccw_device_init_wq;	/* woken when init_count hits 0 */
static atomic_t ccw_device_init_count;		/* # devices in recognition */
142 | |||
143 | static int __init | ||
144 | init_ccw_bus_type (void) | ||
145 | { | ||
146 | int ret; | ||
147 | |||
148 | init_waitqueue_head(&ccw_device_init_wq); | ||
149 | atomic_set(&ccw_device_init_count, 0); | ||
150 | |||
151 | ccw_device_work = create_singlethread_workqueue("cio"); | ||
152 | if (!ccw_device_work) | ||
153 | return -ENOMEM; /* FIXME: better errno ? */ | ||
154 | ccw_device_notify_work = create_singlethread_workqueue("cio_notify"); | ||
155 | if (!ccw_device_notify_work) { | ||
156 | ret = -ENOMEM; /* FIXME: better errno ? */ | ||
157 | goto out_err; | ||
158 | } | ||
159 | slow_path_wq = create_singlethread_workqueue("kslowcrw"); | ||
160 | if (!slow_path_wq) { | ||
161 | ret = -ENOMEM; /* FIXME: better errno ? */ | ||
162 | goto out_err; | ||
163 | } | ||
164 | if ((ret = bus_register (&ccw_bus_type))) | ||
165 | goto out_err; | ||
166 | |||
167 | if ((ret = driver_register(&io_subchannel_driver.drv))) | ||
168 | goto out_err; | ||
169 | |||
170 | wait_event(ccw_device_init_wq, | ||
171 | atomic_read(&ccw_device_init_count) == 0); | ||
172 | flush_workqueue(ccw_device_work); | ||
173 | return 0; | ||
174 | out_err: | ||
175 | if (ccw_device_work) | ||
176 | destroy_workqueue(ccw_device_work); | ||
177 | if (ccw_device_notify_work) | ||
178 | destroy_workqueue(ccw_device_notify_work); | ||
179 | if (slow_path_wq) | ||
180 | destroy_workqueue(slow_path_wq); | ||
181 | return ret; | ||
182 | } | ||
183 | |||
/* Tear down what init_ccw_bus_type set up, in reverse order. */
static void __exit
cleanup_ccw_bus_type (void)
{
	driver_unregister(&io_subchannel_driver.drv);
	bus_unregister(&ccw_bus_type);
	destroy_workqueue(ccw_device_notify_work);
	destroy_workqueue(ccw_device_work);
	/* NOTE(review): slow_path_wq, created in init_ccw_bus_type, is not
	 * destroyed here - confirm whether that is intentional. */
}

subsys_initcall(init_ccw_bus_type);
module_exit(cleanup_ccw_bus_type);
195 | |||
196 | /************************ device handling **************************/ | ||
197 | |||
198 | /* | ||
199 | * A ccw_device has some interfaces in sysfs in addition to the | ||
200 | * standard ones. | ||
201 | * The following entries are designed to export the information which | ||
202 | * resided in 2.4 in /proc/subchannels. Subchannel and device number | ||
203 | * are obvious, so they don't have an entry :) | ||
204 | * TODO: Split chpids and pimpampom up? Where is "in use" in the tree? | ||
205 | */ | ||
206 | static ssize_t | ||
207 | chpids_show (struct device * dev, char * buf) | ||
208 | { | ||
209 | struct subchannel *sch = to_subchannel(dev); | ||
210 | struct ssd_info *ssd = &sch->ssd_info; | ||
211 | ssize_t ret = 0; | ||
212 | int chp; | ||
213 | |||
214 | for (chp = 0; chp < 8; chp++) | ||
215 | ret += sprintf (buf+ret, "%02x ", ssd->chpid[chp]); | ||
216 | |||
217 | ret += sprintf (buf+ret, "\n"); | ||
218 | return min((ssize_t)PAGE_SIZE, ret); | ||
219 | } | ||
220 | |||
221 | static ssize_t | ||
222 | pimpampom_show (struct device * dev, char * buf) | ||
223 | { | ||
224 | struct subchannel *sch = to_subchannel(dev); | ||
225 | struct pmcw *pmcw = &sch->schib.pmcw; | ||
226 | |||
227 | return sprintf (buf, "%02x %02x %02x\n", | ||
228 | pmcw->pim, pmcw->pam, pmcw->pom); | ||
229 | } | ||
230 | |||
231 | static ssize_t | ||
232 | devtype_show (struct device *dev, char *buf) | ||
233 | { | ||
234 | struct ccw_device *cdev = to_ccwdev(dev); | ||
235 | struct ccw_device_id *id = &(cdev->id); | ||
236 | |||
237 | if (id->dev_type != 0) | ||
238 | return sprintf(buf, "%04x/%02x\n", | ||
239 | id->dev_type, id->dev_model); | ||
240 | else | ||
241 | return sprintf(buf, "n/a\n"); | ||
242 | } | ||
243 | |||
244 | static ssize_t | ||
245 | cutype_show (struct device *dev, char *buf) | ||
246 | { | ||
247 | struct ccw_device *cdev = to_ccwdev(dev); | ||
248 | struct ccw_device_id *id = &(cdev->id); | ||
249 | |||
250 | return sprintf(buf, "%04x/%02x\n", | ||
251 | id->cu_type, id->cu_model); | ||
252 | } | ||
253 | |||
254 | static ssize_t | ||
255 | online_show (struct device *dev, char *buf) | ||
256 | { | ||
257 | struct ccw_device *cdev = to_ccwdev(dev); | ||
258 | |||
259 | return sprintf(buf, cdev->online ? "1\n" : "0\n"); | ||
260 | } | ||
261 | |||
/*
 * Unregister the subchannel of a disconnected device and clear its
 * interruption parameter, dropping the subchannel reference afterwards.
 */
static void
ccw_device_remove_disconnected(struct ccw_device *cdev)
{
	struct subchannel *sch;
	/*
	 * Forced offline in disconnected state means
	 * 'throw away device'.
	 */
	sch = to_subchannel(cdev->dev.parent);
	device_unregister(&sch->dev);
	/* Reset intparm to zeroes. */
	sch->schib.pmcw.intparm = 0;
	cio_modify(sch);
	put_device(&sch->dev);
}
277 | |||
/*
 * Take a ccw device offline: let the driver's set_offline() veto
 * first, then drive the state machine offline and wait for a final
 * state.  Returns 0 on success or a negative errno; on failure other
 * than -ENODEV the device is marked online again.
 */
int
ccw_device_set_offline(struct ccw_device *cdev)
{
	int ret;

	if (!cdev)
		return -ENODEV;
	if (!cdev->online || !cdev->drv)
		return -EINVAL;

	if (cdev->drv->set_offline) {
		ret = cdev->drv->set_offline(cdev);
		if (ret != 0)
			return ret;
	}
	cdev->online = 0;
	spin_lock_irq(cdev->ccwlock);
	ret = ccw_device_offline(cdev);
	if (ret == -ENODEV) {
		/* Device is gone; force the FSM into a sane state. */
		if (cdev->private->state != DEV_STATE_NOT_OPER) {
			cdev->private->state = DEV_STATE_OFFLINE;
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		}
		spin_unlock_irq(cdev->ccwlock);
		return ret;
	}
	spin_unlock_irq(cdev->ccwlock);
	if (ret == 0)
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	else {
		pr_debug("ccw_device_offline returned %d, device %s\n",
			 ret, cdev->dev.bus_id);
		/* Offline was rejected; the device stays online. */
		cdev->online = 1;
	}
	return ret;
}
314 | |||
315 | int | ||
316 | ccw_device_set_online(struct ccw_device *cdev) | ||
317 | { | ||
318 | int ret; | ||
319 | |||
320 | if (!cdev) | ||
321 | return -ENODEV; | ||
322 | if (cdev->online || !cdev->drv) | ||
323 | return -EINVAL; | ||
324 | |||
325 | spin_lock_irq(cdev->ccwlock); | ||
326 | ret = ccw_device_online(cdev); | ||
327 | spin_unlock_irq(cdev->ccwlock); | ||
328 | if (ret == 0) | ||
329 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); | ||
330 | else { | ||
331 | pr_debug("ccw_device_online returned %d, device %s\n", | ||
332 | ret, cdev->dev.bus_id); | ||
333 | return ret; | ||
334 | } | ||
335 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
336 | return -ENODEV; | ||
337 | if (!cdev->drv->set_online || cdev->drv->set_online(cdev) == 0) { | ||
338 | cdev->online = 1; | ||
339 | return 0; | ||
340 | } | ||
341 | spin_lock_irq(cdev->ccwlock); | ||
342 | ret = ccw_device_offline(cdev); | ||
343 | spin_unlock_irq(cdev->ccwlock); | ||
344 | if (ret == 0) | ||
345 | wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev)); | ||
346 | else | ||
347 | pr_debug("ccw_device_offline returned %d, device %s\n", | ||
348 | ret, cdev->dev.bus_id); | ||
349 | return (ret = 0) ? -ENODEV : ret; | ||
350 | } | ||
351 | |||
/*
 * Store function for the "online" attribute: "1" sets the device
 * online, "0" offline, "force" additionally breaks the reservation of
 * a boxed device via steal lock.  The value is parsed as hex.
 * Only one transition per device may be in flight (onoff flag).
 */
static ssize_t
online_store (struct device *dev, const char *buf, size_t count)
{
	struct ccw_device *cdev = to_ccwdev(dev);
	int i, force, ret;
	char *tmp;

	/* Serialize against concurrent online/offline attempts. */
	if (atomic_compare_and_swap(0, 1, &cdev->private->onoff))
		return -EAGAIN;

	/* Pin the driver module for the duration of the transition. */
	if (cdev->drv && !try_module_get(cdev->drv->owner)) {
		atomic_set(&cdev->private->onoff, 0);
		return -EINVAL;
	}
	if (!strncmp(buf, "force\n", count)) {
		force = 1;
		i = 1;
	} else {
		force = 0;
		i = simple_strtoul(buf, &tmp, 16);
	}
	if (i == 1) {
		/* Do device recognition, if needed. */
		if (cdev->id.cu_type == 0) {
			ret = ccw_device_recognition(cdev);
			if (ret) {
				printk(KERN_WARNING"Couldn't start recognition "
				       "for device %s (ret=%d)\n",
				       cdev->dev.bus_id, ret);
				goto out;
			}
			wait_event(cdev->private->wait_q,
				   cdev->private->flags.recog_done);
		}
		if (cdev->drv && cdev->drv->set_online)
			ccw_device_set_online(cdev);
	} else if (i == 0) {
		/* Offlining a disconnected device throws it away. */
		if (cdev->private->state == DEV_STATE_DISCONNECTED)
			ccw_device_remove_disconnected(cdev);
		else if (cdev->drv && cdev->drv->set_offline)
			ccw_device_set_offline(cdev);
	}
	if (force && cdev->private->state == DEV_STATE_BOXED) {
		/* Steal lock to un-box the device, then retry online. */
		ret = ccw_device_stlck(cdev);
		if (ret) {
			printk(KERN_WARNING"ccw_device_stlck for device %s "
			       "returned %d!\n", cdev->dev.bus_id, ret);
			goto out;
		}
		/* Do device recognition, if needed. */
		if (cdev->id.cu_type == 0) {
			cdev->private->state = DEV_STATE_NOT_OPER;
			ret = ccw_device_recognition(cdev);
			if (ret) {
				printk(KERN_WARNING"Couldn't start recognition "
				       "for device %s (ret=%d)\n",
				       cdev->dev.bus_id, ret);
				goto out;
			}
			wait_event(cdev->private->wait_q,
				   cdev->private->flags.recog_done);
		}
		if (cdev->drv && cdev->drv->set_online)
			ccw_device_set_online(cdev);
	}
out:
	if (cdev->drv)
		module_put(cdev->drv->owner);
	atomic_set(&cdev->private->onoff, 0);
	return count;
}
423 | |||
424 | static ssize_t | ||
425 | available_show (struct device *dev, char *buf) | ||
426 | { | ||
427 | struct ccw_device *cdev = to_ccwdev(dev); | ||
428 | struct subchannel *sch; | ||
429 | |||
430 | switch (cdev->private->state) { | ||
431 | case DEV_STATE_BOXED: | ||
432 | return sprintf(buf, "boxed\n"); | ||
433 | case DEV_STATE_DISCONNECTED: | ||
434 | case DEV_STATE_DISCONNECTED_SENSE_ID: | ||
435 | case DEV_STATE_NOT_OPER: | ||
436 | sch = to_subchannel(dev->parent); | ||
437 | if (!sch->lpm) | ||
438 | return sprintf(buf, "no path\n"); | ||
439 | else | ||
440 | return sprintf(buf, "no device\n"); | ||
441 | default: | ||
442 | /* All other states considered fine. */ | ||
443 | return sprintf(buf, "good\n"); | ||
444 | } | ||
445 | } | ||
446 | |||
/* sysfs attributes: chpids/pimpampom go on the subchannel, the rest
 * on the ccw device itself. */
static DEVICE_ATTR(chpids, 0444, chpids_show, NULL);
static DEVICE_ATTR(pimpampom, 0444, pimpampom_show, NULL);
static DEVICE_ATTR(devtype, 0444, devtype_show, NULL);
static DEVICE_ATTR(cutype, 0444, cutype_show, NULL);
static DEVICE_ATTR(online, 0644, online_show, online_store);
/* presumably defined by the channel measurement code - TODO confirm */
extern struct device_attribute dev_attr_cmb_enable;
static DEVICE_ATTR(availability, 0444, available_show, NULL);
454 | |||
/* Attribute group attached to the subchannel device. */
static struct attribute * subch_attrs[] = {
	&dev_attr_chpids.attr,
	&dev_attr_pimpampom.attr,
	NULL,
};

static struct attribute_group subch_attr_group = {
	.attrs = subch_attrs,
};

/* Create the subchannel attribute group in sysfs. */
static inline int
subchannel_add_files (struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &subch_attr_group);
}
470 | |||
/* Attribute group attached to the ccw device. */
static struct attribute * ccwdev_attrs[] = {
	&dev_attr_devtype.attr,
	&dev_attr_cutype.attr,
	&dev_attr_online.attr,
	&dev_attr_cmb_enable.attr,
	&dev_attr_availability.attr,
	NULL,
};

static struct attribute_group ccwdev_attr_group = {
	.attrs = ccwdev_attrs,
};

/* Create the ccw device attribute group in sysfs. */
static inline int
device_add_files (struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &ccwdev_attr_group);
}

/* Remove the ccw device attribute group from sysfs. */
static inline void
device_remove_files(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &ccwdev_attr_group);
}
495 | |||
496 | /* this is a simple abstraction for device_register that sets the | ||
497 | * correct bus type and adds the bus specific files */ | ||
498 | int | ||
499 | ccw_device_register(struct ccw_device *cdev) | ||
500 | { | ||
501 | struct device *dev = &cdev->dev; | ||
502 | int ret; | ||
503 | |||
504 | dev->bus = &ccw_bus_type; | ||
505 | |||
506 | if ((ret = device_add(dev))) | ||
507 | return ret; | ||
508 | |||
509 | set_bit(1, &cdev->private->registered); | ||
510 | if ((ret = device_add_files(dev))) { | ||
511 | if (test_and_clear_bit(1, &cdev->private->registered)) | ||
512 | device_del(dev); | ||
513 | } | ||
514 | return ret; | ||
515 | } | ||
516 | |||
/*
 * Find a disconnected ccw device with the given device number, other
 * than 'sibling'.  On a match the device's state is set to
 * DEV_STATE_NOT_OPER and it is returned with an extra reference held
 * (the caller must drop it); NULL if no such device exists.
 */
static struct ccw_device *
get_disc_ccwdev_by_devno(unsigned int devno, struct ccw_device *sibling)
{
	struct ccw_device *cdev;
	struct list_head *entry;
	struct device *dev;

	if (!get_bus(&ccw_bus_type))
		return NULL;
	down_read(&ccw_bus_type.subsys.rwsem);
	cdev = NULL;
	list_for_each(entry, &ccw_bus_type.devices.list) {
		dev = get_device(container_of(entry,
					      struct device, bus_list));
		if (!dev)
			continue;
		cdev = to_ccwdev(dev);
		if ((cdev->private->state == DEV_STATE_DISCONNECTED) &&
		    (cdev->private->devno == devno) &&
		    (cdev != sibling)) {
			cdev->private->state = DEV_STATE_NOT_OPER;
			/* Keep the reference taken above for the caller. */
			break;
		}
		put_device(dev);
		cdev = NULL;
	}
	up_read(&ccw_bus_type.subsys.rwsem);
	put_bus(&ccw_bus_type);

	return cdev;
}
548 | |||
/*
 * Work function: re-add a ccw device to the driver core after its
 * device number changed (scheduled by ccw_device_do_unreg_rereg).
 * Drops the device reference if device_add() fails.
 */
static void
ccw_device_add_changed(void *data)
{

	struct ccw_device *cdev;

	cdev = (struct ccw_device *)data;
	if (device_add(&cdev->dev)) {
		put_device(&cdev->dev);
		return;
	}
	set_bit(1, &cdev->private->registered);
	if (device_add_files(&cdev->dev)) {
		if (test_and_clear_bit(1, &cdev->private->registered))
			device_unregister(&cdev->dev);
	}
}
566 | |||
extern int css_get_ssd_info(struct subchannel *sch);

/*
 * Work function: unregister a ccw device and register it again.
 * If the device number visible on the subchannel changed in the
 * meantime, any old disconnected device with that number is thrown
 * out first and this device is renamed before re-registration, which
 * happens asynchronously via ccw_device_add_changed().
 */
void
ccw_device_do_unreg_rereg(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int need_rename;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	if (cdev->private->devno != sch->schib.pmcw.dev) {
		/*
		 * The device number has changed. This is usually only when
		 * a device has been detached under VM and then re-appeared
		 * on another subchannel because of a different attachment
		 * order than before. Ideally, we should should just switch
		 * subchannels, but unfortunately, this is not possible with
		 * the current implementation.
		 * Instead, we search for the old subchannel for this device
		 * number and deregister so there are no collisions with the
		 * newly registered ccw_device.
		 * FIXME: Find another solution so the block layer doesn't
		 *	  get possibly sick...
		 */
		struct ccw_device *other_cdev;

		need_rename = 1;
		other_cdev = get_disc_ccwdev_by_devno(sch->schib.pmcw.dev,
						      cdev);
		if (other_cdev) {
			struct subchannel *other_sch;

			other_sch = to_subchannel(other_cdev->dev.parent);
			if (get_device(&other_sch->dev)) {
				stsch(other_sch->irq, &other_sch->schib);
				if (other_sch->schib.pmcw.dnv) {
					/* Clear intparm on the old subchannel. */
					other_sch->schib.pmcw.intparm = 0;
					cio_modify(other_sch);
				}
				device_unregister(&other_sch->dev);
			}
		}
		/* Update ssd info here. */
		css_get_ssd_info(sch);
		cdev->private->devno = sch->schib.pmcw.dev;
	} else
		need_rename = 0;
	device_remove_files(&cdev->dev);
	if (test_and_clear_bit(1, &cdev->private->registered))
		device_del(&cdev->dev);
	if (need_rename)
		snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x",
			  sch->schib.pmcw.dev);
	PREPARE_WORK(&cdev->private->kick_work,
		     ccw_device_add_changed, (void *)cdev);
	queue_work(ccw_device_work, &cdev->private->kick_work);
}
625 | |||
626 | static void | ||
627 | ccw_device_release(struct device *dev) | ||
628 | { | ||
629 | struct ccw_device *cdev; | ||
630 | |||
631 | cdev = to_ccwdev(dev); | ||
632 | kfree(cdev->private); | ||
633 | kfree(cdev); | ||
634 | } | ||
635 | |||
/*
 * Register recognized device.
 *
 * Runs from the slow-path workqueue.  Registers the ccw device with
 * the driver core and adds the subchannel attributes; on registration
 * failure the device is freed and detached from its subchannel.
 * Always signals recognition completion via the wait queues and the
 * global init counter.
 */
static void
io_subchannel_register(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;
	unsigned long flags;

	cdev = (struct ccw_device *) data;
	sch = to_subchannel(cdev->dev.parent);

	/* A child already exists; just let drivers re-evaluate the bus. */
	if (!list_empty(&sch->dev.children)) {
		bus_rescan_devices(&ccw_bus_type);
		goto out;
	}
	/* make it known to the system */
	ret = ccw_device_register(cdev);
	if (ret) {
		printk (KERN_WARNING "%s: could not register %s\n",
			__func__, cdev->dev.bus_id);
		put_device(&cdev->dev);
		/* Detach the device from its subchannel before freeing. */
		spin_lock_irqsave(&sch->lock, flags);
		sch->dev.driver_data = NULL;
		spin_unlock_irqrestore(&sch->lock, flags);
		kfree (cdev->private);
		kfree (cdev);
		put_device(&sch->dev);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		return;
	}

	ret = subchannel_add_files(cdev->dev.parent);
	if (ret)
		printk(KERN_WARNING "%s: could not add attributes to %s\n",
		       __func__, sch->dev.bus_id);
	put_device(&cdev->dev);
out:
	cdev->private->flags.recog_done = 1;
	put_device(&sch->dev);
	wake_up(&cdev->private->wait_q);
	if (atomic_dec_and_test(&ccw_device_init_count))
		wake_up(&ccw_device_init_wq);
}
683 | |||
/*
 * Work function: unregister the subchannel of a vanished ccw device
 * and clear its interruption parameter.  Drops the references on the
 * ccw device and the subchannel taken by the scheduler of this work.
 */
void
ccw_device_call_sch_unregister(void *data)
{
	struct ccw_device *cdev = data;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	device_unregister(&sch->dev);
	/* Reset intparm to zeroes. */
	sch->schib.pmcw.intparm = 0;
	cio_modify(sch);
	put_device(&cdev->dev);
	put_device(&sch->dev);
}
698 | |||
/*
 * subchannel recognition done. Called from the state machine.
 *
 * Depending on the resulting device state, either schedules removal
 * of a not-operational device or schedules registration of a
 * recognized (boxed/offline) one.  Registration cannot happen here
 * because this runs in interrupt context.
 */
void
io_subchannel_recog_done(struct ccw_device *cdev)
{
	struct subchannel *sch;

	if (css_init_done == 0) {
		/* During early boot just flag completion; no workqueues yet. */
		cdev->private->flags.recog_done = 1;
		return;
	}
	switch (cdev->private->state) {
	case DEV_STATE_NOT_OPER:
		cdev->private->flags.recog_done = 1;
		/* Remove device found not operational. */
		if (!get_device(&cdev->dev))
			break;
		sch = to_subchannel(cdev->dev.parent);
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_call_sch_unregister, (void *) cdev);
		queue_work(slow_path_wq, &cdev->private->kick_work);
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
		break;
	case DEV_STATE_BOXED:
		/* Device did not respond in time. */
	case DEV_STATE_OFFLINE:
		/* 
		 * We can't register the device in interrupt context so
		 * we schedule a work item.
		 */
		if (!get_device(&cdev->dev))
			break;
		PREPARE_WORK(&cdev->private->kick_work,
			     io_subchannel_register, (void *) cdev);
		queue_work(slow_path_wq, &cdev->private->kick_work);
		break;
	}
}
739 | |||
/*
 * Set up a new ccw device on its subchannel and start asynchronous
 * device recognition (sense id).  The global init counter is raised
 * for the duration of recognition and dropped again if it could not
 * be started.  Returns the result of ccw_device_recognition().
 */
static int
io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
{
	int rc;
	struct ccw_device_private *priv;

	sch->dev.driver_data = cdev;
	sch->driver = &io_subchannel_driver;
	cdev->ccwlock = &sch->lock;
	/* Init private data. */
	priv = cdev->private;
	priv->devno = sch->schib.pmcw.dev;
	priv->irq = sch->irq;
	priv->state = DEV_STATE_NOT_OPER;
	INIT_LIST_HEAD(&priv->cmb_list);
	init_waitqueue_head(&priv->wait_q);
	init_timer(&priv->timer);

	/* Set an initial name for the device. */
	snprintf (cdev->dev.bus_id, BUS_ID_SIZE, "0.0.%04x",
		  sch->schib.pmcw.dev);

	/* Increase counter of devices currently in recognition. */
	atomic_inc(&ccw_device_init_count);

	/* Start async. device sensing. */
	spin_lock_irq(&sch->lock);
	rc = ccw_device_recognition(cdev);
	spin_unlock_irq(&sch->lock);
	if (rc) {
		/* Recognition never started; undo the counter increment. */
		if (atomic_dec_and_test(&ccw_device_init_count))
			wake_up(&ccw_device_init_wq);
	}
	return rc;
}
775 | |||
/*
 * Driver probe function for I/O subchannels. For an early device that
 * already has a ccw_device attached (e.g. the console) only the driver
 * core registration is completed; otherwise a new ccw_device is
 * allocated and device recognition is started on it.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -ENODEV if the
 * subchannel reference could not be taken, or the recognition error.
 */
static int
io_subchannel_probe (struct device *pdev)
{
	struct subchannel *sch;
	struct ccw_device *cdev;
	int rc;
	unsigned long flags;

	sch = to_subchannel(pdev);
	if (sch->dev.driver_data) {
		/*
		 * This subchannel already has an associated ccw_device.
		 * Register it and exit. This happens for all early
		 * device, e.g. the console.
		 */
		cdev = sch->dev.driver_data;
		device_initialize(&cdev->dev);
		ccw_device_register(cdev);
		subchannel_add_files(&sch->dev);
		/*
		 * Check if the device is already online. If it is
		 * the reference count needs to be corrected
		 * (see ccw_device_online and css_init_done for the
		 * ugly details).
		 */
		if (cdev->private->state != DEV_STATE_NOT_OPER &&
		    cdev->private->state != DEV_STATE_OFFLINE &&
		    cdev->private->state != DEV_STATE_BOXED)
			get_device(&cdev->dev);
		return 0;
	}
	cdev = kmalloc (sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;
	memset(cdev, 0, sizeof(struct ccw_device));
	/* GFP_DMA: private data presumably must be 31-bit addressable
	 * for the channel subsystem -- NOTE(review): confirm. */
	cdev->private = kmalloc(sizeof(struct ccw_device_private),
				GFP_KERNEL | GFP_DMA);
	if (!cdev->private) {
		kfree(cdev);
		return -ENOMEM;
	}
	memset(cdev->private, 0, sizeof(struct ccw_device_private));
	atomic_set(&cdev->private->onoff, 0);
	cdev->dev = (struct device) {
		.parent = pdev,
		.release = ccw_device_release,
	};
	INIT_LIST_HEAD(&cdev->private->kick_work.entry);
	/* Do first half of device_register. */
	device_initialize(&cdev->dev);

	if (!get_device(&sch->dev)) {
		/* Subchannel is going away; release the new device. */
		if (cdev->dev.release)
			cdev->dev.release(&cdev->dev);
		return -ENODEV;
	}

	rc = io_subchannel_recog(cdev, to_subchannel(pdev));
	if (rc) {
		/* Recognition failed: detach the device from the
		 * subchannel and release it again. */
		spin_lock_irqsave(&sch->lock, flags);
		sch->dev.driver_data = NULL;
		spin_unlock_irqrestore(&sch->lock, flags);
		if (cdev->dev.release)
			cdev->dev.release(&cdev->dev);
	}

	return rc;
}
844 | |||
845 | static void | ||
846 | ccw_device_unregister(void *data) | ||
847 | { | ||
848 | struct ccw_device *cdev; | ||
849 | |||
850 | cdev = (struct ccw_device *)data; | ||
851 | if (test_and_clear_bit(1, &cdev->private->registered)) | ||
852 | device_unregister(&cdev->dev); | ||
853 | put_device(&cdev->dev); | ||
854 | } | ||
855 | |||
/*
 * Driver remove function for I/O subchannels. Detaches the ccw device
 * from the subchannel, marks it not operational and schedules its
 * unregistration on the ccw_device_work workqueue.
 */
static int
io_subchannel_remove (struct device *dev)
{
	struct ccw_device *cdev;
	unsigned long flags;

	if (!dev->driver_data)
		return 0;
	cdev = dev->driver_data;
	/* Set ccw device to not operational and drop reference. */
	spin_lock_irqsave(cdev->ccwlock, flags);
	dev->driver_data = NULL;
	cdev->private->state = DEV_STATE_NOT_OPER;
	spin_unlock_irqrestore(cdev->ccwlock, flags);
	/*
	 * Put unregistration on workqueue to avoid livelocks on the css bus
	 * semaphore.
	 */
	if (get_device(&cdev->dev)) {
		/* The reference is dropped again in ccw_device_unregister(). */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_unregister, (void *) cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
	}
	return 0;
}
881 | |||
882 | static int | ||
883 | io_subchannel_notify(struct device *dev, int event) | ||
884 | { | ||
885 | struct ccw_device *cdev; | ||
886 | |||
887 | cdev = dev->driver_data; | ||
888 | if (!cdev) | ||
889 | return 0; | ||
890 | if (!cdev->drv) | ||
891 | return 0; | ||
892 | if (!cdev->online) | ||
893 | return 0; | ||
894 | return cdev->drv->notify ? cdev->drv->notify(cdev, event) : 0; | ||
895 | } | ||
896 | |||
897 | static void | ||
898 | io_subchannel_verify(struct device *dev) | ||
899 | { | ||
900 | struct ccw_device *cdev; | ||
901 | |||
902 | cdev = dev->driver_data; | ||
903 | if (cdev) | ||
904 | dev_fsm_event(cdev, DEV_EVENT_VERIFY); | ||
905 | } | ||
906 | |||
907 | static void | ||
908 | io_subchannel_ioterm(struct device *dev) | ||
909 | { | ||
910 | struct ccw_device *cdev; | ||
911 | |||
912 | cdev = dev->driver_data; | ||
913 | if (!cdev) | ||
914 | return; | ||
915 | cdev->private->state = DEV_STATE_CLEAR_VERIFY; | ||
916 | if (cdev->handler) | ||
917 | cdev->handler(cdev, cdev->private->intparm, | ||
918 | ERR_PTR(-EIO)); | ||
919 | } | ||
920 | |||
/*
 * Shut down an I/O subchannel: disable it, first quiescing any running
 * i/o via cancel/halt/clear with a short grace period. The console
 * subchannel is left untouched.
 */
static void
io_subchannel_shutdown(struct device *dev)
{
	struct subchannel *sch;
	struct ccw_device *cdev;
	int ret;

	sch = to_subchannel(dev);
	cdev = dev->driver_data;

	if (cio_is_console(sch->irq))
		return;
	if (!sch->schib.pmcw.ena)
		/* Nothing to do. */
		return;
	ret = cio_disable_subchannel(sch);
	if (ret != -EBUSY)
		/* Subchannel is disabled, we're done. */
		return;
	/* Subchannel busy: tell the driver its i/o is being terminated,
	 * then try to stop the running i/o. */
	cdev->private->state = DEV_STATE_QUIESCE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-EIO));
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Interrupt still expected; give it HZ/10 and wait for
		 * the state machine to reach a final state. */
		ccw_device_set_timeout(cdev, HZ/10);
		wait_event(cdev->private->wait_q, dev_fsm_final_state(cdev));
	}
	cio_disable_subchannel(sch);
}
951 | |||
952 | #ifdef CONFIG_CCW_CONSOLE | ||
/* Statically allocated console device and its private data -- the
 * console must work before dynamic device allocation is available. */
static struct ccw_device console_cdev;
static struct ccw_device_private console_private;
/* Guards against enabling the console device more than once. */
static int console_cdev_in_use;
956 | |||
957 | static int | ||
958 | ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) | ||
959 | { | ||
960 | int rc; | ||
961 | |||
962 | /* Initialize the ccw_device structure. */ | ||
963 | cdev->dev = (struct device) { | ||
964 | .parent = &sch->dev, | ||
965 | }; | ||
966 | /* Initialize the subchannel structure */ | ||
967 | sch->dev.parent = &css_bus_device; | ||
968 | sch->dev.bus = &css_bus_type; | ||
969 | |||
970 | rc = io_subchannel_recog(cdev, sch); | ||
971 | if (rc) | ||
972 | return rc; | ||
973 | |||
974 | /* Now wait for the async. recognition to come to an end. */ | ||
975 | spin_lock_irq(cdev->ccwlock); | ||
976 | while (!dev_fsm_final_state(cdev)) | ||
977 | wait_cons_dev(); | ||
978 | rc = -EIO; | ||
979 | if (cdev->private->state != DEV_STATE_OFFLINE) | ||
980 | goto out_unlock; | ||
981 | ccw_device_online(cdev); | ||
982 | while (!dev_fsm_final_state(cdev)) | ||
983 | wait_cons_dev(); | ||
984 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
985 | goto out_unlock; | ||
986 | rc = 0; | ||
987 | out_unlock: | ||
988 | spin_unlock_irq(cdev->ccwlock); | ||
989 | return 0; | ||
990 | } | ||
991 | |||
/*
 * Probe and enable the console ccw device. May only be used once
 * (guarded by console_cdev_in_use).
 *
 * Returns the console ccw device on success, an ERR_PTR on failure, or
 * NULL if the console device is already in use.
 */
struct ccw_device *
ccw_device_probe_console(void)
{
	struct subchannel *sch;
	int ret;

	if (xchg(&console_cdev_in_use, 1) != 0)
		return NULL;
	sch = cio_probe_console();
	if (IS_ERR(sch)) {
		console_cdev_in_use = 0;
		return (void *) sch;	/* propagate the ERR_PTR */
	}
	memset(&console_cdev, 0, sizeof(struct ccw_device));
	memset(&console_private, 0, sizeof(struct ccw_device_private));
	console_cdev.private = &console_private;
	ret = ccw_device_console_enable(&console_cdev, sch);
	if (ret) {
		cio_release_console();
		console_cdev_in_use = 0;
		return ERR_PTR(ret);
	}
	console_cdev.online = 1;
	return &console_cdev;
}
1017 | #endif | ||
1018 | |||
1019 | /* | ||
1020 | * get ccw_device matching the busid, but only if owned by cdrv | ||
1021 | */ | ||
1022 | struct ccw_device * | ||
1023 | get_ccwdev_by_busid(struct ccw_driver *cdrv, const char *bus_id) | ||
1024 | { | ||
1025 | struct device *d, *dev; | ||
1026 | struct device_driver *drv; | ||
1027 | |||
1028 | drv = get_driver(&cdrv->driver); | ||
1029 | if (!drv) | ||
1030 | return 0; | ||
1031 | |||
1032 | down_read(&drv->bus->subsys.rwsem); | ||
1033 | |||
1034 | dev = NULL; | ||
1035 | list_for_each_entry(d, &drv->devices, driver_list) { | ||
1036 | dev = get_device(d); | ||
1037 | |||
1038 | if (dev && !strncmp(bus_id, dev->bus_id, BUS_ID_SIZE)) | ||
1039 | break; | ||
1040 | else if (dev) { | ||
1041 | put_device(dev); | ||
1042 | dev = NULL; | ||
1043 | } | ||
1044 | } | ||
1045 | up_read(&drv->bus->subsys.rwsem); | ||
1046 | put_driver(drv); | ||
1047 | |||
1048 | return dev ? to_ccwdev(dev) : 0; | ||
1049 | } | ||
1050 | |||
1051 | /************************** device driver handling ************************/ | ||
1052 | |||
1053 | /* This is the implementation of the ccw_driver class. The probe, remove | ||
1054 | * and release methods are initially very similar to the device_driver | ||
1055 | * implementations, with the difference that they have ccw_device | ||
1056 | * arguments. | ||
1057 | * | ||
1058 | * A ccw driver also contains the information that is needed for | ||
1059 | * device matching. | ||
1060 | */ | ||
1061 | static int | ||
1062 | ccw_device_probe (struct device *dev) | ||
1063 | { | ||
1064 | struct ccw_device *cdev = to_ccwdev(dev); | ||
1065 | struct ccw_driver *cdrv = to_ccwdrv(dev->driver); | ||
1066 | int ret; | ||
1067 | |||
1068 | cdev->drv = cdrv; /* to let the driver call _set_online */ | ||
1069 | |||
1070 | ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV; | ||
1071 | |||
1072 | if (ret) { | ||
1073 | cdev->drv = 0; | ||
1074 | return ret; | ||
1075 | } | ||
1076 | |||
1077 | return 0; | ||
1078 | } | ||
1079 | |||
1080 | static int | ||
1081 | ccw_device_remove (struct device *dev) | ||
1082 | { | ||
1083 | struct ccw_device *cdev = to_ccwdev(dev); | ||
1084 | struct ccw_driver *cdrv = cdev->drv; | ||
1085 | int ret; | ||
1086 | |||
1087 | pr_debug("removing device %s\n", cdev->dev.bus_id); | ||
1088 | if (cdrv->remove) | ||
1089 | cdrv->remove(cdev); | ||
1090 | if (cdev->online) { | ||
1091 | cdev->online = 0; | ||
1092 | spin_lock_irq(cdev->ccwlock); | ||
1093 | ret = ccw_device_offline(cdev); | ||
1094 | spin_unlock_irq(cdev->ccwlock); | ||
1095 | if (ret == 0) | ||
1096 | wait_event(cdev->private->wait_q, | ||
1097 | dev_fsm_final_state(cdev)); | ||
1098 | else | ||
1099 | //FIXME: we can't fail! | ||
1100 | pr_debug("ccw_device_offline returned %d, device %s\n", | ||
1101 | ret, cdev->dev.bus_id); | ||
1102 | } | ||
1103 | ccw_device_set_timeout(cdev, 0); | ||
1104 | cdev->drv = 0; | ||
1105 | return 0; | ||
1106 | } | ||
1107 | |||
1108 | int | ||
1109 | ccw_driver_register (struct ccw_driver *cdriver) | ||
1110 | { | ||
1111 | struct device_driver *drv = &cdriver->driver; | ||
1112 | |||
1113 | drv->bus = &ccw_bus_type; | ||
1114 | drv->name = cdriver->name; | ||
1115 | drv->probe = ccw_device_probe; | ||
1116 | drv->remove = ccw_device_remove; | ||
1117 | |||
1118 | return driver_register(drv); | ||
1119 | } | ||
1120 | |||
/*
 * Unregister a ccw driver from the driver core.
 */
void
ccw_driver_unregister (struct ccw_driver *cdriver)
{
	driver_unregister(&cdriver->driver);
}
1126 | |||
MODULE_LICENSE("GPL");
/* Exported interface used by ccw device drivers. */
EXPORT_SYMBOL(ccw_device_set_online);
EXPORT_SYMBOL(ccw_device_set_offline);
EXPORT_SYMBOL(ccw_driver_register);
EXPORT_SYMBOL(ccw_driver_unregister);
EXPORT_SYMBOL(get_ccwdev_by_busid);
EXPORT_SYMBOL(ccw_bus_type);
EXPORT_SYMBOL(ccw_device_work);
EXPORT_SYMBOL(ccw_device_notify_work);
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h new file mode 100644 index 000000000000..a3aa056d7245 --- /dev/null +++ b/drivers/s390/cio/device.h | |||
@@ -0,0 +1,115 @@ | |||
1 | #ifndef S390_DEVICE_H | ||
2 | #define S390_DEVICE_H | ||
3 | |||
/*
 * states of the device statemachine
 */
enum dev_state {
	DEV_STATE_NOT_OPER,
	DEV_STATE_SENSE_PGID,
	DEV_STATE_SENSE_ID,	/* sense id (device recognition) running */
	DEV_STATE_OFFLINE,
	DEV_STATE_VERIFY,	/* path verification running */
	DEV_STATE_ONLINE,
	DEV_STATE_W4SENSE,
	DEV_STATE_DISBAND_PGID,
	DEV_STATE_BOXED,	/* device did not respond in time */
	/* states to wait for i/o completion before doing something */
	DEV_STATE_CLEAR_VERIFY,
	DEV_STATE_TIMEOUT_KILL,
	DEV_STATE_WAIT4IO,
	DEV_STATE_QUIESCE,	/* quiescing i/o for shutdown */
	/* special states for devices gone not operational */
	DEV_STATE_DISCONNECTED,
	DEV_STATE_DISCONNECTED_SENSE_ID,
	DEV_STATE_CMFCHANGE,
	/* last element! */
	NR_DEV_STATES
};
29 | |||
/*
 * asynchronous events of the device statemachine
 */
enum dev_event {
	DEV_EVENT_NOTOPER,	/* device became not operational */
	DEV_EVENT_INTERRUPT,	/* interrupt received for the device */
	DEV_EVENT_TIMEOUT,	/* timer set via ccw_device_set_timeout fired */
	DEV_EVENT_VERIFY,	/* request path verification */
	/* last element! */
	NR_DEV_EVENTS
};
41 | |||
42 | struct ccw_device; | ||
43 | |||
44 | /* | ||
45 | * action called through jumptable | ||
46 | */ | ||
47 | typedef void (fsm_func_t)(struct ccw_device *, enum dev_event); | ||
48 | extern fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS]; | ||
49 | |||
/* Dispatch @dev_event to the handler for the device's current state
 * via the two-dimensional jumptable. */
static inline void
dev_fsm_event(struct ccw_device *cdev, enum dev_event dev_event)
{
	dev_jumptable[cdev->private->state][dev_event](cdev, dev_event);
}
55 | |||
56 | /* | ||
57 | * Delivers 1 if the device state is final. | ||
58 | */ | ||
59 | static inline int | ||
60 | dev_fsm_final_state(struct ccw_device *cdev) | ||
61 | { | ||
62 | return (cdev->private->state == DEV_STATE_NOT_OPER || | ||
63 | cdev->private->state == DEV_STATE_OFFLINE || | ||
64 | cdev->private->state == DEV_STATE_ONLINE || | ||
65 | cdev->private->state == DEV_STATE_BOXED); | ||
66 | } | ||
67 | |||
68 | extern struct workqueue_struct *ccw_device_work; | ||
69 | extern struct workqueue_struct *ccw_device_notify_work; | ||
70 | |||
71 | void io_subchannel_recog_done(struct ccw_device *cdev); | ||
72 | |||
73 | int ccw_device_cancel_halt_clear(struct ccw_device *); | ||
74 | |||
75 | int ccw_device_register(struct ccw_device *); | ||
76 | void ccw_device_do_unreg_rereg(void *); | ||
77 | void ccw_device_call_sch_unregister(void *); | ||
78 | |||
79 | int ccw_device_recognition(struct ccw_device *); | ||
80 | int ccw_device_online(struct ccw_device *); | ||
81 | int ccw_device_offline(struct ccw_device *); | ||
82 | |||
83 | /* Function prototypes for device status and basic sense stuff. */ | ||
84 | void ccw_device_accumulate_irb(struct ccw_device *, struct irb *); | ||
85 | void ccw_device_accumulate_basic_sense(struct ccw_device *, struct irb *); | ||
86 | int ccw_device_accumulate_and_sense(struct ccw_device *, struct irb *); | ||
87 | int ccw_device_do_sense(struct ccw_device *, struct irb *); | ||
88 | |||
89 | /* Function prototypes for sense id stuff. */ | ||
90 | void ccw_device_sense_id_start(struct ccw_device *); | ||
91 | void ccw_device_sense_id_irq(struct ccw_device *, enum dev_event); | ||
92 | void ccw_device_sense_id_done(struct ccw_device *, int); | ||
93 | |||
94 | /* Function prototypes for path grouping stuff. */ | ||
95 | void ccw_device_sense_pgid_start(struct ccw_device *); | ||
96 | void ccw_device_sense_pgid_irq(struct ccw_device *, enum dev_event); | ||
97 | void ccw_device_sense_pgid_done(struct ccw_device *, int); | ||
98 | |||
99 | void ccw_device_verify_start(struct ccw_device *); | ||
100 | void ccw_device_verify_irq(struct ccw_device *, enum dev_event); | ||
101 | void ccw_device_verify_done(struct ccw_device *, int); | ||
102 | |||
103 | void ccw_device_disband_start(struct ccw_device *); | ||
104 | void ccw_device_disband_irq(struct ccw_device *, enum dev_event); | ||
105 | void ccw_device_disband_done(struct ccw_device *, int); | ||
106 | |||
107 | int ccw_device_call_handler(struct ccw_device *); | ||
108 | |||
109 | int ccw_device_stlck(struct ccw_device *); | ||
110 | |||
111 | /* qdio needs this. */ | ||
112 | void ccw_device_set_timeout(struct ccw_device *, int); | ||
113 | |||
114 | void retry_set_schib(struct ccw_device *cdev); | ||
115 | #endif | ||
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c new file mode 100644 index 000000000000..9b7f6f548b1d --- /dev/null +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -0,0 +1,1250 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/device_fsm.c | ||
3 | * finite state machine for device handling | ||
4 | * | ||
5 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | ||
6 | * IBM Corporation | ||
7 | * Author(s): Cornelia Huck(cohuck@de.ibm.com) | ||
8 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
9 | */ | ||
10 | |||
11 | #include <linux/module.h> | ||
12 | #include <linux/config.h> | ||
13 | #include <linux/init.h> | ||
14 | |||
15 | #include <asm/ccwdev.h> | ||
16 | #include <asm/qdio.h> | ||
17 | |||
18 | #include "cio.h" | ||
19 | #include "cio_debug.h" | ||
20 | #include "css.h" | ||
21 | #include "device.h" | ||
22 | #include "chsc.h" | ||
23 | #include "ioasm.h" | ||
24 | #include "qdio.h" | ||
25 | |||
26 | int | ||
27 | device_is_online(struct subchannel *sch) | ||
28 | { | ||
29 | struct ccw_device *cdev; | ||
30 | |||
31 | if (!sch->dev.driver_data) | ||
32 | return 0; | ||
33 | cdev = sch->dev.driver_data; | ||
34 | return (cdev->private->state == DEV_STATE_ONLINE); | ||
35 | } | ||
36 | |||
37 | int | ||
38 | device_is_disconnected(struct subchannel *sch) | ||
39 | { | ||
40 | struct ccw_device *cdev; | ||
41 | |||
42 | if (!sch->dev.driver_data) | ||
43 | return 0; | ||
44 | cdev = sch->dev.driver_data; | ||
45 | return (cdev->private->state == DEV_STATE_DISCONNECTED || | ||
46 | cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID); | ||
47 | } | ||
48 | |||
49 | void | ||
50 | device_set_disconnected(struct subchannel *sch) | ||
51 | { | ||
52 | struct ccw_device *cdev; | ||
53 | |||
54 | if (!sch->dev.driver_data) | ||
55 | return; | ||
56 | cdev = sch->dev.driver_data; | ||
57 | ccw_device_set_timeout(cdev, 0); | ||
58 | cdev->private->flags.fake_irb = 0; | ||
59 | cdev->private->state = DEV_STATE_DISCONNECTED; | ||
60 | } | ||
61 | |||
62 | void | ||
63 | device_set_waiting(struct subchannel *sch) | ||
64 | { | ||
65 | struct ccw_device *cdev; | ||
66 | |||
67 | if (!sch->dev.driver_data) | ||
68 | return; | ||
69 | cdev = sch->dev.driver_data; | ||
70 | ccw_device_set_timeout(cdev, 10*HZ); | ||
71 | cdev->private->state = DEV_STATE_WAIT4IO; | ||
72 | } | ||
73 | |||
/*
 * Timeout function. It just triggers a DEV_EVENT_TIMEOUT.
 */
static void
ccw_device_timeout(unsigned long data)
{
	struct ccw_device *cdev;

	/* @data is the ccw device, as set up in ccw_device_set_timeout(). */
	cdev = (struct ccw_device *) data;
	/* Drive the state machine under the ccw lock. */
	spin_lock_irq(cdev->ccwlock);
	dev_fsm_event(cdev, DEV_EVENT_TIMEOUT);
	spin_unlock_irq(cdev->ccwlock);
}
87 | |||
/*
 * Set timeout
 *
 * (Re)arm the per-device timer to fire after @expires jiffies, or
 * delete it if @expires is 0. The timer invokes ccw_device_timeout(),
 * which feeds DEV_EVENT_TIMEOUT into the state machine.
 */
void
ccw_device_set_timeout(struct ccw_device *cdev, int expires)
{
	if (expires == 0) {
		del_timer(&cdev->private->timer);
		return;
	}
	if (timer_pending(&cdev->private->timer)) {
		/* mod_timer() returns nonzero if it modified a pending
		 * timer; then we are done. Otherwise fall through and
		 * set up the timer from scratch. */
		if (mod_timer(&cdev->private->timer, jiffies + expires))
			return;
	}
	cdev->private->timer.function = ccw_device_timeout;
	cdev->private->timer.data = (unsigned long) cdev;
	cdev->private->timer.expires = jiffies + expires;
	add_timer(&cdev->private->timer);
}
107 | |||
108 | /* Kill any pending timers after machine check. */ | ||
109 | void | ||
110 | device_kill_pending_timer(struct subchannel *sch) | ||
111 | { | ||
112 | struct ccw_device *cdev; | ||
113 | |||
114 | if (!sch->dev.driver_data) | ||
115 | return; | ||
116 | cdev = sch->dev.driver_data; | ||
117 | ccw_device_set_timeout(cdev, 0); | ||
118 | } | ||
119 | |||
/*
 * Cancel running i/o. This is called repeatedly since halt/clear are
 * asynchronous operations. We do one try with cio_cancel, two tries
 * with cio_halt, 255 tries with cio_clear. If everythings fails panic.
 * Returns 0 if device now idle, -ENODEV for device not operational and
 * -EBUSY if an interrupt is expected (either from halt/clear or from a
 * status pending).
 */
int
ccw_device_cancel_halt_clear(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	sch = to_subchannel(cdev->dev.parent);
	ret = stsch(sch->irq, &sch->schib);
	if (ret || !sch->schib.pmcw.dnv)
		/* stsch failed or device number not valid. */
		return -ENODEV;
	if (!sch->schib.pmcw.ena || sch->schib.scsw.actl == 0)
		/* Not operational or no activity -> done. */
		return 0;
	/* Stage 1: cancel io. */
	if (!(sch->schib.scsw.actl & SCSW_ACTL_HALT_PEND) &&
	    !(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		ret = cio_cancel(sch);
		if (ret != -EINVAL)
			return ret;
		/* cancel io unsuccessful. From now on it is asynchronous. */
		cdev->private->iretry = 3;	/* 3 halt retries. */
	}
	if (!(sch->schib.scsw.actl & SCSW_ACTL_CLEAR_PEND)) {
		/* Stage 2: halt io. */
		if (cdev->private->iretry) {
			cdev->private->iretry--;
			ret = cio_halt(sch);
			/* cio_halt returning 0 means an interrupt is
			 * still to come -> -EBUSY for the caller. */
			return (ret == 0) ? -EBUSY : ret;
		}
		/* halt io unsuccessful. */
		cdev->private->iretry = 255;	/* 255 clear retries. */
	}
	/* Stage 3: clear io. */
	if (cdev->private->iretry) {
		cdev->private->iretry--;
		ret = cio_clear (sch);
		return (ret == 0) ? -EBUSY : ret;
	}
	/* All retries exhausted; the subchannel cannot be stopped. */
	panic("Can't stop i/o on subchannel.\n");
}
168 | |||
/*
 * A previously disconnected device became operational again. Check
 * whether it is still the same device; if not, schedule de- and
 * re-registration. Returns 1 for the same device, 0 otherwise.
 */
static int
ccw_device_handle_oper(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	cdev->private->flags.recog_done = 1;
	/*
	 * Check if cu type and device type still match. If
	 * not, it is certainly another device and we have to
	 * de- and re-register. Also check here for non-matching devno.
	 */
	if (cdev->id.cu_type != cdev->private->senseid.cu_type ||
	    cdev->id.cu_model != cdev->private->senseid.cu_model ||
	    cdev->id.dev_type != cdev->private->senseid.dev_type ||
	    cdev->id.dev_model != cdev->private->senseid.dev_model ||
	    cdev->private->devno != sch->schib.pmcw.dev) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_do_unreg_rereg, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
		return 0;
	}
	/* Same device: have ccw_device_done() notify the driver later. */
	cdev->private->flags.donotify = 1;
	return 1;
}
194 | |||
195 | /* | ||
196 | * The machine won't give us any notification by machine check if a chpid has | ||
197 | * been varied online on the SE so we have to find out by magic (i. e. driving | ||
198 | * the channel subsystem to device selection and updating our path masks). | ||
199 | */ | ||
200 | static inline void | ||
201 | __recover_lost_chpids(struct subchannel *sch, int old_lpm) | ||
202 | { | ||
203 | int mask, i; | ||
204 | |||
205 | for (i = 0; i<8; i++) { | ||
206 | mask = 0x80 >> i; | ||
207 | if (!(sch->lpm & mask)) | ||
208 | continue; | ||
209 | if (old_lpm & mask) | ||
210 | continue; | ||
211 | chpid_is_actually_online(sch->schib.pmcw.chpid[i]); | ||
212 | } | ||
213 | } | ||
214 | |||
/*
 * Stop device recognition.
 *
 * Called with the final recognition result in @state. Refreshes the
 * path mask from the schib, handles reappearance of disconnected
 * devices, records the sense id data and hands control back to
 * io_subchannel_recog_done().
 */
static void
ccw_device_recog_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;
	int notify, old_lpm, same_dev;

	sch = to_subchannel(cdev->dev.parent);

	ccw_device_set_timeout(cdev, 0);
	cio_disable_subchannel(sch);
	/*
	 * Now that we tried recognition, we have performed device selection
	 * through ssch() and the path information is up to date.
	 */
	old_lpm = sch->lpm;
	stsch(sch->irq, &sch->schib);
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID)
		/* Force reprobe on all chpids. */
		old_lpm = 0;
	if (sch->lpm != old_lpm)
		__recover_lost_chpids(sch, old_lpm);
	if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
		if (state == DEV_STATE_NOT_OPER) {
			/* Still gone: stay disconnected. */
			cdev->private->flags.recog_done = 1;
			cdev->private->state = DEV_STATE_DISCONNECTED;
			return;
		}
		/* Boxed devices don't need extra treatment. */
	}
	notify = 0;
	same_dev = 0; /* Keep the compiler quiet... */
	switch (state) {
	case DEV_STATE_NOT_OPER:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : unknown device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	case DEV_STATE_OFFLINE:
		if (cdev->private->state == DEV_STATE_DISCONNECTED_SENSE_ID) {
			/* Disconnected device reappeared. */
			same_dev = ccw_device_handle_oper(cdev);
			notify = 1;
		}
		/* fill out sense information */
		cdev->id = (struct ccw_device_id) {
			.cu_type = cdev->private->senseid.cu_type,
			.cu_model = cdev->private->senseid.cu_model,
			.dev_type = cdev->private->senseid.dev_type,
			.dev_model = cdev->private->senseid.dev_model,
		};
		if (notify) {
			cdev->private->state = DEV_STATE_OFFLINE;
			if (same_dev) {
				/* Get device online again. */
				ccw_device_online(cdev);
				wake_up(&cdev->private->wait_q);
			}
			return;
		}
		/* Issue device info message. */
		CIO_DEBUG(KERN_INFO, 2, "SenseID : device %04x reports: "
			  "CU Type/Mod = %04X/%02X, Dev Type/Mod = "
			  "%04X/%02X\n", cdev->private->devno,
			  cdev->id.cu_type, cdev->id.cu_model,
			  cdev->id.dev_type, cdev->id.dev_model);
		break;
	case DEV_STATE_BOXED:
		CIO_DEBUG(KERN_WARNING, 2,
			  "SenseID : boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);
		break;
	}
	cdev->private->state = state;
	io_subchannel_recog_done(cdev);
	if (state != DEV_STATE_NOT_OPER)
		wake_up(&cdev->private->wait_q);
}
298 | |||
299 | /* | ||
300 | * Function called from device_id.c after sense id has completed. | ||
301 | */ | ||
302 | void | ||
303 | ccw_device_sense_id_done(struct ccw_device *cdev, int err) | ||
304 | { | ||
305 | switch (err) { | ||
306 | case 0: | ||
307 | ccw_device_recog_done(cdev, DEV_STATE_OFFLINE); | ||
308 | break; | ||
309 | case -ETIME: /* Sense id stopped by timeout. */ | ||
310 | ccw_device_recog_done(cdev, DEV_STATE_BOXED); | ||
311 | break; | ||
312 | default: | ||
313 | ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER); | ||
314 | break; | ||
315 | } | ||
316 | } | ||
317 | |||
318 | static void | ||
319 | ccw_device_oper_notify(void *data) | ||
320 | { | ||
321 | struct ccw_device *cdev; | ||
322 | struct subchannel *sch; | ||
323 | int ret; | ||
324 | |||
325 | cdev = (struct ccw_device *)data; | ||
326 | sch = to_subchannel(cdev->dev.parent); | ||
327 | ret = (sch->driver && sch->driver->notify) ? | ||
328 | sch->driver->notify(&sch->dev, CIO_OPER) : 0; | ||
329 | if (!ret) | ||
330 | /* Driver doesn't want device back. */ | ||
331 | ccw_device_do_unreg_rereg((void *)cdev); | ||
332 | else | ||
333 | wake_up(&cdev->private->wait_q); | ||
334 | } | ||
335 | |||
/*
 * Finished with online/offline processing.
 *
 * Enter the final @state: disable the subchannel unless the device is
 * going online, reset the accumulated status, optionally notify the
 * driver that a disconnected device reappeared (flags.donotify, set in
 * ccw_device_handle_oper) and wake up anyone waiting for a final state.
 */
static void
ccw_device_done(struct ccw_device *cdev, int state)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);

	if (state != DEV_STATE_ONLINE)
		cio_disable_subchannel(sch);

	/* Reset device status. */
	memset(&cdev->private->irb, 0, sizeof(struct irb));

	cdev->private->state = state;


	if (state == DEV_STATE_BOXED)
		CIO_DEBUG(KERN_WARNING, 2,
			  "Boxed device %04x on subchannel %04x\n",
			  cdev->private->devno, sch->irq);

	if (cdev->private->flags.donotify) {
		/* Defer driver notification to a workqueue. */
		cdev->private->flags.donotify = 0;
		PREPARE_WORK(&cdev->private->kick_work, ccw_device_oper_notify,
			     (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);

	/* Drop the extra reference held while online (see the comment on
	 * ccw_device_online/css_init_done in io_subchannel_probe). */
	if (css_init_done && state != DEV_STATE_ONLINE)
		put_device (&cdev->dev);
}
371 | |||
/*
 * Function called from device_pgid.c after sense path group id has
 * completed. Maps the result onto the next state: start path
 * verification, box the device, or go online without path grouping.
 */
void
ccw_device_sense_pgid_done(struct ccw_device *cdev, int err)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	switch (err) {
	case 0:
		/* Start Path Group verification. */
		sch->vpm = 0;	/* Start with no path groups set. */
		cdev->private->state = DEV_STATE_VERIFY;
		ccw_device_verify_start(cdev);
		break;
	case -ETIME:		/* Sense path group id stopped by timeout. */
	case -EUSERS:		/* device is reserved for someone else. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		break;
	default:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
401 | |||
/*
 * Start device recognition.
 *
 * Valid only in the NOT_OPER or BOXED states. Enables the subchannel,
 * arms a 60 second timeout and starts the asynchronous sense id
 * procedure; completion is reported via ccw_device_sense_id_done().
 * Returns 0 on success, -EINVAL for a wrong state, or the error from
 * cio_enable_subchannel().
 */
int
ccw_device_recognition(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_NOT_OPER) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return ret;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	/*
	 * We used to start here with a sense pgid to find out whether a device
	 * is locked by someone else. Unfortunately, the sense pgid command
	 * code has other meanings on devices predating the path grouping
	 * algorithm, so we start with sense id and box the device after an
	 * timeout (or if sense pgid during path verification detects the device
	 * is locked, as may happen on newer devices).
	 */
	cdev->private->flags.recog_done = 0;
	cdev->private->state = DEV_STATE_SENSE_ID;
	ccw_device_sense_id_start(cdev);
	return 0;
}
436 | |||
/*
 * Handle timeout in device recognition.
 *
 * Try to cancel/halt/clear pending i/o; on success the device is boxed,
 * on -ENODEV it is not operational, and while the cancel is still busy
 * we re-arm a short 3 second retry timer.
 */
static void
ccw_device_recog_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		ccw_device_recog_done(cdev, DEV_STATE_BOXED);
		break;
	case -ENODEV:
		ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
		break;
	default:
		/* Cancel still in progress; retry shortly. */
		ccw_device_set_timeout(cdev, 3*HZ);
	}
}
457 | |||
458 | |||
/*
 * Work function run from ccw_device_notify_work when all paths to the
 * device have gone away. Asks the subchannel driver whether it wants to
 * keep the device; if not (or no notify callback exists), the subchannel
 * is disabled and its unregistration is scheduled. Otherwise the device
 * is parked in disconnected state.
 */
static void
ccw_device_nopath_notify(void *data)
{
	struct ccw_device *cdev;
	struct subchannel *sch;
	int ret;

	cdev = (struct ccw_device *)data;
	sch = to_subchannel(cdev->dev.parent);
	/* Extra sanity: paths may have come back in the meantime. */
	if (sch->lpm)
		return;
	ret = (sch->driver && sch->driver->notify) ?
		sch->driver->notify(&sch->dev, CIO_NO_PATH) : 0;
	if (!ret) {
		/* Driver doesn't want to keep device. */
		if (get_device(&sch->dev)) {
			cio_disable_subchannel(sch);
			if (get_device(&cdev->dev)) {
				PREPARE_WORK(&cdev->private->kick_work,
					     ccw_device_call_sch_unregister,
					     (void *)cdev);
				queue_work(ccw_device_work,
					   &cdev->private->kick_work);
			} else
				/* cdev ref failed; drop the sch ref again. */
				put_device(&sch->dev);
		}
	} else {
		/* Driver keeps the device: go disconnected and wait. */
		cio_disable_subchannel(sch);
		ccw_device_set_timeout(cdev, 0);
		cdev->private->flags.fake_irb = 0;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
	}
}
494 | |||
/*
 * Called from device_pgid.c when path verification has finished.
 * On success the device goes online; if a start request arrived during
 * verification, a fake "start pending" irb is delivered to the driver so
 * it can retry.
 */
void
ccw_device_verify_done(struct ccw_device *cdev, int err)
{
	cdev->private->flags.doverify = 0;
	switch (err) {
	case -EOPNOTSUPP: /* path grouping not supported, just set online. */
		cdev->private->options.pgroup = 0;
		/* fallthrough */
	case 0:
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		/* Deliver fake irb to device driver, if needed. */
		if (cdev->private->flags.fake_irb) {
			memset(&cdev->private->irb, 0, sizeof(struct irb));
			cdev->private->irb.scsw = (struct scsw) {
				.cc = 1,
				.fctl = SCSW_FCTL_START_FUNC,
				.actl = SCSW_ACTL_START_PEND,
				.stctl = SCSW_STCTL_STATUS_PEND,
			};
			cdev->private->flags.fake_irb = 0;
			if (cdev->handler)
				cdev->handler(cdev, cdev->private->intparm,
					      &cdev->private->irb);
			memset(&cdev->private->irb, 0, sizeof(struct irb));
		}
		break;
	case -ETIME:
		/* Verification timed out: box the device. */
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	default:
		/* No usable path left; notify and give up on the device. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	}
}
531 | |||
/*
 * Get device online.
 *
 * Valid only from offline or boxed state. Takes a device reference (after
 * css init), enables the subchannel and either goes online directly or
 * starts the sense-PGID sequence when path grouping is enabled.
 * Returns 0 on success or a negative errno.
 */
int
ccw_device_online(struct ccw_device *cdev)
{
	struct subchannel *sch;
	int ret;

	if ((cdev->private->state != DEV_STATE_OFFLINE) &&
	    (cdev->private->state != DEV_STATE_BOXED))
		return -EINVAL;
	sch = to_subchannel(cdev->dev.parent);
	if (css_init_done && !get_device(&cdev->dev))
		return -ENODEV;
	ret = cio_enable_subchannel(sch, sch->schib.pmcw.isc);
	if (ret != 0) {
		/* Couldn't enable the subchannel for i/o. Sick device. */
		if (ret == -ENODEV)
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return ret;
	}
	/* Do we want to do path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state online immediately. */
		ccw_device_done(cdev, DEV_STATE_ONLINE);
		return 0;
	}
	/* Do a SensePGID first. */
	cdev->private->state = DEV_STATE_SENSE_PGID;
	ccw_device_sense_pgid_start(cdev);
	return 0;
}
565 | |||
566 | void | ||
567 | ccw_device_disband_done(struct ccw_device *cdev, int err) | ||
568 | { | ||
569 | switch (err) { | ||
570 | case 0: | ||
571 | ccw_device_done(cdev, DEV_STATE_OFFLINE); | ||
572 | break; | ||
573 | case -ETIME: | ||
574 | ccw_device_done(cdev, DEV_STATE_BOXED); | ||
575 | break; | ||
576 | default: | ||
577 | ccw_device_done(cdev, DEV_STATE_NOT_OPER); | ||
578 | break; | ||
579 | } | ||
580 | } | ||
581 | |||
/*
 * Shutdown device.
 *
 * Refuses while i/o is still active (-EBUSY) or the device is not online
 * (-EINVAL/-ENODEV). Either goes offline directly or starts the path
 * group disband sequence. Returns 0 on success or a negative errno.
 */
int
ccw_device_offline(struct ccw_device *cdev)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/* Refresh the schib; bail out if the device vanished. */
	if (stsch(sch->irq, &sch->schib) || !sch->schib.pmcw.dnv)
		return -ENODEV;
	if (cdev->private->state != DEV_STATE_ONLINE) {
		/* Busy i/o takes precedence over the wrong-state error. */
		if (sch->schib.scsw.actl != 0)
			return -EBUSY;
		return -EINVAL;
	}
	if (sch->schib.scsw.actl != 0)
		return -EBUSY;
	/* Are we doing path grouping? */
	if (!cdev->private->options.pgroup) {
		/* No, set state offline immediately. */
		ccw_device_done(cdev, DEV_STATE_OFFLINE);
		return 0;
	}
	/* Start Set Path Group commands. */
	cdev->private->state = DEV_STATE_DISBAND_PGID;
	ccw_device_disband_start(cdev);
	return 0;
}
611 | |||
/*
 * Handle timeout in device online/offline process.
 *
 * Same strategy as the recognition timeout: cancel/halt/clear, box on
 * success, not-oper on -ENODEV, otherwise retry in 3 seconds.
 */
static void
ccw_device_onoff_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		ccw_device_done(cdev, DEV_STATE_BOXED);
		break;
	case -ENODEV:
		ccw_device_done(cdev, DEV_STATE_NOT_OPER);
		break;
	default:
		/* Cancel still in progress; retry shortly. */
		ccw_device_set_timeout(cdev, 3*HZ);
	}
}
632 | |||
/*
 * Handle not oper event in device recognition.
 * Recognition cannot continue; finish it with "not operational".
 */
static void
ccw_device_recog_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	ccw_device_recog_done(cdev, DEV_STATE_NOT_OPER);
}
641 | |||
/*
 * Handle not operational event while offline.
 *
 * Marks the device not operational and schedules the unregistration of
 * its subchannel (the extra device reference is consumed by the work
 * function).
 */
static void
ccw_device_offline_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	cdev->private->state = DEV_STATE_NOT_OPER;
	sch = to_subchannel(cdev->dev.parent);
	if (get_device(&cdev->dev)) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_call_sch_unregister, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);
}
659 | |||
/*
 * Handle not operational event while online.
 *
 * Gives the subchannel driver a chance to keep the device (CIO_GONE if
 * paths remain, CIO_NO_PATH otherwise); if it does, the device goes to
 * disconnected state. Otherwise the subchannel is disabled and scheduled
 * for unregistration.
 */
static void
ccw_device_online_notoper(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (sch->driver->notify &&
	    sch->driver->notify(&sch->dev, sch->lpm ? CIO_GONE : CIO_NO_PATH)) {
		/* Driver keeps the device: park it disconnected. */
		ccw_device_set_timeout(cdev, 0);
		cdev->private->flags.fake_irb = 0;
		cdev->private->state = DEV_STATE_DISCONNECTED;
		wake_up(&cdev->private->wait_q);
		return;
	}
	cdev->private->state = DEV_STATE_NOT_OPER;
	cio_disable_subchannel(sch);
	if (sch->schib.scsw.actl != 0) {
		// FIXME: not-oper indication to device driver ?
		ccw_device_call_handler(cdev);
	}
	if (get_device(&cdev->dev)) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_call_sch_unregister, (void *)cdev);
		queue_work(ccw_device_work, &cdev->private->kick_work);
	}
	wake_up(&cdev->private->wait_q);
}
690 | |||
/*
 * Handle path verification event.
 *
 * Starts path verification immediately if the device is idle; otherwise
 * (i/o or basic sense still in flight) just records the request in
 * flags.doverify so verification runs after the final status is
 * delivered. A no-op when path grouping is disabled.
 */
static void
ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	if (!cdev->private->options.pgroup)
		return;
	if (cdev->private->state == DEV_STATE_W4SENSE) {
		/* Basic sense in progress: defer verification. */
		cdev->private->flags.doverify = 1;
		return;
	}
	sch = to_subchannel(cdev->dev.parent);
	/*
	 * Since we might not just be coming from an interrupt from the
	 * subchannel we have to update the schib.
	 */
	stsch(sch->irq, &sch->schib);

	if (sch->schib.scsw.actl != 0 ||
	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
		/*
		 * No final status yet or final status not yet delivered
		 * to the device driver. Can't do path verification now,
		 * delay until final status was delivered.
		 */
		cdev->private->flags.doverify = 1;
		return;
	}
	/* Device is idle, we can do the path verification. */
	cdev->private->state = DEV_STATE_VERIFY;
	ccw_device_verify_start(cdev);
}
726 | |||
/*
 * Got an interrupt for a normal io (state online).
 *
 * Reads the irb from the lowcore. Unsolicited interrupts are passed to
 * the driver with intparm 0 (after starting a basic sense if a unit
 * check arrived without concurrent sense data). Solicited status is
 * accumulated and either triggers a basic sense, or the driver handler
 * plus any delayed path verification.
 */
static void
ccw_device_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if ((irb->scsw.stctl ==
	    		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS))
	    && (!irb->scsw.cc)) {
		if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
		    !irb->esw.esw0.erw.cons) {
			/* Unit check but no sense data. Need basic sense. */
			if (ccw_device_do_sense(cdev, irb) != 0)
				goto call_handler_unsol;
			/* Keep a copy of the irb while sense is running. */
			memcpy(irb, &cdev->private->irb, sizeof(struct irb));
			cdev->private->state = DEV_STATE_W4SENSE;
			cdev->private->intparm = 0;
			return;
		}
call_handler_unsol:
		if (cdev->handler)
			cdev->handler (cdev, 0, irb);
		return;
	}
	/* Accumulate status and find out if a basic sense is needed. */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
768 | |||
/*
 * Got a timeout in online state.
 *
 * Tries to cancel/halt/clear the hanging i/o. -EBUSY moves the device
 * into the TIMEOUT_KILL state with a retry timer; -ENODEV either
 * notifies "no path" or raises a NOTOPER event; otherwise the driver is
 * told the request timed out via ERR_PTR(-ETIMEDOUT).
 */
static void
ccw_device_online_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
	} else if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
799 | |||
/*
 * Got an interrupt for a basic sense.
 *
 * Retries the sense on cc=1, forwards truly unsolicited interrupts to
 * the driver, otherwise accumulates the sense data and — once no more
 * sense is needed — returns to online state, calls the driver handler
 * and runs any delayed path verification.
 */
void
ccw_device_w4sense(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Check for unsolicited interrupt. */
	if (irb->scsw.stctl ==
	    		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
		if (irb->scsw.cc == 1)
			/* Basic sense hasn't started. Try again. */
			ccw_device_do_sense(cdev, irb);
		else {
			printk("Huh? %s(%s): unsolicited interrupt...\n",
			       __FUNCTION__, cdev->dev.bus_id);
			if (cdev->handler)
				cdev->handler (cdev, 0, irb);
		}
		return;
	}
	/* Add basic sense info to irb. */
	ccw_device_accumulate_basic_sense(cdev, irb);
	if (cdev->private->flags.dosense) {
		/* Another basic sense is needed. */
		ccw_device_do_sense(cdev, irb);
		return;
	}
	cdev->private->state = DEV_STATE_ONLINE;
	/* Call the handler. */
	if (ccw_device_call_handler(cdev) && cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
836 | |||
/*
 * Interrupt in CLEAR_VERIFY state: a cio-initiated clear has finished.
 * Accumulate the status and try to start the delayed verification.
 */
static void
ccw_device_clear_verify(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	irb = (struct irb *) __LC_IRB;
	/* Accumulate status. We don't do basic sense. */
	ccw_device_accumulate_irb(cdev, irb);
	/* Try to start delayed device verification. */
	ccw_device_online_verify(cdev, 0);
	/* Note: Don't call handler for cio initiated clear! */
}
849 | |||
/*
 * Interrupt in TIMEOUT_KILL state: the killed i/o has terminated.
 * Report -ETIMEDOUT to the driver, then either notify "no path" or run
 * any delayed path verification.
 */
static void
ccw_device_killing_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	/* OK, i/o is dead now. Call interrupt handler. */
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		/* No path left; let the notify work decide what to do. */
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
870 | |||
/*
 * Timeout in TIMEOUT_KILL state: retry the cancel/halt/clear, escalate
 * to "no path"/NOTOPER on -ENODEV, or give the driver -ETIMEDOUT.
 */
static void
ccw_device_killing_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		/* Still busy; keep trying. */
		ccw_device_set_timeout(cdev, 3*HZ);
		return;
	}
	if (ret == -ENODEV) {
		struct subchannel *sch;

		sch = to_subchannel(cdev->dev.parent);
		if (!sch->lpm) {
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	//FIXME: Can we get here?
	cdev->private->state = DEV_STATE_ONLINE;
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
}
900 | |||
/*
 * Interrupt in WAIT4IO state: accumulate status, start basic sense if
 * required, reset the timer once the subchannel is idle, then call the
 * driver handler. Afterwards, escalate to "no path" or run delayed
 * verification as appropriate.
 */
static void
ccw_device_wait4io_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;
	struct subchannel *sch;

	irb = (struct irb *) __LC_IRB;
	/*
	 * Accumulate status and find out if a basic sense is needed.
	 * This is fine since we have already adapted the lpm.
	 */
	ccw_device_accumulate_irb(cdev, irb);
	if (cdev->private->flags.dosense) {
		if (ccw_device_do_sense(cdev, irb) == 0) {
			cdev->private->state = DEV_STATE_W4SENSE;
		}
		return;
	}

	/* Iff device is idle, reset timeout. */
	sch = to_subchannel(cdev->dev.parent);
	if (!stsch(sch->irq, &sch->schib))
		if (sch->schib.scsw.actl == 0)
			ccw_device_set_timeout(cdev, 0);
	/* Call the handler. */
	ccw_device_call_handler(cdev);
	if (!sch->lpm) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		ccw_device_online_verify(cdev, 0);
}
934 | |||
/*
 * Timeout in WAIT4IO state: cancel the pending i/o. -EBUSY moves to
 * TIMEOUT_KILL with a retry timer; -ENODEV escalates to "no path" or
 * NOTOPER; otherwise hand -ETIMEDOUT to the driver and follow up with
 * notification or delayed verification.
 */
static void
ccw_device_wait4io_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	ccw_device_set_timeout(cdev, 0);
	ret = ccw_device_cancel_halt_clear(cdev);
	if (ret == -EBUSY) {
		ccw_device_set_timeout(cdev, 3*HZ);
		cdev->private->state = DEV_STATE_TIMEOUT_KILL;
		return;
	}
	if (ret == -ENODEV) {
		if (!sch->lpm) {
			PREPARE_WORK(&cdev->private->kick_work,
				     ccw_device_nopath_notify, (void *)cdev);
			queue_work(ccw_device_notify_work,
				   &cdev->private->kick_work);
		} else
			dev_fsm_event(cdev, DEV_EVENT_NOTOPER);
		return;
	}
	if (cdev->handler)
		cdev->handler(cdev, cdev->private->intparm,
			      ERR_PTR(-ETIMEDOUT));
	if (!sch->lpm) {
		PREPARE_WORK(&cdev->private->kick_work,
			     ccw_device_nopath_notify, (void *)cdev);
		queue_work(ccw_device_notify_work, &cdev->private->kick_work);
	} else if (cdev->private->flags.doverify)
		/* Start delayed path verification. */
		ccw_device_online_verify(cdev, 0);
}
970 | |||
971 | static void | ||
972 | ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event) | ||
973 | { | ||
974 | /* When the I/O has terminated, we have to start verification. */ | ||
975 | if (cdev->private->options.pgroup) | ||
976 | cdev->private->flags.doverify = 1; | ||
977 | } | ||
978 | |||
/*
 * Completion handling for a steal-lock operation (boxed state): on an
 * interrupt, accumulate status unless it was unsolicited; on timeout,
 * just wake the waiter.
 */
static void
ccw_device_stlck_done(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct irb *irb;

	switch (dev_event) {
	case DEV_EVENT_INTERRUPT:
		irb = (struct irb *) __LC_IRB;
		/* Check for unsolicited interrupt. */
		if ((irb->scsw.stctl ==
		    		(SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) &&
		    (!irb->scsw.cc))
			/* FIXME: we should restart stlck here, but this
			 * is extremely unlikely ... */
			goto out_wakeup;

		ccw_device_accumulate_irb(cdev, irb);
		/* We don't care about basic sense etc. */
		break;
	default: /* timeout */
		break;
	}
out_wakeup:
	wake_up(&cdev->private->wait_q);
}
1004 | |||
/*
 * Restart sense-ID for a disconnected device (e.g. after paths came
 * back): enable the subchannel, arm the 60 second recognition timeout
 * and enter DISCONNECTED_SENSE_ID state.
 */
static void
ccw_device_start_id(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	if (cio_enable_subchannel(sch, sch->schib.pmcw.isc) != 0)
		/* Couldn't enable the subchannel for i/o. Sick device. */
		return;

	/* After 60s the device recognition is considered to have failed. */
	ccw_device_set_timeout(cdev, 60*HZ);

	cdev->private->state = DEV_STATE_DISCONNECTED_SENSE_ID;
	ccw_device_sense_id_start(cdev);
}
1021 | |||
/*
 * Trigger a reprobe of a disconnected device: refresh the schib,
 * recompute the usable path mask, restore pmcw fields that were lost
 * while the subchannel was disabled, and restart sense ID.
 */
void
device_trigger_reprobe(struct subchannel *sch)
{
	struct ccw_device *cdev;

	if (!sch->dev.driver_data)
		return;
	cdev = sch->dev.driver_data;
	/* Only disconnected devices are reprobed. */
	if (cdev->private->state != DEV_STATE_DISCONNECTED)
		return;

	/* Update some values. */
	if (stsch(sch->irq, &sch->schib))
		return;

	/*
	 * The pim, pam, pom values may not be accurate, but they are the best
	 * we have before performing device selection :/
	 */
	sch->lpm = sch->schib.pmcw.pim &
		sch->schib.pmcw.pam &
		sch->schib.pmcw.pom &
		sch->opm;
	/* Re-set some bits in the pmcw that were lost. */
	sch->schib.pmcw.isc = 3;
	sch->schib.pmcw.csense = 1;
	sch->schib.pmcw.ena = 0;
	/* Enable multipath mode if more than one path is set. */
	if ((sch->lpm & (sch->lpm - 1)) != 0)
		sch->schib.pmcw.mp = 1;
	sch->schib.pmcw.intparm = (__u32)(unsigned long)sch;
	/* We should also update ssd info, but this has to wait. */
	ccw_device_start_id(cdev, 0);
}
1055 | |||
/*
 * Interrupt while offline.
 */
static void
ccw_device_offline_irq(struct ccw_device *cdev, enum dev_event dev_event)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	/*
	 * An interrupt in state offline means a previous disable was not
	 * successful. Try again.
	 */
	cio_disable_subchannel(sch);
}
1068 | |||
/*
 * Any event in CMFCHANGE state: retry setting the measurement-block
 * schib, go back online and replay the event through the jumptable.
 */
static void
ccw_device_change_cmfstate(struct ccw_device *cdev, enum dev_event dev_event)
{
	retry_set_schib(cdev);
	cdev->private->state = DEV_STATE_ONLINE;
	dev_fsm_event(cdev, dev_event);
}
1076 | |||
1077 | |||
1078 | static void | ||
1079 | ccw_device_quiesce_done(struct ccw_device *cdev, enum dev_event dev_event) | ||
1080 | { | ||
1081 | ccw_device_set_timeout(cdev, 0); | ||
1082 | if (dev_event == DEV_EVENT_NOTOPER) | ||
1083 | cdev->private->state = DEV_STATE_NOT_OPER; | ||
1084 | else | ||
1085 | cdev->private->state = DEV_STATE_OFFLINE; | ||
1086 | wake_up(&cdev->private->wait_q); | ||
1087 | } | ||
1088 | |||
/*
 * Timeout while quiescing: cancel/halt/clear, then go offline (success),
 * not-oper (-ENODEV) or retry again very soon.
 */
static void
ccw_device_quiesce_timeout(struct ccw_device *cdev, enum dev_event dev_event)
{
	int ret;

	ret = ccw_device_cancel_halt_clear(cdev);
	switch (ret) {
	case 0:
		cdev->private->state = DEV_STATE_OFFLINE;
		wake_up(&cdev->private->wait_q);
		break;
	case -ENODEV:
		cdev->private->state = DEV_STATE_NOT_OPER;
		wake_up(&cdev->private->wait_q);
		break;
	default:
		/* Still busy; retry with a short timer. */
		ccw_device_set_timeout(cdev, HZ/10);
	}
}
1108 | |||
/*
 * No operation action. This is used e.g. to ignore a timeout event in
 * state offline.
 */
static void
ccw_device_nop(struct ccw_device *cdev, enum dev_event dev_event)
{
}
1117 | |||
/*
 * Bug operation action: an event arrived that is impossible in the
 * current state, so the jumptable entry is a deliberate crash.
 */
static void
ccw_device_bug(struct ccw_device *cdev, enum dev_event dev_event)
{
	printk(KERN_EMERG "dev_jumptable[%i][%i] == NULL\n",
	       cdev->private->state, dev_event);
	BUG();
}
1128 | |||
/*
 * device statemachine
 *
 * Indexed by [current state][incoming event]; every cell must be a valid
 * handler (ccw_device_nop to ignore, ccw_device_bug for impossible
 * combinations).
 */
fsm_func_t *dev_jumptable[NR_DEV_STATES][NR_DEV_EVENTS] = {
	[DEV_STATE_NOT_OPER] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_bug,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_pgid_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_OFFLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_offline_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_verify_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_ONLINE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_online_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_W4SENSE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_w4sense,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_online_verify,
	},
	[DEV_STATE_DISBAND_PGID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_disband_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_onoff_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_BOXED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_offline_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_stlck_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_stlck_done,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* states to wait for i/o completion before doing something */
	[DEV_STATE_CLEAR_VERIFY] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_clear_verify,
		[DEV_EVENT_TIMEOUT]	= ccw_device_nop,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_TIMEOUT_KILL] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_killing_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_killing_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop, //FIXME
	},
	[DEV_STATE_WAIT4IO] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_online_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_wait4io_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_wait4io_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_wait4io_verify,
	},
	[DEV_STATE_QUIESCE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_quiesce_done,
		[DEV_EVENT_INTERRUPT]	= ccw_device_quiesce_done,
		[DEV_EVENT_TIMEOUT]	= ccw_device_quiesce_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	/* special states for devices gone not operational */
	[DEV_STATE_DISCONNECTED] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_nop,
		[DEV_EVENT_INTERRUPT]	= ccw_device_start_id,
		[DEV_EVENT_TIMEOUT]	= ccw_device_bug,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_DISCONNECTED_SENSE_ID] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_recog_notoper,
		[DEV_EVENT_INTERRUPT]	= ccw_device_sense_id_irq,
		[DEV_EVENT_TIMEOUT]	= ccw_device_recog_timeout,
		[DEV_EVENT_VERIFY]	= ccw_device_nop,
	},
	[DEV_STATE_CMFCHANGE] = {
		[DEV_EVENT_NOTOPER]	= ccw_device_change_cmfstate,
		[DEV_EVENT_INTERRUPT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_TIMEOUT]	= ccw_device_change_cmfstate,
		[DEV_EVENT_VERIFY]	= ccw_device_change_cmfstate,
	},
};
1232 | |||
/*
 * io_subchannel_irq is called for "real" interrupts or for status
 * pending conditions on msch. It simply translates the interrupt into
 * a DEV_EVENT_INTERRUPT for the device state machine, if a ccw device
 * is attached to the subchannel.
 */
void
io_subchannel_irq (struct device *pdev)
{
	struct ccw_device *cdev;

	cdev = to_subchannel(pdev)->dev.driver_data;

	CIO_TRACE_EVENT (3, "IRQ");
	CIO_TRACE_EVENT (3, pdev->bus_id);
	if (cdev)
		dev_fsm_event(cdev, DEV_EVENT_INTERRUPT);
}
1249 | |||
1250 | EXPORT_SYMBOL_GPL(ccw_device_set_timeout); | ||
diff --git a/drivers/s390/cio/device_id.c b/drivers/s390/cio/device_id.c new file mode 100644 index 000000000000..0e68fb511dc9 --- /dev/null +++ b/drivers/s390/cio/device_id.c | |||
@@ -0,0 +1,355 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/device_id.c | ||
3 | * | ||
4 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | ||
5 | * IBM Corporation | ||
6 | * Author(s): Cornelia Huck(cohuck@de.ibm.com) | ||
7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
8 | * | ||
9 | * Sense ID functions. | ||
10 | */ | ||
11 | |||
12 | #include <linux/module.h> | ||
13 | #include <linux/config.h> | ||
14 | #include <linux/init.h> | ||
15 | |||
16 | #include <asm/ccwdev.h> | ||
17 | #include <asm/delay.h> | ||
18 | #include <asm/cio.h> | ||
19 | #include <asm/lowcore.h> | ||
20 | |||
21 | #include "cio.h" | ||
22 | #include "cio_debug.h" | ||
23 | #include "css.h" | ||
24 | #include "device.h" | ||
25 | #include "ioasm.h" | ||
26 | |||
27 | /* | ||
28 | * diag210 is used under VM to get information about a virtual device | ||
29 | */ | ||
30 | #ifdef CONFIG_ARCH_S390X | ||
31 | int | ||
32 | diag210(struct diag210 * addr) | ||
33 | { | ||
34 | /* | ||
35 | * diag 210 needs its data below the 2GB border, so we | ||
36 | * use a static data area to be sure | ||
37 | */ | ||
38 | static struct diag210 diag210_tmp; | ||
39 | static DEFINE_SPINLOCK(diag210_lock); | ||
40 | unsigned long flags; | ||
41 | int ccode; | ||
42 | |||
43 | spin_lock_irqsave(&diag210_lock, flags); | ||
44 | diag210_tmp = *addr; | ||
45 | |||
46 | asm volatile ( | ||
47 | " lhi %0,-1\n" | ||
48 | " sam31\n" | ||
49 | " diag %1,0,0x210\n" | ||
50 | "0: ipm %0\n" | ||
51 | " srl %0,28\n" | ||
52 | "1: sam64\n" | ||
53 | ".section __ex_table,\"a\"\n" | ||
54 | " .align 8\n" | ||
55 | " .quad 0b,1b\n" | ||
56 | ".previous" | ||
57 | : "=&d" (ccode) : "a" (__pa(&diag210_tmp)) : "cc", "memory" ); | ||
58 | |||
59 | *addr = diag210_tmp; | ||
60 | spin_unlock_irqrestore(&diag210_lock, flags); | ||
61 | |||
62 | return ccode; | ||
63 | } | ||
64 | #else | ||
65 | int | ||
66 | diag210(struct diag210 * addr) | ||
67 | { | ||
68 | int ccode; | ||
69 | |||
70 | asm volatile ( | ||
71 | " lhi %0,-1\n" | ||
72 | " diag %1,0,0x210\n" | ||
73 | "0: ipm %0\n" | ||
74 | " srl %0,28\n" | ||
75 | "1:\n" | ||
76 | ".section __ex_table,\"a\"\n" | ||
77 | " .align 4\n" | ||
78 | " .long 0b,1b\n" | ||
79 | ".previous" | ||
80 | : "=&d" (ccode) : "a" (__pa(addr)) : "cc", "memory" ); | ||
81 | |||
82 | return ccode; | ||
83 | } | ||
84 | #endif | ||
85 | |||
86 | /* | ||
87 | * Input : | ||
88 | * devno - device number | ||
89 | * ps - pointer to sense ID data area | ||
90 | * Output : none | ||
91 | */ | ||
92 | static void | ||
93 | VM_virtual_device_info (__u16 devno, struct senseid *ps) | ||
94 | { | ||
95 | static struct { | ||
96 | int vrdcvcla, vrdcvtyp, cu_type; | ||
97 | } vm_devices[] = { | ||
98 | { 0x08, 0x01, 0x3480 }, | ||
99 | { 0x08, 0x02, 0x3430 }, | ||
100 | { 0x08, 0x10, 0x3420 }, | ||
101 | { 0x08, 0x42, 0x3424 }, | ||
102 | { 0x08, 0x44, 0x9348 }, | ||
103 | { 0x08, 0x81, 0x3490 }, | ||
104 | { 0x08, 0x82, 0x3422 }, | ||
105 | { 0x10, 0x41, 0x1403 }, | ||
106 | { 0x10, 0x42, 0x3211 }, | ||
107 | { 0x10, 0x43, 0x3203 }, | ||
108 | { 0x10, 0x45, 0x3800 }, | ||
109 | { 0x10, 0x47, 0x3262 }, | ||
110 | { 0x10, 0x48, 0x3820 }, | ||
111 | { 0x10, 0x49, 0x3800 }, | ||
112 | { 0x10, 0x4a, 0x4245 }, | ||
113 | { 0x10, 0x4b, 0x4248 }, | ||
114 | { 0x10, 0x4d, 0x3800 }, | ||
115 | { 0x10, 0x4e, 0x3820 }, | ||
116 | { 0x10, 0x4f, 0x3820 }, | ||
117 | { 0x10, 0x82, 0x2540 }, | ||
118 | { 0x10, 0x84, 0x3525 }, | ||
119 | { 0x20, 0x81, 0x2501 }, | ||
120 | { 0x20, 0x82, 0x2540 }, | ||
121 | { 0x20, 0x84, 0x3505 }, | ||
122 | { 0x40, 0x01, 0x3278 }, | ||
123 | { 0x40, 0x04, 0x3277 }, | ||
124 | { 0x40, 0x80, 0x2250 }, | ||
125 | { 0x40, 0xc0, 0x5080 }, | ||
126 | { 0x80, 0x00, 0x3215 }, | ||
127 | }; | ||
128 | struct diag210 diag_data; | ||
129 | int ccode, i; | ||
130 | |||
131 | CIO_TRACE_EVENT (4, "VMvdinf"); | ||
132 | |||
133 | diag_data = (struct diag210) { | ||
134 | .vrdcdvno = devno, | ||
135 | .vrdclen = sizeof (diag_data), | ||
136 | }; | ||
137 | |||
138 | ccode = diag210 (&diag_data); | ||
139 | ps->reserved = 0xff; | ||
140 | |||
141 | /* Special case for bloody osa devices. */ | ||
142 | if (diag_data.vrdcvcla == 0x02 && | ||
143 | diag_data.vrdcvtyp == 0x20) { | ||
144 | ps->cu_type = 0x3088; | ||
145 | ps->cu_model = 0x60; | ||
146 | return; | ||
147 | } | ||
148 | for (i = 0; i < sizeof(vm_devices) / sizeof(vm_devices[0]); i++) | ||
149 | if (diag_data.vrdcvcla == vm_devices[i].vrdcvcla && | ||
150 | diag_data.vrdcvtyp == vm_devices[i].vrdcvtyp) { | ||
151 | ps->cu_type = vm_devices[i].cu_type; | ||
152 | return; | ||
153 | } | ||
154 | CIO_MSG_EVENT(0, "DIAG X'210' for device %04X returned (cc = %d):" | ||
155 | "vdev class : %02X, vdev type : %04X \n ... " | ||
156 | "rdev class : %02X, rdev type : %04X, " | ||
157 | "rdev model: %02X\n", | ||
158 | devno, ccode, | ||
159 | diag_data.vrdcvcla, diag_data.vrdcvtyp, | ||
160 | diag_data.vrdcrccl, diag_data.vrdccrty, | ||
161 | diag_data.vrdccrmd); | ||
162 | } | ||
163 | |||
164 | /* | ||
165 | * Start Sense ID helper function. | ||
166 | * Try to obtain the 'control unit'/'device type' information | ||
167 | * associated with the subchannel. | ||
168 | */ | ||
169 | static int | ||
170 | __ccw_device_sense_id_start(struct ccw_device *cdev) | ||
171 | { | ||
172 | struct subchannel *sch; | ||
173 | struct ccw1 *ccw; | ||
174 | int ret; | ||
175 | |||
176 | sch = to_subchannel(cdev->dev.parent); | ||
177 | /* Setup sense channel program. */ | ||
178 | ccw = cdev->private->iccws; | ||
179 | if (sch->schib.pmcw.pim != 0x80) { | ||
180 | /* more than one path installed. */ | ||
181 | ccw->cmd_code = CCW_CMD_SUSPEND_RECONN; | ||
182 | ccw->cda = 0; | ||
183 | ccw->count = 0; | ||
184 | ccw->flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
185 | ccw++; | ||
186 | } | ||
187 | ccw->cmd_code = CCW_CMD_SENSE_ID; | ||
188 | ccw->cda = (__u32) __pa (&cdev->private->senseid); | ||
189 | ccw->count = sizeof (struct senseid); | ||
190 | ccw->flags = CCW_FLAG_SLI; | ||
191 | |||
192 | /* Reset device status. */ | ||
193 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | ||
194 | |||
195 | /* Try on every path. */ | ||
196 | ret = -ENODEV; | ||
197 | while (cdev->private->imask != 0) { | ||
198 | if ((sch->opm & cdev->private->imask) != 0 && | ||
199 | cdev->private->iretry > 0) { | ||
200 | cdev->private->iretry--; | ||
201 | ret = cio_start (sch, cdev->private->iccws, | ||
202 | cdev->private->imask); | ||
203 | /* ret is 0, -EBUSY, -EACCES or -ENODEV */ | ||
204 | if (ret != -EACCES) | ||
205 | return ret; | ||
206 | } | ||
207 | cdev->private->imask >>= 1; | ||
208 | cdev->private->iretry = 5; | ||
209 | } | ||
210 | return ret; | ||
211 | } | ||
212 | |||
213 | void | ||
214 | ccw_device_sense_id_start(struct ccw_device *cdev) | ||
215 | { | ||
216 | int ret; | ||
217 | |||
218 | memset (&cdev->private->senseid, 0, sizeof (struct senseid)); | ||
219 | cdev->private->senseid.cu_type = 0xFFFF; | ||
220 | cdev->private->imask = 0x80; | ||
221 | cdev->private->iretry = 5; | ||
222 | ret = __ccw_device_sense_id_start(cdev); | ||
223 | if (ret && ret != -EBUSY) | ||
224 | ccw_device_sense_id_done(cdev, ret); | ||
225 | } | ||
226 | |||
227 | /* | ||
228 | * Called from interrupt context to check if a valid answer | ||
229 | * to Sense ID was received. | ||
230 | */ | ||
231 | static int | ||
232 | ccw_device_check_sense_id(struct ccw_device *cdev) | ||
233 | { | ||
234 | struct subchannel *sch; | ||
235 | struct irb *irb; | ||
236 | |||
237 | sch = to_subchannel(cdev->dev.parent); | ||
238 | irb = &cdev->private->irb; | ||
239 | /* Did we get a proper answer ? */ | ||
240 | if (cdev->private->senseid.cu_type != 0xFFFF && | ||
241 | cdev->private->senseid.reserved == 0xFF) { | ||
242 | if (irb->scsw.count < sizeof (struct senseid) - 8) | ||
243 | cdev->private->flags.esid = 1; | ||
244 | return 0; /* Success */ | ||
245 | } | ||
246 | /* Check the error cases. */ | ||
247 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) | ||
248 | return -ETIME; | ||
249 | if (irb->esw.esw0.erw.cons && (irb->ecw[0] & SNS0_CMD_REJECT)) { | ||
250 | /* | ||
251 | * if the device doesn't support the SenseID | ||
252 | * command further retries wouldn't help ... | ||
253 | * NB: We don't check here for intervention required like we | ||
254 | * did before, because tape devices with no tape inserted | ||
255 | * may present this status *in conjunction with* the | ||
256 | * sense id information. So, for intervention required, | ||
257 | * we use the "whack it until it talks" strategy... | ||
258 | */ | ||
259 | CIO_MSG_EVENT(2, "SenseID : device %04x on Subchannel %04x " | ||
260 | "reports cmd reject\n", | ||
261 | cdev->private->devno, sch->irq); | ||
262 | return -EOPNOTSUPP; | ||
263 | } | ||
264 | if (irb->esw.esw0.erw.cons) { | ||
265 | CIO_MSG_EVENT(2, "SenseID : UC on dev %04x, " | ||
266 | "lpum %02X, cnt %02d, sns :" | ||
267 | " %02X%02X%02X%02X %02X%02X%02X%02X ...\n", | ||
268 | cdev->private->devno, | ||
269 | irb->esw.esw0.sublog.lpum, | ||
270 | irb->esw.esw0.erw.scnt, | ||
271 | irb->ecw[0], irb->ecw[1], | ||
272 | irb->ecw[2], irb->ecw[3], | ||
273 | irb->ecw[4], irb->ecw[5], | ||
274 | irb->ecw[6], irb->ecw[7]); | ||
275 | return -EAGAIN; | ||
276 | } | ||
277 | if (irb->scsw.cc == 3) { | ||
278 | if ((sch->orb.lpm & | ||
279 | sch->schib.pmcw.pim & sch->schib.pmcw.pam) != 0) | ||
280 | CIO_MSG_EVENT(2, "SenseID : path %02X for device %04x on" | ||
281 | " subchannel %04x is 'not operational'\n", | ||
282 | sch->orb.lpm, cdev->private->devno, | ||
283 | sch->irq); | ||
284 | return -EACCES; | ||
285 | } | ||
286 | /* Hmm, whatever happened, try again. */ | ||
287 | CIO_MSG_EVENT(2, "SenseID : start_IO() for device %04x on " | ||
288 | "subchannel %04x returns status %02X%02X\n", | ||
289 | cdev->private->devno, sch->irq, | ||
290 | irb->scsw.dstat, irb->scsw.cstat); | ||
291 | return -EAGAIN; | ||
292 | } | ||
293 | |||
294 | /* | ||
295 | * Got interrupt for Sense ID. | ||
296 | */ | ||
297 | void | ||
298 | ccw_device_sense_id_irq(struct ccw_device *cdev, enum dev_event dev_event) | ||
299 | { | ||
300 | struct subchannel *sch; | ||
301 | struct irb *irb; | ||
302 | int ret; | ||
303 | |||
304 | sch = to_subchannel(cdev->dev.parent); | ||
305 | irb = (struct irb *) __LC_IRB; | ||
306 | /* Retry sense id, if needed. */ | ||
307 | if (irb->scsw.stctl == | ||
308 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | ||
309 | if ((irb->scsw.cc == 1) || !irb->scsw.actl) { | ||
310 | ret = __ccw_device_sense_id_start(cdev); | ||
311 | if (ret && ret != -EBUSY) | ||
312 | ccw_device_sense_id_done(cdev, ret); | ||
313 | } | ||
314 | return; | ||
315 | } | ||
316 | if (ccw_device_accumulate_and_sense(cdev, irb) != 0) | ||
317 | return; | ||
318 | ret = ccw_device_check_sense_id(cdev); | ||
319 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | ||
320 | switch (ret) { | ||
321 | /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN or -EACCES */ | ||
322 | case 0: /* Sense id succeeded. */ | ||
323 | case -ETIME: /* Sense id stopped by timeout. */ | ||
324 | ccw_device_sense_id_done(cdev, ret); | ||
325 | break; | ||
326 | case -EACCES: /* channel is not operational. */ | ||
327 | sch->lpm &= ~cdev->private->imask; | ||
328 | cdev->private->imask >>= 1; | ||
329 | cdev->private->iretry = 5; | ||
330 | /* fall through. */ | ||
331 | case -EAGAIN: /* try again. */ | ||
332 | ret = __ccw_device_sense_id_start(cdev); | ||
333 | if (ret == 0 || ret == -EBUSY) | ||
334 | break; | ||
335 | /* fall through. */ | ||
336 | default: /* Sense ID failed. Try asking VM. */ | ||
337 | if (MACHINE_IS_VM) { | ||
338 | VM_virtual_device_info (cdev->private->devno, | ||
339 | &cdev->private->senseid); | ||
340 | if (cdev->private->senseid.cu_type != 0xFFFF) { | ||
341 | /* Got the device information from VM. */ | ||
342 | ccw_device_sense_id_done(cdev, 0); | ||
343 | return; | ||
344 | } | ||
345 | } | ||
346 | /* | ||
347 | * If we can't couldn't identify the device type we | ||
348 | * consider the device "not operational". | ||
349 | */ | ||
350 | ccw_device_sense_id_done(cdev, -ENODEV); | ||
351 | break; | ||
352 | } | ||
353 | } | ||
354 | |||
355 | EXPORT_SYMBOL(diag210); | ||
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c new file mode 100644 index 000000000000..11e260e0b9c9 --- /dev/null +++ b/drivers/s390/cio/device_ops.c | |||
@@ -0,0 +1,603 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/device_ops.c | ||
3 | * | ||
4 | * $Revision: 1.55 $ | ||
5 | * | ||
6 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | ||
7 | * IBM Corporation | ||
8 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
9 | * Cornelia Huck (cohuck@de.ibm.com) | ||
10 | */ | ||
11 | #include <linux/config.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/errno.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/list.h> | ||
17 | #include <linux/device.h> | ||
18 | #include <linux/delay.h> | ||
19 | |||
20 | #include <asm/ccwdev.h> | ||
21 | #include <asm/idals.h> | ||
22 | #include <asm/qdio.h> | ||
23 | |||
24 | #include "cio.h" | ||
25 | #include "cio_debug.h" | ||
26 | #include "css.h" | ||
27 | #include "chsc.h" | ||
28 | #include "device.h" | ||
29 | #include "qdio.h" | ||
30 | |||
31 | int | ||
32 | ccw_device_set_options(struct ccw_device *cdev, unsigned long flags) | ||
33 | { | ||
34 | /* | ||
35 | * The flag usage is mutal exclusive ... | ||
36 | */ | ||
37 | if ((flags & CCWDEV_EARLY_NOTIFICATION) && | ||
38 | (flags & CCWDEV_REPORT_ALL)) | ||
39 | return -EINVAL; | ||
40 | cdev->private->options.fast = (flags & CCWDEV_EARLY_NOTIFICATION) != 0; | ||
41 | cdev->private->options.repall = (flags & CCWDEV_REPORT_ALL) != 0; | ||
42 | cdev->private->options.pgroup = (flags & CCWDEV_DO_PATHGROUP) != 0; | ||
43 | cdev->private->options.force = (flags & CCWDEV_ALLOW_FORCE) != 0; | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | int | ||
48 | ccw_device_clear(struct ccw_device *cdev, unsigned long intparm) | ||
49 | { | ||
50 | struct subchannel *sch; | ||
51 | int ret; | ||
52 | |||
53 | if (!cdev) | ||
54 | return -ENODEV; | ||
55 | if (cdev->private->state == DEV_STATE_NOT_OPER) | ||
56 | return -ENODEV; | ||
57 | if (cdev->private->state != DEV_STATE_ONLINE && | ||
58 | cdev->private->state != DEV_STATE_WAIT4IO && | ||
59 | cdev->private->state != DEV_STATE_W4SENSE) | ||
60 | return -EINVAL; | ||
61 | sch = to_subchannel(cdev->dev.parent); | ||
62 | if (!sch) | ||
63 | return -ENODEV; | ||
64 | ret = cio_clear(sch); | ||
65 | if (ret == 0) | ||
66 | cdev->private->intparm = intparm; | ||
67 | return ret; | ||
68 | } | ||
69 | |||
/*
 * Start a channel program on the device using the given storage key.
 * @cpa is the channel program, @intparm is handed back to the driver's
 * interrupt handler, @lpm restricts the paths used, @flags are passed to
 * cio_set_options().
 * NOTE(review): presumably called with the subchannel lock held —
 * confirm against callers.
 */
int
ccw_device_start_key(struct ccw_device *cdev, struct ccw1 *cpa,
		     unsigned long intparm, __u8 lpm, __u8 key,
		     unsigned long flags)
{
	struct subchannel *sch;
	int ret;

	if (!cdev)
		return -ENODEV;
	sch = to_subchannel(cdev->dev.parent);
	if (!sch)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_NOT_OPER)
		return -ENODEV;
	if (cdev->private->state == DEV_STATE_VERIFY) {
		/* Remember to fake irb when finished. */
		/* Path verification in progress: pretend the start worked
		 * and deliver a fabricated irb once verification is done. */
		if (!cdev->private->flags.fake_irb) {
			cdev->private->flags.fake_irb = 1;
			cdev->private->intparm = intparm;
			return 0;
		} else
			/* There's already a fake I/O around. */
			return -EBUSY;
	}
	/* Busy: not online, only primary status received so far, or a
	 * path verification is pending. */
	if (cdev->private->state != DEV_STATE_ONLINE ||
	    ((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
	     !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
	    cdev->private->flags.doverify)
		return -EBUSY;
	ret = cio_set_options (sch, flags);
	if (ret)
		return ret;
	ret = cio_start_key (sch, cpa, lpm, key);
	if (ret == 0)
		cdev->private->intparm = intparm;
	return ret;
}
108 | |||
109 | |||
110 | int | ||
111 | ccw_device_start_timeout_key(struct ccw_device *cdev, struct ccw1 *cpa, | ||
112 | unsigned long intparm, __u8 lpm, __u8 key, | ||
113 | unsigned long flags, int expires) | ||
114 | { | ||
115 | int ret; | ||
116 | |||
117 | if (!cdev) | ||
118 | return -ENODEV; | ||
119 | ccw_device_set_timeout(cdev, expires); | ||
120 | ret = ccw_device_start_key(cdev, cpa, intparm, lpm, key, flags); | ||
121 | if (ret != 0) | ||
122 | ccw_device_set_timeout(cdev, 0); | ||
123 | return ret; | ||
124 | } | ||
125 | |||
/*
 * Start a channel program on the device using the default storage key.
 * Convenience wrapper around ccw_device_start_key().
 */
int
ccw_device_start(struct ccw_device *cdev, struct ccw1 *cpa,
		 unsigned long intparm, __u8 lpm, unsigned long flags)
{
	return ccw_device_start_key(cdev, cpa, intparm, lpm,
				    default_storage_key, flags);
}
133 | |||
/*
 * Start a channel program with a timeout using the default storage key.
 * Convenience wrapper around ccw_device_start_timeout_key().
 */
int
ccw_device_start_timeout(struct ccw_device *cdev, struct ccw1 *cpa,
			 unsigned long intparm, __u8 lpm, unsigned long flags,
			 int expires)
{
	return ccw_device_start_timeout_key(cdev, cpa, intparm, lpm,
					    default_storage_key, flags,
					    expires);
}
143 | |||
144 | |||
145 | int | ||
146 | ccw_device_halt(struct ccw_device *cdev, unsigned long intparm) | ||
147 | { | ||
148 | struct subchannel *sch; | ||
149 | int ret; | ||
150 | |||
151 | if (!cdev) | ||
152 | return -ENODEV; | ||
153 | if (cdev->private->state == DEV_STATE_NOT_OPER) | ||
154 | return -ENODEV; | ||
155 | if (cdev->private->state != DEV_STATE_ONLINE && | ||
156 | cdev->private->state != DEV_STATE_WAIT4IO && | ||
157 | cdev->private->state != DEV_STATE_W4SENSE) | ||
158 | return -EINVAL; | ||
159 | sch = to_subchannel(cdev->dev.parent); | ||
160 | if (!sch) | ||
161 | return -ENODEV; | ||
162 | ret = cio_halt(sch); | ||
163 | if (ret == 0) | ||
164 | cdev->private->intparm = intparm; | ||
165 | return ret; | ||
166 | } | ||
167 | |||
168 | int | ||
169 | ccw_device_resume(struct ccw_device *cdev) | ||
170 | { | ||
171 | struct subchannel *sch; | ||
172 | |||
173 | if (!cdev) | ||
174 | return -ENODEV; | ||
175 | sch = to_subchannel(cdev->dev.parent); | ||
176 | if (!sch) | ||
177 | return -ENODEV; | ||
178 | if (cdev->private->state == DEV_STATE_NOT_OPER) | ||
179 | return -ENODEV; | ||
180 | if (cdev->private->state != DEV_STATE_ONLINE || | ||
181 | !(sch->schib.scsw.actl & SCSW_ACTL_SUSPENDED)) | ||
182 | return -EINVAL; | ||
183 | return cio_resume(sch); | ||
184 | } | ||
185 | |||
186 | /* | ||
187 | * Pass interrupt to device driver. | ||
188 | */ | ||
189 | int | ||
190 | ccw_device_call_handler(struct ccw_device *cdev) | ||
191 | { | ||
192 | struct subchannel *sch; | ||
193 | unsigned int stctl; | ||
194 | int ending_status; | ||
195 | |||
196 | sch = to_subchannel(cdev->dev.parent); | ||
197 | |||
198 | /* | ||
199 | * we allow for the device action handler if . | ||
200 | * - we received ending status | ||
201 | * - the action handler requested to see all interrupts | ||
202 | * - we received an intermediate status | ||
203 | * - fast notification was requested (primary status) | ||
204 | * - unsolicited interrupts | ||
205 | */ | ||
206 | stctl = cdev->private->irb.scsw.stctl; | ||
207 | ending_status = (stctl & SCSW_STCTL_SEC_STATUS) || | ||
208 | (stctl == (SCSW_STCTL_ALERT_STATUS | SCSW_STCTL_STATUS_PEND)) || | ||
209 | (stctl == SCSW_STCTL_STATUS_PEND); | ||
210 | if (!ending_status && | ||
211 | !cdev->private->options.repall && | ||
212 | !(stctl & SCSW_STCTL_INTER_STATUS) && | ||
213 | !(cdev->private->options.fast && | ||
214 | (stctl & SCSW_STCTL_PRIM_STATUS))) | ||
215 | return 0; | ||
216 | |||
217 | /* | ||
218 | * Now we are ready to call the device driver interrupt handler. | ||
219 | */ | ||
220 | if (cdev->handler) | ||
221 | cdev->handler(cdev, cdev->private->intparm, | ||
222 | &cdev->private->irb); | ||
223 | |||
224 | /* | ||
225 | * Clear the old and now useless interrupt response block. | ||
226 | */ | ||
227 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | ||
228 | |||
229 | return 1; | ||
230 | } | ||
231 | |||
232 | /* | ||
233 | * Search for CIW command in extended sense data. | ||
234 | */ | ||
235 | struct ciw * | ||
236 | ccw_device_get_ciw(struct ccw_device *cdev, __u32 ct) | ||
237 | { | ||
238 | int ciw_cnt; | ||
239 | |||
240 | if (cdev->private->flags.esid == 0) | ||
241 | return NULL; | ||
242 | for (ciw_cnt = 0; ciw_cnt < MAX_CIWS; ciw_cnt++) | ||
243 | if (cdev->private->senseid.ciw[ciw_cnt].ct == ct) | ||
244 | return cdev->private->senseid.ciw + ciw_cnt; | ||
245 | return NULL; | ||
246 | } | ||
247 | |||
248 | __u8 | ||
249 | ccw_device_get_path_mask(struct ccw_device *cdev) | ||
250 | { | ||
251 | struct subchannel *sch; | ||
252 | |||
253 | sch = to_subchannel(cdev->dev.parent); | ||
254 | if (!sch) | ||
255 | return 0; | ||
256 | else | ||
257 | return sch->vpm; | ||
258 | } | ||
259 | |||
260 | static void | ||
261 | ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb) | ||
262 | { | ||
263 | if (!ip) | ||
264 | /* unsolicited interrupt */ | ||
265 | return; | ||
266 | |||
267 | /* Abuse intparm for error reporting. */ | ||
268 | if (IS_ERR(irb)) | ||
269 | cdev->private->intparm = -EIO; | ||
270 | else if ((irb->scsw.dstat != | ||
271 | (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || | ||
272 | (irb->scsw.cstat != 0)) { | ||
273 | /* | ||
274 | * We didn't get channel end / device end. Check if path | ||
275 | * verification has been started; we can retry after it has | ||
276 | * finished. We also retry unit checks except for command reject | ||
277 | * or intervention required. | ||
278 | */ | ||
279 | if (cdev->private->flags.doverify || | ||
280 | cdev->private->state == DEV_STATE_VERIFY) | ||
281 | cdev->private->intparm = -EAGAIN; | ||
282 | if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && | ||
283 | !(irb->ecw[0] & | ||
284 | (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ))) | ||
285 | cdev->private->intparm = -EAGAIN; | ||
286 | else | ||
287 | cdev->private->intparm = -EIO; | ||
288 | |||
289 | } else | ||
290 | cdev->private->intparm = 0; | ||
291 | wake_up(&cdev->private->wait_q); | ||
292 | } | ||
293 | |||
/*
 * Start the given channel program and wait for it to complete, retrying
 * after a short sleep while the subchannel is busy/inaccessible or while
 * ccw_device_wake_up() reports -EAGAIN. @magic tags the request in
 * intparm (ebcdic "RDC"/"RCD" for the two callers).
 *
 * Must be called with the subchannel lock held (irqs off); the lock is
 * dropped around msleep() and around the wait for the interrupt.
 */
static inline int
__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
{
	int ret;
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	do {
		ret = cio_start (sch, ccw, lpm);
		if ((ret == -EBUSY) || (ret == -EACCES)) {
			/* Try again later. */
			spin_unlock_irq(&sch->lock);
			msleep(10);
			spin_lock_irq(&sch->lock);
			continue;
		}
		if (ret != 0)
			/* Non-retryable error. */
			break;
		/* Wait for end of request. */
		/* ccw_device_wake_up() replaces magic with the outcome. */
		cdev->private->intparm = magic;
		spin_unlock_irq(&sch->lock);
		wait_event(cdev->private->wait_q,
			   (cdev->private->intparm == -EIO) ||
			   (cdev->private->intparm == -EAGAIN) ||
			   (cdev->private->intparm == 0));
		spin_lock_irq(&sch->lock);
		/* Check at least for channel end / device end */
		if (cdev->private->intparm == -EIO) {
			/* Non-retryable error. */
			ret = -EIO;
			break;
		}
		if (cdev->private->intparm == 0)
			/* Success. */
			break;
		/* Try again later. */
		spin_unlock_irq(&sch->lock);
		msleep(10);
		spin_lock_irq(&sch->lock);
	} while (1);

	return ret;
}
338 | |||
339 | /** | ||
340 | * read_dev_chars() - read device characteristics | ||
341 | * @param cdev target ccw device | ||
342 | * @param buffer pointer to buffer for rdc data | ||
343 | * @param length size of rdc data | ||
344 | * @returns 0 for success, negative error value on failure | ||
345 | * | ||
346 | * Context: | ||
347 | * called for online device, lock not held | ||
348 | **/ | ||
349 | int | ||
350 | read_dev_chars (struct ccw_device *cdev, void **buffer, int length) | ||
351 | { | ||
352 | void (*handler)(struct ccw_device *, unsigned long, struct irb *); | ||
353 | struct subchannel *sch; | ||
354 | int ret; | ||
355 | struct ccw1 *rdc_ccw; | ||
356 | |||
357 | if (!cdev) | ||
358 | return -ENODEV; | ||
359 | if (!buffer || !length) | ||
360 | return -EINVAL; | ||
361 | sch = to_subchannel(cdev->dev.parent); | ||
362 | |||
363 | CIO_TRACE_EVENT (4, "rddevch"); | ||
364 | CIO_TRACE_EVENT (4, sch->dev.bus_id); | ||
365 | |||
366 | rdc_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); | ||
367 | if (!rdc_ccw) | ||
368 | return -ENOMEM; | ||
369 | memset(rdc_ccw, 0, sizeof(struct ccw1)); | ||
370 | rdc_ccw->cmd_code = CCW_CMD_RDC; | ||
371 | rdc_ccw->count = length; | ||
372 | rdc_ccw->flags = CCW_FLAG_SLI; | ||
373 | ret = set_normalized_cda (rdc_ccw, (*buffer)); | ||
374 | if (ret != 0) { | ||
375 | kfree(rdc_ccw); | ||
376 | return ret; | ||
377 | } | ||
378 | |||
379 | spin_lock_irq(&sch->lock); | ||
380 | /* Save interrupt handler. */ | ||
381 | handler = cdev->handler; | ||
382 | /* Temporarily install own handler. */ | ||
383 | cdev->handler = ccw_device_wake_up; | ||
384 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
385 | ret = -ENODEV; | ||
386 | else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) && | ||
387 | !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) || | ||
388 | cdev->private->flags.doverify) | ||
389 | ret = -EBUSY; | ||
390 | else | ||
391 | /* 0x00D9C4C3 == ebcdic "RDC" */ | ||
392 | ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3, 0); | ||
393 | |||
394 | /* Restore interrupt handler. */ | ||
395 | cdev->handler = handler; | ||
396 | spin_unlock_irq(&sch->lock); | ||
397 | |||
398 | clear_normalized_cda (rdc_ccw); | ||
399 | kfree(rdc_ccw); | ||
400 | |||
401 | return ret; | ||
402 | } | ||
403 | |||
404 | /* | ||
405 | * Read Configuration data using path mask | ||
406 | */ | ||
407 | int | ||
408 | read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lpm) | ||
409 | { | ||
410 | void (*handler)(struct ccw_device *, unsigned long, struct irb *); | ||
411 | struct subchannel *sch; | ||
412 | struct ciw *ciw; | ||
413 | char *rcd_buf; | ||
414 | int ret; | ||
415 | struct ccw1 *rcd_ccw; | ||
416 | |||
417 | if (!cdev) | ||
418 | return -ENODEV; | ||
419 | if (!buffer || !length) | ||
420 | return -EINVAL; | ||
421 | sch = to_subchannel(cdev->dev.parent); | ||
422 | |||
423 | CIO_TRACE_EVENT (4, "rdconf"); | ||
424 | CIO_TRACE_EVENT (4, sch->dev.bus_id); | ||
425 | |||
426 | /* | ||
427 | * scan for RCD command in extended SenseID data | ||
428 | */ | ||
429 | ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD); | ||
430 | if (!ciw || ciw->cmd == 0) | ||
431 | return -EOPNOTSUPP; | ||
432 | |||
433 | rcd_ccw = kmalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA); | ||
434 | if (!rcd_ccw) | ||
435 | return -ENOMEM; | ||
436 | memset(rcd_ccw, 0, sizeof(struct ccw1)); | ||
437 | rcd_buf = kmalloc(ciw->count, GFP_KERNEL | GFP_DMA); | ||
438 | if (!rcd_buf) { | ||
439 | kfree(rcd_ccw); | ||
440 | return -ENOMEM; | ||
441 | } | ||
442 | memset (rcd_buf, 0, ciw->count); | ||
443 | rcd_ccw->cmd_code = ciw->cmd; | ||
444 | rcd_ccw->cda = (__u32) __pa (rcd_buf); | ||
445 | rcd_ccw->count = ciw->count; | ||
446 | rcd_ccw->flags = CCW_FLAG_SLI; | ||
447 | |||
448 | spin_lock_irq(&sch->lock); | ||
449 | /* Save interrupt handler. */ | ||
450 | handler = cdev->handler; | ||
451 | /* Temporarily install own handler. */ | ||
452 | cdev->handler = ccw_device_wake_up; | ||
453 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
454 | ret = -ENODEV; | ||
455 | else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) && | ||
456 | !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) || | ||
457 | cdev->private->flags.doverify) | ||
458 | ret = -EBUSY; | ||
459 | else | ||
460 | /* 0x00D9C3C4 == ebcdic "RCD" */ | ||
461 | ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4, lpm); | ||
462 | |||
463 | /* Restore interrupt handler. */ | ||
464 | cdev->handler = handler; | ||
465 | spin_unlock_irq(&sch->lock); | ||
466 | |||
467 | /* | ||
468 | * on success we update the user input parms | ||
469 | */ | ||
470 | if (ret) { | ||
471 | kfree (rcd_buf); | ||
472 | *buffer = NULL; | ||
473 | *length = 0; | ||
474 | } else { | ||
475 | *length = ciw->count; | ||
476 | *buffer = rcd_buf; | ||
477 | } | ||
478 | kfree(rcd_ccw); | ||
479 | |||
480 | return ret; | ||
481 | } | ||
482 | |||
483 | /* | ||
484 | * Read Configuration data | ||
485 | */ | ||
486 | int | ||
487 | read_conf_data (struct ccw_device *cdev, void **buffer, int *length) | ||
488 | { | ||
489 | return read_conf_data_lpm (cdev, buffer, length, 0); | ||
490 | } | ||
491 | |||
492 | /* | ||
493 | * Try to break the lock on a boxed device. | ||
494 | */ | ||
495 | int | ||
496 | ccw_device_stlck(struct ccw_device *cdev) | ||
497 | { | ||
498 | void *buf, *buf2; | ||
499 | unsigned long flags; | ||
500 | struct subchannel *sch; | ||
501 | int ret; | ||
502 | |||
503 | if (!cdev) | ||
504 | return -ENODEV; | ||
505 | |||
506 | if (cdev->drv && !cdev->private->options.force) | ||
507 | return -EINVAL; | ||
508 | |||
509 | sch = to_subchannel(cdev->dev.parent); | ||
510 | |||
511 | CIO_TRACE_EVENT(2, "stl lock"); | ||
512 | CIO_TRACE_EVENT(2, cdev->dev.bus_id); | ||
513 | |||
514 | buf = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL); | ||
515 | if (!buf) | ||
516 | return -ENOMEM; | ||
517 | buf2 = kmalloc(32*sizeof(char), GFP_DMA|GFP_KERNEL); | ||
518 | if (!buf2) { | ||
519 | kfree(buf); | ||
520 | return -ENOMEM; | ||
521 | } | ||
522 | spin_lock_irqsave(&sch->lock, flags); | ||
523 | ret = cio_enable_subchannel(sch, 3); | ||
524 | if (ret) | ||
525 | goto out_unlock; | ||
526 | /* | ||
527 | * Setup ccw. We chain an unconditional reserve and a release so we | ||
528 | * only break the lock. | ||
529 | */ | ||
530 | cdev->private->iccws[0].cmd_code = CCW_CMD_STLCK; | ||
531 | cdev->private->iccws[0].cda = (__u32) __pa(buf); | ||
532 | cdev->private->iccws[0].count = 32; | ||
533 | cdev->private->iccws[0].flags = CCW_FLAG_CC; | ||
534 | cdev->private->iccws[1].cmd_code = CCW_CMD_RELEASE; | ||
535 | cdev->private->iccws[1].cda = (__u32) __pa(buf2); | ||
536 | cdev->private->iccws[1].count = 32; | ||
537 | cdev->private->iccws[1].flags = 0; | ||
538 | ret = cio_start(sch, cdev->private->iccws, 0); | ||
539 | if (ret) { | ||
540 | cio_disable_subchannel(sch); //FIXME: return code? | ||
541 | goto out_unlock; | ||
542 | } | ||
543 | cdev->private->irb.scsw.actl |= SCSW_ACTL_START_PEND; | ||
544 | spin_unlock_irqrestore(&sch->lock, flags); | ||
545 | wait_event(cdev->private->wait_q, cdev->private->irb.scsw.actl == 0); | ||
546 | spin_lock_irqsave(&sch->lock, flags); | ||
547 | cio_disable_subchannel(sch); //FIXME: return code? | ||
548 | if ((cdev->private->irb.scsw.dstat != | ||
549 | (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || | ||
550 | (cdev->private->irb.scsw.cstat != 0)) | ||
551 | ret = -EIO; | ||
552 | /* Clear irb. */ | ||
553 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | ||
554 | out_unlock: | ||
555 | if (buf) | ||
556 | kfree(buf); | ||
557 | if (buf2) | ||
558 | kfree(buf2); | ||
559 | spin_unlock_irqrestore(&sch->lock, flags); | ||
560 | return ret; | ||
561 | } | ||
562 | |||
/*
 * Return the channel-path description for channel path @chp_no of the
 * device's subchannel, as obtained from chsc_get_chp_desc().
 */
void *
ccw_device_get_chp_desc(struct ccw_device *cdev, int chp_no)
{
	struct subchannel *sch;

	sch = to_subchannel(cdev->dev.parent);
	return chsc_get_chp_desc(sch, chp_no);
}
571 | |||
572 | // FIXME: these have to go: | ||
573 | |||
/* Return the device's subchannel number (legacy accessor, see FIXME above). */
int
_ccw_device_get_subchannel_number(struct ccw_device *cdev)
{
	return cdev->private->irq;
}
579 | |||
/* Return the device's device number (legacy accessor, see FIXME above). */
int
_ccw_device_get_device_number(struct ccw_device *cdev)
{
	return cdev->private->devno;
}
585 | |||
586 | |||
587 | MODULE_LICENSE("GPL"); | ||
588 | EXPORT_SYMBOL(ccw_device_set_options); | ||
589 | EXPORT_SYMBOL(ccw_device_clear); | ||
590 | EXPORT_SYMBOL(ccw_device_halt); | ||
591 | EXPORT_SYMBOL(ccw_device_resume); | ||
592 | EXPORT_SYMBOL(ccw_device_start_timeout); | ||
593 | EXPORT_SYMBOL(ccw_device_start); | ||
594 | EXPORT_SYMBOL(ccw_device_start_timeout_key); | ||
595 | EXPORT_SYMBOL(ccw_device_start_key); | ||
596 | EXPORT_SYMBOL(ccw_device_get_ciw); | ||
597 | EXPORT_SYMBOL(ccw_device_get_path_mask); | ||
598 | EXPORT_SYMBOL(read_conf_data); | ||
599 | EXPORT_SYMBOL(read_dev_chars); | ||
600 | EXPORT_SYMBOL(_ccw_device_get_subchannel_number); | ||
601 | EXPORT_SYMBOL(_ccw_device_get_device_number); | ||
602 | EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc); | ||
603 | EXPORT_SYMBOL_GPL(read_conf_data_lpm); | ||
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c new file mode 100644 index 000000000000..0adac8a67331 --- /dev/null +++ b/drivers/s390/cio/device_pgid.c | |||
@@ -0,0 +1,448 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/device_pgid.c | ||
3 | * | ||
4 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | ||
5 | * IBM Corporation | ||
6 | * Author(s): Cornelia Huck(cohuck@de.ibm.com) | ||
7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
8 | * | ||
9 | * Path Group ID functions. | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | |||
16 | #include <asm/ccwdev.h> | ||
17 | #include <asm/cio.h> | ||
18 | #include <asm/delay.h> | ||
19 | #include <asm/lowcore.h> | ||
20 | |||
21 | #include "cio.h" | ||
22 | #include "cio_debug.h" | ||
23 | #include "css.h" | ||
24 | #include "device.h" | ||
25 | |||
26 | /* | ||
27 | * Start Sense Path Group ID helper function. Used in ccw_device_recog | ||
28 | * and ccw_device_sense_pgid. | ||
29 | */ | ||
30 | static int | ||
31 | __ccw_device_sense_pgid_start(struct ccw_device *cdev) | ||
32 | { | ||
33 | struct subchannel *sch; | ||
34 | struct ccw1 *ccw; | ||
35 | int ret; | ||
36 | |||
37 | sch = to_subchannel(cdev->dev.parent); | ||
38 | /* Setup sense path group id channel program. */ | ||
39 | ccw = cdev->private->iccws; | ||
40 | ccw->cmd_code = CCW_CMD_SENSE_PGID; | ||
41 | ccw->cda = (__u32) __pa (&cdev->private->pgid); | ||
42 | ccw->count = sizeof (struct pgid); | ||
43 | ccw->flags = CCW_FLAG_SLI; | ||
44 | |||
45 | /* Reset device status. */ | ||
46 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | ||
47 | /* Try on every path. */ | ||
48 | ret = -ENODEV; | ||
49 | while (cdev->private->imask != 0) { | ||
50 | /* Try every path multiple times. */ | ||
51 | if (cdev->private->iretry > 0) { | ||
52 | cdev->private->iretry--; | ||
53 | ret = cio_start (sch, cdev->private->iccws, | ||
54 | cdev->private->imask); | ||
55 | /* ret is 0, -EBUSY, -EACCES or -ENODEV */ | ||
56 | if (ret != -EACCES) | ||
57 | return ret; | ||
58 | CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " | ||
59 | "%04x, lpm %02X, became 'not " | ||
60 | "operational'\n", | ||
61 | cdev->private->devno, sch->irq, | ||
62 | cdev->private->imask); | ||
63 | |||
64 | } | ||
65 | cdev->private->imask >>= 1; | ||
66 | cdev->private->iretry = 5; | ||
67 | } | ||
68 | return ret; | ||
69 | } | ||
70 | |||
71 | void | ||
72 | ccw_device_sense_pgid_start(struct ccw_device *cdev) | ||
73 | { | ||
74 | int ret; | ||
75 | |||
76 | cdev->private->state = DEV_STATE_SENSE_PGID; | ||
77 | cdev->private->imask = 0x80; | ||
78 | cdev->private->iretry = 5; | ||
79 | memset (&cdev->private->pgid, 0, sizeof (struct pgid)); | ||
80 | ret = __ccw_device_sense_pgid_start(cdev); | ||
81 | if (ret && ret != -EBUSY) | ||
82 | ccw_device_sense_pgid_done(cdev, ret); | ||
83 | } | ||
84 | |||
85 | /* | ||
86 | * Called from interrupt context to check if a valid answer | ||
87 | * to Sense Path Group ID was received. | ||
88 | */ | ||
89 | static int | ||
90 | __ccw_device_check_sense_pgid(struct ccw_device *cdev) | ||
91 | { | ||
92 | struct subchannel *sch; | ||
93 | struct irb *irb; | ||
94 | |||
95 | sch = to_subchannel(cdev->dev.parent); | ||
96 | irb = &cdev->private->irb; | ||
97 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) | ||
98 | return -ETIME; | ||
99 | if (irb->esw.esw0.erw.cons && | ||
100 | (irb->ecw[0]&(SNS0_CMD_REJECT|SNS0_INTERVENTION_REQ))) { | ||
101 | /* | ||
102 | * If the device doesn't support the Sense Path Group ID | ||
103 | * command further retries wouldn't help ... | ||
104 | */ | ||
105 | return -EOPNOTSUPP; | ||
106 | } | ||
107 | if (irb->esw.esw0.erw.cons) { | ||
108 | CIO_MSG_EVENT(2, "SNID - device %04x, unit check, " | ||
109 | "lpum %02X, cnt %02d, sns : " | ||
110 | "%02X%02X%02X%02X %02X%02X%02X%02X ...\n", | ||
111 | cdev->private->devno, | ||
112 | irb->esw.esw0.sublog.lpum, | ||
113 | irb->esw.esw0.erw.scnt, | ||
114 | irb->ecw[0], irb->ecw[1], | ||
115 | irb->ecw[2], irb->ecw[3], | ||
116 | irb->ecw[4], irb->ecw[5], | ||
117 | irb->ecw[6], irb->ecw[7]); | ||
118 | return -EAGAIN; | ||
119 | } | ||
120 | if (irb->scsw.cc == 3) { | ||
121 | CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel " | ||
122 | "%04x, lpm %02X, became 'not operational'\n", | ||
123 | cdev->private->devno, sch->irq, sch->orb.lpm); | ||
124 | return -EACCES; | ||
125 | } | ||
126 | if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) { | ||
127 | CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel %04x " | ||
128 | "is reserved by someone else\n", | ||
129 | cdev->private->devno, sch->irq); | ||
130 | return -EUSERS; | ||
131 | } | ||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * Got interrupt for Sense Path Group ID. | ||
137 | */ | ||
138 | void | ||
139 | ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event) | ||
140 | { | ||
141 | struct subchannel *sch; | ||
142 | struct irb *irb; | ||
143 | int ret; | ||
144 | |||
145 | irb = (struct irb *) __LC_IRB; | ||
146 | /* Retry sense pgid for cc=1. */ | ||
147 | if (irb->scsw.stctl == | ||
148 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | ||
149 | if (irb->scsw.cc == 1) { | ||
150 | ret = __ccw_device_sense_pgid_start(cdev); | ||
151 | if (ret && ret != -EBUSY) | ||
152 | ccw_device_sense_pgid_done(cdev, ret); | ||
153 | } | ||
154 | return; | ||
155 | } | ||
156 | if (ccw_device_accumulate_and_sense(cdev, irb) != 0) | ||
157 | return; | ||
158 | sch = to_subchannel(cdev->dev.parent); | ||
159 | ret = __ccw_device_check_sense_pgid(cdev); | ||
160 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | ||
161 | switch (ret) { | ||
162 | /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */ | ||
163 | case 0: /* Sense Path Group ID successful. */ | ||
164 | if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET) | ||
165 | memcpy(&cdev->private->pgid, &global_pgid, | ||
166 | sizeof(struct pgid)); | ||
167 | ccw_device_sense_pgid_done(cdev, 0); | ||
168 | break; | ||
169 | case -EOPNOTSUPP: /* Sense Path Group ID not supported */ | ||
170 | ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP); | ||
171 | break; | ||
172 | case -ETIME: /* Sense path group id stopped by timeout. */ | ||
173 | ccw_device_sense_pgid_done(cdev, -ETIME); | ||
174 | break; | ||
175 | case -EACCES: /* channel is not operational. */ | ||
176 | sch->lpm &= ~cdev->private->imask; | ||
177 | cdev->private->imask >>= 1; | ||
178 | cdev->private->iretry = 5; | ||
179 | /* Fall through. */ | ||
180 | case -EAGAIN: /* Try again. */ | ||
181 | ret = __ccw_device_sense_pgid_start(cdev); | ||
182 | if (ret != 0 && ret != -EBUSY) | ||
183 | ccw_device_sense_pgid_done(cdev, -ENODEV); | ||
184 | break; | ||
185 | case -EUSERS: /* device is reserved for someone else. */ | ||
186 | ccw_device_sense_pgid_done(cdev, -EUSERS); | ||
187 | break; | ||
188 | } | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * Path Group ID helper function. | ||
193 | */ | ||
194 | static int | ||
195 | __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func) | ||
196 | { | ||
197 | struct subchannel *sch; | ||
198 | struct ccw1 *ccw; | ||
199 | int ret; | ||
200 | |||
201 | sch = to_subchannel(cdev->dev.parent); | ||
202 | |||
203 | /* Setup sense path group id channel program. */ | ||
204 | cdev->private->pgid.inf.fc = func; | ||
205 | ccw = cdev->private->iccws; | ||
206 | if (!cdev->private->flags.pgid_single) { | ||
207 | cdev->private->pgid.inf.fc |= SPID_FUNC_MULTI_PATH; | ||
208 | ccw->cmd_code = CCW_CMD_SUSPEND_RECONN; | ||
209 | ccw->cda = 0; | ||
210 | ccw->count = 0; | ||
211 | ccw->flags = CCW_FLAG_SLI | CCW_FLAG_CC; | ||
212 | ccw++; | ||
213 | } else | ||
214 | cdev->private->pgid.inf.fc |= SPID_FUNC_SINGLE_PATH; | ||
215 | |||
216 | ccw->cmd_code = CCW_CMD_SET_PGID; | ||
217 | ccw->cda = (__u32) __pa (&cdev->private->pgid); | ||
218 | ccw->count = sizeof (struct pgid); | ||
219 | ccw->flags = CCW_FLAG_SLI; | ||
220 | |||
221 | /* Reset device status. */ | ||
222 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | ||
223 | |||
224 | /* Try multiple times. */ | ||
225 | ret = -ENODEV; | ||
226 | if (cdev->private->iretry > 0) { | ||
227 | cdev->private->iretry--; | ||
228 | ret = cio_start (sch, cdev->private->iccws, | ||
229 | cdev->private->imask); | ||
230 | /* ret is 0, -EBUSY, -EACCES or -ENODEV */ | ||
231 | if ((ret != -EACCES) && (ret != -ENODEV)) | ||
232 | return ret; | ||
233 | } | ||
234 | /* PGID command failed on this path. Switch it off. */ | ||
235 | sch->lpm &= ~cdev->private->imask; | ||
236 | sch->vpm &= ~cdev->private->imask; | ||
237 | CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " | ||
238 | "%04x, lpm %02X, became 'not operational'\n", | ||
239 | cdev->private->devno, sch->irq, cdev->private->imask); | ||
240 | return ret; | ||
241 | } | ||
242 | |||
243 | /* | ||
244 | * Called from interrupt context to check if a valid answer | ||
245 | * to Set Path Group ID was received. | ||
246 | */ | ||
247 | static int | ||
248 | __ccw_device_check_pgid(struct ccw_device *cdev) | ||
249 | { | ||
250 | struct subchannel *sch; | ||
251 | struct irb *irb; | ||
252 | |||
253 | sch = to_subchannel(cdev->dev.parent); | ||
254 | irb = &cdev->private->irb; | ||
255 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) | ||
256 | return -ETIME; | ||
257 | if (irb->esw.esw0.erw.cons) { | ||
258 | if (irb->ecw[0] & SNS0_CMD_REJECT) | ||
259 | return -EOPNOTSUPP; | ||
260 | /* Hmm, whatever happened, try again. */ | ||
261 | CIO_MSG_EVENT(2, "SPID - device %04x, unit check, cnt %02d, " | ||
262 | "sns : %02X%02X%02X%02X %02X%02X%02X%02X ...\n", | ||
263 | cdev->private->devno, irb->esw.esw0.erw.scnt, | ||
264 | irb->ecw[0], irb->ecw[1], | ||
265 | irb->ecw[2], irb->ecw[3], | ||
266 | irb->ecw[4], irb->ecw[5], | ||
267 | irb->ecw[6], irb->ecw[7]); | ||
268 | return -EAGAIN; | ||
269 | } | ||
270 | if (irb->scsw.cc == 3) { | ||
271 | CIO_MSG_EVENT(2, "SPID - Device %04x on Subchannel " | ||
272 | "%04x, lpm %02X, became 'not operational'\n", | ||
273 | cdev->private->devno, sch->irq, | ||
274 | cdev->private->imask); | ||
275 | return -EACCES; | ||
276 | } | ||
277 | return 0; | ||
278 | } | ||
279 | |||
280 | static void | ||
281 | __ccw_device_verify_start(struct ccw_device *cdev) | ||
282 | { | ||
283 | struct subchannel *sch; | ||
284 | __u8 imask, func; | ||
285 | int ret; | ||
286 | |||
287 | sch = to_subchannel(cdev->dev.parent); | ||
288 | while (sch->vpm != sch->lpm) { | ||
289 | /* Find first unequal bit in vpm vs. lpm */ | ||
290 | for (imask = 0x80; imask != 0; imask >>= 1) | ||
291 | if ((sch->vpm & imask) != (sch->lpm & imask)) | ||
292 | break; | ||
293 | cdev->private->imask = imask; | ||
294 | func = (sch->vpm & imask) ? | ||
295 | SPID_FUNC_RESIGN : SPID_FUNC_ESTABLISH; | ||
296 | ret = __ccw_device_do_pgid(cdev, func); | ||
297 | if (ret == 0 || ret == -EBUSY) | ||
298 | return; | ||
299 | cdev->private->iretry = 5; | ||
300 | } | ||
301 | ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV); | ||
302 | } | ||
303 | |||
304 | /* | ||
305 | * Got interrupt for Set Path Group ID. | ||
306 | */ | ||
307 | void | ||
308 | ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) | ||
309 | { | ||
310 | struct subchannel *sch; | ||
311 | struct irb *irb; | ||
312 | int ret; | ||
313 | |||
314 | irb = (struct irb *) __LC_IRB; | ||
315 | /* Retry set pgid for cc=1. */ | ||
316 | if (irb->scsw.stctl == | ||
317 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | ||
318 | if (irb->scsw.cc == 1) | ||
319 | __ccw_device_verify_start(cdev); | ||
320 | return; | ||
321 | } | ||
322 | if (ccw_device_accumulate_and_sense(cdev, irb) != 0) | ||
323 | return; | ||
324 | sch = to_subchannel(cdev->dev.parent); | ||
325 | ret = __ccw_device_check_pgid(cdev); | ||
326 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | ||
327 | switch (ret) { | ||
328 | /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ | ||
329 | case 0: | ||
330 | /* Establish or Resign Path Group done. Update vpm. */ | ||
331 | if ((sch->lpm & cdev->private->imask) != 0) | ||
332 | sch->vpm |= cdev->private->imask; | ||
333 | else | ||
334 | sch->vpm &= ~cdev->private->imask; | ||
335 | cdev->private->iretry = 5; | ||
336 | __ccw_device_verify_start(cdev); | ||
337 | break; | ||
338 | case -EOPNOTSUPP: | ||
339 | /* | ||
340 | * One of those strange devices which claim to be able | ||
341 | * to do multipathing but not for Set Path Group ID. | ||
342 | */ | ||
343 | if (cdev->private->flags.pgid_single) { | ||
344 | ccw_device_verify_done(cdev, -EOPNOTSUPP); | ||
345 | break; | ||
346 | } | ||
347 | cdev->private->flags.pgid_single = 1; | ||
348 | /* fall through. */ | ||
349 | case -EAGAIN: /* Try again. */ | ||
350 | __ccw_device_verify_start(cdev); | ||
351 | break; | ||
352 | case -ETIME: /* Set path group id stopped by timeout. */ | ||
353 | ccw_device_verify_done(cdev, -ETIME); | ||
354 | break; | ||
355 | case -EACCES: /* channel is not operational. */ | ||
356 | sch->lpm &= ~cdev->private->imask; | ||
357 | sch->vpm &= ~cdev->private->imask; | ||
358 | cdev->private->iretry = 5; | ||
359 | __ccw_device_verify_start(cdev); | ||
360 | break; | ||
361 | } | ||
362 | } | ||
363 | |||
364 | void | ||
365 | ccw_device_verify_start(struct ccw_device *cdev) | ||
366 | { | ||
367 | cdev->private->flags.pgid_single = 0; | ||
368 | cdev->private->iretry = 5; | ||
369 | __ccw_device_verify_start(cdev); | ||
370 | } | ||
371 | |||
372 | static void | ||
373 | __ccw_device_disband_start(struct ccw_device *cdev) | ||
374 | { | ||
375 | struct subchannel *sch; | ||
376 | int ret; | ||
377 | |||
378 | sch = to_subchannel(cdev->dev.parent); | ||
379 | while (cdev->private->imask != 0) { | ||
380 | if (sch->lpm & cdev->private->imask) { | ||
381 | ret = __ccw_device_do_pgid(cdev, SPID_FUNC_DISBAND); | ||
382 | if (ret == 0) | ||
383 | return; | ||
384 | } | ||
385 | cdev->private->iretry = 5; | ||
386 | cdev->private->imask >>= 1; | ||
387 | } | ||
388 | ccw_device_verify_done(cdev, (sch->lpm != 0) ? 0 : -ENODEV); | ||
389 | } | ||
390 | |||
391 | /* | ||
392 | * Got interrupt for Unset Path Group ID. | ||
393 | */ | ||
394 | void | ||
395 | ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event) | ||
396 | { | ||
397 | struct subchannel *sch; | ||
398 | struct irb *irb; | ||
399 | int ret; | ||
400 | |||
401 | irb = (struct irb *) __LC_IRB; | ||
402 | /* Retry set pgid for cc=1. */ | ||
403 | if (irb->scsw.stctl == | ||
404 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | ||
405 | if (irb->scsw.cc == 1) | ||
406 | __ccw_device_disband_start(cdev); | ||
407 | return; | ||
408 | } | ||
409 | if (ccw_device_accumulate_and_sense(cdev, irb) != 0) | ||
410 | return; | ||
411 | sch = to_subchannel(cdev->dev.parent); | ||
412 | ret = __ccw_device_check_pgid(cdev); | ||
413 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | ||
414 | switch (ret) { | ||
415 | /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ | ||
416 | case 0: /* disband successful. */ | ||
417 | sch->vpm = 0; | ||
418 | ccw_device_disband_done(cdev, ret); | ||
419 | break; | ||
420 | case -EOPNOTSUPP: | ||
421 | /* | ||
422 | * One of those strange devices which claim to be able | ||
423 | * to do multipathing but not for Unset Path Group ID. | ||
424 | */ | ||
425 | cdev->private->flags.pgid_single = 1; | ||
426 | /* fall through. */ | ||
427 | case -EAGAIN: /* Try again. */ | ||
428 | __ccw_device_disband_start(cdev); | ||
429 | break; | ||
430 | case -ETIME: /* Set path group id stopped by timeout. */ | ||
431 | ccw_device_disband_done(cdev, -ETIME); | ||
432 | break; | ||
433 | case -EACCES: /* channel is not operational. */ | ||
434 | cdev->private->imask >>= 1; | ||
435 | cdev->private->iretry = 5; | ||
436 | __ccw_device_disband_start(cdev); | ||
437 | break; | ||
438 | } | ||
439 | } | ||
440 | |||
441 | void | ||
442 | ccw_device_disband_start(struct ccw_device *cdev) | ||
443 | { | ||
444 | cdev->private->flags.pgid_single = 0; | ||
445 | cdev->private->iretry = 5; | ||
446 | cdev->private->imask = 0x80; | ||
447 | __ccw_device_disband_start(cdev); | ||
448 | } | ||
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c new file mode 100644 index 000000000000..4ab2e0d95009 --- /dev/null +++ b/drivers/s390/cio/device_status.c | |||
@@ -0,0 +1,385 @@ | |||
1 | /* | ||
2 | * drivers/s390/cio/device_status.c | ||
3 | * | ||
4 | * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, | ||
5 | * IBM Corporation | ||
6 | * Author(s): Cornelia Huck(cohuck@de.ibm.com) | ||
7 | * Martin Schwidefsky (schwidefsky@de.ibm.com) | ||
8 | * | ||
9 | * Status accumulation and basic sense functions. | ||
10 | */ | ||
11 | |||
12 | #include <linux/config.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | |||
16 | #include <asm/ccwdev.h> | ||
17 | #include <asm/cio.h> | ||
18 | |||
19 | #include "cio.h" | ||
20 | #include "cio_debug.h" | ||
21 | #include "css.h" | ||
22 | #include "device.h" | ||
23 | #include "ioasm.h" | ||
24 | |||
25 | /* | ||
26 | * Check for any kind of channel or interface control check but don't | ||
27 | * issue the message for the console device | ||
28 | */ | ||
29 | static inline void | ||
30 | ccw_device_msg_control_check(struct ccw_device *cdev, struct irb *irb) | ||
31 | { | ||
32 | if (!(irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | | ||
33 | SCHN_STAT_CHN_CTRL_CHK | | ||
34 | SCHN_STAT_INTF_CTRL_CHK))) | ||
35 | return; | ||
36 | |||
37 | CIO_MSG_EVENT(0, "Channel-Check or Interface-Control-Check " | ||
38 | "received" | ||
39 | " ... device %04X on subchannel %04X, dev_stat " | ||
40 | ": %02X sch_stat : %02X\n", | ||
41 | cdev->private->devno, cdev->private->irq, | ||
42 | cdev->private->irb.scsw.dstat, | ||
43 | cdev->private->irb.scsw.cstat); | ||
44 | |||
45 | if (irb->scsw.cc != 3) { | ||
46 | char dbf_text[15]; | ||
47 | |||
48 | sprintf(dbf_text, "chk%x", cdev->private->irq); | ||
49 | CIO_TRACE_EVENT(0, dbf_text); | ||
50 | CIO_HEX_EVENT(0, &cdev->private->irb, sizeof (struct irb)); | ||
51 | } | ||
52 | } | ||
53 | |||
54 | /* | ||
55 | * Some paths became not operational (pno bit in scsw is set). | ||
56 | */ | ||
57 | static void | ||
58 | ccw_device_path_notoper(struct ccw_device *cdev) | ||
59 | { | ||
60 | struct subchannel *sch; | ||
61 | |||
62 | sch = to_subchannel(cdev->dev.parent); | ||
63 | stsch (sch->irq, &sch->schib); | ||
64 | |||
65 | CIO_MSG_EVENT(0, "%s(%04x) - path(s) %02x are " | ||
66 | "not operational \n", __FUNCTION__, sch->irq, | ||
67 | sch->schib.pmcw.pnom); | ||
68 | |||
69 | sch->lpm &= ~sch->schib.pmcw.pnom; | ||
70 | if (cdev->private->options.pgroup) | ||
71 | cdev->private->flags.doverify = 1; | ||
72 | } | ||
73 | |||
74 | /* | ||
75 | * Copy valid bits from the extended control word to device irb. | ||
76 | */ | ||
77 | static inline void | ||
78 | ccw_device_accumulate_ecw(struct ccw_device *cdev, struct irb *irb) | ||
79 | { | ||
80 | /* | ||
81 | * Copy extended control bit if it is valid... yes there | ||
82 | * are condition that have to be met for the extended control | ||
83 | * bit to have meaning. Sick. | ||
84 | */ | ||
85 | cdev->private->irb.scsw.ectl = 0; | ||
86 | if ((irb->scsw.stctl & SCSW_STCTL_ALERT_STATUS) && | ||
87 | !(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS)) | ||
88 | cdev->private->irb.scsw.ectl = irb->scsw.ectl; | ||
89 | /* Check if extended control word is valid. */ | ||
90 | if (!cdev->private->irb.scsw.ectl) | ||
91 | return; | ||
92 | /* Copy concurrent sense / model dependent information. */ | ||
93 | memcpy (&cdev->private->irb.ecw, irb->ecw, sizeof (irb->ecw)); | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * Check if extended status word is valid. | ||
98 | */ | ||
99 | static inline int | ||
100 | ccw_device_accumulate_esw_valid(struct irb *irb) | ||
101 | { | ||
102 | if (!irb->scsw.eswf && irb->scsw.stctl == SCSW_STCTL_STATUS_PEND) | ||
103 | return 0; | ||
104 | if (irb->scsw.stctl == | ||
105 | (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND) && | ||
106 | !(irb->scsw.actl & SCSW_ACTL_SUSPENDED)) | ||
107 | return 0; | ||
108 | return 1; | ||
109 | } | ||
110 | |||
111 | /* | ||
112 | * Copy valid bits from the extended status word to device irb. | ||
113 | */ | ||
114 | static inline void | ||
115 | ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) | ||
116 | { | ||
117 | struct irb *cdev_irb; | ||
118 | struct sublog *cdev_sublog, *sublog; | ||
119 | |||
120 | if (!ccw_device_accumulate_esw_valid(irb)) | ||
121 | return; | ||
122 | |||
123 | cdev_irb = &cdev->private->irb; | ||
124 | |||
125 | /* Copy last path used mask. */ | ||
126 | cdev_irb->esw.esw1.lpum = irb->esw.esw1.lpum; | ||
127 | |||
128 | /* Copy subchannel logout information if esw is of format 0. */ | ||
129 | if (irb->scsw.eswf) { | ||
130 | cdev_sublog = &cdev_irb->esw.esw0.sublog; | ||
131 | sublog = &irb->esw.esw0.sublog; | ||
132 | /* Copy extended status flags. */ | ||
133 | cdev_sublog->esf = sublog->esf; | ||
134 | /* | ||
135 | * Copy fields that have a meaning for channel data check | ||
136 | * channel control check and interface control check. | ||
137 | */ | ||
138 | if (irb->scsw.cstat & (SCHN_STAT_CHN_DATA_CHK | | ||
139 | SCHN_STAT_CHN_CTRL_CHK | | ||
140 | SCHN_STAT_INTF_CTRL_CHK)) { | ||
141 | /* Copy ancillary report bit. */ | ||
142 | cdev_sublog->arep = sublog->arep; | ||
143 | /* Copy field-validity-flags. */ | ||
144 | cdev_sublog->fvf = sublog->fvf; | ||
145 | /* Copy storage access code. */ | ||
146 | cdev_sublog->sacc = sublog->sacc; | ||
147 | /* Copy termination code. */ | ||
148 | cdev_sublog->termc = sublog->termc; | ||
149 | /* Copy sequence code. */ | ||
150 | cdev_sublog->seqc = sublog->seqc; | ||
151 | } | ||
152 | /* Copy device status check. */ | ||
153 | cdev_sublog->devsc = sublog->devsc; | ||
154 | /* Copy secondary error. */ | ||
155 | cdev_sublog->serr = sublog->serr; | ||
156 | /* Copy i/o-error alert. */ | ||
157 | cdev_sublog->ioerr = sublog->ioerr; | ||
158 | /* Copy channel path timeout bit. */ | ||
159 | if (irb->scsw.cstat & SCHN_STAT_INTF_CTRL_CHK) | ||
160 | cdev_irb->esw.esw0.erw.cpt = irb->esw.esw0.erw.cpt; | ||
161 | /* Copy failing storage address validity flag. */ | ||
162 | cdev_irb->esw.esw0.erw.fsavf = irb->esw.esw0.erw.fsavf; | ||
163 | if (cdev_irb->esw.esw0.erw.fsavf) { | ||
164 | /* ... and copy the failing storage address. */ | ||
165 | memcpy(cdev_irb->esw.esw0.faddr, irb->esw.esw0.faddr, | ||
166 | sizeof (irb->esw.esw0.faddr)); | ||
167 | /* ... and copy the failing storage address format. */ | ||
168 | cdev_irb->esw.esw0.erw.fsaf = irb->esw.esw0.erw.fsaf; | ||
169 | } | ||
170 | /* Copy secondary ccw address validity bit. */ | ||
171 | cdev_irb->esw.esw0.erw.scavf = irb->esw.esw0.erw.scavf; | ||
172 | if (irb->esw.esw0.erw.scavf) | ||
173 | /* ... and copy the secondary ccw address. */ | ||
174 | cdev_irb->esw.esw0.saddr = irb->esw.esw0.saddr; | ||
175 | |||
176 | } | ||
177 | /* FIXME: DCTI for format 2? */ | ||
178 | |||
179 | /* Copy authorization bit. */ | ||
180 | cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth; | ||
181 | /* Copy path verification required flag. */ | ||
182 | cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf; | ||
183 | if (irb->esw.esw0.erw.pvrf && cdev->private->options.pgroup) | ||
184 | cdev->private->flags.doverify = 1; | ||
185 | /* Copy concurrent sense bit. */ | ||
186 | cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons; | ||
187 | if (irb->esw.esw0.erw.cons) | ||
188 | cdev_irb->esw.esw0.erw.scnt = irb->esw.esw0.erw.scnt; | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * Accumulate status from irb to devstat. | ||
193 | */ | ||
194 | void | ||
195 | ccw_device_accumulate_irb(struct ccw_device *cdev, struct irb *irb) | ||
196 | { | ||
197 | struct irb *cdev_irb; | ||
198 | |||
199 | /* | ||
200 | * Check if the status pending bit is set in stctl. | ||
201 | * If not, the remaining bit have no meaning and we must ignore them. | ||
202 | * The esw is not meaningful as well... | ||
203 | */ | ||
204 | if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) | ||
205 | return; | ||
206 | |||
207 | /* Check for channel checks and interface control checks. */ | ||
208 | ccw_device_msg_control_check(cdev, irb); | ||
209 | |||
210 | /* Check for path not operational. */ | ||
211 | if (irb->scsw.pno && irb->scsw.fctl != 0 && | ||
212 | (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) || | ||
213 | (irb->scsw.actl & SCSW_ACTL_SUSPENDED))) | ||
214 | ccw_device_path_notoper(cdev); | ||
215 | |||
216 | /* | ||
217 | * Don't accumulate unsolicited interrupts. | ||
218 | */ | ||
219 | if ((irb->scsw.stctl == | ||
220 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) && | ||
221 | (!irb->scsw.cc)) | ||
222 | return; | ||
223 | |||
224 | cdev_irb = &cdev->private->irb; | ||
225 | |||
226 | /* Copy bits which are valid only for the start function. */ | ||
227 | if (irb->scsw.fctl & SCSW_FCTL_START_FUNC) { | ||
228 | /* Copy key. */ | ||
229 | cdev_irb->scsw.key = irb->scsw.key; | ||
230 | /* Copy suspend control bit. */ | ||
231 | cdev_irb->scsw.sctl = irb->scsw.sctl; | ||
232 | /* Accumulate deferred condition code. */ | ||
233 | cdev_irb->scsw.cc |= irb->scsw.cc; | ||
234 | /* Copy ccw format bit. */ | ||
235 | cdev_irb->scsw.fmt = irb->scsw.fmt; | ||
236 | /* Copy prefetch bit. */ | ||
237 | cdev_irb->scsw.pfch = irb->scsw.pfch; | ||
238 | /* Copy initial-status-interruption-control. */ | ||
239 | cdev_irb->scsw.isic = irb->scsw.isic; | ||
240 | /* Copy address limit checking control. */ | ||
241 | cdev_irb->scsw.alcc = irb->scsw.alcc; | ||
242 | /* Copy suppress suspend bit. */ | ||
243 | cdev_irb->scsw.ssi = irb->scsw.ssi; | ||
244 | } | ||
245 | |||
246 | /* Take care of the extended control bit and extended control word. */ | ||
247 | ccw_device_accumulate_ecw(cdev, irb); | ||
248 | |||
249 | /* Accumulate function control. */ | ||
250 | cdev_irb->scsw.fctl |= irb->scsw.fctl; | ||
251 | /* Copy activity control. */ | ||
252 | cdev_irb->scsw.actl= irb->scsw.actl; | ||
253 | /* Accumulate status control. */ | ||
254 | cdev_irb->scsw.stctl |= irb->scsw.stctl; | ||
255 | /* | ||
256 | * Copy ccw address if it is valid. This is a bit simplified | ||
257 | * but should be close enough for all practical purposes. | ||
258 | */ | ||
259 | if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) || | ||
260 | ((irb->scsw.stctl == | ||
261 | (SCSW_STCTL_INTER_STATUS|SCSW_STCTL_STATUS_PEND)) && | ||
262 | (irb->scsw.actl & SCSW_ACTL_DEVACT) && | ||
263 | (irb->scsw.actl & SCSW_ACTL_SCHACT)) || | ||
264 | (irb->scsw.actl & SCSW_ACTL_SUSPENDED)) | ||
265 | cdev_irb->scsw.cpa = irb->scsw.cpa; | ||
266 | /* Accumulate device status, but not the device busy flag. */ | ||
267 | cdev_irb->scsw.dstat &= ~DEV_STAT_BUSY; | ||
268 | cdev_irb->scsw.dstat |= irb->scsw.dstat; | ||
269 | /* Accumulate subchannel status. */ | ||
270 | cdev_irb->scsw.cstat |= irb->scsw.cstat; | ||
271 | /* Copy residual count if it is valid. */ | ||
272 | if ((irb->scsw.stctl & SCSW_STCTL_PRIM_STATUS) && | ||
273 | (irb->scsw.cstat & ~(SCHN_STAT_PCI | SCHN_STAT_INCORR_LEN)) == 0) | ||
274 | cdev_irb->scsw.count = irb->scsw.count; | ||
275 | |||
276 | /* Take care of bits in the extended status word. */ | ||
277 | ccw_device_accumulate_esw(cdev, irb); | ||
278 | |||
279 | /* | ||
280 | * Check whether we must issue a SENSE CCW ourselves if there is no | ||
281 | * concurrent sense facility installed for the subchannel. | ||
282 | * No sense is required if no delayed sense is pending | ||
283 | * and we did not get a unit check without sense information. | ||
284 | * | ||
285 | * Note: We should check for ioinfo[irq]->flags.consns but VM | ||
286 | * violates the ESA/390 architecture and doesn't present an | ||
287 | * operand exception for virtual devices without concurrent | ||
288 | * sense facility available/supported when enabling the | ||
289 | * concurrent sense facility. | ||
290 | */ | ||
291 | if ((cdev_irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && | ||
292 | !(cdev_irb->esw.esw0.erw.cons)) | ||
293 | cdev->private->flags.dosense = 1; | ||
294 | } | ||
295 | |||
296 | /* | ||
297 | * Do a basic sense. | ||
298 | */ | ||
299 | int | ||
300 | ccw_device_do_sense(struct ccw_device *cdev, struct irb *irb) | ||
301 | { | ||
302 | struct subchannel *sch; | ||
303 | |||
304 | sch = to_subchannel(cdev->dev.parent); | ||
305 | |||
306 | /* A sense is required, can we do it now ? */ | ||
307 | if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) | ||
308 | /* | ||
309 | * we received an Unit Check but we have no final | ||
310 | * status yet, therefore we must delay the SENSE | ||
311 | * processing. We must not report this intermediate | ||
312 | * status to the device interrupt handler. | ||
313 | */ | ||
314 | return -EBUSY; | ||
315 | |||
316 | /* | ||
317 | * We have ending status but no sense information. Do a basic sense. | ||
318 | */ | ||
319 | sch = to_subchannel(cdev->dev.parent); | ||
320 | sch->sense_ccw.cmd_code = CCW_CMD_BASIC_SENSE; | ||
321 | sch->sense_ccw.cda = (__u32) __pa(cdev->private->irb.ecw); | ||
322 | sch->sense_ccw.count = SENSE_MAX_COUNT; | ||
323 | sch->sense_ccw.flags = CCW_FLAG_SLI; | ||
324 | |||
325 | return cio_start (sch, &sch->sense_ccw, 0xff); | ||
326 | } | ||
327 | |||
328 | /* | ||
329 | * Add information from basic sense to devstat. | ||
330 | */ | ||
331 | void | ||
332 | ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb) | ||
333 | { | ||
334 | /* | ||
335 | * Check if the status pending bit is set in stctl. | ||
336 | * If not, the remaining bit have no meaning and we must ignore them. | ||
337 | * The esw is not meaningful as well... | ||
338 | */ | ||
339 | if (!(irb->scsw.stctl & SCSW_STCTL_STATUS_PEND)) | ||
340 | return; | ||
341 | |||
342 | /* Check for channel checks and interface control checks. */ | ||
343 | ccw_device_msg_control_check(cdev, irb); | ||
344 | |||
345 | /* Check for path not operational. */ | ||
346 | if (irb->scsw.pno && irb->scsw.fctl != 0 && | ||
347 | (!(irb->scsw.stctl & SCSW_STCTL_INTER_STATUS) || | ||
348 | (irb->scsw.actl & SCSW_ACTL_SUSPENDED))) | ||
349 | ccw_device_path_notoper(cdev); | ||
350 | |||
351 | if (!(irb->scsw.dstat & DEV_STAT_UNIT_CHECK) && | ||
352 | (irb->scsw.dstat & DEV_STAT_CHN_END)) { | ||
353 | cdev->private->irb.esw.esw0.erw.cons = 1; | ||
354 | cdev->private->flags.dosense = 0; | ||
355 | } | ||
356 | /* Check if path verification is required. */ | ||
357 | if (ccw_device_accumulate_esw_valid(irb) && | ||
358 | irb->esw.esw0.erw.pvrf && cdev->private->options.pgroup) | ||
359 | cdev->private->flags.doverify = 1; | ||
360 | } | ||
361 | |||
362 | /* | ||
363 | * This function accumulates the status into the private devstat and | ||
364 | * starts a basic sense if one is needed. | ||
365 | */ | ||
366 | int | ||
367 | ccw_device_accumulate_and_sense(struct ccw_device *cdev, struct irb *irb) | ||
368 | { | ||
369 | ccw_device_accumulate_irb(cdev, irb); | ||
370 | if ((irb->scsw.actl & (SCSW_ACTL_DEVACT | SCSW_ACTL_SCHACT)) != 0) | ||
371 | return -EBUSY; | ||
372 | /* Check for basic sense. */ | ||
373 | if (cdev->private->flags.dosense && | ||
374 | !(irb->scsw.dstat & DEV_STAT_UNIT_CHECK)) { | ||
375 | cdev->private->irb.esw.esw0.erw.cons = 1; | ||
376 | cdev->private->flags.dosense = 0; | ||
377 | return 0; | ||
378 | } | ||
379 | if (cdev->private->flags.dosense) { | ||
380 | ccw_device_do_sense(cdev, irb); | ||
381 | return -EBUSY; | ||
382 | } | ||
383 | return 0; | ||
384 | } | ||
385 | |||
diff --git a/drivers/s390/cio/ioasm.h b/drivers/s390/cio/ioasm.h new file mode 100644 index 000000000000..c874607d9a80 --- /dev/null +++ b/drivers/s390/cio/ioasm.h | |||
@@ -0,0 +1,228 @@ | |||
1 | #ifndef S390_CIO_IOASM_H | ||
2 | #define S390_CIO_IOASM_H | ||
3 | |||
4 | /* | ||
5 | * TPI info structure | ||
6 | */ | ||
/*
 * TPI info structure
 *
 * Layout of the interruption information stored by the TPI
 * instruction (see the tpi() wrapper below).
 */
struct tpi_info {
	__u32 reserved1  : 16;   /* reserved 0x00000001 */
	__u32 irq	 : 16;   /* aka. subchannel number */
	__u32 intparm;		 /* interruption parameter */
	__u32 adapter_IO : 1;	 /* adapter (thin) interrupt, not subchannel */
	__u32 reserved2	 : 1;
	__u32 isc	 : 3;	 /* interruption subclass */
	__u32 reserved3	 : 12;
	__u32 int_type	 : 3;	 /* interruption type */
	__u32 reserved4	 : 12;
} __attribute__ ((packed));
18 | |||
19 | |||
20 | /* | ||
21 | * Some S390 specific IO instructions as inline | ||
22 | */ | ||
23 | |||
/*
 * stsch - store subchannel: write the SCHIB of subchannel 'irq' into
 * *addr.  The subchannel number or'ed with 0x10000 (presumably the
 * "one" bit of the subsystem ID — confirm against the POP) is loaded
 * into GPR1, which STSCH uses implicitly; the condition code (0..3)
 * is extracted via IPM/SRL and returned.
 */
extern __inline__ int stsch(int irq, volatile struct schib *addr)
{
	int ccode;

	__asm__ __volatile__(
		"   lr    1,%1\n"
		"   stsch 0(%2)\n"
		"   ipm   %0\n"
		"   srl   %0,28"
		: "=d" (ccode)
		: "d" (irq | 0x10000), "a" (addr)
		: "cc", "1" );
	return ccode;
}
38 | |||
/*
 * msch - modify subchannel: update subchannel 'irq' from the SCHIB at
 * *addr.  Subchannel number goes to GPR1 (implicit operand of MSCH);
 * returns the condition code.  Use msch_err() instead when the
 * instruction may program-check (e.g. subchannel gone).
 */
extern __inline__ int msch(int irq, volatile struct schib *addr)
{
	int ccode;

	__asm__ __volatile__(
		"   lr    1,%1\n"
		"   msch  0(%2)\n"
		"   ipm   %0\n"
		"   srl   %0,28"
		: "=d" (ccode)
		: "d" (irq | 0x10000L), "a" (addr)
		: "cc", "1" );
	return ccode;
}
53 | |||
/*
 * msch_err - like msch(), but tolerates a program check on the MSCH
 * instruction.  -EIO is preloaded into the result; if MSCH faults,
 * the program-check old PSW points past the suppressed instruction
 * (label 0), which the exception-table entry maps to label 1 — the
 * IPM/SRL sequence is skipped and the preloaded -EIO is returned.
 * On success the condition code overwrites it.  "=&d" (early clobber)
 * is required because %0 is written before %1-%3 are consumed.
 */
extern __inline__ int msch_err(int irq, volatile struct schib *addr)
{
	int ccode;

	__asm__ __volatile__(
		"    lhi  %0,%3\n"
		"    lr   1,%1\n"
		"    msch 0(%2)\n"
		"0:  ipm  %0\n"
		"    srl  %0,28\n"
		"1:\n"
#ifdef CONFIG_ARCH_S390X
		".section __ex_table,\"a\"\n"
		"   .align 8\n"
		"   .quad 0b,1b\n"
		".previous"
#else
		".section __ex_table,\"a\"\n"
		"   .align 4\n"
		"   .long 0b,1b\n"
		".previous"
#endif
		: "=&d" (ccode)
		: "d" (irq | 0x10000L), "a" (addr), "K" (-EIO)
		: "cc", "1" );
	return ccode;
}
81 | |||
/*
 * tsch - test subchannel: store the IRB of subchannel 'irq' into
 * *addr and clear its status.  Subchannel number goes to GPR1
 * (implicit operand of TSCH); returns the condition code.
 */
extern __inline__ int tsch(int irq, volatile struct irb *addr)
{
	int ccode;

	__asm__ __volatile__(
		"   lr    1,%1\n"
		"   tsch  0(%2)\n"
		"   ipm   %0\n"
		"   srl   %0,28"
		: "=d" (ccode)
		: "d" (irq | 0x10000L), "a" (addr)
		: "cc", "1" );
	return ccode;
}
96 | |||
/*
 * tpi - test pending interruption: if an I/O interruption is pending,
 * store its information into *addr (layout: struct tpi_info above)
 * and return cc 1; cc 0 means nothing was pending.
 */
extern __inline__ int tpi( volatile struct tpi_info *addr)
{
	int ccode;

	__asm__ __volatile__(
		"   tpi   0(%1)\n"
		"   ipm   %0\n"
		"   srl   %0,28"
		: "=d" (ccode)
		: "a" (addr)
		: "cc", "1" );
	return ccode;
}
110 | |||
/*
 * ssch - start subchannel: begin the I/O described by the ORB at
 * *addr on subchannel 'irq'.  Subchannel number goes to GPR1
 * (implicit operand of SSCH); returns the condition code.
 */
extern __inline__ int ssch(int irq, volatile struct orb *addr)
{
	int ccode;

	__asm__ __volatile__(
		"   lr    1,%1\n"
		"   ssch  0(%2)\n"
		"   ipm   %0\n"
		"   srl   %0,28"
		: "=d" (ccode)
		: "d" (irq | 0x10000L), "a" (addr)
		: "cc", "1" );
	return ccode;
}
125 | |||
/*
 * rsch - resume subchannel: resume a suspended channel program on
 * subchannel 'irq' (designated via GPR1).  Returns the condition code.
 */
extern __inline__ int rsch(int irq)
{
	int ccode;

	__asm__ __volatile__(
		"   lr    1,%1\n"
		"   rsch\n"
		"   ipm   %0\n"
		"   srl   %0,28"
		: "=d" (ccode)
		: "d" (irq | 0x10000L)
		: "cc", "1" );
	return ccode;
}
140 | |||
/*
 * csch - clear subchannel: terminate any I/O on subchannel 'irq'
 * (designated via GPR1) and clear its status.  Returns the
 * condition code.
 */
extern __inline__ int csch(int irq)
{
	int ccode;

	__asm__ __volatile__(
		"   lr    1,%1\n"
		"   csch\n"
		"   ipm   %0\n"
		"   srl   %0,28"
		: "=d" (ccode)
		: "d" (irq | 0x10000L)
		: "cc", "1" );
	return ccode;
}
155 | |||
/*
 * hsch - halt subchannel: halt the current I/O on subchannel 'irq'
 * (designated via GPR1).  Returns the condition code.
 */
extern __inline__ int hsch(int irq)
{
	int ccode;

	__asm__ __volatile__(
		"   lr    1,%1\n"
		"   hsch\n"
		"   ipm   %0\n"
		"   srl   %0,28"
		: "=d" (ccode)
		: "d" (irq | 0x10000L)
		: "cc", "1" );
	return ccode;
}
170 | |||
/*
 * xsch - cancel subchannel, emitted via .insn (opcode 0xb276) because
 * the assembler of the day lacked the XSCH mnemonic.  The subchannel
 * is designated via GPR1; the %1 register field in the .insn encoding
 * is presumably ignored by the instruction itself — confirm against
 * the POP.  Returns the condition code.
 */
extern __inline__ int xsch(int irq)
{
	int ccode;

	__asm__ __volatile__(
		"   lr    1,%1\n"
		"   .insn rre,0xb2760000,%1,0\n"
		"   ipm   %0\n"
		"   srl   %0,28"
		: "=d" (ccode)
		: "d" (irq | 0x10000L)
		: "cc", "1" );
	return ccode;
}
185 | |||
/*
 * chsc - channel subsystem call, emitted via .insn (opcode 0xb25f).
 * chsc_area points to the request/response block (which CHSC both
 * reads and writes).  Returns the condition code.
 */
extern __inline__ int chsc(void *chsc_area)
{
	int cc;

	__asm__ __volatile__ (
		".insn	rre,0xb25f0000,%1,0	\n\t"
		"ipm	%0	\n\t"
		"srl	%0,28	\n\t"
		: "=d" (cc)
		: "d" (chsc_area)
		: "cc" );

	return cc;
}
200 | |||
/*
 * iac - issue the IAC instruction with GPR1 as its operand and return
 * the condition code.  NOTE(review): the value IAC deposits in GPR1
 * is discarded here; only the cc is used — confirm intended semantics
 * against the POP.
 */
extern __inline__ int iac( void)
{
	int ccode;

	__asm__ __volatile__(
		"   iac   1\n"
		"   ipm   %0\n"
		"   srl   %0,28"
		: "=d" (ccode) : : "cc", "1" );
	return ccode;
}
212 | |||
/*
 * rchp - reset channel path 'chpid' (designated via GPR1).  Note that
 * unlike the subchannel wrappers above, no 0x10000 bit is or'ed in.
 * Returns the condition code.
 */
extern __inline__ int rchp(int chpid)
{
	int ccode;

	__asm__ __volatile__(
		"   lr    1,%1\n"
		"   rchp\n"
		"   ipm   %0\n"
		"   srl   %0,28"
		: "=d" (ccode)
		: "d" (chpid)
		: "cc", "1" );
	return ccode;
}
227 | |||
228 | #endif | ||
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c new file mode 100644 index 000000000000..bbe9f45d1438 --- /dev/null +++ b/drivers/s390/cio/qdio.c | |||
@@ -0,0 +1,3468 @@ | |||
1 | /* | ||
2 | * | ||
3 | * linux/drivers/s390/cio/qdio.c | ||
4 | * | ||
5 | * Linux for S/390 QDIO base support, Hipersocket base support | ||
6 | * version 2 | ||
7 | * | ||
8 | * Copyright 2000,2002 IBM Corporation | ||
9 | * Author(s): Utz Bacher <utz.bacher@de.ibm.com> | ||
10 | * 2.6 cio integration by Cornelia Huck <cohuck@de.ibm.com> | ||
11 | * | ||
12 | * Restriction: only 63 iqdio subchannels would have its own indicator, | ||
13 | * after that, subsequent subchannels share one indicator | ||
14 | * | ||
15 | * | ||
16 | * | ||
17 | * | ||
18 | * This program is free software; you can redistribute it and/or modify | ||
19 | * it under the terms of the GNU General Public License as published by | ||
20 | * the Free Software Foundation; either version 2, or (at your option) | ||
21 | * any later version. | ||
22 | * | ||
23 | * This program is distributed in the hope that it will be useful, | ||
24 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
25 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
26 | * GNU General Public License for more details. | ||
27 | * | ||
28 | * You should have received a copy of the GNU General Public License | ||
29 | * along with this program; if not, write to the Free Software | ||
30 | * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | ||
31 | */ | ||
32 | |||
33 | #include <linux/config.h> | ||
34 | #include <linux/module.h> | ||
35 | #include <linux/init.h> | ||
36 | |||
37 | #include <linux/slab.h> | ||
38 | #include <linux/kernel.h> | ||
39 | #include <linux/proc_fs.h> | ||
40 | #include <linux/timer.h> | ||
41 | |||
42 | #include <asm/ccwdev.h> | ||
43 | #include <asm/io.h> | ||
44 | #include <asm/atomic.h> | ||
45 | #include <asm/semaphore.h> | ||
46 | #include <asm/timex.h> | ||
47 | |||
48 | #include <asm/debug.h> | ||
49 | #include <asm/qdio.h> | ||
50 | |||
51 | #include "cio.h" | ||
52 | #include "css.h" | ||
53 | #include "device.h" | ||
54 | #include "airq.h" | ||
55 | #include "qdio.h" | ||
56 | #include "ioasm.h" | ||
57 | #include "chsc.h" | ||
58 | |||
59 | #define VERSION_QDIO_C "$Revision: 1.98 $" | ||
60 | |||
61 | /****************** MODULE PARAMETER VARIABLES ********************/ | ||
62 | MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>"); | ||
63 | MODULE_DESCRIPTION("QDIO base support version 2, " \ | ||
64 | "Copyright 2000 IBM Corporation"); | ||
65 | MODULE_LICENSE("GPL"); | ||
66 | |||
67 | /******************** HERE WE GO ***********************************/ | ||
68 | |||
69 | static const char version[] = "QDIO base support version 2 (" | ||
70 | VERSION_QDIO_C "/" VERSION_QDIO_H "/" VERSION_CIO_QDIO_H ")"; | ||
71 | |||
72 | #ifdef QDIO_PERFORMANCE_STATS | ||
73 | static int proc_perf_file_registration; | ||
74 | static unsigned long i_p_c, i_p_nc, o_p_c, o_p_nc, ii_p_c, ii_p_nc; | ||
75 | static struct qdio_perf_stats perf_stats; | ||
76 | #endif /* QDIO_PERFORMANCE_STATS */ | ||
77 | |||
78 | static int hydra_thinints; | ||
79 | static int omit_svs; | ||
80 | |||
81 | static int indicator_used[INDICATORS_PER_CACHELINE]; | ||
82 | static __u32 * volatile indicators; | ||
83 | static __u32 volatile spare_indicator; | ||
84 | static atomic_t spare_indicator_usecount; | ||
85 | |||
86 | static debug_info_t *qdio_dbf_setup; | ||
87 | static debug_info_t *qdio_dbf_sbal; | ||
88 | static debug_info_t *qdio_dbf_trace; | ||
89 | static debug_info_t *qdio_dbf_sense; | ||
90 | #ifdef CONFIG_QDIO_DEBUG | ||
91 | static debug_info_t *qdio_dbf_slsb_out; | ||
92 | static debug_info_t *qdio_dbf_slsb_in; | ||
93 | #endif /* CONFIG_QDIO_DEBUG */ | ||
94 | |||
95 | /* iQDIO stuff: */ | ||
96 | static volatile struct qdio_q *tiq_list=NULL; /* volatile as it could change | ||
97 | during a while loop */ | ||
98 | static DEFINE_SPINLOCK(ttiq_list_lock); | ||
99 | static int register_thinint_result; | ||
100 | static void tiqdio_tl(unsigned long); | ||
101 | static DECLARE_TASKLET(tiqdio_tasklet,tiqdio_tl,0); | ||
102 | |||
/*
 * Return the smaller of two ints.  Deliberately a function and not a
 * macro: callers pass atomic_read() results, which must be evaluated
 * exactly once.
 */
static inline int
qdio_min(int a, int b)
{
	return (a < b) ? a : b;
}
112 | |||
113 | /***************** SCRUBBER HELPER ROUTINES **********************/ | ||
114 | |||
/*
 * Timestamp helper based on the TOD clock.
 * NOTE(review): the shift and the comment disagree — on s390,
 * get_clock()>>12 yields microseconds, but the code shifts by 10.
 * Callers only compare deltas (via NOW), so it is internally
 * consistent, but confirm the intended unit before relying on it.
 */
static inline volatile __u64
qdio_get_micros(void)
{
	return (get_clock() >> 10); /* time>>12 is microseconds */
}
120 | |||
121 | /* | ||
122 | * unfortunately, we can't just xchg the values; in do_QDIO we want to reserve | ||
123 | * the q in any case, so that we'll not be interrupted when we are in | ||
124 | * qdio_mark_tiq... shouldn't have a really bad impact, as reserving almost | ||
125 | * ever works (last famous words) | ||
126 | */ | ||
127 | static inline int | ||
128 | qdio_reserve_q(struct qdio_q *q) | ||
129 | { | ||
130 | return atomic_add_return(1,&q->use_count) - 1; | ||
131 | } | ||
132 | |||
/* Drop the reference taken by qdio_reserve_q(). */
static inline void
qdio_release_q(struct qdio_q *q)
{
	atomic_dec(&q->use_count);
}
138 | |||
/*
 * Atomically set a single SLSB (storage-list-state-block) entry to
 * 'value'.  xchg gives a single interlocked store — presumably
 * because the adapter accesses the SLSB concurrently; confirm.
 *
 * The original declared this "static volatile inline void"; a type
 * qualifier on a void return type is meaningless and gcc warns
 * ("type qualifiers ignored on function return type"), so the
 * qualifier is dropped.
 */
static inline void
qdio_set_slsb(volatile char *slsb, unsigned char value)
{
	xchg((char*)slsb, value);
}
144 | |||
/*
 * Issue SIGA-sync for q's subchannel; gpr2/gpr3 select the output
 * resp. input queues to synchronize.  Returns the condition code
 * from do_siga_sync (non-zero codes are traced).
 */
static inline int
qdio_siga_sync(struct qdio_q *q, unsigned int gpr2,
	       unsigned int gpr3)
{
	int cc;

	QDIO_DBF_TEXT4(0,trace,"sigasync");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.siga_syncs++;
#endif /* QDIO_PERFORMANCE_STATS */

	cc = do_siga_sync(q->irq, gpr2, gpr3);
	if (cc)
		/* NOTE(review): sizeof(int*) dumps pointer-size bytes of a
		 * 4-byte int on 64 bit — harmless for tracing, but odd. */
		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));

	return cc;
}
164 | |||
165 | static inline int | ||
166 | qdio_siga_sync_q(struct qdio_q *q) | ||
167 | { | ||
168 | if (q->is_input_q) | ||
169 | return qdio_siga_sync(q, 0, q->mask); | ||
170 | return qdio_siga_sync(q, q->mask, 0); | ||
171 | } | ||
172 | |||
/*
 * returns QDIO_SIGA_ERROR_ACCESS_EXCEPTION as cc, when SIGA returns
 * an access exception
 *
 * Issue SIGA-write for q.  cc=2 with the busy bit set on a
 * HiperSockets (iqdio) queue is retried for up to
 * QDIO_BUSY_BIT_PATIENCE before giving up; any other result ends the
 * loop immediately.  A persistent busy condition is reported to the
 * caller by or'ing QDIO_SIGA_ERROR_B_BIT_SET into the returned cc.
 */
static inline int
qdio_siga_output(struct qdio_q *q)
{
	int cc;
	__u32 busy_bit;
	__u64 start_time=0;

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.siga_outs++;
#endif /* QDIO_PERFORMANCE_STATS */

	QDIO_DBF_TEXT4(0,trace,"sigaout");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	for (;;) {
		cc = do_siga_output(q->irq, q->mask, &busy_bit);
//QDIO_PRINT_ERR("cc=%x, busy=%x\n",cc,busy_bit);
		if ((cc==2) && (busy_bit) && (q->is_iqdio_q)) {
			/* First busy hit starts the patience clock. */
			if (!start_time)
				start_time=NOW;
			if ((NOW-start_time)>QDIO_BUSY_BIT_PATIENCE)
				break;
		} else
			break;
	}

	/* Flag the persistent-busy condition for the caller. */
	if ((cc==2) && (busy_bit))
		cc |= QDIO_SIGA_ERROR_B_BIT_SET;

	if (cc)
		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));

	return cc;
}
211 | |||
/*
 * Issue SIGA-read for q's subchannel, telling the adapter the input
 * queue selected by q->mask has been emptied.  Returns the condition
 * code from do_siga_input (non-zero codes are traced).
 */
static inline int
qdio_siga_input(struct qdio_q *q)
{
	int cc;

	QDIO_DBF_TEXT4(0,trace,"sigain");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.siga_ins++;
#endif /* QDIO_PERFORMANCE_STATS */

	cc = do_siga_input(q->irq, q->mask);

	if (cc)
		QDIO_DBF_HEX3(0,trace,&cc,sizeof(int*));

	return cc;
}
231 | |||
232 | /* locked by the locks in qdio_activate and qdio_cleanup */ | ||
233 | static __u32 * volatile | ||
234 | qdio_get_indicator(void) | ||
235 | { | ||
236 | int i; | ||
237 | |||
238 | for (i=1;i<INDICATORS_PER_CACHELINE;i++) | ||
239 | if (!indicator_used[i]) { | ||
240 | indicator_used[i]=1; | ||
241 | return indicators+i; | ||
242 | } | ||
243 | atomic_inc(&spare_indicator_usecount); | ||
244 | return (__u32 * volatile) &spare_indicator; | ||
245 | } | ||
246 | |||
247 | /* locked by the locks in qdio_activate and qdio_cleanup */ | ||
248 | static void | ||
249 | qdio_put_indicator(__u32 *addr) | ||
250 | { | ||
251 | int i; | ||
252 | |||
253 | if ( (addr) && (addr!=&spare_indicator) ) { | ||
254 | i=addr-indicators; | ||
255 | indicator_used[i]=0; | ||
256 | } | ||
257 | if (addr == &spare_indicator) | ||
258 | atomic_dec(&spare_indicator_usecount); | ||
259 | } | ||
260 | |||
261 | static inline volatile void | ||
262 | tiqdio_clear_summary_bit(__u32 *location) | ||
263 | { | ||
264 | QDIO_DBF_TEXT5(0,trace,"clrsummb"); | ||
265 | QDIO_DBF_HEX5(0,trace,&location,sizeof(void*)); | ||
266 | |||
267 | xchg(location,0); | ||
268 | } | ||
269 | |||
270 | static inline volatile void | ||
271 | tiqdio_set_summary_bit(__u32 *location) | ||
272 | { | ||
273 | QDIO_DBF_TEXT5(0,trace,"setsummb"); | ||
274 | QDIO_DBF_HEX5(0,trace,&location,sizeof(void*)); | ||
275 | |||
276 | xchg(location,-1); | ||
277 | } | ||
278 | |||
/* Schedule the global thin-interrupt tasklet (high priority). */
static inline void
tiqdio_sched_tl(void)
{
	tasklet_hi_schedule(&tiqdio_tasklet);
}
284 | |||
/*
 * Link q into the circular list (tiq_list) of thin-interrupt input
 * queues, set its device-state-change indicator and schedule the
 * thin-interrupt tasklet.  No-op for output queues, queues in
 * shutdown, and queues that are already on the list.
 */
static inline void
qdio_mark_tiq(struct qdio_q *q)
{
	unsigned long flags;

	QDIO_DBF_TEXT4(0,trace,"mark iq");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	spin_lock_irqsave(&ttiq_list_lock,flags);
	if (unlikely(atomic_read(&q->is_in_shutdown)))
		goto out_unlock;

	if (!q->is_input_q)
		goto out_unlock;

	/* Already linked in?  Then nothing to do. */
	if ((q->list_prev) || (q->list_next))
		goto out_unlock;

	if (!tiq_list) {
		/* First entry: a single-element ring pointing at itself. */
		tiq_list=q;
		q->list_prev=q;
		q->list_next=q;
	} else {
		/* Insert q in front of the current head. */
		q->list_next=tiq_list;
		q->list_prev=tiq_list->list_prev;
		tiq_list->list_prev->list_next=q;
		tiq_list->list_prev=q;
	}
	spin_unlock_irqrestore(&ttiq_list_lock,flags);

	/* Record the pending work, then let the tasklet pick it up. */
	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
	tiqdio_sched_tl();
	return;
out_unlock:
	spin_unlock_irqrestore(&ttiq_list_lock,flags);
	return;
}
322 | |||
323 | static inline void | ||
324 | qdio_mark_q(struct qdio_q *q) | ||
325 | { | ||
326 | QDIO_DBF_TEXT4(0,trace,"mark q"); | ||
327 | QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); | ||
328 | |||
329 | if (unlikely(atomic_read(&q->is_in_shutdown))) | ||
330 | return; | ||
331 | |||
332 | tasklet_schedule(&q->tasklet); | ||
333 | } | ||
334 | |||
/*
 * Leave polling mode for an input queue.  Returns 1 when there is
 * nothing further to do (we were not polling, q is not an input
 * queue, or the watched buffer did not turn PRIMED); returns 0 when
 * the watched buffer became PRIMED again, i.e. the caller must
 * process the queue once more.  Without QDIO_USE_PROCESSING_STATE
 * polling is never used, so this is a constant 1.
 */
static inline int
qdio_stop_polling(struct qdio_q *q)
{
#ifdef QDIO_USE_PROCESSING_STATE
	int gsf;

	/* Atomically clear the polling flag; bail out if it was clear. */
	if (!atomic_swap(&q->polling,0))
		return 1;

	QDIO_DBF_TEXT4(0,trace,"stoppoll");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	/* show the card that we are not polling anymore */
	if (!q->is_input_q)
		return 1;

	/* Reset the buffer just before the saved frontier to NOT_INIT. */
	gsf=GET_SAVED_FRONTIER(q);
	set_slsb(&q->slsb.acc.val[(gsf+QDIO_MAX_BUFFERS_PER_Q-1)&
				  (QDIO_MAX_BUFFERS_PER_Q-1)],
		 SLSB_P_INPUT_NOT_INIT);
	/*
	 * we don't issue this SYNC_MEMORY, as we trust Rick T and
	 * moreover will not use the PROCESSING state under VM, so
	 * q->polling was 0 anyway
	 */
	/*SYNC_MEMORY;*/
	if (q->slsb.acc.val[gsf]!=SLSB_P_INPUT_PRIMED)
		return 1;
	/*
	 * set our summary bit again, as otherwise there is a
	 * small window we can miss between resetting it and
	 * checking for PRIMED state
	 */
	if (q->is_thinint_q)
		tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
	return 0;

#else /* QDIO_USE_PROCESSING_STATE */
	return 1;
#endif /* QDIO_USE_PROCESSING_STATE */
}
376 | |||
/*
 * see the comment in do_QDIO and before qdio_reserve_q about the
 * sophisticated locking outside of unmark_q, so that we don't need to
 * disable the interrupts :-)
 *
 * Unlink a thin-interrupt input queue from the circular tiq_list.
 * Safe to call for queues that are not (or no longer) on the list.
 */
static inline void
qdio_unmark_q(struct qdio_q *q)
{
	unsigned long flags;

	QDIO_DBF_TEXT4(0,trace,"unmark q");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	/* Quick unlocked check; repeated under the lock below. */
	if ((!q->list_prev)||(!q->list_next))
		return;

	if ((q->is_thinint_q)&&(q->is_input_q)) {
		/* iQDIO */
		spin_lock_irqsave(&ttiq_list_lock,flags);
		/* in case cleanup has done this already and simultanously
		 * qdio_unmark_q is called from the interrupt handler, we've
		 * got to check this in this specific case again */
		if ((!q->list_prev)||(!q->list_next))
			goto out;
		if (q->list_next==q) {
			/* q was the only interesting q */
			tiq_list=NULL;
			q->list_next=NULL;
			q->list_prev=NULL;
		} else {
			/* Unlink q and advance the list head past it. */
			q->list_next->list_prev=q->list_prev;
			q->list_prev->list_next=q->list_next;
			tiq_list=q->list_next;
			q->list_next=NULL;
			q->list_prev=NULL;
		}
out:
		spin_unlock_irqrestore(&ttiq_list_lock,flags);
	}
}
417 | |||
418 | static inline unsigned long | ||
419 | tiqdio_clear_global_summary(void) | ||
420 | { | ||
421 | unsigned long time; | ||
422 | |||
423 | QDIO_DBF_TEXT5(0,trace,"clrglobl"); | ||
424 | |||
425 | time = do_clear_global_summary(); | ||
426 | |||
427 | QDIO_DBF_HEX5(0,trace,&time,sizeof(unsigned long)); | ||
428 | |||
429 | return time; | ||
430 | } | ||
431 | |||
432 | |||
433 | /************************* OUTBOUND ROUTINES *******************************/ | ||
434 | |||
/*
 * Walk the outbound SLSB from q->first_to_check and advance the
 * frontier past every buffer the adapter has consumed (EMPTY) or
 * flagged in error.  Error buffers are reset to NOT_INIT and recorded
 * in q->qdio_error / q->error_status_flags for the outbound handler.
 * Returns (and stores) the new frontier, modulo the ring size.
 */
inline static int
qdio_get_outbound_buffer_frontier(struct qdio_q *q)
{
	int f,f_mod_no;
	volatile char *slsb;
	int first_not_to_check;
	char dbf_text[15];

	QDIO_DBF_TEXT4(0,trace,"getobfro");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	slsb=&q->slsb.acc.val[0];
	f_mod_no=f=q->first_to_check;
	/*
	 * f points to already processed elements, so f+no_used is correct...
	 * ... but: we don't check 128 buffers, as otherwise
	 * qdio_has_outbound_q_moved would return 0
	 */
	first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
				      (QDIO_MAX_BUFFERS_PER_Q-1));

	/* Only sync when neither SIGA nor outbound PCIs do it for us. */
	if ((!q->is_iqdio_q)&&(!q->hydra_gives_outbound_pcis))
		SYNC_MEMORY;

check_next:
	if (f==first_not_to_check)
		goto out;

	switch(slsb[f_mod_no]) {

	/* the adapter has not fetched the output yet */
	case SLSB_CU_OUTPUT_PRIMED:
		QDIO_DBF_TEXT5(0,trace,"outpprim");
		break;

	/* the adapter got it */
	case SLSB_P_OUTPUT_EMPTY:
		atomic_dec(&q->number_of_buffers_used);
		f++;
		f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
		QDIO_DBF_TEXT5(0,trace,"outpempt");
		goto check_next;

	case SLSB_P_OUTPUT_ERROR:
		QDIO_DBF_TEXT3(0,trace,"outperr");
		sprintf(dbf_text,"%x-%x-%x",f_mod_no,
			q->sbal[f_mod_no]->element[14].sbalf.value,
			q->sbal[f_mod_no]->element[15].sbalf.value);
		QDIO_DBF_TEXT3(1,trace,dbf_text);
		QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);

		/* kind of process the buffer */
		set_slsb(&q->slsb.acc.val[f_mod_no], SLSB_P_OUTPUT_NOT_INIT);

		/*
		 * we increment the frontier, as this buffer
		 * was processed obviously
		 */
		atomic_dec(&q->number_of_buffers_used);
		f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);

		/* Remember the error for qdio_kick_outbound_handler. */
		if (q->qdio_error)
			q->error_status_flags|=
				QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
		q->qdio_error=SLSB_P_OUTPUT_ERROR;
		q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;

		break;

	/* no new buffers */
	default:
		QDIO_DBF_TEXT5(0,trace,"outpni");
	}
out:
	return (q->first_to_check=f_mod_no);
}
511 | |||
512 | /* all buffers are processed */ | ||
513 | inline static int | ||
514 | qdio_is_outbound_q_done(struct qdio_q *q) | ||
515 | { | ||
516 | int no_used; | ||
517 | #ifdef CONFIG_QDIO_DEBUG | ||
518 | char dbf_text[15]; | ||
519 | #endif | ||
520 | |||
521 | no_used=atomic_read(&q->number_of_buffers_used); | ||
522 | |||
523 | #ifdef CONFIG_QDIO_DEBUG | ||
524 | if (no_used) { | ||
525 | sprintf(dbf_text,"oqisnt%02x",no_used); | ||
526 | QDIO_DBF_TEXT4(0,trace,dbf_text); | ||
527 | } else { | ||
528 | QDIO_DBF_TEXT4(0,trace,"oqisdone"); | ||
529 | } | ||
530 | QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); | ||
531 | #endif /* CONFIG_QDIO_DEBUG */ | ||
532 | return (no_used==0); | ||
533 | } | ||
534 | |||
535 | inline static int | ||
536 | qdio_has_outbound_q_moved(struct qdio_q *q) | ||
537 | { | ||
538 | int i; | ||
539 | |||
540 | i=qdio_get_outbound_buffer_frontier(q); | ||
541 | |||
542 | if ( (i!=GET_SAVED_FRONTIER(q)) || | ||
543 | (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) { | ||
544 | SAVE_FRONTIER(q,i); | ||
545 | QDIO_DBF_TEXT4(0,trace,"oqhasmvd"); | ||
546 | QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); | ||
547 | return 1; | ||
548 | } else { | ||
549 | QDIO_DBF_TEXT4(0,trace,"oqhsntmv"); | ||
550 | QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); | ||
551 | return 0; | ||
552 | } | ||
553 | } | ||
554 | |||
/*
 * Tell the adapter about new outbound work via SIGA-write and track
 * the cc=2/busy-bit condition (see the long comment below): success
 * resets the busy timestamp, busy-within-grace reschedules the queue,
 * anything else is latched into qdio_error/error_status_flags so the
 * outbound handler reports it upstream.  No-op when the queue does
 * not require SIGA-write.
 */
inline static void
qdio_kick_outbound_q(struct qdio_q *q)
{
	int result;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];

	QDIO_DBF_TEXT4(0,trace,"kickoutq");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
#endif /* CONFIG_QDIO_DEBUG */

	if (!q->siga_out)
		return;

	/* here's the story with cc=2 and busy bit set (thanks, Rick):
	 * VM's CP could present us cc=2 and busy bit set on SIGA-write
	 * during reconfiguration of their Guest LAN (only in HIPERS mode,
	 * QDIO mode is asynchronous -- cc=2 and busy bit there will take
	 * the queues down immediately; and not being under VM we have a
	 * problem on cc=2 and busy bit set right away).
	 *
	 * Therefore qdio_siga_output will try for a short time constantly,
	 * if such a condition occurs. If it doesn't change, it will
	 * increase the busy_siga_counter and save the timestamp, and
	 * schedule the queue for later processing (via mark_q, using the
	 * queue tasklet). __qdio_outbound_processing will check out the
	 * counter. If non-zero, it will call qdio_kick_outbound_q as often
	 * as the value of the counter. This will attempt further SIGA
	 * instructions. For each successful SIGA, the counter is
	 * decreased, for failing SIGAs the counter remains the same, after
	 * all.
	 * After some time of no movement, qdio_kick_outbound_q will
	 * finally fail and reflect corresponding error codes to call
	 * the upper layer module and have it take the queues down.
	 *
	 * Note that this is a change from the original HiperSockets design
	 * (saying cc=2 and busy bit means take the queues down), but in
	 * these days Guest LAN didn't exist... excessive cc=2 with busy bit
	 * conditions will still take the queues down, but the threshold is
	 * higher due to the Guest LAN environment.
	 */


	result=qdio_siga_output(q);

	switch (result) {
	case 0:
		/* went smooth this time, reset timestamp */
#ifdef CONFIG_QDIO_DEBUG
		QDIO_DBF_TEXT3(0,trace,"cc2reslv");
		sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
			atomic_read(&q->busy_siga_counter));
		QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		q->timing.busy_start=0;
		break;
	case (2|QDIO_SIGA_ERROR_B_BIT_SET):
		/* cc=2 and busy bit: */
		atomic_inc(&q->busy_siga_counter);

		/* if the last siga was successful, save
		 * timestamp here */
		if (!q->timing.busy_start)
			q->timing.busy_start=NOW;

		/* if we're in time, don't touch error_status_flags
		 * and siga_error */
		if (NOW-q->timing.busy_start<QDIO_BUSY_BIT_GIVE_UP) {
			qdio_mark_q(q);
			break;
		}
		QDIO_DBF_TEXT2(0,trace,"cc2REPRT");
#ifdef CONFIG_QDIO_DEBUG
		sprintf(dbf_text,"%4x%2x%2x",q->irq,q->q_no,
			atomic_read(&q->busy_siga_counter));
		QDIO_DBF_TEXT3(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		/* else fallthrough and report error */
	default:
		/* for plain cc=1, 2 or 3: */
		if (q->siga_error)
			q->error_status_flags|=
				QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
		q->error_status_flags|=
			QDIO_STATUS_LOOK_FOR_ERROR;
		q->siga_error=result;
	}
}
643 | |||
/*
 * Report the range of outbound buffers processed since the last kick
 * to the upper-layer handler (only while the IRQ is ACTIVE), then
 * reset the per-interval error bookkeeping.  start/end/count use
 * modulo-ring arithmetic on QDIO_MAX_BUFFERS_PER_Q.
 */
inline static void
qdio_kick_outbound_handler(struct qdio_q *q)
{
	int start, end, real_end, count;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	start = q->first_element_to_kick;
	/* last_move_ftc was just updated */
	real_end = GET_SAVED_FRONTIER(q);
	/* end = one buffer before the new frontier, modulo ring size */
	end = (real_end+QDIO_MAX_BUFFERS_PER_Q-1)&
		(QDIO_MAX_BUFFERS_PER_Q-1);
	count = (end+QDIO_MAX_BUFFERS_PER_Q+1-start)&
		(QDIO_MAX_BUFFERS_PER_Q-1);

#ifdef CONFIG_QDIO_DEBUG
	QDIO_DBF_TEXT4(0,trace,"kickouth");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	sprintf(dbf_text,"s=%2xc=%2x",start,count);
	QDIO_DBF_TEXT4(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (q->state==QDIO_IRQ_STATE_ACTIVE)
		q->handler(q->cdev,QDIO_STATUS_OUTBOUND_INT|
			   q->error_status_flags,
			   q->qdio_error,q->siga_error,q->q_no,start,count,
			   q->int_parm);

	/* for the next time: */
	q->first_element_to_kick=real_end;
	q->qdio_error=0;
	q->siga_error=0;
	q->error_status_flags=0;
}
680 | |||
/*
 * Core outbound processing: reserve the queue (rescheduling if it is
 * busy), retry any SIGAs deferred by the busy-bit logic, report newly
 * processed buffers to the handler, and reschedule when more work may
 * show up without a PCI to announce it.
 */
static inline void
__qdio_outbound_processing(struct qdio_q *q)
{
	int siga_attempts;

	QDIO_DBF_TEXT4(0,trace,"qoutproc");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	if (unlikely(qdio_reserve_q(q))) {
		/* Someone else holds the queue; undo our reservation. */
		qdio_release_q(q);
#ifdef QDIO_PERFORMANCE_STATS
		o_p_c++;
#endif /* QDIO_PERFORMANCE_STATS */
		/* as we're sissies, we'll check next time */
		if (likely(!atomic_read(&q->is_in_shutdown))) {
			qdio_mark_q(q);
			QDIO_DBF_TEXT4(0,trace,"busy,agn");
		}
		return;
	}
#ifdef QDIO_PERFORMANCE_STATS
	o_p_nc++;
	perf_stats.tl_runs++;
#endif /* QDIO_PERFORMANCE_STATS */

	/* see comment in qdio_kick_outbound_q */
	siga_attempts=atomic_read(&q->busy_siga_counter);
	while (siga_attempts) {
		atomic_dec(&q->busy_siga_counter);
		qdio_kick_outbound_q(q);
		siga_attempts--;
	}

	if (qdio_has_outbound_q_moved(q))
		qdio_kick_outbound_handler(q);

	if (q->is_iqdio_q) {
		/*
		 * for asynchronous queues, we better check, if the fill
		 * level is too high. for synchronous queues, the fill
		 * level will never be that high.
		 */
		if (atomic_read(&q->number_of_buffers_used)>
		    IQDIO_FILL_LEVEL_TO_POLL)
			qdio_mark_q(q);

	} else if (!q->hydra_gives_outbound_pcis)
		if (!qdio_is_outbound_q_done(q))
			qdio_mark_q(q);

	qdio_release_q(q);
}
733 | |||
/* Tasklet entry point for outbound processing; runs the inlined worker. */
static void
qdio_outbound_processing(struct qdio_q *q)
{
	__qdio_outbound_processing(q);
}
739 | |||
740 | /************************* INBOUND ROUTINES *******************************/ | ||
741 | |||
742 | |||
/*
 * Scan the inbound queue's SLSB starting at q->first_to_check and
 * advance the frontier over buffers the adapter has handed to us:
 * PRIMED buffers are acknowledged (set to NOT_INIT, or left in
 * PROCESSING when polling is used), ERROR buffers are consumed while
 * latching qdio_error/error_status_flags, and EMPTY/NOT_INIT/
 * PROCESSING/unknown states stop the scan.
 * Returns the updated q->first_to_check (index into the 128-slot ring).
 */
inline static int
qdio_get_inbound_buffer_frontier(struct qdio_q *q)
{
	int f,f_mod_no;		/* running count and its value modulo ring size */
	volatile char *slsb;
	int first_not_to_check;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif /* CONFIG_QDIO_DEBUG */
#ifdef QDIO_USE_PROCESSING_STATE
	/* ring index of the newest PRIMED buffer left in PROCESSING state */
	int last_position=-1;
#endif /* QDIO_USE_PROCESSING_STATE */

	QDIO_DBF_TEXT4(0,trace,"getibfro");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	slsb=&q->slsb.acc.val[0];
	f_mod_no=f=q->first_to_check;
	/*
	 * we don't check 128 buffers, as otherwise qdio_has_inbound_q_moved
	 * would return 0
	 */
	first_not_to_check=f+qdio_min(atomic_read(&q->number_of_buffers_used),
				      (QDIO_MAX_BUFFERS_PER_Q-1));

	/*
	 * we don't use this one, as a PCI or we after a thin interrupt
	 * will sync the queues
	 */
	/* SYNC_MEMORY;*/

check_next:
	f_mod_no=f&(QDIO_MAX_BUFFERS_PER_Q-1);
	if (f==first_not_to_check)
		goto out;
	switch (slsb[f_mod_no]) {

	/* CU_EMPTY means frontier is reached */
	case SLSB_CU_INPUT_EMPTY:
		QDIO_DBF_TEXT5(0,trace,"inptempt");
		break;

	/* P_PRIMED means set slsb to P_PROCESSING and move on */
	case SLSB_P_INPUT_PRIMED:
		QDIO_DBF_TEXT5(0,trace,"inptprim");

#ifdef QDIO_USE_PROCESSING_STATE
		/*
		 * as soon as running under VM, polling the input queues will
		 * kill VM in terms of CP overhead
		 */
		if (q->siga_sync) {
			set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
		} else {
			/* set the previous buffer to NOT_INIT. The current
			 * buffer will be set to PROCESSING at the end of
			 * this function to avoid further interrupts. */
			if (last_position>=0)
				set_slsb(&slsb[last_position],
					 SLSB_P_INPUT_NOT_INIT);
			atomic_set(&q->polling,1);
			last_position=f_mod_no;
		}
#else /* QDIO_USE_PROCESSING_STATE */
		set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);
#endif /* QDIO_USE_PROCESSING_STATE */
		/*
		 * not needed, as the inbound queue will be synced on the next
		 * siga-r, resp. tiqdio_is_inbound_q_done will do the siga-s
		 */
		/*SYNC_MEMORY;*/
		f++;
		atomic_dec(&q->number_of_buffers_used);
		goto check_next;

	case SLSB_P_INPUT_NOT_INIT:
	case SLSB_P_INPUT_PROCESSING:
		QDIO_DBF_TEXT5(0,trace,"inpnipro");
		break;

	/* P_ERROR means frontier is reached, break and report error */
	case SLSB_P_INPUT_ERROR:
#ifdef CONFIG_QDIO_DEBUG
		sprintf(dbf_text,"inperr%2x",f_mod_no);
		QDIO_DBF_TEXT3(1,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */
		QDIO_DBF_HEX2(1,sbal,q->sbal[f_mod_no],256);

		/* kind of process the buffer */
		set_slsb(&slsb[f_mod_no],SLSB_P_INPUT_NOT_INIT);

		/* remember if a second error was seen before the handler ran */
		if (q->qdio_error)
			q->error_status_flags|=
				QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR;
		q->qdio_error=SLSB_P_INPUT_ERROR;
		q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;

		/* we increment the frontier, as this buffer
		 * was processed obviously */
		f_mod_no=(f_mod_no+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
		atomic_dec(&q->number_of_buffers_used);

#ifdef QDIO_USE_PROCESSING_STATE
		last_position=-1;
#endif /* QDIO_USE_PROCESSING_STATE */

		break;

	/* everything else means frontier not changed (HALTED or so) */
	default:
		break;
	}
out:
	q->first_to_check=f_mod_no;

#ifdef QDIO_USE_PROCESSING_STATE
	/* leave the newest acknowledged buffer in PROCESSING state to
	 * suppress further interrupts while we poll */
	if (last_position>=0)
		set_slsb(&slsb[last_position],SLSB_P_INPUT_PROCESSING);
#endif /* QDIO_USE_PROCESSING_STATE */

	QDIO_DBF_HEX4(0,trace,&q->first_to_check,sizeof(int));

	return q->first_to_check;
}
867 | |||
/*
 * Advance the inbound frontier and report whether it moved (or an error
 * was latched) since the last call.  On movement the new frontier and,
 * for non-siga_sync/non-outbound-PCI setups, a timestamp are saved for
 * the "queue done" heuristics.  Returns 1 if there is new work, else 0.
 */
inline static int
qdio_has_inbound_q_moved(struct qdio_q *q)
{
	int i;

#ifdef QDIO_PERFORMANCE_STATS
	static int old_pcis=0;
	static int old_thinints=0;

	/* NOTE(review): old_thinints is initialized once but never
	 * updated, so this comparison degenerates once perf_stats.thinints
	 * becomes non-zero -- looks unintended; affects statistics only.
	 * TODO confirm against the original intent. */
	if ((old_pcis==perf_stats.pcis)&&(old_thinints==perf_stats.thinints))
		perf_stats.start_time_inbound=NOW;
	else
		old_pcis=perf_stats.pcis;
#endif /* QDIO_PERFORMANCE_STATS */

	i=qdio_get_inbound_buffer_frontier(q);
	if ( (i!=GET_SAVED_FRONTIER(q)) ||
	     (q->error_status_flags&QDIO_STATUS_LOOK_FOR_ERROR) ) {
		SAVE_FRONTIER(q,i);
		if ((!q->siga_sync)&&(!q->hydra_gives_outbound_pcis))
			SAVE_TIMESTAMP(q);

		QDIO_DBF_TEXT4(0,trace,"inhasmvd");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		return 1;
	} else {
		QDIO_DBF_TEXT4(0,trace,"inhsntmv");
		QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
		return 0;
	}
}
899 | |||
/* means, no more buffers to be filled */
/*
 * Thin-interrupt variant of the "inbound queue done" check.
 * Returns 1 only when no buffers are in use.  Otherwise returns 0;
 * if the next buffer is already PRIMED again, the device state change
 * indicator is re-set and the tasklet rescheduled so the new work is
 * picked up on the next pass.
 */
inline static int
tiqdio_is_inbound_q_done(struct qdio_q *q)
{
	int no_used;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	no_used=atomic_read(&q->number_of_buffers_used);

	/* propagate the change from 82 to 80 through VM */
	SYNC_MEMORY;

#ifdef CONFIG_QDIO_DEBUG
	if (no_used) {
		sprintf(dbf_text,"iqisnt%02x",no_used);
		QDIO_DBF_TEXT4(0,trace,dbf_text);
	} else {
		QDIO_DBF_TEXT4(0,trace,"iniqisdo");
	}
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));
#endif /* CONFIG_QDIO_DEBUG */

	if (!no_used)
		return 1;

	if (!q->siga_sync)
		/* we'll check for more primed buffers in qeth_stop_polling */
		return 0;

	if (q->slsb.acc.val[q->first_to_check]!=SLSB_P_INPUT_PRIMED)
		/*
		 * nothing more to do, if next buffer is not PRIMED.
		 * note that we did a SYNC_MEMORY before, that there
		 * has been a sychnronization.
		 * we will return 0 below, as there is nothing to do
		 * (stop_polling not necessary, as we have not been
		 * using the PROCESSING state
		 */
		return 0;

	/*
	 * ok, the next input buffer is primed. that means, that device state
	 * change indicator and adapter local summary are set, so we will find
	 * it next time.
	 * we will return 0 below, as there is nothing to do, except scheduling
	 * ourselves for the next time.
	 */
	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
	tiqdio_sched_tl();
	return 0;
}
953 | |||
954 | inline static int | ||
955 | qdio_is_inbound_q_done(struct qdio_q *q) | ||
956 | { | ||
957 | int no_used; | ||
958 | #ifdef CONFIG_QDIO_DEBUG | ||
959 | char dbf_text[15]; | ||
960 | #endif | ||
961 | |||
962 | no_used=atomic_read(&q->number_of_buffers_used); | ||
963 | |||
964 | /* | ||
965 | * we need that one for synchronization with the adapter, as it | ||
966 | * does a kind of PCI avoidance | ||
967 | */ | ||
968 | SYNC_MEMORY; | ||
969 | |||
970 | if (!no_used) { | ||
971 | QDIO_DBF_TEXT4(0,trace,"inqisdnA"); | ||
972 | QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); | ||
973 | QDIO_DBF_TEXT4(0,trace,dbf_text); | ||
974 | return 1; | ||
975 | } | ||
976 | |||
977 | if (q->slsb.acc.val[q->first_to_check]==SLSB_P_INPUT_PRIMED) { | ||
978 | /* we got something to do */ | ||
979 | QDIO_DBF_TEXT4(0,trace,"inqisntA"); | ||
980 | QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); | ||
981 | return 0; | ||
982 | } | ||
983 | |||
984 | /* on VM, we don't poll, so the q is always done here */ | ||
985 | if (q->siga_sync) | ||
986 | return 1; | ||
987 | if (q->hydra_gives_outbound_pcis) | ||
988 | return 1; | ||
989 | |||
990 | /* | ||
991 | * at this point we know, that inbound first_to_check | ||
992 | * has (probably) not moved (see qdio_inbound_processing) | ||
993 | */ | ||
994 | if (NOW>GET_SAVED_TIMESTAMP(q)+q->timing.threshold) { | ||
995 | #ifdef CONFIG_QDIO_DEBUG | ||
996 | QDIO_DBF_TEXT4(0,trace,"inqisdon"); | ||
997 | QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); | ||
998 | sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used); | ||
999 | QDIO_DBF_TEXT4(0,trace,dbf_text); | ||
1000 | #endif /* CONFIG_QDIO_DEBUG */ | ||
1001 | return 1; | ||
1002 | } else { | ||
1003 | #ifdef CONFIG_QDIO_DEBUG | ||
1004 | QDIO_DBF_TEXT4(0,trace,"inqisntd"); | ||
1005 | QDIO_DBF_HEX4(0,trace,&q,sizeof(void*)); | ||
1006 | sprintf(dbf_text,"pf%02xcn%02x",q->first_to_check,no_used); | ||
1007 | QDIO_DBF_TEXT4(0,trace,dbf_text); | ||
1008 | #endif /* CONFIG_QDIO_DEBUG */ | ||
1009 | return 0; | ||
1010 | } | ||
1011 | } | ||
1012 | |||
/*
 * Deliver the inbound buffers between first_element_to_kick and the
 * current frontier to the driver's registered input handler, then
 * reset the per-run error bookkeeping.  The handler is only invoked
 * while the IRQ is in ACTIVE state, but the bookkeeping is advanced
 * either way.
 */
inline static void
qdio_kick_inbound_handler(struct qdio_q *q)
{
	int count, start, end, real_end, i;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15];
#endif

	QDIO_DBF_TEXT4(0,trace,"kickinh");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	start=q->first_element_to_kick;
	real_end=q->first_to_check;
	/* last buffer actually processed = frontier - 1 (mod ring size) */
	end=(real_end+QDIO_MAX_BUFFERS_PER_Q-1)&(QDIO_MAX_BUFFERS_PER_Q-1);

	/* count the buffers from start to end inclusive, wrapping around
	 * the 128-slot ring */
	i=start;
	count=0;
	while (1) {
		count++;
		if (i==end)
			break;
		i=(i+1)&(QDIO_MAX_BUFFERS_PER_Q-1);
	}

#ifdef CONFIG_QDIO_DEBUG
	sprintf(dbf_text,"s=%2xc=%2x",start,count);
	QDIO_DBF_TEXT4(0,trace,dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (likely(q->state==QDIO_IRQ_STATE_ACTIVE))
		q->handler(q->cdev,
			   QDIO_STATUS_INBOUND_INT|q->error_status_flags,
			   q->qdio_error,q->siga_error,q->q_no,start,count,
			   q->int_parm);

	/* for the next time: */
	q->first_element_to_kick=real_end;
	q->qdio_error=0;
	q->siga_error=0;
	q->error_status_flags=0;

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.inbound_time+=NOW-perf_stats.start_time_inbound;
	perf_stats.inbound_cnt++;
#endif /* QDIO_PERFORMANCE_STATS */
}
1059 | |||
/*
 * Worker behind tiqdio_inbound_processing.  Reserves the queue, checks
 * the (possibly shared) device state change indicator, syncs the queues
 * as the adapter's capabilities require, services outbound-PCI-capable
 * output queues, and finally runs the inbound handler / stop-polling
 * logic.  spare_ind_was_set tells us whether the caller saw the shared
 * spare indicator set.
 */
static inline void
__tiqdio_inbound_processing(struct qdio_q *q, int spare_ind_was_set)
{
	struct qdio_irq *irq_ptr;
	struct qdio_q *oq;
	int i;

	QDIO_DBF_TEXT4(0,trace,"iqinproc");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	/*
	 * we first want to reserve the q, so that we know, that we don't
	 * interrupt ourselves and call qdio_unmark_q, as is_in_shutdown might
	 * be set
	 */
	if (unlikely(qdio_reserve_q(q))) {
		qdio_release_q(q);
#ifdef QDIO_PERFORMANCE_STATS
		ii_p_c++;
#endif /* QDIO_PERFORMANCE_STATS */
		/*
		 * as we might just be about to stop polling, we make
		 * sure that we check again at least once more
		 */
		tiqdio_sched_tl();
		return;
	}
#ifdef QDIO_PERFORMANCE_STATS
	ii_p_nc++;
#endif /* QDIO_PERFORMANCE_STATS */
	if (unlikely(atomic_read(&q->is_in_shutdown))) {
		qdio_unmark_q(q);
		goto out;
	}

	/*
	 * we reset spare_ind_was_set, when the queue does not use the
	 * spare indicator
	 */
	if (spare_ind_was_set)
		spare_ind_was_set = (q->dev_st_chg_ind == &spare_indicator);

	/* neither this queue's indicator nor the shared spare is set:
	 * nothing arrived for us */
	if (!(*(q->dev_st_chg_ind)) && !spare_ind_was_set)
		goto out;
	/*
	 * q->dev_st_chg_ind is the indicator, be it shared or not.
	 * only clear it, if indicator is non-shared
	 */
	if (!spare_ind_was_set)
		tiqdio_clear_summary_bit((__u32*)q->dev_st_chg_ind);

	/* sync scope depends on which sigas the hardware performs for us */
	if (q->hydra_gives_outbound_pcis) {
		if (!q->siga_sync_done_on_thinints) {
			SYNC_MEMORY_ALL;
		} else if ((!q->siga_sync_done_on_outb_tis)&&
			   (q->hydra_gives_outbound_pcis)) {
			SYNC_MEMORY_ALL_OUTB;
		}
	} else {
		SYNC_MEMORY;
	}
	/*
	 * maybe we have to do work on our outbound queues... at least
	 * we have to check the outbound-int-capable thinint-capable
	 * queues
	 */
	if (q->hydra_gives_outbound_pcis) {
		irq_ptr = (struct qdio_irq*)q->irq_ptr;
		for (i=0;i<irq_ptr->no_output_qs;i++) {
			oq = irq_ptr->output_qs[i];
#ifdef QDIO_PERFORMANCE_STATS
			perf_stats.tl_runs--;
#endif /* QDIO_PERFORMANCE_STATS */
			if (!qdio_is_outbound_q_done(oq))
				__qdio_outbound_processing(oq);
		}
	}

	if (!qdio_has_inbound_q_moved(q))
		goto out;

	qdio_kick_inbound_handler(q);
	if (tiqdio_is_inbound_q_done(q))
		if (!qdio_stop_polling(q)) {
			/*
			 * we set the flags to get into the stuff next time,
			 * see also comment in qdio_stop_polling
			 */
			tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
			tiqdio_sched_tl();
		}
out:
	qdio_release_q(q);
}
1154 | |||
/* Tasklet entry point for thinint input queues; passes the current use
 * count of the shared spare indicator down to the worker. */
static void
tiqdio_inbound_processing(struct qdio_q *q)
{
	__tiqdio_inbound_processing(q, atomic_read(&spare_indicator_usecount));
}
1160 | |||
/*
 * Worker behind qdio_inbound_processing (non-thinint input queues).
 * Reserves the queue, then loops (up to QDIO_Q_LAPS times) kicking the
 * inbound handler while the frontier keeps moving and polling cannot
 * be stopped; (re)marks the queue for another tasklet pass as needed.
 */
static inline void
__qdio_inbound_processing(struct qdio_q *q)
{
	int q_laps=0;

	QDIO_DBF_TEXT4(0,trace,"qinproc");
	QDIO_DBF_HEX4(0,trace,&q,sizeof(void*));

	if (unlikely(qdio_reserve_q(q))) {
		qdio_release_q(q);
#ifdef QDIO_PERFORMANCE_STATS
		i_p_c++;
#endif /* QDIO_PERFORMANCE_STATS */
		/* as we're sissies, we'll check next time */
		if (likely(!atomic_read(&q->is_in_shutdown))) {
			qdio_mark_q(q);
			QDIO_DBF_TEXT4(0,trace,"busy,agn");
		}
		return;
	}
#ifdef QDIO_PERFORMANCE_STATS
	i_p_nc++;
	perf_stats.tl_runs++;
#endif /* QDIO_PERFORMANCE_STATS */

again:
	if (qdio_has_inbound_q_moved(q)) {
		qdio_kick_inbound_handler(q);
		if (!qdio_stop_polling(q)) {
			q_laps++;
			if (q_laps<QDIO_Q_LAPS)
				goto again;
		}
		qdio_mark_q(q);
	} else {
		if (!qdio_is_inbound_q_done(q))
			/* means poll time is not yet over */
			qdio_mark_q(q);
	}

	qdio_release_q(q);
}
1203 | |||
/* Tasklet entry point for non-thinint input queues (installed as
 * q->tasklet.func in qdio_fill_qs); delegates to the inlined worker. */
static void
qdio_inbound_processing(struct qdio_q *q)
{
	__qdio_inbound_processing(q);
}
1209 | |||
1210 | /************************* MAIN ROUTINES *******************************/ | ||
1211 | |||
#ifdef QDIO_USE_PROCESSING_STATE
/*
 * Try to take queue q out of the PROCESSING polling state at the end
 * of an inbound pass.  The return code steers tiqdio_inbound_checks:
 *   0 - no queue, or queue busy: tasklet rescheduled, caller must stop
 *   2 - done with this queue (VM/siga_sync case, or qdio_stop_polling
 *       reported success): caller advances to the next queue
 *   3 - polling not stopped yet but lap budget remains: caller counts
 *       a lap and restarts the whole scan
 *   1 - lap budget exhausted: summary bit set and tasklet rescheduled
 *       so remaining work is picked up later; caller advances
 */
static inline int
tiqdio_reset_processing_state(struct qdio_q *q, int q_laps)
{
	if (!q) {
		tiqdio_sched_tl();
		return 0;
	}

	/*
	 * under VM, we have not used the PROCESSING state, so no
	 * need to stop polling
	 */
	if (q->siga_sync)
		return 2;

	if (unlikely(qdio_reserve_q(q))) {
		qdio_release_q(q);
#ifdef QDIO_PERFORMANCE_STATS
		ii_p_c++;
#endif /* QDIO_PERFORMANCE_STATS */
		/*
		 * as we might just be about to stop polling, we make
		 * sure that we check again at least once more
		 */

		/*
		 * sanity -- we'd get here without setting the
		 * dev st chg ind
		 */
		tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
		tiqdio_sched_tl();
		return 0;
	}
	if (qdio_stop_polling(q)) {
		qdio_release_q(q);
		return 2;
	}
	if (q_laps<QDIO_Q_LAPS-1) {
		qdio_release_q(q);
		return 3;
	}
	/*
	 * we set the flags to get into the stuff
	 * next time, see also comment in qdio_stop_polling
	 */
	tiqdio_set_summary_bit((__u32*)q->dev_st_chg_ind);
	tiqdio_sched_tl();
	qdio_release_q(q);
	return 1;

}
#endif /* QDIO_USE_PROCESSING_STATE */
1265 | |||
/*
 * Walk the circular tiq_list of thinint input queues: first pass runs
 * inbound processing on every queue; second pass (polling builds only)
 * takes the queues back out of the PROCESSING state, restarting the
 * whole scan when tiqdio_reset_processing_state asks for another lap.
 */
static inline void
tiqdio_inbound_checks(void)
{
	struct qdio_q *q;
	int spare_ind_was_set=0;
#ifdef QDIO_USE_PROCESSING_STATE
	int q_laps=0;
#endif /* QDIO_USE_PROCESSING_STATE */

	QDIO_DBF_TEXT4(0,trace,"iqdinbck");
	QDIO_DBF_TEXT5(0,trace,"iqlocsum");

#ifdef QDIO_USE_PROCESSING_STATE
again:
#endif /* QDIO_USE_PROCESSING_STATE */

	/* when the spare indicator is used and set, save that and clear it */
	if ((atomic_read(&spare_indicator_usecount)) && spare_indicator) {
		spare_ind_was_set = 1;
		tiqdio_clear_summary_bit((__u32*)&spare_indicator);
	}

	/* first pass: process every queue on the thinint list */
	q=(struct qdio_q*)tiq_list;
	do {
		if (!q)
			break;
		__tiqdio_inbound_processing(q, spare_ind_was_set);
		q=(struct qdio_q*)q->list_next;
	} while (q!=(struct qdio_q*)tiq_list);

#ifdef QDIO_USE_PROCESSING_STATE
	/* second pass: leave the PROCESSING polling state again */
	q=(struct qdio_q*)tiq_list;
	do {
		int ret;

		ret = tiqdio_reset_processing_state(q, q_laps);
		switch (ret) {
		case 0:
			return;
		case 1:
			q_laps++;
			/* fall through */
		case 2:
			q = (struct qdio_q*)q->list_next;
			break;
		default:
			q_laps++;
			goto again;
		}
	} while (q!=(struct qdio_q*)tiq_list);
#endif /* QDIO_USE_PROCESSING_STATE */
}
1317 | |||
/* Thinint tasklet body: just runs the inbound checks over tiq_list.
 * The 'data' argument required by the tasklet API is unused. */
static void
tiqdio_tl(unsigned long data)
{
	QDIO_DBF_TEXT4(0,trace,"iqdio_tl");

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.tl_runs++;
#endif /* QDIO_PERFORMANCE_STATS */

	tiqdio_inbound_checks();
}
1329 | |||
1330 | /********************* GENERAL HELPER_ROUTINES ***********************/ | ||
1331 | |||
1332 | static void | ||
1333 | qdio_release_irq_memory(struct qdio_irq *irq_ptr) | ||
1334 | { | ||
1335 | int i; | ||
1336 | |||
1337 | for (i=0;i<QDIO_MAX_QUEUES_PER_IRQ;i++) { | ||
1338 | if (!irq_ptr->input_qs[i]) | ||
1339 | goto next; | ||
1340 | |||
1341 | if (irq_ptr->input_qs[i]->slib) | ||
1342 | kfree(irq_ptr->input_qs[i]->slib); | ||
1343 | kfree(irq_ptr->input_qs[i]); | ||
1344 | |||
1345 | next: | ||
1346 | if (!irq_ptr->output_qs[i]) | ||
1347 | continue; | ||
1348 | |||
1349 | if (irq_ptr->output_qs[i]->slib) | ||
1350 | kfree(irq_ptr->output_qs[i]->slib); | ||
1351 | kfree(irq_ptr->output_qs[i]); | ||
1352 | |||
1353 | } | ||
1354 | kfree(irq_ptr->qdr); | ||
1355 | kfree(irq_ptr); | ||
1356 | } | ||
1357 | |||
1358 | static void | ||
1359 | qdio_set_impl_params(struct qdio_irq *irq_ptr, | ||
1360 | unsigned int qib_param_field_format, | ||
1361 | /* pointer to 128 bytes or NULL, if no param field */ | ||
1362 | unsigned char *qib_param_field, | ||
1363 | /* pointer to no_queues*128 words of data or NULL */ | ||
1364 | unsigned int no_input_qs, | ||
1365 | unsigned int no_output_qs, | ||
1366 | unsigned long *input_slib_elements, | ||
1367 | unsigned long *output_slib_elements) | ||
1368 | { | ||
1369 | int i,j; | ||
1370 | |||
1371 | if (!irq_ptr) | ||
1372 | return; | ||
1373 | |||
1374 | irq_ptr->qib.pfmt=qib_param_field_format; | ||
1375 | if (qib_param_field) | ||
1376 | memcpy(irq_ptr->qib.parm,qib_param_field, | ||
1377 | QDIO_MAX_BUFFERS_PER_Q); | ||
1378 | |||
1379 | if (input_slib_elements) | ||
1380 | for (i=0;i<no_input_qs;i++) { | ||
1381 | for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) | ||
1382 | irq_ptr->input_qs[i]->slib->slibe[j].parms= | ||
1383 | input_slib_elements[ | ||
1384 | i*QDIO_MAX_BUFFERS_PER_Q+j]; | ||
1385 | } | ||
1386 | if (output_slib_elements) | ||
1387 | for (i=0;i<no_output_qs;i++) { | ||
1388 | for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) | ||
1389 | irq_ptr->output_qs[i]->slib->slibe[j].parms= | ||
1390 | output_slib_elements[ | ||
1391 | i*QDIO_MAX_BUFFERS_PER_Q+j]; | ||
1392 | } | ||
1393 | } | ||
1394 | |||
1395 | static int | ||
1396 | qdio_alloc_qs(struct qdio_irq *irq_ptr, | ||
1397 | int no_input_qs, int no_output_qs) | ||
1398 | { | ||
1399 | int i; | ||
1400 | struct qdio_q *q; | ||
1401 | int result=-ENOMEM; | ||
1402 | |||
1403 | for (i=0;i<no_input_qs;i++) { | ||
1404 | q=kmalloc(sizeof(struct qdio_q),GFP_KERNEL); | ||
1405 | |||
1406 | if (!q) { | ||
1407 | QDIO_PRINT_ERR("kmalloc of q failed!\n"); | ||
1408 | goto out; | ||
1409 | } | ||
1410 | |||
1411 | memset(q,0,sizeof(struct qdio_q)); | ||
1412 | |||
1413 | q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL); | ||
1414 | if (!q->slib) { | ||
1415 | QDIO_PRINT_ERR("kmalloc of slib failed!\n"); | ||
1416 | goto out; | ||
1417 | } | ||
1418 | |||
1419 | irq_ptr->input_qs[i]=q; | ||
1420 | } | ||
1421 | |||
1422 | for (i=0;i<no_output_qs;i++) { | ||
1423 | q=kmalloc(sizeof(struct qdio_q),GFP_KERNEL); | ||
1424 | |||
1425 | if (!q) { | ||
1426 | goto out; | ||
1427 | } | ||
1428 | |||
1429 | memset(q,0,sizeof(struct qdio_q)); | ||
1430 | |||
1431 | q->slib=kmalloc(PAGE_SIZE,GFP_KERNEL); | ||
1432 | if (!q->slib) { | ||
1433 | QDIO_PRINT_ERR("kmalloc of slib failed!\n"); | ||
1434 | goto out; | ||
1435 | } | ||
1436 | |||
1437 | irq_ptr->output_qs[i]=q; | ||
1438 | } | ||
1439 | |||
1440 | result=0; | ||
1441 | out: | ||
1442 | return result; | ||
1443 | } | ||
1444 | |||
/*
 * Initialize the already-allocated input and output queue structures of
 * irq_ptr: wipe the non-persistent part of each struct qdio_q, wire up
 * the SBAL pointers, the storage list (sl) in the slib page, the SLSB
 * states, the per-queue parameters (handler, mask, queue number, ...)
 * and bind the per-queue tasklets.  The 'flags' parameter is accepted
 * but not used here.
 */
static void
qdio_fill_qs(struct qdio_irq *irq_ptr, struct ccw_device *cdev,
	     int no_input_qs, int no_output_qs,
	     qdio_handler_t *input_handler,
	     qdio_handler_t *output_handler,
	     unsigned long int_parm,int q_format,
	     unsigned long flags,
	     void **inbound_sbals_array,
	     void **outbound_sbals_array)
{
	struct qdio_q *q;
	int i,j;
	char dbf_text[20]; /* see qdio_initialize */
	void *ptr;
	int available;

	sprintf(dbf_text,"qfqs%4x",cdev->private->irq);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	for (i=0;i<no_input_qs;i++) {
		q=irq_ptr->input_qs[i];

		/* zero everything up to (but excluding) the slib pointer,
		 * which was set up by qdio_alloc_qs and must survive */
		memset(q,0,((char*)&q->slib)-((char*)q));
		sprintf(dbf_text,"in-q%4x",i);
		QDIO_DBF_TEXT0(0,setup,dbf_text);
		QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));

		memset(q->slib,0,PAGE_SIZE);
		/* the storage list lives in the second half of the slib page */
		q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);

		available=0;

		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sbal[j]=*(inbound_sbals_array++);

		q->queue_type=q_format;
		q->int_parm=int_parm;
		q->irq=irq_ptr->irq;
		q->irq_ptr = irq_ptr;
		q->cdev = cdev;
		q->mask=1<<(31-i);
		q->q_no=i;
		q->is_input_q=1;
		q->first_to_check=0;
		q->last_move_ftc=0;
		q->handler=input_handler;
		q->dev_st_chg_ind=irq_ptr->dev_st_chg_ind;

		q->tasklet.data=(unsigned long)q;
		/* q->is_thinint_q isn't valid at this time, but
		 * irq_ptr->is_thinint_irq is */
		q->tasklet.func=(void(*)(unsigned long))
			((irq_ptr->is_thinint_irq)?&tiqdio_inbound_processing:
			 &qdio_inbound_processing);

		/* actually this is not used for inbound queues. yet. */
		atomic_set(&q->busy_siga_counter,0);
		q->timing.busy_start=0;

/*		for (j=0;j<QDIO_STATS_NUMBER;j++)
			q->timing.last_transfer_times[j]=(qdio_get_micros()/
							  QDIO_STATS_NUMBER)*j;
		q->timing.last_transfer_index=QDIO_STATS_NUMBER-1;
*/

		/* fill in slib */
		if (i>0) irq_ptr->input_qs[i-1]->slib->nsliba=
				 (unsigned long)(q->slib);
		q->slib->sla=(unsigned long)(q->sl);
		q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);

		/* fill in sl */
		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);

		QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
		ptr=(void*)q->sl;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)&q->slsb;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)q->sbal[0];
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));

		/* fill in slsb */
		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) {
			set_slsb(&q->slsb.acc.val[j],
				 SLSB_P_INPUT_NOT_INIT);
/*			q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/
		}
	}

	for (i=0;i<no_output_qs;i++) {
		q=irq_ptr->output_qs[i];
		memset(q,0,((char*)&q->slib)-((char*)q));

		sprintf(dbf_text,"outq%4x",i);
		QDIO_DBF_TEXT0(0,setup,dbf_text);
		QDIO_DBF_HEX0(0,setup,&q,sizeof(void*));

		memset(q->slib,0,PAGE_SIZE);
		q->sl=(struct sl*)(((char*)q->slib)+PAGE_SIZE/2);

		available=0;

		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sbal[j]=*(outbound_sbals_array++);

		q->queue_type=q_format;
		q->int_parm=int_parm;
		q->is_input_q=0;
		q->irq=irq_ptr->irq;
		q->cdev = cdev;
		q->irq_ptr = irq_ptr;
		q->mask=1<<(31-i);
		q->q_no=i;
		q->first_to_check=0;
		q->last_move_ftc=0;
		q->handler=output_handler;

		q->tasklet.data=(unsigned long)q;
		q->tasklet.func=(void(*)(unsigned long))
			&qdio_outbound_processing;

		atomic_set(&q->busy_siga_counter,0);
		q->timing.busy_start=0;

		/* fill in slib */
		if (i>0) irq_ptr->output_qs[i-1]->slib->nsliba=
				 (unsigned long)(q->slib);
		q->slib->sla=(unsigned long)(q->sl);
		q->slib->slsba=(unsigned long)(&q->slsb.acc.val[0]);

		/* fill in sl */
		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++)
			q->sl->element[j].sbal=(unsigned long)(q->sbal[j]);

		QDIO_DBF_TEXT2(0,setup,"sl-sb-b0");
		ptr=(void*)q->sl;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)&q->slsb;
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));
		ptr=(void*)q->sbal[0];
		QDIO_DBF_HEX2(0,setup,&ptr,sizeof(void*));

		/* fill in slsb */
		for (j=0;j<QDIO_MAX_BUFFERS_PER_Q;j++) {
			set_slsb(&q->slsb.acc.val[j],
				 SLSB_P_OUTPUT_NOT_INIT);
/*			q->sbal[j]->element[1].sbalf.i1.key=QDIO_STORAGE_KEY;*/
		}
	}
}
1596 | |||
1597 | static void | ||
1598 | qdio_fill_thresholds(struct qdio_irq *irq_ptr, | ||
1599 | unsigned int no_input_qs, | ||
1600 | unsigned int no_output_qs, | ||
1601 | unsigned int min_input_threshold, | ||
1602 | unsigned int max_input_threshold, | ||
1603 | unsigned int min_output_threshold, | ||
1604 | unsigned int max_output_threshold) | ||
1605 | { | ||
1606 | int i; | ||
1607 | struct qdio_q *q; | ||
1608 | |||
1609 | for (i=0;i<no_input_qs;i++) { | ||
1610 | q=irq_ptr->input_qs[i]; | ||
1611 | q->timing.threshold=max_input_threshold; | ||
1612 | /* for (j=0;j<QDIO_STATS_CLASSES;j++) { | ||
1613 | q->threshold_classes[j].threshold= | ||
1614 | min_input_threshold+ | ||
1615 | (max_input_threshold-min_input_threshold)/ | ||
1616 | QDIO_STATS_CLASSES; | ||
1617 | } | ||
1618 | qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/ | ||
1619 | } | ||
1620 | for (i=0;i<no_output_qs;i++) { | ||
1621 | q=irq_ptr->output_qs[i]; | ||
1622 | q->timing.threshold=max_output_threshold; | ||
1623 | /* for (j=0;j<QDIO_STATS_CLASSES;j++) { | ||
1624 | q->threshold_classes[j].threshold= | ||
1625 | min_output_threshold+ | ||
1626 | (max_output_threshold-min_output_threshold)/ | ||
1627 | QDIO_STATS_CLASSES; | ||
1628 | } | ||
1629 | qdio_use_thresholds(q,QDIO_STATS_CLASSES/2);*/ | ||
1630 | } | ||
1631 | } | ||
1632 | |||
/*
 * Adapter-interruption (thin interrupt) entry point: optionally clears
 * the global summary via SVS (skipped when omit_svs is set), then walks
 * all thinint input queues.  Always returns 0.
 */
static int
tiqdio_thinint_handler(void)
{
	QDIO_DBF_TEXT4(0,trace,"thin_int");

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.thinints++;
	perf_stats.start_time_inbound=NOW;
#endif /* QDIO_PERFORMANCE_STATS */

	/* SVS only when needed:
	 * issue SVS to benefit from iqdio interrupt avoidance
	 * (SVS clears AISOI)*/
	if (!omit_svs)
		tiqdio_clear_global_summary();

	tiqdio_inbound_checks();
	return 0;
}
1652 | |||
1653 | static void | ||
1654 | qdio_set_state(struct qdio_irq *irq_ptr, enum qdio_irq_states state) | ||
1655 | { | ||
1656 | int i; | ||
1657 | #ifdef CONFIG_QDIO_DEBUG | ||
1658 | char dbf_text[15]; | ||
1659 | |||
1660 | QDIO_DBF_TEXT5(0,trace,"newstate"); | ||
1661 | sprintf(dbf_text,"%4x%4x",irq_ptr->irq,state); | ||
1662 | QDIO_DBF_TEXT5(0,trace,dbf_text); | ||
1663 | #endif /* CONFIG_QDIO_DEBUG */ | ||
1664 | |||
1665 | irq_ptr->state=state; | ||
1666 | for (i=0;i<irq_ptr->no_input_qs;i++) | ||
1667 | irq_ptr->input_qs[i]->state=state; | ||
1668 | for (i=0;i<irq_ptr->no_output_qs;i++) | ||
1669 | irq_ptr->output_qs[i]->state=state; | ||
1670 | mb(); | ||
1671 | } | ||
1672 | |||
1673 | static inline void | ||
1674 | qdio_irq_check_sense(int irq, struct irb *irb) | ||
1675 | { | ||
1676 | char dbf_text[15]; | ||
1677 | |||
1678 | if (irb->esw.esw0.erw.cons) { | ||
1679 | sprintf(dbf_text,"sens%4x",irq); | ||
1680 | QDIO_DBF_TEXT2(1,trace,dbf_text); | ||
1681 | QDIO_DBF_HEX0(0,sense,irb,QDIO_DBF_SENSE_LEN); | ||
1682 | |||
1683 | QDIO_PRINT_WARN("sense data available on qdio channel.\n"); | ||
1684 | HEXDUMP16(WARN,"irb: ",irb); | ||
1685 | HEXDUMP16(WARN,"sense data: ",irb->ecw); | ||
1686 | } | ||
1687 | |||
1688 | } | ||
1689 | |||
/*
 * Handle a PCI (program-controlled interruption): run inbound
 * processing on every input queue -- inline, or via tasklet when the
 * queue was set up with NO_INPUT_INTERRUPT_CONTEXT -- and, if the
 * adapter also raises PCIs for outbound queues, service those output
 * queues that are not yet done.
 */
static inline void
qdio_handle_pci(struct qdio_irq *irq_ptr)
{
	int i;
	struct qdio_q *q;

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.pcis++;
	perf_stats.start_time_inbound=NOW;
#endif /* QDIO_PERFORMANCE_STATS */
	for (i=0;i<irq_ptr->no_input_qs;i++) {
		q=irq_ptr->input_qs[i];
		if (q->is_input_q&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT)
			qdio_mark_q(q);
		else {
#ifdef QDIO_PERFORMANCE_STATS
			perf_stats.tl_runs--;
#endif /* QDIO_PERFORMANCE_STATS */
			__qdio_inbound_processing(q);
		}
	}
	if (!irq_ptr->hydra_gives_outbound_pcis)
		return;
	for (i=0;i<irq_ptr->no_output_qs;i++) {
		q=irq_ptr->output_qs[i];
#ifdef QDIO_PERFORMANCE_STATS
		perf_stats.tl_runs--;
#endif /* QDIO_PERFORMANCE_STATS */
		if (qdio_is_outbound_q_done(q))
			continue;
		/* sync before processing, unless outbound PCIs already did */
		if (!irq_ptr->sync_done_on_outb_pcis)
			SYNC_MEMORY;
		__qdio_outbound_processing(q);
	}
}
1725 | |||
1726 | static void qdio_establish_handle_irq(struct ccw_device*, int, int); | ||
1727 | |||
/*
 * A check condition arrived while the queues were active: log it,
 * notify the upper-layer driver through the first available queue's
 * handler, and stop the queues.
 */
static inline void
qdio_handle_activate_check(struct ccw_device *cdev, unsigned long intparm,
			   int cstat, int dstat)
{
	struct qdio_irq *irq_ptr;
	struct qdio_q *q;
	char dbf_text[15];

	irq_ptr = cdev->private->qdio_data;

	QDIO_DBF_TEXT2(1, trace, "ick2");
	sprintf(dbf_text,"%s", cdev->dev.bus_id);
	QDIO_DBF_TEXT2(1,trace,dbf_text);
	QDIO_DBF_HEX2(0,trace,&intparm,sizeof(int));
	QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
	QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
	QDIO_PRINT_ERR("received check condition on activate " \
		"queues on device %s (cs=x%x, ds=x%x).\n",
		cdev->dev.bus_id, cstat, dstat);
	/* Any queue will do for notifying the driver; prefer input. */
	if (irq_ptr->no_input_qs) {
		q=irq_ptr->input_qs[0];
	} else if (irq_ptr->no_output_qs) {
		q=irq_ptr->output_qs[0];
	} else {
		QDIO_PRINT_ERR("oops... no queue registered for device %s!?\n",
			       cdev->dev.bus_id);
		goto omit_handler_call;
	}
	q->handler(q->cdev,QDIO_STATUS_ACTIVATE_CHECK_CONDITION|
		   QDIO_STATUS_LOOK_FOR_ERROR,
		   0,0,0,-1,-1,q->int_parm);
omit_handler_call:
	/* Stop the queues regardless of whether a handler was called. */
	qdio_set_state(irq_ptr,QDIO_IRQ_STATE_STOPPED);

}
1763 | |||
1764 | static void | ||
1765 | qdio_call_shutdown(void *data) | ||
1766 | { | ||
1767 | struct ccw_device *cdev; | ||
1768 | |||
1769 | cdev = (struct ccw_device *)data; | ||
1770 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); | ||
1771 | put_device(&cdev->dev); | ||
1772 | } | ||
1773 | |||
/*
 * Timeout on a ccw request: react depending on which qdio state the
 * irq was in when the timer fired, then clear the timeout and wake
 * any waiters.
 */
static void
qdio_timeout_handler(struct ccw_device *cdev)
{
	struct qdio_irq *irq_ptr;
	char dbf_text[15];

	QDIO_DBF_TEXT2(0, trace, "qtoh");
	sprintf(dbf_text, "%s", cdev->dev.bus_id);
	QDIO_DBF_TEXT2(0, trace, dbf_text);

	irq_ptr = cdev->private->qdio_data;
	sprintf(dbf_text, "state:%d", irq_ptr->state);
	QDIO_DBF_TEXT2(0, trace, dbf_text);

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		/* Establish did not complete in time. */
		QDIO_PRINT_ERR("establish queues on irq %04x: timed out\n",
			       irq_ptr->irq);
		QDIO_DBF_TEXT2(1,setup,"eq:timeo");
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_CLEANUP:
		/* Halt/clear during shutdown did not complete in time. */
		QDIO_PRINT_INFO("Did not get interrupt on cleanup, irq=0x%x.\n",
				irq_ptr->irq);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		break;
	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		/* I/O has been terminated by common I/O layer. */
		QDIO_PRINT_INFO("Queues on irq %04x killed by cio.\n",
				irq_ptr->irq);
		QDIO_DBF_TEXT2(1, trace, "cio:term");
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_STOPPED);
		if (get_device(&cdev->dev)) {
			/* Can't call shutdown from interrupt context. */
			PREPARE_WORK(&cdev->private->kick_work,
				     qdio_call_shutdown, (void *)cdev);
			queue_work(ccw_device_work, &cdev->private->kick_work);
		}
		break;
	default:
		BUG();
	}
	ccw_device_set_timeout(cdev, 0);
	wake_up(&cdev->private->wait_q);
}
1820 | |||
/*
 * Main interrupt handler installed on the ccw device while qdio owns
 * it. Validates the interrupt, routes errors and timeouts, and
 * dispatches on the current qdio irq state.
 */
static void
qdio_handler(struct ccw_device *cdev, unsigned long intparm, struct irb *irb)
{
	struct qdio_irq *irq_ptr;
	int cstat,dstat;
	char dbf_text[15];

#ifdef CONFIG_QDIO_DEBUG
	QDIO_DBF_TEXT4(0, trace, "qint");
	sprintf(dbf_text, "%s", cdev->dev.bus_id);
	QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	if (!intparm) {
		/* No interruption parameter -> not one of ours. */
		QDIO_PRINT_ERR("got unsolicited interrupt in qdio " \
			       "handler, device %s\n", cdev->dev.bus_id);
		return;
	}

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr) {
		QDIO_DBF_TEXT2(1, trace, "uint");
		sprintf(dbf_text,"%s", cdev->dev.bus_id);
		QDIO_DBF_TEXT2(1,trace,dbf_text);
		QDIO_PRINT_ERR("received interrupt on unused device %s!\n",
			       cdev->dev.bus_id);
		return;
	}

	if (IS_ERR(irb)) {
		/* Currently running i/o is in error. */
		switch (PTR_ERR(irb)) {
		case -EIO:
			QDIO_PRINT_ERR("i/o error on device %s\n",
				       cdev->dev.bus_id);
			return;
		case -ETIMEDOUT:
			qdio_timeout_handler(cdev);
			return;
		default:
			QDIO_PRINT_ERR("unknown error state %ld on device %s\n",
				       PTR_ERR(irb), cdev->dev.bus_id);
			return;
		}
	}

	qdio_irq_check_sense(irq_ptr->irq, irb);

#ifdef CONFIG_QDIO_DEBUG
	sprintf(dbf_text, "state:%d", irq_ptr->state);
	QDIO_DBF_TEXT4(0, trace, dbf_text);
#endif /* CONFIG_QDIO_DEBUG */

	cstat = irb->scsw.cstat;
	dstat = irb->scsw.dstat;

	switch (irq_ptr->state) {
	case QDIO_IRQ_STATE_INACTIVE:
		/* Interrupt answering the establish ccw. */
		qdio_establish_handle_irq(cdev, cstat, dstat);
		break;

	case QDIO_IRQ_STATE_CLEANUP:
		/* Halt/clear completed; shutdown can finish. */
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE);
		break;

	case QDIO_IRQ_STATE_ESTABLISHED:
	case QDIO_IRQ_STATE_ACTIVE:
		if (cstat & SCHN_STAT_PCI) {
			qdio_handle_pci(irq_ptr);
			break;
		}

		if ((cstat&~SCHN_STAT_PCI)||dstat) {
			qdio_handle_activate_check(cdev, intparm, cstat, dstat);
			break;
		}
		/* Neither PCI nor error status: fall through and
		 * complain about the unexpected interrupt. */
	default:
		QDIO_PRINT_ERR("got interrupt for queues in state %d on " \
			       "device %s?!\n",
			       irq_ptr->state, cdev->dev.bus_id);
	}
	wake_up(&cdev->private->wait_q);

}
1905 | |||
/*
 * Issue a SIGA-sync on the given input or output queue of the device,
 * depending on flags. Returns the SIGA condition code, -ENODEV if the
 * device is not set up for qdio, or -EINVAL for bad flags/queue.
 */
int
qdio_synchronize(struct ccw_device *cdev, unsigned int flags,
		 unsigned int queue_number)
{
	int cc;
	struct qdio_q *q;
	struct qdio_irq *irq_ptr;
	void *ptr;
#ifdef CONFIG_QDIO_DEBUG
	char dbf_text[15]="SyncXXXX";
#endif

	irq_ptr = cdev->private->qdio_data;
	if (!irq_ptr)
		return -ENODEV;

#ifdef CONFIG_QDIO_DEBUG
	/* Patch irq number, then flags/queue number, into the text
	 * buffer; the buffer is logged twice with different content. */
	*((int*)(&dbf_text[4])) = irq_ptr->irq;
	QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
	*((int*)(&dbf_text[0]))=flags;
	*((int*)(&dbf_text[4]))=queue_number;
	QDIO_DBF_HEX4(0,trace,dbf_text,QDIO_DBF_TRACE_LEN);
#endif /* CONFIG_QDIO_DEBUG */

	if (flags&QDIO_FLAG_SYNC_INPUT) {
		q=irq_ptr->input_qs[queue_number];
		if (!q)
			return -EINVAL;
		cc = do_siga_sync(q->irq, 0, q->mask);
	} else if (flags&QDIO_FLAG_SYNC_OUTPUT) {
		q=irq_ptr->output_qs[queue_number];
		if (!q)
			return -EINVAL;
		cc = do_siga_sync(q->irq, q->mask, 0);
	} else
		return -EINVAL;

	/* Log a nonzero condition code. */
	ptr=&cc;
	if (cc)
		QDIO_DBF_HEX3(0,trace,&ptr,sizeof(int));

	return cc;
}
1949 | |||
1950 | static unsigned char | ||
1951 | qdio_check_siga_needs(int sch) | ||
1952 | { | ||
1953 | int result; | ||
1954 | unsigned char qdioac; | ||
1955 | |||
1956 | struct { | ||
1957 | struct chsc_header request; | ||
1958 | u16 reserved1; | ||
1959 | u16 first_sch; | ||
1960 | u16 reserved2; | ||
1961 | u16 last_sch; | ||
1962 | u32 reserved3; | ||
1963 | struct chsc_header response; | ||
1964 | u32 reserved4; | ||
1965 | u8 flags; | ||
1966 | u8 reserved5; | ||
1967 | u16 sch; | ||
1968 | u8 qfmt; | ||
1969 | u8 reserved6; | ||
1970 | u8 qdioac; | ||
1971 | u8 sch_class; | ||
1972 | u8 reserved7; | ||
1973 | u8 icnt; | ||
1974 | u8 reserved8; | ||
1975 | u8 ocnt; | ||
1976 | } *ssqd_area; | ||
1977 | |||
1978 | ssqd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
1979 | if (!ssqd_area) { | ||
1980 | QDIO_PRINT_WARN("Could not get memory for chsc. Using all " \ | ||
1981 | "SIGAs for sch x%x.\n", sch); | ||
1982 | return CHSC_FLAG_SIGA_INPUT_NECESSARY || | ||
1983 | CHSC_FLAG_SIGA_OUTPUT_NECESSARY || | ||
1984 | CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ | ||
1985 | } | ||
1986 | ssqd_area->request = (struct chsc_header) { | ||
1987 | .length = 0x0010, | ||
1988 | .code = 0x0024, | ||
1989 | }; | ||
1990 | |||
1991 | ssqd_area->first_sch = sch; | ||
1992 | ssqd_area->last_sch = sch; | ||
1993 | |||
1994 | result=chsc(ssqd_area); | ||
1995 | |||
1996 | if (result) { | ||
1997 | QDIO_PRINT_WARN("CHSC returned cc %i. Using all " \ | ||
1998 | "SIGAs for sch x%x.\n", | ||
1999 | result,sch); | ||
2000 | qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || | ||
2001 | CHSC_FLAG_SIGA_OUTPUT_NECESSARY || | ||
2002 | CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ | ||
2003 | goto out; | ||
2004 | } | ||
2005 | |||
2006 | if (ssqd_area->response.code != QDIO_CHSC_RESPONSE_CODE_OK) { | ||
2007 | QDIO_PRINT_WARN("response upon checking SIGA needs " \ | ||
2008 | "is 0x%x. Using all SIGAs for sch x%x.\n", | ||
2009 | ssqd_area->response.code, sch); | ||
2010 | qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY || | ||
2011 | CHSC_FLAG_SIGA_OUTPUT_NECESSARY || | ||
2012 | CHSC_FLAG_SIGA_SYNC_NECESSARY; /* all flags set */ | ||
2013 | goto out; | ||
2014 | } | ||
2015 | if (!(ssqd_area->flags & CHSC_FLAG_QDIO_CAPABILITY) || | ||
2016 | !(ssqd_area->flags & CHSC_FLAG_VALIDITY) || | ||
2017 | (ssqd_area->sch != sch)) { | ||
2018 | QDIO_PRINT_WARN("huh? problems checking out sch x%x... " \ | ||
2019 | "using all SIGAs.\n",sch); | ||
2020 | qdioac = CHSC_FLAG_SIGA_INPUT_NECESSARY | | ||
2021 | CHSC_FLAG_SIGA_OUTPUT_NECESSARY | | ||
2022 | CHSC_FLAG_SIGA_SYNC_NECESSARY; /* worst case */ | ||
2023 | goto out; | ||
2024 | } | ||
2025 | |||
2026 | qdioac = ssqd_area->qdioac; | ||
2027 | out: | ||
2028 | free_page ((unsigned long) ssqd_area); | ||
2029 | return qdioac; | ||
2030 | } | ||
2031 | |||
/*
 * Check whether the channel-subsystem characteristics needed for thin
 * interrupts are available, and record whether OSA/FCP thin interrupts
 * and the aif time-delay-disablement facility are present (globals
 * hydra_thinints and omit_svs). Returns 0 on success, -EIO/-ENOENT if
 * a required facility is missing.
 */
static unsigned int
tiqdio_check_chsc_availability(void)
{
	char dbf_text[15];

	if (!css_characteristics_avail)
		return -EIO;

	/* Check for bit 41. */
	if (!css_general_characteristics.aif) {
		QDIO_PRINT_WARN("Adapter interruption facility not " \
				"installed.\n");
		return -ENOENT;
	}

	/* Check for bits 107 and 108. */
	if (!css_chsc_characteristics.scssc ||
	    !css_chsc_characteristics.scsscf) {
		QDIO_PRINT_WARN("Set Chan Subsys. Char. & Fast-CHSCs " \
				"not available.\n");
		return -ENOENT;
	}

	/* Check for OSA/FCP thin interrupts (bit 67). */
	hydra_thinints = css_general_characteristics.aif_osa;
	sprintf(dbf_text,"hydrati%1x", hydra_thinints);
	QDIO_DBF_TEXT0(0,setup,dbf_text);

	/* Check for aif time delay disablement fac (bit 56). If installed,
	 * omit svs even under lpar (good point by rick again) */
	omit_svs = css_general_characteristics.aif_tdd;
	sprintf(dbf_text,"omitsvs%1x", omit_svs);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	return 0;
}
2067 | |||
2068 | |||
2069 | static unsigned int | ||
2070 | tiqdio_set_subchannel_ind(struct qdio_irq *irq_ptr, int reset_to_zero) | ||
2071 | { | ||
2072 | unsigned long real_addr_local_summary_bit; | ||
2073 | unsigned long real_addr_dev_st_chg_ind; | ||
2074 | void *ptr; | ||
2075 | char dbf_text[15]; | ||
2076 | |||
2077 | unsigned int resp_code; | ||
2078 | int result; | ||
2079 | |||
2080 | struct { | ||
2081 | struct chsc_header request; | ||
2082 | u16 operation_code; | ||
2083 | u16 reserved1; | ||
2084 | u32 reserved2; | ||
2085 | u32 reserved3; | ||
2086 | u64 summary_indicator_addr; | ||
2087 | u64 subchannel_indicator_addr; | ||
2088 | u32 ks:4; | ||
2089 | u32 kc:4; | ||
2090 | u32 reserved4:21; | ||
2091 | u32 isc:3; | ||
2092 | u32 word_with_d_bit; | ||
2093 | /* set to 0x10000000 to enable | ||
2094 | * time delay disablement facility */ | ||
2095 | u32 reserved5; | ||
2096 | u32 subsystem_id; | ||
2097 | u32 reserved6[1004]; | ||
2098 | struct chsc_header response; | ||
2099 | u32 reserved7; | ||
2100 | } *scssc_area; | ||
2101 | |||
2102 | if (!irq_ptr->is_thinint_irq) | ||
2103 | return -ENODEV; | ||
2104 | |||
2105 | if (reset_to_zero) { | ||
2106 | real_addr_local_summary_bit=0; | ||
2107 | real_addr_dev_st_chg_ind=0; | ||
2108 | } else { | ||
2109 | real_addr_local_summary_bit= | ||
2110 | virt_to_phys((volatile void *)indicators); | ||
2111 | real_addr_dev_st_chg_ind= | ||
2112 | virt_to_phys((volatile void *)irq_ptr->dev_st_chg_ind); | ||
2113 | } | ||
2114 | |||
2115 | scssc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
2116 | if (!scssc_area) { | ||
2117 | QDIO_PRINT_WARN("No memory for setting indicators on " \ | ||
2118 | "subchannel x%x.\n", irq_ptr->irq); | ||
2119 | return -ENOMEM; | ||
2120 | } | ||
2121 | scssc_area->request = (struct chsc_header) { | ||
2122 | .length = 0x0fe0, | ||
2123 | .code = 0x0021, | ||
2124 | }; | ||
2125 | scssc_area->operation_code = 0; | ||
2126 | |||
2127 | scssc_area->summary_indicator_addr = real_addr_local_summary_bit; | ||
2128 | scssc_area->subchannel_indicator_addr = real_addr_dev_st_chg_ind; | ||
2129 | scssc_area->ks = QDIO_STORAGE_KEY; | ||
2130 | scssc_area->kc = QDIO_STORAGE_KEY; | ||
2131 | scssc_area->isc = TIQDIO_THININT_ISC; | ||
2132 | scssc_area->subsystem_id = (1<<16) + irq_ptr->irq; | ||
2133 | /* enables the time delay disablement facility. Don't care | ||
2134 | * whether it is really there (i.e. we haven't checked for | ||
2135 | * it) */ | ||
2136 | if (css_general_characteristics.aif_tdd) | ||
2137 | scssc_area->word_with_d_bit = 0x10000000; | ||
2138 | else | ||
2139 | QDIO_PRINT_WARN("Time delay disablement facility " \ | ||
2140 | "not available\n"); | ||
2141 | |||
2142 | |||
2143 | |||
2144 | result = chsc(scssc_area); | ||
2145 | if (result) { | ||
2146 | QDIO_PRINT_WARN("could not set indicators on irq x%x, " \ | ||
2147 | "cc=%i.\n",irq_ptr->irq,result); | ||
2148 | result = -EIO; | ||
2149 | goto out; | ||
2150 | } | ||
2151 | |||
2152 | resp_code = scssc_area->response.code; | ||
2153 | if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) { | ||
2154 | QDIO_PRINT_WARN("response upon setting indicators " \ | ||
2155 | "is 0x%x.\n",resp_code); | ||
2156 | sprintf(dbf_text,"sidR%4x",resp_code); | ||
2157 | QDIO_DBF_TEXT1(0,trace,dbf_text); | ||
2158 | QDIO_DBF_TEXT1(0,setup,dbf_text); | ||
2159 | ptr=&scssc_area->response; | ||
2160 | QDIO_DBF_HEX2(1,setup,&ptr,QDIO_DBF_SETUP_LEN); | ||
2161 | result = -EIO; | ||
2162 | goto out; | ||
2163 | } | ||
2164 | |||
2165 | QDIO_DBF_TEXT2(0,setup,"setscind"); | ||
2166 | QDIO_DBF_HEX2(0,setup,&real_addr_local_summary_bit, | ||
2167 | sizeof(unsigned long)); | ||
2168 | QDIO_DBF_HEX2(0,setup,&real_addr_dev_st_chg_ind,sizeof(unsigned long)); | ||
2169 | result = 0; | ||
2170 | out: | ||
2171 | free_page ((unsigned long) scssc_area); | ||
2172 | return result; | ||
2173 | |||
2174 | } | ||
2175 | |||
/*
 * Program the adapter's interrupt delay target via CHSC. Failures of
 * the response code are logged but not treated as fatal (the target is
 * a tuning knob). Returns 0 on success or non-fatal response error,
 * -ENODEV/-ENOMEM/-EIO otherwise.
 */
static unsigned int
tiqdio_set_delay_target(struct qdio_irq *irq_ptr, unsigned long delay_target)
{
	unsigned int resp_code;
	int result;
	void *ptr;
	char dbf_text[15];

	/* CHSC set-chan-subsys-char-fast request/response area. */
	struct {
		struct chsc_header request;
		u16 operation_code;
		u16 reserved1;
		u32 reserved2;
		u32 reserved3;
		u32 reserved4[2];
		u32 delay_target;
		u32 reserved5[1009];
		struct chsc_header response;
		u32 reserved6;
	} *scsscf_area;

	if (!irq_ptr->is_thinint_irq)
		return -ENODEV;

	scsscf_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!scsscf_area) {
		QDIO_PRINT_WARN("No memory for setting delay target on " \
				"subchannel x%x.\n", irq_ptr->irq);
		return -ENOMEM;
	}
	scsscf_area->request = (struct chsc_header) {
		.length = 0x0fe0,
		.code   = 0x1027,
	};

	/* Target lives in the upper halfword. */
	scsscf_area->delay_target = delay_target<<16;

	result=chsc(scsscf_area);
	if (result) {
		QDIO_PRINT_WARN("could not set delay target on irq x%x, " \
				"cc=%i. Continuing.\n",irq_ptr->irq,result);
		result = -EIO;
		goto out;
	}

	resp_code = scsscf_area->response.code;
	if (resp_code!=QDIO_CHSC_RESPONSE_CODE_OK) {
		QDIO_PRINT_WARN("response upon setting delay target " \
				"is 0x%x. Continuing.\n",resp_code);
		sprintf(dbf_text,"sdtR%4x",resp_code);
		QDIO_DBF_TEXT1(0,trace,dbf_text);
		QDIO_DBF_TEXT1(0,setup,dbf_text);
		ptr=&scsscf_area->response;
		QDIO_DBF_HEX2(1,trace,&ptr,QDIO_DBF_TRACE_LEN);
	}
	QDIO_DBF_TEXT2(0,trace,"delytrgt");
	QDIO_DBF_HEX2(0,trace,&delay_target,sizeof(unsigned long));
	result = 0; /* not critical */
out:
	free_page ((unsigned long) scsscf_area);
	return result;
}
2238 | |||
2239 | int | ||
2240 | qdio_cleanup(struct ccw_device *cdev, int how) | ||
2241 | { | ||
2242 | struct qdio_irq *irq_ptr; | ||
2243 | char dbf_text[15]; | ||
2244 | int rc; | ||
2245 | |||
2246 | irq_ptr = cdev->private->qdio_data; | ||
2247 | if (!irq_ptr) | ||
2248 | return -ENODEV; | ||
2249 | |||
2250 | sprintf(dbf_text,"qcln%4x",irq_ptr->irq); | ||
2251 | QDIO_DBF_TEXT1(0,trace,dbf_text); | ||
2252 | QDIO_DBF_TEXT0(0,setup,dbf_text); | ||
2253 | |||
2254 | rc = qdio_shutdown(cdev, how); | ||
2255 | if ((rc == 0) || (rc == -EINPROGRESS)) | ||
2256 | rc = qdio_free(cdev); | ||
2257 | return rc; | ||
2258 | } | ||
2259 | |||
2260 | int | ||
2261 | qdio_shutdown(struct ccw_device *cdev, int how) | ||
2262 | { | ||
2263 | struct qdio_irq *irq_ptr; | ||
2264 | int i; | ||
2265 | int result = 0; | ||
2266 | int rc; | ||
2267 | unsigned long flags; | ||
2268 | int timeout; | ||
2269 | char dbf_text[15]; | ||
2270 | |||
2271 | irq_ptr = cdev->private->qdio_data; | ||
2272 | if (!irq_ptr) | ||
2273 | return -ENODEV; | ||
2274 | |||
2275 | down(&irq_ptr->setting_up_sema); | ||
2276 | |||
2277 | sprintf(dbf_text,"qsqs%4x",irq_ptr->irq); | ||
2278 | QDIO_DBF_TEXT1(0,trace,dbf_text); | ||
2279 | QDIO_DBF_TEXT0(0,setup,dbf_text); | ||
2280 | |||
2281 | /* mark all qs as uninteresting */ | ||
2282 | for (i=0;i<irq_ptr->no_input_qs;i++) | ||
2283 | atomic_set(&irq_ptr->input_qs[i]->is_in_shutdown,1); | ||
2284 | |||
2285 | for (i=0;i<irq_ptr->no_output_qs;i++) | ||
2286 | atomic_set(&irq_ptr->output_qs[i]->is_in_shutdown,1); | ||
2287 | |||
2288 | tasklet_kill(&tiqdio_tasklet); | ||
2289 | |||
2290 | for (i=0;i<irq_ptr->no_input_qs;i++) { | ||
2291 | qdio_unmark_q(irq_ptr->input_qs[i]); | ||
2292 | tasklet_kill(&irq_ptr->input_qs[i]->tasklet); | ||
2293 | wait_event_interruptible_timeout(cdev->private->wait_q, | ||
2294 | !atomic_read(&irq_ptr-> | ||
2295 | input_qs[i]-> | ||
2296 | use_count), | ||
2297 | QDIO_NO_USE_COUNT_TIMEOUT); | ||
2298 | if (atomic_read(&irq_ptr->input_qs[i]->use_count)) | ||
2299 | result=-EINPROGRESS; | ||
2300 | } | ||
2301 | |||
2302 | for (i=0;i<irq_ptr->no_output_qs;i++) { | ||
2303 | tasklet_kill(&irq_ptr->output_qs[i]->tasklet); | ||
2304 | wait_event_interruptible_timeout(cdev->private->wait_q, | ||
2305 | !atomic_read(&irq_ptr-> | ||
2306 | output_qs[i]-> | ||
2307 | use_count), | ||
2308 | QDIO_NO_USE_COUNT_TIMEOUT); | ||
2309 | if (atomic_read(&irq_ptr->output_qs[i]->use_count)) | ||
2310 | result=-EINPROGRESS; | ||
2311 | } | ||
2312 | |||
2313 | /* cleanup subchannel */ | ||
2314 | spin_lock_irqsave(get_ccwdev_lock(cdev),flags); | ||
2315 | if (how&QDIO_FLAG_CLEANUP_USING_CLEAR) { | ||
2316 | rc = ccw_device_clear(cdev, QDIO_DOING_CLEANUP); | ||
2317 | timeout=QDIO_CLEANUP_CLEAR_TIMEOUT; | ||
2318 | } else if (how&QDIO_FLAG_CLEANUP_USING_HALT) { | ||
2319 | rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); | ||
2320 | timeout=QDIO_CLEANUP_HALT_TIMEOUT; | ||
2321 | } else { /* default behaviour */ | ||
2322 | rc = ccw_device_halt(cdev, QDIO_DOING_CLEANUP); | ||
2323 | timeout=QDIO_CLEANUP_HALT_TIMEOUT; | ||
2324 | } | ||
2325 | if (rc == -ENODEV) { | ||
2326 | /* No need to wait for device no longer present. */ | ||
2327 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); | ||
2328 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | ||
2329 | } else if (((void *)cdev->handler != (void *)qdio_handler) && rc == 0) { | ||
2330 | /* | ||
2331 | * Whoever put another handler there, has to cope with the | ||
2332 | * interrupt theirself. Might happen if qdio_shutdown was | ||
2333 | * called on already shutdown queues, but this shouldn't have | ||
2334 | * bad side effects. | ||
2335 | */ | ||
2336 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); | ||
2337 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | ||
2338 | } else if (rc == 0) { | ||
2339 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_CLEANUP); | ||
2340 | ccw_device_set_timeout(cdev, timeout); | ||
2341 | spin_unlock_irqrestore(get_ccwdev_lock(cdev),flags); | ||
2342 | |||
2343 | wait_event(cdev->private->wait_q, | ||
2344 | irq_ptr->state == QDIO_IRQ_STATE_INACTIVE || | ||
2345 | irq_ptr->state == QDIO_IRQ_STATE_ERR); | ||
2346 | } else { | ||
2347 | QDIO_PRINT_INFO("ccw_device_{halt,clear} returned %d for " | ||
2348 | "device %s\n", result, cdev->dev.bus_id); | ||
2349 | spin_unlock_irqrestore(get_ccwdev_lock(cdev), flags); | ||
2350 | result = rc; | ||
2351 | goto out; | ||
2352 | } | ||
2353 | if (irq_ptr->is_thinint_irq) { | ||
2354 | qdio_put_indicator((__u32*)irq_ptr->dev_st_chg_ind); | ||
2355 | tiqdio_set_subchannel_ind(irq_ptr,1); | ||
2356 | /* reset adapter interrupt indicators */ | ||
2357 | } | ||
2358 | |||
2359 | /* exchange int handlers, if necessary */ | ||
2360 | if ((void*)cdev->handler == (void*)qdio_handler) | ||
2361 | cdev->handler=irq_ptr->original_int_handler; | ||
2362 | |||
2363 | /* Ignore errors. */ | ||
2364 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_INACTIVE); | ||
2365 | ccw_device_set_timeout(cdev, 0); | ||
2366 | out: | ||
2367 | up(&irq_ptr->setting_up_sema); | ||
2368 | return result; | ||
2369 | } | ||
2370 | |||
2371 | int | ||
2372 | qdio_free(struct ccw_device *cdev) | ||
2373 | { | ||
2374 | struct qdio_irq *irq_ptr; | ||
2375 | char dbf_text[15]; | ||
2376 | |||
2377 | irq_ptr = cdev->private->qdio_data; | ||
2378 | if (!irq_ptr) | ||
2379 | return -ENODEV; | ||
2380 | |||
2381 | down(&irq_ptr->setting_up_sema); | ||
2382 | |||
2383 | sprintf(dbf_text,"qfqs%4x",irq_ptr->irq); | ||
2384 | QDIO_DBF_TEXT1(0,trace,dbf_text); | ||
2385 | QDIO_DBF_TEXT0(0,setup,dbf_text); | ||
2386 | |||
2387 | cdev->private->qdio_data = 0; | ||
2388 | |||
2389 | up(&irq_ptr->setting_up_sema); | ||
2390 | |||
2391 | qdio_release_irq_memory(irq_ptr); | ||
2392 | module_put(THIS_MODULE); | ||
2393 | return 0; | ||
2394 | } | ||
2395 | |||
/*
 * Dump every field of the caller-supplied qdio_initialize block to the
 * setup debug feature, for post-mortem analysis of setup problems.
 */
static inline void
qdio_allocate_do_dbf(struct qdio_initialize *init_data)
{
	char dbf_text[20]; /* if a printf printed out more than 8 chars */

	sprintf(dbf_text,"qfmt:%x",init_data->q_format);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	QDIO_DBF_HEX0(0,setup,init_data->adapter_name,8);
	sprintf(dbf_text,"qpff%4x",init_data->qib_param_field_format);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	QDIO_DBF_HEX0(0,setup,&init_data->qib_param_field,sizeof(char*));
	QDIO_DBF_HEX0(0,setup,&init_data->input_slib_elements,sizeof(long*));
	QDIO_DBF_HEX0(0,setup,&init_data->output_slib_elements,sizeof(long*));
	sprintf(dbf_text,"miit%4x",init_data->min_input_threshold);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"mait%4x",init_data->max_input_threshold);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"miot%4x",init_data->min_output_threshold);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"maot%4x",init_data->max_output_threshold);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"niq:%4x",init_data->no_input_qs);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	sprintf(dbf_text,"noq:%4x",init_data->no_output_qs);
	QDIO_DBF_TEXT0(0,setup,dbf_text);
	QDIO_DBF_HEX0(0,setup,&init_data->input_handler,sizeof(void*));
	QDIO_DBF_HEX0(0,setup,&init_data->output_handler,sizeof(void*));
	QDIO_DBF_HEX0(0,setup,&init_data->int_parm,sizeof(long));
	QDIO_DBF_HEX0(0,setup,&init_data->flags,sizeof(long));
	QDIO_DBF_HEX0(0,setup,&init_data->input_sbal_addr_array,sizeof(void*));
	QDIO_DBF_HEX0(0,setup,&init_data->output_sbal_addr_array,sizeof(void*));
}
2428 | |||
2429 | static inline void | ||
2430 | qdio_allocate_fill_input_desc(struct qdio_irq *irq_ptr, int i, int iqfmt) | ||
2431 | { | ||
2432 | irq_ptr->input_qs[i]->is_iqdio_q = iqfmt; | ||
2433 | irq_ptr->input_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq; | ||
2434 | |||
2435 | irq_ptr->qdr->qdf0[i].sliba=(unsigned long)(irq_ptr->input_qs[i]->slib); | ||
2436 | |||
2437 | irq_ptr->qdr->qdf0[i].sla=(unsigned long)(irq_ptr->input_qs[i]->sl); | ||
2438 | |||
2439 | irq_ptr->qdr->qdf0[i].slsba= | ||
2440 | (unsigned long)(&irq_ptr->input_qs[i]->slsb.acc.val[0]); | ||
2441 | |||
2442 | irq_ptr->qdr->qdf0[i].akey=QDIO_STORAGE_KEY; | ||
2443 | irq_ptr->qdr->qdf0[i].bkey=QDIO_STORAGE_KEY; | ||
2444 | irq_ptr->qdr->qdf0[i].ckey=QDIO_STORAGE_KEY; | ||
2445 | irq_ptr->qdr->qdf0[i].dkey=QDIO_STORAGE_KEY; | ||
2446 | } | ||
2447 | |||
2448 | static inline void | ||
2449 | qdio_allocate_fill_output_desc(struct qdio_irq *irq_ptr, int i, | ||
2450 | int j, int iqfmt) | ||
2451 | { | ||
2452 | irq_ptr->output_qs[i]->is_iqdio_q = iqfmt; | ||
2453 | irq_ptr->output_qs[i]->is_thinint_q = irq_ptr->is_thinint_irq; | ||
2454 | |||
2455 | irq_ptr->qdr->qdf0[i+j].sliba=(unsigned long)(irq_ptr->output_qs[i]->slib); | ||
2456 | |||
2457 | irq_ptr->qdr->qdf0[i+j].sla=(unsigned long)(irq_ptr->output_qs[i]->sl); | ||
2458 | |||
2459 | irq_ptr->qdr->qdf0[i+j].slsba= | ||
2460 | (unsigned long)(&irq_ptr->output_qs[i]->slsb.acc.val[0]); | ||
2461 | |||
2462 | irq_ptr->qdr->qdf0[i+j].akey=QDIO_STORAGE_KEY; | ||
2463 | irq_ptr->qdr->qdf0[i+j].bkey=QDIO_STORAGE_KEY; | ||
2464 | irq_ptr->qdr->qdf0[i+j].ckey=QDIO_STORAGE_KEY; | ||
2465 | irq_ptr->qdr->qdf0[i+j].dkey=QDIO_STORAGE_KEY; | ||
2466 | } | ||
2467 | |||
2468 | |||
2469 | static inline void | ||
2470 | qdio_initialize_set_siga_flags_input(struct qdio_irq *irq_ptr) | ||
2471 | { | ||
2472 | int i; | ||
2473 | |||
2474 | for (i=0;i<irq_ptr->no_input_qs;i++) { | ||
2475 | irq_ptr->input_qs[i]->siga_sync= | ||
2476 | irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY; | ||
2477 | irq_ptr->input_qs[i]->siga_in= | ||
2478 | irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY; | ||
2479 | irq_ptr->input_qs[i]->siga_out= | ||
2480 | irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY; | ||
2481 | irq_ptr->input_qs[i]->siga_sync_done_on_thinints= | ||
2482 | irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS; | ||
2483 | irq_ptr->input_qs[i]->hydra_gives_outbound_pcis= | ||
2484 | irq_ptr->hydra_gives_outbound_pcis; | ||
2485 | irq_ptr->input_qs[i]->siga_sync_done_on_outb_tis= | ||
2486 | ((irq_ptr->qdioac& | ||
2487 | (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| | ||
2488 | CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))== | ||
2489 | (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| | ||
2490 | CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS)); | ||
2491 | |||
2492 | } | ||
2493 | } | ||
2494 | |||
2495 | static inline void | ||
2496 | qdio_initialize_set_siga_flags_output(struct qdio_irq *irq_ptr) | ||
2497 | { | ||
2498 | int i; | ||
2499 | |||
2500 | for (i=0;i<irq_ptr->no_output_qs;i++) { | ||
2501 | irq_ptr->output_qs[i]->siga_sync= | ||
2502 | irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY; | ||
2503 | irq_ptr->output_qs[i]->siga_in= | ||
2504 | irq_ptr->qdioac&CHSC_FLAG_SIGA_INPUT_NECESSARY; | ||
2505 | irq_ptr->output_qs[i]->siga_out= | ||
2506 | irq_ptr->qdioac&CHSC_FLAG_SIGA_OUTPUT_NECESSARY; | ||
2507 | irq_ptr->output_qs[i]->siga_sync_done_on_thinints= | ||
2508 | irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS; | ||
2509 | irq_ptr->output_qs[i]->hydra_gives_outbound_pcis= | ||
2510 | irq_ptr->hydra_gives_outbound_pcis; | ||
2511 | irq_ptr->output_qs[i]->siga_sync_done_on_outb_tis= | ||
2512 | ((irq_ptr->qdioac& | ||
2513 | (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| | ||
2514 | CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS))== | ||
2515 | (CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS| | ||
2516 | CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS)); | ||
2517 | |||
2518 | } | ||
2519 | } | ||
2520 | |||
/*
 * Inspect the channel/device status of the establish interrupt.
 * Logs problems, moves the irq to the error state, and returns
 * non-zero if the establish must be considered failed.
 */
static inline int
qdio_establish_irq_check_for_errors(struct ccw_device *cdev, int cstat,
				    int dstat)
{
	char dbf_text[15];
	struct qdio_irq *irq_ptr;

	irq_ptr = cdev->private->qdio_data;

	/* Any channel status, or device status beyond channel end /
	 * device end, is a check condition. */
	if (cstat || (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END))) {
		sprintf(dbf_text,"ick1%4x",irq_ptr->irq);
		QDIO_DBF_TEXT2(1,trace,dbf_text);
		QDIO_DBF_HEX2(0,trace,&dstat,sizeof(int));
		QDIO_DBF_HEX2(0,trace,&cstat,sizeof(int));
		QDIO_PRINT_ERR("received check condition on establish " \
			       "queues on irq 0x%x (cs=x%x, ds=x%x).\n",
			       irq_ptr->irq,cstat,dstat);
		qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ERR);
	}

	if (!(dstat & DEV_STAT_DEV_END)) {
		QDIO_DBF_TEXT2(1,setup,"eq:no de");
		QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
		QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
		QDIO_PRINT_ERR("establish queues on irq %04x: didn't get "
			       "device end: dstat=%02x, cstat=%02x\n",
			       irq_ptr->irq, dstat, cstat);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		return 1;
	}

	if (dstat & ~(DEV_STAT_CHN_END|DEV_STAT_DEV_END)) {
		QDIO_DBF_TEXT2(1,setup,"eq:badio");
		QDIO_DBF_HEX2(0,setup,&dstat, sizeof(dstat));
		QDIO_DBF_HEX2(0,setup,&cstat, sizeof(cstat));
		QDIO_PRINT_ERR("establish queues on irq %04x: got "
			       "the following devstat: dstat=%02x, "
			       "cstat=%02x\n",
			       irq_ptr->irq, dstat, cstat);
		qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ERR);
		return 1;
	}
	return 0;
}
2565 | |||
2566 | static void | ||
2567 | qdio_establish_handle_irq(struct ccw_device *cdev, int cstat, int dstat) | ||
2568 | { | ||
2569 | struct qdio_irq *irq_ptr; | ||
2570 | char dbf_text[15]; | ||
2571 | |||
2572 | irq_ptr = cdev->private->qdio_data; | ||
2573 | |||
2574 | sprintf(dbf_text,"qehi%4x",cdev->private->irq); | ||
2575 | QDIO_DBF_TEXT0(0,setup,dbf_text); | ||
2576 | QDIO_DBF_TEXT0(0,trace,dbf_text); | ||
2577 | |||
2578 | if (qdio_establish_irq_check_for_errors(cdev, cstat, dstat)) { | ||
2579 | ccw_device_set_timeout(cdev, 0); | ||
2580 | return; | ||
2581 | } | ||
2582 | |||
2583 | qdio_set_state(irq_ptr,QDIO_IRQ_STATE_ESTABLISHED); | ||
2584 | ccw_device_set_timeout(cdev, 0); | ||
2585 | } | ||
2586 | |||
2587 | int | ||
2588 | qdio_initialize(struct qdio_initialize *init_data) | ||
2589 | { | ||
2590 | int rc; | ||
2591 | char dbf_text[15]; | ||
2592 | |||
2593 | sprintf(dbf_text,"qini%4x",init_data->cdev->private->irq); | ||
2594 | QDIO_DBF_TEXT0(0,setup,dbf_text); | ||
2595 | QDIO_DBF_TEXT0(0,trace,dbf_text); | ||
2596 | |||
2597 | rc = qdio_allocate(init_data); | ||
2598 | if (rc == 0) { | ||
2599 | rc = qdio_establish(init_data); | ||
2600 | if (rc != 0) | ||
2601 | qdio_free(init_data->cdev); | ||
2602 | } | ||
2603 | |||
2604 | return rc; | ||
2605 | } | ||
2606 | |||
2607 | |||
2608 | int | ||
2609 | qdio_allocate(struct qdio_initialize *init_data) | ||
2610 | { | ||
2611 | struct qdio_irq *irq_ptr; | ||
2612 | char dbf_text[15]; | ||
2613 | |||
2614 | sprintf(dbf_text,"qalc%4x",init_data->cdev->private->irq); | ||
2615 | QDIO_DBF_TEXT0(0,setup,dbf_text); | ||
2616 | QDIO_DBF_TEXT0(0,trace,dbf_text); | ||
2617 | if ( (init_data->no_input_qs>QDIO_MAX_QUEUES_PER_IRQ) || | ||
2618 | (init_data->no_output_qs>QDIO_MAX_QUEUES_PER_IRQ) || | ||
2619 | ((init_data->no_input_qs) && (!init_data->input_handler)) || | ||
2620 | ((init_data->no_output_qs) && (!init_data->output_handler)) ) | ||
2621 | return -EINVAL; | ||
2622 | |||
2623 | if (!init_data->input_sbal_addr_array) | ||
2624 | return -EINVAL; | ||
2625 | |||
2626 | if (!init_data->output_sbal_addr_array) | ||
2627 | return -EINVAL; | ||
2628 | |||
2629 | qdio_allocate_do_dbf(init_data); | ||
2630 | |||
2631 | /* create irq */ | ||
2632 | irq_ptr=kmalloc(sizeof(struct qdio_irq), GFP_KERNEL | GFP_DMA); | ||
2633 | |||
2634 | QDIO_DBF_TEXT0(0,setup,"irq_ptr:"); | ||
2635 | QDIO_DBF_HEX0(0,setup,&irq_ptr,sizeof(void*)); | ||
2636 | |||
2637 | if (!irq_ptr) { | ||
2638 | QDIO_PRINT_ERR("kmalloc of irq_ptr failed!\n"); | ||
2639 | return -ENOMEM; | ||
2640 | } | ||
2641 | |||
2642 | memset(irq_ptr,0,sizeof(struct qdio_irq)); | ||
2643 | |||
2644 | init_MUTEX(&irq_ptr->setting_up_sema); | ||
2645 | |||
2646 | /* QDR must be in DMA area since CCW data address is only 32 bit */ | ||
2647 | irq_ptr->qdr=kmalloc(sizeof(struct qdr), GFP_KERNEL | GFP_DMA); | ||
2648 | if (!(irq_ptr->qdr)) { | ||
2649 | kfree(irq_ptr); | ||
2650 | QDIO_PRINT_ERR("kmalloc of irq_ptr->qdr failed!\n"); | ||
2651 | return -ENOMEM; | ||
2652 | } | ||
2653 | QDIO_DBF_TEXT0(0,setup,"qdr:"); | ||
2654 | QDIO_DBF_HEX0(0,setup,&irq_ptr->qdr,sizeof(void*)); | ||
2655 | |||
2656 | if (qdio_alloc_qs(irq_ptr, | ||
2657 | init_data->no_input_qs, | ||
2658 | init_data->no_output_qs)) { | ||
2659 | qdio_release_irq_memory(irq_ptr); | ||
2660 | return -ENOMEM; | ||
2661 | } | ||
2662 | |||
2663 | init_data->cdev->private->qdio_data = irq_ptr; | ||
2664 | |||
2665 | qdio_set_state(irq_ptr,QDIO_IRQ_STATE_INACTIVE); | ||
2666 | |||
2667 | return 0; | ||
2668 | } | ||
2669 | |||
/*
 * Populate a previously allocated qdio_irq from the caller's
 * qdio_initialize data: queue structures, QDR, QIB, queue descriptors
 * and the CCW interrupt handler hook. Returns 0 on success, -ENOBUFS
 * if no thinint indicator is available, -EINVAL on module-get failure.
 */
int qdio_fill_irq(struct qdio_initialize *init_data)
{
	int i;
	char dbf_text[15];
	struct ciw *ciw;
	int is_iqdio;
	struct qdio_irq *irq_ptr;

	irq_ptr = init_data->cdev->private->qdio_data;

	/* clear everything up to (but not including) the qdr pointer,
	 * which was set by qdio_allocate() and must survive */
	memset(irq_ptr,0,((char*)&irq_ptr->qdr)-((char*)irq_ptr));

	/* wipes qib.ac, required by ar7063 */
	memset(irq_ptr->qdr,0,sizeof(struct qdr));

	irq_ptr->int_parm=init_data->int_parm;

	irq_ptr->irq = init_data->cdev->private->irq;
	irq_ptr->no_input_qs=init_data->no_input_qs;
	irq_ptr->no_output_qs=init_data->no_output_qs;

	/* IQDIO (HiperSockets) format always uses thin interrupts;
	 * otherwise it depends on the global hydra_thinints capability */
	if (init_data->q_format==QDIO_IQDIO_QFMT) {
		irq_ptr->is_iqdio_irq=1;
		irq_ptr->is_thinint_irq=1;
	} else {
		irq_ptr->is_iqdio_irq=0;
		irq_ptr->is_thinint_irq=hydra_thinints;
	}
	sprintf(dbf_text,"is_i_t%1x%1x",
		irq_ptr->is_iqdio_irq,irq_ptr->is_thinint_irq);
	QDIO_DBF_TEXT2(0,setup,dbf_text);

	/* thinint irqs need a device-state-change indicator slot */
	if (irq_ptr->is_thinint_irq) {
		irq_ptr->dev_st_chg_ind=qdio_get_indicator();
		QDIO_DBF_HEX1(0,setup,&irq_ptr->dev_st_chg_ind,sizeof(void*));
		if (!irq_ptr->dev_st_chg_ind) {
			QDIO_PRINT_WARN("no indicator location available " \
					"for irq 0x%x\n",irq_ptr->irq);
			qdio_release_irq_memory(irq_ptr);
			return -ENOBUFS;
		}
	}

	/* defaults */
	irq_ptr->equeue.cmd=DEFAULT_ESTABLISH_QS_CMD;
	irq_ptr->equeue.count=DEFAULT_ESTABLISH_QS_COUNT;
	irq_ptr->aqueue.cmd=DEFAULT_ACTIVATE_QS_CMD;
	irq_ptr->aqueue.count=DEFAULT_ACTIVATE_QS_COUNT;

	qdio_fill_qs(irq_ptr, init_data->cdev,
		     init_data->no_input_qs,
		     init_data->no_output_qs,
		     init_data->input_handler,
		     init_data->output_handler,init_data->int_parm,
		     init_data->q_format,init_data->flags,
		     init_data->input_sbal_addr_array,
		     init_data->output_sbal_addr_array);

	if (!try_module_get(THIS_MODULE)) {
		QDIO_PRINT_CRIT("try_module_get() failed!\n");
		qdio_release_irq_memory(irq_ptr);
		return -EINVAL;
	}

	qdio_fill_thresholds(irq_ptr,init_data->no_input_qs,
			     init_data->no_output_qs,
			     init_data->min_input_threshold,
			     init_data->max_input_threshold,
			     init_data->min_output_threshold,
			     init_data->max_output_threshold);

	/* fill in qdr */
	irq_ptr->qdr->qfmt=init_data->q_format;
	irq_ptr->qdr->iqdcnt=init_data->no_input_qs;
	irq_ptr->qdr->oqdcnt=init_data->no_output_qs;
	irq_ptr->qdr->iqdsz=sizeof(struct qdesfmt0)/4; /* size in words */
	irq_ptr->qdr->oqdsz=sizeof(struct qdesfmt0)/4;

	irq_ptr->qdr->qiba=(unsigned long)&irq_ptr->qib;
	irq_ptr->qdr->qkey=QDIO_STORAGE_KEY;

	/* fill in qib */
	irq_ptr->qib.qfmt=init_data->q_format;
	if (init_data->no_input_qs)
		irq_ptr->qib.isliba=(unsigned long)(irq_ptr->input_qs[0]->slib);
	if (init_data->no_output_qs)
		irq_ptr->qib.osliba=(unsigned long)(irq_ptr->output_qs[0]->slib);
	memcpy(irq_ptr->qib.ebcnam,init_data->adapter_name,8);

	qdio_set_impl_params(irq_ptr,init_data->qib_param_field_format,
			     init_data->qib_param_field,
			     init_data->no_input_qs,
			     init_data->no_output_qs,
			     init_data->input_slib_elements,
			     init_data->output_slib_elements);

	/* first input descriptors, then output descriptors */
	is_iqdio = (init_data->q_format == QDIO_IQDIO_QFMT) ? 1 : 0;
	for (i=0;i<init_data->no_input_qs;i++)
		qdio_allocate_fill_input_desc(irq_ptr, i, is_iqdio);

	for (i=0;i<init_data->no_output_qs;i++)
		qdio_allocate_fill_output_desc(irq_ptr, i,
					       init_data->no_input_qs,
					       is_iqdio);

	/* qdr, qib, sls, slsbs, slibs, sbales filled. */

	/* get qdio commands: prefer the device's CIWs, fall back to the
	 * defaults set above if a CIW is missing */
	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_EQUEUE);
	if (!ciw) {
		QDIO_DBF_TEXT2(1,setup,"no eq");
		QDIO_PRINT_INFO("No equeue CIW found for QDIO commands. "
				"Trying to use default.\n");
	} else
		irq_ptr->equeue = *ciw;
	ciw = ccw_device_get_ciw(init_data->cdev, CIW_TYPE_AQUEUE);
	if (!ciw) {
		QDIO_DBF_TEXT2(1,setup,"no aq");
		QDIO_PRINT_INFO("No aqueue CIW found for QDIO commands. "
				"Trying to use default.\n");
	} else
		irq_ptr->aqueue = *ciw;

	/* Set new interrupt handler. */
	irq_ptr->original_int_handler = init_data->cdev->handler;
	init_data->cdev->handler = qdio_handler;

	return 0;
}
2800 | |||
2801 | int | ||
2802 | qdio_establish(struct qdio_initialize *init_data) | ||
2803 | { | ||
2804 | struct qdio_irq *irq_ptr; | ||
2805 | unsigned long saveflags; | ||
2806 | int result, result2; | ||
2807 | struct ccw_device *cdev; | ||
2808 | char dbf_text[20]; | ||
2809 | |||
2810 | cdev=init_data->cdev; | ||
2811 | irq_ptr = cdev->private->qdio_data; | ||
2812 | if (!irq_ptr) | ||
2813 | return -EINVAL; | ||
2814 | |||
2815 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
2816 | return -EINVAL; | ||
2817 | |||
2818 | down(&irq_ptr->setting_up_sema); | ||
2819 | |||
2820 | qdio_fill_irq(init_data); | ||
2821 | |||
2822 | /* the thinint CHSC stuff */ | ||
2823 | if (irq_ptr->is_thinint_irq) { | ||
2824 | |||
2825 | result = tiqdio_set_subchannel_ind(irq_ptr,0); | ||
2826 | if (result) { | ||
2827 | up(&irq_ptr->setting_up_sema); | ||
2828 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); | ||
2829 | return result; | ||
2830 | } | ||
2831 | tiqdio_set_delay_target(irq_ptr,TIQDIO_DELAY_TARGET); | ||
2832 | } | ||
2833 | |||
2834 | sprintf(dbf_text,"qest%4x",cdev->private->irq); | ||
2835 | QDIO_DBF_TEXT0(0,setup,dbf_text); | ||
2836 | QDIO_DBF_TEXT0(0,trace,dbf_text); | ||
2837 | |||
2838 | /* establish q */ | ||
2839 | irq_ptr->ccw.cmd_code=irq_ptr->equeue.cmd; | ||
2840 | irq_ptr->ccw.flags=CCW_FLAG_SLI; | ||
2841 | irq_ptr->ccw.count=irq_ptr->equeue.count; | ||
2842 | irq_ptr->ccw.cda=QDIO_GET_ADDR(irq_ptr->qdr); | ||
2843 | |||
2844 | spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); | ||
2845 | |||
2846 | ccw_device_set_options(cdev, 0); | ||
2847 | result=ccw_device_start_timeout(cdev,&irq_ptr->ccw, | ||
2848 | QDIO_DOING_ESTABLISH,0, 0, | ||
2849 | QDIO_ESTABLISH_TIMEOUT); | ||
2850 | if (result) { | ||
2851 | result2=ccw_device_start_timeout(cdev,&irq_ptr->ccw, | ||
2852 | QDIO_DOING_ESTABLISH,0,0, | ||
2853 | QDIO_ESTABLISH_TIMEOUT); | ||
2854 | sprintf(dbf_text,"eq:io%4x",result); | ||
2855 | QDIO_DBF_TEXT2(1,setup,dbf_text); | ||
2856 | if (result2) { | ||
2857 | sprintf(dbf_text,"eq:io%4x",result); | ||
2858 | QDIO_DBF_TEXT2(1,setup,dbf_text); | ||
2859 | } | ||
2860 | QDIO_PRINT_WARN("establish queues on irq %04x: do_IO " \ | ||
2861 | "returned %i, next try returned %i\n", | ||
2862 | irq_ptr->irq,result,result2); | ||
2863 | result=result2; | ||
2864 | if (result) | ||
2865 | ccw_device_set_timeout(cdev, 0); | ||
2866 | } | ||
2867 | |||
2868 | spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); | ||
2869 | |||
2870 | if (result) { | ||
2871 | up(&irq_ptr->setting_up_sema); | ||
2872 | qdio_shutdown(cdev,QDIO_FLAG_CLEANUP_USING_CLEAR); | ||
2873 | return result; | ||
2874 | } | ||
2875 | |||
2876 | wait_event_interruptible_timeout(cdev->private->wait_q, | ||
2877 | irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED || | ||
2878 | irq_ptr->state == QDIO_IRQ_STATE_ERR, | ||
2879 | QDIO_ESTABLISH_TIMEOUT); | ||
2880 | |||
2881 | if (irq_ptr->state == QDIO_IRQ_STATE_ESTABLISHED) | ||
2882 | result = 0; | ||
2883 | else { | ||
2884 | up(&irq_ptr->setting_up_sema); | ||
2885 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); | ||
2886 | return -EIO; | ||
2887 | } | ||
2888 | |||
2889 | irq_ptr->qdioac=qdio_check_siga_needs(irq_ptr->irq); | ||
2890 | /* if this gets set once, we're running under VM and can omit SVSes */ | ||
2891 | if (irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_NECESSARY) | ||
2892 | omit_svs=1; | ||
2893 | |||
2894 | sprintf(dbf_text,"qdioac%2x",irq_ptr->qdioac); | ||
2895 | QDIO_DBF_TEXT2(0,setup,dbf_text); | ||
2896 | |||
2897 | sprintf(dbf_text,"qib ac%2x",irq_ptr->qib.ac); | ||
2898 | QDIO_DBF_TEXT2(0,setup,dbf_text); | ||
2899 | |||
2900 | irq_ptr->hydra_gives_outbound_pcis= | ||
2901 | irq_ptr->qib.ac&QIB_AC_OUTBOUND_PCI_SUPPORTED; | ||
2902 | irq_ptr->sync_done_on_outb_pcis= | ||
2903 | irq_ptr->qdioac&CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS; | ||
2904 | |||
2905 | qdio_initialize_set_siga_flags_input(irq_ptr); | ||
2906 | qdio_initialize_set_siga_flags_output(irq_ptr); | ||
2907 | |||
2908 | up(&irq_ptr->setting_up_sema); | ||
2909 | |||
2910 | return result; | ||
2911 | |||
2912 | } | ||
2913 | |||
2914 | int | ||
2915 | qdio_activate(struct ccw_device *cdev, int flags) | ||
2916 | { | ||
2917 | struct qdio_irq *irq_ptr; | ||
2918 | int i,result=0,result2; | ||
2919 | unsigned long saveflags; | ||
2920 | char dbf_text[20]; /* see qdio_initialize */ | ||
2921 | |||
2922 | irq_ptr = cdev->private->qdio_data; | ||
2923 | if (!irq_ptr) | ||
2924 | return -ENODEV; | ||
2925 | |||
2926 | if (cdev->private->state != DEV_STATE_ONLINE) | ||
2927 | return -EINVAL; | ||
2928 | |||
2929 | down(&irq_ptr->setting_up_sema); | ||
2930 | if (irq_ptr->state==QDIO_IRQ_STATE_INACTIVE) { | ||
2931 | result=-EBUSY; | ||
2932 | goto out; | ||
2933 | } | ||
2934 | |||
2935 | sprintf(dbf_text,"qact%4x", irq_ptr->irq); | ||
2936 | QDIO_DBF_TEXT2(0,setup,dbf_text); | ||
2937 | QDIO_DBF_TEXT2(0,trace,dbf_text); | ||
2938 | |||
2939 | /* activate q */ | ||
2940 | irq_ptr->ccw.cmd_code=irq_ptr->aqueue.cmd; | ||
2941 | irq_ptr->ccw.flags=CCW_FLAG_SLI; | ||
2942 | irq_ptr->ccw.count=irq_ptr->aqueue.count; | ||
2943 | irq_ptr->ccw.cda=QDIO_GET_ADDR(0); | ||
2944 | |||
2945 | spin_lock_irqsave(get_ccwdev_lock(cdev),saveflags); | ||
2946 | |||
2947 | ccw_device_set_timeout(cdev, 0); | ||
2948 | ccw_device_set_options(cdev, CCWDEV_REPORT_ALL); | ||
2949 | result=ccw_device_start(cdev,&irq_ptr->ccw,QDIO_DOING_ACTIVATE, | ||
2950 | 0, DOIO_DENY_PREFETCH); | ||
2951 | if (result) { | ||
2952 | result2=ccw_device_start(cdev,&irq_ptr->ccw, | ||
2953 | QDIO_DOING_ACTIVATE,0,0); | ||
2954 | sprintf(dbf_text,"aq:io%4x",result); | ||
2955 | QDIO_DBF_TEXT2(1,setup,dbf_text); | ||
2956 | if (result2) { | ||
2957 | sprintf(dbf_text,"aq:io%4x",result); | ||
2958 | QDIO_DBF_TEXT2(1,setup,dbf_text); | ||
2959 | } | ||
2960 | QDIO_PRINT_WARN("activate queues on irq %04x: do_IO " \ | ||
2961 | "returned %i, next try returned %i\n", | ||
2962 | irq_ptr->irq,result,result2); | ||
2963 | result=result2; | ||
2964 | } | ||
2965 | |||
2966 | spin_unlock_irqrestore(get_ccwdev_lock(cdev),saveflags); | ||
2967 | if (result) | ||
2968 | goto out; | ||
2969 | |||
2970 | for (i=0;i<irq_ptr->no_input_qs;i++) { | ||
2971 | if (irq_ptr->is_thinint_irq) { | ||
2972 | /* | ||
2973 | * that way we know, that, if we will get interrupted | ||
2974 | * by tiqdio_inbound_processing, qdio_unmark_q will | ||
2975 | * not be called | ||
2976 | */ | ||
2977 | qdio_reserve_q(irq_ptr->input_qs[i]); | ||
2978 | qdio_mark_tiq(irq_ptr->input_qs[i]); | ||
2979 | qdio_release_q(irq_ptr->input_qs[i]); | ||
2980 | } | ||
2981 | } | ||
2982 | |||
2983 | if (flags&QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT) { | ||
2984 | for (i=0;i<irq_ptr->no_input_qs;i++) { | ||
2985 | irq_ptr->input_qs[i]->is_input_q|= | ||
2986 | QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT; | ||
2987 | } | ||
2988 | } | ||
2989 | |||
2990 | wait_event_interruptible_timeout(cdev->private->wait_q, | ||
2991 | ((irq_ptr->state == | ||
2992 | QDIO_IRQ_STATE_STOPPED) || | ||
2993 | (irq_ptr->state == | ||
2994 | QDIO_IRQ_STATE_ERR)), | ||
2995 | QDIO_ACTIVATE_TIMEOUT); | ||
2996 | |||
2997 | switch (irq_ptr->state) { | ||
2998 | case QDIO_IRQ_STATE_STOPPED: | ||
2999 | case QDIO_IRQ_STATE_ERR: | ||
3000 | up(&irq_ptr->setting_up_sema); | ||
3001 | qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR); | ||
3002 | down(&irq_ptr->setting_up_sema); | ||
3003 | result = -EIO; | ||
3004 | break; | ||
3005 | default: | ||
3006 | qdio_set_state(irq_ptr, QDIO_IRQ_STATE_ACTIVE); | ||
3007 | result = 0; | ||
3008 | } | ||
3009 | out: | ||
3010 | up(&irq_ptr->setting_up_sema); | ||
3011 | |||
3012 | return result; | ||
3013 | } | ||
3014 | |||
3015 | /* buffers filled forwards again to make Rick happy */ | ||
3016 | static inline void | ||
3017 | qdio_do_qdio_fill_input(struct qdio_q *q, unsigned int qidx, | ||
3018 | unsigned int count, struct qdio_buffer *buffers) | ||
3019 | { | ||
3020 | for (;;) { | ||
3021 | set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_INPUT_EMPTY); | ||
3022 | count--; | ||
3023 | if (!count) break; | ||
3024 | qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1); | ||
3025 | } | ||
3026 | |||
3027 | /* not necessary, as the queues are synced during the SIGA read */ | ||
3028 | /*SYNC_MEMORY;*/ | ||
3029 | } | ||
3030 | |||
3031 | static inline void | ||
3032 | qdio_do_qdio_fill_output(struct qdio_q *q, unsigned int qidx, | ||
3033 | unsigned int count, struct qdio_buffer *buffers) | ||
3034 | { | ||
3035 | for (;;) { | ||
3036 | set_slsb(&q->slsb.acc.val[qidx],SLSB_CU_OUTPUT_PRIMED); | ||
3037 | count--; | ||
3038 | if (!count) break; | ||
3039 | qidx=(qidx+1)&(QDIO_MAX_BUFFERS_PER_Q-1); | ||
3040 | } | ||
3041 | |||
3042 | /* SIGA write will sync the queues */ | ||
3043 | /*SYNC_MEMORY;*/ | ||
3044 | } | ||
3045 | |||
/*
 * Inbound half of do_QDIO: return count processed buffers (starting at
 * qidx) to the adapter and, if the queue was previously empty, issue a
 * SIGA input and reschedule inbound processing.
 */
static inline void
do_qdio_handle_inbound(struct qdio_q *q, unsigned int callflags,
		       unsigned int qidx, unsigned int count,
		       struct qdio_buffer *buffers)
{
	int used_elements;

	/* This is the inbound handling of queues */
	/* remember the fill level *before* this call's buffers count */
	used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;
	
	qdio_do_qdio_fill_input(q,qidx,count,buffers);
	
	/* queue is completely full again -> stop polling mode when
	 * called from interrupt context */
	if ((used_elements+count==QDIO_MAX_BUFFERS_PER_Q)&&
	    (callflags&QDIO_FLAG_UNDER_INTERRUPT))
		atomic_swap(&q->polling,0);
	
	/* only kick the queue when it was empty before */
	if (used_elements)
		return;
	if (callflags&QDIO_FLAG_DONT_SIGA)
		return;
	if (q->siga_in) {
		int result;
		
		result=qdio_siga_input(q);
		if (result) {
			/* record the SIGA error for the next handler call */
			if (q->siga_error)
				q->error_status_flags|=
					QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR;
			q->error_status_flags|=QDIO_STATUS_LOOK_FOR_ERROR;
			q->siga_error=result;
		}
	}
	
	qdio_mark_q(q);
}
3081 | |||
/*
 * Outbound half of do_QDIO: prime count buffers (starting at qidx) and
 * kick the adapter. IQDIO queues get one SIGA per buffer; other queues
 * either sync (the if hidden inside SYNC_MEMORY) or try to fast-requeue
 * without a SIGA when the previous buffer is still primed.
 */
static inline void
do_qdio_handle_outbound(struct qdio_q *q, unsigned int callflags,
			unsigned int qidx, unsigned int count,
			struct qdio_buffer *buffers)
{
	int used_elements;

	/* This is the outbound handling of queues */
#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.start_time_outbound=NOW;
#endif /* QDIO_PERFORMANCE_STATS */

	qdio_do_qdio_fill_output(q,qidx,count,buffers);

	/* fill level before this call's buffers were added */
	used_elements=atomic_add_return(count, &q->number_of_buffers_used) - count;

	if (callflags&QDIO_FLAG_DONT_SIGA) {
#ifdef QDIO_PERFORMANCE_STATS
		perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
		perf_stats.outbound_cnt++;
#endif /* QDIO_PERFORMANCE_STATS */
		return;
	}
	if (q->is_iqdio_q) {
		/* one siga for every sbal */
		while (count--)
			qdio_kick_outbound_q(q);
			
		__qdio_outbound_processing(q);
	} else {
		/* under VM, we do a SIGA sync unconditionally */
		/* NOTE: SYNC_MEMORY expands to an if statement; the
		 * "else" below deliberately binds to that hidden if */
		SYNC_MEMORY;
		else {
			/*
			 * w/o shadow queues (else branch of
			 * SYNC_MEMORY :-/ ), we try to
			 * fast-requeue buffers
			 */
			if (q->slsb.acc.val[(qidx+QDIO_MAX_BUFFERS_PER_Q-1)
					    &(QDIO_MAX_BUFFERS_PER_Q-1)]!=
			    SLSB_CU_OUTPUT_PRIMED) {
				qdio_kick_outbound_q(q);
			} else {
				QDIO_DBF_TEXT3(0,trace, "fast-req");
#ifdef QDIO_PERFORMANCE_STATS
				perf_stats.fast_reqs++;
#endif /* QDIO_PERFORMANCE_STATS */
			}
		}
		/*
		 * only marking the q could take too long,
		 * the upper layer module could do a lot of
		 * traffic in that time
		 */
		__qdio_outbound_processing(q);
	}

#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.outbound_time+=NOW-perf_stats.start_time_outbound;
	perf_stats.outbound_cnt++;
#endif /* QDIO_PERFORMANCE_STATS */
}
3144 | |||
3145 | /* count must be 1 in iqdio */ | ||
3146 | int | ||
3147 | do_QDIO(struct ccw_device *cdev,unsigned int callflags, | ||
3148 | unsigned int queue_number, unsigned int qidx, | ||
3149 | unsigned int count,struct qdio_buffer *buffers) | ||
3150 | { | ||
3151 | struct qdio_irq *irq_ptr; | ||
3152 | #ifdef CONFIG_QDIO_DEBUG | ||
3153 | char dbf_text[20]; | ||
3154 | |||
3155 | sprintf(dbf_text,"doQD%04x",cdev->private->irq); | ||
3156 | QDIO_DBF_TEXT3(0,trace,dbf_text); | ||
3157 | #endif /* CONFIG_QDIO_DEBUG */ | ||
3158 | |||
3159 | if ( (qidx>QDIO_MAX_BUFFERS_PER_Q) || | ||
3160 | (count>QDIO_MAX_BUFFERS_PER_Q) || | ||
3161 | (queue_number>QDIO_MAX_QUEUES_PER_IRQ) ) | ||
3162 | return -EINVAL; | ||
3163 | |||
3164 | if (count==0) | ||
3165 | return 0; | ||
3166 | |||
3167 | irq_ptr = cdev->private->qdio_data; | ||
3168 | if (!irq_ptr) | ||
3169 | return -ENODEV; | ||
3170 | |||
3171 | #ifdef CONFIG_QDIO_DEBUG | ||
3172 | if (callflags&QDIO_FLAG_SYNC_INPUT) | ||
3173 | QDIO_DBF_HEX3(0,trace,&irq_ptr->input_qs[queue_number], | ||
3174 | sizeof(void*)); | ||
3175 | else | ||
3176 | QDIO_DBF_HEX3(0,trace,&irq_ptr->output_qs[queue_number], | ||
3177 | sizeof(void*)); | ||
3178 | sprintf(dbf_text,"flag%04x",callflags); | ||
3179 | QDIO_DBF_TEXT3(0,trace,dbf_text); | ||
3180 | sprintf(dbf_text,"qi%02xct%02x",qidx,count); | ||
3181 | QDIO_DBF_TEXT3(0,trace,dbf_text); | ||
3182 | #endif /* CONFIG_QDIO_DEBUG */ | ||
3183 | |||
3184 | if (irq_ptr->state!=QDIO_IRQ_STATE_ACTIVE) | ||
3185 | return -EBUSY; | ||
3186 | |||
3187 | if (callflags&QDIO_FLAG_SYNC_INPUT) | ||
3188 | do_qdio_handle_inbound(irq_ptr->input_qs[queue_number], | ||
3189 | callflags, qidx, count, buffers); | ||
3190 | else if (callflags&QDIO_FLAG_SYNC_OUTPUT) | ||
3191 | do_qdio_handle_outbound(irq_ptr->output_qs[queue_number], | ||
3192 | callflags, qidx, count, buffers); | ||
3193 | else { | ||
3194 | QDIO_DBF_TEXT3(1,trace,"doQD:inv"); | ||
3195 | return -EINVAL; | ||
3196 | } | ||
3197 | return 0; | ||
3198 | } | ||
3199 | |||
3200 | #ifdef QDIO_PERFORMANCE_STATS | ||
/*
 * /proc read handler for the qdio performance counters: formats all
 * statistics into the supplied page buffer and returns the byte count.
 */
static int
qdio_perf_procfile_read(char *buffer, char **buffer_location, off_t offset,
			int buffer_length, int *eof, void *data)
{
	int c=0;

	/* we are always called with buffer_length=4k, so we always
	   deliver everything on the first read */
	if (offset>0)
		return 0;

/* append a formatted line to the output buffer */
#define _OUTP_IT(x...) c+=sprintf(buffer+c,x)
	_OUTP_IT("i_p_nc/c=%lu/%lu\n",i_p_nc,i_p_c);
	_OUTP_IT("ii_p_nc/c=%lu/%lu\n",ii_p_nc,ii_p_c);
	_OUTP_IT("o_p_nc/c=%lu/%lu\n",o_p_nc,o_p_c);
	_OUTP_IT("Number of tasklet runs (total)                  : %u\n",
		 perf_stats.tl_runs);
	_OUTP_IT("\n");
	_OUTP_IT("Number of SIGA sync's issued                    : %u\n",
		 perf_stats.siga_syncs);
	_OUTP_IT("Number of SIGA in's issued                      : %u\n",
		 perf_stats.siga_ins);
	_OUTP_IT("Number of SIGA out's issued                     : %u\n",
		 perf_stats.siga_outs);
	_OUTP_IT("Number of PCIs caught                           : %u\n",
		 perf_stats.pcis);
	_OUTP_IT("Number of adapter interrupts caught             : %u\n",
		 perf_stats.thinints);
	_OUTP_IT("Number of fast requeues (outg. SBALs w/o SIGA)  : %u\n",
		 perf_stats.fast_reqs);
	_OUTP_IT("\n");
	_OUTP_IT("Total time of all inbound actions (us) incl. UL : %u\n",
		 perf_stats.inbound_time);
	_OUTP_IT("Number of inbound transfers                     : %u\n",
		 perf_stats.inbound_cnt);
	_OUTP_IT("Total time of all outbound do_QDIOs (us)        : %u\n",
		 perf_stats.outbound_time);
	_OUTP_IT("Number of do_QDIOs outbound                     : %u\n",
		 perf_stats.outbound_cnt);
	_OUTP_IT("\n");

	return c;
}
3244 | |||
3245 | static struct proc_dir_entry *qdio_perf_proc_file; | ||
3246 | #endif /* QDIO_PERFORMANCE_STATS */ | ||
3247 | |||
/* Publish the performance counters under /proc (stats builds only). */
static void
qdio_add_procfs_entry(void)
{
#ifdef QDIO_PERFORMANCE_STATS
	proc_perf_file_registration = 0;
	qdio_perf_proc_file = create_proc_entry(QDIO_PERF,
						S_IFREG|0444, &proc_root);
	if (!qdio_perf_proc_file)
		proc_perf_file_registration = -1;
	else
		qdio_perf_proc_file->read_proc = &qdio_perf_procfile_read;

	if (proc_perf_file_registration)
		QDIO_PRINT_WARN("was not able to register perf. " \
				"proc-file (%i).\n",
				proc_perf_file_registration);
#endif /* QDIO_PERFORMANCE_STATS */
}
3265 | |||
/* Remove the /proc performance file, if it was registered earlier. */
static void
qdio_remove_procfs_entry(void)
{
#ifdef QDIO_PERFORMANCE_STATS
	perf_stats.tl_runs = 0;

	if (proc_perf_file_registration == 0) /* registration went ok */
		remove_proc_entry(QDIO_PERF, &proc_root);
#endif /* QDIO_PERFORMANCE_STATS */
}
3276 | |||
3277 | static void | ||
3278 | tiqdio_register_thinints(void) | ||
3279 | { | ||
3280 | char dbf_text[20]; | ||
3281 | register_thinint_result= | ||
3282 | s390_register_adapter_interrupt(&tiqdio_thinint_handler); | ||
3283 | if (register_thinint_result) { | ||
3284 | sprintf(dbf_text,"regthn%x",(register_thinint_result&0xff)); | ||
3285 | QDIO_DBF_TEXT0(0,setup,dbf_text); | ||
3286 | QDIO_PRINT_ERR("failed to register adapter handler " \ | ||
3287 | "(rc=%i).\nAdapter interrupts might " \ | ||
3288 | "not work. Continuing.\n", | ||
3289 | register_thinint_result); | ||
3290 | } | ||
3291 | } | ||
3292 | |||
3293 | static void | ||
3294 | tiqdio_unregister_thinints(void) | ||
3295 | { | ||
3296 | if (!register_thinint_result) | ||
3297 | s390_unregister_adapter_interrupt(&tiqdio_thinint_handler); | ||
3298 | } | ||
3299 | |||
3300 | static int | ||
3301 | qdio_get_qdio_memory(void) | ||
3302 | { | ||
3303 | int i; | ||
3304 | indicator_used[0]=1; | ||
3305 | |||
3306 | for (i=1;i<INDICATORS_PER_CACHELINE;i++) | ||
3307 | indicator_used[i]=0; | ||
3308 | indicators=(__u32*)kmalloc(sizeof(__u32)*(INDICATORS_PER_CACHELINE), | ||
3309 | GFP_KERNEL); | ||
3310 | if (!indicators) return -ENOMEM; | ||
3311 | memset(indicators,0,sizeof(__u32)*(INDICATORS_PER_CACHELINE)); | ||
3312 | return 0; | ||
3313 | } | ||
3314 | |||
3315 | static void | ||
3316 | qdio_release_qdio_memory(void) | ||
3317 | { | ||
3318 | if (indicators) | ||
3319 | kfree(indicators); | ||
3320 | } | ||
3321 | |||
/*
 * Unregister every s390 debug-feature view that was set up; views that
 * never got registered (NULL handles) are skipped. Also used as the
 * error-path cleanup of qdio_register_dbf_views().
 */
static void
qdio_unregister_dbf_views(void)
{
	if (qdio_dbf_setup)
		debug_unregister(qdio_dbf_setup);
	if (qdio_dbf_sbal)
		debug_unregister(qdio_dbf_sbal);
	if (qdio_dbf_sense)
		debug_unregister(qdio_dbf_sense);
	if (qdio_dbf_trace)
		debug_unregister(qdio_dbf_trace);
#ifdef CONFIG_QDIO_DEBUG
        if (qdio_dbf_slsb_out)
                debug_unregister(qdio_dbf_slsb_out);
        if (qdio_dbf_slsb_in)
                debug_unregister(qdio_dbf_slsb_in);
#endif /* CONFIG_QDIO_DEBUG */
}
3340 | |||
/*
 * Register all s390 debug-feature areas used by the driver (setup,
 * sbal, sense, trace, plus the slsb areas in debug builds), each with
 * a hex/ascii view and its default level. On any failure, everything
 * registered so far is torn down again. Returns 0 or -ENOMEM.
 */
static int
qdio_register_dbf_views(void)
{
	qdio_dbf_setup=debug_register(QDIO_DBF_SETUP_NAME,
				      QDIO_DBF_SETUP_INDEX,
				      QDIO_DBF_SETUP_NR_AREAS,
				      QDIO_DBF_SETUP_LEN);
	if (!qdio_dbf_setup)
		goto oom;
	debug_register_view(qdio_dbf_setup,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_setup,QDIO_DBF_SETUP_LEVEL);

	qdio_dbf_sbal=debug_register(QDIO_DBF_SBAL_NAME,
				     QDIO_DBF_SBAL_INDEX,
				     QDIO_DBF_SBAL_NR_AREAS,
				     QDIO_DBF_SBAL_LEN);
	if (!qdio_dbf_sbal)
		goto oom;

	debug_register_view(qdio_dbf_sbal,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_sbal,QDIO_DBF_SBAL_LEVEL);

	qdio_dbf_sense=debug_register(QDIO_DBF_SENSE_NAME,
				      QDIO_DBF_SENSE_INDEX,
				      QDIO_DBF_SENSE_NR_AREAS,
				      QDIO_DBF_SENSE_LEN);
	if (!qdio_dbf_sense)
		goto oom;

	debug_register_view(qdio_dbf_sense,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_sense,QDIO_DBF_SENSE_LEVEL);

	qdio_dbf_trace=debug_register(QDIO_DBF_TRACE_NAME,
				      QDIO_DBF_TRACE_INDEX,
				      QDIO_DBF_TRACE_NR_AREAS,
				      QDIO_DBF_TRACE_LEN);
	if (!qdio_dbf_trace)
		goto oom;

	debug_register_view(qdio_dbf_trace,&debug_hex_ascii_view);
	debug_set_level(qdio_dbf_trace,QDIO_DBF_TRACE_LEVEL);

#ifdef CONFIG_QDIO_DEBUG
        qdio_dbf_slsb_out=debug_register(QDIO_DBF_SLSB_OUT_NAME,
                                         QDIO_DBF_SLSB_OUT_INDEX,
                                         QDIO_DBF_SLSB_OUT_NR_AREAS,
                                         QDIO_DBF_SLSB_OUT_LEN);
        if (!qdio_dbf_slsb_out)
		goto oom;
        debug_register_view(qdio_dbf_slsb_out,&debug_hex_ascii_view);
        debug_set_level(qdio_dbf_slsb_out,QDIO_DBF_SLSB_OUT_LEVEL);

        qdio_dbf_slsb_in=debug_register(QDIO_DBF_SLSB_IN_NAME,
                                        QDIO_DBF_SLSB_IN_INDEX,
                                        QDIO_DBF_SLSB_IN_NR_AREAS,
                                        QDIO_DBF_SLSB_IN_LEN);
        if (!qdio_dbf_slsb_in)
		goto oom;
        debug_register_view(qdio_dbf_slsb_in,&debug_hex_ascii_view);
        debug_set_level(qdio_dbf_slsb_in,QDIO_DBF_SLSB_IN_LEVEL);
#endif /* CONFIG_QDIO_DEBUG */
	return 0;
oom:
	/* unwind all successfully registered areas */
	QDIO_PRINT_ERR("not enough memory for dbf.\n");
	qdio_unregister_dbf_views();
	return -ENOMEM;
}
3408 | |||
3409 | static int __init | ||
3410 | init_QDIO(void) | ||
3411 | { | ||
3412 | int res; | ||
3413 | #ifdef QDIO_PERFORMANCE_STATS | ||
3414 | void *ptr; | ||
3415 | #endif /* QDIO_PERFORMANCE_STATS */ | ||
3416 | |||
3417 | printk("qdio: loading %s\n",version); | ||
3418 | |||
3419 | res=qdio_get_qdio_memory(); | ||
3420 | if (res) | ||
3421 | return res; | ||
3422 | |||
3423 | res = qdio_register_dbf_views(); | ||
3424 | if (res) | ||
3425 | return res; | ||
3426 | |||
3427 | QDIO_DBF_TEXT0(0,setup,"initQDIO"); | ||
3428 | |||
3429 | #ifdef QDIO_PERFORMANCE_STATS | ||
3430 | memset((void*)&perf_stats,0,sizeof(perf_stats)); | ||
3431 | QDIO_DBF_TEXT0(0,setup,"perfstat"); | ||
3432 | ptr=&perf_stats; | ||
3433 | QDIO_DBF_HEX0(0,setup,&ptr,sizeof(void*)); | ||
3434 | #endif /* QDIO_PERFORMANCE_STATS */ | ||
3435 | |||
3436 | qdio_add_procfs_entry(); | ||
3437 | |||
3438 | if (tiqdio_check_chsc_availability()) | ||
3439 | QDIO_PRINT_ERR("Not all CHSCs supported. Continuing.\n"); | ||
3440 | |||
3441 | tiqdio_register_thinints(); | ||
3442 | |||
3443 | return 0; | ||
3444 | } | ||
3445 | |||
/* Module exit: tear down everything init_QDIO set up, in reverse order. */
static void __exit
cleanup_QDIO(void)
{
	tiqdio_unregister_thinints();
	qdio_remove_procfs_entry();
	qdio_release_qdio_memory();
	qdio_unregister_dbf_views();

	printk("qdio: %s: module removed\n",version);
}
3456 | |||
module_init(init_QDIO);
module_exit(cleanup_QDIO);

/* Entry points exported for qdio-based device drivers. */
EXPORT_SYMBOL(qdio_allocate);
EXPORT_SYMBOL(qdio_establish);
EXPORT_SYMBOL(qdio_initialize);
EXPORT_SYMBOL(qdio_activate);
EXPORT_SYMBOL(do_QDIO);
EXPORT_SYMBOL(qdio_shutdown);
EXPORT_SYMBOL(qdio_free);
EXPORT_SYMBOL(qdio_cleanup);
EXPORT_SYMBOL(qdio_synchronize);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h new file mode 100644 index 000000000000..9ad14db24143 --- /dev/null +++ b/drivers/s390/cio/qdio.h | |||
@@ -0,0 +1,648 @@ | |||
1 | #ifndef _CIO_QDIO_H | ||
2 | #define _CIO_QDIO_H | ||
3 | |||
4 | #define VERSION_CIO_QDIO_H "$Revision: 1.26 $" | ||
5 | |||
6 | #ifdef CONFIG_QDIO_DEBUG | ||
7 | #define QDIO_VERBOSE_LEVEL 9 | ||
8 | #else /* CONFIG_QDIO_DEBUG */ | ||
9 | #define QDIO_VERBOSE_LEVEL 5 | ||
10 | #endif /* CONFIG_QDIO_DEBUG */ | ||
11 | |||
12 | #define QDIO_USE_PROCESSING_STATE | ||
13 | |||
14 | #ifdef CONFIG_QDIO_PERF_STATS | ||
15 | #define QDIO_PERFORMANCE_STATS | ||
16 | #endif /* CONFIG_QDIO_PERF_STATS */ | ||
17 | |||
18 | #define QDIO_MINIMAL_BH_RELIEF_TIME 16 | ||
19 | #define QDIO_TIMER_POLL_VALUE 1 | ||
20 | #define IQDIO_TIMER_POLL_VALUE 1 | ||
21 | |||
22 | /* | ||
23 | * unfortunately this can't be (QDIO_MAX_BUFFERS_PER_Q*4/3) or so -- as | ||
24 | * we never know, whether we'll get initiative again, e.g. to give the | ||
25 | * transmit skb's back to the stack, however the stack may be waiting for | ||
26 | * them... therefore we define 4 as threshold to start polling (which | ||
27 | * will stop as soon as the asynchronous queue catches up) | ||
28 | * btw, this only applies to the asynchronous HiperSockets queue | ||
29 | */ | ||
30 | #define IQDIO_FILL_LEVEL_TO_POLL 4 | ||
31 | |||
32 | #define TIQDIO_THININT_ISC 3 | ||
33 | #define TIQDIO_DELAY_TARGET 0 | ||
34 | #define QDIO_BUSY_BIT_PATIENCE 100 /* in microsecs */ | ||
35 | #define QDIO_BUSY_BIT_GIVE_UP 10000000 /* 10 seconds */ | ||
36 | #define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */ | ||
37 | #define IQDIO_GLOBAL_LAPS_INT 1 /* don't global summary */ | ||
38 | #define IQDIO_LOCAL_LAPS 4 | ||
39 | #define IQDIO_LOCAL_LAPS_INT 1 | ||
40 | #define IQDIO_GLOBAL_SUMMARY_CC_MASK 2 | ||
41 | /*#define IQDIO_IQDC_INT_PARM 0x1234*/ | ||
42 | |||
43 | #define QDIO_Q_LAPS 5 | ||
44 | |||
45 | #define QDIO_STORAGE_KEY 0 | ||
46 | |||
47 | #define L2_CACHELINE_SIZE 256 | ||
48 | #define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32)) | ||
49 | |||
50 | #define QDIO_PERF "qdio_perf" | ||
51 | |||
52 | /* must be a power of 2 */ | ||
53 | /*#define QDIO_STATS_NUMBER 4 | ||
54 | |||
55 | #define QDIO_STATS_CLASSES 2 | ||
56 | #define QDIO_STATS_COUNT_NEEDED 2*/ | ||
57 | |||
58 | #define QDIO_NO_USE_COUNT_TIMEOUT (1*HZ) /* wait for 1 sec on each q before | ||
59 | exiting without having use_count | ||
60 | of the queue to 0 */ | ||
61 | |||
62 | #define QDIO_ESTABLISH_TIMEOUT (1*HZ) | ||
63 | #define QDIO_ACTIVATE_TIMEOUT ((5*HZ)>>10) | ||
64 | #define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ) | ||
65 | #define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ) | ||
66 | |||
/*
 * Life-cycle states of a qdio subchannel; stored in both struct qdio_irq
 * and struct qdio_q ('state' members below).
 */
enum qdio_irq_states {
	QDIO_IRQ_STATE_INACTIVE,
	QDIO_IRQ_STATE_ESTABLISHED,
	QDIO_IRQ_STATE_ACTIVE,
	QDIO_IRQ_STATE_STOPPED,
	QDIO_IRQ_STATE_CLEANUP,
	QDIO_IRQ_STATE_ERR,
	NR_QDIO_IRQ_STATES,
};

/* used as intparm in do_IO: tags which ccw operation an interrupt
 * belongs to */
#define QDIO_DOING_SENSEID 0
#define QDIO_DOING_ESTABLISH 1
#define QDIO_DOING_ACTIVATE 2
#define QDIO_DOING_CLEANUP 3
82 | |||
83 | /************************* DEBUG FACILITY STUFF *********************/ | ||
84 | |||
/*
 * Wrappers around the s390 debug facility: record 'len' bytes at 'addr'
 * (hex) or a text string into the qdio_dbf_<name> view at the given
 * level.  A non-zero 'ex' records the entry as a debug exception
 * (debug_exception/debug_text_exception) instead of a plain event.
 */
#define QDIO_DBF_HEX(ex,name,level,addr,len) \
	do { \
	if (ex) \
		debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \
	else \
		debug_event(qdio_dbf_##name,level,(void*)(addr),len); \
	} while (0)
#define QDIO_DBF_TEXT(ex,name,level,text) \
	do { \
	if (ex) \
		debug_text_exception(qdio_dbf_##name,level,text); \
	else \
		debug_text_event(qdio_dbf_##name,level,text); \
	} while (0)
99 | |||
100 | |||
101 | #define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len) | ||
102 | #define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len) | ||
103 | #define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len) | ||
104 | #ifdef CONFIG_QDIO_DEBUG | ||
105 | #define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len) | ||
106 | #define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len) | ||
107 | #define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len) | ||
108 | #define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len) | ||
109 | #else /* CONFIG_QDIO_DEBUG */ | ||
110 | #define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0) | ||
111 | #define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0) | ||
112 | #define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0) | ||
113 | #define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0) | ||
114 | #endif /* CONFIG_QDIO_DEBUG */ | ||
115 | |||
116 | #define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text) | ||
117 | #define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text) | ||
118 | #define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text) | ||
119 | #ifdef CONFIG_QDIO_DEBUG | ||
120 | #define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text) | ||
121 | #define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text) | ||
122 | #define QDIO_DBF_TEXT5(ex,name,text) QDIO_DBF_TEXT(ex,name,5,text) | ||
123 | #define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text) | ||
124 | #else /* CONFIG_QDIO_DEBUG */ | ||
125 | #define QDIO_DBF_TEXT3(ex,name,text) do {} while (0) | ||
126 | #define QDIO_DBF_TEXT4(ex,name,text) do {} while (0) | ||
127 | #define QDIO_DBF_TEXT5(ex,name,text) do {} while (0) | ||
128 | #define QDIO_DBF_TEXT6(ex,name,text) do {} while (0) | ||
129 | #endif /* CONFIG_QDIO_DEBUG */ | ||
130 | |||
131 | #define QDIO_DBF_SETUP_NAME "qdio_setup" | ||
132 | #define QDIO_DBF_SETUP_LEN 8 | ||
133 | #define QDIO_DBF_SETUP_INDEX 2 | ||
134 | #define QDIO_DBF_SETUP_NR_AREAS 1 | ||
135 | #ifdef CONFIG_QDIO_DEBUG | ||
136 | #define QDIO_DBF_SETUP_LEVEL 6 | ||
137 | #else /* CONFIG_QDIO_DEBUG */ | ||
138 | #define QDIO_DBF_SETUP_LEVEL 2 | ||
139 | #endif /* CONFIG_QDIO_DEBUG */ | ||
140 | |||
141 | #define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */ | ||
142 | #define QDIO_DBF_SBAL_LEN 256 | ||
143 | #define QDIO_DBF_SBAL_INDEX 2 | ||
144 | #define QDIO_DBF_SBAL_NR_AREAS 2 | ||
145 | #ifdef CONFIG_QDIO_DEBUG | ||
146 | #define QDIO_DBF_SBAL_LEVEL 6 | ||
147 | #else /* CONFIG_QDIO_DEBUG */ | ||
148 | #define QDIO_DBF_SBAL_LEVEL 2 | ||
149 | #endif /* CONFIG_QDIO_DEBUG */ | ||
150 | |||
151 | #define QDIO_DBF_TRACE_NAME "qdio_trace" | ||
152 | #define QDIO_DBF_TRACE_LEN 8 | ||
153 | #define QDIO_DBF_TRACE_NR_AREAS 2 | ||
154 | #ifdef CONFIG_QDIO_DEBUG | ||
155 | #define QDIO_DBF_TRACE_INDEX 4 | ||
156 | #define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */ | ||
157 | #else /* CONFIG_QDIO_DEBUG */ | ||
158 | #define QDIO_DBF_TRACE_INDEX 2 | ||
159 | #define QDIO_DBF_TRACE_LEVEL 2 | ||
160 | #endif /* CONFIG_QDIO_DEBUG */ | ||
161 | |||
162 | #define QDIO_DBF_SENSE_NAME "qdio_sense" | ||
163 | #define QDIO_DBF_SENSE_LEN 64 | ||
164 | #define QDIO_DBF_SENSE_INDEX 1 | ||
165 | #define QDIO_DBF_SENSE_NR_AREAS 1 | ||
166 | #ifdef CONFIG_QDIO_DEBUG | ||
167 | #define QDIO_DBF_SENSE_LEVEL 6 | ||
168 | #else /* CONFIG_QDIO_DEBUG */ | ||
169 | #define QDIO_DBF_SENSE_LEVEL 2 | ||
170 | #endif /* CONFIG_QDIO_DEBUG */ | ||
171 | |||
172 | #ifdef CONFIG_QDIO_DEBUG | ||
173 | #define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT | ||
174 | |||
175 | #define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out" | ||
176 | #define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q | ||
177 | #define QDIO_DBF_SLSB_OUT_INDEX 8 | ||
178 | #define QDIO_DBF_SLSB_OUT_NR_AREAS 1 | ||
179 | #define QDIO_DBF_SLSB_OUT_LEVEL 6 | ||
180 | |||
181 | #define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in" | ||
182 | #define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q | ||
183 | #define QDIO_DBF_SLSB_IN_INDEX 8 | ||
184 | #define QDIO_DBF_SLSB_IN_NR_AREAS 1 | ||
185 | #define QDIO_DBF_SLSB_IN_LEVEL 6 | ||
186 | #endif /* CONFIG_QDIO_DEBUG */ | ||
187 | |||
188 | #define QDIO_PRINTK_HEADER QDIO_NAME ": " | ||
189 | |||
190 | #if QDIO_VERBOSE_LEVEL>8 | ||
191 | #define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x) | ||
192 | #else | ||
193 | #define QDIO_PRINT_STUPID(x...) | ||
194 | #endif | ||
195 | |||
196 | #if QDIO_VERBOSE_LEVEL>7 | ||
197 | #define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x) | ||
198 | #else | ||
199 | #define QDIO_PRINT_ALL(x...) | ||
200 | #endif | ||
201 | |||
202 | #if QDIO_VERBOSE_LEVEL>6 | ||
203 | #define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x) | ||
204 | #else | ||
205 | #define QDIO_PRINT_INFO(x...) | ||
206 | #endif | ||
207 | |||
208 | #if QDIO_VERBOSE_LEVEL>5 | ||
209 | #define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x) | ||
210 | #else | ||
211 | #define QDIO_PRINT_WARN(x...) | ||
212 | #endif | ||
213 | |||
214 | #if QDIO_VERBOSE_LEVEL>4 | ||
215 | #define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x) | ||
216 | #else | ||
217 | #define QDIO_PRINT_ERR(x...) | ||
218 | #endif | ||
219 | |||
220 | #if QDIO_VERBOSE_LEVEL>3 | ||
221 | #define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x) | ||
222 | #else | ||
223 | #define QDIO_PRINT_CRIT(x...) | ||
224 | #endif | ||
225 | |||
226 | #if QDIO_VERBOSE_LEVEL>2 | ||
227 | #define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x) | ||
228 | #else | ||
229 | #define QDIO_PRINT_ALERT(x...) | ||
230 | #endif | ||
231 | |||
232 | #if QDIO_VERBOSE_LEVEL>1 | ||
233 | #define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x) | ||
234 | #else | ||
235 | #define QDIO_PRINT_EMERG(x...) | ||
236 | #endif | ||
237 | |||
238 | #define HEXDUMP16(importance,header,ptr) \ | ||
239 | QDIO_PRINT_##importance(header "%02x %02x %02x %02x " \ | ||
240 | "%02x %02x %02x %02x %02x %02x %02x %02x " \ | ||
241 | "%02x %02x %02x %02x\n",*(((char*)ptr)), \ | ||
242 | *(((char*)ptr)+1),*(((char*)ptr)+2), \ | ||
243 | *(((char*)ptr)+3),*(((char*)ptr)+4), \ | ||
244 | *(((char*)ptr)+5),*(((char*)ptr)+6), \ | ||
245 | *(((char*)ptr)+7),*(((char*)ptr)+8), \ | ||
246 | *(((char*)ptr)+9),*(((char*)ptr)+10), \ | ||
247 | *(((char*)ptr)+11),*(((char*)ptr)+12), \ | ||
248 | *(((char*)ptr)+13),*(((char*)ptr)+14), \ | ||
249 | *(((char*)ptr)+15)); \ | ||
250 | QDIO_PRINT_##importance(header "%02x %02x %02x %02x %02x %02x %02x %02x " \ | ||
251 | "%02x %02x %02x %02x %02x %02x %02x %02x\n", \ | ||
252 | *(((char*)ptr)+16),*(((char*)ptr)+17), \ | ||
253 | *(((char*)ptr)+18),*(((char*)ptr)+19), \ | ||
254 | *(((char*)ptr)+20),*(((char*)ptr)+21), \ | ||
255 | *(((char*)ptr)+22),*(((char*)ptr)+23), \ | ||
256 | *(((char*)ptr)+24),*(((char*)ptr)+25), \ | ||
257 | *(((char*)ptr)+26),*(((char*)ptr)+27), \ | ||
258 | *(((char*)ptr)+28),*(((char*)ptr)+29), \ | ||
259 | *(((char*)ptr)+30),*(((char*)ptr)+31)); | ||
260 | |||
261 | /****************** END OF DEBUG FACILITY STUFF *********************/ | ||
262 | |||
263 | /* | ||
264 | * Some instructions as assembly | ||
265 | */ | ||
/*
 * SIGA sync: issue SIGNAL ADAPTER with function code 2 (reg 0) to ask
 * the adapter to synchronize its view of the queues of subchannel 'irq'
 * with main storage.  reg 1 = 0x10000|irq (subchannel id), reg 2/reg 3 =
 * mask1/mask2 selecting the queues.  Returns the condition code,
 * extracted via ipm/srl.
 *
 * NOTE(review): GNU89 'extern __inline__' in a header relies on the
 * compiler always inlining the function; 'static inline' would be the
 * safer spelling -- kept as-is to avoid touching linkage.
 */
extern __inline__ int
do_siga_sync(unsigned int irq, unsigned int mask1, unsigned int mask2)
{
	int cc;

#ifndef CONFIG_ARCH_S390X
	/* 31-bit variant: plain 32-bit register loads */
	asm volatile (
		"lhi 0,2 \n\t"
		"lr 1,%1 \n\t"
		"lr 2,%2 \n\t"
		"lr 3,%3 \n\t"
		"siga 0 \n\t"
		"ipm %0 \n\t"
		"srl %0,28 \n\t"
		: "=d" (cc)
		: "d" (0x10000|irq), "d" (mask1), "d" (mask2)
		: "cc", "0", "1", "2", "3"
		);
#else /* CONFIG_ARCH_S390X */
	/* 64-bit variant: zero-extend the 32-bit operands into 64-bit regs */
	asm volatile (
		"lghi 0,2 \n\t"
		"llgfr 1,%1 \n\t"
		"llgfr 2,%2 \n\t"
		"llgfr 3,%3 \n\t"
		"siga 0 \n\t"
		"ipm %0 \n\t"
		"srl %0,28 \n\t"
		: "=d" (cc)
		: "d" (0x10000|irq), "d" (mask1), "d" (mask2)
		: "cc", "0", "1", "2", "3"
		);
#endif /* CONFIG_ARCH_S390X */
	return cc;
}
300 | |||
/*
 * SIGA read: issue SIGNAL ADAPTER with function code 1 (reg 0) to tell
 * the adapter to process the input queues of subchannel 'irq' selected
 * by 'mask' (reg 2).  Returns the condition code.  "memory" clobber:
 * the adapter may update queue storage as a result.
 */
extern __inline__ int
do_siga_input(unsigned int irq, unsigned int mask)
{
	int cc;

#ifndef CONFIG_ARCH_S390X
	asm volatile (
		"lhi 0,1 \n\t"
		"lr 1,%1 \n\t"
		"lr 2,%2 \n\t"
		"siga 0 \n\t"
		"ipm %0 \n\t"
		"srl %0,28 \n\t"
		: "=d" (cc)
		: "d" (0x10000|irq), "d" (mask)
		: "cc", "0", "1", "2", "memory"
		);
#else /* CONFIG_ARCH_S390X */
	asm volatile (
		"lghi 0,1 \n\t"
		"llgfr 1,%1 \n\t"
		"llgfr 2,%2 \n\t"
		"siga 0 \n\t"
		"ipm %0 \n\t"
		"srl %0,28 \n\t"
		: "=d" (cc)
		: "d" (0x10000|irq), "d" (mask)
		: "cc", "0", "1", "2", "memory"
		);
#endif /* CONFIG_ARCH_S390X */

	return cc;
}
334 | |||
/*
 * SIGA write: issue SIGNAL ADAPTER with function code 0 (reg 0) to make
 * the adapter process the output queues of subchannel 'irq' selected by
 * 'mask'.  After the instruction, bit 31 of reg 0 (isolated via
 * "srl 0,31") is stored through *bb -- the adapter busy indication the
 * callers retry on (see QDIO_BUSY_BIT_PATIENCE).  Returns the condition
 * code; an access exception raised by the instruction is caught through
 * the .fixup/__ex_table machinery and reported as
 * cc = QDIO_SIGA_ERROR_ACCESS_EXCEPTION instead of an oops.
 */
extern __inline__ int
do_siga_output(unsigned long irq, unsigned long mask, __u32 *bb)
{
	int cc;
	__u32 busy_bit;

#ifndef CONFIG_ARCH_S390X
	asm volatile (
		"lhi 0,0 \n\t"
		"lr 1,%2 \n\t"
		"lr 2,%3 \n\t"
		"siga 0 \n\t"
		"0:"
		"ipm %0 \n\t"
		"srl %0,28 \n\t"
		"srl 0,31 \n\t"
		"lr %1,0 \n\t"
		"1: \n\t"
		/* fixup: load the error cc and branch back to label 1 */
		".section .fixup,\"ax\"\n\t"
		"2: \n\t"
		"lhi %0,%4 \n\t"
		"bras 1,3f \n\t"
		".long 1b \n\t"
		"3: \n\t"
		"l 1,0(1) \n\t"
		"br 1 \n\t"
		".previous \n\t"
		".section __ex_table,\"a\"\n\t"
		".align 4 \n\t"
		".long 0b,2b \n\t"
		".previous \n\t"
		: "=d" (cc), "=d" (busy_bit)
		: "d" (0x10000|irq), "d" (mask),
		"i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
		: "cc", "0", "1", "2", "memory"
		);
#else /* CONFIG_ARCH_S390X */
	asm volatile (
		"lghi 0,0 \n\t"
		"llgfr 1,%2 \n\t"
		"llgfr 2,%3 \n\t"
		"siga 0 \n\t"
		"0:"
		"ipm %0 \n\t"
		"srl %0,28 \n\t"
		"srl 0,31 \n\t"
		"llgfr %1,0 \n\t"
		"1: \n\t"
		/* 64 bit: fixup can jump back directly */
		".section .fixup,\"ax\"\n\t"
		"lghi %0,%4 \n\t"
		"jg 1b \n\t"
		".previous\n\t"
		".section __ex_table,\"a\"\n\t"
		".align 8 \n\t"
		".quad 0b,1b \n\t"
		".previous \n\t"
		: "=d" (cc), "=d" (busy_bit)
		: "d" (0x10000|irq), "d" (mask),
		"i" (QDIO_SIGA_ERROR_ACCESS_EXCEPTION)
		: "cc", "0", "1", "2", "memory"
		);
#endif /* CONFIG_ARCH_S390X */

	(*bb) = busy_bit;
	return cc;
}
401 | |||
/*
 * Clear the thin-interrupt global summary indicator via the
 * hand-encoded RRE instruction 0xb265 (reg 1 preloaded with 3,
 * operands reg 2/reg 0).  Returns the value the instruction leaves in
 * reg 3 -- presumably a TOD-based timestamp; TODO confirm against the
 * Principles of Operation before relying on its format.
 */
extern __inline__ unsigned long
do_clear_global_summary(void)
{

	unsigned long time;

#ifndef CONFIG_ARCH_S390X
	asm volatile (
		"lhi 1,3 \n\t"
		".insn rre,0xb2650000,2,0 \n\t"
		"lr %0,3 \n\t"
		: "=d" (time) : : "cc", "1", "2", "3"
		);
#else /* CONFIG_ARCH_S390X */
	asm volatile (
		"lghi 1,3 \n\t"
		".insn rre,0xb2650000,2,0 \n\t"
		"lgr %0,3 \n\t"
		: "=d" (time) : : "cc", "1", "2", "3"
		);
#endif /* CONFIG_ARCH_S390X */

	return time;
}
426 | |||
427 | /* | ||
428 | * QDIO device commands returned by extended Sense-ID | ||
429 | */ | ||
430 | #define DEFAULT_ESTABLISH_QS_CMD 0x1b | ||
431 | #define DEFAULT_ESTABLISH_QS_COUNT 0x1000 | ||
432 | #define DEFAULT_ACTIVATE_QS_CMD 0x1f | ||
433 | #define DEFAULT_ACTIVATE_QS_COUNT 0 | ||
434 | |||
435 | /* | ||
436 | * additional CIWs returned by extended Sense-ID | ||
437 | */ | ||
438 | #define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */ | ||
439 | #define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */ | ||
440 | |||
441 | #define QDIO_CHSC_RESPONSE_CODE_OK 1 | ||
442 | /* flags for st qdio sch data */ | ||
443 | #define CHSC_FLAG_QDIO_CAPABILITY 0x80 | ||
444 | #define CHSC_FLAG_VALIDITY 0x40 | ||
445 | |||
446 | #define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40 | ||
447 | #define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20 | ||
448 | #define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10 | ||
449 | #define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08 | ||
450 | #define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04 | ||
451 | |||
#ifdef QDIO_PERFORMANCE_STATS
/*
 * Global counters exported through the qdio_perf procfs entry
 * (zeroed in init_QDIO).
 */
struct qdio_perf_stats {
	unsigned int tl_runs;		/* tasklet runs */

	unsigned int siga_outs;		/* SIGA write instructions issued */
	unsigned int siga_ins;		/* SIGA read instructions issued */
	unsigned int siga_syncs;	/* SIGA sync instructions issued */
	unsigned int pcis;		/* program-controlled interrupts */
	unsigned int thinints;		/* adapter (thin) interrupts */
	unsigned int fast_reqs;		/* presumably do_QDIO fast-path hits
					 * -- verify against qdio.c */

	/* accumulated latency bookkeeping per direction */
	__u64 start_time_outbound;
	unsigned int outbound_cnt;
	unsigned int outbound_time;
	__u64 start_time_inbound;
	unsigned int inbound_cnt;
	unsigned int inbound_time;
};
#endif /* QDIO_PERFORMANCE_STATS */
471 | |||
/* exchange b into atomic counter a, returning the previous value.
 * NOTE(review): '(int*)a.counter' casts the counter's *value* to a
 * pointer -- this looks like it should be '&(a).counter'; the macro
 * arguments are also unparenthesized.  Verify against the call sites
 * in qdio.c before changing. */
#define atomic_swap(a,b) xchg((int*)a.counter,b)

/* unlikely as the later the better */
/* SIGA-sync the queue before looking at the SLSB, but only for devices
 * that need it (q->siga_sync).  NOTE(review): these expand to bare 'if'
 * statements -- unsafe inside an unbraced if/else at the call site. */
#define SYNC_MEMORY if (unlikely(q->siga_sync)) qdio_siga_sync_q(q)
#define SYNC_MEMORY_ALL if (unlikely(q->siga_sync)) \
	qdio_siga_sync(q,~0U,~0U)
#define SYNC_MEMORY_ALL_OUTB if (unlikely(q->siga_sync)) \
	qdio_siga_sync(q,~0U,0)

/* timestamp (microseconds) and buffer-frontier bookkeeping helpers */
#define NOW qdio_get_micros()
#define SAVE_TIMESTAMP(q) q->timing.last_transfer_time=NOW
#define GET_SAVED_TIMESTAMP(q) (q->timing.last_transfer_time)
#define SAVE_FRONTIER(q,val) q->last_move_ftc=val
#define GET_SAVED_FRONTIER(q) (q->last_move_ftc)
486 | |||
487 | #define MY_MODULE_STRING(x) #x | ||
488 | |||
489 | #ifdef CONFIG_ARCH_S390X | ||
490 | #define QDIO_GET_ADDR(x) ((__u32)(unsigned long)x) | ||
491 | #else /* CONFIG_ARCH_S390X */ | ||
492 | #define QDIO_GET_ADDR(x) ((__u32)(long)x) | ||
493 | #endif /* CONFIG_ARCH_S390X */ | ||
494 | |||
#ifdef CONFIG_QDIO_DEBUG
/*
 * Change a buffer's SLSB state via qdio_set_slsb(), dumping the whole
 * SLSB into the qdio_slsb_in/qdio_slsb_out debug view both before and
 * after the change so the transition is visible in the trace.
 *
 * NOTE(review): the macro references the caller's local variable 'q'
 * instead of taking it as a parameter -- kept as-is to preserve the
 * existing call sites.
 *
 * Fix: the debug variant used to expand to multiple bare statements;
 * inside an unbraced if/else only the first statement would be guarded.
 * It is now wrapped in do { } while (0) (callers already supply the
 * trailing ';', as the non-debug variant requires one).
 */
#define set_slsb(x,y) \
do { \
	if (q->queue_type==QDIO_TRACE_QTYPE) { \
		if (q->is_input_q) { \
			QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
		} else { \
			QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
		} \
	} \
	qdio_set_slsb(x,y); \
	if (q->queue_type==QDIO_TRACE_QTYPE) { \
		if (q->is_input_q) { \
			QDIO_DBF_HEX2(0,slsb_in,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
		} else { \
			QDIO_DBF_HEX2(0,slsb_out,&q->slsb,QDIO_MAX_BUFFERS_PER_Q); \
		} \
	} \
} while (0)
#else /* CONFIG_QDIO_DEBUG */
#define set_slsb(x,y) qdio_set_slsb(x,y)
#endif /* CONFIG_QDIO_DEBUG */
515 | |||
/*
 * Per-queue state; aligned to 256 bytes (L2_CACHELINE_SIZE).
 */
struct qdio_q {
	/* buffer state bytes, one per buffer (dumped with length
	 * QDIO_MAX_BUFFERS_PER_Q by set_slsb) */
	volatile struct slsb slsb;

	char unused[QDIO_MAX_BUFFERS_PER_Q];

	/* device state-change indicator byte */
	__u32 * volatile dev_st_chg_ind;

	int is_input_q;
	int irq;
	struct ccw_device *cdev;

	unsigned int is_iqdio_q;	/* HiperSockets-type queue */
	unsigned int is_thinint_q;	/* signalled via thin interrupts */

	/* bit 0 means queue 0, bit 1 means queue 1, ... */
	unsigned int mask;
	unsigned int q_no;

	qdio_handler_t (*handler);	/* upper-layer driver callback */

	/* points to the next buffer to be checked for having
	 * been processed by the card (outbound)
	 * or to the next buffer the program should check for (inbound) */
	volatile int first_to_check;
	/* and the last time it was: */
	volatile int last_move_ftc;

	atomic_t number_of_buffers_used;
	atomic_t polling;

	/* which SIGA operations this device needs (derived from the
	 * CHSC_FLAG_SIGA_* characteristics) */
	unsigned int siga_in;
	unsigned int siga_out;
	unsigned int siga_sync;
	unsigned int siga_sync_done_on_thinints;
	unsigned int siga_sync_done_on_outb_tis;
	unsigned int hydra_gives_outbound_pcis;

	/* used to save beginning position when calling dd_handlers */
	int first_element_to_kick;

	/* see QDIO_NO_USE_COUNT_TIMEOUT: shutdown waits for this to
	 * drop to zero */
	atomic_t use_count;
	atomic_t is_in_shutdown;

	void *irq_ptr;		/* presumably the owning struct qdio_irq
				 * -- verify in qdio.c */

#ifdef QDIO_USE_TIMERS_FOR_POLLING
	struct timer_list timer;
	atomic_t timer_already_set;
	spinlock_t timer_lock;
#else /* QDIO_USE_TIMERS_FOR_POLLING */
	struct tasklet_struct tasklet;
#endif /* QDIO_USE_TIMERS_FOR_POLLING */

	enum qdio_irq_states state;

	/* used to store the error condition during a data transfer */
	unsigned int qdio_error;
	unsigned int siga_error;
	unsigned int error_status_flags;

	/* list of interesting queues */
	volatile struct qdio_q *list_next;
	volatile struct qdio_q *list_prev;

	struct sl *sl;
	volatile struct sbal *sbal[QDIO_MAX_BUFFERS_PER_Q];

	struct qdio_buffer *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q];

	unsigned long int_parm;

	/*struct {
		int in_bh_check_limit;
		int threshold;
	} threshold_classes[QDIO_STATS_CLASSES];*/

	struct {
		/* inbound: the time to stop polling
		   outbound: the time to kick peer */
		int threshold; /* the real value */

		/* outbound: last time of do_QDIO
		   inbound: last time of noticing incoming data */
		/*__u64 last_transfer_times[QDIO_STATS_NUMBER];
		int last_transfer_index; */

		__u64 last_transfer_time;
		__u64 busy_start;
	} timing;
	atomic_t busy_siga_counter;
	unsigned int queue_type;

	/* leave this member at the end. won't be cleared in qdio_fill_qs */
	struct slib *slib; /* a page is allocated under this pointer,
			      sl points into this page, offset PAGE_SIZE/2
			      (after slib) */
} __attribute__ ((aligned(256)));
613 | |||
/*
 * Per-subchannel state: shared indicator, the establish/activate
 * channel program, and the input/output queue arrays.
 */
struct qdio_irq {
	/* device state-change indicator, shared by this irq's queues */
	__u32 * volatile dev_st_chg_ind;

	unsigned long int_parm;
	int irq;

	unsigned int is_iqdio_irq;
	unsigned int is_thinint_irq;
	unsigned int hydra_gives_outbound_pcis;
	unsigned int sync_done_on_outb_pcis;

	enum qdio_irq_states state;

	unsigned int no_input_qs;	/* number of input queues */
	unsigned int no_output_qs;	/* number of output queues */

	/* adapter characteristics, CHSC_FLAG_SIGA_* bits */
	unsigned char qdioac;

	struct ccw1 ccw;		/* channel program used for do_IO */

	/* CIWs from extended Sense-ID (CIW_TYPE_EQUEUE/AQUEUE) */
	struct ciw equeue;
	struct ciw aqueue;

	struct qib qib;

	/* presumably the cio interrupt handler saved while qdio owns the
	 * device -- verify in qdio.c */
	void (*original_int_handler) (struct ccw_device *,
				      unsigned long, struct irb *);

	/* leave these four members together at the end. won't be cleared in qdio_fill_irq */
	struct qdr *qdr;
	struct qdio_q *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
	struct qdio_q *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
	struct semaphore setting_up_sema;
};
#endif /* _CIO_QDIO_H */