Diffstat (limited to 'drivers/s390/cio/css.c')
-rw-r--r--  drivers/s390/cio/css.c  |  297
1 file changed, 162 insertions(+), 135 deletions(-)
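This change converts the channel subsystem code from plain int subchannel numbers ("irqs") to a struct subchannel_id carrying an ssid and an sch_no, introduces per-CSS struct channel_subsystem objects (the css[] array replaces the single css_bus_device), adds a for_each_subchannel() iterator, tries to enable the multiple-subchannel-set facility at init time, and drops the s390_root_dev_* helpers from this file. As a rough illustration of the new iteration pattern (not part of the patch; the example_* names are invented), a walker in the style of css_rescan_devices() below would look like this:

/*
 * Illustration only, not part of the patch.  The callback is invoked once
 * per subchannel id; returning non-zero ends the walk, which is how the
 * patch propagates -ENXIO ("no more subchannels") out of the loop.
 */
static int example_count_one(struct subchannel_id schid, void *data)
{
	int *count = data;

	(*count)++;
	return 0;		/* keep iterating over ssid/sch_no */
}

static int example_count_all(void)
{
	int count = 0;

	for_each_subchannel(example_count_one, &count);
	return count;
}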
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 555119cacc27..e565193650c7 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -1,7 +1,7 @@
 /*
  *  drivers/s390/cio/css.c
  *   driver for channel subsystem
- *   $Revision: 1.85 $
+ *   $Revision: 1.93 $
  *
  *    Copyright (C) 2002 IBM Deutschland Entwicklung GmbH,
  *			 IBM Corporation
@@ -21,19 +21,35 @@
 #include "ioasm.h"
 #include "chsc.h"
 
-unsigned int highest_subchannel;
 int need_rescan = 0;
 int css_init_done = 0;
+static int max_ssid = 0;
+
+struct channel_subsystem *css[__MAX_CSSID + 1];
 
-struct pgid global_pgid;
 int css_characteristics_avail = 0;
 
-struct device css_bus_device = {
-	.bus_id = "css0",
-};
+inline int
+for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *data)
+{
+	struct subchannel_id schid;
+	int ret;
+
+	init_subchannel_id(&schid);
+	ret = -ENODEV;
+	do {
+		do {
+			ret = fn(schid, data);
+			if (ret)
+				break;
+		} while (schid.sch_no++ < __MAX_SUBCHANNEL);
+		schid.sch_no = 0;
+	} while (schid.ssid++ < max_ssid);
+	return ret;
+}
 
 static struct subchannel *
-css_alloc_subchannel(int irq)
+css_alloc_subchannel(struct subchannel_id schid)
 {
 	struct subchannel *sch;
 	int ret;
@@ -41,13 +57,11 @@ css_alloc_subchannel(int irq)
 	sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA);
 	if (sch == NULL)
 		return ERR_PTR(-ENOMEM);
-	ret = cio_validate_subchannel (sch, irq);
+	ret = cio_validate_subchannel (sch, schid);
 	if (ret < 0) {
 		kfree(sch);
 		return ERR_PTR(ret);
 	}
-	if (irq > highest_subchannel)
-		highest_subchannel = irq;
 
 	if (sch->st != SUBCHANNEL_TYPE_IO) {
 		/* For now we ignore all non-io subchannels. */
@@ -87,7 +101,7 @@ css_subchannel_release(struct device *dev)
 	struct subchannel *sch;
 
 	sch = to_subchannel(dev);
-	if (!cio_is_console(sch->irq))
+	if (!cio_is_console(sch->schid))
 		kfree(sch);
 }
 
@@ -99,7 +113,7 @@ css_register_subchannel(struct subchannel *sch)
 	int ret;
 
 	/* Initialize the subchannel structure */
-	sch->dev.parent = &css_bus_device;
+	sch->dev.parent = &css[0]->device;
 	sch->dev.bus = &css_bus_type;
 	sch->dev.release = &css_subchannel_release;
 
@@ -114,12 +128,12 @@ css_register_subchannel(struct subchannel *sch)
 }
 
 int
-css_probe_device(int irq)
+css_probe_device(struct subchannel_id schid)
 {
 	int ret;
 	struct subchannel *sch;
 
-	sch = css_alloc_subchannel(irq);
+	sch = css_alloc_subchannel(schid);
 	if (IS_ERR(sch))
 		return PTR_ERR(sch);
 	ret = css_register_subchannel(sch);
@@ -132,26 +146,26 @@ static int
 check_subchannel(struct device * dev, void * data)
 {
 	struct subchannel *sch;
-	int irq = (unsigned long)data;
+	struct subchannel_id *schid = data;
 
 	sch = to_subchannel(dev);
-	return (sch->irq == irq);
+	return schid_equal(&sch->schid, schid);
 }
 
 struct subchannel *
-get_subchannel_by_schid(int irq)
+get_subchannel_by_schid(struct subchannel_id schid)
 {
 	struct device *dev;
 
 	dev = bus_find_device(&css_bus_type, NULL,
-			      (void *)(unsigned long)irq, check_subchannel);
+			      (void *)&schid, check_subchannel);
 
 	return dev ? to_subchannel(dev) : NULL;
 }
 
 
 static inline int
-css_get_subchannel_status(struct subchannel *sch, int schid)
+css_get_subchannel_status(struct subchannel *sch, struct subchannel_id schid)
 {
 	struct schib schib;
 	int cc;
@@ -170,13 +184,13 @@ css_get_subchannel_status(struct subchannel *sch, int schid)
 }
 
 static int
-css_evaluate_subchannel(int irq, int slow)
+css_evaluate_subchannel(struct subchannel_id schid, int slow)
 {
 	int event, ret, disc;
 	struct subchannel *sch;
 	unsigned long flags;
 
-	sch = get_subchannel_by_schid(irq);
+	sch = get_subchannel_by_schid(schid);
 	disc = sch ? device_is_disconnected(sch) : 0;
 	if (disc && slow) {
 		if (sch)
@@ -194,9 +208,10 @@ css_evaluate_subchannel(int irq, int slow)
 		put_device(&sch->dev);
 		return -EAGAIN; /* Will be done on the slow path. */
 	}
-	event = css_get_subchannel_status(sch, irq);
-	CIO_MSG_EVENT(4, "Evaluating schid %04x, event %d, %s, %s path.\n",
-		      irq, event, sch?(disc?"disconnected":"normal"):"unknown",
+	event = css_get_subchannel_status(sch, schid);
+	CIO_MSG_EVENT(4, "Evaluating schid 0.%x.%04x, event %d, %s, %s path.\n",
+		      schid.ssid, schid.sch_no, event,
+		      sch?(disc?"disconnected":"normal"):"unknown",
 		      slow?"slow":"fast");
 	switch (event) {
 	case CIO_NO_PATH:
@@ -253,7 +268,7 @@ css_evaluate_subchannel(int irq, int slow)
 		sch->schib.pmcw.intparm = 0;
 		cio_modify(sch);
 		put_device(&sch->dev);
-		ret = css_probe_device(irq);
+		ret = css_probe_device(schid);
 	} else {
 		/*
 		 * We can't immediately deregister the disconnected
@@ -272,7 +287,7 @@ css_evaluate_subchannel(int irq, int slow)
 			device_trigger_reprobe(sch);
 			spin_unlock_irqrestore(&sch->lock, flags);
 		}
-		ret = sch ? 0 : css_probe_device(irq);
+		ret = sch ? 0 : css_probe_device(schid);
 		break;
 	default:
 		BUG();
@@ -281,28 +296,15 @@ css_evaluate_subchannel(int irq, int slow)
 	return ret;
 }
 
-static void
-css_rescan_devices(void)
+static int
+css_rescan_devices(struct subchannel_id schid, void *data)
 {
-	int irq, ret;
-
-	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
-		ret = css_evaluate_subchannel(irq, 1);
-		/* No more memory. It doesn't make sense to continue. No
-		 * panic because this can happen in midflight and just
-		 * because we can't use a new device is no reason to crash
-		 * the system. */
-		if (ret == -ENOMEM)
-			break;
-		/* -ENXIO indicates that there are no more subchannels. */
-		if (ret == -ENXIO)
-			break;
-	}
+	return css_evaluate_subchannel(schid, 1);
 }
 
 struct slow_subchannel {
 	struct list_head slow_list;
-	unsigned long schid;
+	struct subchannel_id schid;
 };
 
 static LIST_HEAD(slow_subchannels_head);
@@ -315,7 +317,7 @@ css_trigger_slow_path(void)
 
 	if (need_rescan) {
 		need_rescan = 0;
-		css_rescan_devices();
+		for_each_subchannel(css_rescan_devices, NULL);
 		return;
 	}
 
@@ -354,23 +356,31 @@ css_reiterate_subchannels(void)
  * Called from the machine check handler for subchannel report words.
  */
 int
-css_process_crw(int irq)
+css_process_crw(int rsid1, int rsid2)
 {
 	int ret;
+	struct subchannel_id mchk_schid;
 
-	CIO_CRW_EVENT(2, "source is subchannel %04X\n", irq);
+	CIO_CRW_EVENT(2, "source is subchannel %04X, subsystem id %x\n",
+		      rsid1, rsid2);
 
 	if (need_rescan)
 		/* We need to iterate all subchannels anyway. */
 		return -EAGAIN;
+
+	init_subchannel_id(&mchk_schid);
+	mchk_schid.sch_no = rsid1;
+	if (rsid2 != 0)
+		mchk_schid.ssid = (rsid2 >> 8) & 3;
+
 	/*
 	 * Since we are always presented with IPI in the CRW, we have to
 	 * use stsch() to find out if the subchannel in question has come
 	 * or gone.
 	 */
-	ret = css_evaluate_subchannel(irq, 0);
+	ret = css_evaluate_subchannel(mchk_schid, 0);
 	if (ret == -EAGAIN) {
-		if (css_enqueue_subchannel_slow(irq)) {
+		if (css_enqueue_subchannel_slow(mchk_schid)) {
 			css_clear_subchannel_slow_list();
 			need_rescan = 1;
 		}
@@ -378,22 +388,83 @@ css_process_crw(int irq)
 	return ret;
 }
 
-static void __init
-css_generate_pgid(void)
+static int __init
+__init_channel_subsystem(struct subchannel_id schid, void *data)
 {
-	/* Let's build our path group ID here. */
-	if (css_characteristics_avail && css_general_characteristics.mcss)
-		global_pgid.cpu_addr = 0x8000;
+	struct subchannel *sch;
+	int ret;
+
+	if (cio_is_console(schid))
+		sch = cio_get_console_subchannel();
 	else {
+		sch = css_alloc_subchannel(schid);
+		if (IS_ERR(sch))
+			ret = PTR_ERR(sch);
+		else
+			ret = 0;
+		switch (ret) {
+		case 0:
+			break;
+		case -ENOMEM:
+			panic("Out of memory in init_channel_subsystem\n");
+		/* -ENXIO: no more subchannels. */
+		case -ENXIO:
+			return ret;
+		default:
+			return 0;
+		}
+	}
+	/*
+	 * We register ALL valid subchannels in ioinfo, even those
+	 * that have been present before init_channel_subsystem.
+	 * These subchannels can't have been registered yet (kmalloc
+	 * not working) so we do it now. This is true e.g. for the
+	 * console subchannel.
+	 */
+	css_register_subchannel(sch);
+	return 0;
+}
+
+static void __init
+css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
+{
+	if (css_characteristics_avail && css_general_characteristics.mcss) {
+		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
+		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
+	} else {
 #ifdef CONFIG_SMP
-		global_pgid.cpu_addr = hard_smp_processor_id();
+		css->global_pgid.pgid_high.cpu_addr = hard_smp_processor_id();
 #else
-		global_pgid.cpu_addr = 0;
+		css->global_pgid.pgid_high.cpu_addr = 0;
 #endif
 	}
-	global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
-	global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
-	global_pgid.tod_high = (__u32) (get_clock() >> 32);
+	css->global_pgid.cpu_id = ((cpuid_t *) __LC_CPUID)->ident;
+	css->global_pgid.cpu_model = ((cpuid_t *) __LC_CPUID)->machine;
+	css->global_pgid.tod_high = tod_high;
+
+}
+
+static void
+channel_subsystem_release(struct device *dev)
+{
+	struct channel_subsystem *css;
+
+	css = to_css(dev);
+	kfree(css);
+}
+
+static inline void __init
+setup_css(int nr)
+{
+	u32 tod_high;
+
+	memset(css[nr], 0, sizeof(struct channel_subsystem));
+	css[nr]->valid = 1;
+	css[nr]->cssid = nr;
+	sprintf(css[nr]->device.bus_id, "css%x", nr);
+	css[nr]->device.release = channel_subsystem_release;
+	tod_high = (u32) (get_clock() >> 32);
+	css_generate_pgid(css[nr], tod_high);
 }
 
 /*
@@ -404,53 +475,50 @@ css_generate_pgid(void)
 static int __init
 init_channel_subsystem (void)
 {
-	int ret, irq;
+	int ret, i;
 
 	if (chsc_determine_css_characteristics() == 0)
 		css_characteristics_avail = 1;
 
-	css_generate_pgid();
-
 	if ((ret = bus_register(&css_bus_type)))
 		goto out;
-	if ((ret = device_register (&css_bus_device)))
-		goto out_bus;
 
+	/* Try to enable MSS. */
+	ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
+	switch (ret) {
+	case 0: /* Success. */
+		max_ssid = __MAX_SSID;
+		break;
+	case -ENOMEM:
+		goto out_bus;
+	default:
+		max_ssid = 0;
+	}
+	/* Setup css structure. */
+	for (i = 0; i <= __MAX_CSSID; i++) {
+		css[i] = kmalloc(sizeof(struct channel_subsystem), GFP_KERNEL);
+		if (!css[i]) {
+			ret = -ENOMEM;
+			goto out_unregister;
+		}
+		setup_css(i);
+		ret = device_register(&css[i]->device);
+		if (ret)
+			goto out_free;
+	}
 	css_init_done = 1;
 
 	ctl_set_bit(6, 28);
 
-	for (irq = 0; irq < __MAX_SUBCHANNELS; irq++) {
-		struct subchannel *sch;
-
-		if (cio_is_console(irq))
-			sch = cio_get_console_subchannel();
-		else {
-			sch = css_alloc_subchannel(irq);
-			if (IS_ERR(sch))
-				ret = PTR_ERR(sch);
-			else
-				ret = 0;
-			if (ret == -ENOMEM)
-				panic("Out of memory in "
-				      "init_channel_subsystem\n");
-			/* -ENXIO: no more subchannels. */
-			if (ret == -ENXIO)
-				break;
-			if (ret)
-				continue;
-		}
-		/*
-		 * We register ALL valid subchannels in ioinfo, even those
-		 * that have been present before init_channel_subsystem.
-		 * These subchannels can't have been registered yet (kmalloc
-		 * not working) so we do it now. This is true e.g. for the
-		 * console subchannel.
-		 */
-		css_register_subchannel(sch);
-	}
+	for_each_subchannel(__init_channel_subsystem, NULL);
 	return 0;
-
+out_free:
+	kfree(css[i]);
+out_unregister:
+	while (i > 0) {
+		i--;
+		device_unregister(&css[i]->device);
+	}
 out_bus:
 	bus_unregister(&css_bus_type);
 out:
@@ -481,47 +549,8 @@ struct bus_type css_bus_type = {
 
 subsys_initcall(init_channel_subsystem);
 
-/*
- * Register root devices for some drivers. The release function must not be
- * in the device drivers, so we do it here.
- */
-static void
-s390_root_dev_release(struct device *dev)
-{
-	kfree(dev);
-}
-
-struct device *
-s390_root_dev_register(const char *name)
-{
-	struct device *dev;
-	int ret;
-
-	if (!strlen(name))
-		return ERR_PTR(-EINVAL);
-	dev = kmalloc(sizeof(struct device), GFP_KERNEL);
-	if (!dev)
-		return ERR_PTR(-ENOMEM);
-	memset(dev, 0, sizeof(struct device));
-	strncpy(dev->bus_id, name, min(strlen(name), (size_t)BUS_ID_SIZE));
-	dev->release = s390_root_dev_release;
-	ret = device_register(dev);
-	if (ret) {
-		kfree(dev);
-		return ERR_PTR(ret);
-	}
-	return dev;
-}
-
-void
-s390_root_dev_unregister(struct device *dev)
-{
-	if (dev)
-		device_unregister(dev);
-}
-
 int
-css_enqueue_subchannel_slow(unsigned long schid)
+css_enqueue_subchannel_slow(struct subchannel_id schid)
 {
 	struct slow_subchannel *new_slow_sch;
 	unsigned long flags;
@@ -564,6 +593,4 @@ css_slow_subchannels_exist(void)
 
 MODULE_LICENSE("GPL");
 EXPORT_SYMBOL(css_bus_type);
-EXPORT_SYMBOL(s390_root_dev_register);
-EXPORT_SYMBOL(s390_root_dev_unregister);
 EXPORT_SYMBOL_GPL(css_characteristics_avail);
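For callers elsewhere in the tree, the conversion pattern is the one css_process_crw() uses above: build a struct subchannel_id with init_subchannel_id() and pass it by value. A hypothetical sketch, for illustration only (the function name is invented, not from this patch):

/*
 * Hypothetical caller update: code that used to pass a plain subchannel
 * number now fills in a struct subchannel_id first, as css_process_crw()
 * does in the hunk above.
 */
static struct subchannel *example_lookup(int sch_no)
{
	struct subchannel_id schid;

	init_subchannel_id(&schid);	/* cssid/ssid start out as zero */
	schid.sch_no = sch_no;
	return get_subchannel_by_schid(schid);	/* the id is passed by value now */
}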