Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd.c        |  20
-rw-r--r--  drivers/s390/block/dasd_eckd.c   |  10
-rw-r--r--  drivers/s390/char/con3215.c      |  22
-rw-r--r--  drivers/s390/char/con3270.c      |  13
-rw-r--r--  drivers/s390/char/monreader.c    |   6
-rw-r--r--  drivers/s390/char/raw3270.c      |  36
-rw-r--r--  drivers/s390/char/sclp_con.c     |   7
-rw-r--r--  drivers/s390/char/sclp_vt220.c   |  18
-rw-r--r--  drivers/s390/char/tape_core.c    |   2
-rw-r--r--  drivers/s390/char/vmlogrdr.c     |   4
-rw-r--r--  drivers/s390/char/vmur.c         |   2
-rw-r--r--  drivers/s390/cio/qdio.h          |  11
-rw-r--r--  drivers/s390/cio/qdio_debug.c    |   3
-rw-r--r--  drivers/s390/cio/qdio_main.c     | 144
-rw-r--r--  drivers/s390/cio/qdio_thinint.c  | 114
-rw-r--r--  drivers/s390/crypto/ap_bus.c     |  85
-rw-r--r--  drivers/s390/net/netiucv.c       |   4
17 files changed, 229 insertions, 272 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index e5b84db0aa03..749836668655 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -470,7 +470,7 @@ static int dasd_decrease_state(struct dasd_device *device) | |||
470 | */ | 470 | */ |
471 | static void dasd_change_state(struct dasd_device *device) | 471 | static void dasd_change_state(struct dasd_device *device) |
472 | { | 472 | { |
473 | int rc; | 473 | int rc; |
474 | 474 | ||
475 | if (device->state == device->target) | 475 | if (device->state == device->target) |
476 | /* Already where we want to go today... */ | 476 | /* Already where we want to go today... */ |
@@ -479,8 +479,10 @@ static void dasd_change_state(struct dasd_device *device) | |||
479 | rc = dasd_increase_state(device); | 479 | rc = dasd_increase_state(device); |
480 | else | 480 | else |
481 | rc = dasd_decrease_state(device); | 481 | rc = dasd_decrease_state(device); |
482 | if (rc && rc != -EAGAIN) | 482 | if (rc == -EAGAIN) |
483 | device->target = device->state; | 483 | return; |
484 | if (rc) | ||
485 | device->target = device->state; | ||
484 | 486 | ||
485 | if (device->state == device->target) { | 487 | if (device->state == device->target) { |
486 | wake_up(&dasd_init_waitq); | 488 | wake_up(&dasd_init_waitq); |
@@ -2503,15 +2505,25 @@ int dasd_generic_restore_device(struct ccw_device *cdev) | |||
2503 | if (IS_ERR(device)) | 2505 | if (IS_ERR(device)) |
2504 | return PTR_ERR(device); | 2506 | return PTR_ERR(device); |
2505 | 2507 | ||
2508 | /* allow new IO again */ | ||
2509 | device->stopped &= ~DASD_STOPPED_PM; | ||
2510 | device->stopped &= ~DASD_UNRESUMED_PM; | ||
2511 | |||
2506 | dasd_schedule_device_bh(device); | 2512 | dasd_schedule_device_bh(device); |
2507 | if (device->block) | 2513 | if (device->block) |
2508 | dasd_schedule_block_bh(device->block); | 2514 | dasd_schedule_block_bh(device->block); |
2509 | 2515 | ||
2510 | if (device->discipline->restore) | 2516 | if (device->discipline->restore) |
2511 | rc = device->discipline->restore(device); | 2517 | rc = device->discipline->restore(device); |
2518 | if (rc) | ||
2519 | /* | ||
2520 | * if the resume failed for the DASD we put it in | ||
2521 | * an UNRESUMED stop state | ||
2522 | */ | ||
2523 | device->stopped |= DASD_UNRESUMED_PM; | ||
2512 | 2524 | ||
2513 | dasd_put_device(device); | 2525 | dasd_put_device(device); |
2514 | return rc; | 2526 | return 0; |
2515 | } | 2527 | } |
2516 | EXPORT_SYMBOL_GPL(dasd_generic_restore_device); | 2528 | EXPORT_SYMBOL_GPL(dasd_generic_restore_device); |
2517 | 2529 | ||
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 1c28ec3e4ccb..f8b1f04f26b8 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -3243,9 +3243,6 @@ int dasd_eckd_restore_device(struct dasd_device *device) | |||
3243 | int is_known, rc; | 3243 | int is_known, rc; |
3244 | struct dasd_uid temp_uid; | 3244 | struct dasd_uid temp_uid; |
3245 | 3245 | ||
3246 | /* allow new IO again */ | ||
3247 | device->stopped &= ~DASD_STOPPED_PM; | ||
3248 | |||
3249 | private = (struct dasd_eckd_private *) device->private; | 3246 | private = (struct dasd_eckd_private *) device->private; |
3250 | 3247 | ||
3251 | /* Read Configuration Data */ | 3248 | /* Read Configuration Data */ |
@@ -3295,12 +3292,7 @@ int dasd_eckd_restore_device(struct dasd_device *device) | |||
3295 | return 0; | 3292 | return 0; |
3296 | 3293 | ||
3297 | out_err: | 3294 | out_err: |
3298 | /* | 3295 | return -1; |
3299 | * if the resume failed for the DASD we put it in | ||
3300 | * an UNRESUMED stop state | ||
3301 | */ | ||
3302 | device->stopped |= DASD_UNRESUMED_PM; | ||
3303 | return 0; | ||
3304 | } | 3296 | } |
3305 | 3297 | ||
3306 | static struct ccw_driver dasd_eckd_driver = { | 3298 | static struct ccw_driver dasd_eckd_driver = { |
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index 04dc734805c6..21639d6c996f 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -20,10 +20,7 @@ | |||
20 | #include <linux/interrupt.h> | 20 | #include <linux/interrupt.h> |
21 | #include <linux/err.h> | 21 | #include <linux/err.h> |
22 | #include <linux/reboot.h> | 22 | #include <linux/reboot.h> |
23 | |||
24 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
25 | #include <linux/bootmem.h> | ||
26 | |||
27 | #include <asm/ccwdev.h> | 24 | #include <asm/ccwdev.h> |
28 | #include <asm/cio.h> | 25 | #include <asm/cio.h> |
29 | #include <asm/io.h> | 26 | #include <asm/io.h> |
@@ -735,7 +732,7 @@ static int raw3215_pm_stop(struct ccw_device *cdev) | |||
735 | unsigned long flags; | 732 | unsigned long flags; |
736 | 733 | ||
737 | /* Empty the output buffer, then prevent new I/O. */ | 734 | /* Empty the output buffer, then prevent new I/O. */ |
738 | raw = cdev->dev.driver_data; | 735 | raw = dev_get_drvdata(&cdev->dev); |
739 | spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); | 736 | spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); |
740 | raw3215_make_room(raw, RAW3215_BUFFER_SIZE); | 737 | raw3215_make_room(raw, RAW3215_BUFFER_SIZE); |
741 | raw->flags |= RAW3215_FROZEN; | 738 | raw->flags |= RAW3215_FROZEN; |
@@ -749,7 +746,7 @@ static int raw3215_pm_start(struct ccw_device *cdev) | |||
749 | unsigned long flags; | 746 | unsigned long flags; |
750 | 747 | ||
751 | /* Allow I/O again and flush output buffer. */ | 748 | /* Allow I/O again and flush output buffer. */ |
752 | raw = cdev->dev.driver_data; | 749 | raw = dev_get_drvdata(&cdev->dev); |
753 | spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); | 750 | spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); |
754 | raw->flags &= ~RAW3215_FROZEN; | 751 | raw->flags &= ~RAW3215_FROZEN; |
755 | raw->flags |= RAW3215_FLUSHING; | 752 | raw->flags |= RAW3215_FLUSHING; |
@@ -883,7 +880,7 @@ static int __init con3215_init(void) | |||
883 | raw3215_freelist = NULL; | 880 | raw3215_freelist = NULL; |
884 | spin_lock_init(&raw3215_freelist_lock); | 881 | spin_lock_init(&raw3215_freelist_lock); |
885 | for (i = 0; i < NR_3215_REQ; i++) { | 882 | for (i = 0; i < NR_3215_REQ; i++) { |
886 | req = (struct raw3215_req *) alloc_bootmem_low(sizeof(struct raw3215_req)); | 883 | req = kzalloc(sizeof(struct raw3215_req), GFP_KERNEL | GFP_DMA); |
887 | req->next = raw3215_freelist; | 884 | req->next = raw3215_freelist; |
888 | raw3215_freelist = req; | 885 | raw3215_freelist = req; |
889 | } | 886 | } |
@@ -893,10 +890,9 @@ static int __init con3215_init(void) | |||
893 | return -ENODEV; | 890 | return -ENODEV; |
894 | 891 | ||
895 | raw3215[0] = raw = (struct raw3215_info *) | 892 | raw3215[0] = raw = (struct raw3215_info *) |
896 | alloc_bootmem_low(sizeof(struct raw3215_info)); | 893 | kzalloc(sizeof(struct raw3215_info), GFP_KERNEL | GFP_DMA); |
897 | memset(raw, 0, sizeof(struct raw3215_info)); | 894 | raw->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA); |
898 | raw->buffer = (char *) alloc_bootmem_low(RAW3215_BUFFER_SIZE); | 895 | raw->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA); |
899 | raw->inbuf = (char *) alloc_bootmem_low(RAW3215_INBUF_SIZE); | ||
900 | raw->cdev = cdev; | 896 | raw->cdev = cdev; |
901 | dev_set_drvdata(&cdev->dev, raw); | 897 | dev_set_drvdata(&cdev->dev, raw); |
902 | cdev->handler = raw3215_irq; | 898 | cdev->handler = raw3215_irq; |
@@ -906,9 +902,9 @@ static int __init con3215_init(void) | |||
906 | 902 | ||
907 | /* Request the console irq */ | 903 | /* Request the console irq */ |
908 | if (raw3215_startup(raw) != 0) { | 904 | if (raw3215_startup(raw) != 0) { |
909 | free_bootmem((unsigned long) raw->inbuf, RAW3215_INBUF_SIZE); | 905 | kfree(raw->inbuf); |
910 | free_bootmem((unsigned long) raw->buffer, RAW3215_BUFFER_SIZE); | 906 | kfree(raw->buffer); |
911 | free_bootmem((unsigned long) raw, sizeof(struct raw3215_info)); | 907 | kfree(raw); |
912 | raw3215[0] = NULL; | 908 | raw3215[0] = NULL; |
913 | return -ENODEV; | 909 | return -ENODEV; |
914 | } | 910 | } |
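
The con3215 hunks above replace the boot-time bootmem allocations with ordinary slab allocations; GFP_DMA keeps the buffers in 31-bit addressable storage for the channel programs, and kzalloc() already zeroes the memory, so the explicit memset goes away. A minimal sketch of that allocation pattern; the structure and size names are the driver's, while the helper function and its error handling are invented here for illustration:

#include <linux/slab.h>

static struct raw3215_info *raw3215_alloc_info(void)
{
	struct raw3215_info *raw;

	/* kzalloc zeroes the allocation, replacing the memset that the
	 * alloc_bootmem_low based code needed */
	raw = kzalloc(sizeof(*raw), GFP_KERNEL | GFP_DMA);
	if (!raw)
		return NULL;
	raw->buffer = kzalloc(RAW3215_BUFFER_SIZE, GFP_KERNEL | GFP_DMA);
	raw->inbuf = kzalloc(RAW3215_INBUF_SIZE, GFP_KERNEL | GFP_DMA);
	if (!raw->buffer || !raw->inbuf) {
		kfree(raw->inbuf);
		kfree(raw->buffer);
		kfree(raw);
		return NULL;
	}
	return raw;
}
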
diff --git a/drivers/s390/char/con3270.c b/drivers/s390/char/con3270.c
index 44d02e371c04..bb838bdf829d 100644
--- a/drivers/s390/char/con3270.c
+++ b/drivers/s390/char/con3270.c
@@ -7,7 +7,6 @@ | |||
7 | * Copyright IBM Corp. 2003, 2009 | 7 | * Copyright IBM Corp. 2003, 2009 |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/bootmem.h> | ||
11 | #include <linux/console.h> | 10 | #include <linux/console.h> |
12 | #include <linux/init.h> | 11 | #include <linux/init.h> |
13 | #include <linux/interrupt.h> | 12 | #include <linux/interrupt.h> |
@@ -600,16 +599,14 @@ con3270_init(void) | |||
600 | if (IS_ERR(rp)) | 599 | if (IS_ERR(rp)) |
601 | return PTR_ERR(rp); | 600 | return PTR_ERR(rp); |
602 | 601 | ||
603 | condev = (struct con3270 *) alloc_bootmem_low(sizeof(struct con3270)); | 602 | condev = kzalloc(sizeof(struct con3270), GFP_KERNEL | GFP_DMA); |
604 | memset(condev, 0, sizeof(struct con3270)); | ||
605 | condev->view.dev = rp; | 603 | condev->view.dev = rp; |
606 | 604 | ||
607 | condev->read = raw3270_request_alloc_bootmem(0); | 605 | condev->read = raw3270_request_alloc(0); |
608 | condev->read->callback = con3270_read_callback; | 606 | condev->read->callback = con3270_read_callback; |
609 | condev->read->callback_data = condev; | 607 | condev->read->callback_data = condev; |
610 | condev->write = | 608 | condev->write = raw3270_request_alloc(CON3270_OUTPUT_BUFFER_SIZE); |
611 | raw3270_request_alloc_bootmem(CON3270_OUTPUT_BUFFER_SIZE); | 609 | condev->kreset = raw3270_request_alloc(1); |
612 | condev->kreset = raw3270_request_alloc_bootmem(1); | ||
613 | 610 | ||
614 | INIT_LIST_HEAD(&condev->lines); | 611 | INIT_LIST_HEAD(&condev->lines); |
615 | INIT_LIST_HEAD(&condev->update); | 612 | INIT_LIST_HEAD(&condev->update); |
@@ -623,7 +620,7 @@ con3270_init(void) | |||
623 | 620 | ||
624 | INIT_LIST_HEAD(&condev->freemem); | 621 | INIT_LIST_HEAD(&condev->freemem); |
625 | for (i = 0; i < CON3270_STRING_PAGES; i++) { | 622 | for (i = 0; i < CON3270_STRING_PAGES; i++) { |
626 | cbuf = (void *) alloc_bootmem_low_pages(PAGE_SIZE); | 623 | cbuf = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
627 | add_string_memory(&condev->freemem, cbuf, PAGE_SIZE); | 624 | add_string_memory(&condev->freemem, cbuf, PAGE_SIZE); |
628 | } | 625 | } |
629 | condev->cline = alloc_string(&condev->freemem, condev->view.cols); | 626 | condev->cline = alloc_string(&condev->freemem, condev->view.cols); |
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c
index 75a8831eebbc..7892550d7932 100644
--- a/drivers/s390/char/monreader.c
+++ b/drivers/s390/char/monreader.c
@@ -320,7 +320,7 @@ static int mon_open(struct inode *inode, struct file *filp) | |||
320 | goto out_path; | 320 | goto out_path; |
321 | } | 321 | } |
322 | filp->private_data = monpriv; | 322 | filp->private_data = monpriv; |
323 | monreader_device->driver_data = monpriv; | 323 | dev_set_drvdata(monreader_device, monpriv); |
324 | unlock_kernel(); | 324 | unlock_kernel(); |
325 | return nonseekable_open(inode, filp); | 325 | return nonseekable_open(inode, filp); |
326 | 326 | ||
@@ -463,7 +463,7 @@ static struct miscdevice mon_dev = { | |||
463 | *****************************************************************************/ | 463 | *****************************************************************************/ |
464 | static int monreader_freeze(struct device *dev) | 464 | static int monreader_freeze(struct device *dev) |
465 | { | 465 | { |
466 | struct mon_private *monpriv = dev->driver_data; | 466 | struct mon_private *monpriv = dev_get_drvdata(dev); |
467 | int rc; | 467 | int rc; |
468 | 468 | ||
469 | if (!monpriv) | 469 | if (!monpriv) |
@@ -487,7 +487,7 @@ static int monreader_freeze(struct device *dev) | |||
487 | 487 | ||
488 | static int monreader_thaw(struct device *dev) | 488 | static int monreader_thaw(struct device *dev) |
489 | { | 489 | { |
490 | struct mon_private *monpriv = dev->driver_data; | 490 | struct mon_private *monpriv = dev_get_drvdata(dev); |
491 | int rc; | 491 | int rc; |
492 | 492 | ||
493 | if (!monpriv) | 493 | if (!monpriv) |
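
Several files in this series (con3215, monreader, raw3270, tape_core, vmlogrdr, vmur, netiucv) stop poking dev->driver_data directly and use the dev_set_drvdata()/dev_get_drvdata() accessors instead. A small sketch of the pattern with a hypothetical private structure and callbacks:

#include <linux/device.h>

struct my_priv {
	int connection_active;	/* hypothetical per-device state */
};

/* store the private data when the device is set up ... */
static void my_setup(struct device *dev, struct my_priv *priv)
{
	dev_set_drvdata(dev, priv);
}

/* ... and retrieve it in the power management callbacks instead of
 * dereferencing dev->driver_data */
static int my_freeze(struct device *dev)
{
	struct my_priv *priv = dev_get_drvdata(dev);

	if (!priv)
		return 0;
	/* quiesce the device here */
	return 0;
}
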
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index acab7b2dfe8a..d6a022f55e92 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -7,7 +7,6 @@ | |||
7 | * Copyright IBM Corp. 2003, 2009 | 7 | * Copyright IBM Corp. 2003, 2009 |
8 | */ | 8 | */ |
9 | 9 | ||
10 | #include <linux/bootmem.h> | ||
11 | #include <linux/module.h> | 10 | #include <linux/module.h> |
12 | #include <linux/err.h> | 11 | #include <linux/err.h> |
13 | #include <linux/init.h> | 12 | #include <linux/init.h> |
@@ -143,33 +142,6 @@ raw3270_request_alloc(size_t size) | |||
143 | return rq; | 142 | return rq; |
144 | } | 143 | } |
145 | 144 | ||
146 | #ifdef CONFIG_TN3270_CONSOLE | ||
147 | /* | ||
148 | * Allocate a new 3270 ccw request from bootmem. Only works very | ||
149 | * early in the boot process. Only con3270.c should be using this. | ||
150 | */ | ||
151 | struct raw3270_request __init *raw3270_request_alloc_bootmem(size_t size) | ||
152 | { | ||
153 | struct raw3270_request *rq; | ||
154 | |||
155 | rq = alloc_bootmem_low(sizeof(struct raw3270)); | ||
156 | |||
157 | /* alloc output buffer. */ | ||
158 | if (size > 0) | ||
159 | rq->buffer = alloc_bootmem_low(size); | ||
160 | rq->size = size; | ||
161 | INIT_LIST_HEAD(&rq->list); | ||
162 | |||
163 | /* | ||
164 | * Setup ccw. | ||
165 | */ | ||
166 | rq->ccw.cda = __pa(rq->buffer); | ||
167 | rq->ccw.flags = CCW_FLAG_SLI; | ||
168 | |||
169 | return rq; | ||
170 | } | ||
171 | #endif | ||
172 | |||
173 | /* | 145 | /* |
174 | * Free 3270 ccw request | 146 | * Free 3270 ccw request |
175 | */ | 147 | */ |
@@ -846,8 +818,8 @@ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev) | |||
846 | char *ascebc; | 818 | char *ascebc; |
847 | int rc; | 819 | int rc; |
848 | 820 | ||
849 | rp = (struct raw3270 *) alloc_bootmem_low(sizeof(struct raw3270)); | 821 | rp = kzalloc(sizeof(struct raw3270), GFP_KERNEL | GFP_DMA); |
850 | ascebc = (char *) alloc_bootmem(256); | 822 | ascebc = kzalloc(256, GFP_KERNEL); |
851 | rc = raw3270_setup_device(cdev, rp, ascebc); | 823 | rc = raw3270_setup_device(cdev, rp, ascebc); |
852 | if (rc) | 824 | if (rc) |
853 | return ERR_PTR(rc); | 825 | return ERR_PTR(rc); |
@@ -1350,7 +1322,7 @@ static int raw3270_pm_stop(struct ccw_device *cdev) | |||
1350 | struct raw3270_view *view; | 1322 | struct raw3270_view *view; |
1351 | unsigned long flags; | 1323 | unsigned long flags; |
1352 | 1324 | ||
1353 | rp = cdev->dev.driver_data; | 1325 | rp = dev_get_drvdata(&cdev->dev); |
1354 | if (!rp) | 1326 | if (!rp) |
1355 | return 0; | 1327 | return 0; |
1356 | spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); | 1328 | spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); |
@@ -1376,7 +1348,7 @@ static int raw3270_pm_start(struct ccw_device *cdev) | |||
1376 | struct raw3270 *rp; | 1348 | struct raw3270 *rp; |
1377 | unsigned long flags; | 1349 | unsigned long flags; |
1378 | 1350 | ||
1379 | rp = cdev->dev.driver_data; | 1351 | rp = dev_get_drvdata(&cdev->dev); |
1380 | if (!rp) | 1352 | if (!rp) |
1381 | return 0; | 1353 | return 0; |
1382 | spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); | 1354 | spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); |
diff --git a/drivers/s390/char/sclp_con.c b/drivers/s390/char/sclp_con.c
index 336811a77672..ad698d30cb3b 100644
--- a/drivers/s390/char/sclp_con.c
+++ b/drivers/s390/char/sclp_con.c
@@ -11,7 +11,6 @@ | |||
11 | #include <linux/init.h> | 11 | #include <linux/init.h> |
12 | #include <linux/timer.h> | 12 | #include <linux/timer.h> |
13 | #include <linux/jiffies.h> | 13 | #include <linux/jiffies.h> |
14 | #include <linux/bootmem.h> | ||
15 | #include <linux/termios.h> | 14 | #include <linux/termios.h> |
16 | #include <linux/err.h> | 15 | #include <linux/err.h> |
17 | #include <linux/reboot.h> | 16 | #include <linux/reboot.h> |
@@ -110,7 +109,7 @@ static void sclp_console_sync_queue(void) | |||
110 | 109 | ||
111 | spin_lock_irqsave(&sclp_con_lock, flags); | 110 | spin_lock_irqsave(&sclp_con_lock, flags); |
112 | if (timer_pending(&sclp_con_timer)) | 111 | if (timer_pending(&sclp_con_timer)) |
113 | del_timer_sync(&sclp_con_timer); | 112 | del_timer(&sclp_con_timer); |
114 | while (sclp_con_queue_running) { | 113 | while (sclp_con_queue_running) { |
115 | spin_unlock_irqrestore(&sclp_con_lock, flags); | 114 | spin_unlock_irqrestore(&sclp_con_lock, flags); |
116 | sclp_sync_wait(); | 115 | sclp_sync_wait(); |
@@ -298,8 +297,8 @@ sclp_console_init(void) | |||
298 | /* Allocate pages for output buffering */ | 297 | /* Allocate pages for output buffering */ |
299 | INIT_LIST_HEAD(&sclp_con_pages); | 298 | INIT_LIST_HEAD(&sclp_con_pages); |
300 | for (i = 0; i < MAX_CONSOLE_PAGES; i++) { | 299 | for (i = 0; i < MAX_CONSOLE_PAGES; i++) { |
301 | page = alloc_bootmem_low_pages(PAGE_SIZE); | 300 | page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
302 | list_add_tail((struct list_head *) page, &sclp_con_pages); | 301 | list_add_tail(page, &sclp_con_pages); |
303 | } | 302 | } |
304 | INIT_LIST_HEAD(&sclp_con_outqueue); | 303 | INIT_LIST_HEAD(&sclp_con_outqueue); |
305 | spin_lock_init(&sclp_con_lock); | 304 | spin_lock_init(&sclp_con_lock); |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 5518e24946aa..178724f2a4c3 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -20,7 +20,6 @@ | |||
20 | #include <linux/major.h> | 20 | #include <linux/major.h> |
21 | #include <linux/console.h> | 21 | #include <linux/console.h> |
22 | #include <linux/kdev_t.h> | 22 | #include <linux/kdev_t.h> |
23 | #include <linux/bootmem.h> | ||
24 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
25 | #include <linux/init.h> | 24 | #include <linux/init.h> |
26 | #include <linux/reboot.h> | 25 | #include <linux/reboot.h> |
@@ -601,10 +600,7 @@ static void __init __sclp_vt220_free_pages(void) | |||
601 | 600 | ||
602 | list_for_each_safe(page, p, &sclp_vt220_empty) { | 601 | list_for_each_safe(page, p, &sclp_vt220_empty) { |
603 | list_del(page); | 602 | list_del(page); |
604 | if (slab_is_available()) | 603 | free_page((unsigned long) page); |
605 | free_page((unsigned long) page); | ||
606 | else | ||
607 | free_bootmem((unsigned long) page, PAGE_SIZE); | ||
608 | } | 604 | } |
609 | } | 605 | } |
610 | 606 | ||
@@ -640,16 +636,12 @@ static int __init __sclp_vt220_init(int num_pages) | |||
640 | sclp_vt220_flush_later = 0; | 636 | sclp_vt220_flush_later = 0; |
641 | 637 | ||
642 | /* Allocate pages for output buffering */ | 638 | /* Allocate pages for output buffering */ |
639 | rc = -ENOMEM; | ||
643 | for (i = 0; i < num_pages; i++) { | 640 | for (i = 0; i < num_pages; i++) { |
644 | if (slab_is_available()) | 641 | page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); |
645 | page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA); | 642 | if (!page) |
646 | else | ||
647 | page = alloc_bootmem_low_pages(PAGE_SIZE); | ||
648 | if (!page) { | ||
649 | rc = -ENOMEM; | ||
650 | goto out; | 643 | goto out; |
651 | } | 644 | list_add_tail(page, &sclp_vt220_empty); |
652 | list_add_tail((struct list_head *) page, &sclp_vt220_empty); | ||
653 | } | 645 | } |
654 | rc = sclp_register(&sclp_vt220_register); | 646 | rc = sclp_register(&sclp_vt220_register); |
655 | out: | 647 | out: |
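
With the bootmem fallback gone, the __sclp_vt220_init() hunk primes rc with -ENOMEM before the allocation loop, so a failed get_zeroed_page() only needs a bare goto rather than setting the error code inside the loop. A reduced sketch of that error-path style; the function name is invented and the registration step is omitted:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/list.h>

static int __init example_alloc_pages(struct list_head *empty, int num_pages)
{
	void *page;
	int i, rc;

	rc = -ENOMEM;
	for (i = 0; i < num_pages; i++) {
		page = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
		if (!page)
			goto out;	/* rc already holds -ENOMEM */
		list_add_tail(page, empty);
	}
	rc = 0;
out:
	return rc;
}
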
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 595aa04cfd01..1d420d947596 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -396,7 +396,7 @@ int tape_generic_pm_suspend(struct ccw_device *cdev) | |||
396 | { | 396 | { |
397 | struct tape_device *device; | 397 | struct tape_device *device; |
398 | 398 | ||
399 | device = cdev->dev.driver_data; | 399 | device = dev_get_drvdata(&cdev->dev); |
400 | if (!device) { | 400 | if (!device) { |
401 | return -ENODEV; | 401 | return -ENODEV; |
402 | } | 402 | } |
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index 411cfa3c7719..c20a4fe6da51 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -663,7 +663,7 @@ static struct attribute *vmlogrdr_attrs[] = { | |||
663 | static int vmlogrdr_pm_prepare(struct device *dev) | 663 | static int vmlogrdr_pm_prepare(struct device *dev) |
664 | { | 664 | { |
665 | int rc; | 665 | int rc; |
666 | struct vmlogrdr_priv_t *priv = dev->driver_data; | 666 | struct vmlogrdr_priv_t *priv = dev_get_drvdata(dev); |
667 | 667 | ||
668 | rc = 0; | 668 | rc = 0; |
669 | if (priv) { | 669 | if (priv) { |
@@ -753,7 +753,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) | |||
753 | dev->bus = &iucv_bus; | 753 | dev->bus = &iucv_bus; |
754 | dev->parent = iucv_root; | 754 | dev->parent = iucv_root; |
755 | dev->driver = &vmlogrdr_driver; | 755 | dev->driver = &vmlogrdr_driver; |
756 | dev->driver_data = priv; | 756 | dev_set_drvdata(dev, priv); |
757 | /* | 757 | /* |
758 | * The release function could be called after the | 758 | * The release function could be called after the |
759 | * module has been unloaded. It's _only_ task is to | 759 | * module has been unloaded. It's _only_ task is to |
diff --git a/drivers/s390/char/vmur.c b/drivers/s390/char/vmur.c
index 7d9e67cb6471..31b902e94f7b 100644
--- a/drivers/s390/char/vmur.c
+++ b/drivers/s390/char/vmur.c
@@ -170,7 +170,7 @@ static void urdev_put(struct urdev *urd) | |||
170 | */ | 170 | */ |
171 | static int ur_pm_suspend(struct ccw_device *cdev) | 171 | static int ur_pm_suspend(struct ccw_device *cdev) |
172 | { | 172 | { |
173 | struct urdev *urd = cdev->dev.driver_data; | 173 | struct urdev *urd = dev_get_drvdata(&cdev->dev); |
174 | 174 | ||
175 | TRACE("ur_pm_suspend: cdev=%p\n", cdev); | 175 | TRACE("ur_pm_suspend: cdev=%p\n", cdev); |
176 | if (urd->open_flag) { | 176 | if (urd->open_flag) { |
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 13bcb8114388..b1241f8fae88 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -351,15 +351,6 @@ static inline unsigned long long get_usecs(void) | |||
351 | ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) | 351 | ((bufnr - dec) & QDIO_MAX_BUFFERS_MASK) |
352 | 352 | ||
353 | /* prototypes for thin interrupt */ | 353 | /* prototypes for thin interrupt */ |
354 | void qdio_sync_after_thinint(struct qdio_q *q); | ||
355 | int get_buf_state(struct qdio_q *q, unsigned int bufnr, unsigned char *state, | ||
356 | int auto_ack); | ||
357 | void qdio_check_outbound_after_thinint(struct qdio_q *q); | ||
358 | int qdio_inbound_q_moved(struct qdio_q *q); | ||
359 | void qdio_kick_handler(struct qdio_q *q); | ||
360 | void qdio_stop_polling(struct qdio_q *q); | ||
361 | int qdio_siga_sync_q(struct qdio_q *q); | ||
362 | |||
363 | void qdio_setup_thinint(struct qdio_irq *irq_ptr); | 354 | void qdio_setup_thinint(struct qdio_irq *irq_ptr); |
364 | int qdio_establish_thinint(struct qdio_irq *irq_ptr); | 355 | int qdio_establish_thinint(struct qdio_irq *irq_ptr); |
365 | void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); | 356 | void qdio_shutdown_thinint(struct qdio_irq *irq_ptr); |
@@ -392,4 +383,6 @@ void qdio_setup_destroy_sysfs(struct ccw_device *cdev); | |||
392 | int qdio_setup_init(void); | 383 | int qdio_setup_init(void); |
393 | void qdio_setup_exit(void); | 384 | void qdio_setup_exit(void); |
394 | 385 | ||
386 | int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, | ||
387 | unsigned char *state); | ||
395 | #endif /* _CIO_QDIO_H */ | 388 | #endif /* _CIO_QDIO_H */ |
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index e3434b34f86c..b8626d4df116 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -70,9 +70,8 @@ static int qstat_show(struct seq_file *m, void *v) | |||
70 | seq_printf(m, "slsb buffer states:\n"); | 70 | seq_printf(m, "slsb buffer states:\n"); |
71 | seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); | 71 | seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n"); |
72 | 72 | ||
73 | qdio_siga_sync_q(q); | ||
74 | for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { | 73 | for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++) { |
75 | get_buf_state(q, i, &state, 0); | 74 | debug_get_buf_state(q, i, &state); |
76 | switch (state) { | 75 | switch (state) { |
77 | case SLSB_P_INPUT_NOT_INIT: | 76 | case SLSB_P_INPUT_NOT_INIT: |
78 | case SLSB_P_OUTPUT_NOT_INIT: | 77 | case SLSB_P_OUTPUT_NOT_INIT: |
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index d79cf5bf0e62..0038750ad945 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -231,8 +231,8 @@ static inline int get_buf_states(struct qdio_q *q, unsigned int bufnr, | |||
231 | return i; | 231 | return i; |
232 | } | 232 | } |
233 | 233 | ||
234 | inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, | 234 | static inline int get_buf_state(struct qdio_q *q, unsigned int bufnr, |
235 | unsigned char *state, int auto_ack) | 235 | unsigned char *state, int auto_ack) |
236 | { | 236 | { |
237 | return get_buf_states(q, bufnr, state, 1, auto_ack); | 237 | return get_buf_states(q, bufnr, state, 1, auto_ack); |
238 | } | 238 | } |
@@ -276,7 +276,7 @@ void qdio_init_buf_states(struct qdio_irq *irq_ptr) | |||
276 | QDIO_MAX_BUFFERS_PER_Q); | 276 | QDIO_MAX_BUFFERS_PER_Q); |
277 | } | 277 | } |
278 | 278 | ||
279 | static int qdio_siga_sync(struct qdio_q *q, unsigned int output, | 279 | static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output, |
280 | unsigned int input) | 280 | unsigned int input) |
281 | { | 281 | { |
282 | int cc; | 282 | int cc; |
@@ -293,7 +293,7 @@ static int qdio_siga_sync(struct qdio_q *q, unsigned int output, | |||
293 | return cc; | 293 | return cc; |
294 | } | 294 | } |
295 | 295 | ||
296 | inline int qdio_siga_sync_q(struct qdio_q *q) | 296 | static inline int qdio_siga_sync_q(struct qdio_q *q) |
297 | { | 297 | { |
298 | if (q->is_input_q) | 298 | if (q->is_input_q) |
299 | return qdio_siga_sync(q, 0, q->mask); | 299 | return qdio_siga_sync(q, 0, q->mask); |
@@ -358,8 +358,7 @@ static inline int qdio_siga_input(struct qdio_q *q) | |||
358 | return cc; | 358 | return cc; |
359 | } | 359 | } |
360 | 360 | ||
361 | /* called from thinint inbound handler */ | 361 | static inline void qdio_sync_after_thinint(struct qdio_q *q) |
362 | void qdio_sync_after_thinint(struct qdio_q *q) | ||
363 | { | 362 | { |
364 | if (pci_out_supported(q)) { | 363 | if (pci_out_supported(q)) { |
365 | if (need_siga_sync_thinint(q)) | 364 | if (need_siga_sync_thinint(q)) |
@@ -370,7 +369,14 @@ void qdio_sync_after_thinint(struct qdio_q *q) | |||
370 | qdio_siga_sync_q(q); | 369 | qdio_siga_sync_q(q); |
371 | } | 370 | } |
372 | 371 | ||
373 | inline void qdio_stop_polling(struct qdio_q *q) | 372 | int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr, |
373 | unsigned char *state) | ||
374 | { | ||
375 | qdio_siga_sync_q(q); | ||
376 | return get_buf_states(q, bufnr, state, 1, 0); | ||
377 | } | ||
378 | |||
379 | static inline void qdio_stop_polling(struct qdio_q *q) | ||
374 | { | 380 | { |
375 | if (!q->u.in.polling) | 381 | if (!q->u.in.polling) |
376 | return; | 382 | return; |
@@ -449,13 +455,6 @@ static inline void inbound_primed(struct qdio_q *q, int count) | |||
449 | count--; | 455 | count--; |
450 | if (!count) | 456 | if (!count) |
451 | return; | 457 | return; |
452 | |||
453 | /* | ||
454 | * Need to change all PRIMED buffers to NOT_INIT, otherwise | ||
455 | * we're loosing initiative in the thinint code. | ||
456 | */ | ||
457 | set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, | ||
458 | count); | ||
459 | } | 458 | } |
460 | 459 | ||
461 | static int get_inbound_buffer_frontier(struct qdio_q *q) | 460 | static int get_inbound_buffer_frontier(struct qdio_q *q) |
@@ -470,19 +469,13 @@ static int get_inbound_buffer_frontier(struct qdio_q *q) | |||
470 | count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); | 469 | count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); |
471 | stop = add_buf(q->first_to_check, count); | 470 | stop = add_buf(q->first_to_check, count); |
472 | 471 | ||
473 | /* | ||
474 | * No siga sync here, as a PCI or we after a thin interrupt | ||
475 | * will sync the queues. | ||
476 | */ | ||
477 | |||
478 | /* need to set count to 1 for non-qebsm */ | ||
479 | if (!is_qebsm(q)) | ||
480 | count = 1; | ||
481 | |||
482 | check_next: | ||
483 | if (q->first_to_check == stop) | 472 | if (q->first_to_check == stop) |
484 | goto out; | 473 | goto out; |
485 | 474 | ||
475 | /* | ||
476 | * No siga sync here, as a PCI or we after a thin interrupt | ||
477 | * already sync'ed the queues. | ||
478 | */ | ||
486 | count = get_buf_states(q, q->first_to_check, &state, count, 1); | 479 | count = get_buf_states(q, q->first_to_check, &state, count, 1); |
487 | if (!count) | 480 | if (!count) |
488 | goto out; | 481 | goto out; |
@@ -490,14 +483,9 @@ check_next: | |||
490 | switch (state) { | 483 | switch (state) { |
491 | case SLSB_P_INPUT_PRIMED: | 484 | case SLSB_P_INPUT_PRIMED: |
492 | inbound_primed(q, count); | 485 | inbound_primed(q, count); |
493 | /* | ||
494 | * No siga-sync needed for non-qebsm here, as the inbound queue | ||
495 | * will be synced on the next siga-r, resp. | ||
496 | * tiqdio_is_inbound_q_done will do the siga-sync. | ||
497 | */ | ||
498 | q->first_to_check = add_buf(q->first_to_check, count); | 486 | q->first_to_check = add_buf(q->first_to_check, count); |
499 | atomic_sub(count, &q->nr_buf_used); | 487 | atomic_sub(count, &q->nr_buf_used); |
500 | goto check_next; | 488 | break; |
501 | case SLSB_P_INPUT_ERROR: | 489 | case SLSB_P_INPUT_ERROR: |
502 | announce_buffer_error(q, count); | 490 | announce_buffer_error(q, count); |
503 | /* process the buffer, the upper layer will take care of it */ | 491 | /* process the buffer, the upper layer will take care of it */ |
@@ -516,7 +504,7 @@ out: | |||
516 | return q->first_to_check; | 504 | return q->first_to_check; |
517 | } | 505 | } |
518 | 506 | ||
519 | int qdio_inbound_q_moved(struct qdio_q *q) | 507 | static int qdio_inbound_q_moved(struct qdio_q *q) |
520 | { | 508 | { |
521 | int bufnr; | 509 | int bufnr; |
522 | 510 | ||
@@ -524,35 +512,32 @@ int qdio_inbound_q_moved(struct qdio_q *q) | |||
524 | 512 | ||
525 | if ((bufnr != q->last_move) || q->qdio_error) { | 513 | if ((bufnr != q->last_move) || q->qdio_error) { |
526 | q->last_move = bufnr; | 514 | q->last_move = bufnr; |
527 | if (!need_siga_sync(q) && !pci_out_supported(q)) | 515 | if (!is_thinint_irq(q->irq_ptr) && !MACHINE_IS_VM) |
528 | q->u.in.timestamp = get_usecs(); | 516 | q->u.in.timestamp = get_usecs(); |
529 | |||
530 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in moved"); | ||
531 | return 1; | 517 | return 1; |
532 | } else | 518 | } else |
533 | return 0; | 519 | return 0; |
534 | } | 520 | } |
535 | 521 | ||
536 | static int qdio_inbound_q_done(struct qdio_q *q) | 522 | static inline int qdio_inbound_q_done(struct qdio_q *q) |
537 | { | 523 | { |
538 | unsigned char state = 0; | 524 | unsigned char state = 0; |
539 | 525 | ||
540 | if (!atomic_read(&q->nr_buf_used)) | 526 | if (!atomic_read(&q->nr_buf_used)) |
541 | return 1; | 527 | return 1; |
542 | 528 | ||
543 | /* | ||
544 | * We need that one for synchronization with the adapter, as it | ||
545 | * does a kind of PCI avoidance. | ||
546 | */ | ||
547 | qdio_siga_sync_q(q); | 529 | qdio_siga_sync_q(q); |
548 | |||
549 | get_buf_state(q, q->first_to_check, &state, 0); | 530 | get_buf_state(q, q->first_to_check, &state, 0); |
531 | |||
550 | if (state == SLSB_P_INPUT_PRIMED) | 532 | if (state == SLSB_P_INPUT_PRIMED) |
551 | /* we got something to do */ | 533 | /* more work coming */ |
552 | return 0; | 534 | return 0; |
553 | 535 | ||
554 | /* on VM, we don't poll, so the q is always done here */ | 536 | if (is_thinint_irq(q->irq_ptr)) |
555 | if (need_siga_sync(q) || pci_out_supported(q)) | 537 | return 1; |
538 | |||
539 | /* don't poll under z/VM */ | ||
540 | if (MACHINE_IS_VM) | ||
556 | return 1; | 541 | return 1; |
557 | 542 | ||
558 | /* | 543 | /* |
@@ -563,14 +548,11 @@ static int qdio_inbound_q_done(struct qdio_q *q) | |||
563 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d", | 548 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d", |
564 | q->first_to_check); | 549 | q->first_to_check); |
565 | return 1; | 550 | return 1; |
566 | } else { | 551 | } else |
567 | DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in notd:%3d", | ||
568 | q->first_to_check); | ||
569 | return 0; | 552 | return 0; |
570 | } | ||
571 | } | 553 | } |
572 | 554 | ||
573 | void qdio_kick_handler(struct qdio_q *q) | 555 | static void qdio_kick_handler(struct qdio_q *q) |
574 | { | 556 | { |
575 | int start = q->first_to_kick; | 557 | int start = q->first_to_kick; |
576 | int end = q->first_to_check; | 558 | int end = q->first_to_check; |
@@ -619,7 +601,6 @@ again: | |||
619 | goto again; | 601 | goto again; |
620 | } | 602 | } |
621 | 603 | ||
622 | /* inbound tasklet */ | ||
623 | void qdio_inbound_processing(unsigned long data) | 604 | void qdio_inbound_processing(unsigned long data) |
624 | { | 605 | { |
625 | struct qdio_q *q = (struct qdio_q *)data; | 606 | struct qdio_q *q = (struct qdio_q *)data; |
@@ -642,11 +623,6 @@ static int get_outbound_buffer_frontier(struct qdio_q *q) | |||
642 | count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); | 623 | count = min(atomic_read(&q->nr_buf_used), QDIO_MAX_BUFFERS_MASK); |
643 | stop = add_buf(q->first_to_check, count); | 624 | stop = add_buf(q->first_to_check, count); |
644 | 625 | ||
645 | /* need to set count to 1 for non-qebsm */ | ||
646 | if (!is_qebsm(q)) | ||
647 | count = 1; | ||
648 | |||
649 | check_next: | ||
650 | if (q->first_to_check == stop) | 626 | if (q->first_to_check == stop) |
651 | return q->first_to_check; | 627 | return q->first_to_check; |
652 | 628 | ||
@@ -661,13 +637,7 @@ check_next: | |||
661 | 637 | ||
662 | atomic_sub(count, &q->nr_buf_used); | 638 | atomic_sub(count, &q->nr_buf_used); |
663 | q->first_to_check = add_buf(q->first_to_check, count); | 639 | q->first_to_check = add_buf(q->first_to_check, count); |
664 | /* | 640 | break; |
665 | * We fetch all buffer states at once. get_buf_states may | ||
666 | * return count < stop. For QEBSM we do not loop. | ||
667 | */ | ||
668 | if (is_qebsm(q)) | ||
669 | break; | ||
670 | goto check_next; | ||
671 | case SLSB_P_OUTPUT_ERROR: | 641 | case SLSB_P_OUTPUT_ERROR: |
672 | announce_buffer_error(q, count); | 642 | announce_buffer_error(q, count); |
673 | /* process the buffer, the upper layer will take care of it */ | 643 | /* process the buffer, the upper layer will take care of it */ |
@@ -797,8 +767,7 @@ void qdio_outbound_timer(unsigned long data) | |||
797 | tasklet_schedule(&q->tasklet); | 767 | tasklet_schedule(&q->tasklet); |
798 | } | 768 | } |
799 | 769 | ||
800 | /* called from thinint inbound tasklet */ | 770 | static inline void qdio_check_outbound_after_thinint(struct qdio_q *q) |
801 | void qdio_check_outbound_after_thinint(struct qdio_q *q) | ||
802 | { | 771 | { |
803 | struct qdio_q *out; | 772 | struct qdio_q *out; |
804 | int i; | 773 | int i; |
@@ -811,6 +780,46 @@ void qdio_check_outbound_after_thinint(struct qdio_q *q) | |||
811 | tasklet_schedule(&out->tasklet); | 780 | tasklet_schedule(&out->tasklet); |
812 | } | 781 | } |
813 | 782 | ||
783 | static void __tiqdio_inbound_processing(struct qdio_q *q) | ||
784 | { | ||
785 | qdio_perf_stat_inc(&perf_stats.thinint_inbound); | ||
786 | qdio_sync_after_thinint(q); | ||
787 | |||
788 | /* | ||
789 | * The interrupt could be caused by a PCI request. Check the | ||
790 | * PCI capable outbound queues. | ||
791 | */ | ||
792 | qdio_check_outbound_after_thinint(q); | ||
793 | |||
794 | if (!qdio_inbound_q_moved(q)) | ||
795 | return; | ||
796 | |||
797 | qdio_kick_handler(q); | ||
798 | |||
799 | if (!qdio_inbound_q_done(q)) { | ||
800 | qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); | ||
801 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) | ||
802 | tasklet_schedule(&q->tasklet); | ||
803 | } | ||
804 | |||
805 | qdio_stop_polling(q); | ||
806 | /* | ||
807 | * We need to check again to not lose initiative after | ||
808 | * resetting the ACK state. | ||
809 | */ | ||
810 | if (!qdio_inbound_q_done(q)) { | ||
811 | qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2); | ||
812 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) | ||
813 | tasklet_schedule(&q->tasklet); | ||
814 | } | ||
815 | } | ||
816 | |||
817 | void tiqdio_inbound_processing(unsigned long data) | ||
818 | { | ||
819 | struct qdio_q *q = (struct qdio_q *)data; | ||
820 | __tiqdio_inbound_processing(q); | ||
821 | } | ||
822 | |||
814 | static inline void qdio_set_state(struct qdio_irq *irq_ptr, | 823 | static inline void qdio_set_state(struct qdio_irq *irq_ptr, |
815 | enum qdio_irq_states state) | 824 | enum qdio_irq_states state) |
816 | { | 825 | { |
@@ -1488,18 +1497,13 @@ out: | |||
1488 | * @count: how many buffers to process | 1497 | * @count: how many buffers to process |
1489 | */ | 1498 | */ |
1490 | int do_QDIO(struct ccw_device *cdev, unsigned int callflags, | 1499 | int do_QDIO(struct ccw_device *cdev, unsigned int callflags, |
1491 | int q_nr, int bufnr, int count) | 1500 | int q_nr, unsigned int bufnr, unsigned int count) |
1492 | { | 1501 | { |
1493 | struct qdio_irq *irq_ptr; | 1502 | struct qdio_irq *irq_ptr; |
1494 | 1503 | ||
1495 | if ((bufnr > QDIO_MAX_BUFFERS_PER_Q) || | 1504 | if (bufnr >= QDIO_MAX_BUFFERS_PER_Q || count > QDIO_MAX_BUFFERS_PER_Q) |
1496 | (count > QDIO_MAX_BUFFERS_PER_Q) || | ||
1497 | (q_nr >= QDIO_MAX_QUEUES_PER_IRQ)) | ||
1498 | return -EINVAL; | 1505 | return -EINVAL; |
1499 | 1506 | ||
1500 | if (!count) | ||
1501 | return 0; | ||
1502 | |||
1503 | irq_ptr = cdev->private->qdio_data; | 1507 | irq_ptr = cdev->private->qdio_data; |
1504 | if (!irq_ptr) | 1508 | if (!irq_ptr) |
1505 | return -ENODEV; | 1509 | return -ENODEV; |
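
The do_QDIO() hunk switches bufnr and count to unsigned int and tightens the bounds test to bufnr >= QDIO_MAX_BUFFERS_PER_Q (with QDIO_MAX_BUFFERS_PER_Q buffers, valid buffer numbers stop one below the maximum, while a count equal to the maximum stays legal). With unsigned parameters a single upper-bound comparison also rejects what a caller might have passed as a negative value. A tiny, stand-alone illustration of that check; the macro and function names here are placeholders:

#include <linux/errno.h>

#define EXAMPLE_MAX_BUFFERS_PER_Q 128	/* stand-in for QDIO_MAX_BUFFERS_PER_Q */

static int example_check_range(unsigned int bufnr, unsigned int count)
{
	/* a negative argument wraps to a huge unsigned value and is
	 * caught by the same upper-bound comparison */
	if (bufnr >= EXAMPLE_MAX_BUFFERS_PER_Q ||
	    count > EXAMPLE_MAX_BUFFERS_PER_Q)
		return -EINVAL;
	return 0;
}
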
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index c655d011a78d..981a77ea7ee2 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -43,9 +43,6 @@ struct indicator_t { | |||
43 | }; | 43 | }; |
44 | static struct indicator_t *q_indicators; | 44 | static struct indicator_t *q_indicators; |
45 | 45 | ||
46 | static void tiqdio_tasklet_fn(unsigned long data); | ||
47 | static DECLARE_TASKLET(tiqdio_tasklet, tiqdio_tasklet_fn, 0); | ||
48 | |||
49 | static int css_qdio_omit_svs; | 46 | static int css_qdio_omit_svs; |
50 | 47 | ||
51 | static inline unsigned long do_clear_global_summary(void) | 48 | static inline unsigned long do_clear_global_summary(void) |
@@ -103,11 +100,6 @@ void tiqdio_add_input_queues(struct qdio_irq *irq_ptr) | |||
103 | xchg(irq_ptr->dsci, 1); | 100 | xchg(irq_ptr->dsci, 1); |
104 | } | 101 | } |
105 | 102 | ||
106 | /* | ||
107 | * we cannot stop the tiqdio tasklet here since it is for all | ||
108 | * thinint qdio devices and it must run as long as there is a | ||
109 | * thinint device left | ||
110 | */ | ||
111 | void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) | 103 | void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) |
112 | { | 104 | { |
113 | struct qdio_q *q; | 105 | struct qdio_q *q; |
@@ -126,79 +118,39 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr) | |||
126 | } | 118 | } |
127 | } | 119 | } |
128 | 120 | ||
129 | static inline int tiqdio_inbound_q_done(struct qdio_q *q) | ||
130 | { | ||
131 | unsigned char state = 0; | ||
132 | |||
133 | if (!atomic_read(&q->nr_buf_used)) | ||
134 | return 1; | ||
135 | |||
136 | qdio_siga_sync_q(q); | ||
137 | get_buf_state(q, q->first_to_check, &state, 0); | ||
138 | |||
139 | if (state == SLSB_P_INPUT_PRIMED) | ||
140 | /* more work coming */ | ||
141 | return 0; | ||
142 | return 1; | ||
143 | } | ||
144 | |||
145 | static inline int shared_ind(struct qdio_irq *irq_ptr) | 121 | static inline int shared_ind(struct qdio_irq *irq_ptr) |
146 | { | 122 | { |
147 | return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; | 123 | return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind; |
148 | } | 124 | } |
149 | 125 | ||
150 | static void __tiqdio_inbound_processing(struct qdio_q *q) | 126 | /** |
127 | * tiqdio_thinint_handler - thin interrupt handler for qdio | ||
128 | * @ind: pointer to adapter local summary indicator | ||
129 | * @drv_data: NULL | ||
130 | */ | ||
131 | static void tiqdio_thinint_handler(void *ind, void *drv_data) | ||
151 | { | 132 | { |
152 | qdio_perf_stat_inc(&perf_stats.thinint_inbound); | 133 | struct qdio_q *q; |
153 | qdio_sync_after_thinint(q); | 134 | |
135 | qdio_perf_stat_inc(&perf_stats.thin_int); | ||
154 | 136 | ||
155 | /* | 137 | /* |
156 | * Maybe we have work on our outbound queues... at least | 138 | * SVS only when needed: issue SVS to benefit from iqdio interrupt |
157 | * we have to check the PCI capable queues. | 139 | * avoidance (SVS clears adapter interrupt suppression overwrite) |
158 | */ | 140 | */ |
159 | qdio_check_outbound_after_thinint(q); | 141 | if (!css_qdio_omit_svs) |
160 | 142 | do_clear_global_summary(); | |
161 | if (!qdio_inbound_q_moved(q)) | ||
162 | return; | ||
163 | |||
164 | qdio_kick_handler(q); | ||
165 | |||
166 | if (!tiqdio_inbound_q_done(q)) { | ||
167 | qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop); | ||
168 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) | ||
169 | tasklet_schedule(&q->tasklet); | ||
170 | } | ||
171 | 143 | ||
172 | qdio_stop_polling(q); | ||
173 | /* | 144 | /* |
174 | * We need to check again to not lose initiative after | 145 | * reset local summary indicator (tiqdio_alsi) to stop adapter |
175 | * resetting the ACK state. | 146 | * interrupts for now |
176 | */ | 147 | */ |
177 | if (!tiqdio_inbound_q_done(q)) { | 148 | xchg((u8 *)ind, 0); |
178 | qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2); | ||
179 | if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) | ||
180 | tasklet_schedule(&q->tasklet); | ||
181 | } | ||
182 | } | ||
183 | |||
184 | void tiqdio_inbound_processing(unsigned long data) | ||
185 | { | ||
186 | struct qdio_q *q = (struct qdio_q *)data; | ||
187 | |||
188 | __tiqdio_inbound_processing(q); | ||
189 | } | ||
190 | |||
191 | /* check for work on all inbound thinint queues */ | ||
192 | static void tiqdio_tasklet_fn(unsigned long data) | ||
193 | { | ||
194 | struct qdio_q *q; | ||
195 | |||
196 | qdio_perf_stat_inc(&perf_stats.tasklet_thinint); | ||
197 | again: | ||
198 | 149 | ||
199 | /* protect tiq_list entries, only changed in activate or shutdown */ | 150 | /* protect tiq_list entries, only changed in activate or shutdown */ |
200 | rcu_read_lock(); | 151 | rcu_read_lock(); |
201 | 152 | ||
153 | /* check for work on all inbound thinint queues */ | ||
202 | list_for_each_entry_rcu(q, &tiq_list, entry) | 154 | list_for_each_entry_rcu(q, &tiq_list, entry) |
203 | /* only process queues from changed sets */ | 155 | /* only process queues from changed sets */ |
204 | if (*q->irq_ptr->dsci) { | 156 | if (*q->irq_ptr->dsci) { |
@@ -226,37 +178,6 @@ again: | |||
226 | if (*tiqdio_alsi) | 178 | if (*tiqdio_alsi) |
227 | xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1); | 179 | xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 1); |
228 | } | 180 | } |
229 | |||
230 | /* check for more work */ | ||
231 | if (*tiqdio_alsi) { | ||
232 | xchg(tiqdio_alsi, 0); | ||
233 | qdio_perf_stat_inc(&perf_stats.tasklet_thinint_loop); | ||
234 | goto again; | ||
235 | } | ||
236 | } | ||
237 | |||
238 | /** | ||
239 | * tiqdio_thinint_handler - thin interrupt handler for qdio | ||
240 | * @ind: pointer to adapter local summary indicator | ||
241 | * @drv_data: NULL | ||
242 | */ | ||
243 | static void tiqdio_thinint_handler(void *ind, void *drv_data) | ||
244 | { | ||
245 | qdio_perf_stat_inc(&perf_stats.thin_int); | ||
246 | |||
247 | /* | ||
248 | * SVS only when needed: issue SVS to benefit from iqdio interrupt | ||
249 | * avoidance (SVS clears adapter interrupt suppression overwrite) | ||
250 | */ | ||
251 | if (!css_qdio_omit_svs) | ||
252 | do_clear_global_summary(); | ||
253 | |||
254 | /* | ||
255 | * reset local summary indicator (tiqdio_alsi) to stop adapter | ||
256 | * interrupts for now, the tasklet will clean all dsci's | ||
257 | */ | ||
258 | xchg((u8 *)ind, 0); | ||
259 | tasklet_hi_schedule(&tiqdio_tasklet); | ||
260 | } | 181 | } |
261 | 182 | ||
262 | static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) | 183 | static int set_subchannel_ind(struct qdio_irq *irq_ptr, int reset) |
@@ -376,5 +297,4 @@ void __exit tiqdio_unregister_thinints(void) | |||
376 | s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC); | 297 | s390_unregister_adapter_interrupt(tiqdio_alsi, QDIO_AIRQ_ISC); |
377 | isc_unregister(QDIO_AIRQ_ISC); | 298 | isc_unregister(QDIO_AIRQ_ISC); |
378 | } | 299 | } |
379 | tasklet_kill(&tiqdio_tasklet); | ||
380 | } | 300 | } |
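
The qdio_thinint.c changes retire the global tiqdio tasklet: the adapter interrupt handler now resets the local summary indicator and walks the RCU-protected tiq_list itself, processing every queue whose device state change indicator (dsci) is set. A stripped-down sketch of that walk; the structure and helper names below are placeholders, and the driver uses xchg() where this sketch uses plain stores:

#include <linux/rculist.h>
#include <linux/types.h>

struct example_q {
	struct list_head entry;	/* linked into the thinint queue list */
	u8 *dsci;		/* device state change indicator */
};

static void example_thinint_handler(u8 *alsi, struct list_head *tiq_list)
{
	struct example_q *q;

	*alsi = 0;	/* reset the local summary indicator (driver: xchg) */

	/* entries are only added/removed with the RCU list primitives,
	 * so a reader in interrupt context just needs rcu_read_lock() */
	rcu_read_lock();
	list_for_each_entry_rcu(q, tiq_list, entry) {
		if (!*q->dsci)
			continue;	/* this queue set did not change */
		*q->dsci = 0;		/* ack, then process the queue */
		/* inbound processing for q would run here */
	}
	rcu_read_unlock();
}
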
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9c148406b980..727a809636d8 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -54,6 +54,12 @@ static int ap_poll_thread_start(void); | |||
54 | static void ap_poll_thread_stop(void); | 54 | static void ap_poll_thread_stop(void); |
55 | static void ap_request_timeout(unsigned long); | 55 | static void ap_request_timeout(unsigned long); |
56 | static inline void ap_schedule_poll_timer(void); | 56 | static inline void ap_schedule_poll_timer(void); |
57 | static int __ap_poll_device(struct ap_device *ap_dev, unsigned long *flags); | ||
58 | static int ap_device_remove(struct device *dev); | ||
59 | static int ap_device_probe(struct device *dev); | ||
60 | static void ap_interrupt_handler(void *unused1, void *unused2); | ||
61 | static void ap_reset(struct ap_device *ap_dev); | ||
62 | static void ap_config_timeout(unsigned long ptr); | ||
57 | 63 | ||
58 | /* | 64 | /* |
59 | * Module description. | 65 | * Module description. |
@@ -101,6 +107,10 @@ static struct hrtimer ap_poll_timer; | |||
101 | * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/ | 107 | * If z/VM change to 1500000 nanoseconds to adjust to z/VM polling.*/ |
102 | static unsigned long long poll_timeout = 250000; | 108 | static unsigned long long poll_timeout = 250000; |
103 | 109 | ||
110 | /* Suspend flag */ | ||
111 | static int ap_suspend_flag; | ||
112 | static struct bus_type ap_bus_type; | ||
113 | |||
104 | /** | 114 | /** |
105 | * ap_using_interrupts() - Returns non-zero if interrupt support is | 115 | * ap_using_interrupts() - Returns non-zero if interrupt support is |
106 | * available. | 116 | * available. |
@@ -617,10 +627,79 @@ static int ap_uevent (struct device *dev, struct kobj_uevent_env *env) | |||
617 | return retval; | 627 | return retval; |
618 | } | 628 | } |
619 | 629 | ||
630 | static int ap_bus_suspend(struct device *dev, pm_message_t state) | ||
631 | { | ||
632 | struct ap_device *ap_dev = to_ap_dev(dev); | ||
633 | unsigned long flags; | ||
634 | |||
635 | if (!ap_suspend_flag) { | ||
636 | ap_suspend_flag = 1; | ||
637 | |||
638 | /* Disable scanning for devices, thus we do not want to scan | ||
639 | * for them after removing. | ||
640 | */ | ||
641 | del_timer_sync(&ap_config_timer); | ||
642 | if (ap_work_queue != NULL) { | ||
643 | destroy_workqueue(ap_work_queue); | ||
644 | ap_work_queue = NULL; | ||
645 | } | ||
646 | tasklet_disable(&ap_tasklet); | ||
647 | } | ||
648 | /* Poll on the device until all requests are finished. */ | ||
649 | do { | ||
650 | flags = 0; | ||
651 | __ap_poll_device(ap_dev, &flags); | ||
652 | } while ((flags & 1) || (flags & 2)); | ||
653 | |||
654 | ap_device_remove(dev); | ||
655 | return 0; | ||
656 | } | ||
657 | |||
658 | static int ap_bus_resume(struct device *dev) | ||
659 | { | ||
660 | int rc = 0; | ||
661 | struct ap_device *ap_dev = to_ap_dev(dev); | ||
662 | |||
663 | if (ap_suspend_flag) { | ||
664 | ap_suspend_flag = 0; | ||
665 | if (!ap_interrupts_available()) | ||
666 | ap_interrupt_indicator = NULL; | ||
667 | ap_device_probe(dev); | ||
668 | ap_reset(ap_dev); | ||
669 | setup_timer(&ap_dev->timeout, ap_request_timeout, | ||
670 | (unsigned long) ap_dev); | ||
671 | ap_scan_bus(NULL); | ||
672 | init_timer(&ap_config_timer); | ||
673 | ap_config_timer.function = ap_config_timeout; | ||
674 | ap_config_timer.data = 0; | ||
675 | ap_config_timer.expires = jiffies + ap_config_time * HZ; | ||
676 | add_timer(&ap_config_timer); | ||
677 | ap_work_queue = create_singlethread_workqueue("kapwork"); | ||
678 | if (!ap_work_queue) | ||
679 | return -ENOMEM; | ||
680 | tasklet_enable(&ap_tasklet); | ||
681 | if (!ap_using_interrupts()) | ||
682 | ap_schedule_poll_timer(); | ||
683 | else | ||
684 | tasklet_schedule(&ap_tasklet); | ||
685 | if (ap_thread_flag) | ||
686 | rc = ap_poll_thread_start(); | ||
687 | } else { | ||
688 | ap_device_probe(dev); | ||
689 | ap_reset(ap_dev); | ||
690 | setup_timer(&ap_dev->timeout, ap_request_timeout, | ||
691 | (unsigned long) ap_dev); | ||
692 | } | ||
693 | |||
694 | return rc; | ||
695 | } | ||
696 | |||
620 | static struct bus_type ap_bus_type = { | 697 | static struct bus_type ap_bus_type = { |
621 | .name = "ap", | 698 | .name = "ap", |
622 | .match = &ap_bus_match, | 699 | .match = &ap_bus_match, |
623 | .uevent = &ap_uevent, | 700 | .uevent = &ap_uevent, |
701 | .suspend = ap_bus_suspend, | ||
702 | .resume = ap_bus_resume | ||
624 | }; | 703 | }; |
625 | 704 | ||
626 | static int ap_device_probe(struct device *dev) | 705 | static int ap_device_probe(struct device *dev) |
@@ -1066,7 +1145,7 @@ ap_config_timeout(unsigned long ptr) | |||
1066 | */ | 1145 | */ |
1067 | static inline void ap_schedule_poll_timer(void) | 1146 | static inline void ap_schedule_poll_timer(void) |
1068 | { | 1147 | { |
1069 | if (ap_using_interrupts()) | 1148 | if (ap_using_interrupts() || ap_suspend_flag) |
1070 | return; | 1149 | return; |
1071 | if (hrtimer_is_queued(&ap_poll_timer)) | 1150 | if (hrtimer_is_queued(&ap_poll_timer)) |
1072 | return; | 1151 | return; |
@@ -1384,6 +1463,8 @@ static int ap_poll_thread(void *data) | |||
1384 | 1463 | ||
1385 | set_user_nice(current, 19); | 1464 | set_user_nice(current, 19); |
1386 | while (1) { | 1465 | while (1) { |
1466 | if (ap_suspend_flag) | ||
1467 | return 0; | ||
1387 | if (need_resched()) { | 1468 | if (need_resched()) { |
1388 | schedule(); | 1469 | schedule(); |
1389 | continue; | 1470 | continue; |
@@ -1414,7 +1495,7 @@ static int ap_poll_thread_start(void) | |||
1414 | { | 1495 | { |
1415 | int rc; | 1496 | int rc; |
1416 | 1497 | ||
1417 | if (ap_using_interrupts()) | 1498 | if (ap_using_interrupts() || ap_suspend_flag) |
1418 | return 0; | 1499 | return 0; |
1419 | mutex_lock(&ap_poll_thread_mutex); | 1500 | mutex_lock(&ap_poll_thread_mutex); |
1420 | if (!ap_poll_kthread) { | 1501 | if (!ap_poll_kthread) { |
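
The ap_bus hunks add bus-level suspend/resume callbacks plus an ap_suspend_flag that the polling paths check, so neither the high-resolution poll timer nor the poll thread is (re)armed while the bus is suspended; resume clears the flag and restarts polling explicitly. A condensed sketch of that gating, with the flag and function names reduced to placeholders:

static int example_suspended;	/* plays the role of ap_suspend_flag */

static void example_schedule_poll_timer(void)
{
	/* do not re-arm polling while suspended; the resume callback
	 * clears the flag and restarts polling itself */
	if (example_suspended)
		return;
	/* arm the hrtimer / schedule the poll tasklet here */
}
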
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index 52574ce797b2..8c36eafcfbfe 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -1307,7 +1307,7 @@ static void netiucv_pm_complete(struct device *dev) | |||
1307 | */ | 1307 | */ |
1308 | static int netiucv_pm_freeze(struct device *dev) | 1308 | static int netiucv_pm_freeze(struct device *dev) |
1309 | { | 1309 | { |
1310 | struct netiucv_priv *priv = dev->driver_data; | 1310 | struct netiucv_priv *priv = dev_get_drvdata(dev); |
1311 | struct net_device *ndev = NULL; | 1311 | struct net_device *ndev = NULL; |
1312 | int rc = 0; | 1312 | int rc = 0; |
1313 | 1313 | ||
@@ -1331,7 +1331,7 @@ out: | |||
1331 | */ | 1331 | */ |
1332 | static int netiucv_pm_restore_thaw(struct device *dev) | 1332 | static int netiucv_pm_restore_thaw(struct device *dev) |
1333 | { | 1333 | { |
1334 | struct netiucv_priv *priv = dev->driver_data; | 1334 | struct netiucv_priv *priv = dev_get_drvdata(dev); |
1335 | struct net_device *ndev = NULL; | 1335 | struct net_device *ndev = NULL; |
1336 | int rc = 0; | 1336 | int rc = 0; |
1337 | 1337 | ||