Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd.c         |  33
-rw-r--r--  drivers/s390/block/dasd_devmap.c  |  10
-rw-r--r--  drivers/s390/block/dasd_int.h     |   2
-rw-r--r--  drivers/s390/char/raw3270.c       |   9
-rw-r--r--  drivers/s390/char/sclp_config.c   |   2
-rw-r--r--  drivers/s390/char/sclp_vt220.c    |  27
-rw-r--r--  drivers/s390/char/tape.h          |   3
-rw-r--r--  drivers/s390/char/tape_block.c    |   4
-rw-r--r--  drivers/s390/char/tape_core.c     |  47
-rw-r--r--  drivers/s390/char/vmlogrdr.c      |  11
-rw-r--r--  drivers/s390/cio/blacklist.c      |   1
-rw-r--r--  drivers/s390/cio/device_pgid.c    |  12
-rw-r--r--  drivers/s390/kvm/kvm_virtio.c     |  58
13 files changed, 89 insertions(+), 130 deletions(-)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index ac6d4d3218b3..1a4025683362 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -63,6 +63,7 @@ static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
  */
 static wait_queue_head_t dasd_init_waitq;
 static wait_queue_head_t dasd_flush_wq;
+static wait_queue_head_t generic_waitq;
 
 /*
  * Allocate memory for a new device structure.
@@ -925,6 +926,8 @@ static void dasd_handle_killed_request(struct ccw_device *cdev,
 	struct dasd_ccw_req *cqr;
 	struct dasd_device *device;
 
+	if (!intparm)
+		return;
 	cqr = (struct dasd_ccw_req *) intparm;
 	if (cqr->status != DASD_CQR_IN_IO) {
 		MESSAGE(KERN_DEBUG,
@@ -976,17 +979,16 @@ void dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	if (IS_ERR(irb)) {
 		switch (PTR_ERR(irb)) {
 		case -EIO:
-			dasd_handle_killed_request(cdev, intparm);
 			break;
 		case -ETIMEDOUT:
 			printk(KERN_WARNING"%s(%s): request timed out\n",
 			       __func__, cdev->dev.bus_id);
-			//FIXME - dasd uses own timeout interface...
 			break;
 		default:
 			printk(KERN_WARNING"%s(%s): unknown error %ld\n",
 			       __func__, cdev->dev.bus_id, PTR_ERR(irb));
 		}
+		dasd_handle_killed_request(cdev, intparm);
 		return;
 	}
 
@@ -1150,11 +1152,15 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
 	struct list_head *l, *n;
 	struct dasd_ccw_req *cqr;
 	struct dasd_block *block;
+	void (*callback)(struct dasd_ccw_req *, void *data);
+	void *callback_data;
 
 	list_for_each_safe(l, n, final_queue) {
 		cqr = list_entry(l, struct dasd_ccw_req, devlist);
 		list_del_init(&cqr->devlist);
 		block = cqr->block;
+		callback = cqr->callback;
+		callback_data = cqr->callback_data;
 		if (block)
 			spin_lock_bh(&block->queue_lock);
 		switch (cqr->status) {
@@ -1175,7 +1181,7 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
 			BUG();
 		}
 		if (cqr->callback != NULL)
-			(cqr->callback)(cqr, cqr->callback_data);
+			(callback)(cqr, callback_data);
 		if (block)
 			spin_unlock_bh(&block->queue_lock);
 	}
@@ -1405,17 +1411,15 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr)
  */
 int dasd_sleep_on(struct dasd_ccw_req *cqr)
 {
-	wait_queue_head_t wait_q;
 	struct dasd_device *device;
 	int rc;
 
 	device = cqr->startdev;
 
-	init_waitqueue_head (&wait_q);
 	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &wait_q;
+	cqr->callback_data = (void *) &generic_waitq;
 	dasd_add_request_tail(cqr);
-	wait_event(wait_q, _wait_for_wakeup(cqr));
+	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 
 	/* Request status is either done or failed. */
 	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
@@ -1428,20 +1432,18 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr)
  */
 int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr)
 {
-	wait_queue_head_t wait_q;
 	struct dasd_device *device;
 	int rc;
 
 	device = cqr->startdev;
-	init_waitqueue_head (&wait_q);
 	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &wait_q;
+	cqr->callback_data = (void *) &generic_waitq;
 	dasd_add_request_tail(cqr);
-	rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr));
+	rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr));
 	if (rc == -ERESTARTSYS) {
 		dasd_cancel_req(cqr);
 		/* wait (non-interruptible) for final status */
-		wait_event(wait_q, _wait_for_wakeup(cqr));
+		wait_event(generic_waitq, _wait_for_wakeup(cqr));
 	}
 	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
 	return rc;
@@ -1465,7 +1467,6 @@ static inline int _dasd_term_running_cqr(struct dasd_device *device)
 
 int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
 {
-	wait_queue_head_t wait_q;
 	struct dasd_device *device;
 	int rc;
 
@@ -1477,9 +1478,8 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
 		return rc;
 	}
 
-	init_waitqueue_head (&wait_q);
 	cqr->callback = dasd_wakeup_cb;
-	cqr->callback_data = (void *) &wait_q;
+	cqr->callback_data = (void *) &generic_waitq;
 	cqr->status = DASD_CQR_QUEUED;
 	list_add(&cqr->devlist, &device->ccw_queue);
 
@@ -1488,7 +1488,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr)
 
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 
-	wait_event(wait_q, _wait_for_wakeup(cqr));
+	wait_event(generic_waitq, _wait_for_wakeup(cqr));
 
 	/* Request status is either done or failed. */
 	rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
@@ -2429,6 +2429,7 @@ static int __init dasd_init(void)
 
 	init_waitqueue_head(&dasd_init_waitq);
 	init_waitqueue_head(&dasd_flush_wq);
+	init_waitqueue_head(&generic_waitq);
 
 	/* register 'common' DASD debug area, used for all DBF_XXX calls */
 	dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long));
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index f4fb40257348..d774e79476fe 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -86,10 +86,10 @@ static DEFINE_SPINLOCK(dasd_devmap_lock);
 static struct list_head dasd_hashlists[256];
 int dasd_max_devindex;
 
-static struct dasd_devmap *dasd_add_busid(char *, int);
+static struct dasd_devmap *dasd_add_busid(const char *, int);
 
 static inline int
-dasd_hash_busid(char *bus_id)
+dasd_hash_busid(const char *bus_id)
 {
 	int hash, i;
 
@@ -394,7 +394,7 @@ dasd_parse(void)
  * devices.
  */
 static struct dasd_devmap *
-dasd_add_busid(char *bus_id, int features)
+dasd_add_busid(const char *bus_id, int features)
 {
 	struct dasd_devmap *devmap, *new, *tmp;
 	int hash;
@@ -430,7 +430,7 @@ dasd_add_busid(char *bus_id, int features)
  * Find devmap for device with given bus_id.
  */
 static struct dasd_devmap *
-dasd_find_busid(char *bus_id)
+dasd_find_busid(const char *bus_id)
 {
 	struct dasd_devmap *devmap, *tmp;
 	int hash;
@@ -452,7 +452,7 @@ dasd_find_busid(char *bus_id)
  * Check if busid has been added to the list of dasd ranges.
  */
 int
-dasd_busid_known(char *bus_id)
+dasd_busid_known(const char *bus_id)
 {
 	return IS_ERR(dasd_find_busid(bus_id)) ? -ENOENT : 0;
 }
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 6c624bf44617..fb2f931cf844 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -598,7 +598,7 @@ struct dasd_device *dasd_device_from_cdev_locked(struct ccw_device *);
 struct dasd_device *dasd_device_from_devindex(int);
 
 int dasd_parse(void);
-int dasd_busid_known(char *);
+int dasd_busid_known(const char *);
 
 /* externals in dasd_gendisk.c */
 int dasd_gendisk_init(void);
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 0d98f1ff2edd..848ef7e8523f 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -549,7 +549,6 @@ raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
 		   struct raw3270_request *rq)
 {
 	unsigned long flags;
-	wait_queue_head_t wq;
 	int rc;
 
 #ifdef CONFIG_TN3270_CONSOLE
@@ -566,20 +565,20 @@ raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
 		return rq->rc;
 	}
 #endif
-	init_waitqueue_head(&wq);
 	rq->callback = raw3270_wake_init;
-	rq->callback_data = &wq;
+	rq->callback_data = &raw3270_wait_queue;
 	spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags);
 	rc = __raw3270_start(rp, view, rq);
 	spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags);
 	if (rc)
 		return rc;
 	/* Now wait for the completion. */
-	rc = wait_event_interruptible(wq, raw3270_request_final(rq));
+	rc = wait_event_interruptible(raw3270_wait_queue,
+				      raw3270_request_final(rq));
 	if (rc == -ERESTARTSYS) {	/* Interrupted by a signal. */
 		raw3270_halt_io(view->dev, rq);
 		/* No wait for the halt to complete. */
-		wait_event(wq, raw3270_request_final(rq));
+		wait_event(raw3270_wait_queue, raw3270_request_final(rq));
 		return -ERESTARTSYS;
 	}
 	return rq->rc;
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 9e784d5f7f57..ad05a87bc480 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -40,7 +40,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
 	put_online_cpus();
 }
 
-static void sclp_cpu_change_notify(struct work_struct *work)
+static void __ref sclp_cpu_change_notify(struct work_struct *work)
 {
 	smp_rescan_cpus();
 }
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 35707c04e613..62576af36f47 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -71,9 +71,6 @@ static struct list_head sclp_vt220_outqueue;
 /* Number of requests in outqueue */
 static int sclp_vt220_outqueue_count;
 
-/* Wait queue used to delay write requests while we've run out of buffers */
-static wait_queue_head_t sclp_vt220_waitq;
-
 /* Timer used for delaying write requests to merge subsequent messages into
  * a single buffer */
 static struct timer_list sclp_vt220_timer;
@@ -133,7 +130,6 @@ sclp_vt220_process_queue(struct sclp_vt220_request *request)
 	} while (request && __sclp_vt220_emit(request));
 	if (request == NULL && sclp_vt220_flush_later)
 		sclp_vt220_emit_current();
-	wake_up(&sclp_vt220_waitq);
 	/* Check if the tty needs a wake up call */
 	if (sclp_vt220_tty != NULL) {
 		tty_wakeup(sclp_vt220_tty);
@@ -383,7 +379,7 @@ sclp_vt220_timeout(unsigned long data)
  */
 static int
 __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
-		   int convertlf, int may_schedule)
+		   int convertlf, int may_fail)
 {
 	unsigned long flags;
 	void *page;
@@ -395,15 +391,14 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
 	overall_written = 0;
 	spin_lock_irqsave(&sclp_vt220_lock, flags);
 	do {
-		/* Create a sclp output buffer if none exists yet */
+		/* Create an sclp output buffer if none exists yet */
 		if (sclp_vt220_current_request == NULL) {
 			while (list_empty(&sclp_vt220_empty)) {
 				spin_unlock_irqrestore(&sclp_vt220_lock, flags);
-				if (in_interrupt() || !may_schedule)
-					sclp_sync_wait();
+				if (may_fail)
+					goto out;
 				else
-					wait_event(sclp_vt220_waitq,
-						   !list_empty(&sclp_vt220_empty));
+					sclp_sync_wait();
 				spin_lock_irqsave(&sclp_vt220_lock, flags);
 			}
 			page = (void *) sclp_vt220_empty.next;
@@ -437,6 +432,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule,
 		add_timer(&sclp_vt220_timer);
 	}
 	spin_unlock_irqrestore(&sclp_vt220_lock, flags);
+out:
 	return overall_written;
 }
 
@@ -520,19 +516,11 @@ sclp_vt220_close(struct tty_struct *tty, struct file *filp)
  * character to the tty device. If the kernel uses this routine,
  * it must call the flush_chars() routine (if defined) when it is
  * done stuffing characters into the driver.
- *
- * NOTE: include/linux/tty_driver.h specifies that a character should be
- * ignored if there is no room in the queue. This driver implements a different
- * semantic in that it will block when there is no more room left.
- *
- * FIXME: putchar can currently be called from BH and other non blocking
- * handlers so this semantic isn't a good idea.
  */
 static int
 sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch)
 {
-	__sclp_vt220_write(&ch, 1, 0, 0, 1);
-	return 1;
+	return __sclp_vt220_write(&ch, 1, 0, 0, 1);
 }
 
 /*
@@ -653,7 +641,6 @@ static int __init __sclp_vt220_init(void)
 	spin_lock_init(&sclp_vt220_lock);
 	INIT_LIST_HEAD(&sclp_vt220_empty);
 	INIT_LIST_HEAD(&sclp_vt220_outqueue);
-	init_waitqueue_head(&sclp_vt220_waitq);
 	init_timer(&sclp_vt220_timer);
 	sclp_vt220_current_request = NULL;
 	sclp_vt220_buffered_chars = 0;
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index dddf8d62c153..d0d565a05dfe 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -231,6 +231,9 @@ struct tape_device {
 	/* Request queue. */
 	struct list_head req_queue;
 
+	/* Request wait queue. */
+	wait_queue_head_t wait_queue;
+
 	/* Each tape device has (currently) two minor numbers. */
 	int first_minor;
 
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index ddc4a114e7f4..95da72bc17e8 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -179,11 +179,11 @@ tapeblock_requeue(struct work_struct *work) {
 			tapeblock_end_request(req, -EIO);
 			continue;
 		}
+		blkdev_dequeue_request(req);
+		nr_queued++;
 		spin_unlock_irq(&device->blk_data.request_queue_lock);
 		rc = tapeblock_start_request(device, req);
 		spin_lock_irq(&device->blk_data.request_queue_lock);
-		blkdev_dequeue_request(req);
-		nr_queued++;
 	}
 	spin_unlock_irq(&device->blk_data.request_queue_lock);
 	atomic_set(&device->blk_data.requeue_scheduled, 0);
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 7ad8cf157641..c20e3c548343 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -76,32 +76,9 @@ const char *tape_op_verbose[TO_SIZE] =
 	[TO_KEKL_QUERY] = "KLQ",[TO_RDC] = "RDC",
 };
 
-static int
-busid_to_int(char *bus_id)
+static int devid_to_int(struct ccw_dev_id *dev_id)
 {
-	int dec;
-	int d;
-	char * s;
-
-	for(s = bus_id, d = 0; *s != '\0' && *s != '.'; s++)
-		d = (d * 10) + (*s - '0');
-	dec = d;
-	for(s++, d = 0; *s != '\0' && *s != '.'; s++)
-		d = (d * 10) + (*s - '0');
-	dec = (dec << 8) + d;
-
-	for(s++; *s != '\0'; s++) {
-		if (*s >= '0' && *s <= '9') {
-			d = *s - '0';
-		} else if (*s >= 'a' && *s <= 'f') {
-			d = *s - 'a' + 10;
-		} else {
-			d = *s - 'A' + 10;
-		}
-		dec = (dec << 4) + d;
-	}
-
-	return dec;
+	return dev_id->devno + (dev_id->ssid << 16);
 }
 
 /*
@@ -472,6 +449,7 @@ tape_alloc_device(void)
 	INIT_LIST_HEAD(&device->req_queue);
 	INIT_LIST_HEAD(&device->node);
 	init_waitqueue_head(&device->state_change_wq);
+	init_waitqueue_head(&device->wait_queue);
 	device->tape_state = TS_INIT;
 	device->medium_state = MS_UNKNOWN;
 	*device->modeset_byte = 0;
@@ -551,6 +529,7 @@ tape_generic_probe(struct ccw_device *cdev)
 {
 	struct tape_device *device;
 	int ret;
+	struct ccw_dev_id dev_id;
 
 	device = tape_alloc_device();
 	if (IS_ERR(device))
@@ -565,7 +544,8 @@ tape_generic_probe(struct ccw_device *cdev)
 	cdev->dev.driver_data = device;
 	cdev->handler = __tape_do_irq;
 	device->cdev = cdev;
-	device->cdev_id = busid_to_int(cdev->dev.bus_id);
+	ccw_device_get_id(cdev, &dev_id);
+	device->cdev_id = devid_to_int(&dev_id);
 	PRINT_INFO("tape device %s found\n", cdev->dev.bus_id);
 	return ret;
 }
@@ -975,21 +955,19 @@ __tape_wake_up(struct tape_request *request, void *data)
 int
 tape_do_io(struct tape_device *device, struct tape_request *request)
 {
-	wait_queue_head_t wq;
 	int rc;
 
-	init_waitqueue_head(&wq);
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	/* Setup callback */
 	request->callback = __tape_wake_up;
-	request->callback_data = &wq;
+	request->callback_data = &device->wait_queue;
 	/* Add request to request queue and try to start it. */
 	rc = __tape_start_request(device, request);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	if (rc)
 		return rc;
 	/* Request added to the queue. Wait for its completion. */
-	wait_event(wq, (request->callback == NULL));
+	wait_event(device->wait_queue, (request->callback == NULL));
 	/* Get rc from request */
 	return request->rc;
 }
@@ -1010,20 +988,19 @@ int
 tape_do_io_interruptible(struct tape_device *device,
 			 struct tape_request *request)
 {
-	wait_queue_head_t wq;
 	int rc;
 
-	init_waitqueue_head(&wq);
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
 	/* Setup callback */
 	request->callback = __tape_wake_up_interruptible;
-	request->callback_data = &wq;
+	request->callback_data = &device->wait_queue;
 	rc = __tape_start_request(device, request);
 	spin_unlock_irq(get_ccwdev_lock(device->cdev));
 	if (rc)
 		return rc;
 	/* Request added to the queue. Wait for its completion. */
-	rc = wait_event_interruptible(wq, (request->callback == NULL));
+	rc = wait_event_interruptible(device->wait_queue,
+				      (request->callback == NULL));
 	if (rc != -ERESTARTSYS)
 		/* Request finished normally. */
 		return request->rc;
@@ -1036,7 +1013,7 @@ tape_do_io_interruptible(struct tape_device *device,
 	/* Wait for the interrupt that acknowledges the halt. */
 	do {
 		rc = wait_event_interruptible(
-			wq,
+			device->wait_queue,
 			(request->callback == NULL)
 		);
 	} while (rc == -ERESTARTSYS);
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index d364e0bfae12..2c2428cc05d8 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -762,10 +762,10 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
 		device_unregister(dev);
 		return ret;
 	}
-	priv->class_device = device_create(vmlogrdr_class, dev,
+	priv->class_device = device_create_drvdata(vmlogrdr_class, dev,
 					   MKDEV(vmlogrdr_major,
 						 priv->minor_num),
-					   "%s", dev->bus_id);
+					   priv, "%s", dev->bus_id);
 	if (IS_ERR(priv->class_device)) {
 		ret = PTR_ERR(priv->class_device);
 		priv->class_device=NULL;
@@ -773,7 +773,6 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
 		device_unregister(dev);
 		return ret;
 	}
-	dev->driver_data = priv;
 	priv->device = dev;
 	return 0;
 }
@@ -858,7 +857,7 @@ static int __init vmlogrdr_init(void)
 	for (i=0; i < MAXMINOR; ++i ) {
 		sys_ser[i].buffer = (char *) get_zeroed_page(GFP_KERNEL);
 		if (!sys_ser[i].buffer) {
-			rc = ENOMEM;
+			rc = -ENOMEM;
 			break;
 		}
 		sys_ser[i].current_position = sys_ser[i].buffer;
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index 9c21b8f43f9b..a4a5f2efea48 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -19,7 +19,6 @@
 
 #include <asm/cio.h>
 #include <asm/uaccess.h>
-#include <asm/cio.h>
 
 #include "blacklist.h"
 #include "cio.h"
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index ba559053402e..5cf7be008e98 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -243,16 +243,10 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func)
 	/* Setup sense path group id channel program. */
 	cdev->private->pgid[0].inf.fc = func;
 	ccw = cdev->private->iccws;
-	if (!cdev->private->flags.pgid_single) {
-		cdev->private->pgid[0].inf.fc |= SPID_FUNC_MULTI_PATH;
-		ccw->cmd_code = CCW_CMD_SUSPEND_RECONN;
-		ccw->cda = 0;
-		ccw->count = 0;
-		ccw->flags = CCW_FLAG_SLI | CCW_FLAG_CC;
-		ccw++;
-	} else
+	if (cdev->private->flags.pgid_single)
 		cdev->private->pgid[0].inf.fc |= SPID_FUNC_SINGLE_PATH;
-
+	else
+		cdev->private->pgid[0].inf.fc |= SPID_FUNC_MULTI_PATH;
 	ccw->cmd_code = CCW_CMD_SET_PGID;
 	ccw->cda = (__u32) __pa (&cdev->private->pgid[0]);
 	ccw->count = sizeof (struct pgid);
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 47a7e6200b26..5ab34340919b 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -31,11 +31,6 @@
  */
 static void *kvm_devices;
 
-/*
- * Unique numbering for kvm devices.
- */
-static unsigned int dev_index;
-
 struct kvm_device {
 	struct virtio_device vdev;
 	struct kvm_device_desc *desc;
@@ -78,27 +73,32 @@ static unsigned desc_size(const struct kvm_device_desc *desc)
 		+ desc->config_len;
 }
 
-/*
- * This tests (and acknowleges) a feature bit.
- */
-static bool kvm_feature(struct virtio_device *vdev, unsigned fbit)
+/* This gets the device's feature bits. */
+static u32 kvm_get_features(struct virtio_device *vdev)
 {
+	unsigned int i;
+	u32 features = 0;
 	struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
-	u8 *features;
+	u8 *in_features = kvm_vq_features(desc);
 
-	if (fbit / 8 > desc->feature_len)
-		return false;
+	for (i = 0; i < min(desc->feature_len * 8, 32); i++)
+		if (in_features[i / 8] & (1 << (i % 8)))
+			features |= (1 << i);
+	return features;
+}
 
-	features = kvm_vq_features(desc);
-	if (!(features[fbit / 8] & (1 << (fbit % 8))))
-		return false;
+static void kvm_set_features(struct virtio_device *vdev, u32 features)
+{
+	unsigned int i;
+	struct kvm_device_desc *desc = to_kvmdev(vdev)->desc;
+	/* Second half of bitmap is features we accept. */
+	u8 *out_features = kvm_vq_features(desc) + desc->feature_len;
 
-	/*
-	 * We set the matching bit in the other half of the bitmap to tell the
-	 * Host we want to use this feature.
-	 */
-	features[desc->feature_len + fbit / 8] |= (1 << (fbit % 8));
-	return true;
+	memset(out_features, 0, desc->feature_len);
+	for (i = 0; i < min(desc->feature_len * 8, 32); i++) {
+		if (features & (1 << i))
+			out_features[i / 8] |= (1 << (i % 8));
+	}
 }
 
 /*
@@ -221,7 +221,8 @@ static void kvm_del_vq(struct virtqueue *vq)
  * The config ops structure as defined by virtio config
  */
 static struct virtio_config_ops kvm_vq_configspace_ops = {
-	.feature = kvm_feature,
+	.get_features = kvm_get_features,
+	.set_features = kvm_set_features,
 	.get = kvm_get,
 	.set = kvm_set,
 	.get_status = kvm_get_status,
@@ -244,26 +245,25 @@ static struct device kvm_root = {
  * adds a new device and register it with virtio
  * appropriate drivers are loaded by the device model
  */
-static void add_kvm_device(struct kvm_device_desc *d)
+static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset)
 {
 	struct kvm_device *kdev;
 
 	kdev = kzalloc(sizeof(*kdev), GFP_KERNEL);
 	if (!kdev) {
-		printk(KERN_EMERG "Cannot allocate kvm dev %u\n",
-		       dev_index++);
+		printk(KERN_EMERG "Cannot allocate kvm dev %u type %u\n",
+		       offset, d->type);
 		return;
 	}
 
 	kdev->vdev.dev.parent = &kvm_root;
-	kdev->vdev.index = dev_index++;
 	kdev->vdev.id.device = d->type;
 	kdev->vdev.config = &kvm_vq_configspace_ops;
 	kdev->desc = d;
 
 	if (register_virtio_device(&kdev->vdev) != 0) {
-		printk(KERN_ERR "Failed to register kvm device %u\n",
-		       kdev->vdev.index);
+		printk(KERN_ERR "Failed to register kvm device %u type %u\n",
+		       offset, d->type);
 		kfree(kdev);
 	}
 }
@@ -283,7 +283,7 @@ static void scan_devices(void)
 		if (d->type == 0)
 			break;
 
-		add_kvm_device(d);
+		add_kvm_device(d, i);
 	}
 }
 