Diffstat (limited to 'drivers/s390')
-rw-r--r-- | drivers/s390/block/dasd.c         |  28
-rw-r--r-- | drivers/s390/char/raw3270.c       |   9
-rw-r--r-- | drivers/s390/char/sclp_config.c   |   2
-rw-r--r-- | drivers/s390/char/sclp_vt220.c    |  28
-rw-r--r-- | drivers/s390/char/tape.h          |   3
-rw-r--r-- | drivers/s390/char/tape_3590.c     |   2
-rw-r--r-- | drivers/s390/char/tape_block.c    |   4
-rw-r--r-- | drivers/s390/char/tape_core.c     |  16
-rw-r--r-- | drivers/s390/char/vmlogrdr.c      |   9
-rw-r--r-- | drivers/s390/cio/blacklist.c      |   6
-rw-r--r-- | drivers/s390/cio/cio.c            |  20
-rw-r--r-- | drivers/s390/kvm/kvm_virtio.c     |  58
-rw-r--r-- | drivers/s390/net/qeth_core_main.c |  49
-rw-r--r-- | drivers/s390/net/qeth_core_offl.c |   6
-rw-r--r-- | drivers/s390/net/qeth_core_sys.c  |  12
-rw-r--r-- | drivers/s390/net/qeth_l2_main.c   |  41
-rw-r--r-- | drivers/s390/net/qeth_l3_main.c   |  75
-rw-r--r-- | drivers/s390/net/qeth_l3_sys.c    |  24
-rw-r--r-- | drivers/s390/s390mach.c           |   1
19 files changed, 145 insertions, 248 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 8ba3f135da22..1a4025683362 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -63,6 +63,7 @@ static void dasd_return_cqr_cb(struct dasd_ccw_req *, void *);
63 | */ | 63 | */ |
64 | static wait_queue_head_t dasd_init_waitq; | 64 | static wait_queue_head_t dasd_init_waitq; |
65 | static wait_queue_head_t dasd_flush_wq; | 65 | static wait_queue_head_t dasd_flush_wq; |
66 | static wait_queue_head_t generic_waitq; | ||
66 | 67 | ||
67 | /* | 68 | /* |
68 | * Allocate memory for a new device structure. | 69 | * Allocate memory for a new device structure. |
@@ -1151,11 +1152,15 @@ static void __dasd_device_process_final_queue(struct dasd_device *device, | |||
1151 | struct list_head *l, *n; | 1152 | struct list_head *l, *n; |
1152 | struct dasd_ccw_req *cqr; | 1153 | struct dasd_ccw_req *cqr; |
1153 | struct dasd_block *block; | 1154 | struct dasd_block *block; |
1155 | void (*callback)(struct dasd_ccw_req *, void *data); | ||
1156 | void *callback_data; | ||
1154 | 1157 | ||
1155 | list_for_each_safe(l, n, final_queue) { | 1158 | list_for_each_safe(l, n, final_queue) { |
1156 | cqr = list_entry(l, struct dasd_ccw_req, devlist); | 1159 | cqr = list_entry(l, struct dasd_ccw_req, devlist); |
1157 | list_del_init(&cqr->devlist); | 1160 | list_del_init(&cqr->devlist); |
1158 | block = cqr->block; | 1161 | block = cqr->block; |
1162 | callback = cqr->callback; | ||
1163 | callback_data = cqr->callback_data; | ||
1159 | if (block) | 1164 | if (block) |
1160 | spin_lock_bh(&block->queue_lock); | 1165 | spin_lock_bh(&block->queue_lock); |
1161 | switch (cqr->status) { | 1166 | switch (cqr->status) { |
@@ -1176,7 +1181,7 @@ static void __dasd_device_process_final_queue(struct dasd_device *device, | |||
1176 | BUG(); | 1181 | BUG(); |
1177 | } | 1182 | } |
1178 | if (cqr->callback != NULL) | 1183 | if (cqr->callback != NULL) |
1179 | (cqr->callback)(cqr, cqr->callback_data); | 1184 | (callback)(cqr, callback_data); |
1180 | if (block) | 1185 | if (block) |
1181 | spin_unlock_bh(&block->queue_lock); | 1186 | spin_unlock_bh(&block->queue_lock); |
1182 | } | 1187 | } |
@@ -1406,17 +1411,15 @@ static inline int _wait_for_wakeup(struct dasd_ccw_req *cqr) | |||
1406 | */ | 1411 | */ |
1407 | int dasd_sleep_on(struct dasd_ccw_req *cqr) | 1412 | int dasd_sleep_on(struct dasd_ccw_req *cqr) |
1408 | { | 1413 | { |
1409 | wait_queue_head_t wait_q; | ||
1410 | struct dasd_device *device; | 1414 | struct dasd_device *device; |
1411 | int rc; | 1415 | int rc; |
1412 | 1416 | ||
1413 | device = cqr->startdev; | 1417 | device = cqr->startdev; |
1414 | 1418 | ||
1415 | init_waitqueue_head (&wait_q); | ||
1416 | cqr->callback = dasd_wakeup_cb; | 1419 | cqr->callback = dasd_wakeup_cb; |
1417 | cqr->callback_data = (void *) &wait_q; | 1420 | cqr->callback_data = (void *) &generic_waitq; |
1418 | dasd_add_request_tail(cqr); | 1421 | dasd_add_request_tail(cqr); |
1419 | wait_event(wait_q, _wait_for_wakeup(cqr)); | 1422 | wait_event(generic_waitq, _wait_for_wakeup(cqr)); |
1420 | 1423 | ||
1421 | /* Request status is either done or failed. */ | 1424 | /* Request status is either done or failed. */ |
1422 | rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; | 1425 | rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; |
@@ -1429,20 +1432,18 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr) | |||
1429 | */ | 1432 | */ |
1430 | int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) | 1433 | int dasd_sleep_on_interruptible(struct dasd_ccw_req *cqr) |
1431 | { | 1434 | { |
1432 | wait_queue_head_t wait_q; | ||
1433 | struct dasd_device *device; | 1435 | struct dasd_device *device; |
1434 | int rc; | 1436 | int rc; |
1435 | 1437 | ||
1436 | device = cqr->startdev; | 1438 | device = cqr->startdev; |
1437 | init_waitqueue_head (&wait_q); | ||
1438 | cqr->callback = dasd_wakeup_cb; | 1439 | cqr->callback = dasd_wakeup_cb; |
1439 | cqr->callback_data = (void *) &wait_q; | 1440 | cqr->callback_data = (void *) &generic_waitq; |
1440 | dasd_add_request_tail(cqr); | 1441 | dasd_add_request_tail(cqr); |
1441 | rc = wait_event_interruptible(wait_q, _wait_for_wakeup(cqr)); | 1442 | rc = wait_event_interruptible(generic_waitq, _wait_for_wakeup(cqr)); |
1442 | if (rc == -ERESTARTSYS) { | 1443 | if (rc == -ERESTARTSYS) { |
1443 | dasd_cancel_req(cqr); | 1444 | dasd_cancel_req(cqr); |
1444 | /* wait (non-interruptible) for final status */ | 1445 | /* wait (non-interruptible) for final status */ |
1445 | wait_event(wait_q, _wait_for_wakeup(cqr)); | 1446 | wait_event(generic_waitq, _wait_for_wakeup(cqr)); |
1446 | } | 1447 | } |
1447 | rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; | 1448 | rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; |
1448 | return rc; | 1449 | return rc; |
@@ -1466,7 +1467,6 @@ static inline int _dasd_term_running_cqr(struct dasd_device *device) | |||
1466 | 1467 | ||
1467 | int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) | 1468 | int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) |
1468 | { | 1469 | { |
1469 | wait_queue_head_t wait_q; | ||
1470 | struct dasd_device *device; | 1470 | struct dasd_device *device; |
1471 | int rc; | 1471 | int rc; |
1472 | 1472 | ||
@@ -1478,9 +1478,8 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) | |||
1478 | return rc; | 1478 | return rc; |
1479 | } | 1479 | } |
1480 | 1480 | ||
1481 | init_waitqueue_head (&wait_q); | ||
1482 | cqr->callback = dasd_wakeup_cb; | 1481 | cqr->callback = dasd_wakeup_cb; |
1483 | cqr->callback_data = (void *) &wait_q; | 1482 | cqr->callback_data = (void *) &generic_waitq; |
1484 | cqr->status = DASD_CQR_QUEUED; | 1483 | cqr->status = DASD_CQR_QUEUED; |
1485 | list_add(&cqr->devlist, &device->ccw_queue); | 1484 | list_add(&cqr->devlist, &device->ccw_queue); |
1486 | 1485 | ||
@@ -1489,7 +1488,7 @@ int dasd_sleep_on_immediatly(struct dasd_ccw_req *cqr) | |||
1489 | 1488 | ||
1490 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 1489 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
1491 | 1490 | ||
1492 | wait_event(wait_q, _wait_for_wakeup(cqr)); | 1491 | wait_event(generic_waitq, _wait_for_wakeup(cqr)); |
1493 | 1492 | ||
1494 | /* Request status is either done or failed. */ | 1493 | /* Request status is either done or failed. */ |
1495 | rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; | 1494 | rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO; |
@@ -2430,6 +2429,7 @@ static int __init dasd_init(void) | |||
2430 | 2429 | ||
2431 | init_waitqueue_head(&dasd_init_waitq); | 2430 | init_waitqueue_head(&dasd_init_waitq); |
2432 | init_waitqueue_head(&dasd_flush_wq); | 2431 | init_waitqueue_head(&dasd_flush_wq); |
2432 | init_waitqueue_head(&generic_waitq); | ||
2433 | 2433 | ||
2434 | /* register 'common' DASD debug area, used for all DBF_XXX calls */ | 2434 | /* register 'common' DASD debug area, used for all DBF_XXX calls */ |
2435 | dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long)); | 2435 | dasd_debug_area = debug_register("dasd", 1, 1, 8 * sizeof(long)); |
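
The dasd.c hunks above replace the wait_queue_head_t that dasd_sleep_on(), dasd_sleep_on_interruptible() and dasd_sleep_on_immediatly() kept on their own stack with a single module-static generic_waitq, and make __dasd_device_process_final_queue() latch callback/callback_data before invoking the callback. A minimal sketch of the resulting pattern follows; the names sync_req, REQ_DONE and sync_req_sleep_on() are invented for illustration and are not the driver's own code.

	#include <linux/wait.h>
	#include <linux/errno.h>

	enum req_state { REQ_QUEUED, REQ_IN_IO, REQ_DONE, REQ_FAILED };

	struct sync_req {			/* stand-in for struct dasd_ccw_req */
		enum req_state status;
		void (*callback)(struct sync_req *, void *);
		void *callback_data;
	};

	/* One wait queue shared by all synchronous requests. */
	static DECLARE_WAIT_QUEUE_HEAD(generic_waitq);

	/* Completion callback: wake everybody; each sleeper re-checks its own request. */
	static void sync_req_wakeup(struct sync_req *req, void *data)
	{
		wake_up((wait_queue_head_t *) data);
	}

	static int sync_req_sleep_on(struct sync_req *req)
	{
		req->callback = sync_req_wakeup;
		req->callback_data = &generic_waitq;
		/* ... queue the request on the device and start the I/O ... */
		wait_event(generic_waitq,
			   req->status == REQ_DONE || req->status == REQ_FAILED);
		return req->status == REQ_DONE ? 0 : -EIO;
	}

Because the wait queue no longer lives in a caller's stack frame, a callback that fires after the sleeper has already returned (for instance on the interruptible/cancel path) can still perform the wake_up() safely.
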
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index 0d98f1ff2edd..848ef7e8523f 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -549,7 +549,6 @@ raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view,
549 | struct raw3270_request *rq) | 549 | struct raw3270_request *rq) |
550 | { | 550 | { |
551 | unsigned long flags; | 551 | unsigned long flags; |
552 | wait_queue_head_t wq; | ||
553 | int rc; | 552 | int rc; |
554 | 553 | ||
555 | #ifdef CONFIG_TN3270_CONSOLE | 554 | #ifdef CONFIG_TN3270_CONSOLE |
@@ -566,20 +565,20 @@ raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view, | |||
566 | return rq->rc; | 565 | return rq->rc; |
567 | } | 566 | } |
568 | #endif | 567 | #endif |
569 | init_waitqueue_head(&wq); | ||
570 | rq->callback = raw3270_wake_init; | 568 | rq->callback = raw3270_wake_init; |
571 | rq->callback_data = &wq; | 569 | rq->callback_data = &raw3270_wait_queue; |
572 | spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); | 570 | spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); |
573 | rc = __raw3270_start(rp, view, rq); | 571 | rc = __raw3270_start(rp, view, rq); |
574 | spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags); | 572 | spin_unlock_irqrestore(get_ccwdev_lock(view->dev->cdev), flags); |
575 | if (rc) | 573 | if (rc) |
576 | return rc; | 574 | return rc; |
577 | /* Now wait for the completion. */ | 575 | /* Now wait for the completion. */ |
578 | rc = wait_event_interruptible(wq, raw3270_request_final(rq)); | 576 | rc = wait_event_interruptible(raw3270_wait_queue, |
577 | raw3270_request_final(rq)); | ||
579 | if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */ | 578 | if (rc == -ERESTARTSYS) { /* Interrupted by a signal. */ |
580 | raw3270_halt_io(view->dev, rq); | 579 | raw3270_halt_io(view->dev, rq); |
581 | /* No wait for the halt to complete. */ | 580 | /* No wait for the halt to complete. */ |
582 | wait_event(wq, raw3270_request_final(rq)); | 581 | wait_event(raw3270_wait_queue, raw3270_request_final(rq)); |
583 | return -ERESTARTSYS; | 582 | return -ERESTARTSYS; |
584 | } | 583 | } |
585 | return rq->rc; | 584 | return rq->rc; |
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 9e784d5f7f57..ad05a87bc480 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -40,7 +40,7 @@ static void sclp_cpu_capability_notify(struct work_struct *work)
40 | put_online_cpus(); | 40 | put_online_cpus(); |
41 | } | 41 | } |
42 | 42 | ||
43 | static void sclp_cpu_change_notify(struct work_struct *work) | 43 | static void __ref sclp_cpu_change_notify(struct work_struct *work) |
44 | { | 44 | { |
45 | smp_rescan_cpus(); | 45 | smp_rescan_cpus(); |
46 | } | 46 | } |
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index 35707c04e613..3e577f655b18 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -71,9 +71,6 @@ static struct list_head sclp_vt220_outqueue;
71 | /* Number of requests in outqueue */ | 71 | /* Number of requests in outqueue */ |
72 | static int sclp_vt220_outqueue_count; | 72 | static int sclp_vt220_outqueue_count; |
73 | 73 | ||
74 | /* Wait queue used to delay write requests while we've run out of buffers */ | ||
75 | static wait_queue_head_t sclp_vt220_waitq; | ||
76 | |||
77 | /* Timer used for delaying write requests to merge subsequent messages into | 74 | /* Timer used for delaying write requests to merge subsequent messages into |
78 | * a single buffer */ | 75 | * a single buffer */ |
79 | static struct timer_list sclp_vt220_timer; | 76 | static struct timer_list sclp_vt220_timer; |
@@ -133,7 +130,6 @@ sclp_vt220_process_queue(struct sclp_vt220_request *request) | |||
133 | } while (request && __sclp_vt220_emit(request)); | 130 | } while (request && __sclp_vt220_emit(request)); |
134 | if (request == NULL && sclp_vt220_flush_later) | 131 | if (request == NULL && sclp_vt220_flush_later) |
135 | sclp_vt220_emit_current(); | 132 | sclp_vt220_emit_current(); |
136 | wake_up(&sclp_vt220_waitq); | ||
137 | /* Check if the tty needs a wake up call */ | 133 | /* Check if the tty needs a wake up call */ |
138 | if (sclp_vt220_tty != NULL) { | 134 | if (sclp_vt220_tty != NULL) { |
139 | tty_wakeup(sclp_vt220_tty); | 135 | tty_wakeup(sclp_vt220_tty); |
@@ -383,7 +379,7 @@ sclp_vt220_timeout(unsigned long data) | |||
383 | */ | 379 | */ |
384 | static int | 380 | static int |
385 | __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, | 381 | __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, |
386 | int convertlf, int may_schedule) | 382 | int convertlf, int may_fail) |
387 | { | 383 | { |
388 | unsigned long flags; | 384 | unsigned long flags; |
389 | void *page; | 385 | void *page; |
@@ -395,15 +391,14 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, | |||
395 | overall_written = 0; | 391 | overall_written = 0; |
396 | spin_lock_irqsave(&sclp_vt220_lock, flags); | 392 | spin_lock_irqsave(&sclp_vt220_lock, flags); |
397 | do { | 393 | do { |
398 | /* Create a sclp output buffer if none exists yet */ | 394 | /* Create an sclp output buffer if none exists yet */ |
399 | if (sclp_vt220_current_request == NULL) { | 395 | if (sclp_vt220_current_request == NULL) { |
400 | while (list_empty(&sclp_vt220_empty)) { | 396 | while (list_empty(&sclp_vt220_empty)) { |
401 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | 397 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); |
402 | if (in_interrupt() || !may_schedule) | 398 | if (may_fail) |
403 | sclp_sync_wait(); | 399 | goto out; |
404 | else | 400 | else |
405 | wait_event(sclp_vt220_waitq, | 401 | sclp_sync_wait(); |
406 | !list_empty(&sclp_vt220_empty)); | ||
407 | spin_lock_irqsave(&sclp_vt220_lock, flags); | 402 | spin_lock_irqsave(&sclp_vt220_lock, flags); |
408 | } | 403 | } |
409 | page = (void *) sclp_vt220_empty.next; | 404 | page = (void *) sclp_vt220_empty.next; |
@@ -437,6 +432,7 @@ __sclp_vt220_write(const unsigned char *buf, int count, int do_schedule, | |||
437 | add_timer(&sclp_vt220_timer); | 432 | add_timer(&sclp_vt220_timer); |
438 | } | 433 | } |
439 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); | 434 | spin_unlock_irqrestore(&sclp_vt220_lock, flags); |
435 | out: | ||
440 | return overall_written; | 436 | return overall_written; |
441 | } | 437 | } |
442 | 438 | ||
@@ -520,19 +516,11 @@ sclp_vt220_close(struct tty_struct *tty, struct file *filp) | |||
520 | * character to the tty device. If the kernel uses this routine, | 516 | * character to the tty device. If the kernel uses this routine, |
521 | * it must call the flush_chars() routine (if defined) when it is | 517 | * it must call the flush_chars() routine (if defined) when it is |
522 | * done stuffing characters into the driver. | 518 | * done stuffing characters into the driver. |
523 | * | ||
524 | * NOTE: include/linux/tty_driver.h specifies that a character should be | ||
525 | * ignored if there is no room in the queue. This driver implements a different | ||
526 | * semantic in that it will block when there is no more room left. | ||
527 | * | ||
528 | * FIXME: putchar can currently be called from BH and other non blocking | ||
529 | * handlers so this semantic isn't a good idea. | ||
530 | */ | 519 | */ |
531 | static int | 520 | static int |
532 | sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch) | 521 | sclp_vt220_put_char(struct tty_struct *tty, unsigned char ch) |
533 | { | 522 | { |
534 | __sclp_vt220_write(&ch, 1, 0, 0, 1); | 523 | return __sclp_vt220_write(&ch, 1, 0, 0, 1); |
535 | return 1; | ||
536 | } | 524 | } |
537 | 525 | ||
538 | /* | 526 | /* |
@@ -653,7 +641,6 @@ static int __init __sclp_vt220_init(void) | |||
653 | spin_lock_init(&sclp_vt220_lock); | 641 | spin_lock_init(&sclp_vt220_lock); |
654 | INIT_LIST_HEAD(&sclp_vt220_empty); | 642 | INIT_LIST_HEAD(&sclp_vt220_empty); |
655 | INIT_LIST_HEAD(&sclp_vt220_outqueue); | 643 | INIT_LIST_HEAD(&sclp_vt220_outqueue); |
656 | init_waitqueue_head(&sclp_vt220_waitq); | ||
657 | init_timer(&sclp_vt220_timer); | 644 | init_timer(&sclp_vt220_timer); |
658 | sclp_vt220_current_request = NULL; | 645 | sclp_vt220_current_request = NULL; |
659 | sclp_vt220_buffered_chars = 0; | 646 | sclp_vt220_buffered_chars = 0; |
@@ -786,6 +773,7 @@ sclp_vt220_con_init(void) | |||
786 | { | 773 | { |
787 | int rc; | 774 | int rc; |
788 | 775 | ||
776 | INIT_LIST_HEAD(&sclp_vt220_register.list); | ||
789 | if (!CONSOLE_IS_SCLP) | 777 | if (!CONSOLE_IS_SCLP) |
790 | return 0; | 778 | return 0; |
791 | rc = __sclp_vt220_init(); | 779 | rc = __sclp_vt220_init(); |
diff --git a/drivers/s390/char/tape.h b/drivers/s390/char/tape.h
index dddf8d62c153..d0d565a05dfe 100644
--- a/drivers/s390/char/tape.h
+++ b/drivers/s390/char/tape.h
@@ -231,6 +231,9 @@ struct tape_device {
231 | /* Request queue. */ | 231 | /* Request queue. */ |
232 | struct list_head req_queue; | 232 | struct list_head req_queue; |
233 | 233 | ||
234 | /* Request wait queue. */ | ||
235 | wait_queue_head_t wait_queue; | ||
236 | |||
234 | /* Each tape device has (currently) two minor numbers. */ | 237 | /* Each tape device has (currently) two minor numbers. */ |
235 | int first_minor; | 238 | int first_minor; |
236 | 239 | ||
diff --git a/drivers/s390/char/tape_3590.c b/drivers/s390/char/tape_3590.c
index 8246ef3ab095..42ce7915fc5d 100644
--- a/drivers/s390/char/tape_3590.c
+++ b/drivers/s390/char/tape_3590.c
@@ -1598,7 +1598,7 @@ tape_3590_setup_device(struct tape_device *device)
1598 | rc = tape_3590_read_dev_chars(device, rdc_data); | 1598 | rc = tape_3590_read_dev_chars(device, rdc_data); |
1599 | if (rc) { | 1599 | if (rc) { |
1600 | DBF_LH(3, "Read device characteristics failed!\n"); | 1600 | DBF_LH(3, "Read device characteristics failed!\n"); |
1601 | goto fail_kmalloc; | 1601 | goto fail_rdc_data; |
1602 | } | 1602 | } |
1603 | rc = tape_std_assign(device); | 1603 | rc = tape_std_assign(device); |
1604 | if (rc) | 1604 | if (rc) |
diff --git a/drivers/s390/char/tape_block.c b/drivers/s390/char/tape_block.c
index ddc4a114e7f4..95da72bc17e8 100644
--- a/drivers/s390/char/tape_block.c
+++ b/drivers/s390/char/tape_block.c
@@ -179,11 +179,11 @@ tapeblock_requeue(struct work_struct *work) {
179 | tapeblock_end_request(req, -EIO); | 179 | tapeblock_end_request(req, -EIO); |
180 | continue; | 180 | continue; |
181 | } | 181 | } |
182 | blkdev_dequeue_request(req); | ||
183 | nr_queued++; | ||
182 | spin_unlock_irq(&device->blk_data.request_queue_lock); | 184 | spin_unlock_irq(&device->blk_data.request_queue_lock); |
183 | rc = tapeblock_start_request(device, req); | 185 | rc = tapeblock_start_request(device, req); |
184 | spin_lock_irq(&device->blk_data.request_queue_lock); | 186 | spin_lock_irq(&device->blk_data.request_queue_lock); |
185 | blkdev_dequeue_request(req); | ||
186 | nr_queued++; | ||
187 | } | 187 | } |
188 | spin_unlock_irq(&device->blk_data.request_queue_lock); | 188 | spin_unlock_irq(&device->blk_data.request_queue_lock); |
189 | atomic_set(&device->blk_data.requeue_scheduled, 0); | 189 | atomic_set(&device->blk_data.requeue_scheduled, 0); |
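
The tape_block.c hunk moves blkdev_dequeue_request() and the nr_queued accounting to before the request_queue_lock is dropped, so the request is already off the block-layer queue by the time tapeblock_start_request() runs without the lock. The fragment below only sketches the corrected loop shape; it assumes the surrounding code iterates with elv_next_request(), as block drivers of this era did, and is not a compilable excerpt of the driver.

	spin_lock_irq(&device->blk_data.request_queue_lock);
	while ((req = elv_next_request(queue)) != NULL) {
		/* ... reject requests the device cannot handle, as in the hunk ... */
		blkdev_dequeue_request(req);	/* still under the queue lock */
		nr_queued++;
		spin_unlock_irq(&device->blk_data.request_queue_lock);
		rc = tapeblock_start_request(device, req);
		spin_lock_irq(&device->blk_data.request_queue_lock);
	}
	spin_unlock_irq(&device->blk_data.request_queue_lock);
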
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c
index 76e44eb7c47f..c20e3c548343 100644
--- a/drivers/s390/char/tape_core.c
+++ b/drivers/s390/char/tape_core.c
@@ -449,6 +449,7 @@ tape_alloc_device(void)
449 | INIT_LIST_HEAD(&device->req_queue); | 449 | INIT_LIST_HEAD(&device->req_queue); |
450 | INIT_LIST_HEAD(&device->node); | 450 | INIT_LIST_HEAD(&device->node); |
451 | init_waitqueue_head(&device->state_change_wq); | 451 | init_waitqueue_head(&device->state_change_wq); |
452 | init_waitqueue_head(&device->wait_queue); | ||
452 | device->tape_state = TS_INIT; | 453 | device->tape_state = TS_INIT; |
453 | device->medium_state = MS_UNKNOWN; | 454 | device->medium_state = MS_UNKNOWN; |
454 | *device->modeset_byte = 0; | 455 | *device->modeset_byte = 0; |
@@ -954,21 +955,19 @@ __tape_wake_up(struct tape_request *request, void *data) | |||
954 | int | 955 | int |
955 | tape_do_io(struct tape_device *device, struct tape_request *request) | 956 | tape_do_io(struct tape_device *device, struct tape_request *request) |
956 | { | 957 | { |
957 | wait_queue_head_t wq; | ||
958 | int rc; | 958 | int rc; |
959 | 959 | ||
960 | init_waitqueue_head(&wq); | ||
961 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | 960 | spin_lock_irq(get_ccwdev_lock(device->cdev)); |
962 | /* Setup callback */ | 961 | /* Setup callback */ |
963 | request->callback = __tape_wake_up; | 962 | request->callback = __tape_wake_up; |
964 | request->callback_data = &wq; | 963 | request->callback_data = &device->wait_queue; |
965 | /* Add request to request queue and try to start it. */ | 964 | /* Add request to request queue and try to start it. */ |
966 | rc = __tape_start_request(device, request); | 965 | rc = __tape_start_request(device, request); |
967 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 966 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
968 | if (rc) | 967 | if (rc) |
969 | return rc; | 968 | return rc; |
970 | /* Request added to the queue. Wait for its completion. */ | 969 | /* Request added to the queue. Wait for its completion. */ |
971 | wait_event(wq, (request->callback == NULL)); | 970 | wait_event(device->wait_queue, (request->callback == NULL)); |
972 | /* Get rc from request */ | 971 | /* Get rc from request */ |
973 | return request->rc; | 972 | return request->rc; |
974 | } | 973 | } |
@@ -989,20 +988,19 @@ int | |||
989 | tape_do_io_interruptible(struct tape_device *device, | 988 | tape_do_io_interruptible(struct tape_device *device, |
990 | struct tape_request *request) | 989 | struct tape_request *request) |
991 | { | 990 | { |
992 | wait_queue_head_t wq; | ||
993 | int rc; | 991 | int rc; |
994 | 992 | ||
995 | init_waitqueue_head(&wq); | ||
996 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | 993 | spin_lock_irq(get_ccwdev_lock(device->cdev)); |
997 | /* Setup callback */ | 994 | /* Setup callback */ |
998 | request->callback = __tape_wake_up_interruptible; | 995 | request->callback = __tape_wake_up_interruptible; |
999 | request->callback_data = &wq; | 996 | request->callback_data = &device->wait_queue; |
1000 | rc = __tape_start_request(device, request); | 997 | rc = __tape_start_request(device, request); |
1001 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 998 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
1002 | if (rc) | 999 | if (rc) |
1003 | return rc; | 1000 | return rc; |
1004 | /* Request added to the queue. Wait for its completion. */ | 1001 | /* Request added to the queue. Wait for its completion. */ |
1005 | rc = wait_event_interruptible(wq, (request->callback == NULL)); | 1002 | rc = wait_event_interruptible(device->wait_queue, |
1003 | (request->callback == NULL)); | ||
1006 | if (rc != -ERESTARTSYS) | 1004 | if (rc != -ERESTARTSYS) |
1007 | /* Request finished normally. */ | 1005 | /* Request finished normally. */ |
1008 | return request->rc; | 1006 | return request->rc; |
@@ -1015,7 +1013,7 @@ tape_do_io_interruptible(struct tape_device *device, | |||
1015 | /* Wait for the interrupt that acknowledges the halt. */ | 1013 | /* Wait for the interrupt that acknowledges the halt. */ |
1016 | do { | 1014 | do { |
1017 | rc = wait_event_interruptible( | 1015 | rc = wait_event_interruptible( |
1018 | wq, | 1016 | device->wait_queue, |
1019 | (request->callback == NULL) | 1017 | (request->callback == NULL) |
1020 | ); | 1018 | ); |
1021 | } while (rc == -ERESTARTSYS); | 1019 | } while (rc == -ERESTARTSYS); |
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c
index d101328a2980..6f852fdb6d70 100644
--- a/drivers/s390/char/vmlogrdr.c
+++ b/drivers/s390/char/vmlogrdr.c
@@ -768,10 +768,10 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
768 | device_unregister(dev); | 768 | device_unregister(dev); |
769 | return ret; | 769 | return ret; |
770 | } | 770 | } |
771 | priv->class_device = device_create(vmlogrdr_class, dev, | 771 | priv->class_device = device_create_drvdata(vmlogrdr_class, dev, |
772 | MKDEV(vmlogrdr_major, | 772 | MKDEV(vmlogrdr_major, |
773 | priv->minor_num), | 773 | priv->minor_num), |
774 | "%s", dev->bus_id); | 774 | priv, "%s", dev->bus_id); |
775 | if (IS_ERR(priv->class_device)) { | 775 | if (IS_ERR(priv->class_device)) { |
776 | ret = PTR_ERR(priv->class_device); | 776 | ret = PTR_ERR(priv->class_device); |
777 | priv->class_device=NULL; | 777 | priv->class_device=NULL; |
@@ -779,7 +779,6 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv) | |||
779 | device_unregister(dev); | 779 | device_unregister(dev); |
780 | return ret; | 780 | return ret; |
781 | } | 781 | } |
782 | dev->driver_data = priv; | ||
783 | priv->device = dev; | 782 | priv->device = dev; |
784 | return 0; | 783 | return 0; |
785 | } | 784 | } |
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index a4a5f2efea48..0bfcbbe375c4 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -97,8 +97,8 @@ static int pure_hex(char **cp, unsigned int *val, int min_digit,
97 | return 0; | 97 | return 0; |
98 | } | 98 | } |
99 | 99 | ||
100 | static int parse_busid(char *str, int *cssid, int *ssid, int *devno, | 100 | static int parse_busid(char *str, unsigned int *cssid, unsigned int *ssid, |
101 | int msgtrigger) | 101 | unsigned int *devno, int msgtrigger) |
102 | { | 102 | { |
103 | char *str_work; | 103 | char *str_work; |
104 | int val, rc, ret; | 104 | int val, rc, ret; |
@@ -148,7 +148,7 @@ out: | |||
148 | static int blacklist_parse_parameters(char *str, range_action action, | 148 | static int blacklist_parse_parameters(char *str, range_action action, |
149 | int msgtrigger) | 149 | int msgtrigger) |
150 | { | 150 | { |
151 | int from_cssid, to_cssid, from_ssid, to_ssid, from, to; | 151 | unsigned int from_cssid, to_cssid, from_ssid, to_ssid, from, to; |
152 | int rc, totalrc; | 152 | int rc, totalrc; |
153 | char *parm; | 153 | char *parm; |
154 | range_action ra; | 154 | range_action ra; |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index 82c6a2d45128..b32d7eb3d81a 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -576,12 +576,14 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid)
576 | err = -ENODEV; | 576 | err = -ENODEV; |
577 | goto out; | 577 | goto out; |
578 | } | 578 | } |
579 | if (cio_is_console(sch->schid)) | 579 | if (cio_is_console(sch->schid)) { |
580 | sch->opm = 0xff; | 580 | sch->opm = 0xff; |
581 | else | 581 | sch->isc = 1; |
582 | } else { | ||
582 | sch->opm = chp_get_sch_opm(sch); | 583 | sch->opm = chp_get_sch_opm(sch); |
584 | sch->isc = 3; | ||
585 | } | ||
583 | sch->lpm = sch->schib.pmcw.pam & sch->opm; | 586 | sch->lpm = sch->schib.pmcw.pam & sch->opm; |
584 | sch->isc = 3; | ||
585 | 587 | ||
586 | CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X " | 588 | CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X " |
587 | "- PIM = %02X, PAM = %02X, POM = %02X\n", | 589 | "- PIM = %02X, PAM = %02X, POM = %02X\n", |
@@ -704,9 +706,9 @@ void wait_cons_dev(void) | |||
704 | if (!console_subchannel_in_use) | 706 | if (!console_subchannel_in_use) |
705 | return; | 707 | return; |
706 | 708 | ||
707 | /* disable all but isc 7 (console device) */ | 709 | /* disable all but isc 1 (console device) */ |
708 | __ctl_store (save_cr6, 6, 6); | 710 | __ctl_store (save_cr6, 6, 6); |
709 | cr6 = 0x01000000; | 711 | cr6 = 0x40000000; |
710 | __ctl_load (cr6, 6, 6); | 712 | __ctl_load (cr6, 6, 6); |
711 | 713 | ||
712 | do { | 714 | do { |
@@ -788,11 +790,11 @@ cio_probe_console(void) | |||
788 | } | 790 | } |
789 | 791 | ||
790 | /* | 792 | /* |
791 | * enable console I/O-interrupt subclass 7 | 793 | * enable console I/O-interrupt subclass 1 |
792 | */ | 794 | */ |
793 | ctl_set_bit(6, 24); | 795 | ctl_set_bit(6, 30); |
794 | console_subchannel.isc = 7; | 796 | console_subchannel.isc = 1; |
795 | console_subchannel.schib.pmcw.isc = 7; | 797 | console_subchannel.schib.pmcw.isc = 1; |
796 | console_subchannel.schib.pmcw.intparm = | 798 | console_subchannel.schib.pmcw.intparm = |
797 | (u32)(addr_t)&console_subchannel; | 799 | (u32)(addr_t)&console_subchannel; |
798 | ret = cio_modify(&console_subchannel); | 800 | ret = cio_modify(&console_subchannel); |
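
The cio.c hunks move the console subchannel from I/O-interruption subclass (ISC) 7 to ISC 1, which is why the CR6 mask in wait_cons_dev() changes from 0x01000000 to 0x40000000 and the ctl_set_bit() position from 24 to 30. The mapping is simple bit arithmetic; the helper below merely illustrates it and is not a function from the patch.

	/* ISC n selects one bit in the leftmost byte of control register 6:
	 * mask = 0x80000000 >> n, i.e. bit position (31 - n) counted from the
	 * least-significant end.  isc_to_cr6_mask(7) == 0x01000000 and
	 * isc_to_cr6_mask(1) == 0x40000000, matching the hunks above. */
	static inline unsigned int isc_to_cr6_mask(unsigned int isc)
	{
		return 0x80000000U >> isc;
	}
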
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c
index 47a7e6200b26..5ab34340919b 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/kvm/kvm_virtio.c
@@ -31,11 +31,6 @@
31 | */ | 31 | */ |
32 | static void *kvm_devices; | 32 | static void *kvm_devices; |
33 | 33 | ||
34 | /* | ||
35 | * Unique numbering for kvm devices. | ||
36 | */ | ||
37 | static unsigned int dev_index; | ||
38 | |||
39 | struct kvm_device { | 34 | struct kvm_device { |
40 | struct virtio_device vdev; | 35 | struct virtio_device vdev; |
41 | struct kvm_device_desc *desc; | 36 | struct kvm_device_desc *desc; |
@@ -78,27 +73,32 @@ static unsigned desc_size(const struct kvm_device_desc *desc) | |||
78 | + desc->config_len; | 73 | + desc->config_len; |
79 | } | 74 | } |
80 | 75 | ||
81 | /* | 76 | /* This gets the device's feature bits. */ |
82 | * This tests (and acknowleges) a feature bit. | 77 | static u32 kvm_get_features(struct virtio_device *vdev) |
83 | */ | ||
84 | static bool kvm_feature(struct virtio_device *vdev, unsigned fbit) | ||
85 | { | 78 | { |
79 | unsigned int i; | ||
80 | u32 features = 0; | ||
86 | struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; | 81 | struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; |
87 | u8 *features; | 82 | u8 *in_features = kvm_vq_features(desc); |
88 | 83 | ||
89 | if (fbit / 8 > desc->feature_len) | 84 | for (i = 0; i < min(desc->feature_len * 8, 32); i++) |
90 | return false; | 85 | if (in_features[i / 8] & (1 << (i % 8))) |
86 | features |= (1 << i); | ||
87 | return features; | ||
88 | } | ||
91 | 89 | ||
92 | features = kvm_vq_features(desc); | 90 | static void kvm_set_features(struct virtio_device *vdev, u32 features) |
93 | if (!(features[fbit / 8] & (1 << (fbit % 8)))) | 91 | { |
94 | return false; | 92 | unsigned int i; |
93 | struct kvm_device_desc *desc = to_kvmdev(vdev)->desc; | ||
94 | /* Second half of bitmap is features we accept. */ | ||
95 | u8 *out_features = kvm_vq_features(desc) + desc->feature_len; | ||
95 | 96 | ||
96 | /* | 97 | memset(out_features, 0, desc->feature_len); |
97 | * We set the matching bit in the other half of the bitmap to tell the | 98 | for (i = 0; i < min(desc->feature_len * 8, 32); i++) { |
98 | * Host we want to use this feature. | 99 | if (features & (1 << i)) |
99 | */ | 100 | out_features[i / 8] |= (1 << (i % 8)); |
100 | features[desc->feature_len + fbit / 8] |= (1 << (fbit % 8)); | 101 | } |
101 | return true; | ||
102 | } | 102 | } |
103 | 103 | ||
104 | /* | 104 | /* |
@@ -221,7 +221,8 @@ static void kvm_del_vq(struct virtqueue *vq) | |||
221 | * The config ops structure as defined by virtio config | 221 | * The config ops structure as defined by virtio config |
222 | */ | 222 | */ |
223 | static struct virtio_config_ops kvm_vq_configspace_ops = { | 223 | static struct virtio_config_ops kvm_vq_configspace_ops = { |
224 | .feature = kvm_feature, | 224 | .get_features = kvm_get_features, |
225 | .set_features = kvm_set_features, | ||
225 | .get = kvm_get, | 226 | .get = kvm_get, |
226 | .set = kvm_set, | 227 | .set = kvm_set, |
227 | .get_status = kvm_get_status, | 228 | .get_status = kvm_get_status, |
@@ -244,26 +245,25 @@ static struct device kvm_root = { | |||
244 | * adds a new device and register it with virtio | 245 | * adds a new device and register it with virtio |
245 | * appropriate drivers are loaded by the device model | 246 | * appropriate drivers are loaded by the device model |
246 | */ | 247 | */ |
247 | static void add_kvm_device(struct kvm_device_desc *d) | 248 | static void add_kvm_device(struct kvm_device_desc *d, unsigned int offset) |
248 | { | 249 | { |
249 | struct kvm_device *kdev; | 250 | struct kvm_device *kdev; |
250 | 251 | ||
251 | kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); | 252 | kdev = kzalloc(sizeof(*kdev), GFP_KERNEL); |
252 | if (!kdev) { | 253 | if (!kdev) { |
253 | printk(KERN_EMERG "Cannot allocate kvm dev %u\n", | 254 | printk(KERN_EMERG "Cannot allocate kvm dev %u type %u\n", |
254 | dev_index++); | 255 | offset, d->type); |
255 | return; | 256 | return; |
256 | } | 257 | } |
257 | 258 | ||
258 | kdev->vdev.dev.parent = &kvm_root; | 259 | kdev->vdev.dev.parent = &kvm_root; |
259 | kdev->vdev.index = dev_index++; | ||
260 | kdev->vdev.id.device = d->type; | 260 | kdev->vdev.id.device = d->type; |
261 | kdev->vdev.config = &kvm_vq_configspace_ops; | 261 | kdev->vdev.config = &kvm_vq_configspace_ops; |
262 | kdev->desc = d; | 262 | kdev->desc = d; |
263 | 263 | ||
264 | if (register_virtio_device(&kdev->vdev) != 0) { | 264 | if (register_virtio_device(&kdev->vdev) != 0) { |
265 | printk(KERN_ERR "Failed to register kvm device %u\n", | 265 | printk(KERN_ERR "Failed to register kvm device %u type %u\n", |
266 | kdev->vdev.index); | 266 | offset, d->type); |
267 | kfree(kdev); | 267 | kfree(kdev); |
268 | } | 268 | } |
269 | } | 269 | } |
@@ -283,7 +283,7 @@ static void scan_devices(void) | |||
283 | if (d->type == 0) | 283 | if (d->type == 0) |
284 | break; | 284 | break; |
285 | 285 | ||
286 | add_kvm_device(d); | 286 | add_kvm_device(d, i); |
287 | } | 287 | } |
288 | } | 288 | } |
289 | 289 | ||
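
The kvm_virtio.c changes drop the per-bit kvm_feature() test-and-acknowledge callback in favour of get_features/set_features operations that convert between a 32-bit feature word and the byte-wise bitmaps in the device descriptor (host features first, then the bits the guest accepts). The standalone helpers below sketch that conversion with hypothetical names; they are an illustration, not kernel code.

	#include <stdint.h>
	#include <string.h>

	static uint32_t features_from_bitmap(const uint8_t *in, unsigned int len_bytes)
	{
		uint32_t features = 0;
		unsigned int i, nbits = len_bytes * 8 < 32 ? len_bytes * 8 : 32;

		for (i = 0; i < nbits; i++)
			if (in[i / 8] & (1u << (i % 8)))
				features |= 1u << i;
		return features;
	}

	static void features_to_bitmap(uint8_t *out, unsigned int len_bytes,
				       uint32_t features)
	{
		unsigned int i, nbits = len_bytes * 8 < 32 ? len_bytes * 8 : 32;

		memset(out, 0, len_bytes);
		for (i = 0; i < nbits; i++)
			if (features & (1u << i))
				out[i / 8] |= 1u << (i % 8);
	}
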
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 436bf1f6d4a6..9a71dae223e8 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -290,9 +290,6 @@ int qeth_set_large_send(struct qeth_card *card,
290 | card->dev->features |= NETIF_F_TSO | NETIF_F_SG | | 290 | card->dev->features |= NETIF_F_TSO | NETIF_F_SG | |
291 | NETIF_F_HW_CSUM; | 291 | NETIF_F_HW_CSUM; |
292 | } else { | 292 | } else { |
293 | PRINT_WARN("TSO not supported on %s. " | ||
294 | "large_send set to 'no'.\n", | ||
295 | card->dev->name); | ||
296 | card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | | 293 | card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG | |
297 | NETIF_F_HW_CSUM); | 294 | NETIF_F_HW_CSUM); |
298 | card->options.large_send = QETH_LARGE_SEND_NO; | 295 | card->options.large_send = QETH_LARGE_SEND_NO; |
@@ -1407,12 +1404,6 @@ static void qeth_init_func_level(struct qeth_card *card) | |||
1407 | } | 1404 | } |
1408 | } | 1405 | } |
1409 | 1406 | ||
1410 | static inline __u16 qeth_raw_devno_from_bus_id(char *id) | ||
1411 | { | ||
1412 | id += (strlen(id) - 4); | ||
1413 | return (__u16) simple_strtoul(id, &id, 16); | ||
1414 | } | ||
1415 | |||
1416 | static int qeth_idx_activate_get_answer(struct qeth_channel *channel, | 1407 | static int qeth_idx_activate_get_answer(struct qeth_channel *channel, |
1417 | void (*idx_reply_cb)(struct qeth_channel *, | 1408 | void (*idx_reply_cb)(struct qeth_channel *, |
1418 | struct qeth_cmd_buffer *)) | 1409 | struct qeth_cmd_buffer *)) |
@@ -1439,7 +1430,7 @@ static int qeth_idx_activate_get_answer(struct qeth_channel *channel, | |||
1439 | spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); | 1430 | spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); |
1440 | 1431 | ||
1441 | if (rc) { | 1432 | if (rc) { |
1442 | PRINT_ERR("Error2 in activating channel rc=%d\n", rc); | 1433 | QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc); |
1443 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); | 1434 | QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); |
1444 | atomic_set(&channel->irq_pending, 0); | 1435 | atomic_set(&channel->irq_pending, 0); |
1445 | wake_up(&card->wait_q); | 1436 | wake_up(&card->wait_q); |
@@ -1468,6 +1459,7 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
1468 | __u16 temp; | 1459 | __u16 temp; |
1469 | __u8 tmp; | 1460 | __u8 tmp; |
1470 | int rc; | 1461 | int rc; |
1462 | struct ccw_dev_id temp_devid; | ||
1471 | 1463 | ||
1472 | card = CARD_FROM_CDEV(channel->ccwdev); | 1464 | card = CARD_FROM_CDEV(channel->ccwdev); |
1473 | 1465 | ||
@@ -1494,8 +1486,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
1494 | &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); | 1486 | &card->token.issuer_rm_w, QETH_MPC_TOKEN_LENGTH); |
1495 | memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), | 1487 | memcpy(QETH_IDX_ACT_FUNC_LEVEL(iob->data), |
1496 | &card->info.func_level, sizeof(__u16)); | 1488 | &card->info.func_level, sizeof(__u16)); |
1497 | temp = qeth_raw_devno_from_bus_id(CARD_DDEV_ID(card)); | 1489 | ccw_device_get_id(CARD_DDEV(card), &temp_devid); |
1498 | memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp, 2); | 1490 | memcpy(QETH_IDX_ACT_QDIO_DEV_CUA(iob->data), &temp_devid.devno, 2); |
1499 | temp = (card->info.cula << 8) + card->info.unit_addr2; | 1491 | temp = (card->info.cula << 8) + card->info.unit_addr2; |
1500 | memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2); | 1492 | memcpy(QETH_IDX_ACT_QDIO_DEV_REALADDR(iob->data), &temp, 2); |
1501 | 1493 | ||
@@ -1508,7 +1500,8 @@ static int qeth_idx_activate_channel(struct qeth_channel *channel, | |||
1508 | spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); | 1500 | spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); |
1509 | 1501 | ||
1510 | if (rc) { | 1502 | if (rc) { |
1511 | PRINT_ERR("Error1 in activating channel. rc=%d\n", rc); | 1503 | QETH_DBF_MESSAGE(2, "Error1 in activating channel. rc=%d\n", |
1504 | rc); | ||
1512 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 1505 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
1513 | atomic_set(&channel->irq_pending, 0); | 1506 | atomic_set(&channel->irq_pending, 0); |
1514 | wake_up(&card->wait_q); | 1507 | wake_up(&card->wait_q); |
@@ -1658,7 +1651,6 @@ int qeth_send_control_data(struct qeth_card *card, int len, | |||
1658 | 1651 | ||
1659 | reply = qeth_alloc_reply(card); | 1652 | reply = qeth_alloc_reply(card); |
1660 | if (!reply) { | 1653 | if (!reply) { |
1661 | PRINT_WARN("Could not alloc qeth_reply!\n"); | ||
1662 | return -ENOMEM; | 1654 | return -ENOMEM; |
1663 | } | 1655 | } |
1664 | reply->callback = reply_cb; | 1656 | reply->callback = reply_cb; |
@@ -2612,15 +2604,9 @@ void qeth_queue_input_buffer(struct qeth_card *card, int index) | |||
2612 | if (newcount < count) { | 2604 | if (newcount < count) { |
2613 | /* we are in memory shortage so we switch back to | 2605 | /* we are in memory shortage so we switch back to |
2614 | traditional skb allocation and drop packages */ | 2606 | traditional skb allocation and drop packages */ |
2615 | if (!atomic_read(&card->force_alloc_skb) && | ||
2616 | net_ratelimit()) | ||
2617 | PRINT_WARN("Switch to alloc skb\n"); | ||
2618 | atomic_set(&card->force_alloc_skb, 3); | 2607 | atomic_set(&card->force_alloc_skb, 3); |
2619 | count = newcount; | 2608 | count = newcount; |
2620 | } else { | 2609 | } else { |
2621 | if ((atomic_read(&card->force_alloc_skb) == 1) && | ||
2622 | net_ratelimit()) | ||
2623 | PRINT_WARN("Switch to sg\n"); | ||
2624 | atomic_add_unless(&card->force_alloc_skb, -1, 0); | 2610 | atomic_add_unless(&card->force_alloc_skb, -1, 0); |
2625 | } | 2611 | } |
2626 | 2612 | ||
@@ -3034,7 +3020,7 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr, | |||
3034 | elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE) | 3020 | elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE) |
3035 | + skb->len) >> PAGE_SHIFT); | 3021 | + skb->len) >> PAGE_SHIFT); |
3036 | if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { | 3022 | if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { |
3037 | PRINT_ERR("Invalid size of IP packet " | 3023 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " |
3038 | "(Number=%d / Length=%d). Discarded.\n", | 3024 | "(Number=%d / Length=%d). Discarded.\n", |
3039 | (elements_needed+elems), skb->len); | 3025 | (elements_needed+elems), skb->len); |
3040 | return 0; | 3026 | return 0; |
@@ -3247,8 +3233,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, | |||
3247 | * free buffers) to handle eddp context */ | 3233 | * free buffers) to handle eddp context */ |
3248 | if (qeth_eddp_check_buffers_for_context(queue, ctx) | 3234 | if (qeth_eddp_check_buffers_for_context(queue, ctx) |
3249 | < 0) { | 3235 | < 0) { |
3250 | if (net_ratelimit()) | ||
3251 | PRINT_WARN("eddp tx_dropped 1\n"); | ||
3252 | rc = -EBUSY; | 3236 | rc = -EBUSY; |
3253 | goto out; | 3237 | goto out; |
3254 | } | 3238 | } |
@@ -3260,7 +3244,6 @@ int qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, | |||
3260 | tmp = qeth_eddp_fill_buffer(queue, ctx, | 3244 | tmp = qeth_eddp_fill_buffer(queue, ctx, |
3261 | queue->next_buf_to_fill); | 3245 | queue->next_buf_to_fill); |
3262 | if (tmp < 0) { | 3246 | if (tmp < 0) { |
3263 | PRINT_ERR("eddp tx_dropped 2\n"); | ||
3264 | rc = -EBUSY; | 3247 | rc = -EBUSY; |
3265 | goto out; | 3248 | goto out; |
3266 | } | 3249 | } |
@@ -3602,8 +3585,6 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
3602 | 3585 | ||
3603 | if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && | 3586 | if ((!qeth_adp_supported(card, IPA_SETADP_SET_SNMP_CONTROL)) && |
3604 | (!card->options.layer2)) { | 3587 | (!card->options.layer2)) { |
3605 | PRINT_WARN("SNMP Query MIBS not supported " | ||
3606 | "on %s!\n", QETH_CARD_IFNAME(card)); | ||
3607 | return -EOPNOTSUPP; | 3588 | return -EOPNOTSUPP; |
3608 | } | 3589 | } |
3609 | /* skip 4 bytes (data_len struct member) to get req_len */ | 3590 | /* skip 4 bytes (data_len struct member) to get req_len */ |
@@ -3634,7 +3615,7 @@ int qeth_snmp_command(struct qeth_card *card, char __user *udata) | |||
3634 | rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, | 3615 | rc = qeth_send_ipa_snmp_cmd(card, iob, QETH_SETADP_BASE_LEN + req_len, |
3635 | qeth_snmp_command_cb, (void *)&qinfo); | 3616 | qeth_snmp_command_cb, (void *)&qinfo); |
3636 | if (rc) | 3617 | if (rc) |
3637 | PRINT_WARN("SNMP command failed on %s: (0x%x)\n", | 3618 | QETH_DBF_MESSAGE(2, "SNMP command failed on %s: (0x%x)\n", |
3638 | QETH_CARD_IFNAME(card), rc); | 3619 | QETH_CARD_IFNAME(card), rc); |
3639 | else { | 3620 | else { |
3640 | if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) | 3621 | if (copy_to_user(udata, qinfo.udata, qinfo.udata_len)) |
@@ -3807,8 +3788,8 @@ retry: | |||
3807 | if (mpno) | 3788 | if (mpno) |
3808 | mpno = min(mpno - 1, QETH_MAX_PORTNO); | 3789 | mpno = min(mpno - 1, QETH_MAX_PORTNO); |
3809 | if (card->info.portno > mpno) { | 3790 | if (card->info.portno > mpno) { |
3810 | PRINT_ERR("Device %s does not offer port number %d \n.", | 3791 | QETH_DBF_MESSAGE(2, "Device %s does not offer port number %d" |
3811 | CARD_BUS_ID(card), card->info.portno); | 3792 | "\n.", CARD_BUS_ID(card), card->info.portno); |
3812 | rc = -ENODEV; | 3793 | rc = -ENODEV; |
3813 | goto out; | 3794 | goto out; |
3814 | } | 3795 | } |
@@ -3985,8 +3966,6 @@ struct sk_buff *qeth_core_get_next_skb(struct qeth_card *card, | |||
3985 | return skb; | 3966 | return skb; |
3986 | no_mem: | 3967 | no_mem: |
3987 | if (net_ratelimit()) { | 3968 | if (net_ratelimit()) { |
3988 | PRINT_WARN("No memory for packet received on %s.\n", | ||
3989 | QETH_CARD_IFNAME(card)); | ||
3990 | QETH_DBF_TEXT(TRACE, 2, "noskbmem"); | 3969 | QETH_DBF_TEXT(TRACE, 2, "noskbmem"); |
3991 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); | 3970 | QETH_DBF_TEXT_(TRACE, 2, "%s", CARD_BUS_ID(card)); |
3992 | } | 3971 | } |
@@ -4004,15 +3983,17 @@ static void qeth_unregister_dbf_views(void) | |||
4004 | } | 3983 | } |
4005 | } | 3984 | } |
4006 | 3985 | ||
4007 | void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...) | 3986 | void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *fmt, ...) |
4008 | { | 3987 | { |
4009 | char dbf_txt_buf[32]; | 3988 | char dbf_txt_buf[32]; |
3989 | va_list args; | ||
4010 | 3990 | ||
4011 | if (level > (qeth_dbf[dbf_nix].id)->level) | 3991 | if (level > (qeth_dbf[dbf_nix].id)->level) |
4012 | return; | 3992 | return; |
4013 | snprintf(dbf_txt_buf, sizeof(dbf_txt_buf), text); | 3993 | va_start(args, fmt); |
3994 | vsnprintf(dbf_txt_buf, sizeof(dbf_txt_buf), fmt, args); | ||
3995 | va_end(args); | ||
4014 | debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf); | 3996 | debug_text_event(qeth_dbf[dbf_nix].id, level, dbf_txt_buf); |
4015 | |||
4016 | } | 3997 | } |
4017 | EXPORT_SYMBOL_GPL(qeth_dbf_longtext); | 3998 | EXPORT_SYMBOL_GPL(qeth_dbf_longtext); |
4018 | 3999 | ||
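
Among the qeth_core_main.c changes, qeth_dbf_longtext() now consumes its variadic arguments with va_start()/vsnprintf() instead of handing the caller's bare format string to snprintf(), which ignored the arguments and misinterpreted any '%' in the message. A minimal standalone sketch of the pattern (the hand-off to the debug facility is only indicated in a comment):

	#include <stdarg.h>
	#include <stdio.h>

	static void dbf_longtext(char *buf, size_t buflen, const char *fmt, ...)
	{
		va_list args;

		va_start(args, fmt);
		vsnprintf(buf, buflen, fmt, args);
		va_end(args);
		/* ... hand buf to the debug facility, e.g. debug_text_event() ... */
	}
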
diff --git a/drivers/s390/net/qeth_core_offl.c b/drivers/s390/net/qeth_core_offl.c
index 822df8362856..452874e89740 100644
--- a/drivers/s390/net/qeth_core_offl.c
+++ b/drivers/s390/net/qeth_core_offl.c
@@ -122,8 +122,8 @@ int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue,
122 | if (element == 0) | 122 | if (element == 0) |
123 | return -EBUSY; | 123 | return -EBUSY; |
124 | else { | 124 | else { |
125 | PRINT_WARN("could only partially fill eddp " | 125 | QETH_DBF_MESSAGE(2, "could only partially fill" |
126 | "buffer!\n"); | 126 | "eddp buffer!\n"); |
127 | goto out; | 127 | goto out; |
128 | } | 128 | } |
129 | } | 129 | } |
@@ -143,8 +143,6 @@ int qeth_eddp_fill_buffer(struct qeth_qdio_out_q *queue, | |||
143 | if (must_refcnt) { | 143 | if (must_refcnt) { |
144 | must_refcnt = 0; | 144 | must_refcnt = 0; |
145 | if (qeth_eddp_buf_ref_context(buf, ctx)) { | 145 | if (qeth_eddp_buf_ref_context(buf, ctx)) { |
146 | PRINT_WARN("no memory to create eddp context " | ||
147 | "reference\n"); | ||
148 | goto out_check; | 146 | goto out_check; |
149 | } | 147 | } |
150 | } | 148 | } |
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 08a50f057284..c26e842ad905 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -129,7 +129,6 @@ static ssize_t qeth_dev_portno_store(struct device *dev,
129 | 129 | ||
130 | portno = simple_strtoul(buf, &tmp, 16); | 130 | portno = simple_strtoul(buf, &tmp, 16); |
131 | if (portno > QETH_MAX_PORTNO) { | 131 | if (portno > QETH_MAX_PORTNO) { |
132 | PRINT_WARN("portno 0x%X is out of range\n", portno); | ||
133 | return -EINVAL; | 132 | return -EINVAL; |
134 | } | 133 | } |
135 | 134 | ||
@@ -223,8 +222,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, | |||
223 | * if though we have to permit priority queueing | 222 | * if though we have to permit priority queueing |
224 | */ | 223 | */ |
225 | if (card->qdio.no_out_queues == 1) { | 224 | if (card->qdio.no_out_queues == 1) { |
226 | PRINT_WARN("Priority queueing disabled due " | ||
227 | "to hardware limitations!\n"); | ||
228 | card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; | 225 | card->qdio.do_prio_queueing = QETH_PRIOQ_DEFAULT; |
229 | return -EPERM; | 226 | return -EPERM; |
230 | } | 227 | } |
@@ -250,7 +247,6 @@ static ssize_t qeth_dev_prioqing_store(struct device *dev, | |||
250 | card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; | 247 | card->qdio.do_prio_queueing = QETH_NO_PRIO_QUEUEING; |
251 | card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; | 248 | card->qdio.default_out_queue = QETH_DEFAULT_QUEUE; |
252 | } else { | 249 | } else { |
253 | PRINT_WARN("Unknown queueing type '%s'\n", tmp); | ||
254 | return -EINVAL; | 250 | return -EINVAL; |
255 | } | 251 | } |
256 | return count; | 252 | return count; |
@@ -291,9 +287,6 @@ static ssize_t qeth_dev_bufcnt_store(struct device *dev, | |||
291 | ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt); | 287 | ((cnt > QETH_IN_BUF_COUNT_MAX) ? QETH_IN_BUF_COUNT_MAX : cnt); |
292 | if (old_cnt != cnt) { | 288 | if (old_cnt != cnt) { |
293 | rc = qeth_realloc_buffer_pool(card, cnt); | 289 | rc = qeth_realloc_buffer_pool(card, cnt); |
294 | if (rc) | ||
295 | PRINT_WARN("Error (%d) while setting " | ||
296 | "buffer count.\n", rc); | ||
297 | } | 290 | } |
298 | return count; | 291 | return count; |
299 | } | 292 | } |
@@ -355,7 +348,6 @@ static ssize_t qeth_dev_performance_stats_store(struct device *dev, | |||
355 | card->perf_stats.initial_rx_packets = card->stats.rx_packets; | 348 | card->perf_stats.initial_rx_packets = card->stats.rx_packets; |
356 | card->perf_stats.initial_tx_packets = card->stats.tx_packets; | 349 | card->perf_stats.initial_tx_packets = card->stats.tx_packets; |
357 | } else { | 350 | } else { |
358 | PRINT_WARN("performance_stats: write 0 or 1 to this file!\n"); | ||
359 | return -EINVAL; | 351 | return -EINVAL; |
360 | } | 352 | } |
361 | return count; | 353 | return count; |
@@ -399,7 +391,6 @@ static ssize_t qeth_dev_layer2_store(struct device *dev, | |||
399 | newdis = QETH_DISCIPLINE_LAYER2; | 391 | newdis = QETH_DISCIPLINE_LAYER2; |
400 | break; | 392 | break; |
401 | default: | 393 | default: |
402 | PRINT_WARN("layer2: write 0 or 1 to this file!\n"); | ||
403 | return -EINVAL; | 394 | return -EINVAL; |
404 | } | 395 | } |
405 | 396 | ||
@@ -463,7 +454,6 @@ static ssize_t qeth_dev_large_send_store(struct device *dev, | |||
463 | } else if (!strcmp(tmp, "TSO")) { | 454 | } else if (!strcmp(tmp, "TSO")) { |
464 | type = QETH_LARGE_SEND_TSO; | 455 | type = QETH_LARGE_SEND_TSO; |
465 | } else { | 456 | } else { |
466 | PRINT_WARN("large_send: invalid mode %s!\n", tmp); | ||
467 | return -EINVAL; | 457 | return -EINVAL; |
468 | } | 458 | } |
469 | if (card->options.large_send == type) | 459 | if (card->options.large_send == type) |
@@ -503,8 +493,6 @@ static ssize_t qeth_dev_blkt_store(struct qeth_card *card, | |||
503 | if (i <= max_value) { | 493 | if (i <= max_value) { |
504 | *value = i; | 494 | *value = i; |
505 | } else { | 495 | } else { |
506 | PRINT_WARN("blkt total time: write values between" | ||
507 | " 0 and %d to this file!\n", max_value); | ||
508 | return -EINVAL; | 496 | return -EINVAL; |
509 | } | 497 | } |
510 | return count; | 498 | return count; |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 86ec50ddae13..f682f7b14480 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -101,19 +101,16 @@ static struct net_device *qeth_l2_netdev_by_devno(unsigned char *read_dev_no)
101 | { | 101 | { |
102 | struct qeth_card *card; | 102 | struct qeth_card *card; |
103 | struct net_device *ndev; | 103 | struct net_device *ndev; |
104 | unsigned char *readno; | 104 | __u16 temp_dev_no; |
105 | __u16 temp_dev_no, card_dev_no; | ||
106 | char *endp; | ||
107 | unsigned long flags; | 105 | unsigned long flags; |
106 | struct ccw_dev_id read_devid; | ||
108 | 107 | ||
109 | ndev = NULL; | 108 | ndev = NULL; |
110 | memcpy(&temp_dev_no, read_dev_no, 2); | 109 | memcpy(&temp_dev_no, read_dev_no, 2); |
111 | read_lock_irqsave(&qeth_core_card_list.rwlock, flags); | 110 | read_lock_irqsave(&qeth_core_card_list.rwlock, flags); |
112 | list_for_each_entry(card, &qeth_core_card_list.list, list) { | 111 | list_for_each_entry(card, &qeth_core_card_list.list, list) { |
113 | readno = CARD_RDEV_ID(card); | 112 | ccw_device_get_id(CARD_RDEV(card), &read_devid); |
114 | readno += (strlen(readno) - 4); | 113 | if (read_devid.devno == temp_dev_no) { |
115 | card_dev_no = simple_strtoul(readno, &endp, 16); | ||
116 | if (card_dev_no == temp_dev_no) { | ||
117 | ndev = card->dev; | 114 | ndev = card->dev; |
118 | break; | 115 | break; |
119 | } | 116 | } |
@@ -134,14 +131,14 @@ static int qeth_l2_send_setgroupmac_cb(struct qeth_card *card, | |||
134 | mac = &cmd->data.setdelmac.mac[0]; | 131 | mac = &cmd->data.setdelmac.mac[0]; |
135 | /* MAC already registered, needed in couple/uncouple case */ | 132 | /* MAC already registered, needed in couple/uncouple case */ |
136 | if (cmd->hdr.return_code == 0x2005) { | 133 | if (cmd->hdr.return_code == 0x2005) { |
137 | PRINT_WARN("Group MAC %02x:%02x:%02x:%02x:%02x:%02x " \ | 134 | QETH_DBF_MESSAGE(2, "Group MAC %02x:%02x:%02x:%02x:%02x:%02x " |
138 | "already existing on %s \n", | 135 | "already existing on %s \n", |
139 | mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], | 136 | mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], |
140 | QETH_CARD_IFNAME(card)); | 137 | QETH_CARD_IFNAME(card)); |
141 | cmd->hdr.return_code = 0; | 138 | cmd->hdr.return_code = 0; |
142 | } | 139 | } |
143 | if (cmd->hdr.return_code) | 140 | if (cmd->hdr.return_code) |
144 | PRINT_ERR("Could not set group MAC " \ | 141 | QETH_DBF_MESSAGE(2, "Could not set group MAC " |
145 | "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n", | 142 | "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n", |
146 | mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], | 143 | mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], |
147 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); | 144 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); |
@@ -166,7 +163,7 @@ static int qeth_l2_send_delgroupmac_cb(struct qeth_card *card, | |||
166 | cmd = (struct qeth_ipa_cmd *) data; | 163 | cmd = (struct qeth_ipa_cmd *) data; |
167 | mac = &cmd->data.setdelmac.mac[0]; | 164 | mac = &cmd->data.setdelmac.mac[0]; |
168 | if (cmd->hdr.return_code) | 165 | if (cmd->hdr.return_code) |
169 | PRINT_ERR("Could not delete group MAC " \ | 166 | QETH_DBF_MESSAGE(2, "Could not delete group MAC " |
170 | "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n", | 167 | "%02x:%02x:%02x:%02x:%02x:%02x on %s: %x\n", |
171 | mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], | 168 | mac[0], mac[1], mac[2], mac[3], mac[4], mac[5], |
172 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); | 169 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); |
@@ -186,10 +183,8 @@ static void qeth_l2_add_mc(struct qeth_card *card, __u8 *mac) | |||
186 | 183 | ||
187 | mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC); | 184 | mc = kmalloc(sizeof(struct qeth_mc_mac), GFP_ATOMIC); |
188 | 185 | ||
189 | if (!mc) { | 186 | if (!mc) |
190 | PRINT_ERR("no mem vor mc mac address\n"); | ||
191 | return; | 187 | return; |
192 | } | ||
193 | 188 | ||
194 | memcpy(mc->mc_addr, mac, OSA_ADDR_LEN); | 189 | memcpy(mc->mc_addr, mac, OSA_ADDR_LEN); |
195 | mc->mc_addrlen = OSA_ADDR_LEN; | 190 | mc->mc_addrlen = OSA_ADDR_LEN; |
@@ -280,7 +275,7 @@ static int qeth_l2_send_setdelvlan_cb(struct qeth_card *card, | |||
280 | QETH_DBF_TEXT(TRACE, 2, "L2sdvcb"); | 275 | QETH_DBF_TEXT(TRACE, 2, "L2sdvcb"); |
281 | cmd = (struct qeth_ipa_cmd *) data; | 276 | cmd = (struct qeth_ipa_cmd *) data; |
282 | if (cmd->hdr.return_code) { | 277 | if (cmd->hdr.return_code) { |
283 | PRINT_ERR("Error in processing VLAN %i on %s: 0x%x. " | 278 | QETH_DBF_MESSAGE(2, "Error in processing VLAN %i on %s: 0x%x. " |
284 | "Continuing\n", cmd->data.setdelvlan.vlan_id, | 279 | "Continuing\n", cmd->data.setdelvlan.vlan_id, |
285 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); | 280 | QETH_CARD_IFNAME(card), cmd->hdr.return_code); |
286 | QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command); | 281 | QETH_DBF_TEXT_(TRACE, 2, "L2VL%4x", cmd->hdr.command); |
@@ -333,8 +328,6 @@ static void qeth_l2_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) | |||
333 | spin_lock_bh(&card->vlanlock); | 328 | spin_lock_bh(&card->vlanlock); |
334 | list_add_tail(&id->list, &card->vid_list); | 329 | list_add_tail(&id->list, &card->vid_list); |
335 | spin_unlock_bh(&card->vlanlock); | 330 | spin_unlock_bh(&card->vlanlock); |
336 | } else { | ||
337 | PRINT_ERR("no memory for vid\n"); | ||
338 | } | 331 | } |
339 | } | 332 | } |
340 | 333 | ||
@@ -550,16 +543,15 @@ static int qeth_l2_request_initial_mac(struct qeth_card *card) | |||
550 | 543 | ||
551 | rc = qeth_query_setadapterparms(card); | 544 | rc = qeth_query_setadapterparms(card); |
552 | if (rc) { | 545 | if (rc) { |
553 | PRINT_WARN("could not query adapter parameters on device %s: " | 546 | QETH_DBF_MESSAGE(2, "could not query adapter parameters on " |
554 | "x%x\n", CARD_BUS_ID(card), rc); | 547 | "device %s: x%x\n", CARD_BUS_ID(card), rc); |
555 | } | 548 | } |
556 | 549 | ||
557 | if (card->info.guestlan) { | 550 | if (card->info.guestlan) { |
558 | rc = qeth_setadpparms_change_macaddr(card); | 551 | rc = qeth_setadpparms_change_macaddr(card); |
559 | if (rc) { | 552 | if (rc) { |
560 | PRINT_WARN("couldn't get MAC address on " | 553 | QETH_DBF_MESSAGE(2, "couldn't get MAC address on " |
561 | "device %s: x%x\n", | 554 | "device %s: x%x\n", CARD_BUS_ID(card), rc); |
562 | CARD_BUS_ID(card), rc); | ||
563 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); | 555 | QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); |
564 | return rc; | 556 | return rc; |
565 | } | 557 | } |
@@ -585,8 +577,6 @@ static int qeth_l2_set_mac_address(struct net_device *dev, void *p) | |||
585 | } | 577 | } |
586 | 578 | ||
587 | if (card->info.type == QETH_CARD_TYPE_OSN) { | 579 | if (card->info.type == QETH_CARD_TYPE_OSN) { |
588 | PRINT_WARN("Setting MAC address on %s is not supported.\n", | ||
589 | dev->name); | ||
590 | QETH_DBF_TEXT(TRACE, 3, "setmcOSN"); | 580 | QETH_DBF_TEXT(TRACE, 3, "setmcOSN"); |
591 | return -EOPNOTSUPP; | 581 | return -EOPNOTSUPP; |
592 | } | 582 | } |
@@ -666,7 +656,7 @@ static int qeth_l2_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
666 | ctx = qeth_eddp_create_context(card, new_skb, hdr, | 656 | ctx = qeth_eddp_create_context(card, new_skb, hdr, |
667 | skb->sk->sk_protocol); | 657 | skb->sk->sk_protocol); |
668 | if (ctx == NULL) { | 658 | if (ctx == NULL) { |
669 | PRINT_WARN("could not create eddp context\n"); | 659 | QETH_DBF_MESSAGE(2, "could not create eddp context\n"); |
670 | goto tx_drop; | 660 | goto tx_drop; |
671 | } | 661 | } |
672 | } else { | 662 | } else { |
@@ -731,6 +721,7 @@ tx_drop: | |||
731 | if ((new_skb != skb) && new_skb) | 721 | if ((new_skb != skb) && new_skb) |
732 | dev_kfree_skb_any(new_skb); | 722 | dev_kfree_skb_any(new_skb); |
733 | dev_kfree_skb_any(skb); | 723 | dev_kfree_skb_any(skb); |
724 | netif_wake_queue(dev); | ||
734 | return NETDEV_TX_OK; | 725 | return NETDEV_TX_OK; |
735 | } | 726 | } |
736 | 727 | ||
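Both transmit functions touched by this patch (qeth_l2_hard_start_xmit above and qeth_l3_hard_start_xmit further down) gain a netif_wake_queue() call on their drop path. A condensed view of the changed epilogue, with the likely intent noted as a comment; the stop/wake pairing earlier in the xmit path is not visible in these hunks and is assumed from the usual netdev pattern:

    tx_drop:
            if ((new_skb != skb) && new_skb)
                    dev_kfree_skb_any(new_skb);
            dev_kfree_skb_any(skb);
            netif_wake_queue(dev);  /* do not leave a previously stopped queue stalled */
            return NETDEV_TX_OK;
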
@@ -1155,7 +1146,7 @@ static int qeth_osn_send_control_data(struct qeth_card *card, int len, | |||
1155 | (addr_t) iob, 0, 0); | 1146 | (addr_t) iob, 0, 0); |
1156 | spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); | 1147 | spin_unlock_irqrestore(get_ccwdev_lock(card->write.ccwdev), flags); |
1157 | if (rc) { | 1148 | if (rc) { |
1158 | PRINT_WARN("qeth_osn_send_control_data: " | 1149 | QETH_DBF_MESSAGE(2, "qeth_osn_send_control_data: " |
1159 | "ccw_device_start rc = %i\n", rc); | 1150 | "ccw_device_start rc = %i\n", rc); |
1160 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); | 1151 | QETH_DBF_TEXT_(TRACE, 2, " err%d", rc); |
1161 | qeth_release_buffer(iob->channel, iob); | 1152 | qeth_release_buffer(iob->channel, iob); |
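The bulk of the qeth changes replaces unconditional console printks (PRINT_ERR/PRINT_WARN) with entries in the s390 debug feature. Reduced to one example taken from the hunks above; that QETH_DBF_MESSAGE records into the qeth debug area at the given level, rather than printing to the console, is inferred from its use here rather than from its definition:

    /* before: printed straight to the console */
    PRINT_WARN("could not create eddp context\n");

    /* after: recorded in the qeth debug feature at level 2 */
    QETH_DBF_MESSAGE(2, "could not create eddp context\n");
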
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 94a8ead64ed4..999552c83bbe 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -311,7 +311,6 @@ static struct qeth_ipaddr *qeth_l3_get_addr_buffer( | |||
311 | 311 | ||
312 | addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC); | 312 | addr = kzalloc(sizeof(struct qeth_ipaddr), GFP_ATOMIC); |
313 | if (addr == NULL) { | 313 | if (addr == NULL) { |
314 | PRINT_WARN("Not enough memory to add address\n"); | ||
315 | return NULL; | 314 | return NULL; |
316 | } | 315 | } |
317 | addr->type = QETH_IP_TYPE_NORMAL; | 316 | addr->type = QETH_IP_TYPE_NORMAL; |
@@ -649,15 +648,6 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card, | |||
649 | } | 648 | } |
650 | } | 649 | } |
651 | out_inval: | 650 | out_inval: |
652 | PRINT_WARN("Routing type '%s' not supported for interface %s.\n" | ||
653 | "Router status set to 'no router'.\n", | ||
654 | ((*type == PRIMARY_ROUTER)? "primary router" : | ||
655 | (*type == SECONDARY_ROUTER)? "secondary router" : | ||
656 | (*type == PRIMARY_CONNECTOR)? "primary connector" : | ||
657 | (*type == SECONDARY_CONNECTOR)? "secondary connector" : | ||
658 | (*type == MULTICAST_ROUTER)? "multicast router" : | ||
659 | "unknown"), | ||
660 | card->dev->name); | ||
661 | *type = NO_ROUTER; | 651 | *type = NO_ROUTER; |
662 | } | 652 | } |
663 | 653 | ||
@@ -674,9 +664,9 @@ int qeth_l3_setrouting_v4(struct qeth_card *card) | |||
674 | QETH_PROT_IPV4); | 664 | QETH_PROT_IPV4); |
675 | if (rc) { | 665 | if (rc) { |
676 | card->options.route4.type = NO_ROUTER; | 666 | card->options.route4.type = NO_ROUTER; |
677 | PRINT_WARN("Error (0x%04x) while setting routing type on %s. " | 667 | QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" |
678 | "Type set to 'no router'.\n", | 668 | " on %s. Type set to 'no router'.\n", rc, |
679 | rc, QETH_CARD_IFNAME(card)); | 669 | QETH_CARD_IFNAME(card)); |
680 | } | 670 | } |
681 | return rc; | 671 | return rc; |
682 | } | 672 | } |
@@ -697,9 +687,9 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) | |||
697 | QETH_PROT_IPV6); | 687 | QETH_PROT_IPV6); |
698 | if (rc) { | 688 | if (rc) { |
699 | card->options.route6.type = NO_ROUTER; | 689 | card->options.route6.type = NO_ROUTER; |
700 | PRINT_WARN("Error (0x%04x) while setting routing type on %s. " | 690 | QETH_DBF_MESSAGE(2, "Error (0x%04x) while setting routing type" |
701 | "Type set to 'no router'.\n", | 691 | " on %s. Type set to 'no router'.\n", rc, |
702 | rc, QETH_CARD_IFNAME(card)); | 692 | QETH_CARD_IFNAME(card)); |
703 | } | 693 | } |
704 | #endif | 694 | #endif |
705 | return rc; | 695 | return rc; |
@@ -737,7 +727,6 @@ int qeth_l3_add_ipato_entry(struct qeth_card *card, | |||
737 | if (!memcmp(ipatoe->addr, new->addr, | 727 | if (!memcmp(ipatoe->addr, new->addr, |
738 | (ipatoe->proto == QETH_PROT_IPV4)? 4:16) && | 728 | (ipatoe->proto == QETH_PROT_IPV4)? 4:16) && |
739 | (ipatoe->mask_bits == new->mask_bits)) { | 729 | (ipatoe->mask_bits == new->mask_bits)) { |
740 | PRINT_WARN("ipato entry already exists!\n"); | ||
741 | rc = -EEXIST; | 730 | rc = -EEXIST; |
742 | break; | 731 | break; |
743 | } | 732 | } |
@@ -802,7 +791,6 @@ int qeth_l3_add_vipa(struct qeth_card *card, enum qeth_prot_versions proto, | |||
802 | rc = -EEXIST; | 791 | rc = -EEXIST; |
803 | spin_unlock_irqrestore(&card->ip_lock, flags); | 792 | spin_unlock_irqrestore(&card->ip_lock, flags); |
804 | if (rc) { | 793 | if (rc) { |
805 | PRINT_WARN("Cannot add VIPA. Address already exists!\n"); | ||
806 | return rc; | 794 | return rc; |
807 | } | 795 | } |
808 | if (!qeth_l3_add_ip(card, ipaddr)) | 796 | if (!qeth_l3_add_ip(card, ipaddr)) |
@@ -867,7 +855,6 @@ int qeth_l3_add_rxip(struct qeth_card *card, enum qeth_prot_versions proto, | |||
867 | rc = -EEXIST; | 855 | rc = -EEXIST; |
868 | spin_unlock_irqrestore(&card->ip_lock, flags); | 856 | spin_unlock_irqrestore(&card->ip_lock, flags); |
869 | if (rc) { | 857 | if (rc) { |
870 | PRINT_WARN("Cannot add RXIP. Address already exists!\n"); | ||
871 | return rc; | 858 | return rc; |
872 | } | 859 | } |
873 | if (!qeth_l3_add_ip(card, ipaddr)) | 860 | if (!qeth_l3_add_ip(card, ipaddr)) |
@@ -1020,23 +1007,23 @@ static int qeth_l3_setadapter_hstr(struct qeth_card *card) | |||
1020 | IPA_SETADP_SET_BROADCAST_MODE, | 1007 | IPA_SETADP_SET_BROADCAST_MODE, |
1021 | card->options.broadcast_mode); | 1008 | card->options.broadcast_mode); |
1022 | if (rc) | 1009 | if (rc) |
1023 | PRINT_WARN("couldn't set broadcast mode on " | 1010 | QETH_DBF_MESSAGE(2, "couldn't set broadcast mode on " |
1024 | "device %s: x%x\n", | 1011 | "device %s: x%x\n", |
1025 | CARD_BUS_ID(card), rc); | 1012 | CARD_BUS_ID(card), rc); |
1026 | rc = qeth_l3_send_setadp_mode(card, | 1013 | rc = qeth_l3_send_setadp_mode(card, |
1027 | IPA_SETADP_ALTER_MAC_ADDRESS, | 1014 | IPA_SETADP_ALTER_MAC_ADDRESS, |
1028 | card->options.macaddr_mode); | 1015 | card->options.macaddr_mode); |
1029 | if (rc) | 1016 | if (rc) |
1030 | PRINT_WARN("couldn't set macaddr mode on " | 1017 | QETH_DBF_MESSAGE(2, "couldn't set macaddr mode on " |
1031 | "device %s: x%x\n", CARD_BUS_ID(card), rc); | 1018 | "device %s: x%x\n", CARD_BUS_ID(card), rc); |
1032 | return rc; | 1019 | return rc; |
1033 | } | 1020 | } |
1034 | if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL) | 1021 | if (card->options.broadcast_mode == QETH_TR_BROADCAST_LOCAL) |
1035 | PRINT_WARN("set adapter parameters not available " | 1022 | QETH_DBF_MESSAGE(2, "set adapter parameters not available " |
1036 | "to set broadcast mode, using ALLRINGS " | 1023 | "to set broadcast mode, using ALLRINGS " |
1037 | "on device %s:\n", CARD_BUS_ID(card)); | 1024 | "on device %s:\n", CARD_BUS_ID(card)); |
1038 | if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL) | 1025 | if (card->options.macaddr_mode == QETH_TR_MACADDR_CANONICAL) |
1039 | PRINT_WARN("set adapter parameters not available " | 1026 | QETH_DBF_MESSAGE(2, "set adapter parameters not available " |
1040 | "to set macaddr mode, using NONCANONICAL " | 1027 | "to set macaddr mode, using NONCANONICAL " |
1041 | "on device %s:\n", CARD_BUS_ID(card)); | 1028 | "on device %s:\n", CARD_BUS_ID(card)); |
1042 | return 0; | 1029 | return 0; |
@@ -2070,7 +2057,7 @@ static struct qeth_card *qeth_l3_get_card_from_dev(struct net_device *dev) | |||
2070 | card = netdev_priv(dev); | 2057 | card = netdev_priv(dev); |
2071 | else if (rc == QETH_VLAN_CARD) | 2058 | else if (rc == QETH_VLAN_CARD) |
2072 | card = netdev_priv(vlan_dev_info(dev)->real_dev); | 2059 | card = netdev_priv(vlan_dev_info(dev)->real_dev); |
2073 | if (card->options.layer2) | 2060 | if (card && card->options.layer2) |
2074 | card = NULL; | 2061 | card = NULL; |
2075 | QETH_DBF_TEXT_(TRACE, 4, "%d", rc); | 2062 | QETH_DBF_TEXT_(TRACE, 4, "%d", rc); |
2076 | return card ; | 2063 | return card ; |
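Besides the message conversion, the hunk above hardens qeth_l3_get_card_from_dev(): if dev is neither a real nor a VLAN qeth device, card is never assigned, so the old unguarded card->options.layer2 test could dereference NULL. A sketch of the corrected flow; the initial NULL assignment and the first branch's condition are not visible in the hunk and are reconstructed here as assumptions:

    struct qeth_card *card = NULL;

    if (rc == QETH_REAL_CARD)
            card = netdev_priv(dev);
    else if (rc == QETH_VLAN_CARD)
            card = netdev_priv(vlan_dev_info(dev)->real_dev);
    if (card && card->options.layer2)   /* guard: card may still be NULL here */
            card = NULL;
    return card;
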
@@ -2182,8 +2169,6 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) | |||
2182 | if (card->info.guestlan) | 2169 | if (card->info.guestlan) |
2183 | return -EOPNOTSUPP; | 2170 | return -EOPNOTSUPP; |
2184 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { | 2171 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { |
2185 | PRINT_WARN("ARP processing not supported " | ||
2186 | "on %s!\n", QETH_CARD_IFNAME(card)); | ||
2187 | return -EOPNOTSUPP; | 2172 | return -EOPNOTSUPP; |
2188 | } | 2173 | } |
2189 | rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, | 2174 | rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, |
@@ -2191,8 +2176,8 @@ static int qeth_l3_arp_set_no_entries(struct qeth_card *card, int no_entries) | |||
2191 | no_entries); | 2176 | no_entries); |
2192 | if (rc) { | 2177 | if (rc) { |
2193 | tmp = rc; | 2178 | tmp = rc; |
2194 | PRINT_WARN("Could not set number of ARP entries on %s: " | 2179 | QETH_DBF_MESSAGE(2, "Could not set number of ARP entries on " |
2195 | "%s (0x%x/%d)\n", QETH_CARD_IFNAME(card), | 2180 | "%s: %s (0x%x/%d)\n", QETH_CARD_IFNAME(card), |
2196 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); | 2181 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); |
2197 | } | 2182 | } |
2198 | return rc; | 2183 | return rc; |
@@ -2260,9 +2245,6 @@ static int qeth_l3_arp_query_cb(struct qeth_card *card, | |||
2260 | qdata->no_entries * uentry_size){ | 2245 | qdata->no_entries * uentry_size){ |
2261 | QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM); | 2246 | QETH_DBF_TEXT_(TRACE, 4, "qaer3%i", -ENOMEM); |
2262 | cmd->hdr.return_code = -ENOMEM; | 2247 | cmd->hdr.return_code = -ENOMEM; |
2263 | PRINT_WARN("query ARP user space buffer is too small for " | ||
2264 | "the returned number of ARP entries. " | ||
2265 | "Aborting query!\n"); | ||
2266 | goto out_error; | 2248 | goto out_error; |
2267 | } | 2249 | } |
2268 | QETH_DBF_TEXT_(TRACE, 4, "anore%i", | 2250 | QETH_DBF_TEXT_(TRACE, 4, "anore%i", |
@@ -2324,8 +2306,6 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) | |||
2324 | 2306 | ||
2325 | if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ | 2307 | if (!qeth_is_supported(card,/*IPA_QUERY_ARP_ADDR_INFO*/ |
2326 | IPA_ARP_PROCESSING)) { | 2308 | IPA_ARP_PROCESSING)) { |
2327 | PRINT_WARN("ARP processing not supported " | ||
2328 | "on %s!\n", QETH_CARD_IFNAME(card)); | ||
2329 | return -EOPNOTSUPP; | 2309 | return -EOPNOTSUPP; |
2330 | } | 2310 | } |
2331 | /* get size of userspace buffer and mask_bits -> 6 bytes */ | 2311 | /* get size of userspace buffer and mask_bits -> 6 bytes */ |
@@ -2344,7 +2324,7 @@ static int qeth_l3_arp_query(struct qeth_card *card, char __user *udata) | |||
2344 | qeth_l3_arp_query_cb, (void *)&qinfo); | 2324 | qeth_l3_arp_query_cb, (void *)&qinfo); |
2345 | if (rc) { | 2325 | if (rc) { |
2346 | tmp = rc; | 2326 | tmp = rc; |
2347 | PRINT_WARN("Error while querying ARP cache on %s: %s " | 2327 | QETH_DBF_MESSAGE(2, "Error while querying ARP cache on %s: %s " |
2348 | "(0x%x/%d)\n", QETH_CARD_IFNAME(card), | 2328 | "(0x%x/%d)\n", QETH_CARD_IFNAME(card), |
2349 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); | 2329 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); |
2350 | if (copy_to_user(udata, qinfo.udata, 4)) | 2330 | if (copy_to_user(udata, qinfo.udata, 4)) |
@@ -2375,8 +2355,6 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, | |||
2375 | if (card->info.guestlan) | 2355 | if (card->info.guestlan) |
2376 | return -EOPNOTSUPP; | 2356 | return -EOPNOTSUPP; |
2377 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { | 2357 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { |
2378 | PRINT_WARN("ARP processing not supported " | ||
2379 | "on %s!\n", QETH_CARD_IFNAME(card)); | ||
2380 | return -EOPNOTSUPP; | 2358 | return -EOPNOTSUPP; |
2381 | } | 2359 | } |
2382 | 2360 | ||
@@ -2391,10 +2369,9 @@ static int qeth_l3_arp_add_entry(struct qeth_card *card, | |||
2391 | if (rc) { | 2369 | if (rc) { |
2392 | tmp = rc; | 2370 | tmp = rc; |
2393 | qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); | 2371 | qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); |
2394 | PRINT_WARN("Could not add ARP entry for address %s on %s: " | 2372 | QETH_DBF_MESSAGE(2, "Could not add ARP entry for address %s " |
2395 | "%s (0x%x/%d)\n", | 2373 | "on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card), |
2396 | buf, QETH_CARD_IFNAME(card), | 2374 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); |
2397 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); | ||
2398 | } | 2375 | } |
2399 | return rc; | 2376 | return rc; |
2400 | } | 2377 | } |
@@ -2417,8 +2394,6 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card, | |||
2417 | if (card->info.guestlan) | 2394 | if (card->info.guestlan) |
2418 | return -EOPNOTSUPP; | 2395 | return -EOPNOTSUPP; |
2419 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { | 2396 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { |
2420 | PRINT_WARN("ARP processing not supported " | ||
2421 | "on %s!\n", QETH_CARD_IFNAME(card)); | ||
2422 | return -EOPNOTSUPP; | 2397 | return -EOPNOTSUPP; |
2423 | } | 2398 | } |
2424 | memcpy(buf, entry, 12); | 2399 | memcpy(buf, entry, 12); |
@@ -2433,10 +2408,9 @@ static int qeth_l3_arp_remove_entry(struct qeth_card *card, | |||
2433 | tmp = rc; | 2408 | tmp = rc; |
2434 | memset(buf, 0, 16); | 2409 | memset(buf, 0, 16); |
2435 | qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); | 2410 | qeth_l3_ipaddr4_to_string((u8 *)entry->ipaddr, buf); |
2436 | PRINT_WARN("Could not delete ARP entry for address %s on %s: " | 2411 | QETH_DBF_MESSAGE(2, "Could not delete ARP entry for address %s" |
2437 | "%s (0x%x/%d)\n", | 2412 | " on %s: %s (0x%x/%d)\n", buf, QETH_CARD_IFNAME(card), |
2438 | buf, QETH_CARD_IFNAME(card), | 2413 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); |
2439 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); | ||
2440 | } | 2414 | } |
2441 | return rc; | 2415 | return rc; |
2442 | } | 2416 | } |
@@ -2456,16 +2430,14 @@ static int qeth_l3_arp_flush_cache(struct qeth_card *card) | |||
2456 | if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD)) | 2430 | if (card->info.guestlan || (card->info.type == QETH_CARD_TYPE_IQD)) |
2457 | return -EOPNOTSUPP; | 2431 | return -EOPNOTSUPP; |
2458 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { | 2432 | if (!qeth_is_supported(card, IPA_ARP_PROCESSING)) { |
2459 | PRINT_WARN("ARP processing not supported " | ||
2460 | "on %s!\n", QETH_CARD_IFNAME(card)); | ||
2461 | return -EOPNOTSUPP; | 2433 | return -EOPNOTSUPP; |
2462 | } | 2434 | } |
2463 | rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, | 2435 | rc = qeth_l3_send_simple_setassparms(card, IPA_ARP_PROCESSING, |
2464 | IPA_CMD_ASS_ARP_FLUSH_CACHE, 0); | 2436 | IPA_CMD_ASS_ARP_FLUSH_CACHE, 0); |
2465 | if (rc) { | 2437 | if (rc) { |
2466 | tmp = rc; | 2438 | tmp = rc; |
2467 | PRINT_WARN("Could not flush ARP cache on %s: %s (0x%x/%d)\n", | 2439 | QETH_DBF_MESSAGE(2, "Could not flush ARP cache on %s: %s " |
2468 | QETH_CARD_IFNAME(card), | 2440 | "(0x%x/%d)\n", QETH_CARD_IFNAME(card), |
2469 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); | 2441 | qeth_l3_arp_get_error_cause(&rc), tmp, tmp); |
2470 | } | 2442 | } |
2471 | return rc; | 2443 | return rc; |
@@ -2724,7 +2696,7 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
2724 | ctx = qeth_eddp_create_context(card, new_skb, hdr, | 2696 | ctx = qeth_eddp_create_context(card, new_skb, hdr, |
2725 | skb->sk->sk_protocol); | 2697 | skb->sk->sk_protocol); |
2726 | if (ctx == NULL) { | 2698 | if (ctx == NULL) { |
2727 | PRINT_WARN("could not create eddp context\n"); | 2699 | QETH_DBF_MESSAGE(2, "could not create eddp context\n"); |
2728 | goto tx_drop; | 2700 | goto tx_drop; |
2729 | } | 2701 | } |
2730 | } else { | 2702 | } else { |
@@ -2792,6 +2764,7 @@ tx_drop: | |||
2792 | if ((new_skb != skb) && new_skb) | 2764 | if ((new_skb != skb) && new_skb) |
2793 | dev_kfree_skb_any(new_skb); | 2765 | dev_kfree_skb_any(new_skb); |
2794 | dev_kfree_skb_any(skb); | 2766 | dev_kfree_skb_any(skb); |
2767 | netif_wake_queue(dev); | ||
2795 | return NETDEV_TX_OK; | 2768 | return NETDEV_TX_OK; |
2796 | } | 2769 | } |
2797 | 2770 | ||
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index 08f51fd902c4..ac1993708ae9 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c | |||
@@ -85,7 +85,6 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card, | |||
85 | } else if (!strcmp(tmp, "multicast_router")) { | 85 | } else if (!strcmp(tmp, "multicast_router")) { |
86 | route->type = MULTICAST_ROUTER; | 86 | route->type = MULTICAST_ROUTER; |
87 | } else { | 87 | } else { |
88 | PRINT_WARN("Invalid routing type '%s'.\n", tmp); | ||
89 | return -EINVAL; | 88 | return -EINVAL; |
90 | } | 89 | } |
91 | if (((card->state == CARD_STATE_SOFTSETUP) || | 90 | if (((card->state == CARD_STATE_SOFTSETUP) || |
@@ -137,9 +136,6 @@ static ssize_t qeth_l3_dev_route6_store(struct device *dev, | |||
137 | return -EINVAL; | 136 | return -EINVAL; |
138 | 137 | ||
139 | if (!qeth_is_supported(card, IPA_IPV6)) { | 138 | if (!qeth_is_supported(card, IPA_IPV6)) { |
140 | PRINT_WARN("IPv6 not supported for interface %s.\n" | ||
141 | "Routing status no changed.\n", | ||
142 | QETH_CARD_IFNAME(card)); | ||
143 | return -ENOTSUPP; | 139 | return -ENOTSUPP; |
144 | } | 140 | } |
145 | 141 | ||
@@ -179,7 +175,6 @@ static ssize_t qeth_l3_dev_fake_broadcast_store(struct device *dev, | |||
179 | if ((i == 0) || (i == 1)) | 175 | if ((i == 0) || (i == 1)) |
180 | card->options.fake_broadcast = i; | 176 | card->options.fake_broadcast = i; |
181 | else { | 177 | else { |
182 | PRINT_WARN("fake_broadcast: write 0 or 1 to this file!\n"); | ||
183 | return -EINVAL; | 178 | return -EINVAL; |
184 | } | 179 | } |
185 | return count; | 180 | return count; |
@@ -220,7 +215,6 @@ static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev, | |||
220 | 215 | ||
221 | if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || | 216 | if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || |
222 | (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) { | 217 | (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) { |
223 | PRINT_WARN("Device is not a tokenring device!\n"); | ||
224 | return -EINVAL; | 218 | return -EINVAL; |
225 | } | 219 | } |
226 | 220 | ||
@@ -233,8 +227,6 @@ static ssize_t qeth_l3_dev_broadcast_mode_store(struct device *dev, | |||
233 | card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS; | 227 | card->options.broadcast_mode = QETH_TR_BROADCAST_ALLRINGS; |
234 | return count; | 228 | return count; |
235 | } else { | 229 | } else { |
236 | PRINT_WARN("broadcast_mode: invalid mode %s!\n", | ||
237 | tmp); | ||
238 | return -EINVAL; | 230 | return -EINVAL; |
239 | } | 231 | } |
240 | return count; | 232 | return count; |
@@ -275,7 +267,6 @@ static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev, | |||
275 | 267 | ||
276 | if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || | 268 | if (!((card->info.link_type == QETH_LINK_TYPE_HSTR) || |
277 | (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) { | 269 | (card->info.link_type == QETH_LINK_TYPE_LANE_TR))) { |
278 | PRINT_WARN("Device is not a tokenring device!\n"); | ||
279 | return -EINVAL; | 270 | return -EINVAL; |
280 | } | 271 | } |
281 | 272 | ||
@@ -285,7 +276,6 @@ static ssize_t qeth_l3_dev_canonical_macaddr_store(struct device *dev, | |||
285 | QETH_TR_MACADDR_CANONICAL : | 276 | QETH_TR_MACADDR_CANONICAL : |
286 | QETH_TR_MACADDR_NONCANONICAL; | 277 | QETH_TR_MACADDR_NONCANONICAL; |
287 | else { | 278 | else { |
288 | PRINT_WARN("canonical_macaddr: write 0 or 1 to this file!\n"); | ||
289 | return -EINVAL; | 279 | return -EINVAL; |
290 | } | 280 | } |
291 | return count; | 281 | return count; |
@@ -327,7 +317,6 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev, | |||
327 | else if (!strcmp(tmp, "no_checksumming")) | 317 | else if (!strcmp(tmp, "no_checksumming")) |
328 | card->options.checksum_type = NO_CHECKSUMMING; | 318 | card->options.checksum_type = NO_CHECKSUMMING; |
329 | else { | 319 | else { |
330 | PRINT_WARN("Unknown checksumming type '%s'\n", tmp); | ||
331 | return -EINVAL; | 320 | return -EINVAL; |
332 | } | 321 | } |
333 | return count; | 322 | return count; |
@@ -382,8 +371,6 @@ static ssize_t qeth_l3_dev_ipato_enable_store(struct device *dev, | |||
382 | } else if (!strcmp(tmp, "0")) { | 371 | } else if (!strcmp(tmp, "0")) { |
383 | card->ipato.enabled = 0; | 372 | card->ipato.enabled = 0; |
384 | } else { | 373 | } else { |
385 | PRINT_WARN("ipato_enable: write 0, 1 or 'toggle' to " | ||
386 | "this file\n"); | ||
387 | return -EINVAL; | 374 | return -EINVAL; |
388 | } | 375 | } |
389 | return count; | 376 | return count; |
@@ -422,8 +409,6 @@ static ssize_t qeth_l3_dev_ipato_invert4_store(struct device *dev, | |||
422 | } else if (!strcmp(tmp, "0")) { | 409 | } else if (!strcmp(tmp, "0")) { |
423 | card->ipato.invert4 = 0; | 410 | card->ipato.invert4 = 0; |
424 | } else { | 411 | } else { |
425 | PRINT_WARN("ipato_invert4: write 0, 1 or 'toggle' to " | ||
426 | "this file\n"); | ||
427 | return -EINVAL; | 412 | return -EINVAL; |
428 | } | 413 | } |
429 | return count; | 414 | return count; |
@@ -486,13 +471,10 @@ static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto, | |||
486 | /* get address string */ | 471 | /* get address string */ |
487 | end = strchr(start, '/'); | 472 | end = strchr(start, '/'); |
488 | if (!end || (end - start >= 40)) { | 473 | if (!end || (end - start >= 40)) { |
489 | PRINT_WARN("Invalid format for ipato_addx/delx. " | ||
490 | "Use <ip addr>/<mask bits>\n"); | ||
491 | return -EINVAL; | 474 | return -EINVAL; |
492 | } | 475 | } |
493 | strncpy(buffer, start, end - start); | 476 | strncpy(buffer, start, end - start); |
494 | if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) { | 477 | if (qeth_l3_string_to_ipaddr(buffer, proto, addr)) { |
495 | PRINT_WARN("Invalid IP address format!\n"); | ||
496 | return -EINVAL; | 478 | return -EINVAL; |
497 | } | 479 | } |
498 | start = end + 1; | 480 | start = end + 1; |
@@ -500,7 +482,6 @@ static int qeth_l3_parse_ipatoe(const char *buf, enum qeth_prot_versions proto, | |||
500 | if (!strlen(start) || | 482 | if (!strlen(start) || |
501 | (tmp == start) || | 483 | (tmp == start) || |
502 | (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) { | 484 | (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) { |
503 | PRINT_WARN("Invalid mask bits for ipato_addx/delx !\n"); | ||
504 | return -EINVAL; | 485 | return -EINVAL; |
505 | } | 486 | } |
506 | return 0; | 487 | return 0; |
@@ -520,7 +501,6 @@ static ssize_t qeth_l3_dev_ipato_add_store(const char *buf, size_t count, | |||
520 | 501 | ||
521 | ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL); | 502 | ipatoe = kzalloc(sizeof(struct qeth_ipato_entry), GFP_KERNEL); |
522 | if (!ipatoe) { | 503 | if (!ipatoe) { |
523 | PRINT_WARN("No memory to allocate ipato entry\n"); | ||
524 | return -ENOMEM; | 504 | return -ENOMEM; |
525 | } | 505 | } |
526 | ipatoe->proto = proto; | 506 | ipatoe->proto = proto; |
@@ -609,8 +589,6 @@ static ssize_t qeth_l3_dev_ipato_invert6_store(struct device *dev, | |||
609 | } else if (!strcmp(tmp, "0")) { | 589 | } else if (!strcmp(tmp, "0")) { |
610 | card->ipato.invert6 = 0; | 590 | card->ipato.invert6 = 0; |
611 | } else { | 591 | } else { |
612 | PRINT_WARN("ipato_invert6: write 0, 1 or 'toggle' to " | ||
613 | "this file\n"); | ||
614 | return -EINVAL; | 592 | return -EINVAL; |
615 | } | 593 | } |
616 | return count; | 594 | return count; |
@@ -724,7 +702,6 @@ static int qeth_l3_parse_vipae(const char *buf, enum qeth_prot_versions proto, | |||
724 | u8 *addr) | 702 | u8 *addr) |
725 | { | 703 | { |
726 | if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { | 704 | if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { |
727 | PRINT_WARN("Invalid IP address format!\n"); | ||
728 | return -EINVAL; | 705 | return -EINVAL; |
729 | } | 706 | } |
730 | return 0; | 707 | return 0; |
@@ -891,7 +868,6 @@ static int qeth_l3_parse_rxipe(const char *buf, enum qeth_prot_versions proto, | |||
891 | u8 *addr) | 868 | u8 *addr) |
892 | { | 869 | { |
893 | if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { | 870 | if (qeth_l3_string_to_ipaddr(buf, proto, addr)) { |
894 | PRINT_WARN("Invalid IP address format!\n"); | ||
895 | return -EINVAL; | 871 | return -EINVAL; |
896 | } | 872 | } |
897 | return 0; | 873 | return 0; |
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index 5080f343ad74..5bfbe7659830 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c | |||
@@ -207,6 +207,7 @@ s390_handle_mcck(void) | |||
207 | do_exit(SIGSEGV); | 207 | do_exit(SIGSEGV); |
208 | } | 208 | } |
209 | } | 209 | } |
210 | EXPORT_SYMBOL_GPL(s390_handle_mcck); | ||
210 | 211 | ||
211 | /* | 212 | /* |
212 | * returns 0 if all registers could be validated | 213 | * returns 0 if all registers could be validated |
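Finally, s390_handle_mcck() is exported for GPL modules. A hypothetical caller, shown only to illustrate what the export enables; the void return type is assumed (only the "(void)" parameter list is visible in the hunk header), and no in-tree user is implied by this sketch:

    /* in a GPL-licensed module */
    extern void s390_handle_mcck(void);

    static void example_check_pending(void)
    {
            s390_handle_mcck();     /* process pending machine-check conditions */
    }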