about summary refs log tree commit diff stats
path: root/drivers/s390/cio/qdio_main.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/s390/cio/qdio_main.c')
-rw-r--r-- drivers/s390/cio/qdio_main.c 67
1 file changed, 14 insertions(+), 53 deletions(-)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 88be7b9ea6e1..00520f9a7a8e 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -336,10 +336,10 @@ again:
336 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2); 336 WARN_ON(queue_type(q) != QDIO_IQDIO_QFMT || cc != 2);
337 337
338 if (!start_time) { 338 if (!start_time) {
339 start_time = get_usecs(); 339 start_time = get_clock();
340 goto again; 340 goto again;
341 } 341 }
342 if ((get_usecs() - start_time) < QDIO_BUSY_BIT_PATIENCE) 342 if ((get_clock() - start_time) < QDIO_BUSY_BIT_PATIENCE)
343 goto again; 343 goto again;
344 } 344 }
345 return cc; 345 return cc;
@@ -536,7 +536,7 @@ static int qdio_inbound_q_moved(struct qdio_q *q)
536 if ((bufnr != q->last_move) || q->qdio_error) { 536 if ((bufnr != q->last_move) || q->qdio_error) {
537 q->last_move = bufnr; 537 q->last_move = bufnr;
538 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR) 538 if (!is_thinint_irq(q->irq_ptr) && MACHINE_IS_LPAR)
539 q->u.in.timestamp = get_usecs(); 539 q->u.in.timestamp = get_clock();
540 return 1; 540 return 1;
541 } else 541 } else
542 return 0; 542 return 0;
@@ -567,7 +567,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
567 * At this point we know, that inbound first_to_check 567 * At this point we know, that inbound first_to_check
568 * has (probably) not moved (see qdio_inbound_processing). 568 * has (probably) not moved (see qdio_inbound_processing).
569 */ 569 */
570 if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) { 570 if (get_clock() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
571 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x", 571 DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
572 q->first_to_check); 572 q->first_to_check);
573 return 1; 573 return 1;
@@ -606,7 +606,7 @@ static void qdio_kick_handler(struct qdio_q *q)
606static void __qdio_inbound_processing(struct qdio_q *q) 606static void __qdio_inbound_processing(struct qdio_q *q)
607{ 607{
608 qperf_inc(q, tasklet_inbound); 608 qperf_inc(q, tasklet_inbound);
609again: 609
610 if (!qdio_inbound_q_moved(q)) 610 if (!qdio_inbound_q_moved(q))
611 return; 611 return;
612 612
@@ -615,7 +615,10 @@ again:
615 if (!qdio_inbound_q_done(q)) { 615 if (!qdio_inbound_q_done(q)) {
616 /* means poll time is not yet over */ 616 /* means poll time is not yet over */
617 qperf_inc(q, tasklet_inbound_resched); 617 qperf_inc(q, tasklet_inbound_resched);
618 goto again; 618 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
619 tasklet_schedule(&q->tasklet);
620 return;
621 }
619 } 622 }
620 623
621 qdio_stop_polling(q); 624 qdio_stop_polling(q);
@@ -625,7 +628,8 @@ again:
625 */ 628 */
626 if (!qdio_inbound_q_done(q)) { 629 if (!qdio_inbound_q_done(q)) {
627 qperf_inc(q, tasklet_inbound_resched2); 630 qperf_inc(q, tasklet_inbound_resched2);
628 goto again; 631 if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
632 tasklet_schedule(&q->tasklet);
629 } 633 }
630} 634}
631 635
@@ -955,6 +959,9 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
955 return; 959 return;
956 } 960 }
957 961
962 if (irq_ptr->perf_stat_enabled)
963 irq_ptr->perf_stat.qdio_int++;
964
958 if (IS_ERR(irb)) { 965 if (IS_ERR(irb)) {
959 switch (PTR_ERR(irb)) { 966 switch (PTR_ERR(irb)) {
960 case -EIO: 967 case -EIO:
@@ -1016,30 +1023,6 @@ int qdio_get_ssqd_desc(struct ccw_device *cdev,
1016} 1023}
1017EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc); 1024EXPORT_SYMBOL_GPL(qdio_get_ssqd_desc);
1018 1025
1019/**
1020 * qdio_cleanup - shutdown queues and free data structures
1021 * @cdev: associated ccw device
1022 * @how: use halt or clear to shutdown
1023 *
1024 * This function calls qdio_shutdown() for @cdev with method @how.
1025 * and qdio_free(). The qdio_free() return value is ignored since
1026 * !irq_ptr is already checked.
1027 */
1028int qdio_cleanup(struct ccw_device *cdev, int how)
1029{
1030 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
1031 int rc;
1032
1033 if (!irq_ptr)
1034 return -ENODEV;
1035
1036 rc = qdio_shutdown(cdev, how);
1037
1038 qdio_free(cdev);
1039 return rc;
1040}
1041EXPORT_SYMBOL_GPL(qdio_cleanup);
1042
1043static void qdio_shutdown_queues(struct ccw_device *cdev) 1026static void qdio_shutdown_queues(struct ccw_device *cdev)
1044{ 1027{
1045 struct qdio_irq *irq_ptr = cdev->private->qdio_data; 1028 struct qdio_irq *irq_ptr = cdev->private->qdio_data;
@@ -1157,28 +1140,6 @@ int qdio_free(struct ccw_device *cdev)
1157EXPORT_SYMBOL_GPL(qdio_free); 1140EXPORT_SYMBOL_GPL(qdio_free);
1158 1141
1159/** 1142/**
1160 * qdio_initialize - allocate and establish queues for a qdio subchannel
1161 * @init_data: initialization data
1162 *
1163 * This function first allocates queues via qdio_allocate() and on success
1164 * establishes them via qdio_establish().
1165 */
1166int qdio_initialize(struct qdio_initialize *init_data)
1167{
1168 int rc;
1169
1170 rc = qdio_allocate(init_data);
1171 if (rc)
1172 return rc;
1173
1174 rc = qdio_establish(init_data);
1175 if (rc)
1176 qdio_free(init_data->cdev);
1177 return rc;
1178}
1179EXPORT_SYMBOL_GPL(qdio_initialize);
1180
1181/**
1182 * qdio_allocate - allocate qdio queues and associated data 1143 * qdio_allocate - allocate qdio queues and associated data
1183 * @init_data: initialization data 1144 * @init_data: initialization data
1184 */ 1145 */