author     Jan Glauber <jang@linux.vnet.ibm.com>                    2010-01-04 03:05:42 -0500
committer  Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com>  2010-01-04 03:05:58 -0500
commit     6486cda6c6b15368e2c925d89b4e9ed13e67b91b (patch)
tree       c4ca8e2b2b961adbc19733d1bcf3ebc6c5c6be32 /drivers/s390/cio/qdio_main.c
parent     45d28b097280a78893ce25a5d0db41e6a2717853 (diff)
[S390] qdio: convert global statistics to per-device stats
Revamp the qdio performance statistics and move them from procfs to debugfs
using the seq_file interface. Since the statistics are not intended for the
general user, the removal of /proc/qdio_perf should not surprise anyone. The
per-device statistics are disabled by default; writing 1 to
/<debugfs mountpoint>/qdio/<device bus ID>/statistics enables the statistics
for the given device.

Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Diffstat (limited to 'drivers/s390/cio/qdio_main.c')
-rw-r--r--  drivers/s390/cio/qdio_main.c  |  71
1 file changed, 33 insertions(+), 38 deletions(-)
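
Every hunk below replaces a global qdio_perf_stat_inc(&perf_stats.*) call with the new per-device qperf_inc(q, counter) helper. The helper itself is defined in the qdio headers and is not part of this diff; purely as an illustrative sketch, it could look roughly like the following, where the struct and field names (qdio_queue_perf_stat, perf_stat, perf_stat_enabled) are assumptions and not taken from this patch:

/*
 * Illustrative sketch only -- the real definition lives in the qdio
 * headers, not in this patch.  Names below are assumptions.
 */
struct qdio_queue_perf_stat {
	unsigned int eqbs;
	unsigned int eqbs_partial;
	unsigned int sqbs;
	unsigned int sqbs_partial;
	unsigned int siga_sync;
	unsigned int siga_read;
	unsigned int siga_write;
	/* ... one counter per qperf_inc() call site in qdio_main.c ... */
};

#define qperf_inc(__q, __attr)						\
do {									\
	struct qdio_irq *qdev = (__q)->irq_ptr;				\
	if (qdev->perf_stat_enabled)					\
		(qdev->perf_stat.__attr)++;				\
} while (0)

The counters are plain increments driven from interrupt and tasklet context, so they favor zero locking overhead over perfect accuracy, and the enable flag keeps the cost negligible while the statistics are switched off.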
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index b2275c5000e7..999fe80c4051 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -22,7 +22,6 @@
 #include "device.h"
 #include "qdio.h"
 #include "qdio_debug.h"
-#include "qdio_perf.h"
 
 MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com>,"\
 	"Jan Glauber <jang@linux.vnet.ibm.com>");
@@ -126,7 +125,7 @@ static int qdio_do_eqbs(struct qdio_q *q, unsigned char *state,
 	int rc;
 
 	BUG_ON(!q->irq_ptr->sch_token);
-	qdio_perf_stat_inc(&perf_stats.debug_eqbs_all);
+	qperf_inc(q, eqbs);
 
 	if (!q->is_input_q)
 		nr += q->irq_ptr->nr_input_qs;
@@ -139,7 +138,7 @@ again:
 	 * buffers later.
 	 */
 	if ((ccq == 96) && (count != tmp_count)) {
-		qdio_perf_stat_inc(&perf_stats.debug_eqbs_incomplete);
+		qperf_inc(q, eqbs_partial);
 		return (count - tmp_count);
 	}
 
@@ -182,7 +181,7 @@ static int qdio_do_sqbs(struct qdio_q *q, unsigned char state, int start,
 		return 0;
 
 	BUG_ON(!q->irq_ptr->sch_token);
-	qdio_perf_stat_inc(&perf_stats.debug_sqbs_all);
+	qperf_inc(q, sqbs);
 
 	if (!q->is_input_q)
 		nr += q->irq_ptr->nr_input_qs;
@@ -191,7 +190,7 @@ again:
 	rc = qdio_check_ccq(q, ccq);
 	if (rc == 1) {
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "SQBS again:%2d", ccq);
-		qdio_perf_stat_inc(&perf_stats.debug_sqbs_incomplete);
+		qperf_inc(q, sqbs_partial);
 		goto again;
 	}
 	if (rc < 0) {
@@ -285,7 +284,7 @@ static inline int qdio_siga_sync(struct qdio_q *q, unsigned int output,
 		return 0;
 
 	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-s:%1d", q->nr);
-	qdio_perf_stat_inc(&perf_stats.siga_sync);
+	qperf_inc(q, siga_sync);
 
 	cc = do_siga_sync(q->irq_ptr->schid, output, input);
 	if (cc)
@@ -350,7 +349,7 @@ static inline int qdio_siga_input(struct qdio_q *q)
 	int cc;
 
 	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-r:%1d", q->nr);
-	qdio_perf_stat_inc(&perf_stats.siga_in);
+	qperf_inc(q, siga_read);
 
 	cc = do_siga_input(q->irq_ptr->schid, q->mask);
 	if (cc)
@@ -382,7 +381,7 @@ static inline void qdio_stop_polling(struct qdio_q *q)
 		return;
 
 	q->u.in.polling = 0;
-	qdio_perf_stat_inc(&perf_stats.debug_stop_polling);
+	qperf_inc(q, stop_polling);
 
 	/* show the card that we are not polling anymore */
 	if (is_qebsm(q)) {
@@ -400,7 +399,7 @@ static void announce_buffer_error(struct qdio_q *q, int count)
 	/* special handling for no target buffer empty */
 	if ((!q->is_input_q &&
 	    (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
-		qdio_perf_stat_inc(&perf_stats.outbound_target_full);
+		qperf_inc(q, target_full);
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
 			      q->first_to_check);
 		return;
@@ -487,7 +486,7 @@ static int get_inbound_buffer_frontier(struct qdio_q *q)
 		inbound_primed(q, count);
 		q->first_to_check = add_buf(q->first_to_check, count);
 		if (atomic_sub(count, &q->nr_buf_used) == 0)
-			qdio_perf_stat_inc(&perf_stats.inbound_queue_full);
+			qperf_inc(q, inbound_queue_full);
 		break;
 	case SLSB_P_INPUT_ERROR:
 		announce_buffer_error(q, count);
@@ -567,9 +566,10 @@ static void qdio_kick_handler(struct qdio_q *q)
 	count = sub_buf(end, start);
 
 	if (q->is_input_q) {
-		qdio_perf_stat_inc(&perf_stats.inbound_handler);
+		qperf_inc(q, inbound_handler);
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
 	} else
+		qperf_inc(q, outbound_handler);
 		DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
 			      start, count);
 
@@ -583,24 +583,28 @@ static void qdio_kick_handler(struct qdio_q *q)
 
 static void __qdio_inbound_processing(struct qdio_q *q)
 {
-	qdio_perf_stat_inc(&perf_stats.tasklet_inbound);
+	qperf_inc(q, tasklet_inbound);
 again:
 	if (!qdio_inbound_q_moved(q))
 		return;
 
 	qdio_kick_handler(q);
 
-	if (!qdio_inbound_q_done(q))
+	if (!qdio_inbound_q_done(q)) {
 		/* means poll time is not yet over */
+		qperf_inc(q, tasklet_inbound_resched);
 		goto again;
+	}
 
 	qdio_stop_polling(q);
 	/*
 	 * We need to check again to not lose initiative after
 	 * resetting the ACK state.
 	 */
-	if (!qdio_inbound_q_done(q))
+	if (!qdio_inbound_q_done(q)) {
+		qperf_inc(q, tasklet_inbound_resched2);
 		goto again;
+	}
 }
 
 void qdio_inbound_processing(unsigned long data)
@@ -688,7 +692,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
 		return 0;
 
 	DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "siga-w:%1d", q->nr);
-	qdio_perf_stat_inc(&perf_stats.siga_out);
+	qperf_inc(q, siga_write);
 
 	cc = qdio_siga_output(q, &busy_bit);
 	switch (cc) {
@@ -711,7 +715,7 @@ static int qdio_kick_outbound_q(struct qdio_q *q)
 
 static void __qdio_outbound_processing(struct qdio_q *q)
 {
-	qdio_perf_stat_inc(&perf_stats.tasklet_outbound);
+	qperf_inc(q, tasklet_outbound);
 	BUG_ON(atomic_read(&q->nr_buf_used) < 0);
 
 	if (qdio_outbound_q_moved(q))
@@ -739,12 +743,9 @@ static void __qdio_outbound_processing(struct qdio_q *q)
 	 */
 	if (qdio_outbound_q_done(q))
 		del_timer(&q->u.out.timer);
-	else {
-		if (!timer_pending(&q->u.out.timer)) {
+	else
+		if (!timer_pending(&q->u.out.timer))
 			mod_timer(&q->u.out.timer, jiffies + 10 * HZ);
-			qdio_perf_stat_inc(&perf_stats.debug_tl_out_timer);
-		}
-	}
 	return;
 
 sched:
@@ -784,7 +785,7 @@ static inline void qdio_check_outbound_after_thinint(struct qdio_q *q)
 
 static void __tiqdio_inbound_processing(struct qdio_q *q)
 {
-	qdio_perf_stat_inc(&perf_stats.thinint_inbound);
+	qperf_inc(q, tasklet_inbound);
 	qdio_sync_after_thinint(q);
 
 	/*
@@ -799,7 +800,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
 	qdio_kick_handler(q);
 
 	if (!qdio_inbound_q_done(q)) {
-		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop);
+		qperf_inc(q, tasklet_inbound_resched);
 		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED)) {
 			tasklet_schedule(&q->tasklet);
 			return;
@@ -812,7 +813,7 @@ static void __tiqdio_inbound_processing(struct qdio_q *q)
 	 * resetting the ACK state.
 	 */
 	if (!qdio_inbound_q_done(q)) {
-		qdio_perf_stat_inc(&perf_stats.thinint_inbound_loop2);
+		qperf_inc(q, tasklet_inbound_resched2);
 		if (likely(q->irq_ptr->state != QDIO_IRQ_STATE_STOPPED))
 			tasklet_schedule(&q->tasklet);
 	}
@@ -851,8 +852,6 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
 		return;
 
-	qdio_perf_stat_inc(&perf_stats.pci_int);
-
 	for_each_input_queue(irq_ptr, q, i)
 		tasklet_schedule(&q->tasklet);
 
@@ -923,8 +922,6 @@ void qdio_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
 	int cstat, dstat;
 
-	qdio_perf_stat_inc(&perf_stats.qdio_int);
-
 	if (!intparm || !irq_ptr) {
 		DBF_ERROR("qint:%4x", cdev->private->schid.sch_no);
 		return;
@@ -1383,6 +1380,8 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
 {
 	int used, diff;
 
+	qperf_inc(q, inbound_call);
+
 	if (!q->u.in.polling)
 		goto set;
 
@@ -1438,14 +1437,16 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 	unsigned char state;
 	int used, rc = 0;
 
-	qdio_perf_stat_inc(&perf_stats.outbound_handler);
+	qperf_inc(q, outbound_call);
 
 	count = set_buf_states(q, bufnr, SLSB_CU_OUTPUT_PRIMED, count);
 	used = atomic_add_return(count, &q->nr_buf_used);
 	BUG_ON(used > QDIO_MAX_BUFFERS_PER_Q);
 
-	if (callflags & QDIO_FLAG_PCI_OUT)
+	if (callflags & QDIO_FLAG_PCI_OUT) {
 		q->u.out.pci_out_enabled = 1;
+		qperf_inc(q, pci_request_int);
+	}
 	else
 		q->u.out.pci_out_enabled = 0;
 
@@ -1484,7 +1485,7 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
 	if (state != SLSB_CU_OUTPUT_PRIMED)
 		rc = qdio_kick_outbound_q(q);
 	else
-		qdio_perf_stat_inc(&perf_stats.fast_requeue);
+		qperf_inc(q, fast_requeue);
 
 out:
 	tasklet_schedule(&q->tasklet);
@@ -1540,16 +1541,11 @@ static int __init init_QDIO(void)
 	rc = qdio_debug_init();
 	if (rc)
 		goto out_ti;
-	rc = qdio_setup_perf_stats();
-	if (rc)
-		goto out_debug;
 	rc = tiqdio_register_thinints();
 	if (rc)
-		goto out_perf;
+		goto out_debug;
 	return 0;
 
-out_perf:
-	qdio_remove_perf_stats();
 out_debug:
 	qdio_debug_exit();
 out_ti:
@@ -1563,7 +1559,6 @@ static void __exit exit_QDIO(void)
 {
 	tiqdio_unregister_thinints();
 	tiqdio_free_memory();
-	qdio_remove_perf_stats();
 	qdio_debug_exit();
 	qdio_setup_exit();
 }
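
Per the commit message, the counters collected above are exported through debugfs with the seq_file interface and switched on by writing 1 to /<debugfs mountpoint>/qdio/<device bus ID>/statistics. That wiring lives in the qdio debug code, not in qdio_main.c; the sketch below only illustrates how such a toggle attribute can be built, and every name in it (struct qdio_dev_stats, qstat_*, perf_stat_enabled, the parent dentry) is an assumption rather than something taken from this patch:

#include <linux/module.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/uaccess.h>

/* Assumed per-device state; in qdio the real fields hang off struct qdio_irq. */
struct qdio_dev_stats {
	unsigned int perf_stat_enabled;
	unsigned long siga_sync;
	/* ... remaining counters ... */
};

static int qstat_show(struct seq_file *m, void *v)
{
	struct qdio_dev_stats *s = m->private;

	/* one "name : value" line per counter, rendered via seq_file */
	seq_printf(m, "siga_sync\t: %lu\n", s->siga_sync);
	return 0;
}

static int qstat_seq_open(struct inode *inode, struct file *file)
{
	/* debugfs stores the per-device pointer in i_private */
	return single_open(file, qstat_show, inode->i_private);
}

static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
			       size_t count, loff_t *off)
{
	struct seq_file *m = file->private_data;
	struct qdio_dev_stats *s = m->private;
	char c;

	if (!count)
		return 0;
	if (get_user(c, buf))
		return -EFAULT;
	/* writing '1' enables collection for this device, anything else disables it */
	s->perf_stat_enabled = (c == '1');
	return count;
}

static const struct file_operations qstat_fops = {
	.owner	 = THIS_MODULE,
	.open	 = qstat_seq_open,
	.read	 = seq_read,
	.write	 = qstat_seq_write,
	.llseek	 = seq_lseek,
	.release = single_release,
};

/* registration during device setup; "parent" is the device's debugfs directory */
static void qstat_create_entry(struct dentry *parent, struct qdio_dev_stats *s)
{
	debugfs_create_file("statistics", S_IFREG | S_IRUGO | S_IWUSR,
			    parent, s, &qstat_fops);
}

Because the write handler only flips a flag that qperf_inc() checks, enabling or disabling collection takes effect immediately and costs nothing while disabled.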