author		Jan Glauber <jang@linux.vnet.ibm.com>	2010-09-07 17:14:39 -0400
committer	David S. Miller <davem@davemloft.net>	2010-09-08 17:31:00 -0400
commit		d36deae75011a7890f0e730dd0f867c64081cb50 (patch)
tree		d24672cf5cc40c637186362187450362cabefd24 /drivers/s390
parent		e508be174ad36b0cf9b324cd04978c2b13c21502 (diff)
qdio: extend API to allow polling
Extend the qdio API to allow polling in the upper-layer driver. This
is needed by qeth to use NAPI.
To use the new interface the upper-layer driver must specify the
queue_start_poll() callback. This callback is used to signal the
upper-layer driver that it has initiative and must process the inbound
queue by calling qdio_get_next_buffers(). If the upper-layer driver
wants to stop polling it calls qdio_start_irq().
Since adapter interrupts are not completely stoppable, qdio implements
a software bit, QDIO_QUEUE_IRQS_DISABLED, to safely disable interrupts
for an input queue.
The old interface is preserved and will be used as is by zfcp.
Signed-off-by: Jan Glauber <jang@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Frank Blaschka <frank.blaschka@de.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
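
For illustration, a minimal sketch of how an upper-layer driver could wire
the new interface into a NAPI poll loop. This is not code from this patch:
the my_* names, the single input queue (nr 0) and the budget handling are
assumptions; only the queue_start_poll callback, qdio_get_next_buffers()
and qdio_start_irq() come from the new API.

#include <linux/netdevice.h>	/* NAPI */
#include <asm/qdio.h>		/* qdio_start_irq(), qdio_get_next_buffers() */

struct my_card {			/* hypothetical driver-private data */
	struct ccw_device *cdev;
	struct napi_struct napi;
};

/* assumed to be registered via the queue_start_poll member of the
 * qdio_initialize data, with (unsigned long)card passed as int_parm */
static void my_queue_start_poll(struct ccw_device *cdev, int queue,
				unsigned long int_parm)
{
	struct my_card *card = (struct my_card *)int_parm;

	/* qdio has set QDIO_QUEUE_IRQS_DISABLED for this queue; the
	 * driver now has the initiative and hands it to NAPI */
	napi_schedule(&card->napi);
}

static int my_napi_poll(struct napi_struct *napi, int budget)
{
	struct my_card *card = container_of(napi, struct my_card, napi);
	int work_done = 0;
	int bufnr, error, count;

	while (work_done < budget) {
		count = qdio_get_next_buffers(card->cdev, 0, &bufnr, &error);
		if (count <= 0)	/* error handling elided in this sketch */
			break;
		work_done += my_process_inbound(card, bufnr, count, error);
	}

	if (work_done < budget) {
		napi_complete(napi);
		/* stop polling and re-enable qdio interrupts; if new data
		 * arrived meanwhile, qdio_start_irq() returns 1 and the
		 * driver keeps the initiative by rescheduling NAPI */
		if (qdio_start_irq(card->cdev, 0))
			napi_schedule(&card->napi);
	}
	return work_done;
}

Here my_process_inbound() stands in for the driver's buffer handling and is
assumed to return the number of buffers it consumed.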
Diffstat (limited to 'drivers/s390')
-rw-r--r--	drivers/s390/cio/qdio.h		|  29
-rw-r--r--	drivers/s390/cio/qdio_debug.c	|  33
-rw-r--r--	drivers/s390/cio/qdio_main.c	| 138
-rw-r--r--	drivers/s390/cio/qdio_setup.c	|   1
-rw-r--r--	drivers/s390/cio/qdio_thinint.c	|  66
-rw-r--r--	drivers/s390/scsi/zfcp_qdio.c	|   6
6 files changed, 217 insertions, 56 deletions
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index f0037eefd44..0f4ef8769a3 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -208,6 +208,7 @@ struct qdio_dev_perf_stat {
 	unsigned int eqbs_partial;
 	unsigned int sqbs;
 	unsigned int sqbs_partial;
+	unsigned int int_discarded;
 } ____cacheline_aligned;
 
 struct qdio_queue_perf_stat {
@@ -222,6 +223,10 @@ struct qdio_queue_perf_stat {
 	unsigned int nr_sbal_total;
 };
 
+enum qdio_queue_irq_states {
+	QDIO_QUEUE_IRQS_DISABLED,
+};
+
 struct qdio_input_q {
 	/* input buffer acknowledgement flag */
 	int polling;
@@ -231,6 +236,10 @@ struct qdio_input_q {
 	int ack_count;
 	/* last time of noticing incoming data */
 	u64 timestamp;
+	/* upper-layer polling flag */
+	unsigned long queue_irq_state;
+	/* callback to start upper-layer polling */
+	void (*queue_start_poll) (struct ccw_device *, int, unsigned long);
 };
 
 struct qdio_output_q {
@@ -399,6 +408,26 @@ static inline int multicast_outbound(struct qdio_q *q)
 #define sub_buf(bufnr, dec) \
 	((bufnr - dec) & QDIO_MAX_BUFFERS_MASK)
 
+#define queue_irqs_enabled(q) \
+	(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) == 0)
+#define queue_irqs_disabled(q) \
+	(test_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state) != 0)
+
+#define TIQDIO_SHARED_IND		63
+
+/* device state change indicators */
+struct indicator_t {
+	u32 ind;	/* u32 because of compare-and-swap performance */
+	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
+};
+
+extern struct indicator_t *q_indicators;
+
+static inline int shared_ind(struct qdio_irq *irq_ptr)
+{
+	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+}
+
 /* prototypes for thin interrupt */
 void qdio_setup_thinint(struct qdio_irq *irq_ptr);
 int qdio_establish_thinint(struct qdio_irq *irq_ptr);
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 6ce83f56d53..28868e7471a 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -56,9 +56,16 @@ static int qstat_show(struct seq_file *m, void *v)
 
 	seq_printf(m, "DSCI: %d nr_used: %d\n",
 		   *(u32 *)q->irq_ptr->dsci, atomic_read(&q->nr_buf_used));
-	seq_printf(m, "ftc: %d last_move: %d\n", q->first_to_check, q->last_move);
-	seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
-		   q->u.in.polling, q->u.in.ack_start, q->u.in.ack_count);
+	seq_printf(m, "ftc: %d last_move: %d\n",
+		   q->first_to_check, q->last_move);
+	if (q->is_input_q) {
+		seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
+			   q->u.in.polling, q->u.in.ack_start,
+			   q->u.in.ack_count);
+		seq_printf(m, "IRQs disabled: %u\n",
+			   test_bit(QDIO_QUEUE_IRQS_DISABLED,
+			   &q->u.in.queue_irq_state));
+	}
 	seq_printf(m, "SBAL states:\n");
 	seq_printf(m, "|0 |8 |16 |24 |32 |40 |48 |56 63|\n");
 
@@ -113,22 +120,6 @@ static int qstat_show(struct seq_file *m, void *v)
 	return 0;
 }
 
-static ssize_t qstat_seq_write(struct file *file, const char __user *buf,
-			       size_t count, loff_t *off)
-{
-	struct seq_file *seq = file->private_data;
-	struct qdio_q *q = seq->private;
-
-	if (!q)
-		return 0;
-	if (q->is_input_q)
-		xchg(q->irq_ptr->dsci, 1);
-	local_bh_disable();
-	tasklet_schedule(&q->tasklet);
-	local_bh_enable();
-	return count;
-}
-
 static int qstat_seq_open(struct inode *inode, struct file *filp)
 {
 	return single_open(filp, qstat_show,
@@ -139,7 +130,6 @@ static const struct file_operations debugfs_fops = {
 	.owner	 = THIS_MODULE,
 	.open	 = qstat_seq_open,
 	.read	 = seq_read,
-	.write	 = qstat_seq_write,
 	.llseek  = seq_lseek,
 	.release = single_release,
 };
@@ -166,7 +156,8 @@ static char *qperf_names[] = {
 	"QEBSM eqbs",
 	"QEBSM eqbs partial",
 	"QEBSM sqbs",
-	"QEBSM sqbs partial"
+	"QEBSM sqbs partial",
+	"Discarded interrupts"
 };
 
 static int qperf_show(struct seq_file *m, void *v)
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 00520f9a7a8..5fcfa7f9e9e 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -884,8 +884,19 @@ static void qdio_int_handler_pci(struct qdio_irq *irq_ptr)
 	if (unlikely(irq_ptr->state == QDIO_IRQ_STATE_STOPPED))
 		return;
 
-	for_each_input_queue(irq_ptr, q, i)
-		tasklet_schedule(&q->tasklet);
+	for_each_input_queue(irq_ptr, q, i) {
+		if (q->u.in.queue_start_poll) {
+			/* skip if polling is enabled or already in work */
+			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+				     &q->u.in.queue_irq_state)) {
+				qperf_inc(q, int_discarded);
+				continue;
+			}
+			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+						 q->irq_ptr->int_parm);
+		} else
+			tasklet_schedule(&q->tasklet);
+	}
 
 	if (!(irq_ptr->qib.ac & QIB_AC_OUTBOUND_PCI_SUPPORTED))
 		return;
@@ -1519,6 +1530,129 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
 }
 EXPORT_SYMBOL_GPL(do_QDIO);
 
+/**
+ * qdio_start_irq - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ *   0 - success
+ *   1 - irqs not started since new data is available
+ */
+int qdio_start_irq(struct ccw_device *cdev, int nr)
+{
+	struct qdio_q *q;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = irq_ptr->input_qs[nr];
+
+	WARN_ON(queue_irqs_enabled(q));
+
+	if (!shared_ind(q->irq_ptr))
+		xchg(q->irq_ptr->dsci, 0);
+
+	qdio_stop_polling(q);
+	clear_bit(QDIO_QUEUE_IRQS_DISABLED, &q->u.in.queue_irq_state);
+
+	/*
+	 * We need to check again to not lose initiative after
+	 * resetting the ACK state.
+	 */
+	if (!shared_ind(q->irq_ptr) && *q->irq_ptr->dsci)
+		goto rescan;
+	if (!qdio_inbound_q_done(q))
+		goto rescan;
+	return 0;
+
+rescan:
+	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+			     &q->u.in.queue_irq_state))
+		return 0;
+	else
+		return 1;
+
+}
+EXPORT_SYMBOL(qdio_start_irq);
+
+/**
+ * qdio_get_next_buffers - process input buffers
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ * @bufnr: first filled buffer number
+ * @error: buffers are in error state
+ *
+ * Return codes
+ *   < 0 - error
+ *   = 0 - no new buffers found
+ *   > 0 - number of processed buffers
+ */
+int qdio_get_next_buffers(struct ccw_device *cdev, int nr, int *bufnr,
+			  int *error)
+{
+	struct qdio_q *q;
+	int start, end;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = irq_ptr->input_qs[nr];
+	WARN_ON(queue_irqs_enabled(q));
+
+	qdio_sync_after_thinint(q);
+
+	/*
+	 * The interrupt could be caused by a PCI request. Check the
+	 * PCI capable outbound queues.
+	 */
+	qdio_check_outbound_after_thinint(q);
+
+	if (!qdio_inbound_q_moved(q))
+		return 0;
+
+	/* Note: upper-layer MUST stop processing immediately here ... */
+	if (unlikely(q->irq_ptr->state != QDIO_IRQ_STATE_ACTIVE))
+		return -EIO;
+
+	start = q->first_to_kick;
+	end = q->first_to_check;
+	*bufnr = start;
+	*error = q->qdio_error;
+
+	/* for the next time */
+	q->first_to_kick = end;
+	q->qdio_error = 0;
+	return sub_buf(end, start);
+}
+EXPORT_SYMBOL(qdio_get_next_buffers);
+
+/**
+ * qdio_stop_irq - disable interrupt processing for the device
+ * @cdev: associated ccw_device for the qdio subchannel
+ * @nr: input queue number
+ *
+ * Return codes
+ *   0 - interrupts were already disabled
+ *   1 - interrupts successfully disabled
+ */
+int qdio_stop_irq(struct ccw_device *cdev, int nr)
+{
+	struct qdio_q *q;
+	struct qdio_irq *irq_ptr = cdev->private->qdio_data;
+
+	if (!irq_ptr)
+		return -ENODEV;
+	q = irq_ptr->input_qs[nr];
+
+	if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+			     &q->u.in.queue_irq_state))
+		return 0;
+	else
+		return 1;
+}
+EXPORT_SYMBOL(qdio_stop_irq);
+
 static int __init init_QDIO(void)
 {
 	int rc;
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index 34c7e4046df..a13cf7ec64b 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -161,6 +161,7 @@ static void setup_queues(struct qdio_irq *irq_ptr,
 		setup_queues_misc(q, irq_ptr, qdio_init->input_handler, i);
 
 		q->is_input_q = 1;
+		q->u.in.queue_start_poll = qdio_init->queue_start_poll;
 		setup_storage_lists(q, irq_ptr, input_sbal_array, i);
 		input_sbal_array += QDIO_MAX_BUFFERS_PER_Q;
 
diff --git a/drivers/s390/cio/qdio_thinint.c b/drivers/s390/cio/qdio_thinint.c
index 8daf1b99f15..752dbee06af 100644
--- a/drivers/s390/cio/qdio_thinint.c
+++ b/drivers/s390/cio/qdio_thinint.c
@@ -25,24 +25,20 @@
  */
 #define TIQDIO_NR_NONSHARED_IND		63
 #define TIQDIO_NR_INDICATORS		(TIQDIO_NR_NONSHARED_IND + 1)
-#define TIQDIO_SHARED_IND		63
 
 /* list of thin interrupt input queues */
 static LIST_HEAD(tiq_list);
 DEFINE_MUTEX(tiq_list_lock);
 
 /* adapter local summary indicator */
-static unsigned char *tiqdio_alsi;
+static u8 *tiqdio_alsi;
 
-/* device state change indicators */
-struct indicator_t {
-	u32 ind;	/* u32 because of compare-and-swap performance */
-	atomic_t count; /* use count, 0 or 1 for non-shared indicators */
-};
-static struct indicator_t *q_indicators;
+struct indicator_t *q_indicators;
 
 static int css_qdio_omit_svs;
 
+static u64 last_ai_time;
+
 static inline unsigned long do_clear_global_summary(void)
 {
 	register unsigned long __fn asm("1") = 3;
@@ -116,59 +112,73 @@ void tiqdio_remove_input_queues(struct qdio_irq *irq_ptr)
 	}
 }
 
-static inline int shared_ind(struct qdio_irq *irq_ptr)
+static inline int shared_ind_used(void)
 {
-	return irq_ptr->dsci == &q_indicators[TIQDIO_SHARED_IND].ind;
+	return atomic_read(&q_indicators[TIQDIO_SHARED_IND].count);
 }
 
 /**
  * tiqdio_thinint_handler - thin interrupt handler for qdio
- * @ind: pointer to adapter local summary indicator
- * @drv_data: NULL
+ * @alsi: pointer to adapter local summary indicator
+ * @data: NULL
  */
-static void tiqdio_thinint_handler(void *ind, void *drv_data)
+static void tiqdio_thinint_handler(void *alsi, void *data)
 {
 	struct qdio_q *q;
 
+	last_ai_time = S390_lowcore.int_clock;
+
 	/*
 	 * SVS only when needed: issue SVS to benefit from iqdio interrupt
-	 * avoidance (SVS clears adapter interrupt suppression overwrite)
+	 * avoidance (SVS clears adapter interrupt suppression overwrite).
 	 */
 	if (!css_qdio_omit_svs)
 		do_clear_global_summary();
 
-	/*
-	 * reset local summary indicator (tiqdio_alsi) to stop adapter
-	 * interrupts for now
-	 */
-	xchg((u8 *)ind, 0);
+	/* reset local summary indicator */
+	if (shared_ind_used())
+		xchg(tiqdio_alsi, 0);
 
 	/* protect tiq_list entries, only changed in activate or shutdown */
 	rcu_read_lock();
 
 	/* check for work on all inbound thinint queues */
-	list_for_each_entry_rcu(q, &tiq_list, entry)
+	list_for_each_entry_rcu(q, &tiq_list, entry) {
+
 		/* only process queues from changed sets */
-		if (*q->irq_ptr->dsci) {
-			qperf_inc(q, adapter_int);
+		if (!*q->irq_ptr->dsci)
+			continue;
 
+		if (q->u.in.queue_start_poll) {
+			/* skip if polling is enabled or already in work */
+			if (test_and_set_bit(QDIO_QUEUE_IRQS_DISABLED,
+					     &q->u.in.queue_irq_state)) {
+				qperf_inc(q, int_discarded);
+				continue;
+			}
+
+			/* avoid dsci clear here, done after processing */
+			q->u.in.queue_start_poll(q->irq_ptr->cdev, q->nr,
+						 q->irq_ptr->int_parm);
+		} else {
 			/* only clear it if the indicator is non-shared */
 			if (!shared_ind(q->irq_ptr))
 				xchg(q->irq_ptr->dsci, 0);
 			/*
-			 * don't call inbound processing directly since
-			 * that could starve other thinint queues
+			 * Call inbound processing but not directly
+			 * since that could starve other thinint queues.
 			 */
 			tasklet_schedule(&q->tasklet);
 		}
-
+		qperf_inc(q, adapter_int);
+	}
 	rcu_read_unlock();
 
 	/*
-	 * if we used the shared indicator clear it now after all queues
-	 * were processed
+	 * If the shared indicator was used clear it now after all queues
+	 * were processed.
 	 */
-	if (atomic_read(&q_indicators[TIQDIO_SHARED_IND].count)) {
+	if (shared_ind_used()) {
 		xchg(&q_indicators[TIQDIO_SHARED_IND].ind, 0);
 
 		/* prevent racing */
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index b2635759721..da54a28a1b8 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -277,16 +277,12 @@ int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req)
 static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
 				      struct zfcp_qdio *qdio)
 {
-
+	memset(id, 0, sizeof(*id));
 	id->cdev = qdio->adapter->ccw_device;
 	id->q_format = QDIO_ZFCP_QFMT;
 	memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
 	ASCEBC(id->adapter_name, 8);
 	id->qib_rflags = QIB_RFLAGS_ENABLE_DATA_DIV;
-	id->qib_param_field_format = 0;
-	id->qib_param_field = NULL;
-	id->input_slib_elements = NULL;
-	id->output_slib_elements = NULL;
 	id->no_input_qs = 1;
 	id->no_output_qs = 1;
 	id->input_handler = zfcp_qdio_int_resp;