Diffstat (limited to 'drivers/s390')
 -rw-r--r--  drivers/s390/block/dasd.c         | 12
 -rw-r--r--  drivers/s390/block/dcssblk.c      |  2
 -rw-r--r--  drivers/s390/char/sclp.c          | 12
 -rw-r--r--  drivers/s390/char/sclp.h          |  6
 -rw-r--r--  drivers/s390/char/sclp_config.c   |  2
 -rw-r--r--  drivers/s390/char/sclp_cpi_sys.c  |  2
 -rw-r--r--  drivers/s390/char/sclp_rw.c       |  4
 -rw-r--r--  drivers/s390/char/sclp_vt220.c    |  2
 -rw-r--r--  drivers/s390/cio/device.c         | 15
 -rw-r--r--  drivers/s390/cio/qdio.c           | 13
 -rw-r--r--  drivers/s390/cio/qdio.h           |  2
 -rw-r--r--  drivers/s390/net/claw.c           | 39
 12 files changed, 56 insertions(+), 55 deletions(-)
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index d984e0fae630..ccf46c96adb4 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -1149,12 +1149,14 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
 {
        struct list_head *l, *n;
        struct dasd_ccw_req *cqr;
+       struct dasd_block *block;
 
        list_for_each_safe(l, n, final_queue) {
                cqr = list_entry(l, struct dasd_ccw_req, devlist);
                list_del_init(&cqr->devlist);
-               if (cqr->block)
-                       spin_lock_bh(&cqr->block->queue_lock);
+               block = cqr->block;
+               if (block)
+                       spin_lock_bh(&block->queue_lock);
                switch (cqr->status) {
                case DASD_CQR_SUCCESS:
                        cqr->status = DASD_CQR_DONE;
@@ -1172,15 +1174,13 @@ static void __dasd_device_process_final_queue(struct dasd_device *device,
                            cqr, cqr->status);
                        BUG();
                }
-               if (cqr->block)
-                       spin_unlock_bh(&cqr->block->queue_lock);
                if (cqr->callback != NULL)
                        (cqr->callback)(cqr, cqr->callback_data);
+               if (block)
+                       spin_unlock_bh(&block->queue_lock);
        }
 }
 
-
-
 /*
  * Take a look at the first request on the ccw queue and check
  * if it reached its expire time. If so, terminate the IO.
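The dasd.c change above snapshots cqr->block into a local before the request is handed back, and moves the unlock below the callback so the callback now runs with block->queue_lock held and the block pointer is never re-read from a possibly recycled request. A minimal sketch of that snapshot-then-widen-the-lock pattern, using hypothetical types (item, parent, finish_cb) rather than the driver's real structures:

#include <linux/spinlock.h>

struct parent { spinlock_t queue_lock; };
struct item {
        struct parent *parent;                  /* may be NULL */
        void (*finish_cb)(struct item *);
};

static void finalize(struct item *it)
{
        struct parent *p = it->parent;          /* snapshot: stays valid even if
                                                 * the callback recycles 'it' */
        if (p)
                spin_lock_bh(&p->queue_lock);
        /* ... final state update ... */
        if (it->finish_cb)
                it->finish_cb(it);              /* now runs under the lock */
        if (p)
                spin_unlock_bh(&p->queue_lock);
}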
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 3faf0538b328..e6c94dbfdeaa 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -666,7 +666,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
                page_addr = (unsigned long)
                        page_address(bvec->bv_page) + bvec->bv_offset;
                source_addr = dev_info->start + (index<<12) + bytes_done;
-               if (unlikely(page_addr & 4095) != 0 || (bvec->bv_len & 4095) != 0)
+               if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0)
                        // More paranoia.
                        goto fail;
                if (bio_data_dir(bio) == READ) {
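The dcssblk.c fix moves the != 0 comparison inside the unlikely() annotation. Since unlikely(x) expands to __builtin_expect(!!(x), 0), both spellings test the same condition; the fix parenthesizes it the way it was evidently intended, so the whole alignment predicate sits inside the hint. A standalone illustration in plain C (the addr value is made up):

#include <stdio.h>

/* Same definition the kernel uses in include/linux/compiler.h. */
#define unlikely(x)     __builtin_expect(!!(x), 0)

int main(void)
{
        unsigned long addr = 0x1000;    /* hypothetical, page aligned */

        /* old spelling: hint wraps only the mask, comparison sits outside */
        if (unlikely(addr & 4095) != 0)
                printf("unaligned (old spelling)\n");

        /* new spelling: the whole predicate is inside the hint */
        if (unlikely((addr & 4095) != 0))
                printf("unaligned (new spelling)\n");

        return 0;
}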
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c
index 25629b92dec3..2c7a1ee6b041 100644
--- a/drivers/s390/char/sclp.c
+++ b/drivers/s390/char/sclp.c
@@ -29,10 +29,10 @@ static ext_int_info_t ext_int_info_hwc;
 /* Lock to protect internal data consistency. */
 static DEFINE_SPINLOCK(sclp_lock);
 
-/* Mask of events that we can receive from the sclp interface. */
+/* Mask of events that we can send to the sclp interface. */
 static sccb_mask_t sclp_receive_mask;
 
-/* Mask of events that we can send to the sclp interface. */
+/* Mask of events that we can receive from the sclp interface. */
 static sccb_mask_t sclp_send_mask;
 
 /* List of registered event listeners and senders. */
@@ -380,7 +380,7 @@ sclp_interrupt_handler(__u16 code)
                }
                sclp_running_state = sclp_running_state_idle;
        }
-       if (evbuf_pending && sclp_receive_mask != 0 &&
+       if (evbuf_pending &&
            sclp_activation_state == sclp_activation_state_active)
                __sclp_queue_read_req();
        spin_unlock(&sclp_lock);
@@ -459,8 +459,8 @@ sclp_dispatch_state_change(void)
        reg = NULL;
        list_for_each(l, &sclp_reg_list) {
                reg = list_entry(l, struct sclp_register, list);
-               receive_mask = reg->receive_mask & sclp_receive_mask;
-               send_mask = reg->send_mask & sclp_send_mask;
+               receive_mask = reg->send_mask & sclp_receive_mask;
+               send_mask = reg->receive_mask & sclp_send_mask;
                if (reg->sclp_receive_mask != receive_mask ||
                    reg->sclp_send_mask != send_mask) {
                        reg->sclp_receive_mask = receive_mask;
@@ -615,8 +615,8 @@ struct init_sccb {
        u16 mask_length;
        sccb_mask_t receive_mask;
        sccb_mask_t send_mask;
-       sccb_mask_t sclp_send_mask;
        sccb_mask_t sclp_receive_mask;
+       sccb_mask_t sclp_send_mask;
 } __attribute__((packed));
 
 /* Prepare init mask request. Called while sclp_lock is locked. */
diff --git a/drivers/s390/char/sclp.h b/drivers/s390/char/sclp.h
index aa8186d18aee..bac80e856f97 100644
--- a/drivers/s390/char/sclp.h
+++ b/drivers/s390/char/sclp.h
@@ -122,11 +122,13 @@ struct sclp_req {
 /* of some routines it wants to be called from the low level driver */
 struct sclp_register {
        struct list_head list;
-       /* event masks this user is registered for */
+       /* User wants to receive: */
        sccb_mask_t receive_mask;
+       /* User wants to send: */
        sccb_mask_t send_mask;
-       /* actually present events */
+       /* H/W can receive: */
        sccb_mask_t sclp_receive_mask;
+       /* H/W can send: */
        sccb_mask_t sclp_send_mask;
        /* called if event type availability changes */
        void (*state_change_fn)(struct sclp_register *);
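As the new comments spell out, after registration sclp_send_mask reports which event types the SCLP hardware can send to us, and sclp_receive_mask which ones it can accept from us; the driver hunks below switch their checks accordingly. A hedged sketch of the resulting registration pattern, modelled on the sclp_config.c hunk further down (my_register and my_init are illustrative names):

#include "sclp.h"

static struct sclp_register my_register = {
        /* we want to be handed configuration-management data events */
        .receive_mask = EVTYP_CONFMGMDATA_MASK,
};

static int my_init(void)
{
        int rc;

        rc = sclp_register(&my_register);
        if (rc)
                return rc;
        /* can the SCLP actually send us that event type? */
        if (!(my_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) {
                sclp_unregister(&my_register);
                return -ENOSYS;
        }
        return 0;
}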
diff --git a/drivers/s390/char/sclp_config.c b/drivers/s390/char/sclp_config.c
index 9dc77f14fa52..b8f35bc52b7b 100644
--- a/drivers/s390/char/sclp_config.c
+++ b/drivers/s390/char/sclp_config.c
@@ -64,7 +64,7 @@ static int __init sclp_conf_init(void)
                return rc;
        }
 
-       if (!(sclp_conf_register.sclp_receive_mask & EVTYP_CONFMGMDATA_MASK)) {
+       if (!(sclp_conf_register.sclp_send_mask & EVTYP_CONFMGMDATA_MASK)) {
                printk(KERN_WARNING TAG "no configuration management.\n");
                sclp_unregister(&sclp_conf_register);
                rc = -ENOSYS;
diff --git a/drivers/s390/char/sclp_cpi_sys.c b/drivers/s390/char/sclp_cpi_sys.c
index 41617032afdc..9f37456222e9 100644
--- a/drivers/s390/char/sclp_cpi_sys.c
+++ b/drivers/s390/char/sclp_cpi_sys.c
@@ -129,7 +129,7 @@ static int cpi_req(void)
                       "to hardware console.\n");
                goto out;
        }
-       if (!(sclp_cpi_event.sclp_send_mask & EVTYP_CTLPROGIDENT_MASK)) {
+       if (!(sclp_cpi_event.sclp_receive_mask & EVTYP_CTLPROGIDENT_MASK)) {
                printk(KERN_WARNING "cpi: no control program "
                       "identification support\n");
                rc = -EOPNOTSUPP;
diff --git a/drivers/s390/char/sclp_rw.c b/drivers/s390/char/sclp_rw.c
index ad7195d3de0c..da09781b32f7 100644
--- a/drivers/s390/char/sclp_rw.c
+++ b/drivers/s390/char/sclp_rw.c
@@ -452,10 +452,10 @@ sclp_emit_buffer(struct sclp_buffer *buffer,
                return -EIO;
 
        sccb = buffer->sccb;
-       if (sclp_rw_event.sclp_send_mask & EVTYP_MSG_MASK)
+       if (sclp_rw_event.sclp_receive_mask & EVTYP_MSG_MASK)
                /* Use normal write message */
                sccb->msg_buf.header.type = EVTYP_MSG;
-       else if (sclp_rw_event.sclp_send_mask & EVTYP_PMSGCMD_MASK)
+       else if (sclp_rw_event.sclp_receive_mask & EVTYP_PMSGCMD_MASK)
                /* Use write priority message */
                sccb->msg_buf.header.type = EVTYP_PMSGCMD;
        else
diff --git a/drivers/s390/char/sclp_vt220.c b/drivers/s390/char/sclp_vt220.c
index f47f4a768be5..92f527201792 100644
--- a/drivers/s390/char/sclp_vt220.c
+++ b/drivers/s390/char/sclp_vt220.c
@@ -202,7 +202,7 @@ sclp_vt220_callback(struct sclp_req *request, void *data)
 static int
 __sclp_vt220_emit(struct sclp_vt220_request *request)
 {
-       if (!(sclp_vt220_register.sclp_send_mask & EVTYP_VT220MSG_MASK)) {
+       if (!(sclp_vt220_register.sclp_receive_mask & EVTYP_VT220MSG_MASK)) {
                request->sclp_req.status = SCLP_REQ_FAILED;
                return -EIO;
        }
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index d35dc3f25d06..fec004f62bcf 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -32,7 +32,7 @@
 #include "io_sch.h"
 
 static struct timer_list recovery_timer;
-static spinlock_t recovery_lock;
+static DEFINE_SPINLOCK(recovery_lock);
 static int recovery_phase;
 static const unsigned long recovery_delay[] = { 3, 30, 300 };
 
@@ -1535,7 +1535,7 @@ static int recovery_check(struct device *dev, void *data)
        return 0;
 }
 
-static void recovery_func(unsigned long data)
+static void recovery_work_func(struct work_struct *unused)
 {
        int redo = 0;
 
@@ -1553,6 +1553,17 @@ static void recovery_func(unsigned long data)
        CIO_MSG_EVENT(2, "recovery: end\n");
 }
 
+static DECLARE_WORK(recovery_work, recovery_work_func);
+
+static void recovery_func(unsigned long data)
+{
+       /*
+        * We can't do our recovery in softirq context and it's not
+        * performance critical, so we schedule it.
+        */
+       schedule_work(&recovery_work);
+}
+
 void ccw_device_schedule_recovery(void)
 {
        unsigned long flags;
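recovery_func() is a timer callback and therefore runs in softirq context; the hunk above keeps the timer but has it merely schedule a work item, so the actual recovery runs in process context where it may sleep. A self-contained sketch of the same deferral pattern with hypothetical names (heavy_work, my_timer_fn); the work-function signature matches the DECLARE_WORK style used above:

#include <linux/timer.h>
#include <linux/workqueue.h>

static void heavy_work_func(struct work_struct *unused)
{
        /* process context: may sleep, take mutexes, hold device locks, ... */
}
static DECLARE_WORK(heavy_work, heavy_work_func);

static void my_timer_fn(unsigned long data)
{
        /* softirq context: must not sleep, so hand the real work off */
        schedule_work(&heavy_work);
}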
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c
index 097fc0967e9d..2b5bfb7c69e5 100644
--- a/drivers/s390/cio/qdio.c
+++ b/drivers/s390/cio/qdio.c
@@ -32,7 +32,7 @@
 
 #include <linux/module.h>
 #include <linux/init.h>
-
+#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
 #include <linux/proc_fs.h>
@@ -1215,9 +1215,6 @@ tiqdio_is_inbound_q_done(struct qdio_q *q)
 
        if (!no_used)
                return 1;
-       if (!q->siga_sync && !irq->is_qebsm)
-               /* we'll check for more primed buffers in qeth_stop_polling */
-               return 0;
        if (irq->is_qebsm) {
                count = 1;
                start_buf = q->first_to_check;
@@ -3332,13 +3329,7 @@ qdio_activate(struct ccw_device *cdev, int flags)
                }
        }
 
-       wait_event_interruptible_timeout(cdev->private->wait_q,
-                                        ((irq_ptr->state ==
-                                         QDIO_IRQ_STATE_STOPPED) ||
-                                         (irq_ptr->state ==
-                                          QDIO_IRQ_STATE_ERR)),
-                                        QDIO_ACTIVATE_TIMEOUT);
-
+       msleep(QDIO_ACTIVATE_TIMEOUT);
        switch (irq_ptr->state) {
        case QDIO_IRQ_STATE_STOPPED:
        case QDIO_IRQ_STATE_ERR:
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 37870e4e938e..da8a272fd75b 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -57,10 +57,10 @@
                                           of the queue to 0 */
 
 #define QDIO_ESTABLISH_TIMEOUT (1*HZ)
-#define QDIO_ACTIVATE_TIMEOUT (5*HZ)
 #define QDIO_CLEANUP_CLEAR_TIMEOUT (20*HZ)
 #define QDIO_CLEANUP_HALT_TIMEOUT (10*HZ)
 #define QDIO_FORCE_CHECK_TIMEOUT (10*HZ)
+#define QDIO_ACTIVATE_TIMEOUT (5) /* 5 ms */
 
 enum qdio_irq_states {
        QDIO_IRQ_STATE_INACTIVE,
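Note the unit change that goes with the qdio.c hunk above: wait_event_interruptible_timeout() takes its timeout in jiffies, whereas msleep() takes milliseconds, so QDIO_ACTIVATE_TIMEOUT drops from 5*HZ (an upper bound of roughly five seconds on the wait) to a plain 5 for a fixed 5 ms sleep. A small illustration of the two units, assuming a kernel context; jiffies_to_msecs() is the stock conversion helper:

#include <linux/delay.h>
#include <linux/jiffies.h>

#define OLD_ACTIVATE_TIMEOUT    (5 * HZ)        /* jiffies: ~5 seconds */
#define NEW_ACTIVATE_TIMEOUT    (5)             /* milliseconds: 5 ms  */

static void activate_settle_example(void)
{
        msleep(NEW_ACTIVATE_TIMEOUT);                   /* sleeps about 5 ms */
        msleep(jiffies_to_msecs(OLD_ACTIVATE_TIMEOUT)); /* would sleep ~5 s  */
}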
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index c3076217871e..d8a5c229c5a7 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -1851,8 +1851,7 @@ claw_hw_tx(struct sk_buff *skb, struct net_device *dev, long linkid)
                }
        }
        /* See how many write buffers are required to hold this data */
-       numBuffers= ( skb->len + privptr->p_env->write_size - 1) /
-               ( privptr->p_env->write_size);
+       numBuffers = DIV_ROUND_UP(skb->len, privptr->p_env->write_size);
 
        /* If that number of buffers isn't available, give up for now */
        if (privptr->write_free_count < numBuffers ||
@@ -2114,8 +2113,7 @@ init_ccw_bk(struct net_device *dev)
        */
        ccw_blocks_perpage= PAGE_SIZE / CCWBK_SIZE;
        ccw_pages_required=
-               (ccw_blocks_required+ccw_blocks_perpage -1) /
-               ccw_blocks_perpage;
+               DIV_ROUND_UP(ccw_blocks_required, ccw_blocks_perpage);
 
 #ifdef DEBUGMSG
        printk(KERN_INFO "%s: %s() > ccw_blocks_perpage=%d\n",
@@ -2131,30 +2129,29 @@ init_ccw_bk(struct net_device *dev)
        * provide good performance. With packing buffers support 32k
        * buffers are used.
        */
        if (privptr->p_env->read_size < PAGE_SIZE) {
-               claw_reads_perpage= PAGE_SIZE / privptr->p_env->read_size;
-               claw_read_pages= (privptr->p_env->read_buffers +
-                       claw_reads_perpage -1) / claw_reads_perpage;
+               claw_reads_perpage = PAGE_SIZE / privptr->p_env->read_size;
+               claw_read_pages = DIV_ROUND_UP(privptr->p_env->read_buffers,
+                                              claw_reads_perpage);
        }
        else { /* > or equal */
-               privptr->p_buff_pages_perread=
-                       (privptr->p_env->read_size + PAGE_SIZE - 1) / PAGE_SIZE;
-               claw_read_pages=
-                       privptr->p_env->read_buffers * privptr->p_buff_pages_perread;
+               privptr->p_buff_pages_perread =
+                       DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
+               claw_read_pages = privptr->p_env->read_buffers *
+                                       privptr->p_buff_pages_perread;
        }
        if (privptr->p_env->write_size < PAGE_SIZE) {
-               claw_writes_perpage=
+               claw_writes_perpage =
                        PAGE_SIZE / privptr->p_env->write_size;
-               claw_write_pages=
-                       (privptr->p_env->write_buffers + claw_writes_perpage -1) /
-                       claw_writes_perpage;
+               claw_write_pages = DIV_ROUND_UP(privptr->p_env->write_buffers,
+                                               claw_writes_perpage);
 
        }
        else { /* > or equal */
-               privptr->p_buff_pages_perwrite=
-                       (privptr->p_env->read_size + PAGE_SIZE - 1) / PAGE_SIZE;
-               claw_write_pages=
-                       privptr->p_env->write_buffers * privptr->p_buff_pages_perwrite;
+               privptr->p_buff_pages_perwrite =
+                       DIV_ROUND_UP(privptr->p_env->read_size, PAGE_SIZE);
+               claw_write_pages = privptr->p_env->write_buffers *
+                                       privptr->p_buff_pages_perwrite;
        }
 #ifdef DEBUGMSG
        if (privptr->p_env->read_size < PAGE_SIZE) {
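All of the claw.c hunks replace the open-coded (n + d - 1) / d rounding with the kernel's DIV_ROUND_UP() macro from include/linux/kernel.h. For reference, a small standalone check of the equivalence; the 10000-byte and 4096-byte figures are made up for illustration:

#include <assert.h>

/* Same definition as include/linux/kernel.h. */
#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

int main(void)
{
        unsigned int skb_len = 10000;   /* hypothetical packet size */
        unsigned int write_size = 4096; /* hypothetical buffer size */

        /* open-coded form replaced by the patch */
        unsigned int old_way = (skb_len + write_size - 1) / write_size;

        assert(DIV_ROUND_UP(skb_len, write_size) == old_way);  /* both are 3 */
        return 0;
}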