path: root/drivers/s390
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd.c          194
-rw-r--r--  drivers/s390/block/dasd_devmap.c    86
-rw-r--r--  drivers/s390/block/dasd_eckd.c      22
-rw-r--r--  drivers/s390/block/dasd_genhd.c     10
-rw-r--r--  drivers/s390/block/xpram.c          25
-rw-r--r--  drivers/s390/char/tape_class.c       2
-rw-r--r--  drivers/s390/cio/ccwgroup.c         24
-rw-r--r--  drivers/s390/cio/chsc.c             10
-rw-r--r--  drivers/s390/cio/device.c           19
-rw-r--r--  drivers/s390/cio/device_fsm.c       24
-rw-r--r--  drivers/s390/cio/device_ops.c        3
-rw-r--r--  drivers/s390/cio/device_pgid.c      27
-rw-r--r--  drivers/s390/net/qeth_main.c         8
-rw-r--r--  drivers/s390/scsi/zfcp_aux.c       120
-rw-r--r--  drivers/s390/scsi/zfcp_ccw.c         5
-rw-r--r--  drivers/s390/scsi/zfcp_def.h        15
-rw-r--r--  drivers/s390/scsi/zfcp_erp.c       212
-rw-r--r--  drivers/s390/scsi/zfcp_ext.h         9
-rw-r--r--  drivers/s390/scsi/zfcp_fsf.c       122
-rw-r--r--  drivers/s390/scsi/zfcp_qdio.c       79
-rw-r--r--  drivers/s390/scsi/zfcp_scsi.c       73
21 files changed, 589 insertions, 500 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 4bf03fb67f8d..25c1ef6dfd44 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -52,7 +52,7 @@ static void dasd_setup_queue(struct dasd_device * device);
 static void dasd_free_queue(struct dasd_device * device);
 static void dasd_flush_request_queue(struct dasd_device *);
 static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *);
-static void dasd_flush_ccw_queue(struct dasd_device *, int);
+static int dasd_flush_ccw_queue(struct dasd_device *, int);
 static void dasd_tasklet(struct dasd_device *);
 static void do_kick_device(void *data);
 
@@ -60,6 +60,7 @@ static void do_kick_device(void *data);
  * SECTION: Operations on the device structure.
  */
 static wait_queue_head_t dasd_init_waitq;
+static wait_queue_head_t dasd_flush_wq;
 
 /*
  * Allocate memory for a new device structure.
@@ -121,7 +122,7 @@ dasd_free_device(struct dasd_device *device)
 /*
  * Make a new device known to the system.
  */
-static inline int
+static int
 dasd_state_new_to_known(struct dasd_device *device)
 {
 	int rc;
@@ -145,7 +146,7 @@ dasd_state_new_to_known(struct dasd_device *device)
 /*
  * Let the system forget about a device.
  */
-static inline void
+static int
 dasd_state_known_to_new(struct dasd_device * device)
 {
 	/* Disable extended error reporting for this device. */
@@ -163,12 +164,13 @@ dasd_state_known_to_new(struct dasd_device * device)
 
 	/* Give up reference we took in dasd_state_new_to_known. */
 	dasd_put_device(device);
+	return 0;
 }
 
 /*
  * Request the irq line for the device.
  */
-static inline int
+static int
 dasd_state_known_to_basic(struct dasd_device * device)
 {
 	int rc;
@@ -192,17 +194,23 @@ dasd_state_known_to_basic(struct dasd_device * device)
 /*
  * Release the irq line for the device. Terminate any running i/o.
  */
-static inline void
+static int
 dasd_state_basic_to_known(struct dasd_device * device)
 {
+	int rc;
+
 	dasd_gendisk_free(device);
-	dasd_flush_ccw_queue(device, 1);
+	rc = dasd_flush_ccw_queue(device, 1);
+	if (rc)
+		return rc;
+
 	DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device);
 	if (device->debug_area != NULL) {
 		debug_unregister(device->debug_area);
 		device->debug_area = NULL;
 	}
 	device->state = DASD_STATE_KNOWN;
+	return 0;
 }
 
 /*
@@ -219,7 +227,7 @@ dasd_state_basic_to_known(struct dasd_device * device)
  * In case the analysis returns an error, the device setup is stopped
  * (a fake disk was already added to allow formatting).
  */
-static inline int
+static int
 dasd_state_basic_to_ready(struct dasd_device * device)
 {
 	int rc;
@@ -247,25 +255,31 @@ dasd_state_basic_to_ready(struct dasd_device * device)
  * Forget format information. Check if the target level is basic
  * and if it is create fake disk for formatting.
  */
-static inline void
+static int
 dasd_state_ready_to_basic(struct dasd_device * device)
 {
-	dasd_flush_ccw_queue(device, 0);
+	int rc;
+
+	rc = dasd_flush_ccw_queue(device, 0);
+	if (rc)
+		return rc;
 	dasd_destroy_partitions(device);
 	dasd_flush_request_queue(device);
 	device->blocks = 0;
 	device->bp_block = 0;
 	device->s2b_shift = 0;
 	device->state = DASD_STATE_BASIC;
+	return 0;
 }
 
 /*
  * Back to basic.
  */
-static inline void
+static int
 dasd_state_unfmt_to_basic(struct dasd_device * device)
 {
 	device->state = DASD_STATE_BASIC;
+	return 0;
 }
 
 /*
@@ -273,7 +287,7 @@ dasd_state_unfmt_to_basic(struct dasd_device * device)
  * the requeueing of requests from the linux request queue to the
  * ccw queue.
  */
-static inline int
+static int
 dasd_state_ready_to_online(struct dasd_device * device)
 {
 	device->state = DASD_STATE_ONLINE;
@@ -284,16 +298,17 @@ dasd_state_ready_to_online(struct dasd_device * device)
 /*
  * Stop the requeueing of requests again.
  */
-static inline void
+static int
 dasd_state_online_to_ready(struct dasd_device * device)
 {
 	device->state = DASD_STATE_READY;
+	return 0;
 }
 
 /*
  * Device startup state changes.
  */
-static inline int
+static int
 dasd_increase_state(struct dasd_device *device)
 {
 	int rc;
@@ -329,30 +344,37 @@ dasd_increase_state(struct dasd_device *device)
 /*
  * Device shutdown state changes.
  */
-static inline int
+static int
 dasd_decrease_state(struct dasd_device *device)
 {
+	int rc;
+
+	rc = 0;
 	if (device->state == DASD_STATE_ONLINE &&
 	    device->target <= DASD_STATE_READY)
-		dasd_state_online_to_ready(device);
+		rc = dasd_state_online_to_ready(device);
 
-	if (device->state == DASD_STATE_READY &&
+	if (!rc &&
+	    device->state == DASD_STATE_READY &&
 	    device->target <= DASD_STATE_BASIC)
-		dasd_state_ready_to_basic(device);
+		rc = dasd_state_ready_to_basic(device);
 
-	if (device->state == DASD_STATE_UNFMT &&
+	if (!rc &&
+	    device->state == DASD_STATE_UNFMT &&
	    device->target <= DASD_STATE_BASIC)
-		dasd_state_unfmt_to_basic(device);
+		rc = dasd_state_unfmt_to_basic(device);
 
-	if (device->state == DASD_STATE_BASIC &&
+	if (!rc &&
+	    device->state == DASD_STATE_BASIC &&
 	    device->target <= DASD_STATE_KNOWN)
-		dasd_state_basic_to_known(device);
+		rc = dasd_state_basic_to_known(device);
 
-	if (device->state == DASD_STATE_KNOWN &&
+	if (!rc &&
+	    device->state == DASD_STATE_KNOWN &&
 	    device->target <= DASD_STATE_NEW)
-		dasd_state_known_to_new(device);
+		rc = dasd_state_known_to_new(device);
 
-	return 0;
+	return rc;
 }
 
 /*
@@ -701,6 +723,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr)
 			cqr->retries--;
 			cqr->status = DASD_CQR_CLEAR;
 			cqr->stopclk = get_clock();
+			cqr->starttime = 0;
 			DBF_DEV_EVENT(DBF_DEBUG, device,
 				      "terminate cqr %p successful",
 				      cqr);
@@ -978,6 +1001,7 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm,
 	    irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) {
 		cqr->status = DASD_CQR_QUEUED;
 		dasd_clear_timer(device);
+		wake_up(&dasd_flush_wq);
 		dasd_schedule_bh(device);
 		return;
 	}
@@ -1241,6 +1265,10 @@ __dasd_check_expire(struct dasd_device * device)
 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
 	if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) {
 		if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) {
+			DEV_MESSAGE(KERN_ERR, device,
+				    "internal error - timeout (%is) expired "
+				    "for cqr %p (%i retries left)",
+				    (cqr->expires/HZ), cqr, cqr->retries);
 			if (device->discipline->term_IO(cqr) != 0)
 				/* Hmpf, try again in 1/10 sec */
 				dasd_set_timer(device, 10);
@@ -1285,46 +1313,100 @@ __dasd_start_head(struct dasd_device * device)
 		dasd_set_timer(device, 50);
 }
 
+static inline int
+_wait_for_clear(struct dasd_ccw_req *cqr)
+{
+	return (cqr->status == DASD_CQR_QUEUED);
+}
+
 /*
- * Remove requests from the ccw queue.
+ * Remove all requests from the ccw queue (all = '1') or only block device
+ * requests in case all = '0'.
+ * Take care of the erp-chain (chained via cqr->refers) and remove either
+ * the whole erp-chain or none of the erp-requests.
+ * If a request is currently running, term_IO is called and the request
+ * is re-queued. Prior to removing the terminated request we need to wait
+ * for the clear-interrupt.
+ * In case termination is not possible we stop processing and just finishing
+ * the already moved requests.
  */
-static void
+static int
 dasd_flush_ccw_queue(struct dasd_device * device, int all)
 {
+	struct dasd_ccw_req *cqr, *orig, *n;
+	int rc, i;
+
 	struct list_head flush_queue;
-	struct list_head *l, *n;
-	struct dasd_ccw_req *cqr;
 
 	INIT_LIST_HEAD(&flush_queue);
 	spin_lock_irq(get_ccwdev_lock(device->cdev));
-	list_for_each_safe(l, n, &device->ccw_queue) {
-		cqr = list_entry(l, struct dasd_ccw_req, list);
+	rc = 0;
+restart:
+	list_for_each_entry_safe(cqr, n, &device->ccw_queue, list) {
+		/* get original request of erp request-chain */
+		for (orig = cqr; orig->refers != NULL; orig = orig->refers);
+
 		/* Flush all request or only block device requests? */
-		if (all == 0 && cqr->callback == dasd_end_request_cb)
+		if (all == 0 && cqr->callback != dasd_end_request_cb &&
+		    orig->callback != dasd_end_request_cb) {
 			continue;
-		if (cqr->status == DASD_CQR_IN_IO)
-			device->discipline->term_IO(cqr);
-		if (cqr->status != DASD_CQR_DONE ||
-		    cqr->status != DASD_CQR_FAILED) {
-			cqr->status = DASD_CQR_FAILED;
+		}
+		/* Check status and move request to flush_queue */
+		switch (cqr->status) {
+		case DASD_CQR_IN_IO:
+			rc = device->discipline->term_IO(cqr);
+			if (rc) {
+				/* unable to terminate requeust */
+				DEV_MESSAGE(KERN_ERR, device,
+					    "dasd flush ccw_queue is unable "
+					    " to terminate request %p",
+					    cqr);
+				/* stop flush processing */
+				goto finished;
+			}
+			break;
+		case DASD_CQR_QUEUED:
+		case DASD_CQR_ERROR:
+			/* set request to FAILED */
 			cqr->stopclk = get_clock();
+			cqr->status = DASD_CQR_FAILED;
+			break;
+		default: /* do not touch the others */
+			break;
+		}
+		/* Rechain request (including erp chain) */
+		for (i = 0; cqr != NULL; cqr = cqr->refers, i++) {
+			cqr->endclk = get_clock();
+			list_move_tail(&cqr->list, &flush_queue);
+		}
+		if (i > 1)
+			/* moved more than one request - need to restart */
+			goto restart;
+	}
+
+finished:
+	spin_unlock_irq(get_ccwdev_lock(device->cdev));
+	/* Now call the callback function of flushed requests */
+restart_cb:
+	list_for_each_entry_safe(cqr, n, &flush_queue, list) {
+		if (cqr->status == DASD_CQR_CLEAR) {
+			/* wait for clear interrupt! */
+			wait_event(dasd_flush_wq, _wait_for_clear(cqr));
+			cqr->status = DASD_CQR_FAILED;
 		}
 		/* Process finished ERP request. */
 		if (cqr->refers) {
 			__dasd_process_erp(device, cqr);
-			continue;
+			/* restart list_for_xx loop since dasd_process_erp
+			 * might remove multiple elements */
+			goto restart_cb;
 		}
-		/* Rechain request on device request queue */
+		/* call the callback function */
 		cqr->endclk = get_clock();
-		list_move_tail(&cqr->list, &flush_queue);
-	}
-	spin_unlock_irq(get_ccwdev_lock(device->cdev));
-	/* Now call the callback function of flushed requests */
-	list_for_each_safe(l, n, &flush_queue) {
-		cqr = list_entry(l, struct dasd_ccw_req, list);
 		if (cqr->callback != NULL)
 			(cqr->callback)(cqr, cqr->callback_data);
 	}
+	return rc;
 }
 
 /*
@@ -1510,10 +1592,8 @@ dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr)
 		if (device->discipline->term_IO) {
 			cqr->retries = -1;
 			device->discipline->term_IO(cqr);
-			/*nished =
-			 * wait (non-interruptible) for final status
-			 * because signal ist still pending
-			 */
+			/* wait (non-interruptible) for final status
+			 * because signal ist still pending */
 			spin_unlock_irq(get_ccwdev_lock(device->cdev));
 			wait_event(wait_q, _wait_for_wakeup(cqr));
 			spin_lock_irq(get_ccwdev_lock(device->cdev));
@@ -1546,19 +1626,11 @@ static inline int
 _dasd_term_running_cqr(struct dasd_device *device)
 {
 	struct dasd_ccw_req *cqr;
-	int rc;
 
 	if (list_empty(&device->ccw_queue))
 		return 0;
 	cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list);
-	rc = device->discipline->term_IO(cqr);
-	if (rc == 0) {
-		/* termination successful */
-		cqr->status = DASD_CQR_QUEUED;
-		cqr->startclk = cqr->stopclk = 0;
-		cqr->starttime = 0;
-	}
-	return rc;
+	return device->discipline->term_IO(cqr);
 }
 
 int
@@ -1726,12 +1798,9 @@ dasd_flush_request_queue(struct dasd_device * device)
 		return;
 
 	spin_lock_irq(&device->request_queue_lock);
-	while (!list_empty(&device->request_queue->queue_head)) {
-		req = elv_next_request(device->request_queue);
-		if (req == NULL)
-			break;
-		dasd_end_request(req, 0);
+	while ((req = elv_next_request(device->request_queue))) {
 		blkdev_dequeue_request(req);
+		dasd_end_request(req, 0);
 	}
 	spin_unlock_irq(&device->request_queue_lock);
 }
@@ -2091,6 +2160,7 @@ dasd_init(void)
 	int rc;
 
 	init_waitqueue_head(&dasd_init_waitq);
+	init_waitqueue_head(&dasd_flush_wq);
 
 	/* register 'common' DASD debug area, used for all DBF_XXX calls */
 	dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long));
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index 7f6fdac74706..9af02c79ce8a 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -48,18 +48,20 @@ struct dasd_devmap {
 };
 
 /*
- * dasd_servermap is used to store the server_id of all storage servers
- * accessed by DASD device driver.
+ * dasd_server_ssid_map contains a globally unique storage server subsystem ID.
+ * dasd_server_ssid_list contains the list of all subsystem IDs accessed by
+ * the DASD device driver.
  */
-struct dasd_servermap {
+struct dasd_server_ssid_map {
 	struct list_head list;
-	struct server_id {
+	struct system_id {
 		char vendor[4];
 		char serial[15];
+		__u16 ssid;
 	} sid;
 };
 
-static struct list_head dasd_serverlist;
+static struct list_head dasd_server_ssid_list;
 
 /*
  * Parameter parsing functions for dasd= parameter. The syntax is:
@@ -89,7 +91,7 @@ static char *dasd[256];
 module_param_array(dasd, charp, NULL, 0);
 
 /*
- * Single spinlock to protect devmap structures and lists.
+ * Single spinlock to protect devmap and servermap structures and lists.
  */
 static DEFINE_SPINLOCK(dasd_devmap_lock);
 
@@ -264,8 +266,9 @@ dasd_parse_keyword( char *parsestring ) {
 	if (dasd_page_cache)
 		return residual_str;
 	dasd_page_cache =
-		kmem_cache_create("dasd_page_cache", PAGE_SIZE, 0,
-				  SLAB_CACHE_DMA, NULL, NULL );
+		kmem_cache_create("dasd_page_cache", PAGE_SIZE,
+				  PAGE_SIZE, SLAB_CACHE_DMA,
+				  NULL, NULL );
 	if (!dasd_page_cache)
 		MESSAGE(KERN_WARNING, "%s", "Failed to create slab, "
 			"fixed buffer mode disabled.");
@@ -859,39 +862,6 @@ static struct attribute_group dasd_attr_group = {
 };
 
 /*
- * Check if the related storage server is already contained in the
- * dasd_serverlist. If server is not contained, create new entry.
- * Return 0 if server was already in serverlist,
- *	  1 if the server was added successfully
- *	  <0 in case of error.
- */
-static int
-dasd_add_server(struct dasd_uid *uid)
-{
-	struct dasd_servermap *new, *tmp;
-
-	/* check if server is already contained */
-	list_for_each_entry(tmp, &dasd_serverlist, list)
-		// normale cmp?
-		if (strncmp(tmp->sid.vendor, uid->vendor,
-			    sizeof(tmp->sid.vendor)) == 0
-		    && strncmp(tmp->sid.serial, uid->serial,
-			       sizeof(tmp->sid.serial)) == 0)
-			return 0;
-
-	new = (struct dasd_servermap *)
-		kzalloc(sizeof(struct dasd_servermap), GFP_KERNEL);
-	if (!new)
-		return -ENOMEM;
-
-	strncpy(new->sid.vendor, uid->vendor, sizeof(new->sid.vendor));
-	strncpy(new->sid.serial, uid->serial, sizeof(new->sid.serial));
-	list_add(&new->list, &dasd_serverlist);
-	return 1;
-}
-
-
-/*
  * Return copy of the device unique identifier.
  */
 int
@@ -910,6 +880,9 @@ dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid)
 
 /*
  * Register the given device unique identifier into devmap struct.
+ * In addition check if the related storage server subsystem ID is already
+ * contained in the dasd_server_ssid_list. If subsystem ID is not contained,
+ * create new entry.
 * Return 0 if server was already in serverlist,
 *	  1 if the server was added successful
 *	  <0 in case of error.
@@ -918,16 +891,39 @@
 dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid)
 {
 	struct dasd_devmap *devmap;
-	int rc;
+	struct dasd_server_ssid_map *srv, *tmp;
 
 	devmap = dasd_find_busid(cdev->dev.bus_id);
 	if (IS_ERR(devmap))
 		return PTR_ERR(devmap);
+
+	/* generate entry for server_ssid_map */
+	srv = (struct dasd_server_ssid_map *)
+		kzalloc(sizeof(struct dasd_server_ssid_map), GFP_KERNEL);
+	if (!srv)
+		return -ENOMEM;
+	strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1);
+	strncpy(srv->sid.serial, uid->serial, sizeof(srv->sid.serial) - 1);
+	srv->sid.ssid = uid->ssid;
+
+	/* server is already contained ? */
 	spin_lock(&dasd_devmap_lock);
 	devmap->uid = *uid;
-	rc = dasd_add_server(uid);
+	list_for_each_entry(tmp, &dasd_server_ssid_list, list) {
+		if (!memcmp(&srv->sid, &tmp->sid,
+			    sizeof(struct system_id))) {
+			kfree(srv);
+			srv = NULL;
+			break;
+		}
+	}
+
+	/* add servermap to serverlist */
+	if (srv)
+		list_add(&srv->list, &dasd_server_ssid_list);
 	spin_unlock(&dasd_devmap_lock);
-	return rc;
+
+	return (srv ? 1 : 0);
 }
 EXPORT_SYMBOL_GPL(dasd_set_uid);
 
@@ -995,7 +991,7 @@ dasd_devmap_init(void)
 		INIT_LIST_HEAD(&dasd_hashlists[i]);
 
 	/* Initialize servermap structure. */
-	INIT_LIST_HEAD(&dasd_serverlist);
+	INIT_LIST_HEAD(&dasd_server_ssid_list);
 	return 0;
 }
 
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 39c2281371b5..b7a7fac3f7c3 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -468,11 +468,11 @@ dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid)
 		return -ENODEV;
 
 	memset(uid, 0, sizeof(struct dasd_uid));
-	strncpy(uid->vendor, confdata->ned1.HDA_manufacturer,
+	memcpy(uid->vendor, confdata->ned1.HDA_manufacturer,
 	       sizeof(uid->vendor) - 1);
 	EBCASC(uid->vendor, sizeof(uid->vendor) - 1);
-	strncpy(uid->serial, confdata->ned1.HDA_location,
+	memcpy(uid->serial, confdata->ned1.HDA_location,
 	       sizeof(uid->serial) - 1);
 	EBCASC(uid->serial, sizeof(uid->serial) - 1);
 	uid->ssid = confdata->neq.subsystemID;
 	if (confdata->ned2.sneq.flags == 0x40) {
@@ -607,7 +607,7 @@ dasd_eckd_psf_ssc(struct dasd_device *device)
  * Valide storage server of current device.
  */
 static int
-dasd_eckd_validate_server(struct dasd_device *device)
+dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid)
 {
 	int rc;
 
@@ -616,11 +616,11 @@ dasd_eckd_validate_server(struct dasd_device *device)
 		return 0;
 
 	rc = dasd_eckd_psf_ssc(device);
-	if (rc)
-		/* may be requested feature is not available on server,
-		 * therefore just report error and go ahead */
-		DEV_MESSAGE(KERN_INFO, device,
-			    "Perform Subsystem Function returned rc=%d", rc);
+	/* may be requested feature is not available on server,
+	 * therefore just report error and go ahead */
+	DEV_MESSAGE(KERN_INFO, device,
+		    "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d",
+		    uid->vendor, uid->serial, uid->ssid, rc);
 	/* RE-Read Configuration Data */
 	return dasd_eckd_read_conf(device);
 }
@@ -666,7 +666,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
 		return rc;
 	rc = dasd_set_uid(device->cdev, &uid);
 	if (rc == 1)	/* new server found */
-		rc = dasd_eckd_validate_server(device);
+		rc = dasd_eckd_validate_server(device, &uid);
 	if (rc)
 		return rc;
 
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 4c272b70f41a..d163632101d2 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -83,10 +83,12 @@ dasd_gendisk_alloc(struct dasd_device *device)
 void
 dasd_gendisk_free(struct dasd_device *device)
 {
-	del_gendisk(device->gdp);
-	device->gdp->queue = NULL;
-	put_disk(device->gdp);
-	device->gdp = NULL;
+	if (device->gdp) {
+		del_gendisk(device->gdp);
+		device->gdp->queue = NULL;
+		put_disk(device->gdp);
+		device->gdp = NULL;
+	}
 }
 
 /*
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 1140302ff11d..ca7d51f7eccc 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -48,15 +48,6 @@
 #define PRINT_ERR(x...) printk(KERN_ERR XPRAM_NAME " error:" x)
 
 
-static struct sysdev_class xpram_sysclass = {
-	set_kset_name("xpram"),
-};
-
-static struct sys_device xpram_sys_device = {
-	.id	= 0,
-	.cls	= &xpram_sysclass,
-};
-
 typedef struct {
 	unsigned int	size;		/* size of xpram segment in pages */
 	unsigned int	offset;		/* start page of xpram segment */
@@ -451,8 +442,6 @@ static void __exit xpram_exit(void)
 	}
 	unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME);
 	blk_cleanup_queue(xpram_queue);
-	sysdev_unregister(&xpram_sys_device);
-	sysdev_class_unregister(&xpram_sysclass);
 }
 
 static int __init xpram_init(void)
@@ -470,19 +459,7 @@ static int __init xpram_init(void)
 	rc = xpram_setup_sizes(xpram_pages);
 	if (rc)
 		return rc;
-	rc = sysdev_class_register(&xpram_sysclass);
-	if (rc)
-		return rc;
-
-	rc = sysdev_register(&xpram_sys_device);
-	if (rc) {
-		sysdev_class_unregister(&xpram_sysclass);
-		return rc;
-	}
-	rc = xpram_setup_blkdev();
-	if (rc)
-		sysdev_unregister(&xpram_sys_device);
-	return rc;
+	return xpram_setup_blkdev();
 }
 
 module_init(xpram_init);
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c
index 643b6d078563..56b87618b100 100644
--- a/drivers/s390/char/tape_class.c
+++ b/drivers/s390/char/tape_class.c
@@ -76,7 +76,7 @@ struct tape_class_device *register_tape_dev(
 		device,
 		"%s", tcd->device_name
 	);
-	rc = PTR_ERR(tcd->class_device);
+	rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0;
 	if (rc)
 		goto fail_with_cdev;
 	rc = sysfs_create_link(
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c
index f26a2ee3aad8..38954f5cd14c 100644
--- a/drivers/s390/cio/ccwgroup.c
+++ b/drivers/s390/cio/ccwgroup.c
@@ -152,7 +152,6 @@ ccwgroup_create(struct device *root,
 	struct ccwgroup_device *gdev;
 	int i;
 	int rc;
-	int del_drvdata;
 
 	if (argc > 256) /* disallow dumb users */
 		return -EINVAL;
@@ -163,7 +162,6 @@ ccwgroup_create(struct device *root,
 
 	atomic_set(&gdev->onoff, 0);
 
-	del_drvdata = 0;
 	for (i = 0; i < argc; i++) {
 		gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]);
 
@@ -180,18 +178,14 @@ ccwgroup_create(struct device *root,
 			rc = -EINVAL;
 			goto free_dev;
 		}
-	}
-	for (i = 0; i < argc; i++)
 		gdev->cdev[i]->dev.driver_data = gdev;
-	del_drvdata = 1;
+	}
 
 	gdev->creator_id = creator_id;
 	gdev->count = argc;
-	gdev->dev = (struct device ) {
-		.bus = &ccwgroup_bus_type,
-		.parent = root,
-		.release = ccwgroup_release,
-	};
+	gdev->dev.bus = &ccwgroup_bus_type;
+	gdev->dev.parent = root;
+	gdev->dev.release = ccwgroup_release;
 
 	snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s",
 		  gdev->cdev[0]->dev.bus_id);
@@ -226,9 +220,9 @@ error:
 free_dev:
 	for (i = 0; i < argc; i++)
 		if (gdev->cdev[i]) {
-			put_device(&gdev->cdev[i]->dev);
-			if (del_drvdata)
+			if (gdev->cdev[i]->dev.driver_data == gdev)
 				gdev->cdev[i]->dev.driver_data = NULL;
+			put_device(&gdev->cdev[i]->dev);
 		}
 	kfree(gdev);
 	return rc;
@@ -395,10 +389,8 @@ int
 ccwgroup_driver_register (struct ccwgroup_driver *cdriver)
 {
 	/* register our new driver with the core */
-	cdriver->driver = (struct device_driver) {
-		.bus = &ccwgroup_bus_type,
-		.name = cdriver->name,
-	};
+	cdriver->driver.bus = &ccwgroup_bus_type;
+	cdriver->driver.name = cdriver->name;
 
 	return driver_register(&cdriver->driver);
 }
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 61ce3f1d5228..c28444af0919 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -238,8 +238,6 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 	/* Check for single path devices. */
 	if (sch->schib.pmcw.pim == 0x80)
 		goto out_unreg;
-	if (sch->vpm == mask)
-		goto out_unreg;
 
 	if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) &&
 	    (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) &&
@@ -258,6 +256,8 @@ s390_subchannel_remove_chpid(struct device *dev, void *data)
 	/* trigger path verification. */
 	if (sch->driver && sch->driver->verify)
 		sch->driver->verify(&sch->dev);
+	else if (sch->vpm == mask)
+		goto out_unreg;
 out_unlock:
 	spin_unlock_irq(&sch->lock);
 	return 0;
@@ -1391,10 +1391,8 @@ new_channel_path(int chpid)
 	/* fill in status, etc. */
 	chp->id = chpid;
 	chp->state = 1;
-	chp->dev = (struct device) {
-		.parent = &css[0]->device,
-		.release = chp_release,
-	};
+	chp->dev.parent = &css[0]->device;
+	chp->dev.release = chp_release;
 	snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid);
 
 	/* Obtain channel path description and fill it in. */
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 585fa04233c3..646da5640401 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -556,12 +556,11 @@ get_disc_ccwdev_by_devno(unsigned int devno, unsigned int ssid,
 			 struct ccw_device *sibling)
 {
 	struct device *dev;
-	struct match_data data = {
-		.devno   = devno,
-		.ssid    = ssid,
-		.sibling = sibling,
-	};
+	struct match_data data;
 
+	data.devno = devno;
+	data.ssid = ssid;
+	data.sibling = sibling;
 	dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno);
 
 	return dev ? to_ccwdev(dev) : NULL;
@@ -835,10 +834,8 @@ io_subchannel_probe (struct subchannel *sch)
 		return -ENOMEM;
 	}
 	atomic_set(&cdev->private->onoff, 0);
-	cdev->dev = (struct device) {
-		.parent = &sch->dev,
-		.release = ccw_device_release,
-	};
+	cdev->dev.parent = &sch->dev;
+	cdev->dev.release = ccw_device_release;
 	INIT_LIST_HEAD(&cdev->private->kick_work.entry);
 	/* Do first half of device_register. */
 	device_initialize(&cdev->dev);
@@ -977,9 +974,7 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch)
 	int rc;
 
 	/* Initialize the ccw_device structure. */
-	cdev->dev = (struct device) {
-		.parent = &sch->dev,
-	};
+	cdev->dev.parent= &sch->dev;
 	rc = io_subchannel_recog(cdev, sch);
 	if (rc)
 		return rc;
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c
index ac6e0c7e43d9..35e162ba6d54 100644
--- a/drivers/s390/cio/device_fsm.c
+++ b/drivers/s390/cio/device_fsm.c
@@ -152,7 +152,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev)
 	if (cdev->private->iretry) {
 		cdev->private->iretry--;
 		ret = cio_halt(sch);
-		return (ret == 0) ? -EBUSY : ret;
+		if (ret != -EBUSY)
+			return (ret == 0) ? -EBUSY : ret;
 	}
 	/* halt io unsuccessful. */
 	cdev->private->iretry = 255; /* 255 clear retries. */
@@ -266,12 +267,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state)
 		notify = 1;
 	}
 	/* fill out sense information */
-	cdev->id = (struct ccw_device_id) {
-		.cu_type   = cdev->private->senseid.cu_type,
-		.cu_model  = cdev->private->senseid.cu_model,
-		.dev_type  = cdev->private->senseid.dev_type,
-		.dev_model = cdev->private->senseid.dev_model,
-	};
+	cdev->id.cu_type   = cdev->private->senseid.cu_type;
+	cdev->id.cu_model  = cdev->private->senseid.cu_model;
+	cdev->id.dev_type  = cdev->private->senseid.dev_type;
+	cdev->id.dev_model = cdev->private->senseid.dev_model;
 	if (notify) {
 		cdev->private->state = DEV_STATE_OFFLINE;
 		if (same_dev) {
@@ -565,12 +564,10 @@ ccw_device_verify_done(struct ccw_device *cdev, int err)
 	/* Deliver fake irb to device driver, if needed. */
 	if (cdev->private->flags.fake_irb) {
 		memset(&cdev->private->irb, 0, sizeof(struct irb));
-		cdev->private->irb.scsw = (struct scsw) {
-			.cc = 1,
-			.fctl = SCSW_FCTL_START_FUNC,
-			.actl = SCSW_ACTL_START_PEND,
-			.stctl = SCSW_STCTL_STATUS_PEND,
-		};
+		cdev->private->irb.scsw.cc = 1;
+		cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC;
+		cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND;
+		cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND;
 		cdev->private->flags.fake_irb = 0;
 		if (cdev->handler)
 			cdev->handler(cdev, cdev->private->intparm,
@@ -771,6 +768,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event)
 	stsch(sch->schid, &sch->schib);
 
 	if (sch->schib.scsw.actl != 0 ||
+	    (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) ||
 	    (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) {
 		/*
 		 * No final status yet or final status not yet delivered
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index a60124264bee..9e3de0bd59b5 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -263,6 +263,9 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
 	/* Abuse intparm for error reporting. */
 	if (IS_ERR(irb))
 		cdev->private->intparm = -EIO;
+	else if (irb->scsw.cc == 1)
+		/* Retry for deferred condition code. */
+		cdev->private->intparm = -EAGAIN;
 	else if ((irb->scsw.dstat !=
 		  (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
 		 (irb->scsw.cstat != 0)) {
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c
index 32610fd8868e..1693a102dcfe 100644
--- a/drivers/s390/cio/device_pgid.c
+++ b/drivers/s390/cio/device_pgid.c
@@ -24,6 +24,21 @@
 #include "ioasm.h"
 
 /*
+ * Helper function called from interrupt context to decide whether an
+ * operation should be tried again.
+ */
+static int __ccw_device_should_retry(struct scsw *scsw)
+{
+	/* CC is only valid if start function bit is set. */
+	if ((scsw->fctl & SCSW_FCTL_START_FUNC) && scsw->cc == 1)
+		return 1;
+	/* No more activity. For sense and set PGID we stubbornly try again. */
+	if (!scsw->actl)
+		return 1;
+	return 0;
+}
+
+/*
  * Start Sense Path Group ID helper function. Used in ccw_device_recog
  * and ccw_device_sense_pgid.
  */
@@ -155,10 +170,10 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event)
 	int ret;
 
 	irb = (struct irb *) __LC_IRB;
-	/* Retry sense pgid for cc=1. */
+
 	if (irb->scsw.stctl ==
 	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
-		if (irb->scsw.cc == 1) {
+		if (__ccw_device_should_retry(&irb->scsw)) {
 			ret = __ccw_device_sense_pgid_start(cdev);
 			if (ret && ret != -EBUSY)
 				ccw_device_sense_pgid_done(cdev, ret);
@@ -391,10 +406,10 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event)
 	int ret;
 
 	irb = (struct irb *) __LC_IRB;
-	/* Retry set pgid for cc=1. */
+
 	if (irb->scsw.stctl ==
 	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
-		if (irb->scsw.cc == 1)
+		if (__ccw_device_should_retry(&irb->scsw))
 			__ccw_device_verify_start(cdev);
 		return;
 	}
@@ -494,10 +509,10 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event)
 	int ret;
 
 	irb = (struct irb *) __LC_IRB;
-	/* Retry set pgid for cc=1. */
+
 	if (irb->scsw.stctl ==
 	    (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) {
-		if (irb->scsw.cc == 1)
+		if (__ccw_device_should_retry(&irb->scsw))
 			__ccw_device_disband_start(cdev);
 		return;
 	}
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c
index 5fff1f93973a..e1327b8fce00 100644
--- a/drivers/s390/net/qeth_main.c
+++ b/drivers/s390/net/qeth_main.c
@@ -8510,9 +8510,9 @@ static int
 qeth_ipv6_init(void)
 {
 	qeth_old_arp_constructor = arp_tbl.constructor;
-	write_lock(&arp_tbl.lock);
+	write_lock_bh(&arp_tbl.lock);
 	arp_tbl.constructor = qeth_arp_constructor;
-	write_unlock(&arp_tbl.lock);
+	write_unlock_bh(&arp_tbl.lock);
 
 	arp_direct_ops = (struct neigh_ops*)
 		kmalloc(sizeof(struct neigh_ops), GFP_KERNEL);
@@ -8528,9 +8528,9 @@ qeth_ipv6_init(void)
 static void
 qeth_ipv6_uninit(void)
 {
-	write_lock(&arp_tbl.lock);
+	write_lock_bh(&arp_tbl.lock);
 	arp_tbl.constructor = qeth_old_arp_constructor;
-	write_unlock(&arp_tbl.lock);
+	write_unlock_bh(&arp_tbl.lock);
 	kfree(arp_direct_ops);
 }
 #endif /* CONFIG_QETH_IPV6 */
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 9cd789b8acd4..adc9d8f2c28f 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -112,6 +112,105 @@ _zfcp_hex_dump(char *addr, int count)
 		printk("\n");
 }
 
+
+/****************************************************************/
+/****** Functions to handle the request ID hash table    ********/
+/****************************************************************/
+
+#define ZFCP_LOG_AREA	ZFCP_LOG_AREA_FSF
+
+static int zfcp_reqlist_init(struct zfcp_adapter *adapter)
+{
+	int i;
+
+	adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head),
+				    GFP_KERNEL);
+
+	if (!adapter->req_list)
+		return -ENOMEM;
+
+	for (i=0; i<REQUEST_LIST_SIZE; i++)
+		INIT_LIST_HEAD(&adapter->req_list[i]);
+
+	return 0;
+}
+
+static void zfcp_reqlist_free(struct zfcp_adapter *adapter)
+{
+	struct zfcp_fsf_req *request, *tmp;
+	unsigned int i;
+
+	for (i=0; i<REQUEST_LIST_SIZE; i++) {
+		if (list_empty(&adapter->req_list[i]))
+			continue;
+
+		list_for_each_entry_safe(request, tmp,
+					 &adapter->req_list[i], list)
+			list_del(&request->list);
+	}
+
+	kfree(adapter->req_list);
+}
+
+void zfcp_reqlist_add(struct zfcp_adapter *adapter,
+		      struct zfcp_fsf_req *fsf_req)
+{
+	unsigned int i;
+
+	i = fsf_req->req_id % REQUEST_LIST_SIZE;
+	list_add_tail(&fsf_req->list, &adapter->req_list[i]);
+}
+
+void zfcp_reqlist_remove(struct zfcp_adapter *adapter, unsigned long req_id)
+{
+	struct zfcp_fsf_req *request, *tmp;
+	unsigned int i, counter;
+	u64 dbg_tmp[2];
+
+	i = req_id % REQUEST_LIST_SIZE;
+	BUG_ON(list_empty(&adapter->req_list[i]));
+
+	counter = 0;
+	list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) {
+		if (request->req_id == req_id) {
+			dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active);
+			dbg_tmp[1] = (u64) counter;
+			debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
+			list_del(&request->list);
+			break;
+		}
+		counter++;
+	}
+}
+
+struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter,
+					   unsigned long req_id)
+{
+	struct zfcp_fsf_req *request, *tmp;
+	unsigned int i;
+
+	i = req_id % REQUEST_LIST_SIZE;
+
+	list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list)
+		if (request->req_id == req_id)
+			return request;
+
+	return NULL;
+}
+
+int zfcp_reqlist_isempty(struct zfcp_adapter *adapter)
+{
+	unsigned int i;
+
+	for (i=0; i<REQUEST_LIST_SIZE; i++)
+		if (!list_empty(&adapter->req_list[i]))
+			return 0;
+
+	return 1;
+}
+
+#undef ZFCP_LOG_AREA
+
 /****************************************************************/
 /************** Uncategorised Functions *************************/
 /****************************************************************/
@@ -961,8 +1060,12 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 	INIT_LIST_HEAD(&adapter->port_remove_lh);
 
 	/* initialize list of fsf requests */
-	spin_lock_init(&adapter->fsf_req_list_lock);
-	INIT_LIST_HEAD(&adapter->fsf_req_list_head);
+	spin_lock_init(&adapter->req_list_lock);
+	retval = zfcp_reqlist_init(adapter);
+	if (retval) {
+		ZFCP_LOG_INFO("request list initialization failed\n");
+		goto failed_low_mem_buffers;
+	}
 
 	/* initialize debug locks */
 
@@ -1041,8 +1144,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 *	!0 - struct zfcp_adapter data structure could not be removed
 *	(e.g. still used)
 * locks:	adapter list write lock is assumed to be held by caller
- *		adapter->fsf_req_list_lock is taken and released within this
- *		function and must not be held on entry
 */
 void
 zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
@@ -1054,14 +1155,14 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
 	zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev);
 	dev_set_drvdata(&adapter->ccw_device->dev, NULL);
 	/* sanity check: no pending FSF requests */
-	spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
-	retval = !list_empty(&adapter->fsf_req_list_head);
-	spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
-	if (retval) {
+	spin_lock_irqsave(&adapter->req_list_lock, flags);
+	retval = zfcp_reqlist_isempty(adapter);
+	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+	if (!retval) {
 		ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, "
 				"%i requests outstanding\n",
 				zfcp_get_busid_by_adapter(adapter), adapter,
-				atomic_read(&adapter->fsf_reqs_active));
+				atomic_read(&adapter->reqs_active));
 		retval = -EBUSY;
 		goto out;
 	}
@@ -1087,6 +1188,7 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter)
 	zfcp_free_low_mem_buffers(adapter);
 	/* free memory of adapter data structure and queues */
 	zfcp_qdio_free_queues(adapter);
+	zfcp_reqlist_free(adapter);
 	kfree(adapter->fc_stats);
 	kfree(adapter->stats_reset_data);
 	ZFCP_LOG_TRACE("freeing adapter structure\n");
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index 57d8e4bfb8d9..fdabadeaa9ee 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -164,6 +164,11 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device)
 	retval = zfcp_adapter_scsi_register(adapter);
 	if (retval)
 		goto out_scsi_register;
+
+	/* initialize request counter */
+	BUG_ON(!zfcp_reqlist_isempty(adapter));
+	adapter->req_no = 0;
+
 	zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING,
 				       ZFCP_SET);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED);
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 2df512a18e2c..94d1b74db356 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -52,7 +52,7 @@
 /********************* GENERAL DEFINES *********************************/
 
 /* zfcp version number, it consists of major, minor, and patch-level number */
-#define ZFCP_VERSION		"4.7.0"
+#define ZFCP_VERSION		"4.8.0"
 
 /**
  * zfcp_sg_to_address - determine kernel address from struct scatterlist
@@ -80,7 +80,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list)
 #define REQUEST_LIST_SIZE 128
 
 /********************* SCSI SPECIFIC DEFINES *********************************/
-#define ZFCP_SCSI_ER_TIMEOUT	(100*HZ)
+#define ZFCP_SCSI_ER_TIMEOUT	(10*HZ)
 
 /********************* CIO/QDIO SPECIFIC DEFINES *****************************/
 
@@ -886,11 +886,11 @@ struct zfcp_adapter {
 	struct list_head	port_remove_lh;	   /* head of ports to be
 						      removed */
 	u32			ports;		   /* number of remote ports */
 	struct timer_list	scsi_er_timer;	   /* SCSI err recovery watch */
-	struct list_head	fsf_req_list_head; /* head of FSF req list */
-	spinlock_t		fsf_req_list_lock; /* lock for ops on list of
-						      FSF requests */
-	atomic_t		fsf_reqs_active;   /* # active FSF reqs */
+	atomic_t		reqs_active;	   /* # active FSF reqs */
+	unsigned long		req_no;		   /* unique FSF req number */
+	struct list_head	*req_list;	   /* list of pending reqs */
+	spinlock_t		req_list_lock;	   /* request list lock */
 	struct zfcp_qdio_queue	request_queue;	   /* request queue */
 	u32			fsf_req_seq_no;	   /* FSF cmnd seq number */
 	wait_queue_head_t	request_wq;	   /* can be used to wait for
@@ -986,6 +986,7 @@ struct zfcp_unit {
 /* FSF request */
 struct zfcp_fsf_req {
 	struct list_head	list;	       /* list of FSF requests */
+	unsigned long		req_id;	       /* unique request ID */
 	struct zfcp_adapter	*adapter;      /* adapter request belongs to */
 	u8			sbal_number;   /* nr of SBALs free for use */
 	u8			sbal_first;    /* first SBAL for this request */
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index 8ec8da0beaa8..7f60b6fdf724 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -64,8 +64,8 @@ static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int);
 static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *);
 static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int);
 static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *);
-static int zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
-static int zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
+static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *);
+static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *);
 static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *);
 static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *);
 static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *);
@@ -93,10 +93,9 @@ static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *);
 static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *);
 static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *);
 
-static int zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
-static int zfcp_erp_action_dismiss_port(struct zfcp_port *);
-static int zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
-static int zfcp_erp_action_dismiss(struct zfcp_erp_action *);
+static void zfcp_erp_action_dismiss_port(struct zfcp_port *);
+static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *);
+static void zfcp_erp_action_dismiss(struct zfcp_erp_action *);
 
 static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *,
 				   struct zfcp_port *, struct zfcp_unit *);
@@ -135,29 +134,39 @@ zfcp_fsf_request_timeout_handler(unsigned long data)
 	zfcp_erp_adapter_reopen(adapter, 0);
 }
 
-/*
- * function:	zfcp_fsf_scsi_er_timeout_handler
+/**
+ * zfcp_fsf_scsi_er_timeout_handler - timeout handler for scsi eh tasks
  *
- * purpose:	This function needs to be called whenever a SCSI error recovery
- *		action (abort/reset) does not return.
- *		Re-opening the adapter means that the command can be returned
- *		by zfcp (it is guarranteed that it does not return via the
- *		adapter anymore). The buffer can then be used again.
- *
- * returns:	sod all
+ * This function needs to be called whenever a SCSI error recovery
+ * action (abort/reset) does not return. Re-opening the adapter means
+ * that the abort/reset command can be returned by zfcp. It won't complete
+ * via the adapter anymore (because qdio queues are closed). If ERP is
+ * already running on this adapter it will be stopped.
 */
-void
-zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
+void zfcp_fsf_scsi_er_timeout_handler(unsigned long data)
 {
 	struct zfcp_adapter *adapter = (struct zfcp_adapter *) data;
+	unsigned long flags;
 
 	ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. "
 			"Restarting all operations on the adapter %s\n",
 			zfcp_get_busid_by_adapter(adapter));
 	debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout");
-	zfcp_erp_adapter_reopen(adapter, 0);
 
-	return;
+	write_lock_irqsave(&adapter->erp_lock, flags);
+	if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
+			     &adapter->status)) {
+		zfcp_erp_modify_adapter_status(adapter,
+			ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN,
+			ZFCP_CLEAR);
+		zfcp_erp_action_dismiss_adapter(adapter);
+		write_unlock_irqrestore(&adapter->erp_lock, flags);
+		/* dismiss all pending requests including requests for ERP */
+		zfcp_fsf_req_dismiss_all(adapter);
+		adapter->fsf_req_seq_no = 0;
+	} else
+		write_unlock_irqrestore(&adapter->erp_lock, flags);
169 zfcp_erp_adapter_reopen(adapter, 0);
161} 170}
162 171
163/* 172/*
@@ -670,17 +679,10 @@ zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask)
670 return retval; 679 return retval;
671} 680}
672 681
673/* 682/**
674 * function: 683 * zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests
675 *
676 * purpose: disable I/O,
677 * return any open requests and clean them up,
678 * aim: no pending and incoming I/O
679 *
680 * returns:
681 */ 684 */
682static void 685static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
683zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
684{ 686{
685 debug_text_event(adapter->erp_dbf, 6, "a_bl"); 687 debug_text_event(adapter->erp_dbf, 6, "a_bl");
686 zfcp_erp_modify_adapter_status(adapter, 688 zfcp_erp_modify_adapter_status(adapter,
@@ -688,15 +690,10 @@ zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask)
688 clear_mask, ZFCP_CLEAR); 690 clear_mask, ZFCP_CLEAR);
689} 691}
690 692
691/* 693/**
692 * function: 694 * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests
693 *
694 * purpose: enable I/O
695 *
696 * returns:
697 */ 695 */
698static void 696static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
699zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
700{ 697{
701 debug_text_event(adapter->erp_dbf, 6, "a_ubl"); 698 debug_text_event(adapter->erp_dbf, 6, "a_ubl");
702 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); 699 atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
@@ -848,18 +845,16 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
848 struct zfcp_adapter *adapter = erp_action->adapter; 845 struct zfcp_adapter *adapter = erp_action->adapter;
849 846
850 if (erp_action->fsf_req) { 847 if (erp_action->fsf_req) {
851 /* take lock to ensure that request is not being deleted meanwhile */ 848 /* take lock to ensure that request is not deleted meanwhile */
852 spin_lock(&adapter->fsf_req_list_lock); 849 spin_lock(&adapter->req_list_lock);
853 /* check whether fsf req does still exist */ 850 if ((!zfcp_reqlist_ismember(adapter,
854 list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list) 851 erp_action->fsf_req->req_id)) &&
855 if (fsf_req == erp_action->fsf_req) 852 (fsf_req->erp_action == erp_action)) {
856 break;
857 if (fsf_req && (fsf_req->erp_action == erp_action)) {
858 /* fsf_req still exists */ 853 /* fsf_req still exists */
859 debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); 854 debug_text_event(adapter->erp_dbf, 3, "a_ca_req");
860 debug_event(adapter->erp_dbf, 3, &fsf_req, 855 debug_event(adapter->erp_dbf, 3, &fsf_req,
861 sizeof (unsigned long)); 856 sizeof (unsigned long));
862 /* dismiss fsf_req of timed out or dismissed erp_action */ 857 /* dismiss fsf_req of timed out/dismissed erp_action */
863 if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED | 858 if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED |
864 ZFCP_STATUS_ERP_TIMEDOUT)) { 859 ZFCP_STATUS_ERP_TIMEDOUT)) {
865 debug_text_event(adapter->erp_dbf, 3, 860 debug_text_event(adapter->erp_dbf, 3,
@@ -892,30 +887,22 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action)
892 */ 887 */
893 erp_action->fsf_req = NULL; 888 erp_action->fsf_req = NULL;
894 } 889 }
895 spin_unlock(&adapter->fsf_req_list_lock); 890 spin_unlock(&adapter->req_list_lock);
896 } else 891 } else
897 debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq"); 892 debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq");
898 893
899 return retval; 894 return retval;
900} 895}
901 896
902/* 897/**
903 * purpose: generic handler for asynchronous events related to erp_action events 898 * zfcp_erp_async_handler_nolock - complete erp_action
904 * (normal completion, time-out, dismissing, retry after
905 * low memory condition)
906 *
907 * note: deletion of timer is not required (e.g. in case of a time-out),
908 * but a second try does no harm,
909 * we leave it in here to allow for greater simplification
910 * 899 *
911 * returns: 0 - there was an action to handle 900 * Used for normal completion, time-out, dismissal and failure after
912 * !0 - otherwise 901 * low memory condition.
913 */ 902 */
914static int 903static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
915zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, 904 unsigned long set_mask)
916 unsigned long set_mask)
917{ 905{
918 int retval;
919 struct zfcp_adapter *adapter = erp_action->adapter; 906 struct zfcp_adapter *adapter = erp_action->adapter;
920 907
921 if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) { 908 if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) {
@@ -926,43 +913,26 @@ zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action,
926 del_timer(&erp_action->timer); 913 del_timer(&erp_action->timer);
927 erp_action->status |= set_mask; 914 erp_action->status |= set_mask;
928 zfcp_erp_action_ready(erp_action); 915 zfcp_erp_action_ready(erp_action);
929 retval = 0;
930 } else { 916 } else {
931 /* action is ready or gone - nothing to do */ 917 /* action is ready or gone - nothing to do */
932 debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone"); 918 debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone");
933 debug_event(adapter->erp_dbf, 3, &erp_action->action, 919 debug_event(adapter->erp_dbf, 3, &erp_action->action,
934 sizeof (int)); 920 sizeof (int));
935 retval = 1;
936 } 921 }
937
938 return retval;
939} 922}
940 923
941/* 924/**
942 * purpose: generic handler for asynchronous events related to erp_action 925 * zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking
943 * events (normal completion, time-out, dismissing, retry after
944 * low memory condition)
945 *
946 * note: deletion of timer is not required (e.g. in case of a time-out),
947 * but a second try does no harm,
948 * we leave it in here to allow for greater simplification
949 *
950 * returns: 0 - there was an action to handle
951 * !0 - otherwise
952 */ 926 */
953int 927void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action,
954zfcp_erp_async_handler(struct zfcp_erp_action *erp_action, 928 unsigned long set_mask)
955 unsigned long set_mask)
956{ 929{
957 struct zfcp_adapter *adapter = erp_action->adapter; 930 struct zfcp_adapter *adapter = erp_action->adapter;
958 unsigned long flags; 931 unsigned long flags;
959 int retval;
960 932
961 write_lock_irqsave(&adapter->erp_lock, flags); 933 write_lock_irqsave(&adapter->erp_lock, flags);
962 retval = zfcp_erp_async_handler_nolock(erp_action, set_mask); 934 zfcp_erp_async_handler_nolock(erp_action, set_mask);
963 write_unlock_irqrestore(&adapter->erp_lock, flags); 935 write_unlock_irqrestore(&adapter->erp_lock, flags);
964
965 return retval;
966} 936}
967 937
968/* 938/*
@@ -999,17 +969,15 @@ zfcp_erp_timeout_handler(unsigned long data)
999 zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT); 969 zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT);
1000} 970}
1001 971
1002/* 972/**
1003 * purpose: is called for an erp_action which needs to be ended 973 * zfcp_erp_action_dismiss - dismiss an erp_action
1004 * though not being done,
1005 * this is usually required if an higher is generated,
1006 * action gets an appropriate flag and will be processed
1007 * accordingly
1008 * 974 *
1009 * locks: erp_lock held (thus we need to call another handler variant) 975 * adapter->erp_lock must be held
976 *
977 * Dismissal of an erp_action is usually required if an erp_action of
978 * higher priority is generated.
1010 */ 979 */
1011static int 980static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
1012zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
1013{ 981{
1014 struct zfcp_adapter *adapter = erp_action->adapter; 982 struct zfcp_adapter *adapter = erp_action->adapter;
1015 983
@@ -1017,8 +985,6 @@ zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action)
1017 debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int)); 985 debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int));
1018 986
1019 zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED); 987 zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED);
1020
1021 return 0;
1022} 988}
1023 989
1024int 990int
@@ -2074,18 +2040,12 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action)
2074 return retval; 2040 return retval;
2075} 2041}
2076 2042
2077/* 2043/**
2078 * function: zfcp_qdio_cleanup 2044 * zfcp_erp_adapter_strategy_close_qdio - close qdio queues for an adapter
2079 *
2080 * purpose: cleans up QDIO operation for the specified adapter
2081 *
2082 * returns: 0 - successful cleanup
2083 * !0 - failed cleanup
2084 */ 2045 */
2085int 2046static void
2086zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) 2047zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2087{ 2048{
2088 int retval = ZFCP_ERP_SUCCEEDED;
2089 int first_used; 2049 int first_used;
2090 int used_count; 2050 int used_count;
2091 struct zfcp_adapter *adapter = erp_action->adapter; 2051 struct zfcp_adapter *adapter = erp_action->adapter;
@@ -2094,15 +2054,13 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2094 ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO " 2054 ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO "
2095 "queues on adapter %s\n", 2055 "queues on adapter %s\n",
2096 zfcp_get_busid_by_adapter(adapter)); 2056 zfcp_get_busid_by_adapter(adapter));
2097 retval = ZFCP_ERP_FAILED; 2057 return;
2098 goto out;
2099 } 2058 }
2100 2059
2101 /* 2060 /*
2102 * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that 2061 * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that
2103 * do_QDIO won't be called while qdio_shutdown is in progress. 2062 * do_QDIO won't be called while qdio_shutdown is in progress.
2104 */ 2063 */
2105
2106 write_lock_irq(&adapter->request_queue.queue_lock); 2064 write_lock_irq(&adapter->request_queue.queue_lock);
2107 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); 2065 atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
2108 write_unlock_irq(&adapter->request_queue.queue_lock); 2066 write_unlock_irq(&adapter->request_queue.queue_lock);
@@ -2134,8 +2092,6 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action)
2134 adapter->request_queue.free_index = 0; 2092 adapter->request_queue.free_index = 0;
2135 atomic_set(&adapter->request_queue.free_count, 0); 2093 atomic_set(&adapter->request_queue.free_count, 0);
2136 adapter->request_queue.distance_from_int = 0; 2094 adapter->request_queue.distance_from_int = 0;
2137 out:
2138 return retval;
2139} 2095}
2140 2096
2141static int 2097static int
@@ -2258,11 +2214,11 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action)
2258 "%s)\n", zfcp_get_busid_by_adapter(adapter)); 2214 "%s)\n", zfcp_get_busid_by_adapter(adapter));
2259 ret = ZFCP_ERP_FAILED; 2215 ret = ZFCP_ERP_FAILED;
2260 } 2216 }
2261 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) { 2217
2262 ZFCP_LOG_INFO("error: exchange port data failed (adapter " 2218 /* don't treat as error for the sake of compatibility */
2219 if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status))
2220 ZFCP_LOG_INFO("warning: exchange port data failed (adapter "
2263 "%s\n", zfcp_get_busid_by_adapter(adapter)); 2221 "%s\n", zfcp_get_busid_by_adapter(adapter));
2264 ret = ZFCP_ERP_FAILED;
2265 }
2266 2222
2267 return ret; 2223 return ret;
2268} 2224}
@@ -2292,18 +2248,12 @@ zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action
2292 return retval; 2248 return retval;
2293} 2249}
2294 2250
2295/* 2251/**
2296 * function: zfcp_fsf_cleanup 2252 * zfcp_erp_adapter_strategy_close_fsf - stop FSF operations for an adapter
2297 *
2298 * purpose: cleanup FSF operation for specified adapter
2299 *
2300 * returns: 0 - FSF operation successfully cleaned up
2301 * !0 - failed to cleanup FSF operation for this adapter
2302 */ 2253 */
2303static int 2254static void
2304zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action) 2255zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
2305{ 2256{
2306 int retval = ZFCP_ERP_SUCCEEDED;
2307 struct zfcp_adapter *adapter = erp_action->adapter; 2257 struct zfcp_adapter *adapter = erp_action->adapter;
2308 2258
2309 /* 2259 /*
@@ -2317,8 +2267,6 @@ zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action)
2317 /* all ports and units are closed */ 2267 /* all ports and units are closed */
2318 zfcp_erp_modify_adapter_status(adapter, 2268 zfcp_erp_modify_adapter_status(adapter,
2319 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); 2269 ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
2320
2321 return retval;
2322} 2270}
2323 2271
2324/* 2272/*
@@ -3293,10 +3241,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter,
3293} 3241}
3294 3242
3295 3243
3296static int 3244void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3297zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3298{ 3245{
3299 int retval = 0;
3300 struct zfcp_port *port; 3246 struct zfcp_port *port;
3301 3247
3302 debug_text_event(adapter->erp_dbf, 5, "a_actab"); 3248 debug_text_event(adapter->erp_dbf, 5, "a_actab");
@@ -3305,14 +3251,10 @@ zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter)
3305 else 3251 else
3306 list_for_each_entry(port, &adapter->port_list_head, list) 3252 list_for_each_entry(port, &adapter->port_list_head, list)
3307 zfcp_erp_action_dismiss_port(port); 3253 zfcp_erp_action_dismiss_port(port);
3308
3309 return retval;
3310} 3254}
3311 3255
3312static int 3256static void zfcp_erp_action_dismiss_port(struct zfcp_port *port)
3313zfcp_erp_action_dismiss_port(struct zfcp_port *port)
3314{ 3257{
3315 int retval = 0;
3316 struct zfcp_unit *unit; 3258 struct zfcp_unit *unit;
3317 struct zfcp_adapter *adapter = port->adapter; 3259 struct zfcp_adapter *adapter = port->adapter;
3318 3260
@@ -3323,22 +3265,16 @@ zfcp_erp_action_dismiss_port(struct zfcp_port *port)
3323 else 3265 else
3324 list_for_each_entry(unit, &port->unit_list_head, list) 3266 list_for_each_entry(unit, &port->unit_list_head, list)
3325 zfcp_erp_action_dismiss_unit(unit); 3267 zfcp_erp_action_dismiss_unit(unit);
3326
3327 return retval;
3328} 3268}
3329 3269
3330static int 3270static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
3331zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit)
3332{ 3271{
3333 int retval = 0;
3334 struct zfcp_adapter *adapter = unit->port->adapter; 3272 struct zfcp_adapter *adapter = unit->port->adapter;
3335 3273
3336 debug_text_event(adapter->erp_dbf, 5, "u_actab"); 3274 debug_text_event(adapter->erp_dbf, 5, "u_actab");
3337 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t)); 3275 debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t));
3338 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) 3276 if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status))
3339 zfcp_erp_action_dismiss(&unit->erp_action); 3277 zfcp_erp_action_dismiss(&unit->erp_action);
3340
3341 return retval;
3342} 3278}
3343 3279
3344static inline void 3280static inline void
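
For reference, the new zfcp_fsf_scsi_er_timeout_handler (and the matching host-reset path in zfcp_scsi.c further below) boils down to the sequence sketched here: if adapter ERP is already pending it is dismissed together with all outstanding FSF requests before the adapter is reopened. This is a condensed sketch only; it omits the status-flag clearing and the erp_lock handling visible in the hunk above, the helpers are stubbed out so the snippet compiles on its own, and the flag value is a placeholder rather than the driver's real mask.

#include <stdio.h>

#define ERP_PENDING 0x1   /* placeholder for ZFCP_STATUS_ADAPTER_ERP_PENDING */

struct adapter { unsigned int status; unsigned long fsf_req_seq_no; };

/* stubs standing in for the zfcp routines used by the handler */
static void dismiss_erp_actions(struct adapter *a)      { (void)a; puts("dismiss ERP actions"); }
static void dismiss_all_fsf_requests(struct adapter *a) { (void)a; puts("dismiss FSF requests"); }
static void adapter_reopen(struct adapter *a)           { (void)a; puts("reopen adapter"); }

static void scsi_er_timeout(struct adapter *a)
{
	/* in the driver the check and the dismissal of ERP actions run
	 * under adapter->erp_lock */
	if (a->status & ERP_PENDING) {
		/* ERP itself is stuck: drop its actions and every pending
		 * FSF request before starting over from scratch */
		dismiss_erp_actions(a);
		dismiss_all_fsf_requests(a);
		a->fsf_req_seq_no = 0;
	}
	adapter_reopen(a);
}

int main(void)
{
	struct adapter a = { .status = ERP_PENDING };

	scsi_er_timeout(&a);
	return 0;
}
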
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index d02366004cdd..146d7a2b4c4a 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -63,7 +63,6 @@ extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *);
63extern void zfcp_qdio_free_queues(struct zfcp_adapter *); 63extern void zfcp_qdio_free_queues(struct zfcp_adapter *);
64extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *, 64extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *,
65 struct zfcp_fsf_req *); 65 struct zfcp_fsf_req *);
66extern int zfcp_qdio_reqid_check(struct zfcp_adapter *, void *);
67 66
68extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req 67extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req
69 (struct zfcp_fsf_req *, int, int); 68 (struct zfcp_fsf_req *, int, int);
@@ -140,6 +139,7 @@ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int);
140extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int); 139extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int);
141extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int); 140extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int);
142extern void zfcp_erp_adapter_failed(struct zfcp_adapter *); 141extern void zfcp_erp_adapter_failed(struct zfcp_adapter *);
142extern void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *);
143 143
144extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int); 144extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int);
145extern int zfcp_erp_port_reopen(struct zfcp_port *, int); 145extern int zfcp_erp_port_reopen(struct zfcp_port *, int);
@@ -156,7 +156,7 @@ extern void zfcp_erp_unit_failed(struct zfcp_unit *);
156extern int zfcp_erp_thread_setup(struct zfcp_adapter *); 156extern int zfcp_erp_thread_setup(struct zfcp_adapter *);
157extern int zfcp_erp_thread_kill(struct zfcp_adapter *); 157extern int zfcp_erp_thread_kill(struct zfcp_adapter *);
158extern int zfcp_erp_wait(struct zfcp_adapter *); 158extern int zfcp_erp_wait(struct zfcp_adapter *);
159extern int zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); 159extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long);
160 160
161extern int zfcp_test_link(struct zfcp_port *); 161extern int zfcp_test_link(struct zfcp_port *);
162 162
@@ -190,5 +190,10 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
190 struct zfcp_fsf_req *); 190 struct zfcp_fsf_req *);
191extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, 191extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
192 struct scsi_cmnd *); 192 struct scsi_cmnd *);
193extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *);
194extern void zfcp_reqlist_remove(struct zfcp_adapter *, unsigned long);
195extern struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *,
196 unsigned long);
197extern int zfcp_reqlist_isempty(struct zfcp_adapter *);
193 198
194#endif /* ZFCP_EXT_H */ 199#endif /* ZFCP_EXT_H */
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 31db2b06faba..ff2eacf5ec8c 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -49,7 +49,6 @@ static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *);
49static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *, 49static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *,
50 struct fsf_link_down_info *); 50 struct fsf_link_down_info *);
51static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); 51static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *);
52static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *);
53 52
54/* association between FSF command and FSF QTCB type */ 53/* association between FSF command and FSF QTCB type */
55static u32 fsf_qtcb_type[] = { 54static u32 fsf_qtcb_type[] = {
@@ -146,47 +145,48 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req)
146 kfree(fsf_req); 145 kfree(fsf_req);
147} 146}
148 147
149/* 148/**
150 * function: 149 * zfcp_fsf_req_dismiss - dismiss a single fsf request
151 *
152 * purpose:
153 *
154 * returns:
155 *
156 * note: qdio queues shall be down (no ongoing inbound processing)
157 */ 150 */
158int 151static void zfcp_fsf_req_dismiss(struct zfcp_adapter *adapter,
159zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) 152 struct zfcp_fsf_req *fsf_req,
153 unsigned int counter)
160{ 154{
161 struct zfcp_fsf_req *fsf_req, *tmp; 155 u64 dbg_tmp[2];
162 unsigned long flags;
163 LIST_HEAD(remove_queue);
164 156
165 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 157 dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active);
166 list_splice_init(&adapter->fsf_req_list_head, &remove_queue); 158 dbg_tmp[1] = (u64) counter;
167 atomic_set(&adapter->fsf_reqs_active, 0); 159 debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
168 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 160 list_del(&fsf_req->list);
169 161 fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
170 list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) { 162 zfcp_fsf_req_complete(fsf_req);
171 list_del(&fsf_req->list);
172 zfcp_fsf_req_dismiss(fsf_req);
173 }
174
175 return 0;
176} 163}
177 164
178/* 165/**
179 * function: 166 * zfcp_fsf_req_dismiss_all - dismiss all remaining fsf requests
180 *
181 * purpose:
182 *
183 * returns:
184 */ 167 */
185static void 168int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
186zfcp_fsf_req_dismiss(struct zfcp_fsf_req *fsf_req)
187{ 169{
188 fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; 170 struct zfcp_fsf_req *request, *tmp;
189 zfcp_fsf_req_complete(fsf_req); 171 unsigned long flags;
172 unsigned int i, counter;
173
174 spin_lock_irqsave(&adapter->req_list_lock, flags);
175 atomic_set(&adapter->reqs_active, 0);
176 for (i=0; i<REQUEST_LIST_SIZE; i++) {
177 if (list_empty(&adapter->req_list[i]))
178 continue;
179
180 counter = 0;
181 list_for_each_entry_safe(request, tmp,
182 &adapter->req_list[i], list) {
183 zfcp_fsf_req_dismiss(adapter, request, counter);
184 counter++;
185 }
186 }
187 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
188
189 return 0;
190} 190}
191 191
192/* 192/*
@@ -4592,12 +4592,14 @@ static inline void
4592zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) 4592zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req)
4593{ 4593{
4594 if (likely(fsf_req->qtcb != NULL)) { 4594 if (likely(fsf_req->qtcb != NULL)) {
4595 fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no; 4595 fsf_req->qtcb->prefix.req_seq_no =
4596 fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req; 4596 fsf_req->adapter->fsf_req_seq_no;
4597 fsf_req->qtcb->prefix.req_id = fsf_req->req_id;
4597 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; 4598 fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION;
4598 fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command]; 4599 fsf_req->qtcb->prefix.qtcb_type =
4600 fsf_qtcb_type[fsf_req->fsf_command];
4599 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; 4601 fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION;
4600 fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req; 4602 fsf_req->qtcb->header.req_handle = fsf_req->req_id;
4601 fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command; 4603 fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command;
4602 } 4604 }
4603} 4605}
@@ -4654,6 +4656,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4654{ 4656{
4655 volatile struct qdio_buffer_element *sbale; 4657 volatile struct qdio_buffer_element *sbale;
4656 struct zfcp_fsf_req *fsf_req = NULL; 4658 struct zfcp_fsf_req *fsf_req = NULL;
4659 unsigned long flags;
4657 int ret = 0; 4660 int ret = 0;
4658 struct zfcp_qdio_queue *req_queue = &adapter->request_queue; 4661 struct zfcp_qdio_queue *req_queue = &adapter->request_queue;
4659 4662
@@ -4668,6 +4671,12 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4668 4671
4669 fsf_req->adapter = adapter; 4672 fsf_req->adapter = adapter;
4670 fsf_req->fsf_command = fsf_cmd; 4673 fsf_req->fsf_command = fsf_cmd;
4674 INIT_LIST_HEAD(&fsf_req->list);
4675
4676 /* unique request id */
4677 spin_lock_irqsave(&adapter->req_list_lock, flags);
4678 fsf_req->req_id = adapter->req_no++;
4679 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
4671 4680
4672 zfcp_fsf_req_qtcb_init(fsf_req); 4681 zfcp_fsf_req_qtcb_init(fsf_req);
4673 4682
@@ -4707,7 +4716,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags,
4707 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); 4716 sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0);
4708 4717
4709 /* setup common SBALE fields */ 4718 /* setup common SBALE fields */
4710 sbale[0].addr = fsf_req; 4719 sbale[0].addr = (void *) fsf_req->req_id;
4711 sbale[0].flags |= SBAL_FLAGS0_COMMAND; 4720 sbale[0].flags |= SBAL_FLAGS0_COMMAND;
4712 if (likely(fsf_req->qtcb != NULL)) { 4721 if (likely(fsf_req->qtcb != NULL)) {
4713 sbale[1].addr = (void *) fsf_req->qtcb; 4722 sbale[1].addr = (void *) fsf_req->qtcb;
@@ -4747,7 +4756,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4747 volatile struct qdio_buffer_element *sbale; 4756 volatile struct qdio_buffer_element *sbale;
4748 int inc_seq_no; 4757 int inc_seq_no;
4749 int new_distance_from_int; 4758 int new_distance_from_int;
4750 unsigned long flags; 4759 u64 dbg_tmp[2];
4751 int retval = 0; 4760 int retval = 0;
4752 4761
4753 adapter = fsf_req->adapter; 4762 adapter = fsf_req->adapter;
@@ -4761,10 +4770,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4761 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, 4770 ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr,
4762 sbale[1].length); 4771 sbale[1].length);
4763 4772
4764 /* put allocated FSF request at list tail */ 4773 /* put allocated FSF request into hash table */
4765 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 4774 spin_lock(&adapter->req_list_lock);
4766 list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head); 4775 zfcp_reqlist_add(adapter, fsf_req);
4767 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 4776 spin_unlock(&adapter->req_list_lock);
4768 4777
4769 inc_seq_no = (fsf_req->qtcb != NULL); 4778 inc_seq_no = (fsf_req->qtcb != NULL);
4770 4779
@@ -4803,6 +4812,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4803 QDIO_FLAG_SYNC_OUTPUT, 4812 QDIO_FLAG_SYNC_OUTPUT,
4804 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); 4813 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL);
4805 4814
4815 dbg_tmp[0] = (unsigned long) sbale[0].addr;
4816 dbg_tmp[1] = (u64) retval;
4817 debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16);
4818
4806 if (unlikely(retval)) { 4819 if (unlikely(retval)) {
4807 /* Queues are down..... */ 4820 /* Queues are down..... */
4808 retval = -EIO; 4821 retval = -EIO;
@@ -4812,22 +4825,17 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4812 */ 4825 */
4813 if (timer) 4826 if (timer)
4814 del_timer(timer); 4827 del_timer(timer);
4815 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); 4828 spin_lock(&adapter->req_list_lock);
4816 list_del(&fsf_req->list); 4829 zfcp_reqlist_remove(adapter, fsf_req->req_id);
4817 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); 4830 spin_unlock(&adapter->req_list_lock);
4818 /* 4831 /* undo changes in request queue made for this request */
4819 * adjust the number of free SBALs in request queue as well as
4820 * position of first one
4821 */
4822 zfcp_qdio_zero_sbals(req_queue->buffer, 4832 zfcp_qdio_zero_sbals(req_queue->buffer,
4823 fsf_req->sbal_first, fsf_req->sbal_number); 4833 fsf_req->sbal_first, fsf_req->sbal_number);
4824 atomic_add(fsf_req->sbal_number, &req_queue->free_count); 4834 atomic_add(fsf_req->sbal_number, &req_queue->free_count);
4825 req_queue->free_index -= fsf_req->sbal_number; /* increase */ 4835 req_queue->free_index -= fsf_req->sbal_number;
4826 req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; 4836 req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q;
4827 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ 4837 req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */
4828 ZFCP_LOG_DEBUG 4838 zfcp_erp_adapter_reopen(adapter, 0);
4829 ("error: do_QDIO failed. Buffers could not be enqueued "
4830 "to request queue.\n");
4831 } else { 4839 } else {
4832 req_queue->distance_from_int = new_distance_from_int; 4840 req_queue->distance_from_int = new_distance_from_int;
4833 /* 4841 /*
@@ -4843,7 +4851,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer)
4843 adapter->fsf_req_seq_no++; 4851 adapter->fsf_req_seq_no++;
4844 4852
4845 /* count FSF requests pending */ 4853 /* count FSF requests pending */
4846 atomic_inc(&adapter->fsf_reqs_active); 4854 atomic_inc(&adapter->reqs_active);
4847 } 4855 }
4848 return retval; 4856 return retval;
4849} 4857}
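
Taken together, the zfcp_fsf.c hunks change what is handed to the hardware as a request token: zfcp_fsf_req_create draws fsf_req->req_id from adapter->req_no under req_list_lock, zfcp_fsf_req_qtcb_init copies that ID into the QTCB prefix and header, and zfcp_fsf_req_send files the request into the new list before do_QDIO, removing it again (and reopening the adapter) if the call fails. Below is a compressed sketch of that send-side sequence with invented stub types and helpers so it compiles outside the kernel; locking and the SBAL accounting are left out.

#include <stdio.h>

struct adapter {
	unsigned long req_no;            /* source of unique request IDs */
	int reqs_active;
};

struct fsf_req {
	struct adapter *adapter;
	unsigned long req_id;
	void *sbale0_addr;               /* stand-in for sbale[0].addr */
	unsigned long qtcb_req_handle;   /* stand-in for qtcb->header.req_handle */
};

/* stubs for the new request-list helpers and for do_QDIO */
static void reqlist_add(struct adapter *a, struct fsf_req *r)   { (void)a; (void)r; }
static void reqlist_remove(struct adapter *a, unsigned long id) { (void)a; (void)id; }
static int do_qdio_send(struct fsf_req *r)                      { (void)r; return 0; }

static int req_send(struct adapter *a, struct fsf_req *r)
{
	r->adapter = a;
	r->req_id = a->req_no++;              /* unique ID, taken under req_list_lock */
	r->qtcb_req_handle = r->req_id;       /* the ID, not a pointer, goes to the adapter */
	r->sbale0_addr = (void *) r->req_id;  /* same ID used as the SBALE token */

	reqlist_add(a, r);                    /* make it visible to the response handler */
	if (do_qdio_send(r)) {
		reqlist_remove(a, r->req_id); /* undo bookkeeping on failure */
		return -1;
	}
	a->reqs_active++;
	return 0;
}

int main(void)
{
	struct adapter a = { 0 };
	struct fsf_req r = { 0 };

	printf("send: %d, active: %d\n", req_send(&a, &r), a.reqs_active);
	return 0;
}
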
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 49ea5add4abc..dbd9f48e863e 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -282,6 +282,37 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device,
282 return; 282 return;
283} 283}
284 284
285/**
286 * zfcp_qdio_reqid_check - checks for valid reqids or unsolicited status
287 */
288static int zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
289 unsigned long req_id)
290{
291 struct zfcp_fsf_req *fsf_req;
292 unsigned long flags;
293
294 debug_long_event(adapter->erp_dbf, 4, req_id);
295
296 spin_lock_irqsave(&adapter->req_list_lock, flags);
297 fsf_req = zfcp_reqlist_ismember(adapter, req_id);
298
299 if (!fsf_req) {
300 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
301 ZFCP_LOG_NORMAL("error: unknown request id (%ld).\n", req_id);
302 zfcp_erp_adapter_reopen(adapter, 0);
303 return -EINVAL;
304 }
305
306 zfcp_reqlist_remove(adapter, req_id);
307 atomic_dec(&adapter->reqs_active);
308 spin_unlock_irqrestore(&adapter->req_list_lock, flags);
309
310 /* finish the FSF request */
311 zfcp_fsf_req_complete(fsf_req);
312
313 return 0;
314}
315
285/* 316/*
286 * function: zfcp_qdio_response_handler 317 * function: zfcp_qdio_response_handler
287 * 318 *
@@ -344,7 +375,7 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
344 /* look for QDIO request identifiers in SB */ 375 /* look for QDIO request identifiers in SB */
345 buffere = &buffer->element[buffere_index]; 376 buffere = &buffer->element[buffere_index];
346 retval = zfcp_qdio_reqid_check(adapter, 377 retval = zfcp_qdio_reqid_check(adapter,
347 (void *) buffere->addr); 378 (unsigned long) buffere->addr);
348 379
349 if (retval) { 380 if (retval) {
350 ZFCP_LOG_NORMAL("bug: unexpected inbound " 381 ZFCP_LOG_NORMAL("bug: unexpected inbound "
@@ -415,52 +446,6 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device,
415 return; 446 return;
416} 447}
417 448
418/*
419 * function: zfcp_qdio_reqid_check
420 *
421 * purpose: checks for valid reqids or unsolicited status
422 *
423 * returns: 0 - valid request id or unsolicited status
424 * !0 - otherwise
425 */
426int
427zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr)
428{
429 struct zfcp_fsf_req *fsf_req;
430 unsigned long flags;
431
432 /* invalid (per convention used in this driver) */
433 if (unlikely(!sbale_addr)) {
434 ZFCP_LOG_NORMAL("bug: invalid reqid\n");
435 return -EINVAL;
436 }
437
438 /* valid request id and thus (hopefully :) valid fsf_req address */
439 fsf_req = (struct zfcp_fsf_req *) sbale_addr;
440
441 /* serialize with zfcp_fsf_req_dismiss_all */
442 spin_lock_irqsave(&adapter->fsf_req_list_lock, flags);
443 if (list_empty(&adapter->fsf_req_list_head)) {
444 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
445 return 0;
446 }
447 list_del(&fsf_req->list);
448 atomic_dec(&adapter->fsf_reqs_active);
449 spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags);
450
451 if (unlikely(adapter != fsf_req->adapter)) {
452 ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, "
453 "fsf_req->adapter=%p, adapter=%p)\n",
454 fsf_req, fsf_req->adapter, adapter);
455 return -EINVAL;
456 }
457
458 /* finish the FSF request */
459 zfcp_fsf_req_complete(fsf_req);
460
461 return 0;
462}
463
464/** 449/**
465 * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue 450 * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue
466 * @queue: queue from which SBALE should be returned 451 * @queue: queue from which SBALE should be returned
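
The inbound side mirrors this: zfcp_qdio_reqid_check, now static and keyed by the numeric ID read from the SBALE, refuses to complete anything it cannot find in the request list and instead reopens the adapter, whereas the removed code dereferenced the SBALE address as a raw fsf_req pointer. A minimal, self-contained sketch of that lookup-or-recover decision; the lookup, recovery and completion routines are stubs, and removing the entry plus decrementing reqs_active (done in the real code) is only noted in a comment.

#include <stdio.h>

struct fsf_req { unsigned long req_id; };

/* stubs: request-list lookup, ERP recovery, request completion */
static struct fsf_req *reqlist_find(unsigned long req_id)
{
	static struct fsf_req known = { .req_id = 42 };
	return req_id == known.req_id ? &known : NULL;
}
static void adapter_reopen(void)            { puts("unknown ID -> reopen adapter"); }
static void req_complete(struct fsf_req *r) { printf("complete req %lu\n", r->req_id); }

/* sketch of the decision made by zfcp_qdio_reqid_check() */
static int reqid_check(unsigned long req_id)
{
	struct fsf_req *req = reqlist_find(req_id);

	if (!req) {
		/* a stale or corrupted token is rejected and recovered from,
		 * never dereferenced */
		adapter_reopen();
		return -1;
	}
	/* real code also removes the entry and decrements reqs_active here */
	req_complete(req);
	return 0;
}

int main(void)
{
	reqid_check(42);   /* valid token from a pending request */
	reqid_check(7);    /* unknown token */
	return 0;
}
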
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 671f4a6a5d18..1bb55086db9f 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -30,7 +30,6 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *,
30 void (*done) (struct scsi_cmnd *)); 30 void (*done) (struct scsi_cmnd *));
31static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *); 31static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *);
32static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); 32static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *);
33static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *);
34static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); 33static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *);
35static int zfcp_task_management_function(struct zfcp_unit *, u8, 34static int zfcp_task_management_function(struct zfcp_unit *, u8,
36 struct scsi_cmnd *); 35 struct scsi_cmnd *);
@@ -46,30 +45,22 @@ struct zfcp_data zfcp_data = {
46 .scsi_host_template = { 45 .scsi_host_template = {
47 .name = ZFCP_NAME, 46 .name = ZFCP_NAME,
48 .proc_name = "zfcp", 47 .proc_name = "zfcp",
49 .proc_info = NULL,
50 .detect = NULL,
51 .slave_alloc = zfcp_scsi_slave_alloc, 48 .slave_alloc = zfcp_scsi_slave_alloc,
52 .slave_configure = zfcp_scsi_slave_configure, 49 .slave_configure = zfcp_scsi_slave_configure,
53 .slave_destroy = zfcp_scsi_slave_destroy, 50 .slave_destroy = zfcp_scsi_slave_destroy,
54 .queuecommand = zfcp_scsi_queuecommand, 51 .queuecommand = zfcp_scsi_queuecommand,
55 .eh_abort_handler = zfcp_scsi_eh_abort_handler, 52 .eh_abort_handler = zfcp_scsi_eh_abort_handler,
56 .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, 53 .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler,
57 .eh_bus_reset_handler = zfcp_scsi_eh_bus_reset_handler, 54 .eh_bus_reset_handler = zfcp_scsi_eh_host_reset_handler,
58 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, 55 .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler,
59 .can_queue = 4096, 56 .can_queue = 4096,
60 .this_id = -1, 57 .this_id = -1,
61 /*
62 * FIXME:
63 * one less? can zfcp_create_sbale cope with it?
64 */
65 .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ, 58 .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ,
66 .cmd_per_lun = 1, 59 .cmd_per_lun = 1,
67 .unchecked_isa_dma = 0,
68 .use_clustering = 1, 60 .use_clustering = 1,
69 .sdev_attrs = zfcp_sysfs_sdev_attrs, 61 .sdev_attrs = zfcp_sysfs_sdev_attrs,
70 }, 62 },
71 .driver_version = ZFCP_VERSION, 63 .driver_version = ZFCP_VERSION,
72 /* rest initialised with zeros */
73}; 64};
74 65
75/* Find start of Response Information in FCP response unit*/ 66/* Find start of Response Information in FCP response unit*/
@@ -176,8 +167,14 @@ zfcp_scsi_slave_alloc(struct scsi_device *sdp)
176 return retval; 167 return retval;
177} 168}
178 169
179static void 170/**
180zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) 171 * zfcp_scsi_slave_destroy - called when scsi device is removed
172 *
 173 * Remove reference to associated scsi device for a zfcp_unit.
174 * Mark zfcp_unit as failed. The scsi device might be deleted via sysfs
175 * or a scan for this device might have failed.
176 */
177static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
181{ 178{
182 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; 179 struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata;
183 180
@@ -185,6 +182,7 @@ zfcp_scsi_slave_destroy(struct scsi_device *sdpnt)
185 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); 182 atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status);
186 sdpnt->hostdata = NULL; 183 sdpnt->hostdata = NULL;
187 unit->device = NULL; 184 unit->device = NULL;
185 zfcp_erp_unit_failed(unit);
188 zfcp_unit_put(unit); 186 zfcp_unit_put(unit);
189 } else { 187 } else {
190 ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at " 188 ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at "
@@ -549,35 +547,38 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags,
549} 547}
550 548
551/** 549/**
552 * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter) 550 * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset
551 *
552 * If ERP is already running it will be stopped.
553 */ 553 */
554int 554int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
555zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt)
556{ 555{
557 struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; 556 struct zfcp_unit *unit;
558 struct zfcp_adapter *adapter = unit->port->adapter; 557 struct zfcp_adapter *adapter;
559 558 unsigned long flags;
560 ZFCP_LOG_NORMAL("bus reset because of problems with "
561 "unit 0x%016Lx\n", unit->fcp_lun);
562 zfcp_erp_adapter_reopen(adapter, 0);
563 zfcp_erp_wait(adapter);
564
565 return SUCCESS;
566}
567 559
568/** 560 unit = (struct zfcp_unit*) scpnt->device->hostdata;
569 * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter) 561 adapter = unit->port->adapter;
570 */
571int
572zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt)
573{
574 struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata;
575 struct zfcp_adapter *adapter = unit->port->adapter;
576 562
577 ZFCP_LOG_NORMAL("host reset because of problems with " 563 ZFCP_LOG_NORMAL("host/bus reset because of problems with "
578 "unit 0x%016Lx\n", unit->fcp_lun); 564 "unit 0x%016Lx\n", unit->fcp_lun);
579 zfcp_erp_adapter_reopen(adapter, 0); 565
580 zfcp_erp_wait(adapter); 566 write_lock_irqsave(&adapter->erp_lock, flags);
567 if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING,
568 &adapter->status)) {
569 zfcp_erp_modify_adapter_status(adapter,
570 ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN,
571 ZFCP_CLEAR);
572 zfcp_erp_action_dismiss_adapter(adapter);
573 write_unlock_irqrestore(&adapter->erp_lock, flags);
574 zfcp_fsf_req_dismiss_all(adapter);
575 adapter->fsf_req_seq_no = 0;
576 zfcp_erp_adapter_reopen(adapter, 0);
577 } else {
578 write_unlock_irqrestore(&adapter->erp_lock, flags);
579 zfcp_erp_adapter_reopen(adapter, 0);
580 zfcp_erp_wait(adapter);
581 }
581 582
582 return SUCCESS; 583 return SUCCESS;
583} 584}
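
Finally, the zfcp_scsi.c hunks fold the former bus-reset handler into the host-reset handler, so the scsi_host_template simply wires both error-handler callbacks to the same routine. The sketch below shows that wiring with a trimmed-down, hypothetical template struct standing in for the SCSI midlayer's struct scsi_host_template; the handler body is reduced to a comment.

#include <stdio.h>

struct scsi_cmnd;                         /* opaque for this sketch */

/* trimmed-down stand-in for struct scsi_host_template */
struct host_template {
	int (*eh_bus_reset_handler)(struct scsi_cmnd *);
	int (*eh_host_reset_handler)(struct scsi_cmnd *);
};

static int eh_host_reset(struct scsi_cmnd *scpnt)
{
	(void)scpnt;
	/* the real handler dismisses pending ERP and FSF requests and then
	 * reopens the adapter, as in the zfcp_scsi.c hunk above */
	return 0;                         /* SUCCESS */
}

/* both error-handler slots point at the same routine */
static const struct host_template zfcp_template = {
	.eh_bus_reset_handler  = eh_host_reset,
	.eh_host_reset_handler = eh_host_reset,
};

int main(void)
{
	printf("bus reset == host reset: %d\n",
	       zfcp_template.eh_bus_reset_handler ==
	       zfcp_template.eh_host_reset_handler);
	return 0;
}
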