Diffstat (limited to 'drivers/s390')
57 files changed, 1525 insertions, 1107 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 4bf03fb67f8d..25c1ef6dfd44 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -52,7 +52,7 @@ static void dasd_setup_queue(struct dasd_device * device); | |||
52 | static void dasd_free_queue(struct dasd_device * device); | 52 | static void dasd_free_queue(struct dasd_device * device); |
53 | static void dasd_flush_request_queue(struct dasd_device *); | 53 | static void dasd_flush_request_queue(struct dasd_device *); |
54 | static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); | 54 | static void dasd_int_handler(struct ccw_device *, unsigned long, struct irb *); |
55 | static void dasd_flush_ccw_queue(struct dasd_device *, int); | 55 | static int dasd_flush_ccw_queue(struct dasd_device *, int); |
56 | static void dasd_tasklet(struct dasd_device *); | 56 | static void dasd_tasklet(struct dasd_device *); |
57 | static void do_kick_device(void *data); | 57 | static void do_kick_device(void *data); |
58 | 58 | ||
@@ -60,6 +60,7 @@ static void do_kick_device(void *data); | |||
60 | * SECTION: Operations on the device structure. | 60 | * SECTION: Operations on the device structure. |
61 | */ | 61 | */ |
62 | static wait_queue_head_t dasd_init_waitq; | 62 | static wait_queue_head_t dasd_init_waitq; |
63 | static wait_queue_head_t dasd_flush_wq; | ||
63 | 64 | ||
64 | /* | 65 | /* |
65 | * Allocate memory for a new device structure. | 66 | * Allocate memory for a new device structure. |
@@ -121,7 +122,7 @@ dasd_free_device(struct dasd_device *device) | |||
121 | /* | 122 | /* |
122 | * Make a new device known to the system. | 123 | * Make a new device known to the system. |
123 | */ | 124 | */ |
124 | static inline int | 125 | static int |
125 | dasd_state_new_to_known(struct dasd_device *device) | 126 | dasd_state_new_to_known(struct dasd_device *device) |
126 | { | 127 | { |
127 | int rc; | 128 | int rc; |
@@ -145,7 +146,7 @@ dasd_state_new_to_known(struct dasd_device *device) | |||
145 | /* | 146 | /* |
146 | * Let the system forget about a device. | 147 | * Let the system forget about a device. |
147 | */ | 148 | */ |
148 | static inline void | 149 | static int |
149 | dasd_state_known_to_new(struct dasd_device * device) | 150 | dasd_state_known_to_new(struct dasd_device * device) |
150 | { | 151 | { |
151 | /* Disable extended error reporting for this device. */ | 152 | /* Disable extended error reporting for this device. */ |
@@ -163,12 +164,13 @@ dasd_state_known_to_new(struct dasd_device * device) | |||
163 | 164 | ||
164 | /* Give up reference we took in dasd_state_new_to_known. */ | 165 | /* Give up reference we took in dasd_state_new_to_known. */ |
165 | dasd_put_device(device); | 166 | dasd_put_device(device); |
167 | return 0; | ||
166 | } | 168 | } |
167 | 169 | ||
168 | /* | 170 | /* |
169 | * Request the irq line for the device. | 171 | * Request the irq line for the device. |
170 | */ | 172 | */ |
171 | static inline int | 173 | static int |
172 | dasd_state_known_to_basic(struct dasd_device * device) | 174 | dasd_state_known_to_basic(struct dasd_device * device) |
173 | { | 175 | { |
174 | int rc; | 176 | int rc; |
@@ -192,17 +194,23 @@ dasd_state_known_to_basic(struct dasd_device * device) | |||
192 | /* | 194 | /* |
193 | * Release the irq line for the device. Terminate any running i/o. | 195 | * Release the irq line for the device. Terminate any running i/o. |
194 | */ | 196 | */ |
195 | static inline void | 197 | static int |
196 | dasd_state_basic_to_known(struct dasd_device * device) | 198 | dasd_state_basic_to_known(struct dasd_device * device) |
197 | { | 199 | { |
200 | int rc; | ||
201 | |||
198 | dasd_gendisk_free(device); | 202 | dasd_gendisk_free(device); |
199 | dasd_flush_ccw_queue(device, 1); | 203 | rc = dasd_flush_ccw_queue(device, 1); |
204 | if (rc) | ||
205 | return rc; | ||
206 | |||
200 | DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); | 207 | DBF_DEV_EVENT(DBF_EMERG, device, "%p debug area deleted", device); |
201 | if (device->debug_area != NULL) { | 208 | if (device->debug_area != NULL) { |
202 | debug_unregister(device->debug_area); | 209 | debug_unregister(device->debug_area); |
203 | device->debug_area = NULL; | 210 | device->debug_area = NULL; |
204 | } | 211 | } |
205 | device->state = DASD_STATE_KNOWN; | 212 | device->state = DASD_STATE_KNOWN; |
213 | return 0; | ||
206 | } | 214 | } |
207 | 215 | ||
208 | /* | 216 | /* |
@@ -219,7 +227,7 @@ dasd_state_basic_to_known(struct dasd_device * device) | |||
219 | * In case the analysis returns an error, the device setup is stopped | 227 | * In case the analysis returns an error, the device setup is stopped |
220 | * (a fake disk was already added to allow formatting). | 228 | * (a fake disk was already added to allow formatting). |
221 | */ | 229 | */ |
222 | static inline int | 230 | static int |
223 | dasd_state_basic_to_ready(struct dasd_device * device) | 231 | dasd_state_basic_to_ready(struct dasd_device * device) |
224 | { | 232 | { |
225 | int rc; | 233 | int rc; |
@@ -247,25 +255,31 @@ dasd_state_basic_to_ready(struct dasd_device * device) | |||
247 | * Forget format information. Check if the target level is basic | 255 | * Forget format information. Check if the target level is basic |
248 | * and if it is create fake disk for formatting. | 256 | * and if it is create fake disk for formatting. |
249 | */ | 257 | */ |
250 | static inline void | 258 | static int |
251 | dasd_state_ready_to_basic(struct dasd_device * device) | 259 | dasd_state_ready_to_basic(struct dasd_device * device) |
252 | { | 260 | { |
253 | dasd_flush_ccw_queue(device, 0); | 261 | int rc; |
262 | |||
263 | rc = dasd_flush_ccw_queue(device, 0); | ||
264 | if (rc) | ||
265 | return rc; | ||
254 | dasd_destroy_partitions(device); | 266 | dasd_destroy_partitions(device); |
255 | dasd_flush_request_queue(device); | 267 | dasd_flush_request_queue(device); |
256 | device->blocks = 0; | 268 | device->blocks = 0; |
257 | device->bp_block = 0; | 269 | device->bp_block = 0; |
258 | device->s2b_shift = 0; | 270 | device->s2b_shift = 0; |
259 | device->state = DASD_STATE_BASIC; | 271 | device->state = DASD_STATE_BASIC; |
272 | return 0; | ||
260 | } | 273 | } |
261 | 274 | ||
262 | /* | 275 | /* |
263 | * Back to basic. | 276 | * Back to basic. |
264 | */ | 277 | */ |
265 | static inline void | 278 | static int |
266 | dasd_state_unfmt_to_basic(struct dasd_device * device) | 279 | dasd_state_unfmt_to_basic(struct dasd_device * device) |
267 | { | 280 | { |
268 | device->state = DASD_STATE_BASIC; | 281 | device->state = DASD_STATE_BASIC; |
282 | return 0; | ||
269 | } | 283 | } |
270 | 284 | ||
271 | /* | 285 | /* |
@@ -273,7 +287,7 @@ dasd_state_unfmt_to_basic(struct dasd_device * device) | |||
273 | * the requeueing of requests from the linux request queue to the | 287 | * the requeueing of requests from the linux request queue to the |
274 | * ccw queue. | 288 | * ccw queue. |
275 | */ | 289 | */ |
276 | static inline int | 290 | static int |
277 | dasd_state_ready_to_online(struct dasd_device * device) | 291 | dasd_state_ready_to_online(struct dasd_device * device) |
278 | { | 292 | { |
279 | device->state = DASD_STATE_ONLINE; | 293 | device->state = DASD_STATE_ONLINE; |
@@ -284,16 +298,17 @@ dasd_state_ready_to_online(struct dasd_device * device) | |||
284 | /* | 298 | /* |
285 | * Stop the requeueing of requests again. | 299 | * Stop the requeueing of requests again. |
286 | */ | 300 | */ |
287 | static inline void | 301 | static int |
288 | dasd_state_online_to_ready(struct dasd_device * device) | 302 | dasd_state_online_to_ready(struct dasd_device * device) |
289 | { | 303 | { |
290 | device->state = DASD_STATE_READY; | 304 | device->state = DASD_STATE_READY; |
305 | return 0; | ||
291 | } | 306 | } |
292 | 307 | ||
293 | /* | 308 | /* |
294 | * Device startup state changes. | 309 | * Device startup state changes. |
295 | */ | 310 | */ |
296 | static inline int | 311 | static int |
297 | dasd_increase_state(struct dasd_device *device) | 312 | dasd_increase_state(struct dasd_device *device) |
298 | { | 313 | { |
299 | int rc; | 314 | int rc; |
@@ -329,30 +344,37 @@ dasd_increase_state(struct dasd_device *device) | |||
329 | /* | 344 | /* |
330 | * Device shutdown state changes. | 345 | * Device shutdown state changes. |
331 | */ | 346 | */ |
332 | static inline int | 347 | static int |
333 | dasd_decrease_state(struct dasd_device *device) | 348 | dasd_decrease_state(struct dasd_device *device) |
334 | { | 349 | { |
350 | int rc; | ||
351 | |||
352 | rc = 0; | ||
335 | if (device->state == DASD_STATE_ONLINE && | 353 | if (device->state == DASD_STATE_ONLINE && |
336 | device->target <= DASD_STATE_READY) | 354 | device->target <= DASD_STATE_READY) |
337 | dasd_state_online_to_ready(device); | 355 | rc = dasd_state_online_to_ready(device); |
338 | 356 | ||
339 | if (device->state == DASD_STATE_READY && | 357 | if (!rc && |
358 | device->state == DASD_STATE_READY && | ||
340 | device->target <= DASD_STATE_BASIC) | 359 | device->target <= DASD_STATE_BASIC) |
341 | dasd_state_ready_to_basic(device); | 360 | rc = dasd_state_ready_to_basic(device); |
342 | 361 | ||
343 | if (device->state == DASD_STATE_UNFMT && | 362 | if (!rc && |
363 | device->state == DASD_STATE_UNFMT && | ||
344 | device->target <= DASD_STATE_BASIC) | 364 | device->target <= DASD_STATE_BASIC) |
345 | dasd_state_unfmt_to_basic(device); | 365 | rc = dasd_state_unfmt_to_basic(device); |
346 | 366 | ||
347 | if (device->state == DASD_STATE_BASIC && | 367 | if (!rc && |
368 | device->state == DASD_STATE_BASIC && | ||
348 | device->target <= DASD_STATE_KNOWN) | 369 | device->target <= DASD_STATE_KNOWN) |
349 | dasd_state_basic_to_known(device); | 370 | rc = dasd_state_basic_to_known(device); |
350 | 371 | ||
351 | if (device->state == DASD_STATE_KNOWN && | 372 | if (!rc && |
373 | device->state == DASD_STATE_KNOWN && | ||
352 | device->target <= DASD_STATE_NEW) | 374 | device->target <= DASD_STATE_NEW) |
353 | dasd_state_known_to_new(device); | 375 | rc = dasd_state_known_to_new(device); |
354 | 376 | ||
355 | return 0; | 377 | return rc; |
356 | } | 378 | } |
357 | 379 | ||
358 | /* | 380 | /* |
@@ -701,6 +723,7 @@ dasd_term_IO(struct dasd_ccw_req * cqr) | |||
701 | cqr->retries--; | 723 | cqr->retries--; |
702 | cqr->status = DASD_CQR_CLEAR; | 724 | cqr->status = DASD_CQR_CLEAR; |
703 | cqr->stopclk = get_clock(); | 725 | cqr->stopclk = get_clock(); |
726 | cqr->starttime = 0; | ||
704 | DBF_DEV_EVENT(DBF_DEBUG, device, | 727 | DBF_DEV_EVENT(DBF_DEBUG, device, |
705 | "terminate cqr %p successful", | 728 | "terminate cqr %p successful", |
706 | cqr); | 729 | cqr); |
@@ -978,6 +1001,7 @@ dasd_int_handler(struct ccw_device *cdev, unsigned long intparm, | |||
978 | irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { | 1001 | irb->scsw.fctl & SCSW_FCTL_CLEAR_FUNC) { |
979 | cqr->status = DASD_CQR_QUEUED; | 1002 | cqr->status = DASD_CQR_QUEUED; |
980 | dasd_clear_timer(device); | 1003 | dasd_clear_timer(device); |
1004 | wake_up(&dasd_flush_wq); | ||
981 | dasd_schedule_bh(device); | 1005 | dasd_schedule_bh(device); |
982 | return; | 1006 | return; |
983 | } | 1007 | } |
@@ -1241,6 +1265,10 @@ __dasd_check_expire(struct dasd_device * device) | |||
1241 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); | 1265 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); |
1242 | if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) { | 1266 | if (cqr->status == DASD_CQR_IN_IO && cqr->expires != 0) { |
1243 | if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) { | 1267 | if (time_after_eq(jiffies, cqr->expires + cqr->starttime)) { |
1268 | DEV_MESSAGE(KERN_ERR, device, | ||
1269 | "internal error - timeout (%is) expired " | ||
1270 | "for cqr %p (%i retries left)", | ||
1271 | (cqr->expires/HZ), cqr, cqr->retries); | ||
1244 | if (device->discipline->term_IO(cqr) != 0) | 1272 | if (device->discipline->term_IO(cqr) != 0) |
1245 | /* Hmpf, try again in 1/10 sec */ | 1273 | /* Hmpf, try again in 1/10 sec */ |
1246 | dasd_set_timer(device, 10); | 1274 | dasd_set_timer(device, 10); |
@@ -1285,46 +1313,100 @@ __dasd_start_head(struct dasd_device * device) | |||
1285 | dasd_set_timer(device, 50); | 1313 | dasd_set_timer(device, 50); |
1286 | } | 1314 | } |
1287 | 1315 | ||
1316 | static inline int | ||
1317 | _wait_for_clear(struct dasd_ccw_req *cqr) | ||
1318 | { | ||
1319 | return (cqr->status == DASD_CQR_QUEUED); | ||
1320 | } | ||
1321 | |||
1288 | /* | 1322 | /* |
1289 | * Remove requests from the ccw queue. | 1323 | * Remove all requests from the ccw queue (all = '1') or only block device |
1324 | * requests in case all = '0'. | ||
1325 | * Take care of the erp-chain (chained via cqr->refers) and remove either | ||
1326 | * the whole erp-chain or none of the erp-requests. | ||
1327 | * If a request is currently running, term_IO is called and the request | ||
1328 | * is re-queued. Prior to removing the terminated request we need to wait | ||
1329 | * for the clear-interrupt. | ||
1330 | * In case termination is not possible we stop processing and just finishing | ||
1331 | * the already moved requests. | ||
1290 | */ | 1332 | */ |
1291 | static void | 1333 | static int |
1292 | dasd_flush_ccw_queue(struct dasd_device * device, int all) | 1334 | dasd_flush_ccw_queue(struct dasd_device * device, int all) |
1293 | { | 1335 | { |
1336 | struct dasd_ccw_req *cqr, *orig, *n; | ||
1337 | int rc, i; | ||
1338 | |||
1294 | struct list_head flush_queue; | 1339 | struct list_head flush_queue; |
1295 | struct list_head *l, *n; | ||
1296 | struct dasd_ccw_req *cqr; | ||
1297 | 1340 | ||
1298 | INIT_LIST_HEAD(&flush_queue); | 1341 | INIT_LIST_HEAD(&flush_queue); |
1299 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | 1342 | spin_lock_irq(get_ccwdev_lock(device->cdev)); |
1300 | list_for_each_safe(l, n, &device->ccw_queue) { | 1343 | rc = 0; |
1301 | cqr = list_entry(l, struct dasd_ccw_req, list); | 1344 | restart: |
1345 | list_for_each_entry_safe(cqr, n, &device->ccw_queue, list) { | ||
1346 | /* get original request of erp request-chain */ | ||
1347 | for (orig = cqr; orig->refers != NULL; orig = orig->refers); | ||
1348 | |||
1302 | /* Flush all request or only block device requests? */ | 1349 | /* Flush all request or only block device requests? */ |
1303 | if (all == 0 && cqr->callback == dasd_end_request_cb) | 1350 | if (all == 0 && cqr->callback != dasd_end_request_cb && |
1351 | orig->callback != dasd_end_request_cb) { | ||
1304 | continue; | 1352 | continue; |
1305 | if (cqr->status == DASD_CQR_IN_IO) | 1353 | } |
1306 | device->discipline->term_IO(cqr); | 1354 | /* Check status and move request to flush_queue */ |
1307 | if (cqr->status != DASD_CQR_DONE || | 1355 | switch (cqr->status) { |
1308 | cqr->status != DASD_CQR_FAILED) { | 1356 | case DASD_CQR_IN_IO: |
1309 | cqr->status = DASD_CQR_FAILED; | 1357 | rc = device->discipline->term_IO(cqr); |
1358 | if (rc) { | ||
1359 | /* unable to terminate requeust */ | ||
1360 | DEV_MESSAGE(KERN_ERR, device, | ||
1361 | "dasd flush ccw_queue is unable " | ||
1362 | " to terminate request %p", | ||
1363 | cqr); | ||
1364 | /* stop flush processing */ | ||
1365 | goto finished; | ||
1366 | } | ||
1367 | break; | ||
1368 | case DASD_CQR_QUEUED: | ||
1369 | case DASD_CQR_ERROR: | ||
1370 | /* set request to FAILED */ | ||
1310 | cqr->stopclk = get_clock(); | 1371 | cqr->stopclk = get_clock(); |
1372 | cqr->status = DASD_CQR_FAILED; | ||
1373 | break; | ||
1374 | default: /* do not touch the others */ | ||
1375 | break; | ||
1376 | } | ||
1377 | /* Rechain request (including erp chain) */ | ||
1378 | for (i = 0; cqr != NULL; cqr = cqr->refers, i++) { | ||
1379 | cqr->endclk = get_clock(); | ||
1380 | list_move_tail(&cqr->list, &flush_queue); | ||
1381 | } | ||
1382 | if (i > 1) | ||
1383 | /* moved more than one request - need to restart */ | ||
1384 | goto restart; | ||
1385 | } | ||
1386 | |||
1387 | finished: | ||
1388 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | ||
1389 | /* Now call the callback function of flushed requests */ | ||
1390 | restart_cb: | ||
1391 | list_for_each_entry_safe(cqr, n, &flush_queue, list) { | ||
1392 | if (cqr->status == DASD_CQR_CLEAR) { | ||
1393 | /* wait for clear interrupt! */ | ||
1394 | wait_event(dasd_flush_wq, _wait_for_clear(cqr)); | ||
1395 | cqr->status = DASD_CQR_FAILED; | ||
1311 | } | 1396 | } |
1312 | /* Process finished ERP request. */ | 1397 | /* Process finished ERP request. */ |
1313 | if (cqr->refers) { | 1398 | if (cqr->refers) { |
1314 | __dasd_process_erp(device, cqr); | 1399 | __dasd_process_erp(device, cqr); |
1315 | continue; | 1400 | /* restart list_for_xx loop since dasd_process_erp |
1401 | * might remove multiple elements */ | ||
1402 | goto restart_cb; | ||
1316 | } | 1403 | } |
1317 | /* Rechain request on device request queue */ | 1404 | /* call the callback function */ |
1318 | cqr->endclk = get_clock(); | 1405 | cqr->endclk = get_clock(); |
1319 | list_move_tail(&cqr->list, &flush_queue); | ||
1320 | } | ||
1321 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | ||
1322 | /* Now call the callback function of flushed requests */ | ||
1323 | list_for_each_safe(l, n, &flush_queue) { | ||
1324 | cqr = list_entry(l, struct dasd_ccw_req, list); | ||
1325 | if (cqr->callback != NULL) | 1406 | if (cqr->callback != NULL) |
1326 | (cqr->callback)(cqr, cqr->callback_data); | 1407 | (cqr->callback)(cqr, cqr->callback_data); |
1327 | } | 1408 | } |
1409 | return rc; | ||
1328 | } | 1410 | } |
1329 | 1411 | ||
1330 | /* | 1412 | /* |
@@ -1510,10 +1592,8 @@ dasd_sleep_on_interruptible(struct dasd_ccw_req * cqr) | |||
1510 | if (device->discipline->term_IO) { | 1592 | if (device->discipline->term_IO) { |
1511 | cqr->retries = -1; | 1593 | cqr->retries = -1; |
1512 | device->discipline->term_IO(cqr); | 1594 | device->discipline->term_IO(cqr); |
1513 | /*nished = | 1595 | /* wait (non-interruptible) for final status |
1514 | * wait (non-interruptible) for final status | 1596 | * because signal ist still pending */ |
1515 | * because signal ist still pending | ||
1516 | */ | ||
1517 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); | 1597 | spin_unlock_irq(get_ccwdev_lock(device->cdev)); |
1518 | wait_event(wait_q, _wait_for_wakeup(cqr)); | 1598 | wait_event(wait_q, _wait_for_wakeup(cqr)); |
1519 | spin_lock_irq(get_ccwdev_lock(device->cdev)); | 1599 | spin_lock_irq(get_ccwdev_lock(device->cdev)); |
@@ -1546,19 +1626,11 @@ static inline int | |||
1546 | _dasd_term_running_cqr(struct dasd_device *device) | 1626 | _dasd_term_running_cqr(struct dasd_device *device) |
1547 | { | 1627 | { |
1548 | struct dasd_ccw_req *cqr; | 1628 | struct dasd_ccw_req *cqr; |
1549 | int rc; | ||
1550 | 1629 | ||
1551 | if (list_empty(&device->ccw_queue)) | 1630 | if (list_empty(&device->ccw_queue)) |
1552 | return 0; | 1631 | return 0; |
1553 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); | 1632 | cqr = list_entry(device->ccw_queue.next, struct dasd_ccw_req, list); |
1554 | rc = device->discipline->term_IO(cqr); | 1633 | return device->discipline->term_IO(cqr); |
1555 | if (rc == 0) { | ||
1556 | /* termination successful */ | ||
1557 | cqr->status = DASD_CQR_QUEUED; | ||
1558 | cqr->startclk = cqr->stopclk = 0; | ||
1559 | cqr->starttime = 0; | ||
1560 | } | ||
1561 | return rc; | ||
1562 | } | 1634 | } |
1563 | 1635 | ||
1564 | int | 1636 | int |
@@ -1726,12 +1798,9 @@ dasd_flush_request_queue(struct dasd_device * device) | |||
1726 | return; | 1798 | return; |
1727 | 1799 | ||
1728 | spin_lock_irq(&device->request_queue_lock); | 1800 | spin_lock_irq(&device->request_queue_lock); |
1729 | while (!list_empty(&device->request_queue->queue_head)) { | 1801 | while ((req = elv_next_request(device->request_queue))) { |
1730 | req = elv_next_request(device->request_queue); | ||
1731 | if (req == NULL) | ||
1732 | break; | ||
1733 | dasd_end_request(req, 0); | ||
1734 | blkdev_dequeue_request(req); | 1802 | blkdev_dequeue_request(req); |
1803 | dasd_end_request(req, 0); | ||
1735 | } | 1804 | } |
1736 | spin_unlock_irq(&device->request_queue_lock); | 1805 | spin_unlock_irq(&device->request_queue_lock); |
1737 | } | 1806 | } |
@@ -2091,6 +2160,7 @@ dasd_init(void) | |||
2091 | int rc; | 2160 | int rc; |
2092 | 2161 | ||
2093 | init_waitqueue_head(&dasd_init_waitq); | 2162 | init_waitqueue_head(&dasd_init_waitq); |
2163 | init_waitqueue_head(&dasd_flush_wq); | ||
2094 | 2164 | ||
2095 | /* register 'common' DASD debug area, used for all DBF_XXX calls */ | 2165 | /* register 'common' DASD debug area, used for all DBF_XXX calls */ |
2096 | dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long)); | 2166 | dasd_debug_area = debug_register("dasd", 1, 2, 8 * sizeof (long)); |
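
The core of the dasd.c rework is the new flush handshake: dasd_int_handler() wakes dasd_flush_wq once a terminated request has been re-queued by the clear interrupt, and dasd_flush_ccw_queue() sleeps on that event through _wait_for_clear(). For reference, the same kernel wait-queue pattern looks roughly like this in isolation (a minimal sketch only; the flush_demo_* names and the numeric status values are invented here and are not part of the patch):

    #include <linux/wait.h>
    #include <linux/sched.h>

    /* Illustrative request whose status an interrupt handler updates. */
    struct flush_demo_req {
            int status;                     /* e.g. 1 = clear pending, 2 = re-queued */
    };

    static DECLARE_WAIT_QUEUE_HEAD(flush_demo_wq);

    /* Condition helper, same role as _wait_for_clear() in the patch. */
    static inline int flush_demo_cleared(struct flush_demo_req *req)
    {
            return req->status == 2;
    }

    /* Interrupt side: update the status, then wake any waiter. */
    static void flush_demo_irq(struct flush_demo_req *req)
    {
            req->status = 2;
            wake_up(&flush_demo_wq);
    }

    /* Process context: sleep (uninterruptibly) until the interrupt has arrived. */
    static void flush_demo_wait(struct flush_demo_req *req)
    {
            wait_event(flush_demo_wq, flush_demo_cleared(req));
    }
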
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c
index d7295386821c..9af02c79ce8a 100644
--- a/drivers/s390/block/dasd_devmap.c
+++ b/drivers/s390/block/dasd_devmap.c
@@ -48,18 +48,20 @@ struct dasd_devmap { | |||
48 | }; | 48 | }; |
49 | 49 | ||
50 | /* | 50 | /* |
51 | * dasd_servermap is used to store the server_id of all storage servers | 51 | * dasd_server_ssid_map contains a globally unique storage server subsystem ID. |
52 | * accessed by DASD device driver. | 52 | * dasd_server_ssid_list contains the list of all subsystem IDs accessed by |
53 | * the DASD device driver. | ||
53 | */ | 54 | */ |
54 | struct dasd_servermap { | 55 | struct dasd_server_ssid_map { |
55 | struct list_head list; | 56 | struct list_head list; |
56 | struct server_id { | 57 | struct system_id { |
57 | char vendor[4]; | 58 | char vendor[4]; |
58 | char serial[15]; | 59 | char serial[15]; |
60 | __u16 ssid; | ||
59 | } sid; | 61 | } sid; |
60 | }; | 62 | }; |
61 | 63 | ||
62 | static struct list_head dasd_serverlist; | 64 | static struct list_head dasd_server_ssid_list; |
63 | 65 | ||
64 | /* | 66 | /* |
65 | * Parameter parsing functions for dasd= parameter. The syntax is: | 67 | * Parameter parsing functions for dasd= parameter. The syntax is: |
@@ -89,7 +91,7 @@ static char *dasd[256]; | |||
89 | module_param_array(dasd, charp, NULL, 0); | 91 | module_param_array(dasd, charp, NULL, 0); |
90 | 92 | ||
91 | /* | 93 | /* |
92 | * Single spinlock to protect devmap structures and lists. | 94 | * Single spinlock to protect devmap and servermap structures and lists. |
93 | */ | 95 | */ |
94 | static DEFINE_SPINLOCK(dasd_devmap_lock); | 96 | static DEFINE_SPINLOCK(dasd_devmap_lock); |
95 | 97 | ||
@@ -264,8 +266,9 @@ dasd_parse_keyword( char *parsestring ) { | |||
264 | if (dasd_page_cache) | 266 | if (dasd_page_cache) |
265 | return residual_str; | 267 | return residual_str; |
266 | dasd_page_cache = | 268 | dasd_page_cache = |
267 | kmem_cache_create("dasd_page_cache", PAGE_SIZE, 0, | 269 | kmem_cache_create("dasd_page_cache", PAGE_SIZE, |
268 | SLAB_CACHE_DMA, NULL, NULL ); | 270 | PAGE_SIZE, SLAB_CACHE_DMA, |
271 | NULL, NULL ); | ||
269 | if (!dasd_page_cache) | 272 | if (!dasd_page_cache) |
270 | MESSAGE(KERN_WARNING, "%s", "Failed to create slab, " | 273 | MESSAGE(KERN_WARNING, "%s", "Failed to create slab, " |
271 | "fixed buffer mode disabled."); | 274 | "fixed buffer mode disabled."); |
@@ -394,7 +397,7 @@ dasd_add_busid(char *bus_id, int features) | |||
394 | if (!new) | 397 | if (!new) |
395 | return ERR_PTR(-ENOMEM); | 398 | return ERR_PTR(-ENOMEM); |
396 | spin_lock(&dasd_devmap_lock); | 399 | spin_lock(&dasd_devmap_lock); |
397 | devmap = 0; | 400 | devmap = NULL; |
398 | hash = dasd_hash_busid(bus_id); | 401 | hash = dasd_hash_busid(bus_id); |
399 | list_for_each_entry(tmp, &dasd_hashlists[hash], list) | 402 | list_for_each_entry(tmp, &dasd_hashlists[hash], list) |
400 | if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) { | 403 | if (strncmp(tmp->bus_id, bus_id, BUS_ID_SIZE) == 0) { |
@@ -406,10 +409,10 @@ dasd_add_busid(char *bus_id, int features) | |||
406 | new->devindex = dasd_max_devindex++; | 409 | new->devindex = dasd_max_devindex++; |
407 | strncpy(new->bus_id, bus_id, BUS_ID_SIZE); | 410 | strncpy(new->bus_id, bus_id, BUS_ID_SIZE); |
408 | new->features = features; | 411 | new->features = features; |
409 | new->device = 0; | 412 | new->device = NULL; |
410 | list_add(&new->list, &dasd_hashlists[hash]); | 413 | list_add(&new->list, &dasd_hashlists[hash]); |
411 | devmap = new; | 414 | devmap = new; |
412 | new = 0; | 415 | new = NULL; |
413 | } | 416 | } |
414 | spin_unlock(&dasd_devmap_lock); | 417 | spin_unlock(&dasd_devmap_lock); |
415 | kfree(new); | 418 | kfree(new); |
@@ -479,7 +482,7 @@ dasd_device_from_devindex(int devindex) | |||
479 | int i; | 482 | int i; |
480 | 483 | ||
481 | spin_lock(&dasd_devmap_lock); | 484 | spin_lock(&dasd_devmap_lock); |
482 | devmap = 0; | 485 | devmap = NULL; |
483 | for (i = 0; (i < 256) && !devmap; i++) | 486 | for (i = 0; (i < 256) && !devmap; i++) |
484 | list_for_each_entry(tmp, &dasd_hashlists[i], list) | 487 | list_for_each_entry(tmp, &dasd_hashlists[i], list) |
485 | if (tmp->devindex == devindex) { | 488 | if (tmp->devindex == devindex) { |
@@ -859,39 +862,6 @@ static struct attribute_group dasd_attr_group = { | |||
859 | }; | 862 | }; |
860 | 863 | ||
861 | /* | 864 | /* |
862 | * Check if the related storage server is already contained in the | ||
863 | * dasd_serverlist. If server is not contained, create new entry. | ||
864 | * Return 0 if server was already in serverlist, | ||
865 | * 1 if the server was added successfully | ||
866 | * <0 in case of error. | ||
867 | */ | ||
868 | static int | ||
869 | dasd_add_server(struct dasd_uid *uid) | ||
870 | { | ||
871 | struct dasd_servermap *new, *tmp; | ||
872 | |||
873 | /* check if server is already contained */ | ||
874 | list_for_each_entry(tmp, &dasd_serverlist, list) | ||
875 | // normale cmp? | ||
876 | if (strncmp(tmp->sid.vendor, uid->vendor, | ||
877 | sizeof(tmp->sid.vendor)) == 0 | ||
878 | && strncmp(tmp->sid.serial, uid->serial, | ||
879 | sizeof(tmp->sid.serial)) == 0) | ||
880 | return 0; | ||
881 | |||
882 | new = (struct dasd_servermap *) | ||
883 | kzalloc(sizeof(struct dasd_servermap), GFP_KERNEL); | ||
884 | if (!new) | ||
885 | return -ENOMEM; | ||
886 | |||
887 | strncpy(new->sid.vendor, uid->vendor, sizeof(new->sid.vendor)); | ||
888 | strncpy(new->sid.serial, uid->serial, sizeof(new->sid.serial)); | ||
889 | list_add(&new->list, &dasd_serverlist); | ||
890 | return 1; | ||
891 | } | ||
892 | |||
893 | |||
894 | /* | ||
895 | * Return copy of the device unique identifier. | 865 | * Return copy of the device unique identifier. |
896 | */ | 866 | */ |
897 | int | 867 | int |
@@ -910,6 +880,9 @@ dasd_get_uid(struct ccw_device *cdev, struct dasd_uid *uid) | |||
910 | 880 | ||
911 | /* | 881 | /* |
912 | * Register the given device unique identifier into devmap struct. | 882 | * Register the given device unique identifier into devmap struct. |
883 | * In addition check if the related storage server subsystem ID is already | ||
884 | * contained in the dasd_server_ssid_list. If subsystem ID is not contained, | ||
885 | * create new entry. | ||
913 | * Return 0 if server was already in serverlist, | 886 | * Return 0 if server was already in serverlist, |
914 | * 1 if the server was added successful | 887 | * 1 if the server was added successful |
915 | * <0 in case of error. | 888 | * <0 in case of error. |
@@ -918,16 +891,39 @@ int | |||
918 | dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid) | 891 | dasd_set_uid(struct ccw_device *cdev, struct dasd_uid *uid) |
919 | { | 892 | { |
920 | struct dasd_devmap *devmap; | 893 | struct dasd_devmap *devmap; |
921 | int rc; | 894 | struct dasd_server_ssid_map *srv, *tmp; |
922 | 895 | ||
923 | devmap = dasd_find_busid(cdev->dev.bus_id); | 896 | devmap = dasd_find_busid(cdev->dev.bus_id); |
924 | if (IS_ERR(devmap)) | 897 | if (IS_ERR(devmap)) |
925 | return PTR_ERR(devmap); | 898 | return PTR_ERR(devmap); |
899 | |||
900 | /* generate entry for server_ssid_map */ | ||
901 | srv = (struct dasd_server_ssid_map *) | ||
902 | kzalloc(sizeof(struct dasd_server_ssid_map), GFP_KERNEL); | ||
903 | if (!srv) | ||
904 | return -ENOMEM; | ||
905 | strncpy(srv->sid.vendor, uid->vendor, sizeof(srv->sid.vendor) - 1); | ||
906 | strncpy(srv->sid.serial, uid->serial, sizeof(srv->sid.serial) - 1); | ||
907 | srv->sid.ssid = uid->ssid; | ||
908 | |||
909 | /* server is already contained ? */ | ||
926 | spin_lock(&dasd_devmap_lock); | 910 | spin_lock(&dasd_devmap_lock); |
927 | devmap->uid = *uid; | 911 | devmap->uid = *uid; |
928 | rc = dasd_add_server(uid); | 912 | list_for_each_entry(tmp, &dasd_server_ssid_list, list) { |
913 | if (!memcmp(&srv->sid, &tmp->sid, | ||
914 | sizeof(struct system_id))) { | ||
915 | kfree(srv); | ||
916 | srv = NULL; | ||
917 | break; | ||
918 | } | ||
919 | } | ||
920 | |||
921 | /* add servermap to serverlist */ | ||
922 | if (srv) | ||
923 | list_add(&srv->list, &dasd_server_ssid_list); | ||
929 | spin_unlock(&dasd_devmap_lock); | 924 | spin_unlock(&dasd_devmap_lock); |
930 | return rc; | 925 | |
926 | return (srv ? 1 : 0); | ||
931 | } | 927 | } |
932 | EXPORT_SYMBOL_GPL(dasd_set_uid); | 928 | EXPORT_SYMBOL_GPL(dasd_set_uid); |
933 | 929 | ||
@@ -995,7 +991,7 @@ dasd_devmap_init(void) | |||
995 | INIT_LIST_HEAD(&dasd_hashlists[i]); | 991 | INIT_LIST_HEAD(&dasd_hashlists[i]); |
996 | 992 | ||
997 | /* Initialize servermap structure. */ | 993 | /* Initialize servermap structure. */ |
998 | INIT_LIST_HEAD(&dasd_serverlist); | 994 | INIT_LIST_HEAD(&dasd_server_ssid_list); |
999 | return 0; | 995 | return 0; |
1000 | } | 996 | } |
1001 | 997 | ||
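
dasd_set_uid() now pre-allocates the dasd_server_ssid_map entry with GFP_KERNEL before taking dasd_devmap_lock and frees it again if the subsystem ID is already on the list, so no sleeping allocation happens under the spinlock. The same allocate-then-check pattern in generic form (a sketch only; the demo_* names are invented and the return convention merely mirrors the patch):

    #include <linux/types.h>
    #include <linux/list.h>
    #include <linux/slab.h>
    #include <linux/spinlock.h>
    #include <linux/string.h>

    struct demo_id { char vendor[4]; char serial[15]; u16 ssid; };
    struct demo_entry { struct list_head list; struct demo_id id; };

    static LIST_HEAD(demo_list);
    static DEFINE_SPINLOCK(demo_lock);

    /* Returns 1 if a new entry was added, 0 if it already existed, -ENOMEM on error. */
    static int demo_add_unique(const struct demo_id *id)
    {
            struct demo_entry *new, *tmp;

            /* Allocate outside the spinlock; GFP_KERNEL may sleep. */
            new = kzalloc(sizeof(*new), GFP_KERNEL);
            if (!new)
                    return -ENOMEM;
            memcpy(new->id.vendor, id->vendor, sizeof(new->id.vendor));
            memcpy(new->id.serial, id->serial, sizeof(new->id.serial));
            new->id.ssid = id->ssid;

            spin_lock(&demo_lock);
            list_for_each_entry(tmp, &demo_list, list) {
                    /* All entries are kzalloc'ed here, so padding bytes compare equal. */
                    if (!memcmp(&tmp->id, &new->id, sizeof(new->id))) {
                            kfree(new);     /* duplicate: drop the pre-allocated entry */
                            new = NULL;
                            break;
                    }
            }
            if (new)
                    list_add(&new->list, &demo_list);
            spin_unlock(&demo_lock);

            return new ? 1 : 0;
    }
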
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 2e655f466743..b7a7fac3f7c3 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -65,16 +65,16 @@ struct dasd_eckd_private { | |||
65 | /* The ccw bus type uses this table to find devices that it sends to | 65 | /* The ccw bus type uses this table to find devices that it sends to |
66 | * dasd_eckd_probe */ | 66 | * dasd_eckd_probe */ |
67 | static struct ccw_device_id dasd_eckd_ids[] = { | 67 | static struct ccw_device_id dasd_eckd_ids[] = { |
68 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), driver_info: 0x1}, | 68 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3390, 0), .driver_info = 0x1}, |
69 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), driver_info: 0x2}, | 69 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3390, 0), .driver_info = 0x2}, |
70 | { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), driver_info: 0x3}, | 70 | { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3390, 0), .driver_info = 0x3}, |
71 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), driver_info: 0x4}, | 71 | { CCW_DEVICE_DEVTYPE (0x3990, 0, 0x3380, 0), .driver_info = 0x4}, |
72 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), driver_info: 0x5}, | 72 | { CCW_DEVICE_DEVTYPE (0x2105, 0, 0x3380, 0), .driver_info = 0x5}, |
73 | { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), driver_info: 0x6}, | 73 | { CCW_DEVICE_DEVTYPE (0x9343, 0, 0x9345, 0), .driver_info = 0x6}, |
74 | { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), driver_info: 0x7}, | 74 | { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3390, 0), .driver_info = 0x7}, |
75 | { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), driver_info: 0x8}, | 75 | { CCW_DEVICE_DEVTYPE (0x2107, 0, 0x3380, 0), .driver_info = 0x8}, |
76 | { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), driver_info: 0x9}, | 76 | { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3390, 0), .driver_info = 0x9}, |
77 | { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), driver_info: 0xa}, | 77 | { CCW_DEVICE_DEVTYPE (0x1750, 0, 0x3380, 0), .driver_info = 0xa}, |
78 | { /* end of list */ }, | 78 | { /* end of list */ }, |
79 | }; | 79 | }; |
80 | 80 | ||
@@ -468,11 +468,11 @@ dasd_eckd_generate_uid(struct dasd_device *device, struct dasd_uid *uid) | |||
468 | return -ENODEV; | 468 | return -ENODEV; |
469 | 469 | ||
470 | memset(uid, 0, sizeof(struct dasd_uid)); | 470 | memset(uid, 0, sizeof(struct dasd_uid)); |
471 | strncpy(uid->vendor, confdata->ned1.HDA_manufacturer, | 471 | memcpy(uid->vendor, confdata->ned1.HDA_manufacturer, |
472 | sizeof(uid->vendor) - 1); | 472 | sizeof(uid->vendor) - 1); |
473 | EBCASC(uid->vendor, sizeof(uid->vendor) - 1); | 473 | EBCASC(uid->vendor, sizeof(uid->vendor) - 1); |
474 | strncpy(uid->serial, confdata->ned1.HDA_location, | 474 | memcpy(uid->serial, confdata->ned1.HDA_location, |
475 | sizeof(uid->serial) - 1); | 475 | sizeof(uid->serial) - 1); |
476 | EBCASC(uid->serial, sizeof(uid->serial) - 1); | 476 | EBCASC(uid->serial, sizeof(uid->serial) - 1); |
477 | uid->ssid = confdata->neq.subsystemID; | 477 | uid->ssid = confdata->neq.subsystemID; |
478 | if (confdata->ned2.sneq.flags == 0x40) { | 478 | if (confdata->ned2.sneq.flags == 0x40) { |
@@ -607,7 +607,7 @@ dasd_eckd_psf_ssc(struct dasd_device *device) | |||
607 | * Valide storage server of current device. | 607 | * Valide storage server of current device. |
608 | */ | 608 | */ |
609 | static int | 609 | static int |
610 | dasd_eckd_validate_server(struct dasd_device *device) | 610 | dasd_eckd_validate_server(struct dasd_device *device, struct dasd_uid *uid) |
611 | { | 611 | { |
612 | int rc; | 612 | int rc; |
613 | 613 | ||
@@ -616,11 +616,11 @@ dasd_eckd_validate_server(struct dasd_device *device) | |||
616 | return 0; | 616 | return 0; |
617 | 617 | ||
618 | rc = dasd_eckd_psf_ssc(device); | 618 | rc = dasd_eckd_psf_ssc(device); |
619 | if (rc) | 619 | /* may be requested feature is not available on server, |
620 | /* may be requested feature is not available on server, | 620 | * therefore just report error and go ahead */ |
621 | * therefore just report error and go ahead */ | 621 | DEV_MESSAGE(KERN_INFO, device, |
622 | DEV_MESSAGE(KERN_INFO, device, | 622 | "PSF-SSC on storage subsystem %s.%s.%04x returned rc=%d", |
623 | "Perform Subsystem Function returned rc=%d", rc); | 623 | uid->vendor, uid->serial, uid->ssid, rc); |
624 | /* RE-Read Configuration Data */ | 624 | /* RE-Read Configuration Data */ |
625 | return dasd_eckd_read_conf(device); | 625 | return dasd_eckd_read_conf(device); |
626 | } | 626 | } |
@@ -666,7 +666,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device) | |||
666 | return rc; | 666 | return rc; |
667 | rc = dasd_set_uid(device->cdev, &uid); | 667 | rc = dasd_set_uid(device->cdev, &uid); |
668 | if (rc == 1) /* new server found */ | 668 | if (rc == 1) /* new server found */ |
669 | rc = dasd_eckd_validate_server(device); | 669 | rc = dasd_eckd_validate_server(device, &uid); |
670 | if (rc) | 670 | if (rc) |
671 | return rc; | 671 | return rc; |
672 | 672 | ||
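
The dasd_eckd_generate_uid() hunk swaps strncpy() for memcpy() because the configuration-data fields (HDA_manufacturer, HDA_location) are fixed-width, blank-padded EBCDIC records rather than NUL-terminated C strings. A small sketch of why that matters (the demo_* names are invented for illustration):

    #include <linux/string.h>

    struct demo_record {
            char manufacturer[8];   /* fixed width, blank padded, no trailing NUL */
    };

    struct demo_uid {
            char vendor[4];         /* destination stays NUL-terminated via memset */
    };

    static void demo_copy_vendor(struct demo_uid *uid, const struct demo_record *rec)
    {
            memset(uid, 0, sizeof(*uid));
            /*
             * strncpy() would stop early if the raw record happened to contain
             * a 0x00 byte; memcpy() copies exactly sizeof(vendor) - 1 bytes and
             * the memset above keeps the final byte as the terminator.
             */
            memcpy(uid->vendor, rec->manufacturer, sizeof(uid->vendor) - 1);
    }
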
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 808434d38526..e85015be109b 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -44,8 +44,8 @@ struct dasd_fba_private { | |||
44 | }; | 44 | }; |
45 | 45 | ||
46 | static struct ccw_device_id dasd_fba_ids[] = { | 46 | static struct ccw_device_id dasd_fba_ids[] = { |
47 | { CCW_DEVICE_DEVTYPE (0x6310, 0, 0x9336, 0), driver_info: 0x1}, | 47 | { CCW_DEVICE_DEVTYPE (0x6310, 0, 0x9336, 0), .driver_info = 0x1}, |
48 | { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3370, 0), driver_info: 0x2}, | 48 | { CCW_DEVICE_DEVTYPE (0x3880, 0, 0x3370, 0), .driver_info = 0x2}, |
49 | { /* end of list */ }, | 49 | { /* end of list */ }, |
50 | }; | 50 | }; |
51 | 51 | ||
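
As in dasd_eckd_ids[] above, the dasd_fba_ids[] table drops the obsolete GCC-only "field:" initializer spelling in favour of standard C99 designated initializers. The general form (demo struct invented for illustration):

    struct demo_dev_id {
            unsigned int cu_type;
            unsigned int dev_type;
            unsigned long driver_info;
    };

    /* Old GCC-specific spelling:  { 0x6310, 0x9336, driver_info: 0x1 } */
    static const struct demo_dev_id demo_ids[] = {
            { .cu_type = 0x6310, .dev_type = 0x9336, .driver_info = 0x1 },
            { .cu_type = 0x3880, .dev_type = 0x3370, .driver_info = 0x2 },
            { /* end of list */ },
    };
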
diff --git a/drivers/s390/block/dasd_genhd.c b/drivers/s390/block/dasd_genhd.c
index 12c7d296eaa8..d163632101d2 100644
--- a/drivers/s390/block/dasd_genhd.c
+++ b/drivers/s390/block/dasd_genhd.c
@@ -83,10 +83,12 @@ dasd_gendisk_alloc(struct dasd_device *device) | |||
83 | void | 83 | void |
84 | dasd_gendisk_free(struct dasd_device *device) | 84 | dasd_gendisk_free(struct dasd_device *device) |
85 | { | 85 | { |
86 | del_gendisk(device->gdp); | 86 | if (device->gdp) { |
87 | device->gdp->queue = 0; | 87 | del_gendisk(device->gdp); |
88 | put_disk(device->gdp); | 88 | device->gdp->queue = NULL; |
89 | device->gdp = 0; | 89 | put_disk(device->gdp); |
90 | device->gdp = NULL; | ||
91 | } | ||
90 | } | 92 | } |
91 | 93 | ||
92 | /* | 94 | /* |
@@ -136,7 +138,7 @@ dasd_destroy_partitions(struct dasd_device * device) | |||
136 | * device->bdev to lower the offline open_count limit again. | 138 | * device->bdev to lower the offline open_count limit again. |
137 | */ | 139 | */ |
138 | bdev = device->bdev; | 140 | bdev = device->bdev; |
139 | device->bdev = 0; | 141 | device->bdev = NULL; |
140 | 142 | ||
141 | /* | 143 | /* |
142 | * See fs/partition/check.c:delete_partition | 144 | * See fs/partition/check.c:delete_partition |
@@ -145,7 +147,7 @@ dasd_destroy_partitions(struct dasd_device * device) | |||
145 | */ | 147 | */ |
146 | memset(&bpart, 0, sizeof(struct blkpg_partition)); | 148 | memset(&bpart, 0, sizeof(struct blkpg_partition)); |
147 | memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); | 149 | memset(&barg, 0, sizeof(struct blkpg_ioctl_arg)); |
148 | barg.data = &bpart; | 150 | barg.data = (void __user *) &bpart; |
149 | barg.op = BLKPG_DEL_PARTITION; | 151 | barg.op = BLKPG_DEL_PARTITION; |
150 | for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--) | 152 | for (bpart.pno = device->gdp->minors - 1; bpart.pno > 0; bpart.pno--) |
151 | ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); | 153 | ioctl_by_bdev(bdev, BLKPG, (unsigned long) &barg); |
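
dasd_gendisk_free() is now safe to call on error paths where no gendisk was ever allocated, and safe to call twice, because it checks the pointer and resets it to NULL after releasing the disk. The same defensive teardown pattern in generic form (a sketch; demo_* names are invented):

    #include <linux/slab.h>

    struct demo_device {
            void *resource;         /* whatever was allocated during setup */
    };

    /* Safe from any error path and safe to call more than once. */
    static void demo_free_resource(struct demo_device *dev)
    {
            if (dev->resource) {
                    kfree(dev->resource);
                    dev->resource = NULL;   /* prevents a double free on a later call */
            }
    }
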
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c
index e97f5316ad2d..8fed3603e9ea 100644
--- a/drivers/s390/block/dasd_ioctl.c
+++ b/drivers/s390/block/dasd_ioctl.c
@@ -345,7 +345,7 @@ dasd_ioctl_set_ro(struct block_device *bdev, void __user *argp) | |||
345 | if (bdev != bdev->bd_contains) | 345 | if (bdev != bdev->bd_contains) |
346 | // ro setting is not allowed for partitions | 346 | // ro setting is not allowed for partitions |
347 | return -EINVAL; | 347 | return -EINVAL; |
348 | if (get_user(intval, (int *)argp)) | 348 | if (get_user(intval, (int __user *)argp)) |
349 | return -EFAULT; | 349 | return -EFAULT; |
350 | 350 | ||
351 | set_disk_ro(bdev->bd_disk, intval); | 351 | set_disk_ro(bdev->bd_disk, intval); |
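
The get_user() cast now carries the __user annotation, which lets sparse verify that user-space pointers are only dereferenced through the uaccess helpers. A minimal sketch of that convention (demo_ioctl_set_flag and demo_flag are invented, not part of the driver):

    #include <asm/uaccess.h>
    #include <linux/errno.h>

    static int demo_flag;

    static int demo_ioctl_set_flag(void __user *argp)
    {
            int val;

            /* get_user() copies from user space safely; a plain *argp would not. */
            if (get_user(val, (int __user *) argp))
                    return -EFAULT;
            demo_flag = val;
            return 0;
    }
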
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 4c1e56b9b98d..ca7d51f7eccc 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -48,15 +48,6 @@ | |||
48 | #define PRINT_ERR(x...) printk(KERN_ERR XPRAM_NAME " error:" x) | 48 | #define PRINT_ERR(x...) printk(KERN_ERR XPRAM_NAME " error:" x) |
49 | 49 | ||
50 | 50 | ||
51 | static struct sysdev_class xpram_sysclass = { | ||
52 | set_kset_name("xpram"), | ||
53 | }; | ||
54 | |||
55 | static struct sys_device xpram_sys_device = { | ||
56 | .id = 0, | ||
57 | .cls = &xpram_sysclass, | ||
58 | }; | ||
59 | |||
60 | typedef struct { | 51 | typedef struct { |
61 | unsigned int size; /* size of xpram segment in pages */ | 52 | unsigned int size; /* size of xpram segment in pages */ |
62 | unsigned int offset; /* start page of xpram segment */ | 53 | unsigned int offset; /* start page of xpram segment */ |
@@ -71,11 +62,11 @@ static int xpram_devs; | |||
71 | /* | 62 | /* |
72 | * Parameter parsing functions. | 63 | * Parameter parsing functions. |
73 | */ | 64 | */ |
74 | static int devs = XPRAM_DEVS; | 65 | static int __initdata devs = XPRAM_DEVS; |
75 | static unsigned int sizes[XPRAM_MAX_DEVS]; | 66 | static char __initdata *sizes[XPRAM_MAX_DEVS]; |
76 | 67 | ||
77 | module_param(devs, int, 0); | 68 | module_param(devs, int, 0); |
78 | module_param_array(sizes, int, NULL, 0); | 69 | module_param_array(sizes, charp, NULL, 0); |
79 | 70 | ||
80 | MODULE_PARM_DESC(devs, "number of devices (\"partitions\"), " \ | 71 | MODULE_PARM_DESC(devs, "number of devices (\"partitions\"), " \ |
81 | "the default is " __MODULE_STRING(XPRAM_DEVS) "\n"); | 72 | "the default is " __MODULE_STRING(XPRAM_DEVS) "\n"); |
@@ -86,59 +77,6 @@ MODULE_PARM_DESC(sizes, "list of device (partition) sizes " \ | |||
86 | "claimed by explicit sizes\n"); | 77 | "claimed by explicit sizes\n"); |
87 | MODULE_LICENSE("GPL"); | 78 | MODULE_LICENSE("GPL"); |
88 | 79 | ||
89 | #ifndef MODULE | ||
90 | /* | ||
91 | * Parses the kernel parameters given in the kernel parameter line. | ||
92 | * The expected format is | ||
93 | * <number_of_partitions>[","<partition_size>]* | ||
94 | * where | ||
95 | * devices is a positive integer that initializes xpram_devs | ||
96 | * each size is a non-negative integer possibly followed by a | ||
97 | * magnitude (k,K,m,M,g,G), the list of sizes initialises | ||
98 | * xpram_sizes | ||
99 | * | ||
100 | * Arguments | ||
101 | * str: substring of kernel parameter line that contains xprams | ||
102 | * kernel parameters. | ||
103 | * | ||
104 | * Result 0 on success, -EINVAL else -- only for Version > 2.3 | ||
105 | * | ||
106 | * Side effects | ||
107 | * the global variabls devs is set to the value of | ||
108 | * <number_of_partitions> and sizes[i] is set to the i-th | ||
109 | * partition size (if provided). A parsing error of a value | ||
110 | * results in this value being set to -EINVAL. | ||
111 | */ | ||
112 | static int __init xpram_setup (char *str) | ||
113 | { | ||
114 | char *cp; | ||
115 | int i; | ||
116 | |||
117 | devs = simple_strtoul(str, &cp, 10); | ||
118 | if (cp <= str || devs > XPRAM_MAX_DEVS) | ||
119 | return 0; | ||
120 | for (i = 0; (i < devs) && (*cp++ == ','); i++) { | ||
121 | sizes[i] = simple_strtoul(cp, &cp, 10); | ||
122 | if (*cp == 'g' || *cp == 'G') { | ||
123 | sizes[i] <<= 20; | ||
124 | cp++; | ||
125 | } else if (*cp == 'm' || *cp == 'M') { | ||
126 | sizes[i] <<= 10; | ||
127 | cp++; | ||
128 | } else if (*cp == 'k' || *cp == 'K') | ||
129 | cp++; | ||
130 | while (isspace(*cp)) cp++; | ||
131 | } | ||
132 | if (*cp == ',' && i >= devs) | ||
133 | PRINT_WARN("partition sizes list has too many entries.\n"); | ||
134 | else if (*cp != 0) | ||
135 | PRINT_WARN("ignored '%s' at end of parameter string.\n", cp); | ||
136 | return 1; | ||
137 | } | ||
138 | |||
139 | __setup("xpram_parts=", xpram_setup); | ||
140 | #endif | ||
141 | |||
142 | /* | 80 | /* |
143 | * Copy expanded memory page (4kB) into main memory | 81 | * Copy expanded memory page (4kB) into main memory |
144 | * Arguments | 82 | * Arguments |
@@ -357,6 +295,7 @@ static int __init xpram_setup_sizes(unsigned long pages) | |||
357 | { | 295 | { |
358 | unsigned long mem_needed; | 296 | unsigned long mem_needed; |
359 | unsigned long mem_auto; | 297 | unsigned long mem_auto; |
298 | unsigned long long size; | ||
360 | int mem_auto_no; | 299 | int mem_auto_no; |
361 | int i; | 300 | int i; |
362 | 301 | ||
@@ -374,7 +313,19 @@ static int __init xpram_setup_sizes(unsigned long pages) | |||
374 | mem_needed = 0; | 313 | mem_needed = 0; |
375 | mem_auto_no = 0; | 314 | mem_auto_no = 0; |
376 | for (i = 0; i < xpram_devs; i++) { | 315 | for (i = 0; i < xpram_devs; i++) { |
377 | xpram_sizes[i] = (sizes[i] + 3) & -4UL; | 316 | if (sizes[i]) { |
317 | size = simple_strtoull(sizes[i], &sizes[i], 0); | ||
318 | switch (sizes[i][0]) { | ||
319 | case 'g': | ||
320 | case 'G': | ||
321 | size <<= 20; | ||
322 | break; | ||
323 | case 'm': | ||
324 | case 'M': | ||
325 | size <<= 10; | ||
326 | } | ||
327 | xpram_sizes[i] = (size + 3) & -4UL; | ||
328 | } | ||
378 | if (xpram_sizes[i]) | 329 | if (xpram_sizes[i]) |
379 | mem_needed += xpram_sizes[i]; | 330 | mem_needed += xpram_sizes[i]; |
380 | else | 331 | else |
@@ -491,8 +442,6 @@ static void __exit xpram_exit(void) | |||
491 | } | 442 | } |
492 | unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME); | 443 | unregister_blkdev(XPRAM_MAJOR, XPRAM_NAME); |
493 | blk_cleanup_queue(xpram_queue); | 444 | blk_cleanup_queue(xpram_queue); |
494 | sysdev_unregister(&xpram_sys_device); | ||
495 | sysdev_class_unregister(&xpram_sysclass); | ||
496 | } | 445 | } |
497 | 446 | ||
498 | static int __init xpram_init(void) | 447 | static int __init xpram_init(void) |
@@ -510,19 +459,7 @@ static int __init xpram_init(void) | |||
510 | rc = xpram_setup_sizes(xpram_pages); | 459 | rc = xpram_setup_sizes(xpram_pages); |
511 | if (rc) | 460 | if (rc) |
512 | return rc; | 461 | return rc; |
513 | rc = sysdev_class_register(&xpram_sysclass); | 462 | return xpram_setup_blkdev(); |
514 | if (rc) | ||
515 | return rc; | ||
516 | |||
517 | rc = sysdev_register(&xpram_sys_device); | ||
518 | if (rc) { | ||
519 | sysdev_class_unregister(&xpram_sysclass); | ||
520 | return rc; | ||
521 | } | ||
522 | rc = xpram_setup_blkdev(); | ||
523 | if (rc) | ||
524 | sysdev_unregister(&xpram_sys_device); | ||
525 | return rc; | ||
526 | } | 463 | } |
527 | 464 | ||
528 | module_init(xpram_init); | 465 | module_init(xpram_init); |
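
With the removal of the hand-rolled xpram_setup() parser, the per-device sizes arrive as strings via module_param_array(sizes, charp, ...) and the magnitude suffix is parsed in xpram_setup_sizes(). A reduced sketch of that parsing step (demo_parse_kb is an invented name; values are kept in kilobytes, which is why 'G' shifts by 20 and 'M' by 10):

    #include <linux/kernel.h>       /* simple_strtoull() */

    /* Parse "<number>[kKmMgG]" into kilobytes. */
    static unsigned long long demo_parse_kb(const char *str)
    {
            char *end;
            unsigned long long size;

            size = simple_strtoull(str, &end, 0);
            switch (*end) {
            case 'g':
            case 'G':
                    size <<= 20;    /* gigabytes -> kilobytes */
                    break;
            case 'm':
            case 'M':
                    size <<= 10;    /* megabytes -> kilobytes */
                    break;
            default:
                    break;          /* 'k'/'K' or no suffix: already kilobytes */
            }
            return size;
    }
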
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c
index f25c6d116f6f..2fa566fa6da4 100644
--- a/drivers/s390/char/con3215.c
+++ b/drivers/s390/char/con3215.c
@@ -693,7 +693,7 @@ raw3215_probe (struct ccw_device *cdev) | |||
693 | GFP_KERNEL|GFP_DMA); | 693 | GFP_KERNEL|GFP_DMA); |
694 | if (raw->buffer == NULL) { | 694 | if (raw->buffer == NULL) { |
695 | spin_lock(&raw3215_device_lock); | 695 | spin_lock(&raw3215_device_lock); |
696 | raw3215[line] = 0; | 696 | raw3215[line] = NULL; |
697 | spin_unlock(&raw3215_device_lock); | 697 | spin_unlock(&raw3215_device_lock); |
698 | kfree(raw); | 698 | kfree(raw); |
699 | return -ENOMEM; | 699 | return -ENOMEM; |
diff --git a/drivers/s390/char/ctrlchar.c b/drivers/s390/char/ctrlchar.c
index 0ea6f36a2527..d83eb6358bac 100644
--- a/drivers/s390/char/ctrlchar.c
+++ b/drivers/s390/char/ctrlchar.c
@@ -23,7 +23,7 @@ ctrlchar_handle_sysrq(void *tty) | |||
23 | handle_sysrq(ctrlchar_sysrq_key, NULL, (struct tty_struct *) tty); | 23 | handle_sysrq(ctrlchar_sysrq_key, NULL, (struct tty_struct *) tty); |
24 | } | 24 | } |
25 | 25 | ||
26 | static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq, 0); | 26 | static DECLARE_WORK(ctrlchar_work, ctrlchar_handle_sysrq, NULL); |
27 | #endif | 27 | #endif |
28 | 28 | ||
29 | 29 | ||
diff --git a/drivers/s390/char/defkeymap.c b/drivers/s390/char/defkeymap.c
index ca15adb140d1..17027d918cf7 100644
--- a/drivers/s390/char/defkeymap.c
+++ b/drivers/s390/char/defkeymap.c
@@ -83,8 +83,8 @@ static u_short shift_ctrl_map[NR_KEYS] = { | |||
83 | }; | 83 | }; |
84 | 84 | ||
85 | ushort *key_maps[MAX_NR_KEYMAPS] = { | 85 | ushort *key_maps[MAX_NR_KEYMAPS] = { |
86 | plain_map, shift_map, 0, 0, | 86 | plain_map, shift_map, NULL, NULL, |
87 | ctrl_map, shift_ctrl_map, 0 | 87 | ctrl_map, shift_ctrl_map, NULL, |
88 | }; | 88 | }; |
89 | 89 | ||
90 | unsigned int keymap_count = 4; | 90 | unsigned int keymap_count = 4; |
@@ -145,7 +145,7 @@ char *func_table[MAX_NR_FUNC] = { | |||
145 | func_buf + 97, | 145 | func_buf + 97, |
146 | func_buf + 103, | 146 | func_buf + 103, |
147 | func_buf + 109, | 147 | func_buf + 109, |
148 | 0, | 148 | NULL, |
149 | }; | 149 | }; |
150 | 150 | ||
151 | struct kbdiacr accent_table[MAX_DIACR] = { | 151 | struct kbdiacr accent_table[MAX_DIACR] = { |
diff --git a/drivers/s390/char/fs3270.c b/drivers/s390/char/fs3270.c
index 6099c14de429..ef004d089712 100644
--- a/drivers/s390/char/fs3270.c
+++ b/drivers/s390/char/fs3270.c
@@ -236,7 +236,7 @@ fs3270_irq(struct fs3270 *fp, struct raw3270_request *rq, struct irb *irb) | |||
236 | * Process reads from fullscreen 3270. | 236 | * Process reads from fullscreen 3270. |
237 | */ | 237 | */ |
238 | static ssize_t | 238 | static ssize_t |
239 | fs3270_read(struct file *filp, char *data, size_t count, loff_t *off) | 239 | fs3270_read(struct file *filp, char __user *data, size_t count, loff_t *off) |
240 | { | 240 | { |
241 | struct fs3270 *fp; | 241 | struct fs3270 *fp; |
242 | struct raw3270_request *rq; | 242 | struct raw3270_request *rq; |
@@ -281,7 +281,7 @@ fs3270_read(struct file *filp, char *data, size_t count, loff_t *off) | |||
281 | * Process writes to fullscreen 3270. | 281 | * Process writes to fullscreen 3270. |
282 | */ | 282 | */ |
283 | static ssize_t | 283 | static ssize_t |
284 | fs3270_write(struct file *filp, const char *data, size_t count, loff_t *off) | 284 | fs3270_write(struct file *filp, const char __user *data, size_t count, loff_t *off) |
285 | { | 285 | { |
286 | struct fs3270 *fp; | 286 | struct fs3270 *fp; |
287 | struct raw3270_request *rq; | 287 | struct raw3270_request *rq; |
@@ -338,10 +338,10 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
338 | fp->write_command = arg; | 338 | fp->write_command = arg; |
339 | break; | 339 | break; |
340 | case TUBGETI: | 340 | case TUBGETI: |
341 | rc = put_user(fp->read_command, (char *) arg); | 341 | rc = put_user(fp->read_command, (char __user *) arg); |
342 | break; | 342 | break; |
343 | case TUBGETO: | 343 | case TUBGETO: |
344 | rc = put_user(fp->write_command,(char *) arg); | 344 | rc = put_user(fp->write_command,(char __user *) arg); |
345 | break; | 345 | break; |
346 | case TUBGETMOD: | 346 | case TUBGETMOD: |
347 | iocb.model = fp->view.model; | 347 | iocb.model = fp->view.model; |
@@ -350,7 +350,7 @@ fs3270_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
350 | iocb.pf_cnt = 24; | 350 | iocb.pf_cnt = 24; |
351 | iocb.re_cnt = 20; | 351 | iocb.re_cnt = 20; |
352 | iocb.map = 0; | 352 | iocb.map = 0; |
353 | if (copy_to_user((char *) arg, &iocb, | 353 | if (copy_to_user((char __user *) arg, &iocb, |
354 | sizeof(struct raw3270_iocb))) | 354 | sizeof(struct raw3270_iocb))) |
355 | rc = -EFAULT; | 355 | rc = -EFAULT; |
356 | break; | 356 | break; |
@@ -479,7 +479,7 @@ fs3270_close(struct inode *inode, struct file *filp) | |||
479 | struct fs3270 *fp; | 479 | struct fs3270 *fp; |
480 | 480 | ||
481 | fp = filp->private_data; | 481 | fp = filp->private_data; |
482 | filp->private_data = 0; | 482 | filp->private_data = NULL; |
483 | if (fp) { | 483 | if (fp) { |
484 | fp->fs_pid = 0; | 484 | fp->fs_pid = 0; |
485 | raw3270_reset(&fp->view); | 485 | raw3270_reset(&fp->view); |
diff --git a/drivers/s390/char/keyboard.c b/drivers/s390/char/keyboard.c
index 547ef906ae2c..3be06569180d 100644
--- a/drivers/s390/char/keyboard.c
+++ b/drivers/s390/char/keyboard.c
@@ -103,7 +103,7 @@ out_maps: | |||
103 | out_kbd: | 103 | out_kbd: |
104 | kfree(kbd); | 104 | kfree(kbd); |
105 | out: | 105 | out: |
106 | return 0; | 106 | return NULL; |
107 | } | 107 | } |
108 | 108 | ||
109 | void | 109 | void |
@@ -304,7 +304,7 @@ kbd_keycode(struct kbd_data *kbd, unsigned int keycode) | |||
304 | if (kbd->sysrq) { | 304 | if (kbd->sysrq) { |
305 | if (kbd->sysrq == K(KT_LATIN, '-')) { | 305 | if (kbd->sysrq == K(KT_LATIN, '-')) { |
306 | kbd->sysrq = 0; | 306 | kbd->sysrq = 0; |
307 | handle_sysrq(value, 0, kbd->tty); | 307 | handle_sysrq(value, NULL, kbd->tty); |
308 | return; | 308 | return; |
309 | } | 309 | } |
310 | if (value == '-') { | 310 | if (value == '-') { |
@@ -363,7 +363,7 @@ do_kdsk_ioctl(struct kbd_data *kbd, struct kbentry __user *user_kbe, | |||
363 | /* disallocate map */ | 363 | /* disallocate map */ |
364 | key_map = kbd->key_maps[tmp.kb_table]; | 364 | key_map = kbd->key_maps[tmp.kb_table]; |
365 | if (key_map) { | 365 | if (key_map) { |
366 | kbd->key_maps[tmp.kb_table] = 0; | 366 | kbd->key_maps[tmp.kb_table] = NULL; |
367 | kfree(key_map); | 367 | kfree(key_map); |
368 | } | 368 | } |
369 | break; | 369 | break; |
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c
index e95b56f810db..7a84014f2037 100644
--- a/drivers/s390/char/raw3270.c
+++ b/drivers/s390/char/raw3270.c
@@ -555,7 +555,7 @@ raw3270_start_init(struct raw3270 *rp, struct raw3270_view *view, | |||
555 | #ifdef CONFIG_TN3270_CONSOLE | 555 | #ifdef CONFIG_TN3270_CONSOLE |
556 | if (raw3270_registered == 0) { | 556 | if (raw3270_registered == 0) { |
557 | spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); | 557 | spin_lock_irqsave(get_ccwdev_lock(view->dev->cdev), flags); |
558 | rq->callback = 0; | 558 | rq->callback = NULL; |
559 | rc = __raw3270_start(rp, view, rq); | 559 | rc = __raw3270_start(rp, view, rq); |
560 | if (rc == 0) | 560 | if (rc == 0) |
561 | while (!raw3270_request_final(rq)) { | 561 | while (!raw3270_request_final(rq)) { |
@@ -719,8 +719,8 @@ raw3270_size_device(struct raw3270 *rp) | |||
719 | rc = __raw3270_size_device_vm(rp); | 719 | rc = __raw3270_size_device_vm(rp); |
720 | else | 720 | else |
721 | rc = __raw3270_size_device(rp); | 721 | rc = __raw3270_size_device(rp); |
722 | raw3270_init_view.dev = 0; | 722 | raw3270_init_view.dev = NULL; |
723 | rp->view = 0; | 723 | rp->view = NULL; |
724 | up(&raw3270_init_sem); | 724 | up(&raw3270_init_sem); |
725 | if (rc == 0) { /* Found something. */ | 725 | if (rc == 0) { /* Found something. */ |
726 | /* Try to find a model. */ | 726 | /* Try to find a model. */ |
@@ -761,8 +761,8 @@ raw3270_reset_device(struct raw3270 *rp) | |||
761 | rp->view = &raw3270_init_view; | 761 | rp->view = &raw3270_init_view; |
762 | raw3270_init_view.dev = rp; | 762 | raw3270_init_view.dev = rp; |
763 | rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request); | 763 | rc = raw3270_start_init(rp, &raw3270_init_view, &rp->init_request); |
764 | raw3270_init_view.dev = 0; | 764 | raw3270_init_view.dev = NULL; |
765 | rp->view = 0; | 765 | rp->view = NULL; |
766 | up(&raw3270_init_sem); | 766 | up(&raw3270_init_sem); |
767 | return rc; | 767 | return rc; |
768 | } | 768 | } |
@@ -934,7 +934,7 @@ raw3270_activate_view(struct raw3270_view *view) | |||
934 | else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) | 934 | else if (!test_bit(RAW3270_FLAGS_READY, &rp->flags)) |
935 | rc = -ENODEV; | 935 | rc = -ENODEV; |
936 | else { | 936 | else { |
937 | oldview = 0; | 937 | oldview = NULL; |
938 | if (rp->view) { | 938 | if (rp->view) { |
939 | oldview = rp->view; | 939 | oldview = rp->view; |
940 | oldview->fn->deactivate(oldview); | 940 | oldview->fn->deactivate(oldview); |
@@ -951,7 +951,7 @@ raw3270_activate_view(struct raw3270_view *view) | |||
951 | rp->view = nv; | 951 | rp->view = nv; |
952 | if (nv->fn->activate(nv) == 0) | 952 | if (nv->fn->activate(nv) == 0) |
953 | break; | 953 | break; |
954 | rp->view = 0; | 954 | rp->view = NULL; |
955 | } | 955 | } |
956 | } | 956 | } |
957 | } | 957 | } |
@@ -975,7 +975,7 @@ raw3270_deactivate_view(struct raw3270_view *view) | |||
975 | spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); | 975 | spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); |
976 | if (rp->view == view) { | 976 | if (rp->view == view) { |
977 | view->fn->deactivate(view); | 977 | view->fn->deactivate(view); |
978 | rp->view = 0; | 978 | rp->view = NULL; |
979 | /* Move deactivated view to end of list. */ | 979 | /* Move deactivated view to end of list. */ |
980 | list_del_init(&view->list); | 980 | list_del_init(&view->list); |
981 | list_add_tail(&view->list, &rp->view_list); | 981 | list_add_tail(&view->list, &rp->view_list); |
@@ -985,7 +985,7 @@ raw3270_deactivate_view(struct raw3270_view *view) | |||
985 | rp->view = view; | 985 | rp->view = view; |
986 | if (view->fn->activate(view) == 0) | 986 | if (view->fn->activate(view) == 0) |
987 | break; | 987 | break; |
988 | rp->view = 0; | 988 | rp->view = NULL; |
989 | } | 989 | } |
990 | } | 990 | } |
991 | } | 991 | } |
@@ -1076,7 +1076,7 @@ raw3270_del_view(struct raw3270_view *view) | |||
1076 | spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); | 1076 | spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); |
1077 | if (rp->view == view) { | 1077 | if (rp->view == view) { |
1078 | view->fn->deactivate(view); | 1078 | view->fn->deactivate(view); |
1079 | rp->view = 0; | 1079 | rp->view = NULL; |
1080 | } | 1080 | } |
1081 | list_del_init(&view->list); | 1081 | list_del_init(&view->list); |
1082 | if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags)) { | 1082 | if (!rp->view && test_bit(RAW3270_FLAGS_READY, &rp->flags)) { |
@@ -1106,10 +1106,10 @@ raw3270_delete_device(struct raw3270 *rp) | |||
1106 | 1106 | ||
1107 | /* Remove from device chain. */ | 1107 | /* Remove from device chain. */ |
1108 | mutex_lock(&raw3270_mutex); | 1108 | mutex_lock(&raw3270_mutex); |
1109 | if (rp->clttydev) | 1109 | if (rp->clttydev && !IS_ERR(rp->clttydev)) |
1110 | class_device_destroy(class3270, | 1110 | class_device_destroy(class3270, |
1111 | MKDEV(IBM_TTY3270_MAJOR, rp->minor)); | 1111 | MKDEV(IBM_TTY3270_MAJOR, rp->minor)); |
1112 | if (rp->cltubdev) | 1112 | if (rp->cltubdev && !IS_ERR(rp->cltubdev)) |
1113 | class_device_destroy(class3270, | 1113 | class_device_destroy(class3270, |
1114 | MKDEV(IBM_FS3270_MAJOR, rp->minor)); | 1114 | MKDEV(IBM_FS3270_MAJOR, rp->minor)); |
1115 | list_del_init(&rp->list); | 1115 | list_del_init(&rp->list); |
@@ -1117,9 +1117,9 @@ raw3270_delete_device(struct raw3270 *rp) | |||
1117 | 1117 | ||
1118 | /* Disconnect from ccw_device. */ | 1118 | /* Disconnect from ccw_device. */ |
1119 | cdev = rp->cdev; | 1119 | cdev = rp->cdev; |
1120 | rp->cdev = 0; | 1120 | rp->cdev = NULL; |
1121 | cdev->dev.driver_data = 0; | 1121 | cdev->dev.driver_data = NULL; |
1122 | cdev->handler = 0; | 1122 | cdev->handler = NULL; |
1123 | 1123 | ||
1124 | /* Put ccw_device structure. */ | 1124 | /* Put ccw_device structure. */ |
1125 | put_device(&cdev->dev); | 1125 | put_device(&cdev->dev); |
@@ -1144,7 +1144,7 @@ raw3270_model_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
1144 | return snprintf(buf, PAGE_SIZE, "%i\n", | 1144 | return snprintf(buf, PAGE_SIZE, "%i\n", |
1145 | ((struct raw3270 *) dev->driver_data)->model); | 1145 | ((struct raw3270 *) dev->driver_data)->model); |
1146 | } | 1146 | } |
1147 | static DEVICE_ATTR(model, 0444, raw3270_model_show, 0); | 1147 | static DEVICE_ATTR(model, 0444, raw3270_model_show, NULL); |
1148 | 1148 | ||
1149 | static ssize_t | 1149 | static ssize_t |
1150 | raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf) | 1150 | raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf) |
@@ -1152,7 +1152,7 @@ raw3270_rows_show(struct device *dev, struct device_attribute *attr, char *buf) | |||
1152 | return snprintf(buf, PAGE_SIZE, "%i\n", | 1152 | return snprintf(buf, PAGE_SIZE, "%i\n", |
1153 | ((struct raw3270 *) dev->driver_data)->rows); | 1153 | ((struct raw3270 *) dev->driver_data)->rows); |
1154 | } | 1154 | } |
1155 | static DEVICE_ATTR(rows, 0444, raw3270_rows_show, 0); | 1155 | static DEVICE_ATTR(rows, 0444, raw3270_rows_show, NULL); |
1156 | 1156 | ||
1157 | static ssize_t | 1157 | static ssize_t |
1158 | raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf) | 1158 | raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *buf) |
@@ -1160,7 +1160,7 @@ raw3270_columns_show(struct device *dev, struct device_attribute *attr, char *bu | |||
1160 | return snprintf(buf, PAGE_SIZE, "%i\n", | 1160 | return snprintf(buf, PAGE_SIZE, "%i\n", |
1161 | ((struct raw3270 *) dev->driver_data)->cols); | 1161 | ((struct raw3270 *) dev->driver_data)->cols); |
1162 | } | 1162 | } |
1163 | static DEVICE_ATTR(columns, 0444, raw3270_columns_show, 0); | 1163 | static DEVICE_ATTR(columns, 0444, raw3270_columns_show, NULL); |
1164 | 1164 | ||
1165 | static struct attribute * raw3270_attrs[] = { | 1165 | static struct attribute * raw3270_attrs[] = { |
1166 | &dev_attr_model.attr, | 1166 | &dev_attr_model.attr, |
@@ -1173,21 +1173,37 @@ static struct attribute_group raw3270_attr_group = { | |||
1173 | .attrs = raw3270_attrs, | 1173 | .attrs = raw3270_attrs, |
1174 | }; | 1174 | }; |
1175 | 1175 | ||
1176 | static void | 1176 | static int raw3270_create_attributes(struct raw3270 *rp) |
1177 | raw3270_create_attributes(struct raw3270 *rp) | ||
1178 | { | 1177 | { |
1179 | //FIXME: check return code | 1178 | int rc; |
1180 | sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); | 1179 | |
1181 | rp->clttydev = | 1180 | rc = sysfs_create_group(&rp->cdev->dev.kobj, &raw3270_attr_group); |
1182 | class_device_create(class3270, NULL, | 1181 | if (rc) |
1183 | MKDEV(IBM_TTY3270_MAJOR, rp->minor), | 1182 | goto out; |
1184 | &rp->cdev->dev, "tty%s", | 1183 | |
1185 | rp->cdev->dev.bus_id); | 1184 | rp->clttydev = class_device_create(class3270, NULL, |
1186 | rp->cltubdev = | 1185 | MKDEV(IBM_TTY3270_MAJOR, rp->minor), |
1187 | class_device_create(class3270, NULL, | 1186 | &rp->cdev->dev, "tty%s", |
1188 | MKDEV(IBM_FS3270_MAJOR, rp->minor), | 1187 | rp->cdev->dev.bus_id); |
1189 | &rp->cdev->dev, "tub%s", | 1188 | if (IS_ERR(rp->clttydev)) { |
1190 | rp->cdev->dev.bus_id); | 1189 | rc = PTR_ERR(rp->clttydev); |
1190 | goto out_ttydev; | ||
1191 | } | ||
1192 | |||
1193 | rp->cltubdev = class_device_create(class3270, NULL, | ||
1194 | MKDEV(IBM_FS3270_MAJOR, rp->minor), | ||
1195 | &rp->cdev->dev, "tub%s", | ||
1196 | rp->cdev->dev.bus_id); | ||
1197 | if (!IS_ERR(rp->cltubdev)) | ||
1198 | goto out; | ||
1199 | |||
1200 | rc = PTR_ERR(rp->cltubdev); | ||
1201 | class_device_destroy(class3270, MKDEV(IBM_TTY3270_MAJOR, rp->minor)); | ||
1202 | |||
1203 | out_ttydev: | ||
1204 | sysfs_remove_group(&rp->cdev->dev.kobj, &raw3270_attr_group); | ||
1205 | out: | ||
1206 | return rc; | ||
1191 | } | 1207 | } |
1192 | 1208 | ||
1193 | /* | 1209 | /* |
@@ -1255,7 +1271,9 @@ raw3270_set_online (struct ccw_device *cdev) | |||
1255 | rc = raw3270_reset_device(rp); | 1271 | rc = raw3270_reset_device(rp); |
1256 | if (rc) | 1272 | if (rc) |
1257 | goto failure; | 1273 | goto failure; |
1258 | raw3270_create_attributes(rp); | 1274 | rc = raw3270_create_attributes(rp); |
1275 | if (rc) | ||
1276 | goto failure; | ||
1259 | set_bit(RAW3270_FLAGS_READY, &rp->flags); | 1277 | set_bit(RAW3270_FLAGS_READY, &rp->flags); |
1260 | mutex_lock(&raw3270_mutex); | 1278 | mutex_lock(&raw3270_mutex); |
1261 | list_for_each_entry(np, &raw3270_notifier, list) | 1279 | list_for_each_entry(np, &raw3270_notifier, list) |
@@ -1296,7 +1314,7 @@ raw3270_remove (struct ccw_device *cdev) | |||
1296 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); | 1314 | spin_lock_irqsave(get_ccwdev_lock(cdev), flags); |
1297 | if (rp->view) { | 1315 | if (rp->view) { |
1298 | rp->view->fn->deactivate(rp->view); | 1316 | rp->view->fn->deactivate(rp->view); |
1299 | rp->view = 0; | 1317 | rp->view = NULL; |
1300 | } | 1318 | } |
1301 | while (!list_empty(&rp->view_list)) { | 1319 | while (!list_empty(&rp->view_list)) { |
1302 | v = list_entry(rp->view_list.next, struct raw3270_view, list); | 1320 | v = list_entry(rp->view_list.next, struct raw3270_view, list); |
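The reworked raw3270_create_attributes() above is a plain example of goto-based error unwinding: each step that can fail is checked, and a failure jumps to a label that undoes only the steps that already succeeded, in reverse order. A standalone, userspace-only sketch of that shape follows; the step/undo helpers are invented stand-ins for sysfs_create_group() and the two class_device_create() calls, and step3() is forced to fail so the unwind path actually runs.

#include <stdio.h>

/* Invented stand-ins for the fallible setup steps; 0 means success. */
static int step1(void) { return 0; }
static int step2(void) { return 0; }
static int step3(void) { return -1; }
static void undo_step2(void) { puts("undo step2"); }
static void undo_step1(void) { puts("undo step1"); }

static int create_resources(void)
{
        int rc;

        rc = step1();
        if (rc)
                goto out;               /* nothing to undo yet */
        rc = step2();
        if (rc)
                goto out_step1;         /* undo step1 only */
        rc = step3();
        if (rc)
                goto out_step2;         /* undo step2, then step1 */
        return 0;

out_step2:
        undo_step2();
out_step1:
        undo_step1();
out:
        return rc;
}

int main(void)
{
        printf("create_resources() = %d\n", create_resources());
        return 0;
}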
diff --git a/drivers/s390/char/raw3270.h b/drivers/s390/char/raw3270.h index b635bf8e7775..90beaa80a782 100644 --- a/drivers/s390/char/raw3270.h +++ b/drivers/s390/char/raw3270.h | |||
@@ -231,7 +231,7 @@ alloc_string(struct list_head *free_list, unsigned long len) | |||
231 | INIT_LIST_HEAD(&cs->update); | 231 | INIT_LIST_HEAD(&cs->update); |
232 | return cs; | 232 | return cs; |
233 | } | 233 | } |
234 | return 0; | 234 | return NULL; |
235 | } | 235 | } |
236 | 236 | ||
237 | static inline unsigned long | 237 | static inline unsigned long |
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index 4138564402b8..985d1613baaa 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c | |||
@@ -383,6 +383,7 @@ void | |||
383 | sclp_sync_wait(void) | 383 | sclp_sync_wait(void) |
384 | { | 384 | { |
385 | unsigned long psw_mask; | 385 | unsigned long psw_mask; |
386 | unsigned long flags; | ||
386 | unsigned long cr0, cr0_sync; | 387 | unsigned long cr0, cr0_sync; |
387 | u64 timeout; | 388 | u64 timeout; |
388 | 389 | ||
@@ -395,9 +396,11 @@ sclp_sync_wait(void) | |||
395 | sclp_tod_from_jiffies(sclp_request_timer.expires - | 396 | sclp_tod_from_jiffies(sclp_request_timer.expires - |
396 | jiffies); | 397 | jiffies); |
397 | } | 398 | } |
399 | local_irq_save(flags); | ||
398 | /* Prevent bottom half from executing once we force interrupts open */ | 400 | /* Prevent bottom half from executing once we force interrupts open */ |
399 | local_bh_disable(); | 401 | local_bh_disable(); |
400 | /* Enable service-signal interruption, disable timer interrupts */ | 402 | /* Enable service-signal interruption, disable timer interrupts */ |
403 | trace_hardirqs_on(); | ||
401 | __ctl_store(cr0, 0, 0); | 404 | __ctl_store(cr0, 0, 0); |
402 | cr0_sync = cr0; | 405 | cr0_sync = cr0; |
403 | cr0_sync |= 0x00000200; | 406 | cr0_sync |= 0x00000200; |
@@ -415,11 +418,10 @@ sclp_sync_wait(void) | |||
415 | barrier(); | 418 | barrier(); |
416 | cpu_relax(); | 419 | cpu_relax(); |
417 | } | 420 | } |
418 | /* Restore interrupt settings */ | 421 | local_irq_disable(); |
419 | asm volatile ("SSM 0(%0)" | ||
420 | : : "a" (&psw_mask) : "memory"); | ||
421 | __ctl_load(cr0, 0, 0); | 422 | __ctl_load(cr0, 0, 0); |
422 | __local_bh_enable(); | 423 | _local_bh_enable(); |
424 | local_irq_restore(flags); | ||
423 | } | 425 | } |
424 | 426 | ||
425 | EXPORT_SYMBOL(sclp_sync_wait); | 427 | EXPORT_SYMBOL(sclp_sync_wait); |
diff --git a/drivers/s390/char/tape_34xx.c b/drivers/s390/char/tape_34xx.c index 48b4d30a7256..7b95dab913d0 100644 --- a/drivers/s390/char/tape_34xx.c +++ b/drivers/s390/char/tape_34xx.c | |||
@@ -1309,9 +1309,9 @@ static struct tape_discipline tape_discipline_34xx = { | |||
1309 | }; | 1309 | }; |
1310 | 1310 | ||
1311 | static struct ccw_device_id tape_34xx_ids[] = { | 1311 | static struct ccw_device_id tape_34xx_ids[] = { |
1312 | { CCW_DEVICE_DEVTYPE(0x3480, 0, 0x3480, 0), driver_info: tape_3480}, | 1312 | { CCW_DEVICE_DEVTYPE(0x3480, 0, 0x3480, 0), .driver_info = tape_3480}, |
1313 | { CCW_DEVICE_DEVTYPE(0x3490, 0, 0x3490, 0), driver_info: tape_3490}, | 1313 | { CCW_DEVICE_DEVTYPE(0x3490, 0, 0x3490, 0), .driver_info = tape_3490}, |
1314 | { /* end of list */ } | 1314 | { /* end of list */ }, |
1315 | }; | 1315 | }; |
1316 | 1316 | ||
1317 | static int | 1317 | static int |
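The tape_34xx_ids hunk above replaces the obsolete GNU "field: value" initializer labels with C99 designated initializers; the meaning is identical, but only the ".field = value" form is standard C. A standalone illustration with a toy struct (not the real struct ccw_device_id):

#include <stdio.h>

struct toy_id {
        unsigned short dev_type;
        unsigned long driver_info;
};

/* Obsolete GCC spelling:   { 0x3480, driver_info: 1 }
 * Standard C99 spelling used by the new code: */
static struct toy_id ids[] = {
        { .dev_type = 0x3480, .driver_info = 1 },
        { .dev_type = 0x3490, .driver_info = 2 },
        { /* end of list */ },
};

int main(void)
{
        int i;

        for (i = 0; ids[i].dev_type; i++)
                printf("0x%04x -> %lu\n", ids[i].dev_type, ids[i].driver_info);
        return 0;
}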
diff --git a/drivers/s390/char/tape_class.c b/drivers/s390/char/tape_class.c index a5c68e60fcf4..56b87618b100 100644 --- a/drivers/s390/char/tape_class.c +++ b/drivers/s390/char/tape_class.c | |||
@@ -76,14 +76,22 @@ struct tape_class_device *register_tape_dev( | |||
76 | device, | 76 | device, |
77 | "%s", tcd->device_name | 77 | "%s", tcd->device_name |
78 | ); | 78 | ); |
79 | sysfs_create_link( | 79 | rc = IS_ERR(tcd->class_device) ? PTR_ERR(tcd->class_device) : 0; |
80 | if (rc) | ||
81 | goto fail_with_cdev; | ||
82 | rc = sysfs_create_link( | ||
80 | &device->kobj, | 83 | &device->kobj, |
81 | &tcd->class_device->kobj, | 84 | &tcd->class_device->kobj, |
82 | tcd->mode_name | 85 | tcd->mode_name |
83 | ); | 86 | ); |
87 | if (rc) | ||
88 | goto fail_with_class_device; | ||
84 | 89 | ||
85 | return tcd; | 90 | return tcd; |
86 | 91 | ||
92 | fail_with_class_device: | ||
93 | class_device_destroy(tape_class, tcd->char_device->dev); | ||
94 | |||
87 | fail_with_cdev: | 95 | fail_with_cdev: |
88 | cdev_del(tcd->char_device); | 96 | cdev_del(tcd->char_device); |
89 | 97 | ||
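The register_tape_dev() hunk above turns class_device_create()'s pointer-or-error return into an integer with "rc = IS_ERR(p) ? PTR_ERR(p) : 0;". The idiom works because the kernel encodes errno values as pointers in the last page of the address space. A userspace re-creation of that encoding, mirroring the shape of include/linux/err.h (this is a sketch, not the kernel header):

#include <stdio.h>
#include <errno.h>

#define MAX_ERRNO 4095

/* Errors travel as pointer values in the topmost 4095 addresses. */
static void *ERR_PTR(long error)
{
        return (void *) error;
}

static long PTR_ERR(const void *ptr)
{
        return (long) ptr;
}

static int IS_ERR(const void *ptr)
{
        return (unsigned long) ptr >= (unsigned long) -MAX_ERRNO;
}

/* Returns either a valid object or an encoded errno. */
static void *create_thing(int fail)
{
        static int thing = 42;

        return fail ? ERR_PTR(-ENOMEM) : &thing;
}

int main(void)
{
        void *p = create_thing(1);
        int rc = IS_ERR(p) ? (int) PTR_ERR(p) : 0;

        printf("rc = %d\n", rc);        /* -12 (ENOMEM) on the failure path */
        return 0;
}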
diff --git a/drivers/s390/char/tape_core.c b/drivers/s390/char/tape_core.c index 122b4d8965c3..2826aed91043 100644 --- a/drivers/s390/char/tape_core.c +++ b/drivers/s390/char/tape_core.c | |||
@@ -543,20 +543,24 @@ int | |||
543 | tape_generic_probe(struct ccw_device *cdev) | 543 | tape_generic_probe(struct ccw_device *cdev) |
544 | { | 544 | { |
545 | struct tape_device *device; | 545 | struct tape_device *device; |
546 | int ret; | ||
546 | 547 | ||
547 | device = tape_alloc_device(); | 548 | device = tape_alloc_device(); |
548 | if (IS_ERR(device)) | 549 | if (IS_ERR(device)) |
549 | return -ENODEV; | 550 | return -ENODEV; |
550 | PRINT_INFO("tape device %s found\n", cdev->dev.bus_id); | 551 | ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP); |
552 | ret = sysfs_create_group(&cdev->dev.kobj, &tape_attr_group); | ||
553 | if (ret) { | ||
554 | tape_put_device(device); | ||
555 | PRINT_ERR("probe failed for tape device %s\n", cdev->dev.bus_id); | ||
556 | return ret; | ||
557 | } | ||
551 | cdev->dev.driver_data = device; | 558 | cdev->dev.driver_data = device; |
559 | cdev->handler = __tape_do_irq; | ||
552 | device->cdev = cdev; | 560 | device->cdev = cdev; |
553 | device->cdev_id = busid_to_int(cdev->dev.bus_id); | 561 | device->cdev_id = busid_to_int(cdev->dev.bus_id); |
554 | cdev->handler = __tape_do_irq; | 562 | PRINT_INFO("tape device %s found\n", cdev->dev.bus_id); |
555 | 563 | return ret; | |
556 | ccw_device_set_options(cdev, CCWDEV_DO_PATHGROUP); | ||
557 | sysfs_create_group(&cdev->dev.kobj, &tape_attr_group); | ||
558 | |||
559 | return 0; | ||
560 | } | 564 | } |
561 | 565 | ||
562 | static inline void | 566 | static inline void |
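The reordered tape_generic_probe() above runs every step that can fail before it publishes the device: driver_data and the interrupt handler are only set once the sysfs group exists, and the reference taken by tape_alloc_device() is dropped on failure. A standalone model of that publish-last ordering; every name below is invented for the sketch.

#include <stdio.h>
#include <stdlib.h>

struct bus_dev {
        void *driver_data;
        void (*handler)(void);
};

static void irq_handler(void) { }

/* Stands in for sysfs_create_group(); 0 means success. */
static int create_attributes(int simulate_failure)
{
        return simulate_failure ? -1 : 0;
}

static int probe(struct bus_dev *b, int simulate_failure)
{
        void *priv;
        int ret;

        priv = malloc(16);                      /* tape_alloc_device() */
        if (!priv)
                return -1;
        ret = create_attributes(simulate_failure);
        if (ret) {
                free(priv);                     /* drop the reference, stay unpublished */
                return ret;
        }
        b->driver_data = priv;                  /* publish only once nothing can fail */
        b->handler = irq_handler;
        return 0;
}

int main(void)
{
        struct bus_dev b = { NULL, NULL };

        printf("failing probe: %d, driver_data %s\n",
               probe(&b, 1), b.driver_data ? "set" : "unset");
        printf("working probe: %d, driver_data %s\n",
               probe(&b, 0), b.driver_data ? "set" : "unset");
        free(b.driver_data);
        return 0;
}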
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index f496f236b9c0..29718042c6c9 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c | |||
@@ -437,7 +437,7 @@ tty3270_rcl_add(struct tty3270 *tp, char *input, int len) | |||
437 | { | 437 | { |
438 | struct string *s; | 438 | struct string *s; |
439 | 439 | ||
440 | tp->rcl_walk = 0; | 440 | tp->rcl_walk = NULL; |
441 | if (len <= 0) | 441 | if (len <= 0) |
442 | return; | 442 | return; |
443 | if (tp->rcl_nr >= tp->rcl_max) { | 443 | if (tp->rcl_nr >= tp->rcl_max) { |
@@ -466,12 +466,12 @@ tty3270_rcl_backward(struct kbd_data *kbd) | |||
466 | else if (!list_empty(&tp->rcl_lines)) | 466 | else if (!list_empty(&tp->rcl_lines)) |
467 | tp->rcl_walk = tp->rcl_lines.prev; | 467 | tp->rcl_walk = tp->rcl_lines.prev; |
468 | s = tp->rcl_walk ? | 468 | s = tp->rcl_walk ? |
469 | list_entry(tp->rcl_walk, struct string, list) : 0; | 469 | list_entry(tp->rcl_walk, struct string, list) : NULL; |
470 | if (tp->rcl_walk) { | 470 | if (tp->rcl_walk) { |
471 | s = list_entry(tp->rcl_walk, struct string, list); | 471 | s = list_entry(tp->rcl_walk, struct string, list); |
472 | tty3270_update_prompt(tp, s->string, s->len); | 472 | tty3270_update_prompt(tp, s->string, s->len); |
473 | } else | 473 | } else |
474 | tty3270_update_prompt(tp, 0, 0); | 474 | tty3270_update_prompt(tp, NULL, 0); |
475 | tty3270_set_timer(tp, 1); | 475 | tty3270_set_timer(tp, 1); |
476 | } | 476 | } |
477 | spin_unlock_bh(&tp->view.lock); | 477 | spin_unlock_bh(&tp->view.lock); |
@@ -553,7 +553,7 @@ tty3270_read_tasklet(struct raw3270_request *rrq) | |||
553 | * has to be emitted to the tty and for 0x6d the screen | 553 | * has to be emitted to the tty and for 0x6d the screen |
554 | * needs to be redrawn. | 554 | * needs to be redrawn. |
555 | */ | 555 | */ |
556 | input = 0; | 556 | input = NULL; |
557 | len = 0; | 557 | len = 0; |
558 | if (tp->input->string[0] == 0x7d) { | 558 | if (tp->input->string[0] == 0x7d) { |
559 | /* Enter: write input to tty. */ | 559 | /* Enter: write input to tty. */ |
@@ -567,7 +567,7 @@ tty3270_read_tasklet(struct raw3270_request *rrq) | |||
567 | tty3270_update_status(tp); | 567 | tty3270_update_status(tp); |
568 | } | 568 | } |
569 | /* Clear input area. */ | 569 | /* Clear input area. */ |
570 | tty3270_update_prompt(tp, 0, 0); | 570 | tty3270_update_prompt(tp, NULL, 0); |
571 | tty3270_set_timer(tp, 1); | 571 | tty3270_set_timer(tp, 1); |
572 | } else if (tp->input->string[0] == 0x6d) { | 572 | } else if (tp->input->string[0] == 0x6d) { |
573 | /* Display has been cleared. Redraw. */ | 573 | /* Display has been cleared. Redraw. */ |
@@ -808,8 +808,8 @@ tty3270_release(struct raw3270_view *view) | |||
808 | tp = (struct tty3270 *) view; | 808 | tp = (struct tty3270 *) view; |
809 | tty = tp->tty; | 809 | tty = tp->tty; |
810 | if (tty) { | 810 | if (tty) { |
811 | tty->driver_data = 0; | 811 | tty->driver_data = NULL; |
812 | tp->tty = tp->kbd->tty = 0; | 812 | tp->tty = tp->kbd->tty = NULL; |
813 | tty_hangup(tty); | 813 | tty_hangup(tty); |
814 | raw3270_put_view(&tp->view); | 814 | raw3270_put_view(&tp->view); |
815 | } | 815 | } |
@@ -948,8 +948,8 @@ tty3270_close(struct tty_struct *tty, struct file * filp) | |||
948 | return; | 948 | return; |
949 | tp = (struct tty3270 *) tty->driver_data; | 949 | tp = (struct tty3270 *) tty->driver_data; |
950 | if (tp) { | 950 | if (tp) { |
951 | tty->driver_data = 0; | 951 | tty->driver_data = NULL; |
952 | tp->tty = tp->kbd->tty = 0; | 952 | tp->tty = tp->kbd->tty = NULL; |
953 | raw3270_put_view(&tp->view); | 953 | raw3270_put_view(&tp->view); |
954 | } | 954 | } |
955 | } | 955 | } |
@@ -1673,7 +1673,7 @@ tty3270_set_termios(struct tty_struct *tty, struct termios *old) | |||
1673 | new = L_ECHO(tty) ? TF_INPUT: TF_INPUTN; | 1673 | new = L_ECHO(tty) ? TF_INPUT: TF_INPUTN; |
1674 | if (new != tp->inattr) { | 1674 | if (new != tp->inattr) { |
1675 | tp->inattr = new; | 1675 | tp->inattr = new; |
1676 | tty3270_update_prompt(tp, 0, 0); | 1676 | tty3270_update_prompt(tp, NULL, 0); |
1677 | tty3270_set_timer(tp, 1); | 1677 | tty3270_set_timer(tp, 1); |
1678 | } | 1678 | } |
1679 | } | 1679 | } |
@@ -1759,7 +1759,7 @@ void | |||
1759 | tty3270_notifier(int index, int active) | 1759 | tty3270_notifier(int index, int active) |
1760 | { | 1760 | { |
1761 | if (active) | 1761 | if (active) |
1762 | tty_register_device(tty3270_driver, index, 0); | 1762 | tty_register_device(tty3270_driver, index, NULL); |
1763 | else | 1763 | else |
1764 | tty_unregister_device(tty3270_driver, index); | 1764 | tty_unregister_device(tty3270_driver, index); |
1765 | } | 1765 | } |
@@ -1818,7 +1818,7 @@ tty3270_exit(void) | |||
1818 | 1818 | ||
1819 | raw3270_unregister_notifier(tty3270_notifier); | 1819 | raw3270_unregister_notifier(tty3270_notifier); |
1820 | driver = tty3270_driver; | 1820 | driver = tty3270_driver; |
1821 | tty3270_driver = 0; | 1821 | tty3270_driver = NULL; |
1822 | tty_unregister_driver(driver); | 1822 | tty_unregister_driver(driver); |
1823 | tty3270_del_views(); | 1823 | tty3270_del_views(); |
1824 | } | 1824 | } |
diff --git a/drivers/s390/char/vmlogrdr.c b/drivers/s390/char/vmlogrdr.c index c625b69ebd19..6cb23040954b 100644 --- a/drivers/s390/char/vmlogrdr.c +++ b/drivers/s390/char/vmlogrdr.c | |||
@@ -86,8 +86,8 @@ struct vmlogrdr_priv_t { | |||
86 | */ | 86 | */ |
87 | static int vmlogrdr_open(struct inode *, struct file *); | 87 | static int vmlogrdr_open(struct inode *, struct file *); |
88 | static int vmlogrdr_release(struct inode *, struct file *); | 88 | static int vmlogrdr_release(struct inode *, struct file *); |
89 | static ssize_t vmlogrdr_read (struct file *filp, char *data, size_t count, | 89 | static ssize_t vmlogrdr_read (struct file *filp, char __user *data, |
90 | loff_t * ppos); | 90 | size_t count, loff_t * ppos); |
91 | 91 | ||
92 | static struct file_operations vmlogrdr_fops = { | 92 | static struct file_operations vmlogrdr_fops = { |
93 | .owner = THIS_MODULE, | 93 | .owner = THIS_MODULE, |
@@ -515,7 +515,7 @@ vmlogrdr_receive_data(struct vmlogrdr_priv_t *priv) { | |||
515 | 515 | ||
516 | 516 | ||
517 | static ssize_t | 517 | static ssize_t |
518 | vmlogrdr_read (struct file *filp, char *data, size_t count, loff_t * ppos) | 518 | vmlogrdr_read(struct file *filp, char __user *data, size_t count, loff_t * ppos) |
519 | { | 519 | { |
520 | int rc; | 520 | int rc; |
521 | struct vmlogrdr_priv_t * priv = filp->private_data; | 521 | struct vmlogrdr_priv_t * priv = filp->private_data; |
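The vmlogrdr hunks above only add the __user annotation to the read prototype, which lets sparse flag any direct dereference of the user-space pointer. As a reminder of the contract behind that annotation, here is a generic kernel-style read() sketch (demo_buf is hypothetical, this is not vmlogrdr_read()), assuming the usual copy_to_user() semantics of returning the number of bytes it could not copy:

/* Generic read() sketch; not taken from vmlogrdr.c. */
#include <linux/fs.h>
#include <linux/errno.h>
#include <asm/uaccess.h>

static const char demo_buf[] = "hello from the driver\n";

static ssize_t demo_read(struct file *filp, char __user *data,
                         size_t count, loff_t *ppos)
{
        size_t len = sizeof(demo_buf) - 1;
        size_t avail;

        if (*ppos >= len)
                return 0;                       /* EOF */
        avail = len - *ppos;
        if (count > avail)
                count = avail;
        /* copy_to_user() returns the number of bytes NOT copied. */
        if (copy_to_user(data, demo_buf + *ppos, count))
                return -EFAULT;
        *ppos += count;
        return count;
}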
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c index 5acc0ace3d7d..807320a41fa4 100644 --- a/drivers/s390/char/vmwatchdog.c +++ b/drivers/s390/char/vmwatchdog.c | |||
@@ -193,7 +193,7 @@ static int vmwdt_ioctl(struct inode *i, struct file *f, | |||
193 | return 0; | 193 | return 0; |
194 | case WDIOC_GETSTATUS: | 194 | case WDIOC_GETSTATUS: |
195 | case WDIOC_GETBOOTSTATUS: | 195 | case WDIOC_GETBOOTSTATUS: |
196 | return put_user(0, (int *)arg); | 196 | return put_user(0, (int __user *)arg); |
197 | case WDIOC_GETTEMP: | 197 | case WDIOC_GETTEMP: |
198 | return -EINVAL; | 198 | return -EINVAL; |
199 | case WDIOC_SETOPTIONS: | 199 | case WDIOC_SETOPTIONS: |
diff --git a/drivers/s390/cio/ccwgroup.c b/drivers/s390/cio/ccwgroup.c index c7319a07ba35..38954f5cd14c 100644 --- a/drivers/s390/cio/ccwgroup.c +++ b/drivers/s390/cio/ccwgroup.c | |||
@@ -152,7 +152,6 @@ ccwgroup_create(struct device *root, | |||
152 | struct ccwgroup_device *gdev; | 152 | struct ccwgroup_device *gdev; |
153 | int i; | 153 | int i; |
154 | int rc; | 154 | int rc; |
155 | int del_drvdata; | ||
156 | 155 | ||
157 | if (argc > 256) /* disallow dumb users */ | 156 | if (argc > 256) /* disallow dumb users */ |
158 | return -EINVAL; | 157 | return -EINVAL; |
@@ -163,7 +162,6 @@ ccwgroup_create(struct device *root, | |||
163 | 162 | ||
164 | atomic_set(&gdev->onoff, 0); | 163 | atomic_set(&gdev->onoff, 0); |
165 | 164 | ||
166 | del_drvdata = 0; | ||
167 | for (i = 0; i < argc; i++) { | 165 | for (i = 0; i < argc; i++) { |
168 | gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); | 166 | gdev->cdev[i] = get_ccwdev_by_busid(cdrv, argv[i]); |
169 | 167 | ||
@@ -180,18 +178,14 @@ ccwgroup_create(struct device *root, | |||
180 | rc = -EINVAL; | 178 | rc = -EINVAL; |
181 | goto free_dev; | 179 | goto free_dev; |
182 | } | 180 | } |
183 | } | ||
184 | for (i = 0; i < argc; i++) | ||
185 | gdev->cdev[i]->dev.driver_data = gdev; | 181 | gdev->cdev[i]->dev.driver_data = gdev; |
186 | del_drvdata = 1; | 182 | } |
187 | 183 | ||
188 | gdev->creator_id = creator_id; | 184 | gdev->creator_id = creator_id; |
189 | gdev->count = argc; | 185 | gdev->count = argc; |
190 | gdev->dev = (struct device ) { | 186 | gdev->dev.bus = &ccwgroup_bus_type; |
191 | .bus = &ccwgroup_bus_type, | 187 | gdev->dev.parent = root; |
192 | .parent = root, | 188 | gdev->dev.release = ccwgroup_release; |
193 | .release = ccwgroup_release, | ||
194 | }; | ||
195 | 189 | ||
196 | snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s", | 190 | snprintf (gdev->dev.bus_id, BUS_ID_SIZE, "%s", |
197 | gdev->cdev[0]->dev.bus_id); | 191 | gdev->cdev[0]->dev.bus_id); |
@@ -226,9 +220,9 @@ error: | |||
226 | free_dev: | 220 | free_dev: |
227 | for (i = 0; i < argc; i++) | 221 | for (i = 0; i < argc; i++) |
228 | if (gdev->cdev[i]) { | 222 | if (gdev->cdev[i]) { |
229 | put_device(&gdev->cdev[i]->dev); | 223 | if (gdev->cdev[i]->dev.driver_data == gdev) |
230 | if (del_drvdata) | ||
231 | gdev->cdev[i]->dev.driver_data = NULL; | 224 | gdev->cdev[i]->dev.driver_data = NULL; |
225 | put_device(&gdev->cdev[i]->dev); | ||
232 | } | 226 | } |
233 | kfree(gdev); | 227 | kfree(gdev); |
234 | return rc; | 228 | return rc; |
@@ -319,7 +313,7 @@ ccwgroup_online_store (struct device *dev, struct device_attribute *attr, const | |||
319 | if (!try_module_get(gdrv->owner)) | 313 | if (!try_module_get(gdrv->owner)) |
320 | return -EINVAL; | 314 | return -EINVAL; |
321 | 315 | ||
322 | value = simple_strtoul(buf, 0, 0); | 316 | value = simple_strtoul(buf, NULL, 0); |
323 | ret = count; | 317 | ret = count; |
324 | if (value == 1) | 318 | if (value == 1) |
325 | ccwgroup_set_online(gdev); | 319 | ccwgroup_set_online(gdev); |
@@ -395,10 +389,8 @@ int | |||
395 | ccwgroup_driver_register (struct ccwgroup_driver *cdriver) | 389 | ccwgroup_driver_register (struct ccwgroup_driver *cdriver) |
396 | { | 390 | { |
397 | /* register our new driver with the core */ | 391 | /* register our new driver with the core */ |
398 | cdriver->driver = (struct device_driver) { | 392 | cdriver->driver.bus = &ccwgroup_bus_type; |
399 | .bus = &ccwgroup_bus_type, | 393 | cdriver->driver.name = cdriver->name; |
400 | .name = cdriver->name, | ||
401 | }; | ||
402 | 394 | ||
403 | return driver_register(&cdriver->driver); | 395 | return driver_register(&cdriver->driver); |
404 | } | 396 | } |
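Both ccwgroup hunks above replace the assignment of a compound literal to an embedded structure (gdev->dev and cdriver->driver) with per-field assignments. The compound literal zeroes every member it does not name, wiping out whatever was already initialized inside the embedded struct device or device_driver; assigning the fields individually leaves the rest alone. A standalone demonstration with a cut-down stand-in type:

#include <stdio.h>

struct fake_device {
        const char *bus;
        const char *name;
        int refcount;   /* plays the role of state initialized elsewhere */
};

int main(void)
{
        struct fake_device a = { .refcount = 3 };
        struct fake_device b = { .refcount = 3 };

        /* Compound-literal assignment: members not named are zeroed,
         * so the previously set refcount is silently lost. */
        a = (struct fake_device) { .bus = "ccwgroup", .name = "grp0" };

        /* Field-by-field assignment, as in the new code, keeps them. */
        b.bus = "ccwgroup";
        b.name = "grp0";

        printf("compound literal: refcount=%d\n", a.refcount);  /* 0 */
        printf("field by field:   refcount=%d\n", b.refcount);  /* 3 */
        return 0;
}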
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index a01f3bba4a7b..c28444af0919 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -238,8 +238,6 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) | |||
238 | /* Check for single path devices. */ | 238 | /* Check for single path devices. */ |
239 | if (sch->schib.pmcw.pim == 0x80) | 239 | if (sch->schib.pmcw.pim == 0x80) |
240 | goto out_unreg; | 240 | goto out_unreg; |
241 | if (sch->vpm == mask) | ||
242 | goto out_unreg; | ||
243 | 241 | ||
244 | if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) && | 242 | if ((sch->schib.scsw.actl & SCSW_ACTL_DEVACT) && |
245 | (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) && | 243 | (sch->schib.scsw.actl & SCSW_ACTL_SCHACT) && |
@@ -258,6 +256,8 @@ s390_subchannel_remove_chpid(struct device *dev, void *data) | |||
258 | /* trigger path verification. */ | 256 | /* trigger path verification. */ |
259 | if (sch->driver && sch->driver->verify) | 257 | if (sch->driver && sch->driver->verify) |
260 | sch->driver->verify(&sch->dev); | 258 | sch->driver->verify(&sch->dev); |
259 | else if (sch->vpm == mask) | ||
260 | goto out_unreg; | ||
261 | out_unlock: | 261 | out_unlock: |
262 | spin_unlock_irq(&sch->lock); | 262 | spin_unlock_irq(&sch->lock); |
263 | return 0; | 263 | return 0; |
@@ -1391,10 +1391,8 @@ new_channel_path(int chpid) | |||
1391 | /* fill in status, etc. */ | 1391 | /* fill in status, etc. */ |
1392 | chp->id = chpid; | 1392 | chp->id = chpid; |
1393 | chp->state = 1; | 1393 | chp->state = 1; |
1394 | chp->dev = (struct device) { | 1394 | chp->dev.parent = &css[0]->device; |
1395 | .parent = &css[0]->device, | 1395 | chp->dev.release = chp_release; |
1396 | .release = chp_release, | ||
1397 | }; | ||
1398 | snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid); | 1396 | snprintf(chp->dev.bus_id, BUS_ID_SIZE, "chp0.%x", chpid); |
1399 | 1397 | ||
1400 | /* Obtain channel path description and fill it in. */ | 1398 | /* Obtain channel path description and fill it in. */ |
@@ -1464,6 +1462,40 @@ chsc_get_chp_desc(struct subchannel *sch, int chp_no) | |||
1464 | return desc; | 1462 | return desc; |
1465 | } | 1463 | } |
1466 | 1464 | ||
1465 | static int reset_channel_path(struct channel_path *chp) | ||
1466 | { | ||
1467 | int cc; | ||
1468 | |||
1469 | cc = rchp(chp->id); | ||
1470 | switch (cc) { | ||
1471 | case 0: | ||
1472 | return 0; | ||
1473 | case 2: | ||
1474 | return -EBUSY; | ||
1475 | default: | ||
1476 | return -ENODEV; | ||
1477 | } | ||
1478 | } | ||
1479 | |||
1480 | static void reset_channel_paths_css(struct channel_subsystem *css) | ||
1481 | { | ||
1482 | int i; | ||
1483 | |||
1484 | for (i = 0; i <= __MAX_CHPID; i++) { | ||
1485 | if (css->chps[i]) | ||
1486 | reset_channel_path(css->chps[i]); | ||
1487 | } | ||
1488 | } | ||
1489 | |||
1490 | void cio_reset_channel_paths(void) | ||
1491 | { | ||
1492 | int i; | ||
1493 | |||
1494 | for (i = 0; i <= __MAX_CSSID; i++) { | ||
1495 | if (css[i] && css[i]->valid) | ||
1496 | reset_channel_paths_css(css[i]); | ||
1497 | } | ||
1498 | } | ||
1467 | 1499 | ||
1468 | static int __init | 1500 | static int __init |
1469 | chsc_alloc_sei_area(void) | 1501 | chsc_alloc_sei_area(void) |
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index a3423267467f..89320c1ad825 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -147,7 +147,7 @@ cio_tpi(void) | |||
147 | sch->driver->irq(&sch->dev); | 147 | sch->driver->irq(&sch->dev); |
148 | spin_unlock(&sch->lock); | 148 | spin_unlock(&sch->lock); |
149 | irq_exit (); | 149 | irq_exit (); |
150 | __local_bh_enable(); | 150 | _local_bh_enable(); |
151 | return 1; | 151 | return 1; |
152 | } | 152 | } |
153 | 153 | ||
@@ -519,6 +519,7 @@ cio_validate_subchannel (struct subchannel *sch, struct subchannel_id schid) | |||
519 | memset(sch, 0, sizeof(struct subchannel)); | 519 | memset(sch, 0, sizeof(struct subchannel)); |
520 | 520 | ||
521 | spin_lock_init(&sch->lock); | 521 | spin_lock_init(&sch->lock); |
522 | mutex_init(&sch->reg_mutex); | ||
522 | 523 | ||
523 | /* Set a name for the subchannel */ | 524 | /* Set a name for the subchannel */ |
524 | snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid, | 525 | snprintf (sch->dev.bus_id, BUS_ID_SIZE, "0.%x.%04x", schid.ssid, |
@@ -797,7 +798,7 @@ struct subchannel * | |||
797 | cio_get_console_subchannel(void) | 798 | cio_get_console_subchannel(void) |
798 | { | 799 | { |
799 | if (!console_subchannel_in_use) | 800 | if (!console_subchannel_in_use) |
800 | return 0; | 801 | return NULL; |
801 | return &console_subchannel; | 802 | return &console_subchannel; |
802 | } | 803 | } |
803 | 804 | ||
@@ -875,5 +876,6 @@ void | |||
875 | reipl(unsigned long devno) | 876 | reipl(unsigned long devno) |
876 | { | 877 | { |
877 | clear_all_subchannels(); | 878 | clear_all_subchannels(); |
879 | cio_reset_channel_paths(); | ||
878 | do_reipl(devno); | 880 | do_reipl(devno); |
879 | } | 881 | } |
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 0ca987344e07..4541c1af4b66 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define S390_CIO_H | 2 | #define S390_CIO_H |
3 | 3 | ||
4 | #include "schid.h" | 4 | #include "schid.h" |
5 | #include <linux/mutex.h> | ||
5 | 6 | ||
6 | /* | 7 | /* |
7 | * where we put the ssd info | 8 | * where we put the ssd info |
@@ -87,7 +88,7 @@ struct orb { | |||
87 | struct subchannel { | 88 | struct subchannel { |
88 | struct subchannel_id schid; | 89 | struct subchannel_id schid; |
89 | spinlock_t lock; /* subchannel lock */ | 90 | spinlock_t lock; /* subchannel lock */ |
90 | 91 | struct mutex reg_mutex; | |
91 | enum { | 92 | enum { |
92 | SUBCHANNEL_TYPE_IO = 0, | 93 | SUBCHANNEL_TYPE_IO = 0, |
93 | SUBCHANNEL_TYPE_CHSC = 1, | 94 | SUBCHANNEL_TYPE_CHSC = 1, |
diff --git a/drivers/s390/cio/cmf.c b/drivers/s390/cio/cmf.c index 1c3e8e9012b0..828b2d334f0a 100644 --- a/drivers/s390/cio/cmf.c +++ b/drivers/s390/cio/cmf.c | |||
@@ -1068,6 +1068,7 @@ cmb_show_avg_sample_interval(struct device *dev, struct device_attribute *attr, | |||
1068 | if (count) { | 1068 | if (count) { |
1069 | interval = cmb_data->last_update - | 1069 | interval = cmb_data->last_update - |
1070 | cdev->private->cmb_start_time; | 1070 | cdev->private->cmb_start_time; |
1071 | interval = (interval * 1000) >> 12; | ||
1071 | interval /= count; | 1072 | interval /= count; |
1072 | } else | 1073 | } else |
1073 | interval = -1; | 1074 | interval = -1; |
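The added line "interval = (interval * 1000) >> 12;" converts the measured TOD-clock delta to nanoseconds before it is divided by the sample count: the z/Architecture TOD clock advances 4096 units per microsecond (bit 51 is the microsecond bit), so multiplying by 1000 and shifting right by 12 turns TOD units into nanoseconds. A standalone check of that arithmetic:

#include <stdio.h>
#include <stdint.h>

/* ns = tod * 1000 / 4096, which is what "(interval * 1000) >> 12" computes. */
static uint64_t tod_to_ns(uint64_t tod)
{
        return (tod * 1000) >> 12;
}

int main(void)
{
        uint64_t one_second = 4096ULL * 1000000;        /* one second in TOD units */

        printf("%llu ns\n", (unsigned long long) tod_to_ns(one_second));
        /* prints 1000000000 */
        return 0;
}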
@@ -1140,7 +1141,7 @@ static struct attribute *cmf_attributes[] = { | |||
1140 | &dev_attr_avg_device_disconnect_time.attr, | 1141 | &dev_attr_avg_device_disconnect_time.attr, |
1141 | &dev_attr_avg_control_unit_queuing_time.attr, | 1142 | &dev_attr_avg_control_unit_queuing_time.attr, |
1142 | &dev_attr_avg_device_active_only_time.attr, | 1143 | &dev_attr_avg_device_active_only_time.attr, |
1143 | 0, | 1144 | NULL, |
1144 | }; | 1145 | }; |
1145 | 1146 | ||
1146 | static struct attribute_group cmf_attr_group = { | 1147 | static struct attribute_group cmf_attr_group = { |
@@ -1160,7 +1161,7 @@ static struct attribute *cmf_attributes_ext[] = { | |||
1160 | &dev_attr_avg_device_active_only_time.attr, | 1161 | &dev_attr_avg_device_active_only_time.attr, |
1161 | &dev_attr_avg_device_busy_time.attr, | 1162 | &dev_attr_avg_device_busy_time.attr, |
1162 | &dev_attr_avg_initial_command_response_time.attr, | 1163 | &dev_attr_avg_initial_command_response_time.attr, |
1163 | 0, | 1164 | NULL, |
1164 | }; | 1165 | }; |
1165 | 1166 | ||
1166 | static struct attribute_group cmf_attr_group_ext = { | 1167 | static struct attribute_group cmf_attr_group_ext = { |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index 1d3be80797f8..13eeea3d547f 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -108,6 +108,24 @@ css_subchannel_release(struct device *dev) | |||
108 | 108 | ||
109 | extern int css_get_ssd_info(struct subchannel *sch); | 109 | extern int css_get_ssd_info(struct subchannel *sch); |
110 | 110 | ||
111 | |||
112 | int css_sch_device_register(struct subchannel *sch) | ||
113 | { | ||
114 | int ret; | ||
115 | |||
116 | mutex_lock(&sch->reg_mutex); | ||
117 | ret = device_register(&sch->dev); | ||
118 | mutex_unlock(&sch->reg_mutex); | ||
119 | return ret; | ||
120 | } | ||
121 | |||
122 | void css_sch_device_unregister(struct subchannel *sch) | ||
123 | { | ||
124 | mutex_lock(&sch->reg_mutex); | ||
125 | device_unregister(&sch->dev); | ||
126 | mutex_unlock(&sch->reg_mutex); | ||
127 | } | ||
128 | |||
111 | static int | 129 | static int |
112 | css_register_subchannel(struct subchannel *sch) | 130 | css_register_subchannel(struct subchannel *sch) |
113 | { | 131 | { |
@@ -119,7 +137,7 @@ css_register_subchannel(struct subchannel *sch) | |||
119 | sch->dev.release = &css_subchannel_release; | 137 | sch->dev.release = &css_subchannel_release; |
120 | 138 | ||
121 | /* make it known to the system */ | 139 | /* make it known to the system */ |
122 | ret = device_register(&sch->dev); | 140 | ret = css_sch_device_register(sch); |
123 | if (ret) | 141 | if (ret) |
124 | printk (KERN_WARNING "%s: could not register %s\n", | 142 | printk (KERN_WARNING "%s: could not register %s\n", |
125 | __func__, sch->dev.bus_id); | 143 | __func__, sch->dev.bus_id); |
@@ -250,7 +268,7 @@ css_evaluate_subchannel(struct subchannel_id schid, int slow) | |||
250 | * The device will be killed automatically. | 268 | * The device will be killed automatically. |
251 | */ | 269 | */ |
252 | cio_disable_subchannel(sch); | 270 | cio_disable_subchannel(sch); |
253 | device_unregister(&sch->dev); | 271 | css_sch_device_unregister(sch); |
254 | /* Reset intparm to zeroes. */ | 272 | /* Reset intparm to zeroes. */ |
255 | sch->schib.pmcw.intparm = 0; | 273 | sch->schib.pmcw.intparm = 0; |
256 | cio_modify(sch); | 274 | cio_modify(sch); |
@@ -264,7 +282,7 @@ css_evaluate_subchannel(struct subchannel_id schid, int slow) | |||
264 | * away in any case. | 282 | * away in any case. |
265 | */ | 283 | */ |
266 | if (!disc) { | 284 | if (!disc) { |
267 | device_unregister(&sch->dev); | 285 | css_sch_device_unregister(sch); |
268 | /* Reset intparm to zeroes. */ | 286 | /* Reset intparm to zeroes. */ |
269 | sch->schib.pmcw.intparm = 0; | 287 | sch->schib.pmcw.intparm = 0; |
270 | cio_modify(sch); | 288 | cio_modify(sch); |
@@ -605,9 +623,13 @@ init_channel_subsystem (void) | |||
605 | ret = device_register(&css[i]->device); | 623 | ret = device_register(&css[i]->device); |
606 | if (ret) | 624 | if (ret) |
607 | goto out_free; | 625 | goto out_free; |
608 | if (css_characteristics_avail && css_chsc_characteristics.secm) | 626 | if (css_characteristics_avail && |
609 | device_create_file(&css[i]->device, | 627 | css_chsc_characteristics.secm) { |
610 | &dev_attr_cm_enable); | 628 | ret = device_create_file(&css[i]->device, |
629 | &dev_attr_cm_enable); | ||
630 | if (ret) | ||
631 | goto out_device; | ||
632 | } | ||
611 | } | 633 | } |
612 | css_init_done = 1; | 634 | css_init_done = 1; |
613 | 635 | ||
@@ -615,6 +637,8 @@ init_channel_subsystem (void) | |||
615 | 637 | ||
616 | for_each_subchannel(__init_channel_subsystem, NULL); | 638 | for_each_subchannel(__init_channel_subsystem, NULL); |
617 | return 0; | 639 | return 0; |
640 | out_device: | ||
641 | device_unregister(&css[i]->device); | ||
618 | out_free: | 642 | out_free: |
619 | kfree(css[i]); | 643 | kfree(css[i]); |
620 | out_unregister: | 644 | out_unregister: |
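css_sch_device_register() and css_sch_device_unregister() above wrap the driver-core calls in the new per-subchannel reg_mutex (initialized in cio_validate_subchannel() further up), presumably so that registration and a concurrent unregistration of the same subchannel cannot interleave. A minimal userspace analogue of that wrapper pattern; the registered flag merely stands in for the driver-core state:

#include <pthread.h>
#include <stdio.h>

struct sub {
        pthread_mutex_t reg_mutex;
        int registered;         /* stand-in for driver-core state */
};

static int sub_register(struct sub *s)
{
        pthread_mutex_lock(&s->reg_mutex);
        s->registered = 1;      /* device_register(&sch->dev) */
        pthread_mutex_unlock(&s->reg_mutex);
        return 0;
}

static void sub_unregister(struct sub *s)
{
        pthread_mutex_lock(&s->reg_mutex);
        s->registered = 0;      /* device_unregister(&sch->dev) */
        pthread_mutex_unlock(&s->reg_mutex);
}

int main(void)
{
        struct sub s = { PTHREAD_MUTEX_INITIALIZER, 0 };

        sub_register(&s);
        printf("registered=%d\n", s.registered);
        sub_unregister(&s);
        printf("registered=%d\n", s.registered);
        return 0;
}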
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index e210f89a2449..8aabb4adeb5f 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h | |||
@@ -100,7 +100,7 @@ struct ccw_device_private { | |||
100 | struct qdio_irq *qdio_data; | 100 | struct qdio_irq *qdio_data; |
101 | struct irb irb; /* device status */ | 101 | struct irb irb; /* device status */ |
102 | struct senseid senseid; /* SenseID info */ | 102 | struct senseid senseid; /* SenseID info */ |
103 | struct pgid pgid; /* path group ID */ | 103 | struct pgid pgid[8]; /* path group IDs per chpid*/ |
104 | struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ | 104 | struct ccw1 iccws[2]; /* ccws for SNID/SID/SPGID commands */ |
105 | struct work_struct kick_work; | 105 | struct work_struct kick_work; |
106 | wait_queue_head_t wait_q; | 106 | wait_queue_head_t wait_q; |
@@ -136,6 +136,8 @@ extern struct bus_type css_bus_type; | |||
136 | extern struct css_driver io_subchannel_driver; | 136 | extern struct css_driver io_subchannel_driver; |
137 | 137 | ||
138 | extern int css_probe_device(struct subchannel_id); | 138 | extern int css_probe_device(struct subchannel_id); |
139 | extern int css_sch_device_register(struct subchannel *); | ||
140 | extern void css_sch_device_unregister(struct subchannel *); | ||
139 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); | 141 | extern struct subchannel * get_subchannel_by_schid(struct subchannel_id); |
140 | extern int css_init_done; | 142 | extern int css_init_done; |
141 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); | 143 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index 67f0de6aed33..646da5640401 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -100,7 +100,7 @@ ccw_uevent (struct device *dev, char **envp, int num_envp, | |||
100 | if ((buffer_size - length <= 0) || (i >= num_envp)) | 100 | if ((buffer_size - length <= 0) || (i >= num_envp)) |
101 | return -ENOMEM; | 101 | return -ENOMEM; |
102 | 102 | ||
103 | envp[i] = 0; | 103 | envp[i] = NULL; |
104 | 104 | ||
105 | return 0; | 105 | return 0; |
106 | } | 106 | } |
@@ -280,7 +280,7 @@ ccw_device_remove_disconnected(struct ccw_device *cdev) | |||
280 | * 'throw away device'. | 280 | * 'throw away device'. |
281 | */ | 281 | */ |
282 | sch = to_subchannel(cdev->dev.parent); | 282 | sch = to_subchannel(cdev->dev.parent); |
283 | device_unregister(&sch->dev); | 283 | css_sch_device_unregister(sch); |
284 | /* Reset intparm to zeroes. */ | 284 | /* Reset intparm to zeroes. */ |
285 | sch->schib.pmcw.intparm = 0; | 285 | sch->schib.pmcw.intparm = 0; |
286 | cio_modify(sch); | 286 | cio_modify(sch); |
@@ -556,12 +556,11 @@ get_disc_ccwdev_by_devno(unsigned int devno, unsigned int ssid, | |||
556 | struct ccw_device *sibling) | 556 | struct ccw_device *sibling) |
557 | { | 557 | { |
558 | struct device *dev; | 558 | struct device *dev; |
559 | struct match_data data = { | 559 | struct match_data data; |
560 | .devno = devno, | ||
561 | .ssid = ssid, | ||
562 | .sibling = sibling, | ||
563 | }; | ||
564 | 560 | ||
561 | data.devno = devno; | ||
562 | data.ssid = ssid; | ||
563 | data.sibling = sibling; | ||
565 | dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno); | 564 | dev = bus_find_device(&ccw_bus_type, NULL, &data, match_devno); |
566 | 565 | ||
567 | return dev ? to_ccwdev(dev) : NULL; | 566 | return dev ? to_ccwdev(dev) : NULL; |
@@ -625,7 +624,7 @@ ccw_device_do_unreg_rereg(void *data) | |||
625 | other_sch->schib.pmcw.intparm = 0; | 624 | other_sch->schib.pmcw.intparm = 0; |
626 | cio_modify(other_sch); | 625 | cio_modify(other_sch); |
627 | } | 626 | } |
628 | device_unregister(&other_sch->dev); | 627 | css_sch_device_unregister(other_sch); |
629 | } | 628 | } |
630 | } | 629 | } |
631 | /* Update ssd info here. */ | 630 | /* Update ssd info here. */ |
@@ -709,7 +708,7 @@ ccw_device_call_sch_unregister(void *data) | |||
709 | struct subchannel *sch; | 708 | struct subchannel *sch; |
710 | 709 | ||
711 | sch = to_subchannel(cdev->dev.parent); | 710 | sch = to_subchannel(cdev->dev.parent); |
712 | device_unregister(&sch->dev); | 711 | css_sch_device_unregister(sch); |
713 | /* Reset intparm to zeroes. */ | 712 | /* Reset intparm to zeroes. */ |
714 | sch->schib.pmcw.intparm = 0; | 713 | sch->schib.pmcw.intparm = 0; |
715 | cio_modify(sch); | 714 | cio_modify(sch); |
@@ -835,10 +834,8 @@ io_subchannel_probe (struct subchannel *sch) | |||
835 | return -ENOMEM; | 834 | return -ENOMEM; |
836 | } | 835 | } |
837 | atomic_set(&cdev->private->onoff, 0); | 836 | atomic_set(&cdev->private->onoff, 0); |
838 | cdev->dev = (struct device) { | 837 | cdev->dev.parent = &sch->dev; |
839 | .parent = &sch->dev, | 838 | cdev->dev.release = ccw_device_release; |
840 | .release = ccw_device_release, | ||
841 | }; | ||
842 | INIT_LIST_HEAD(&cdev->private->kick_work.entry); | 839 | INIT_LIST_HEAD(&cdev->private->kick_work.entry); |
843 | /* Do first half of device_register. */ | 840 | /* Do first half of device_register. */ |
844 | device_initialize(&cdev->dev); | 841 | device_initialize(&cdev->dev); |
@@ -977,9 +974,7 @@ ccw_device_console_enable (struct ccw_device *cdev, struct subchannel *sch) | |||
977 | int rc; | 974 | int rc; |
978 | 975 | ||
979 | /* Initialize the ccw_device structure. */ | 976 | /* Initialize the ccw_device structure. */ |
980 | cdev->dev = (struct device) { | 977 | cdev->dev.parent= &sch->dev; |
981 | .parent = &sch->dev, | ||
982 | }; | ||
983 | rc = io_subchannel_recog(cdev, sch); | 978 | rc = io_subchannel_recog(cdev, sch); |
984 | if (rc) | 979 | if (rc) |
985 | return rc; | 980 | return rc; |
@@ -1057,7 +1052,7 @@ get_ccwdev_by_busid(struct ccw_driver *cdrv, const char *bus_id) | |||
1057 | __ccwdev_check_busid); | 1052 | __ccwdev_check_busid); |
1058 | put_driver(drv); | 1053 | put_driver(drv); |
1059 | 1054 | ||
1060 | return dev ? to_ccwdev(dev) : 0; | 1055 | return dev ? to_ccwdev(dev) : NULL; |
1061 | } | 1056 | } |
1062 | 1057 | ||
1063 | /************************** device driver handling ************************/ | 1058 | /************************** device driver handling ************************/ |
@@ -1082,7 +1077,7 @@ ccw_device_probe (struct device *dev) | |||
1082 | ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV; | 1077 | ret = cdrv->probe ? cdrv->probe(cdev) : -ENODEV; |
1083 | 1078 | ||
1084 | if (ret) { | 1079 | if (ret) { |
1085 | cdev->drv = 0; | 1080 | cdev->drv = NULL; |
1086 | return ret; | 1081 | return ret; |
1087 | } | 1082 | } |
1088 | 1083 | ||
@@ -1113,7 +1108,7 @@ ccw_device_remove (struct device *dev) | |||
1113 | ret, cdev->dev.bus_id); | 1108 | ret, cdev->dev.bus_id); |
1114 | } | 1109 | } |
1115 | ccw_device_set_timeout(cdev, 0); | 1110 | ccw_device_set_timeout(cdev, 0); |
1116 | cdev->drv = 0; | 1111 | cdev->drv = NULL; |
1117 | return 0; | 1112 | return 0; |
1118 | } | 1113 | } |
1119 | 1114 | ||
diff --git a/drivers/s390/cio/device_fsm.c b/drivers/s390/cio/device_fsm.c index cb1af0b6f033..35e162ba6d54 100644 --- a/drivers/s390/cio/device_fsm.c +++ b/drivers/s390/cio/device_fsm.c | |||
@@ -152,7 +152,8 @@ ccw_device_cancel_halt_clear(struct ccw_device *cdev) | |||
152 | if (cdev->private->iretry) { | 152 | if (cdev->private->iretry) { |
153 | cdev->private->iretry--; | 153 | cdev->private->iretry--; |
154 | ret = cio_halt(sch); | 154 | ret = cio_halt(sch); |
155 | return (ret == 0) ? -EBUSY : ret; | 155 | if (ret != -EBUSY) |
156 | return (ret == 0) ? -EBUSY : ret; | ||
156 | } | 157 | } |
157 | /* halt io unsuccessful. */ | 158 | /* halt io unsuccessful. */ |
158 | cdev->private->iretry = 255; /* 255 clear retries. */ | 159 | cdev->private->iretry = 255; /* 255 clear retries. */ |
@@ -266,12 +267,10 @@ ccw_device_recog_done(struct ccw_device *cdev, int state) | |||
266 | notify = 1; | 267 | notify = 1; |
267 | } | 268 | } |
268 | /* fill out sense information */ | 269 | /* fill out sense information */ |
269 | cdev->id = (struct ccw_device_id) { | 270 | cdev->id.cu_type = cdev->private->senseid.cu_type; |
270 | .cu_type = cdev->private->senseid.cu_type, | 271 | cdev->id.cu_model = cdev->private->senseid.cu_model; |
271 | .cu_model = cdev->private->senseid.cu_model, | 272 | cdev->id.dev_type = cdev->private->senseid.dev_type; |
272 | .dev_type = cdev->private->senseid.dev_type, | 273 | cdev->id.dev_model = cdev->private->senseid.dev_model; |
273 | .dev_model = cdev->private->senseid.dev_model, | ||
274 | }; | ||
275 | if (notify) { | 274 | if (notify) { |
276 | cdev->private->state = DEV_STATE_OFFLINE; | 275 | cdev->private->state = DEV_STATE_OFFLINE; |
277 | if (same_dev) { | 276 | if (same_dev) { |
@@ -378,6 +377,56 @@ ccw_device_done(struct ccw_device *cdev, int state) | |||
378 | put_device (&cdev->dev); | 377 | put_device (&cdev->dev); |
379 | } | 378 | } |
380 | 379 | ||
380 | static inline int cmp_pgid(struct pgid *p1, struct pgid *p2) | ||
381 | { | ||
382 | char *c1; | ||
383 | char *c2; | ||
384 | |||
385 | c1 = (char *)p1; | ||
386 | c2 = (char *)p2; | ||
387 | |||
388 | return memcmp(c1 + 1, c2 + 1, sizeof(struct pgid) - 1); | ||
389 | } | ||
390 | |||
391 | static void __ccw_device_get_common_pgid(struct ccw_device *cdev) | ||
392 | { | ||
393 | int i; | ||
394 | int last; | ||
395 | |||
396 | last = 0; | ||
397 | for (i = 0; i < 8; i++) { | ||
398 | if (cdev->private->pgid[i].inf.ps.state1 == SNID_STATE1_RESET) | ||
399 | /* No PGID yet */ | ||
400 | continue; | ||
401 | if (cdev->private->pgid[last].inf.ps.state1 == | ||
402 | SNID_STATE1_RESET) { | ||
403 | /* First non-zero PGID */ | ||
404 | last = i; | ||
405 | continue; | ||
406 | } | ||
407 | if (cmp_pgid(&cdev->private->pgid[i], | ||
408 | &cdev->private->pgid[last]) == 0) | ||
409 | /* Non-conflicting PGIDs */ | ||
410 | continue; | ||
411 | |||
412 | /* PGID mismatch, can't pathgroup. */ | ||
413 | CIO_MSG_EVENT(0, "SNID - pgid mismatch for device " | ||
414 | "0.%x.%04x, can't pathgroup\n", | ||
415 | cdev->private->ssid, cdev->private->devno); | ||
416 | cdev->private->options.pgroup = 0; | ||
417 | return; | ||
418 | } | ||
419 | if (cdev->private->pgid[last].inf.ps.state1 == | ||
420 | SNID_STATE1_RESET) | ||
421 | /* No previous pgid found */ | ||
422 | memcpy(&cdev->private->pgid[0], &css[0]->global_pgid, | ||
423 | sizeof(struct pgid)); | ||
424 | else | ||
425 | /* Use existing pgid */ | ||
426 | memcpy(&cdev->private->pgid[0], &cdev->private->pgid[last], | ||
427 | sizeof(struct pgid)); | ||
428 | } | ||
429 | |||
381 | /* | 430 | /* |
382 | * Function called from device_pgid.c after sense path ground has completed. | 431 | * Function called from device_pgid.c after sense path ground has completed. |
383 | */ | 432 | */ |
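cmp_pgid() and __ccw_device_get_common_pgid() above work on the new per-chpid pgid[8] array: slots still in the SNID reset state are skipped, every remaining slot must carry the same PGID (the comparison starts at byte 1, skipping the per-path state byte), and a mismatch makes the driver give up on path grouping. A toy, userspace-only model of that selection logic; the struct layout below is invented and is not struct pgid:

#include <stdio.h>
#include <string.h>

#define SLOTS 8

struct toy_pgid {
        unsigned char state;    /* 0 = nothing sensed on this path yet */
        char id[11];            /* the part that must match across paths */
};

static int common_pgid(const struct toy_pgid *p, struct toy_pgid *out)
{
        int i, last = -1;

        for (i = 0; i < SLOTS; i++) {
                if (!p[i].state)
                        continue;               /* no PGID on this path */
                if (last < 0) {
                        last = i;               /* first usable PGID */
                        continue;
                }
                if (memcmp(p[i].id, p[last].id, sizeof(p[i].id)))
                        return -1;              /* mismatch: cannot pathgroup */
        }
        if (last < 0)
                return 1;                       /* nothing sensed yet */
        *out = p[last];
        return 0;
}

int main(void)
{
        struct toy_pgid slots[SLOTS] = { { 0 } };
        struct toy_pgid result;

        slots[2].state = 1;
        strcpy(slots[2].id, "GROUP-0001");
        slots[5] = slots[2];                    /* same PGID on another path */

        switch (common_pgid(slots, &result)) {
        case 0:
                printf("common PGID: %s\n", result.id);
                break;
        case 1:
                printf("no PGID sensed, use the global one\n");
                break;
        default:
                printf("PGID mismatch, path grouping disabled\n");
                break;
        }
        return 0;
}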
@@ -388,24 +437,26 @@ ccw_device_sense_pgid_done(struct ccw_device *cdev, int err) | |||
388 | 437 | ||
389 | sch = to_subchannel(cdev->dev.parent); | 438 | sch = to_subchannel(cdev->dev.parent); |
390 | switch (err) { | 439 | switch (err) { |
391 | case 0: | 440 | case -EOPNOTSUPP: /* path grouping not supported, use nop instead. */ |
392 | /* Start Path Group verification. */ | 441 | cdev->private->options.pgroup = 0; |
393 | sch->vpm = 0; /* Start with no path groups set. */ | 442 | break; |
394 | cdev->private->state = DEV_STATE_VERIFY; | 443 | case 0: /* success */ |
395 | ccw_device_verify_start(cdev); | 444 | case -EACCES: /* partial success, some paths not operational */ |
445 | /* Check if all pgids are equal or 0. */ | ||
446 | __ccw_device_get_common_pgid(cdev); | ||
396 | break; | 447 | break; |
397 | case -ETIME: /* Sense path group id stopped by timeout. */ | 448 | case -ETIME: /* Sense path group id stopped by timeout. */ |
398 | case -EUSERS: /* device is reserved for someone else. */ | 449 | case -EUSERS: /* device is reserved for someone else. */ |
399 | ccw_device_done(cdev, DEV_STATE_BOXED); | 450 | ccw_device_done(cdev, DEV_STATE_BOXED); |
400 | break; | 451 | return; |
401 | case -EOPNOTSUPP: /* path grouping not supported, just set online. */ | ||
402 | cdev->private->options.pgroup = 0; | ||
403 | ccw_device_done(cdev, DEV_STATE_ONLINE); | ||
404 | break; | ||
405 | default: | 452 | default: |
406 | ccw_device_done(cdev, DEV_STATE_NOT_OPER); | 453 | ccw_device_done(cdev, DEV_STATE_NOT_OPER); |
407 | break; | 454 | return; |
408 | } | 455 | } |
456 | /* Start Path Group verification. */ | ||
457 | sch->vpm = 0; /* Start with no path groups set. */ | ||
458 | cdev->private->state = DEV_STATE_VERIFY; | ||
459 | ccw_device_verify_start(cdev); | ||
409 | } | 460 | } |
410 | 461 | ||
411 | /* | 462 | /* |
@@ -513,12 +564,10 @@ ccw_device_verify_done(struct ccw_device *cdev, int err) | |||
513 | /* Deliver fake irb to device driver, if needed. */ | 564 | /* Deliver fake irb to device driver, if needed. */ |
514 | if (cdev->private->flags.fake_irb) { | 565 | if (cdev->private->flags.fake_irb) { |
515 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | 566 | memset(&cdev->private->irb, 0, sizeof(struct irb)); |
516 | cdev->private->irb.scsw = (struct scsw) { | 567 | cdev->private->irb.scsw.cc = 1; |
517 | .cc = 1, | 568 | cdev->private->irb.scsw.fctl = SCSW_FCTL_START_FUNC; |
518 | .fctl = SCSW_FCTL_START_FUNC, | 569 | cdev->private->irb.scsw.actl = SCSW_ACTL_START_PEND; |
519 | .actl = SCSW_ACTL_START_PEND, | 570 | cdev->private->irb.scsw.stctl = SCSW_STCTL_STATUS_PEND; |
520 | .stctl = SCSW_STCTL_STATUS_PEND, | ||
521 | }; | ||
522 | cdev->private->flags.fake_irb = 0; | 571 | cdev->private->flags.fake_irb = 0; |
523 | if (cdev->handler) | 572 | if (cdev->handler) |
524 | cdev->handler(cdev, cdev->private->intparm, | 573 | cdev->handler(cdev, cdev->private->intparm, |
@@ -562,8 +611,9 @@ ccw_device_online(struct ccw_device *cdev) | |||
562 | } | 611 | } |
563 | /* Do we want to do path grouping? */ | 612 | /* Do we want to do path grouping? */ |
564 | if (!cdev->private->options.pgroup) { | 613 | if (!cdev->private->options.pgroup) { |
565 | /* No, set state online immediately. */ | 614 | /* Start initial path verification. */ |
566 | ccw_device_done(cdev, DEV_STATE_ONLINE); | 615 | cdev->private->state = DEV_STATE_VERIFY; |
616 | ccw_device_verify_start(cdev); | ||
567 | return 0; | 617 | return 0; |
568 | } | 618 | } |
569 | /* Do a SensePGID first. */ | 619 | /* Do a SensePGID first. */ |
@@ -609,6 +659,7 @@ ccw_device_offline(struct ccw_device *cdev) | |||
609 | /* Are we doing path grouping? */ | 659 | /* Are we doing path grouping? */ |
610 | if (!cdev->private->options.pgroup) { | 660 | if (!cdev->private->options.pgroup) { |
611 | /* No, set state offline immediately. */ | 661 | /* No, set state offline immediately. */ |
662 | sch->vpm = 0; | ||
612 | ccw_device_done(cdev, DEV_STATE_OFFLINE); | 663 | ccw_device_done(cdev, DEV_STATE_OFFLINE); |
613 | return 0; | 664 | return 0; |
614 | } | 665 | } |
@@ -705,8 +756,6 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) | |||
705 | { | 756 | { |
706 | struct subchannel *sch; | 757 | struct subchannel *sch; |
707 | 758 | ||
708 | if (!cdev->private->options.pgroup) | ||
709 | return; | ||
710 | if (cdev->private->state == DEV_STATE_W4SENSE) { | 759 | if (cdev->private->state == DEV_STATE_W4SENSE) { |
711 | cdev->private->flags.doverify = 1; | 760 | cdev->private->flags.doverify = 1; |
712 | return; | 761 | return; |
@@ -719,6 +768,7 @@ ccw_device_online_verify(struct ccw_device *cdev, enum dev_event dev_event) | |||
719 | stsch(sch->schid, &sch->schib); | 768 | stsch(sch->schid, &sch->schib); |
720 | 769 | ||
721 | if (sch->schib.scsw.actl != 0 || | 770 | if (sch->schib.scsw.actl != 0 || |
771 | (sch->schib.scsw.stctl & SCSW_STCTL_STATUS_PEND) || | ||
722 | (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { | 772 | (cdev->private->irb.scsw.stctl & SCSW_STCTL_STATUS_PEND)) { |
723 | /* | 773 | /* |
724 | * No final status yet or final status not yet delivered | 774 | * No final status yet or final status not yet delivered |
@@ -995,8 +1045,7 @@ static void | |||
995 | ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event) | 1045 | ccw_device_wait4io_verify(struct ccw_device *cdev, enum dev_event dev_event) |
996 | { | 1046 | { |
997 | /* When the I/O has terminated, we have to start verification. */ | 1047 | /* When the I/O has terminated, we have to start verification. */ |
998 | if (cdev->private->options.pgroup) | 1048 | cdev->private->flags.doverify = 1; |
999 | cdev->private->flags.doverify = 1; | ||
1000 | } | 1049 | } |
1001 | 1050 | ||
1002 | static void | 1051 | static void |
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index a60124264bee..9e3de0bd59b5 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
@@ -263,6 +263,9 @@ ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb) | |||
263 | /* Abuse intparm for error reporting. */ | 263 | /* Abuse intparm for error reporting. */ |
264 | if (IS_ERR(irb)) | 264 | if (IS_ERR(irb)) |
265 | cdev->private->intparm = -EIO; | 265 | cdev->private->intparm = -EIO; |
266 | else if (irb->scsw.cc == 1) | ||
267 | /* Retry for deferred condition code. */ | ||
268 | cdev->private->intparm = -EAGAIN; | ||
266 | else if ((irb->scsw.dstat != | 269 | else if ((irb->scsw.dstat != |
267 | (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || | 270 | (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) || |
268 | (irb->scsw.cstat != 0)) { | 271 | (irb->scsw.cstat != 0)) { |
diff --git a/drivers/s390/cio/device_pgid.c b/drivers/s390/cio/device_pgid.c index 54cb64ed0786..1693a102dcfe 100644 --- a/drivers/s390/cio/device_pgid.c +++ b/drivers/s390/cio/device_pgid.c | |||
@@ -24,6 +24,21 @@ | |||
24 | #include "ioasm.h" | 24 | #include "ioasm.h" |
25 | 25 | ||
26 | /* | 26 | /* |
27 | * Helper function called from interrupt context to decide whether an | ||
28 | * operation should be tried again. | ||
29 | */ | ||
30 | static int __ccw_device_should_retry(struct scsw *scsw) | ||
31 | { | ||
32 | /* CC is only valid if start function bit is set. */ | ||
33 | if ((scsw->fctl & SCSW_FCTL_START_FUNC) && scsw->cc == 1) | ||
34 | return 1; | ||
35 | /* No more activity. For sense and set PGID we stubbornly try again. */ | ||
36 | if (!scsw->actl) | ||
37 | return 1; | ||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | /* | ||
27 | * Start Sense Path Group ID helper function. Used in ccw_device_recog | 42 | * Start Sense Path Group ID helper function. Used in ccw_device_recog |
28 | * and ccw_device_sense_pgid. | 43 | * and ccw_device_sense_pgid. |
29 | */ | 44 | */ |
@@ -33,12 +48,17 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev) | |||
33 | struct subchannel *sch; | 48 | struct subchannel *sch; |
34 | struct ccw1 *ccw; | 49 | struct ccw1 *ccw; |
35 | int ret; | 50 | int ret; |
51 | int i; | ||
36 | 52 | ||
37 | sch = to_subchannel(cdev->dev.parent); | 53 | sch = to_subchannel(cdev->dev.parent); |
54 | /* Return if we already checked on all paths. */ | ||
55 | if (cdev->private->imask == 0) | ||
56 | return (sch->lpm == 0) ? -ENODEV : -EACCES; | ||
57 | i = 8 - ffs(cdev->private->imask); | ||
58 | |||
38 | /* Setup sense path group id channel program. */ | 59 | /* Setup sense path group id channel program. */ |
39 | ccw = cdev->private->iccws; | 60 | ccw = cdev->private->iccws; |
40 | ccw->cmd_code = CCW_CMD_SENSE_PGID; | 61 | ccw->cmd_code = CCW_CMD_SENSE_PGID; |
41 | ccw->cda = (__u32) __pa (&cdev->private->pgid); | ||
42 | ccw->count = sizeof (struct pgid); | 62 | ccw->count = sizeof (struct pgid); |
43 | ccw->flags = CCW_FLAG_SLI; | 63 | ccw->flags = CCW_FLAG_SLI; |
44 | 64 | ||
@@ -48,6 +68,7 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev) | |||
48 | ret = -ENODEV; | 68 | ret = -ENODEV; |
49 | while (cdev->private->imask != 0) { | 69 | while (cdev->private->imask != 0) { |
50 | /* Try every path multiple times. */ | 70 | /* Try every path multiple times. */ |
71 | ccw->cda = (__u32) __pa (&cdev->private->pgid[i]); | ||
51 | if (cdev->private->iretry > 0) { | 72 | if (cdev->private->iretry > 0) { |
52 | cdev->private->iretry--; | 73 | cdev->private->iretry--; |
53 | ret = cio_start (sch, cdev->private->iccws, | 74 | ret = cio_start (sch, cdev->private->iccws, |
@@ -64,7 +85,9 @@ __ccw_device_sense_pgid_start(struct ccw_device *cdev) | |||
64 | } | 85 | } |
65 | cdev->private->imask >>= 1; | 86 | cdev->private->imask >>= 1; |
66 | cdev->private->iretry = 5; | 87 | cdev->private->iretry = 5; |
88 | i++; | ||
67 | } | 89 | } |
90 | |||
68 | return ret; | 91 | return ret; |
69 | } | 92 | } |
70 | 93 | ||
@@ -76,7 +99,7 @@ ccw_device_sense_pgid_start(struct ccw_device *cdev) | |||
76 | cdev->private->state = DEV_STATE_SENSE_PGID; | 99 | cdev->private->state = DEV_STATE_SENSE_PGID; |
77 | cdev->private->imask = 0x80; | 100 | cdev->private->imask = 0x80; |
78 | cdev->private->iretry = 5; | 101 | cdev->private->iretry = 5; |
79 | memset (&cdev->private->pgid, 0, sizeof (struct pgid)); | 102 | memset (&cdev->private->pgid, 0, sizeof (cdev->private->pgid)); |
80 | ret = __ccw_device_sense_pgid_start(cdev); | 103 | ret = __ccw_device_sense_pgid_start(cdev); |
81 | if (ret && ret != -EBUSY) | 104 | if (ret && ret != -EBUSY) |
82 | ccw_device_sense_pgid_done(cdev, ret); | 105 | ccw_device_sense_pgid_done(cdev, ret); |
@@ -91,6 +114,7 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) | |||
91 | { | 114 | { |
92 | struct subchannel *sch; | 115 | struct subchannel *sch; |
93 | struct irb *irb; | 116 | struct irb *irb; |
117 | int i; | ||
94 | 118 | ||
95 | sch = to_subchannel(cdev->dev.parent); | 119 | sch = to_subchannel(cdev->dev.parent); |
96 | irb = &cdev->private->irb; | 120 | irb = &cdev->private->irb; |
@@ -124,7 +148,8 @@ __ccw_device_check_sense_pgid(struct ccw_device *cdev) | |||
124 | sch->schid.sch_no, sch->orb.lpm); | 148 | sch->schid.sch_no, sch->orb.lpm); |
125 | return -EACCES; | 149 | return -EACCES; |
126 | } | 150 | } |
127 | if (cdev->private->pgid.inf.ps.state2 == SNID_STATE2_RESVD_ELSE) { | 151 | i = 8 - ffs(cdev->private->imask); |
152 | if (cdev->private->pgid[i].inf.ps.state2 == SNID_STATE2_RESVD_ELSE) { | ||
128 | CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x " | 153 | CIO_MSG_EVENT(2, "SNID - Device %04x on Subchannel 0.%x.%04x " |
129 | "is reserved by someone else\n", | 154 | "is reserved by someone else\n", |
130 | cdev->private->devno, sch->schid.ssid, | 155 | cdev->private->devno, sch->schid.ssid, |
@@ -145,10 +170,10 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
145 | int ret; | 170 | int ret; |
146 | 171 | ||
147 | irb = (struct irb *) __LC_IRB; | 172 | irb = (struct irb *) __LC_IRB; |
148 | /* Retry sense pgid for cc=1. */ | 173 | |
149 | if (irb->scsw.stctl == | 174 | if (irb->scsw.stctl == |
150 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 175 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
151 | if (irb->scsw.cc == 1) { | 176 | if (__ccw_device_should_retry(&irb->scsw)) { |
152 | ret = __ccw_device_sense_pgid_start(cdev); | 177 | ret = __ccw_device_sense_pgid_start(cdev); |
153 | if (ret && ret != -EBUSY) | 178 | if (ret && ret != -EBUSY) |
154 | ccw_device_sense_pgid_done(cdev, ret); | 179 | ccw_device_sense_pgid_done(cdev, ret); |
@@ -162,12 +187,6 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
162 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | 187 | memset(&cdev->private->irb, 0, sizeof(struct irb)); |
163 | switch (ret) { | 188 | switch (ret) { |
164 | /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */ | 189 | /* 0, -ETIME, -EOPNOTSUPP, -EAGAIN, -EACCES or -EUSERS */ |
165 | case 0: /* Sense Path Group ID successful. */ | ||
166 | if (cdev->private->pgid.inf.ps.state1 == SNID_STATE1_RESET) | ||
167 | memcpy(&cdev->private->pgid, &css[0]->global_pgid, | ||
168 | sizeof(struct pgid)); | ||
169 | ccw_device_sense_pgid_done(cdev, 0); | ||
170 | break; | ||
171 | case -EOPNOTSUPP: /* Sense Path Group ID not supported */ | 190 | case -EOPNOTSUPP: /* Sense Path Group ID not supported */ |
172 | ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP); | 191 | ccw_device_sense_pgid_done(cdev, -EOPNOTSUPP); |
173 | break; | 192 | break; |
@@ -176,13 +195,15 @@ ccw_device_sense_pgid_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
176 | break; | 195 | break; |
177 | case -EACCES: /* channel is not operational. */ | 196 | case -EACCES: /* channel is not operational. */ |
178 | sch->lpm &= ~cdev->private->imask; | 197 | sch->lpm &= ~cdev->private->imask; |
198 | /* Fall through. */ | ||
199 | case 0: /* Sense Path Group ID successful. */ | ||
179 | cdev->private->imask >>= 1; | 200 | cdev->private->imask >>= 1; |
180 | cdev->private->iretry = 5; | 201 | cdev->private->iretry = 5; |
181 | /* Fall through. */ | 202 | /* Fall through. */ |
182 | case -EAGAIN: /* Try again. */ | 203 | case -EAGAIN: /* Try again. */ |
183 | ret = __ccw_device_sense_pgid_start(cdev); | 204 | ret = __ccw_device_sense_pgid_start(cdev); |
184 | if (ret != 0 && ret != -EBUSY) | 205 | if (ret != 0 && ret != -EBUSY) |
185 | ccw_device_sense_pgid_done(cdev, -ENODEV); | 206 | ccw_device_sense_pgid_done(cdev, ret); |
186 | break; | 207 | break; |
187 | case -EUSERS: /* device is reserved for someone else. */ | 208 | case -EUSERS: /* device is reserved for someone else. */ |
188 | ccw_device_sense_pgid_done(cdev, -EUSERS); | 209 | ccw_device_sense_pgid_done(cdev, -EUSERS); |
@@ -203,20 +224,20 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func) | |||
203 | sch = to_subchannel(cdev->dev.parent); | 224 | sch = to_subchannel(cdev->dev.parent); |
204 | 225 | ||
205 | /* Setup sense path group id channel program. */ | 226 | /* Setup sense path group id channel program. */ |
206 | cdev->private->pgid.inf.fc = func; | 227 | cdev->private->pgid[0].inf.fc = func; |
207 | ccw = cdev->private->iccws; | 228 | ccw = cdev->private->iccws; |
208 | if (!cdev->private->flags.pgid_single) { | 229 | if (!cdev->private->flags.pgid_single) { |
209 | cdev->private->pgid.inf.fc |= SPID_FUNC_MULTI_PATH; | 230 | cdev->private->pgid[0].inf.fc |= SPID_FUNC_MULTI_PATH; |
210 | ccw->cmd_code = CCW_CMD_SUSPEND_RECONN; | 231 | ccw->cmd_code = CCW_CMD_SUSPEND_RECONN; |
211 | ccw->cda = 0; | 232 | ccw->cda = 0; |
212 | ccw->count = 0; | 233 | ccw->count = 0; |
213 | ccw->flags = CCW_FLAG_SLI | CCW_FLAG_CC; | 234 | ccw->flags = CCW_FLAG_SLI | CCW_FLAG_CC; |
214 | ccw++; | 235 | ccw++; |
215 | } else | 236 | } else |
216 | cdev->private->pgid.inf.fc |= SPID_FUNC_SINGLE_PATH; | 237 | cdev->private->pgid[0].inf.fc |= SPID_FUNC_SINGLE_PATH; |
217 | 238 | ||
218 | ccw->cmd_code = CCW_CMD_SET_PGID; | 239 | ccw->cmd_code = CCW_CMD_SET_PGID; |
219 | ccw->cda = (__u32) __pa (&cdev->private->pgid); | 240 | ccw->cda = (__u32) __pa (&cdev->private->pgid[0]); |
220 | ccw->count = sizeof (struct pgid); | 241 | ccw->count = sizeof (struct pgid); |
221 | ccw->flags = CCW_FLAG_SLI; | 242 | ccw->flags = CCW_FLAG_SLI; |
222 | 243 | ||
@@ -244,6 +265,48 @@ __ccw_device_do_pgid(struct ccw_device *cdev, __u8 func) | |||
244 | } | 265 | } |
245 | 266 | ||
246 | /* | 267 | /* |
268 | * Helper function to send a nop ccw down a path. | ||
269 | */ | ||
270 | static int __ccw_device_do_nop(struct ccw_device *cdev) | ||
271 | { | ||
272 | struct subchannel *sch; | ||
273 | struct ccw1 *ccw; | ||
274 | int ret; | ||
275 | |||
276 | sch = to_subchannel(cdev->dev.parent); | ||
277 | |||
278 | /* Setup nop channel program. */ | ||
279 | ccw = cdev->private->iccws; | ||
280 | ccw->cmd_code = CCW_CMD_NOOP; | ||
281 | ccw->cda = 0; | ||
282 | ccw->count = 0; | ||
283 | ccw->flags = CCW_FLAG_SLI; | ||
284 | |||
285 | /* Reset device status. */ | ||
286 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | ||
287 | |||
288 | /* Try multiple times. */ | ||
289 | ret = -ENODEV; | ||
290 | if (cdev->private->iretry > 0) { | ||
291 | cdev->private->iretry--; | ||
292 | ret = cio_start (sch, cdev->private->iccws, | ||
293 | cdev->private->imask); | ||
294 | /* ret is 0, -EBUSY, -EACCES or -ENODEV */ | ||
295 | if ((ret != -EACCES) && (ret != -ENODEV)) | ||
296 | return ret; | ||
297 | } | ||
298 | /* nop command failed on this path. Switch it off. */ | ||
299 | sch->lpm &= ~cdev->private->imask; | ||
300 | sch->vpm &= ~cdev->private->imask; | ||
301 | CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel " | ||
302 | "0.%x.%04x, lpm %02X, became 'not operational'\n", | ||
303 | cdev->private->devno, sch->schid.ssid, | ||
304 | sch->schid.sch_no, cdev->private->imask); | ||
305 | return ret; | ||
306 | } | ||
307 | |||
308 | |||
309 | /* | ||
247 | * Called from interrupt context to check if a valid answer | 310 | * Called from interrupt context to check if a valid answer |
248 | * to Set Path Group ID was received. | 311 | * to Set Path Group ID was received. |
249 | */ | 312 | */ |
@@ -282,6 +345,29 @@ __ccw_device_check_pgid(struct ccw_device *cdev) | |||
282 | return 0; | 345 | return 0; |
283 | } | 346 | } |
284 | 347 | ||
348 | /* | ||
349 | * Called from interrupt context to check the path status after a nop has | ||
350 | * been sent. | ||
351 | */ | ||
352 | static int __ccw_device_check_nop(struct ccw_device *cdev) | ||
353 | { | ||
354 | struct subchannel *sch; | ||
355 | struct irb *irb; | ||
356 | |||
357 | sch = to_subchannel(cdev->dev.parent); | ||
358 | irb = &cdev->private->irb; | ||
359 | if (irb->scsw.fctl & (SCSW_FCTL_HALT_FUNC | SCSW_FCTL_CLEAR_FUNC)) | ||
360 | return -ETIME; | ||
361 | if (irb->scsw.cc == 3) { | ||
362 | CIO_MSG_EVENT(2, "NOP - Device %04x on Subchannel 0.%x.%04x," | ||
363 | " lpm %02X, became 'not operational'\n", | ||
364 | cdev->private->devno, sch->schid.ssid, | ||
365 | sch->schid.sch_no, cdev->private->imask); | ||
366 | return -EACCES; | ||
367 | } | ||
368 | return 0; | ||
369 | } | ||
370 | |||
285 | static void | 371 | static void |
286 | __ccw_device_verify_start(struct ccw_device *cdev) | 372 | __ccw_device_verify_start(struct ccw_device *cdev) |
287 | { | 373 | { |
@@ -296,9 +382,12 @@ __ccw_device_verify_start(struct ccw_device *cdev) | |||
296 | if ((sch->vpm & imask) != (sch->lpm & imask)) | 382 | if ((sch->vpm & imask) != (sch->lpm & imask)) |
297 | break; | 383 | break; |
298 | cdev->private->imask = imask; | 384 | cdev->private->imask = imask; |
299 | func = (sch->vpm & imask) ? | 385 | if (cdev->private->options.pgroup) { |
300 | SPID_FUNC_RESIGN : SPID_FUNC_ESTABLISH; | 386 | func = (sch->vpm & imask) ? |
301 | ret = __ccw_device_do_pgid(cdev, func); | 387 | SPID_FUNC_RESIGN : SPID_FUNC_ESTABLISH; |
388 | ret = __ccw_device_do_pgid(cdev, func); | ||
389 | } else | ||
390 | ret = __ccw_device_do_nop(cdev); | ||
302 | if (ret == 0 || ret == -EBUSY) | 391 | if (ret == 0 || ret == -EBUSY) |
303 | return; | 392 | return; |
304 | cdev->private->iretry = 5; | 393 | cdev->private->iretry = 5; |
@@ -317,17 +406,20 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
317 | int ret; | 406 | int ret; |
318 | 407 | ||
319 | irb = (struct irb *) __LC_IRB; | 408 | irb = (struct irb *) __LC_IRB; |
320 | /* Retry set pgid for cc=1. */ | 409 | |
321 | if (irb->scsw.stctl == | 410 | if (irb->scsw.stctl == |
322 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 411 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
323 | if (irb->scsw.cc == 1) | 412 | if (__ccw_device_should_retry(&irb->scsw)) |
324 | __ccw_device_verify_start(cdev); | 413 | __ccw_device_verify_start(cdev); |
325 | return; | 414 | return; |
326 | } | 415 | } |
327 | if (ccw_device_accumulate_and_sense(cdev, irb) != 0) | 416 | if (ccw_device_accumulate_and_sense(cdev, irb) != 0) |
328 | return; | 417 | return; |
329 | sch = to_subchannel(cdev->dev.parent); | 418 | sch = to_subchannel(cdev->dev.parent); |
330 | ret = __ccw_device_check_pgid(cdev); | 419 | if (cdev->private->options.pgroup) |
420 | ret = __ccw_device_check_pgid(cdev); | ||
421 | else | ||
422 | ret = __ccw_device_check_nop(cdev); | ||
331 | memset(&cdev->private->irb, 0, sizeof(struct irb)); | 423 | memset(&cdev->private->irb, 0, sizeof(struct irb)); |
332 | switch (ret) { | 424 | switch (ret) { |
333 | /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ | 425 | /* 0, -ETIME, -EAGAIN, -EOPNOTSUPP or -EACCES */ |
@@ -345,11 +437,10 @@ ccw_device_verify_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
345 | * One of those strange devices which claim to be able | 437 | * One of those strange devices which claim to be able |
346 | * to do multipathing but not for Set Path Group ID. | 438 | * to do multipathing but not for Set Path Group ID. |
347 | */ | 439 | */ |
348 | if (cdev->private->flags.pgid_single) { | 440 | if (cdev->private->flags.pgid_single) |
349 | ccw_device_verify_done(cdev, -EOPNOTSUPP); | 441 | cdev->private->options.pgroup = 0; |
350 | break; | 442 | else |
351 | } | 443 | cdev->private->flags.pgid_single = 1; |
352 | cdev->private->flags.pgid_single = 1; | ||
353 | /* fall through. */ | 444 | /* fall through. */ |
354 | case -EAGAIN: /* Try again. */ | 445 | case -EAGAIN: /* Try again. */ |
355 | __ccw_device_verify_start(cdev); | 446 | __ccw_device_verify_start(cdev); |
@@ -418,10 +509,10 @@ ccw_device_disband_irq(struct ccw_device *cdev, enum dev_event dev_event) | |||
418 | int ret; | 509 | int ret; |
419 | 510 | ||
420 | irb = (struct irb *) __LC_IRB; | 511 | irb = (struct irb *) __LC_IRB; |
421 | /* Retry set pgid for cc=1. */ | 512 | |
422 | if (irb->scsw.stctl == | 513 | if (irb->scsw.stctl == |
423 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { | 514 | (SCSW_STCTL_STATUS_PEND | SCSW_STCTL_ALERT_STATUS)) { |
424 | if (irb->scsw.cc == 1) | 515 | if (__ccw_device_should_retry(&irb->scsw)) |
425 | __ccw_device_disband_start(cdev); | 516 | __ccw_device_disband_start(cdev); |
426 | return; | 517 | return; |
427 | } | 518 | } |
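The device_pgid.c hunks above extend path verification so that, when path grouping is disabled, a plain NOP channel program is started instead of Set Path Group ID, and every path gets up to five attempts before its bit is dropped from the path mask. The following user-space sketch (all names invented, not kernel code) mimics that per-path retry loop under those assumptions:

/*
 * Editorial sketch: per-path retry logic similar in spirit to
 * __ccw_device_verify_start()/__ccw_device_do_nop() above.
 */
#include <stdio.h>

#define MAX_RETRIES 5

/* Pretend channel-program start: 0 on success, -1 on path error. */
static int start_io_on_path(unsigned char path_mask, int use_pgid)
{
    /* For the sketch, paths 0x80 and 0x20 "work", everything else fails. */
    (void)use_pgid;
    return (path_mask == 0x80 || path_mask == 0x20) ? 0 : -1;
}

int main(void)
{
    unsigned char lpm = 0xE0;   /* logically available paths */
    unsigned char imask;
    int pgroup = 1;             /* 0 would select the NOP variant */

    for (imask = 0x80; imask != 0; imask >>= 1) {
        int retries, rc = -1;

        if (!(lpm & imask))
            continue;
        /* Real code retries because the subchannel may be busy. */
        for (retries = MAX_RETRIES; retries > 0 && rc != 0; retries--)
            rc = start_io_on_path(imask, pgroup);
        if (rc != 0) {
            lpm &= ~imask;      /* path failed: drop it, like sch->lpm */
            printf("path %02X switched off\n", imask);
        } else {
            printf("path %02X verified\n", imask);
        }
    }
    printf("remaining lpm: %02X\n", lpm);
    return 0;
}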
diff --git a/drivers/s390/cio/device_status.c b/drivers/s390/cio/device_status.c index 14bef2c179bf..caf148d5caad 100644 --- a/drivers/s390/cio/device_status.c +++ b/drivers/s390/cio/device_status.c | |||
@@ -67,8 +67,7 @@ ccw_device_path_notoper(struct ccw_device *cdev) | |||
67 | sch->schib.pmcw.pnom); | 67 | sch->schib.pmcw.pnom); |
68 | 68 | ||
69 | sch->lpm &= ~sch->schib.pmcw.pnom; | 69 | sch->lpm &= ~sch->schib.pmcw.pnom; |
70 | if (cdev->private->options.pgroup) | 70 | cdev->private->flags.doverify = 1; |
71 | cdev->private->flags.doverify = 1; | ||
72 | } | 71 | } |
73 | 72 | ||
74 | /* | 73 | /* |
@@ -180,7 +179,7 @@ ccw_device_accumulate_esw(struct ccw_device *cdev, struct irb *irb) | |||
180 | cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth; | 179 | cdev_irb->esw.esw0.erw.auth = irb->esw.esw0.erw.auth; |
181 | /* Copy path verification required flag. */ | 180 | /* Copy path verification required flag. */ |
182 | cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf; | 181 | cdev_irb->esw.esw0.erw.pvrf = irb->esw.esw0.erw.pvrf; |
183 | if (irb->esw.esw0.erw.pvrf && cdev->private->options.pgroup) | 182 | if (irb->esw.esw0.erw.pvrf) |
184 | cdev->private->flags.doverify = 1; | 183 | cdev->private->flags.doverify = 1; |
185 | /* Copy concurrent sense bit. */ | 184 | /* Copy concurrent sense bit. */ |
186 | cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons; | 185 | cdev_irb->esw.esw0.erw.cons = irb->esw.esw0.erw.cons; |
@@ -354,7 +353,7 @@ ccw_device_accumulate_basic_sense(struct ccw_device *cdev, struct irb *irb) | |||
354 | } | 353 | } |
355 | /* Check if path verification is required. */ | 354 | /* Check if path verification is required. */ |
356 | if (ccw_device_accumulate_esw_valid(irb) && | 355 | if (ccw_device_accumulate_esw_valid(irb) && |
357 | irb->esw.esw0.erw.pvrf && cdev->private->options.pgroup) | 356 | irb->esw.esw0.erw.pvrf) |
358 | cdev->private->flags.doverify = 1; | 357 | cdev->private->flags.doverify = 1; |
359 | } | 358 | } |
360 | 359 | ||
diff --git a/drivers/s390/cio/qdio.c b/drivers/s390/cio/qdio.c index b70039af70d6..7c93a8798d23 100644 --- a/drivers/s390/cio/qdio.c +++ b/drivers/s390/cio/qdio.c | |||
@@ -2735,7 +2735,7 @@ qdio_free(struct ccw_device *cdev) | |||
2735 | QDIO_DBF_TEXT1(0,trace,dbf_text); | 2735 | QDIO_DBF_TEXT1(0,trace,dbf_text); |
2736 | QDIO_DBF_TEXT0(0,setup,dbf_text); | 2736 | QDIO_DBF_TEXT0(0,setup,dbf_text); |
2737 | 2737 | ||
2738 | cdev->private->qdio_data = 0; | 2738 | cdev->private->qdio_data = NULL; |
2739 | 2739 | ||
2740 | up(&irq_ptr->setting_up_sema); | 2740 | up(&irq_ptr->setting_up_sema); |
2741 | 2741 | ||
diff --git a/drivers/s390/net/Kconfig b/drivers/s390/net/Kconfig index 548854754921..1a93fa684e9f 100644 --- a/drivers/s390/net/Kconfig +++ b/drivers/s390/net/Kconfig | |||
@@ -92,15 +92,6 @@ config QETH_VLAN | |||
92 | If CONFIG_QETH is switched on, this option will include IEEE | 92 | If CONFIG_QETH is switched on, this option will include IEEE |
93 | 802.1q VLAN support in the qeth device driver. | 93 | 802.1q VLAN support in the qeth device driver. |
94 | 94 | ||
95 | config QETH_PERF_STATS | ||
96 | bool "Performance statistics in /proc" | ||
97 | depends on QETH | ||
98 | help | ||
99 | When switched on, this option will add a file in the proc-fs | ||
100 | (/proc/qeth_perf_stats) containing performance statistics. It | ||
101 | may slightly impact performance, so this is only recommended for | ||
102 | internal tuning of the device driver. | ||
103 | |||
104 | config CCWGROUP | 95 | config CCWGROUP |
105 | tristate | 96 | tristate |
106 | default (LCS || CTC || QETH) | 97 | default (LCS || CTC || QETH) |
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile index 6775a837d646..4777e36a922f 100644 --- a/drivers/s390/net/Makefile +++ b/drivers/s390/net/Makefile | |||
@@ -10,7 +10,6 @@ obj-$(CONFIG_SMSGIUCV) += smsgiucv.o | |||
10 | obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o | 10 | obj-$(CONFIG_CTC) += ctc.o fsm.o cu3088.o |
11 | obj-$(CONFIG_LCS) += lcs.o cu3088.o | 11 | obj-$(CONFIG_LCS) += lcs.o cu3088.o |
12 | obj-$(CONFIG_CLAW) += claw.o cu3088.o | 12 | obj-$(CONFIG_CLAW) += claw.o cu3088.o |
13 | obj-$(CONFIG_MPC) += ctcmpc.o fsm.o cu3088.o | ||
14 | qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o | 13 | qeth-y := qeth_main.o qeth_mpc.o qeth_sys.o qeth_eddp.o |
15 | qeth-$(CONFIG_PROC_FS) += qeth_proc.o | 14 | qeth-$(CONFIG_PROC_FS) += qeth_proc.o |
16 | obj-$(CONFIG_QETH) += qeth.o | 15 | obj-$(CONFIG_QETH) += qeth.o |
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c index 23d53bf9daf1..95f4e105cb96 100644 --- a/drivers/s390/net/claw.c +++ b/drivers/s390/net/claw.c | |||
@@ -529,7 +529,7 @@ claw_open(struct net_device *dev) | |||
529 | printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); | 529 | printk(KERN_INFO "%s:%s Enter \n",dev->name,__FUNCTION__); |
530 | #endif | 530 | #endif |
531 | CLAW_DBF_TEXT(4,trace,"open"); | 531 | CLAW_DBF_TEXT(4,trace,"open"); |
532 | if (!dev | (dev->name[0] == 0x00)) { | 532 | if (!dev || (dev->name[0] == 0x00)) { |
533 | CLAW_DBF_TEXT(2,trace,"BadDev"); | 533 | CLAW_DBF_TEXT(2,trace,"BadDev"); |
534 | printk(KERN_WARNING "claw: Bad device at open failing \n"); | 534 | printk(KERN_WARNING "claw: Bad device at open failing \n"); |
535 | return -ENODEV; | 535 | return -ENODEV; |
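The claw.c fix above replaces a bitwise '|' with a logical '||' in the open check. A small sketch of why that matters, using an invented struct fake_dev rather than the real claw structures:

/*
 * Editorial sketch: with the bitwise form both operands are always
 * evaluated, so a NULL dev is still dereferenced by the right-hand side;
 * the logical form short-circuits and never touches dev->name.
 */
#include <stddef.h>
#include <stdio.h>

struct fake_dev { char name[16]; };

static int open_checked(struct fake_dev *dev)
{
    if (!dev || (dev->name[0] == 0x00))
        return -1;
    return 0;
}

int main(void)
{
    struct fake_dev d = { .name = "claw0" };

    printf("valid dev: %d\n", open_checked(&d));   /* 0 */
    printf("NULL dev:  %d\n", open_checked(NULL)); /* -1, no crash */
    return 0;
}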
diff --git a/drivers/s390/net/ctcmain.c b/drivers/s390/net/ctcmain.c index 20c8eb16f464..3257c22dd79c 100644 --- a/drivers/s390/net/ctcmain.c +++ b/drivers/s390/net/ctcmain.c | |||
@@ -1714,6 +1714,9 @@ add_channel(struct ccw_device *cdev, enum channel_types type) | |||
1714 | kfree(ch); | 1714 | kfree(ch); |
1715 | return 0; | 1715 | return 0; |
1716 | } | 1716 | } |
1717 | |||
1718 | spin_lock_init(&ch->collect_lock); | ||
1719 | |||
1717 | fsm_settimer(ch->fsm, &ch->timer); | 1720 | fsm_settimer(ch->fsm, &ch->timer); |
1718 | skb_queue_head_init(&ch->io_queue); | 1721 | skb_queue_head_init(&ch->io_queue); |
1719 | skb_queue_head_init(&ch->collect_queue); | 1722 | skb_queue_head_init(&ch->collect_queue); |
@@ -2686,9 +2689,17 @@ static struct attribute_group ctc_attr_group = { | |||
2686 | static int | 2689 | static int |
2687 | ctc_add_attributes(struct device *dev) | 2690 | ctc_add_attributes(struct device *dev) |
2688 | { | 2691 | { |
2689 | device_create_file(dev, &dev_attr_loglevel); | 2692 | int rc; |
2690 | device_create_file(dev, &dev_attr_stats); | 2693 | |
2691 | return 0; | 2694 | rc = device_create_file(dev, &dev_attr_loglevel); |
2695 | if (rc) | ||
2696 | goto out; | ||
2697 | rc = device_create_file(dev, &dev_attr_stats); | ||
2698 | if (!rc) | ||
2699 | goto out; | ||
2700 | device_remove_file(dev, &dev_attr_loglevel); | ||
2701 | out: | ||
2702 | return rc; | ||
2692 | } | 2703 | } |
2693 | 2704 | ||
2694 | static void | 2705 | static void |
@@ -2901,7 +2912,12 @@ ctc_new_device(struct ccwgroup_device *cgdev) | |||
2901 | goto out; | 2912 | goto out; |
2902 | } | 2913 | } |
2903 | 2914 | ||
2904 | ctc_add_attributes(&cgdev->dev); | 2915 | if (ctc_add_attributes(&cgdev->dev)) { |
2916 | ctc_netdev_unregister(dev); | ||
2917 | dev->priv = NULL; | ||
2918 | ctc_free_netdevice(dev, 1); | ||
2919 | goto out; | ||
2920 | } | ||
2905 | 2921 | ||
2906 | strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name)); | 2922 | strlcpy(privptr->fsm->name, dev->name, sizeof (privptr->fsm->name)); |
2907 | 2923 | ||
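The ctcmain.c hunks above make ctc_add_attributes() check the return value of device_create_file() and remove the first attribute again if the second cannot be created, so the caller sees all-or-nothing. A user-space sketch of that create/rollback shape, with invented helper names:

/*
 * Editorial sketch of the create/rollback pattern; create_attr and
 * remove_attr stand in for device_create_file/device_remove_file.
 */
#include <stdio.h>

static int create_attr(const char *name, int fail)
{
    if (fail) {
        printf("create %-8s -> error\n", name);
        return -1;
    }
    printf("create %-8s -> ok\n", name);
    return 0;
}

static void remove_attr(const char *name)
{
    printf("remove %s\n", name);
}

static int add_attributes(int fail_second)
{
    int rc;

    rc = create_attr("loglevel", 0);
    if (rc)
        goto out;
    rc = create_attr("stats", fail_second);
    if (!rc)
        goto out;
    remove_attr("loglevel");    /* roll back the first attribute */
out:
    return rc;
}

int main(void)
{
    printf("rc = %d\n\n", add_attributes(0));
    printf("rc = %d\n", add_attributes(1));
    return 0;
}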
diff --git a/drivers/s390/net/iucv.c b/drivers/s390/net/iucv.c index 189a49275433..821dde86e240 100644 --- a/drivers/s390/net/iucv.c +++ b/drivers/s390/net/iucv.c | |||
@@ -335,8 +335,8 @@ do { \ | |||
335 | 335 | ||
336 | #else | 336 | #else |
337 | 337 | ||
338 | #define iucv_debug(lvl, fmt, args...) | 338 | #define iucv_debug(lvl, fmt, args...) do { } while (0) |
339 | #define iucv_dumpit(title, buf, len) | 339 | #define iucv_dumpit(title, buf, len) do { } while (0) |
340 | 340 | ||
341 | #endif | 341 | #endif |
342 | 342 | ||
@@ -692,7 +692,7 @@ iucv_retrieve_buffer (void) | |||
692 | iucv_debug(1, "entering"); | 692 | iucv_debug(1, "entering"); |
693 | if (iucv_cpuid != -1) { | 693 | if (iucv_cpuid != -1) { |
694 | smp_call_function_on(iucv_retrieve_buffer_cpuid, | 694 | smp_call_function_on(iucv_retrieve_buffer_cpuid, |
695 | 0, 0, 1, iucv_cpuid); | 695 | NULL, 0, 1, iucv_cpuid); |
696 | /* Release the cpu reserved by iucv_declare_buffer. */ | 696 | /* Release the cpu reserved by iucv_declare_buffer. */ |
697 | smp_put_cpu(iucv_cpuid); | 697 | smp_put_cpu(iucv_cpuid); |
698 | iucv_cpuid = -1; | 698 | iucv_cpuid = -1; |
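The iucv.c change above gives the disabled debug macros a do { } while (0) body instead of expanding to nothing. A sketch (stand-in macro, not the real iucv_debug) showing that the call site still reads like one ordinary statement whether debugging is compiled in or not, and that the compiler no longer sees a bare empty body:

/*
 * Editorial sketch: the do/while form acts as a single complete
 * statement, so the trailing ';' is still required and an if/while with
 * a disabled debug call does not trigger empty-body warnings.
 */
#include <stdio.h>

#define IUCV_DEBUG_ENABLED 0

#if IUCV_DEBUG_ENABLED
#define iucv_debug(fmt, ...) printf(fmt "\n", ##__VA_ARGS__)
#else
#define iucv_debug(fmt, ...) do { } while (0)
#endif

int main(void)
{
    int pathid = 3;

    if (pathid >= 0)
        iucv_debug("pathid %d established", pathid);
    else
        iucv_debug("no path");
    return 0;
}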
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c index 2eded55ae88d..16ac68c27a27 100644 --- a/drivers/s390/net/lcs.c +++ b/drivers/s390/net/lcs.c | |||
@@ -670,9 +670,8 @@ lcs_ready_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) | |||
670 | int index, rc; | 670 | int index, rc; |
671 | 671 | ||
672 | LCS_DBF_TEXT(5, trace, "rdybuff"); | 672 | LCS_DBF_TEXT(5, trace, "rdybuff"); |
673 | if (buffer->state != BUF_STATE_LOCKED && | 673 | BUG_ON(buffer->state != BUF_STATE_LOCKED && |
674 | buffer->state != BUF_STATE_PROCESSED) | 674 | buffer->state != BUF_STATE_PROCESSED); |
675 | BUG(); | ||
676 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); | 675 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); |
677 | buffer->state = BUF_STATE_READY; | 676 | buffer->state = BUF_STATE_READY; |
678 | index = buffer - channel->iob; | 677 | index = buffer - channel->iob; |
@@ -696,8 +695,7 @@ __lcs_processed_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) | |||
696 | int index, prev, next; | 695 | int index, prev, next; |
697 | 696 | ||
698 | LCS_DBF_TEXT(5, trace, "prcsbuff"); | 697 | LCS_DBF_TEXT(5, trace, "prcsbuff"); |
699 | if (buffer->state != BUF_STATE_READY) | 698 | BUG_ON(buffer->state != BUF_STATE_READY); |
700 | BUG(); | ||
701 | buffer->state = BUF_STATE_PROCESSED; | 699 | buffer->state = BUF_STATE_PROCESSED; |
702 | index = buffer - channel->iob; | 700 | index = buffer - channel->iob; |
703 | prev = (index - 1) & (LCS_NUM_BUFFS - 1); | 701 | prev = (index - 1) & (LCS_NUM_BUFFS - 1); |
@@ -729,9 +727,8 @@ lcs_release_buffer(struct lcs_channel *channel, struct lcs_buffer *buffer) | |||
729 | unsigned long flags; | 727 | unsigned long flags; |
730 | 728 | ||
731 | LCS_DBF_TEXT(5, trace, "relbuff"); | 729 | LCS_DBF_TEXT(5, trace, "relbuff"); |
732 | if (buffer->state != BUF_STATE_LOCKED && | 730 | BUG_ON(buffer->state != BUF_STATE_LOCKED && |
733 | buffer->state != BUF_STATE_PROCESSED) | 731 | buffer->state != BUF_STATE_PROCESSED); |
734 | BUG(); | ||
735 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); | 732 | spin_lock_irqsave(get_ccwdev_lock(channel->ccwdev), flags); |
736 | buffer->state = BUF_STATE_EMPTY; | 733 | buffer->state = BUF_STATE_EMPTY; |
737 | spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); | 734 | spin_unlock_irqrestore(get_ccwdev_lock(channel->ccwdev), flags); |
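The lcs.c hunks above fold "if (cond) BUG();" into BUG_ON(cond): same behaviour, one line, and the offending condition shows up directly at the call site. Outside the kernel, assert() is the nearest analogue; the enum below is an invented stand-in for the lcs buffer states:

/* Editorial sketch of the BUG_ON-style state check. */
#include <assert.h>

enum buf_state { BUF_STATE_EMPTY, BUF_STATE_LOCKED,
                 BUF_STATE_PROCESSED, BUF_STATE_READY };

static void ready_buffer(enum buf_state state)
{
    /* Equivalent of BUG_ON(state != LOCKED && state != PROCESSED). */
    assert(state == BUF_STATE_LOCKED || state == BUF_STATE_PROCESSED);
}

int main(void)
{
    ready_buffer(BUF_STATE_LOCKED);     /* passes */
    /* ready_buffer(BUF_STATE_EMPTY);      would trip the assertion */
    return 0;
}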
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c index b452cc1afd55..d7d1cc0a5c8e 100644 --- a/drivers/s390/net/netiucv.c +++ b/drivers/s390/net/netiucv.c | |||
@@ -112,7 +112,12 @@ struct iucv_connection { | |||
112 | /** | 112 | /** |
113 | * Linked list of all connection structs. | 113 | * Linked list of all connection structs. |
114 | */ | 114 | */ |
115 | static struct iucv_connection *iucv_connections; | 115 | struct iucv_connection_struct { |
116 | struct iucv_connection *iucv_connections; | ||
117 | rwlock_t iucv_rwlock; | ||
118 | }; | ||
119 | |||
120 | static struct iucv_connection_struct iucv_conns; | ||
116 | 121 | ||
117 | /** | 122 | /** |
118 | * Representation of event-data for the | 123 | * Representation of event-data for the |
@@ -1368,8 +1373,10 @@ user_write (struct device *dev, struct device_attribute *attr, const char *buf, | |||
1368 | struct net_device *ndev = priv->conn->netdev; | 1373 | struct net_device *ndev = priv->conn->netdev; |
1369 | char *p; | 1374 | char *p; |
1370 | char *tmp; | 1375 | char *tmp; |
1371 | char username[10]; | 1376 | char username[9]; |
1372 | int i; | 1377 | int i; |
1378 | struct iucv_connection **clist = &iucv_conns.iucv_connections; | ||
1379 | unsigned long flags; | ||
1373 | 1380 | ||
1374 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1381 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1375 | if (count>9) { | 1382 | if (count>9) { |
@@ -1382,7 +1389,7 @@ user_write (struct device *dev, struct device_attribute *attr, const char *buf, | |||
1382 | tmp = strsep((char **) &buf, "\n"); | 1389 | tmp = strsep((char **) &buf, "\n"); |
1383 | for (i=0, p=tmp; i<8 && *p; i++, p++) { | 1390 | for (i=0, p=tmp; i<8 && *p; i++, p++) { |
1384 | if (isalnum(*p) || (*p == '$')) | 1391 | if (isalnum(*p) || (*p == '$')) |
1385 | username[i]= *p; | 1392 | username[i]= toupper(*p); |
1386 | else if (*p == '\n') { | 1393 | else if (*p == '\n') { |
1387 | /* trailing lf, grr */ | 1394 | /* trailing lf, grr */ |
1388 | break; | 1395 | break; |
@@ -1395,11 +1402,11 @@ user_write (struct device *dev, struct device_attribute *attr, const char *buf, | |||
1395 | return -EINVAL; | 1402 | return -EINVAL; |
1396 | } | 1403 | } |
1397 | } | 1404 | } |
1398 | while (i<9) | 1405 | while (i<8) |
1399 | username[i++] = ' '; | 1406 | username[i++] = ' '; |
1400 | username[9] = '\0'; | 1407 | username[8] = '\0'; |
1401 | 1408 | ||
1402 | if (memcmp(username, priv->conn->userid, 8)) { | 1409 | if (memcmp(username, priv->conn->userid, 9)) { |
1403 | /* username changed */ | 1410 | /* username changed */ |
1404 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { | 1411 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { |
1405 | PRINT_WARN( | 1412 | PRINT_WARN( |
@@ -1410,6 +1417,19 @@ user_write (struct device *dev, struct device_attribute *attr, const char *buf, | |||
1410 | return -EBUSY; | 1417 | return -EBUSY; |
1411 | } | 1418 | } |
1412 | } | 1419 | } |
1420 | read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); | ||
1421 | while (*clist) { | ||
1422 | if (!strncmp(username, (*clist)->userid, 9) || | ||
1423 | ((*clist)->netdev != ndev)) | ||
1424 | break; | ||
1425 | clist = &((*clist)->next); | ||
1426 | } | ||
1427 | read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | ||
1428 | if (*clist) { | ||
1429 | PRINT_WARN("netiucv: Connection to %s already exists\n", | ||
1430 | username); | ||
1431 | return -EEXIST; | ||
1432 | } | ||
1413 | memcpy(priv->conn->userid, username, 9); | 1433 | memcpy(priv->conn->userid, username, 9); |
1414 | 1434 | ||
1415 | return count; | 1435 | return count; |
@@ -1781,13 +1801,15 @@ netiucv_unregister_device(struct device *dev) | |||
1781 | static struct iucv_connection * | 1801 | static struct iucv_connection * |
1782 | netiucv_new_connection(struct net_device *dev, char *username) | 1802 | netiucv_new_connection(struct net_device *dev, char *username) |
1783 | { | 1803 | { |
1784 | struct iucv_connection **clist = &iucv_connections; | 1804 | unsigned long flags; |
1805 | struct iucv_connection **clist = &iucv_conns.iucv_connections; | ||
1785 | struct iucv_connection *conn = | 1806 | struct iucv_connection *conn = |
1786 | kzalloc(sizeof(struct iucv_connection), GFP_KERNEL); | 1807 | kzalloc(sizeof(struct iucv_connection), GFP_KERNEL); |
1787 | 1808 | ||
1788 | if (conn) { | 1809 | if (conn) { |
1789 | skb_queue_head_init(&conn->collect_queue); | 1810 | skb_queue_head_init(&conn->collect_queue); |
1790 | skb_queue_head_init(&conn->commit_queue); | 1811 | skb_queue_head_init(&conn->commit_queue); |
1812 | spin_lock_init(&conn->collect_lock); | ||
1791 | conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; | 1813 | conn->max_buffsize = NETIUCV_BUFSIZE_DEFAULT; |
1792 | conn->netdev = dev; | 1814 | conn->netdev = dev; |
1793 | 1815 | ||
@@ -1822,8 +1844,10 @@ netiucv_new_connection(struct net_device *dev, char *username) | |||
1822 | fsm_newstate(conn->fsm, CONN_STATE_STOPPED); | 1844 | fsm_newstate(conn->fsm, CONN_STATE_STOPPED); |
1823 | } | 1845 | } |
1824 | 1846 | ||
1847 | write_lock_irqsave(&iucv_conns.iucv_rwlock, flags); | ||
1825 | conn->next = *clist; | 1848 | conn->next = *clist; |
1826 | *clist = conn; | 1849 | *clist = conn; |
1850 | write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | ||
1827 | } | 1851 | } |
1828 | return conn; | 1852 | return conn; |
1829 | } | 1853 | } |
@@ -1835,14 +1859,17 @@ netiucv_new_connection(struct net_device *dev, char *username) | |||
1835 | static void | 1859 | static void |
1836 | netiucv_remove_connection(struct iucv_connection *conn) | 1860 | netiucv_remove_connection(struct iucv_connection *conn) |
1837 | { | 1861 | { |
1838 | struct iucv_connection **clist = &iucv_connections; | 1862 | struct iucv_connection **clist = &iucv_conns.iucv_connections; |
1863 | unsigned long flags; | ||
1839 | 1864 | ||
1840 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1865 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1841 | if (conn == NULL) | 1866 | if (conn == NULL) |
1842 | return; | 1867 | return; |
1868 | write_lock_irqsave(&iucv_conns.iucv_rwlock, flags); | ||
1843 | while (*clist) { | 1869 | while (*clist) { |
1844 | if (*clist == conn) { | 1870 | if (*clist == conn) { |
1845 | *clist = conn->next; | 1871 | *clist = conn->next; |
1872 | write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | ||
1846 | if (conn->handle) { | 1873 | if (conn->handle) { |
1847 | iucv_unregister_program(conn->handle); | 1874 | iucv_unregister_program(conn->handle); |
1848 | conn->handle = NULL; | 1875 | conn->handle = NULL; |
@@ -1855,6 +1882,7 @@ netiucv_remove_connection(struct iucv_connection *conn) | |||
1855 | } | 1882 | } |
1856 | clist = &((*clist)->next); | 1883 | clist = &((*clist)->next); |
1857 | } | 1884 | } |
1885 | write_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | ||
1858 | } | 1886 | } |
1859 | 1887 | ||
1860 | /** | 1888 | /** |
@@ -1947,9 +1975,11 @@ static ssize_t | |||
1947 | conn_write(struct device_driver *drv, const char *buf, size_t count) | 1975 | conn_write(struct device_driver *drv, const char *buf, size_t count) |
1948 | { | 1976 | { |
1949 | char *p; | 1977 | char *p; |
1950 | char username[10]; | 1978 | char username[9]; |
1951 | int i, ret; | 1979 | int i, ret; |
1952 | struct net_device *dev; | 1980 | struct net_device *dev; |
1981 | struct iucv_connection **clist = &iucv_conns.iucv_connections; | ||
1982 | unsigned long flags; | ||
1953 | 1983 | ||
1954 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 1984 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
1955 | if (count>9) { | 1985 | if (count>9) { |
@@ -1960,7 +1990,7 @@ conn_write(struct device_driver *drv, const char *buf, size_t count) | |||
1960 | 1990 | ||
1961 | for (i=0, p=(char *)buf; i<8 && *p; i++, p++) { | 1991 | for (i=0, p=(char *)buf; i<8 && *p; i++, p++) { |
1962 | if (isalnum(*p) || (*p == '$')) | 1992 | if (isalnum(*p) || (*p == '$')) |
1963 | username[i]= *p; | 1993 | username[i]= toupper(*p); |
1964 | else if (*p == '\n') { | 1994 | else if (*p == '\n') { |
1965 | /* trailing lf, grr */ | 1995 | /* trailing lf, grr */ |
1966 | break; | 1996 | break; |
@@ -1971,9 +2001,22 @@ conn_write(struct device_driver *drv, const char *buf, size_t count) | |||
1971 | return -EINVAL; | 2001 | return -EINVAL; |
1972 | } | 2002 | } |
1973 | } | 2003 | } |
1974 | while (i<9) | 2004 | while (i<8) |
1975 | username[i++] = ' '; | 2005 | username[i++] = ' '; |
1976 | username[9] = '\0'; | 2006 | username[8] = '\0'; |
2007 | |||
2008 | read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); | ||
2009 | while (*clist) { | ||
2010 | if (!strncmp(username, (*clist)->userid, 9)) | ||
2011 | break; | ||
2012 | clist = &((*clist)->next); | ||
2013 | } | ||
2014 | read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | ||
2015 | if (*clist) { | ||
2016 | PRINT_WARN("netiucv: Connection to %s already exists\n", | ||
2017 | username); | ||
2018 | return -EEXIST; | ||
2019 | } | ||
1977 | dev = netiucv_init_netdevice(username); | 2020 | dev = netiucv_init_netdevice(username); |
1978 | if (!dev) { | 2021 | if (!dev) { |
1979 | PRINT_WARN( | 2022 | PRINT_WARN( |
@@ -2015,7 +2058,8 @@ DRIVER_ATTR(connection, 0200, NULL, conn_write); | |||
2015 | static ssize_t | 2058 | static ssize_t |
2016 | remove_write (struct device_driver *drv, const char *buf, size_t count) | 2059 | remove_write (struct device_driver *drv, const char *buf, size_t count) |
2017 | { | 2060 | { |
2018 | struct iucv_connection **clist = &iucv_connections; | 2061 | struct iucv_connection **clist = &iucv_conns.iucv_connections; |
2062 | unsigned long flags; | ||
2019 | struct net_device *ndev; | 2063 | struct net_device *ndev; |
2020 | struct netiucv_priv *priv; | 2064 | struct netiucv_priv *priv; |
2021 | struct device *dev; | 2065 | struct device *dev; |
@@ -2026,10 +2070,10 @@ remove_write (struct device_driver *drv, const char *buf, size_t count) | |||
2026 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 2070 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
2027 | 2071 | ||
2028 | if (count >= IFNAMSIZ) | 2072 | if (count >= IFNAMSIZ) |
2029 | count = IFNAMSIZ-1; | 2073 | count = IFNAMSIZ - 1; |
2030 | 2074 | ||
2031 | for (i=0, p=(char *)buf; i<count && *p; i++, p++) { | 2075 | for (i=0, p=(char *)buf; i<count && *p; i++, p++) { |
2032 | if ((*p == '\n') | (*p == ' ')) { | 2076 | if ((*p == '\n') || (*p == ' ')) { |
2033 | /* trailing lf, grr */ | 2077 | /* trailing lf, grr */ |
2034 | break; | 2078 | break; |
2035 | } else { | 2079 | } else { |
@@ -2038,6 +2082,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count) | |||
2038 | } | 2082 | } |
2039 | name[i] = '\0'; | 2083 | name[i] = '\0'; |
2040 | 2084 | ||
2085 | read_lock_irqsave(&iucv_conns.iucv_rwlock, flags); | ||
2041 | while (*clist) { | 2086 | while (*clist) { |
2042 | ndev = (*clist)->netdev; | 2087 | ndev = (*clist)->netdev; |
2043 | priv = (struct netiucv_priv*)ndev->priv; | 2088 | priv = (struct netiucv_priv*)ndev->priv; |
@@ -2047,6 +2092,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count) | |||
2047 | clist = &((*clist)->next); | 2092 | clist = &((*clist)->next); |
2048 | continue; | 2093 | continue; |
2049 | } | 2094 | } |
2095 | read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | ||
2050 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { | 2096 | if (ndev->flags & (IFF_UP | IFF_RUNNING)) { |
2051 | PRINT_WARN( | 2097 | PRINT_WARN( |
2052 | "netiucv: net device %s active with peer %s\n", | 2098 | "netiucv: net device %s active with peer %s\n", |
@@ -2060,6 +2106,7 @@ remove_write (struct device_driver *drv, const char *buf, size_t count) | |||
2060 | netiucv_unregister_device(dev); | 2106 | netiucv_unregister_device(dev); |
2061 | return count; | 2107 | return count; |
2062 | } | 2108 | } |
2109 | read_unlock_irqrestore(&iucv_conns.iucv_rwlock, flags); | ||
2063 | PRINT_WARN("netiucv: net device %s unknown\n", name); | 2110 | PRINT_WARN("netiucv: net device %s unknown\n", name); |
2064 | IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); | 2111 | IUCV_DBF_TEXT(data, 2, "remove_write: unknown device\n"); |
2065 | return -EINVAL; | 2112 | return -EINVAL; |
@@ -2077,8 +2124,8 @@ static void __exit | |||
2077 | netiucv_exit(void) | 2124 | netiucv_exit(void) |
2078 | { | 2125 | { |
2079 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); | 2126 | IUCV_DBF_TEXT(trace, 3, __FUNCTION__); |
2080 | while (iucv_connections) { | 2127 | while (iucv_conns.iucv_connections) { |
2081 | struct net_device *ndev = iucv_connections->netdev; | 2128 | struct net_device *ndev = iucv_conns.iucv_connections->netdev; |
2082 | struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv; | 2129 | struct netiucv_priv *priv = (struct netiucv_priv*)ndev->priv; |
2083 | struct device *dev = priv->dev; | 2130 | struct device *dev = priv->dev; |
2084 | 2131 | ||
@@ -2120,6 +2167,7 @@ netiucv_init(void) | |||
2120 | if (!ret) { | 2167 | if (!ret) { |
2121 | ret = driver_create_file(&netiucv_driver, &driver_attr_remove); | 2168 | ret = driver_create_file(&netiucv_driver, &driver_attr_remove); |
2122 | netiucv_banner(); | 2169 | netiucv_banner(); |
2170 | rwlock_init(&iucv_conns.iucv_rwlock); | ||
2123 | } else { | 2171 | } else { |
2124 | PRINT_ERR("NETIUCV: failed to add driver attribute.\n"); | 2172 | PRINT_ERR("NETIUCV: failed to add driver attribute.\n"); |
2125 | IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret); | 2173 | IUCV_DBF_TEXT_(setup, 2, "ret %d from driver_create_file\n", ret); |
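The netiucv.c hunks above put the connection list behind an rwlock and reject a second connection to the same peer with -EEXIST. A rough pthreads-based sketch of that check-then-insert pattern (struct conn and the userid handling are simplified inventions, not the real netiucv structures):

/*
 * Editorial sketch: list walk under the reader lock, push-front insert
 * under the writer lock, mirroring the kernel code's shape.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct conn {
    char userid[9];             /* 8 chars, blank padded, NUL terminated */
    struct conn *next;
};

static struct conn *conn_list;
static pthread_rwlock_t conn_lock = PTHREAD_RWLOCK_INITIALIZER;

static int conn_exists(const char *userid)
{
    struct conn *c;
    int found = 0;

    pthread_rwlock_rdlock(&conn_lock);
    for (c = conn_list; c; c = c->next)
        if (!strncmp(c->userid, userid, 9)) {
            found = 1;
            break;
        }
    pthread_rwlock_unlock(&conn_lock);
    return found;
}

static int conn_add(const char *userid)
{
    struct conn *c;

    if (conn_exists(userid))
        return -1;              /* like the new -EEXIST path */
    c = calloc(1, sizeof(*c));
    if (!c)
        return -1;
    strncpy(c->userid, userid, 8);
    pthread_rwlock_wrlock(&conn_lock);
    c->next = conn_list;        /* push front, as netiucv_new_connection does */
    conn_list = c;
    pthread_rwlock_unlock(&conn_lock);
    return 0;
}

int main(void)
{
    printf("add LNXUSER1: %d\n", conn_add("LNXUSER1"));
    printf("add LNXUSER1: %d\n", conn_add("LNXUSER1"));  /* rejected */
    return 0;
}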
diff --git a/drivers/s390/net/qeth.h b/drivers/s390/net/qeth.h index 619f4a0c7160..821383d8cbe7 100644 --- a/drivers/s390/net/qeth.h +++ b/drivers/s390/net/qeth.h | |||
@@ -176,7 +176,6 @@ extern struct ccwgroup_driver qeth_ccwgroup_driver; | |||
176 | /** | 176 | /** |
177 | * card stuff | 177 | * card stuff |
178 | */ | 178 | */ |
179 | #ifdef CONFIG_QETH_PERF_STATS | ||
180 | struct qeth_perf_stats { | 179 | struct qeth_perf_stats { |
181 | unsigned int bufs_rec; | 180 | unsigned int bufs_rec; |
182 | unsigned int bufs_sent; | 181 | unsigned int bufs_sent; |
@@ -211,8 +210,10 @@ struct qeth_perf_stats { | |||
211 | unsigned int large_send_cnt; | 210 | unsigned int large_send_cnt; |
212 | unsigned int sg_skbs_sent; | 211 | unsigned int sg_skbs_sent; |
213 | unsigned int sg_frags_sent; | 212 | unsigned int sg_frags_sent; |
213 | /* initial values when measuring starts */ | ||
214 | unsigned long initial_rx_packets; | ||
215 | unsigned long initial_tx_packets; | ||
214 | }; | 216 | }; |
215 | #endif /* CONFIG_QETH_PERF_STATS */ | ||
216 | 217 | ||
217 | /* Routing stuff */ | 218 | /* Routing stuff */ |
218 | struct qeth_routing_info { | 219 | struct qeth_routing_info { |
@@ -462,6 +463,7 @@ enum qeth_qdio_info_states { | |||
462 | QETH_QDIO_UNINITIALIZED, | 463 | QETH_QDIO_UNINITIALIZED, |
463 | QETH_QDIO_ALLOCATED, | 464 | QETH_QDIO_ALLOCATED, |
464 | QETH_QDIO_ESTABLISHED, | 465 | QETH_QDIO_ESTABLISHED, |
466 | QETH_QDIO_CLEANING | ||
465 | }; | 467 | }; |
466 | 468 | ||
467 | struct qeth_buffer_pool_entry { | 469 | struct qeth_buffer_pool_entry { |
@@ -536,7 +538,7 @@ struct qeth_qdio_out_q { | |||
536 | } __attribute__ ((aligned(256))); | 538 | } __attribute__ ((aligned(256))); |
537 | 539 | ||
538 | struct qeth_qdio_info { | 540 | struct qeth_qdio_info { |
539 | volatile enum qeth_qdio_info_states state; | 541 | atomic_t state; |
540 | /* input */ | 542 | /* input */ |
541 | struct qeth_qdio_q *in_q; | 543 | struct qeth_qdio_q *in_q; |
542 | struct qeth_qdio_buffer_pool in_buf_pool; | 544 | struct qeth_qdio_buffer_pool in_buf_pool; |
@@ -767,6 +769,7 @@ struct qeth_card_options { | |||
767 | int fake_ll; | 769 | int fake_ll; |
768 | int layer2; | 770 | int layer2; |
769 | enum qeth_large_send_types large_send; | 771 | enum qeth_large_send_types large_send; |
772 | int performance_stats; | ||
770 | }; | 773 | }; |
771 | 774 | ||
772 | /* | 775 | /* |
@@ -819,9 +822,7 @@ struct qeth_card { | |||
819 | struct list_head cmd_waiter_list; | 822 | struct list_head cmd_waiter_list; |
820 | /* QDIO buffer handling */ | 823 | /* QDIO buffer handling */ |
821 | struct qeth_qdio_info qdio; | 824 | struct qeth_qdio_info qdio; |
822 | #ifdef CONFIG_QETH_PERF_STATS | ||
823 | struct qeth_perf_stats perf_stats; | 825 | struct qeth_perf_stats perf_stats; |
824 | #endif /* CONFIG_QETH_PERF_STATS */ | ||
825 | int use_hard_stop; | 826 | int use_hard_stop; |
826 | int (*orig_hard_header)(struct sk_buff *,struct net_device *, | 827 | int (*orig_hard_header)(struct sk_buff *,struct net_device *, |
827 | unsigned short,void *,void *,unsigned); | 828 | unsigned short,void *,void *,unsigned); |
@@ -859,23 +860,18 @@ qeth_get_ipa_adp_type(enum qeth_link_types link_type) | |||
859 | } | 860 | } |
860 | } | 861 | } |
861 | 862 | ||
862 | static inline int | 863 | static inline struct sk_buff * |
863 | qeth_realloc_headroom(struct qeth_card *card, struct sk_buff **skb, int size) | 864 | qeth_realloc_headroom(struct qeth_card *card, struct sk_buff *skb, int size) |
864 | { | 865 | { |
865 | struct sk_buff *new_skb = NULL; | 866 | struct sk_buff *new_skb = skb; |
866 | 867 | ||
867 | if (skb_headroom(*skb) < size){ | 868 | if (skb_headroom(skb) >= size) |
868 | new_skb = skb_realloc_headroom(*skb, size); | 869 | return skb; |
869 | if (!new_skb) { | 870 | new_skb = skb_realloc_headroom(skb, size); |
870 | PRINT_ERR("qeth_prepare_skb: could " | 871 | if (!new_skb) |
871 | "not realloc headroom for qeth_hdr " | 872 | PRINT_ERR("Could not realloc headroom for qeth_hdr " |
872 | "on interface %s", QETH_CARD_IFNAME(card)); | 873 | "on interface %s", QETH_CARD_IFNAME(card)); |
873 | return -ENOMEM; | 874 | return new_skb; |
874 | } | ||
875 | kfree_skb(*skb); | ||
876 | *skb = new_skb; | ||
877 | } | ||
878 | return 0; | ||
879 | } | 875 | } |
880 | 876 | ||
881 | static inline struct sk_buff * | 877 | static inline struct sk_buff * |
@@ -885,16 +881,15 @@ qeth_pskb_unshare(struct sk_buff *skb, int pri) | |||
885 | if (!skb_cloned(skb)) | 881 | if (!skb_cloned(skb)) |
886 | return skb; | 882 | return skb; |
887 | nskb = skb_copy(skb, pri); | 883 | nskb = skb_copy(skb, pri); |
888 | kfree_skb(skb); /* free our shared copy */ | ||
889 | return nskb; | 884 | return nskb; |
890 | } | 885 | } |
891 | 886 | ||
892 | static inline void * | 887 | static inline void * |
893 | qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size) | 888 | qeth_push_skb(struct qeth_card *card, struct sk_buff *skb, int size) |
894 | { | 889 | { |
895 | void *hdr; | 890 | void *hdr; |
896 | 891 | ||
897 | hdr = (void *) skb_push(*skb, size); | 892 | hdr = (void *) skb_push(skb, size); |
898 | /* | 893 | /* |
899 | * sanity check, the Linux memory allocation scheme should | 894 | * sanity check, the Linux memory allocation scheme should |
900 | * never present us cases like this one (the qdio header size plus | 895 | * never present us cases like this one (the qdio header size plus |
@@ -903,8 +898,7 @@ qeth_push_skb(struct qeth_card *card, struct sk_buff **skb, int size) | |||
903 | if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) != | 898 | if ((((unsigned long) hdr) & (~(PAGE_SIZE - 1))) != |
904 | (((unsigned long) hdr + size + | 899 | (((unsigned long) hdr + size + |
905 | QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) { | 900 | QETH_IP_HEADER_SIZE) & (~(PAGE_SIZE - 1)))) { |
906 | PRINT_ERR("qeth_prepare_skb: misaligned " | 901 | PRINT_ERR("Misaligned packet on interface %s. Discarded.", |
907 | "packet on interface %s. Discarded.", | ||
908 | QETH_CARD_IFNAME(card)); | 902 | QETH_CARD_IFNAME(card)); |
909 | return NULL; | 903 | return NULL; |
910 | } | 904 | } |
@@ -1056,13 +1050,11 @@ qeth_get_arphdr_type(int cardtype, int linktype) | |||
1056 | } | 1050 | } |
1057 | } | 1051 | } |
1058 | 1052 | ||
1059 | #ifdef CONFIG_QETH_PERF_STATS | ||
1060 | static inline int | 1053 | static inline int |
1061 | qeth_get_micros(void) | 1054 | qeth_get_micros(void) |
1062 | { | 1055 | { |
1063 | return (int) (get_clock() >> 12); | 1056 | return (int) (get_clock() >> 12); |
1064 | } | 1057 | } |
1065 | #endif | ||
1066 | 1058 | ||
1067 | static inline int | 1059 | static inline int |
1068 | qeth_get_qdio_q_format(struct qeth_card *card) | 1060 | qeth_get_qdio_q_format(struct qeth_card *card) |
@@ -1096,10 +1088,11 @@ qeth_string_to_ipaddr4(const char *buf, __u8 *addr) | |||
1096 | { | 1088 | { |
1097 | int count = 0, rc = 0; | 1089 | int count = 0, rc = 0; |
1098 | int in[4]; | 1090 | int in[4]; |
1091 | char c; | ||
1099 | 1092 | ||
1100 | rc = sscanf(buf, "%d.%d.%d.%d%n", | 1093 | rc = sscanf(buf, "%u.%u.%u.%u%c", |
1101 | &in[0], &in[1], &in[2], &in[3], &count); | 1094 | &in[0], &in[1], &in[2], &in[3], &c); |
1102 | if (rc != 4 || count<=0) | 1095 | if (rc != 4 && (rc != 5 || c != '\n')) |
1103 | return -EINVAL; | 1096 | return -EINVAL; |
1104 | for (count = 0; count < 4; count++) { | 1097 | for (count = 0; count < 4; count++) { |
1105 | if (in[count] > 255) | 1098 | if (in[count] > 255) |
@@ -1123,24 +1116,28 @@ qeth_ipaddr6_to_string(const __u8 *addr, char *buf) | |||
1123 | static inline int | 1116 | static inline int |
1124 | qeth_string_to_ipaddr6(const char *buf, __u8 *addr) | 1117 | qeth_string_to_ipaddr6(const char *buf, __u8 *addr) |
1125 | { | 1118 | { |
1126 | char *end, *start; | 1119 | const char *end, *end_tmp, *start; |
1127 | __u16 *in; | 1120 | __u16 *in; |
1128 | char num[5]; | 1121 | char num[5]; |
1129 | int num2, cnt, out, found, save_cnt; | 1122 | int num2, cnt, out, found, save_cnt; |
1130 | unsigned short in_tmp[8] = {0, }; | 1123 | unsigned short in_tmp[8] = {0, }; |
1131 | 1124 | ||
1132 | cnt = out = found = save_cnt = num2 = 0; | 1125 | cnt = out = found = save_cnt = num2 = 0; |
1133 | end = start = (char *) buf; | 1126 | end = start = buf; |
1134 | in = (__u16 *) addr; | 1127 | in = (__u16 *) addr; |
1135 | memset(in, 0, 16); | 1128 | memset(in, 0, 16); |
1136 | while (end) { | 1129 | while (*end) { |
1137 | end = strchr(end,':'); | 1130 | end = strchr(start,':'); |
1138 | if (end == NULL) { | 1131 | if (end == NULL) { |
1139 | end = (char *)buf + (strlen(buf)); | 1132 | end = buf + strlen(buf); |
1140 | out = 1; | 1133 | if ((end_tmp = strchr(start, '\n')) != NULL) |
1134 | end = end_tmp; | ||
1135 | out = 1; | ||
1141 | } | 1136 | } |
1142 | if ((end - start)) { | 1137 | if ((end - start)) { |
1143 | memset(num, 0, 5); | 1138 | memset(num, 0, 5); |
1139 | if ((end - start) > 4) | ||
1140 | return -EINVAL; | ||
1144 | memcpy(num, start, end - start); | 1141 | memcpy(num, start, end - start); |
1145 | if (!qeth_isxdigit(num)) | 1142 | if (!qeth_isxdigit(num)) |
1146 | return -EINVAL; | 1143 | return -EINVAL; |
@@ -1158,6 +1155,8 @@ qeth_string_to_ipaddr6(const char *buf, __u8 *addr) | |||
1158 | } | 1155 | } |
1159 | start = ++end; | 1156 | start = ++end; |
1160 | } | 1157 | } |
1158 | if (cnt + save_cnt > 8) | ||
1159 | return -EINVAL; | ||
1161 | cnt = 7; | 1160 | cnt = 7; |
1162 | while (save_cnt) | 1161 | while (save_cnt) |
1163 | in[cnt--] = in_tmp[--save_cnt]; | 1162 | in[cnt--] = in_tmp[--save_cnt]; |
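The qeth.h hunk above tightens qeth_string_to_ipaddr4() by letting sscanf() report a trailing character, so that only a newline may follow the last octet, and by range-checking each octet. A stand-alone sketch of the same parsing rule (parse_ipv4 is an invented name):

/* Editorial sketch of the stricter dotted-quad parsing. */
#include <stdio.h>

static int parse_ipv4(const char *buf, unsigned char *addr)
{
    unsigned int in[4];
    char c;
    int rc, i;

    rc = sscanf(buf, "%u.%u.%u.%u%c", &in[0], &in[1], &in[2], &in[3], &c);
    if (rc != 4 && (rc != 5 || c != '\n'))
        return -1;              /* trailing garbage other than '\n' */
    for (i = 0; i < 4; i++) {
        if (in[i] > 255)
            return -1;          /* octet out of range */
        addr[i] = (unsigned char) in[i];
    }
    return 0;
}

int main(void)
{
    unsigned char a[4];

    printf("%d\n", parse_ipv4("192.168.0.1\n", a));   /* 0  */
    printf("%d\n", parse_ipv4("192.168.0.1junk", a)); /* -1 */
    printf("%d\n", parse_ipv4("300.1.1.1", a));       /* -1 */
    return 0;
}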
diff --git a/drivers/s390/net/qeth_eddp.c b/drivers/s390/net/qeth_eddp.c index 8491598f9149..a363721cf28d 100644 --- a/drivers/s390/net/qeth_eddp.c +++ b/drivers/s390/net/qeth_eddp.c | |||
@@ -179,9 +179,8 @@ out_check: | |||
179 | flush_cnt++; | 179 | flush_cnt++; |
180 | } | 180 | } |
181 | } else { | 181 | } else { |
182 | #ifdef CONFIG_QETH_PERF_STATS | 182 | if (queue->card->options.performance_stats) |
183 | queue->card->perf_stats.skbs_sent_pack++; | 183 | queue->card->perf_stats.skbs_sent_pack++; |
184 | #endif | ||
185 | QETH_DBF_TEXT(trace, 6, "fillbfpa"); | 184 | QETH_DBF_TEXT(trace, 6, "fillbfpa"); |
186 | if (buf->next_element_to_fill >= | 185 | if (buf->next_element_to_fill >= |
187 | QETH_MAX_BUFFER_ELEMENTS(queue->card)) { | 186 | QETH_MAX_BUFFER_ELEMENTS(queue->card)) { |
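The qeth_eddp.c hunk above, together with the qeth_main.c hunks that follow, replaces the CONFIG_QETH_PERF_STATS #ifdefs with a per-card runtime flag, so statistics can be switched on and off without rebuilding the driver. A trimmed-down sketch of counters guarded by such a flag (the structs are inventions, not the real qeth definitions):

/* Editorial sketch of the runtime performance_stats toggle. */
#include <stdio.h>

struct perf_stats { unsigned int bufs_sent; unsigned int skbs_sent_pack; };
struct card_options { int performance_stats; };
struct card { struct card_options options; struct perf_stats perf_stats; };

static void account_sent(struct card *card, int packed)
{
    if (!card->options.performance_stats)
        return;                         /* counting disabled at runtime */
    card->perf_stats.bufs_sent++;
    if (packed)
        card->perf_stats.skbs_sent_pack++;
}

int main(void)
{
    struct card c = { .options = { .performance_stats = 1 } };

    account_sent(&c, 1);
    c.options.performance_stats = 0;    /* e.g. toggled via a sysfs attribute */
    account_sent(&c, 1);                /* not counted */
    printf("bufs_sent=%u packed=%u\n",
           c.perf_stats.bufs_sent, c.perf_stats.skbs_sent_pack);
    return 0;
}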
diff --git a/drivers/s390/net/qeth_main.c b/drivers/s390/net/qeth_main.c index 36733b9823c6..5613b4564fa2 100644 --- a/drivers/s390/net/qeth_main.c +++ b/drivers/s390/net/qeth_main.c | |||
@@ -84,6 +84,8 @@ static debug_info_t *qeth_dbf_qerr = NULL; | |||
84 | 84 | ||
85 | DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf); | 85 | DEFINE_PER_CPU(char[256], qeth_dbf_txt_buf); |
86 | 86 | ||
87 | static struct lock_class_key qdio_out_skb_queue_key; | ||
88 | |||
87 | /** | 89 | /** |
88 | * some more definitions and declarations | 90 | * some more definitions and declarations |
89 | */ | 91 | */ |
@@ -1071,6 +1073,7 @@ qeth_set_intial_options(struct qeth_card *card) | |||
1071 | card->options.layer2 = 1; | 1073 | card->options.layer2 = 1; |
1072 | else | 1074 | else |
1073 | card->options.layer2 = 0; | 1075 | card->options.layer2 = 0; |
1076 | card->options.performance_stats = 1; | ||
1074 | } | 1077 | } |
1075 | 1078 | ||
1076 | /** | 1079 | /** |
@@ -1706,6 +1709,7 @@ qeth_check_ipa_data(struct qeth_card *card, struct qeth_cmd_buffer *iob) | |||
1706 | "IP address reset.\n", | 1709 | "IP address reset.\n", |
1707 | QETH_CARD_IFNAME(card), | 1710 | QETH_CARD_IFNAME(card), |
1708 | card->info.chpid); | 1711 | card->info.chpid); |
1712 | netif_carrier_on(card->dev); | ||
1709 | qeth_schedule_recovery(card); | 1713 | qeth_schedule_recovery(card); |
1710 | return NULL; | 1714 | return NULL; |
1711 | case IPA_CMD_MODCCID: | 1715 | case IPA_CMD_MODCCID: |
@@ -2462,24 +2466,6 @@ qeth_rebuild_skb_fake_ll(struct qeth_card *card, struct sk_buff *skb, | |||
2462 | qeth_rebuild_skb_fake_ll_eth(card, skb, hdr); | 2466 | qeth_rebuild_skb_fake_ll_eth(card, skb, hdr); |
2463 | } | 2467 | } |
2464 | 2468 | ||
2465 | static inline void | ||
2466 | qeth_rebuild_skb_vlan(struct qeth_card *card, struct sk_buff *skb, | ||
2467 | struct qeth_hdr *hdr) | ||
2468 | { | ||
2469 | #ifdef CONFIG_QETH_VLAN | ||
2470 | u16 *vlan_tag; | ||
2471 | |||
2472 | if (hdr->hdr.l3.ext_flags & | ||
2473 | (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) { | ||
2474 | vlan_tag = (u16 *) skb_push(skb, VLAN_HLEN); | ||
2475 | *vlan_tag = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)? | ||
2476 | hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]); | ||
2477 | *(vlan_tag + 1) = skb->protocol; | ||
2478 | skb->protocol = __constant_htons(ETH_P_8021Q); | ||
2479 | } | ||
2480 | #endif /* CONFIG_QETH_VLAN */ | ||
2481 | } | ||
2482 | |||
2483 | static inline __u16 | 2469 | static inline __u16 |
2484 | qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, | 2470 | qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, |
2485 | struct qeth_hdr *hdr) | 2471 | struct qeth_hdr *hdr) |
@@ -2508,15 +2494,16 @@ qeth_layer2_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, | |||
2508 | return vlan_id; | 2494 | return vlan_id; |
2509 | } | 2495 | } |
2510 | 2496 | ||
2511 | static inline void | 2497 | static inline __u16 |
2512 | qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, | 2498 | qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, |
2513 | struct qeth_hdr *hdr) | 2499 | struct qeth_hdr *hdr) |
2514 | { | 2500 | { |
2501 | unsigned short vlan_id = 0; | ||
2515 | #ifdef CONFIG_QETH_IPV6 | 2502 | #ifdef CONFIG_QETH_IPV6 |
2516 | if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) { | 2503 | if (hdr->hdr.l3.flags & QETH_HDR_PASSTHRU) { |
2517 | skb->pkt_type = PACKET_HOST; | 2504 | skb->pkt_type = PACKET_HOST; |
2518 | skb->protocol = qeth_type_trans(skb, card->dev); | 2505 | skb->protocol = qeth_type_trans(skb, card->dev); |
2519 | return; | 2506 | return 0; |
2520 | } | 2507 | } |
2521 | #endif /* CONFIG_QETH_IPV6 */ | 2508 | #endif /* CONFIG_QETH_IPV6 */ |
2522 | skb->protocol = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 : | 2509 | skb->protocol = htons((hdr->hdr.l3.flags & QETH_HDR_IPV6)? ETH_P_IPV6 : |
@@ -2538,7 +2525,13 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, | |||
2538 | default: | 2525 | default: |
2539 | skb->pkt_type = PACKET_HOST; | 2526 | skb->pkt_type = PACKET_HOST; |
2540 | } | 2527 | } |
2541 | qeth_rebuild_skb_vlan(card, skb, hdr); | 2528 | |
2529 | if (hdr->hdr.l3.ext_flags & | ||
2530 | (QETH_HDR_EXT_VLAN_FRAME | QETH_HDR_EXT_INCLUDE_VLAN_TAG)) { | ||
2531 | vlan_id = (hdr->hdr.l3.ext_flags & QETH_HDR_EXT_VLAN_FRAME)? | ||
2532 | hdr->hdr.l3.vlan_id : *((u16 *)&hdr->hdr.l3.dest_addr[12]); | ||
2533 | } | ||
2534 | |||
2542 | if (card->options.fake_ll) | 2535 | if (card->options.fake_ll) |
2543 | qeth_rebuild_skb_fake_ll(card, skb, hdr); | 2536 | qeth_rebuild_skb_fake_ll(card, skb, hdr); |
2544 | else | 2537 | else |
@@ -2554,6 +2547,7 @@ qeth_rebuild_skb(struct qeth_card *card, struct sk_buff *skb, | |||
2554 | else | 2547 | else |
2555 | skb->ip_summed = SW_CHECKSUMMING; | 2548 | skb->ip_summed = SW_CHECKSUMMING; |
2556 | } | 2549 | } |
2550 | return vlan_id; | ||
2557 | } | 2551 | } |
2558 | 2552 | ||
2559 | static inline void | 2553 | static inline void |
@@ -2566,20 +2560,20 @@ qeth_process_inbound_buffer(struct qeth_card *card, | |||
2566 | int offset; | 2560 | int offset; |
2567 | int rxrc; | 2561 | int rxrc; |
2568 | __u16 vlan_tag = 0; | 2562 | __u16 vlan_tag = 0; |
2563 | __u16 *vlan_addr; | ||
2569 | 2564 | ||
2570 | /* get first element of current buffer */ | 2565 | /* get first element of current buffer */ |
2571 | element = (struct qdio_buffer_element *)&buf->buffer->element[0]; | 2566 | element = (struct qdio_buffer_element *)&buf->buffer->element[0]; |
2572 | offset = 0; | 2567 | offset = 0; |
2573 | #ifdef CONFIG_QETH_PERF_STATS | 2568 | if (card->options.performance_stats) |
2574 | card->perf_stats.bufs_rec++; | 2569 | card->perf_stats.bufs_rec++; |
2575 | #endif | ||
2576 | while((skb = qeth_get_next_skb(card, buf->buffer, &element, | 2570 | while((skb = qeth_get_next_skb(card, buf->buffer, &element, |
2577 | &offset, &hdr))) { | 2571 | &offset, &hdr))) { |
2578 | skb->dev = card->dev; | 2572 | skb->dev = card->dev; |
2579 | if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) | 2573 | if (hdr->hdr.l2.id == QETH_HEADER_TYPE_LAYER2) |
2580 | vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr); | 2574 | vlan_tag = qeth_layer2_rebuild_skb(card, skb, hdr); |
2581 | else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3) | 2575 | else if (hdr->hdr.l3.id == QETH_HEADER_TYPE_LAYER3) |
2582 | qeth_rebuild_skb(card, skb, hdr); | 2576 | vlan_tag = qeth_rebuild_skb(card, skb, hdr); |
2583 | else { /*in case of OSN*/ | 2577 | else { /*in case of OSN*/ |
2584 | skb_push(skb, sizeof(struct qeth_hdr)); | 2578 | skb_push(skb, sizeof(struct qeth_hdr)); |
2585 | memcpy(skb->data, hdr, sizeof(struct qeth_hdr)); | 2579 | memcpy(skb->data, hdr, sizeof(struct qeth_hdr)); |
@@ -2589,14 +2583,19 @@ qeth_process_inbound_buffer(struct qeth_card *card, | |||
2589 | dev_kfree_skb_any(skb); | 2583 | dev_kfree_skb_any(skb); |
2590 | continue; | 2584 | continue; |
2591 | } | 2585 | } |
2586 | if (card->info.type == QETH_CARD_TYPE_OSN) | ||
2587 | rxrc = card->osn_info.data_cb(skb); | ||
2588 | else | ||
2592 | #ifdef CONFIG_QETH_VLAN | 2589 | #ifdef CONFIG_QETH_VLAN |
2593 | if (vlan_tag) | 2590 | if (vlan_tag) |
2594 | vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag); | 2591 | if (card->vlangrp) |
2592 | vlan_hwaccel_rx(skb, card->vlangrp, vlan_tag); | ||
2593 | else { | ||
2594 | dev_kfree_skb_any(skb); | ||
2595 | continue; | ||
2596 | } | ||
2595 | else | 2597 | else |
2596 | #endif | 2598 | #endif |
2597 | if (card->info.type == QETH_CARD_TYPE_OSN) | ||
2598 | rxrc = card->osn_info.data_cb(skb); | ||
2599 | else | ||
2600 | rxrc = netif_rx(skb); | 2599 | rxrc = netif_rx(skb); |
2601 | card->dev->last_rx = jiffies; | 2600 | card->dev->last_rx = jiffies; |
2602 | card->stats.rx_packets++; | 2601 | card->stats.rx_packets++; |
@@ -2624,7 +2623,7 @@ qeth_init_input_buffer(struct qeth_card *card, struct qeth_qdio_buffer *buf) | |||
2624 | { | 2623 | { |
2625 | struct qeth_buffer_pool_entry *pool_entry; | 2624 | struct qeth_buffer_pool_entry *pool_entry; |
2626 | int i; | 2625 | int i; |
2627 | 2626 | ||
2628 | pool_entry = qeth_get_buffer_pool_entry(card); | 2627 | pool_entry = qeth_get_buffer_pool_entry(card); |
2629 | /* | 2628 | /* |
2630 | * since the buffer is accessed only from the input_tasklet | 2629 | * since the buffer is accessed only from the input_tasklet |
@@ -2698,17 +2697,18 @@ qeth_queue_input_buffer(struct qeth_card *card, int index) | |||
2698 | * 'index') un-requeued -> this buffer is the first buffer that | 2697 | * 'index') un-requeued -> this buffer is the first buffer that |
2699 | * will be requeued the next time | 2698 | * will be requeued the next time |
2700 | */ | 2699 | */ |
2701 | #ifdef CONFIG_QETH_PERF_STATS | 2700 | if (card->options.performance_stats) { |
2702 | card->perf_stats.inbound_do_qdio_cnt++; | 2701 | card->perf_stats.inbound_do_qdio_cnt++; |
2703 | card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros(); | 2702 | card->perf_stats.inbound_do_qdio_start_time = |
2704 | #endif | 2703 | qeth_get_micros(); |
2704 | } | ||
2705 | rc = do_QDIO(CARD_DDEV(card), | 2705 | rc = do_QDIO(CARD_DDEV(card), |
2706 | QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, | 2706 | QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT, |
2707 | 0, queue->next_buf_to_init, count, NULL); | 2707 | 0, queue->next_buf_to_init, count, NULL); |
2708 | #ifdef CONFIG_QETH_PERF_STATS | 2708 | if (card->options.performance_stats) |
2709 | card->perf_stats.inbound_do_qdio_time += qeth_get_micros() - | 2709 | card->perf_stats.inbound_do_qdio_time += |
2710 | card->perf_stats.inbound_do_qdio_start_time; | 2710 | qeth_get_micros() - |
2711 | #endif | 2711 | card->perf_stats.inbound_do_qdio_start_time; |
2712 | if (rc){ | 2712 | if (rc){ |
2713 | PRINT_WARN("qeth_queue_input_buffer's do_QDIO " | 2713 | PRINT_WARN("qeth_queue_input_buffer's do_QDIO " |
2714 | "return %i (device %s).\n", | 2714 | "return %i (device %s).\n", |
@@ -2744,10 +2744,10 @@ qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status, | |||
2744 | QETH_DBF_TEXT(trace, 6, "qdinput"); | 2744 | QETH_DBF_TEXT(trace, 6, "qdinput"); |
2745 | card = (struct qeth_card *) card_ptr; | 2745 | card = (struct qeth_card *) card_ptr; |
2746 | net_dev = card->dev; | 2746 | net_dev = card->dev; |
2747 | #ifdef CONFIG_QETH_PERF_STATS | 2747 | if (card->options.performance_stats) { |
2748 | card->perf_stats.inbound_cnt++; | 2748 | card->perf_stats.inbound_cnt++; |
2749 | card->perf_stats.inbound_start_time = qeth_get_micros(); | 2749 | card->perf_stats.inbound_start_time = qeth_get_micros(); |
2750 | #endif | 2750 | } |
2751 | if (status & QDIO_STATUS_LOOK_FOR_ERROR) { | 2751 | if (status & QDIO_STATUS_LOOK_FOR_ERROR) { |
2752 | if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){ | 2752 | if (status & QDIO_STATUS_ACTIVATE_CHECK_CONDITION){ |
2753 | QETH_DBF_TEXT(trace, 1,"qdinchk"); | 2753 | QETH_DBF_TEXT(trace, 1,"qdinchk"); |
@@ -2769,10 +2769,9 @@ qeth_qdio_input_handler(struct ccw_device * ccwdev, unsigned int status, | |||
2769 | qeth_put_buffer_pool_entry(card, buffer->pool_entry); | 2769 | qeth_put_buffer_pool_entry(card, buffer->pool_entry); |
2770 | qeth_queue_input_buffer(card, index); | 2770 | qeth_queue_input_buffer(card, index); |
2771 | } | 2771 | } |
2772 | #ifdef CONFIG_QETH_PERF_STATS | 2772 | if (card->options.performance_stats) |
2773 | card->perf_stats.inbound_time += qeth_get_micros() - | 2773 | card->perf_stats.inbound_time += qeth_get_micros() - |
2774 | card->perf_stats.inbound_start_time; | 2774 | card->perf_stats.inbound_start_time; |
2775 | #endif | ||
2776 | } | 2775 | } |
2777 | 2776 | ||
2778 | static inline int | 2777 | static inline int |
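These hunks show the pattern that repeats through the rest of the patch: each compile-time #ifdef CONFIG_QETH_PERF_STATS block becomes a runtime check of card->options.performance_stats, so the counters can be switched on and off per card without rebuilding the driver. The timing pattern around do_QDIO(), condensed from qeth_queue_input_buffer() above:

        if (card->options.performance_stats) {
                card->perf_stats.inbound_do_qdio_cnt++;
                card->perf_stats.inbound_do_qdio_start_time = qeth_get_micros();
        }
        rc = do_QDIO(CARD_DDEV(card),
                     QDIO_FLAG_SYNC_INPUT | QDIO_FLAG_UNDER_INTERRUPT,
                     0, queue->next_buf_to_init, count, NULL);
        if (card->options.performance_stats)
                card->perf_stats.inbound_do_qdio_time += qeth_get_micros() -
                        card->perf_stats.inbound_do_qdio_start_time;

The matching sysfs toggle and /proc output changes appear further down in qeth_sys.c and qeth_proc.c.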
@@ -2862,10 +2861,11 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, | |||
2862 | } | 2861 | } |
2863 | 2862 | ||
2864 | queue->card->dev->trans_start = jiffies; | 2863 | queue->card->dev->trans_start = jiffies; |
2865 | #ifdef CONFIG_QETH_PERF_STATS | 2864 | if (queue->card->options.performance_stats) { |
2866 | queue->card->perf_stats.outbound_do_qdio_cnt++; | 2865 | queue->card->perf_stats.outbound_do_qdio_cnt++; |
2867 | queue->card->perf_stats.outbound_do_qdio_start_time = qeth_get_micros(); | 2866 | queue->card->perf_stats.outbound_do_qdio_start_time = |
2868 | #endif | 2867 | qeth_get_micros(); |
2868 | } | ||
2869 | if (under_int) | 2869 | if (under_int) |
2870 | rc = do_QDIO(CARD_DDEV(queue->card), | 2870 | rc = do_QDIO(CARD_DDEV(queue->card), |
2871 | QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_UNDER_INTERRUPT, | 2871 | QDIO_FLAG_SYNC_OUTPUT | QDIO_FLAG_UNDER_INTERRUPT, |
@@ -2873,10 +2873,10 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, | |||
2873 | else | 2873 | else |
2874 | rc = do_QDIO(CARD_DDEV(queue->card), QDIO_FLAG_SYNC_OUTPUT, | 2874 | rc = do_QDIO(CARD_DDEV(queue->card), QDIO_FLAG_SYNC_OUTPUT, |
2875 | queue->queue_no, index, count, NULL); | 2875 | queue->queue_no, index, count, NULL); |
2876 | #ifdef CONFIG_QETH_PERF_STATS | 2876 | if (queue->card->options.performance_stats) |
2877 | queue->card->perf_stats.outbound_do_qdio_time += qeth_get_micros() - | 2877 | queue->card->perf_stats.outbound_do_qdio_time += |
2878 | queue->card->perf_stats.outbound_do_qdio_start_time; | 2878 | qeth_get_micros() - |
2879 | #endif | 2879 | queue->card->perf_stats.outbound_do_qdio_start_time; |
2880 | if (rc){ | 2880 | if (rc){ |
2881 | QETH_DBF_TEXT(trace, 2, "flushbuf"); | 2881 | QETH_DBF_TEXT(trace, 2, "flushbuf"); |
2882 | QETH_DBF_TEXT_(trace, 2, " err%d", rc); | 2882 | QETH_DBF_TEXT_(trace, 2, " err%d", rc); |
@@ -2888,9 +2888,8 @@ qeth_flush_buffers(struct qeth_qdio_out_q *queue, int under_int, | |||
2888 | return; | 2888 | return; |
2889 | } | 2889 | } |
2890 | atomic_add(count, &queue->used_buffers); | 2890 | atomic_add(count, &queue->used_buffers); |
2891 | #ifdef CONFIG_QETH_PERF_STATS | 2891 | if (queue->card->options.performance_stats) |
2892 | queue->card->perf_stats.bufs_sent += count; | 2892 | queue->card->perf_stats.bufs_sent += count; |
2893 | #endif | ||
2894 | } | 2893 | } |
2895 | 2894 | ||
2896 | /* | 2895 | /* |
@@ -2905,9 +2904,8 @@ qeth_switch_to_packing_if_needed(struct qeth_qdio_out_q *queue) | |||
2905 | >= QETH_HIGH_WATERMARK_PACK){ | 2904 | >= QETH_HIGH_WATERMARK_PACK){ |
2906 | /* switch non-PACKING -> PACKING */ | 2905 | /* switch non-PACKING -> PACKING */ |
2907 | QETH_DBF_TEXT(trace, 6, "np->pack"); | 2906 | QETH_DBF_TEXT(trace, 6, "np->pack"); |
2908 | #ifdef CONFIG_QETH_PERF_STATS | 2907 | if (queue->card->options.performance_stats) |
2909 | queue->card->perf_stats.sc_dp_p++; | 2908 | queue->card->perf_stats.sc_dp_p++; |
2910 | #endif | ||
2911 | queue->do_pack = 1; | 2909 | queue->do_pack = 1; |
2912 | } | 2910 | } |
2913 | } | 2911 | } |
@@ -2930,9 +2928,8 @@ qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) | |||
2930 | <= QETH_LOW_WATERMARK_PACK) { | 2928 | <= QETH_LOW_WATERMARK_PACK) { |
2931 | /* switch PACKING -> non-PACKING */ | 2929 | /* switch PACKING -> non-PACKING */ |
2932 | QETH_DBF_TEXT(trace, 6, "pack->np"); | 2930 | QETH_DBF_TEXT(trace, 6, "pack->np"); |
2933 | #ifdef CONFIG_QETH_PERF_STATS | 2931 | if (queue->card->options.performance_stats) |
2934 | queue->card->perf_stats.sc_p_dp++; | 2932 | queue->card->perf_stats.sc_p_dp++; |
2935 | #endif | ||
2936 | queue->do_pack = 0; | 2933 | queue->do_pack = 0; |
2937 | /* flush packing buffers */ | 2934 | /* flush packing buffers */ |
2938 | buffer = &queue->bufs[queue->next_buf_to_fill]; | 2935 | buffer = &queue->bufs[queue->next_buf_to_fill]; |
@@ -2944,7 +2941,7 @@ qeth_switch_to_nonpacking_if_needed(struct qeth_qdio_out_q *queue) | |||
2944 | queue->next_buf_to_fill = | 2941 | queue->next_buf_to_fill = |
2945 | (queue->next_buf_to_fill + 1) % | 2942 | (queue->next_buf_to_fill + 1) % |
2946 | QDIO_MAX_BUFFERS_PER_Q; | 2943 | QDIO_MAX_BUFFERS_PER_Q; |
2947 | } | 2944 | } |
2948 | } | 2945 | } |
2949 | } | 2946 | } |
2950 | return flush_count; | 2947 | return flush_count; |
@@ -3000,11 +2997,10 @@ qeth_check_outbound_queue(struct qeth_qdio_out_q *queue) | |||
3000 | !atomic_read(&queue->set_pci_flags_count)) | 2997 | !atomic_read(&queue->set_pci_flags_count)) |
3001 | flush_cnt += | 2998 | flush_cnt += |
3002 | qeth_flush_buffers_on_no_pci(queue); | 2999 | qeth_flush_buffers_on_no_pci(queue); |
3003 | #ifdef CONFIG_QETH_PERF_STATS | 3000 | if (queue->card->options.performance_stats && |
3004 | if (q_was_packing) | 3001 | q_was_packing) |
3005 | queue->card->perf_stats.bufs_sent_pack += | 3002 | queue->card->perf_stats.bufs_sent_pack += |
3006 | flush_cnt; | 3003 | flush_cnt; |
3007 | #endif | ||
3008 | if (flush_cnt) | 3004 | if (flush_cnt) |
3009 | qeth_flush_buffers(queue, 1, index, flush_cnt); | 3005 | qeth_flush_buffers(queue, 1, index, flush_cnt); |
3010 | atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); | 3006 | atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); |
@@ -3034,10 +3030,11 @@ qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status, | |||
3034 | return; | 3030 | return; |
3035 | } | 3031 | } |
3036 | } | 3032 | } |
3037 | #ifdef CONFIG_QETH_PERF_STATS | 3033 | if (card->options.performance_stats) { |
3038 | card->perf_stats.outbound_handler_cnt++; | 3034 | card->perf_stats.outbound_handler_cnt++; |
3039 | card->perf_stats.outbound_handler_start_time = qeth_get_micros(); | 3035 | card->perf_stats.outbound_handler_start_time = |
3040 | #endif | 3036 | qeth_get_micros(); |
3037 | } | ||
3041 | for(i = first_element; i < (first_element + count); ++i){ | 3038 | for(i = first_element; i < (first_element + count); ++i){ |
3042 | buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; | 3039 | buffer = &queue->bufs[i % QDIO_MAX_BUFFERS_PER_Q]; |
3043 | /*we only handle the KICK_IT error by doing a recovery */ | 3040 | /*we only handle the KICK_IT error by doing a recovery */ |
@@ -3056,10 +3053,9 @@ qeth_qdio_output_handler(struct ccw_device * ccwdev, unsigned int status, | |||
3056 | qeth_check_outbound_queue(queue); | 3053 | qeth_check_outbound_queue(queue); |
3057 | 3054 | ||
3058 | netif_wake_queue(queue->card->dev); | 3055 | netif_wake_queue(queue->card->dev); |
3059 | #ifdef CONFIG_QETH_PERF_STATS | 3056 | if (card->options.performance_stats) |
3060 | card->perf_stats.outbound_handler_time += qeth_get_micros() - | 3057 | card->perf_stats.outbound_handler_time += qeth_get_micros() - |
3061 | card->perf_stats.outbound_handler_start_time; | 3058 | card->perf_stats.outbound_handler_start_time; |
3062 | #endif | ||
3063 | } | 3059 | } |
3064 | 3060 | ||
3065 | static void | 3061 | static void |
@@ -3183,13 +3179,14 @@ qeth_alloc_qdio_buffers(struct qeth_card *card) | |||
3183 | 3179 | ||
3184 | QETH_DBF_TEXT(setup, 2, "allcqdbf"); | 3180 | QETH_DBF_TEXT(setup, 2, "allcqdbf"); |
3185 | 3181 | ||
3186 | if (card->qdio.state == QETH_QDIO_ALLOCATED) | 3182 | if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED, |
3183 | QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED) | ||
3187 | return 0; | 3184 | return 0; |
3188 | 3185 | ||
3189 | card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), | 3186 | card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), |
3190 | GFP_KERNEL|GFP_DMA); | 3187 | GFP_KERNEL|GFP_DMA); |
3191 | if (!card->qdio.in_q) | 3188 | if (!card->qdio.in_q) |
3192 | return - ENOMEM; | 3189 | goto out_nomem; |
3193 | QETH_DBF_TEXT(setup, 2, "inq"); | 3190 | QETH_DBF_TEXT(setup, 2, "inq"); |
3194 | QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *)); | 3191 | QETH_DBF_HEX(setup, 2, &card->qdio.in_q, sizeof(void *)); |
3195 | memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q)); | 3192 | memset(card->qdio.in_q, 0, sizeof(struct qeth_qdio_q)); |
@@ -3198,27 +3195,19 @@ qeth_alloc_qdio_buffers(struct qeth_card *card) | |||
3198 | card->qdio.in_q->bufs[i].buffer = | 3195 | card->qdio.in_q->bufs[i].buffer = |
3199 | &card->qdio.in_q->qdio_bufs[i]; | 3196 | &card->qdio.in_q->qdio_bufs[i]; |
3200 | /* inbound buffer pool */ | 3197 | /* inbound buffer pool */ |
3201 | if (qeth_alloc_buffer_pool(card)){ | 3198 | if (qeth_alloc_buffer_pool(card)) |
3202 | kfree(card->qdio.in_q); | 3199 | goto out_freeinq; |
3203 | return -ENOMEM; | ||
3204 | } | ||
3205 | /* outbound */ | 3200 | /* outbound */ |
3206 | card->qdio.out_qs = | 3201 | card->qdio.out_qs = |
3207 | kmalloc(card->qdio.no_out_queues * | 3202 | kmalloc(card->qdio.no_out_queues * |
3208 | sizeof(struct qeth_qdio_out_q *), GFP_KERNEL); | 3203 | sizeof(struct qeth_qdio_out_q *), GFP_KERNEL); |
3209 | if (!card->qdio.out_qs){ | 3204 | if (!card->qdio.out_qs) |
3210 | qeth_free_buffer_pool(card); | 3205 | goto out_freepool; |
3211 | return -ENOMEM; | 3206 | for (i = 0; i < card->qdio.no_out_queues; ++i) { |
3212 | } | ||
3213 | for (i = 0; i < card->qdio.no_out_queues; ++i){ | ||
3214 | card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q), | 3207 | card->qdio.out_qs[i] = kmalloc(sizeof(struct qeth_qdio_out_q), |
3215 | GFP_KERNEL|GFP_DMA); | 3208 | GFP_KERNEL|GFP_DMA); |
3216 | if (!card->qdio.out_qs[i]){ | 3209 | if (!card->qdio.out_qs[i]) |
3217 | while (i > 0) | 3210 | goto out_freeoutq; |
3218 | kfree(card->qdio.out_qs[--i]); | ||
3219 | kfree(card->qdio.out_qs); | ||
3220 | return -ENOMEM; | ||
3221 | } | ||
3222 | QETH_DBF_TEXT_(setup, 2, "outq %i", i); | 3211 | QETH_DBF_TEXT_(setup, 2, "outq %i", i); |
3223 | QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *)); | 3212 | QETH_DBF_HEX(setup, 2, &card->qdio.out_qs[i], sizeof(void *)); |
3224 | memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q)); | 3213 | memset(card->qdio.out_qs[i], 0, sizeof(struct qeth_qdio_out_q)); |
@@ -3229,11 +3218,25 @@ qeth_alloc_qdio_buffers(struct qeth_card *card) | |||
3229 | &card->qdio.out_qs[i]->qdio_bufs[j]; | 3218 | &card->qdio.out_qs[i]->qdio_bufs[j]; |
3230 | skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j]. | 3219 | skb_queue_head_init(&card->qdio.out_qs[i]->bufs[j]. |
3231 | skb_list); | 3220 | skb_list); |
3221 | lockdep_set_class( | ||
3222 | &card->qdio.out_qs[i]->bufs[j].skb_list.lock, | ||
3223 | &qdio_out_skb_queue_key); | ||
3232 | INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list); | 3224 | INIT_LIST_HEAD(&card->qdio.out_qs[i]->bufs[j].ctx_list); |
3233 | } | 3225 | } |
3234 | } | 3226 | } |
3235 | card->qdio.state = QETH_QDIO_ALLOCATED; | ||
3236 | return 0; | 3227 | return 0; |
3228 | |||
3229 | out_freeoutq: | ||
3230 | while (i > 0) | ||
3231 | kfree(card->qdio.out_qs[--i]); | ||
3232 | kfree(card->qdio.out_qs); | ||
3233 | out_freepool: | ||
3234 | qeth_free_buffer_pool(card); | ||
3235 | out_freeinq: | ||
3236 | kfree(card->qdio.in_q); | ||
3237 | out_nomem: | ||
3238 | atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); | ||
3239 | return -ENOMEM; | ||
3237 | } | 3240 | } |
3238 | 3241 | ||
3239 | static void | 3242 | static void |
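qeth_alloc_qdio_buffers() now claims the allocation with a compare-and-swap on the (now atomic) card->qdio.state and releases partial allocations through a chain of goto labels instead of freeing inline at every failure site. A sketch of the resulting control flow, with the outbound-queue details elided:

        if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_UNINITIALIZED,
                           QETH_QDIO_ALLOCATED) != QETH_QDIO_UNINITIALIZED)
                return 0;       /* buffers already allocated (or being allocated) */

        card->qdio.in_q = kmalloc(sizeof(struct qeth_qdio_q), GFP_KERNEL|GFP_DMA);
        if (!card->qdio.in_q)
                goto out_nomem;
        if (qeth_alloc_buffer_pool(card))
                goto out_freeinq;
        /* ... outbound queues, with out_freepool/out_freeoutq on failure ... */
        return 0;

out_freeinq:
        kfree(card->qdio.in_q);
out_nomem:
        atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED);
        return -ENOMEM;

On failure the state drops back to QETH_QDIO_UNINITIALIZED, so a later retry starts from a clean slate; qeth_free_qdio_buffers() uses atomic_swap() on the same field.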
@@ -3242,7 +3245,8 @@ qeth_free_qdio_buffers(struct qeth_card *card) | |||
3242 | int i, j; | 3245 | int i, j; |
3243 | 3246 | ||
3244 | QETH_DBF_TEXT(trace, 2, "freeqdbf"); | 3247 | QETH_DBF_TEXT(trace, 2, "freeqdbf"); |
3245 | if (card->qdio.state == QETH_QDIO_UNINITIALIZED) | 3248 | if (atomic_swap(&card->qdio.state, QETH_QDIO_UNINITIALIZED) == |
3249 | QETH_QDIO_UNINITIALIZED) | ||
3246 | return; | 3250 | return; |
3247 | kfree(card->qdio.in_q); | 3251 | kfree(card->qdio.in_q); |
3248 | /* inbound buffer pool */ | 3252 | /* inbound buffer pool */ |
@@ -3255,7 +3259,6 @@ qeth_free_qdio_buffers(struct qeth_card *card) | |||
3255 | kfree(card->qdio.out_qs[i]); | 3259 | kfree(card->qdio.out_qs[i]); |
3256 | } | 3260 | } |
3257 | kfree(card->qdio.out_qs); | 3261 | kfree(card->qdio.out_qs); |
3258 | card->qdio.state = QETH_QDIO_UNINITIALIZED; | ||
3259 | } | 3262 | } |
3260 | 3263 | ||
3261 | static void | 3264 | static void |
@@ -3277,7 +3280,7 @@ static void | |||
3277 | qeth_init_qdio_info(struct qeth_card *card) | 3280 | qeth_init_qdio_info(struct qeth_card *card) |
3278 | { | 3281 | { |
3279 | QETH_DBF_TEXT(setup, 4, "intqdinf"); | 3282 | QETH_DBF_TEXT(setup, 4, "intqdinf"); |
3280 | card->qdio.state = QETH_QDIO_UNINITIALIZED; | 3283 | atomic_set(&card->qdio.state, QETH_QDIO_UNINITIALIZED); |
3281 | /* inbound */ | 3284 | /* inbound */ |
3282 | card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; | 3285 | card->qdio.in_buf_size = QETH_IN_BUF_SIZE_DEFAULT; |
3283 | card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; | 3286 | card->qdio.init_pool.buf_count = QETH_IN_BUF_COUNT_DEFAULT; |
@@ -3340,7 +3343,7 @@ qeth_qdio_establish(struct qeth_card *card) | |||
3340 | struct qdio_buffer **in_sbal_ptrs; | 3343 | struct qdio_buffer **in_sbal_ptrs; |
3341 | struct qdio_buffer **out_sbal_ptrs; | 3344 | struct qdio_buffer **out_sbal_ptrs; |
3342 | int i, j, k; | 3345 | int i, j, k; |
3343 | int rc; | 3346 | int rc = 0; |
3344 | 3347 | ||
3345 | QETH_DBF_TEXT(setup, 2, "qdioest"); | 3348 | QETH_DBF_TEXT(setup, 2, "qdioest"); |
3346 | 3349 | ||
@@ -3399,8 +3402,10 @@ qeth_qdio_establish(struct qeth_card *card) | |||
3399 | init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; | 3402 | init_data.input_sbal_addr_array = (void **) in_sbal_ptrs; |
3400 | init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; | 3403 | init_data.output_sbal_addr_array = (void **) out_sbal_ptrs; |
3401 | 3404 | ||
3402 | if (!(rc = qdio_initialize(&init_data))) | 3405 | if (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ALLOCATED, |
3403 | card->qdio.state = QETH_QDIO_ESTABLISHED; | 3406 | QETH_QDIO_ESTABLISHED) == QETH_QDIO_ALLOCATED) |
3407 | if ((rc = qdio_initialize(&init_data))) | ||
3408 | atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); | ||
3404 | 3409 | ||
3405 | kfree(out_sbal_ptrs); | 3410 | kfree(out_sbal_ptrs); |
3406 | kfree(in_sbal_ptrs); | 3411 | kfree(in_sbal_ptrs); |
@@ -3516,13 +3521,20 @@ qeth_qdio_clear_card(struct qeth_card *card, int use_halt) | |||
3516 | int rc = 0; | 3521 | int rc = 0; |
3517 | 3522 | ||
3518 | QETH_DBF_TEXT(trace,3,"qdioclr"); | 3523 | QETH_DBF_TEXT(trace,3,"qdioclr"); |
3519 | if (card->qdio.state == QETH_QDIO_ESTABLISHED){ | 3524 | switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED, |
3525 | QETH_QDIO_CLEANING)) { | ||
3526 | case QETH_QDIO_ESTABLISHED: | ||
3520 | if ((rc = qdio_cleanup(CARD_DDEV(card), | 3527 | if ((rc = qdio_cleanup(CARD_DDEV(card), |
3521 | (card->info.type == QETH_CARD_TYPE_IQD) ? | 3528 | (card->info.type == QETH_CARD_TYPE_IQD) ? |
3522 | QDIO_FLAG_CLEANUP_USING_HALT : | 3529 | QDIO_FLAG_CLEANUP_USING_HALT : |
3523 | QDIO_FLAG_CLEANUP_USING_CLEAR))) | 3530 | QDIO_FLAG_CLEANUP_USING_CLEAR))) |
3524 | QETH_DBF_TEXT_(trace, 3, "1err%d", rc); | 3531 | QETH_DBF_TEXT_(trace, 3, "1err%d", rc); |
3525 | card->qdio.state = QETH_QDIO_ALLOCATED; | 3532 | atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED); |
3533 | break; | ||
3534 | case QETH_QDIO_CLEANING: | ||
3535 | return rc; | ||
3536 | default: | ||
3537 | break; | ||
3526 | } | 3538 | } |
3527 | if ((rc = qeth_clear_halt_card(card, use_halt))) | 3539 | if ((rc = qeth_clear_halt_card(card, use_halt))) |
3528 | QETH_DBF_TEXT_(trace, 3, "2err%d", rc); | 3540 | QETH_DBF_TEXT_(trace, 3, "2err%d", rc); |
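With qdio.state atomic, establish and teardown also move the card between QDIO states via compare-and-swap, so two contexts cannot establish or clean the same card at once: qeth_qdio_establish() flips ALLOCATED to ESTABLISHED (and back on a qdio_initialize() failure), while qeth_qdio_clear_card() flips ESTABLISHED to CLEANING and finally to ALLOCATED. The clear path, condensed from the hunk above:

        switch (atomic_cmpxchg(&card->qdio.state, QETH_QDIO_ESTABLISHED,
                               QETH_QDIO_CLEANING)) {
        case QETH_QDIO_ESTABLISHED:
                rc = qdio_cleanup(CARD_DDEV(card),
                                  (card->info.type == QETH_CARD_TYPE_IQD) ?
                                  QDIO_FLAG_CLEANUP_USING_HALT :
                                  QDIO_FLAG_CLEANUP_USING_CLEAR);
                /* a non-zero rc is only logged */
                atomic_set(&card->qdio.state, QETH_QDIO_ALLOCATED);
                break;
        case QETH_QDIO_CLEANING:
                return rc;      /* somebody else is already cleaning up */
        default:
                break;
        }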
@@ -3682,10 +3694,10 @@ qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3682 | /* return OK; otherwise ksoftirqd goes to 100% */ | 3694 | /* return OK; otherwise ksoftirqd goes to 100% */ |
3683 | return NETDEV_TX_OK; | 3695 | return NETDEV_TX_OK; |
3684 | } | 3696 | } |
3685 | #ifdef CONFIG_QETH_PERF_STATS | 3697 | if (card->options.performance_stats) { |
3686 | card->perf_stats.outbound_cnt++; | 3698 | card->perf_stats.outbound_cnt++; |
3687 | card->perf_stats.outbound_start_time = qeth_get_micros(); | 3699 | card->perf_stats.outbound_start_time = qeth_get_micros(); |
3688 | #endif | 3700 | } |
3689 | netif_stop_queue(dev); | 3701 | netif_stop_queue(dev); |
3690 | if ((rc = qeth_send_packet(card, skb))) { | 3702 | if ((rc = qeth_send_packet(card, skb))) { |
3691 | if (rc == -EBUSY) { | 3703 | if (rc == -EBUSY) { |
@@ -3699,10 +3711,9 @@ qeth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) | |||
3699 | } | 3711 | } |
3700 | } | 3712 | } |
3701 | netif_wake_queue(dev); | 3713 | netif_wake_queue(dev); |
3702 | #ifdef CONFIG_QETH_PERF_STATS | 3714 | if (card->options.performance_stats) |
3703 | card->perf_stats.outbound_time += qeth_get_micros() - | 3715 | card->perf_stats.outbound_time += qeth_get_micros() - |
3704 | card->perf_stats.outbound_start_time; | 3716 | card->perf_stats.outbound_start_time; |
3705 | #endif | ||
3706 | return rc; | 3717 | return rc; |
3707 | } | 3718 | } |
3708 | 3719 | ||
@@ -3917,49 +3928,59 @@ qeth_get_ip_version(struct sk_buff *skb) | |||
3917 | } | 3928 | } |
3918 | } | 3929 | } |
3919 | 3930 | ||
3920 | static inline int | 3931 | static inline struct qeth_hdr * |
3921 | qeth_prepare_skb(struct qeth_card *card, struct sk_buff **skb, | 3932 | __qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, int ipv) |
3922 | struct qeth_hdr **hdr, int ipv) | ||
3923 | { | 3933 | { |
3924 | int rc = 0; | ||
3925 | #ifdef CONFIG_QETH_VLAN | 3934 | #ifdef CONFIG_QETH_VLAN |
3926 | u16 *tag; | 3935 | u16 *tag; |
3927 | #endif | 3936 | if (card->vlangrp && vlan_tx_tag_present(skb) && |
3928 | |||
3929 | QETH_DBF_TEXT(trace, 6, "prepskb"); | ||
3930 | if (card->info.type == QETH_CARD_TYPE_OSN) { | ||
3931 | *hdr = (struct qeth_hdr *)(*skb)->data; | ||
3932 | return rc; | ||
3933 | } | ||
3934 | rc = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr)); | ||
3935 | if (rc) | ||
3936 | return rc; | ||
3937 | #ifdef CONFIG_QETH_VLAN | ||
3938 | if (card->vlangrp && vlan_tx_tag_present(*skb) && | ||
3939 | ((ipv == 6) || card->options.layer2) ) { | 3937 | ((ipv == 6) || card->options.layer2) ) { |
3940 | /* | 3938 | /* |
3941 | * Move the mac addresses (6 bytes src, 6 bytes dest) | 3939 | * Move the mac addresses (6 bytes src, 6 bytes dest) |
3942 | * to the beginning of the new header. We are using three | 3940 | * to the beginning of the new header. We are using three |
3943 | * memcpys instead of one memmove to save cycles. | 3941 | * memcpys instead of one memmove to save cycles. |
3944 | */ | 3942 | */ |
3945 | skb_push(*skb, VLAN_HLEN); | 3943 | skb_push(skb, VLAN_HLEN); |
3946 | memcpy((*skb)->data, (*skb)->data + 4, 4); | 3944 | memcpy(skb->data, skb->data + 4, 4); |
3947 | memcpy((*skb)->data + 4, (*skb)->data + 8, 4); | 3945 | memcpy(skb->data + 4, skb->data + 8, 4); |
3948 | memcpy((*skb)->data + 8, (*skb)->data + 12, 4); | 3946 | memcpy(skb->data + 8, skb->data + 12, 4); |
3949 | tag = (u16 *)((*skb)->data + 12); | 3947 | tag = (u16 *)(skb->data + 12); |
3950 | /* | 3948 | /* |
3951 | * first two bytes = ETH_P_8021Q (0x8100) | 3949 | * first two bytes = ETH_P_8021Q (0x8100) |
3952 | * second two bytes = VLANID | 3950 | * second two bytes = VLANID |
3953 | */ | 3951 | */ |
3954 | *tag = __constant_htons(ETH_P_8021Q); | 3952 | *tag = __constant_htons(ETH_P_8021Q); |
3955 | *(tag + 1) = htons(vlan_tx_tag_get(*skb)); | 3953 | *(tag + 1) = htons(vlan_tx_tag_get(skb)); |
3956 | } | 3954 | } |
3957 | #endif | 3955 | #endif |
3958 | *hdr = (struct qeth_hdr *) | 3956 | return ((struct qeth_hdr *) |
3959 | qeth_push_skb(card, skb, sizeof(struct qeth_hdr)); | 3957 | qeth_push_skb(card, skb, sizeof(struct qeth_hdr))); |
3960 | if (*hdr == NULL) | 3958 | } |
3961 | return -EINVAL; | 3959 | |
3962 | return 0; | 3960 | static inline void |
3961 | __qeth_free_new_skb(struct sk_buff *orig_skb, struct sk_buff *new_skb) | ||
3962 | { | ||
3963 | if (orig_skb != new_skb) | ||
3964 | dev_kfree_skb_any(new_skb); | ||
3965 | } | ||
3966 | |||
3967 | static inline struct sk_buff * | ||
3968 | qeth_prepare_skb(struct qeth_card *card, struct sk_buff *skb, | ||
3969 | struct qeth_hdr **hdr, int ipv) | ||
3970 | { | ||
3971 | struct sk_buff *new_skb; | ||
3972 | |||
3973 | QETH_DBF_TEXT(trace, 6, "prepskb"); | ||
3974 | |||
3975 | new_skb = qeth_realloc_headroom(card, skb, sizeof(struct qeth_hdr)); | ||
3976 | if (new_skb == NULL) | ||
3977 | return NULL; | ||
3978 | *hdr = __qeth_prepare_skb(card, new_skb, ipv); | ||
3979 | if (*hdr == NULL) { | ||
3980 | __qeth_free_new_skb(skb, new_skb); | ||
3981 | return NULL; | ||
3982 | } | ||
3983 | return new_skb; | ||
3963 | } | 3984 | } |
3964 | 3985 | ||
3965 | static inline u8 | 3986 | static inline u8 |
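qeth_prepare_skb() no longer rewrites the caller's pointer through a struct sk_buff **; the VLAN and header work moves into __qeth_prepare_skb(), the function returns the (possibly reallocated) skb, and the new __qeth_free_new_skb() helper frees only a copy, never the skb the stack handed in. Roughly how the pair is used by the send path (taken from the qeth_send_packet() hunk further down):

        new_skb2 = qeth_prepare_skb(card, new_skb, &hdr, ipv);
        if (!new_skb2) {
                __qeth_free_new_skb(skb, new_skb);      /* frees the copy only */
                return -EINVAL;
        }
        if (new_skb != skb)
                __qeth_free_new_skb(new_skb2, new_skb);
        new_skb = new_skb2;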
@@ -4201,9 +4222,8 @@ qeth_fill_buffer(struct qeth_qdio_out_q *queue, | |||
4201 | flush_cnt = 1; | 4222 | flush_cnt = 1; |
4202 | } else { | 4223 | } else { |
4203 | QETH_DBF_TEXT(trace, 6, "fillbfpa"); | 4224 | QETH_DBF_TEXT(trace, 6, "fillbfpa"); |
4204 | #ifdef CONFIG_QETH_PERF_STATS | 4225 | if (queue->card->options.performance_stats) |
4205 | queue->card->perf_stats.skbs_sent_pack++; | 4226 | queue->card->perf_stats.skbs_sent_pack++; |
4206 | #endif | ||
4207 | if (buf->next_element_to_fill >= | 4227 | if (buf->next_element_to_fill >= |
4208 | QETH_MAX_BUFFER_ELEMENTS(queue->card)) { | 4228 | QETH_MAX_BUFFER_ELEMENTS(queue->card)) { |
4209 | /* | 4229 | /* |
@@ -4240,21 +4260,15 @@ qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, | |||
4240 | * check if buffer is empty to make sure that we do not 'overtake' | 4260 | * check if buffer is empty to make sure that we do not 'overtake' |
4241 | * ourselves and try to fill a buffer that is already primed | 4261 | * ourselves and try to fill a buffer that is already primed |
4242 | */ | 4262 | */ |
4243 | if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) { | 4263 | if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) |
4244 | card->stats.tx_dropped++; | 4264 | goto out; |
4245 | atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); | ||
4246 | return -EBUSY; | ||
4247 | } | ||
4248 | if (ctx == NULL) | 4265 | if (ctx == NULL) |
4249 | queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % | 4266 | queue->next_buf_to_fill = (queue->next_buf_to_fill + 1) % |
4250 | QDIO_MAX_BUFFERS_PER_Q; | 4267 | QDIO_MAX_BUFFERS_PER_Q; |
4251 | else { | 4268 | else { |
4252 | buffers_needed = qeth_eddp_check_buffers_for_context(queue,ctx); | 4269 | buffers_needed = qeth_eddp_check_buffers_for_context(queue,ctx); |
4253 | if (buffers_needed < 0) { | 4270 | if (buffers_needed < 0) |
4254 | card->stats.tx_dropped++; | 4271 | goto out; |
4255 | atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); | ||
4256 | return -EBUSY; | ||
4257 | } | ||
4258 | queue->next_buf_to_fill = | 4272 | queue->next_buf_to_fill = |
4259 | (queue->next_buf_to_fill + buffers_needed) % | 4273 | (queue->next_buf_to_fill + buffers_needed) % |
4260 | QDIO_MAX_BUFFERS_PER_Q; | 4274 | QDIO_MAX_BUFFERS_PER_Q; |
@@ -4269,6 +4283,9 @@ qeth_do_send_packet_fast(struct qeth_card *card, struct qeth_qdio_out_q *queue, | |||
4269 | qeth_flush_buffers(queue, 0, index, flush_cnt); | 4283 | qeth_flush_buffers(queue, 0, index, flush_cnt); |
4270 | } | 4284 | } |
4271 | return 0; | 4285 | return 0; |
4286 | out: | ||
4287 | atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); | ||
4288 | return -EBUSY; | ||
4272 | } | 4289 | } |
4273 | 4290 | ||
4274 | static inline int | 4291 | static inline int |
@@ -4294,8 +4311,7 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, | |||
4294 | * check if buffer is empty to make sure that we do not 'overtake' | 4311 | * check if buffer is empty to make sure that we do not 'overtake' |
4295 | * ourselves and try to fill a buffer that is already primed | 4312 | * ourselves and try to fill a buffer that is already primed |
4296 | */ | 4313 | */ |
4297 | if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY){ | 4314 | if (atomic_read(&buffer->state) != QETH_QDIO_BUF_EMPTY) { |
4298 | card->stats.tx_dropped++; | ||
4299 | atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); | 4315 | atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); |
4300 | return -EBUSY; | 4316 | return -EBUSY; |
4301 | } | 4317 | } |
@@ -4318,7 +4334,6 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, | |||
4318 | * again */ | 4334 | * again */ |
4319 | if (atomic_read(&buffer->state) != | 4335 | if (atomic_read(&buffer->state) != |
4320 | QETH_QDIO_BUF_EMPTY){ | 4336 | QETH_QDIO_BUF_EMPTY){ |
4321 | card->stats.tx_dropped++; | ||
4322 | qeth_flush_buffers(queue, 0, start_index, flush_count); | 4337 | qeth_flush_buffers(queue, 0, start_index, flush_count); |
4323 | atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); | 4338 | atomic_set(&queue->state, QETH_OUT_Q_UNLOCKED); |
4324 | return -EBUSY; | 4339 | return -EBUSY; |
@@ -4329,7 +4344,6 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, | |||
4329 | * free buffers) to handle eddp context */ | 4344 | * free buffers) to handle eddp context */ |
4330 | if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){ | 4345 | if (qeth_eddp_check_buffers_for_context(queue,ctx) < 0){ |
4331 | printk("eddp tx_dropped 1\n"); | 4346 | printk("eddp tx_dropped 1\n"); |
4332 | card->stats.tx_dropped++; | ||
4333 | rc = -EBUSY; | 4347 | rc = -EBUSY; |
4334 | goto out; | 4348 | goto out; |
4335 | } | 4349 | } |
@@ -4341,7 +4355,6 @@ qeth_do_send_packet(struct qeth_card *card, struct qeth_qdio_out_q *queue, | |||
4341 | tmp = qeth_eddp_fill_buffer(queue,ctx,queue->next_buf_to_fill); | 4355 | tmp = qeth_eddp_fill_buffer(queue,ctx,queue->next_buf_to_fill); |
4342 | if (tmp < 0) { | 4356 | if (tmp < 0) { |
4343 | printk("eddp tx_dropped 2\n"); | 4357 | printk("eddp tx_dropped 2\n"); |
4344 | card->stats.tx_dropped++; | ||
4345 | rc = - EBUSY; | 4358 | rc = - EBUSY; |
4346 | goto out; | 4359 | goto out; |
4347 | } | 4360 | } |
@@ -4375,10 +4388,8 @@ out: | |||
4375 | qeth_flush_buffers(queue, 0, start_index, flush_count); | 4388 | qeth_flush_buffers(queue, 0, start_index, flush_count); |
4376 | } | 4389 | } |
4377 | /* at this point the queue is UNLOCKED again */ | 4390 | /* at this point the queue is UNLOCKED again */ |
4378 | #ifdef CONFIG_QETH_PERF_STATS | 4391 | if (queue->card->options.performance_stats && do_pack) |
4379 | if (do_pack) | ||
4380 | queue->card->perf_stats.bufs_sent_pack += flush_count; | 4392 | queue->card->perf_stats.bufs_sent_pack += flush_count; |
4381 | #endif /* CONFIG_QETH_PERF_STATS */ | ||
4382 | 4393 | ||
4383 | return rc; | 4394 | return rc; |
4384 | } | 4395 | } |
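The tx_dropped accounting that used to be scattered across qeth_do_send_packet_fast() and qeth_do_send_packet() is removed here; both helpers just report -EBUSY and leave statistics and skb ownership to their single caller. The caller-side accounting, as it appears in the reworked qeth_send_packet() below:

        if (!rc) {
                card->stats.tx_packets++;
                card->stats.tx_bytes += tx_bytes;
        } else {
                card->stats.tx_dropped++;
                __qeth_free_new_skb(skb, new_skb);
        }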
@@ -4389,21 +4400,21 @@ qeth_get_elements_no(struct qeth_card *card, void *hdr, | |||
4389 | { | 4400 | { |
4390 | int elements_needed = 0; | 4401 | int elements_needed = 0; |
4391 | 4402 | ||
4392 | if (skb_shinfo(skb)->nr_frags > 0) { | 4403 | if (skb_shinfo(skb)->nr_frags > 0) |
4393 | elements_needed = (skb_shinfo(skb)->nr_frags + 1); | 4404 | elements_needed = (skb_shinfo(skb)->nr_frags + 1); |
4394 | } | 4405 | if (elements_needed == 0) |
4395 | if (elements_needed == 0 ) | ||
4396 | elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE) | 4406 | elements_needed = 1 + (((((unsigned long) hdr) % PAGE_SIZE) |
4397 | + skb->len) >> PAGE_SHIFT); | 4407 | + skb->len) >> PAGE_SHIFT); |
4398 | if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)){ | 4408 | if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)){ |
4399 | PRINT_ERR("qeth_do_send_packet: invalid size of " | 4409 | PRINT_ERR("Invalid size of IP packet " |
4400 | "IP packet (Number=%d / Length=%d). Discarded.\n", | 4410 | "(Number=%d / Length=%d). Discarded.\n", |
4401 | (elements_needed+elems), skb->len); | 4411 | (elements_needed+elems), skb->len); |
4402 | return 0; | 4412 | return 0; |
4403 | } | 4413 | } |
4404 | return elements_needed; | 4414 | return elements_needed; |
4405 | } | 4415 | } |
4406 | 4416 | ||
4417 | |||
4407 | static inline int | 4418 | static inline int |
4408 | qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) | 4419 | qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) |
4409 | { | 4420 | { |
@@ -4417,108 +4428,110 @@ qeth_send_packet(struct qeth_card *card, struct sk_buff *skb) | |||
4417 | int tx_bytes = skb->len; | 4428 | int tx_bytes = skb->len; |
4418 | unsigned short nr_frags = skb_shinfo(skb)->nr_frags; | 4429 | unsigned short nr_frags = skb_shinfo(skb)->nr_frags; |
4419 | unsigned short tso_size = skb_shinfo(skb)->gso_size; | 4430 | unsigned short tso_size = skb_shinfo(skb)->gso_size; |
4431 | struct sk_buff *new_skb, *new_skb2; | ||
4420 | int rc; | 4432 | int rc; |
4421 | 4433 | ||
4422 | QETH_DBF_TEXT(trace, 6, "sendpkt"); | 4434 | QETH_DBF_TEXT(trace, 6, "sendpkt"); |
4423 | 4435 | ||
4436 | new_skb = skb; | ||
4437 | if ((card->info.type == QETH_CARD_TYPE_OSN) && | ||
4438 | (skb->protocol == htons(ETH_P_IPV6))) | ||
4439 | return -EPERM; | ||
4440 | cast_type = qeth_get_cast_type(card, skb); | ||
4441 | if ((cast_type == RTN_BROADCAST) && | ||
4442 | (card->info.broadcast_capable == 0)) | ||
4443 | return -EPERM; | ||
4444 | queue = card->qdio.out_qs | ||
4445 | [qeth_get_priority_queue(card, skb, ipv, cast_type)]; | ||
4424 | if (!card->options.layer2) { | 4446 | if (!card->options.layer2) { |
4425 | ipv = qeth_get_ip_version(skb); | 4447 | ipv = qeth_get_ip_version(skb); |
4426 | if ((card->dev->hard_header == qeth_fake_header) && ipv) { | 4448 | if ((card->dev->hard_header == qeth_fake_header) && ipv) { |
4427 | if ((skb = qeth_pskb_unshare(skb,GFP_ATOMIC)) == NULL) { | 4449 | new_skb = qeth_pskb_unshare(skb, GFP_ATOMIC); |
4428 | card->stats.tx_dropped++; | 4450 | if (!new_skb) |
4429 | dev_kfree_skb_irq(skb); | 4451 | return -ENOMEM; |
4430 | return 0; | ||
4431 | } | ||
4432 | if(card->dev->type == ARPHRD_IEEE802_TR){ | 4452 | if(card->dev->type == ARPHRD_IEEE802_TR){ |
4433 | skb_pull(skb, QETH_FAKE_LL_LEN_TR); | 4453 | skb_pull(new_skb, QETH_FAKE_LL_LEN_TR); |
4434 | } else { | 4454 | } else { |
4435 | skb_pull(skb, QETH_FAKE_LL_LEN_ETH); | 4455 | skb_pull(new_skb, QETH_FAKE_LL_LEN_ETH); |
4436 | } | 4456 | } |
4437 | } | 4457 | } |
4438 | } | 4458 | } |
4439 | if ((card->info.type == QETH_CARD_TYPE_OSN) && | 4459 | if (skb_is_gso(skb)) |
4440 | (skb->protocol == htons(ETH_P_IPV6))) { | ||
4441 | dev_kfree_skb_any(skb); | ||
4442 | return 0; | ||
4443 | } | ||
4444 | cast_type = qeth_get_cast_type(card, skb); | ||
4445 | if ((cast_type == RTN_BROADCAST) && | ||
4446 | (card->info.broadcast_capable == 0)){ | ||
4447 | card->stats.tx_dropped++; | ||
4448 | card->stats.tx_errors++; | ||
4449 | dev_kfree_skb_any(skb); | ||
4450 | return NETDEV_TX_OK; | ||
4451 | } | ||
4452 | queue = card->qdio.out_qs | ||
4453 | [qeth_get_priority_queue(card, skb, ipv, cast_type)]; | ||
4454 | |||
4455 | if (skb_shinfo(skb)->gso_size) | ||
4456 | large_send = card->options.large_send; | 4460 | large_send = card->options.large_send; |
4457 | 4461 | /* check on OSN device*/ | |
4458 | /*are we able to do TSO ? If so ,prepare and send it from here */ | 4462 | if (card->info.type == QETH_CARD_TYPE_OSN) |
4463 | hdr = (struct qeth_hdr *)new_skb->data; | ||
4464 | /*are we able to do TSO ? */ | ||
4459 | if ((large_send == QETH_LARGE_SEND_TSO) && | 4465 | if ((large_send == QETH_LARGE_SEND_TSO) && |
4460 | (cast_type == RTN_UNSPEC)) { | 4466 | (cast_type == RTN_UNSPEC)) { |
4461 | rc = qeth_tso_prepare_packet(card, skb, ipv, cast_type); | 4467 | rc = qeth_tso_prepare_packet(card, new_skb, ipv, cast_type); |
4462 | if (rc) { | 4468 | if (rc) { |
4463 | card->stats.tx_dropped++; | 4469 | __qeth_free_new_skb(skb, new_skb); |
4464 | card->stats.tx_errors++; | 4470 | return rc; |
4465 | dev_kfree_skb_any(skb); | ||
4466 | return NETDEV_TX_OK; | ||
4467 | } | 4471 | } |
4468 | elements_needed++; | 4472 | elements_needed++; |
4469 | } else { | 4473 | } else if (card->info.type != QETH_CARD_TYPE_OSN) { |
4470 | if ((rc = qeth_prepare_skb(card, &skb, &hdr, ipv))) { | 4474 | new_skb2 = qeth_prepare_skb(card, new_skb, &hdr, ipv); |
4471 | QETH_DBF_TEXT_(trace, 4, "pskbe%d", rc); | 4475 | if (!new_skb2) { |
4472 | return rc; | 4476 | __qeth_free_new_skb(skb, new_skb); |
4477 | return -EINVAL; | ||
4473 | } | 4478 | } |
4474 | if (card->info.type != QETH_CARD_TYPE_OSN) | 4479 | if (new_skb != skb) |
4475 | qeth_fill_header(card, hdr, skb, ipv, cast_type); | 4480 | __qeth_free_new_skb(new_skb2, new_skb); |
4481 | new_skb = new_skb2; | ||
4482 | qeth_fill_header(card, hdr, new_skb, ipv, cast_type); | ||
4476 | } | 4483 | } |
4477 | |||
4478 | if (large_send == QETH_LARGE_SEND_EDDP) { | 4484 | if (large_send == QETH_LARGE_SEND_EDDP) { |
4479 | ctx = qeth_eddp_create_context(card, skb, hdr); | 4485 | ctx = qeth_eddp_create_context(card, new_skb, hdr); |
4480 | if (ctx == NULL) { | 4486 | if (ctx == NULL) { |
4487 | __qeth_free_new_skb(skb, new_skb); | ||
4481 | PRINT_WARN("could not create eddp context\n"); | 4488 | PRINT_WARN("could not create eddp context\n"); |
4482 | return -EINVAL; | 4489 | return -EINVAL; |
4483 | } | 4490 | } |
4484 | } else { | 4491 | } else { |
4485 | int elems = qeth_get_elements_no(card,(void*) hdr, skb, | 4492 | int elems = qeth_get_elements_no(card,(void*) hdr, new_skb, |
4486 | elements_needed); | 4493 | elements_needed); |
4487 | if (!elems) | 4494 | if (!elems) { |
4495 | __qeth_free_new_skb(skb, new_skb); | ||
4488 | return -EINVAL; | 4496 | return -EINVAL; |
4497 | } | ||
4489 | elements_needed += elems; | 4498 | elements_needed += elems; |
4490 | } | 4499 | } |
4491 | 4500 | ||
4492 | if (card->info.type != QETH_CARD_TYPE_IQD) | 4501 | if (card->info.type != QETH_CARD_TYPE_IQD) |
4493 | rc = qeth_do_send_packet(card, queue, skb, hdr, | 4502 | rc = qeth_do_send_packet(card, queue, new_skb, hdr, |
4494 | elements_needed, ctx); | 4503 | elements_needed, ctx); |
4495 | else | 4504 | else |
4496 | rc = qeth_do_send_packet_fast(card, queue, skb, hdr, | 4505 | rc = qeth_do_send_packet_fast(card, queue, new_skb, hdr, |
4497 | elements_needed, ctx); | 4506 | elements_needed, ctx); |
4498 | if (!rc){ | 4507 | if (!rc) { |
4499 | card->stats.tx_packets++; | 4508 | card->stats.tx_packets++; |
4500 | card->stats.tx_bytes += tx_bytes; | 4509 | card->stats.tx_bytes += tx_bytes; |
4501 | #ifdef CONFIG_QETH_PERF_STATS | 4510 | if (new_skb != skb) |
4502 | if (tso_size && | 4511 | dev_kfree_skb_any(skb); |
4503 | !(large_send == QETH_LARGE_SEND_NO)) { | 4512 | if (card->options.performance_stats) { |
4504 | card->perf_stats.large_send_bytes += tx_bytes; | 4513 | if (tso_size && |
4505 | card->perf_stats.large_send_cnt++; | 4514 | !(large_send == QETH_LARGE_SEND_NO)) { |
4506 | } | 4515 | card->perf_stats.large_send_bytes += tx_bytes; |
4507 | if (nr_frags > 0){ | 4516 | card->perf_stats.large_send_cnt++; |
4508 | card->perf_stats.sg_skbs_sent++; | 4517 | } |
4509 | /* nr_frags + skb->data */ | 4518 | if (nr_frags > 0) { |
4510 | card->perf_stats.sg_frags_sent += | 4519 | card->perf_stats.sg_skbs_sent++; |
4511 | nr_frags + 1; | 4520 | /* nr_frags + skb->data */ |
4521 | card->perf_stats.sg_frags_sent += | ||
4522 | nr_frags + 1; | ||
4523 | } | ||
4512 | } | 4524 | } |
4513 | #endif /* CONFIG_QETH_PERF_STATS */ | 4525 | } else { |
4526 | card->stats.tx_dropped++; | ||
4527 | __qeth_free_new_skb(skb, new_skb); | ||
4514 | } | 4528 | } |
4515 | if (ctx != NULL) { | 4529 | if (ctx != NULL) { |
4516 | /* drop creator's reference */ | 4530 | /* drop creator's reference */ |
4517 | qeth_eddp_put_context(ctx); | 4531 | qeth_eddp_put_context(ctx); |
4518 | /* free skb; it's not referenced by a buffer */ | 4532 | /* free skb; it's not referenced by a buffer */ |
4519 | if (rc == 0) | 4533 | if (!rc) |
4520 | dev_kfree_skb_any(skb); | 4534 | dev_kfree_skb_any(new_skb); |
4521 | |||
4522 | } | 4535 | } |
4523 | return rc; | 4536 | return rc; |
4524 | } | 4537 | } |
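qeth_send_packet() now rejects OSN frames carrying IPv6 and broadcasts on non-broadcast-capable cards up front, before any headroom reallocation, and returns -EPERM instead of silently dropping the frame itself; the working copy is tracked in new_skb so the original skb is only freed once the frame has really been queued. The early checks in condensed form:

        new_skb = skb;
        if ((card->info.type == QETH_CARD_TYPE_OSN) &&
            (skb->protocol == htons(ETH_P_IPV6)))
                return -EPERM;
        cast_type = qeth_get_cast_type(card, skb);
        if ((cast_type == RTN_BROADCAST) &&
            (card->info.broadcast_capable == 0))
                return -EPERM;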
@@ -4797,7 +4810,7 @@ static struct qeth_cmd_buffer * | |||
4797 | qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs, | 4810 | qeth_get_setassparms_cmd(struct qeth_card *, enum qeth_ipa_funcs, |
4798 | __u16, __u16, enum qeth_prot_versions); | 4811 | __u16, __u16, enum qeth_prot_versions); |
4799 | static int | 4812 | static int |
4800 | qeth_arp_query(struct qeth_card *card, char *udata) | 4813 | qeth_arp_query(struct qeth_card *card, char __user *udata) |
4801 | { | 4814 | { |
4802 | struct qeth_cmd_buffer *iob; | 4815 | struct qeth_cmd_buffer *iob; |
4803 | struct qeth_arp_query_info qinfo = {0, }; | 4816 | struct qeth_arp_query_info qinfo = {0, }; |
@@ -4930,7 +4943,7 @@ qeth_get_adapter_cmd(struct qeth_card *card, __u32 command, __u32 cmdlen) | |||
4930 | * function to send SNMP commands to OSA-E card | 4943 | * function to send SNMP commands to OSA-E card |
4931 | */ | 4944 | */ |
4932 | static int | 4945 | static int |
4933 | qeth_snmp_command(struct qeth_card *card, char *udata) | 4946 | qeth_snmp_command(struct qeth_card *card, char __user *udata) |
4934 | { | 4947 | { |
4935 | struct qeth_cmd_buffer *iob; | 4948 | struct qeth_cmd_buffer *iob; |
4936 | struct qeth_ipa_cmd *cmd; | 4949 | struct qeth_ipa_cmd *cmd; |
@@ -5272,6 +5285,7 @@ qeth_free_vlan_buffer(struct qeth_card *card, struct qeth_qdio_out_buffer *buf, | |||
5272 | struct sk_buff_head tmp_list; | 5285 | struct sk_buff_head tmp_list; |
5273 | 5286 | ||
5274 | skb_queue_head_init(&tmp_list); | 5287 | skb_queue_head_init(&tmp_list); |
5288 | lockdep_set_class(&tmp_list.lock, &qdio_out_skb_queue_key); | ||
5275 | for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){ | 5289 | for(i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i){ |
5276 | while ((skb = skb_dequeue(&buf->skb_list))){ | 5290 | while ((skb = skb_dequeue(&buf->skb_list))){ |
5277 | if (vlan_tx_tag_present(skb) && | 5291 | if (vlan_tx_tag_present(skb) && |
@@ -7330,6 +7344,8 @@ qeth_setrouting_v6(struct qeth_card *card) | |||
7330 | QETH_DBF_TEXT(trace,3,"setrtg6"); | 7344 | QETH_DBF_TEXT(trace,3,"setrtg6"); |
7331 | #ifdef CONFIG_QETH_IPV6 | 7345 | #ifdef CONFIG_QETH_IPV6 |
7332 | 7346 | ||
7347 | if (!qeth_is_supported(card, IPA_IPV6)) | ||
7348 | return 0; | ||
7333 | qeth_correct_routing_type(card, &card->options.route6.type, | 7349 | qeth_correct_routing_type(card, &card->options.route6.type, |
7334 | QETH_PROT_IPV6); | 7350 | QETH_PROT_IPV6); |
7335 | 7351 | ||
@@ -7868,12 +7884,12 @@ __qeth_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
7868 | QETH_DBF_TEXT_(setup, 2, "5err%d", rc); | 7884 | QETH_DBF_TEXT_(setup, 2, "5err%d", rc); |
7869 | goto out_remove; | 7885 | goto out_remove; |
7870 | } | 7886 | } |
7871 | card->state = CARD_STATE_SOFTSETUP; | ||
7872 | 7887 | ||
7873 | if ((rc = qeth_init_qdio_queues(card))){ | 7888 | if ((rc = qeth_init_qdio_queues(card))){ |
7874 | QETH_DBF_TEXT_(setup, 2, "6err%d", rc); | 7889 | QETH_DBF_TEXT_(setup, 2, "6err%d", rc); |
7875 | goto out_remove; | 7890 | goto out_remove; |
7876 | } | 7891 | } |
7892 | card->state = CARD_STATE_SOFTSETUP; | ||
7877 | netif_carrier_on(card->dev); | 7893 | netif_carrier_on(card->dev); |
7878 | 7894 | ||
7879 | qeth_set_allowed_threads(card, 0xffffffff, 0); | 7895 | qeth_set_allowed_threads(card, 0xffffffff, 0); |
@@ -7901,9 +7917,9 @@ qeth_set_online(struct ccwgroup_device *gdev) | |||
7901 | } | 7917 | } |
7902 | 7918 | ||
7903 | static struct ccw_device_id qeth_ids[] = { | 7919 | static struct ccw_device_id qeth_ids[] = { |
7904 | {CCW_DEVICE(0x1731, 0x01), driver_info:QETH_CARD_TYPE_OSAE}, | 7920 | {CCW_DEVICE(0x1731, 0x01), .driver_info = QETH_CARD_TYPE_OSAE}, |
7905 | {CCW_DEVICE(0x1731, 0x05), driver_info:QETH_CARD_TYPE_IQD}, | 7921 | {CCW_DEVICE(0x1731, 0x05), .driver_info = QETH_CARD_TYPE_IQD}, |
7906 | {CCW_DEVICE(0x1731, 0x06), driver_info:QETH_CARD_TYPE_OSN}, | 7922 | {CCW_DEVICE(0x1731, 0x06), .driver_info = QETH_CARD_TYPE_OSN}, |
7907 | {}, | 7923 | {}, |
7908 | }; | 7924 | }; |
7909 | MODULE_DEVICE_TABLE(ccw, qeth_ids); | 7925 | MODULE_DEVICE_TABLE(ccw, qeth_ids); |
@@ -8372,7 +8388,7 @@ out: | |||
8372 | 8388 | ||
8373 | static struct notifier_block qeth_ip_notifier = { | 8389 | static struct notifier_block qeth_ip_notifier = { |
8374 | qeth_ip_event, | 8390 | qeth_ip_event, |
8375 | 0 | 8391 | NULL, |
8376 | }; | 8392 | }; |
8377 | 8393 | ||
8378 | #ifdef CONFIG_QETH_IPV6 | 8394 | #ifdef CONFIG_QETH_IPV6 |
@@ -8425,7 +8441,7 @@ out: | |||
8425 | 8441 | ||
8426 | static struct notifier_block qeth_ip6_notifier = { | 8442 | static struct notifier_block qeth_ip6_notifier = { |
8427 | qeth_ip6_event, | 8443 | qeth_ip6_event, |
8428 | 0 | 8444 | NULL, |
8429 | }; | 8445 | }; |
8430 | #endif | 8446 | #endif |
8431 | 8447 | ||
@@ -8443,16 +8459,17 @@ __qeth_reboot_event_card(struct device *dev, void *data) | |||
8443 | static int | 8459 | static int |
8444 | qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) | 8460 | qeth_reboot_event(struct notifier_block *this, unsigned long event, void *ptr) |
8445 | { | 8461 | { |
8462 | int ret; | ||
8446 | 8463 | ||
8447 | driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL, | 8464 | ret = driver_for_each_device(&qeth_ccwgroup_driver.driver, NULL, NULL, |
8448 | __qeth_reboot_event_card); | 8465 | __qeth_reboot_event_card); |
8449 | return NOTIFY_DONE; | 8466 | return ret ? NOTIFY_BAD : NOTIFY_DONE; |
8450 | } | 8467 | } |
8451 | 8468 | ||
8452 | 8469 | ||
8453 | static struct notifier_block qeth_reboot_notifier = { | 8470 | static struct notifier_block qeth_reboot_notifier = { |
8454 | qeth_reboot_event, | 8471 | qeth_reboot_event, |
8455 | 0 | 8472 | NULL, |
8456 | }; | 8473 | }; |
8457 | 8474 | ||
8458 | static int | 8475 | static int |
@@ -8501,9 +8518,9 @@ static int | |||
8501 | qeth_ipv6_init(void) | 8518 | qeth_ipv6_init(void) |
8502 | { | 8519 | { |
8503 | qeth_old_arp_constructor = arp_tbl.constructor; | 8520 | qeth_old_arp_constructor = arp_tbl.constructor; |
8504 | write_lock(&arp_tbl.lock); | 8521 | write_lock_bh(&arp_tbl.lock); |
8505 | arp_tbl.constructor = qeth_arp_constructor; | 8522 | arp_tbl.constructor = qeth_arp_constructor; |
8506 | write_unlock(&arp_tbl.lock); | 8523 | write_unlock_bh(&arp_tbl.lock); |
8507 | 8524 | ||
8508 | arp_direct_ops = (struct neigh_ops*) | 8525 | arp_direct_ops = (struct neigh_ops*) |
8509 | kmalloc(sizeof(struct neigh_ops), GFP_KERNEL); | 8526 | kmalloc(sizeof(struct neigh_ops), GFP_KERNEL); |
@@ -8519,9 +8536,9 @@ qeth_ipv6_init(void) | |||
8519 | static void | 8536 | static void |
8520 | qeth_ipv6_uninit(void) | 8537 | qeth_ipv6_uninit(void) |
8521 | { | 8538 | { |
8522 | write_lock(&arp_tbl.lock); | 8539 | write_lock_bh(&arp_tbl.lock); |
8523 | arp_tbl.constructor = qeth_old_arp_constructor; | 8540 | arp_tbl.constructor = qeth_old_arp_constructor; |
8524 | write_unlock(&arp_tbl.lock); | 8541 | write_unlock_bh(&arp_tbl.lock); |
8525 | kfree(arp_direct_ops); | 8542 | kfree(arp_direct_ops); |
8526 | } | 8543 | } |
8527 | #endif /* CONFIG_QETH_IPV6 */ | 8544 | #endif /* CONFIG_QETH_IPV6 */ |
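Swapping arp_tbl.constructor is now done with the _bh lock variants. The neighbour table is also used from softirq context, so keeping softirqs disabled for this short critical section avoids the lock-from-softirq deadlock that the plain write_lock() left open; this reading of the change is an inference, the hunk itself only shows the lock swap:

        write_lock_bh(&arp_tbl.lock);   /* softirqs off on this CPU as well */
        arp_tbl.constructor = qeth_arp_constructor;
        write_unlock_bh(&arp_tbl.lock);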
@@ -8529,34 +8546,44 @@ qeth_ipv6_uninit(void) | |||
8529 | static void | 8546 | static void |
8530 | qeth_sysfs_unregister(void) | 8547 | qeth_sysfs_unregister(void) |
8531 | { | 8548 | { |
8549 | s390_root_dev_unregister(qeth_root_dev); | ||
8532 | qeth_remove_driver_attributes(); | 8550 | qeth_remove_driver_attributes(); |
8533 | ccw_driver_unregister(&qeth_ccw_driver); | 8551 | ccw_driver_unregister(&qeth_ccw_driver); |
8534 | ccwgroup_driver_unregister(&qeth_ccwgroup_driver); | 8552 | ccwgroup_driver_unregister(&qeth_ccwgroup_driver); |
8535 | s390_root_dev_unregister(qeth_root_dev); | ||
8536 | } | 8553 | } |
8554 | |||
8537 | /** | 8555 | /** |
8538 | * register qeth at sysfs | 8556 | * register qeth at sysfs |
8539 | */ | 8557 | */ |
8540 | static int | 8558 | static int |
8541 | qeth_sysfs_register(void) | 8559 | qeth_sysfs_register(void) |
8542 | { | 8560 | { |
8543 | int rc=0; | 8561 | int rc; |
8544 | 8562 | ||
8545 | rc = ccwgroup_driver_register(&qeth_ccwgroup_driver); | 8563 | rc = ccwgroup_driver_register(&qeth_ccwgroup_driver); |
8546 | if (rc) | 8564 | if (rc) |
8547 | return rc; | 8565 | goto out; |
8566 | |||
8548 | rc = ccw_driver_register(&qeth_ccw_driver); | 8567 | rc = ccw_driver_register(&qeth_ccw_driver); |
8549 | if (rc) | 8568 | if (rc) |
8550 | return rc; | 8569 | goto out_ccw_driver; |
8570 | |||
8551 | rc = qeth_create_driver_attributes(); | 8571 | rc = qeth_create_driver_attributes(); |
8552 | if (rc) | 8572 | if (rc) |
8553 | return rc; | 8573 | goto out_qeth_attr; |
8574 | |||
8554 | qeth_root_dev = s390_root_dev_register("qeth"); | 8575 | qeth_root_dev = s390_root_dev_register("qeth"); |
8555 | if (IS_ERR(qeth_root_dev)) { | 8576 | rc = IS_ERR(qeth_root_dev) ? PTR_ERR(qeth_root_dev) : 0; |
8556 | rc = PTR_ERR(qeth_root_dev); | 8577 | if (!rc) |
8557 | return rc; | 8578 | goto out; |
8558 | } | 8579 | |
8559 | return 0; | 8580 | qeth_remove_driver_attributes(); |
8581 | out_qeth_attr: | ||
8582 | ccw_driver_unregister(&qeth_ccw_driver); | ||
8583 | out_ccw_driver: | ||
8584 | ccwgroup_driver_unregister(&qeth_ccwgroup_driver); | ||
8585 | out: | ||
8586 | return rc; | ||
8560 | } | 8587 | } |
8561 | 8588 | ||
8562 | /*** | 8589 | /*** |
@@ -8565,7 +8592,7 @@ qeth_sysfs_register(void) | |||
8565 | static int __init | 8592 | static int __init |
8566 | qeth_init(void) | 8593 | qeth_init(void) |
8567 | { | 8594 | { |
8568 | int rc=0; | 8595 | int rc; |
8569 | 8596 | ||
8570 | PRINT_INFO("loading %s\n", version); | 8597 | PRINT_INFO("loading %s\n", version); |
8571 | 8598 | ||
@@ -8574,20 +8601,26 @@ qeth_init(void) | |||
8574 | spin_lock_init(&qeth_notify_lock); | 8601 | spin_lock_init(&qeth_notify_lock); |
8575 | rwlock_init(&qeth_card_list.rwlock); | 8602 | rwlock_init(&qeth_card_list.rwlock); |
8576 | 8603 | ||
8577 | if (qeth_register_dbf_views()) | 8604 | rc = qeth_register_dbf_views(); |
8605 | if (rc) | ||
8578 | goto out_err; | 8606 | goto out_err; |
8579 | if (qeth_sysfs_register()) | 8607 | |
8580 | goto out_sysfs; | 8608 | rc = qeth_sysfs_register(); |
8609 | if (rc) | ||
8610 | goto out_dbf; | ||
8581 | 8611 | ||
8582 | #ifdef CONFIG_QETH_IPV6 | 8612 | #ifdef CONFIG_QETH_IPV6 |
8583 | if (qeth_ipv6_init()) { | 8613 | rc = qeth_ipv6_init(); |
8584 | PRINT_ERR("Out of memory during ipv6 init.\n"); | 8614 | if (rc) { |
8615 | PRINT_ERR("Out of memory during ipv6 init code = %d\n", rc); | ||
8585 | goto out_sysfs; | 8616 | goto out_sysfs; |
8586 | } | 8617 | } |
8587 | #endif /* QETH_IPV6 */ | 8618 | #endif /* QETH_IPV6 */ |
8588 | if (qeth_register_notifiers()) | 8619 | rc = qeth_register_notifiers(); |
8620 | if (rc) | ||
8589 | goto out_ipv6; | 8621 | goto out_ipv6; |
8590 | if (qeth_create_procfs_entries()) | 8622 | rc = qeth_create_procfs_entries(); |
8623 | if (rc) | ||
8591 | goto out_notifiers; | 8624 | goto out_notifiers; |
8592 | 8625 | ||
8593 | return rc; | 8626 | return rc; |
@@ -8597,12 +8630,13 @@ out_notifiers: | |||
8597 | out_ipv6: | 8630 | out_ipv6: |
8598 | #ifdef CONFIG_QETH_IPV6 | 8631 | #ifdef CONFIG_QETH_IPV6 |
8599 | qeth_ipv6_uninit(); | 8632 | qeth_ipv6_uninit(); |
8600 | #endif /* QETH_IPV6 */ | ||
8601 | out_sysfs: | 8633 | out_sysfs: |
8634 | #endif /* QETH_IPV6 */ | ||
8602 | qeth_sysfs_unregister(); | 8635 | qeth_sysfs_unregister(); |
8636 | out_dbf: | ||
8603 | qeth_unregister_dbf_views(); | 8637 | qeth_unregister_dbf_views(); |
8604 | out_err: | 8638 | out_err: |
8605 | PRINT_ERR("Initialization failed"); | 8639 | PRINT_ERR("Initialization failed with code %d\n", rc); |
8606 | return rc; | 8640 | return rc; |
8607 | } | 8641 | } |
8608 | 8642 | ||
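Module setup gets the same treatment as the allocation paths above: qeth_sysfs_register() and qeth_init() keep the error code from each step and unwind everything already registered through goto labels instead of returning early with half-registered drivers (qeth_sysfs_unregister() is reordered to match). The shape of the pattern, reduced to two steps:

        rc = ccwgroup_driver_register(&qeth_ccwgroup_driver);
        if (rc)
                goto out;
        rc = ccw_driver_register(&qeth_ccw_driver);
        if (rc)
                goto out_ccw_driver;
        /* ... further steps; on success rc is 0 and control jumps to out ... */
        goto out;

out_ccw_driver:
        ccwgroup_driver_unregister(&qeth_ccwgroup_driver);
out:
        return rc;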
diff --git a/drivers/s390/net/qeth_proc.c b/drivers/s390/net/qeth_proc.c index 66f2da14e6e3..faa768e59257 100644 --- a/drivers/s390/net/qeth_proc.c +++ b/drivers/s390/net/qeth_proc.c | |||
@@ -173,7 +173,6 @@ static struct file_operations qeth_procfile_fops = { | |||
173 | #define QETH_PERF_PROCFILE_NAME "qeth_perf" | 173 | #define QETH_PERF_PROCFILE_NAME "qeth_perf" |
174 | static struct proc_dir_entry *qeth_perf_procfile; | 174 | static struct proc_dir_entry *qeth_perf_procfile; |
175 | 175 | ||
176 | #ifdef CONFIG_QETH_PERF_STATS | ||
177 | static int | 176 | static int |
178 | qeth_perf_procfile_seq_show(struct seq_file *s, void *it) | 177 | qeth_perf_procfile_seq_show(struct seq_file *s, void *it) |
179 | { | 178 | { |
@@ -192,14 +191,21 @@ qeth_perf_procfile_seq_show(struct seq_file *s, void *it) | |||
192 | CARD_DDEV_ID(card), | 191 | CARD_DDEV_ID(card), |
193 | QETH_CARD_IFNAME(card) | 192 | QETH_CARD_IFNAME(card) |
194 | ); | 193 | ); |
194 | if (!card->options.performance_stats) | ||
195 | seq_printf(s, "Performance statistics are deactivated.\n"); | ||
195 | seq_printf(s, " Skb's/buffers received : %lu/%u\n" | 196 | seq_printf(s, " Skb's/buffers received : %lu/%u\n" |
196 | " Skb's/buffers sent : %lu/%u\n\n", | 197 | " Skb's/buffers sent : %lu/%u\n\n", |
197 | card->stats.rx_packets, card->perf_stats.bufs_rec, | 198 | card->stats.rx_packets - |
198 | card->stats.tx_packets, card->perf_stats.bufs_sent | 199 | card->perf_stats.initial_rx_packets, |
200 | card->perf_stats.bufs_rec, | ||
201 | card->stats.tx_packets - | ||
202 | card->perf_stats.initial_tx_packets, | ||
203 | card->perf_stats.bufs_sent | ||
199 | ); | 204 | ); |
200 | seq_printf(s, " Skb's/buffers sent without packing : %lu/%u\n" | 205 | seq_printf(s, " Skb's/buffers sent without packing : %lu/%u\n" |
201 | " Skb's/buffers sent with packing : %u/%u\n\n", | 206 | " Skb's/buffers sent with packing : %u/%u\n\n", |
202 | card->stats.tx_packets - card->perf_stats.skbs_sent_pack, | 207 | card->stats.tx_packets - card->perf_stats.initial_tx_packets |
208 | - card->perf_stats.skbs_sent_pack, | ||
203 | card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack, | 209 | card->perf_stats.bufs_sent - card->perf_stats.bufs_sent_pack, |
204 | card->perf_stats.skbs_sent_pack, | 210 | card->perf_stats.skbs_sent_pack, |
205 | card->perf_stats.bufs_sent_pack | 211 | card->perf_stats.bufs_sent_pack |
@@ -275,11 +281,6 @@ static struct file_operations qeth_perf_procfile_fops = { | |||
275 | .release = seq_release, | 281 | .release = seq_release, |
276 | }; | 282 | }; |
277 | 283 | ||
278 | #define qeth_perf_procfile_created qeth_perf_procfile | ||
279 | #else | ||
280 | #define qeth_perf_procfile_created 1 | ||
281 | #endif /* CONFIG_QETH_PERF_STATS */ | ||
282 | |||
283 | int __init | 284 | int __init |
284 | qeth_create_procfs_entries(void) | 285 | qeth_create_procfs_entries(void) |
285 | { | 286 | { |
@@ -288,15 +289,13 @@ qeth_create_procfs_entries(void) | |||
288 | if (qeth_procfile) | 289 | if (qeth_procfile) |
289 | qeth_procfile->proc_fops = &qeth_procfile_fops; | 290 | qeth_procfile->proc_fops = &qeth_procfile_fops; |
290 | 291 | ||
291 | #ifdef CONFIG_QETH_PERF_STATS | ||
292 | qeth_perf_procfile = create_proc_entry(QETH_PERF_PROCFILE_NAME, | 292 | qeth_perf_procfile = create_proc_entry(QETH_PERF_PROCFILE_NAME, |
293 | S_IFREG | 0444, NULL); | 293 | S_IFREG | 0444, NULL); |
294 | if (qeth_perf_procfile) | 294 | if (qeth_perf_procfile) |
295 | qeth_perf_procfile->proc_fops = &qeth_perf_procfile_fops; | 295 | qeth_perf_procfile->proc_fops = &qeth_perf_procfile_fops; |
296 | #endif /* CONFIG_QETH_PERF_STATS */ | ||
297 | 296 | ||
298 | if (qeth_procfile && | 297 | if (qeth_procfile && |
299 | qeth_perf_procfile_created) | 298 | qeth_perf_procfile) |
300 | return 0; | 299 | return 0; |
301 | else | 300 | else |
302 | return -ENOMEM; | 301 | return -ENOMEM; |
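In qeth_proc.c the /proc/qeth_perf file is now always built; when statistics are off it prints a deactivation notice. Because the card's rx/tx counters keep running even while statistics are disabled, the patch records a baseline at enable time and the output shows the difference; with illustrative variable names:

        /* packets seen since performance statistics were last enabled */
        rx_since_enable = card->stats.rx_packets -
                          card->perf_stats.initial_rx_packets;
        tx_since_enable = card->stats.tx_packets -
                          card->perf_stats.initial_tx_packets;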
diff --git a/drivers/s390/net/qeth_sys.c b/drivers/s390/net/qeth_sys.c index 185a9cfbcbdc..5836737ac58f 100644 --- a/drivers/s390/net/qeth_sys.c +++ b/drivers/s390/net/qeth_sys.c | |||
@@ -743,6 +743,47 @@ static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show, | |||
743 | qeth_dev_layer2_store); | 743 | qeth_dev_layer2_store); |
744 | 744 | ||
745 | static ssize_t | 745 | static ssize_t |
746 | qeth_dev_performance_stats_show(struct device *dev, struct device_attribute *attr, char *buf) | ||
747 | { | ||
748 | struct qeth_card *card = dev->driver_data; | ||
749 | |||
750 | if (!card) | ||
751 | return -EINVAL; | ||
752 | |||
753 | return sprintf(buf, "%i\n", card->options.performance_stats ? 1:0); | ||
754 | } | ||
755 | |||
756 | static ssize_t | ||
757 | qeth_dev_performance_stats_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) | ||
758 | { | ||
759 | struct qeth_card *card = dev->driver_data; | ||
760 | char *tmp; | ||
761 | int i; | ||
762 | |||
763 | if (!card) | ||
764 | return -EINVAL; | ||
765 | |||
766 | i = simple_strtoul(buf, &tmp, 16); | ||
767 | if ((i == 0) || (i == 1)) { | ||
768 | if (i == card->options.performance_stats) | ||
769 | return count; | ||
770 | card->options.performance_stats = i; | ||
771 | if (i == 0) | ||
772 | memset(&card->perf_stats, 0, | ||
773 | sizeof(struct qeth_perf_stats)); | ||
774 | card->perf_stats.initial_rx_packets = card->stats.rx_packets; | ||
775 | card->perf_stats.initial_tx_packets = card->stats.tx_packets; | ||
776 | } else { | ||
777 | PRINT_WARN("performance_stats: write 0 or 1 to this file!\n"); | ||
778 | return -EINVAL; | ||
779 | } | ||
780 | return count; | ||
781 | } | ||
782 | |||
783 | static DEVICE_ATTR(performance_stats, 0644, qeth_dev_performance_stats_show, | ||
784 | qeth_dev_performance_stats_store); | ||
785 | |||
786 | static ssize_t | ||
746 | qeth_dev_large_send_show(struct device *dev, struct device_attribute *attr, char *buf) | 787 | qeth_dev_large_send_show(struct device *dev, struct device_attribute *attr, char *buf) |
747 | { | 788 | { |
748 | struct qeth_card *card = dev->driver_data; | 789 | struct qeth_card *card = dev->driver_data; |
@@ -928,6 +969,7 @@ static struct device_attribute * qeth_device_attrs[] = { | |||
928 | &dev_attr_canonical_macaddr, | 969 | &dev_attr_canonical_macaddr, |
929 | &dev_attr_layer2, | 970 | &dev_attr_layer2, |
930 | &dev_attr_large_send, | 971 | &dev_attr_large_send, |
972 | &dev_attr_performance_stats, | ||
931 | NULL, | 973 | NULL, |
932 | }; | 974 | }; |
933 | 975 | ||
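The new per-device performance_stats attribute is the runtime switch behind all of the converted checks in qeth_main.c: only "0" and "1" are accepted, switching off wipes struct qeth_perf_stats, and whenever the value actually changes the current packet counters are stored as the baseline that /proc/qeth_perf subtracts:

        card->options.performance_stats = i;
        if (i == 0)     /* switching off clears the collected data */
                memset(&card->perf_stats, 0, sizeof(struct qeth_perf_stats));
        card->perf_stats.initial_rx_packets = card->stats.rx_packets;
        card->perf_stats.initial_tx_packets = card->stats.tx_packets;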
@@ -1110,12 +1152,12 @@ qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, | |||
1110 | { | 1152 | { |
1111 | const char *start, *end; | 1153 | const char *start, *end; |
1112 | char *tmp; | 1154 | char *tmp; |
1113 | char buffer[49] = {0, }; | 1155 | char buffer[40] = {0, }; |
1114 | 1156 | ||
1115 | start = buf; | 1157 | start = buf; |
1116 | /* get address string */ | 1158 | /* get address string */ |
1117 | end = strchr(start, '/'); | 1159 | end = strchr(start, '/'); |
1118 | if (!end || (end-start >= 49)){ | 1160 | if (!end || (end - start >= 40)){ |
1119 | PRINT_WARN("Invalid format for ipato_addx/delx. " | 1161 | PRINT_WARN("Invalid format for ipato_addx/delx. " |
1120 | "Use <ip addr>/<mask bits>\n"); | 1162 | "Use <ip addr>/<mask bits>\n"); |
1121 | return -EINVAL; | 1163 | return -EINVAL; |
@@ -1127,7 +1169,12 @@ qeth_parse_ipatoe(const char* buf, enum qeth_prot_versions proto, | |||
1127 | } | 1169 | } |
1128 | start = end + 1; | 1170 | start = end + 1; |
1129 | *mask_bits = simple_strtoul(start, &tmp, 10); | 1171 | *mask_bits = simple_strtoul(start, &tmp, 10); |
1130 | 1172 | if (!strlen(start) || | |
1173 | (tmp == start) || | ||
1174 | (*mask_bits > ((proto == QETH_PROT_IPV4) ? 32 : 128))) { | ||
1175 | PRINT_WARN("Invalid mask bits for ipato_addx/delx !\n"); | ||
1176 | return -EINVAL; | ||
1177 | } | ||
1131 | return 0; | 1178 | return 0; |
1132 | } | 1179 | } |
1133 | 1180 | ||
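qeth_parse_ipatoe() now validates the mask part as well: an empty or non-numeric suffix, or more mask bits than the protocol allows (32 for IPv4, 128 for IPv6), is rejected with -EINVAL, and the address buffer shrinks from 49 to 40 bytes, still enough for a fully written-out IPv6 address plus terminator. Hypothetical inputs for illustration:

        /* accepted (IPv4 / IPv6 variants respectively): "192.168.0.0/24", "fe80::/64"
         * rejected: "192.168.0.0/33", "fe80::/abc", "10.0.0.0/"                      */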
@@ -1698,11 +1745,16 @@ qeth_create_device_attributes(struct device *dev) | |||
1698 | sysfs_remove_group(&dev->kobj, &qeth_device_attr_group); | 1745 | sysfs_remove_group(&dev->kobj, &qeth_device_attr_group); |
1699 | sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group); | 1746 | sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group); |
1700 | sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group); | 1747 | sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group); |
1748 | return ret; | ||
1701 | } | 1749 | } |
1702 | if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group))) | 1750 | if ((ret = sysfs_create_group(&dev->kobj, &qeth_device_blkt_group))){ |
1751 | sysfs_remove_group(&dev->kobj, &qeth_device_attr_group); | ||
1752 | sysfs_remove_group(&dev->kobj, &qeth_device_ipato_group); | ||
1753 | sysfs_remove_group(&dev->kobj, &qeth_device_vipa_group); | ||
1754 | sysfs_remove_group(&dev->kobj, &qeth_device_rxip_group); | ||
1703 | return ret; | 1755 | return ret; |
1704 | 1756 | } | |
1705 | return ret; | 1757 | return 0; |
1706 | } | 1758 | } |
1707 | 1759 | ||
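
The point of the change above is that a failing sysfs_create_group() now unwinds every group created before it instead of leaking them. A generic sketch of that create-or-roll-back pattern, with simulated helpers standing in for the sysfs calls:

    #include <stdio.h>

    #define NGROUPS 5                          /* attr, ipato, vipa, rxip, blkt */

    static int create_group(int i, int fail_at)
    {
            return (i == fail_at) ? -1 : 0;    /* simulate sysfs_create_group() */
    }

    static void remove_group(int i)
    {
            printf("remove group %d\n", i);    /* simulate sysfs_remove_group() */
    }

    /* create the groups in order; on failure, tear down everything created so far */
    static int create_device_attributes(int fail_at)
    {
            int i, ret;

            for (i = 0; i < NGROUPS; i++) {
                    ret = create_group(i, fail_at);
                    if (ret) {
                            while (--i >= 0)
                                    remove_group(i);   /* unwind in reverse order */
                            return ret;
                    }
            }
            return 0;
    }

    int main(void)
    {
            return -create_device_attributes(4);       /* fail at the last (blkt) group */
    }
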
1708 | void | 1760 | void |
@@ -1755,7 +1807,7 @@ qeth_driver_group_store(struct device_driver *ddrv, const char *buf, | |||
1755 | } | 1807 | } |
1756 | 1808 | ||
1757 | 1809 | ||
1758 | static DRIVER_ATTR(group, 0200, 0, qeth_driver_group_store); | 1810 | static DRIVER_ATTR(group, 0200, NULL, qeth_driver_group_store); |
1759 | 1811 | ||
1760 | static ssize_t | 1812 | static ssize_t |
1761 | qeth_driver_notifier_register_store(struct device_driver *ddrv, const char *buf, | 1813 | qeth_driver_notifier_register_store(struct device_driver *ddrv, const char *buf, |
@@ -1783,7 +1835,7 @@ qeth_driver_notifier_register_store(struct device_driver *ddrv, const char *buf, | |||
1783 | return count; | 1835 | return count; |
1784 | } | 1836 | } |
1785 | 1837 | ||
1786 | static DRIVER_ATTR(notifier_register, 0200, 0, | 1838 | static DRIVER_ATTR(notifier_register, 0200, NULL, |
1787 | qeth_driver_notifier_register_store); | 1839 | qeth_driver_notifier_register_store); |
1788 | 1840 | ||
1789 | int | 1841 | int |
diff --git a/drivers/s390/net/qeth_tso.h b/drivers/s390/net/qeth_tso.h index 593f298142c1..14504afb044e 100644 --- a/drivers/s390/net/qeth_tso.h +++ b/drivers/s390/net/qeth_tso.h | |||
@@ -24,7 +24,7 @@ static inline struct qeth_hdr_tso * | |||
24 | qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb) | 24 | qeth_tso_prepare_skb(struct qeth_card *card, struct sk_buff **skb) |
25 | { | 25 | { |
26 | QETH_DBF_TEXT(trace, 5, "tsoprsk"); | 26 | QETH_DBF_TEXT(trace, 5, "tsoprsk"); |
27 | return qeth_push_skb(card, skb, sizeof(struct qeth_hdr_tso)); | 27 | return qeth_push_skb(card, *skb, sizeof(struct qeth_hdr_tso)); |
28 | } | 28 | } |
29 | 29 | ||
30 | /** | 30 | /** |
diff --git a/drivers/s390/net/smsgiucv.c b/drivers/s390/net/smsgiucv.c index 72118ee68954..b8179c27ceb6 100644 --- a/drivers/s390/net/smsgiucv.c +++ b/drivers/s390/net/smsgiucv.c | |||
@@ -66,7 +66,7 @@ smsg_message_pending(iucv_MessagePending *eib, void *pgm_data) | |||
66 | return; | 66 | return; |
67 | } | 67 | } |
68 | rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls, | 68 | rc = iucv_receive(eib->ippathid, eib->ipmsgid, eib->iptrgcls, |
69 | msg, len, 0, 0, 0); | 69 | msg, len, NULL, NULL, NULL); |
70 | if (rc == 0) { | 70 | if (rc == 0) { |
71 | msg[len] = 0; | 71 | msg[len] = 0; |
72 | EBCASC(msg, len); | 72 | EBCASC(msg, len); |
@@ -122,7 +122,7 @@ smsg_unregister_callback(char *prefix, void (*callback)(char *from, char *str)) | |||
122 | struct smsg_callback *cb, *tmp; | 122 | struct smsg_callback *cb, *tmp; |
123 | 123 | ||
124 | spin_lock(&smsg_list_lock); | 124 | spin_lock(&smsg_list_lock); |
125 | cb = 0; | 125 | cb = NULL; |
126 | list_for_each_entry(tmp, &smsg_list, list) | 126 | list_for_each_entry(tmp, &smsg_list, list) |
127 | if (tmp->callback == callback && | 127 | if (tmp->callback == callback && |
128 | strcmp(tmp->prefix, prefix) == 0) { | 128 | strcmp(tmp->prefix, prefix) == 0) { |
@@ -139,7 +139,7 @@ smsg_exit(void) | |||
139 | { | 139 | { |
140 | if (smsg_handle > 0) { | 140 | if (smsg_handle > 0) { |
141 | cpcmd("SET SMSG OFF", NULL, 0, NULL); | 141 | cpcmd("SET SMSG OFF", NULL, 0, NULL); |
142 | iucv_sever(smsg_pathid, 0); | 142 | iucv_sever(smsg_pathid, NULL); |
143 | iucv_unregister_program(smsg_handle); | 143 | iucv_unregister_program(smsg_handle); |
144 | driver_unregister(&smsg_driver); | 144 | driver_unregister(&smsg_driver); |
145 | } | 145 | } |
@@ -162,19 +162,19 @@ smsg_init(void) | |||
162 | return rc; | 162 | return rc; |
163 | } | 163 | } |
164 | smsg_handle = iucv_register_program("SMSGIUCV ", "*MSG ", | 164 | smsg_handle = iucv_register_program("SMSGIUCV ", "*MSG ", |
165 | pgmmask, &smsg_ops, 0); | 165 | pgmmask, &smsg_ops, NULL); |
166 | if (!smsg_handle) { | 166 | if (!smsg_handle) { |
167 | printk(KERN_ERR "SMSGIUCV: failed to register to iucv"); | 167 | printk(KERN_ERR "SMSGIUCV: failed to register to iucv"); |
168 | driver_unregister(&smsg_driver); | 168 | driver_unregister(&smsg_driver); |
169 | return -EIO; /* better errno ? */ | 169 | return -EIO; /* better errno ? */ |
170 | } | 170 | } |
171 | rc = iucv_connect (&smsg_pathid, 255, 0, "*MSG ", 0, 0, 0, 0, | 171 | rc = iucv_connect (&smsg_pathid, 255, NULL, "*MSG ", NULL, 0, |
172 | smsg_handle, 0); | 172 | NULL, NULL, smsg_handle, NULL); |
173 | if (rc) { | 173 | if (rc) { |
174 | printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG"); | 174 | printk(KERN_ERR "SMSGIUCV: failed to connect to *MSG"); |
175 | iucv_unregister_program(smsg_handle); | 175 | iucv_unregister_program(smsg_handle); |
176 | driver_unregister(&smsg_driver); | 176 | driver_unregister(&smsg_driver); |
177 | smsg_handle = 0; | 177 | smsg_handle = NULL; |
178 | return -EIO; | 178 | return -EIO; |
179 | } | 179 | } |
180 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); | 180 | cpcmd("SET SMSG IUCV", NULL, 0, NULL); |
diff --git a/drivers/s390/s390mach.c b/drivers/s390/s390mach.c index 432136f96e64..5399c5d99b81 100644 --- a/drivers/s390/s390mach.c +++ b/drivers/s390/s390mach.c | |||
@@ -111,6 +111,16 @@ repeat: | |||
111 | break; | 111 | break; |
112 | case CRW_RSC_CPATH: | 112 | case CRW_RSC_CPATH: |
113 | pr_debug("source is channel path %02X\n", crw[0].rsid); | 113 | pr_debug("source is channel path %02X\n", crw[0].rsid); |
114 | /* | ||
115 | * Check for solicited machine checks. These are | ||
116 | * created by reset channel path and need not be | ||
117 | * reported to the common I/O layer. | ||
118 | */ | ||
119 | if (crw[chain].slct) { | ||
120 | DBG(KERN_INFO"solicited machine check for " | ||
121 | "channel path %02X\n", crw[0].rsid); | ||
122 | break; | ||
123 | } | ||
114 | switch (crw[0].erc) { | 124 | switch (crw[0].erc) { |
115 | case CRW_ERC_IPARM: /* Path has come. */ | 125 | case CRW_ERC_IPARM: /* Path has come. */ |
116 | ret = chp_process_crw(crw[0].rsid, 1); | 126 | ret = chp_process_crw(crw[0].rsid, 1); |
@@ -378,6 +388,8 @@ s390_do_machine_check(struct pt_regs *regs) | |||
378 | struct mcck_struct *mcck; | 388 | struct mcck_struct *mcck; |
379 | int umode; | 389 | int umode; |
380 | 390 | ||
391 | lockdep_off(); | ||
392 | |||
381 | mci = (struct mci *) &S390_lowcore.mcck_interruption_code; | 393 | mci = (struct mci *) &S390_lowcore.mcck_interruption_code; |
382 | mcck = &__get_cpu_var(cpu_mcck); | 394 | mcck = &__get_cpu_var(cpu_mcck); |
383 | umode = user_mode(regs); | 395 | umode = user_mode(regs); |
@@ -482,6 +494,7 @@ s390_do_machine_check(struct pt_regs *regs) | |||
482 | mcck->warning = 1; | 494 | mcck->warning = 1; |
483 | set_thread_flag(TIF_MCCK_PENDING); | 495 | set_thread_flag(TIF_MCCK_PENDING); |
484 | } | 496 | } |
497 | lockdep_on(); | ||
485 | } | 498 | } |
486 | 499 | ||
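
Two independent changes sit in the s390mach.c hunks: the CRW handler now drops solicited channel-path machine checks (those generated by a reset channel path) before they reach the common I/O layer, and the machine check handler is bracketed with lockdep_off()/lockdep_on() because lockdep cannot be relied on in that context. A toy model of the filtering decision only, with an illustrative CRW layout and RSC value rather than the kernel's real definitions:

    #include <stdio.h>

    #define CRW_RSC_CPATH 0x4          /* illustrative value; see the kernel headers */

    struct crw {                       /* channel report word, trimmed for the example */
            unsigned int slct:1;       /* solicited */
            unsigned int rsc:4;        /* reporting source code */
            unsigned int rsid:8;       /* reporting source id (channel path id) */
    };

    /* returns 1 when the CRW should be forwarded to the common I/O layer */
    static int forward_cpath_crw(const struct crw *c)
    {
            if (c->rsc != CRW_RSC_CPATH)
                    return 1;
            if (c->slct) {             /* solicited: caused by reset channel path */
                    printf("solicited machine check for chpid %02X, ignored\n",
                           c->rsid);
                    return 0;
            }
            return 1;
    }

    int main(void)
    {
            struct crw solicited = { .slct = 1, .rsc = CRW_RSC_CPATH, .rsid = 0x40 };
            struct crw event     = { .slct = 0, .rsc = CRW_RSC_CPATH, .rsid = 0x40 };

            printf("%d %d\n", forward_cpath_crw(&solicited), forward_cpath_crw(&event));
            return 0;
    }
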
487 | /* | 500 | /* |
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 9cd789b8acd4..adc9d8f2c28f 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c | |||
@@ -112,6 +112,105 @@ _zfcp_hex_dump(char *addr, int count) | |||
112 | printk("\n"); | 112 | printk("\n"); |
113 | } | 113 | } |
114 | 114 | ||
115 | |||
116 | /****************************************************************/ | ||
117 | /****** Functions to handle the request ID hash table ********/ | ||
118 | /****************************************************************/ | ||
119 | |||
120 | #define ZFCP_LOG_AREA ZFCP_LOG_AREA_FSF | ||
121 | |||
122 | static int zfcp_reqlist_init(struct zfcp_adapter *adapter) | ||
123 | { | ||
124 | int i; | ||
125 | |||
126 | adapter->req_list = kcalloc(REQUEST_LIST_SIZE, sizeof(struct list_head), | ||
127 | GFP_KERNEL); | ||
128 | |||
129 | if (!adapter->req_list) | ||
130 | return -ENOMEM; | ||
131 | |||
132 | for (i=0; i<REQUEST_LIST_SIZE; i++) | ||
133 | INIT_LIST_HEAD(&adapter->req_list[i]); | ||
134 | |||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | static void zfcp_reqlist_free(struct zfcp_adapter *adapter) | ||
139 | { | ||
140 | struct zfcp_fsf_req *request, *tmp; | ||
141 | unsigned int i; | ||
142 | |||
143 | for (i=0; i<REQUEST_LIST_SIZE; i++) { | ||
144 | if (list_empty(&adapter->req_list[i])) | ||
145 | continue; | ||
146 | |||
147 | list_for_each_entry_safe(request, tmp, | ||
148 | &adapter->req_list[i], list) | ||
149 | list_del(&request->list); | ||
150 | } | ||
151 | |||
152 | kfree(adapter->req_list); | ||
153 | } | ||
154 | |||
155 | void zfcp_reqlist_add(struct zfcp_adapter *adapter, | ||
156 | struct zfcp_fsf_req *fsf_req) | ||
157 | { | ||
158 | unsigned int i; | ||
159 | |||
160 | i = fsf_req->req_id % REQUEST_LIST_SIZE; | ||
161 | list_add_tail(&fsf_req->list, &adapter->req_list[i]); | ||
162 | } | ||
163 | |||
164 | void zfcp_reqlist_remove(struct zfcp_adapter *adapter, unsigned long req_id) | ||
165 | { | ||
166 | struct zfcp_fsf_req *request, *tmp; | ||
167 | unsigned int i, counter; | ||
168 | u64 dbg_tmp[2]; | ||
169 | |||
170 | i = req_id % REQUEST_LIST_SIZE; | ||
171 | BUG_ON(list_empty(&adapter->req_list[i])); | ||
172 | |||
173 | counter = 0; | ||
174 | list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) { | ||
175 | if (request->req_id == req_id) { | ||
176 | dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active); | ||
177 | dbg_tmp[1] = (u64) counter; | ||
178 | debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); | ||
179 | list_del(&request->list); | ||
180 | break; | ||
181 | } | ||
182 | counter++; | ||
183 | } | ||
184 | } | ||
185 | |||
186 | struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *adapter, | ||
187 | unsigned long req_id) | ||
188 | { | ||
189 | struct zfcp_fsf_req *request, *tmp; | ||
190 | unsigned int i; | ||
191 | |||
192 | i = req_id % REQUEST_LIST_SIZE; | ||
193 | |||
194 | list_for_each_entry_safe(request, tmp, &adapter->req_list[i], list) | ||
195 | if (request->req_id == req_id) | ||
196 | return request; | ||
197 | |||
198 | return NULL; | ||
199 | } | ||
200 | |||
201 | int zfcp_reqlist_isempty(struct zfcp_adapter *adapter) | ||
202 | { | ||
203 | unsigned int i; | ||
204 | |||
205 | for (i=0; i<REQUEST_LIST_SIZE; i++) | ||
206 | if (!list_empty(&adapter->req_list[i])) | ||
207 | return 0; | ||
208 | |||
209 | return 1; | ||
210 | } | ||
211 | |||
212 | #undef ZFCP_LOG_AREA | ||
213 | |||
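
The block above replaces zfcp's single flat list of pending FSF requests with a hash table of REQUEST_LIST_SIZE buckets keyed by req_id % REQUEST_LIST_SIZE, so a completion can be matched to its request without walking every outstanding request. A minimal userspace sketch of the bucketing; it uses a plain singly linked list and no locking, whereas the driver uses list_head entries serialized by adapter->req_list_lock:

    #include <stdio.h>

    #define REQ_BUCKETS 128                    /* mirrors REQUEST_LIST_SIZE */

    struct req {
            unsigned long id;
            struct req *next;
    };

    static struct req *buckets[REQ_BUCKETS];   /* one list head per bucket */

    static void reqlist_add(struct req *r)
    {
            unsigned int i = r->id % REQ_BUCKETS;   /* bucket chosen by request id */

            r->next = buckets[i];
            buckets[i] = r;
    }

    static struct req *reqlist_ismember(unsigned long id)
    {
            struct req *r;

            for (r = buckets[id % REQ_BUCKETS]; r; r = r->next)
                    if (r->id == id)
                            return r;
            return NULL;                       /* unknown id, e.g. a stale tag */
    }

    static void reqlist_remove(unsigned long id)
    {
            struct req **p = &buckets[id % REQ_BUCKETS];

            while (*p && (*p)->id != id)
                    p = &(*p)->next;
            if (*p)
                    *p = (*p)->next;           /* unlink; the caller owns the memory */
    }

    int main(void)
    {
            struct req a = { 5, NULL }, b = { 5 + REQ_BUCKETS, NULL };

            reqlist_add(&a);
            reqlist_add(&b);                   /* collides with a, same bucket */
            printf("%lu\n", reqlist_ismember(5 + REQ_BUCKETS)->id);   /* 133 */
            reqlist_remove(5);
            printf("%s\n", reqlist_ismember(5) ? "found" : "gone");   /* gone */
            return 0;
    }

Two ids that land in the same bucket are still told apart by the full req_id comparison, so collisions only cost a short list walk.
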
115 | /****************************************************************/ | 214 | /****************************************************************/ |
116 | /************** Uncategorised Functions *************************/ | 215 | /************** Uncategorised Functions *************************/ |
117 | /****************************************************************/ | 216 | /****************************************************************/ |
@@ -961,8 +1060,12 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
961 | INIT_LIST_HEAD(&adapter->port_remove_lh); | 1060 | INIT_LIST_HEAD(&adapter->port_remove_lh); |
962 | 1061 | ||
963 | /* initialize list of fsf requests */ | 1062 | /* initialize list of fsf requests */ |
964 | spin_lock_init(&adapter->fsf_req_list_lock); | 1063 | spin_lock_init(&adapter->req_list_lock); |
965 | INIT_LIST_HEAD(&adapter->fsf_req_list_head); | 1064 | retval = zfcp_reqlist_init(adapter); |
1065 | if (retval) { | ||
1066 | ZFCP_LOG_INFO("request list initialization failed\n"); | ||
1067 | goto failed_low_mem_buffers; | ||
1068 | } | ||
966 | 1069 | ||
967 | /* initialize debug locks */ | 1070 | /* initialize debug locks */ |
968 | 1071 | ||
@@ -1041,8 +1144,6 @@ zfcp_adapter_enqueue(struct ccw_device *ccw_device) | |||
1041 | * !0 - struct zfcp_adapter data structure could not be removed | 1144 | * !0 - struct zfcp_adapter data structure could not be removed |
1042 | * (e.g. still used) | 1145 | * (e.g. still used) |
1043 | * locks: adapter list write lock is assumed to be held by caller | 1146 | * locks: adapter list write lock is assumed to be held by caller |
1044 | * adapter->fsf_req_list_lock is taken and released within this | ||
1045 | * function and must not be held on entry | ||
1046 | */ | 1147 | */ |
1047 | void | 1148 | void |
1048 | zfcp_adapter_dequeue(struct zfcp_adapter *adapter) | 1149 | zfcp_adapter_dequeue(struct zfcp_adapter *adapter) |
@@ -1054,14 +1155,14 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter) | |||
1054 | zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev); | 1155 | zfcp_sysfs_adapter_remove_files(&adapter->ccw_device->dev); |
1055 | dev_set_drvdata(&adapter->ccw_device->dev, NULL); | 1156 | dev_set_drvdata(&adapter->ccw_device->dev, NULL); |
1056 | /* sanity check: no pending FSF requests */ | 1157 | /* sanity check: no pending FSF requests */ |
1057 | spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); | 1158 | spin_lock_irqsave(&adapter->req_list_lock, flags); |
1058 | retval = !list_empty(&adapter->fsf_req_list_head); | 1159 | retval = zfcp_reqlist_isempty(adapter); |
1059 | spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); | 1160 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); |
1060 | if (retval) { | 1161 | if (!retval) { |
1061 | ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, " | 1162 | ZFCP_LOG_NORMAL("bug: adapter %s (%p) still in use, " |
1062 | "%i requests outstanding\n", | 1163 | "%i requests outstanding\n", |
1063 | zfcp_get_busid_by_adapter(adapter), adapter, | 1164 | zfcp_get_busid_by_adapter(adapter), adapter, |
1064 | atomic_read(&adapter->fsf_reqs_active)); | 1165 | atomic_read(&adapter->reqs_active)); |
1065 | retval = -EBUSY; | 1166 | retval = -EBUSY; |
1066 | goto out; | 1167 | goto out; |
1067 | } | 1168 | } |
@@ -1087,6 +1188,7 @@ zfcp_adapter_dequeue(struct zfcp_adapter *adapter) | |||
1087 | zfcp_free_low_mem_buffers(adapter); | 1188 | zfcp_free_low_mem_buffers(adapter); |
1088 | /* free memory of adapter data structure and queues */ | 1189 | /* free memory of adapter data structure and queues */ |
1089 | zfcp_qdio_free_queues(adapter); | 1190 | zfcp_qdio_free_queues(adapter); |
1191 | zfcp_reqlist_free(adapter); | ||
1090 | kfree(adapter->fc_stats); | 1192 | kfree(adapter->fc_stats); |
1091 | kfree(adapter->stats_reset_data); | 1193 | kfree(adapter->stats_reset_data); |
1092 | ZFCP_LOG_TRACE("freeing adapter structure\n"); | 1194 | ZFCP_LOG_TRACE("freeing adapter structure\n"); |
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c index 57d8e4bfb8d9..fdabadeaa9ee 100644 --- a/drivers/s390/scsi/zfcp_ccw.c +++ b/drivers/s390/scsi/zfcp_ccw.c | |||
@@ -164,6 +164,11 @@ zfcp_ccw_set_online(struct ccw_device *ccw_device) | |||
164 | retval = zfcp_adapter_scsi_register(adapter); | 164 | retval = zfcp_adapter_scsi_register(adapter); |
165 | if (retval) | 165 | if (retval) |
166 | goto out_scsi_register; | 166 | goto out_scsi_register; |
167 | |||
168 | /* initialize request counter */ | ||
169 | BUG_ON(!zfcp_reqlist_isempty(adapter)); | ||
170 | adapter->req_no = 0; | ||
171 | |||
167 | zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING, | 172 | zfcp_erp_modify_adapter_status(adapter, ZFCP_STATUS_COMMON_RUNNING, |
168 | ZFCP_SET); | 173 | ZFCP_SET); |
169 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); | 174 | zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED); |
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h index 2df512a18e2c..94d1b74db356 100644 --- a/drivers/s390/scsi/zfcp_def.h +++ b/drivers/s390/scsi/zfcp_def.h | |||
@@ -52,7 +52,7 @@ | |||
52 | /********************* GENERAL DEFINES *********************************/ | 52 | /********************* GENERAL DEFINES *********************************/ |
53 | 53 | ||
54 | /* zfcp version number, it consists of major, minor, and patch-level number */ | 54 | /* zfcp version number, it consists of major, minor, and patch-level number */ |
55 | #define ZFCP_VERSION "4.7.0" | 55 | #define ZFCP_VERSION "4.8.0" |
56 | 56 | ||
57 | /** | 57 | /** |
58 | * zfcp_sg_to_address - determine kernel address from struct scatterlist | 58 | * zfcp_sg_to_address - determine kernel address from struct scatterlist |
@@ -80,7 +80,7 @@ zfcp_address_to_sg(void *address, struct scatterlist *list) | |||
80 | #define REQUEST_LIST_SIZE 128 | 80 | #define REQUEST_LIST_SIZE 128 |
81 | 81 | ||
82 | /********************* SCSI SPECIFIC DEFINES *********************************/ | 82 | /********************* SCSI SPECIFIC DEFINES *********************************/ |
83 | #define ZFCP_SCSI_ER_TIMEOUT (100*HZ) | 83 | #define ZFCP_SCSI_ER_TIMEOUT (10*HZ) |
84 | 84 | ||
85 | /********************* CIO/QDIO SPECIFIC DEFINES *****************************/ | 85 | /********************* CIO/QDIO SPECIFIC DEFINES *****************************/ |
86 | 86 | ||
@@ -886,11 +886,11 @@ struct zfcp_adapter { | |||
886 | struct list_head port_remove_lh; /* head of ports to be | 886 | struct list_head port_remove_lh; /* head of ports to be |
887 | removed */ | 887 | removed */ |
888 | u32 ports; /* number of remote ports */ | 888 | u32 ports; /* number of remote ports */ |
889 | struct timer_list scsi_er_timer; /* SCSI err recovery watch */ | 889 | struct timer_list scsi_er_timer; /* SCSI err recovery watch */ |
890 | struct list_head fsf_req_list_head; /* head of FSF req list */ | 890 | atomic_t reqs_active; /* # active FSF reqs */ |
891 | spinlock_t fsf_req_list_lock; /* lock for ops on list of | 891 | unsigned long req_no; /* unique FSF req number */ |
892 | FSF requests */ | 892 | struct list_head *req_list; /* list of pending reqs */ |
893 | atomic_t fsf_reqs_active; /* # active FSF reqs */ | 893 | spinlock_t req_list_lock; /* request list lock */ |
894 | struct zfcp_qdio_queue request_queue; /* request queue */ | 894 | struct zfcp_qdio_queue request_queue; /* request queue */ |
895 | u32 fsf_req_seq_no; /* FSF cmnd seq number */ | 895 | u32 fsf_req_seq_no; /* FSF cmnd seq number */ |
896 | wait_queue_head_t request_wq; /* can be used to wait for | 896 | wait_queue_head_t request_wq; /* can be used to wait for |
@@ -986,6 +986,7 @@ struct zfcp_unit { | |||
986 | /* FSF request */ | 986 | /* FSF request */ |
987 | struct zfcp_fsf_req { | 987 | struct zfcp_fsf_req { |
988 | struct list_head list; /* list of FSF requests */ | 988 | struct list_head list; /* list of FSF requests */ |
989 | unsigned long req_id; /* unique request ID */ | ||
989 | struct zfcp_adapter *adapter; /* adapter request belongs to */ | 990 | struct zfcp_adapter *adapter; /* adapter request belongs to */ |
990 | u8 sbal_number; /* nr of SBALs free for use */ | 991 | u8 sbal_number; /* nr of SBALs free for use */ |
991 | u8 sbal_first; /* first SBAL for this request */ | 992 | u8 sbal_first; /* first SBAL for this request */ |
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index 909731b99d26..7f60b6fdf724 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c | |||
@@ -64,8 +64,8 @@ static int zfcp_erp_strategy_check_action(struct zfcp_erp_action *, int); | |||
64 | static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *); | 64 | static int zfcp_erp_adapter_strategy(struct zfcp_erp_action *); |
65 | static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int); | 65 | static int zfcp_erp_adapter_strategy_generic(struct zfcp_erp_action *, int); |
66 | static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *); | 66 | static int zfcp_erp_adapter_strategy_close(struct zfcp_erp_action *); |
67 | static int zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *); | 67 | static void zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *); |
68 | static int zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *); | 68 | static void zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *); |
69 | static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *); | 69 | static int zfcp_erp_adapter_strategy_open(struct zfcp_erp_action *); |
70 | static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); | 70 | static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *); |
71 | static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); | 71 | static int zfcp_erp_adapter_strategy_open_fsf(struct zfcp_erp_action *); |
@@ -93,10 +93,9 @@ static int zfcp_erp_unit_strategy_clearstati(struct zfcp_unit *); | |||
93 | static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *); | 93 | static int zfcp_erp_unit_strategy_close(struct zfcp_erp_action *); |
94 | static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *); | 94 | static int zfcp_erp_unit_strategy_open(struct zfcp_erp_action *); |
95 | 95 | ||
96 | static int zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *); | 96 | static void zfcp_erp_action_dismiss_port(struct zfcp_port *); |
97 | static int zfcp_erp_action_dismiss_port(struct zfcp_port *); | 97 | static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *); |
98 | static int zfcp_erp_action_dismiss_unit(struct zfcp_unit *); | 98 | static void zfcp_erp_action_dismiss(struct zfcp_erp_action *); |
99 | static int zfcp_erp_action_dismiss(struct zfcp_erp_action *); | ||
100 | 99 | ||
101 | static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *, | 100 | static int zfcp_erp_action_enqueue(int, struct zfcp_adapter *, |
102 | struct zfcp_port *, struct zfcp_unit *); | 101 | struct zfcp_port *, struct zfcp_unit *); |
@@ -135,29 +134,39 @@ zfcp_fsf_request_timeout_handler(unsigned long data) | |||
135 | zfcp_erp_adapter_reopen(adapter, 0); | 134 | zfcp_erp_adapter_reopen(adapter, 0); |
136 | } | 135 | } |
137 | 136 | ||
138 | /* | 137 | /** |
139 | * function: zfcp_fsf_scsi_er_timeout_handler | 138 | * zfcp_fsf_scsi_er_timeout_handler - timeout handler for scsi eh tasks |
140 | * | 139 | * |
141 | * purpose: This function needs to be called whenever a SCSI error recovery | 140 | * This function needs to be called whenever a SCSI error recovery |
142 | * action (abort/reset) does not return. | 141 | * action (abort/reset) does not return. Re-opening the adapter means |
143 | * Re-opening the adapter means that the command can be returned | 142 | * that the abort/reset command can be returned by zfcp. It won't complete |
144 | * by zfcp (it is guarranteed that it does not return via the | 143 | * via the adapter anymore (because qdio queues are closed). If ERP is |
145 | * adapter anymore). The buffer can then be used again. | 144 | * already running on this adapter it will be stopped. |
146 | * | ||
147 | * returns: sod all | ||
148 | */ | 145 | */ |
149 | void | 146 | void zfcp_fsf_scsi_er_timeout_handler(unsigned long data) |
150 | zfcp_fsf_scsi_er_timeout_handler(unsigned long data) | ||
151 | { | 147 | { |
152 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; | 148 | struct zfcp_adapter *adapter = (struct zfcp_adapter *) data; |
149 | unsigned long flags; | ||
153 | 150 | ||
154 | ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. " | 151 | ZFCP_LOG_NORMAL("warning: SCSI error recovery timed out. " |
155 | "Restarting all operations on the adapter %s\n", | 152 | "Restarting all operations on the adapter %s\n", |
156 | zfcp_get_busid_by_adapter(adapter)); | 153 | zfcp_get_busid_by_adapter(adapter)); |
157 | debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout"); | 154 | debug_text_event(adapter->erp_dbf, 1, "eh_lmem_tout"); |
158 | zfcp_erp_adapter_reopen(adapter, 0); | ||
159 | 155 | ||
160 | return; | 156 | write_lock_irqsave(&adapter->erp_lock, flags); |
157 | if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, | ||
158 | &adapter->status)) { | ||
159 | zfcp_erp_modify_adapter_status(adapter, | ||
160 | ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN, | ||
161 | ZFCP_CLEAR); | ||
162 | zfcp_erp_action_dismiss_adapter(adapter); | ||
163 | write_unlock_irqrestore(&adapter->erp_lock, flags); | ||
164 | /* dismiss all pending requests including requests for ERP */ | ||
165 | zfcp_fsf_req_dismiss_all(adapter); | ||
166 | adapter->fsf_req_seq_no = 0; | ||
167 | } else | ||
168 | write_unlock_irqrestore(&adapter->erp_lock, flags); | ||
169 | zfcp_erp_adapter_reopen(adapter, 0); | ||
161 | } | 170 | } |
162 | 171 | ||
163 | /* | 172 | /* |
@@ -670,17 +679,10 @@ zfcp_erp_unit_reopen(struct zfcp_unit *unit, int clear_mask) | |||
670 | return retval; | 679 | return retval; |
671 | } | 680 | } |
672 | 681 | ||
673 | /* | 682 | /** |
674 | * function: | 683 | * zfcp_erp_adapter_block - mark adapter as blocked, block scsi requests |
675 | * | ||
676 | * purpose: disable I/O, | ||
677 | * return any open requests and clean them up, | ||
678 | * aim: no pending and incoming I/O | ||
679 | * | ||
680 | * returns: | ||
681 | */ | 684 | */ |
682 | static void | 685 | static void zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) |
683 | zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) | ||
684 | { | 686 | { |
685 | debug_text_event(adapter->erp_dbf, 6, "a_bl"); | 687 | debug_text_event(adapter->erp_dbf, 6, "a_bl"); |
686 | zfcp_erp_modify_adapter_status(adapter, | 688 | zfcp_erp_modify_adapter_status(adapter, |
@@ -688,15 +690,10 @@ zfcp_erp_adapter_block(struct zfcp_adapter *adapter, int clear_mask) | |||
688 | clear_mask, ZFCP_CLEAR); | 690 | clear_mask, ZFCP_CLEAR); |
689 | } | 691 | } |
690 | 692 | ||
691 | /* | 693 | /** |
692 | * function: | 694 | * zfcp_erp_adapter_unblock - mark adapter as unblocked, allow scsi requests |
693 | * | ||
694 | * purpose: enable I/O | ||
695 | * | ||
696 | * returns: | ||
697 | */ | 695 | */ |
698 | static void | 696 | static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) |
699 | zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter) | ||
700 | { | 697 | { |
701 | debug_text_event(adapter->erp_dbf, 6, "a_ubl"); | 698 | debug_text_event(adapter->erp_dbf, 6, "a_ubl"); |
702 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); | 699 | atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status); |
@@ -848,18 +845,16 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) | |||
848 | struct zfcp_adapter *adapter = erp_action->adapter; | 845 | struct zfcp_adapter *adapter = erp_action->adapter; |
849 | 846 | ||
850 | if (erp_action->fsf_req) { | 847 | if (erp_action->fsf_req) { |
851 | /* take lock to ensure that request is not being deleted meanwhile */ | 848 | /* take lock to ensure that request is not deleted meanwhile */ |
852 | spin_lock(&adapter->fsf_req_list_lock); | 849 | spin_lock(&adapter->req_list_lock); |
853 | /* check whether fsf req does still exist */ | 850 | if ((!zfcp_reqlist_ismember(adapter, |
854 | list_for_each_entry(fsf_req, &adapter->fsf_req_list_head, list) | 851 | erp_action->fsf_req->req_id)) && |
855 | if (fsf_req == erp_action->fsf_req) | 852 | (fsf_req->erp_action == erp_action)) { |
856 | break; | ||
857 | if (fsf_req && (fsf_req->erp_action == erp_action)) { | ||
858 | /* fsf_req still exists */ | 853 | /* fsf_req still exists */ |
859 | debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); | 854 | debug_text_event(adapter->erp_dbf, 3, "a_ca_req"); |
860 | debug_event(adapter->erp_dbf, 3, &fsf_req, | 855 | debug_event(adapter->erp_dbf, 3, &fsf_req, |
861 | sizeof (unsigned long)); | 856 | sizeof (unsigned long)); |
862 | /* dismiss fsf_req of timed out or dismissed erp_action */ | 857 | /* dismiss fsf_req of timed out/dismissed erp_action */ |
863 | if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED | | 858 | if (erp_action->status & (ZFCP_STATUS_ERP_DISMISSED | |
864 | ZFCP_STATUS_ERP_TIMEDOUT)) { | 859 | ZFCP_STATUS_ERP_TIMEDOUT)) { |
865 | debug_text_event(adapter->erp_dbf, 3, | 860 | debug_text_event(adapter->erp_dbf, 3, |
@@ -892,30 +887,22 @@ zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *erp_action) | |||
892 | */ | 887 | */ |
893 | erp_action->fsf_req = NULL; | 888 | erp_action->fsf_req = NULL; |
894 | } | 889 | } |
895 | spin_unlock(&adapter->fsf_req_list_lock); | 890 | spin_unlock(&adapter->req_list_lock); |
896 | } else | 891 | } else |
897 | debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq"); | 892 | debug_text_event(adapter->erp_dbf, 3, "a_ca_noreq"); |
898 | 893 | ||
899 | return retval; | 894 | return retval; |
900 | } | 895 | } |
901 | 896 | ||
902 | /* | 897 | /** |
903 | * purpose: generic handler for asynchronous events related to erp_action events | 898 | * zfcp_erp_async_handler_nolock - complete erp_action |
904 | * (normal completion, time-out, dismissing, retry after | ||
905 | * low memory condition) | ||
906 | * | ||
907 | * note: deletion of timer is not required (e.g. in case of a time-out), | ||
908 | * but a second try does no harm, | ||
909 | * we leave it in here to allow for greater simplification | ||
910 | * | 899 | * |
911 | * returns: 0 - there was an action to handle | 900 | * Used for normal completion, time-out, dismissal and failure after |
912 | * !0 - otherwise | 901 | * low memory condition. |
913 | */ | 902 | */ |
914 | static int | 903 | static void zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, |
915 | zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, | 904 | unsigned long set_mask) |
916 | unsigned long set_mask) | ||
917 | { | 905 | { |
918 | int retval; | ||
919 | struct zfcp_adapter *adapter = erp_action->adapter; | 906 | struct zfcp_adapter *adapter = erp_action->adapter; |
920 | 907 | ||
921 | if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) { | 908 | if (zfcp_erp_action_exists(erp_action) == ZFCP_ERP_ACTION_RUNNING) { |
@@ -926,43 +913,26 @@ zfcp_erp_async_handler_nolock(struct zfcp_erp_action *erp_action, | |||
926 | del_timer(&erp_action->timer); | 913 | del_timer(&erp_action->timer); |
927 | erp_action->status |= set_mask; | 914 | erp_action->status |= set_mask; |
928 | zfcp_erp_action_ready(erp_action); | 915 | zfcp_erp_action_ready(erp_action); |
929 | retval = 0; | ||
930 | } else { | 916 | } else { |
931 | /* action is ready or gone - nothing to do */ | 917 | /* action is ready or gone - nothing to do */ |
932 | debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone"); | 918 | debug_text_event(adapter->erp_dbf, 3, "a_asyh_gone"); |
933 | debug_event(adapter->erp_dbf, 3, &erp_action->action, | 919 | debug_event(adapter->erp_dbf, 3, &erp_action->action, |
934 | sizeof (int)); | 920 | sizeof (int)); |
935 | retval = 1; | ||
936 | } | 921 | } |
937 | |||
938 | return retval; | ||
939 | } | 922 | } |
940 | 923 | ||
941 | /* | 924 | /** |
942 | * purpose: generic handler for asynchronous events related to erp_action | 925 | * zfcp_erp_async_handler - wrapper for erp_async_handler_nolock w/ locking |
943 | * events (normal completion, time-out, dismissing, retry after | ||
944 | * low memory condition) | ||
945 | * | ||
946 | * note: deletion of timer is not required (e.g. in case of a time-out), | ||
947 | * but a second try does no harm, | ||
948 | * we leave it in here to allow for greater simplification | ||
949 | * | ||
950 | * returns: 0 - there was an action to handle | ||
951 | * !0 - otherwise | ||
952 | */ | 926 | */ |
953 | int | 927 | void zfcp_erp_async_handler(struct zfcp_erp_action *erp_action, |
954 | zfcp_erp_async_handler(struct zfcp_erp_action *erp_action, | 928 | unsigned long set_mask) |
955 | unsigned long set_mask) | ||
956 | { | 929 | { |
957 | struct zfcp_adapter *adapter = erp_action->adapter; | 930 | struct zfcp_adapter *adapter = erp_action->adapter; |
958 | unsigned long flags; | 931 | unsigned long flags; |
959 | int retval; | ||
960 | 932 | ||
961 | write_lock_irqsave(&adapter->erp_lock, flags); | 933 | write_lock_irqsave(&adapter->erp_lock, flags); |
962 | retval = zfcp_erp_async_handler_nolock(erp_action, set_mask); | 934 | zfcp_erp_async_handler_nolock(erp_action, set_mask); |
963 | write_unlock_irqrestore(&adapter->erp_lock, flags); | 935 | write_unlock_irqrestore(&adapter->erp_lock, flags); |
964 | |||
965 | return retval; | ||
966 | } | 936 | } |
967 | 937 | ||
968 | /* | 938 | /* |
@@ -999,17 +969,15 @@ zfcp_erp_timeout_handler(unsigned long data) | |||
999 | zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT); | 969 | zfcp_erp_async_handler(erp_action, ZFCP_STATUS_ERP_TIMEDOUT); |
1000 | } | 970 | } |
1001 | 971 | ||
1002 | /* | 972 | /** |
1003 | * purpose: is called for an erp_action which needs to be ended | 973 | * zfcp_erp_action_dismiss - dismiss an erp_action |
1004 | * though not being done, | ||
1005 | * this is usually required if an higher is generated, | ||
1006 | * action gets an appropriate flag and will be processed | ||
1007 | * accordingly | ||
1008 | * | 974 | * |
1009 | * locks: erp_lock held (thus we need to call another handler variant) | 975 | * adapter->erp_lock must be held |
976 | * | ||
977 | * Dismissal of an erp_action is usually required if an erp_action of | ||
978 | * higher priority is generated. | ||
1010 | */ | 979 | */ |
1011 | static int | 980 | static void zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) |
1012 | zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) | ||
1013 | { | 981 | { |
1014 | struct zfcp_adapter *adapter = erp_action->adapter; | 982 | struct zfcp_adapter *adapter = erp_action->adapter; |
1015 | 983 | ||
@@ -1017,8 +985,6 @@ zfcp_erp_action_dismiss(struct zfcp_erp_action *erp_action) | |||
1017 | debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int)); | 985 | debug_event(adapter->erp_dbf, 2, &erp_action->action, sizeof (int)); |
1018 | 986 | ||
1019 | zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED); | 987 | zfcp_erp_async_handler_nolock(erp_action, ZFCP_STATUS_ERP_DISMISSED); |
1020 | |||
1021 | return 0; | ||
1022 | } | 988 | } |
1023 | 989 | ||
1024 | int | 990 | int |
@@ -2074,18 +2040,12 @@ zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *erp_action) | |||
2074 | return retval; | 2040 | return retval; |
2075 | } | 2041 | } |
2076 | 2042 | ||
2077 | /* | 2043 | /** |
2078 | * function: zfcp_qdio_cleanup | 2044 | * zfcp_erp_adapter_strategy_close_qdio - close qdio queues for an adapter |
2079 | * | ||
2080 | * purpose: cleans up QDIO operation for the specified adapter | ||
2081 | * | ||
2082 | * returns: 0 - successful cleanup | ||
2083 | * !0 - failed cleanup | ||
2084 | */ | 2045 | */ |
2085 | int | 2046 | static void |
2086 | zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) | 2047 | zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) |
2087 | { | 2048 | { |
2088 | int retval = ZFCP_ERP_SUCCEEDED; | ||
2089 | int first_used; | 2049 | int first_used; |
2090 | int used_count; | 2050 | int used_count; |
2091 | struct zfcp_adapter *adapter = erp_action->adapter; | 2051 | struct zfcp_adapter *adapter = erp_action->adapter; |
@@ -2094,15 +2054,13 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) | |||
2094 | ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO " | 2054 | ZFCP_LOG_DEBUG("error: attempt to shut down inactive QDIO " |
2095 | "queues on adapter %s\n", | 2055 | "queues on adapter %s\n", |
2096 | zfcp_get_busid_by_adapter(adapter)); | 2056 | zfcp_get_busid_by_adapter(adapter)); |
2097 | retval = ZFCP_ERP_FAILED; | 2057 | return; |
2098 | goto out; | ||
2099 | } | 2058 | } |
2100 | 2059 | ||
2101 | /* | 2060 | /* |
2102 | * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that | 2061 | * Get queue_lock and clear QDIOUP flag. Thus it's guaranteed that |
2103 | * do_QDIO won't be called while qdio_shutdown is in progress. | 2062 | * do_QDIO won't be called while qdio_shutdown is in progress. |
2104 | */ | 2063 | */ |
2105 | |||
2106 | write_lock_irq(&adapter->request_queue.queue_lock); | 2064 | write_lock_irq(&adapter->request_queue.queue_lock); |
2107 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); | 2065 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status); |
2108 | write_unlock_irq(&adapter->request_queue.queue_lock); | 2066 | write_unlock_irq(&adapter->request_queue.queue_lock); |
@@ -2134,8 +2092,6 @@ zfcp_erp_adapter_strategy_close_qdio(struct zfcp_erp_action *erp_action) | |||
2134 | adapter->request_queue.free_index = 0; | 2092 | adapter->request_queue.free_index = 0; |
2135 | atomic_set(&adapter->request_queue.free_count, 0); | 2093 | atomic_set(&adapter->request_queue.free_count, 0); |
2136 | adapter->request_queue.distance_from_int = 0; | 2094 | adapter->request_queue.distance_from_int = 0; |
2137 | out: | ||
2138 | return retval; | ||
2139 | } | 2095 | } |
2140 | 2096 | ||
2141 | static int | 2097 | static int |
@@ -2168,9 +2124,9 @@ zfcp_erp_adapter_strategy_open_fsf_xconfig(struct zfcp_erp_action *erp_action) | |||
2168 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, | 2124 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_HOST_CON_INIT, |
2169 | &adapter->status); | 2125 | &adapter->status); |
2170 | ZFCP_LOG_DEBUG("Doing exchange config data\n"); | 2126 | ZFCP_LOG_DEBUG("Doing exchange config data\n"); |
2171 | write_lock(&adapter->erp_lock); | 2127 | write_lock_irq(&adapter->erp_lock); |
2172 | zfcp_erp_action_to_running(erp_action); | 2128 | zfcp_erp_action_to_running(erp_action); |
2173 | write_unlock(&adapter->erp_lock); | 2129 | write_unlock_irq(&adapter->erp_lock); |
2174 | zfcp_erp_timeout_init(erp_action); | 2130 | zfcp_erp_timeout_init(erp_action); |
2175 | if (zfcp_fsf_exchange_config_data(erp_action)) { | 2131 | if (zfcp_fsf_exchange_config_data(erp_action)) { |
2176 | retval = ZFCP_ERP_FAILED; | 2132 | retval = ZFCP_ERP_FAILED; |
@@ -2236,9 +2192,9 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) | |||
2236 | adapter = erp_action->adapter; | 2192 | adapter = erp_action->adapter; |
2237 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); | 2193 | atomic_clear_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status); |
2238 | 2194 | ||
2239 | write_lock(&adapter->erp_lock); | 2195 | write_lock_irq(&adapter->erp_lock); |
2240 | zfcp_erp_action_to_running(erp_action); | 2196 | zfcp_erp_action_to_running(erp_action); |
2241 | write_unlock(&adapter->erp_lock); | 2197 | write_unlock_irq(&adapter->erp_lock); |
2242 | 2198 | ||
2243 | zfcp_erp_timeout_init(erp_action); | 2199 | zfcp_erp_timeout_init(erp_action); |
2244 | ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL); | 2200 | ret = zfcp_fsf_exchange_port_data(erp_action, adapter, NULL); |
@@ -2258,11 +2214,11 @@ zfcp_erp_adapter_strategy_open_fsf_xport(struct zfcp_erp_action *erp_action) | |||
2258 | "%s)\n", zfcp_get_busid_by_adapter(adapter)); | 2214 | "%s)\n", zfcp_get_busid_by_adapter(adapter)); |
2259 | ret = ZFCP_ERP_FAILED; | 2215 | ret = ZFCP_ERP_FAILED; |
2260 | } | 2216 | } |
2261 | if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) { | 2217 | |
2262 | ZFCP_LOG_INFO("error: exchange port data failed (adapter " | 2218 | /* don't treat as error for the sake of compatibility */ |
2219 | if (!atomic_test_mask(ZFCP_STATUS_ADAPTER_XPORT_OK, &adapter->status)) | ||
2220 | ZFCP_LOG_INFO("warning: exchange port data failed (adapter " | ||
2263 | "%s\n", zfcp_get_busid_by_adapter(adapter)); | 2221 | "%s\n", zfcp_get_busid_by_adapter(adapter)); |
2264 | ret = ZFCP_ERP_FAILED; | ||
2265 | } | ||
2266 | 2222 | ||
2267 | return ret; | 2223 | return ret; |
2268 | } | 2224 | } |
@@ -2292,18 +2248,12 @@ zfcp_erp_adapter_strategy_open_fsf_statusread(struct zfcp_erp_action | |||
2292 | return retval; | 2248 | return retval; |
2293 | } | 2249 | } |
2294 | 2250 | ||
2295 | /* | 2251 | /** |
2296 | * function: zfcp_fsf_cleanup | 2252 | * zfcp_erp_adapter_strategy_close_fsf - stop FSF operations for an adapter |
2297 | * | ||
2298 | * purpose: cleanup FSF operation for specified adapter | ||
2299 | * | ||
2300 | * returns: 0 - FSF operation successfully cleaned up | ||
2301 | * !0 - failed to cleanup FSF operation for this adapter | ||
2302 | */ | 2253 | */ |
2303 | static int | 2254 | static void |
2304 | zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action) | 2255 | zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action) |
2305 | { | 2256 | { |
2306 | int retval = ZFCP_ERP_SUCCEEDED; | ||
2307 | struct zfcp_adapter *adapter = erp_action->adapter; | 2257 | struct zfcp_adapter *adapter = erp_action->adapter; |
2308 | 2258 | ||
2309 | /* | 2259 | /* |
@@ -2317,8 +2267,6 @@ zfcp_erp_adapter_strategy_close_fsf(struct zfcp_erp_action *erp_action) | |||
2317 | /* all ports and units are closed */ | 2267 | /* all ports and units are closed */ |
2318 | zfcp_erp_modify_adapter_status(adapter, | 2268 | zfcp_erp_modify_adapter_status(adapter, |
2319 | ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); | 2269 | ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR); |
2320 | |||
2321 | return retval; | ||
2322 | } | 2270 | } |
2323 | 2271 | ||
2324 | /* | 2272 | /* |
@@ -3293,10 +3241,8 @@ zfcp_erp_action_cleanup(int action, struct zfcp_adapter *adapter, | |||
3293 | } | 3241 | } |
3294 | 3242 | ||
3295 | 3243 | ||
3296 | static int | 3244 | void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) |
3297 | zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) | ||
3298 | { | 3245 | { |
3299 | int retval = 0; | ||
3300 | struct zfcp_port *port; | 3246 | struct zfcp_port *port; |
3301 | 3247 | ||
3302 | debug_text_event(adapter->erp_dbf, 5, "a_actab"); | 3248 | debug_text_event(adapter->erp_dbf, 5, "a_actab"); |
@@ -3305,14 +3251,10 @@ zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) | |||
3305 | else | 3251 | else |
3306 | list_for_each_entry(port, &adapter->port_list_head, list) | 3252 | list_for_each_entry(port, &adapter->port_list_head, list) |
3307 | zfcp_erp_action_dismiss_port(port); | 3253 | zfcp_erp_action_dismiss_port(port); |
3308 | |||
3309 | return retval; | ||
3310 | } | 3254 | } |
3311 | 3255 | ||
3312 | static int | 3256 | static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) |
3313 | zfcp_erp_action_dismiss_port(struct zfcp_port *port) | ||
3314 | { | 3257 | { |
3315 | int retval = 0; | ||
3316 | struct zfcp_unit *unit; | 3258 | struct zfcp_unit *unit; |
3317 | struct zfcp_adapter *adapter = port->adapter; | 3259 | struct zfcp_adapter *adapter = port->adapter; |
3318 | 3260 | ||
@@ -3323,22 +3265,16 @@ zfcp_erp_action_dismiss_port(struct zfcp_port *port) | |||
3323 | else | 3265 | else |
3324 | list_for_each_entry(unit, &port->unit_list_head, list) | 3266 | list_for_each_entry(unit, &port->unit_list_head, list) |
3325 | zfcp_erp_action_dismiss_unit(unit); | 3267 | zfcp_erp_action_dismiss_unit(unit); |
3326 | |||
3327 | return retval; | ||
3328 | } | 3268 | } |
3329 | 3269 | ||
3330 | static int | 3270 | static void zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) |
3331 | zfcp_erp_action_dismiss_unit(struct zfcp_unit *unit) | ||
3332 | { | 3271 | { |
3333 | int retval = 0; | ||
3334 | struct zfcp_adapter *adapter = unit->port->adapter; | 3272 | struct zfcp_adapter *adapter = unit->port->adapter; |
3335 | 3273 | ||
3336 | debug_text_event(adapter->erp_dbf, 5, "u_actab"); | 3274 | debug_text_event(adapter->erp_dbf, 5, "u_actab"); |
3337 | debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t)); | 3275 | debug_event(adapter->erp_dbf, 5, &unit->fcp_lun, sizeof (fcp_lun_t)); |
3338 | if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) | 3276 | if (atomic_test_mask(ZFCP_STATUS_COMMON_ERP_INUSE, &unit->status)) |
3339 | zfcp_erp_action_dismiss(&unit->erp_action); | 3277 | zfcp_erp_action_dismiss(&unit->erp_action); |
3340 | |||
3341 | return retval; | ||
3342 | } | 3278 | } |
3343 | 3279 | ||
3344 | static inline void | 3280 | static inline void |
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h index d02366004cdd..146d7a2b4c4a 100644 --- a/drivers/s390/scsi/zfcp_ext.h +++ b/drivers/s390/scsi/zfcp_ext.h | |||
@@ -63,7 +63,6 @@ extern int zfcp_qdio_allocate_queues(struct zfcp_adapter *); | |||
63 | extern void zfcp_qdio_free_queues(struct zfcp_adapter *); | 63 | extern void zfcp_qdio_free_queues(struct zfcp_adapter *); |
64 | extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *, | 64 | extern int zfcp_qdio_determine_pci(struct zfcp_qdio_queue *, |
65 | struct zfcp_fsf_req *); | 65 | struct zfcp_fsf_req *); |
66 | extern int zfcp_qdio_reqid_check(struct zfcp_adapter *, void *); | ||
67 | 66 | ||
68 | extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req | 67 | extern volatile struct qdio_buffer_element *zfcp_qdio_sbale_req |
69 | (struct zfcp_fsf_req *, int, int); | 68 | (struct zfcp_fsf_req *, int, int); |
@@ -140,6 +139,7 @@ extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, u32, int); | |||
140 | extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int); | 139 | extern int zfcp_erp_adapter_reopen(struct zfcp_adapter *, int); |
141 | extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int); | 140 | extern int zfcp_erp_adapter_shutdown(struct zfcp_adapter *, int); |
142 | extern void zfcp_erp_adapter_failed(struct zfcp_adapter *); | 141 | extern void zfcp_erp_adapter_failed(struct zfcp_adapter *); |
142 | extern void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *); | ||
143 | 143 | ||
144 | extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int); | 144 | extern void zfcp_erp_modify_port_status(struct zfcp_port *, u32, int); |
145 | extern int zfcp_erp_port_reopen(struct zfcp_port *, int); | 145 | extern int zfcp_erp_port_reopen(struct zfcp_port *, int); |
@@ -156,7 +156,7 @@ extern void zfcp_erp_unit_failed(struct zfcp_unit *); | |||
156 | extern int zfcp_erp_thread_setup(struct zfcp_adapter *); | 156 | extern int zfcp_erp_thread_setup(struct zfcp_adapter *); |
157 | extern int zfcp_erp_thread_kill(struct zfcp_adapter *); | 157 | extern int zfcp_erp_thread_kill(struct zfcp_adapter *); |
158 | extern int zfcp_erp_wait(struct zfcp_adapter *); | 158 | extern int zfcp_erp_wait(struct zfcp_adapter *); |
159 | extern int zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); | 159 | extern void zfcp_erp_async_handler(struct zfcp_erp_action *, unsigned long); |
160 | 160 | ||
161 | extern int zfcp_test_link(struct zfcp_port *); | 161 | extern int zfcp_test_link(struct zfcp_port *); |
162 | 162 | ||
@@ -190,5 +190,10 @@ extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *, | |||
190 | struct zfcp_fsf_req *); | 190 | struct zfcp_fsf_req *); |
191 | extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, | 191 | extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *, |
192 | struct scsi_cmnd *); | 192 | struct scsi_cmnd *); |
193 | extern void zfcp_reqlist_add(struct zfcp_adapter *, struct zfcp_fsf_req *); | ||
194 | extern void zfcp_reqlist_remove(struct zfcp_adapter *, unsigned long); | ||
195 | extern struct zfcp_fsf_req *zfcp_reqlist_ismember(struct zfcp_adapter *, | ||
196 | unsigned long); | ||
197 | extern int zfcp_reqlist_isempty(struct zfcp_adapter *); | ||
193 | 198 | ||
194 | #endif /* ZFCP_EXT_H */ | 199 | #endif /* ZFCP_EXT_H */ |
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 6335f9229184..ff2eacf5ec8c 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c | |||
@@ -49,7 +49,6 @@ static int zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *); | |||
49 | static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *, | 49 | static void zfcp_fsf_link_down_info_eval(struct zfcp_adapter *, |
50 | struct fsf_link_down_info *); | 50 | struct fsf_link_down_info *); |
51 | static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); | 51 | static int zfcp_fsf_req_dispatch(struct zfcp_fsf_req *); |
52 | static void zfcp_fsf_req_dismiss(struct zfcp_fsf_req *); | ||
53 | 52 | ||
54 | /* association between FSF command and FSF QTCB type */ | 53 | /* association between FSF command and FSF QTCB type */ |
55 | static u32 fsf_qtcb_type[] = { | 54 | static u32 fsf_qtcb_type[] = { |
@@ -146,47 +145,48 @@ zfcp_fsf_req_free(struct zfcp_fsf_req *fsf_req) | |||
146 | kfree(fsf_req); | 145 | kfree(fsf_req); |
147 | } | 146 | } |
148 | 147 | ||
149 | /* | 148 | /** |
150 | * function: | 149 | * zfcp_fsf_req_dismiss - dismiss a single fsf request |
151 | * | ||
152 | * purpose: | ||
153 | * | ||
154 | * returns: | ||
155 | * | ||
156 | * note: qdio queues shall be down (no ongoing inbound processing) | ||
157 | */ | 150 | */ |
158 | int | 151 | static void zfcp_fsf_req_dismiss(struct zfcp_adapter *adapter, |
159 | zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) | 152 | struct zfcp_fsf_req *fsf_req, |
153 | unsigned int counter) | ||
160 | { | 154 | { |
161 | struct zfcp_fsf_req *fsf_req, *tmp; | 155 | u64 dbg_tmp[2]; |
162 | unsigned long flags; | ||
163 | LIST_HEAD(remove_queue); | ||
164 | |||
165 | spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); | ||
166 | list_splice_init(&adapter->fsf_req_list_head, &remove_queue); | ||
167 | atomic_set(&adapter->fsf_reqs_active, 0); | ||
168 | spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); | ||
169 | 156 | ||
170 | list_for_each_entry_safe(fsf_req, tmp, &remove_queue, list) { | 157 | dbg_tmp[0] = (u64) atomic_read(&adapter->reqs_active); |
171 | list_del(&fsf_req->list); | 158 | dbg_tmp[1] = (u64) counter; |
172 | zfcp_fsf_req_dismiss(fsf_req); | 159 | debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); |
173 | } | 160 | list_del(&fsf_req->list); |
174 | 161 | fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; | |
175 | return 0; | 162 | zfcp_fsf_req_complete(fsf_req); |
176 | } | 163 | } |
177 | 164 | ||
178 | /* | 165 | /** |
179 | * function: | 166 | * zfcp_fsf_req_dismiss_all - dismiss all remaining fsf requests |
180 | * | ||
181 | * purpose: | ||
182 | * | ||
183 | * returns: | ||
184 | */ | 167 | */ |
185 | static void | 168 | int zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter) |
186 | zfcp_fsf_req_dismiss(struct zfcp_fsf_req *fsf_req) | ||
187 | { | 169 | { |
188 | fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED; | 170 | struct zfcp_fsf_req *request, *tmp; |
189 | zfcp_fsf_req_complete(fsf_req); | 171 | unsigned long flags; |
172 | unsigned int i, counter; | ||
173 | |||
174 | spin_lock_irqsave(&adapter->req_list_lock, flags); | ||
175 | atomic_set(&adapter->reqs_active, 0); | ||
176 | for (i=0; i<REQUEST_LIST_SIZE; i++) { | ||
177 | if (list_empty(&adapter->req_list[i])) | ||
178 | continue; | ||
179 | |||
180 | counter = 0; | ||
181 | list_for_each_entry_safe(request, tmp, | ||
182 | &adapter->req_list[i], list) { | ||
183 | zfcp_fsf_req_dismiss(adapter, request, counter); | ||
184 | counter++; | ||
185 | } | ||
186 | } | ||
187 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
188 | |||
189 | return 0; | ||
190 | } | 190 | } |
191 | 191 | ||
192 | /* | 192 | /* |
@@ -2227,7 +2227,7 @@ zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action, | |||
2227 | /* setup new FSF request */ | 2227 | /* setup new FSF request */ |
2228 | retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, | 2228 | retval = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, |
2229 | erp_action ? ZFCP_REQ_AUTO_CLEANUP : 0, | 2229 | erp_action ? ZFCP_REQ_AUTO_CLEANUP : 0, |
2230 | 0, &lock_flags, &fsf_req); | 2230 | NULL, &lock_flags, &fsf_req); |
2231 | if (retval < 0) { | 2231 | if (retval < 0) { |
2232 | ZFCP_LOG_INFO("error: Out of resources. Could not create an " | 2232 | ZFCP_LOG_INFO("error: Out of resources. Could not create an " |
2233 | "exchange port data request for" | 2233 | "exchange port data request for" |
@@ -4592,12 +4592,14 @@ static inline void | |||
4592 | zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) | 4592 | zfcp_fsf_req_qtcb_init(struct zfcp_fsf_req *fsf_req) |
4593 | { | 4593 | { |
4594 | if (likely(fsf_req->qtcb != NULL)) { | 4594 | if (likely(fsf_req->qtcb != NULL)) { |
4595 | fsf_req->qtcb->prefix.req_seq_no = fsf_req->adapter->fsf_req_seq_no; | 4595 | fsf_req->qtcb->prefix.req_seq_no = |
4596 | fsf_req->qtcb->prefix.req_id = (unsigned long)fsf_req; | 4596 | fsf_req->adapter->fsf_req_seq_no; |
4597 | fsf_req->qtcb->prefix.req_id = fsf_req->req_id; | ||
4597 | fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; | 4598 | fsf_req->qtcb->prefix.ulp_info = ZFCP_ULP_INFO_VERSION; |
4598 | fsf_req->qtcb->prefix.qtcb_type = fsf_qtcb_type[fsf_req->fsf_command]; | 4599 | fsf_req->qtcb->prefix.qtcb_type = |
4600 | fsf_qtcb_type[fsf_req->fsf_command]; | ||
4599 | fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; | 4601 | fsf_req->qtcb->prefix.qtcb_version = ZFCP_QTCB_VERSION; |
4600 | fsf_req->qtcb->header.req_handle = (unsigned long)fsf_req; | 4602 | fsf_req->qtcb->header.req_handle = fsf_req->req_id; |
4601 | fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command; | 4603 | fsf_req->qtcb->header.fsf_command = fsf_req->fsf_command; |
4602 | } | 4604 | } |
4603 | } | 4605 | } |
@@ -4654,6 +4656,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, | |||
4654 | { | 4656 | { |
4655 | volatile struct qdio_buffer_element *sbale; | 4657 | volatile struct qdio_buffer_element *sbale; |
4656 | struct zfcp_fsf_req *fsf_req = NULL; | 4658 | struct zfcp_fsf_req *fsf_req = NULL; |
4659 | unsigned long flags; | ||
4657 | int ret = 0; | 4660 | int ret = 0; |
4658 | struct zfcp_qdio_queue *req_queue = &adapter->request_queue; | 4661 | struct zfcp_qdio_queue *req_queue = &adapter->request_queue; |
4659 | 4662 | ||
@@ -4668,6 +4671,12 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, | |||
4668 | 4671 | ||
4669 | fsf_req->adapter = adapter; | 4672 | fsf_req->adapter = adapter; |
4670 | fsf_req->fsf_command = fsf_cmd; | 4673 | fsf_req->fsf_command = fsf_cmd; |
4674 | INIT_LIST_HEAD(&fsf_req->list); | ||
4675 | |||
4676 | /* unique request id */ | ||
4677 | spin_lock_irqsave(&adapter->req_list_lock, flags); | ||
4678 | fsf_req->req_id = adapter->req_no++; | ||
4679 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
4671 | 4680 | ||
4672 | zfcp_fsf_req_qtcb_init(fsf_req); | 4681 | zfcp_fsf_req_qtcb_init(fsf_req); |
4673 | 4682 | ||
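
A few lines above, each new FSF request draws a unique id from adapter->req_no under the request list lock; that id doubles as the hash key and, further down, as the tag written into the SBALE and the QTCB instead of the request pointer. A trivial sketch of such an id generator, with a pthread mutex standing in for the driver's spinlock:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t req_lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long req_no;              /* monotonically increasing counter */

    /* hand out a unique id per request; the id later selects the hash bucket */
    static unsigned long new_request_id(void)
    {
            unsigned long id;

            pthread_mutex_lock(&req_lock);
            id = req_no++;
            pthread_mutex_unlock(&req_lock);
            return id;
    }

    int main(void)
    {
            printf("%lu\n", new_request_id());
            printf("%lu\n", new_request_id());
            printf("%lu\n", new_request_id());
            return 0;
    }
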
@@ -4707,7 +4716,7 @@ zfcp_fsf_req_create(struct zfcp_adapter *adapter, u32 fsf_cmd, int req_flags, | |||
4707 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); | 4716 | sbale = zfcp_qdio_sbale_req(fsf_req, fsf_req->sbal_curr, 0); |
4708 | 4717 | ||
4709 | /* setup common SBALE fields */ | 4718 | /* setup common SBALE fields */ |
4710 | sbale[0].addr = fsf_req; | 4719 | sbale[0].addr = (void *) fsf_req->req_id; |
4711 | sbale[0].flags |= SBAL_FLAGS0_COMMAND; | 4720 | sbale[0].flags |= SBAL_FLAGS0_COMMAND; |
4712 | if (likely(fsf_req->qtcb != NULL)) { | 4721 | if (likely(fsf_req->qtcb != NULL)) { |
4713 | sbale[1].addr = (void *) fsf_req->qtcb; | 4722 | sbale[1].addr = (void *) fsf_req->qtcb; |
@@ -4747,7 +4756,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) | |||
4747 | volatile struct qdio_buffer_element *sbale; | 4756 | volatile struct qdio_buffer_element *sbale; |
4748 | int inc_seq_no; | 4757 | int inc_seq_no; |
4749 | int new_distance_from_int; | 4758 | int new_distance_from_int; |
4750 | unsigned long flags; | 4759 | u64 dbg_tmp[2]; |
4751 | int retval = 0; | 4760 | int retval = 0; |
4752 | 4761 | ||
4753 | adapter = fsf_req->adapter; | 4762 | adapter = fsf_req->adapter; |
@@ -4761,10 +4770,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) | |||
4761 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, | 4770 | ZFCP_HEX_DUMP(ZFCP_LOG_LEVEL_TRACE, (char *) sbale[1].addr, |
4762 | sbale[1].length); | 4771 | sbale[1].length); |
4763 | 4772 | ||
4764 | /* put allocated FSF request at list tail */ | 4773 | /* put allocated FSF request into hash table */ |
4765 | spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); | 4774 | spin_lock(&adapter->req_list_lock); |
4766 | list_add_tail(&fsf_req->list, &adapter->fsf_req_list_head); | 4775 | zfcp_reqlist_add(adapter, fsf_req); |
4767 | spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); | 4776 | spin_unlock(&adapter->req_list_lock); |
4768 | 4777 | ||
4769 | inc_seq_no = (fsf_req->qtcb != NULL); | 4778 | inc_seq_no = (fsf_req->qtcb != NULL); |
4770 | 4779 | ||
@@ -4803,6 +4812,10 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) | |||
4803 | QDIO_FLAG_SYNC_OUTPUT, | 4812 | QDIO_FLAG_SYNC_OUTPUT, |
4804 | 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); | 4813 | 0, fsf_req->sbal_first, fsf_req->sbal_number, NULL); |
4805 | 4814 | ||
4815 | dbg_tmp[0] = (unsigned long) sbale[0].addr; | ||
4816 | dbg_tmp[1] = (u64) retval; | ||
4817 | debug_event(adapter->erp_dbf, 4, (void *) dbg_tmp, 16); | ||
4818 | |||
4806 | if (unlikely(retval)) { | 4819 | if (unlikely(retval)) { |
4807 | /* Queues are down..... */ | 4820 | /* Queues are down..... */ |
4808 | retval = -EIO; | 4821 | retval = -EIO; |
@@ -4812,22 +4825,17 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) | |||
4812 | */ | 4825 | */ |
4813 | if (timer) | 4826 | if (timer) |
4814 | del_timer(timer); | 4827 | del_timer(timer); |
4815 | spin_lock_irqsave(&adapter->fsf_req_list_lock, flags); | 4828 | spin_lock(&adapter->req_list_lock); |
4816 | list_del(&fsf_req->list); | 4829 | zfcp_reqlist_remove(adapter, fsf_req->req_id); |
4817 | spin_unlock_irqrestore(&adapter->fsf_req_list_lock, flags); | 4830 | spin_unlock(&adapter->req_list_lock); |
4818 | /* | 4831 | /* undo changes in request queue made for this request */ |
4819 | * adjust the number of free SBALs in request queue as well as | ||
4820 | * position of first one | ||
4821 | */ | ||
4822 | zfcp_qdio_zero_sbals(req_queue->buffer, | 4832 | zfcp_qdio_zero_sbals(req_queue->buffer, |
4823 | fsf_req->sbal_first, fsf_req->sbal_number); | 4833 | fsf_req->sbal_first, fsf_req->sbal_number); |
4824 | atomic_add(fsf_req->sbal_number, &req_queue->free_count); | 4834 | atomic_add(fsf_req->sbal_number, &req_queue->free_count); |
4825 | req_queue->free_index -= fsf_req->sbal_number; /* increase */ | 4835 | req_queue->free_index -= fsf_req->sbal_number; |
4826 | req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; | 4836 | req_queue->free_index += QDIO_MAX_BUFFERS_PER_Q; |
4827 | req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ | 4837 | req_queue->free_index %= QDIO_MAX_BUFFERS_PER_Q; /* wrap */ |
4828 | ZFCP_LOG_DEBUG | 4838 | zfcp_erp_adapter_reopen(adapter, 0); |
4829 | ("error: do_QDIO failed. Buffers could not be enqueued " | ||
4830 | "to request queue.\n"); | ||
4831 | } else { | 4839 | } else { |
4832 | req_queue->distance_from_int = new_distance_from_int; | 4840 | req_queue->distance_from_int = new_distance_from_int; |
4833 | /* | 4841 | /* |
@@ -4843,7 +4851,7 @@ zfcp_fsf_req_send(struct zfcp_fsf_req *fsf_req, struct timer_list *timer) | |||
4843 | adapter->fsf_req_seq_no++; | 4851 | adapter->fsf_req_seq_no++; |
4844 | 4852 | ||
4845 | /* count FSF requests pending */ | 4853 | /* count FSF requests pending */ |
4846 | atomic_inc(&adapter->fsf_reqs_active); | 4854 | atomic_inc(&adapter->reqs_active); |
4847 | } | 4855 | } |
4848 | return retval; | 4856 | return retval; |
4849 | } | 4857 | } |
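The hunks above retire the adapter-wide fsf_req_list in favour of a request list that is keyed by the numeric req_id and protected by req_list_lock; the call sites use zfcp_reqlist_add(), zfcp_reqlist_ismember() and zfcp_reqlist_remove(). Those helpers are introduced elsewhere in the patch and are not part of this excerpt. A minimal sketch of how such helpers could look is given below; the bucket count, the req_list array field and its size are assumptions, not taken from the patch:

#include <linux/list.h>
#include <linux/spinlock.h>

/* Hedged sketch only: the real helpers may differ in naming, bucket
 * count and layout. Assumed: struct zfcp_adapter carries
 *	struct list_head req_list[ZFCP_REQ_LIST_BUCKETS];
 *	spinlock_t req_list_lock;
 */
#define ZFCP_REQ_LIST_BUCKETS 128	/* assumed bucket count */

static void zfcp_reqlist_add(struct zfcp_adapter *adapter,
			     struct zfcp_fsf_req *fsf_req)
{
	unsigned int idx = fsf_req->req_id % ZFCP_REQ_LIST_BUCKETS;

	/* caller holds adapter->req_list_lock */
	list_add_tail(&fsf_req->list, &adapter->req_list[idx]);
}

static struct zfcp_fsf_req *
zfcp_reqlist_ismember(struct zfcp_adapter *adapter, unsigned long req_id)
{
	struct zfcp_fsf_req *request;
	unsigned int idx = req_id % ZFCP_REQ_LIST_BUCKETS;

	/* caller holds adapter->req_list_lock */
	list_for_each_entry(request, &adapter->req_list[idx], list)
		if (request->req_id == req_id)
			return request;
	return NULL;
}

static void zfcp_reqlist_remove(struct zfcp_adapter *adapter,
				unsigned long req_id)
{
	struct zfcp_fsf_req *request;

	/* caller holds adapter->req_list_lock */
	request = zfcp_reqlist_ismember(adapter, req_id);
	if (request)
		list_del(&request->list);
}

Because the callers shown above already take adapter->req_list_lock, the helpers themselves can stay lock-free.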
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index 345a191926a4..dbd9f48e863e 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c | |||
@@ -282,6 +282,37 @@ zfcp_qdio_request_handler(struct ccw_device *ccw_device, | |||
282 | return; | 282 | return; |
283 | } | 283 | } |
284 | 284 | ||
285 | /** | ||
286 | * zfcp_qdio_reqid_check - checks for valid reqids or unsolicited status | ||
287 | */ | ||
288 | static int zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, | ||
289 | unsigned long req_id) | ||
290 | { | ||
291 | struct zfcp_fsf_req *fsf_req; | ||
292 | unsigned long flags; | ||
293 | |||
294 | debug_long_event(adapter->erp_dbf, 4, req_id); | ||
295 | |||
296 | spin_lock_irqsave(&adapter->req_list_lock, flags); | ||
297 | fsf_req = zfcp_reqlist_ismember(adapter, req_id); | ||
298 | |||
299 | if (!fsf_req) { | ||
300 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
301 | ZFCP_LOG_NORMAL("error: unknown request id (%ld).\n", req_id); | ||
302 | zfcp_erp_adapter_reopen(adapter, 0); | ||
303 | return -EINVAL; | ||
304 | } | ||
305 | |||
306 | zfcp_reqlist_remove(adapter, req_id); | ||
307 | atomic_dec(&adapter->reqs_active); | ||
308 | spin_unlock_irqrestore(&adapter->req_list_lock, flags); | ||
309 | |||
310 | /* finish the FSF request */ | ||
311 | zfcp_fsf_req_complete(fsf_req); | ||
312 | |||
313 | return 0; | ||
314 | } | ||
315 | |||
285 | /* | 316 | /* |
286 | * function: zfcp_qdio_response_handler | 317 | * function: zfcp_qdio_response_handler |
287 | * | 318 | * |
@@ -344,7 +375,7 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device, | |||
344 | /* look for QDIO request identifiers in SB */ | 375 | /* look for QDIO request identifiers in SB */ |
345 | buffere = &buffer->element[buffere_index]; | 376 | buffere = &buffer->element[buffere_index]; |
346 | retval = zfcp_qdio_reqid_check(adapter, | 377 | retval = zfcp_qdio_reqid_check(adapter, |
347 | (void *) buffere->addr); | 378 | (unsigned long) buffere->addr); |
348 | 379 | ||
349 | if (retval) { | 380 | if (retval) { |
350 | ZFCP_LOG_NORMAL("bug: unexpected inbound " | 381 | ZFCP_LOG_NORMAL("bug: unexpected inbound " |
@@ -415,51 +446,6 @@ zfcp_qdio_response_handler(struct ccw_device *ccw_device, | |||
415 | return; | 446 | return; |
416 | } | 447 | } |
417 | 448 | ||
418 | /* | ||
419 | * function: zfcp_qdio_reqid_check | ||
420 | * | ||
421 | * purpose: checks for valid reqids or unsolicited status | ||
422 | * | ||
423 | * returns: 0 - valid request id or unsolicited status | ||
424 | * !0 - otherwise | ||
425 | */ | ||
426 | int | ||
427 | zfcp_qdio_reqid_check(struct zfcp_adapter *adapter, void *sbale_addr) | ||
428 | { | ||
429 | struct zfcp_fsf_req *fsf_req; | ||
430 | |||
431 | /* invalid (per convention used in this driver) */ | ||
432 | if (unlikely(!sbale_addr)) { | ||
433 | ZFCP_LOG_NORMAL("bug: invalid reqid\n"); | ||
434 | return -EINVAL; | ||
435 | } | ||
436 | |||
437 | /* valid request id and thus (hopefully :) valid fsf_req address */ | ||
438 | fsf_req = (struct zfcp_fsf_req *) sbale_addr; | ||
439 | |||
440 | /* serialize with zfcp_fsf_req_dismiss_all */ | ||
441 | spin_lock(&adapter->fsf_req_list_lock); | ||
442 | if (list_empty(&adapter->fsf_req_list_head)) { | ||
443 | spin_unlock(&adapter->fsf_req_list_lock); | ||
444 | return 0; | ||
445 | } | ||
446 | list_del(&fsf_req->list); | ||
447 | atomic_dec(&adapter->fsf_reqs_active); | ||
448 | spin_unlock(&adapter->fsf_req_list_lock); | ||
449 | |||
450 | if (unlikely(adapter != fsf_req->adapter)) { | ||
451 | ZFCP_LOG_NORMAL("bug: invalid reqid (fsf_req=%p, " | ||
452 | "fsf_req->adapter=%p, adapter=%p)\n", | ||
453 | fsf_req, fsf_req->adapter, adapter); | ||
454 | return -EINVAL; | ||
455 | } | ||
456 | |||
457 | /* finish the FSF request */ | ||
458 | zfcp_fsf_req_complete(fsf_req); | ||
459 | |||
460 | return 0; | ||
461 | } | ||
462 | |||
463 | /** | 449 | /** |
464 | * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue | 450 | * zfcp_qdio_sbale_get - return pointer to SBALE of qdio_queue |
465 | * @queue: queue from which SBALE should be returned | 451 | * @queue: queue from which SBALE should be returned |
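With this change the request id travels through the first SBALE's address field as a plain number rather than as a kernel pointer: the send side stores (void *) fsf_req->req_id, and zfcp_qdio_reqid_check() recovers it with a cast back to unsigned long before looking it up. The small user-space illustration below (not driver code) shows that the round trip is lossless whenever the id fits into a pointer-sized field:

#include <assert.h>

/* Illustration only: storing a numeric request id in a pointer-sized
 * slot and recovering it by casting back, as done with sbale[0].addr
 * in the send and response paths above.
 */
struct fake_sbale {
	void *addr;		/* stands in for the SBALE address field */
};

int main(void)
{
	struct fake_sbale sbale;
	unsigned long req_id = 42;	/* hypothetical request id */

	sbale.addr = (void *) req_id;			/* send side */
	assert((unsigned long) sbale.addr == req_id);	/* response side */
	return 0;
}

Compared with the old scheme of treating the address as a struct zfcp_fsf_req pointer, an unknown or stale id is now merely rejected by zfcp_qdio_reqid_check(), which reopens the adapter, instead of being dereferenced.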
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c index 46e14f22ec18..1bb55086db9f 100644 --- a/drivers/s390/scsi/zfcp_scsi.c +++ b/drivers/s390/scsi/zfcp_scsi.c | |||
@@ -30,7 +30,6 @@ static int zfcp_scsi_queuecommand(struct scsi_cmnd *, | |||
30 | void (*done) (struct scsi_cmnd *)); | 30 | void (*done) (struct scsi_cmnd *)); |
31 | static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *); | 31 | static int zfcp_scsi_eh_abort_handler(struct scsi_cmnd *); |
32 | static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); | 32 | static int zfcp_scsi_eh_device_reset_handler(struct scsi_cmnd *); |
33 | static int zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *); | ||
34 | static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); | 33 | static int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *); |
35 | static int zfcp_task_management_function(struct zfcp_unit *, u8, | 34 | static int zfcp_task_management_function(struct zfcp_unit *, u8, |
36 | struct scsi_cmnd *); | 35 | struct scsi_cmnd *); |
@@ -44,33 +43,24 @@ struct scsi_transport_template *zfcp_transport_template; | |||
44 | 43 | ||
45 | struct zfcp_data zfcp_data = { | 44 | struct zfcp_data zfcp_data = { |
46 | .scsi_host_template = { | 45 | .scsi_host_template = { |
47 | name: ZFCP_NAME, | 46 | .name = ZFCP_NAME, |
48 | proc_name: "zfcp", | 47 | .proc_name = "zfcp", |
49 | proc_info: NULL, | 48 | .slave_alloc = zfcp_scsi_slave_alloc, |
50 | detect: NULL, | 49 | .slave_configure = zfcp_scsi_slave_configure, |
51 | slave_alloc: zfcp_scsi_slave_alloc, | 50 | .slave_destroy = zfcp_scsi_slave_destroy, |
52 | slave_configure: zfcp_scsi_slave_configure, | 51 | .queuecommand = zfcp_scsi_queuecommand, |
53 | slave_destroy: zfcp_scsi_slave_destroy, | 52 | .eh_abort_handler = zfcp_scsi_eh_abort_handler, |
54 | queuecommand: zfcp_scsi_queuecommand, | 53 | .eh_device_reset_handler = zfcp_scsi_eh_device_reset_handler, |
55 | eh_abort_handler: zfcp_scsi_eh_abort_handler, | 54 | .eh_bus_reset_handler = zfcp_scsi_eh_host_reset_handler, |
56 | eh_device_reset_handler: zfcp_scsi_eh_device_reset_handler, | 55 | .eh_host_reset_handler = zfcp_scsi_eh_host_reset_handler, |
57 | eh_bus_reset_handler: zfcp_scsi_eh_bus_reset_handler, | 56 | .can_queue = 4096, |
58 | eh_host_reset_handler: zfcp_scsi_eh_host_reset_handler, | 57 | .this_id = -1, |
59 | /* FIXME(openfcp): Tune */ | 58 | .sg_tablesize = ZFCP_MAX_SBALES_PER_REQ, |
60 | can_queue: 4096, | 59 | .cmd_per_lun = 1, |
61 | this_id: -1, | 60 | .use_clustering = 1, |
62 | /* | 61 | .sdev_attrs = zfcp_sysfs_sdev_attrs, |
63 | * FIXME: | ||
64 | * one less? can zfcp_create_sbale cope with it? | ||
65 | */ | ||
66 | sg_tablesize: ZFCP_MAX_SBALES_PER_REQ, | ||
67 | cmd_per_lun: 1, | ||
68 | unchecked_isa_dma: 0, | ||
69 | use_clustering: 1, | ||
70 | sdev_attrs: zfcp_sysfs_sdev_attrs, | ||
71 | }, | 62 | }, |
72 | .driver_version = ZFCP_VERSION, | 63 | .driver_version = ZFCP_VERSION, |
73 | /* rest initialised with zeros */ | ||
74 | }; | 64 | }; |
75 | 65 | ||
76 | /* Find start of Response Information in FCP response unit*/ | 66 | /* Find start of Response Information in FCP response unit*/ |
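The scsi_host_template hunk above replaces the obsolete GNU "field: value" initializer extension with standard C99 designated initializers. A minimal stand-alone example of the same transformation, using made-up names, looks like this:

struct ops {
	int (*probe)(void);
	int max_queue;
};

static int my_probe(void)	/* hypothetical callback */
{
	return 0;
}

/* old GNU extension, as removed above:
 *	static struct ops o = { probe: my_probe, max_queue: 4096 };
 */
static struct ops o = {
	.probe     = my_probe,	/* C99 designated initializer */
	.max_queue = 4096,
	/* fields that are not named are implicitly zero, so explicit
	 * entries such as unchecked_isa_dma: 0 can simply be dropped */
};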
@@ -177,8 +167,14 @@ zfcp_scsi_slave_alloc(struct scsi_device *sdp) | |||
177 | return retval; | 167 | return retval; |
178 | } | 168 | } |
179 | 169 | ||
180 | static void | 170 | /** |
181 | zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) | 171 | * zfcp_scsi_slave_destroy - called when scsi device is removed |
172 | * | ||
173 | * Remove reference to associated scsi device for a zfcp_unit. | ||
173 | * Remove reference to associated scsi device for a zfcp_unit. | ||
174 | * Mark zfcp_unit as failed. The scsi device might be deleted via sysfs | ||
175 | * or a scan for this device might have failed. | ||
176 | */ | ||
177 | static void zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) | ||
182 | { | 178 | { |
183 | struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; | 179 | struct zfcp_unit *unit = (struct zfcp_unit *) sdpnt->hostdata; |
184 | 180 | ||
@@ -186,6 +182,7 @@ zfcp_scsi_slave_destroy(struct scsi_device *sdpnt) | |||
186 | atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); | 182 | atomic_clear_mask(ZFCP_STATUS_UNIT_REGISTERED, &unit->status); |
187 | sdpnt->hostdata = NULL; | 183 | sdpnt->hostdata = NULL; |
188 | unit->device = NULL; | 184 | unit->device = NULL; |
185 | zfcp_erp_unit_failed(unit); | ||
189 | zfcp_unit_put(unit); | 186 | zfcp_unit_put(unit); |
190 | } else { | 187 | } else { |
191 | ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at " | 188 | ZFCP_LOG_NORMAL("bug: no unit associated with SCSI device at " |
@@ -550,35 +547,38 @@ zfcp_task_management_function(struct zfcp_unit *unit, u8 tm_flags, | |||
550 | } | 547 | } |
551 | 548 | ||
552 | /** | 549 | /** |
553 | * zfcp_scsi_eh_bus_reset_handler - reset bus (reopen adapter) | 550 | * zfcp_scsi_eh_host_reset_handler - handler for host and bus reset |
551 | * | ||
552 | * If ERP is already running it will be stopped. | ||
554 | */ | 553 | */ |
555 | int | 554 | int zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) |
556 | zfcp_scsi_eh_bus_reset_handler(struct scsi_cmnd *scpnt) | ||
557 | { | 555 | { |
558 | struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; | 556 | struct zfcp_unit *unit; |
559 | struct zfcp_adapter *adapter = unit->port->adapter; | 557 | struct zfcp_adapter *adapter; |
560 | 558 | unsigned long flags; | |
561 | ZFCP_LOG_NORMAL("bus reset because of problems with " | ||
562 | "unit 0x%016Lx\n", unit->fcp_lun); | ||
563 | zfcp_erp_adapter_reopen(adapter, 0); | ||
564 | zfcp_erp_wait(adapter); | ||
565 | |||
566 | return SUCCESS; | ||
567 | } | ||
568 | 559 | ||
569 | /** | 560 | unit = (struct zfcp_unit*) scpnt->device->hostdata; |
570 | * zfcp_scsi_eh_host_reset_handler - reset host (reopen adapter) | 561 | adapter = unit->port->adapter; |
571 | */ | ||
572 | int | ||
573 | zfcp_scsi_eh_host_reset_handler(struct scsi_cmnd *scpnt) | ||
574 | { | ||
575 | struct zfcp_unit *unit = (struct zfcp_unit*) scpnt->device->hostdata; | ||
576 | struct zfcp_adapter *adapter = unit->port->adapter; | ||
577 | 562 | ||
578 | ZFCP_LOG_NORMAL("host reset because of problems with " | 563 | ZFCP_LOG_NORMAL("host/bus reset because of problems with " |
579 | "unit 0x%016Lx\n", unit->fcp_lun); | 564 | "unit 0x%016Lx\n", unit->fcp_lun); |
580 | zfcp_erp_adapter_reopen(adapter, 0); | 565 | |
581 | zfcp_erp_wait(adapter); | 566 | write_lock_irqsave(&adapter->erp_lock, flags); |
567 | if (atomic_test_mask(ZFCP_STATUS_ADAPTER_ERP_PENDING, | ||
568 | &adapter->status)) { | ||
569 | zfcp_erp_modify_adapter_status(adapter, | ||
570 | ZFCP_STATUS_COMMON_UNBLOCKED|ZFCP_STATUS_COMMON_OPEN, | ||
571 | ZFCP_CLEAR); | ||
572 | zfcp_erp_action_dismiss_adapter(adapter); | ||
573 | write_unlock_irqrestore(&adapter->erp_lock, flags); | ||
574 | zfcp_fsf_req_dismiss_all(adapter); | ||
575 | adapter->fsf_req_seq_no = 0; | ||
576 | zfcp_erp_adapter_reopen(adapter, 0); | ||
577 | } else { | ||
578 | write_unlock_irqrestore(&adapter->erp_lock, flags); | ||
579 | zfcp_erp_adapter_reopen(adapter, 0); | ||
580 | zfcp_erp_wait(adapter); | ||
581 | } | ||
582 | 582 | ||
583 | return SUCCESS; | 583 | return SUCCESS; |
584 | } | 584 | } |
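When error recovery is already pending, the new handler dismisses all outstanding FSF requests via zfcp_fsf_req_dismiss_all() before reopening the adapter. That routine is not shown in this excerpt; one plausible shape for it, under the same assumptions as the earlier request-list sketch (bucket array, bucket count, and the driver's DISMISSED status flag), would splice all buckets onto a private list under req_list_lock and finish the requests afterwards:

/* Hedged sketch, not the patch's actual implementation. */
static void example_req_dismiss_all(struct zfcp_adapter *adapter)
{
	struct zfcp_fsf_req *request, *tmp;
	unsigned long flags;
	LIST_HEAD(remove_queue);
	unsigned int i;

	spin_lock_irqsave(&adapter->req_list_lock, flags);
	atomic_set(&adapter->reqs_active, 0);	/* nothing pending any more */
	for (i = 0; i < ZFCP_REQ_LIST_BUCKETS; i++)	/* assumed name */
		list_splice_init(&adapter->req_list[i], &remove_queue);
	spin_unlock_irqrestore(&adapter->req_list_lock, flags);

	/* complete the dismissed requests outside the lock */
	list_for_each_entry_safe(request, tmp, &remove_queue, list) {
		list_del(&request->list);
		request->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
		zfcp_fsf_req_complete(request);	/* as in zfcp_qdio_reqid_check */
	}
}

Splicing under the lock and completing outside it keeps the irq-disabled section short while still guaranteeing that the response handler can no longer find the dismissed requests.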