diff options
Diffstat (limited to 'drivers/s390')
33 files changed, 863 insertions, 467 deletions
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c index f1b7fdc58a5f..82758cbb220b 100644 --- a/drivers/s390/block/dasd.c +++ b/drivers/s390/block/dasd.c | |||
@@ -246,7 +246,7 @@ static struct dentry *dasd_debugfs_setup(const char *name, | |||
246 | static int dasd_state_known_to_basic(struct dasd_device *device) | 246 | static int dasd_state_known_to_basic(struct dasd_device *device) |
247 | { | 247 | { |
248 | struct dasd_block *block = device->block; | 248 | struct dasd_block *block = device->block; |
249 | int rc; | 249 | int rc = 0; |
250 | 250 | ||
251 | /* Allocate and register gendisk structure. */ | 251 | /* Allocate and register gendisk structure. */ |
252 | if (block) { | 252 | if (block) { |
@@ -273,7 +273,8 @@ static int dasd_state_known_to_basic(struct dasd_device *device) | |||
273 | DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); | 273 | DBF_DEV_EVENT(DBF_EMERG, device, "%s", "debug area created"); |
274 | 274 | ||
275 | device->state = DASD_STATE_BASIC; | 275 | device->state = DASD_STATE_BASIC; |
276 | return 0; | 276 | |
277 | return rc; | ||
277 | } | 278 | } |
278 | 279 | ||
279 | /* | 280 | /* |
@@ -282,6 +283,7 @@ static int dasd_state_known_to_basic(struct dasd_device *device) | |||
282 | static int dasd_state_basic_to_known(struct dasd_device *device) | 283 | static int dasd_state_basic_to_known(struct dasd_device *device) |
283 | { | 284 | { |
284 | int rc; | 285 | int rc; |
286 | |||
285 | if (device->block) { | 287 | if (device->block) { |
286 | dasd_profile_exit(&device->block->profile); | 288 | dasd_profile_exit(&device->block->profile); |
287 | if (device->block->debugfs_dentry) | 289 | if (device->block->debugfs_dentry) |
@@ -332,8 +334,10 @@ static int dasd_state_basic_to_ready(struct dasd_device *device) | |||
332 | if (block->base->discipline->do_analysis != NULL) | 334 | if (block->base->discipline->do_analysis != NULL) |
333 | rc = block->base->discipline->do_analysis(block); | 335 | rc = block->base->discipline->do_analysis(block); |
334 | if (rc) { | 336 | if (rc) { |
335 | if (rc != -EAGAIN) | 337 | if (rc != -EAGAIN) { |
336 | device->state = DASD_STATE_UNFMT; | 338 | device->state = DASD_STATE_UNFMT; |
339 | goto out; | ||
340 | } | ||
337 | return rc; | 341 | return rc; |
338 | } | 342 | } |
339 | dasd_setup_queue(block); | 343 | dasd_setup_queue(block); |
@@ -341,11 +345,16 @@ static int dasd_state_basic_to_ready(struct dasd_device *device) | |||
341 | block->blocks << block->s2b_shift); | 345 | block->blocks << block->s2b_shift); |
342 | device->state = DASD_STATE_READY; | 346 | device->state = DASD_STATE_READY; |
343 | rc = dasd_scan_partitions(block); | 347 | rc = dasd_scan_partitions(block); |
344 | if (rc) | 348 | if (rc) { |
345 | device->state = DASD_STATE_BASIC; | 349 | device->state = DASD_STATE_BASIC; |
350 | return rc; | ||
351 | } | ||
346 | } else { | 352 | } else { |
347 | device->state = DASD_STATE_READY; | 353 | device->state = DASD_STATE_READY; |
348 | } | 354 | } |
355 | out: | ||
356 | if (device->discipline->basic_to_ready) | ||
357 | rc = device->discipline->basic_to_ready(device); | ||
349 | return rc; | 358 | return rc; |
350 | } | 359 | } |
351 | 360 | ||
@@ -368,6 +377,11 @@ static int dasd_state_ready_to_basic(struct dasd_device *device) | |||
368 | { | 377 | { |
369 | int rc; | 378 | int rc; |
370 | 379 | ||
380 | if (device->discipline->ready_to_basic) { | ||
381 | rc = device->discipline->ready_to_basic(device); | ||
382 | if (rc) | ||
383 | return rc; | ||
384 | } | ||
371 | device->state = DASD_STATE_BASIC; | 385 | device->state = DASD_STATE_BASIC; |
372 | if (device->block) { | 386 | if (device->block) { |
373 | struct dasd_block *block = device->block; | 387 | struct dasd_block *block = device->block; |
@@ -402,16 +416,10 @@ static int dasd_state_unfmt_to_basic(struct dasd_device *device) | |||
402 | static int | 416 | static int |
403 | dasd_state_ready_to_online(struct dasd_device * device) | 417 | dasd_state_ready_to_online(struct dasd_device * device) |
404 | { | 418 | { |
405 | int rc; | ||
406 | struct gendisk *disk; | 419 | struct gendisk *disk; |
407 | struct disk_part_iter piter; | 420 | struct disk_part_iter piter; |
408 | struct hd_struct *part; | 421 | struct hd_struct *part; |
409 | 422 | ||
410 | if (device->discipline->ready_to_online) { | ||
411 | rc = device->discipline->ready_to_online(device); | ||
412 | if (rc) | ||
413 | return rc; | ||
414 | } | ||
415 | device->state = DASD_STATE_ONLINE; | 423 | device->state = DASD_STATE_ONLINE; |
416 | if (device->block) { | 424 | if (device->block) { |
417 | dasd_schedule_block_bh(device->block); | 425 | dasd_schedule_block_bh(device->block); |
@@ -444,6 +452,7 @@ static int dasd_state_online_to_ready(struct dasd_device *device) | |||
444 | if (rc) | 452 | if (rc) |
445 | return rc; | 453 | return rc; |
446 | } | 454 | } |
455 | |||
447 | device->state = DASD_STATE_READY; | 456 | device->state = DASD_STATE_READY; |
448 | if (device->block && !(device->features & DASD_FEATURE_USERAW)) { | 457 | if (device->block && !(device->features & DASD_FEATURE_USERAW)) { |
449 | disk = device->block->bdev->bd_disk; | 458 | disk = device->block->bdev->bd_disk; |
@@ -2223,6 +2232,77 @@ static int _dasd_sleep_on(struct dasd_ccw_req *maincqr, int interruptible) | |||
2223 | return rc; | 2232 | return rc; |
2224 | } | 2233 | } |
2225 | 2234 | ||
2235 | static inline int _wait_for_wakeup_queue(struct list_head *ccw_queue) | ||
2236 | { | ||
2237 | struct dasd_ccw_req *cqr; | ||
2238 | |||
2239 | list_for_each_entry(cqr, ccw_queue, blocklist) { | ||
2240 | if (cqr->callback_data != DASD_SLEEPON_END_TAG) | ||
2241 | return 0; | ||
2242 | } | ||
2243 | |||
2244 | return 1; | ||
2245 | } | ||
2246 | |||
2247 | static int _dasd_sleep_on_queue(struct list_head *ccw_queue, int interruptible) | ||
2248 | { | ||
2249 | struct dasd_device *device; | ||
2250 | int rc; | ||
2251 | struct dasd_ccw_req *cqr, *n; | ||
2252 | |||
2253 | retry: | ||
2254 | list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { | ||
2255 | device = cqr->startdev; | ||
2256 | if (cqr->status != DASD_CQR_FILLED) /*could be failed*/ | ||
2257 | continue; | ||
2258 | |||
2259 | if (test_bit(DASD_FLAG_LOCK_STOLEN, &device->flags) && | ||
2260 | !test_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags)) { | ||
2261 | cqr->status = DASD_CQR_FAILED; | ||
2262 | cqr->intrc = -EPERM; | ||
2263 | continue; | ||
2264 | } | ||
2265 | /*Non-temporary stop condition will trigger fail fast*/ | ||
2266 | if (device->stopped & ~DASD_STOPPED_PENDING && | ||
2267 | test_bit(DASD_CQR_FLAGS_FAILFAST, &cqr->flags) && | ||
2268 | !dasd_eer_enabled(device)) { | ||
2269 | cqr->status = DASD_CQR_FAILED; | ||
2270 | cqr->intrc = -EAGAIN; | ||
2271 | continue; | ||
2272 | } | ||
2273 | |||
2274 | /*Don't try to start requests if device is stopped*/ | ||
2275 | if (interruptible) { | ||
2276 | rc = wait_event_interruptible( | ||
2277 | generic_waitq, !device->stopped); | ||
2278 | if (rc == -ERESTARTSYS) { | ||
2279 | cqr->status = DASD_CQR_FAILED; | ||
2280 | cqr->intrc = rc; | ||
2281 | continue; | ||
2282 | } | ||
2283 | } else | ||
2284 | wait_event(generic_waitq, !(device->stopped)); | ||
2285 | |||
2286 | if (!cqr->callback) | ||
2287 | cqr->callback = dasd_wakeup_cb; | ||
2288 | cqr->callback_data = DASD_SLEEPON_START_TAG; | ||
2289 | dasd_add_request_tail(cqr); | ||
2290 | } | ||
2291 | |||
2292 | wait_event(generic_waitq, _wait_for_wakeup_queue(ccw_queue)); | ||
2293 | |||
2294 | rc = 0; | ||
2295 | list_for_each_entry_safe(cqr, n, ccw_queue, blocklist) { | ||
2296 | if (__dasd_sleep_on_erp(cqr)) | ||
2297 | rc = 1; | ||
2298 | } | ||
2299 | if (rc) | ||
2300 | goto retry; | ||
2301 | |||
2302 | |||
2303 | return 0; | ||
2304 | } | ||
2305 | |||
2226 | /* | 2306 | /* |
2227 | * Queue a request to the tail of the device ccw_queue and wait for | 2307 | * Queue a request to the tail of the device ccw_queue and wait for |
2228 | * it's completion. | 2308 | * it's completion. |
@@ -2233,6 +2313,15 @@ int dasd_sleep_on(struct dasd_ccw_req *cqr) | |||
2233 | } | 2313 | } |
2234 | 2314 | ||
2235 | /* | 2315 | /* |
2316 | * Start requests from a ccw_queue and wait for their completion. | ||
2317 | */ | ||
2318 | int dasd_sleep_on_queue(struct list_head *ccw_queue) | ||
2319 | { | ||
2320 | return _dasd_sleep_on_queue(ccw_queue, 0); | ||
2321 | } | ||
2322 | EXPORT_SYMBOL(dasd_sleep_on_queue); | ||
2323 | |||
2324 | /* | ||
2236 | * Queue a request to the tail of the device ccw_queue and wait | 2325 | * Queue a request to the tail of the device ccw_queue and wait |
2237 | * interruptible for it's completion. | 2326 | * interruptible for it's completion. |
2238 | */ | 2327 | */ |
@@ -2663,6 +2752,26 @@ static void _dasd_wake_block_flush_cb(struct dasd_ccw_req *cqr, void *data) | |||
2663 | } | 2752 | } |
2664 | 2753 | ||
2665 | /* | 2754 | /* |
2755 | * Requeue a request back to the block request queue | ||
2756 | * only works for block requests | ||
2757 | */ | ||
2758 | static int _dasd_requeue_request(struct dasd_ccw_req *cqr) | ||
2759 | { | ||
2760 | struct dasd_block *block = cqr->block; | ||
2761 | struct request *req; | ||
2762 | unsigned long flags; | ||
2763 | |||
2764 | if (!block) | ||
2765 | return -EINVAL; | ||
2766 | spin_lock_irqsave(&block->queue_lock, flags); | ||
2767 | req = (struct request *) cqr->callback_data; | ||
2768 | blk_requeue_request(block->request_queue, req); | ||
2769 | spin_unlock_irqrestore(&block->queue_lock, flags); | ||
2770 | |||
2771 | return 0; | ||
2772 | } | ||
2773 | |||
2774 | /* | ||
2666 | * Go through all request on the dasd_block request queue, cancel them | 2775 | * Go through all request on the dasd_block request queue, cancel them |
2667 | * on the respective dasd_device, and return them to the generic | 2776 | * on the respective dasd_device, and return them to the generic |
2668 | * block layer. | 2777 | * block layer. |
@@ -3380,10 +3489,11 @@ EXPORT_SYMBOL_GPL(dasd_generic_verify_path); | |||
3380 | 3489 | ||
3381 | int dasd_generic_pm_freeze(struct ccw_device *cdev) | 3490 | int dasd_generic_pm_freeze(struct ccw_device *cdev) |
3382 | { | 3491 | { |
3492 | struct dasd_device *device = dasd_device_from_cdev(cdev); | ||
3493 | struct list_head freeze_queue; | ||
3383 | struct dasd_ccw_req *cqr, *n; | 3494 | struct dasd_ccw_req *cqr, *n; |
3495 | struct dasd_ccw_req *refers; | ||
3384 | int rc; | 3496 | int rc; |
3385 | struct list_head freeze_queue; | ||
3386 | struct dasd_device *device = dasd_device_from_cdev(cdev); | ||
3387 | 3497 | ||
3388 | if (IS_ERR(device)) | 3498 | if (IS_ERR(device)) |
3389 | return PTR_ERR(device); | 3499 | return PTR_ERR(device); |
@@ -3396,7 +3506,8 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) | |||
3396 | 3506 | ||
3397 | /* disallow new I/O */ | 3507 | /* disallow new I/O */ |
3398 | dasd_device_set_stop_bits(device, DASD_STOPPED_PM); | 3508 | dasd_device_set_stop_bits(device, DASD_STOPPED_PM); |
3399 | /* clear active requests */ | 3509 | |
3510 | /* clear active requests and requeue them to block layer if possible */ | ||
3400 | INIT_LIST_HEAD(&freeze_queue); | 3511 | INIT_LIST_HEAD(&freeze_queue); |
3401 | spin_lock_irq(get_ccwdev_lock(cdev)); | 3512 | spin_lock_irq(get_ccwdev_lock(cdev)); |
3402 | rc = 0; | 3513 | rc = 0; |
@@ -3416,7 +3527,6 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) | |||
3416 | } | 3527 | } |
3417 | list_move_tail(&cqr->devlist, &freeze_queue); | 3528 | list_move_tail(&cqr->devlist, &freeze_queue); |
3418 | } | 3529 | } |
3419 | |||
3420 | spin_unlock_irq(get_ccwdev_lock(cdev)); | 3530 | spin_unlock_irq(get_ccwdev_lock(cdev)); |
3421 | 3531 | ||
3422 | list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { | 3532 | list_for_each_entry_safe(cqr, n, &freeze_queue, devlist) { |
@@ -3424,12 +3534,38 @@ int dasd_generic_pm_freeze(struct ccw_device *cdev) | |||
3424 | (cqr->status != DASD_CQR_CLEAR_PENDING)); | 3534 | (cqr->status != DASD_CQR_CLEAR_PENDING)); |
3425 | if (cqr->status == DASD_CQR_CLEARED) | 3535 | if (cqr->status == DASD_CQR_CLEARED) |
3426 | cqr->status = DASD_CQR_QUEUED; | 3536 | cqr->status = DASD_CQR_QUEUED; |
3537 | |||
3538 | /* requeue requests to blocklayer will only work for | ||
3539 | block device requests */ | ||
3540 | if (_dasd_requeue_request(cqr)) | ||
3541 | continue; | ||
3542 | |||
3543 | /* remove requests from device and block queue */ | ||
3544 | list_del_init(&cqr->devlist); | ||
3545 | while (cqr->refers != NULL) { | ||
3546 | refers = cqr->refers; | ||
3547 | /* remove the request from the block queue */ | ||
3548 | list_del(&cqr->blocklist); | ||
3549 | /* free the finished erp request */ | ||
3550 | dasd_free_erp_request(cqr, cqr->memdev); | ||
3551 | cqr = refers; | ||
3552 | } | ||
3553 | if (cqr->block) | ||
3554 | list_del_init(&cqr->blocklist); | ||
3555 | cqr->block->base->discipline->free_cp( | ||
3556 | cqr, (struct request *) cqr->callback_data); | ||
3427 | } | 3557 | } |
3428 | /* move freeze_queue to start of the ccw_queue */ | ||
3429 | spin_lock_irq(get_ccwdev_lock(cdev)); | ||
3430 | list_splice_tail(&freeze_queue, &device->ccw_queue); | ||
3431 | spin_unlock_irq(get_ccwdev_lock(cdev)); | ||
3432 | 3558 | ||
3559 | /* | ||
3560 | * if requests remain then they are internal request | ||
3561 | * and go back to the device queue | ||
3562 | */ | ||
3563 | if (!list_empty(&freeze_queue)) { | ||
3564 | /* move freeze_queue to start of the ccw_queue */ | ||
3565 | spin_lock_irq(get_ccwdev_lock(cdev)); | ||
3566 | list_splice_tail(&freeze_queue, &device->ccw_queue); | ||
3567 | spin_unlock_irq(get_ccwdev_lock(cdev)); | ||
3568 | } | ||
3433 | dasd_put_device(device); | 3569 | dasd_put_device(device); |
3434 | return rc; | 3570 | return rc; |
3435 | } | 3571 | } |
diff --git a/drivers/s390/block/dasd_devmap.c b/drivers/s390/block/dasd_devmap.c index c196827c228f..a71bb8aaca1d 100644 --- a/drivers/s390/block/dasd_devmap.c +++ b/drivers/s390/block/dasd_devmap.c | |||
@@ -410,8 +410,7 @@ dasd_add_busid(const char *bus_id, int features) | |||
410 | struct dasd_devmap *devmap, *new, *tmp; | 410 | struct dasd_devmap *devmap, *new, *tmp; |
411 | int hash; | 411 | int hash; |
412 | 412 | ||
413 | new = (struct dasd_devmap *) | 413 | new = kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL); |
414 | kzalloc(sizeof(struct dasd_devmap), GFP_KERNEL); | ||
415 | if (!new) | 414 | if (!new) |
416 | return ERR_PTR(-ENOMEM); | 415 | return ERR_PTR(-ENOMEM); |
417 | spin_lock(&dasd_devmap_lock); | 416 | spin_lock(&dasd_devmap_lock); |
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c index 6999fd919e94..6a44b27623ed 100644 --- a/drivers/s390/block/dasd_eckd.c +++ b/drivers/s390/block/dasd_eckd.c | |||
@@ -2022,7 +2022,7 @@ static int dasd_eckd_do_analysis(struct dasd_block *block) | |||
2022 | return dasd_eckd_end_analysis(block); | 2022 | return dasd_eckd_end_analysis(block); |
2023 | } | 2023 | } |
2024 | 2024 | ||
2025 | static int dasd_eckd_ready_to_online(struct dasd_device *device) | 2025 | static int dasd_eckd_basic_to_ready(struct dasd_device *device) |
2026 | { | 2026 | { |
2027 | return dasd_alias_add_device(device); | 2027 | return dasd_alias_add_device(device); |
2028 | }; | 2028 | }; |
@@ -2031,6 +2031,11 @@ static int dasd_eckd_online_to_ready(struct dasd_device *device) | |||
2031 | { | 2031 | { |
2032 | cancel_work_sync(&device->reload_device); | 2032 | cancel_work_sync(&device->reload_device); |
2033 | cancel_work_sync(&device->kick_validate); | 2033 | cancel_work_sync(&device->kick_validate); |
2034 | return 0; | ||
2035 | }; | ||
2036 | |||
2037 | static int dasd_eckd_ready_to_basic(struct dasd_device *device) | ||
2038 | { | ||
2034 | return dasd_alias_remove_device(device); | 2039 | return dasd_alias_remove_device(device); |
2035 | }; | 2040 | }; |
2036 | 2041 | ||
@@ -2050,45 +2055,34 @@ dasd_eckd_fill_geometry(struct dasd_block *block, struct hd_geometry *geo) | |||
2050 | } | 2055 | } |
2051 | 2056 | ||
2052 | static struct dasd_ccw_req * | 2057 | static struct dasd_ccw_req * |
2053 | dasd_eckd_format_device(struct dasd_device * device, | 2058 | dasd_eckd_build_format(struct dasd_device *base, |
2054 | struct format_data_t * fdata) | 2059 | struct format_data_t *fdata) |
2055 | { | 2060 | { |
2056 | struct dasd_eckd_private *private; | 2061 | struct dasd_eckd_private *base_priv; |
2062 | struct dasd_eckd_private *start_priv; | ||
2063 | struct dasd_device *startdev; | ||
2057 | struct dasd_ccw_req *fcp; | 2064 | struct dasd_ccw_req *fcp; |
2058 | struct eckd_count *ect; | 2065 | struct eckd_count *ect; |
2066 | struct ch_t address; | ||
2059 | struct ccw1 *ccw; | 2067 | struct ccw1 *ccw; |
2060 | void *data; | 2068 | void *data; |
2061 | int rpt; | 2069 | int rpt; |
2062 | struct ch_t address; | ||
2063 | int cplength, datasize; | 2070 | int cplength, datasize; |
2064 | int i; | 2071 | int i, j; |
2065 | int intensity = 0; | 2072 | int intensity = 0; |
2066 | int r0_perm; | 2073 | int r0_perm; |
2074 | int nr_tracks; | ||
2067 | 2075 | ||
2068 | private = (struct dasd_eckd_private *) device->private; | 2076 | startdev = dasd_alias_get_start_dev(base); |
2069 | rpt = recs_per_track(&private->rdc_data, 0, fdata->blksize); | 2077 | if (!startdev) |
2070 | set_ch_t(&address, | 2078 | startdev = base; |
2071 | fdata->start_unit / private->rdc_data.trk_per_cyl, | ||
2072 | fdata->start_unit % private->rdc_data.trk_per_cyl); | ||
2073 | 2079 | ||
2074 | /* Sanity checks. */ | 2080 | start_priv = (struct dasd_eckd_private *) startdev->private; |
2075 | if (fdata->start_unit >= | 2081 | base_priv = (struct dasd_eckd_private *) base->private; |
2076 | (private->real_cyl * private->rdc_data.trk_per_cyl)) { | 2082 | |
2077 | dev_warn(&device->cdev->dev, "Start track number %d used in " | 2083 | rpt = recs_per_track(&base_priv->rdc_data, 0, fdata->blksize); |
2078 | "formatting is too big\n", fdata->start_unit); | 2084 | |
2079 | return ERR_PTR(-EINVAL); | 2085 | nr_tracks = fdata->stop_unit - fdata->start_unit + 1; |
2080 | } | ||
2081 | if (fdata->start_unit > fdata->stop_unit) { | ||
2082 | dev_warn(&device->cdev->dev, "Start track %d used in " | ||
2083 | "formatting exceeds end track\n", fdata->start_unit); | ||
2084 | return ERR_PTR(-EINVAL); | ||
2085 | } | ||
2086 | if (dasd_check_blocksize(fdata->blksize) != 0) { | ||
2087 | dev_warn(&device->cdev->dev, | ||
2088 | "The DASD cannot be formatted with block size %d\n", | ||
2089 | fdata->blksize); | ||
2090 | return ERR_PTR(-EINVAL); | ||
2091 | } | ||
2092 | 2086 | ||
2093 | /* | 2087 | /* |
2094 | * fdata->intensity is a bit string that tells us what to do: | 2088 | * fdata->intensity is a bit string that tells us what to do: |
@@ -2106,149 +2100,282 @@ dasd_eckd_format_device(struct dasd_device * device, | |||
2106 | r0_perm = 1; | 2100 | r0_perm = 1; |
2107 | intensity = fdata->intensity; | 2101 | intensity = fdata->intensity; |
2108 | } | 2102 | } |
2103 | |||
2109 | switch (intensity) { | 2104 | switch (intensity) { |
2110 | case 0x00: /* Normal format */ | 2105 | case 0x00: /* Normal format */ |
2111 | case 0x08: /* Normal format, use cdl. */ | 2106 | case 0x08: /* Normal format, use cdl. */ |
2112 | cplength = 2 + rpt; | 2107 | cplength = 2 + (rpt*nr_tracks); |
2113 | datasize = sizeof(struct DE_eckd_data) + | 2108 | datasize = sizeof(struct PFX_eckd_data) + |
2114 | sizeof(struct LO_eckd_data) + | 2109 | sizeof(struct LO_eckd_data) + |
2115 | rpt * sizeof(struct eckd_count); | 2110 | rpt * nr_tracks * sizeof(struct eckd_count); |
2116 | break; | 2111 | break; |
2117 | case 0x01: /* Write record zero and format track. */ | 2112 | case 0x01: /* Write record zero and format track. */ |
2118 | case 0x09: /* Write record zero and format track, use cdl. */ | 2113 | case 0x09: /* Write record zero and format track, use cdl. */ |
2119 | cplength = 3 + rpt; | 2114 | cplength = 2 + rpt * nr_tracks; |
2120 | datasize = sizeof(struct DE_eckd_data) + | 2115 | datasize = sizeof(struct PFX_eckd_data) + |
2121 | sizeof(struct LO_eckd_data) + | 2116 | sizeof(struct LO_eckd_data) + |
2122 | sizeof(struct eckd_count) + | 2117 | sizeof(struct eckd_count) + |
2123 | rpt * sizeof(struct eckd_count); | 2118 | rpt * nr_tracks * sizeof(struct eckd_count); |
2124 | break; | 2119 | break; |
2125 | case 0x04: /* Invalidate track. */ | 2120 | case 0x04: /* Invalidate track. */ |
2126 | case 0x0c: /* Invalidate track, use cdl. */ | 2121 | case 0x0c: /* Invalidate track, use cdl. */ |
2127 | cplength = 3; | 2122 | cplength = 3; |
2128 | datasize = sizeof(struct DE_eckd_data) + | 2123 | datasize = sizeof(struct PFX_eckd_data) + |
2129 | sizeof(struct LO_eckd_data) + | 2124 | sizeof(struct LO_eckd_data) + |
2130 | sizeof(struct eckd_count); | 2125 | sizeof(struct eckd_count); |
2131 | break; | 2126 | break; |
2132 | default: | 2127 | default: |
2133 | dev_warn(&device->cdev->dev, "An I/O control call used " | 2128 | dev_warn(&startdev->cdev->dev, |
2134 | "incorrect flags 0x%x\n", fdata->intensity); | 2129 | "An I/O control call used incorrect flags 0x%x\n", |
2130 | fdata->intensity); | ||
2135 | return ERR_PTR(-EINVAL); | 2131 | return ERR_PTR(-EINVAL); |
2136 | } | 2132 | } |
2137 | /* Allocate the format ccw request. */ | 2133 | /* Allocate the format ccw request. */ |
2138 | fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, datasize, device); | 2134 | fcp = dasd_smalloc_request(DASD_ECKD_MAGIC, cplength, |
2135 | datasize, startdev); | ||
2139 | if (IS_ERR(fcp)) | 2136 | if (IS_ERR(fcp)) |
2140 | return fcp; | 2137 | return fcp; |
2141 | 2138 | ||
2139 | start_priv->count++; | ||
2142 | data = fcp->data; | 2140 | data = fcp->data; |
2143 | ccw = fcp->cpaddr; | 2141 | ccw = fcp->cpaddr; |
2144 | 2142 | ||
2145 | switch (intensity & ~0x08) { | 2143 | switch (intensity & ~0x08) { |
2146 | case 0x00: /* Normal format. */ | 2144 | case 0x00: /* Normal format. */ |
2147 | define_extent(ccw++, (struct DE_eckd_data *) data, | 2145 | prefix(ccw++, (struct PFX_eckd_data *) data, |
2148 | fdata->start_unit, fdata->start_unit, | 2146 | fdata->start_unit, fdata->stop_unit, |
2149 | DASD_ECKD_CCW_WRITE_CKD, device); | 2147 | DASD_ECKD_CCW_WRITE_CKD, base, startdev); |
2150 | /* grant subsystem permission to format R0 */ | 2148 | /* grant subsystem permission to format R0 */ |
2151 | if (r0_perm) | 2149 | if (r0_perm) |
2152 | ((struct DE_eckd_data *)data)->ga_extended |= 0x04; | 2150 | ((struct PFX_eckd_data *)data) |
2153 | data += sizeof(struct DE_eckd_data); | 2151 | ->define_extent.ga_extended |= 0x04; |
2152 | data += sizeof(struct PFX_eckd_data); | ||
2154 | ccw[-1].flags |= CCW_FLAG_CC; | 2153 | ccw[-1].flags |= CCW_FLAG_CC; |
2155 | locate_record(ccw++, (struct LO_eckd_data *) data, | 2154 | locate_record(ccw++, (struct LO_eckd_data *) data, |
2156 | fdata->start_unit, 0, rpt, | 2155 | fdata->start_unit, 0, rpt*nr_tracks, |
2157 | DASD_ECKD_CCW_WRITE_CKD, device, | 2156 | DASD_ECKD_CCW_WRITE_CKD, base, |
2158 | fdata->blksize); | 2157 | fdata->blksize); |
2159 | data += sizeof(struct LO_eckd_data); | 2158 | data += sizeof(struct LO_eckd_data); |
2160 | break; | 2159 | break; |
2161 | case 0x01: /* Write record zero + format track. */ | 2160 | case 0x01: /* Write record zero + format track. */ |
2162 | define_extent(ccw++, (struct DE_eckd_data *) data, | 2161 | prefix(ccw++, (struct PFX_eckd_data *) data, |
2163 | fdata->start_unit, fdata->start_unit, | 2162 | fdata->start_unit, fdata->stop_unit, |
2164 | DASD_ECKD_CCW_WRITE_RECORD_ZERO, | 2163 | DASD_ECKD_CCW_WRITE_RECORD_ZERO, |
2165 | device); | 2164 | base, startdev); |
2166 | data += sizeof(struct DE_eckd_data); | 2165 | data += sizeof(struct PFX_eckd_data); |
2167 | ccw[-1].flags |= CCW_FLAG_CC; | 2166 | ccw[-1].flags |= CCW_FLAG_CC; |
2168 | locate_record(ccw++, (struct LO_eckd_data *) data, | 2167 | locate_record(ccw++, (struct LO_eckd_data *) data, |
2169 | fdata->start_unit, 0, rpt + 1, | 2168 | fdata->start_unit, 0, rpt * nr_tracks + 1, |
2170 | DASD_ECKD_CCW_WRITE_RECORD_ZERO, device, | 2169 | DASD_ECKD_CCW_WRITE_RECORD_ZERO, base, |
2171 | device->block->bp_block); | 2170 | base->block->bp_block); |
2172 | data += sizeof(struct LO_eckd_data); | 2171 | data += sizeof(struct LO_eckd_data); |
2173 | break; | 2172 | break; |
2174 | case 0x04: /* Invalidate track. */ | 2173 | case 0x04: /* Invalidate track. */ |
2175 | define_extent(ccw++, (struct DE_eckd_data *) data, | 2174 | prefix(ccw++, (struct PFX_eckd_data *) data, |
2176 | fdata->start_unit, fdata->start_unit, | 2175 | fdata->start_unit, fdata->stop_unit, |
2177 | DASD_ECKD_CCW_WRITE_CKD, device); | 2176 | DASD_ECKD_CCW_WRITE_CKD, base, startdev); |
2178 | data += sizeof(struct DE_eckd_data); | 2177 | data += sizeof(struct PFX_eckd_data); |
2179 | ccw[-1].flags |= CCW_FLAG_CC; | 2178 | ccw[-1].flags |= CCW_FLAG_CC; |
2180 | locate_record(ccw++, (struct LO_eckd_data *) data, | 2179 | locate_record(ccw++, (struct LO_eckd_data *) data, |
2181 | fdata->start_unit, 0, 1, | 2180 | fdata->start_unit, 0, 1, |
2182 | DASD_ECKD_CCW_WRITE_CKD, device, 8); | 2181 | DASD_ECKD_CCW_WRITE_CKD, base, 8); |
2183 | data += sizeof(struct LO_eckd_data); | 2182 | data += sizeof(struct LO_eckd_data); |
2184 | break; | 2183 | break; |
2185 | } | 2184 | } |
2186 | if (intensity & 0x01) { /* write record zero */ | 2185 | |
2187 | ect = (struct eckd_count *) data; | 2186 | for (j = 0; j < nr_tracks; j++) { |
2188 | data += sizeof(struct eckd_count); | 2187 | /* calculate cylinder and head for the current track */ |
2189 | ect->cyl = address.cyl; | 2188 | set_ch_t(&address, |
2190 | ect->head = address.head; | 2189 | (fdata->start_unit + j) / |
2191 | ect->record = 0; | 2190 | base_priv->rdc_data.trk_per_cyl, |
2192 | ect->kl = 0; | 2191 | (fdata->start_unit + j) % |
2193 | ect->dl = 8; | 2192 | base_priv->rdc_data.trk_per_cyl); |
2194 | ccw[-1].flags |= CCW_FLAG_CC; | 2193 | if (intensity & 0x01) { /* write record zero */ |
2195 | ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO; | ||
2196 | ccw->flags = CCW_FLAG_SLI; | ||
2197 | ccw->count = 8; | ||
2198 | ccw->cda = (__u32)(addr_t) ect; | ||
2199 | ccw++; | ||
2200 | } | ||
2201 | if ((intensity & ~0x08) & 0x04) { /* erase track */ | ||
2202 | ect = (struct eckd_count *) data; | ||
2203 | data += sizeof(struct eckd_count); | ||
2204 | ect->cyl = address.cyl; | ||
2205 | ect->head = address.head; | ||
2206 | ect->record = 1; | ||
2207 | ect->kl = 0; | ||
2208 | ect->dl = 0; | ||
2209 | ccw[-1].flags |= CCW_FLAG_CC; | ||
2210 | ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; | ||
2211 | ccw->flags = CCW_FLAG_SLI; | ||
2212 | ccw->count = 8; | ||
2213 | ccw->cda = (__u32)(addr_t) ect; | ||
2214 | } else { /* write remaining records */ | ||
2215 | for (i = 0; i < rpt; i++) { | ||
2216 | ect = (struct eckd_count *) data; | 2194 | ect = (struct eckd_count *) data; |
2217 | data += sizeof(struct eckd_count); | 2195 | data += sizeof(struct eckd_count); |
2218 | ect->cyl = address.cyl; | 2196 | ect->cyl = address.cyl; |
2219 | ect->head = address.head; | 2197 | ect->head = address.head; |
2220 | ect->record = i + 1; | 2198 | ect->record = 0; |
2221 | ect->kl = 0; | 2199 | ect->kl = 0; |
2222 | ect->dl = fdata->blksize; | 2200 | ect->dl = 8; |
2223 | /* Check for special tracks 0-1 when formatting CDL */ | ||
2224 | if ((intensity & 0x08) && | ||
2225 | fdata->start_unit == 0) { | ||
2226 | if (i < 3) { | ||
2227 | ect->kl = 4; | ||
2228 | ect->dl = sizes_trk0[i] - 4; | ||
2229 | } | ||
2230 | } | ||
2231 | if ((intensity & 0x08) && | ||
2232 | fdata->start_unit == 1) { | ||
2233 | ect->kl = 44; | ||
2234 | ect->dl = LABEL_SIZE - 44; | ||
2235 | } | ||
2236 | ccw[-1].flags |= CCW_FLAG_CC; | 2201 | ccw[-1].flags |= CCW_FLAG_CC; |
2237 | ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; | 2202 | ccw->cmd_code = DASD_ECKD_CCW_WRITE_RECORD_ZERO; |
2238 | ccw->flags = CCW_FLAG_SLI; | 2203 | ccw->flags = CCW_FLAG_SLI; |
2239 | ccw->count = 8; | 2204 | ccw->count = 8; |
2240 | ccw->cda = (__u32)(addr_t) ect; | 2205 | ccw->cda = (__u32)(addr_t) ect; |
2241 | ccw++; | 2206 | ccw++; |
2242 | } | 2207 | } |
2208 | if ((intensity & ~0x08) & 0x04) { /* erase track */ | ||
2209 | ect = (struct eckd_count *) data; | ||
2210 | data += sizeof(struct eckd_count); | ||
2211 | ect->cyl = address.cyl; | ||
2212 | ect->head = address.head; | ||
2213 | ect->record = 1; | ||
2214 | ect->kl = 0; | ||
2215 | ect->dl = 0; | ||
2216 | ccw[-1].flags |= CCW_FLAG_CC; | ||
2217 | ccw->cmd_code = DASD_ECKD_CCW_WRITE_CKD; | ||
2218 | ccw->flags = CCW_FLAG_SLI; | ||
2219 | ccw->count = 8; | ||
2220 | ccw->cda = (__u32)(addr_t) ect; | ||
2221 | } else { /* write remaining records */ | ||
2222 | for (i = 0; i < rpt; i++) { | ||
2223 | ect = (struct eckd_count *) data; | ||
2224 | data += sizeof(struct eckd_count); | ||
2225 | ect->cyl = address.cyl; | ||
2226 | ect->head = address.head; | ||
2227 | ect->record = i + 1; | ||
2228 | ect->kl = 0; | ||
2229 | ect->dl = fdata->blksize; | ||
2230 | /* | ||
2231 | * Check for special tracks 0-1 | ||
2232 | * when formatting CDL | ||
2233 | */ | ||
2234 | if ((intensity & 0x08) && | ||
2235 | fdata->start_unit == 0) { | ||
2236 | if (i < 3) { | ||
2237 | ect->kl = 4; | ||
2238 | ect->dl = sizes_trk0[i] - 4; | ||
2239 | } | ||
2240 | } | ||
2241 | if ((intensity & 0x08) && | ||
2242 | fdata->start_unit == 1) { | ||
2243 | ect->kl = 44; | ||
2244 | ect->dl = LABEL_SIZE - 44; | ||
2245 | } | ||
2246 | ccw[-1].flags |= CCW_FLAG_CC; | ||
2247 | if (i != 0 || j == 0) | ||
2248 | ccw->cmd_code = | ||
2249 | DASD_ECKD_CCW_WRITE_CKD; | ||
2250 | else | ||
2251 | ccw->cmd_code = | ||
2252 | DASD_ECKD_CCW_WRITE_CKD_MT; | ||
2253 | ccw->flags = CCW_FLAG_SLI; | ||
2254 | ccw->count = 8; | ||
2255 | ccw->cda = (__u32)(addr_t) ect; | ||
2256 | ccw++; | ||
2257 | } | ||
2258 | } | ||
2243 | } | 2259 | } |
2244 | fcp->startdev = device; | 2260 | |
2245 | fcp->memdev = device; | 2261 | fcp->startdev = startdev; |
2262 | fcp->memdev = startdev; | ||
2246 | fcp->retries = 256; | 2263 | fcp->retries = 256; |
2264 | fcp->expires = startdev->default_expires * HZ; | ||
2247 | fcp->buildclk = get_tod_clock(); | 2265 | fcp->buildclk = get_tod_clock(); |
2248 | fcp->status = DASD_CQR_FILLED; | 2266 | fcp->status = DASD_CQR_FILLED; |
2267 | |||
2249 | return fcp; | 2268 | return fcp; |
2250 | } | 2269 | } |
2251 | 2270 | ||
2271 | static int | ||
2272 | dasd_eckd_format_device(struct dasd_device *base, | ||
2273 | struct format_data_t *fdata) | ||
2274 | { | ||
2275 | struct dasd_ccw_req *cqr, *n; | ||
2276 | struct dasd_block *block; | ||
2277 | struct dasd_eckd_private *private; | ||
2278 | struct list_head format_queue; | ||
2279 | struct dasd_device *device; | ||
2280 | int old_stop, format_step; | ||
2281 | int step, rc = 0; | ||
2282 | |||
2283 | block = base->block; | ||
2284 | private = (struct dasd_eckd_private *) base->private; | ||
2285 | |||
2286 | /* Sanity checks. */ | ||
2287 | if (fdata->start_unit >= | ||
2288 | (private->real_cyl * private->rdc_data.trk_per_cyl)) { | ||
2289 | dev_warn(&base->cdev->dev, | ||
2290 | "Start track number %u used in formatting is too big\n", | ||
2291 | fdata->start_unit); | ||
2292 | return -EINVAL; | ||
2293 | } | ||
2294 | if (fdata->stop_unit >= | ||
2295 | (private->real_cyl * private->rdc_data.trk_per_cyl)) { | ||
2296 | dev_warn(&base->cdev->dev, | ||
2297 | "Stop track number %u used in formatting is too big\n", | ||
2298 | fdata->stop_unit); | ||
2299 | return -EINVAL; | ||
2300 | } | ||
2301 | if (fdata->start_unit > fdata->stop_unit) { | ||
2302 | dev_warn(&base->cdev->dev, | ||
2303 | "Start track %u used in formatting exceeds end track\n", | ||
2304 | fdata->start_unit); | ||
2305 | return -EINVAL; | ||
2306 | } | ||
2307 | if (dasd_check_blocksize(fdata->blksize) != 0) { | ||
2308 | dev_warn(&base->cdev->dev, | ||
2309 | "The DASD cannot be formatted with block size %u\n", | ||
2310 | fdata->blksize); | ||
2311 | return -EINVAL; | ||
2312 | } | ||
2313 | |||
2314 | INIT_LIST_HEAD(&format_queue); | ||
2315 | old_stop = fdata->stop_unit; | ||
2316 | |||
2317 | while (fdata->start_unit <= 1) { | ||
2318 | fdata->stop_unit = fdata->start_unit; | ||
2319 | cqr = dasd_eckd_build_format(base, fdata); | ||
2320 | list_add(&cqr->blocklist, &format_queue); | ||
2321 | |||
2322 | fdata->stop_unit = old_stop; | ||
2323 | fdata->start_unit++; | ||
2324 | |||
2325 | if (fdata->start_unit > fdata->stop_unit) | ||
2326 | goto sleep; | ||
2327 | } | ||
2328 | |||
2329 | retry: | ||
2330 | format_step = 255 / recs_per_track(&private->rdc_data, 0, | ||
2331 | fdata->blksize); | ||
2332 | while (fdata->start_unit <= old_stop) { | ||
2333 | step = fdata->stop_unit - fdata->start_unit + 1; | ||
2334 | if (step > format_step) | ||
2335 | fdata->stop_unit = fdata->start_unit + format_step - 1; | ||
2336 | |||
2337 | cqr = dasd_eckd_build_format(base, fdata); | ||
2338 | if (IS_ERR(cqr)) { | ||
2339 | if (PTR_ERR(cqr) == -ENOMEM) { | ||
2340 | /* | ||
2341 | * not enough memory available | ||
2342 | * go to out and start requests | ||
2343 | * retry after first requests were finished | ||
2344 | */ | ||
2345 | fdata->stop_unit = old_stop; | ||
2346 | goto sleep; | ||
2347 | } else | ||
2348 | return PTR_ERR(cqr); | ||
2349 | } | ||
2350 | list_add(&cqr->blocklist, &format_queue); | ||
2351 | |||
2352 | fdata->start_unit = fdata->stop_unit + 1; | ||
2353 | fdata->stop_unit = old_stop; | ||
2354 | } | ||
2355 | |||
2356 | sleep: | ||
2357 | dasd_sleep_on_queue(&format_queue); | ||
2358 | |||
2359 | list_for_each_entry_safe(cqr, n, &format_queue, blocklist) { | ||
2360 | device = cqr->startdev; | ||
2361 | private = (struct dasd_eckd_private *) device->private; | ||
2362 | if (cqr->status == DASD_CQR_FAILED) | ||
2363 | rc = -EIO; | ||
2364 | list_del_init(&cqr->blocklist); | ||
2365 | dasd_sfree_request(cqr, device); | ||
2366 | private->count--; | ||
2367 | } | ||
2368 | |||
2369 | /* | ||
2370 | * in case of ENOMEM we need to retry after | ||
2371 | * first requests are finished | ||
2372 | */ | ||
2373 | if (fdata->start_unit <= fdata->stop_unit) | ||
2374 | goto retry; | ||
2375 | |||
2376 | return rc; | ||
2377 | } | ||
2378 | |||
2252 | static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr) | 2379 | static void dasd_eckd_handle_terminated_request(struct dasd_ccw_req *cqr) |
2253 | { | 2380 | { |
2254 | cqr->status = DASD_CQR_FILLED; | 2381 | cqr->status = DASD_CQR_FILLED; |
@@ -4305,8 +4432,9 @@ static struct dasd_discipline dasd_eckd_discipline = { | |||
4305 | .uncheck_device = dasd_eckd_uncheck_device, | 4432 | .uncheck_device = dasd_eckd_uncheck_device, |
4306 | .do_analysis = dasd_eckd_do_analysis, | 4433 | .do_analysis = dasd_eckd_do_analysis, |
4307 | .verify_path = dasd_eckd_verify_path, | 4434 | .verify_path = dasd_eckd_verify_path, |
4308 | .ready_to_online = dasd_eckd_ready_to_online, | 4435 | .basic_to_ready = dasd_eckd_basic_to_ready, |
4309 | .online_to_ready = dasd_eckd_online_to_ready, | 4436 | .online_to_ready = dasd_eckd_online_to_ready, |
4437 | .ready_to_basic = dasd_eckd_ready_to_basic, | ||
4310 | .fill_geometry = dasd_eckd_fill_geometry, | 4438 | .fill_geometry = dasd_eckd_fill_geometry, |
4311 | .start_IO = dasd_start_IO, | 4439 | .start_IO = dasd_start_IO, |
4312 | .term_IO = dasd_term_IO, | 4440 | .term_IO = dasd_term_IO, |
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h index 899e3f5a56e5..0785bd9bd5b6 100644 --- a/drivers/s390/block/dasd_int.h +++ b/drivers/s390/block/dasd_int.h | |||
@@ -300,10 +300,11 @@ struct dasd_discipline { | |||
300 | * Last things to do when a device is set online, and first things | 300 | * Last things to do when a device is set online, and first things |
301 | * when it is set offline. | 301 | * when it is set offline. |
302 | */ | 302 | */ |
303 | int (*ready_to_online) (struct dasd_device *); | 303 | int (*basic_to_ready) (struct dasd_device *); |
304 | int (*online_to_ready) (struct dasd_device *); | 304 | int (*online_to_ready) (struct dasd_device *); |
305 | int (*ready_to_basic) (struct dasd_device *); | ||
305 | 306 | ||
306 | /* | 307 | /*
307 | * Device operation functions. build_cp creates a ccw chain for | 308 | * Device operation functions. build_cp creates a ccw chain for |
308 | * a block device request, start_io starts the request and | 309 | * a block device request, start_io starts the request and |
309 | * term_IO cancels it (e.g. in case of a timeout). format_device | 310 | * term_IO cancels it (e.g. in case of a timeout). format_device |
@@ -317,8 +318,8 @@ struct dasd_discipline { | |||
317 | int (*start_IO) (struct dasd_ccw_req *); | 318 | int (*start_IO) (struct dasd_ccw_req *); |
318 | int (*term_IO) (struct dasd_ccw_req *); | 319 | int (*term_IO) (struct dasd_ccw_req *); |
319 | void (*handle_terminated_request) (struct dasd_ccw_req *); | 320 | void (*handle_terminated_request) (struct dasd_ccw_req *); |
320 | struct dasd_ccw_req *(*format_device) (struct dasd_device *, | 321 | int (*format_device) (struct dasd_device *, |
321 | struct format_data_t *); | 322 | struct format_data_t *); |
322 | int (*free_cp) (struct dasd_ccw_req *, struct request *); | 323 | int (*free_cp) (struct dasd_ccw_req *, struct request *); |
323 | 324 | ||
324 | /* | 325 | /* |
@@ -672,6 +673,7 @@ int dasd_term_IO(struct dasd_ccw_req *); | |||
672 | void dasd_schedule_device_bh(struct dasd_device *); | 673 | void dasd_schedule_device_bh(struct dasd_device *); |
673 | void dasd_schedule_block_bh(struct dasd_block *); | 674 | void dasd_schedule_block_bh(struct dasd_block *); |
674 | int dasd_sleep_on(struct dasd_ccw_req *); | 675 | int dasd_sleep_on(struct dasd_ccw_req *); |
676 | int dasd_sleep_on_queue(struct list_head *); | ||
675 | int dasd_sleep_on_immediatly(struct dasd_ccw_req *); | 677 | int dasd_sleep_on_immediatly(struct dasd_ccw_req *); |
676 | int dasd_sleep_on_interruptible(struct dasd_ccw_req *); | 678 | int dasd_sleep_on_interruptible(struct dasd_ccw_req *); |
677 | void dasd_device_set_timer(struct dasd_device *, int); | 679 | void dasd_device_set_timer(struct dasd_device *, int); |
diff --git a/drivers/s390/block/dasd_ioctl.c b/drivers/s390/block/dasd_ioctl.c index 03c0e0444553..8be1b51e9311 100644 --- a/drivers/s390/block/dasd_ioctl.c +++ b/drivers/s390/block/dasd_ioctl.c | |||
@@ -143,12 +143,12 @@ static int dasd_ioctl_resume(struct dasd_block *block) | |||
143 | /* | 143 | /* |
144 | * performs formatting of _device_ according to _fdata_ | 144 | * performs formatting of _device_ according to _fdata_ |
145 | * Note: The discipline's format_function is assumed to deliver formatting | 145 | * Note: The discipline's format_function is assumed to deliver formatting |
146 | * commands to format a single unit of the device. In terms of the ECKD | 146 | * commands to format multiple units of the device. In terms of the ECKD |
147 | * devices this means CCWs are generated to format a single track. | 147 | * devices this means CCWs are generated to format multiple tracks. |
148 | */ | 148 | */ |
149 | static int dasd_format(struct dasd_block *block, struct format_data_t *fdata) | 149 | static int |
150 | dasd_format(struct dasd_block *block, struct format_data_t *fdata) | ||
150 | { | 151 | { |
151 | struct dasd_ccw_req *cqr; | ||
152 | struct dasd_device *base; | 152 | struct dasd_device *base; |
153 | int rc; | 153 | int rc; |
154 | 154 | ||
@@ -157,8 +157,8 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata) | |||
157 | return -EPERM; | 157 | return -EPERM; |
158 | 158 | ||
159 | if (base->state != DASD_STATE_BASIC) { | 159 | if (base->state != DASD_STATE_BASIC) { |
160 | pr_warning("%s: The DASD cannot be formatted while it is " | 160 | pr_warn("%s: The DASD cannot be formatted while it is enabled\n", |
161 | "enabled\n", dev_name(&base->cdev->dev)); | 161 | dev_name(&base->cdev->dev)); |
162 | return -EBUSY; | 162 | return -EBUSY; |
163 | } | 163 | } |
164 | 164 | ||
@@ -178,21 +178,10 @@ static int dasd_format(struct dasd_block *block, struct format_data_t *fdata) | |||
178 | bdput(bdev); | 178 | bdput(bdev); |
179 | } | 179 | } |
180 | 180 | ||
181 | while (fdata->start_unit <= fdata->stop_unit) { | 181 | rc = base->discipline->format_device(base, fdata); |
182 | cqr = base->discipline->format_device(base, fdata); | 182 | if (rc) |
183 | if (IS_ERR(cqr)) | 183 | return rc; |
184 | return PTR_ERR(cqr); | 184 | |
185 | rc = dasd_sleep_on_interruptible(cqr); | ||
186 | dasd_sfree_request(cqr, cqr->memdev); | ||
187 | if (rc) { | ||
188 | if (rc != -ERESTARTSYS) | ||
189 | pr_err("%s: Formatting unit %d failed with " | ||
190 | "rc=%d\n", dev_name(&base->cdev->dev), | ||
191 | fdata->start_unit, rc); | ||
192 | return rc; | ||
193 | } | ||
194 | fdata->start_unit++; | ||
195 | } | ||
196 | return 0; | 185 | return 0; |
197 | } | 186 | } |
198 | 187 | ||
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c index 9978ad4433cb..b303cab76a7f 100644 --- a/drivers/s390/block/scm_blk.c +++ b/drivers/s390/block/scm_blk.c | |||
@@ -135,6 +135,11 @@ static const struct block_device_operations scm_blk_devops = { | |||
135 | .release = scm_release, | 135 | .release = scm_release, |
136 | }; | 136 | }; |
137 | 137 | ||
138 | static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req) | ||
139 | { | ||
140 | return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT; | ||
141 | } | ||
142 | |||
138 | static void scm_request_prepare(struct scm_request *scmrq) | 143 | static void scm_request_prepare(struct scm_request *scmrq) |
139 | { | 144 | { |
140 | struct scm_blk_dev *bdev = scmrq->bdev; | 145 | struct scm_blk_dev *bdev = scmrq->bdev; |
@@ -195,14 +200,18 @@ void scm_request_requeue(struct scm_request *scmrq) | |||
195 | 200 | ||
196 | scm_release_cluster(scmrq); | 201 | scm_release_cluster(scmrq); |
197 | blk_requeue_request(bdev->rq, scmrq->request); | 202 | blk_requeue_request(bdev->rq, scmrq->request); |
203 | atomic_dec(&bdev->queued_reqs); | ||
198 | scm_request_done(scmrq); | 204 | scm_request_done(scmrq); |
199 | scm_ensure_queue_restart(bdev); | 205 | scm_ensure_queue_restart(bdev); |
200 | } | 206 | } |
201 | 207 | ||
202 | void scm_request_finish(struct scm_request *scmrq) | 208 | void scm_request_finish(struct scm_request *scmrq) |
203 | { | 209 | { |
210 | struct scm_blk_dev *bdev = scmrq->bdev; | ||
211 | |||
204 | scm_release_cluster(scmrq); | 212 | scm_release_cluster(scmrq); |
205 | blk_end_request_all(scmrq->request, scmrq->error); | 213 | blk_end_request_all(scmrq->request, scmrq->error); |
214 | atomic_dec(&bdev->queued_reqs); | ||
206 | scm_request_done(scmrq); | 215 | scm_request_done(scmrq); |
207 | } | 216 | } |
208 | 217 | ||
@@ -218,6 +227,10 @@ static void scm_blk_request(struct request_queue *rq) | |||
218 | if (req->cmd_type != REQ_TYPE_FS) | 227 | if (req->cmd_type != REQ_TYPE_FS) |
219 | continue; | 228 | continue; |
220 | 229 | ||
230 | if (!scm_permit_request(bdev, req)) { | ||
231 | scm_ensure_queue_restart(bdev); | ||
232 | return; | ||
233 | } | ||
221 | scmrq = scm_request_fetch(); | 234 | scmrq = scm_request_fetch(); |
222 | if (!scmrq) { | 235 | if (!scmrq) { |
223 | SCM_LOG(5, "no request"); | 236 | SCM_LOG(5, "no request"); |
@@ -231,11 +244,13 @@ static void scm_blk_request(struct request_queue *rq) | |||
231 | return; | 244 | return; |
232 | } | 245 | } |
233 | if (scm_need_cluster_request(scmrq)) { | 246 | if (scm_need_cluster_request(scmrq)) { |
247 | atomic_inc(&bdev->queued_reqs); | ||
234 | blk_start_request(req); | 248 | blk_start_request(req); |
235 | scm_initiate_cluster_request(scmrq); | 249 | scm_initiate_cluster_request(scmrq); |
236 | return; | 250 | return; |
237 | } | 251 | } |
238 | scm_request_prepare(scmrq); | 252 | scm_request_prepare(scmrq); |
253 | atomic_inc(&bdev->queued_reqs); | ||
239 | blk_start_request(req); | 254 | blk_start_request(req); |
240 | 255 | ||
241 | ret = scm_start_aob(scmrq->aob); | 256 | ret = scm_start_aob(scmrq->aob); |
@@ -244,7 +259,6 @@ static void scm_blk_request(struct request_queue *rq) | |||
244 | scm_request_requeue(scmrq); | 259 | scm_request_requeue(scmrq); |
245 | return; | 260 | return; |
246 | } | 261 | } |
247 | atomic_inc(&bdev->queued_reqs); | ||
248 | } | 262 | } |
249 | } | 263 | } |
250 | 264 | ||
@@ -280,6 +294,38 @@ void scm_blk_irq(struct scm_device *scmdev, void *data, int error) | |||
280 | tasklet_hi_schedule(&bdev->tasklet); | 294 | tasklet_hi_schedule(&bdev->tasklet); |
281 | } | 295 | } |
282 | 296 | ||
297 | static void scm_blk_handle_error(struct scm_request *scmrq) | ||
298 | { | ||
299 | struct scm_blk_dev *bdev = scmrq->bdev; | ||
300 | unsigned long flags; | ||
301 | |||
302 | if (scmrq->error != -EIO) | ||
303 | goto restart; | ||
304 | |||
305 | /* For -EIO the response block is valid. */ | ||
306 | switch (scmrq->aob->response.eqc) { | ||
307 | case EQC_WR_PROHIBIT: | ||
308 | spin_lock_irqsave(&bdev->lock, flags); | ||
309 | if (bdev->state != SCM_WR_PROHIBIT) | ||
310 | pr_info("%lx: Write access to the SCM increment is suspended\n", | ||
311 | (unsigned long) bdev->scmdev->address); | ||
312 | bdev->state = SCM_WR_PROHIBIT; | ||
313 | spin_unlock_irqrestore(&bdev->lock, flags); | ||
314 | goto requeue; | ||
315 | default: | ||
316 | break; | ||
317 | } | ||
318 | |||
319 | restart: | ||
320 | if (!scm_start_aob(scmrq->aob)) | ||
321 | return; | ||
322 | |||
323 | requeue: | ||
324 | spin_lock_irqsave(&bdev->rq_lock, flags); | ||
325 | scm_request_requeue(scmrq); | ||
326 | spin_unlock_irqrestore(&bdev->rq_lock, flags); | ||
327 | } | ||
328 | |||
283 | static void scm_blk_tasklet(struct scm_blk_dev *bdev) | 329 | static void scm_blk_tasklet(struct scm_blk_dev *bdev) |
284 | { | 330 | { |
285 | struct scm_request *scmrq; | 331 | struct scm_request *scmrq; |
@@ -293,11 +339,8 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev) | |||
293 | spin_unlock_irqrestore(&bdev->lock, flags); | 339 | spin_unlock_irqrestore(&bdev->lock, flags); |
294 | 340 | ||
295 | if (scmrq->error && scmrq->retries-- > 0) { | 341 | if (scmrq->error && scmrq->retries-- > 0) { |
296 | if (scm_start_aob(scmrq->aob)) { | 342 | scm_blk_handle_error(scmrq); |
297 | spin_lock_irqsave(&bdev->rq_lock, flags); | 343 | |
298 | scm_request_requeue(scmrq); | ||
299 | spin_unlock_irqrestore(&bdev->rq_lock, flags); | ||
300 | } | ||
301 | /* Request restarted or requeued, handle next. */ | 344 | /* Request restarted or requeued, handle next. */ |
302 | spin_lock_irqsave(&bdev->lock, flags); | 345 | spin_lock_irqsave(&bdev->lock, flags); |
303 | continue; | 346 | continue; |
@@ -310,7 +353,6 @@ static void scm_blk_tasklet(struct scm_blk_dev *bdev) | |||
310 | } | 353 | } |
311 | 354 | ||
312 | scm_request_finish(scmrq); | 355 | scm_request_finish(scmrq); |
313 | atomic_dec(&bdev->queued_reqs); | ||
314 | spin_lock_irqsave(&bdev->lock, flags); | 356 | spin_lock_irqsave(&bdev->lock, flags); |
315 | } | 357 | } |
316 | spin_unlock_irqrestore(&bdev->lock, flags); | 358 | spin_unlock_irqrestore(&bdev->lock, flags); |
@@ -332,6 +374,7 @@ int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev) | |||
332 | } | 374 | } |
333 | 375 | ||
334 | bdev->scmdev = scmdev; | 376 | bdev->scmdev = scmdev; |
377 | bdev->state = SCM_OPER; | ||
335 | spin_lock_init(&bdev->rq_lock); | 378 | spin_lock_init(&bdev->rq_lock); |
336 | spin_lock_init(&bdev->lock); | 379 | spin_lock_init(&bdev->lock); |
337 | INIT_LIST_HEAD(&bdev->finished_requests); | 380 | INIT_LIST_HEAD(&bdev->finished_requests); |
@@ -396,6 +439,18 @@ void scm_blk_dev_cleanup(struct scm_blk_dev *bdev) | |||
396 | put_disk(bdev->gendisk); | 439 | put_disk(bdev->gendisk); |
397 | } | 440 | } |
398 | 441 | ||
442 | void scm_blk_set_available(struct scm_blk_dev *bdev) | ||
443 | { | ||
444 | unsigned long flags; | ||
445 | |||
446 | spin_lock_irqsave(&bdev->lock, flags); | ||
447 | if (bdev->state == SCM_WR_PROHIBIT) | ||
448 | pr_info("%lx: Write access to the SCM increment is restored\n", | ||
449 | (unsigned long) bdev->scmdev->address); | ||
450 | bdev->state = SCM_OPER; | ||
451 | spin_unlock_irqrestore(&bdev->lock, flags); | ||
452 | } | ||
453 | |||
399 | static int __init scm_blk_init(void) | 454 | static int __init scm_blk_init(void) |
400 | { | 455 | { |
401 | int ret = -EINVAL; | 456 | int ret = -EINVAL; |
@@ -408,12 +463,15 @@ static int __init scm_blk_init(void) | |||
408 | goto out; | 463 | goto out; |
409 | 464 | ||
410 | scm_major = ret; | 465 | scm_major = ret; |
411 | if (scm_alloc_rqs(nr_requests)) | 466 | ret = scm_alloc_rqs(nr_requests); |
412 | goto out_unreg; | 467 | if (ret) |
468 | goto out_free; | ||
413 | 469 | ||
414 | scm_debug = debug_register("scm_log", 16, 1, 16); | 470 | scm_debug = debug_register("scm_log", 16, 1, 16); |
415 | if (!scm_debug) | 471 | if (!scm_debug) { |
472 | ret = -ENOMEM; | ||
416 | goto out_free; | 473 | goto out_free; |
474 | } | ||
417 | 475 | ||
418 | debug_register_view(scm_debug, &debug_hex_ascii_view); | 476 | debug_register_view(scm_debug, &debug_hex_ascii_view); |
419 | debug_set_level(scm_debug, 2); | 477 | debug_set_level(scm_debug, 2); |
@@ -428,7 +486,6 @@ out_dbf: | |||
428 | debug_unregister(scm_debug); | 486 | debug_unregister(scm_debug); |
429 | out_free: | 487 | out_free: |
430 | scm_free_rqs(); | 488 | scm_free_rqs(); |
431 | out_unreg: | ||
432 | unregister_blkdev(scm_major, "scm"); | 489 | unregister_blkdev(scm_major, "scm"); |
433 | out: | 490 | out: |
434 | return ret; | 491 | return ret; |
diff --git a/drivers/s390/block/scm_blk.h b/drivers/s390/block/scm_blk.h index 3c1ccf494647..8b387b32fd62 100644 --- a/drivers/s390/block/scm_blk.h +++ b/drivers/s390/block/scm_blk.h | |||
@@ -21,6 +21,7 @@ struct scm_blk_dev { | |||
21 | spinlock_t rq_lock; /* guard the request queue */ | 21 | spinlock_t rq_lock; /* guard the request queue */ |
22 | spinlock_t lock; /* guard the rest of the blockdev */ | 22 | spinlock_t lock; /* guard the rest of the blockdev */ |
23 | atomic_t queued_reqs; | 23 | atomic_t queued_reqs; |
24 | enum {SCM_OPER, SCM_WR_PROHIBIT} state; | ||
24 | struct list_head finished_requests; | 25 | struct list_head finished_requests; |
25 | #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE | 26 | #ifdef CONFIG_SCM_BLOCK_CLUSTER_WRITE |
26 | struct list_head cluster_list; | 27 | struct list_head cluster_list; |
@@ -48,6 +49,7 @@ struct scm_request { | |||
48 | 49 | ||
49 | int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *); | 50 | int scm_blk_dev_setup(struct scm_blk_dev *, struct scm_device *); |
50 | void scm_blk_dev_cleanup(struct scm_blk_dev *); | 51 | void scm_blk_dev_cleanup(struct scm_blk_dev *); |
52 | void scm_blk_set_available(struct scm_blk_dev *); | ||
51 | void scm_blk_irq(struct scm_device *, void *, int); | 53 | void scm_blk_irq(struct scm_device *, void *, int); |
52 | 54 | ||
53 | void scm_request_finish(struct scm_request *); | 55 | void scm_request_finish(struct scm_request *); |
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c index f4bb61b0cea1..c0d102e3a48b 100644 --- a/drivers/s390/block/scm_blk_cluster.c +++ b/drivers/s390/block/scm_blk_cluster.c | |||
@@ -223,6 +223,8 @@ void scm_cluster_request_irq(struct scm_request *scmrq) | |||
223 | 223 | ||
224 | bool scm_cluster_size_valid(void) | 224 | bool scm_cluster_size_valid(void) |
225 | { | 225 | { |
226 | return write_cluster_size == 0 || write_cluster_size == 32 || | 226 | if (write_cluster_size == 1 || write_cluster_size > 128) |
227 | write_cluster_size == 64 || write_cluster_size == 128; | 227 | return false; |
228 | |||
229 | return !(write_cluster_size & (write_cluster_size - 1)); | ||
228 | } | 230 | } |
diff --git a/drivers/s390/block/scm_drv.c b/drivers/s390/block/scm_drv.c index 9fa0a908607b..c98cf52d78d1 100644 --- a/drivers/s390/block/scm_drv.c +++ b/drivers/s390/block/scm_drv.c | |||
@@ -13,12 +13,23 @@ | |||
13 | #include <asm/eadm.h> | 13 | #include <asm/eadm.h> |
14 | #include "scm_blk.h" | 14 | #include "scm_blk.h" |
15 | 15 | ||
16 | static void notify(struct scm_device *scmdev) | 16 | static void scm_notify(struct scm_device *scmdev, enum scm_event event) |
17 | { | 17 | { |
18 | pr_info("%lu: The capabilities of the SCM increment changed\n", | 18 | struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev); |
19 | (unsigned long) scmdev->address); | 19 | |
20 | SCM_LOG(2, "State changed"); | 20 | switch (event) { |
21 | SCM_LOG_STATE(2, scmdev); | 21 | case SCM_CHANGE: |
22 | pr_info("%lx: The capabilities of the SCM increment changed\n", | ||
23 | (unsigned long) scmdev->address); | ||
24 | SCM_LOG(2, "State changed"); | ||
25 | SCM_LOG_STATE(2, scmdev); | ||
26 | break; | ||
27 | case SCM_AVAIL: | ||
28 | SCM_LOG(2, "Increment available"); | ||
29 | SCM_LOG_STATE(2, scmdev); | ||
30 | scm_blk_set_available(bdev); | ||
31 | break; | ||
32 | } | ||
22 | } | 33 | } |
23 | 34 | ||
24 | static int scm_probe(struct scm_device *scmdev) | 35 | static int scm_probe(struct scm_device *scmdev) |
@@ -64,7 +75,7 @@ static struct scm_driver scm_drv = { | |||
64 | .name = "scm_block", | 75 | .name = "scm_block", |
65 | .owner = THIS_MODULE, | 76 | .owner = THIS_MODULE, |
66 | }, | 77 | }, |
67 | .notify = notify, | 78 | .notify = scm_notify, |
68 | .probe = scm_probe, | 79 | .probe = scm_probe, |
69 | .remove = scm_remove, | 80 | .remove = scm_remove, |
70 | .handler = scm_blk_irq, | 81 | .handler = scm_blk_irq, |
diff --git a/drivers/s390/char/con3215.c b/drivers/s390/char/con3215.c index 7b00fa634d40..eb5d22795c47 100644 --- a/drivers/s390/char/con3215.c +++ b/drivers/s390/char/con3215.c | |||
@@ -502,7 +502,7 @@ static void raw3215_make_room(struct raw3215_info *raw, unsigned int length) | |||
502 | raw3215_try_io(raw); | 502 | raw3215_try_io(raw); |
503 | raw->flags &= ~RAW3215_FLUSHING; | 503 | raw->flags &= ~RAW3215_FLUSHING; |
504 | #ifdef CONFIG_TN3215_CONSOLE | 504 | #ifdef CONFIG_TN3215_CONSOLE |
505 | wait_cons_dev(); | 505 | ccw_device_wait_idle(raw->cdev); |
506 | #endif | 506 | #endif |
507 | /* Enough room freed up ? */ | 507 | /* Enough room freed up ? */ |
508 | if (RAW3215_BUFFER_SIZE - raw->count >= length) | 508 | if (RAW3215_BUFFER_SIZE - raw->count >= length) |
@@ -858,7 +858,7 @@ static void con3215_flush(void) | |||
858 | raw = raw3215[0]; /* console 3215 is the first one */ | 858 | raw = raw3215[0]; /* console 3215 is the first one */ |
859 | if (raw->port.flags & ASYNC_SUSPENDED) | 859 | if (raw->port.flags & ASYNC_SUSPENDED) |
860 | /* The console is still frozen for suspend. */ | 860 | /* The console is still frozen for suspend. */ |
861 | if (ccw_device_force_console()) | 861 | if (ccw_device_force_console(raw->cdev)) |
862 | /* Forcing didn't work, no panic message .. */ | 862 | /* Forcing didn't work, no panic message .. */ |
863 | return; | 863 | return; |
864 | spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); | 864 | spin_lock_irqsave(get_ccwdev_lock(raw->cdev), flags); |
diff --git a/drivers/s390/char/monreader.c b/drivers/s390/char/monreader.c index f4ff515db251..0da3ae3cd63b 100644 --- a/drivers/s390/char/monreader.c +++ b/drivers/s390/char/monreader.c | |||
@@ -174,8 +174,7 @@ static void mon_free_mem(struct mon_private *monpriv) | |||
174 | int i; | 174 | int i; |
175 | 175 | ||
176 | for (i = 0; i < MON_MSGLIM; i++) | 176 | for (i = 0; i < MON_MSGLIM; i++) |
177 | if (monpriv->msg_array[i]) | 177 | kfree(monpriv->msg_array[i]); |
178 | kfree(monpriv->msg_array[i]); | ||
179 | kfree(monpriv); | 178 | kfree(monpriv); |
180 | } | 179 | } |
181 | 180 | ||
diff --git a/drivers/s390/char/raw3270.c b/drivers/s390/char/raw3270.c index 4c9030a5b9f2..24a08e8f19e1 100644 --- a/drivers/s390/char/raw3270.c +++ b/drivers/s390/char/raw3270.c | |||
@@ -796,7 +796,7 @@ struct raw3270 __init *raw3270_setup_console(struct ccw_device *cdev) | |||
796 | do { | 796 | do { |
797 | __raw3270_reset_device(rp); | 797 | __raw3270_reset_device(rp); |
798 | while (!raw3270_state_final(rp)) { | 798 | while (!raw3270_state_final(rp)) { |
799 | wait_cons_dev(); | 799 | ccw_device_wait_idle(rp->cdev); |
800 | barrier(); | 800 | barrier(); |
801 | } | 801 | } |
802 | } while (rp->state != RAW3270_STATE_READY); | 802 | } while (rp->state != RAW3270_STATE_READY); |
@@ -810,7 +810,7 @@ raw3270_wait_cons_dev(struct raw3270 *rp) | |||
810 | unsigned long flags; | 810 | unsigned long flags; |
811 | 811 | ||
812 | spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); | 812 | spin_lock_irqsave(get_ccwdev_lock(rp->cdev), flags); |
813 | wait_cons_dev(); | 813 | ccw_device_wait_idle(rp->cdev); |
814 | spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); | 814 | spin_unlock_irqrestore(get_ccwdev_lock(rp->cdev), flags); |
815 | } | 815 | } |
816 | 816 | ||
@@ -1274,7 +1274,7 @@ void raw3270_pm_unfreeze(struct raw3270_view *view) | |||
1274 | 1274 | ||
1275 | rp = view->dev; | 1275 | rp = view->dev; |
1276 | if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) | 1276 | if (rp && test_bit(RAW3270_FLAGS_FROZEN, &rp->flags)) |
1277 | ccw_device_force_console(); | 1277 | ccw_device_force_console(rp->cdev); |
1278 | #endif | 1278 | #endif |
1279 | } | 1279 | } |
1280 | 1280 | ||
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index 30a2255389e5..178836ec252b 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c | |||
@@ -561,6 +561,8 @@ static void __init sclp_add_standby_memory(void) | |||
561 | add_memory_merged(0); | 561 | add_memory_merged(0); |
562 | } | 562 | } |
563 | 563 | ||
564 | #define MEM_SCT_SIZE (1UL << SECTION_SIZE_BITS) | ||
565 | |||
564 | static void __init insert_increment(u16 rn, int standby, int assigned) | 566 | static void __init insert_increment(u16 rn, int standby, int assigned) |
565 | { | 567 | { |
566 | struct memory_increment *incr, *new_incr; | 568 | struct memory_increment *incr, *new_incr; |
@@ -573,7 +575,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned) | |||
573 | new_incr->rn = rn; | 575 | new_incr->rn = rn; |
574 | new_incr->standby = standby; | 576 | new_incr->standby = standby; |
575 | if (!standby) | 577 | if (!standby) |
576 | new_incr->usecount = 1; | 578 | new_incr->usecount = rzm > MEM_SCT_SIZE ? rzm/MEM_SCT_SIZE : 1; |
577 | last_rn = 0; | 579 | last_rn = 0; |
578 | prev = &sclp_mem_list; | 580 | prev = &sclp_mem_list; |
579 | list_for_each_entry(incr, &sclp_mem_list, list) { | 581 | list_for_each_entry(incr, &sclp_mem_list, list) { |
@@ -627,6 +629,8 @@ static int __init sclp_detect_standby_memory(void) | |||
627 | struct read_storage_sccb *sccb; | 629 | struct read_storage_sccb *sccb; |
628 | int i, id, assigned, rc; | 630 | int i, id, assigned, rc; |
629 | 631 | ||
632 | if (OLDMEM_BASE) /* No standby memory in kdump mode */ | ||
633 | return 0; | ||
630 | if (!early_read_info_sccb_valid) | 634 | if (!early_read_info_sccb_valid) |
631 | return 0; | 635 | return 0; |
632 | if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) | 636 | if ((sclp_facilities & 0xe00000000000ULL) != 0xe00000000000ULL) |
diff --git a/drivers/s390/char/tty3270.c b/drivers/s390/char/tty3270.c index b907dba24025..cee69dac3e18 100644 --- a/drivers/s390/char/tty3270.c +++ b/drivers/s390/char/tty3270.c | |||
@@ -915,7 +915,7 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) | |||
915 | int i, rc; | 915 | int i, rc; |
916 | 916 | ||
917 | /* Check if the tty3270 is already there. */ | 917 | /* Check if the tty3270 is already there. */ |
918 | view = raw3270_find_view(&tty3270_fn, tty->index); | 918 | view = raw3270_find_view(&tty3270_fn, tty->index + RAW3270_FIRSTMINOR); |
919 | if (!IS_ERR(view)) { | 919 | if (!IS_ERR(view)) { |
920 | tp = container_of(view, struct tty3270, view); | 920 | tp = container_of(view, struct tty3270, view); |
921 | tty->driver_data = tp; | 921 | tty->driver_data = tp; |
@@ -927,15 +927,16 @@ static int tty3270_install(struct tty_driver *driver, struct tty_struct *tty) | |||
927 | tp->inattr = TF_INPUT; | 927 | tp->inattr = TF_INPUT; |
928 | return tty_port_install(&tp->port, driver, tty); | 928 | return tty_port_install(&tp->port, driver, tty); |
929 | } | 929 | } |
930 | if (tty3270_max_index < tty->index) | 930 | if (tty3270_max_index < tty->index + 1) |
931 | tty3270_max_index = tty->index; | 931 | tty3270_max_index = tty->index + 1; |
932 | 932 | ||
933 | /* Allocate tty3270 structure on first open. */ | 933 | /* Allocate tty3270 structure on first open. */ |
934 | tp = tty3270_alloc_view(); | 934 | tp = tty3270_alloc_view(); |
935 | if (IS_ERR(tp)) | 935 | if (IS_ERR(tp)) |
936 | return PTR_ERR(tp); | 936 | return PTR_ERR(tp); |
937 | 937 | ||
938 | rc = raw3270_add_view(&tp->view, &tty3270_fn, tty->index); | 938 | rc = raw3270_add_view(&tp->view, &tty3270_fn, |
939 | tty->index + RAW3270_FIRSTMINOR); | ||
939 | if (rc) { | 940 | if (rc) { |
940 | tty3270_free_view(tp); | 941 | tty3270_free_view(tp); |
941 | return rc; | 942 | return rc; |
@@ -1846,12 +1847,12 @@ static const struct tty_operations tty3270_ops = { | |||
1846 | 1847 | ||
1847 | void tty3270_create_cb(int minor) | 1848 | void tty3270_create_cb(int minor) |
1848 | { | 1849 | { |
1849 | tty_register_device(tty3270_driver, minor, NULL); | 1850 | tty_register_device(tty3270_driver, minor - RAW3270_FIRSTMINOR, NULL); |
1850 | } | 1851 | } |
1851 | 1852 | ||
1852 | void tty3270_destroy_cb(int minor) | 1853 | void tty3270_destroy_cb(int minor) |
1853 | { | 1854 | { |
1854 | tty_unregister_device(tty3270_driver, minor); | 1855 | tty_unregister_device(tty3270_driver, minor - RAW3270_FIRSTMINOR); |
1855 | } | 1856 | } |
1856 | 1857 | ||
1857 | struct raw3270_notifier tty3270_notifier = | 1858 | struct raw3270_notifier tty3270_notifier = |
@@ -1884,7 +1885,8 @@ static int __init tty3270_init(void) | |||
1884 | driver->driver_name = "tty3270"; | 1885 | driver->driver_name = "tty3270"; |
1885 | driver->name = "3270/tty"; | 1886 | driver->name = "3270/tty"; |
1886 | driver->major = IBM_TTY3270_MAJOR; | 1887 | driver->major = IBM_TTY3270_MAJOR; |
1887 | driver->minor_start = 0; | 1888 | driver->minor_start = RAW3270_FIRSTMINOR; |
1889 | driver->name_base = RAW3270_FIRSTMINOR; | ||
1888 | driver->type = TTY_DRIVER_TYPE_SYSTEM; | 1890 | driver->type = TTY_DRIVER_TYPE_SYSTEM; |
1889 | driver->subtype = SYSTEM_TYPE_TTY; | 1891 | driver->subtype = SYSTEM_TYPE_TTY; |
1890 | driver->init_termios = tty_std_termios; | 1892 | driver->init_termios = tty_std_termios; |
diff --git a/drivers/s390/char/zcore.c b/drivers/s390/char/zcore.c index 1d61a01576d2..22820610022c 100644 --- a/drivers/s390/char/zcore.c +++ b/drivers/s390/char/zcore.c | |||
@@ -127,7 +127,7 @@ static int memcpy_hsa(void *dest, unsigned long src, size_t count, int mode) | |||
127 | } | 127 | } |
128 | if (mode == TO_USER) { | 128 | if (mode == TO_USER) { |
129 | if (copy_to_user((__force __user void*) dest + offs, buf, | 129 | if (copy_to_user((__force __user void*) dest + offs, buf, |
130 | PAGE_SIZE)) | 130 | count - offs)) |
131 | return -EFAULT; | 131 | return -EFAULT; |
132 | } else | 132 | } else |
133 | memcpy(dest + offs, buf, count - offs); | 133 | memcpy(dest + offs, buf, count - offs); |
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c index 50ad5fdd815d..21fabc6d5a9c 100644 --- a/drivers/s390/cio/chp.c +++ b/drivers/s390/cio/chp.c | |||
@@ -377,6 +377,26 @@ static void chp_release(struct device *dev) | |||
377 | } | 377 | } |
378 | 378 | ||
379 | /** | 379 | /** |
380 | * chp_update_desc - update channel-path description | ||
381 | * @chp - channel-path | ||
382 | * | ||
383 | * Update the channel-path description of the specified channel-path. | ||
384 | * Return zero on success, non-zero otherwise. | ||
385 | */ | ||
386 | int chp_update_desc(struct channel_path *chp) | ||
387 | { | ||
388 | int rc; | ||
389 | |||
390 | rc = chsc_determine_base_channel_path_desc(chp->chpid, &chp->desc); | ||
391 | if (rc) | ||
392 | return rc; | ||
393 | |||
394 | rc = chsc_determine_fmt1_channel_path_desc(chp->chpid, &chp->desc_fmt1); | ||
395 | |||
396 | return rc; | ||
397 | } | ||
398 | |||
399 | /** | ||
380 | * chp_new - register a new channel-path | 400 | * chp_new - register a new channel-path |
381 | * @chpid - channel-path ID | 401 | * @chpid - channel-path ID |
382 | * | 402 | * |
@@ -403,7 +423,7 @@ int chp_new(struct chp_id chpid) | |||
403 | mutex_init(&chp->lock); | 423 | mutex_init(&chp->lock); |
404 | 424 | ||
405 | /* Obtain channel path description and fill it in. */ | 425 | /* Obtain channel path description and fill it in. */ |
406 | ret = chsc_determine_base_channel_path_desc(chpid, &chp->desc); | 426 | ret = chp_update_desc(chp); |
407 | if (ret) | 427 | if (ret) |
408 | goto out_free; | 428 | goto out_free; |
409 | if ((chp->desc.flags & 0x80) == 0) { | 429 | if ((chp->desc.flags & 0x80) == 0) { |
diff --git a/drivers/s390/cio/chp.h b/drivers/s390/cio/chp.h index e1399dbee834..9284b785a06f 100644 --- a/drivers/s390/cio/chp.h +++ b/drivers/s390/cio/chp.h | |||
@@ -44,6 +44,7 @@ struct channel_path { | |||
44 | struct mutex lock; /* Serialize access to below members. */ | 44 | struct mutex lock; /* Serialize access to below members. */ |
45 | int state; | 45 | int state; |
46 | struct channel_path_desc desc; | 46 | struct channel_path_desc desc; |
47 | struct channel_path_desc_fmt1 desc_fmt1; | ||
47 | /* Channel-measurement related stuff: */ | 48 | /* Channel-measurement related stuff: */ |
48 | int cmg; | 49 | int cmg; |
49 | int shared; | 50 | int shared; |
@@ -62,6 +63,7 @@ int chp_is_registered(struct chp_id chpid); | |||
62 | void *chp_get_chp_desc(struct chp_id chpid); | 63 | void *chp_get_chp_desc(struct chp_id chpid); |
63 | void chp_remove_cmg_attr(struct channel_path *chp); | 64 | void chp_remove_cmg_attr(struct channel_path *chp); |
64 | int chp_add_cmg_attr(struct channel_path *chp); | 65 | int chp_add_cmg_attr(struct channel_path *chp); |
66 | int chp_update_desc(struct channel_path *chp); | ||
65 | int chp_new(struct chp_id chpid); | 67 | int chp_new(struct chp_id chpid); |
66 | void chp_cfg_schedule(struct chp_id chpid, int configure); | 68 | void chp_cfg_schedule(struct chp_id chpid, int configure); |
67 | void chp_cfg_cancel_deconfigure(struct chp_id chpid); | 69 | void chp_cfg_cancel_deconfigure(struct chp_id chpid); |
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c index 31ceef1beb8b..8ea7d9b2c671 100644 --- a/drivers/s390/cio/chsc.c +++ b/drivers/s390/cio/chsc.c | |||
@@ -376,7 +376,7 @@ static void chsc_process_sei_chp_avail(struct chsc_sei_nt0_area *sei_area) | |||
376 | continue; | 376 | continue; |
377 | } | 377 | } |
378 | mutex_lock(&chp->lock); | 378 | mutex_lock(&chp->lock); |
379 | chsc_determine_base_channel_path_desc(chpid, &chp->desc); | 379 | chp_update_desc(chp); |
380 | mutex_unlock(&chp->lock); | 380 | mutex_unlock(&chp->lock); |
381 | } | 381 | } |
382 | } | 382 | } |
@@ -433,6 +433,20 @@ static void chsc_process_sei_scm_change(struct chsc_sei_nt0_area *sei_area) | |||
433 | " failed (rc=%d).\n", ret); | 433 | " failed (rc=%d).\n", ret); |
434 | } | 434 | } |
435 | 435 | ||
436 | static void chsc_process_sei_scm_avail(struct chsc_sei_nt0_area *sei_area) | ||
437 | { | ||
438 | int ret; | ||
439 | |||
440 | CIO_CRW_EVENT(4, "chsc: scm available information\n"); | ||
441 | if (sei_area->rs != 7) | ||
442 | return; | ||
443 | |||
444 | ret = scm_process_availability_information(); | ||
445 | if (ret) | ||
446 | CIO_CRW_EVENT(0, "chsc: process availability information" | ||
447 | " failed (rc=%d).\n", ret); | ||
448 | } | ||
449 | |||
436 | static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) | 450 | static void chsc_process_sei_nt2(struct chsc_sei_nt2_area *sei_area) |
437 | { | 451 | { |
438 | switch (sei_area->cc) { | 452 | switch (sei_area->cc) { |
@@ -468,6 +482,9 @@ static void chsc_process_sei_nt0(struct chsc_sei_nt0_area *sei_area) | |||
468 | case 12: /* scm change notification */ | 482 | case 12: /* scm change notification */ |
469 | chsc_process_sei_scm_change(sei_area); | 483 | chsc_process_sei_scm_change(sei_area); |
470 | break; | 484 | break; |
485 | case 14: /* scm available notification */ | ||
486 | chsc_process_sei_scm_avail(sei_area); | ||
487 | break; | ||
471 | default: /* other stuff */ | 488 | default: /* other stuff */ |
472 | CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", | 489 | CIO_CRW_EVENT(2, "chsc: sei nt0 unhandled cc=%d\n", |
473 | sei_area->cc); | 490 | sei_area->cc); |
@@ -614,8 +631,8 @@ int chsc_chp_vary(struct chp_id chpid, int on) | |||
614 | * Redo PathVerification on the devices the chpid connects to | 631 | * Redo PathVerification on the devices the chpid connects to |
615 | */ | 632 | */ |
616 | if (on) { | 633 | if (on) { |
617 | /* Try to update the channel path descritor. */ | 634 | /* Try to update the channel path description. */ |
618 | chsc_determine_base_channel_path_desc(chpid, &chp->desc); | 635 | chp_update_desc(chp); |
619 | for_each_subchannel_staged(s390_subchannel_vary_chpid_on, | 636 | for_each_subchannel_staged(s390_subchannel_vary_chpid_on, |
620 | __s390_vary_chpid_on, &chpid); | 637 | __s390_vary_chpid_on, &chpid); |
621 | } else | 638 | } else |
@@ -808,9 +825,10 @@ int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid, | |||
808 | { | 825 | { |
809 | struct chsc_response_struct *chsc_resp; | 826 | struct chsc_response_struct *chsc_resp; |
810 | struct chsc_scpd *scpd_area; | 827 | struct chsc_scpd *scpd_area; |
828 | unsigned long flags; | ||
811 | int ret; | 829 | int ret; |
812 | 830 | ||
813 | spin_lock_irq(&chsc_page_lock); | 831 | spin_lock_irqsave(&chsc_page_lock, flags); |
814 | scpd_area = chsc_page; | 832 | scpd_area = chsc_page; |
815 | ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area); | 833 | ret = chsc_determine_channel_path_desc(chpid, 0, 0, 1, 0, scpd_area); |
816 | if (ret) | 834 | if (ret) |
@@ -818,7 +836,7 @@ int chsc_determine_fmt1_channel_path_desc(struct chp_id chpid, | |||
818 | chsc_resp = (void *)&scpd_area->response; | 836 | chsc_resp = (void *)&scpd_area->response; |
819 | memcpy(desc, &chsc_resp->data, sizeof(*desc)); | 837 | memcpy(desc, &chsc_resp->data, sizeof(*desc)); |
820 | out: | 838 | out: |
821 | spin_unlock_irq(&chsc_page_lock); | 839 | spin_unlock_irqrestore(&chsc_page_lock, flags); |
822 | return ret; | 840 | return ret; |
823 | } | 841 | } |
824 | 842 | ||
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h index 227e05f674b3..349d5fc47196 100644 --- a/drivers/s390/cio/chsc.h +++ b/drivers/s390/cio/chsc.h | |||
@@ -156,8 +156,10 @@ int chsc_scm_info(struct chsc_scm_info *scm_area, u64 token); | |||
156 | 156 | ||
157 | #ifdef CONFIG_SCM_BUS | 157 | #ifdef CONFIG_SCM_BUS |
158 | int scm_update_information(void); | 158 | int scm_update_information(void); |
159 | int scm_process_availability_information(void); | ||
159 | #else /* CONFIG_SCM_BUS */ | 160 | #else /* CONFIG_SCM_BUS */ |
160 | static inline int scm_update_information(void) { return 0; } | 161 | static inline int scm_update_information(void) { return 0; } |
162 | static inline int scm_process_availability_information(void) { return 0; } | ||
161 | #endif /* CONFIG_SCM_BUS */ | 163 | #endif /* CONFIG_SCM_BUS */ |
162 | 164 | ||
163 | 165 | ||
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c index 986ef6a92a41..935d80b4e9ce 100644 --- a/drivers/s390/cio/cio.c +++ b/drivers/s390/cio/cio.c | |||
@@ -471,15 +471,6 @@ int cio_disable_subchannel(struct subchannel *sch) | |||
471 | } | 471 | } |
472 | EXPORT_SYMBOL_GPL(cio_disable_subchannel); | 472 | EXPORT_SYMBOL_GPL(cio_disable_subchannel); |
473 | 473 | ||
474 | int cio_create_sch_lock(struct subchannel *sch) | ||
475 | { | ||
476 | sch->lock = kmalloc(sizeof(spinlock_t), GFP_KERNEL); | ||
477 | if (!sch->lock) | ||
478 | return -ENOMEM; | ||
479 | spin_lock_init(sch->lock); | ||
480 | return 0; | ||
481 | } | ||
482 | |||
483 | static int cio_check_devno_blacklisted(struct subchannel *sch) | 474 | static int cio_check_devno_blacklisted(struct subchannel *sch) |
484 | { | 475 | { |
485 | if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { | 476 | if (is_blacklisted(sch->schid.ssid, sch->schib.pmcw.dev)) { |
@@ -536,32 +527,19 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) | |||
536 | sprintf(dbf_txt, "valsch%x", schid.sch_no); | 527 | sprintf(dbf_txt, "valsch%x", schid.sch_no); |
537 | CIO_TRACE_EVENT(4, dbf_txt); | 528 | CIO_TRACE_EVENT(4, dbf_txt); |
538 | 529 | ||
539 | /* Nuke all fields. */ | ||
540 | memset(sch, 0, sizeof(struct subchannel)); | ||
541 | |||
542 | sch->schid = schid; | ||
543 | if (cio_is_console(schid)) { | ||
544 | sch->lock = cio_get_console_lock(); | ||
545 | } else { | ||
546 | err = cio_create_sch_lock(sch); | ||
547 | if (err) | ||
548 | goto out; | ||
549 | } | ||
550 | mutex_init(&sch->reg_mutex); | ||
551 | |||
552 | /* | 530 | /* |
553 | * The first subchannel that is not-operational (ccode==3) | 531 | * The first subchannel that is not-operational (ccode==3) |
554 | * indicates that there aren't any more devices available. | 532 | * indicates that there aren't any more devices available. |
555 | * If stsch gets an exception, it means the current subchannel set | 533 | * If stsch gets an exception, it means the current subchannel set |
556 | * is not valid. | 534 | * is not valid. |
557 | */ | 535 | */ |
558 | ccode = stsch_err (schid, &sch->schib); | 536 | ccode = stsch_err(schid, &sch->schib); |
559 | if (ccode) { | 537 | if (ccode) { |
560 | err = (ccode == 3) ? -ENXIO : ccode; | 538 | err = (ccode == 3) ? -ENXIO : ccode; |
561 | goto out; | 539 | goto out; |
562 | } | 540 | } |
563 | /* Copy subchannel type from path management control word. */ | ||
564 | sch->st = sch->schib.pmcw.st; | 541 | sch->st = sch->schib.pmcw.st; |
542 | sch->schid = schid; | ||
565 | 543 | ||
566 | switch (sch->st) { | 544 | switch (sch->st) { |
567 | case SUBCHANNEL_TYPE_IO: | 545 | case SUBCHANNEL_TYPE_IO: |
@@ -578,11 +556,7 @@ int cio_validate_subchannel(struct subchannel *sch, struct subchannel_id schid) | |||
578 | 556 | ||
579 | CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", | 557 | CIO_MSG_EVENT(4, "Subchannel 0.%x.%04x reports subchannel type %04X\n", |
580 | sch->schid.ssid, sch->schid.sch_no, sch->st); | 558 | sch->schid.ssid, sch->schid.sch_no, sch->st); |
581 | return 0; | ||
582 | out: | 559 | out: |
583 | if (!cio_is_console(schid)) | ||
584 | kfree(sch->lock); | ||
585 | sch->lock = NULL; | ||
586 | return err; | 560 | return err; |
587 | } | 561 | } |
588 | 562 | ||
@@ -650,15 +624,13 @@ void __irq_entry do_IRQ(struct pt_regs *regs) | |||
650 | } | 624 | } |
651 | 625 | ||
652 | #ifdef CONFIG_CCW_CONSOLE | 626 | #ifdef CONFIG_CCW_CONSOLE |
653 | static struct subchannel console_subchannel; | 627 | static struct subchannel *console_sch; |
654 | static struct io_subchannel_private console_priv; | ||
655 | static int console_subchannel_in_use; | ||
656 | 628 | ||
657 | /* | 629 | /* |
658 | * Use cio_tsch to update the subchannel status and call the interrupt handler | 630 | * Use cio_tsch to update the subchannel status and call the interrupt handler |
659 | * if status had been pending. Called with the console_subchannel lock. | 631 | * if status had been pending. Called with the subchannel's lock held. |
660 | */ | 632 | */ |
661 | static void cio_tsch(struct subchannel *sch) | 633 | void cio_tsch(struct subchannel *sch) |
662 | { | 634 | { |
663 | struct irb *irb; | 635 | struct irb *irb; |
664 | int irq_context; | 636 | int irq_context; |
@@ -675,6 +647,7 @@ static void cio_tsch(struct subchannel *sch) | |||
675 | local_bh_disable(); | 647 | local_bh_disable(); |
676 | irq_enter(); | 648 | irq_enter(); |
677 | } | 649 | } |
650 | kstat_incr_irqs_this_cpu(IO_INTERRUPT, NULL); | ||
678 | if (sch->driver && sch->driver->irq) | 651 | if (sch->driver && sch->driver->irq) |
679 | sch->driver->irq(sch); | 652 | sch->driver->irq(sch); |
680 | else | 653 | else |
@@ -685,135 +658,90 @@ static void cio_tsch(struct subchannel *sch) | |||
685 | } | 658 | } |
686 | } | 659 | } |
687 | 660 | ||
688 | void *cio_get_console_priv(void) | 661 | static int cio_test_for_console(struct subchannel_id schid, void *data) |
689 | { | ||
690 | return &console_priv; | ||
691 | } | ||
692 | |||
693 | /* | ||
694 | * busy wait for the next interrupt on the console | ||
695 | */ | ||
696 | void wait_cons_dev(void) | ||
697 | { | 662 | { |
698 | if (!console_subchannel_in_use) | 663 | struct schib schib; |
699 | return; | ||
700 | |||
701 | while (1) { | ||
702 | cio_tsch(&console_subchannel); | ||
703 | if (console_subchannel.schib.scsw.cmd.actl == 0) | ||
704 | break; | ||
705 | udelay_simple(100); | ||
706 | } | ||
707 | } | ||
708 | 664 | ||
709 | static int | 665 | if (stsch_err(schid, &schib) != 0) |
710 | cio_test_for_console(struct subchannel_id schid, void *data) | ||
711 | { | ||
712 | if (stsch_err(schid, &console_subchannel.schib) != 0) | ||
713 | return -ENXIO; | 666 | return -ENXIO; |
714 | if ((console_subchannel.schib.pmcw.st == SUBCHANNEL_TYPE_IO) && | 667 | if ((schib.pmcw.st == SUBCHANNEL_TYPE_IO) && schib.pmcw.dnv && |
715 | console_subchannel.schib.pmcw.dnv && | 668 | (schib.pmcw.dev == console_devno)) { |
716 | (console_subchannel.schib.pmcw.dev == console_devno)) { | ||
717 | console_irq = schid.sch_no; | 669 | console_irq = schid.sch_no; |
718 | return 1; /* found */ | 670 | return 1; /* found */ |
719 | } | 671 | } |
720 | return 0; | 672 | return 0; |
721 | } | 673 | } |
722 | 674 | ||
723 | 675 | static int cio_get_console_sch_no(void) | |
724 | static int | ||
725 | cio_get_console_sch_no(void) | ||
726 | { | 676 | { |
727 | struct subchannel_id schid; | 677 | struct subchannel_id schid; |
728 | 678 | struct schib schib; | |
679 | |||
729 | init_subchannel_id(&schid); | 680 | init_subchannel_id(&schid); |
730 | if (console_irq != -1) { | 681 | if (console_irq != -1) { |
731 | /* VM provided us with the irq number of the console. */ | 682 | /* VM provided us with the irq number of the console. */ |
732 | schid.sch_no = console_irq; | 683 | schid.sch_no = console_irq; |
733 | if (stsch_err(schid, &console_subchannel.schib) != 0 || | 684 | if (stsch_err(schid, &schib) != 0 || |
734 | (console_subchannel.schib.pmcw.st != SUBCHANNEL_TYPE_IO) || | 685 | (schib.pmcw.st != SUBCHANNEL_TYPE_IO) || !schib.pmcw.dnv) |
735 | !console_subchannel.schib.pmcw.dnv) | ||
736 | return -1; | 686 | return -1; |
737 | console_devno = console_subchannel.schib.pmcw.dev; | 687 | console_devno = schib.pmcw.dev; |
738 | } else if (console_devno != -1) { | 688 | } else if (console_devno != -1) { |
739 | /* At least the console device number is known. */ | 689 | /* At least the console device number is known. */ |
740 | for_each_subchannel(cio_test_for_console, NULL); | 690 | for_each_subchannel(cio_test_for_console, NULL); |
741 | if (console_irq == -1) | ||
742 | return -1; | ||
743 | } else { | ||
744 | /* unlike in 2.4, we cannot autoprobe here, since | ||
745 | * the channel subsystem is not fully initialized. | ||
746 | * With some luck, the HWC console can take over */ | ||
747 | return -1; | ||
748 | } | 691 | } |
749 | return console_irq; | 692 | return console_irq; |
750 | } | 693 | } |
751 | 694 | ||
752 | struct subchannel * | 695 | struct subchannel *cio_probe_console(void) |
753 | cio_probe_console(void) | ||
754 | { | 696 | { |
755 | int sch_no, ret; | ||
756 | struct subchannel_id schid; | 697 | struct subchannel_id schid; |
698 | struct subchannel *sch; | ||
699 | int sch_no, ret; | ||
757 | 700 | ||
758 | if (xchg(&console_subchannel_in_use, 1) != 0) | ||
759 | return ERR_PTR(-EBUSY); | ||
760 | sch_no = cio_get_console_sch_no(); | 701 | sch_no = cio_get_console_sch_no(); |
761 | if (sch_no == -1) { | 702 | if (sch_no == -1) { |
762 | console_subchannel_in_use = 0; | ||
763 | pr_warning("No CCW console was found\n"); | 703 | pr_warning("No CCW console was found\n"); |
764 | return ERR_PTR(-ENODEV); | 704 | return ERR_PTR(-ENODEV); |
765 | } | 705 | } |
766 | memset(&console_subchannel, 0, sizeof(struct subchannel)); | ||
767 | init_subchannel_id(&schid); | 706 | init_subchannel_id(&schid); |
768 | schid.sch_no = sch_no; | 707 | schid.sch_no = sch_no; |
769 | ret = cio_validate_subchannel(&console_subchannel, schid); | 708 | sch = css_alloc_subchannel(schid); |
770 | if (ret) { | 709 | if (IS_ERR(sch)) |
771 | console_subchannel_in_use = 0; | 710 | return sch; |
772 | return ERR_PTR(-ENODEV); | ||
773 | } | ||
774 | 711 | ||
775 | /* | ||
776 | * enable console I/O-interrupt subclass | ||
777 | */ | ||
778 | isc_register(CONSOLE_ISC); | 712 | isc_register(CONSOLE_ISC); |
779 | console_subchannel.config.isc = CONSOLE_ISC; | 713 | sch->config.isc = CONSOLE_ISC; |
780 | console_subchannel.config.intparm = (u32)(addr_t)&console_subchannel; | 714 | sch->config.intparm = (u32)(addr_t)sch; |
781 | ret = cio_commit_config(&console_subchannel); | 715 | ret = cio_commit_config(sch); |
782 | if (ret) { | 716 | if (ret) { |
783 | isc_unregister(CONSOLE_ISC); | 717 | isc_unregister(CONSOLE_ISC); |
784 | console_subchannel_in_use = 0; | 718 | put_device(&sch->dev); |
785 | return ERR_PTR(ret); | 719 | return ERR_PTR(ret); |
786 | } | 720 | } |
787 | return &console_subchannel; | 721 | console_sch = sch; |
788 | } | 722 | return sch; |
789 | |||
790 | void | ||
791 | cio_release_console(void) | ||
792 | { | ||
793 | console_subchannel.config.intparm = 0; | ||
794 | cio_commit_config(&console_subchannel); | ||
795 | isc_unregister(CONSOLE_ISC); | ||
796 | console_subchannel_in_use = 0; | ||
797 | } | 723 | } |
798 | 724 | ||
799 | /* Bah... hack to catch console special sausages. */ | 725 | int cio_is_console(struct subchannel_id schid) |
800 | int | ||
801 | cio_is_console(struct subchannel_id schid) | ||
802 | { | 726 | { |
803 | if (!console_subchannel_in_use) | 727 | if (!console_sch) |
804 | return 0; | 728 | return 0; |
805 | return schid_equal(&schid, &console_subchannel.schid); | 729 | return schid_equal(&schid, &console_sch->schid); |
806 | } | 730 | } |
807 | 731 | ||
808 | struct subchannel * | 732 | void cio_register_early_subchannels(void) |
809 | cio_get_console_subchannel(void) | ||
810 | { | 733 | { |
811 | if (!console_subchannel_in_use) | 734 | int ret; |
812 | return NULL; | 735 | |
813 | return &console_subchannel; | 736 | if (!console_sch) |
737 | return; | ||
738 | |||
739 | ret = css_register_subchannel(console_sch); | ||
740 | if (ret) | ||
741 | put_device(&console_sch->dev); | ||
814 | } | 742 | } |
743 | #endif /* CONFIG_CCW_CONSOLE */ | ||
815 | 744 | ||
816 | #endif | ||
817 | static int | 745 | static int |
818 | __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) | 746 | __disable_subchannel_easy(struct subchannel_id schid, struct schib *schib) |
819 | { | 747 | { |
diff --git a/drivers/s390/cio/cio.h b/drivers/s390/cio/cio.h index 4a1ff5c2eb88..d62f5e7f3cf1 100644 --- a/drivers/s390/cio/cio.h +++ b/drivers/s390/cio/cio.h | |||
@@ -121,23 +121,18 @@ extern int cio_commit_config(struct subchannel *sch); | |||
121 | int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); | 121 | int cio_tm_start_key(struct subchannel *sch, struct tcw *tcw, u8 lpm, u8 key); |
122 | int cio_tm_intrg(struct subchannel *sch); | 122 | int cio_tm_intrg(struct subchannel *sch); |
123 | 123 | ||
124 | int cio_create_sch_lock(struct subchannel *); | ||
125 | void do_adapter_IO(u8 isc); | 124 | void do_adapter_IO(u8 isc); |
126 | void do_IRQ(struct pt_regs *); | 125 | void do_IRQ(struct pt_regs *); |
127 | 126 | ||
128 | /* Use with care. */ | 127 | /* Use with care. */ |
129 | #ifdef CONFIG_CCW_CONSOLE | 128 | #ifdef CONFIG_CCW_CONSOLE |
130 | extern struct subchannel *cio_probe_console(void); | 129 | extern struct subchannel *cio_probe_console(void); |
131 | extern void cio_release_console(void); | ||
132 | extern int cio_is_console(struct subchannel_id); | 130 | extern int cio_is_console(struct subchannel_id); |
133 | extern struct subchannel *cio_get_console_subchannel(void); | 131 | extern void cio_register_early_subchannels(void); |
134 | extern spinlock_t * cio_get_console_lock(void); | 132 | extern void cio_tsch(struct subchannel *sch); |
135 | extern void *cio_get_console_priv(void); | ||
136 | #else | 133 | #else |
137 | #define cio_is_console(schid) 0 | 134 | #define cio_is_console(schid) 0 |
138 | #define cio_get_console_subchannel() NULL | 135 | static inline void cio_register_early_subchannels(void) {} |
139 | #define cio_get_console_lock() NULL | ||
140 | #define cio_get_console_priv() NULL | ||
141 | #endif | 136 | #endif |
142 | 137 | ||
143 | #endif | 138 | #endif |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c index a239237d43f3..1ebe5d3ddebb 100644 --- a/drivers/s390/cio/css.c +++ b/drivers/s390/cio/css.c | |||
@@ -137,37 +137,53 @@ out: | |||
137 | 137 | ||
138 | static void css_sch_todo(struct work_struct *work); | 138 | static void css_sch_todo(struct work_struct *work); |
139 | 139 | ||
140 | static struct subchannel * | 140 | static int css_sch_create_locks(struct subchannel *sch) |
141 | css_alloc_subchannel(struct subchannel_id schid) | 141 | { |
142 | sch->lock = kmalloc(sizeof(*sch->lock), GFP_KERNEL); | ||
143 | if (!sch->lock) | ||
144 | return -ENOMEM; | ||
145 | |||
146 | spin_lock_init(sch->lock); | ||
147 | mutex_init(&sch->reg_mutex); | ||
148 | |||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | static void css_subchannel_release(struct device *dev) | ||
153 | { | ||
154 | struct subchannel *sch = to_subchannel(dev); | ||
155 | |||
156 | sch->config.intparm = 0; | ||
157 | cio_commit_config(sch); | ||
158 | kfree(sch->lock); | ||
159 | kfree(sch); | ||
160 | } | ||
161 | |||
162 | struct subchannel *css_alloc_subchannel(struct subchannel_id schid) | ||
142 | { | 163 | { |
143 | struct subchannel *sch; | 164 | struct subchannel *sch; |
144 | int ret; | 165 | int ret; |
145 | 166 | ||
146 | sch = kmalloc (sizeof (*sch), GFP_KERNEL | GFP_DMA); | 167 | sch = kzalloc(sizeof(*sch), GFP_KERNEL | GFP_DMA); |
147 | if (sch == NULL) | 168 | if (!sch) |
148 | return ERR_PTR(-ENOMEM); | 169 | return ERR_PTR(-ENOMEM); |
149 | ret = cio_validate_subchannel (sch, schid); | 170 | |
150 | if (ret < 0) { | 171 | ret = cio_validate_subchannel(sch, schid); |
151 | kfree(sch); | 172 | if (ret < 0) |
152 | return ERR_PTR(ret); | 173 | goto err; |
153 | } | 174 | |
175 | ret = css_sch_create_locks(sch); | ||
176 | if (ret) | ||
177 | goto err; | ||
178 | |||
154 | INIT_WORK(&sch->todo_work, css_sch_todo); | 179 | INIT_WORK(&sch->todo_work, css_sch_todo); |
180 | sch->dev.release = &css_subchannel_release; | ||
181 | device_initialize(&sch->dev); | ||
155 | return sch; | 182 | return sch; |
156 | } | ||
157 | |||
158 | static void | ||
159 | css_subchannel_release(struct device *dev) | ||
160 | { | ||
161 | struct subchannel *sch; | ||
162 | 183 | ||
163 | sch = to_subchannel(dev); | 184 | err: |
164 | if (!cio_is_console(sch->schid)) { | 185 | kfree(sch); |
165 | /* Reset intparm to zeroes. */ | 186 | return ERR_PTR(ret); |
166 | sch->config.intparm = 0; | ||
167 | cio_commit_config(sch); | ||
168 | kfree(sch->lock); | ||
169 | kfree(sch); | ||
170 | } | ||
171 | } | 187 | } |
172 | 188 | ||
173 | static int css_sch_device_register(struct subchannel *sch) | 189 | static int css_sch_device_register(struct subchannel *sch) |
@@ -177,7 +193,7 @@ static int css_sch_device_register(struct subchannel *sch) | |||
177 | mutex_lock(&sch->reg_mutex); | 193 | mutex_lock(&sch->reg_mutex); |
178 | dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid, | 194 | dev_set_name(&sch->dev, "0.%x.%04x", sch->schid.ssid, |
179 | sch->schid.sch_no); | 195 | sch->schid.sch_no); |
180 | ret = device_register(&sch->dev); | 196 | ret = device_add(&sch->dev); |
181 | mutex_unlock(&sch->reg_mutex); | 197 | mutex_unlock(&sch->reg_mutex); |
182 | return ret; | 198 | return ret; |
183 | } | 199 | } |
@@ -228,16 +244,11 @@ void css_update_ssd_info(struct subchannel *sch) | |||
228 | { | 244 | { |
229 | int ret; | 245 | int ret; |
230 | 246 | ||
231 | if (cio_is_console(sch->schid)) { | 247 | ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info); |
232 | /* Console is initialized too early for functions requiring | 248 | if (ret) |
233 | * memory allocation. */ | ||
234 | ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); | 249 | ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); |
235 | } else { | 250 | |
236 | ret = chsc_get_ssd_info(sch->schid, &sch->ssd_info); | 251 | ssd_register_chpids(&sch->ssd_info); |
237 | if (ret) | ||
238 | ssd_from_pmcw(&sch->ssd_info, &sch->schib.pmcw); | ||
239 | ssd_register_chpids(&sch->ssd_info); | ||
240 | } | ||
241 | } | 252 | } |
242 | 253 | ||
243 | static ssize_t type_show(struct device *dev, struct device_attribute *attr, | 254 | static ssize_t type_show(struct device *dev, struct device_attribute *attr, |
@@ -275,14 +286,13 @@ static const struct attribute_group *default_subch_attr_groups[] = { | |||
275 | NULL, | 286 | NULL, |
276 | }; | 287 | }; |
277 | 288 | ||
278 | static int css_register_subchannel(struct subchannel *sch) | 289 | int css_register_subchannel(struct subchannel *sch) |
279 | { | 290 | { |
280 | int ret; | 291 | int ret; |
281 | 292 | ||
282 | /* Initialize the subchannel structure */ | 293 | /* Initialize the subchannel structure */ |
283 | sch->dev.parent = &channel_subsystems[0]->device; | 294 | sch->dev.parent = &channel_subsystems[0]->device; |
284 | sch->dev.bus = &css_bus_type; | 295 | sch->dev.bus = &css_bus_type; |
285 | sch->dev.release = &css_subchannel_release; | ||
286 | sch->dev.groups = default_subch_attr_groups; | 296 | sch->dev.groups = default_subch_attr_groups; |
287 | /* | 297 | /* |
288 | * We don't want to generate uevents for I/O subchannels that don't | 298 | * We don't want to generate uevents for I/O subchannels that don't |
@@ -314,23 +324,19 @@ static int css_register_subchannel(struct subchannel *sch) | |||
314 | return ret; | 324 | return ret; |
315 | } | 325 | } |
316 | 326 | ||
317 | int css_probe_device(struct subchannel_id schid) | 327 | static int css_probe_device(struct subchannel_id schid) |
318 | { | 328 | { |
319 | int ret; | ||
320 | struct subchannel *sch; | 329 | struct subchannel *sch; |
330 | int ret; | ||
331 | |||
332 | sch = css_alloc_subchannel(schid); | ||
333 | if (IS_ERR(sch)) | ||
334 | return PTR_ERR(sch); | ||
321 | 335 | ||
322 | if (cio_is_console(schid)) | ||
323 | sch = cio_get_console_subchannel(); | ||
324 | else { | ||
325 | sch = css_alloc_subchannel(schid); | ||
326 | if (IS_ERR(sch)) | ||
327 | return PTR_ERR(sch); | ||
328 | } | ||
329 | ret = css_register_subchannel(sch); | 336 | ret = css_register_subchannel(sch); |
330 | if (ret) { | 337 | if (ret) |
331 | if (!cio_is_console(schid)) | 338 | put_device(&sch->dev); |
332 | put_device(&sch->dev); | 339 | |
333 | } | ||
334 | return ret; | 340 | return ret; |
335 | } | 341 | } |
336 | 342 | ||
@@ -770,7 +776,7 @@ static int __init setup_css(int nr) | |||
770 | css->pseudo_subchannel->dev.release = css_subchannel_release; | 776 | css->pseudo_subchannel->dev.release = css_subchannel_release; |
771 | dev_set_name(&css->pseudo_subchannel->dev, "defunct"); | 777 | dev_set_name(&css->pseudo_subchannel->dev, "defunct"); |
772 | mutex_init(&css->pseudo_subchannel->reg_mutex); | 778 | mutex_init(&css->pseudo_subchannel->reg_mutex); |
773 | ret = cio_create_sch_lock(css->pseudo_subchannel); | 779 | ret = css_sch_create_locks(css->pseudo_subchannel); |
774 | if (ret) { | 780 | if (ret) { |
775 | kfree(css->pseudo_subchannel); | 781 | kfree(css->pseudo_subchannel); |
776 | return ret; | 782 | return ret; |
@@ -870,8 +876,7 @@ static struct notifier_block css_power_notifier = { | |||
870 | 876 | ||
871 | /* | 877 | /* |
872 | * Now that the driver core is running, we can setup our channel subsystem. | 878 | * Now that the driver core is running, we can setup our channel subsystem. |
873 | * The struct subchannel's are created during probing (except for the | 879 | * The struct subchannel's are created during probing. |
874 | * static console subchannel). | ||
875 | */ | 880 | */ |
876 | static int __init css_bus_init(void) | 881 | static int __init css_bus_init(void) |
877 | { | 882 | { |
@@ -1050,6 +1055,8 @@ int css_complete_work(void) | |||
1050 | */ | 1055 | */ |
1051 | static int __init channel_subsystem_init_sync(void) | 1056 | static int __init channel_subsystem_init_sync(void) |
1052 | { | 1057 | { |
1058 | /* Register subchannels which are already in use. */ | ||
1059 | cio_register_early_subchannels(); | ||
1053 | /* Start initial subchannel evaluation. */ | 1060 | /* Start initial subchannel evaluation. */ |
1054 | css_schedule_eval_all(); | 1061 | css_schedule_eval_all(); |
1055 | css_complete_work(); | 1062 | css_complete_work(); |
@@ -1065,9 +1072,8 @@ void channel_subsystem_reinit(void) | |||
1065 | chsc_enable_facility(CHSC_SDA_OC_MSS); | 1072 | chsc_enable_facility(CHSC_SDA_OC_MSS); |
1066 | chp_id_for_each(&chpid) { | 1073 | chp_id_for_each(&chpid) { |
1067 | chp = chpid_to_chp(chpid); | 1074 | chp = chpid_to_chp(chpid); |
1068 | if (!chp) | 1075 | if (chp) |
1069 | continue; | 1076 | chp_update_desc(chp); |
1070 | chsc_determine_base_channel_path_desc(chpid, &chp->desc); | ||
1071 | } | 1077 | } |
1072 | } | 1078 | } |
1073 | 1079 | ||
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h index 4af3dfe70ef5..b1de60335238 100644 --- a/drivers/s390/cio/css.h +++ b/drivers/s390/cio/css.h | |||
@@ -101,7 +101,8 @@ extern int css_driver_register(struct css_driver *); | |||
101 | extern void css_driver_unregister(struct css_driver *); | 101 | extern void css_driver_unregister(struct css_driver *); |
102 | 102 | ||
103 | extern void css_sch_device_unregister(struct subchannel *); | 103 | extern void css_sch_device_unregister(struct subchannel *); |
104 | extern int css_probe_device(struct subchannel_id); | 104 | extern int css_register_subchannel(struct subchannel *); |
105 | extern struct subchannel *css_alloc_subchannel(struct subchannel_id); | ||
105 | extern struct subchannel *get_subchannel_by_schid(struct subchannel_id); | 106 | extern struct subchannel *get_subchannel_by_schid(struct subchannel_id); |
106 | extern int css_init_done; | 107 | extern int css_init_done; |
107 | extern int max_ssid; | 108 | extern int max_ssid; |
@@ -109,7 +110,6 @@ int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *), | |||
109 | int (*fn_unknown)(struct subchannel_id, | 110 | int (*fn_unknown)(struct subchannel_id, |
110 | void *), void *data); | 111 | void *), void *data); |
111 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); | 112 | extern int for_each_subchannel(int(*fn)(struct subchannel_id, void *), void *); |
112 | extern void css_reiterate_subchannels(void); | ||
113 | void css_update_ssd_info(struct subchannel *sch); | 113 | void css_update_ssd_info(struct subchannel *sch); |
114 | 114 | ||
115 | struct channel_subsystem { | 115 | struct channel_subsystem { |
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c index c6767f5a58b2..1ab5f6c36d9b 100644 --- a/drivers/s390/cio/device.c +++ b/drivers/s390/cio/device.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/list.h> | 19 | #include <linux/list.h> |
20 | #include <linux/device.h> | 20 | #include <linux/device.h> |
21 | #include <linux/workqueue.h> | 21 | #include <linux/workqueue.h> |
22 | #include <linux/delay.h> | ||
22 | #include <linux/timer.h> | 23 | #include <linux/timer.h> |
23 | #include <linux/kernel_stat.h> | 24 | #include <linux/kernel_stat.h> |
24 | 25 | ||
@@ -43,6 +44,10 @@ static DEFINE_SPINLOCK(recovery_lock); | |||
43 | static int recovery_phase; | 44 | static int recovery_phase; |
44 | static const unsigned long recovery_delay[] = { 3, 30, 300 }; | 45 | static const unsigned long recovery_delay[] = { 3, 30, 300 }; |
45 | 46 | ||
47 | static atomic_t ccw_device_init_count = ATOMIC_INIT(0); | ||
48 | static DECLARE_WAIT_QUEUE_HEAD(ccw_device_init_wq); | ||
49 | static struct bus_type ccw_bus_type; | ||
50 | |||
46 | /******************* bus type handling ***********************/ | 51 | /******************* bus type handling ***********************/ |
47 | 52 | ||
48 | /* The Linux driver model distinguishes between a bus type and | 53 | /* The Linux driver model distinguishes between a bus type and |
@@ -127,8 +132,6 @@ static int ccw_uevent(struct device *dev, struct kobj_uevent_env *env) | |||
127 | return ret; | 132 | return ret; |
128 | } | 133 | } |
129 | 134 | ||
130 | static struct bus_type ccw_bus_type; | ||
131 | |||
132 | static void io_subchannel_irq(struct subchannel *); | 135 | static void io_subchannel_irq(struct subchannel *); |
133 | static int io_subchannel_probe(struct subchannel *); | 136 | static int io_subchannel_probe(struct subchannel *); |
134 | static int io_subchannel_remove(struct subchannel *); | 137 | static int io_subchannel_remove(struct subchannel *); |
@@ -137,8 +140,6 @@ static int io_subchannel_sch_event(struct subchannel *, int); | |||
137 | static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, | 140 | static int io_subchannel_chp_event(struct subchannel *, struct chp_link *, |
138 | int); | 141 | int); |
139 | static void recovery_func(unsigned long data); | 142 | static void recovery_func(unsigned long data); |
140 | wait_queue_head_t ccw_device_init_wq; | ||
141 | atomic_t ccw_device_init_count; | ||
142 | 143 | ||
143 | static struct css_device_id io_subchannel_ids[] = { | 144 | static struct css_device_id io_subchannel_ids[] = { |
144 | { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, | 145 | { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, }, |
@@ -191,10 +192,7 @@ int __init io_subchannel_init(void) | |||
191 | { | 192 | { |
192 | int ret; | 193 | int ret; |
193 | 194 | ||
194 | init_waitqueue_head(&ccw_device_init_wq); | ||
195 | atomic_set(&ccw_device_init_count, 0); | ||
196 | setup_timer(&recovery_timer, recovery_func, 0); | 195 | setup_timer(&recovery_timer, recovery_func, 0); |
197 | |||
198 | ret = bus_register(&ccw_bus_type); | 196 | ret = bus_register(&ccw_bus_type); |
199 | if (ret) | 197 | if (ret) |
200 | return ret; | 198 | return ret; |
@@ -1086,19 +1084,14 @@ static int io_subchannel_probe(struct subchannel *sch) | |||
1086 | dev_set_uevent_suppress(&sch->dev, 0); | 1084 | dev_set_uevent_suppress(&sch->dev, 0); |
1087 | kobject_uevent(&sch->dev.kobj, KOBJ_ADD); | 1085 | kobject_uevent(&sch->dev.kobj, KOBJ_ADD); |
1088 | cdev = sch_get_cdev(sch); | 1086 | cdev = sch_get_cdev(sch); |
1089 | cdev->dev.groups = ccwdev_attr_groups; | 1087 | rc = ccw_device_register(cdev); |
1090 | device_initialize(&cdev->dev); | 1088 | if (rc) { |
1091 | cdev->private->flags.initialized = 1; | 1089 | /* Release online reference. */ |
1092 | ccw_device_register(cdev); | 1090 | put_device(&cdev->dev); |
1093 | /* | 1091 | goto out_schedule; |
1094 | * Check if the device is already online. If it is | 1092 | } |
1095 | * the reference count needs to be corrected since we | 1093 | if (atomic_dec_and_test(&ccw_device_init_count)) |
1096 | * didn't obtain a reference in ccw_device_set_online. | 1094 | wake_up(&ccw_device_init_wq); |
1097 | */ | ||
1098 | if (cdev->private->state != DEV_STATE_NOT_OPER && | ||
1099 | cdev->private->state != DEV_STATE_OFFLINE && | ||
1100 | cdev->private->state != DEV_STATE_BOXED) | ||
1101 | get_device(&cdev->dev); | ||
1102 | return 0; | 1095 | return 0; |
1103 | } | 1096 | } |
1104 | io_subchannel_init_fields(sch); | 1097 | io_subchannel_init_fields(sch); |
@@ -1580,88 +1573,102 @@ out: | |||
1580 | } | 1573 | } |
1581 | 1574 | ||
1582 | #ifdef CONFIG_CCW_CONSOLE | 1575 | #ifdef CONFIG_CCW_CONSOLE |
1583 | static struct ccw_device console_cdev; | ||
1584 | static struct ccw_device_private console_private; | ||
1585 | static int console_cdev_in_use; | ||
1586 | |||
1587 | static DEFINE_SPINLOCK(ccw_console_lock); | ||
1588 | |||
1589 | spinlock_t * cio_get_console_lock(void) | ||
1590 | { | ||
1591 | return &ccw_console_lock; | ||
1592 | } | ||
1593 | |||
1594 | static int ccw_device_console_enable(struct ccw_device *cdev, | 1576 | static int ccw_device_console_enable(struct ccw_device *cdev, |
1595 | struct subchannel *sch) | 1577 | struct subchannel *sch) |
1596 | { | 1578 | { |
1597 | struct io_subchannel_private *io_priv = cio_get_console_priv(); | ||
1598 | int rc; | 1579 | int rc; |
1599 | 1580 | ||
1600 | /* Attach subchannel private data. */ | ||
1601 | memset(io_priv, 0, sizeof(*io_priv)); | ||
1602 | set_io_private(sch, io_priv); | ||
1603 | io_subchannel_init_fields(sch); | 1581 | io_subchannel_init_fields(sch); |
1604 | rc = cio_commit_config(sch); | 1582 | rc = cio_commit_config(sch); |
1605 | if (rc) | 1583 | if (rc) |
1606 | return rc; | 1584 | return rc; |
1607 | sch->driver = &io_subchannel_driver; | 1585 | sch->driver = &io_subchannel_driver; |
1608 | /* Initialize the ccw_device structure. */ | ||
1609 | cdev->dev.parent= &sch->dev; | ||
1610 | sch_set_cdev(sch, cdev); | 1586 | sch_set_cdev(sch, cdev); |
1611 | io_subchannel_recog(cdev, sch); | 1587 | io_subchannel_recog(cdev, sch); |
1612 | /* Now wait for the async. recognition to come to an end. */ | 1588 | /* Now wait for the async. recognition to come to an end. */ |
1613 | spin_lock_irq(cdev->ccwlock); | 1589 | spin_lock_irq(cdev->ccwlock); |
1614 | while (!dev_fsm_final_state(cdev)) | 1590 | while (!dev_fsm_final_state(cdev)) |
1615 | wait_cons_dev(); | 1591 | ccw_device_wait_idle(cdev); |
1616 | rc = -EIO; | 1592 | |
1617 | if (cdev->private->state != DEV_STATE_OFFLINE) | 1593 | /* Hold on to an extra reference while device is online. */ |
1594 | get_device(&cdev->dev); | ||
1595 | rc = ccw_device_online(cdev); | ||
1596 | if (rc) | ||
1618 | goto out_unlock; | 1597 | goto out_unlock; |
1619 | ccw_device_online(cdev); | 1598 | |
1620 | while (!dev_fsm_final_state(cdev)) | 1599 | while (!dev_fsm_final_state(cdev)) |
1621 | wait_cons_dev(); | 1600 | ccw_device_wait_idle(cdev); |
1622 | if (cdev->private->state != DEV_STATE_ONLINE) | 1601 | |
1623 | goto out_unlock; | 1602 | if (cdev->private->state == DEV_STATE_ONLINE) |
1624 | rc = 0; | 1603 | cdev->online = 1; |
1604 | else | ||
1605 | rc = -EIO; | ||
1625 | out_unlock: | 1606 | out_unlock: |
1626 | spin_unlock_irq(cdev->ccwlock); | 1607 | spin_unlock_irq(cdev->ccwlock); |
1608 | if (rc) /* Give up online reference since onlining failed. */ | ||
1609 | put_device(&cdev->dev); | ||
1627 | return rc; | 1610 | return rc; |
1628 | } | 1611 | } |
1629 | 1612 | ||
1630 | struct ccw_device * | 1613 | struct ccw_device *ccw_device_probe_console(void) |
1631 | ccw_device_probe_console(void) | ||
1632 | { | 1614 | { |
1615 | struct io_subchannel_private *io_priv; | ||
1616 | struct ccw_device *cdev; | ||
1633 | struct subchannel *sch; | 1617 | struct subchannel *sch; |
1634 | int ret; | 1618 | int ret; |
1635 | 1619 | ||
1636 | if (xchg(&console_cdev_in_use, 1) != 0) | ||
1637 | return ERR_PTR(-EBUSY); | ||
1638 | sch = cio_probe_console(); | 1620 | sch = cio_probe_console(); |
1639 | if (IS_ERR(sch)) { | 1621 | if (IS_ERR(sch)) |
1640 | console_cdev_in_use = 0; | 1622 | return ERR_CAST(sch); |
1641 | return (void *) sch; | 1623 | |
1624 | io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA); | ||
1625 | if (!io_priv) { | ||
1626 | put_device(&sch->dev); | ||
1627 | return ERR_PTR(-ENOMEM); | ||
1642 | } | 1628 | } |
1643 | memset(&console_cdev, 0, sizeof(struct ccw_device)); | 1629 | cdev = io_subchannel_create_ccwdev(sch); |
1644 | memset(&console_private, 0, sizeof(struct ccw_device_private)); | 1630 | if (IS_ERR(cdev)) { |
1645 | console_cdev.private = &console_private; | 1631 | put_device(&sch->dev); |
1646 | console_private.cdev = &console_cdev; | 1632 | kfree(io_priv); |
1647 | console_private.int_class = IRQIO_CIO; | 1633 | return cdev; |
1648 | ret = ccw_device_console_enable(&console_cdev, sch); | 1634 | } |
1635 | set_io_private(sch, io_priv); | ||
1636 | ret = ccw_device_console_enable(cdev, sch); | ||
1649 | if (ret) { | 1637 | if (ret) { |
1650 | cio_release_console(); | 1638 | set_io_private(sch, NULL); |
1651 | console_cdev_in_use = 0; | 1639 | put_device(&sch->dev); |
1640 | put_device(&cdev->dev); | ||
1641 | kfree(io_priv); | ||
1652 | return ERR_PTR(ret); | 1642 | return ERR_PTR(ret); |
1653 | } | 1643 | } |
1654 | console_cdev.online = 1; | 1644 | return cdev; |
1655 | return &console_cdev; | 1645 | } |
1646 | |||
1647 | /** | ||
1648 | * ccw_device_wait_idle() - busy wait for device to become idle | ||
1649 | * @cdev: ccw device | ||
1650 | * | ||
1651 | * Poll until activity control is zero, that is, no function or data | ||
1652 | * transfer is pending/active. | ||
1653 | * Called with device lock being held. | ||
1654 | */ | ||
1655 | void ccw_device_wait_idle(struct ccw_device *cdev) | ||
1656 | { | ||
1657 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | ||
1658 | |||
1659 | while (1) { | ||
1660 | cio_tsch(sch); | ||
1661 | if (sch->schib.scsw.cmd.actl == 0) | ||
1662 | break; | ||
1663 | udelay_simple(100); | ||
1664 | } | ||
1656 | } | 1665 | } |
1657 | 1666 | ||
1658 | static int ccw_device_pm_restore(struct device *dev); | 1667 | static int ccw_device_pm_restore(struct device *dev); |
1659 | 1668 | ||
1660 | int ccw_device_force_console(void) | 1669 | int ccw_device_force_console(struct ccw_device *cdev) |
1661 | { | 1670 | { |
1662 | if (!console_cdev_in_use) | 1671 | return ccw_device_pm_restore(&cdev->dev); |
1663 | return -ENODEV; | ||
1664 | return ccw_device_pm_restore(&console_cdev.dev); | ||
1665 | } | 1672 | } |
1666 | EXPORT_SYMBOL_GPL(ccw_device_force_console); | 1673 | EXPORT_SYMBOL_GPL(ccw_device_force_console); |
1667 | #endif | 1674 | #endif |
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h index 7d4ecb65db00..8d1d29873172 100644 --- a/drivers/s390/cio/device.h +++ b/drivers/s390/cio/device.h | |||
@@ -81,8 +81,6 @@ dev_fsm_final_state(struct ccw_device *cdev) | |||
81 | cdev->private->state == DEV_STATE_BOXED); | 81 | cdev->private->state == DEV_STATE_BOXED); |
82 | } | 82 | } |
83 | 83 | ||
84 | extern wait_queue_head_t ccw_device_init_wq; | ||
85 | extern atomic_t ccw_device_init_count; | ||
86 | int __init io_subchannel_init(void); | 84 | int __init io_subchannel_init(void); |
87 | 85 | ||
88 | void io_subchannel_recog_done(struct ccw_device *cdev); | 86 | void io_subchannel_recog_done(struct ccw_device *cdev); |
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c index c77b6e06bf64..4845d64f2842 100644 --- a/drivers/s390/cio/device_ops.c +++ b/drivers/s390/cio/device_ops.c | |||
@@ -704,9 +704,9 @@ EXPORT_SYMBOL(ccw_device_tm_start_timeout); | |||
704 | int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask) | 704 | int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask) |
705 | { | 705 | { |
706 | struct subchannel *sch = to_subchannel(cdev->dev.parent); | 706 | struct subchannel *sch = to_subchannel(cdev->dev.parent); |
707 | struct channel_path_desc_fmt1 desc; | 707 | struct channel_path *chp; |
708 | struct chp_id chpid; | 708 | struct chp_id chpid; |
709 | int mdc = 0, ret, i; | 709 | int mdc = 0, i; |
710 | 710 | ||
711 | /* Adjust requested path mask to excluded varied off paths. */ | 711 | /* Adjust requested path mask to excluded varied off paths. */ |
712 | if (mask) | 712 | if (mask) |
@@ -719,14 +719,20 @@ int ccw_device_get_mdc(struct ccw_device *cdev, u8 mask) | |||
719 | if (!(mask & (0x80 >> i))) | 719 | if (!(mask & (0x80 >> i))) |
720 | continue; | 720 | continue; |
721 | chpid.id = sch->schib.pmcw.chpid[i]; | 721 | chpid.id = sch->schib.pmcw.chpid[i]; |
722 | ret = chsc_determine_fmt1_channel_path_desc(chpid, &desc); | 722 | chp = chpid_to_chp(chpid); |
723 | if (ret) | 723 | if (!chp) |
724 | return ret; | 724 | continue; |
725 | if (!desc.f) | 725 | |
726 | mutex_lock(&chp->lock); | ||
727 | if (!chp->desc_fmt1.f) { | ||
728 | mutex_unlock(&chp->lock); | ||
726 | return 0; | 729 | return 0; |
727 | if (!desc.r) | 730 | } |
731 | if (!chp->desc_fmt1.r) | ||
728 | mdc = 1; | 732 | mdc = 1; |
729 | mdc = mdc ? min(mdc, (int)desc.mdc) : desc.mdc; | 733 | mdc = mdc ? min_t(int, mdc, chp->desc_fmt1.mdc) : |
734 | chp->desc_fmt1.mdc; | ||
735 | mutex_unlock(&chp->lock); | ||
730 | } | 736 | } |
731 | 737 | ||
732 | return mdc; | 738 | return mdc; |
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c index 65d13e38803f..5a999084a229 100644 --- a/drivers/s390/cio/idset.c +++ b/drivers/s390/cio/idset.c | |||
@@ -17,7 +17,7 @@ struct idset { | |||
17 | 17 | ||
18 | static inline unsigned long bitmap_size(int num_ssid, int num_id) | 18 | static inline unsigned long bitmap_size(int num_ssid, int num_id) |
19 | { | 19 | { |
20 | return __BITOPS_WORDS(num_ssid * num_id) * sizeof(unsigned long); | 20 | return BITS_TO_LONGS(num_ssid * num_id) * sizeof(unsigned long); |
21 | } | 21 | } |
22 | 22 | ||
23 | static struct idset *idset_new(int num_ssid, int num_id) | 23 | static struct idset *idset_new(int num_ssid, int num_id) |
diff --git a/drivers/s390/cio/scm.c b/drivers/s390/cio/scm.c index bcf20f3aa51b..46ec25632e8b 100644 --- a/drivers/s390/cio/scm.c +++ b/drivers/s390/cio/scm.c | |||
@@ -211,7 +211,7 @@ static void scmdev_update(struct scm_device *scmdev, struct sale *sale) | |||
211 | goto out; | 211 | goto out; |
212 | scmdrv = to_scm_drv(scmdev->dev.driver); | 212 | scmdrv = to_scm_drv(scmdev->dev.driver); |
213 | if (changed && scmdrv->notify) | 213 | if (changed && scmdrv->notify) |
214 | scmdrv->notify(scmdev); | 214 | scmdrv->notify(scmdev, SCM_CHANGE); |
215 | out: | 215 | out: |
216 | device_unlock(&scmdev->dev); | 216 | device_unlock(&scmdev->dev); |
217 | if (changed) | 217 | if (changed) |
@@ -297,6 +297,22 @@ int scm_update_information(void) | |||
297 | return ret; | 297 | return ret; |
298 | } | 298 | } |
299 | 299 | ||
300 | static int scm_dev_avail(struct device *dev, void *unused) | ||
301 | { | ||
302 | struct scm_driver *scmdrv = to_scm_drv(dev->driver); | ||
303 | struct scm_device *scmdev = to_scm_dev(dev); | ||
304 | |||
305 | if (dev->driver && scmdrv->notify) | ||
306 | scmdrv->notify(scmdev, SCM_AVAIL); | ||
307 | |||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | int scm_process_availability_information(void) | ||
312 | { | ||
313 | return bus_for_each_dev(&scm_bus_type, NULL, NULL, scm_dev_avail); | ||
314 | } | ||
315 | |||
300 | static int __init scm_init(void) | 316 | static int __init scm_init(void) |
301 | { | 317 | { |
302 | int ret; | 318 | int ret; |
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h index d87961d4c0de..6ccb7457746b 100644 --- a/drivers/s390/net/qeth_core.h +++ b/drivers/s390/net/qeth_core.h | |||
@@ -769,6 +769,7 @@ struct qeth_card { | |||
769 | unsigned long thread_start_mask; | 769 | unsigned long thread_start_mask; |
770 | unsigned long thread_allowed_mask; | 770 | unsigned long thread_allowed_mask; |
771 | unsigned long thread_running_mask; | 771 | unsigned long thread_running_mask; |
772 | struct task_struct *recovery_task; | ||
772 | spinlock_t ip_lock; | 773 | spinlock_t ip_lock; |
773 | struct list_head ip_list; | 774 | struct list_head ip_list; |
774 | struct list_head *ip_tbd_list; | 775 | struct list_head *ip_tbd_list; |
@@ -862,6 +863,8 @@ extern struct qeth_card_list_struct qeth_core_card_list; | |||
862 | extern struct kmem_cache *qeth_core_header_cache; | 863 | extern struct kmem_cache *qeth_core_header_cache; |
863 | extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; | 864 | extern struct qeth_dbf_info qeth_dbf[QETH_DBF_INFOS]; |
864 | 865 | ||
866 | void qeth_set_recovery_task(struct qeth_card *); | ||
867 | void qeth_clear_recovery_task(struct qeth_card *); | ||
865 | void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); | 868 | void qeth_set_allowed_threads(struct qeth_card *, unsigned long , int); |
866 | int qeth_threads_running(struct qeth_card *, unsigned long); | 869 | int qeth_threads_running(struct qeth_card *, unsigned long); |
867 | int qeth_wait_for_threads(struct qeth_card *, unsigned long); | 870 | int qeth_wait_for_threads(struct qeth_card *, unsigned long); |
@@ -916,6 +919,7 @@ int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *, | |||
916 | void *reply_param); | 919 | void *reply_param); |
917 | int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); | 920 | int qeth_get_priority_queue(struct qeth_card *, struct sk_buff *, int, int); |
918 | int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); | 921 | int qeth_get_elements_no(struct qeth_card *, void *, struct sk_buff *, int); |
922 | int qeth_get_elements_for_frags(struct sk_buff *); | ||
919 | int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, | 923 | int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *, |
920 | struct sk_buff *, struct qeth_hdr *, int, int, int); | 924 | struct sk_buff *, struct qeth_hdr *, int, int, int); |
921 | int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, | 925 | int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, |
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c index 0d8cdff81813..451f92020599 100644 --- a/drivers/s390/net/qeth_core_main.c +++ b/drivers/s390/net/qeth_core_main.c | |||
@@ -177,6 +177,23 @@ const char *qeth_get_cardname_short(struct qeth_card *card) | |||
177 | return "n/a"; | 177 | return "n/a"; |
178 | } | 178 | } |
179 | 179 | ||
180 | void qeth_set_recovery_task(struct qeth_card *card) | ||
181 | { | ||
182 | card->recovery_task = current; | ||
183 | } | ||
184 | EXPORT_SYMBOL_GPL(qeth_set_recovery_task); | ||
185 | |||
186 | void qeth_clear_recovery_task(struct qeth_card *card) | ||
187 | { | ||
188 | card->recovery_task = NULL; | ||
189 | } | ||
190 | EXPORT_SYMBOL_GPL(qeth_clear_recovery_task); | ||
191 | |||
192 | static bool qeth_is_recovery_task(const struct qeth_card *card) | ||
193 | { | ||
194 | return card->recovery_task == current; | ||
195 | } | ||
196 | |||
180 | void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, | 197 | void qeth_set_allowed_threads(struct qeth_card *card, unsigned long threads, |
181 | int clear_start_mask) | 198 | int clear_start_mask) |
182 | { | 199 | { |
@@ -205,6 +222,8 @@ EXPORT_SYMBOL_GPL(qeth_threads_running); | |||
205 | 222 | ||
206 | int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) | 223 | int qeth_wait_for_threads(struct qeth_card *card, unsigned long threads) |
207 | { | 224 | { |
225 | if (qeth_is_recovery_task(card)) | ||
226 | return 0; | ||
208 | return wait_event_interruptible(card->wait_q, | 227 | return wait_event_interruptible(card->wait_q, |
209 | qeth_threads_running(card, threads) == 0); | 228 | qeth_threads_running(card, threads) == 0); |
210 | } | 229 | } |
@@ -3679,6 +3698,25 @@ int qeth_get_priority_queue(struct qeth_card *card, struct sk_buff *skb, | |||
3679 | } | 3698 | } |
3680 | EXPORT_SYMBOL_GPL(qeth_get_priority_queue); | 3699 | EXPORT_SYMBOL_GPL(qeth_get_priority_queue); |
3681 | 3700 | ||
3701 | int qeth_get_elements_for_frags(struct sk_buff *skb) | ||
3702 | { | ||
3703 | int cnt, length, e, elements = 0; | ||
3704 | struct skb_frag_struct *frag; | ||
3705 | char *data; | ||
3706 | |||
3707 | for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { | ||
3708 | frag = &skb_shinfo(skb)->frags[cnt]; | ||
3709 | data = (char *)page_to_phys(skb_frag_page(frag)) + | ||
3710 | frag->page_offset; | ||
3711 | length = frag->size; | ||
3712 | e = PFN_UP((unsigned long)data + length - 1) - | ||
3713 | PFN_DOWN((unsigned long)data); | ||
3714 | elements += e; | ||
3715 | } | ||
3716 | return elements; | ||
3717 | } | ||
3718 | EXPORT_SYMBOL_GPL(qeth_get_elements_for_frags); | ||
3719 | |||
3682 | int qeth_get_elements_no(struct qeth_card *card, void *hdr, | 3720 | int qeth_get_elements_no(struct qeth_card *card, void *hdr, |
3683 | struct sk_buff *skb, int elems) | 3721 | struct sk_buff *skb, int elems) |
3684 | { | 3722 | { |
@@ -3686,7 +3724,8 @@ int qeth_get_elements_no(struct qeth_card *card, void *hdr, | |||
3686 | int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - | 3724 | int elements_needed = PFN_UP((unsigned long)skb->data + dlen - 1) - |
3687 | PFN_DOWN((unsigned long)skb->data); | 3725 | PFN_DOWN((unsigned long)skb->data); |
3688 | 3726 | ||
3689 | elements_needed += skb_shinfo(skb)->nr_frags; | 3727 | elements_needed += qeth_get_elements_for_frags(skb); |
3728 | |||
3690 | if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { | 3729 | if ((elements_needed + elems) > QETH_MAX_BUFFER_ELEMENTS(card)) { |
3691 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " | 3730 | QETH_DBF_MESSAGE(2, "Invalid size of IP packet " |
3692 | "(Number=%d / Length=%d). Discarded.\n", | 3731 | "(Number=%d / Length=%d). Discarded.\n", |
@@ -3771,12 +3810,23 @@ static inline void __qeth_fill_buffer(struct sk_buff *skb, | |||
3771 | 3810 | ||
3772 | for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { | 3811 | for (cnt = 0; cnt < skb_shinfo(skb)->nr_frags; cnt++) { |
3773 | frag = &skb_shinfo(skb)->frags[cnt]; | 3812 | frag = &skb_shinfo(skb)->frags[cnt]; |
3774 | buffer->element[element].addr = (char *) | 3813 | data = (char *)page_to_phys(skb_frag_page(frag)) + |
3775 | page_to_phys(skb_frag_page(frag)) | 3814 | frag->page_offset; |
3776 | + frag->page_offset; | 3815 | length = frag->size; |
3777 | buffer->element[element].length = frag->size; | 3816 | while (length > 0) { |
3778 | buffer->element[element].eflags = SBAL_EFLAGS_MIDDLE_FRAG; | 3817 | length_here = PAGE_SIZE - |
3779 | element++; | 3818 | ((unsigned long) data % PAGE_SIZE); |
3819 | if (length < length_here) | ||
3820 | length_here = length; | ||
3821 | |||
3822 | buffer->element[element].addr = data; | ||
3823 | buffer->element[element].length = length_here; | ||
3824 | buffer->element[element].eflags = | ||
3825 | SBAL_EFLAGS_MIDDLE_FRAG; | ||
3826 | length -= length_here; | ||
3827 | data += length_here; | ||
3828 | element++; | ||
3829 | } | ||
3780 | } | 3830 | } |
3781 | 3831 | ||
3782 | if (buffer->element[element - 1].eflags) | 3832 | if (buffer->element[element - 1].eflags) |
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index d690166efeaf..155b101bd730 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c | |||
@@ -1143,6 +1143,7 @@ static int qeth_l2_recover(void *ptr) | |||
1143 | QETH_CARD_TEXT(card, 2, "recover2"); | 1143 | QETH_CARD_TEXT(card, 2, "recover2"); |
1144 | dev_warn(&card->gdev->dev, | 1144 | dev_warn(&card->gdev->dev, |
1145 | "A recovery process has been started for the device\n"); | 1145 | "A recovery process has been started for the device\n"); |
1146 | qeth_set_recovery_task(card); | ||
1146 | __qeth_l2_set_offline(card->gdev, 1); | 1147 | __qeth_l2_set_offline(card->gdev, 1); |
1147 | rc = __qeth_l2_set_online(card->gdev, 1); | 1148 | rc = __qeth_l2_set_online(card->gdev, 1); |
1148 | if (!rc) | 1149 | if (!rc) |
@@ -1153,6 +1154,7 @@ static int qeth_l2_recover(void *ptr) | |||
1153 | dev_warn(&card->gdev->dev, "The qeth device driver " | 1154 | dev_warn(&card->gdev->dev, "The qeth device driver " |
1154 | "failed to recover an error on the device\n"); | 1155 | "failed to recover an error on the device\n"); |
1155 | } | 1156 | } |
1157 | qeth_clear_recovery_task(card); | ||
1156 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | 1158 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); |
1157 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | 1159 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); |
1158 | return 0; | 1160 | return 0; |
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 091ca0efa1c5..1f7edf1b26c3 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c | |||
@@ -623,7 +623,7 @@ static int qeth_l3_send_setrouting(struct qeth_card *card, | |||
623 | return rc; | 623 | return rc; |
624 | } | 624 | } |
625 | 625 | ||
626 | static void qeth_l3_correct_routing_type(struct qeth_card *card, | 626 | static int qeth_l3_correct_routing_type(struct qeth_card *card, |
627 | enum qeth_routing_types *type, enum qeth_prot_versions prot) | 627 | enum qeth_routing_types *type, enum qeth_prot_versions prot) |
628 | { | 628 | { |
629 | if (card->info.type == QETH_CARD_TYPE_IQD) { | 629 | if (card->info.type == QETH_CARD_TYPE_IQD) { |
@@ -632,7 +632,7 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card, | |||
632 | case PRIMARY_CONNECTOR: | 632 | case PRIMARY_CONNECTOR: |
633 | case SECONDARY_CONNECTOR: | 633 | case SECONDARY_CONNECTOR: |
634 | case MULTICAST_ROUTER: | 634 | case MULTICAST_ROUTER: |
635 | return; | 635 | return 0; |
636 | default: | 636 | default: |
637 | goto out_inval; | 637 | goto out_inval; |
638 | } | 638 | } |
@@ -641,17 +641,18 @@ static void qeth_l3_correct_routing_type(struct qeth_card *card, | |||
641 | case NO_ROUTER: | 641 | case NO_ROUTER: |
642 | case PRIMARY_ROUTER: | 642 | case PRIMARY_ROUTER: |
643 | case SECONDARY_ROUTER: | 643 | case SECONDARY_ROUTER: |
644 | return; | 644 | return 0; |
645 | case MULTICAST_ROUTER: | 645 | case MULTICAST_ROUTER: |
646 | if (qeth_is_ipafunc_supported(card, prot, | 646 | if (qeth_is_ipafunc_supported(card, prot, |
647 | IPA_OSA_MC_ROUTER)) | 647 | IPA_OSA_MC_ROUTER)) |
648 | return; | 648 | return 0; |
649 | default: | 649 | default: |
650 | goto out_inval; | 650 | goto out_inval; |
651 | } | 651 | } |
652 | } | 652 | } |
653 | out_inval: | 653 | out_inval: |
654 | *type = NO_ROUTER; | 654 | *type = NO_ROUTER; |
655 | return -EINVAL; | ||
655 | } | 656 | } |
656 | 657 | ||
657 | int qeth_l3_setrouting_v4(struct qeth_card *card) | 658 | int qeth_l3_setrouting_v4(struct qeth_card *card) |
@@ -660,8 +661,10 @@ int qeth_l3_setrouting_v4(struct qeth_card *card) | |||
660 | 661 | ||
661 | QETH_CARD_TEXT(card, 3, "setrtg4"); | 662 | QETH_CARD_TEXT(card, 3, "setrtg4"); |
662 | 663 | ||
663 | qeth_l3_correct_routing_type(card, &card->options.route4.type, | 664 | rc = qeth_l3_correct_routing_type(card, &card->options.route4.type, |
664 | QETH_PROT_IPV4); | 665 | QETH_PROT_IPV4); |
666 | if (rc) | ||
667 | return rc; | ||
665 | 668 | ||
666 | rc = qeth_l3_send_setrouting(card, card->options.route4.type, | 669 | rc = qeth_l3_send_setrouting(card, card->options.route4.type, |
667 | QETH_PROT_IPV4); | 670 | QETH_PROT_IPV4); |
@@ -683,8 +686,10 @@ int qeth_l3_setrouting_v6(struct qeth_card *card) | |||
683 | 686 | ||
684 | if (!qeth_is_supported(card, IPA_IPV6)) | 687 | if (!qeth_is_supported(card, IPA_IPV6)) |
685 | return 0; | 688 | return 0; |
686 | qeth_l3_correct_routing_type(card, &card->options.route6.type, | 689 | rc = qeth_l3_correct_routing_type(card, &card->options.route6.type, |
687 | QETH_PROT_IPV6); | 690 | QETH_PROT_IPV6); |
691 | if (rc) | ||
692 | return rc; | ||
688 | 693 | ||
689 | rc = qeth_l3_send_setrouting(card, card->options.route6.type, | 694 | rc = qeth_l3_send_setrouting(card, card->options.route6.type, |
690 | QETH_PROT_IPV6); | 695 | QETH_PROT_IPV6); |
@@ -2898,7 +2903,9 @@ static inline int qeth_l3_tso_elements(struct sk_buff *skb) | |||
2898 | tcp_hdr(skb)->doff * 4; | 2903 | tcp_hdr(skb)->doff * 4; |
2899 | int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); | 2904 | int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data); |
2900 | int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); | 2905 | int elements = PFN_UP(tcpd + tcpd_len - 1) - PFN_DOWN(tcpd); |
2901 | elements += skb_shinfo(skb)->nr_frags; | 2906 | |
2907 | elements += qeth_get_elements_for_frags(skb); | ||
2908 | |||
2902 | return elements; | 2909 | return elements; |
2903 | } | 2910 | } |
2904 | 2911 | ||
@@ -3348,7 +3355,6 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode) | |||
3348 | rc = -ENODEV; | 3355 | rc = -ENODEV; |
3349 | goto out_remove; | 3356 | goto out_remove; |
3350 | } | 3357 | } |
3351 | qeth_trace_features(card); | ||
3352 | 3358 | ||
3353 | if (!card->dev && qeth_l3_setup_netdev(card)) { | 3359 | if (!card->dev && qeth_l3_setup_netdev(card)) { |
3354 | rc = -ENODEV; | 3360 | rc = -ENODEV; |
@@ -3425,6 +3431,7 @@ contin: | |||
3425 | qeth_l3_set_multicast_list(card->dev); | 3431 | qeth_l3_set_multicast_list(card->dev); |
3426 | rtnl_unlock(); | 3432 | rtnl_unlock(); |
3427 | } | 3433 | } |
3434 | qeth_trace_features(card); | ||
3428 | /* let user_space know that device is online */ | 3435 | /* let user_space know that device is online */ |
3429 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); | 3436 | kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); |
3430 | mutex_unlock(&card->conf_mutex); | 3437 | mutex_unlock(&card->conf_mutex); |
@@ -3508,6 +3515,7 @@ static int qeth_l3_recover(void *ptr) | |||
3508 | QETH_CARD_TEXT(card, 2, "recover2"); | 3515 | QETH_CARD_TEXT(card, 2, "recover2"); |
3509 | dev_warn(&card->gdev->dev, | 3516 | dev_warn(&card->gdev->dev, |
3510 | "A recovery process has been started for the device\n"); | 3517 | "A recovery process has been started for the device\n"); |
3518 | qeth_set_recovery_task(card); | ||
3511 | __qeth_l3_set_offline(card->gdev, 1); | 3519 | __qeth_l3_set_offline(card->gdev, 1); |
3512 | rc = __qeth_l3_set_online(card->gdev, 1); | 3520 | rc = __qeth_l3_set_online(card->gdev, 1); |
3513 | if (!rc) | 3521 | if (!rc) |
@@ -3518,6 +3526,7 @@ static int qeth_l3_recover(void *ptr) | |||
3518 | dev_warn(&card->gdev->dev, "The qeth device driver " | 3526 | dev_warn(&card->gdev->dev, "The qeth device driver " |
3519 | "failed to recover an error on the device\n"); | 3527 | "failed to recover an error on the device\n"); |
3520 | } | 3528 | } |
3529 | qeth_clear_recovery_task(card); | ||
3521 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); | 3530 | qeth_clear_thread_start_bit(card, QETH_RECOVER_THREAD); |
3522 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); | 3531 | qeth_clear_thread_running_bit(card, QETH_RECOVER_THREAD); |
3523 | return 0; | 3532 | return 0; |
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c index ebc379486267..e70af2406ff9 100644 --- a/drivers/s390/net/qeth_l3_sys.c +++ b/drivers/s390/net/qeth_l3_sys.c | |||
@@ -87,6 +87,8 @@ static ssize_t qeth_l3_dev_route_store(struct qeth_card *card, | |||
87 | rc = qeth_l3_setrouting_v6(card); | 87 | rc = qeth_l3_setrouting_v6(card); |
88 | } | 88 | } |
89 | out: | 89 | out: |
90 | if (rc) | ||
91 | route->type = old_route_type; | ||
90 | mutex_unlock(&card->conf_mutex); | 92 | mutex_unlock(&card->conf_mutex); |
91 | return rc ? rc : count; | 93 | return rc ? rc : count; |
92 | } | 94 | } |