about summary refs log tree commit diff stats
path: root/drivers/s390/cio/device_ops.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/s390/cio/device_ops.c')
-rw-r--r-- drivers/s390/cio/device_ops.c | 250
1 files changed, 0 insertions, 250 deletions
diff --git a/drivers/s390/cio/device_ops.c b/drivers/s390/cio/device_ops.c
index c8cfbf161d44..14eba854b155 100644
--- a/drivers/s390/cio/device_ops.c
+++ b/drivers/s390/cio/device_ops.c
@@ -288,253 +288,6 @@ ccw_device_get_path_mask(struct ccw_device *cdev)
 	return sch->lpm;
 }
 
291static void
292ccw_device_wake_up(struct ccw_device *cdev, unsigned long ip, struct irb *irb)
293{
294 if (!ip)
295 /* unsolicited interrupt */
296 return;
297
298 /* Abuse intparm for error reporting. */
299 if (IS_ERR(irb))
300 cdev->private->intparm = -EIO;
301 else if (irb->scsw.cc == 1)
302 /* Retry for deferred condition code. */
303 cdev->private->intparm = -EAGAIN;
304 else if ((irb->scsw.dstat !=
305 (DEV_STAT_CHN_END|DEV_STAT_DEV_END)) ||
306 (irb->scsw.cstat != 0)) {
307 /*
308 * We didn't get channel end / device end. Check if path
309 * verification has been started; we can retry after it has
310 * finished. We also retry unit checks except for command reject
311 * or intervention required. Also check for long busy
312 * conditions.
313 */
314 if (cdev->private->flags.doverify ||
315 cdev->private->state == DEV_STATE_VERIFY)
316 cdev->private->intparm = -EAGAIN;
317 else if ((irb->scsw.dstat & DEV_STAT_UNIT_CHECK) &&
318 !(irb->ecw[0] &
319 (SNS0_CMD_REJECT | SNS0_INTERVENTION_REQ)))
320 cdev->private->intparm = -EAGAIN;
321 else if ((irb->scsw.dstat & DEV_STAT_ATTENTION) &&
322 (irb->scsw.dstat & DEV_STAT_DEV_END) &&
323 (irb->scsw.dstat & DEV_STAT_UNIT_EXCEP))
324 cdev->private->intparm = -EAGAIN;
325 else
326 cdev->private->intparm = -EIO;
327
328 } else
329 cdev->private->intparm = 0;
330 wake_up(&cdev->private->wait_q);
331}
332
333static int
334__ccw_device_retry_loop(struct ccw_device *cdev, struct ccw1 *ccw, long magic, __u8 lpm)
335{
336 int ret;
337 struct subchannel *sch;
338
339 sch = to_subchannel(cdev->dev.parent);
340 do {
341 ccw_device_set_timeout(cdev, 60 * HZ);
342 ret = cio_start (sch, ccw, lpm);
343 if (ret != 0)
344 ccw_device_set_timeout(cdev, 0);
345 if (ret == -EBUSY) {
346 /* Try again later. */
347 spin_unlock_irq(sch->lock);
348 msleep(10);
349 spin_lock_irq(sch->lock);
350 continue;
351 }
352 if (ret != 0)
353 /* Non-retryable error. */
354 break;
355 /* Wait for end of request. */
356 cdev->private->intparm = magic;
357 spin_unlock_irq(sch->lock);
358 wait_event(cdev->private->wait_q,
359 (cdev->private->intparm == -EIO) ||
360 (cdev->private->intparm == -EAGAIN) ||
361 (cdev->private->intparm == 0));
362 spin_lock_irq(sch->lock);
363 /* Check at least for channel end / device end */
364 if (cdev->private->intparm == -EIO) {
365 /* Non-retryable error. */
366 ret = -EIO;
367 break;
368 }
369 if (cdev->private->intparm == 0)
370 /* Success. */
371 break;
372 /* Try again later. */
373 spin_unlock_irq(sch->lock);
374 msleep(10);
375 spin_lock_irq(sch->lock);
376 } while (1);
377
378 return ret;
379}
380
381/**
382 * read_dev_chars() - read device characteristics
383 * @param cdev target ccw device
384 * @param buffer pointer to buffer for rdc data
385 * @param length size of rdc data
386 * @returns 0 for success, negative error value on failure
387 *
388 * Context:
389 * called for online device, lock not held
390 **/
391int
392read_dev_chars (struct ccw_device *cdev, void **buffer, int length)
393{
394 void (*handler)(struct ccw_device *, unsigned long, struct irb *);
395 struct subchannel *sch;
396 int ret;
397 struct ccw1 *rdc_ccw;
398
399 if (!cdev)
400 return -ENODEV;
401 if (!buffer || !length)
402 return -EINVAL;
403 sch = to_subchannel(cdev->dev.parent);
404
405 CIO_TRACE_EVENT (4, "rddevch");
406 CIO_TRACE_EVENT (4, sch->dev.bus_id);
407
408 rdc_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
409 if (!rdc_ccw)
410 return -ENOMEM;
411 rdc_ccw->cmd_code = CCW_CMD_RDC;
412 rdc_ccw->count = length;
413 rdc_ccw->flags = CCW_FLAG_SLI;
414 ret = set_normalized_cda (rdc_ccw, (*buffer));
415 if (ret != 0) {
416 kfree(rdc_ccw);
417 return ret;
418 }
419
420 spin_lock_irq(sch->lock);
421 /* Save interrupt handler. */
422 handler = cdev->handler;
423 /* Temporarily install own handler. */
424 cdev->handler = ccw_device_wake_up;
425 if (cdev->private->state != DEV_STATE_ONLINE)
426 ret = -ENODEV;
427 else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
428 !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
429 cdev->private->flags.doverify)
430 ret = -EBUSY;
431 else
432 /* 0x00D9C4C3 == ebcdic "RDC" */
433 ret = __ccw_device_retry_loop(cdev, rdc_ccw, 0x00D9C4C3, 0);
434
435 /* Restore interrupt handler. */
436 cdev->handler = handler;
437 spin_unlock_irq(sch->lock);
438
439 clear_normalized_cda (rdc_ccw);
440 kfree(rdc_ccw);
441
442 return ret;
443}
444
445/*
446 * Read Configuration data using path mask
447 */
448int
449read_conf_data_lpm (struct ccw_device *cdev, void **buffer, int *length, __u8 lpm)
450{
451 void (*handler)(struct ccw_device *, unsigned long, struct irb *);
452 struct subchannel *sch;
453 struct ciw *ciw;
454 char *rcd_buf;
455 int ret;
456 struct ccw1 *rcd_ccw;
457
458 if (!cdev)
459 return -ENODEV;
460 if (!buffer || !length)
461 return -EINVAL;
462 sch = to_subchannel(cdev->dev.parent);
463
464 CIO_TRACE_EVENT (4, "rdconf");
465 CIO_TRACE_EVENT (4, sch->dev.bus_id);
466
467 /*
468 * scan for RCD command in extended SenseID data
469 */
470 ciw = ccw_device_get_ciw(cdev, CIW_TYPE_RCD);
471 if (!ciw || ciw->cmd == 0)
472 return -EOPNOTSUPP;
473
474 /* Adjust requested path mask to excluded varied off paths. */
475 if (lpm) {
476 lpm &= sch->opm;
477 if (lpm == 0)
478 return -EACCES;
479 }
480
481 rcd_ccw = kzalloc(sizeof(struct ccw1), GFP_KERNEL | GFP_DMA);
482 if (!rcd_ccw)
483 return -ENOMEM;
484 rcd_buf = kzalloc(ciw->count, GFP_KERNEL | GFP_DMA);
485 if (!rcd_buf) {
486 kfree(rcd_ccw);
487 return -ENOMEM;
488 }
489 rcd_ccw->cmd_code = ciw->cmd;
490 rcd_ccw->cda = (__u32) __pa (rcd_buf);
491 rcd_ccw->count = ciw->count;
492 rcd_ccw->flags = CCW_FLAG_SLI;
493
494 spin_lock_irq(sch->lock);
495 /* Save interrupt handler. */
496 handler = cdev->handler;
497 /* Temporarily install own handler. */
498 cdev->handler = ccw_device_wake_up;
499 if (cdev->private->state != DEV_STATE_ONLINE)
500 ret = -ENODEV;
501 else if (((sch->schib.scsw.stctl & SCSW_STCTL_PRIM_STATUS) &&
502 !(sch->schib.scsw.stctl & SCSW_STCTL_SEC_STATUS)) ||
503 cdev->private->flags.doverify)
504 ret = -EBUSY;
505 else
506 /* 0x00D9C3C4 == ebcdic "RCD" */
507 ret = __ccw_device_retry_loop(cdev, rcd_ccw, 0x00D9C3C4, lpm);
508
509 /* Restore interrupt handler. */
510 cdev->handler = handler;
511 spin_unlock_irq(sch->lock);
512
513 /*
514 * on success we update the user input parms
515 */
516 if (ret) {
517 kfree (rcd_buf);
518 *buffer = NULL;
519 *length = 0;
520 } else {
521 *length = ciw->count;
522 *buffer = rcd_buf;
523 }
524 kfree(rcd_ccw);
525
526 return ret;
527}
528
/**
 * read_conf_data() - read configuration data on any available path
 * @cdev:   target ccw device
 * @buffer: on success, set to a newly allocated buffer holding the RCD data
 * @length: on success, set to the length of the returned data
 *
 * Convenience wrapper around read_conf_data_lpm() with no path restriction
 * (lpm == 0).  See read_conf_data_lpm() for details and return values.
 */
int
read_conf_data (struct ccw_device *cdev, void **buffer, int *length)
{
	return read_conf_data_lpm (cdev, buffer, length, 0);
}
537
 /*
  * Try to break the lock on a boxed device.
  */
@@ -649,8 +402,5 @@ EXPORT_SYMBOL(ccw_device_start_timeout_key);
 EXPORT_SYMBOL(ccw_device_start_key);
 EXPORT_SYMBOL(ccw_device_get_ciw);
 EXPORT_SYMBOL(ccw_device_get_path_mask);
-EXPORT_SYMBOL(read_conf_data);
-EXPORT_SYMBOL(read_dev_chars);
 EXPORT_SYMBOL(_ccw_device_get_subchannel_number);
 EXPORT_SYMBOL_GPL(ccw_device_get_chp_desc);
-EXPORT_SYMBOL_GPL(read_conf_data_lpm);