author     Linus Torvalds <torvalds@linux-foundation.org>   2009-09-23 13:02:14 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>   2009-09-23 13:02:14 -0400
commit     9fd815b55f31be48dbb3dd23922587d247a4e497
tree       63814130acf3e472cc660ae71208c146f16dc5d6  /drivers/s390
parent     31bbb9b58d1e8ebcf2b28c95c2250a9f8e31e397
parent     ed87b27e00d2ca240f62f3903583a2f1541fb9ef
Merge branch 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6
* 'for-linus' of git://git390.marist.edu/pub/scm/linux-2.6: (22 commits)
[S390] Update default configuration.
[S390] hibernate: Do real CPU swap at resume time
[S390] dasd: tolerate devices that have no feature codes
[S390] zcrypt: Do not add/remove devices in s/r callbacks
[S390] hibernate: make sure pfn_is_nosave handles lowcore pages
[S390] smp: introduce LC_ORDER and simplify lowcore handling
[S390] ptrace: use common code for simple peek/poke operations
[S390] fix disabled_wait inline assembly clobber list
[S390] Change kernel_page_present coding style.
[S390] hibernation: reset system after resume
[S390] hibernation: fix guest page hinting related crash
[S390] Get rid of init_module/delete_module compat functions.
[S390] Convert sys_execve to function with parameters.
[S390] Convert sys_clone to function with parameters.
[S390] qdio: change state of all primed input buffers
[S390] qdio: reduce per device debug messages
[S390] cio: introduce consistent subchannel scanning
[S390] cio: idset use actual number of ssids
[S390] cio: dont kfree vmalloced memory
[S390] cio: introduce css_settle
...
Diffstat (limited to 'drivers/s390')
-rw-r--r--  drivers/s390/block/dasd_eckd.c  |  13
-rw-r--r--  drivers/s390/cio/css.c          | 252
-rw-r--r--  drivers/s390/cio/css.h          |   3
-rw-r--r--  drivers/s390/cio/device.c       |  38
-rw-r--r--  drivers/s390/cio/device.h       |   1
-rw-r--r--  drivers/s390/cio/idset.c        |  22
-rw-r--r--  drivers/s390/cio/idset.h        |   2
-rw-r--r--  drivers/s390/cio/qdio_main.c    |  32
-rw-r--r--  drivers/s390/crypto/ap_bus.c    |  40
9 files changed, 219 insertions(+), 184 deletions(-)
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index bd9fe2e36dce..ab3521755588 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -935,6 +935,7 @@ static int dasd_eckd_read_features(struct dasd_device *device)
         struct dasd_eckd_private *private;
 
         private = (struct dasd_eckd_private *) device->private;
+        memset(&private->features, 0, sizeof(struct dasd_rssd_features));
         cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* PSF */ + 1 /* RSSD */,
                                    (sizeof(struct dasd_psf_prssd_data) +
                                     sizeof(struct dasd_rssd_features)),
@@ -982,7 +983,9 @@ static int dasd_eckd_read_features(struct dasd_device *device)
                 features = (struct dasd_rssd_features *) (prssdp + 1);
                 memcpy(&private->features, features,
                        sizeof(struct dasd_rssd_features));
-        }
+        } else
+                dev_warn(&device->cdev->dev, "Reading device feature codes"
+                         " failed with rc=%d\n", rc);
         dasd_sfree_request(cqr, cqr->memdev);
         return rc;
 }
@@ -1144,9 +1147,7 @@ dasd_eckd_check_characteristics(struct dasd_device *device)
         }
 
         /* Read Feature Codes */
-        rc = dasd_eckd_read_features(device);
-        if (rc)
-                goto out_err3;
+        dasd_eckd_read_features(device);
 
         /* Read Device Characteristics */
         rc = dasd_generic_read_dev_chars(device, DASD_ECKD_MAGIC,
@@ -3241,9 +3242,7 @@ int dasd_eckd_restore_device(struct dasd_device *device)
         }
 
         /* Read Feature Codes */
-        rc = dasd_eckd_read_features(device);
-        if (rc)
-                goto out_err;
+        dasd_eckd_read_features(device);
 
         /* Read Device Characteristics */
         memset(&private->rdc_data, 0, sizeof(private->rdc_data));
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 393c73c47f87..91c25706fa83 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -31,8 +31,7 @@
 #include "chp.h"
 
 int css_init_done = 0;
-static int need_reprobe = 0;
-static int max_ssid = 0;
+int max_ssid;
 
 struct channel_subsystem *channel_subsystems[__MAX_CSSID + 1];
 
@@ -315,12 +314,18 @@ int css_probe_device(struct subchannel_id schid)
         int ret;
         struct subchannel *sch;
 
-        sch = css_alloc_subchannel(schid);
-        if (IS_ERR(sch))
-                return PTR_ERR(sch);
+        if (cio_is_console(schid))
+                sch = cio_get_console_subchannel();
+        else {
+                sch = css_alloc_subchannel(schid);
+                if (IS_ERR(sch))
+                        return PTR_ERR(sch);
+        }
         ret = css_register_subchannel(sch);
-        if (ret)
-                put_device(&sch->dev);
+        if (ret) {
+                if (!cio_is_console(schid))
+                        put_device(&sch->dev);
+        }
         return ret;
 }
 
@@ -409,10 +414,14 @@ static void css_evaluate_subchannel(struct subchannel_id schid, int slow)
 
 static struct idset *slow_subchannel_set;
 static spinlock_t slow_subchannel_lock;
+static wait_queue_head_t css_eval_wq;
+static atomic_t css_eval_scheduled;
 
 static int __init slow_subchannel_init(void)
 {
         spin_lock_init(&slow_subchannel_lock);
+        atomic_set(&css_eval_scheduled, 0);
+        init_waitqueue_head(&css_eval_wq);
         slow_subchannel_set = idset_sch_new();
         if (!slow_subchannel_set) {
                 CIO_MSG_EVENT(0, "could not allocate slow subchannel set\n");
@@ -468,9 +477,17 @@ static int slow_eval_unknown_fn(struct subchannel_id schid, void *data)
 
 static void css_slow_path_func(struct work_struct *unused)
 {
+        unsigned long flags;
+
         CIO_TRACE_EVENT(4, "slowpath");
         for_each_subchannel_staged(slow_eval_known_fn, slow_eval_unknown_fn,
                                    NULL);
+        spin_lock_irqsave(&slow_subchannel_lock, flags);
+        if (idset_is_empty(slow_subchannel_set)) {
+                atomic_set(&css_eval_scheduled, 0);
+                wake_up(&css_eval_wq);
+        }
+        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
 static DECLARE_WORK(slow_path_work, css_slow_path_func);
@@ -482,6 +499,7 @@ void css_schedule_eval(struct subchannel_id schid)
 
         spin_lock_irqsave(&slow_subchannel_lock, flags);
         idset_sch_add(slow_subchannel_set, schid);
+        atomic_set(&css_eval_scheduled, 1);
         queue_work(slow_path_wq, &slow_path_work);
         spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
@@ -492,80 +510,53 @@ void css_schedule_eval_all(void)
 
         spin_lock_irqsave(&slow_subchannel_lock, flags);
         idset_fill(slow_subchannel_set);
+        atomic_set(&css_eval_scheduled, 1);
         queue_work(slow_path_wq, &slow_path_work);
         spin_unlock_irqrestore(&slow_subchannel_lock, flags);
 }
 
-void css_wait_for_slow_path(void)
+static int __unset_registered(struct device *dev, void *data)
 {
-        flush_workqueue(slow_path_wq);
-}
-
-/* Reprobe subchannel if unregistered. */
-static int reprobe_subchannel(struct subchannel_id schid, void *data)
-{
-        int ret;
-
-        CIO_MSG_EVENT(6, "cio: reprobe 0.%x.%04x\n",
-                      schid.ssid, schid.sch_no);
-        if (need_reprobe)
-                return -EAGAIN;
-
-        ret = css_probe_device(schid);
-        switch (ret) {
-        case 0:
-                break;
-        case -ENXIO:
-        case -ENOMEM:
-        case -EIO:
-                /* These should abort looping */
-                break;
-        default:
-                ret = 0;
-        }
-
-        return ret;
-}
+        struct idset *set = data;
+        struct subchannel *sch = to_subchannel(dev);
 
-static void reprobe_after_idle(struct work_struct *unused)
-{
-        /* Make sure initial subchannel scan is done. */
-        wait_event(ccw_device_init_wq,
-                   atomic_read(&ccw_device_init_count) == 0);
-        if (need_reprobe)
-                css_schedule_reprobe();
+        idset_sch_del(set, sch->schid);
+        return 0;
 }
 
-static DECLARE_WORK(reprobe_idle_work, reprobe_after_idle);
-
-/* Work function used to reprobe all unregistered subchannels. */
-static void reprobe_all(struct work_struct *unused)
+void css_schedule_eval_all_unreg(void)
 {
-        int ret;
-
-        CIO_MSG_EVENT(4, "reprobe start\n");
+        unsigned long flags;
+        struct idset *unreg_set;
 
-        /* Make sure initial subchannel scan is done. */
-        if (atomic_read(&ccw_device_init_count) != 0) {
-                queue_work(ccw_device_work, &reprobe_idle_work);
+        /* Find unregistered subchannels. */
+        unreg_set = idset_sch_new();
+        if (!unreg_set) {
+                /* Fallback. */
+                css_schedule_eval_all();
                 return;
         }
-        need_reprobe = 0;
-        ret = for_each_subchannel_staged(NULL, reprobe_subchannel, NULL);
-
-        CIO_MSG_EVENT(4, "reprobe done (rc=%d, need_reprobe=%d)\n", ret,
-                      need_reprobe);
+        idset_fill(unreg_set);
+        bus_for_each_dev(&css_bus_type, NULL, unreg_set, __unset_registered);
+        /* Apply to slow_subchannel_set. */
+        spin_lock_irqsave(&slow_subchannel_lock, flags);
+        idset_add_set(slow_subchannel_set, unreg_set);
+        atomic_set(&css_eval_scheduled, 1);
+        queue_work(slow_path_wq, &slow_path_work);
+        spin_unlock_irqrestore(&slow_subchannel_lock, flags);
+        idset_free(unreg_set);
 }
 
-static DECLARE_WORK(css_reprobe_work, reprobe_all);
+void css_wait_for_slow_path(void)
+{
+        flush_workqueue(slow_path_wq);
+}
 
 /* Schedule reprobing of all unregistered subchannels. */
 void css_schedule_reprobe(void)
 {
-        need_reprobe = 1;
-        queue_work(slow_path_wq, &css_reprobe_work);
+        css_schedule_eval_all_unreg();
 }
-
 EXPORT_SYMBOL_GPL(css_schedule_reprobe);
 
 /*
@@ -601,49 +592,6 @@ static void css_process_crw(struct crw *crw0, struct crw *crw1, int overflow)
         css_evaluate_subchannel(mchk_schid, 0);
 }
 
-static int __init
-__init_channel_subsystem(struct subchannel_id schid, void *data)
-{
-        struct subchannel *sch;
-        int ret;
-
-        if (cio_is_console(schid))
-                sch = cio_get_console_subchannel();
-        else {
-                sch = css_alloc_subchannel(schid);
-                if (IS_ERR(sch))
-                        ret = PTR_ERR(sch);
-                else
-                        ret = 0;
-                switch (ret) {
-                case 0:
-                        break;
-                case -ENOMEM:
-                        panic("Out of memory in init_channel_subsystem\n");
-                /* -ENXIO: no more subchannels. */
-                case -ENXIO:
-                        return ret;
-                /* -EIO: this subchannel set not supported. */
-                case -EIO:
-                        return ret;
-                default:
-                        return 0;
-                }
-        }
-        /*
-         * We register ALL valid subchannels in ioinfo, even those
-         * that have been present before init_channel_subsystem.
-         * These subchannels can't have been registered yet (kmalloc
-         * not working) so we do it now. This is true e.g. for the
-         * console subchannel.
-         */
-        if (css_register_subchannel(sch)) {
-                if (!cio_is_console(schid))
-                        put_device(&sch->dev);
-        }
-        return 0;
-}
-
 static void __init
 css_generate_pgid(struct channel_subsystem *css, u32 tod_high)
 {
@@ -854,19 +802,30 @@ static struct notifier_block css_power_notifier = {
  * The struct subchannel's are created during probing (except for the
  * static console subchannel).
  */
-static int __init
-init_channel_subsystem (void)
+static int __init css_bus_init(void)
 {
         int ret, i;
 
         ret = chsc_determine_css_characteristics();
         if (ret == -ENOMEM)
-                goto out; /* No need to continue. */
+                goto out;
 
         ret = chsc_alloc_sei_area();
         if (ret)
                 goto out;
 
+        /* Try to enable MSS. */
+        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
+        switch (ret) {
+        case 0: /* Success. */
+                max_ssid = __MAX_SSID;
+                break;
+        case -ENOMEM:
+                goto out;
+        default:
+                max_ssid = 0;
+        }
+
         ret = slow_subchannel_init();
         if (ret)
                 goto out;
@@ -878,17 +837,6 @@ init_channel_subsystem (void)
         if ((ret = bus_register(&css_bus_type)))
                 goto out;
 
-        /* Try to enable MSS. */
-        ret = chsc_enable_facility(CHSC_SDA_OC_MSS);
-        switch (ret) {
-        case 0: /* Success. */
-                max_ssid = __MAX_SSID;
-                break;
-        case -ENOMEM:
-                goto out_bus;
-        default:
-                max_ssid = 0;
-        }
         /* Setup css structure. */
         for (i = 0; i <= __MAX_CSSID; i++) {
                 struct channel_subsystem *css;
@@ -934,7 +882,6 @@ init_channel_subsystem (void)
         /* Enable default isc for I/O subchannels. */
         isc_register(IO_SCH_ISC);
 
-        for_each_subchannel(__init_channel_subsystem, NULL);
         return 0;
 out_file:
         if (css_chsc_characteristics.secm)
@@ -955,17 +902,76 @@ out_unregister:
                                            &dev_attr_cm_enable);
                 device_unregister(&css->device);
         }
-out_bus:
         bus_unregister(&css_bus_type);
 out:
         crw_unregister_handler(CRW_RSC_CSS);
         chsc_free_sei_area();
-        kfree(slow_subchannel_set);
+        idset_free(slow_subchannel_set);
         pr_alert("The CSS device driver initialization failed with "
                  "errno=%d\n", ret);
         return ret;
 }
 
+static void __init css_bus_cleanup(void)
+{
+        struct channel_subsystem *css;
+        int i;
+
+        for (i = 0; i <= __MAX_CSSID; i++) {
+                css = channel_subsystems[i];
+                device_unregister(&css->pseudo_subchannel->dev);
+                css->pseudo_subchannel = NULL;
+                if (css_chsc_characteristics.secm)
+                        device_remove_file(&css->device, &dev_attr_cm_enable);
+                device_unregister(&css->device);
+        }
+        bus_unregister(&css_bus_type);
+        crw_unregister_handler(CRW_RSC_CSS);
+        chsc_free_sei_area();
+        idset_free(slow_subchannel_set);
+        isc_unregister(IO_SCH_ISC);
+}
+
+static int __init channel_subsystem_init(void)
+{
+        int ret;
+
+        ret = css_bus_init();
+        if (ret)
+                return ret;
+
+        ret = io_subchannel_init();
+        if (ret)
+                css_bus_cleanup();
+
+        return ret;
+}
+subsys_initcall(channel_subsystem_init);
+
+static int css_settle(struct device_driver *drv, void *unused)
+{
+        struct css_driver *cssdrv = to_cssdriver(drv);
+
+        if (cssdrv->settle)
+                cssdrv->settle();
+        return 0;
+}
+
+/*
+ * Wait for the initialization of devices to finish, to make sure we are
+ * done with our setup if the search for the root device starts.
+ */
+static int __init channel_subsystem_init_sync(void)
+{
+        /* Start initial subchannel evaluation. */
+        css_schedule_eval_all();
+        /* Wait for the evaluation of subchannels to finish. */
+        wait_event(css_eval_wq, atomic_read(&css_eval_scheduled) == 0);
+        /* Wait for the subchannel type specific initialization to finish */
+        return bus_for_each_drv(&css_bus_type, NULL, NULL, css_settle);
+}
+subsys_initcall_sync(channel_subsystem_init_sync);
+
 int sch_is_pseudo_sch(struct subchannel *sch)
 {
         return sch == to_css(sch->dev.parent)->pseudo_subchannel;
@@ -1135,7 +1141,5 @@ void css_driver_unregister(struct css_driver *cdrv)
 }
 EXPORT_SYMBOL_GPL(css_driver_unregister);
 
-subsys_initcall(init_channel_subsystem);
-
 MODULE_LICENSE("GPL");
 EXPORT_SYMBOL(css_bus_type);
diff --git a/drivers/s390/cio/css.h b/drivers/s390/cio/css.h
index 9763eeec7458..68d6b0bf151c 100644
--- a/drivers/s390/cio/css.h
+++ b/drivers/s390/cio/css.h
@@ -75,6 +75,7 @@ struct chp_link;
  * @freeze: callback for freezing during hibernation snapshotting
  * @thaw: undo work done in @freeze
  * @restore: callback for restoring after hibernation
+ * @settle: wait for asynchronous work to finish
  * @name: name of the device driver
  */
 struct css_driver {
@@ -92,6 +93,7 @@ struct css_driver {
         int (*freeze)(struct subchannel *);
         int (*thaw) (struct subchannel *);
         int (*restore)(struct subchannel *);
+        void (*settle)(void);
         const char *name;
 };
 
@@ -109,6 +111,7 @@ extern void css_sch_device_unregister(struct subchannel *);
 extern int css_probe_device(struct subchannel_id);
 extern struct subchannel *get_subchannel_by_schid(struct subchannel_id);
 extern int css_init_done;
+extern int max_ssid;
 int for_each_subchannel_staged(int (*fn_known)(struct subchannel *, void *),
                                int (*fn_unknown)(struct subchannel_id,
                                                void *), void *data);
diff --git a/drivers/s390/cio/device.c b/drivers/s390/cio/device.c
index 6527f3f34493..f780bdd3a04e 100644
--- a/drivers/s390/cio/device.c
+++ b/drivers/s390/cio/device.c
@@ -131,6 +131,10 @@ static void io_subchannel_shutdown(struct subchannel *);
 static int io_subchannel_sch_event(struct subchannel *, int);
 static int io_subchannel_chp_event(struct subchannel *, struct chp_link *,
                                    int);
+static void recovery_func(unsigned long data);
+struct workqueue_struct *ccw_device_work;
+wait_queue_head_t ccw_device_init_wq;
+atomic_t ccw_device_init_count;
 
 static struct css_device_id io_subchannel_ids[] = {
         { .match_flags = 0x1, .type = SUBCHANNEL_TYPE_IO, },
@@ -151,6 +155,13 @@ static int io_subchannel_prepare(struct subchannel *sch)
         return 0;
 }
 
+static void io_subchannel_settle(void)
+{
+        wait_event(ccw_device_init_wq,
+                   atomic_read(&ccw_device_init_count) == 0);
+        flush_workqueue(ccw_device_work);
+}
+
 static struct css_driver io_subchannel_driver = {
         .owner = THIS_MODULE,
         .subchannel_type = io_subchannel_ids,
@@ -162,16 +173,10 @@ static struct css_driver io_subchannel_driver = {
         .remove = io_subchannel_remove,
         .shutdown = io_subchannel_shutdown,
         .prepare = io_subchannel_prepare,
+        .settle = io_subchannel_settle,
 };
 
-struct workqueue_struct *ccw_device_work;
-wait_queue_head_t ccw_device_init_wq;
-atomic_t ccw_device_init_count;
-
-static void recovery_func(unsigned long data);
-
-static int __init
-init_ccw_bus_type (void)
+int __init io_subchannel_init(void)
 {
         int ret;
 
@@ -181,10 +186,10 @@ init_ccw_bus_type (void)
 
         ccw_device_work = create_singlethread_workqueue("cio");
         if (!ccw_device_work)
-                return -ENOMEM; /* FIXME: better errno ? */
+                return -ENOMEM;
         slow_path_wq = create_singlethread_workqueue("kslowcrw");
         if (!slow_path_wq) {
-                ret = -ENOMEM; /* FIXME: better errno ? */
+                ret = -ENOMEM;
                 goto out_err;
         }
         if ((ret = bus_register (&ccw_bus_type)))
@@ -194,9 +199,6 @@ init_ccw_bus_type (void)
         if (ret)
                 goto out_err;
 
-        wait_event(ccw_device_init_wq,
-                   atomic_read(&ccw_device_init_count) == 0);
-        flush_workqueue(ccw_device_work);
         return 0;
 out_err:
         if (ccw_device_work)
@@ -206,16 +208,6 @@ out_err:
         return ret;
 }
 
-static void __exit
-cleanup_ccw_bus_type (void)
-{
-        css_driver_unregister(&io_subchannel_driver);
-        bus_unregister(&ccw_bus_type);
-        destroy_workqueue(ccw_device_work);
-}
-
-subsys_initcall(init_ccw_bus_type);
-module_exit(cleanup_ccw_bus_type);
 
 /************************ device handling **************************/
 
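The css.c and device.c hunks above replace the old need_reprobe/reprobe_all machinery with a simple handshake: css_schedule_eval() and css_schedule_eval_all() set css_eval_scheduled before queueing the slow-path work, the worker clears the flag and wakes css_eval_wq once slow_subchannel_set has drained, and channel_subsystem_init_sync() waits on that flag before asking each css_driver (here io_subchannel_settle()) to finish its own asynchronous setup. Below is a minimal userspace sketch of that flag-and-wake pattern, written with pthreads purely for illustration; the kernel code uses a workqueue, an atomic_t and wait_event() instead, and the counter here merely stands in for the subchannel idset.

/* Hypothetical userspace analogue of the css_eval_scheduled handshake. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t eval_wq = PTHREAD_COND_INITIALIZER;   /* ~ css_eval_wq */
static int eval_scheduled;                  /* ~ css_eval_scheduled */
static int pending_subchannels = 3;         /* stand-in for slow_subchannel_set */

static void *slow_path_worker(void *unused)
{
        pthread_mutex_lock(&lock);
        while (pending_subchannels > 0)
                pending_subchannels--;      /* "evaluate" each pending entry */
        /* set is empty: clear the flag and wake anyone waiting to settle */
        eval_scheduled = 0;
        pthread_cond_broadcast(&eval_wq);
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t worker;

        /* scheduling side: mark evaluation pending, then kick the worker */
        pthread_mutex_lock(&lock);
        eval_scheduled = 1;
        pthread_mutex_unlock(&lock);
        pthread_create(&worker, NULL, slow_path_worker, NULL);

        /* init_sync side: block until the worker reports the set drained */
        pthread_mutex_lock(&lock);
        while (eval_scheduled)
                pthread_cond_wait(&eval_wq, &lock);
        pthread_mutex_unlock(&lock);

        pthread_join(worker, NULL);
        printf("subchannel evaluation settled\n");
        return 0;
}

Build with cc -pthread; the condition-variable loop in main() plays the role that wait_event() plays in channel_subsystem_init_sync().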
diff --git a/drivers/s390/cio/device.h b/drivers/s390/cio/device.h
index e3975107a578..ed39a2caaf47 100644
--- a/drivers/s390/cio/device.h
+++ b/drivers/s390/cio/device.h
@@ -74,6 +74,7 @@ dev_fsm_final_state(struct ccw_device *cdev)
 extern struct workqueue_struct *ccw_device_work;
 extern wait_queue_head_t ccw_device_init_wq;
 extern atomic_t ccw_device_init_count;
+int __init io_subchannel_init(void);
 
 void io_subchannel_recog_done(struct ccw_device *cdev);
 void io_subchannel_init_config(struct subchannel *sch);
diff --git a/drivers/s390/cio/idset.c b/drivers/s390/cio/idset.c
index cf8f24a4b5eb..4d10981c7cc1 100644
--- a/drivers/s390/cio/idset.c
+++ b/drivers/s390/cio/idset.c
@@ -78,7 +78,7 @@ static inline int idset_get_first(struct idset *set, int *ssid, int *id)
 
 struct idset *idset_sch_new(void)
 {
-        return idset_new(__MAX_SSID + 1, __MAX_SUBCHANNEL + 1);
+        return idset_new(max_ssid + 1, __MAX_SUBCHANNEL + 1);
 }
 
 void idset_sch_add(struct idset *set, struct subchannel_id schid)
@@ -110,3 +110,23 @@ int idset_sch_get_first(struct idset *set, struct subchannel_id *schid)
         }
         return rc;
 }
+
+int idset_is_empty(struct idset *set)
+{
+        int bitnum;
+
+        bitnum = find_first_bit(set->bitmap, set->num_ssid * set->num_id);
+        if (bitnum >= set->num_ssid * set->num_id)
+                return 1;
+        return 0;
+}
+
+void idset_add_set(struct idset *to, struct idset *from)
+{
+        unsigned long i, len;
+
+        len = min(__BITOPS_WORDS(to->num_ssid * to->num_id),
+                  __BITOPS_WORDS(from->num_ssid * from->num_id));
+        for (i = 0; i < len ; i++)
+                to->bitmap[i] |= from->bitmap[i];
+}
diff --git a/drivers/s390/cio/idset.h b/drivers/s390/cio/idset.h
index 528065cb5021..7543da4529f9 100644
--- a/drivers/s390/cio/idset.h
+++ b/drivers/s390/cio/idset.h
@@ -21,5 +21,7 @@ void idset_sch_add(struct idset *set, struct subchannel_id id);
 void idset_sch_del(struct idset *set, struct subchannel_id id);
 int idset_sch_contains(struct idset *set, struct subchannel_id id);
 int idset_sch_get_first(struct idset *set, struct subchannel_id *id);
+int idset_is_empty(struct idset *set);
+void idset_add_set(struct idset *to, struct idset *from);
 
 #endif /* S390_IDSET_H */
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index 9aef402a5f1b..4be6e84b9599 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -401,7 +401,7 @@ static void announce_buffer_error(struct qdio_q *q, int count)
         if ((!q->is_input_q &&
             (q->sbal[q->first_to_check]->element[15].flags & 0xff) == 0x10)) {
                 qdio_perf_stat_inc(&perf_stats.outbound_target_full);
-                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%3d",
+                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "OUTFULL FTC:%02x",
                               q->first_to_check);
                 return;
         }
@@ -418,7 +418,7 @@ static inline void inbound_primed(struct qdio_q *q, int count)
 {
         int new;
 
-        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %3d", count);
+        DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in prim: %02x", count);
 
         /* for QEBSM the ACK was already set by EQBS */
         if (is_qebsm(q)) {
@@ -455,6 +455,8 @@ static inline void inbound_primed(struct qdio_q *q, int count)
         count--;
         if (!count)
                 return;
+        /* need to change ALL buffers to get more interrupts */
+        set_buf_states(q, q->first_to_check, SLSB_P_INPUT_NOT_INIT, count);
 }
 
 static int get_inbound_buffer_frontier(struct qdio_q *q)
@@ -545,7 +547,7 @@ static inline int qdio_inbound_q_done(struct qdio_q *q)
          * has (probably) not moved (see qdio_inbound_processing).
          */
         if (get_usecs() > q->u.in.timestamp + QDIO_INPUT_THRESHOLD) {
-                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%3d",
+                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "in done:%02x",
                               q->first_to_check);
                 return 1;
         } else
@@ -565,11 +567,10 @@ static void qdio_kick_handler(struct qdio_q *q)
 
         if (q->is_input_q) {
                 qdio_perf_stat_inc(&perf_stats.inbound_handler);
-                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%3d c:%3d", start, count);
-        } else {
-                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: nr:%1d", q->nr);
-                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "s:%3d c:%3d", start, count);
-        }
+                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "kih s:%02x c:%02x", start, count);
+        } else
+                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "koh: s:%02x c:%02x",
+                              start, count);
 
         q->handler(q->irq_ptr->cdev, q->qdio_error, q->nr, start, count,
                    q->irq_ptr->int_parm);
@@ -633,7 +634,7 @@ static int get_outbound_buffer_frontier(struct qdio_q *q)
         switch (state) {
         case SLSB_P_OUTPUT_EMPTY:
                 /* the adapter got it */
-                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %3d", q->nr, count);
+                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "out empty:%1d %02x", q->nr, count);
 
                 atomic_sub(count, &q->nr_buf_used);
                 q->first_to_check = add_buf(q->first_to_check, count);
@@ -1481,10 +1482,9 @@ static int handle_outbound(struct qdio_q *q, unsigned int callflags,
         get_buf_state(q, prev_buf(bufnr), &state, 0);
         if (state != SLSB_CU_OUTPUT_PRIMED)
                 rc = qdio_kick_outbound_q(q);
-        else {
-                DBF_DEV_EVENT(DBF_INFO, q->irq_ptr, "fast-req");
+        else
                 qdio_perf_stat_inc(&perf_stats.fast_requeue);
-        }
+
 out:
         tasklet_schedule(&q->tasklet);
         return rc;
@@ -1510,12 +1510,8 @@ int do_QDIO(struct ccw_device *cdev, unsigned int callflags,
         if (!irq_ptr)
                 return -ENODEV;
 
-        if (callflags & QDIO_FLAG_SYNC_INPUT)
-                DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO input");
-        else
-                DBF_DEV_EVENT(DBF_INFO, irq_ptr, "doQDIO output");
-        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "q:%1d flag:%4x", q_nr, callflags);
-        DBF_DEV_EVENT(DBF_INFO, irq_ptr, "buf:%2d cnt:%3d", bufnr, count);
+        DBF_DEV_EVENT(DBF_INFO, irq_ptr,
+                      "do%02x b:%02x c:%02x", callflags, bufnr, count);
 
         if (irq_ptr->state != QDIO_IRQ_STATE_ACTIVE)
                 return -EBUSY;
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 090b32a339c6..1294876bf7b4 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -60,6 +60,7 @@ static int ap_device_probe(struct device *dev);
 static void ap_interrupt_handler(void *unused1, void *unused2);
 static void ap_reset(struct ap_device *ap_dev);
 static void ap_config_timeout(unsigned long ptr);
+static int ap_select_domain(void);
 
 /*
  * Module description.
@@ -109,6 +110,10 @@ static unsigned long long poll_timeout = 250000;
 
 /* Suspend flag */
 static int ap_suspend_flag;
+/* Flag to check if domain was set through module parameter domain=. This is
+ * important when supsend and resume is done in a z/VM environment where the
+ * domain might change. */
+static int user_set_domain = 0;
 static struct bus_type ap_bus_type;
 
 /**
@@ -643,6 +648,7 @@ static int ap_bus_suspend(struct device *dev, pm_message_t state)
                         destroy_workqueue(ap_work_queue);
                         ap_work_queue = NULL;
                 }
+
                 tasklet_disable(&ap_tasklet);
         }
         /* Poll on the device until all requests are finished. */
@@ -653,7 +659,10 @@ static int ap_bus_suspend(struct device *dev, pm_message_t state)
                 spin_unlock_bh(&ap_dev->lock);
         } while ((flags & 1) || (flags & 2));
 
-        ap_device_remove(dev);
+        spin_lock_bh(&ap_dev->lock);
+        ap_dev->unregistered = 1;
+        spin_unlock_bh(&ap_dev->lock);
+
         return 0;
 }
 
@@ -666,11 +675,10 @@ static int ap_bus_resume(struct device *dev)
                 ap_suspend_flag = 0;
                 if (!ap_interrupts_available())
                         ap_interrupt_indicator = NULL;
-                ap_device_probe(dev);
-                ap_reset(ap_dev);
-                setup_timer(&ap_dev->timeout, ap_request_timeout,
-                            (unsigned long) ap_dev);
-                ap_scan_bus(NULL);
+                if (!user_set_domain) {
+                        ap_domain_index = -1;
+                        ap_select_domain();
+                }
                 init_timer(&ap_config_timer);
                 ap_config_timer.function = ap_config_timeout;
                 ap_config_timer.data = 0;
@@ -686,12 +694,14 @@ static int ap_bus_resume(struct device *dev)
                 tasklet_schedule(&ap_tasklet);
                 if (ap_thread_flag)
                         rc = ap_poll_thread_start();
-        } else {
-                ap_device_probe(dev);
-                ap_reset(ap_dev);
-                setup_timer(&ap_dev->timeout, ap_request_timeout,
-                            (unsigned long) ap_dev);
         }
+        if (AP_QID_QUEUE(ap_dev->qid) != ap_domain_index) {
+                spin_lock_bh(&ap_dev->lock);
+                ap_dev->qid = AP_MKQID(AP_QID_DEVICE(ap_dev->qid),
+                                       ap_domain_index);
+                spin_unlock_bh(&ap_dev->lock);
+        }
+        queue_work(ap_work_queue, &ap_config_work);
 
         return rc;
 }
@@ -1079,6 +1089,8 @@ static void ap_scan_bus(struct work_struct *unused)
                 spin_lock_bh(&ap_dev->lock);
                 if (rc || ap_dev->unregistered) {
                         spin_unlock_bh(&ap_dev->lock);
+                        if (ap_dev->unregistered)
+                                i--;
                         device_unregister(dev);
                         put_device(dev);
                         continue;
@@ -1586,6 +1598,12 @@ int __init ap_module_init(void)
                        ap_domain_index);
                 return -EINVAL;
         }
+        /* In resume callback we need to know if the user had set the domain.
+         * If so, we can not just reset it.
+         */
+        if (ap_domain_index >= 0)
+                user_set_domain = 1;
+
         if (ap_instructions_available() != 0) {
                 pr_warning("The hardware system does not support "
                            "AP instructions\n");