author | Sebastian Ott <sebott@linux.vnet.ibm.com> | 2010-10-25 10:10:29 -0400 |
---|---|---|
committer | Martin Schwidefsky <sky@mschwide.boeblingen.de.ibm.com> | 2010-10-25 10:10:18 -0400 |
commit | 34196f82b16749e119db5572271944c4add0a9aa (patch) |
tree | 94db487608b30d8c123419c19c12544686189c10 /drivers/s390/cio | |
parent | 34aec07c170b972a29c954b37047184bd0f9f294 (diff) |
[S390] chsc: consolidate memory allocations
Most wrappers around the channel subsystem call have their own logic
to allocate memory (with proper alignment) or use preallocated or
static memory. This patch converts most users of the channel
subsystem call to use the same preallocated page (protected by a
spinlock).
Note: The sei_page, which is used in our crw handler to call
"store event information", has to coexist with the new chsc_page, since
a) in crw context, while accessing the sei_page, sleeping is allowed
(which will conflict with the spinlock protection of the chsc_page)
b) in crw context, while accessing the sei_page, channel subsystem
calls are allowed (and these would themselves require the chsc_page).
Signed-off-by: Sebastian Ott <sebott@linux.vnet.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
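For readers not familiar with the driver, here is a minimal sketch of the allocation pattern this patch converts the CHSC wrappers to: a single DMA-capable page, allocated once at init time and serialized by a spinlock, which every wrapper clears and reuses instead of allocating its own buffer. All names in the sketch (my_chsc_page, my_chsc_page_lock, my_chsc_buf_init, my_chsc_do_request) are illustrative only and do not appear in the driver.

```c
#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/spinlock.h>
#include <linux/string.h>

static void *my_chsc_page;                  /* shared request/response buffer */
static DEFINE_SPINLOCK(my_chsc_page_lock);  /* serializes all users of the page */

static int __init my_chsc_buf_init(void)
{
	/* GFP_DMA, as in the driver, keeps the page in low memory for the chsc instruction. */
	my_chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
	return my_chsc_page ? 0 : -ENOMEM;
}

/* Every wrapper follows the same shape: lock, clear, build request, issue, unlock. */
static int my_chsc_do_request(void (*build)(void *area), int (*issue)(void *area))
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&my_chsc_page_lock, flags);
	memset(my_chsc_page, 0, PAGE_SIZE);
	build(my_chsc_page);        /* fill in the request block */
	ret = issue(my_chsc_page);  /* e.g. a wrapper around the chsc instruction */
	spin_unlock_irqrestore(&my_chsc_page_lock, flags);
	return ret;
}
```

Because the shared page is held under a spinlock, its users must not sleep; that is exactly why the sei_page, which is accessed from crw context where sleeping and further channel subsystem calls are allowed, keeps its own separate allocation.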
Diffstat (limited to 'drivers/s390/cio')
-rw-r--r-- | drivers/s390/cio/chsc.c | 226
-rw-r--r-- | drivers/s390/cio/chsc.h | 2
-rw-r--r-- | drivers/s390/cio/css.c | 24
3 files changed, 116 insertions, 136 deletions
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index f26cc3e16181..d12c152cb691 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -29,8 +29,8 @@ | |||
29 | #include "chsc.h" | 29 | #include "chsc.h" |
30 | 30 | ||
31 | static void *sei_page; | 31 | static void *sei_page; |
32 | static DEFINE_SPINLOCK(siosl_lock); | 32 | static void *chsc_page; |
33 | static DEFINE_SPINLOCK(sda_lock); | 33 | static DEFINE_SPINLOCK(chsc_page_lock); |
34 | 34 | ||
35 | /** | 35 | /** |
36 | * chsc_error_from_response() - convert a chsc response to an error | 36 | * chsc_error_from_response() - convert a chsc response to an error |
@@ -85,17 +85,15 @@ struct chsc_ssd_area { | |||
85 | 85 | ||
86 | int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) | 86 | int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) |
87 | { | 87 | { |
88 | unsigned long page; | ||
89 | struct chsc_ssd_area *ssd_area; | 88 | struct chsc_ssd_area *ssd_area; |
90 | int ccode; | 89 | int ccode; |
91 | int ret; | 90 | int ret; |
92 | int i; | 91 | int i; |
93 | int mask; | 92 | int mask; |
94 | 93 | ||
95 | page = get_zeroed_page(GFP_KERNEL | GFP_DMA); | 94 | spin_lock_irq(&chsc_page_lock); |
96 | if (!page) | 95 | memset(chsc_page, 0, PAGE_SIZE); |
97 | return -ENOMEM; | 96 | ssd_area = chsc_page; |
98 | ssd_area = (struct chsc_ssd_area *) page; | ||
99 | ssd_area->request.length = 0x0010; | 97 | ssd_area->request.length = 0x0010; |
100 | ssd_area->request.code = 0x0004; | 98 | ssd_area->request.code = 0x0004; |
101 | ssd_area->ssid = schid.ssid; | 99 | ssd_area->ssid = schid.ssid; |
@@ -106,25 +104,25 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) | |||
106 | /* Check response. */ | 104 | /* Check response. */ |
107 | if (ccode > 0) { | 105 | if (ccode > 0) { |
108 | ret = (ccode == 3) ? -ENODEV : -EBUSY; | 106 | ret = (ccode == 3) ? -ENODEV : -EBUSY; |
109 | goto out_free; | 107 | goto out; |
110 | } | 108 | } |
111 | ret = chsc_error_from_response(ssd_area->response.code); | 109 | ret = chsc_error_from_response(ssd_area->response.code); |
112 | if (ret != 0) { | 110 | if (ret != 0) { |
113 | CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", | 111 | CIO_MSG_EVENT(2, "chsc: ssd failed for 0.%x.%04x (rc=%04x)\n", |
114 | schid.ssid, schid.sch_no, | 112 | schid.ssid, schid.sch_no, |
115 | ssd_area->response.code); | 113 | ssd_area->response.code); |
116 | goto out_free; | 114 | goto out; |
117 | } | 115 | } |
118 | if (!ssd_area->sch_valid) { | 116 | if (!ssd_area->sch_valid) { |
119 | ret = -ENODEV; | 117 | ret = -ENODEV; |
120 | goto out_free; | 118 | goto out; |
121 | } | 119 | } |
122 | /* Copy data */ | 120 | /* Copy data */ |
123 | ret = 0; | 121 | ret = 0; |
124 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); | 122 | memset(ssd, 0, sizeof(struct chsc_ssd_info)); |
125 | if ((ssd_area->st != SUBCHANNEL_TYPE_IO) && | 123 | if ((ssd_area->st != SUBCHANNEL_TYPE_IO) && |
126 | (ssd_area->st != SUBCHANNEL_TYPE_MSG)) | 124 | (ssd_area->st != SUBCHANNEL_TYPE_MSG)) |
127 | goto out_free; | 125 | goto out; |
128 | ssd->path_mask = ssd_area->path_mask; | 126 | ssd->path_mask = ssd_area->path_mask; |
129 | ssd->fla_valid_mask = ssd_area->fla_valid_mask; | 127 | ssd->fla_valid_mask = ssd_area->fla_valid_mask; |
130 | for (i = 0; i < 8; i++) { | 128 | for (i = 0; i < 8; i++) { |
@@ -136,8 +134,8 @@ int chsc_get_ssd_info(struct subchannel_id schid, struct chsc_ssd_info *ssd) | |||
136 | if (ssd_area->fla_valid_mask & mask) | 134 | if (ssd_area->fla_valid_mask & mask) |
137 | ssd->fla[i] = ssd_area->fla[i]; | 135 | ssd->fla[i] = ssd_area->fla[i]; |
138 | } | 136 | } |
139 | out_free: | 137 | out: |
140 | free_page(page); | 138 | spin_unlock_irq(&chsc_page_lock); |
141 | return ret; | 139 | return ret; |
142 | } | 140 | } |
143 | 141 | ||
@@ -552,7 +550,7 @@ cleanup: | |||
552 | return ret; | 550 | return ret; |
553 | } | 551 | } |
554 | 552 | ||
555 | int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | 553 | int __chsc_do_secm(struct channel_subsystem *css, int enable) |
556 | { | 554 | { |
557 | struct { | 555 | struct { |
558 | struct chsc_header request; | 556 | struct chsc_header request; |
@@ -573,7 +571,9 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | |||
573 | } __attribute__ ((packed)) *secm_area; | 571 | } __attribute__ ((packed)) *secm_area; |
574 | int ret, ccode; | 572 | int ret, ccode; |
575 | 573 | ||
576 | secm_area = page; | 574 | spin_lock_irq(&chsc_page_lock); |
575 | memset(chsc_page, 0, PAGE_SIZE); | ||
576 | secm_area = chsc_page; | ||
577 | secm_area->request.length = 0x0050; | 577 | secm_area->request.length = 0x0050; |
578 | secm_area->request.code = 0x0016; | 578 | secm_area->request.code = 0x0016; |
579 | 579 | ||
@@ -584,8 +584,10 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | |||
584 | secm_area->operation_code = enable ? 0 : 1; | 584 | secm_area->operation_code = enable ? 0 : 1; |
585 | 585 | ||
586 | ccode = chsc(secm_area); | 586 | ccode = chsc(secm_area); |
587 | if (ccode > 0) | 587 | if (ccode > 0) { |
588 | return (ccode == 3) ? -ENODEV : -EBUSY; | 588 | ret = (ccode == 3) ? -ENODEV : -EBUSY; |
589 | goto out; | ||
590 | } | ||
589 | 591 | ||
590 | switch (secm_area->response.code) { | 592 | switch (secm_area->response.code) { |
591 | case 0x0102: | 593 | case 0x0102: |
@@ -598,37 +600,32 @@ int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page) | |||
598 | if (ret != 0) | 600 | if (ret != 0) |
599 | CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", | 601 | CIO_CRW_EVENT(2, "chsc: secm failed (rc=%04x)\n", |
600 | secm_area->response.code); | 602 | secm_area->response.code); |
603 | out: | ||
604 | spin_unlock_irq(&chsc_page_lock); | ||
601 | return ret; | 605 | return ret; |
602 | } | 606 | } |
603 | 607 | ||
604 | int | 608 | int |
605 | chsc_secm(struct channel_subsystem *css, int enable) | 609 | chsc_secm(struct channel_subsystem *css, int enable) |
606 | { | 610 | { |
607 | void *secm_area; | ||
608 | int ret; | 611 | int ret; |
609 | 612 | ||
610 | secm_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
611 | if (!secm_area) | ||
612 | return -ENOMEM; | ||
613 | |||
614 | if (enable && !css->cm_enabled) { | 613 | if (enable && !css->cm_enabled) { |
615 | css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 614 | css->cub_addr1 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
616 | css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 615 | css->cub_addr2 = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
617 | if (!css->cub_addr1 || !css->cub_addr2) { | 616 | if (!css->cub_addr1 || !css->cub_addr2) { |
618 | free_page((unsigned long)css->cub_addr1); | 617 | free_page((unsigned long)css->cub_addr1); |
619 | free_page((unsigned long)css->cub_addr2); | 618 | free_page((unsigned long)css->cub_addr2); |
620 | free_page((unsigned long)secm_area); | ||
621 | return -ENOMEM; | 619 | return -ENOMEM; |
622 | } | 620 | } |
623 | } | 621 | } |
624 | ret = __chsc_do_secm(css, enable, secm_area); | 622 | ret = __chsc_do_secm(css, enable); |
625 | if (!ret) { | 623 | if (!ret) { |
626 | css->cm_enabled = enable; | 624 | css->cm_enabled = enable; |
627 | if (css->cm_enabled) { | 625 | if (css->cm_enabled) { |
628 | ret = chsc_add_cmg_attr(css); | 626 | ret = chsc_add_cmg_attr(css); |
629 | if (ret) { | 627 | if (ret) { |
630 | memset(secm_area, 0, PAGE_SIZE); | 628 | __chsc_do_secm(css, 0); |
631 | __chsc_do_secm(css, 0, secm_area); | ||
632 | css->cm_enabled = 0; | 629 | css->cm_enabled = 0; |
633 | } | 630 | } |
634 | } else | 631 | } else |
@@ -638,7 +635,6 @@ chsc_secm(struct channel_subsystem *css, int enable) | |||
638 | free_page((unsigned long)css->cub_addr1); | 635 | free_page((unsigned long)css->cub_addr1); |
639 | free_page((unsigned long)css->cub_addr2); | 636 | free_page((unsigned long)css->cub_addr2); |
640 | } | 637 | } |
641 | free_page((unsigned long)secm_area); | ||
642 | return ret; | 638 | return ret; |
643 | } | 639 | } |
644 | 640 | ||
@@ -669,13 +665,12 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, | |||
669 | return -EINVAL; | 665 | return -EINVAL; |
670 | if ((rfmt == 2) && !css_general_characteristics.cib) | 666 | if ((rfmt == 2) && !css_general_characteristics.cib) |
671 | return -EINVAL; | 667 | return -EINVAL; |
672 | scpd_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | ||
673 | if (!scpd_area) | ||
674 | return -ENOMEM; | ||
675 | 668 | ||
669 | spin_lock_irq(&chsc_page_lock); | ||
670 | memset(chsc_page, 0, PAGE_SIZE); | ||
671 | scpd_area = chsc_page; | ||
676 | scpd_area->request.length = 0x0010; | 672 | scpd_area->request.length = 0x0010; |
677 | scpd_area->request.code = 0x0002; | 673 | scpd_area->request.code = 0x0002; |
678 | |||
679 | scpd_area->cssid = chpid.cssid; | 674 | scpd_area->cssid = chpid.cssid; |
680 | scpd_area->first_chpid = chpid.id; | 675 | scpd_area->first_chpid = chpid.id; |
681 | scpd_area->last_chpid = chpid.id; | 676 | scpd_area->last_chpid = chpid.id; |
@@ -698,7 +693,7 @@ int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, | |||
698 | CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", | 693 | CIO_CRW_EVENT(2, "chsc: scpd failed (rc=%04x)\n", |
699 | scpd_area->response.code); | 694 | scpd_area->response.code); |
700 | out: | 695 | out: |
701 | free_page((unsigned long)scpd_area); | 696 | spin_unlock_irq(&chsc_page_lock); |
702 | return ret; | 697 | return ret; |
703 | } | 698 | } |
704 | EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); | 699 | EXPORT_SYMBOL_GPL(chsc_determine_channel_path_desc); |
@@ -725,33 +720,22 @@ static void | |||
725 | chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, | 720 | chsc_initialize_cmg_chars(struct channel_path *chp, u8 cmcv, |
726 | struct cmg_chars *chars) | 721 | struct cmg_chars *chars) |
727 | { | 722 | { |
728 | switch (chp->cmg) { | 723 | struct cmg_chars *cmg_chars; |
729 | case 2: | 724 | int i, mask; |
730 | case 3: | 725 | |
731 | chp->cmg_chars = kmalloc(sizeof(struct cmg_chars), | 726 | cmg_chars = chp->cmg_chars; |
732 | GFP_KERNEL); | 727 | for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { |
733 | if (chp->cmg_chars) { | 728 | mask = 0x80 >> (i + 3); |
734 | int i, mask; | 729 | if (cmcv & mask) |
735 | struct cmg_chars *cmg_chars; | 730 | cmg_chars->values[i] = chars->values[i]; |
736 | 731 | else | |
737 | cmg_chars = chp->cmg_chars; | 732 | cmg_chars->values[i] = 0; |
738 | for (i = 0; i < NR_MEASUREMENT_CHARS; i++) { | ||
739 | mask = 0x80 >> (i + 3); | ||
740 | if (cmcv & mask) | ||
741 | cmg_chars->values[i] = chars->values[i]; | ||
742 | else | ||
743 | cmg_chars->values[i] = 0; | ||
744 | } | ||
745 | } | ||
746 | break; | ||
747 | default: | ||
748 | /* No cmg-dependent data. */ | ||
749 | break; | ||
750 | } | 733 | } |
751 | } | 734 | } |
752 | 735 | ||
753 | int chsc_get_channel_measurement_chars(struct channel_path *chp) | 736 | int chsc_get_channel_measurement_chars(struct channel_path *chp) |
754 | { | 737 | { |
738 | struct cmg_chars *cmg_chars; | ||
755 | int ccode, ret; | 739 | int ccode, ret; |
756 | 740 | ||
757 | struct { | 741 | struct { |
@@ -775,13 +759,16 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) | |||
775 | u32 data[NR_MEASUREMENT_CHARS]; | 759 | u32 data[NR_MEASUREMENT_CHARS]; |
776 | } __attribute__ ((packed)) *scmc_area; | 760 | } __attribute__ ((packed)) *scmc_area; |
777 | 761 | ||
778 | scmc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 762 | chp->cmg_chars = NULL; |
779 | if (!scmc_area) | 763 | cmg_chars = kmalloc(sizeof(*cmg_chars), GFP_KERNEL); |
764 | if (!cmg_chars) | ||
780 | return -ENOMEM; | 765 | return -ENOMEM; |
781 | 766 | ||
767 | spin_lock_irq(&chsc_page_lock); | ||
768 | memset(chsc_page, 0, PAGE_SIZE); | ||
769 | scmc_area = chsc_page; | ||
782 | scmc_area->request.length = 0x0010; | 770 | scmc_area->request.length = 0x0010; |
783 | scmc_area->request.code = 0x0022; | 771 | scmc_area->request.code = 0x0022; |
784 | |||
785 | scmc_area->first_chpid = chp->chpid.id; | 772 | scmc_area->first_chpid = chp->chpid.id; |
786 | scmc_area->last_chpid = chp->chpid.id; | 773 | scmc_area->last_chpid = chp->chpid.id; |
787 | 774 | ||
@@ -792,24 +779,30 @@ int chsc_get_channel_measurement_chars(struct channel_path *chp) | |||
792 | } | 779 | } |
793 | 780 | ||
794 | ret = chsc_error_from_response(scmc_area->response.code); | 781 | ret = chsc_error_from_response(scmc_area->response.code); |
795 | if (ret == 0) { | 782 | if (ret) { |
796 | /* Success. */ | ||
797 | if (!scmc_area->not_valid) { | ||
798 | chp->cmg = scmc_area->cmg; | ||
799 | chp->shared = scmc_area->shared; | ||
800 | chsc_initialize_cmg_chars(chp, scmc_area->cmcv, | ||
801 | (struct cmg_chars *) | ||
802 | &scmc_area->data); | ||
803 | } else { | ||
804 | chp->cmg = -1; | ||
805 | chp->shared = -1; | ||
806 | } | ||
807 | } else { | ||
808 | CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n", | 783 | CIO_CRW_EVENT(2, "chsc: scmc failed (rc=%04x)\n", |
809 | scmc_area->response.code); | 784 | scmc_area->response.code); |
785 | goto out; | ||
786 | } | ||
787 | if (scmc_area->not_valid) { | ||
788 | chp->cmg = -1; | ||
789 | chp->shared = -1; | ||
790 | goto out; | ||
791 | } | ||
792 | chp->cmg = scmc_area->cmg; | ||
793 | chp->shared = scmc_area->shared; | ||
794 | if (chp->cmg != 2 && chp->cmg != 3) { | ||
795 | /* No cmg-dependent data. */ | ||
796 | goto out; | ||
810 | } | 797 | } |
798 | chp->cmg_chars = cmg_chars; | ||
799 | chsc_initialize_cmg_chars(chp, scmc_area->cmcv, | ||
800 | (struct cmg_chars *) &scmc_area->data); | ||
811 | out: | 801 | out: |
812 | free_page((unsigned long)scmc_area); | 802 | spin_unlock_irq(&chsc_page_lock); |
803 | if (!chp->cmg_chars) | ||
804 | kfree(cmg_chars); | ||
805 | |||
813 | return ret; | 806 | return ret; |
814 | } | 807 | } |
815 | 808 | ||
@@ -818,27 +811,33 @@ int __init chsc_init(void) | |||
818 | int ret; | 811 | int ret; |
819 | 812 | ||
820 | sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 813 | sei_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
821 | if (!sei_page) { | 814 | chsc_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); |
822 | CIO_MSG_EVENT(0, "Can't allocate page for processing of " | 815 | if (!sei_page || !chsc_page) { |
823 | "chsc machine checks!\n"); | 816 | ret = -ENOMEM; |
824 | return -ENOMEM; | 817 | goto out_err; |
825 | } | 818 | } |
826 | ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw); | 819 | ret = crw_register_handler(CRW_RSC_CSS, chsc_process_crw); |
827 | if (ret) | 820 | if (ret) |
828 | free_page((unsigned long)sei_page); | 821 | goto out_err; |
822 | return ret; | ||
823 | out_err: | ||
824 | free_page((unsigned long)chsc_page); | ||
825 | free_page((unsigned long)sei_page); | ||
829 | return ret; | 826 | return ret; |
830 | } | 827 | } |
831 | 828 | ||
832 | void __init chsc_init_cleanup(void) | 829 | void __init chsc_init_cleanup(void) |
833 | { | 830 | { |
834 | crw_unregister_handler(CRW_RSC_CSS); | 831 | crw_unregister_handler(CRW_RSC_CSS); |
832 | free_page((unsigned long)chsc_page); | ||
835 | free_page((unsigned long)sei_page); | 833 | free_page((unsigned long)sei_page); |
836 | } | 834 | } |
837 | 835 | ||
838 | int chsc_enable_facility(int operation_code) | 836 | int chsc_enable_facility(int operation_code) |
839 | { | 837 | { |
838 | unsigned long flags; | ||
840 | int ret; | 839 | int ret; |
841 | static struct { | 840 | struct { |
842 | struct chsc_header request; | 841 | struct chsc_header request; |
843 | u8 reserved1:4; | 842 | u8 reserved1:4; |
844 | u8 format:4; | 843 | u8 format:4; |
@@ -851,32 +850,33 @@ int chsc_enable_facility(int operation_code) | |||
851 | u32 reserved5:4; | 850 | u32 reserved5:4; |
852 | u32 format2:4; | 851 | u32 format2:4; |
853 | u32 reserved6:24; | 852 | u32 reserved6:24; |
854 | } __attribute__ ((packed, aligned(4096))) sda_area; | 853 | } __attribute__ ((packed)) *sda_area; |
855 | 854 | ||
856 | spin_lock(&sda_lock); | 855 | spin_lock_irqsave(&chsc_page_lock, flags); |
857 | memset(&sda_area, 0, sizeof(sda_area)); | 856 | memset(chsc_page, 0, PAGE_SIZE); |
858 | sda_area.request.length = 0x0400; | 857 | sda_area = chsc_page; |
859 | sda_area.request.code = 0x0031; | 858 | sda_area->request.length = 0x0400; |
860 | sda_area.operation_code = operation_code; | 859 | sda_area->request.code = 0x0031; |
860 | sda_area->operation_code = operation_code; | ||
861 | 861 | ||
862 | ret = chsc(&sda_area); | 862 | ret = chsc(sda_area); |
863 | if (ret > 0) { | 863 | if (ret > 0) { |
864 | ret = (ret == 3) ? -ENODEV : -EBUSY; | 864 | ret = (ret == 3) ? -ENODEV : -EBUSY; |
865 | goto out; | 865 | goto out; |
866 | } | 866 | } |
867 | 867 | ||
868 | switch (sda_area.response.code) { | 868 | switch (sda_area->response.code) { |
869 | case 0x0101: | 869 | case 0x0101: |
870 | ret = -EOPNOTSUPP; | 870 | ret = -EOPNOTSUPP; |
871 | break; | 871 | break; |
872 | default: | 872 | default: |
873 | ret = chsc_error_from_response(sda_area.response.code); | 873 | ret = chsc_error_from_response(sda_area->response.code); |
874 | } | 874 | } |
875 | if (ret != 0) | 875 | if (ret != 0) |
876 | CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", | 876 | CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n", |
877 | operation_code, sda_area.response.code); | 877 | operation_code, sda_area->response.code); |
878 | out: | 878 | out: |
879 | spin_unlock(&sda_lock); | 879 | spin_unlock_irqrestore(&chsc_page_lock, flags); |
880 | return ret; | 880 | return ret; |
881 | } | 881 | } |
882 | 882 | ||
@@ -898,10 +898,9 @@ chsc_determine_css_characteristics(void) | |||
898 | u32 chsc_char[508]; | 898 | u32 chsc_char[508]; |
899 | } __attribute__ ((packed)) *scsc_area; | 899 | } __attribute__ ((packed)) *scsc_area; |
900 | 900 | ||
901 | scsc_area = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); | 901 | spin_lock_irq(&chsc_page_lock); |
902 | if (!scsc_area) | 902 | memset(chsc_page, 0, PAGE_SIZE); |
903 | return -ENOMEM; | 903 | scsc_area = chsc_page; |
904 | |||
905 | scsc_area->request.length = 0x0010; | 904 | scsc_area->request.length = 0x0010; |
906 | scsc_area->request.code = 0x0010; | 905 | scsc_area->request.code = 0x0010; |
907 | 906 | ||
@@ -921,7 +920,7 @@ chsc_determine_css_characteristics(void) | |||
921 | CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", | 920 | CIO_CRW_EVENT(2, "chsc: scsc failed (rc=%04x)\n", |
922 | scsc_area->response.code); | 921 | scsc_area->response.code); |
923 | exit: | 922 | exit: |
924 | free_page ((unsigned long) scsc_area); | 923 | spin_unlock_irq(&chsc_page_lock); |
925 | return result; | 924 | return result; |
926 | } | 925 | } |
927 | 926 | ||
@@ -976,29 +975,29 @@ int chsc_sstpi(void *page, void *result, size_t size) | |||
976 | return (rr->response.code == 0x0001) ? 0 : -EIO; | 975 | return (rr->response.code == 0x0001) ? 0 : -EIO; |
977 | } | 976 | } |
978 | 977 | ||
979 | static struct { | ||
980 | struct chsc_header request; | ||
981 | u32 word1; | ||
982 | struct subchannel_id sid; | ||
983 | u32 word3; | ||
984 | struct chsc_header response; | ||
985 | u32 word[11]; | ||
986 | } __attribute__ ((packed)) siosl_area __attribute__ ((__aligned__(PAGE_SIZE))); | ||
987 | |||
988 | int chsc_siosl(struct subchannel_id schid) | 978 | int chsc_siosl(struct subchannel_id schid) |
989 | { | 979 | { |
980 | struct { | ||
981 | struct chsc_header request; | ||
982 | u32 word1; | ||
983 | struct subchannel_id sid; | ||
984 | u32 word3; | ||
985 | struct chsc_header response; | ||
986 | u32 word[11]; | ||
987 | } __attribute__ ((packed)) *siosl_area; | ||
990 | unsigned long flags; | 988 | unsigned long flags; |
991 | int ccode; | 989 | int ccode; |
992 | int rc; | 990 | int rc; |
993 | 991 | ||
994 | spin_lock_irqsave(&siosl_lock, flags); | 992 | spin_lock_irqsave(&chsc_page_lock, flags); |
995 | memset(&siosl_area, 0, sizeof(siosl_area)); | 993 | memset(chsc_page, 0, PAGE_SIZE); |
996 | siosl_area.request.length = 0x0010; | 994 | siosl_area = chsc_page; |
997 | siosl_area.request.code = 0x0046; | 995 | siosl_area->request.length = 0x0010; |
998 | siosl_area.word1 = 0x80000000; | 996 | siosl_area->request.code = 0x0046; |
999 | siosl_area.sid = schid; | 997 | siosl_area->word1 = 0x80000000; |
998 | siosl_area->sid = schid; | ||
1000 | 999 | ||
1001 | ccode = chsc(&siosl_area); | 1000 | ccode = chsc(siosl_area); |
1002 | if (ccode > 0) { | 1001 | if (ccode > 0) { |
1003 | if (ccode == 3) | 1002 | if (ccode == 3) |
1004 | rc = -ENODEV; | 1003 | rc = -ENODEV; |
@@ -1008,17 +1007,16 @@ int chsc_siosl(struct subchannel_id schid) | |||
1008 | schid.ssid, schid.sch_no, ccode); | 1007 | schid.ssid, schid.sch_no, ccode); |
1009 | goto out; | 1008 | goto out; |
1010 | } | 1009 | } |
1011 | rc = chsc_error_from_response(siosl_area.response.code); | 1010 | rc = chsc_error_from_response(siosl_area->response.code); |
1012 | if (rc) | 1011 | if (rc) |
1013 | CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n", | 1012 | CIO_MSG_EVENT(2, "chsc: siosl failed for 0.%x.%04x (rc=%04x)\n", |
1014 | schid.ssid, schid.sch_no, | 1013 | schid.ssid, schid.sch_no, |
1015 | siosl_area.response.code); | 1014 | siosl_area->response.code); |
1016 | else | 1015 | else |
1017 | CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n", | 1016 | CIO_MSG_EVENT(4, "chsc: siosl succeeded for 0.%x.%04x\n", |
1018 | schid.ssid, schid.sch_no); | 1017 | schid.ssid, schid.sch_no); |
1019 | out: | 1018 | out: |
1020 | spin_unlock_irqrestore(&siosl_lock, flags); | 1019 | spin_unlock_irqrestore(&chsc_page_lock, flags); |
1021 | |||
1022 | return rc; | 1020 | return rc; |
1023 | } | 1021 | } |
1024 | EXPORT_SYMBOL_GPL(chsc_siosl); | 1022 | EXPORT_SYMBOL_GPL(chsc_siosl); |
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 6d669dd0dd84..852b61fc56ea 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -66,7 +66,7 @@ extern void chsc_init_cleanup(void); | |||
66 | extern int chsc_enable_facility(int); | 66 | extern int chsc_enable_facility(int); |
67 | struct channel_subsystem; | 67 | struct channel_subsystem; |
68 | extern int chsc_secm(struct channel_subsystem *, int); | 68 | extern int chsc_secm(struct channel_subsystem *, int); |
69 | int __chsc_do_secm(struct channel_subsystem *css, int enable, void *page); | 69 | int __chsc_do_secm(struct channel_subsystem *css, int enable); |
70 | 70 | ||
71 | int chsc_chp_vary(struct chp_id chpid, int on); | 71 | int chsc_chp_vary(struct chp_id chpid, int on); |
72 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, | 72 | int chsc_determine_channel_path_desc(struct chp_id chpid, int fmt, int rfmt, |
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index fa1ad3aab66f..5e1235c6aba0 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -790,7 +790,6 @@ static struct notifier_block css_reboot_notifier = { | |||
790 | static int css_power_event(struct notifier_block *this, unsigned long event, | 790 | static int css_power_event(struct notifier_block *this, unsigned long event, |
791 | void *ptr) | 791 | void *ptr) |
792 | { | 792 | { |
793 | void *secm_area; | ||
794 | int ret, i; | 793 | int ret, i; |
795 | 794 | ||
796 | switch (event) { | 795 | switch (event) { |
@@ -806,15 +805,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event, | |||
806 | mutex_unlock(&css->mutex); | 805 | mutex_unlock(&css->mutex); |
807 | continue; | 806 | continue; |
808 | } | 807 | } |
809 | secm_area = (void *)get_zeroed_page(GFP_KERNEL | | 808 | if (__chsc_do_secm(css, 0)) |
810 | GFP_DMA); | ||
811 | if (secm_area) { | ||
812 | if (__chsc_do_secm(css, 0, secm_area)) | ||
813 | ret = NOTIFY_BAD; | ||
814 | free_page((unsigned long)secm_area); | ||
815 | } else | ||
816 | ret = NOTIFY_BAD; | 809 | ret = NOTIFY_BAD; |
817 | |||
818 | mutex_unlock(&css->mutex); | 810 | mutex_unlock(&css->mutex); |
819 | } | 811 | } |
820 | break; | 812 | break; |
@@ -830,15 +822,8 @@ static int css_power_event(struct notifier_block *this, unsigned long event, | |||
830 | mutex_unlock(&css->mutex); | 822 | mutex_unlock(&css->mutex); |
831 | continue; | 823 | continue; |
832 | } | 824 | } |
833 | secm_area = (void *)get_zeroed_page(GFP_KERNEL | | 825 | if (__chsc_do_secm(css, 1)) |
834 | GFP_DMA); | ||
835 | if (secm_area) { | ||
836 | if (__chsc_do_secm(css, 1, secm_area)) | ||
837 | ret = NOTIFY_BAD; | ||
838 | free_page((unsigned long)secm_area); | ||
839 | } else | ||
840 | ret = NOTIFY_BAD; | 826 | ret = NOTIFY_BAD; |
841 | |||
842 | mutex_unlock(&css->mutex); | 827 | mutex_unlock(&css->mutex); |
843 | } | 828 | } |
844 | /* search for subchannels, which appeared during hibernation */ | 829 | /* search for subchannels, which appeared during hibernation */ |
@@ -867,10 +852,7 @@ static int __init css_bus_init(void) | |||
867 | if (ret) | 852 | if (ret) |
868 | return ret; | 853 | return ret; |
869 | 854 | ||
870 | ret = chsc_determine_css_characteristics(); | 855 | chsc_determine_css_characteristics(); |
871 | if (ret == -ENOMEM) | ||
872 | goto out; | ||
873 | |||
874 | /* Try to enable MSS. */ | 856 | /* Try to enable MSS. */ |
875 | ret = chsc_enable_facility(CHSC_SDA_OC_MSS); | 857 | ret = chsc_enable_facility(CHSC_SDA_OC_MSS); |
876 | if (ret) | 858 | if (ret) |